Merge branch 'master' into feature/v1-actions-execute-command-8034

This commit is contained in:
Alexander A. Klimov 2020-10-28 18:37:08 +01:00
commit bb851b0558
66 changed files with 1519 additions and 548 deletions

21
.github/workflows/docker.yml vendored Normal file
View File

@ -0,0 +1,21 @@
name: Docker image
on:
pull_request: {}
push:
branches:
- master
release:
types:
- published
jobs:
docker:
runs-on: ubuntu-latest
steps:
- name: Docker image
uses: Icinga/docker-icinga2@master
env:
INPUT_TOKEN: '${{ github.token }}'
DOCKER_HUB_PASSWORD: '${{ secrets.DOCKER_HUB_PERSONAL_TOKEN }}'

View File

@ -11,6 +11,7 @@ jobs:
name: .deb
strategy:
fail-fast: false
matrix:
distro:
- name: debian
@ -19,9 +20,6 @@ jobs:
- name: debian
codename: stretch
has32bit: true
- name: debian
codename: jessie
has32bit: true
- name: ubuntu
codename: focal
has32bit: false
@ -63,7 +61,8 @@ jobs:
uses: actions/cache@v1
with:
path: deb-icinga2/ccache
key: '${{ matrix.distro.name }}/${{ matrix.distro.codename }}-ccache'
key: |-
${{ matrix.distro.name }}/${{ matrix.distro.codename }}-ccache-${{ hashFiles('deb-icinga2/ccache') }}
- name: Binary x64
run: |
@ -113,16 +112,11 @@ jobs:
-e ICINGA_BUILD_TYPE=snapshot \
registry.icinga.com/build-docker/${{ matrix.distro.name }}/${{ matrix.distro.codename }}:x86 \
icinga-build-test
- name: Artifacts
uses: actions/upload-artifact@v1
with:
name: '${{ matrix.distro.name }}-${{ matrix.distro.codename }}-packages'
path: deb-icinga2/build
rpm:
name: .rpm
strategy:
fail-fast: false
matrix:
distro:
- name: centos
@ -197,7 +191,8 @@ jobs:
uses: actions/cache@v1
with:
path: rpm-icinga2/ccache
key: '${{ matrix.distro.name }}/${{ matrix.distro.release }}-ccache'
key: |-
${{ matrix.distro.name }}/${{ matrix.distro.release }}-ccache-${{ hashFiles('rpm-icinga2/ccache') }}
- name: Binary
if: "steps.vars.outputs.CAN_BUILD == 'true'"
@ -228,10 +223,75 @@ jobs:
-e ICINGA_BUILD_TYPE=snapshot \
registry.icinga.com/build-docker/${{ matrix.distro.name }}/${{ matrix.distro.release }} \
icinga-build-test
raspbian:
name: Raspbian
- name: Artifacts
if: "steps.vars.outputs.CAN_BUILD == 'true'"
uses: actions/upload-artifact@v1
strategy:
fail-fast: false
matrix:
codename:
- buster
runs-on: ubuntu-latest
steps:
- name: Checkout HEAD
uses: actions/checkout@v1
- name: qemu-user-static
run: |
set -exo pipefail
sudo apt-get update
DEBIAN_FRONTEND=noninteractive sudo apt-get install -y qemu-user-static
- name: raspbian-icinga2
run: |
set -exo pipefail
git clone https://git.icinga.com/packaging/raspbian-icinga2.git
chmod o+w raspbian-icinga2
- name: Restore/backup ccache
id: ccache
uses: actions/cache@v1
with:
name: '${{ matrix.distro.name }}-${{ matrix.distro.release }}-packages'
path: rpm-icinga2/build
path: raspbian-icinga2/ccache
key: |-
raspbian/${{ matrix.codename }}-ccache-${{ hashFiles('raspbian-icinga2/ccache') }}
- name: Binary
run: |
set -exo pipefail
git checkout -B master
if [ -e raspbian-icinga2/ccache ]; then
chmod -R o+w raspbian-icinga2/ccache
fi
docker run --rm \
-v "$(pwd)/raspbian-icinga2:/raspbian-icinga2" \
-v "$(pwd)/.git:/icinga2.git:ro" \
-w /raspbian-icinga2 \
-e ICINGA_BUILD_PROJECT=icinga2 \
-e ICINGA_BUILD_TYPE=snapshot \
-e UPSTREAM_GIT_URL=file:///icinga2.git \
-e ICINGA_BUILD_DEB_DEFAULT_ARCH=armhf \
registry.icinga.com/build-docker/raspbian/${{ matrix.codename }} \
icinga-build-package
# Setting up icinga2-bin (2.12.0+rc1.25.g5d1c82a3d.20200526.0754+buster-0) ...
# enabling default icinga2 features
# qemu:handle_cpu_signal received signal outside vCPU context @ pc=0x6015c75c
# qemu:handle_cpu_signal received signal outside vCPU context @ pc=0x6015c75c
# qemu:handle_cpu_signal received signal outside vCPU context @ pc=0x600016ea
# dpkg: error processing package icinga2-bin (--configure):
# installed icinga2-bin package post-installation script subprocess returned error exit status 127
#
# - name: Test
# run: |
# set -exo pipefail
# docker run --rm \
# -v "$(pwd)/raspbian-icinga2:/raspbian-icinga2" \
# -w /raspbian-icinga2 \
# -e ICINGA_BUILD_PROJECT=icinga2 \
# -e ICINGA_BUILD_TYPE=snapshot \
# -e ICINGA_BUILD_DEB_DEFAULT_ARCH=armhf \
# registry.icinga.com/build-docker/raspbian/${{ matrix.codename }} \
# icinga-build-test

View File

@ -12,6 +12,7 @@ Alexander A. Klimov <alexander.klimov@icinga.com> <alexander.klimov@icinga.com>
<jason.young@velaspan.com> <jyoung15@gmail.com>
<jo.goossens@hosted-power.com> <sales@hosted-power.com>
<johannes.meyer@icinga.com> <johannes.meyer@netways.de>
<julian.brost@icinga.com> <julian@0x4a42.net>
<lars.engels@0x20.net> <lars@0x20.net>
<lennart.betz@icinga.com> <lennart.betz@netways.de>
<marius@graylog.com> <marius@torch.sh>
@ -24,6 +25,7 @@ Alexander A. Klimov <alexander.klimov@icinga.com> <alexander.klimov@icinga.com>
<thomas.widhalm@icinga.com> <thomas.widhalm@netways.de>
<thomas.widhalm@icinga.com> <widhalmt@widhalmt.or.at>
<tobias.vonderkrone@profitbricks.com> <tobias@vonderkrone.info>
Alex <alexp710@hotmail.com> <alexp710@hotmail.com>
Carsten Köbke <carsten.koebke@gmx.de> Carsten Koebke <carsten.koebke@koebbes.de>
Claudio Kuenzler <ck@claudiokuenzler.com>
Diana Flach <diana.flach@icinga.com> <crunsher@bamberg.ccc.de>
@ -32,15 +34,20 @@ Diana Flach <diana.flach@icinga.com> <jean-marcel.flach@netways.de>
Diana Flach <diana.flach@icinga.com> Jean Flach <jean-marcel.flach@icinga.com>
Dolf Schimmel <dolf@transip.nl> <dolf@dolfschimmel.nl>
Gunnar Beutner <gunnar.beutner@icinga.com> <icinga@net-icinga2.adm.netways.de>
Henrik Triem <henrik.triem@icinga.com> <henrik.triem@netways.de>
Henrik Triem <henrik.triem@icinga.com> Henrik Triem <43344334+htriem@users.noreply.github.com>
<henrik.triem@icinga.com> <Henrik.Triem@icinga.com>
Jens Schanz <jens.schanz@mueller.de> <mail@jensschanz.de>
Jens Schanz <jens.schanz@mueller.de> Schanz, Jens <jens.schanz@mueller.de>
Kálmán „KAMI” Szalai <kami911@gmail.com> <kami911@gmail.com>
Marianne Spiller <github@spiller.me>
Markus Waldmüller <markus.waldmueller@netways.de>
Michael Insel <mcktr55@gmail.com> <mcktr55@gmail.com>
Michael Insel <mcktr55@gmail.com> <michael@email.de>
Michael Insel <mcktr55@gmail.com> <michael@insel.email>
nemtrif <ntrifunovic@hotmail.com> <nemtrif@users.noreply.github.com>
nemtrif <ntrifunovic@hotmail.com> <ntrifunovic@hotmail.com>
Robin O'Brien <robin@labs.epiuse.com> <robinjohnobrien@gmail.com>
Roman Gerhardt <roman.gerhardt@cbc-x.com> <roman.gerhardt@cbc-x.com>
Sebastian Chrostek <sebastian@chrostek.net> <sebastian@chrostek.net>
Thomas Gelf <thomas.gelf@icinga.com> <thomas@gelf.net>

17
AUTHORS
View File

@ -23,6 +23,7 @@ Arnd Hannemann <arnd@arndnet.de>
Assaf Flatto <assaf@aikilinux.com>
azthec <azthec@users.noreply.github.com>
BarbUk <julien.virey@gmail.com>
Bård Dahlmo-Lerbæk <bard.dahlmo-lerbaek@skatteetaten.no>
Bas Couwenberg <sebastic@xs4all.nl>
bascarsija <bascarsija.dev@gmail.com>
Bastian Guse <bguse@nocopy.de>
@ -36,7 +37,6 @@ Brendan Jurd <direvus@gmail.com>
Brian De Wolf <git@bldewolf.com>
Brian Dockter <specus@gmail.com>
Bruno Lingner <mail@hugo.ro>
Bård Dahlmo-Lerbæk <bard.dahlmo-lerbaek@skatteetaten.no>
Carlos Cesario <carloscesario@gmail.com>
Carsten Köbke <carsten.koebke@gmx.de>
Chris Boot <crb@tiger-computing.co.uk>
@ -48,7 +48,6 @@ Christian Lehmann <christian_lehmann@gmx.de>
Christian Loos <cloos@netsandbox.de>
Christian Schmidt <github@chsc.dk>
Christopher Schirner <schinken@bamberg.ccc.de>
chrostek <sebastian@chrostek.net>
Claudio Bilotta <bilottalove@gmail.com>
Claudio Kuenzler <ck@claudiokuenzler.com>
Conrad Clement <cclement@printeron.com>
@ -72,6 +71,7 @@ Edgar Fuß <ef@math.uni-bonn.de>
Eduard Güldner <eduard.gueldner@gmail.com>
Edvin Seferovic <edvin@seferovic.net>
Elias Ohm <eohm@novomind.com>
Élie Bouttier <elie@bouttier.eu>
Eric Lippmann <eric.lippmann@icinga.com>
Evgeni Golov <evgeni@golov.de>
Ewoud Kohl van Wijngaarden <ewoud@kohlvanwijngaarden.nl>
@ -80,13 +80,11 @@ fbachmann <bachmann.f@gmail.com>
Federico Cuello <federico.cuello@sociomantic.com>
Federico Pires <federico.pires@upsight.com>
Ferdi Gueran <ferdi.gueran@nextevolution.de>
fluxX04 <alexp710@hotmail.com>
Francesco Colista <fcolista@alpinelinux.org>
Gaël Beaudoin <gaboo@gaboo.org>
Georg Faerber <georg@riseup.net>
Georg Haas <hax404foogit@hax404.de>
Gerd von Egidy <gerd@egidy.de>
Gerhardt Roman <roman.gerhardt@cbc-x.com>
gitmopp <mopp@gmx.net>
Glauco Vinicius <gl4uc0@gmail.com>
Greg Hewgill <greg@hewgill.com>
@ -98,7 +96,6 @@ Harald Laabs <github@dasr.de>
Heike Jurzik <icinga@huhnix.org>
Hendrik Röder <hendrik.biz@gmail.com>
Henrik Triem <henrik.triem@icinga.com>
htriem <henrik.triem@netways.de>
Ian Kelling <ian@iankelling.org>
Ildar Hizbulin <hizel@vyborg.ru>
Irina Kaprizkina <ikapriz@gmail.com>
@ -115,6 +112,7 @@ Jens Link <jenslink@quux.de>
Jens Schanz <jens.schanz@mueller.de>
Jeon Sang Wan <maxswjeon@naver.com>
Jeremy Armstrong <lepubel@gmail.com>
Jérôme Drouet <jerome.drouet@gmail.com>
Jesse Morgan <morgajel@gmail.com>
Jo Goossens <jo.goossens@hosted-power.com>
Johannes Meyer <johannes.meyer@icinga.com>
@ -122,15 +120,13 @@ Jonas Meurer <jonas@freesources.org>
Jordi van Scheijen <jordi.vanscheijen@solvinity.com>
Joseph L. Casale <jcasale@activenetwerx.com>
jre3brg <jorge.rebelo@pt.bosch.com>
Julian Brost <julian@0x4a42.net>
Jérôme Drouet <jerome.drouet@gmail.com>
Julian Brost <julian.brost@icinga.com>
K0nne <34264690+K0nne@users.noreply.github.com>
Kai Goller <kai.goller@netways.de>
Kálmán „KAMI” Szalai <kami911@gmail.com>
kiba <zombie32@gmail.com>
Konstantin Kelemen <konstantin@kel.mn>
krishna <gskrishna44@gmail.com>
Kálmán Szalai - KAMI <kami911@gmail.com>
Kálmán „KAMI” Szalai <kami911@gmail.com>
Lars Engels <lars.engels@0x20.net>
Lars Krüger <krueger-lars@web.de>
Leah Oswald <mail@leahoswald.de>
@ -182,7 +178,6 @@ Mirco Bauer <meebey@meebey.net>
Mirko Nardin <mirko.nardin@gmx.net>
mocruz <mocruz@theworkshop.com>
Muhammad Mominul Huque <nahidbinbaten1995@gmail.com>
Nemanja Trifunovic <ntrifunovic@hotmail.com>
nemtrif <ntrifunovic@hotmail.com>
Nicolai <nbuchwitz@users.noreply.github.com>
Nicolas Limage <github@xephon.org>
@ -260,6 +255,6 @@ Winfried Angele <winfried.angele@gmail.com>
Wolfgang Nieder <wnd@gmx.net>
Yannick Charton <tontonitch-pro@yahoo.fr>
Yohan Jarosz <yohanjarosz@yahoo.fr>
Yonas Habteab <yonas.habteab@icinga.com>
Zachary McGibbon <zachary.mcgibbon@gmail.com>
Zoltan Nagy <abesto@abesto.net>
Élie Bouttier <elie@bouttier.eu>

View File

@ -7,6 +7,122 @@ documentation before upgrading to a new release.
Released closed milestones can be found on [GitHub](https://github.com/Icinga/icinga2/milestones?state=closed).
## 2.12.0 (2020-08-05)
[Issue and PRs](https://github.com/Icinga/icinga2/issues?utf8=%E2%9C%93&q=milestone%3A2.12.0)
### Notes
Upgrading docs: https://icinga.com/docs/icinga2/snapshot/doc/16-upgrading-icinga-2/#upgrading-to-v212
Thanks to all contributors:
[Ant1x](https://github.com/Icinga/icinga2/pulls?q=is%3Apr+author%3AAnt1x+milestone%3A2.12.0),
[azthec](https://github.com/Icinga/icinga2/pulls?q=is%3Apr+author%3Aazthec+milestone%3A2.12.0),
[baurmatt](https://github.com/Icinga/icinga2/pulls?q=is%3Apr+author%3Abaurmatt+milestone%3A2.12.0),
[bootc](https://github.com/Icinga/icinga2/pulls?q=is%3Apr+author%3Abootc+milestone%3A2.12.0),
[Foxeronie](https://github.com/Icinga/icinga2/pulls?q=is%3Apr+author%3AFoxeronie+milestone%3A2.12.0),
[ggzengel](https://github.com/Icinga/icinga2/pulls?q=is%3Apr+author%3Aggzengel+milestone%3A2.12.0),
[islander](https://github.com/Icinga/icinga2/pulls?q=is%3Apr+author%3Aislander+milestone%3A2.12.0),
[joni1993](https://github.com/Icinga/icinga2/pulls?q=is%3Apr+author%3Ajoni1993+milestone%3A2.12.0),
[KAMI911](https://github.com/Icinga/icinga2/pulls?q=is%3Apr+author%3AKAMI911+milestone%3A2.12.0),
[mcktr](https://github.com/Icinga/icinga2/pulls?q=is%3Apr+author%3Amcktr+milestone%3A2.12.0),
[MichalMMac](https://github.com/Icinga/icinga2/pulls?q=is%3Apr+author%3AMichalMMac+milestone%3A2.12.0),
[sebastic](https://github.com/Icinga/icinga2/pulls?q=is%3Apr+author%3Asebastic+milestone%3A2.12.0),
[sthen](https://github.com/Icinga/icinga2/pulls?q=is%3Apr+author%3Asthen+milestone%3A2.12.0),
[unki](https://github.com/Icinga/icinga2/pulls?q=is%3Apr+author%3Aunki+milestone%3A2.12.0),
[vigiroux](https://github.com/Icinga/icinga2/pulls?q=is%3Apr+author%3Avigiroux+milestone%3A2.12.0),
[wopfel](https://github.com/Icinga/icinga2/pulls?q=is%3Apr+author%3Awopfel+milestone%3A2.12.0)
### Breaking changes
* Deprecate Windows plugins in favor of our
[PowerShell plugins](https://github.com/Icinga/icinga-powershell-plugins) #8071
* Deprecate Livestatus #8051
* Refuse acknowledging an already acknowledged checkable #7695
* Config lexer: complain on EOF in heredocs, i.e. `{{{abc<EOF>` #7541
### Enhancements
* Core
* Implement new database backend: Icinga DB #7571
* Re-send notifications previously suppressed by their time periods #7816
* API
* Host/Service: Add `acknowledgement_last_change` and `next_update` attributes #7881 #7534
* Improve error message for POST queries #7681
* /v1/actions/remove-comment: let users specify themselves #7646
* /v1/actions/remove-downtime: let users specify themselves #7645
* /v1/config/stages: Add 'activate' parameter #7535
* CLI
* Add `pki verify` command for better TLS certificate troubleshooting #7843
* Add OpenSSL version to 'Build' section in --version #7833
* Improve experience with 'Node Setup for Agents/Satellite' #7835
* DSL
* Add `get_template()` and `get_templates()` #7632
* `MacroProcessor::ResolveArguments()`: skip null argument values #7567
* Fix crash due to dependency apply rule with `ignore_on_error` and non-existing parent #7538
* Introduce ternary operator (`x ? y : z`) #7442
* LegacyTimePeriod: support specifying seconds #7439
* Add support for Lambda Closures (`() use(x) => x and () use(x) => { return x }`) #7417
* ITL
* Add notemp parameter to oracle health #7748
* Add extended checks options to snmp-interface command template #7602
* Add file age check for Windows command definition #7540
* Docs
* Development: Update debugging instructions #7867
* Add new API clients #7859
* Clarify CRITICAL vs. UNKNOWN #7665
* Explicitly explain how to disable freshness checks #7664
* Update installation for RHEL/CentOS 8 and SLES 15 #7640
* Add Powershell example to validate the certificate #7603
* Misc
* Don't send `event::Heartbeat` to unauthenticated peers #7747
* OpenTsdbWriter: Add custom tag support #7357
### Bugfixes
* Core
* Fix JSON-RPC crashes #7532 #7737
* Fix zone definitions in zones #7546
* Fix deadlock during start on OpenBSD #7739
* Consider PENDING not a problem #7685
* Fix zombie processes after reload #7606
* Don't wait for checks to finish during reload #7894
* Cluster
* Fix segfault during heartbeat timeout with clients not yet signed #7970
* Make the config update process mutually exclusive (Prevents file system race conditions) #7936
* Fix `check_timeout` not being forwarded to agent command endpoints #7861
* Config sync: Use a more friendly message when configs are equal and don't need a reload #7811
* Fix open connections when agent waits for CA approval #7686
* Consider a JsonRpcConnection alive on a single byte of TLS payload, not only on a whole message #7836
* Send JsonRpcConnection heartbeat every 20s instead of 10s #8102
* Use JsonRpcConnection heartbeat only to update connection liveness (m\_Seen) #8142
* Fix TLS context not being updated on signed certificate messages on agents #7654
* API
* Close connections w/o successful TLS handshakes after 10s #7809
* Handle permission exceptions soon enough, returning 404 #7528
* SELinux
* Fix safe-reload #7858
* Allow direct SMTP notifications #7749
* Windows
* Terminate check processes with UNKNOWN state on timeout #7788
* Ensure that log replay files are properly renamed #7767
* Metrics
* Graphite/OpenTSDB: Ensure that reconnect failure is detected #7765
* Always send 0 as value for thresholds #7696
* Scripts
* Fix notification scripts to stay compatible with Dash #7706
* Fix bash line continuation in mail-host-notification.sh #7701
* Fix notification scripts string comparison #7647
* Service and host mail-notifications: Add line-breaks to very long output #6822
* Set correct UTF-8 email subject header (RFC1342) #6369
* Misc
* DSL: Fix segfault due to passing null as custom function to `Array#{sort,map,reduce,filter,any,all}()` #8053
* CLI: `pki save-cert`: allow to specify --key and --cert for backwards compatibility #7995
* Catch exception when trusted cert is not readable during node setup on agent/satellite #7838
* CheckCommand ssl: Fix wrong parameter `-N` #7741
* Code quality fixes
* Small documentation fixes
## 2.12.0 RC1 (2020-03-13)
[Issue and PRs](https://github.com/Icinga/icinga2/issues?utf8=%E2%9C%93&q=milestone%3A2.12.0)

View File

@ -119,7 +119,9 @@ endif()
# NuGet on Windows requires a semantic versioning, example: 2.10.4.123 (only 4 element, only numeric)
string(REGEX REPLACE "-([0-9]+).*$" ".\\1" ICINGA2_VERSION_SAFE "${ICINGA2_VERSION}")
string(REGEX REPLACE "-[^\\.]*(.*)$" "\\1" ICINGA2_VERSION_SAFE "${ICINGA2_VERSION_SAFE}")
message(STATUS "ICINGA2_VERSION_SAFE=${ICINGA2_VERSION_SAFE}")
string(REGEX REPLACE "^([0-9]+\\.[0-9]+\\.[0-9]+)[\\.]?[0-9]*" "\\1" CHOCO_VERSION_SHORT "${ICINGA2_VERSION_SAFE}")
message(STATUS "ICINGA2_VERSION_SAFE=${ICINGA2_VERSION_SAFE} CHOCO_VERSION_SHORT=${CHOCO_VERSION_SHORT}")
if(WIN32)
set(Boost_USE_STATIC_LIBS ON)
@ -518,4 +520,4 @@ if(WIN32)
)
endif()
include(CPack)
include(CPack)

View File

@ -5,7 +5,6 @@
- [1. Preparations](#preparations)
- [1.1. Issues](#issues)
- [1.2. Backport Commits](#backport-commits)
- [1.3. Authors](#authors)
- [2. Version](#version)
- [3. Changelog](#changelog)
- [4. Git Tag](#git-tag)
@ -15,7 +14,7 @@
- [6. Build Server](#build-infrastructure)
- [7. Release Tests](#release-tests)
- [8. GitHub Release](#github-release)
- [9. Chocolatey](#chocolatey)
- [9. Docker](#docker)
- [10. Post Release](#post-release)
- [10.1. Online Documentation](#online-documentation)
- [10.2. Announcement](#announcement)
@ -49,14 +48,6 @@ Check issues at https://github.com/Icinga/icinga2
For minor versions you need to manually backports any and all commits from the
master branch which should be part of this release.
### Authors <a id="authors"></a>
Update the [.mailmap](.mailmap) and [AUTHORS](AUTHORS) files:
```
git checkout master
git log --use-mailmap | grep '^Author:' | cut -f2- -d' ' | sort -f | uniq > AUTHORS
```
## Version <a id="version"></a>
@ -88,7 +79,7 @@ git tag -s -m "Version $VERSION" v$VERSION
Push the tag:
```
git push --tags
git push origin v$VERSION
```
**For major releases:** Create a new `support` branch:
@ -112,7 +103,7 @@ cd $HOME/dev/icinga/packaging
### RPM Packages <a id="rpm-packages"></a>
```
git clone git@git.icinga.com:icinga/rpm-icinga2.git && cd rpm-icinga2
git clone git@git.icinga.com:packaging/rpm-icinga2.git && cd rpm-icinga2
```
### DEB Packages <a id="deb-packages"></a>
@ -124,39 +115,39 @@ git clone git@git.icinga.com:packaging/deb-icinga2.git && cd deb-icinga2
#### Raspbian Packages
```
git clone git@git.icinga.com:icinga/raspbian-icinga2.git && cd raspbian-icinga2
git clone git@git.icinga.com:packaging/raspbian-icinga2.git && cd raspbian-icinga2
```
### Windows Packages
```
git clone git@git.icinga.com:icinga/windows-icinga2.git && cd windows-icinga2
git clone git@git.icinga.com:packaging/windows-icinga2.git && cd windows-icinga2
```
### Branch Workflow
Checkout `master` and create a new branch.
* For releases use x.x[.x] as branch name (e.g. 2.11 or 2.11.1)
* For releases with revision use x.x.x-n (e.g. 2.11.0-2)
For each support branch in this repo (e.g. support/2.12), there exists a corresponding branch in the packaging repos
(e.g. 2.12). Each package revision is a tagged commit on these branches. When doing a major release, create the new
branch, otherweise switch to the existing one.
### Switch Build Type
Edit file `.gitlab-ci.yml` and comment variable `ICINGA_BUILD_TYPE` out.
Ensure that `ICINGA_BUILD_TYPE` is set to `release` in `.gitlab-ci.yml`. This should only be necessary after creating a
new branch.
```yaml
variables:
...
#ICINGA_BUILD_TYPE: snapshot
ICINGA_BUILD_TYPE: release
...
```
Commit the change.
```
git commit -av -m "Switch build type for $VERSION-1"
git commit -av -m "Switch build type for 2.13"
```
#### RPM Release Preparations
@ -186,6 +177,16 @@ icinga2 (2.11.0-1) icinga; urgency=medium
```
#### Windows Release Preparations
Update the file `.gitlab-ci.yml`:
```
sed -i "s/^ UPSTREAM_GIT_BRANCH: .*/ UPSTREAM_GIT_BRANCH: v$VERSION/g" .gitlab-ci.yml
sed -i "s/^ ICINGA_FORCE_VERSION: .*/ ICINGA_FORCE_VERSION: v$VERSION/g" .gitlab-ci.yml
```
### Release Commit
Commit the changes and push the branch.
@ -300,24 +301,28 @@ The release body should contain a short changelog, with links
into the roadmap, changelog and blogpost.
## Chocolatey <a id="chocolatey"></a>
## Docker <a id="docker"></a>
Navigate to the git repository on your Windows box which
already has chocolatey installed. Pull/checkout the release.
> Only for final versions (not for RCs).
Create the nupkg package (or use the one generated on https://packages.icinga.com/windows):
Once the release has been published on GitHub, wait for its
[GitHub actions](https://github.com/Icinga/icinga2/actions) to complete.
```
cpack
```
```bash
VERSION=2.12.1
Fetch the API key from https://chocolatey.org/account and use the `choco push`
command line.
TAGS=(2.12)
#TAGS=(2.12 2 latest)
```
choco apikey --key xxx --source https://push.chocolatey.org/
docker pull icinga/icinga2:$VERSION
choco push Icinga2-v2.11.0.nupkg --source https://push.chocolatey.org/
for t in "${TAGS[@]}"; do
docker tag icinga/icinga2:$VERSION icinga/icinga2:$t
done
for t in "${TAGS[@]}"; do
docker push icinga/icinga2:$t
done
```

View File

@ -1,2 +1,2 @@
Version: 2.12.0-rc1
Version: 2.12.0
Revision: 1

View File

@ -1,16 +1,17 @@
---
version: 2.11.0.dev.{build}
os: Visual Studio 2017
os: Visual Studio 2019
platform: x64
environment:
BITS: 64
CMAKE_BUILD_TYPE: Debug
CMAKE_GENERATOR: "Visual Studio 15 2017 Win64"
CMAKE_GENERATOR: "Visual Studio 16 2019"
CMAKE_GENERATOR_PLATFORM: x64
# https://www.appveyor.com/docs/windows-images-software/#boost
BOOST_ROOT: 'C:\Libraries\boost_1_67_0'
BOOST_LIBRARYDIR: 'C:\Libraries\boost_1_67_0\lib64-msvc-14.1'
BOOST_ROOT: 'C:\Libraries\boost_1_71_0'
BOOST_LIBRARYDIR: 'C:\Libraries\boost_1_71_0\lib64-msvc-14.2'
# https://www.appveyor.com/docs/windows-images-software/#tools
OPENSSL_ROOT_DIR: 'C:\OpenSSL-v111-Win64'
BISON_BINARY: 'C:\ProgramData\chocolatey\lib\winflexbison3\tools\win_bison.exe'

View File

@ -1,14 +1,6 @@
# Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+
if(WIN32)
find_program(CHOCO_BINARY choco)
configure_file(icinga2.nuspec.cmake icinga2.nuspec)
configure_file(chocolateyInstall.ps1.cmake chocolateyInstall.ps1)
add_custom_target(choco-pkg ALL
COMMAND choco pack
COMMAND ${CMAKE_COMMAND} -E rename ${CMAKE_CURRENT_BINARY_DIR}/icinga2.${ICINGA2_VERSION_SAFE}.nupkg ${CMAKE_CURRENT_BINARY_DIR}/icinga2.nupkg
DEPENDS ${CMAKE_CURRENT_BINARY_DIR}/icinga2.nuspec ${CMAKE_CURRENT_BINARY_DIR}/chocolateyInstall.ps1 chocolateyUninstall.ps1
)
configure_file(chocolateyInstall.ps1.template.cmake chocolateyInstall.ps1.template)
endif()

View File

@ -1,8 +0,0 @@
$packageName = 'icinga2'
$installerType = 'msi'
$url32 = 'https://packages.icinga.com/windows/Icinga2-v${ICINGA2_VERSION_SAFE}-x86.msi'
$url64 = 'https://packages.icinga.com/windows/Icinga2-v${ICINGA2_VERSION_SAFE}-x86_64.msi'
$silentArgs = '/qn /norestart'
$validExitCodes = @(0)
Install-ChocolateyPackage "$packageName" "$installerType" "$silentArgs" "$url32" "$url64" -validExitCodes $validExitCodes

View File

@ -0,0 +1,20 @@
$packageName= 'icinga2'
$toolsDir = "$(Split-Path -Parent $MyInvocation.MyCommand.Definition)"
$url = 'https://packages.icinga.com/windows/Icinga2-v${CHOCO_VERSION_SHORT}-x86.msi'
$url64 = 'https://packages.icinga.com/windows/Icinga2-v${CHOCO_VERSION_SHORT}-x86_64.msi'
$packageArgs = @{
packageName = $packageName
fileType = 'msi'
url = $url
url64bit = $url64
silentArgs = "/qn /norestart"
validExitCodes= @(0)
softwareName = 'Icinga 2*'
checksum = '%CHOCO_32BIT_CHECKSUM%'
checksumType = 'sha256'
checksum64 = '%CHOCO_64BIT_CHECKSUM%'
checksumType64= 'sha256'
}
Install-ChocolateyPackage @packageArgs

View File

@ -10,17 +10,20 @@
<authors>Icinga GmbH</authors>
<owners>Icinga GmbH</owners>
<summary>icinga2 - Monitoring Agent for Windows</summary>
<description>Icinga 2 is an open source monitoring platform which notifies users about host and service outages.</description>
<description>Icinga is an open source monitoring platform which notifies users about host and service outages.</description>
<projectUrl>https://icinga.com/</projectUrl>
<tags>icinga2 agent monitoring admin</tags>
<licenseUrl>https://icinga.com/resources/faq/</licenseUrl>
<tags>icinga2 icinga agent monitoring admin</tags>
<licenseUrl>https://github.com/Icinga/icinga2/blob/master/COPYING</licenseUrl>
<releaseNotes>https://github.com/Icinga/icinga2/blob/master/ChangeLog</releaseNotes>
<docsUrl>https://docs.icinga.com/icinga2/</docsUrl>
<docsUrl>https://icinga.com/docs/icinga2/latest/</docsUrl>
<bugTrackerUrl>https://github.com/Icinga/icinga2/issues</bugTrackerUrl>
<packageSourceUrl>https://github.com/Icinga/icinga2</packageSourceUrl>
<projectSourceUrl>https://github.com/Icinga/icinga2</projectSourceUrl>
<requireLicenseAcceptance>false</requireLicenseAcceptance>
<iconUrl>https://icinga.com/wp-content/uploads/2015/05/icinga_icon_128x128.png</iconUrl>
<iconUrl>https://raw.githubusercontent.com/Icinga/icinga2/master/icinga-app/icinga.ico</iconUrl>
<dependencies>
<dependency id='netfx-4.6.2' />
</dependencies>
</metadata>
<files>
<file src="${CMAKE_CURRENT_BINARY_DIR}/chocolateyInstall.ps1" target="tools" />

View File

@ -607,50 +607,16 @@ $ nano /etc/icinga2/conf.d/templates.conf
Icinga 2 can be used with Icinga Web 2 and a variety of modules.
This chapter explains how to set up Icinga Web 2.
Either Icinga DB or the DB IDO (Database Icinga Data Output) feature for Icinga 2 takes care of
The DB IDO (Database Icinga Data Output) feature for Icinga 2 takes care of
exporting all configuration and status information into a database.
Please choose whether to install [Icinga DB](02-installation.md#configuring-icinga-db) (MySQL only)
or DB IDO ([MySQL](02-installation.md#configuring-db-ido-mysql) or
[PostgreSQL](02-installation.md#configuring-db-ido-postgresql)).
It's recommended to use the newer Icinga DB feature, if you don't need PostgreSQL.
### Configuring Icinga DB <a id="configuring-icinga-db"></a>
First, make sure to setup Icinga DB itself and its database backends (Redis and MySQL) by following the [installation instructions](https://icinga.com/docs/icingadb/latest/doc/02-Installation/).
#### Enabling the Icinga DB feature <a id="enabling-icinga-db"></a>
Icinga 2 provides a configuration file that is installed in
`/etc/icinga2/features-available/icingadb.conf`. You can update
the Redis credentials in this file.
All available attributes are explained in the
[IcingaDB object](09-object-types.md#objecttype-icingadb)
chapter.
You can enable the `icingadb` feature configuration file using
`icinga2 feature enable`:
```
# icinga2 feature enable icingadb
Module 'icingadb' was enabled.
Make sure to restart Icinga 2 for these changes to take effect.
```
Restart Icinga 2.
```
systemctl restart icinga2
```
Alpine Linux:
```
rc-service icinga2 restart
```
Continue with the [webserver setup](02-installation.md#icinga2-user-interface-webserver).
> **Note**
>
> We're currently working on a new data backend called Icinga DB.
> If you want to try the latest release candidate skip to
> the [Icinga DB Chapter](02-installation.md#icingadb).
> Please keep in mind, that this version is not ready for use in
> production and currently only supports MySQL.
### Configuring DB IDO MySQL <a id="configuring-db-ido-mysql"></a>
@ -1168,3 +1134,49 @@ PostgreSQL:
* [Documentation](https://www.postgresql.org/docs/9.3/static/backup.html)
## Icinga DB <a id="icingadb"></a>
Icinga DB is a new data backend currently in development.
It's purpose is to synchronise data between Icinga 2 (Redis) and Icinga Web 2 (MySQL), some day replacing the IDO.
Don't worry, we won't drop support on the IDO any time soon.
> **Note**
> Icinga DB is not ready to be used in production
> and should only be used for testing purposes.
### Configuring Icinga DB <a id="configuring-icinga-db"></a>
First, make sure to setup Icinga DB itself and its database backends (Redis and MySQL) by following the [installation instructions](https://icinga.com/docs/icingadb/latest/doc/02-Installation/).
#### Enabling the Icinga DB feature <a id="enabling-icinga-db"></a>
Icinga 2 provides a configuration file that is installed in
`/etc/icinga2/features-available/icingadb.conf`. You can update
the Redis credentials in this file.
All available attributes are explained in the
[IcingaDB object](09-object-types.md#objecttype-icingadb)
chapter.
You can enable the `icingadb` feature configuration file using
`icinga2 feature enable`:
```
# icinga2 feature enable icingadb
Module 'icingadb' was enabled.
Make sure to restart Icinga 2 for these changes to take effect.
```
Restart Icinga 2.
```
systemctl restart icinga2
```
Alpine Linux:
```
rc-service icinga2 restart
```
Continue with the [webserver setup](02-installation.md#icinga2-user-interface-webserver).

View File

@ -791,7 +791,7 @@ after the installation. Select the check box to proceed.
#### Agent Setup on Windows: Configuration Wizard <a id="distributed-monitoring-setup-agent-windows-configuration-wizard"></a>
On a fresh installation the setup wizard guides you through the initial configuration.
It also provides a mechanism to send a certificate request to the [CSR signing master](distributed-monitoring-setup-sign-certificates-master).
It also provides a mechanism to send a certificate request to the [CSR signing master](06-distributed-monitoring.md#distributed-monitoring-setup-sign-certificates-master).
The following configuration details are required:

View File

@ -1661,6 +1661,11 @@ require the [CompatLogger](09-object-types.md#objecttype-compatlogger) feature e
pointing to the log files using the `compat_log_path` configuration attribute.
This configuration object is available as [livestatus feature](14-features.md#setting-up-livestatus).
> **Note**
>
> This feature is DEPRECATED and will be removed in future releases.
> Check the [roadmap](https://github.com/Icinga/icinga2/milestones).
Examples:
```

View File

@ -1503,6 +1503,13 @@ uptime_since | **Optional.** Show last boot in yyyy-mm-dd HH:MM:SS format (ou
## Windows Plugins for Icinga 2 <a id="windows-plugins"></a>
> **Note**
>
> These plugins are DEPRECATED in favor of our
> [PowerShell Plugins](https://github.com/Icinga/icinga-powershell-plugins)
> and will be removed in a future release.
> Check the [roadmap](https://github.com/Icinga/icinga2/milestones).
To allow a basic monitoring of Windows clients Icinga 2 comes with a set of Windows only plugins. While trying to mirror the functionalities of their linux cousins from the monitoring-plugins package, the differences between Windows and Linux are too big to be able use the same CheckCommands for both systems.
A check-commands-windows.conf comes with Icinga 2, it assumes that the Windows Plugins are installed in the PluginDir set in your constants.conf. To enable them the following include directive is needed in you icinga2.conf:

View File

@ -915,210 +915,6 @@ is running on.
## Livestatus <a id="setting-up-livestatus"></a>
The [MK Livestatus](https://mathias-kettner.de/checkmk_livestatus.html) project
implements a query protocol that lets users query their Icinga instance for
status information. It can also be used to send commands.
The Livestatus component that is distributed as part of Icinga 2 is a
re-implementation of the Livestatus protocol which is compatible with MK
Livestatus.
> **Tip**
>
> Only install the Livestatus feature if your web interface or addon requires
> you to do so.
> [Icinga Web 2](02-installation.md#setting-up-icingaweb2) does not need
> Livestatus.
Details on the available tables and attributes with Icinga 2 can be found
in the [Livestatus Schema](24-appendix.md#schema-livestatus) section.
You can enable Livestatus using icinga2 feature enable:
```
# icinga2 feature enable livestatus
```
After that you will have to restart Icinga 2:
```
# systemctl restart icinga2
```
By default the Livestatus socket is available in `/var/run/icinga2/cmd/livestatus`.
In order for queries and commands to work you will need to add your query user
(e.g. your web server) to the `icingacmd` group:
```
# usermod -a -G icingacmd www-data
```
The Debian packages use `nagios` as the user and group name. Make sure to change `icingacmd` to
`nagios` if you're using Debian.
Change `www-data` to the user you're using to run queries.
In order to use the historical tables provided by the livestatus feature (for example, the
`log` table) you need to have the `CompatLogger` feature enabled. By default these logs
are expected to be in `/var/log/icinga2/compat`. A different path can be set using the
`compat_log_path` configuration attribute.
```
# icinga2 feature enable compatlog
```
### Livestatus Sockets <a id="livestatus-sockets"></a>
In contrast to the Icinga 1.x Addon, Icinga 2 supports two socket types:
* Unix socket (default)
* TCP socket
Details on the configuration can be found in the [LivestatusListener](09-object-types.md#objecttype-livestatuslistener)
object configuration.
### Livestatus GET Queries <a id="livestatus-get-queries"></a>
> **Note**
>
> All Livestatus queries require an additional empty line as query end identifier.
> The `nc` tool (`netcat`) provides the `-U` parameter to communicate using
> a unix socket.
There also is a Perl module available in CPAN for accessing the Livestatus socket
programmatically: [Monitoring::Livestatus](http://search.cpan.org/~nierlein/Monitoring-Livestatus-0.74/)
Example using the unix socket:
```
# echo -e "GET services\n" | /usr/bin/nc -U /var/run/icinga2/cmd/livestatus
Example using the tcp socket listening on port `6558`:
# echo -e 'GET services\n' | netcat 127.0.0.1 6558
# cat servicegroups <<EOF
GET servicegroups
EOF
(cat servicegroups; sleep 1) | netcat 127.0.0.1 6558
```
### Livestatus COMMAND Queries <a id="livestatus-command-queries"></a>
A list of available external commands and their parameters can be found [here](24-appendix.md#external-commands-list-detail)
```
$ echo -e 'COMMAND <externalcommandstring>' | netcat 127.0.0.1 6558
```
### Livestatus Filters <a id="livestatus-filters"></a>
and, or, negate
Operator | Negate | Description
----------|----------|-------------
= | != | Equality
~ | !~ | Regex match
=~ | !=~ | Equality ignoring case
~~ | !~~ | Regex ignoring case
< | | Less than
> | | Greater than
<= | | Less than or equal
>= | | Greater than or equal
### Livestatus Stats <a id="livestatus-stats"></a>
Schema: "Stats: aggregatefunction aggregateattribute"
Aggregate Function | Description
-------------------|--------------
sum | &nbsp;
min | &nbsp;
max | &nbsp;
avg | sum / count
std | standard deviation
suminv | sum (1 / value)
avginv | suminv / count
count | ordinary default for any stats query if no aggregate function is defined
Example:
```
GET hosts
Filter: has_been_checked = 1
Filter: check_type = 0
Stats: sum execution_time
Stats: sum latency
Stats: sum percent_state_change
Stats: min execution_time
Stats: min latency
Stats: min percent_state_change
Stats: max execution_time
Stats: max latency
Stats: max percent_state_change
OutputFormat: json
ResponseHeader: fixed16
```
### Livestatus Output <a id="livestatus-output"></a>
* CSV
CSV output uses two levels of array separators: The members array separator
is a comma (1st level) while extra info and host|service relation separator
is a pipe (2nd level).
Separators can be set using ASCII codes like:
```
Separators: 10 59 44 124
```
* JSON
Default separators.
### Livestatus Error Codes <a id="livestatus-error-codes"></a>
Code | Description
----------|--------------
200 | OK
404 | Table does not exist
452 | Exception on query
### Livestatus Tables <a id="livestatus-tables"></a>
Table | Join |Description
--------------|-----------|----------------------------
hosts | &nbsp; | host config and status attributes, services counter
hostgroups | &nbsp; | hostgroup config, status attributes and host/service counters
services | hosts | service config and status attributes
servicegroups | &nbsp; | servicegroup config, status attributes and service counters
contacts | &nbsp; | contact config and status attributes
contactgroups | &nbsp; | contact config, members
commands | &nbsp; | command name and line
status | &nbsp; | programstatus, config and stats
comments | services | status attributes
downtimes | services | status attributes
timeperiods | &nbsp; | name and is inside flag
endpoints | &nbsp; | config and status attributes
log | services, hosts, contacts, commands | parses [compatlog](09-object-types.md#objecttype-compatlogger) and shows log attributes
statehist | hosts, services | parses [compatlog](09-object-types.md#objecttype-compatlogger) and aggregates state change attributes
hostsbygroup | hostgroups | host attributes grouped by hostgroup and its attributes
servicesbygroup | servicegroups | service attributes grouped by servicegroup and its attributes
servicesbyhostgroup | hostgroups | service attributes grouped by hostgroup and its attributes
The `commands` table is populated with `CheckCommand`, `EventCommand` and `NotificationCommand` objects.
A detailed list on the available table attributes can be found in the [Livestatus Schema documentation](24-appendix.md#schema-livestatus).
## Deprecated Features <a id="deprecated-features"></a>
@ -1237,3 +1033,212 @@ object CheckResultReader "reader" {
spool_dir = "/data/check-results"
}
```
### Livestatus <a id="setting-up-livestatus"></a>
> **Note**
>
> This feature is DEPRECATED and will be removed in future releases.
> Check the [roadmap](https://github.com/Icinga/icinga2/milestones).
The [MK Livestatus](https://mathias-kettner.de/checkmk_livestatus.html) project
implements a query protocol that lets users query their Icinga instance for
status information. It can also be used to send commands.
The Livestatus component that is distributed as part of Icinga 2 is a
re-implementation of the Livestatus protocol which is compatible with MK
Livestatus.
> **Tip**
>
> Only install the Livestatus feature if your web interface or addon requires
> you to do so.
> [Icinga Web 2](02-installation.md#setting-up-icingaweb2) does not need
> Livestatus.
Details on the available tables and attributes with Icinga 2 can be found
in the [Livestatus Schema](24-appendix.md#schema-livestatus) section.
You can enable Livestatus using icinga2 feature enable:
```
# icinga2 feature enable livestatus
```
After that you will have to restart Icinga 2:
```
# systemctl restart icinga2
```
By default the Livestatus socket is available in `/var/run/icinga2/cmd/livestatus`.
In order for queries and commands to work you will need to add your query user
(e.g. your web server) to the `icingacmd` group:
```
# usermod -a -G icingacmd www-data
```
The Debian packages use `nagios` as the user and group name. Make sure to change `icingacmd` to
`nagios` if you're using Debian.
Change `www-data` to the user you're using to run queries.
In order to use the historical tables provided by the livestatus feature (for example, the
`log` table) you need to have the `CompatLogger` feature enabled. By default these logs
are expected to be in `/var/log/icinga2/compat`. A different path can be set using the
`compat_log_path` configuration attribute.
```
# icinga2 feature enable compatlog
```
#### Livestatus Sockets <a id="livestatus-sockets"></a>
In contrast to the Icinga 1.x Addon, Icinga 2 supports two socket types:
* Unix socket (default)
* TCP socket
Details on the configuration can be found in the [LivestatusListener](09-object-types.md#objecttype-livestatuslistener)
object configuration.
#### Livestatus GET Queries <a id="livestatus-get-queries"></a>
> **Note**
>
> All Livestatus queries require an additional empty line as query end identifier.
> The `nc` tool (`netcat`) provides the `-U` parameter to communicate using
> a unix socket.
There also is a Perl module available in CPAN for accessing the Livestatus socket
programmatically: [Monitoring::Livestatus](http://search.cpan.org/~nierlein/Monitoring-Livestatus-0.74/)
Example using the unix socket:
```
# echo -e "GET services\n" | /usr/bin/nc -U /var/run/icinga2/cmd/livestatus
Example using the tcp socket listening on port `6558`:
# echo -e 'GET services\n' | netcat 127.0.0.1 6558
# cat servicegroups <<EOF
GET servicegroups
EOF
(cat servicegroups; sleep 1) | netcat 127.0.0.1 6558
```
#### Livestatus COMMAND Queries <a id="livestatus-command-queries"></a>
A list of available external commands and their parameters can be found [here](24-appendix.md#external-commands-list-detail)
```
$ echo -e 'COMMAND <externalcommandstring>' | netcat 127.0.0.1 6558
```
#### Livestatus Filters <a id="livestatus-filters"></a>
and, or, negate
Operator | Negate | Description
----------|----------|-------------
= | != | Equality
~ | !~ | Regex match
=~ | !=~ | Equality ignoring case
~~ | !~~ | Regex ignoring case
< | | Less than
> | | Greater than
<= | | Less than or equal
>= | | Greater than or equal
#### Livestatus Stats <a id="livestatus-stats"></a>
Schema: "Stats: aggregatefunction aggregateattribute"
Aggregate Function | Description
-------------------|--------------
sum | &nbsp;
min | &nbsp;
max | &nbsp;
avg | sum / count
std | standard deviation
suminv | sum (1 / value)
avginv | suminv / count
count | ordinary default for any stats query if no aggregate function is defined
Example:
```
GET hosts
Filter: has_been_checked = 1
Filter: check_type = 0
Stats: sum execution_time
Stats: sum latency
Stats: sum percent_state_change
Stats: min execution_time
Stats: min latency
Stats: min percent_state_change
Stats: max execution_time
Stats: max latency
Stats: max percent_state_change
OutputFormat: json
ResponseHeader: fixed16
```
#### Livestatus Output <a id="livestatus-output"></a>
* CSV
CSV output uses two levels of array separators: The members array separator
is a comma (1st level) while extra info and host|service relation separator
is a pipe (2nd level).
Separators can be set using ASCII codes like:
```
Separators: 10 59 44 124
```
* JSON
Default separators.
#### Livestatus Error Codes <a id="livestatus-error-codes"></a>
Code | Description
----------|--------------
200 | OK
404 | Table does not exist
452 | Exception on query
#### Livestatus Tables <a id="livestatus-tables"></a>
Table | Join |Description
--------------|-----------|----------------------------
hosts | &nbsp; | host config and status attributes, services counter
hostgroups | &nbsp; | hostgroup config, status attributes and host/service counters
services | hosts | service config and status attributes
servicegroups | &nbsp; | servicegroup config, status attributes and service counters
contacts | &nbsp; | contact config and status attributes
contactgroups | &nbsp; | contact config, members
commands | &nbsp; | command name and line
status | &nbsp; | programstatus, config and stats
comments | services | status attributes
downtimes | services | status attributes
timeperiods | &nbsp; | name and is inside flag
endpoints | &nbsp; | config and status attributes
log | services, hosts, contacts, commands | parses [compatlog](09-object-types.md#objecttype-compatlogger) and shows log attributes
statehist | hosts, services | parses [compatlog](09-object-types.md#objecttype-compatlogger) and aggregates state change attributes
hostsbygroup | hostgroups | host attributes grouped by hostgroup and its attributes
servicesbygroup | servicegroups | service attributes grouped by servicegroup and its attributes
servicesbyhostgroup | hostgroups | service attributes grouped by hostgroup and its attributes
The `commands` table is populated with `CheckCommand`, `EventCommand` and `NotificationCommand` objects.
A detailed list on the available table attributes can be found in the [Livestatus Schema documentation](24-appendix.md#schema-livestatus).

View File

@ -1400,6 +1400,40 @@ Message updates will be dropped when:
* Checkable does not exist.
* Origin endpoint's zone is not allowed to access this checkable.
#### event::SetLastCheckStarted <a id="technical-concepts-json-rpc-messages-event-setlastcheckstarted"></a>
> Location: `clusterevents.cpp`
##### Message Body
Key | Value
----------|---------
jsonrpc | 2.0
method | event::SetLastCheckStarted
params | Dictionary
##### Params
Key | Type | Description
---------------------|-----------|------------------
host | String | Host name
service | String | Service name
last\_check\_started | Timestamp | Last check's start time as UNIX timestamp.
##### Functions
Event Sender: `Checkable::OnLastCheckStartedChanged`
Event Receiver: `LastCheckStartedChangedAPIHandler`
##### Permissions
The receiver will not process messages from not configured endpoints.
Message updates will be dropped when:
* Checkable does not exist.
* Origin endpoint's zone is not allowed to access this checkable.
#### event::SuppressedNotifications <a id="technical-concepts-json-rpc-messages-event-setsupressednotifications"></a>
> Location: `clusterevents.cpp`
@ -1434,6 +1468,39 @@ Message updates will be dropped when:
* Checkable does not exist.
* Origin endpoint's zone is not allowed to access this checkable.
#### event::SetSuppressedNotificationTypes <a id="technical-concepts-json-rpc-messages-event-setsuppressednotificationtypes"></a>
> Location: `clusterevents.cpp`
##### Message Body
Key | Value
----------|---------
jsonrpc | 2.0
method | event::SetSuppressedNotificationTypes
params | Dictionary
##### Params
Key | Type | Description
-------------------------|--------|------------------
notification | String | Notification name
supressed\_notifications | Number | Bitmask for suppressed notifications.
##### Functions
Event Sender: `Notification::OnSuppressedNotificationsChanged`
Event Receiver: `SuppressedNotificationTypesChangedAPIHandler`
##### Permissions
The receiver will not process messages from not configured endpoints.
Message updates will be dropped when:
* Notification does not exist.
* Origin endpoint's zone is not allowed to access this notification.
#### event::SetNextNotification <a id="technical-concepts-json-rpc-messages-event-setnextnotification"></a>

View File

@ -1789,8 +1789,7 @@ as community version, free for use for open source projects such as Icinga.
The installation requires ~9GB disk space. [Download](https://www.visualstudio.com/downloads/)
the web installer and start the installation.
Note: Both Visual Studio 2017 and 2019 are covered here. Older versions
are not supported.
Note: Only Visual Studio 2019 is covered here. Older versions are not supported.
You need a free Microsoft account to download and also store your preferences.
@ -1883,7 +1882,6 @@ Icinga needs the development header and library files from the Boost library.
Visual Studio translates into the following compiler versions:
- `msvc-14.1` = Visual Studio 2017
- `msvc-14.2` = Visual Studio 2019
##### Pre-built Binaries
@ -1963,9 +1961,11 @@ CMake uses CPack and NSIS to create the setup executable including all binaries
in addition to setup dialogues and configuration. Therefore well need to install [NSIS](http://nsis.sourceforge.net/Download)
first.
We also need to install the Windows Installer XML (WIX) toolset.
We also need to install the Windows Installer XML (WIX) toolset. This has .NET 3.5 as a dependency which might need a
reboot of the system which is not handled properly by Chocolatey. Therefore install it first and reboot when asked.
```
Enable-WindowsOptionalFeature -FeatureName "NetFx3" -Online
choco install -y wixtoolset
```
@ -1993,7 +1993,6 @@ Build Icinga with specific CMake variables. This generates a new Visual Studio p
Visual Studio translates into the following:
- `msvc-14.1` = Visual Studio 2017
- `msvc-14.2` = Visual Studio 2019
You need to specify the previously installed component paths.
@ -2001,7 +2000,7 @@ You need to specify the previously installed component paths.
Variable | Value | Description
----------------------|----------------------------------------------------------------------|-------------------------------------------------------
`BOOST_ROOT` | `C:\local\boost_1_71_0` | Root path where you've extracted and compiled Boost.
`BOOST_LIBRARYDIR` | Binary: `C:\local\boost_1_71_0\lib64-msvc-14.1`, Source: `C:\local\boost_1_71_0\stage` | Path to the static compiled Boost libraries, directory must contain `lib`.
`BOOST_LIBRARYDIR` | Binary: `C:\local\boost_1_71_0\lib64-msvc-14.2`, Source: `C:\local\boost_1_71_0\stage` | Path to the static compiled Boost libraries, directory must contain `lib`.
`BISON_EXECUTABLE` | `C:\ProgramData\chocolatey\lib\winflexbison\tools\win_bison.exe` | Path to the Bison executable.
`FLEX_EXECUTABLE` | `C:\ProgramData\chocolatey\lib\winflexbison\tools\win_flex.exe` | Path to the Flex executable.
`ICINGA2_WITH_MYSQL` | OFF | Requires extra setup for MySQL if set to `ON`. Not supported for client setups.
@ -2027,9 +2026,8 @@ cd %HOMEPATH%\source\repos\icinga2
The debug MSI package is located in the `debug` directory.
If you did not follow the above steps with Boost binaries
and OpenSSL paths, or using VS 2017, you can still modify
the environment variables.
If you did not follow the above steps with Boost binaries and OpenSSL
paths, you can still modify the environment variables.
```
$env:CMAKE_GENERATOR='Visual Studio 16 2019'

View File

@ -26,12 +26,10 @@ getent group $ICINGA2_GROUP >/dev/null 2>&1 || (echo "Icinga group '$ICINGA2_GRO
getent group $ICINGA2_COMMAND_GROUP >/dev/null 2>&1 || (echo "Icinga command group '$ICINGA2_COMMAND_GROUP' does not exist. Exiting." && exit 6)
if [ ! -e "$ICINGA2_INIT_RUN_DIR" ]; then
mkdir "$ICINGA2_INIT_RUN_DIR"
mkdir "$ICINGA2_INIT_RUN_DIR"/cmd
mkdir -m 755 "$ICINGA2_INIT_RUN_DIR"
mkdir -m 2750 "$ICINGA2_INIT_RUN_DIR"/cmd
fi
chmod 755 "$ICINGA2_INIT_RUN_DIR"
chmod 2750 "$ICINGA2_INIT_RUN_DIR"/cmd
chown -R $ICINGA2_USER:$ICINGA2_COMMAND_GROUP "$ICINGA2_INIT_RUN_DIR"
test -e "$ICINGA2_LOG_DIR" || install -m 750 -o $ICINGA2_USER -g $ICINGA2_COMMAND_GROUP -d "$ICINGA2_LOG_DIR"

View File

@ -64,6 +64,7 @@ set(base_SOURCES
shared-object.hpp
singleton.hpp
socket.cpp socket.hpp
spinlock.cpp spinlock.hpp
stacktrace.cpp stacktrace.hpp
statsfunction.hpp
stdiostream.cpp stdiostream.hpp

View File

@ -342,7 +342,9 @@ void Application::RunEventLoop()
ConfigObject::StopObjects();
Application::GetInstance()->OnShutdown();
UninitializeBase();
#ifdef I2_DEBUG
UninitializeBase(); // Inspired from Exit()
#endif /* I2_DEBUG */
}
bool Application::IsShuttingDown()

View File

@ -83,6 +83,7 @@ static Array::Ptr ArraySort(const std::vector<Value>& args)
std::sort(arr->Begin(), arr->End());
} else {
Function::Ptr function = args[0];
REQUIRE_NOT_NULL(function);
if (vframe->Sandboxed && !function->IsSideEffectFree())
BOOST_THROW_EXCEPTION(ScriptError("Sort function must be side-effect free."));
@ -123,6 +124,7 @@ static Array::Ptr ArrayMap(const Function::Ptr& function)
ScriptFrame *vframe = ScriptFrame::GetCurrentFrame();
Array::Ptr self = static_cast<Array::Ptr>(vframe->Self);
REQUIRE_NOT_NULL(self);
REQUIRE_NOT_NULL(function);
if (vframe->Sandboxed && !function->IsSideEffectFree())
BOOST_THROW_EXCEPTION(ScriptError("Map function must be side-effect free."));
@ -142,6 +144,7 @@ static Value ArrayReduce(const Function::Ptr& function)
ScriptFrame *vframe = ScriptFrame::GetCurrentFrame();
Array::Ptr self = static_cast<Array::Ptr>(vframe->Self);
REQUIRE_NOT_NULL(self);
REQUIRE_NOT_NULL(function);
if (vframe->Sandboxed && !function->IsSideEffectFree())
BOOST_THROW_EXCEPTION(ScriptError("Reduce function must be side-effect free."));
@ -164,6 +167,7 @@ static Array::Ptr ArrayFilter(const Function::Ptr& function)
ScriptFrame *vframe = ScriptFrame::GetCurrentFrame();
Array::Ptr self = static_cast<Array::Ptr>(vframe->Self);
REQUIRE_NOT_NULL(self);
REQUIRE_NOT_NULL(function);
if (vframe->Sandboxed && !function->IsSideEffectFree())
BOOST_THROW_EXCEPTION(ScriptError("Filter function must be side-effect free."));
@ -184,6 +188,7 @@ static bool ArrayAny(const Function::Ptr& function)
ScriptFrame *vframe = ScriptFrame::GetCurrentFrame();
Array::Ptr self = static_cast<Array::Ptr>(vframe->Self);
REQUIRE_NOT_NULL(self);
REQUIRE_NOT_NULL(function);
if (vframe->Sandboxed && !function->IsSideEffectFree())
BOOST_THROW_EXCEPTION(ScriptError("Filter function must be side-effect free."));
@ -202,6 +207,7 @@ static bool ArrayAll(const Function::Ptr& function)
ScriptFrame *vframe = ScriptFrame::GetCurrentFrame();
Array::Ptr self = static_cast<Array::Ptr>(vframe->Self);
REQUIRE_NOT_NULL(self);
REQUIRE_NOT_NULL(function);
if (vframe->Sandboxed && !function->IsSideEffectFree())
BOOST_THROW_EXCEPTION(ScriptError("Filter function must be side-effect free."));

View File

@ -144,3 +144,11 @@ void AsioConditionVariable::Wait(boost::asio::yield_context yc)
boost::system::error_code ec;
m_Timer.async_wait(yc[ec]);
}
/**
 * Cancels the timeout so its callback will not run (if it hasn't already started).
 */
void Timeout::Cancel()
{
// Set the flag first: the coroutine re-checks it both before and after
// the timer wait, so this covers races with an already-expired timer.
m_Cancelled.store(true);
// Best-effort cancel of the pending async_wait; any error is ignored,
// the flag above is the authoritative cancellation signal.
boost::system::error_code ec;
m_Timer.cancel(ec);
}

View File

@ -6,10 +6,12 @@
#include "base/exception.hpp"
#include "base/lazy-init.hpp"
#include "base/logger.hpp"
#include "base/shared-object.hpp"
#include <atomic>
#include <exception>
#include <memory>
#include <thread>
#include <utility>
#include <vector>
#include <stdexcept>
#include <boost/exception/all.hpp>
@ -153,6 +155,56 @@ private:
boost::asio::deadline_timer m_Timer;
};
/**
 * I/O timeout emulator: invokes a callback (as a coroutine on a given
 * executor) once a deadline elapses, unless Cancel() is called first.
 *
 * @ingroup base
 */
class Timeout : public SharedObject
{
public:
DECLARE_PTR_TYPEDEFS(Timeout);
/**
 * Arms the timeout: after timeoutFromNow has elapsed, onTimeout is invoked
 * inside a coroutine spawned on executor — unless Cancel() intervenes.
 */
template<class Executor, class TimeoutFromNow, class OnTimeout>
Timeout(boost::asio::io_context& io, Executor& executor, TimeoutFromNow timeoutFromNow, OnTimeout onTimeout)
: m_Timer(io)
{
// Captured by the coroutine below: keeps this object alive until the
// coroutine finishes, even if all external references are dropped.
Ptr keepAlive (this);
m_Cancelled.store(false);
m_Timer.expires_from_now(std::move(timeoutFromNow));
IoEngine::SpawnCoroutine(executor, [this, keepAlive, onTimeout](boost::asio::yield_context yc) {
// Cancelled before the coroutine even got to run.
if (m_Cancelled.load()) {
return;
}
{
boost::system::error_code ec;
m_Timer.async_wait(yc[ec]);
// Wait error (e.g. the timer was cancelled): no timeout fires.
if (ec) {
return;
}
}
// Re-check: Cancel() may have raced with the timer expiring.
if (m_Cancelled.load()) {
return;
}
// Invoke a local copy of the handler; the captured copy stays untouched.
auto f (onTimeout);
f(std::move(yc));
});
}
void Cancel();
private:
boost::asio::deadline_timer m_Timer;
// Set by Cancel(); checked by the coroutine before calling the handler.
std::atomic<bool> m_Cancelled;
};
}
#endif /* IO_ENGINE_H */

22
lib/base/spinlock.cpp Normal file
View File

@ -0,0 +1,22 @@
/* Icinga 2 | (c) 2020 Icinga GmbH | GPLv2+ */
#include "base/spinlock.hpp"
#include <atomic>
using namespace icinga;
/**
 * Acquires the lock, busy-waiting (spinning) until it becomes available.
 */
void SpinLock::lock()
{
	// Spin until we are the caller that flips the flag from clear to set;
	// the acquire ordering makes the critical section's reads well-defined.
	for (;;) {
		if (!m_Locked.test_and_set(std::memory_order_acquire)) {
			break;
		}
	}
}
bool SpinLock::try_lock()
{
return !m_Locked.test_and_set(std::memory_order_acquire);
}
/**
 * Releases the lock. Expected to be called only by the current holder.
 */
void SpinLock::unlock()
{
// The release ordering pairs with the acquire in lock()/try_lock(),
// publishing all writes made inside the critical section.
m_Locked.clear(std::memory_order_release);
}

35
lib/base/spinlock.hpp Normal file
View File

@ -0,0 +1,35 @@
/* Icinga 2 | (c) 2020 Icinga GmbH | GPLv2+ */
#ifndef SPINLOCK_H
#define SPINLOCK_H
#include <atomic>
namespace icinga
{
/**
 * A spin lock: lock() busy-waits in user space rather than blocking in the
 * kernel, which is cheap when critical sections are very short. The
 * lock()/try_lock()/unlock() interface matches the C++ Lockable requirements,
 * so the class works with std::lock_guard and std::unique_lock.
 *
 * @ingroup base
 */
class SpinLock
{
public:
SpinLock() = default;
// Not copyable or movable: the atomic flag embodies the lock state.
SpinLock(const SpinLock&) = delete;
SpinLock& operator=(const SpinLock&) = delete;
SpinLock(SpinLock&&) = delete;
SpinLock& operator=(SpinLock&&) = delete;
void lock();
bool try_lock();
void unlock();
private:
// Set while the lock is held, clear otherwise.
std::atomic_flag m_Locked = ATOMIC_FLAG_INIT;
};
}
#endif /* SPINLOCK_H */

View File

@ -41,6 +41,8 @@ void StreamLogger::FlushLogTimerHandler()
/**
 * Flushes the underlying output stream, if one is attached.
 */
void StreamLogger::Flush()
{
// Serialize against concurrent users of this logger object.
ObjectLock oLock (this);
// The stream may not (yet/anymore) be set; flushing is best effort.
if (m_Stream)
m_Stream->flush();
}

View File

@ -9,17 +9,53 @@
#include "base/stream.hpp"
#include "base/tlsutility.hpp"
#include "base/fifo.hpp"
#include "base/utility.hpp"
#include <atomic>
#include <memory>
#include <utility>
#include <boost/asio/buffered_stream.hpp>
#include <boost/asio/io_context.hpp>
#include <boost/asio/ip/tcp.hpp>
#include <boost/asio/spawn.hpp>
#include <boost/asio/ssl/context.hpp>
#include <boost/asio/ssl/stream.hpp>
namespace icinga
{
/**
 * Decorator for an async read stream (ARS) that stamps the current wall-clock
 * time into an externally owned double on every read attempt, once a target
 * has been registered via SetSeen() — presumably to track when the peer was
 * last active (confirm against callers).
 */
template<class ARS>
class SeenStream : public ARS
{
public:
// Perfect-forwarding constructor; starts with no timestamp target.
template<class... Args>
SeenStream(Args&&... args) : ARS(std::forward<Args>(args)...)
{
m_Seen.store(nullptr);
}
// Same signature/return type as the wrapped stream's async_read_some().
template<class... Args>
auto async_read_some(Args&&... args) -> decltype(((ARS*)nullptr)->async_read_some(std::forward<Args>(args)...))
{
{
// Record "now" before delegating (i.e. when the read is initiated,
// not when it completes), if a target was registered.
auto seen (m_Seen.load());
if (seen) {
*seen = Utility::GetTime();
}
}
return ((ARS*)this)->async_read_some(std::forward<Args>(args)...);
}
// Registers (or clears, with nullptr) the timestamp target. The pointee
// must outlive this stream or be unregistered first.
inline void SetSeen(double* seen)
{
m_Seen.store(seen);
}
private:
// Where to record the last-read timestamp; null means disabled.
std::atomic<double*> m_Seen;
};
struct UnbufferedAsioTlsStreamParams
{
boost::asio::io_context& IoContext;
@ -27,14 +63,14 @@ struct UnbufferedAsioTlsStreamParams
const String& Hostname;
};
typedef boost::asio::ssl::stream<boost::asio::ip::tcp::socket> AsioTcpTlsStream;
typedef SeenStream<boost::asio::ssl::stream<boost::asio::ip::tcp::socket>> AsioTcpTlsStream;
class UnbufferedAsioTlsStream : public AsioTcpTlsStream
{
public:
inline
UnbufferedAsioTlsStream(UnbufferedAsioTlsStreamParams& init)
: stream(init.IoContext, init.SslContext), m_VerifyOK(true), m_Hostname(init.Hostname)
: AsioTcpTlsStream(init.IoContext, init.SslContext), m_VerifyOK(true), m_Hostname(init.Hostname)
{
}

View File

@ -52,8 +52,6 @@ public:
static String DirName(const String& path);
static String BaseName(const String& path);
static String GetEnv(const String& key);
static void NullDeleter(void *);
static double GetTime();

View File

@ -14,9 +14,9 @@ using namespace icinga;
std::atomic<int> WorkQueue::m_NextID(1);
boost::thread_specific_ptr<WorkQueue *> l_ThreadWorkQueue;
WorkQueue::WorkQueue(size_t maxItems, int threadCount)
WorkQueue::WorkQueue(size_t maxItems, int threadCount, LogSeverity statsLogLevel)
: m_ID(m_NextID++), m_ThreadCount(threadCount), m_MaxItems(maxItems),
m_TaskStats(15 * 60)
m_TaskStats(15 * 60), m_StatsLogLevel(statsLogLevel)
{
/* Initialize logger. */
m_StatusTimerTimeout = Utility::GetTime();
@ -216,7 +216,7 @@ void WorkQueue::StatusTimerHandler()
/* Log if there are pending items, or 5 minute timeout is reached. */
if (pending > 0 || m_StatusTimerTimeout < now) {
Log(LogInformation, "WorkQueue")
Log(m_StatsLogLevel, "WorkQueue")
<< "#" << m_ID << " (" << m_Name << ") "
<< "items: " << pending << ", "
<< "rate: " << std::setw(2) << GetTaskCount(60) / 60.0 << "/s "

View File

@ -6,6 +6,7 @@
#include "base/i2-base.hpp"
#include "base/timer.hpp"
#include "base/ringbuffer.hpp"
#include "base/logger.hpp"
#include <boost/thread/thread.hpp>
#include <boost/thread/mutex.hpp>
#include <boost/thread/condition_variable.hpp>
@ -52,7 +53,7 @@ class WorkQueue
public:
typedef std::function<void (boost::exception_ptr)> ExceptionCallback;
WorkQueue(size_t maxItems = 0, int threadCount = 1);
WorkQueue(size_t maxItems = 0, int threadCount = 1, LogSeverity statsLogLevel = LogInformation);
~WorkQueue();
void SetName(const String& name);
@ -129,6 +130,7 @@ private:
std::vector<boost::exception_ptr> m_Exceptions;
Timer::Ptr m_StatusTimer;
double m_StatusTimerTimeout;
LogSeverity m_StatsLogLevel;
RingBuffer m_TaskStats;
size_t m_PendingTasks{0};

View File

@ -74,30 +74,6 @@ void CheckerComponent::Stop(bool runtimeRemoved)
m_CV.notify_all();
}
double wait = 0.0;
while (Checkable::GetPendingChecks() > 0) {
Log(LogDebug, "CheckerComponent")
<< "Waiting for running checks (" << Checkable::GetPendingChecks()
<< ") to finish. Waited for " << wait << " seconds now.";
Utility::Sleep(0.1);
wait += 0.1;
/* Pick a timeout slightly shorther than the process reload timeout. */
double reloadTimeout = Application::GetReloadTimeout();
double waitMax = reloadTimeout - 30;
if (waitMax <= 0)
waitMax = 1;
if (wait > waitMax) {
Log(LogWarning, "CheckerComponent")
<< "Checks running too long for " << wait
<< " seconds, hard shutdown before reload timeout: " << reloadTimeout << ".";
break;
}
}
m_ResultTimer->Stop();
m_Thread.join();

View File

@ -11,6 +11,7 @@
#include "base/defer.hpp"
#include "base/logger.hpp"
#include "base/application.hpp"
#include "base/process.hpp"
#include "base/timer.hpp"
#include "base/utility.hpp"
#include "base/exception.hpp"
@ -397,6 +398,10 @@ static void UmbrellaSignalHandler(int num, siginfo_t *info, void*)
static void WorkerSignalHandler(int num, siginfo_t *info, void*)
{
switch (num) {
case SIGUSR1:
// Catches SIGUSR1 as long as the actual handler (logrotate)
// has not been installed not to let SIGUSR1 terminate the process
break;
case SIGUSR2:
if (info->si_pid == 0 || info->si_pid == l_UmbrellaPid) {
// The umbrella process allowed us to continue working beyond config validation
@ -489,6 +494,7 @@ static pid_t StartUnixWorker(const std::vector<std::string>& configs, bool close
sa.sa_sigaction = &WorkerSignalHandler;
sa.sa_flags = SA_RESTART | SA_SIGINFO;
(void)sigaction(SIGUSR1, &sa, nullptr);
(void)sigaction(SIGUSR2, &sa, nullptr);
(void)sigaction(SIGINT, &sa, nullptr);
(void)sigaction(SIGTERM, &sa, nullptr);
@ -504,6 +510,14 @@ static pid_t StartUnixWorker(const std::vector<std::string>& configs, bool close
_exit(EXIT_FAILURE);
}
try {
Process::InitializeSpawnHelper();
} catch (const std::exception& ex) {
Log(LogCritical, "cli")
<< "Failed to initialize process spawn helper after forking (child): " << DiagnosticInformation(ex);
_exit(EXIT_FAILURE);
}
_exit(RunWorker(configs, closeConsoleLog, stderrFile));
} catch (...) {
_exit(EXIT_FAILURE);
@ -776,6 +790,17 @@ int DaemonCommand::Run(const po::variables_map& vm, const std::vector<std::strin
<< "Waited for " << Utility::FormatDuration(Utility::GetTime() - start) << " on old process to exit.";
}
for (int info;;) {
auto pid (waitpid(-1, &info, WNOHANG));
if (pid < 1) {
break;
}
Log(LogNotice, "cli")
<< "Reaped child process " << pid << ".";
}
// Old instance shut down, allow the new one to continue working beyond config validation
(void)kill(nextWorker, SIGUSR2);

View File

@ -29,6 +29,10 @@ void PKISaveCertCommand::InitParameters(boost::program_options::options_descript
("trustedcert", po::value<std::string>(), "Trusted certificate file path (output)")
("host", po::value<std::string>(), "Parent Icinga instance to fetch the public TLS certificate from")
("port", po::value<std::string>()->default_value("5665"), "Icinga 2 port");
hiddenDesc.add_options()
("key", po::value<std::string>())
("cert", po::value<std::string>());
}
std::vector<String> PKISaveCertCommand::GetArgumentSuggestions(const String& argument, const String& word) const

View File

@ -76,6 +76,13 @@ void DbConnection::Resume()
m_CleanUpTimer->SetInterval(60);
m_CleanUpTimer->OnTimerExpired.connect(std::bind(&DbConnection::CleanUpHandler, this));
m_CleanUpTimer->Start();
m_LogStatsTimeout = 0;
m_LogStatsTimer = new Timer();
m_LogStatsTimer->SetInterval(10);
m_LogStatsTimer->OnTimerExpired.connect([this](const Timer * const&) { LogStatsHandler(); });
m_LogStatsTimer->Start();
}
void DbConnection::Pause()
@ -145,7 +152,7 @@ void DbConnection::UpdateProgramStatus()
DbQuery query1;
query1.Table = "programstatus";
query1.IdColumn = "programstatus_id";
query1.Type = DbQueryInsert | DbQueryUpdate;
query1.Type = DbQueryInsert | DbQueryDelete;
query1.Category = DbCatProgramStatus;
query1.Fields = new Dictionary({
@ -172,7 +179,7 @@ void DbConnection::UpdateProgramStatus()
{ "instance_id", 0 } /* DbConnection class fills in real ID */
});
query1.Priority = PriorityHigh;
query1.Priority = PriorityImmediate;
queries.emplace_back(std::move(query1));
DbQuery query2;
@ -236,6 +243,38 @@ void DbConnection::CleanUpHandler()
}
void DbConnection::LogStatsHandler()
{
	// Nothing worth reporting while the DB link is down or this instance is paused.
	if (!GetConnected() || IsPaused())
		return;

	auto now (Utility::GetTime());
	bool forcedByTimeout = m_LogStatsTimeout < now;
	auto queued = m_PendingQueries.load();

	// Stay quiet when the queue is empty, unless the periodic deadline has passed.
	if (queued == 0u && !forcedByTimeout)
		return;

	auto outRate = round(m_OutputQueries.CalculateRate(now, 10));

	// Also stay quiet while the backlog would drain within ~5s at the current output rate.
	if (queued < outRate * 5 && !forcedByTimeout)
		return;

	auto inRate = round(m_InputQueries.CalculateRate(now, 10));

	Log(LogInformation, GetReflectionType()->GetName())
		<< "Pending queries: " << queued << " (Input: " << inRate
		<< "/s; Output: " << outRate << "/s)";

	// Schedule the next unconditional log entry for 5 minutes from now.
	if (forcedByTimeout)
		m_LogStatsTimeout = now + 60 * 5;
}
void DbConnection::CleanUpExecuteQuery(const String&, const String&, double)
{
/* Default handler does nothing. */
@ -446,7 +485,7 @@ void DbConnection::UpdateAllObjects()
continue;
for (const ConfigObject::Ptr& object : dtype->GetObjects()) {
UpdateObject(object);
m_QueryQueue.Enqueue([this, object](){ UpdateObject(object); }, PriorityHigh);
}
}
}
@ -507,3 +546,15 @@ int DbConnection::GetSessionToken()
{
return Application::GetStartTime();
}
// Accounts for count queries having been handed to this connection:
// bumps the pending-query gauge and feeds the input-rate ring buffer.
void DbConnection::IncreasePendingQueries(int count)
{
	auto now (Utility::GetTime());

	m_PendingQueries.fetch_add(count);
	m_InputQueries.InsertValue(now, count);
}
// Accounts for count queries having been completed (or dropped):
// lowers the pending-query gauge and feeds the output-rate ring buffer.
void DbConnection::DecreasePendingQueries(int count)
{
	auto now (Utility::GetTime());

	m_PendingQueries.fetch_sub(count);
	m_OutputQueries.InsertValue(now, count);
}

View File

@ -92,6 +92,11 @@ protected:
static int GetSessionToken();
void IncreasePendingQueries(int count);
void DecreasePendingQueries(int count);
WorkQueue m_QueryQueue{10000000, 1, LogNotice};
private:
bool m_IDCacheValid{false};
std::map<std::pair<DbType::Ptr, DbReference>, String> m_ConfigHashes;
@ -101,8 +106,12 @@ private:
std::set<DbObject::Ptr> m_ConfigUpdates;
std::set<DbObject::Ptr> m_StatusUpdates;
Timer::Ptr m_CleanUpTimer;
Timer::Ptr m_LogStatsTimer;
double m_LogStatsTimeout;
void CleanUpHandler();
void LogStatsHandler();
static Timer::Ptr m_ProgramStatusTimer;
static boost::once_flag m_OnceFlag;
@ -112,6 +121,10 @@ private:
mutable boost::mutex m_StatsMutex;
RingBuffer m_QueryStats{15 * 60};
bool m_ActiveChangedHandler{false};
RingBuffer m_InputQueries{10};
RingBuffer m_OutputQueries{10};
Atomic<uint_fast64_t> m_PendingQueries{0};
};
struct database_error : virtual std::exception, virtual boost::exception { };

View File

@ -111,7 +111,6 @@ void DbObject::SendConfigUpdateHeavy(const Dictionary::Ptr& configFields)
{
/* update custom var config and status */
SendVarsConfigUpdateHeavy();
SendVarsStatusUpdate();
/* config attributes */
if (!configFields)
@ -245,6 +244,22 @@ void DbObject::SendVarsConfigUpdateHeavy()
{ "instance_id", 0 } /* DbConnection class fills in real ID */
});
queries.emplace_back(std::move(query3));
DbQuery query4;
query4.Table = "customvariablestatus";
query4.Type = DbQueryInsert;
query4.Category = DbCatState;
query4.Fields = new Dictionary({
{ "varname", kv.first },
{ "varvalue", value },
{ "is_json", is_json },
{ "status_update_time", DbValue::FromTimestamp(Utility::GetTime()) },
{ "object_id", obj },
{ "instance_id", 0 } /* DbConnection class fills in real ID */
});
queries.emplace_back(std::move(query4));
}
}

View File

@ -356,8 +356,12 @@ String HostDbObject::CalculateConfigHash(const Dictionary::Ptr& configFields) co
Array::Ptr groups = host->GetGroups();
if (groups)
if (groups) {
groups = groups->ShallowClone();
ObjectLock oLock (groups);
std::sort(groups->Begin(), groups->End());
hashData += DbObject::HashValue(groups);
}
ArrayData parents;

View File

@ -308,8 +308,12 @@ String ServiceDbObject::CalculateConfigHash(const Dictionary::Ptr& configFields)
Array::Ptr groups = service->GetGroups();
if (groups)
if (groups) {
groups = groups->ShallowClone();
ObjectLock oLock (groups);
std::sort(groups->Begin(), groups->End());
hashData += DbObject::HashValue(groups);
}
ArrayData dependencies;

View File

@ -150,8 +150,12 @@ String UserDbObject::CalculateConfigHash(const Dictionary::Ptr& configFields) co
Array::Ptr groups = user->GetGroups();
if (groups)
if (groups) {
groups = groups->ShallowClone();
ObjectLock oLock (groups);
std::sort(groups->Begin(), groups->End());
hashData += DbObject::HashValue(groups);
}
return SHA256(hashData);
}

View File

@ -13,6 +13,7 @@
#include "base/configtype.hpp"
#include "base/exception.hpp"
#include "base/statsfunction.hpp"
#include "base/defer.hpp"
#include <utility>
using namespace icinga;
@ -175,6 +176,8 @@ void IdoMysqlConnection::InternalNewTransaction()
if (!GetConnected())
return;
IncreasePendingQueries(2);
AsyncQuery("COMMIT");
AsyncQuery("BEGIN");
}
@ -472,7 +475,7 @@ void IdoMysqlConnection::FinishConnect(double startTime)
{
AssertOnWorkQueue();
if (!GetConnected())
if (!GetConnected() || IsPaused())
return;
FinishAsyncQueries();
@ -510,11 +513,6 @@ void IdoMysqlConnection::AsyncQuery(const String& query, const std::function<voi
*/
aq.Callback = callback;
m_AsyncQueries.emplace_back(std::move(aq));
if (m_AsyncQueries.size() > 25000) {
FinishAsyncQueries();
InternalNewTransaction();
}
}
void IdoMysqlConnection::FinishAsyncQueries()
@ -524,12 +522,29 @@ void IdoMysqlConnection::FinishAsyncQueries()
std::vector<IdoAsyncQuery>::size_type offset = 0;
// This will be executed if there is a problem with executing the queries,
// at which point this function throws an exception and the queries should
// not be listed as still pending in the queue.
Defer decreaseQueries ([this, &offset, &queries]() {
auto lostQueries = queries.size() - offset;
if (lostQueries > 0) {
DecreasePendingQueries(lostQueries);
}
});
while (offset < queries.size()) {
std::ostringstream querybuf;
std::vector<IdoAsyncQuery>::size_type count = 0;
size_t num_bytes = 0;
Defer decreaseQueries ([this, &offset, &count]() {
offset += count;
DecreasePendingQueries(count);
m_UncommittedAsyncQueries += count;
});
for (std::vector<IdoAsyncQuery>::size_type i = offset; i < queries.size(); i++) {
const IdoAsyncQuery& aq = queries[i];
@ -608,8 +623,13 @@ void IdoMysqlConnection::FinishAsyncQueries()
);
}
}
}
offset += count;
if (m_UncommittedAsyncQueries > 25000) {
m_UncommittedAsyncQueries = 0;
Query("COMMIT");
Query("BEGIN");
}
}
@ -617,6 +637,9 @@ IdoMysqlResult IdoMysqlConnection::Query(const String& query)
{
AssertOnWorkQueue();
IncreasePendingQueries(1);
Defer decreaseQueries ([this]() { DecreasePendingQueries(1); });
/* finish all async queries to maintain the right order for queries */
FinishAsyncQueries();
@ -770,6 +793,7 @@ void IdoMysqlConnection::InternalActivateObject(const DbObject::Ptr& dbobj)
SetObjectID(dbobj, GetLastInsertID());
} else {
qbuf << "UPDATE " + GetTablePrefix() + "objects SET is_active = 1 WHERE object_id = " << static_cast<long>(dbref);
IncreasePendingQueries(1);
AsyncQuery(qbuf.str());
}
}
@ -804,6 +828,7 @@ void IdoMysqlConnection::InternalDeactivateObject(const DbObject::Ptr& dbobj)
std::ostringstream qbuf;
qbuf << "UPDATE " + GetTablePrefix() + "objects SET is_active = 0 WHERE object_id = " << static_cast<long>(dbref);
IncreasePendingQueries(1);
AsyncQuery(qbuf.str());
/* Note that we're _NOT_ clearing the db refs via SetReference/SetConfigUpdate/SetStatusUpdate
@ -893,6 +918,7 @@ void IdoMysqlConnection::ExecuteQuery(const DbQuery& query)
<< "Scheduling execute query task, type " << query.Type << ", table '" << query.Table << "'.";
#endif /* I2_DEBUG */
IncreasePendingQueries(1);
m_QueryQueue.Enqueue(std::bind(&IdoMysqlConnection::InternalExecuteQuery, this, query, -1), query.Priority, true);
}
@ -909,6 +935,7 @@ void IdoMysqlConnection::ExecuteMultipleQueries(const std::vector<DbQuery>& quer
<< "Scheduling multiple execute query task, type " << queries[0].Type << ", table '" << queries[0].Table << "'.";
#endif /* I2_DEBUG */
IncreasePendingQueries(queries.size());
m_QueryQueue.Enqueue(std::bind(&IdoMysqlConnection::InternalExecuteMultipleQueries, this, queries), queries[0].Priority, true);
}
@ -948,11 +975,16 @@ void IdoMysqlConnection::InternalExecuteMultipleQueries(const std::vector<DbQuer
{
AssertOnWorkQueue();
if (IsPaused())
if (IsPaused()) {
DecreasePendingQueries(queries.size());
return;
}
if (!GetConnected())
if (!GetConnected()) {
DecreasePendingQueries(queries.size());
return;
}
for (const DbQuery& query : queries) {
ASSERT(query.Type == DbQueryNewTransaction || query.Category != DbCatInvalid);
@ -979,23 +1011,32 @@ void IdoMysqlConnection::InternalExecuteQuery(const DbQuery& query, int typeOver
{
AssertOnWorkQueue();
if (IsPaused())
if (IsPaused()) {
DecreasePendingQueries(1);
return;
}
if (!GetConnected())
if (!GetConnected()) {
DecreasePendingQueries(1);
return;
}
if (query.Type == DbQueryNewTransaction) {
DecreasePendingQueries(1);
InternalNewTransaction();
return;
}
/* check whether we're allowed to execute the query first */
if (GetCategoryFilter() != DbCatEverything && (query.Category & GetCategoryFilter()) == 0)
if (GetCategoryFilter() != DbCatEverything && (query.Category & GetCategoryFilter()) == 0) {
DecreasePendingQueries(1);
return;
}
if (query.Object && query.Object->GetObject()->GetExtension("agent_check").ToBool())
if (query.Object && query.Object->GetObject()->GetExtension("agent_check").ToBool()) {
DecreasePendingQueries(1);
return;
}
/* check if there are missing object/insert ids and re-enqueue the query */
if (!CanExecuteQuery(query)) {
@ -1066,6 +1107,7 @@ void IdoMysqlConnection::InternalExecuteQuery(const DbQuery& query, int typeOver
if ((type & DbQueryInsert) && (type & DbQueryDelete)) {
std::ostringstream qdel;
qdel << "DELETE FROM " << GetTablePrefix() << query.Table << where.str();
IncreasePendingQueries(1);
AsyncQuery(qdel.str());
type = DbQueryInsert;
@ -1150,6 +1192,7 @@ void IdoMysqlConnection::FinishExecuteQuery(const DbQuery& query, int type, bool
<< "Rescheduling DELETE/INSERT query: Upsert UPDATE did not affect rows, type " << type << ", table '" << query.Table << "'.";
#endif /* I2_DEBUG */
IncreasePendingQueries(1);
m_QueryQueue.Enqueue(std::bind(&IdoMysqlConnection::InternalExecuteQuery, this, query, DbQueryDelete | DbQueryInsert), query.Priority);
return;
@ -1178,6 +1221,7 @@ void IdoMysqlConnection::CleanUpExecuteQuery(const String& table, const String&
<< time_column << "'. max_age is set to '" << max_age << "'.";
#endif /* I2_DEBUG */
IncreasePendingQueries(1);
m_QueryQueue.Enqueue(std::bind(&IdoMysqlConnection::InternalCleanUpExecuteQuery, this, table, time_column, max_age), PriorityLow, true);
}
@ -1185,11 +1229,15 @@ void IdoMysqlConnection::InternalCleanUpExecuteQuery(const String& table, const
{
AssertOnWorkQueue();
if (IsPaused())
if (IsPaused()) {
DecreasePendingQueries(1);
return;
}
if (!GetConnected())
if (!GetConnected()) {
DecreasePendingQueries(1);
return;
}
AsyncQuery("DELETE FROM " + GetTablePrefix() + table + " WHERE instance_id = " +
Convert::ToString(static_cast<long>(m_InstanceID)) + " AND " + time_column +

View File

@ -9,6 +9,7 @@
#include "base/timer.hpp"
#include "base/workqueue.hpp"
#include "base/library.hpp"
#include <cstdint>
namespace icinga
{
@ -54,8 +55,6 @@ protected:
private:
DbReference m_InstanceID;
WorkQueue m_QueryQueue{10000000};
Library m_Library;
std::unique_ptr<MysqlInterface, MysqlInterfaceDeleter> m_Mysql;
@ -64,6 +63,7 @@ private:
unsigned int m_MaxPacketSize;
std::vector<IdoAsyncQuery> m_AsyncQueries;
uint_fast32_t m_UncommittedAsyncQueries = 0;
Timer::Ptr m_ReconnectTimer;
Timer::Ptr m_TxTimer;

View File

@ -14,6 +14,7 @@
#include "base/exception.hpp"
#include "base/context.hpp"
#include "base/statsfunction.hpp"
#include "base/defer.hpp"
#include <utility>
using namespace icinga;
@ -137,6 +138,7 @@ void IdoPgsqlConnection::Disconnect()
if (!GetConnected())
return;
IncreasePendingQueries(1);
Query("COMMIT");
m_Pgsql->finish(m_Connection);
@ -166,6 +168,7 @@ void IdoPgsqlConnection::InternalNewTransaction()
if (!GetConnected())
return;
IncreasePendingQueries(2);
Query("COMMIT");
Query("BEGIN");
}
@ -191,6 +194,7 @@ void IdoPgsqlConnection::Reconnect()
if (GetConnected()) {
/* Check if we're really still connected */
try {
IncreasePendingQueries(1);
Query("SELECT 1");
return;
} catch (const std::exception&) {
@ -260,10 +264,13 @@ void IdoPgsqlConnection::Reconnect()
/* explicitely require legacy mode for string escaping in PostgreSQL >= 9.1
* changing standard_conforming_strings to on by default
*/
if (m_Pgsql->serverVersion(m_Connection) >= 90100)
if (m_Pgsql->serverVersion(m_Connection) >= 90100) {
IncreasePendingQueries(1);
result = Query("SET standard_conforming_strings TO off");
}
String dbVersionName = "idoutils";
IncreasePendingQueries(1);
result = Query("SELECT version FROM " + GetTablePrefix() + "dbversion WHERE name=E'" + Escape(dbVersionName) + "'");
Dictionary::Ptr row = FetchRow(result, 0);
@ -295,10 +302,12 @@ void IdoPgsqlConnection::Reconnect()
String instanceName = GetInstanceName();
IncreasePendingQueries(1);
result = Query("SELECT instance_id FROM " + GetTablePrefix() + "instances WHERE instance_name = E'" + Escape(instanceName) + "'");
row = FetchRow(result, 0);
if (!row) {
IncreasePendingQueries(1);
Query("INSERT INTO " + GetTablePrefix() + "instances (instance_name, instance_description) VALUES (E'" + Escape(instanceName) + "', E'" + Escape(GetInstanceDescription()) + "')");
m_InstanceID = GetSequenceValue(GetTablePrefix() + "instances", "instance_id");
} else {
@ -310,6 +319,7 @@ void IdoPgsqlConnection::Reconnect()
/* we have an endpoint in a cluster setup, so decide if we can proceed here */
if (my_endpoint && GetHAMode() == HARunOnce) {
/* get the current endpoint writing to programstatus table */
IncreasePendingQueries(1);
result = Query("SELECT UNIX_TIMESTAMP(status_update_time) AS status_update_time, endpoint_name FROM " +
GetTablePrefix() + "programstatus WHERE instance_id = " + Convert::ToString(m_InstanceID));
row = FetchRow(result, 0);
@ -372,12 +382,14 @@ void IdoPgsqlConnection::Reconnect()
<< "PGSQL IDO instance id: " << static_cast<long>(m_InstanceID) << " (schema version: '" + version + "')"
<< (!sslMode.IsEmpty() ? ", sslmode='" + sslMode + "'" : "");
IncreasePendingQueries(1);
Query("BEGIN");
/* update programstatus table */
UpdateProgramStatus();
/* record connection */
IncreasePendingQueries(1);
Query("INSERT INTO " + GetTablePrefix() + "conninfo " +
"(instance_id, connect_time, last_checkin_time, agent_name, agent_version, connect_type, data_start_time) VALUES ("
+ Convert::ToString(static_cast<long>(m_InstanceID)) + ", NOW(), NOW(), E'icinga2 db_ido_pgsql', E'" + Escape(Application::GetAppVersion())
@ -388,6 +400,7 @@ void IdoPgsqlConnection::Reconnect()
std::ostringstream q1buf;
q1buf << "SELECT object_id, objecttype_id, name1, name2, is_active FROM " + GetTablePrefix() + "objects WHERE instance_id = " << static_cast<long>(m_InstanceID);
IncreasePendingQueries(1);
result = Query(q1buf.str());
std::vector<DbObject::Ptr> activeDbObjs;
@ -442,6 +455,7 @@ void IdoPgsqlConnection::FinishConnect(double startTime)
<< "Finished reconnecting to '" << GetName() << "' database '" << GetDatabase() << "' in "
<< std::setw(2) << Utility::GetTime() - startTime << " second(s).";
IncreasePendingQueries(2);
Query("COMMIT");
Query("BEGIN");
}
@ -455,6 +469,7 @@ void IdoPgsqlConnection::ClearTablesBySession()
void IdoPgsqlConnection::ClearTableBySession(const String& table)
{
IncreasePendingQueries(1);
Query("DELETE FROM " + GetTablePrefix() + table + " WHERE instance_id = " +
Convert::ToString(static_cast<long>(m_InstanceID)) + " AND session_token <> " +
Convert::ToString(GetSessionToken()));
@ -464,6 +479,8 @@ IdoPgsqlResult IdoPgsqlConnection::Query(const String& query)
{
AssertOnWorkQueue();
Defer decreaseQueries ([this]() { DecreasePendingQueries(1); });
Log(LogDebug, "IdoPgsqlConnection")
<< "Query: " << query;
@ -512,6 +529,7 @@ DbReference IdoPgsqlConnection::GetSequenceValue(const String& table, const Stri
{
AssertOnWorkQueue();
IncreasePendingQueries(1);
IdoPgsqlResult result = Query("SELECT CURRVAL(pg_get_serial_sequence(E'" + Escape(table) + "', E'" + Escape(column) + "')) AS id");
Dictionary::Ptr row = FetchRow(result, 0);
@ -601,10 +619,12 @@ void IdoPgsqlConnection::InternalActivateObject(const DbObject::Ptr& dbobj)
<< "E'" << Escape(dbobj->GetName1()) << "', 1)";
}
IncreasePendingQueries(1);
Query(qbuf.str());
SetObjectID(dbobj, GetSequenceValue(GetTablePrefix() + "objects", "object_id"));
} else {
qbuf << "UPDATE " + GetTablePrefix() + "objects SET is_active = 1 WHERE object_id = " << static_cast<long>(dbref);
IncreasePendingQueries(1);
Query(qbuf.str());
}
}
@ -631,6 +651,7 @@ void IdoPgsqlConnection::InternalDeactivateObject(const DbObject::Ptr& dbobj)
std::ostringstream qbuf;
qbuf << "UPDATE " + GetTablePrefix() + "objects SET is_active = 0 WHERE object_id = " << static_cast<long>(dbref);
IncreasePendingQueries(1);
Query(qbuf.str());
/* Note that we're _NOT_ clearing the db refs via SetReference/SetConfigUpdate/SetStatusUpdate
@ -715,6 +736,7 @@ void IdoPgsqlConnection::ExecuteQuery(const DbQuery& query)
ASSERT(query.Category != DbCatInvalid);
IncreasePendingQueries(1);
m_QueryQueue.Enqueue(std::bind(&IdoPgsqlConnection::InternalExecuteQuery, this, query, -1), query.Priority, true);
}
@ -726,6 +748,7 @@ void IdoPgsqlConnection::ExecuteMultipleQueries(const std::vector<DbQuery>& quer
if (queries.empty())
return;
IncreasePendingQueries(queries.size());
m_QueryQueue.Enqueue(std::bind(&IdoPgsqlConnection::InternalExecuteMultipleQueries, this, queries), queries[0].Priority, true);
}
@ -765,11 +788,15 @@ void IdoPgsqlConnection::InternalExecuteMultipleQueries(const std::vector<DbQuer
{
AssertOnWorkQueue();
if (IsPaused())
if (IsPaused()) {
DecreasePendingQueries(queries.size());
return;
}
if (!GetConnected())
if (!GetConnected()) {
DecreasePendingQueries(queries.size());
return;
}
for (const DbQuery& query : queries) {
ASSERT(query.Type == DbQueryNewTransaction || query.Category != DbCatInvalid);
@ -789,23 +816,32 @@ void IdoPgsqlConnection::InternalExecuteQuery(const DbQuery& query, int typeOver
{
AssertOnWorkQueue();
if (IsPaused())
if (IsPaused()) {
DecreasePendingQueries(1);
return;
}
if (!GetConnected())
if (!GetConnected()) {
DecreasePendingQueries(1);
return;
}
if (query.Type == DbQueryNewTransaction) {
DecreasePendingQueries(1);
InternalNewTransaction();
return;
}
/* check whether we're allowed to execute the query first */
if (GetCategoryFilter() != DbCatEverything && (query.Category & GetCategoryFilter()) == 0)
if (GetCategoryFilter() != DbCatEverything && (query.Category & GetCategoryFilter()) == 0) {
DecreasePendingQueries(1);
return;
}
if (query.Object && query.Object->GetObject()->GetExtension("agent_check").ToBool())
if (query.Object && query.Object->GetObject()->GetExtension("agent_check").ToBool()) {
DecreasePendingQueries(1);
return;
}
/* check if there are missing object/insert ids and re-enqueue the query */
if (!CanExecuteQuery(query)) {
@ -862,6 +898,7 @@ void IdoPgsqlConnection::InternalExecuteQuery(const DbQuery& query, int typeOver
if ((type & DbQueryInsert) && (type & DbQueryDelete)) {
std::ostringstream qdel;
qdel << "DELETE FROM " << GetTablePrefix() << query.Table << where.str();
IncreasePendingQueries(1);
Query(qdel.str());
type = DbQueryInsert;
@ -929,6 +966,7 @@ void IdoPgsqlConnection::InternalExecuteQuery(const DbQuery& query, int typeOver
Query(qbuf.str());
if (upsert && GetAffectedRows() == 0) {
IncreasePendingQueries(1);
InternalExecuteQuery(query, DbQueryDelete | DbQueryInsert);
return;
@ -959,6 +997,7 @@ void IdoPgsqlConnection::CleanUpExecuteQuery(const String& table, const String&
if (IsPaused())
return;
IncreasePendingQueries(1);
m_QueryQueue.Enqueue(std::bind(&IdoPgsqlConnection::InternalCleanUpExecuteQuery, this, table, time_column, max_age), PriorityLow, true);
}
@ -966,8 +1005,10 @@ void IdoPgsqlConnection::InternalCleanUpExecuteQuery(const String& table, const
{
AssertOnWorkQueue();
if (!GetConnected())
if (!GetConnected()) {
DecreasePendingQueries(1);
return;
}
Query("DELETE FROM " + GetTablePrefix() + table + " WHERE instance_id = " +
Convert::ToString(static_cast<long>(m_InstanceID)) + " AND " + time_column +
@ -977,6 +1018,7 @@ void IdoPgsqlConnection::InternalCleanUpExecuteQuery(const String& table, const
void IdoPgsqlConnection::FillIDCache(const DbType::Ptr& type)
{
String query = "SELECT " + type->GetIDColumn() + " AS object_id, " + type->GetTable() + "_id, config_hash FROM " + GetTablePrefix() + type->GetTable() + "s";
IncreasePendingQueries(1);
IdoPgsqlResult result = Query(query);
Dictionary::Ptr row;

View File

@ -48,8 +48,6 @@ protected:
private:
DbReference m_InstanceID;
WorkQueue m_QueryQueue{1000000};
Library m_Library;
std::unique_ptr<PgsqlInterface, PgsqlInterfaceDeleter> m_Pgsql;

View File

@ -358,9 +358,12 @@ void Checkable::ProcessCheckResult(const CheckResult::Ptr& cr, const MessageOrig
SetLastCheckResult(cr);
if (GetProblem() != wasProblem) {
for (auto& service : host->GetServices()) {
auto services = host->GetServices();
olock.Unlock();
for (auto& service : services) {
Service::OnHostProblemChanged(service, cr, origin);
}
olock.Lock();
}
}
@ -514,6 +517,8 @@ void Checkable::ExecuteCheck()
double scheduled_start = GetNextCheck();
double before_check = Utility::GetTime();
SetLastCheckStarted(Utility::GetTime());
/* This calls SetNextCheck() which updates the CheckerComponent's idle/pending
* queues and ensures that checks are not fired multiple times. ProcessCheckResult()
* is called too late. See #6421.

View File

@ -147,25 +147,7 @@ static void FireSuppressedNotifications(Checkable* checkable)
for (auto type : {NotificationProblem, NotificationRecovery, NotificationFlappingStart, NotificationFlappingEnd}) {
if (suppressed_types & type) {
bool still_applies;
auto cr (checkable->GetLastCheckResult());
switch (type) {
case NotificationProblem:
still_applies = cr && !checkable->IsStateOK(cr->GetState()) && checkable->GetStateType() == StateTypeHard;
break;
case NotificationRecovery:
still_applies = cr && checkable->IsStateOK(cr->GetState());
break;
case NotificationFlappingStart:
still_applies = checkable->IsFlapping();
break;
case NotificationFlappingEnd:
still_applies = !checkable->IsFlapping();
break;
default:
break;
}
bool still_applies = checkable->NotificationReasonApplies(type);
if (still_applies) {
bool still_suppressed;
@ -185,28 +167,8 @@ static void FireSuppressedNotifications(Checkable* checkable)
break;
}
if (!still_suppressed && checkable->GetEnableActiveChecks()) {
/* If e.g. the downtime just ended, but the service is still not ok, we would re-send the stashed problem notification.
* But if the next check result recovers the service soon, we would send a recovery notification soon after the problem one.
* This is not desired, especially for lots of services at once.
* Because of that if there's likely to be a check result soon,
* we delay the re-sending of the stashed notification until the next check.
* That check either doesn't change anything and we finally re-send the stashed problem notification
* or recovers the service and we drop the stashed notification. */
/* One minute unless the check interval is too short so the next check will always run during the next minute. */
auto threshold (checkable->GetCheckInterval() - 10);
if (threshold > 60)
threshold = 60;
else if (threshold < 0)
threshold = 0;
still_suppressed = checkable->GetNextCheck() <= Utility::GetTime() + threshold;
}
if (!still_suppressed) {
Checkable::OnNotificationsRequested(checkable, type, cr, "", "", nullptr);
if (!still_suppressed && !checkable->IsLikelyToBeCheckedSoon()) {
Checkable::OnNotificationsRequested(checkable, type, checkable->GetLastCheckResult(), "", "", nullptr);
subtract |= type;
}
@ -241,3 +203,62 @@ void Checkable::FireSuppressedNotifications(const Timer * const&)
::FireSuppressedNotifications(service.get());
}
}
/**
 * Returns whether sending a notification of type type right now would represent *this' current state correctly.
 *
 * @param type The type of notification to send (or not to send).
 *
 * @return Whether to send the notification.
 */
bool Checkable::NotificationReasonApplies(NotificationType type)
{
	switch (type) {
		case NotificationProblem:
			{
				// A problem notification is only accurate for a hard non-OK state.
				auto cr (GetLastCheckResult());
				return cr && !IsStateOK(cr->GetState()) && GetStateType() == StateTypeHard;
			}
		case NotificationRecovery:
			{
				auto cr (GetLastCheckResult());
				return cr && IsStateOK(cr->GetState());
			}
		case NotificationFlappingStart:
			return IsFlapping();
		case NotificationFlappingEnd:
			return !IsFlapping();
		default:
			// Fixed: the diagnostic previously named "NotificationReasonStillApplies()",
			// which doesn't match this method's actual name.
			VERIFY(!"Checkable#NotificationReasonApplies(): given type not implemented");
			return false;
	}
}
/**
 * E.g. we're going to re-send a stashed problem notification as *this is still not ok.
 * But if the next check result recovers *this soon, we would send a recovery notification soon after the problem one.
 * This is not desired, especially for lots of checkables at once.
 * Because of that if there's likely to be a check result soon,
 * we delay the re-sending of the stashed notification until the next check.
 * That check either doesn't change anything and we finally re-send the stashed problem notification
 * or recovers *this and we drop the stashed notification.
 *
 * @return Whether *this is likely to be checked soon
 */
bool Checkable::IsLikelyToBeCheckedSoon()
{
	// Without active checks there's no scheduled check to wait for.
	if (!GetEnableActiveChecks()) {
		return false;
	}

	// Window of one minute, shrunk for short check intervals so the next
	// scheduled check always falls inside the window.
	auto window (GetCheckInterval() - 10);

	if (window < 0) {
		window = 0;
	} else if (window > 60) {
		window = 60;
	}

	return GetNextCheck() <= Utility::GetTime() + window;
}

View File

@ -66,6 +66,14 @@ void Checkable::Start(bool runtimeCreated)
{
double now = Utility::GetTime();
{
auto cr (GetLastCheckResult());
if (GetLastCheckStarted() > (cr ? cr->GetExecutionEnd() : 0.0)) {
SetNextCheck(GetLastCheckStarted());
}
}
if (GetNextCheck() < now + 60) {
double delta = std::min(GetCheckInterval(), 60.0);
delta *= (double)std::rand() / RAND_MAX;

View File

@ -174,6 +174,9 @@ public:
void ValidateRetryInterval(const Lazy<double>& lvalue, const ValidationUtils& value) final;
void ValidateMaxCheckAttempts(const Lazy<int>& lvalue, const ValidationUtils& value) final;
bool NotificationReasonApplies(NotificationType type);
bool IsLikelyToBeCheckedSoon();
static void IncreasePendingChecks();
static void DecreasePendingChecks();
static int GetPendingChecks();

View File

@ -90,6 +90,8 @@ abstract class Checkable : CustomVarObject
[config] String icon_image_alt;
[state] Timestamp next_check;
[state, no_user_view, no_user_modify] Timestamp last_check_started;
[state] int check_attempt {
default {{{ return 1; }}}
};

View File

@ -24,7 +24,9 @@ INITIALIZE_ONCE(&ClusterEvents::StaticInitialize);
REGISTER_APIFUNCTION(CheckResult, event, &ClusterEvents::CheckResultAPIHandler);
REGISTER_APIFUNCTION(SetNextCheck, event, &ClusterEvents::NextCheckChangedAPIHandler);
REGISTER_APIFUNCTION(SetLastCheckStarted, event, &ClusterEvents::LastCheckStartedChangedAPIHandler);
REGISTER_APIFUNCTION(SetSuppressedNotifications, event, &ClusterEvents::SuppressedNotificationsChangedAPIHandler);
REGISTER_APIFUNCTION(SetSuppressedNotificationTypes, event, &ClusterEvents::SuppressedNotificationTypesChangedAPIHandler);
REGISTER_APIFUNCTION(SetNextNotification, event, &ClusterEvents::NextNotificationChangedAPIHandler);
REGISTER_APIFUNCTION(SetForceNextCheck, event, &ClusterEvents::ForceNextCheckChangedAPIHandler);
REGISTER_APIFUNCTION(SetForceNextNotification, event, &ClusterEvents::ForceNextNotificationChangedAPIHandler);
@ -41,7 +43,9 @@ void ClusterEvents::StaticInitialize()
{
Checkable::OnNewCheckResult.connect(&ClusterEvents::CheckResultHandler);
Checkable::OnNextCheckChanged.connect(&ClusterEvents::NextCheckChangedHandler);
Checkable::OnLastCheckStartedChanged.connect(&ClusterEvents::LastCheckStartedChangedHandler);
Checkable::OnSuppressedNotificationsChanged.connect(&ClusterEvents::SuppressedNotificationsChangedHandler);
Notification::OnSuppressedNotificationsChanged.connect(&ClusterEvents::SuppressedNotificationTypesChangedHandler);
Notification::OnNextNotificationChanged.connect(&ClusterEvents::NextNotificationChangedHandler);
Checkable::OnForceNextCheckChanged.connect(&ClusterEvents::ForceNextCheckChangedHandler);
Checkable::OnForceNextNotificationChanged.connect(&ClusterEvents::ForceNextNotificationChangedHandler);
@ -236,6 +240,68 @@ Value ClusterEvents::NextCheckChangedAPIHandler(const MessageOrigin::Ptr& origin
return Empty;
}
// Relays a local last_check_started change to the rest of the cluster
// as an event::SetLastCheckStarted JSON-RPC message.
void ClusterEvents::LastCheckStartedChangedHandler(const Checkable::Ptr& checkable, const MessageOrigin::Ptr& origin)
{
	// Without an API listener there is nobody to relay the event to.
	auto apiListener (ApiListener::GetInstance());

	if (!apiListener)
		return;

	Host::Ptr host;
	Service::Ptr service;
	tie(host, service) = GetHostService(checkable);

	Dictionary::Ptr eventParams = new Dictionary();
	eventParams->Set("host", host->GetName());

	// Only services carry a short name; hosts are addressed by host name alone.
	if (service)
		eventParams->Set("service", service->GetShortName());

	eventParams->Set("last_check_started", checkable->GetLastCheckStarted());

	Dictionary::Ptr rpcMessage = new Dictionary({
		{ "jsonrpc", "2.0" },
		{ "method", "event::SetLastCheckStarted" },
		{ "params", eventParams }
	});

	apiListener->RelayMessage(origin, checkable, rpcMessage, true);
}
// Applies an event::SetLastCheckStarted message received from the cluster,
// after verifying the sender is a trusted endpoint with access to the target.
Value ClusterEvents::LastCheckStartedChangedAPIHandler(const MessageOrigin::Ptr& origin, const Dictionary::Ptr& params)
{
	auto endpoint (origin->FromClient->GetEndpoint());

	// Only messages from configured cluster endpoints are trusted.
	if (!endpoint) {
		Log(LogNotice, "ClusterEvents")
			<< "Discarding 'last_check_started changed' message from '" << origin->FromClient->GetIdentity() << "': Invalid endpoint origin (client not allowed).";
		return Empty;
	}

	auto host (Host::GetByName(params->Get("host")));

	if (!host)
		return Empty;

	// The target is either one of the host's services or the host itself.
	Checkable::Ptr checkable = host;

	if (params->Contains("service"))
		checkable = host->GetServiceByShortName(params->Get("service"));

	if (!checkable)
		return Empty;

	if (origin->FromZone && !origin->FromZone->CanAccessObject(checkable)) {
		Log(LogNotice, "ClusterEvents")
			<< "Discarding 'last_check_started changed' message for checkable '" << checkable->GetName()
			<< "' from '" << origin->FromClient->GetIdentity() << "': Unauthorized access.";
		return Empty;
	}

	checkable->SetLastCheckStarted(params->Get("last_check_started"), false, origin);

	return Empty;
}
void ClusterEvents::SuppressedNotificationsChangedHandler(const Checkable::Ptr& checkable, const MessageOrigin::Ptr& origin)
{
ApiListener::Ptr listener = ApiListener::GetInstance();
@ -298,6 +364,52 @@ Value ClusterEvents::SuppressedNotificationsChangedAPIHandler(const MessageOrigi
return Empty;
}
// Relays a local change of a notification's suppressed types to the rest of
// the cluster as an event::SetSuppressedNotificationTypes JSON-RPC message.
void ClusterEvents::SuppressedNotificationTypesChangedHandler(const Notification::Ptr& notification, const MessageOrigin::Ptr& origin)
{
	// Without an API listener there is nobody to relay the event to.
	auto apiListener (ApiListener::GetInstance());

	if (!apiListener)
		return;

	Dictionary::Ptr eventParams = new Dictionary({
		{ "notification", notification->GetName() },
		{ "suppressed_notifications", notification->GetSuppressedNotifications() }
	});

	Dictionary::Ptr rpcMessage = new Dictionary({
		{ "jsonrpc", "2.0" },
		{ "method", "event::SetSuppressedNotificationTypes" },
		{ "params", eventParams }
	});

	apiListener->RelayMessage(origin, notification, rpcMessage, true);
}
// Applies an event::SetSuppressedNotificationTypes message received from the
// cluster, after verifying the sender is a trusted endpoint with access.
Value ClusterEvents::SuppressedNotificationTypesChangedAPIHandler(const MessageOrigin::Ptr& origin, const Dictionary::Ptr& params)
{
	auto endpoint (origin->FromClient->GetEndpoint());

	// Only messages from configured cluster endpoints are trusted.
	if (!endpoint) {
		Log(LogNotice, "ClusterEvents")
			<< "Discarding 'suppressed notifications changed' message from '" << origin->FromClient->GetIdentity() << "': Invalid endpoint origin (client not allowed).";
		return Empty;
	}

	Notification::Ptr notification = Notification::GetByName(params->Get("notification"));

	if (!notification)
		return Empty;

	if (origin->FromZone && !origin->FromZone->CanAccessObject(notification)) {
		Log(LogNotice, "ClusterEvents")
			<< "Discarding 'suppressed notification types changed' message for notification '" << notification->GetName()
			<< "' from '" << origin->FromClient->GetIdentity() << "': Unauthorized access.";
		return Empty;
	}

	notification->SetSuppressedNotifications(params->Get("suppressed_notifications"), false, origin);

	return Empty;
}
void ClusterEvents::NextNotificationChangedHandler(const Notification::Ptr& notification, const MessageOrigin::Ptr& origin)
{
ApiListener::Ptr listener = ApiListener::GetInstance();

View File

@ -26,9 +26,15 @@ public:
static void NextCheckChangedHandler(const Checkable::Ptr& checkable, const MessageOrigin::Ptr& origin);
static Value NextCheckChangedAPIHandler(const MessageOrigin::Ptr& origin, const Dictionary::Ptr& params);
static void LastCheckStartedChangedHandler(const Checkable::Ptr& checkable, const MessageOrigin::Ptr& origin);
static Value LastCheckStartedChangedAPIHandler(const MessageOrigin::Ptr& origin, const Dictionary::Ptr& params);
static void SuppressedNotificationsChangedHandler(const Checkable::Ptr& checkable, const MessageOrigin::Ptr& origin);
static Value SuppressedNotificationsChangedAPIHandler(const MessageOrigin::Ptr& origin, const Dictionary::Ptr& params);
static void SuppressedNotificationTypesChangedHandler(const Notification::Ptr& notification, const MessageOrigin::Ptr& origin);
static Value SuppressedNotificationTypesChangedAPIHandler(const MessageOrigin::Ptr& origin, const Dictionary::Ptr& params);
static void NextNotificationChangedHandler(const Notification::Ptr& notification, const MessageOrigin::Ptr& origin);
static Value NextNotificationChangedAPIHandler(const MessageOrigin::Ptr& origin, const Dictionary::Ptr& params);

View File

@ -345,10 +345,19 @@ void Downtime::RemoveDowntime(const String& id, bool cancelled, bool expired, co
reason = "<unknown>";
}
Log(LogInformation, "Downtime")
<< "Removed downtime '" << downtime->GetName() << "' from checkable '"
<< downtime->GetCheckable()->GetName() << "' (Reason: " << reason << ").";
Log msg (LogInformation, "Downtime");
msg << "Removed downtime '" << downtime->GetName() << "' from checkable";
{
auto checkable (downtime->GetCheckable());
if (checkable) {
msg << " '" << checkable->GetName() << "'";
}
}
msg << " (Reason: " << reason << ").";
}
bool Downtime::CanBeTriggered()

View File

@ -513,8 +513,6 @@ Value MacroProcessor::ResolveArguments(const Value& command, const Dictionary::P
continue;
}
arg.SkipValue = arg.SkipValue || arg.AValue.GetType() == ValueEmpty;
args.emplace_back(std::move(arg));
}

View File

@ -234,6 +234,39 @@ void Notification::BeginExecuteNotification(NotificationType type, const CheckRe
Log(LogNotice, "Notification")
<< "Not sending " << (reminder ? "reminder " : "") << "notifications for notification object '" << notificationName
<< "': not in timeperiod '" << tp->GetName() << "'";
if (!reminder) {
switch (type) {
case NotificationProblem:
case NotificationRecovery:
case NotificationFlappingStart:
case NotificationFlappingEnd:
{
/* If a non-reminder notification was suppressed, but just because of its time period,
* stash it into a notification types bitmask for maybe re-sending later.
*/
ObjectLock olock (this);
int suppressedTypesBefore (GetSuppressedNotifications());
int suppressedTypesAfter (suppressedTypesBefore | type);
for (int conflict : {NotificationProblem | NotificationRecovery, NotificationFlappingStart | NotificationFlappingEnd}) {
/* E.g. problem and recovery notifications neutralize each other. */
if ((suppressedTypesAfter & conflict) == conflict) {
suppressedTypesAfter &= ~conflict;
}
}
if (suppressedTypesAfter != suppressedTypesBefore) {
SetSuppressedNotifications(suppressedTypesAfter);
}
}
default:
; // Cheating the compiler on "5 enumeration values not handled in switch"
}
}
return;
}

View File

@ -86,6 +86,10 @@ class Notification : CustomVarObject < NotificationNameComposer
[state] int notification_number;
[state] Timestamp last_problem_notification;
[state, no_user_view, no_user_modify] int suppressed_notifications {
default {{{ return 0; }}}
};
[config, navigation] name(Endpoint) command_endpoint (CommandEndpointRaw) {
navigate {{{
return Endpoint::GetByName(GetCommandEndpointRaw());

View File

@ -56,6 +56,69 @@ void NotificationComponent::Stop(bool runtimeRemoved)
ObjectImpl<NotificationComponent>::Stop(runtimeRemoved);
}
/**
 * Clears the given notification-type bits from the notification's
 * suppressed-notifications bitmask while holding the object lock.
 * The object is only written to when the mask actually changes.
 *
 * @param notification The notification to update.
 * @param types Bitmask of NotificationType bits to clear.
 */
static inline
void SubtractSuppressedNotificationTypes(const Notification::Ptr& notification, int types)
{
ObjectLock lock (notification);

int before = notification->GetSuppressedNotifications();
int after = before & ~types;

/* Avoid a redundant state write (and change event) when nothing changed. */
if (after != before)
notification->SetSuppressedNotifications(after);
}
/**
 * Re-sends notifications of this Notification object that were previously
 * suppressed (e.g. stashed because they fell outside the time period).
 *
 * Two passes over the four re-sendable types (problem, recovery,
 * flapping start/end):
 *  1. Types whose reason no longer applies to the checkable are dropped
 *     (accumulated in 'subtract') without sending anything.
 *  2. If any types remain and the time period now allows sending and the
 *     checkable is not about to be re-checked anyway, each remaining type
 *     is cleared from the suppression mask and then executed.
 *
 * @param notification The notification whose suppressed types to process.
 */
static inline
void FireSuppressedNotifications(const Notification::Ptr& notification)
{
int suppressedTypes (notification->GetSuppressedNotifications());
/* Fast path: nothing was suppressed. */
if (!suppressedTypes)
return;
int subtract = 0;
auto checkable (notification->GetCheckable());
/* Pass 1: drop suppressed types whose reason no longer applies. */
for (auto type : {NotificationProblem, NotificationRecovery, NotificationFlappingStart, NotificationFlappingEnd}) {
if ((suppressedTypes & type) && !checkable->NotificationReasonApplies(type)) {
subtract |= type;
suppressedTypes &= ~type;
}
}
if (suppressedTypes) {
auto tp (notification->GetPeriod());
/* Only re-send once inside the time period (or with no period at all),
 * and only if the checkable is not likely to be checked again soon
 * (a fresh check result may change what should be sent). */
if ((!tp || tp->IsInside(Utility::GetTime())) && !checkable->IsLikelyToBeCheckedSoon()) {
for (auto type : {NotificationProblem, NotificationRecovery, NotificationFlappingStart, NotificationFlappingEnd}) {
if (!(suppressedTypes & type))
continue;
auto notificationName (notification->GetName());
Log(LogNotice, "NotificationComponent")
<< "Attempting to re-send previously suppressed notification '" << notificationName << "'.";
/* Clear this type (plus anything dropped so far) from the mask
 * BEFORE executing, then reset the accumulator so the final
 * flush below doesn't clear it twice. */
subtract |= type;
SubtractSuppressedNotificationTypes(notification, subtract);
subtract = 0;
try {
notification->BeginExecuteNotification(type, checkable->GetLastCheckResult(), false, false);
} catch (const std::exception& ex) {
Log(LogWarning, "NotificationComponent")
<< "Exception occurred during notification for object '"
<< notificationName << "': " << DiagnosticInformation(ex, false);
}
}
}
}
/* Flush types dropped in pass 1 when pass 2 didn't run (or ran for none). */
if (subtract) {
SubtractSuppressedNotificationTypes(notification, subtract);
}
}
/**
* Periodically sends notifications.
*
@ -104,37 +167,41 @@ void NotificationComponent::NotificationTimerHandler()
bool reachable = checkable->IsReachable(DependencyNotification);
if (reachable) {
Array::Ptr unstashedNotifications = new Array();
{
auto stashedNotifications (notification->GetStashedNotifications());
ObjectLock olock(stashedNotifications);
Array::Ptr unstashedNotifications = new Array();
stashedNotifications->CopyTo(unstashedNotifications);
stashedNotifications->Clear();
}
{
auto stashedNotifications (notification->GetStashedNotifications());
ObjectLock olock(stashedNotifications);
ObjectLock olock(unstashedNotifications);
stashedNotifications->CopyTo(unstashedNotifications);
stashedNotifications->Clear();
}
for (Dictionary::Ptr unstashedNotification : unstashedNotifications) {
try {
Log(LogNotice, "NotificationComponent")
<< "Attempting to send stashed notification '" << notificationName << "'.";
ObjectLock olock(unstashedNotifications);
notification->BeginExecuteNotification(
(NotificationType)(int)unstashedNotification->Get("type"),
(CheckResult::Ptr)unstashedNotification->Get("cr"),
(bool)unstashedNotification->Get("force"),
(bool)unstashedNotification->Get("reminder"),
(String)unstashedNotification->Get("author"),
(String)unstashedNotification->Get("text")
);
} catch (const std::exception& ex) {
Log(LogWarning, "NotificationComponent")
<< "Exception occurred during notification for object '"
<< notificationName << "': " << DiagnosticInformation(ex, false);
for (Dictionary::Ptr unstashedNotification : unstashedNotifications) {
try {
Log(LogNotice, "NotificationComponent")
<< "Attempting to send stashed notification '" << notificationName << "'.";
notification->BeginExecuteNotification(
(NotificationType)(int)unstashedNotification->Get("type"),
(CheckResult::Ptr)unstashedNotification->Get("cr"),
(bool)unstashedNotification->Get("force"),
(bool)unstashedNotification->Get("reminder"),
(String)unstashedNotification->Get("author"),
(String)unstashedNotification->Get("text")
);
} catch (const std::exception& ex) {
Log(LogWarning, "NotificationComponent")
<< "Exception occurred during notification for object '"
<< notificationName << "': " << DiagnosticInformation(ex, false);
}
}
}
FireSuppressedNotifications(notification);
}
if (notification->GetInterval() <= 0 && notification->GetNoMoreNotifications()) {
@ -165,6 +232,10 @@ void NotificationComponent::NotificationTimerHandler()
if ((service && service->GetState() == ServiceOK) || (!service && host->GetState() == HostUp))
continue;
/* Don't send reminder notifications before initial ones. */
if (checkable->GetSuppressedNotifications() & NotificationProblem)
continue;
/* Skip in runtime filters. */
if (!reachable || checkable->IsInDowntime() || checkable->IsAcknowledged() || checkable->IsFlapping())
continue;

View File

@ -20,7 +20,7 @@ using namespace icinga;
REGISTER_APIFUNCTION(Update, config, &ApiListener::ConfigUpdateHandler);
boost::mutex ApiListener::m_ConfigSyncStageLock;
SpinLock ApiListener::m_ConfigSyncStageLock;
/**
* Entrypoint for updating all authoritative configs from /etc/zones.d, packages, etc.
@ -312,7 +312,16 @@ Value ApiListener::ConfigUpdateHandler(const MessageOrigin::Ptr& origin, const D
return Empty;
}
std::thread([origin, params]() { HandleConfigUpdate(origin, params); }).detach();
std::thread([origin, params, listener]() {
try {
listener->HandleConfigUpdate(origin, params);
} catch (const std::exception& ex) {
auto msg ("Exception during config sync: " + DiagnosticInformation(ex));
Log(LogCritical, "ApiListener") << msg;
listener->UpdateLastFailedZonesStageValidation(msg);
}
}).detach();
return Empty;
}
@ -321,7 +330,7 @@ void ApiListener::HandleConfigUpdate(const MessageOrigin::Ptr& origin, const Dic
/* Only one transaction is allowed, concurrent message handlers need to wait.
* This affects two parent endpoints sending the config in the same moment.
*/
auto lock (Shared<boost::mutex::scoped_lock>::Make(m_ConfigSyncStageLock));
auto lock (Shared<std::unique_lock<SpinLock>>::Make(m_ConfigSyncStageLock));
String apiZonesStageDir = GetApiZonesStageDir();
String fromEndpointName = origin->FromClient->GetEndpoint()->GetName();
@ -534,6 +543,7 @@ void ApiListener::HandleConfigUpdate(const MessageOrigin::Ptr& origin, const Dic
Log(LogInformation, "ApiListener")
<< "Received configuration updates (" << count << ") from endpoint '" << fromEndpointName
<< "' are equal to production, skipping validation and reload.";
ClearLastFailedZonesStageValidation();
}
}
@ -618,7 +628,7 @@ void ApiListener::TryActivateZonesStageCallback(const ProcessResult& pr,
*
* @param relativePaths Required for later file operations in the callback. Provides the zone name plus path in a list.
*/
void ApiListener::AsyncTryActivateZonesStage(const std::vector<String>& relativePaths, const Shared<boost::mutex::scoped_lock>::Ptr& lock)
void ApiListener::AsyncTryActivateZonesStage(const std::vector<String>& relativePaths, const Shared<std::unique_lock<SpinLock>>::Ptr& lock)
{
VERIFY(Application::GetArgC() >= 1);

View File

@ -560,32 +560,19 @@ void ApiListener::NewClientHandlerInternal(
boost::system::error_code ec;
{
struct DoneHandshake
{
bool Done = false;
};
auto doneHandshake (Shared<DoneHandshake>::Make());
IoEngine::SpawnCoroutine(*strand, [strand, client, doneHandshake](asio::yield_context yc) {
namespace sys = boost::system;
{
boost::asio::deadline_timer timer (strand->context());
timer.expires_from_now(boost::posix_time::microseconds(intmax_t(Configuration::TlsHandshakeTimeout * 1000000)));
sys::error_code ec;
timer.async_wait(yc[ec]);
}
if (!doneHandshake->Done) {
sys::error_code ec;
Timeout::Ptr handshakeTimeout (new Timeout(
strand->context(),
*strand,
boost::posix_time::microseconds(intmax_t(Configuration::TlsHandshakeTimeout * 1000000)),
[strand, client](asio::yield_context yc) {
boost::system::error_code ec;
client->lowest_layer().cancel(ec);
}
});
));
sslConn.async_handshake(role == RoleClient ? sslConn.client : sslConn.server, yc[ec]);
doneHandshake->Done = true;
handshakeTimeout->Cancel();
}
if (ec) {

View File

@ -11,6 +11,7 @@
#include "base/configobject.hpp"
#include "base/process.hpp"
#include "base/shared.hpp"
#include "base/spinlock.hpp"
#include "base/timer.hpp"
#include "base/workqueue.hpp"
#include "base/tcpsocket.hpp"
@ -22,6 +23,7 @@
#include <boost/asio/spawn.hpp>
#include <boost/asio/ssl/context.hpp>
#include <cstdint>
#include <mutex>
#include <set>
namespace icinga
@ -115,7 +117,7 @@ public:
/* filesync */
static Value ConfigUpdateHandler(const MessageOrigin::Ptr& origin, const Dictionary::Ptr& params);
static void HandleConfigUpdate(const MessageOrigin::Ptr& origin, const Dictionary::Ptr& params);
void HandleConfigUpdate(const MessageOrigin::Ptr& origin, const Dictionary::Ptr& params);
/* configsync */
static void ConfigUpdateObjectHandler(const ConfigObject::Ptr& object, const Value& cookie);
@ -216,7 +218,7 @@ private:
void RemoveStatusFile();
/* filesync */
static boost::mutex m_ConfigSyncStageLock;
static SpinLock m_ConfigSyncStageLock;
void SyncLocalZoneDirs() const;
void SyncLocalZoneDir(const Zone::Ptr& zone) const;
@ -230,7 +232,7 @@ private:
static void TryActivateZonesStageCallback(const ProcessResult& pr,
const std::vector<String>& relativePaths);
static void AsyncTryActivateZonesStage(const std::vector<String>& relativePaths, const Shared<boost::mutex::scoped_lock>::Ptr& lock);
static void AsyncTryActivateZonesStage(const std::vector<String>& relativePaths, const Shared<std::unique_lock<SpinLock>>::Ptr& lock);
static String GetChecksum(const String& content);
static bool CheckConfigChange(const ConfigDirInformation& oldConfig, const ConfigDirInformation& newConfig);

View File

@ -15,47 +15,34 @@ using namespace icinga;
REGISTER_APIFUNCTION(Heartbeat, event, &JsonRpcConnection::HeartbeatAPIHandler);
/**
* We still send a heartbeat without timeout here
* to keep the m_Seen variable up to date. This is to keep the
* cluster connection alive when there isn't much going on.
*/
void JsonRpcConnection::HandleAndWriteHeartbeats(boost::asio::yield_context yc)
{
boost::system::error_code ec;
for (;;) {
m_HeartbeatTimer.expires_from_now(boost::posix_time::seconds(10));
m_HeartbeatTimer.expires_from_now(boost::posix_time::seconds(20));
m_HeartbeatTimer.async_wait(yc[ec]);
if (m_ShuttingDown) {
break;
}
if (m_NextHeartbeat != 0 && m_NextHeartbeat < Utility::GetTime()) {
Log(LogWarning, "JsonRpcConnection")
<< "Client for endpoint '" << m_Endpoint->GetName() << "' has requested "
<< "heartbeat message but hasn't responded in time. Closing connection.";
Disconnect();
break;
}
if (m_Endpoint) {
SendMessageInternal(new Dictionary({
{ "jsonrpc", "2.0" },
{ "method", "event::Heartbeat" },
{ "params", new Dictionary({
{ "timeout", 120 }
}) }
}));
}
SendMessageInternal(new Dictionary({
{ "jsonrpc", "2.0" },
{ "method", "event::Heartbeat" },
{ "params", new Dictionary() }
}));
}
}
/**
 * API handler for "event::Heartbeat" messages.
 * If the sender supplied a "timeout" parameter, records the deadline
 * (now + timeout) by which the next heartbeat is expected on this connection.
 *
 * @param origin Message origin; the deadline is stored on its client.
 * @param params May contain a numeric "timeout" value (seconds).
 * @returns Empty.
 */
Value JsonRpcConnection::HeartbeatAPIHandler(const MessageOrigin::Ptr& origin, const Dictionary::Ptr& params)
{
Value vtimeout = params->Get("timeout");
/* Only track a deadline when the peer actually requested one. */
if (!vtimeout.IsEmpty()) {
origin->FromClient->m_NextHeartbeat = Utility::GetTime() + vtimeout;
}
return Empty;
}

View File

@ -60,6 +60,8 @@ void JsonRpcConnection::Start()
void JsonRpcConnection::HandleIncomingMessages(boost::asio::yield_context yc)
{
m_Stream->next_layer().SetSeen(&m_Seen);
for (;;) {
String message;
@ -233,8 +235,20 @@ void JsonRpcConnection::Disconnect()
m_Stream->lowest_layer().cancel(ec);
Timeout::Ptr shutdownTimeout (new Timeout(
m_IoStrand.context(),
m_IoStrand,
boost::posix_time::seconds(10),
[this, keepAlive](asio::yield_context yc) {
boost::system::error_code ec;
m_Stream->lowest_layer().cancel(ec);
}
));
m_Stream->next_layer().async_shutdown(yc[ec]);
shutdownTimeout->Cancel();
m_Stream->lowest_layer().shutdown(m_Stream->lowest_layer().shutdown_both, ec);
}
});

View File

@ -0,0 +1,42 @@
# Builds an Icinga 2 Chocolatey package (.nupkg):
#  1. Verifies choco.exe and the chocolateyInstall.ps1 template exist.
#  2. Hashes the previously built 32 and 64 bit MSI packages.
#  3. Substitutes the checksums into the install script template.
#  4. Runs "choco pack" and moves the resulting .nupkg into the build dir.
Set-PsDebug -Trace 1

if (-not (Test-Path "$($env:ProgramData)\chocolatey\choco.exe")) {
	throw "Could not find Choco executable. Abort."
}

# Default build directory when the caller did not set ICINGA2_BUILDPATH.
if (-not (Test-Path env:ICINGA2_BUILDPATH)) {
	$env:ICINGA2_BUILDPATH = '.\build'
}

if (-not (Test-Path "$($env:ICINGA2_BUILDPATH)\choco\chocolateyInstall.ps1.template")) {
	throw "Could not find Chocolatey install script template. Abort."
}

$chocoInstallScriptTemplatePath = "$($env:ICINGA2_BUILDPATH)\choco\chocolateyInstall.ps1.template"
$chocoInstallScript = Get-Content $chocoInstallScriptTemplatePath

if (-not (Test-Path "$($env:ICINGA2_BUILDPATH)\*-x86.msi")) {
	throw "Could not find Icinga 2 32 bit MSI package. Abort."
}

$hashMSIpackage32 = Get-FileHash "$($env:ICINGA2_BUILDPATH)\*-x86.msi"
Write-Output "File Hash for 32 bit MSI package: $($hashMSIpackage32.Hash)."

if (-not (Test-Path "$($env:ICINGA2_BUILDPATH)\*-x86_64.msi")) {
	throw "Could not find Icinga 2 64 bit MSI package. Abort."
}

$hashMSIpackage64 = Get-FileHash "$($env:ICINGA2_BUILDPATH)\*-x86_64.msi"
# Bug fix: this message previously claimed "32 bit" while printing the 64 bit hash.
Write-Output "File Hash for 64 bit MSI package: $($hashMSIpackage64.Hash)"

# Inject both checksums into the install script and write the final script.
$chocoInstallScript = $chocoInstallScript.Replace("%CHOCO_32BIT_CHECKSUM%", "$($hashMSIpackage32.Hash)")
$chocoInstallScript = $chocoInstallScript.Replace("%CHOCO_64BIT_CHECKSUM%", "$($hashMSIpackage64.Hash)")
Write-Output $chocoInstallScript
Set-Content -Path "$($env:ICINGA2_BUILDPATH)\choco\chocolateyInstall.ps1" -Value $chocoInstallScript

# Run "choco pack" inside the choco build directory, then return.
cd "$($env:ICINGA2_BUILDPATH)\choco"
& "$($env:ProgramData)\chocolatey\choco.exe" "pack"
cd "..\.."

Move-Item -Path "$($env:ICINGA2_BUILDPATH)\choco\*.nupkg" -Destination "$($env:ICINGA2_BUILDPATH)"

View File

@ -17,16 +17,19 @@ if (-not ($env:PATH -contains $env:CMAKE_PATH)) {
$env:PATH = $env:CMAKE_PATH + ';' + $env:PATH
}
if (-not (Test-Path env:CMAKE_GENERATOR)) {
$env:CMAKE_GENERATOR = 'Visual Studio 15 2017 Win64'
$env:CMAKE_GENERATOR = 'Visual Studio 16 2019'
}
if (-not (Test-Path env:CMAKE_GENERATOR_PLATFORM)) {
$env:CMAKE_GENERATOR_PLATFORM = 'x64'
}
if (-not (Test-Path env:OPENSSL_ROOT_DIR)) {
$env:OPENSSL_ROOT_DIR = 'c:\local\OpenSSL_1_1_1b-Win64'
$env:OPENSSL_ROOT_DIR = 'c:\local\OpenSSL_1_1_1h-Win64'
}
if (-not (Test-Path env:BOOST_ROOT)) {
$env:BOOST_ROOT = 'c:\local\boost_1_69_0-Win64'
$env:BOOST_ROOT = 'c:\local\boost_1_71_0-Win64'
}
if (-not (Test-Path env:BOOST_LIBRARYDIR)) {
$env:BOOST_LIBRARYDIR = 'c:\local\boost_1_69_0-Win64\lib64-msvc-14.1'
$env:BOOST_LIBRARYDIR = 'c:\local\boost_1_71_0-Win64\lib64-msvc-14.2'
}
if (-not (Test-Path env:FLEX_BINARY)) {
$env:FLEX_BINARY = 'C:\ProgramData\chocolatey\bin\win_flex.exe'
@ -48,7 +51,7 @@ if (Test-Path CMakeCache.txt) {
& cmake.exe "$sourcePath" `
-DCMAKE_BUILD_TYPE="$env:CMAKE_BUILD_TYPE" `
-G "$env:CMAKE_GENERATOR" -DCPACK_GENERATOR=WIX `
-G "$env:CMAKE_GENERATOR" -A "$env:CMAKE_GENERATOR_PLATFORM" -DCPACK_GENERATOR=WIX `
-DICINGA2_WITH_MYSQL=OFF -DICINGA2_WITH_PGSQL=OFF `
-DICINGA2_WITH_LIVESTATUS=OFF -DICINGA2_WITH_COMPAT=OFF `
-DOPENSSL_ROOT_DIR="$env:OPENSSL_ROOT_DIR" `

View File

@ -18,7 +18,7 @@ if (-not (Test-Path $BUILD)) {
if (Test-Path env:VS_INSTALL_PATH) {
$VSBASE = $env:VS_INSTALL_PATH
} else {
$VSBASE = "C:\Program Files (x86)\Microsoft Visual Studio\2017"
$VSBASE = "C:\Program Files (x86)\Microsoft Visual Studio\2019"
}
if (Test-Path env:BITS) {