mirror of
https://github.com/Icinga/icinga2.git
synced 2025-09-26 02:58:43 +02:00
Merge remote-tracking branch 'origin/master' into aklimov/windows-agents-assume-parent-nodes-being-in-master-zone-9602
This commit is contained in:
commit
f8f155a0d4
2
.github/ISSUE_TEMPLATE/release.md
vendored
2
.github/ISSUE_TEMPLATE/release.md
vendored
@ -13,6 +13,7 @@ assignees: ''
|
||||
- [ ] Update bundled Windows dependencies
|
||||
- [ ] Harden global TLS defaults (consult https://ssl-config.mozilla.org)
|
||||
- [ ] Update `CHANGELOG.md`
|
||||
- [ ] Update `doc/16-upgrading-icinga-2.md` if applicable
|
||||
- [ ] Create and push a signed tag for the version
|
||||
- [ ] Build and release DEB and RPM packages
|
||||
- [ ] Build and release Windows packages
|
||||
@ -35,7 +36,6 @@ https://packages.icinga.com/windows/dependencies/, e.g.:
|
||||
|
||||
### Update Build Server, CI/CD and Documentation
|
||||
|
||||
* [doc/21-development.md](doc/21-development.md)
|
||||
* [doc/win-dev.ps1](doc/win-dev.ps1) (also affects CI/CD)
|
||||
* [tools/win32/configure.ps1](tools/win32/configure.ps1)
|
||||
* [tools/win32/configure-dev.ps1](tools/win32/configure-dev.ps1)
|
||||
|
7
.github/dependabot.yml
vendored
Normal file
7
.github/dependabot.yml
vendored
Normal file
@ -0,0 +1,7 @@
|
||||
version: 2
|
||||
updates:
|
||||
|
||||
- package-ecosystem: github-actions
|
||||
directory: /
|
||||
schedule:
|
||||
interval: daily
|
8
.github/workflows/alpine-bash.Dockerfile
vendored
Normal file
8
.github/workflows/alpine-bash.Dockerfile
vendored
Normal file
@ -0,0 +1,8 @@
|
||||
# This Dockerfile is used in the linux job for Alpine Linux.
|
||||
#
|
||||
# As the linux.bash script is, in fact, a bash script and Alpine does not ship
|
||||
# a bash by default, the "alpine:bash" container will be built using this
|
||||
# Dockerfile in the GitHub Action.
|
||||
|
||||
FROM alpine:3
|
||||
RUN ["apk", "--no-cache", "add", "bash"]
|
4
.github/workflows/authors-file.yml
vendored
4
.github/workflows/authors-file.yml
vendored
@ -10,7 +10,7 @@ jobs:
|
||||
|
||||
steps:
|
||||
- name: Checkout HEAD
|
||||
uses: actions/checkout@v3
|
||||
uses: actions/checkout@v5
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
@ -21,7 +21,7 @@ jobs:
|
||||
git add AUTHORS
|
||||
git log --format='format:%aN <%aE>' "$(
|
||||
git merge-base HEAD^1 HEAD^2
|
||||
)..HEAD^2" >> AUTHORS
|
||||
)..HEAD^2" | sed '/^dependabot\[bot] /d' >> AUTHORS
|
||||
sort -uo AUTHORS AUTHORS
|
||||
git diff AUTHORS >> AUTHORS.diff
|
||||
|
||||
|
71
.github/workflows/linux.bash
vendored
71
.github/workflows/linux.bash
vendored
@ -1,19 +1,28 @@
|
||||
#!/bin/bash
|
||||
set -exo pipefail
|
||||
|
||||
export PATH="/usr/lib/ccache:/usr/lib64/ccache:/opt/rh/devtoolset-11/root/usr/bin:$PATH"
|
||||
export PATH="/usr/lib/ccache/bin:/usr/lib/ccache:/usr/lib64/ccache:$PATH"
|
||||
export CCACHE_DIR=/icinga2/ccache
|
||||
export CTEST_OUTPUT_ON_FAILURE=1
|
||||
CMAKE_OPTS=''
|
||||
CMAKE_OPTS=()
|
||||
|
||||
case "$DISTRO" in
|
||||
alpine:*)
|
||||
# Packages inspired by the Alpine package, just
|
||||
# - LibreSSL instead of OpenSSL 3 and
|
||||
# - no MariaDB or libpq as they depend on OpenSSL.
|
||||
# https://gitlab.alpinelinux.org/alpine/aports/-/blob/master/community/icinga2/APKBUILD
|
||||
apk add bison boost-dev ccache cmake flex g++ libedit-dev libressl-dev ninja-build tzdata
|
||||
ln -vs /usr/lib/ninja-build/bin/ninja /usr/local/bin/ninja
|
||||
;;
|
||||
|
||||
amazonlinux:2)
|
||||
amazon-linux-extras install -y epel
|
||||
yum install -y bison ccache cmake3 gcc-c++ flex ninja-build \
|
||||
yum install -y bison ccache cmake3 gcc-c++ flex ninja-build system-rpm-config \
|
||||
{libedit,mariadb,ncurses,openssl,postgresql,systemd}-devel
|
||||
|
||||
yum install -y bzip2 tar wget
|
||||
wget https://boostorg.jfrog.io/artifactory/main/release/1.69.0/source/boost_1_69_0.tar.bz2
|
||||
wget https://archives.boost.io/release/1.69.0/source/boost_1_69_0.tar.bz2
|
||||
tar -xjf boost_1_69_0.tar.bz2
|
||||
|
||||
(
|
||||
@ -24,42 +33,34 @@ case "$DISTRO" in
|
||||
|
||||
ln -vs /usr/bin/cmake3 /usr/local/bin/cmake
|
||||
ln -vs /usr/bin/ninja-build /usr/local/bin/ninja
|
||||
CMAKE_OPTS='-DBOOST_INCLUDEDIR=/boost_1_69_0 -DBOOST_LIBRARYDIR=/boost_1_69_0/stage/lib'
|
||||
CMAKE_OPTS+=(-DBOOST_{INCLUDEDIR=/boost_1_69_0,LIBRARYDIR=/boost_1_69_0/stage/lib})
|
||||
export LD_LIBRARY_PATH=/boost_1_69_0/stage/lib
|
||||
;;
|
||||
|
||||
amazonlinux:20*)
|
||||
dnf install -y bison cmake flex gcc-c++ ninja-build \
|
||||
{boost,libedit,mariadb1\*,ncurses,openssl,postgresql,systemd}-devel
|
||||
;;
|
||||
|
||||
centos:*)
|
||||
yum install -y centos-release-scl epel-release
|
||||
yum install -y bison ccache cmake3 devtoolset-11-gcc-c++ flex ninja-build \
|
||||
{boost169,libedit,mariadb,ncurses,openssl,postgresql,systemd}-devel
|
||||
|
||||
ln -vs /usr/bin/cmake3 /usr/local/bin/cmake
|
||||
ln -vs /usr/bin/ccache /usr/lib64/ccache/g++
|
||||
CMAKE_OPTS='-DBOOST_INCLUDEDIR=/usr/include/boost169 -DBOOST_LIBRARYDIR=/usr/lib64/boost169'
|
||||
dnf install -y amazon-rpm-config bison cmake flex gcc-c++ ninja-build \
|
||||
{boost,libedit,mariadb-connector-c,ncurses,openssl,postgresql,systemd}-devel
|
||||
;;
|
||||
|
||||
debian:*|ubuntu:*)
|
||||
apt-get update
|
||||
DEBIAN_FRONTEND=noninteractive apt-get install --no-install-{recommends,suggests} -y bison \
|
||||
ccache cmake flex g++ lib{boost-all,edit,mariadb,ncurses,pq,ssl,systemd}-dev ninja-build tzdata
|
||||
DEBIAN_FRONTEND=noninteractive apt-get install --no-install-{recommends,suggests} -y \
|
||||
bison ccache cmake dpkg-dev flex g++ ninja-build tzdata \
|
||||
lib{boost-all,edit,mariadb,ncurses,pq,ssl,systemd}-dev
|
||||
;;
|
||||
|
||||
fedora:*)
|
||||
dnf install -y bison ccache cmake flex gcc-c++ ninja-build \
|
||||
dnf install -y bison ccache cmake flex gcc-c++ ninja-build redhat-rpm-config \
|
||||
{boost,libedit,mariadb,ncurses,openssl,postgresql,systemd}-devel
|
||||
;;
|
||||
|
||||
opensuse/*)
|
||||
zypper in -y bison ccache cmake flex gcc-c++ ninja {lib{edit,mariadb,openssl},ncurses,postgresql,systemd}-devel \
|
||||
*suse*)
|
||||
zypper in -y bison ccache cmake flex gcc-c++ ninja rpm-config-SUSE \
|
||||
{lib{edit,mariadb,openssl},ncurses,postgresql,systemd}-devel \
|
||||
libboost_{context,coroutine,filesystem,iostreams,program_options,regex,system,test,thread}-devel
|
||||
;;
|
||||
|
||||
rockylinux:*)
|
||||
*rockylinux:*)
|
||||
dnf install -y 'dnf-command(config-manager)' epel-release
|
||||
|
||||
case "$DISTRO" in
|
||||
@ -71,8 +72,22 @@ case "$DISTRO" in
|
||||
;;
|
||||
esac
|
||||
|
||||
dnf install -y bison ccache cmake gcc-c++ flex ninja-build \
|
||||
{boost,libedit,mariadb,ncurses,openssl,postgresql,systemd}-devel
|
||||
dnf install -y bison ccache cmake gcc-c++ flex ninja-build redhat-rpm-config \
|
||||
{boost,bzip2,libedit,mariadb,ncurses,openssl,postgresql,systemd,xz,libzstd}-devel
|
||||
;;
|
||||
esac
|
||||
|
||||
case "$DISTRO" in
|
||||
alpine:*)
|
||||
CMAKE_OPTS+=(-DUSE_SYSTEMD=OFF -DICINGA2_WITH_MYSQL=OFF -DICINGA2_WITH_PGSQL=OFF)
|
||||
;;
|
||||
debian:*|ubuntu:*)
|
||||
CMAKE_OPTS+=(-DICINGA2_LTO_BUILD=ON)
|
||||
source <(dpkg-buildflags --export=sh)
|
||||
;;
|
||||
*)
|
||||
CMAKE_OPTS+=(-DCMAKE_{C,CXX}_FLAGS="$(rpm -E '%{optflags} %{?march_flag}')")
|
||||
export LDFLAGS="$(rpm -E '%{?build_ldflags}')"
|
||||
;;
|
||||
esac
|
||||
|
||||
@ -81,14 +96,14 @@ cd /icinga2/build
|
||||
|
||||
cmake \
|
||||
-GNinja \
|
||||
-DCMAKE_BUILD_TYPE=Release \
|
||||
-DCMAKE_BUILD_TYPE=RelWithDebInfo \
|
||||
-DICINGA2_UNITY_BUILD=ON \
|
||||
-DUSE_SYSTEMD=ON \
|
||||
-DICINGA2_USER=$(id -un) \
|
||||
-DICINGA2_GROUP=$(id -gn) \
|
||||
$CMAKE_OPTS ..
|
||||
"${CMAKE_OPTS[@]}" ..
|
||||
|
||||
ninja
|
||||
ninja -v
|
||||
|
||||
ninja test
|
||||
ninja install
|
||||
|
67
.github/workflows/linux.yml
vendored
67
.github/workflows/linux.yml
vendored
@ -13,7 +13,7 @@ concurrency:
|
||||
|
||||
jobs:
|
||||
linux:
|
||||
name: ${{ matrix.distro }}
|
||||
name: ${{ matrix.distro }}${{ matrix.platform != 'linux/amd64' && format(' ({0})', matrix.platform) || '' }}
|
||||
runs-on: ubuntu-latest
|
||||
|
||||
strategy:
|
||||
@ -21,37 +21,68 @@ jobs:
|
||||
max-parallel: 2
|
||||
matrix:
|
||||
distro:
|
||||
# Alpine Linux to build Icinga 2 with LibreSSL, OpenBSD's default.
|
||||
# The "alpine:bash" image will be built below based on "alpine:3".
|
||||
- alpine:bash
|
||||
|
||||
- amazonlinux:2
|
||||
- amazonlinux:2023
|
||||
- centos:7 # and RHEL 7
|
||||
- debian:11 # and Raspbian 11
|
||||
- debian:12 # and Raspbian 12
|
||||
- fedora:37
|
||||
- fedora:38
|
||||
|
||||
# Raspberry Pi OS is close enough to Debian to test just one of them.
|
||||
# Its architecture is different, though, and covered by the Docker job.
|
||||
- debian:11
|
||||
- debian:12
|
||||
- debian:13
|
||||
|
||||
- fedora:39
|
||||
- fedora:40
|
||||
- opensuse/leap:15.3 # SLES 15.3
|
||||
- opensuse/leap:15.4 # and SLES 15.4
|
||||
- opensuse/leap:15.5 # and SLES 15.5
|
||||
- rockylinux:8 # RHEL 8
|
||||
- rockylinux:9 # RHEL 9
|
||||
- ubuntu:20.04
|
||||
- fedora:41
|
||||
- fedora:42
|
||||
|
||||
- opensuse/leap:15.5
|
||||
- opensuse/leap:15.6
|
||||
|
||||
# We don't actually support Rocky Linux as such!
|
||||
# We just use that RHEL clone to test the original.
|
||||
- rockylinux:8
|
||||
- rockylinux:9
|
||||
- rockylinux/rockylinux:10
|
||||
|
||||
- registry.suse.com/suse/sle15:15.5
|
||||
- registry.suse.com/suse/sle15:15.6
|
||||
- registry.suse.com/suse/sle15:15.7
|
||||
|
||||
- ubuntu:22.04
|
||||
- ubuntu:23.04
|
||||
- ubuntu:23.10
|
||||
- ubuntu:24.04
|
||||
- ubuntu:24.10
|
||||
- ubuntu:25.04
|
||||
|
||||
platform:
|
||||
- linux/amd64
|
||||
|
||||
include:
|
||||
- distro: debian:11
|
||||
platform: linux/386
|
||||
- distro: debian:12
|
||||
platform: linux/386
|
||||
|
||||
steps:
|
||||
- name: Checkout HEAD
|
||||
uses: actions/checkout@v3
|
||||
uses: actions/checkout@v5
|
||||
|
||||
- name: Restore/backup ccache
|
||||
uses: actions/cache@v3
|
||||
uses: actions/cache@v4
|
||||
with:
|
||||
path: ccache
|
||||
key: ccache/${{ matrix.distro }}
|
||||
|
||||
- name: Build
|
||||
- name: Build Alpine Docker Image
|
||||
if: "matrix.distro == 'alpine:bash'"
|
||||
run: >-
|
||||
docker build --file .github/workflows/alpine-bash.Dockerfile
|
||||
--tag alpine:bash `mktemp -d`
|
||||
|
||||
- name: Build Icinga
|
||||
run: >-
|
||||
docker run --rm -v "$(pwd):/icinga2" -e DISTRO=${{ matrix.distro }}
|
||||
${{ matrix.distro }} /icinga2/.github/workflows/linux.bash
|
||||
--platform ${{ matrix.platform }} ${{ matrix.distro }} /icinga2/.github/workflows/linux.bash
|
||||
|
116
.github/workflows/rpm.yml
vendored
116
.github/workflows/rpm.yml
vendored
@ -1,116 +0,0 @@
|
||||
name: .rpm
|
||||
|
||||
on:
|
||||
push:
|
||||
branches:
|
||||
- master
|
||||
- 'support/*'
|
||||
pull_request: {}
|
||||
|
||||
concurrency:
|
||||
group: rpm-${{ github.event_name == 'push' && github.sha || github.ref }}
|
||||
cancel-in-progress: true
|
||||
|
||||
jobs:
|
||||
rpm:
|
||||
name: .rpm (${{ matrix.distro.name }}, ${{ matrix.distro.release }})
|
||||
|
||||
strategy:
|
||||
fail-fast: false
|
||||
max-parallel: 1
|
||||
matrix:
|
||||
distro:
|
||||
- name: sles
|
||||
release: '12.5'
|
||||
subscription: true
|
||||
|
||||
runs-on: ubuntu-latest
|
||||
|
||||
steps:
|
||||
- name: Vars
|
||||
id: vars
|
||||
env:
|
||||
GITLAB_RO_TOKEN: '${{ secrets.GITLAB_RO_TOKEN }}'
|
||||
run: |
|
||||
if [ ${{ matrix.distro.subscription }} = true ]; then
|
||||
if [ "$(tr -d '\n' <<<"$GITLAB_RO_TOKEN" |wc -c)" -eq 0 ]; then
|
||||
echo '::set-output name=CAN_BUILD::false'
|
||||
echo '::set-output name=NEED_LOGIN::false'
|
||||
else
|
||||
echo '::set-output name=CAN_BUILD::true'
|
||||
echo '::set-output name=NEED_LOGIN::true'
|
||||
fi
|
||||
else
|
||||
echo '::set-output name=CAN_BUILD::true'
|
||||
echo '::set-output name=NEED_LOGIN::false'
|
||||
fi
|
||||
|
||||
- name: Checkout HEAD
|
||||
if: "steps.vars.outputs.CAN_BUILD == 'true'"
|
||||
uses: actions/checkout@v1
|
||||
|
||||
- name: Login
|
||||
if: "steps.vars.outputs.NEED_LOGIN == 'true'"
|
||||
env:
|
||||
GITLAB_RO_TOKEN: '${{ secrets.GITLAB_RO_TOKEN }}'
|
||||
run: |
|
||||
docker login registry.icinga.com -u github-actions --password-stdin <<<"$GITLAB_RO_TOKEN"
|
||||
|
||||
- name: rpm-icinga2
|
||||
if: "steps.vars.outputs.CAN_BUILD == 'true' && !matrix.distro.subscription"
|
||||
run: |
|
||||
set -exo pipefail
|
||||
git clone https://git.icinga.com/packaging/rpm-icinga2.git
|
||||
chmod o+w rpm-icinga2
|
||||
|
||||
- name: subscription-rpm-icinga2
|
||||
if: "steps.vars.outputs.CAN_BUILD == 'true' && matrix.distro.subscription"
|
||||
env:
|
||||
GITLAB_RO_TOKEN: '${{ secrets.GITLAB_RO_TOKEN }}'
|
||||
run: |
|
||||
set -exo pipefail
|
||||
git config --global credential.helper store
|
||||
cat <<EOF >~/.git-credentials
|
||||
https://github-actions:${GITLAB_RO_TOKEN}@git.icinga.com
|
||||
EOF
|
||||
git clone https://git.icinga.com/packaging/subscription-rpm-icinga2.git rpm-icinga2
|
||||
chmod o+w rpm-icinga2
|
||||
|
||||
- name: Restore/backup ccache
|
||||
if: "steps.vars.outputs.CAN_BUILD == 'true'"
|
||||
id: ccache
|
||||
uses: actions/cache@v1
|
||||
with:
|
||||
path: rpm-icinga2/ccache
|
||||
key: |-
|
||||
${{ matrix.distro.name }}/${{ matrix.distro.release }}-ccache-${{ hashFiles('rpm-icinga2/ccache') }}
|
||||
|
||||
- name: Binary
|
||||
if: "steps.vars.outputs.CAN_BUILD == 'true'"
|
||||
run: |
|
||||
set -exo pipefail
|
||||
git checkout -B master
|
||||
if [ -e rpm-icinga2/ccache ]; then
|
||||
chmod -R o+w rpm-icinga2/ccache
|
||||
fi
|
||||
docker run --rm \
|
||||
-v "$(pwd)/rpm-icinga2:/rpm-icinga2" \
|
||||
-v "$(pwd)/.git:/icinga2.git:ro" \
|
||||
-w /rpm-icinga2 \
|
||||
-e ICINGA_BUILD_PROJECT=icinga2 \
|
||||
-e ICINGA_BUILD_TYPE=snapshot \
|
||||
-e UPSTREAM_GIT_URL=file:///icinga2.git \
|
||||
registry.icinga.com/build-docker/${{ matrix.distro.name }}/${{ matrix.distro.release }} \
|
||||
icinga-build-package
|
||||
|
||||
- name: Test
|
||||
if: "steps.vars.outputs.CAN_BUILD == 'true'"
|
||||
run: |
|
||||
set -exo pipefail
|
||||
docker run --rm \
|
||||
-v "$(pwd)/rpm-icinga2:/rpm-icinga2" \
|
||||
-w /rpm-icinga2 \
|
||||
-e ICINGA_BUILD_PROJECT=icinga2 \
|
||||
-e ICINGA_BUILD_TYPE=snapshot \
|
||||
registry.icinga.com/build-docker/${{ matrix.distro.name }}/${{ matrix.distro.release }} \
|
||||
icinga-build-test
|
40
.github/workflows/windows.yml
vendored
40
.github/workflows/windows.yml
vendored
@ -21,33 +21,39 @@ jobs:
|
||||
matrix:
|
||||
bits: [32, 64]
|
||||
|
||||
runs-on: windows-2019
|
||||
runs-on: windows-2025
|
||||
|
||||
env:
|
||||
BITS: '${{ matrix.bits }}'
|
||||
ICINGA_BUILD_TYPE: snapshot
|
||||
UPSTREAM_GIT_URL: file://D:/a/icinga2/icinga2/.git
|
||||
CMAKE_BUILD_TYPE: RelWithDebInfo
|
||||
|
||||
steps:
|
||||
- name: Checkout HEAD
|
||||
uses: actions/checkout@v1
|
||||
|
||||
- name: windows-icinga2
|
||||
run: |
|
||||
git clone https://git.icinga.com/packaging/windows-icinga2.git
|
||||
uses: actions/checkout@v5
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
- name: Build tools
|
||||
run: |
|
||||
Set-PSDebug -Trace 1
|
||||
& .\doc\win-dev.ps1
|
||||
|
||||
- name: Source
|
||||
run: |
|
||||
git checkout -B master
|
||||
cd windows-icinga2
|
||||
& .\source.ps1
|
||||
|
||||
- name: Binary
|
||||
working-directory: windows-icinga2
|
||||
run: |
|
||||
New-Item -ItemType Directory -Path 'C:\Program Files\Icinga2\WillBeRemoved' -ErrorAction SilentlyContinue
|
||||
& .\build.ps1
|
||||
Set-PSDebug -Trace 1
|
||||
& .\tools\win32\load-vsenv.ps1
|
||||
& powershell.exe .\tools\win32\configure.ps1
|
||||
if ($LastExitCode -ne 0) { throw "Error during configure" }
|
||||
& powershell.exe .\tools\win32\build.ps1
|
||||
if ($LastExitCode -ne 0) { throw "Error during build" }
|
||||
& powershell.exe .\tools\win32\test.ps1
|
||||
if ($LastExitCode -ne 0) { throw "Error during test" }
|
||||
|
||||
- name: Show Log Files
|
||||
if: ${{ always() }}
|
||||
run: |
|
||||
foreach ($file in Get-ChildItem -Recurse -Filter "*.log") {
|
||||
Write-Host "::group::$($file.FullName)"
|
||||
Get-Content $file.FullName
|
||||
Write-Host "::endgroup::"
|
||||
}
|
||||
|
2
.mailmap
2
.mailmap
@ -35,6 +35,7 @@ Alexander A. Klimov <alexander.klimov@icinga.com> <al2klimov@gmail.com>
|
||||
<tobias.vonderkrone@profitbricks.com> <tobias@vonderkrone.info>
|
||||
<yonas.habteab@icinga.com> <yonas.habteab@netways.de>
|
||||
Alex <alexp710@hotmail.com> <alexp710@hotmail.com>
|
||||
Alvar Penning <alvar.penning@icinga.com> <8402811+oxzi@users.noreply.github.com>
|
||||
Baptiste Beauplat <lyknode@cilg.org> <lyknode@cilg.org>
|
||||
Carsten Köbke <carsten.koebke@gmx.de> Carsten Koebke <carsten.koebke@koebbes.de>
|
||||
Claudio Kuenzler <ck@claudiokuenzler.com>
|
||||
@ -42,6 +43,7 @@ Diana Flach <diana.flach@icinga.com> <crunsher@bamberg.ccc.de>
|
||||
Diana Flach <diana.flach@icinga.com> <Crunsher@users.noreply.github.com>
|
||||
Diana Flach <diana.flach@icinga.com> <jean-marcel.flach@netways.de>
|
||||
Diana Flach <diana.flach@icinga.com> Jean Flach <jean-marcel.flach@icinga.com>
|
||||
Dirk Wening <dirk.wening@netways.de> <170401214+SpeedD3@users.noreply.github.com>
|
||||
Dolf Schimmel <dolf@transip.nl> <dolf@dolfschimmel.nl>
|
||||
Gunnar Beutner <gunnar.beutner@icinga.com> <icinga@net-icinga2.adm.netways.de>
|
||||
Henrik Triem <henrik.triem@icinga.com> <henrik.triem@netways.de>
|
||||
|
12
AUTHORS
12
AUTHORS
@ -21,6 +21,7 @@ Andres Ivanov <andres@andres.wtf>
|
||||
Andrew Jaffie <ajaffie@gmail.com>
|
||||
Andrew Meyer <ameyer+secure@nodnetwork.org>
|
||||
Andy Grunwald <andygrunwald@gmail.com>
|
||||
Angel Roman <angel.r.roman77@gmail.com>
|
||||
Ant1x <37016240+Ant1x@users.noreply.github.com>
|
||||
Arnd Hannemann <arnd@arndnet.de>
|
||||
Assaf Flatto <assaf@aikilinux.com>
|
||||
@ -47,11 +48,13 @@ C C Magnus Gustavsson <magnus@gustavsson.se>
|
||||
Carlos Cesario <carloscesario@gmail.com>
|
||||
Carsten Köbke <carsten.koebke@gmx.de>
|
||||
Chris Boot <crb@tiger-computing.co.uk>
|
||||
Chris Malton <chris@deltav-tech.co.uk>
|
||||
Christian Birk <mail@birkc.de>
|
||||
Christian Gut <cycloon@is-root.org>
|
||||
Christian Harke <ch.harke@gmail.com>
|
||||
Christian Jonak <christian@jonak.org>
|
||||
Christian Lehmann <christian_lehmann@gmx.de>
|
||||
Christian Lauf <github.com@christian-lauf.info>
|
||||
Christian Loos <cloos@netsandbox.de>
|
||||
Christian Schmidt <github@chsc.dk>
|
||||
Christopher Peterson <3893680+cspeterson@users.noreply.github.com>
|
||||
@ -75,6 +78,7 @@ Diana Flach <diana.flach@icinga.com>
|
||||
Didier 'OdyX' Raboud <didier.raboud@liip.ch>
|
||||
Dinesh Majrekar <dinesh.majrekar@serverchoice.com>
|
||||
Dirk Goetz <dirk.goetz@icinga.com>
|
||||
Dirk Wening <dirk.wening@netways.de>
|
||||
Dirk Melchers <dirk@dirk-melchers.de>
|
||||
Dolf Schimmel <dolf@transip.nl>
|
||||
Dominik Riva <driva@protonmail.com>
|
||||
@ -133,6 +137,7 @@ Jesse Morgan <morgajel@gmail.com>
|
||||
Jo Goossens <jo.goossens@hosted-power.com>
|
||||
Jochen Friedrich <j.friedrich@nwe.de>
|
||||
Johannes Meyer <johannes.meyer@icinga.com>
|
||||
Johannes Schmidt <johannes.schmidt@icinga.com>
|
||||
Jonas Meurer <jonas@freesources.org>
|
||||
Jordi van Scheijen <jordi.vanscheijen@solvinity.com>
|
||||
Josef Friedrich <josef@friedrich.rocks>
|
||||
@ -176,6 +181,7 @@ Marius Bergmann <marius@yeai.de>
|
||||
Marius Sturm <marius@graylog.com>
|
||||
Mark Leary <mleary@mit.edu>
|
||||
Markus Frosch <markus.frosch@icinga.com>
|
||||
Markus Opolka <markus.opolka@netways.de>
|
||||
Markus Waldmüller <markus.waldmueller@netways.de>
|
||||
Markus Weber <github@ztweb.de>
|
||||
Martijn van Duren <m.vanduren@itisit.nl>
|
||||
@ -212,6 +218,7 @@ nemtrif <ntrifunovic@hotmail.com>
|
||||
Nicolai <nbuchwitz@users.noreply.github.com>
|
||||
Nicolas Berens <nicolas.berens@planet.com>
|
||||
Nicolas Limage <github@xephon.org>
|
||||
Nicolas Rodriguez <nico@nicoladmin.fr>
|
||||
Nicole Lang <nicole.lang@icinga.com>
|
||||
Niflou <dubuscyr@gmail.com>
|
||||
Noah Hilverling <noah.hilverling@icinga.com>
|
||||
@ -239,6 +246,7 @@ pv2b <pvz@pvz.pp.se>
|
||||
Ralph Breier <ralph.breier@roedl.com>
|
||||
Reto Zeder <reto.zeder@arcade.ch>
|
||||
Ricardo Bartels <ricardo@bitchbrothers.com>
|
||||
Richard Mortimer <richm@oldelvet.org.uk>
|
||||
Rinck H. Sonnenberg <r.sonnenberg@netson.nl>
|
||||
Robert Lindgren <robert.lindgren@gmail.com>
|
||||
Robert Scheck <robert@fedoraproject.org>
|
||||
@ -255,6 +263,7 @@ Sascha Westermann <sascha.westermann@hl-services.de>
|
||||
Sebastian Brückner <mail@invlid.com>
|
||||
Sebastian Chrostek <sebastian@chrostek.net>
|
||||
Sebastian Eikenberg <eikese@mail.uni-paderborn.de>
|
||||
Sebastian Grund <s.grund@openinfrastructure.de>
|
||||
Sebastian Marsching <sebastian-git-2016@marsching.com>
|
||||
Silas <67681686+Tqnsls@users.noreply.github.com>
|
||||
Simon Murray <spjmurray@yahoo.co.uk>
|
||||
@ -283,6 +292,7 @@ Thomas Widhalm <thomas.widhalm@icinga.com>
|
||||
Tim Hardeck <thardeck@suse.de>
|
||||
Tim Weippert <weiti@weiti.eu>
|
||||
Timo Buhrmester <van.fstd@gmail.com>
|
||||
Tobias Bauriedel <tobias.bauriedel@netways.de>
|
||||
Tobias Birnbaum <osterd@gmx.de>
|
||||
Tobias Deiminger <haxtibal@posteo.de>
|
||||
Tobias von der Krone <tobias.vonderkrone@profitbricks.com>
|
||||
@ -293,10 +303,12 @@ vigiroux <vincent.giroux@nokia.com>
|
||||
Vytenis Darulis <vytenis@uber.com>
|
||||
Wenger Florian <wenger@unifox.at>
|
||||
Will Frey <will.frey@digitalreasoning.com>
|
||||
William Calliari <42240136+w1ll-i-code@users.noreply.github.com>
|
||||
Winfried Angele <winfried.angele@gmail.com>
|
||||
Wolfgang Nieder <wnd@gmx.net>
|
||||
XnS <git@xns.be>
|
||||
Yannick Charton <tontonitch-pro@yahoo.fr>
|
||||
Yannick Martin <yannick.martin@ovhcloud.com>
|
||||
Yohan Jarosz <yohanjarosz@yahoo.fr>
|
||||
Yonas Habteab <yonas.habteab@icinga.com>
|
||||
Zachary McGibbon <zachary.mcgibbon@gmail.com>
|
||||
|
321
CHANGELOG.md
321
CHANGELOG.md
@ -7,6 +7,266 @@ documentation before upgrading to a new release.
|
||||
|
||||
Released closed milestones can be found on [GitHub](https://github.com/Icinga/icinga2/milestones?state=closed).
|
||||
|
||||
## 2.15.0 (2025-06-18)
|
||||
|
||||
This Icinga 2 release is focused on adding Icinga 2 dependencies support to Icinga DB, but also includes a number
|
||||
of bugfixes, enhancements and code quality improvements. Below is a summary of the most important changes, for the
|
||||
complete list of issues and PRs, please see the [milestone on GitHub](https://github.com/Icinga/icinga2/issues?q=is%3Aclosed+milestone%3A2.15.0).
|
||||
|
||||
### Notes
|
||||
|
||||
Thanks to all contributors:
|
||||
[ChrLau](https://github.com/Icinga/icinga2/pulls?q=is%3Apr+is%3Aclosed+milestone%3A2.15.0+author%3AChrLau),
|
||||
[Josef-Friedrich](https://github.com/Icinga/icinga2/pulls?q=is%3Apr+is%3Aclosed+milestone%3A2.15.0+author%3AJosef-Friedrich),
|
||||
[LordHepipud](https://github.com/Icinga/icinga2/pulls?q=is%3Apr+is%3Aclosed+milestone%3A2.15.0+author%3ALordHepipud),
|
||||
[OdyX](https://github.com/Icinga/icinga2/pulls?q=is%3Apr+is%3Aclosed+milestone%3A2.15.0+author%3AOdyX),
|
||||
[RincewindsHat](https://github.com/Icinga/icinga2/pulls?q=is%3Apr+is%3Aclosed+milestone%3A2.15.0+author%3ARincewindsHat),
|
||||
[SebastianOpeni](https://github.com/Icinga/icinga2/pulls?q=is%3Apr+is%3Aclosed+milestone%3A2.15.0+author%3ASebastianOpeni),
|
||||
[SpeedD3](https://github.com/Icinga/icinga2/pulls?q=is%3Apr+is%3Aclosed+milestone%3A2.15.0+author%3ASpeedD3),
|
||||
[Tqnsls](https://github.com/Icinga/icinga2/pulls?q=is%3Apr+is%3Aclosed+milestone%3A2.15.0+author%3ATqnsls),
|
||||
[botovq](https://github.com/Icinga/icinga2/pulls?q=is%3Apr+is%3Aclosed+milestone%3A2.15.0+author%3Abotovq),
|
||||
[cycloon](https://github.com/Icinga/icinga2/pulls?q=is%3Apr+is%3Aclosed+milestone%3A2.15.0+author%3Acycloon),
|
||||
[legioner0](https://github.com/Icinga/icinga2/pulls?q=is%3Apr+is%3Aclosed+milestone%3A2.15.0+author%3Alegioner0),
|
||||
[legna-namor](https://github.com/Icinga/icinga2/pulls?q=is%3Apr+is%3Aclosed+milestone%3A2.15.0+author%3Alegna-namor),
|
||||
[macdems](https://github.com/Icinga/icinga2/pulls?q=is%3Apr+is%3Aclosed+milestone%3A2.15.0+author%3Amacdems),
|
||||
[mathiasaerts](https://github.com/Icinga/icinga2/pulls?q=is%3Apr+is%3Aclosed+milestone%3A2.15.0+author%3Amathiasaerts),
|
||||
[mcodato](https://github.com/Icinga/icinga2/pulls?q=is%3Apr+is%3Aclosed+milestone%3A2.15.0+author%3Amcodato),
|
||||
[n-rodriguez](https://github.com/Icinga/icinga2/pulls?q=is%3Apr+is%3Aclosed+milestone%3A2.15.0+author%3An-rodriguez),
|
||||
[netphantm](https://github.com/Icinga/icinga2/pulls?q=is%3Apr+is%3Aclosed+milestone%3A2.15.0+author%3Anetphantm),
|
||||
[nicolasberens](https://github.com/Icinga/icinga2/pulls?q=is%3Apr+is%3Aclosed+milestone%3A2.15.0+author%3Anicolasberens),
|
||||
[oldelvet](https://github.com/Icinga/icinga2/pulls?q=is%3Apr+is%3Aclosed+milestone%3A2.15.0+author%3Aoldelvet),
|
||||
[peteeckel](https://github.com/Icinga/icinga2/pulls?q=is%3Apr+is%3Aclosed+milestone%3A2.15.0+author%3Apeteeckel),
|
||||
[tbauriedel](https://github.com/Icinga/icinga2/pulls?q=is%3Apr+is%3Aclosed+milestone%3A2.15.0+author%3Atbauriedel),
|
||||
[w1ll-i-code](https://github.com/Icinga/icinga2/pulls?q=is%3Apr+is%3Aclosed+milestone%3A2.15.0+author%3Aw1ll-i-code),
|
||||
[ymartin-ovh](https://github.com/Icinga/icinga2/pulls?q=is%3Apr+is%3Aclosed+milestone%3A2.15.0+author%3Aymartin-ovh)
|
||||
|
||||
### Breaking Changes
|
||||
|
||||
* API: Fix `/v1/objects/*` queries with `attrs` set to `[]` to return empty attributes instead of all of them. #8169
|
||||
* Drop the undocumented `Checkable#process_check_result` and broken `System#track_parents` DSL functions. #10457
|
||||
|
||||
### Enhancements
|
||||
|
||||
* Gracefully disconnect all clients on shutdown and prevent from accepting new connections. #10460
|
||||
* Icinga DB: Send data to Redis® exactly as they're stored in the database to avoid extra value-mapping routines by the Go daemon. #10452
|
||||
* Add support for Icinga 2 dependencies in Icinga DB. #10290
|
||||
* Take host/service reachability into account when computing its severity. #10399
|
||||
* Rework the dependency cycle detection to efficiently handle large configs and provide better error messages. #10360
|
||||
* Don't log next check timestamp in scientific notation. #10352
|
||||
* Automatically remove child downtimes when removing parent downtime. #10345
|
||||
* Ensure compatibility with Boost version up to v1.88. #10278 #10419
|
||||
* Reject infinite performance data values. #10077
|
||||
* Support `host_template` and `service_template` tags in `ElasticsearchWriter`. #10074
|
||||
* Icinga DB: Support Redis® username authentication. #10102
|
||||
* Cluster: Distribute host child objects (e.g. services, notifications, etc.) based on the host's name. #10161
|
||||
* Icinga DB Check: Report an error if both Icinga DB instances are responsible in a HA setup. #10188
|
||||
* Windows: upgrade build toolchain to Visual Studio 2022. #9747
|
||||
|
||||
### Bugfixes
|
||||
|
||||
* Core
|
||||
* Use `Checkable#check_timeout` also for rescheduling remote checks. #10443
|
||||
* Log: Don't unnecessarily buffer log messages that are going to be dropped anyway. #10177
|
||||
* Don't loose perfdata counter (`c`) unit when normalizing performance data for Icinga DB. #10432
|
||||
* Fix broken SELinux policy on Fedora ≥ 41 due to the new `/usr/sbin` to `/usr/bin` equivalence. #10429
|
||||
* Don't load `Notification` objects before `User` and `UserGroup` objects to allow them to be referenced in notifications. #10427
|
||||
* Ensure consistent DST handling across different platforms. #10422
|
||||
* Fix Icinga 2 doesn't generate a core dump when it crashes with SIGABRT. #10416
|
||||
* Don't process concurrent checks for the same checkable. #10372
|
||||
* Don't process check results after the checker and API listener have been stopped. #10397
|
||||
* Avoid zombie processes on plugin execution timeout on busy systems. #10375
|
||||
* Properly restore the notification object state on `Recovery` notification. #10361
|
||||
* Fix incorrectly dropped acknowledgement and recovery notifications. #10211
|
||||
* Prevent checks from always being rescheduled outside the configured `check_period`. #10070
|
||||
* Don't send reminder notifications after a `Custom` notification while `interval` is set to `0`. #7818
|
||||
* Reset all signal handlers of child processes to their defaults before starting a plugin. #8011
|
||||
* tests: Fix `FormatDateTime` test cases with invalid formats on macOS and all BSD-based systems. #10149
|
||||
* Mark move constructor and assignment operator in `String` as `noexcept` to allow optimizations. #10353 #10365
|
||||
* Cluster and API
|
||||
* Fix an inverted condition in `ApiListener#IsHACluster()` that caused to always return `true` in a non-HA setup. #10417
|
||||
* Don't silently accept authenticated JSON-RPC connections with no valid endpoint. #10415
|
||||
* Sync `Notification#notified_problem_users` across the cluster to prevent lost recovery notifications. #10380
|
||||
* Remove superfluous `)` from a HTTP request log message. #9966
|
||||
* Disable TLS renegotiation (handshake on existing connection) on OpenBSD as well. #9943
|
||||
* Log also the underlying error message when a HTTP request is closed with `No data received` by Icinga 2. #9928
|
||||
* Fix a deadlock triggered by concurrent `/v1/actions/add-comment` and `/v1/actions/acknowledge-problem` requests on
|
||||
the same checkable, as well as a crash that might occur when running perfectly timed `/v1/actions/add-comment`
|
||||
and `/v1/actions/remove-comment` requests targeting the same comment. #9924
|
||||
* Icinga DB
|
||||
* Fix missing acknowledgement and flapping history entries due to a number overflow. #10467
|
||||
* Send downtime `cancel_time` only if it is cancelled. #10379
|
||||
* Send only the necessary data to the `icinga:stats` Redis® stream. #10359
|
||||
* Remove a spin lock in `RedisConnection#Connect()` to avoid busy waiting. #10265
|
||||
* Writers
|
||||
* Serialize all required metrics before queueing them to a `WorkQueue`. #10420
|
||||
* `OpenTsdbWriter`: Include checkable name in log messages to ease troubleshooting. #10009
|
||||
* `OpenTsdbWriter`: Don't send custom empty tags. #7928
|
||||
* `InfluxDBWriter`: Add missing closing quote in validation error message. #10174
|
||||
|
||||
### ITL
|
||||
|
||||
* Add `--maintenance_mode_state` (`$vmware_maintenance_mode_state`) argument to `vmware-esx-command` check command. #10435
|
||||
* Add `-n` (`$load_procs_to_show$`) argument to `load` check command. #10426
|
||||
* Add `--inode-perfdata` (`$disk_np_inode_perfdata$`) argument to `disk` check command. #10395
|
||||
* Add `-r` (`$ssh_remote_version$`) and `-P` (`$ssh_remote_protocol$`) arguments to `ssh` check command. #10283
|
||||
* Add `--unplugged_nics_state` (`$vmware_unplugged_nics_state$`) argument to `vmware-esx-soap-host-net` and `vmware-esx-soap-host-net-nic` check commands. #10261
|
||||
* Add `-X` (`$proc_exclude_process$`) argument to `procs` check command. #10232
|
||||
* Add `--dane` (`$ssl_cert_dane$`) argument to `ssl_cert` check command. #10196
|
||||
* Fix `check_ssl_cert` deprecation warnings. #9758
|
||||
* Fix `check_systemd` executable name add add all missing arguments. #10035
|
||||
* Add `-M` (`$snmp_multiplier$` & `$snmpv3_multiplier$`) argument to `snmp` and `snmpv3` check commands. #9975
|
||||
* Add `--continue-after-certificate` (`$http_certificate_continue$`) argument to `http` check command. #9974
|
||||
* Add `--ignore-maximum-validity` (`$ssl_cert_ignore_maximum_validity$`) argument to `ssl_cert` check command. #10396
|
||||
* Add `--maximum-validity` (`$ssl_cert_maximum_validity$`) argument to `ssl_cert` check command. #9881
|
||||
* Add `--url` (`$ssl_cert_http_url$`) argument to `ssl_cert` check command. #9759
|
||||
* Add `fuse.sshfs` and `fuse.*` (supported only by Monitoring Plugins) to the list of default disk exclude types. #9749
|
||||
* Add `check_curl` check command. #9205
|
||||
* Add the `--extra-opts` argument to various commands that support it. #8010
|
||||
|
||||
### Documentation
|
||||
|
||||
* Don't use `dnf config-manager` to configure Fedora repository and mention `icingadb-redis-selinux` package. #10479
|
||||
* Update the outdated cold startup duration documentation to reflect the current behavior. #10446
|
||||
* Indent second-level unordered lists with four spaces to correctly render them in the HTML documentation. #10441
|
||||
* Add a reference to the check result state documentation from within the Advanced Topics section. #10421
|
||||
* Improve the documentation of how to generate Icinga 2 core dumps. #10418
|
||||
* Update Icinga 2 CLI output examples to match the current output. #10323
|
||||
* Fix incorrect `ping_timeout` value in the `hostalive` check command documentation. #10069
|
||||
|
||||
### Code Quality
|
||||
|
||||
* Simplify deferred SSL shutdown in `ApiListener#NewClientHandlerInternal()`. #10301
|
||||
* Don't unnecessarily shuffle configuration items during config load. #10008
|
||||
* Sort config types by their load dependencies at namespace initialization time to save some round trips during config load. #10148
|
||||
* Fix `livestatus` build error on macOS without unity builds. #10176
|
||||
* Remove unused methods in `SharedObject` class. #10456
|
||||
* Remove unused `ProcessingResult#NoCheckResult` enum value. #10444
|
||||
* CMake: Drop all third-party cmake modules and use the ones shipped with CMake v3.8+. #10403
|
||||
* CMake: Raise the minimum required policy to `3.8`. #10402 #10478
|
||||
* CMake: Turn on `-Wsuggest-override` to warn about missing `override` specifiers. #10225 #10356
|
||||
* Make `icinga::Empty` a constant to prevent accidental modifications. #10224
|
||||
* Remove various unused methods in the `Registry` class. #10222
|
||||
* Fix missing parent `std::atomic<T>` constructor call in our `Atomic<T>` wrapper class. #10215
|
||||
* Drop unused `m_NextHeartbeat` member variable from `JsonRpcConnection`. #10208
|
||||
* Enhance some of the validation error messages. #10201
|
||||
* Don't allow `Type#GetLoadDependencies()` to return non-config object type dependencies. #10169
|
||||
* Don't allow `Type#GetLoadDependencies()` to return a set of nullptr type dependencies. #10155
|
||||
* Remove EOL distros detection code from `Utility::ReleaseHelper()` function. #10147
|
||||
* Remove dead code in TLS `GetSignatureAlgorithm()` function. #9882
|
||||
* Mark `Logger#GetSeverity()` as non-virtual to avoid unnecessary vtable lookups. #9851
|
||||
* Remove unused `Stream#Peak()` method and unused `allow_partial` parameter from `Stream#Read()`. #9734 #9736
|
||||
* Suppress compiler warnings in third-party libraries. #9732
|
||||
* Fix various compiler warnings. #9731 #10442
|
||||
* Reduce task function allocation overhead by using a per-thread created lambda in `WorkQueue`. #9575
|
||||
* Remove redundant trailing empty lines and add missing newlines in some files. #7799
|
||||
|
||||
## 2.14.6 (2025-05-27)
|
||||
|
||||
This security release fixes a critical issue in the certificate renewal logic in Icinga 2, which
|
||||
might incorrectly renew an invalid certificate. However, only nodes with access to the Icinga CA
|
||||
private key running with OpenSSL older than version 1.1.0 (released in 2016) are vulnerable. So this
|
||||
typically affects Icinga 2 masters running on operating systems like RHEL 7 and Amazon Linux 2.
|
||||
|
||||
* CVE-2025-48057: Prevent invalid certificates from being renewed with OpenSSL older than v1.1.0.
|
||||
* Fix use-after-free in VerifyCertificate(): Additionally, a use-after-free was found in the same
|
||||
function which is fixed as well, but in case it is triggered, typically only a wrong error code
|
||||
may be shown in a log message.
|
||||
* Windows: Update OpenSSL shipped on Windows to v3.0.16.
|
||||
|
||||
## 2.14.5 (2025-02-06)
|
||||
|
||||
This release fixes a regression introduced in 2.14.4 that caused the `icinga2 node setup`,
|
||||
`icinga2 node wizard`, and `icinga2 pki request` commands to fail if a certificate was
|
||||
requested from a node that has to forward the request to another node for signing.
|
||||
Additionally, it fixes a small bug in the performance data normalization and includes
|
||||
various documentation improvements.
|
||||
|
||||
### Bug Fixes
|
||||
|
||||
* Don't close anonymous connections before sending the response for a certificate request #10337
|
||||
* Performance data: Don't discard min/max values even if crit/warn thresholds aren’t given #10339
|
||||
* Fix a failing test case on systems `time_t` is only 32 bits #10343
|
||||
|
||||
### Documentation
|
||||
|
||||
* Document the -X option for the mail-host-notification and mail-service-notification commands #10335
|
||||
* Include Nagios in the migration docs #10324
|
||||
* Remove RHEL 7 from installation instructions #10334
|
||||
* Add instructions for installing build dependencies on Windows Server #10336
|
||||
|
||||
## 2.14.4 (2025-01-23)
|
||||
|
||||
This bugfix release is focused on improving HA cluster stability and easing
|
||||
troubleshooting of issues in this area. It also addresses several crashes,
|
||||
in the core itself and both in Icinga DB and IDO (numbers out of range).
|
||||
In addition, it fixes several other issues such as lost notifications
|
||||
or TimePeriod/ScheduledDowntime exceeding specified date ranges.
|
||||
|
||||
### Crash Fixes
|
||||
|
||||
* Invalid `DateTime#format()` arguments in config and console on Windows Server 2016 and older. #10112
|
||||
* Downtime scheduling at runtime with non-existent trigger. #10049
|
||||
* Object creation at runtime during Icinga DB initialization. #10151
|
||||
* Comment on a service of a non-existent host. #9861
|
||||
|
||||
### Miscellaneous Bugfixes
|
||||
|
||||
* Lost notifications after recovery outside the notification time period. #10187
|
||||
* TimePeriod/ScheduledDowntime exceeding specified date range. #9983 #10107
|
||||
* Clean up failure for obsolete Downtimes. #10062
|
||||
* ifw-api check command: use correct process-finished handler. #10140
|
||||
* Email notification scripts: strip 0x0D (CR) for a proper Content-Type. #10061
|
||||
* Several fixes and improvements of the code quality. #10066 #10214 #10254 #10263 #10264
|
||||
|
||||
### Cluster and API
|
||||
|
||||
* Sync runtime objects in topological order to honor their dependencies. #10000
|
||||
* Make parallel config syncs more robust. #10013
|
||||
* After object creation via API fails, clean up properly for the next try. #10111
|
||||
* Close HTTPS connections properly to prevent leaks. #10005 #10006
|
||||
* Reduce the number of cluster messages in memory at the same time. #9991 #9999 #10210
|
||||
* Once a cluster connection shall be closed, stop communicating. #10213 #10221
|
||||
* Remove unnecessary blocking of semaphores. #9992 #9994
|
||||
* Reduce unnecessary cluster messages setting the next check time. #10011
|
||||
|
||||
### Icinga DB and IDO
|
||||
|
||||
* IDO: fix object relations after aborted synchronization. #10065
|
||||
* Icinga DB, IDO: limit all timestamps to four year digits. #10058 #10059
|
||||
* Icinga DB: limit execution\_time and latency (milliseconds) to database schema. #10060
|
||||
|
||||
### Troubleshooting
|
||||
|
||||
* Add `/v1/debug/malloc_info` which calls `malloc_info(3)` if available. #10015
|
||||
* Add log messages about own network I/O. #9993 #10141 #10207
|
||||
* Several fixes and improvements of log messages. #9997 #10021 #10209
|
||||
|
||||
### Windows
|
||||
|
||||
* Update OpenSSL shipped on Windows to v3.0.15. #10170
|
||||
* Update Boost shipped on Windows to v1.86. #10114
|
||||
* Support CMake v3.29. #10037
|
||||
* Don't require to build .msi as admin. #10137
|
||||
* Build configuration scripts: allow custom `$CMAKE_ARGS`. #10312
|
||||
|
||||
### Documentation
|
||||
|
||||
* Distributed Monitoring: add section "External CA/PKI". #9825
|
||||
* Explain how to enable/disable debug logging on the fly. #9981
|
||||
* Update supported OS versions and repository configuration. #10064 #10090 #10120 #10135 #10136 #10205
|
||||
* Several fixes and improvements. #9960 #10050 #10071 #10156 #10194
|
||||
* Replace broken links. #10115 #10118 #10282
|
||||
* Fix typographical and similarly trivial errors. #9953 #9967 #10056 #10116 #10152 #10153 #10204
|
||||
|
||||
## 2.14.3 (2024-11-12)
|
||||
|
||||
This security release fixes a TLS certificate validation bypass.
|
||||
Given the severity of that issue, users are advised to upgrade all nodes immediately.
|
||||
|
||||
* Security: fix TLS certificate validation bypass. CVE-2024-49369
|
||||
* Security: update OpenSSL shipped on Windows to v3.0.15.
|
||||
* Windows: sign MSI packages with a certificate the OS trusts by default.
|
||||
|
||||
## 2.14.2 (2024-01-18)
|
||||
|
||||
Version 2.14.2 is a hotfix release for master nodes that mainly
|
||||
@ -234,6 +494,58 @@ Add `linux_netdev` check command. #9045
|
||||
* Several code quality improvements. #8815 #9106 #9250
|
||||
#9508 #9517 #9537 #9594 #9605 #9606 #9641 #9658 #9702 #9717 #9738
|
||||
|
||||
## 2.13.12 (2025-05-27)
|
||||
|
||||
This security release fixes a critical issue in the certificate renewal logic in Icinga 2, which
|
||||
might incorrectly renew an invalid certificate. However, only nodes with access to the Icinga CA
|
||||
private key running with OpenSSL older than version 1.1.0 (released in 2016) are vulnerable. So this
|
||||
typically affects Icinga 2 masters running on operating systems like RHEL 7 and Amazon Linux 2.
|
||||
|
||||
* CVE-2025-48057: Prevent invalid certificates from being renewed with OpenSSL older than v1.1.0.
|
||||
* Fix use-after-free in VerifyCertificate(): Additionally, a use-after-free was found in the same
|
||||
function which is fixed as well, but in case it is triggered, typically only a wrong error code
|
||||
may be shown in a log message.
|
||||
* Windows: Update OpenSSL shipped on Windows to v3.0.16.
|
||||
* Fix a failing test case on systems `time_t` is only 32 bits #10344.
|
||||
|
||||
## 2.13.11 (2025-01-23)
|
||||
|
||||
This bugfix release addresses several crashes,
|
||||
both in the core itself and in Icinga DB (numbers out of range).
|
||||
In addition, it fixes several other issues such as lost notifications
|
||||
or TimePeriod/ScheduledDowntime exceeding specified date ranges.
|
||||
|
||||
### Crash Fixes
|
||||
|
||||
* Invalid `DateTime#format()` arguments in config and console on Windows Server 2016 and older. #10165
|
||||
* Downtime scheduling at runtime with non-existent trigger. #10127
|
||||
* Object creation at runtime during Icinga DB initialization. #10164
|
||||
* Icinga DB: several numbers out of database schema range. #10244
|
||||
|
||||
### Miscellaneous Bugfixes
|
||||
|
||||
* Lost notifications after recovery outside the notification time period. #10241
|
||||
* TimePeriod/ScheduledDowntime exceeding specified date range. #10128 #10133
|
||||
* Make parallel config syncs more robust. #10126
|
||||
* Reduce unnecessary cluster messages setting the next check time. #10168
|
||||
|
||||
### Windows
|
||||
|
||||
* Update OpenSSL shipped on Windows to v3.0.15. #10175
|
||||
* Update Boost shipped on Windows to v1.86. #10134
|
||||
* Support CMake v3.29. #10087
|
||||
* Don't require to build .msi as admin. #10305
|
||||
* Build configuration scripts: allow custom `$CMAKE_ARGS`. #10315
|
||||
|
||||
## 2.13.10 (2024-11-12)
|
||||
|
||||
This security release fixes a TLS certificate validation bypass.
|
||||
Given the severity of that issue, users are advised to upgrade all nodes immediately.
|
||||
|
||||
* Security: fix TLS certificate validation bypass. CVE-2024-49369
|
||||
* Security: update OpenSSL shipped on Windows to v3.0.15.
|
||||
* Windows: sign MSI packages with a certificate the OS trusts by default.
|
||||
|
||||
## 2.13.9 (2023-12-21)
|
||||
|
||||
Version 2.13.9 is a hotfix release for masters and satellites that mainly
|
||||
@ -967,6 +1279,15 @@ Thanks to all contributors:
|
||||
* Code quality fixes
|
||||
* Small documentation fixes
|
||||
|
||||
## 2.11.12 (2024-11-12)
|
||||
|
||||
This security release fixes a TLS certificate validation bypass.
|
||||
Given the severity of that issue, users are advised to upgrade all nodes immediately.
|
||||
|
||||
* Security: fix TLS certificate validation bypass. CVE-2024-49369
|
||||
* Security: update OpenSSL shipped on Windows to v3.0.15.
|
||||
* Windows: sign MSI packages with a certificate the OS trusts by default.
|
||||
|
||||
## 2.11.11 (2021-08-19)
|
||||
|
||||
The main focus of these versions is a security vulnerability in the TLS certificate verification of our metrics writers ElasticsearchWriter, GelfWriter and InfluxdbWriter.
|
||||
|
@ -1,17 +1,12 @@
|
||||
# Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+
|
||||
|
||||
cmake_minimum_required(VERSION 2.8.12)
|
||||
# CMake 3.8 is required, CMake policy compatibility was verified up to 3.17.
|
||||
cmake_minimum_required(VERSION 3.8...3.17)
|
||||
set(BOOST_MIN_VERSION "1.66.0")
|
||||
|
||||
if("${CMAKE_VERSION}" VERSION_LESS "3.8") # SLES 12.5
|
||||
if(NOT MSVC)
|
||||
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++17")
|
||||
endif()
|
||||
else()
|
||||
set(CMAKE_CXX_STANDARD 17)
|
||||
set(CMAKE_CXX_STANDARD_REQUIRED ON)
|
||||
set(CMAKE_CXX_EXTENSIONS OFF)
|
||||
endif()
|
||||
set(CMAKE_CXX_STANDARD 17)
|
||||
set(CMAKE_CXX_STANDARD_REQUIRED ON)
|
||||
set(CMAKE_CXX_EXTENSIONS OFF)
|
||||
|
||||
project(icinga2)
|
||||
list(APPEND CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/cmake")
|
||||
@ -23,6 +18,10 @@ if(NOT CMAKE_BUILD_TYPE)
|
||||
FORCE)
|
||||
endif()
|
||||
|
||||
# Include symbols in executables so that function names can be printed in stack traces, for example in crash dumps.
|
||||
set(CMAKE_ENABLE_EXPORTS ON) # Added in CMake 3.4
|
||||
set(CMAKE_EXECUTABLE_ENABLE_EXPORTS ON) # Added in CMake 3.27 and supersedes the above one.
|
||||
|
||||
if(WIN32)
|
||||
set(ICINGA2_MASTER OFF)
|
||||
else()
|
||||
@ -173,7 +172,7 @@ else()
|
||||
set(LOGROTATE_CREATE "\n\tcreate 644 ${ICINGA2_USER} ${ICINGA2_GROUP}")
|
||||
endif()
|
||||
|
||||
find_package(Boost ${BOOST_MIN_VERSION} COMPONENTS coroutine context date_time filesystem iostreams thread system program_options regex REQUIRED)
|
||||
find_package(Boost ${BOOST_MIN_VERSION} COMPONENTS coroutine context date_time filesystem iostreams thread program_options regex REQUIRED)
|
||||
|
||||
# Boost.Coroutine2 (the successor of Boost.Coroutine)
|
||||
# (1) doesn't even exist in old Boost versions and
|
||||
@ -186,21 +185,21 @@ add_definitions(-DBOOST_FILESYSTEM_NO_DEPRECATED)
|
||||
add_definitions(-DBOOST_ASIO_USE_TS_EXECUTOR_AS_DEFAULT)
|
||||
|
||||
link_directories(${Boost_LIBRARY_DIRS})
|
||||
include_directories(${Boost_INCLUDE_DIRS})
|
||||
include_directories(SYSTEM ${Boost_INCLUDE_DIRS})
|
||||
|
||||
find_package(OpenSSL REQUIRED)
|
||||
include_directories(${OPENSSL_INCLUDE_DIR})
|
||||
include_directories(SYSTEM ${OPENSSL_INCLUDE_DIR})
|
||||
|
||||
set(base_DEPS ${CMAKE_DL_LIBS} ${Boost_LIBRARIES} ${OPENSSL_LIBRARIES})
|
||||
set(base_OBJS $<TARGET_OBJECTS:mmatch> $<TARGET_OBJECTS:socketpair> $<TARGET_OBJECTS:base>)
|
||||
|
||||
# JSON
|
||||
find_package(JSON)
|
||||
include_directories(${JSON_INCLUDE})
|
||||
include_directories(SYSTEM ${JSON_INCLUDE})
|
||||
|
||||
# UTF8CPP
|
||||
find_package(UTF8CPP)
|
||||
include_directories(${UTF8CPP_INCLUDE})
|
||||
include_directories(SYSTEM ${UTF8CPP_INCLUDE})
|
||||
|
||||
find_package(Editline)
|
||||
set(HAVE_EDITLINE "${EDITLINE_FOUND}")
|
||||
@ -223,22 +222,23 @@ endif()
|
||||
|
||||
if(EDITLINE_FOUND)
|
||||
list(APPEND base_DEPS ${EDITLINE_LIBRARIES})
|
||||
include_directories(${EDITLINE_INCLUDE_DIR})
|
||||
include_directories(SYSTEM ${EDITLINE_INCLUDE_DIR})
|
||||
endif()
|
||||
|
||||
if(TERMCAP_FOUND)
|
||||
list(APPEND base_DEPS ${TERMCAP_LIBRARIES})
|
||||
include_directories(${TERMCAP_INCLUDE_DIR})
|
||||
include_directories(SYSTEM ${TERMCAP_INCLUDE_DIR})
|
||||
endif()
|
||||
|
||||
if(WIN32)
|
||||
list(APPEND base_DEPS ws2_32 dbghelp shlwapi msi)
|
||||
endif()
|
||||
|
||||
set(CMAKE_MACOSX_RPATH 1)
|
||||
set(CMAKE_INSTALL_RPATH "${CMAKE_INSTALL_RPATH};${CMAKE_INSTALL_FULL_LIBDIR}/icinga2")
|
||||
|
||||
if(CMAKE_CXX_COMPILER_ID MATCHES "Clang")
|
||||
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Winconsistent-missing-override -Wrange-loop-construct")
|
||||
|
||||
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Qunused-arguments -fcolor-diagnostics -fno-limit-debug-info")
|
||||
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Qunused-arguments -fcolor-diagnostics -fno-limit-debug-info")
|
||||
|
||||
@ -256,6 +256,12 @@ if(CMAKE_C_COMPILER_ID STREQUAL "SunPro")
|
||||
endif()
|
||||
|
||||
if(CMAKE_C_COMPILER_ID STREQUAL "GNU")
|
||||
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wsuggest-override")
|
||||
|
||||
if("${CMAKE_CXX_COMPILER_VERSION}" VERSION_GREATER_EQUAL "11.0.0")
|
||||
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wrange-loop-construct")
|
||||
endif()
|
||||
|
||||
if(CMAKE_SYSTEM_NAME MATCHES AIX)
|
||||
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -g -lpthread")
|
||||
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -g -lpthread")
|
||||
@ -365,6 +371,7 @@ check_function_exists(vfork HAVE_VFORK)
|
||||
check_function_exists(backtrace_symbols HAVE_BACKTRACE_SYMBOLS)
|
||||
check_function_exists(pipe2 HAVE_PIPE2)
|
||||
check_function_exists(nice HAVE_NICE)
|
||||
check_function_exists(malloc_info HAVE_MALLOC_INFO)
|
||||
check_library_exists(dl dladdr "dlfcn.h" HAVE_DLADDR)
|
||||
check_library_exists(execinfo backtrace_symbols "" HAVE_LIBEXECINFO)
|
||||
check_include_file_cxx(cxxabi.h HAVE_CXXABI_H)
|
||||
|
@ -1,2 +1,2 @@
|
||||
Version: 2.14.0
|
||||
Version: 2.15.0
|
||||
Revision: 1
|
||||
|
@ -8,6 +8,7 @@
|
||||
#cmakedefine HAVE_LIBEXECINFO
|
||||
#cmakedefine HAVE_CXXABI_H
|
||||
#cmakedefine HAVE_NICE
|
||||
#cmakedefine HAVE_MALLOC_INFO
|
||||
#cmakedefine HAVE_EDITLINE
|
||||
#cmakedefine HAVE_SYSTEMD
|
||||
|
||||
|
@ -67,4 +67,3 @@ Read more about development builds in the [development chapter](21-development.m
|
||||
Icinga 2 and the Icinga 2 documentation are licensed under the terms of the GNU
|
||||
General Public License Version 2. You will find a copy of this license in the
|
||||
LICENSE file included in the source package.
|
||||
|
||||
|
@ -31,9 +31,13 @@ Here's how to add it to your system:
|
||||
|
||||
```bash
|
||||
apt update
|
||||
apt -y install apt-transport-https wget gnupg
|
||||
apt -y install apt-transport-https wget
|
||||
|
||||
wget -O - https://packages.icinga.com/icinga.key | gpg --dearmor -o /usr/share/keyrings/icinga-archive-keyring.gpg
|
||||
wget -O icinga-archive-keyring.deb "https://packages.icinga.com/icinga-archive-keyring_latest+debian$(
|
||||
. /etc/os-release; echo "$VERSION_ID"
|
||||
).deb"
|
||||
|
||||
apt install ./icinga-archive-keyring.deb
|
||||
|
||||
DIST=$(awk -F"[)(]+" '/VERSION=/ {print $2}' /etc/os-release); \
|
||||
echo "deb [signed-by=/usr/share/keyrings/icinga-archive-keyring.gpg] https://packages.icinga.com/debian icinga-${DIST} main" > \
|
||||
@ -43,21 +47,6 @@ DIST=$(awk -F"[)(]+" '/VERSION=/ {print $2}' /etc/os-release); \
|
||||
|
||||
apt update
|
||||
```
|
||||
|
||||
#### Debian Backports Repository <a id="debian-backports-repository"></a>
|
||||
|
||||
This repository is required for Debian Stretch since Icinga v2.11.
|
||||
|
||||
Debian Stretch:
|
||||
|
||||
```bash
|
||||
DIST=$(awk -F"[)(]+" '/VERSION=/ {print $2}' /etc/os-release); \
|
||||
echo "deb https://deb.debian.org/debian ${DIST}-backports main" > \
|
||||
/etc/apt/sources.list.d/${DIST}-backports.list
|
||||
|
||||
apt update
|
||||
```
|
||||
|
||||
<!-- {% endif %} -->
|
||||
|
||||
<!-- {% if ubuntu %} -->
|
||||
@ -65,9 +54,13 @@ apt update
|
||||
|
||||
```bash
|
||||
apt update
|
||||
apt -y install apt-transport-https wget gnupg
|
||||
apt -y install apt-transport-https wget
|
||||
|
||||
wget -O - https://packages.icinga.com/icinga.key | gpg --dearmor -o /usr/share/keyrings/icinga-archive-keyring.gpg
|
||||
wget -O icinga-archive-keyring.deb "https://packages.icinga.com/icinga-archive-keyring_latest+ubuntu$(
|
||||
. /etc/os-release; echo "$VERSION_ID"
|
||||
).deb"
|
||||
|
||||
apt install ./icinga-archive-keyring.deb
|
||||
|
||||
. /etc/os-release; if [ ! -z ${UBUNTU_CODENAME+x} ]; then DIST="${UBUNTU_CODENAME}"; else DIST="$(lsb_release -c| awk '{print $2}')"; fi; \
|
||||
echo "deb [signed-by=/usr/share/keyrings/icinga-archive-keyring.gpg] https://packages.icinga.com/ubuntu icinga-${DIST} main" > \
|
||||
@ -79,40 +72,6 @@ apt update
|
||||
```
|
||||
<!-- {% endif %} -->
|
||||
|
||||
<!-- {% if raspbian %} -->
|
||||
### Raspbian Repository <a id="raspbian-repository"></a>
|
||||
|
||||
```bash
|
||||
apt update
|
||||
apt -y install apt-transport-https wget gnupg
|
||||
|
||||
wget -O - https://packages.icinga.com/icinga.key | gpg --dearmor -o /usr/share/keyrings/icinga-archive-keyring.gpg
|
||||
|
||||
DIST=$(awk -F"[)(]+" '/VERSION=/ {print $2}' /etc/os-release); \
|
||||
echo "deb [signed-by=/usr/share/keyrings/icinga-archive-keyring.gpg] https://packages.icinga.com/raspbian icinga-${DIST} main" > \
|
||||
/etc/apt/sources.list.d/icinga.list
|
||||
echo "deb-src [signed-by=/usr/share/keyrings/icinga-archive-keyring.gpg] https://packages.icinga.com/raspbian icinga-${DIST} main" >> \
|
||||
/etc/apt/sources.list.d/icinga.list
|
||||
|
||||
apt update
|
||||
```
|
||||
<!-- {% endif %} -->
|
||||
|
||||
<!-- {% if centos %} -->
|
||||
### CentOS Repository <a id="centos-repository"></a>
|
||||
|
||||
```bash
|
||||
wget https://packages.icinga.com/centos/ICINGA-release.repo -O /etc/yum.repos.d/ICINGA-release.repo
|
||||
```
|
||||
|
||||
The packages for CentOS depend on other packages which are distributed
|
||||
as part of the [EPEL repository](https://fedoraproject.org/wiki/EPEL):
|
||||
|
||||
```bash
|
||||
yum install epel-release
|
||||
```
|
||||
<!-- {% endif %} -->
|
||||
|
||||
<!-- {% if rhel %} -->
|
||||
### RHEL Repository <a id="rhel-repository"></a>
|
||||
|
||||
@ -141,23 +100,16 @@ subscription-manager repos --enable "codeready-builder-for-rhel-${OSVER}-${ARCH}
|
||||
dnf install https://dl.fedoraproject.org/pub/epel/epel-release-latest-${OSVER}.noarch.rpm
|
||||
```
|
||||
|
||||
#### RHEL 7
|
||||
|
||||
```bash
|
||||
subscription-manager repos --enable rhel-7-server-optional-rpms
|
||||
|
||||
yum install https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm
|
||||
```
|
||||
<!-- {% endif %} -->
|
||||
|
||||
|
||||
<!-- {% if fedora %} -->
|
||||
### Fedora Repository <a id="fedora-repository"></a>
|
||||
|
||||
```bash
|
||||
dnf install -y 'dnf-command(config-manager)'
|
||||
dnf config-manager --add-repo https://packages.icinga.com/fedora/$(. /etc/os-release; echo "$VERSION_ID")/release
|
||||
rpm --import https://packages.icinga.com/icinga.key
|
||||
curl -o /etc/yum.repos.d/ICINGA-release.repo https://packages.icinga.com/fedora/ICINGA-release.repo
|
||||
```
|
||||
|
||||
<!-- {% endif %} -->
|
||||
|
||||
<!-- {% if sles %} -->
|
||||
@ -191,12 +143,6 @@ SUSEConnect -p PackageHub/$VERSION_ID/x86_64
|
||||
zypper ar https://packages.icinga.com/openSUSE/ICINGA-release.repo
|
||||
zypper ref
|
||||
```
|
||||
|
||||
You need to additionally add the `server:monitoring` repository to fulfill dependencies:
|
||||
|
||||
```bash
|
||||
zypper ar https://download.opensuse.org/repositories/server:/monitoring/15.3/server:monitoring.repo
|
||||
```
|
||||
<!-- {% endif %} -->
|
||||
|
||||
<!-- {% if amazon_linux %} -->
|
||||
@ -238,36 +184,21 @@ You can install Icinga 2 by using your distribution's package manager
|
||||
to install the `icinga2` package. The following commands must be executed
|
||||
with `root` permissions unless noted otherwise.
|
||||
|
||||
<!-- {% if centos or rhel or fedora or amazon_linux %} -->
|
||||
<!-- {% if rhel or fedora or amazon_linux %} -->
|
||||
!!! tip
|
||||
|
||||
If you have [SELinux](22-selinux.md) enabled, the package `icinga2-selinux` is also required.
|
||||
<!-- {% endif %} -->
|
||||
|
||||
<!-- {% if debian or ubuntu or raspbian %} -->
|
||||
<!-- {% if debian or ubuntu %} -->
|
||||
<!-- {% if not icingaDocs %} -->
|
||||
#### Debian / Ubuntu / Raspbian
|
||||
#### Debian / Ubuntu / Raspberry Pi OS
|
||||
<!-- {% endif %} -->
|
||||
```bash
|
||||
apt install icinga2
|
||||
```
|
||||
<!-- {% endif %} -->
|
||||
|
||||
<!-- {% if centos %} -->
|
||||
<!-- {% if not icingaDocs %} -->
|
||||
#### CentOS
|
||||
<!-- {% endif %} -->
|
||||
!!! info
|
||||
|
||||
Note that installing Icinga 2 is only supported on CentOS 7 as CentOS 8 is EOL.
|
||||
|
||||
```bash
|
||||
yum install icinga2
|
||||
systemctl enable icinga2
|
||||
systemctl start icinga2
|
||||
```
|
||||
<!-- {% endif %} -->
|
||||
|
||||
<!-- {% if rhel %} -->
|
||||
#### RHEL 8 or Later
|
||||
|
||||
@ -276,14 +207,6 @@ dnf install icinga2
|
||||
systemctl enable icinga2
|
||||
systemctl start icinga2
|
||||
```
|
||||
|
||||
#### RHEL 7
|
||||
|
||||
```bash
|
||||
yum install icinga2
|
||||
systemctl enable icinga2
|
||||
systemctl start icinga2
|
||||
```
|
||||
<!-- {% endif %} -->
|
||||
|
||||
<!-- {% if fedora %} -->
|
||||
@ -356,26 +279,15 @@ to determine where to find the plugin binaries.
|
||||
additional check plugins into your Icinga 2 setup.
|
||||
|
||||
|
||||
<!-- {% if debian or ubuntu or raspbian %} -->
|
||||
<!-- {% if debian or ubuntu %} -->
|
||||
<!-- {% if not icingaDocs %} -->
|
||||
#### Debian / Ubuntu / Raspbian
|
||||
#### Debian / Ubuntu / Raspberry Pi OS
|
||||
<!-- {% endif %} -->
|
||||
```bash
|
||||
apt install monitoring-plugins
|
||||
```
|
||||
<!-- {% endif %} -->
|
||||
|
||||
<!-- {% if centos %} -->
|
||||
<!-- {% if not icingaDocs %} -->
|
||||
#### CentOS
|
||||
<!-- {% endif %} -->
|
||||
The packages for CentOS depend on other packages which are distributed as part of the EPEL repository.
|
||||
|
||||
```bash
|
||||
yum install nagios-plugins-all
|
||||
```
|
||||
<!-- {% endif %} -->
|
||||
|
||||
<!-- {% if rhel %} -->
|
||||
<!-- {% if not icingaDocs %} -->
|
||||
#### RHEL
|
||||
@ -387,12 +299,6 @@ The packages for RHEL depend on other packages which are distributed as part of
|
||||
```bash
|
||||
dnf install nagios-plugins-all
|
||||
```
|
||||
|
||||
#### RHEL 7
|
||||
|
||||
```bash
|
||||
yum install nagios-plugins-all
|
||||
```
|
||||
<!-- {% endif %} -->
|
||||
|
||||
<!-- {% if fedora %} -->
|
||||
@ -463,7 +369,6 @@ Restart Icinga 2 for these changes to take effect.
|
||||
systemctl restart icinga2
|
||||
```
|
||||
|
||||
<!-- {% if amazon_linux or centos or debian or rhel or sles or ubuntu %} -->
|
||||
## Set up Icinga DB <a id="set-up-icinga-db"></a>
|
||||
|
||||
Icinga DB is a set of components for publishing, synchronizing and
|
||||
@ -504,7 +409,13 @@ A Redis server from version 6.2 is required.
|
||||
|
||||
#### Install Icinga DB Redis Package <a id="install-icinga-db-redis-package"></a>
|
||||
|
||||
Use your distribution's package manager to install the `icingadb-redis` package as follows:
|
||||
Use your distribution's package manager to install the `icingadb-redis` package.
|
||||
|
||||
<!-- {% if amazon_linux or fedora or rhel or opensuse or sles %} -->
|
||||
!!! tip
|
||||
|
||||
If you have [SELinux](22-selinux.md) enabled, the package `icingadb-redis-selinux` is also required.
|
||||
<!-- {% endif %} -->
|
||||
|
||||
<!-- {% if amazon_linux %} -->
|
||||
<!-- {% if not icingaDocs %} -->
|
||||
@ -515,23 +426,9 @@ yum install icingadb-redis
|
||||
```
|
||||
<!-- {% endif %} -->
|
||||
|
||||
<!-- {% if centos %} -->
|
||||
<!-- {% if not icingaDocs %} -->
|
||||
##### CentOS
|
||||
<!-- {% endif %} -->
|
||||
|
||||
!!! info
|
||||
|
||||
Note that installing Icinga DB Redis is only supported on CentOS 7 as CentOS 8 is EOL.
|
||||
|
||||
```bash
|
||||
yum install icingadb-redis
|
||||
```
|
||||
<!-- {% endif %} -->
|
||||
|
||||
<!-- {% if debian or ubuntu %} -->
|
||||
<!-- {% if not icingaDocs %} -->
|
||||
##### Debian / Ubuntu
|
||||
##### Debian / Ubuntu / Raspberry Pi OS
|
||||
<!-- {% endif %} -->
|
||||
```bash
|
||||
apt install icingadb-redis
|
||||
@ -544,17 +441,20 @@ apt install icingadb-redis
|
||||
```bash
|
||||
dnf install icingadb-redis
|
||||
```
|
||||
<!-- {% endif %} -->
|
||||
|
||||
##### RHEL 7
|
||||
|
||||
<!-- {% if fedora %} -->
|
||||
<!-- {% if not icingaDocs %} -->
|
||||
##### Fedora
|
||||
<!-- {% endif %} -->
|
||||
```bash
|
||||
yum install icingadb-redis
|
||||
dnf install icingadb-redis
|
||||
```
|
||||
<!-- {% endif %} -->
|
||||
|
||||
<!-- {% if sles %} -->
|
||||
<!-- {% if sles or opensuse %} -->
|
||||
<!-- {% if not icingaDocs %} -->
|
||||
##### SLES
|
||||
##### SLES / openSUSE
|
||||
<!-- {% endif %} -->
|
||||
```bash
|
||||
zypper install icingadb-redis
|
||||
@ -640,25 +540,27 @@ the Icinga DB daemon that synchronizes monitoring data between the Redis server
|
||||
The Icinga DB daemon package is also included in the Icinga repository, and since it is already set up,
|
||||
you have completed the instructions here and can proceed to
|
||||
<!-- {% if amazon_linux %} -->
|
||||
[install the Icinga DB daemon on Amazon Linux](https://icinga.com/docs/icinga-db/latest/doc/02-Installation/01-Amazon-Linux/#installing-icinga-db-package),
|
||||
<!-- {% endif %} -->
|
||||
<!-- {% if centos %} -->
|
||||
[install the Icinga DB daemon on CentOS](https://icinga.com/docs/icinga-db/latest/doc/02-Installation/02-CentOS/#installing-icinga-db-package),
|
||||
[install the Icinga DB daemon on Amazon Linux](https://icinga.com/docs/icinga-db/latest/doc/02-Installation/Amazon-Linux/#installing-the-package),
|
||||
<!-- {% endif %} -->
|
||||
<!-- {% if debian %} -->
|
||||
[install the Icinga DB daemon on Debian](https://icinga.com/docs/icinga-db/latest/doc/02-Installation/03-Debian/#installing-icinga-db-package),
|
||||
[install the Icinga DB daemon on Debian](https://icinga.com/docs/icinga-db/latest/doc/02-Installation/Debian/#installing-the-package),
|
||||
<!-- {% endif %} -->
|
||||
<!-- {% if fedora %} -->
|
||||
[install the Icinga DB daemon on Fedora](https://icinga.com/docs/icinga-db/latest/doc/02-Installation/Fedora/#installing-the-package),
|
||||
<!-- {% endif %} -->
|
||||
<!-- {% if rhel %} -->
|
||||
[install the Icinga DB daemon on RHEL](https://icinga.com/docs/icinga-db/latest/doc/02-Installation/04-RHEL/#installing-icinga-db-package),
|
||||
[install the Icinga DB daemon on RHEL](https://icinga.com/docs/icinga-db/latest/doc/02-Installation/RHEL/#installing-the-package),
|
||||
<!-- {% endif %} -->
|
||||
<!-- {% if sles %} -->
|
||||
[install the Icinga DB daemon on SLES](https://icinga.com/docs/icinga-db/latest/doc/02-Installation/05-SLES/#installing-icinga-db-package),
|
||||
[install the Icinga DB daemon on SLES](https://icinga.com/docs/icinga-db/latest/doc/02-Installation/SLES/#installing-the-package),
|
||||
<!-- {% endif %} -->
|
||||
<!-- {% if ubuntu %} -->
|
||||
[install the Icinga DB daemon on Ubuntu](https://icinga.com/docs/icinga-db/latest/doc/02-Installation/06-Ubuntu/#installing-icinga-db-package),
|
||||
[install the Icinga DB daemon on Ubuntu](https://icinga.com/docs/icinga-db/latest/doc/02-Installation/Ubuntu/#installing-the-package),
|
||||
<!-- {% endif %} -->
|
||||
<!-- {% if opensuse %} -->
|
||||
[install the Icinga DB daemon on openSUSE](https://icinga.com/docs/icinga-db/latest/doc/02-Installation/openSUSE/#installing-the-package),
|
||||
<!-- {% endif %} -->
|
||||
which will also guide you through the setup of the database and Icinga DB Web.
|
||||
<!-- {% endif %} -->
|
||||
|
||||
## Backup <a id="install-backup"></a>
|
||||
|
||||
|
3
doc/02-installation.md.d/03-Raspberry-Pi-OS.md
Normal file
3
doc/02-installation.md.d/03-Raspberry-Pi-OS.md
Normal file
@ -0,0 +1,3 @@
|
||||
# Install Icinga 2 on Raspberry Pi OS
|
||||
<!-- {% set debian = True %} -->
|
||||
<!-- {% include "02-installation.md" %} -->
|
@ -1,3 +0,0 @@
|
||||
# Install Icinga 2 on Raspbian
|
||||
<!-- {% set raspbian = True %} -->
|
||||
<!-- {% include "02-installation.md" %} -->
|
@ -1,3 +0,0 @@
|
||||
# Install Icinga 2 on CentOS
|
||||
<!-- {% set centos = True %} -->
|
||||
<!-- {% include "02-installation.md" %} -->
|
@ -766,7 +766,7 @@ apply Notification "mail-icingaadmin" to Host {
|
||||
|
||||
A more advanced example is to use [apply rules with for loops on arrays or
|
||||
dictionaries](03-monitoring-basics.md#using-apply-for) provided by
|
||||
[custom atttributes](03-monitoring-basics.md#custom-variables) or groups.
|
||||
[custom attributes](03-monitoring-basics.md#custom-variables) or groups.
|
||||
|
||||
Remember the examples shown for [custom variable values](03-monitoring-basics.md#custom-variables-values):
|
||||
|
||||
@ -1599,7 +1599,7 @@ A common pattern is to store the users and user groups
|
||||
on the host or service objects instead of the notification
|
||||
object itself.
|
||||
|
||||
The sample configuration provided in [hosts.conf](04-configuration.md#hosts-conf) and [notifications.conf](notifications-conf)
|
||||
The sample configuration provided in [hosts.conf](04-configuration.md#hosts-conf) and [notifications.conf](04-configuration.md#notifications-conf)
|
||||
already provides an example for this question.
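
In short, the pattern looks like this (a trimmed-down sketch; the host, variable layout and user name are illustrative):

```
object Host "app-server1.localdomain" {
  check_command = "hostalive"
  address = "192.0.2.10"

  // Store the recipients on the host object ...
  vars.notification["mail"] = {
    users = [ "icingaadmin" ]
  }
}

apply Notification "mail-icingaadmin" to Host {
  import "mail-host-notification"

  // ... and pick them up in the apply rule.
  users = host.vars.notification.mail.users

  assign where host.vars.notification.mail
}
```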
|
||||
|
||||
> **Tip**
|
||||
@ -2135,7 +2135,7 @@ In order to find out about the command argument, call the plugin's help
|
||||
or consult the README.
|
||||
|
||||
```
|
||||
./check_systemd.py --help
|
||||
./check_systemd --help
|
||||
|
||||
...
|
||||
|
||||
@ -2194,7 +2194,7 @@ With the [example above](03-monitoring-basics.md#command-arguments-value),
|
||||
inspect the parameter's help text.
|
||||
|
||||
```
|
||||
./check_systemd.py --help
|
||||
./check_systemd --help
|
||||
|
||||
...
|
||||
|
||||
@ -2579,6 +2579,7 @@ information.
|
||||
`notification_useremail` | **Required.** The notification's recipient(s). Defaults to `$user.email$`.
|
||||
`notification_hoststate` | **Required.** Current state of host. Defaults to `$host.state$`.
|
||||
`notification_type` | **Required.** Type of notification. Defaults to `$notification.type$`.
|
||||
`notification_hostnotes` | **Optional.** The host's notes. Defaults to `$host.notes$`.
|
||||
`notification_address` | **Optional.** The host's IPv4 address. Defaults to `$address$`.
|
||||
`notification_address6` | **Optional.** The host's IPv6 address. Defaults to `$address6$`.
|
||||
`notification_author` | **Optional.** Comment author. Defaults to `$notification.author$`.
|
||||
@ -2607,6 +2608,8 @@ information.
|
||||
`notification_useremail` | **Required.** The notification's recipient(s). Defaults to `$user.email$`.
|
||||
`notification_servicestate` | **Required.** Current state of host. Defaults to `$service.state$`.
|
||||
`notification_type` | **Required.** Type of notification. Defaults to `$notification.type$`.
|
||||
`notification_hostnotes` | **Optional.** The host's notes. Defaults to `$host.notes$`.
|
||||
`notification_servicenotes` | **Optional.** The service's notes. Defaults to `$service.notes$`.
|
||||
`notification_address` | **Optional.** The host's IPv4 address. Defaults to `$address$`.
|
||||
`notification_address6` | **Optional.** The host's IPv6 address. Defaults to `$address6$`.
|
||||
`notification_author` | **Optional.** Comment author. Defaults to `$notification.author$`.
|
||||
@ -2729,7 +2732,7 @@ Requirements:
|
||||
* Icinga 2 as client on the remote node
|
||||
* icinga user with sudo permissions to the httpd daemon
|
||||
|
||||
Example on CentOS 7:
|
||||
Example on RHEL:
|
||||
|
||||
```
|
||||
# visudo
|
||||
@ -3094,6 +3097,12 @@ via the [REST API](12-icinga2-api.md#icinga2-api).
|
||||
> Reachability calculation depends on fresh and processed check results. If dependencies
|
||||
> disable checks for child objects, this won't work reliably.
|
||||
|
||||
> **Note**
|
||||
>
|
||||
> The parent of a dependency can have a parent itself and so on. The nesting depth of
|
||||
> dependencies is currently limited to 256 which should be more than enough for any practical
|
||||
> use. This is an implementation detail and may change in the future.
|
||||
|
||||
### Implicit Dependencies for Services on Host <a id="dependencies-implicit-host-service"></a>
|
||||
|
||||
Icinga 2 automatically adds an implicit dependency for services on their host. That way
|
||||
@ -3169,16 +3178,16 @@ i.e. to consider the parent unreachable only if no dependency is fulfilled.
|
||||
Think of a host connected to both a network and a storage switch vs. a host connected to redundant routers.
|
||||
|
||||
Sometimes you even want a mixture of both.
|
||||
Think of a service like SSH depeding on both LDAP and DNS to function,
|
||||
Think of a service like SSH depending on both LDAP and DNS to function,
|
||||
while operating redundant LDAP servers as well as redundant DNS resolvers.
|
||||
|
||||
Before v2.12, Icinga regarded all dependecies as cumulative.
|
||||
Before v2.12, Icinga regarded all dependencies as cumulative.
|
||||
In v2.12 and v2.13, Icinga regarded all dependencies redundant.
|
||||
The latter led to unrelated services being inadvertantly regarded to be redundant to each other.
|
||||
The latter led to unrelated services being inadvertently regarded to be redundant to each other.
|
||||
|
||||
v2.14 restored the former behavior and allowed to override it.
|
||||
I.e. all dependecies are regarded as essential for the parent by default.
|
||||
Specifying the `redundancy_group` attribute for two dependecies of a child object with the equal value
|
||||
I.e. all dependencies are regarded as essential for the parent by default.
|
||||
Specifying the `redundancy_group` attribute for two dependencies of a child object with the same value
|
||||
causes them to be regarded as redundant (only inside that redundancy group).
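
As a sketch, the two dependencies below share a `redundancy_group` and are therefore evaluated together, while the third one without the group stays essential on its own (host names and the custom variable are placeholders):

```
apply Dependency "dns1" to Host {
  parent_host_name = "dns1.localdomain"
  redundancy_group = "dns"
  assign where host.vars.needs_dns
}

apply Dependency "dns2" to Host {
  parent_host_name = "dns2.localdomain"
  redundancy_group = "dns"
  assign where host.vars.needs_dns
}

// No redundancy_group: this parent remains essential for the child.
apply Dependency "core-router" to Host {
  parent_host_name = "router1.localdomain"
  assign where host.vars.needs_dns
}
```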
|
||||
|
||||
<!-- Keep this for compatibility -->
|
||||
|
@ -593,7 +593,7 @@ Read more on that topic [here](03-monitoring-basics.md#notification-commands).
|
||||
|
||||
#### groups.conf <a id="groups-conf"></a>
|
||||
|
||||
The example host defined in [hosts.conf](hosts-conf) already has the
|
||||
The example host defined in [hosts.conf](#hosts-conf) already has the
|
||||
custom variable `os` set to `Linux` and is therefore automatically
|
||||
a member of the host group `linux-servers`.
|
||||
|
||||
|
@ -51,7 +51,7 @@ described. Try running the plugin after setup and [ensure it works](05-service-m
|
||||
Prior to using the check plugin with Icinga 2 you should ensure that it is working properly
|
||||
by trying to run it on the console using whichever user Icinga 2 is running as:
|
||||
|
||||
RHEL/CentOS/Fedora
|
||||
RHEL/Fedora
|
||||
|
||||
```bash
|
||||
sudo -u icinga /usr/lib64/nagios/plugins/check_mysql_health --help
|
||||
@ -111,7 +111,7 @@ Can't locate Net/SNMP.pm in @INC (you may need to install the Net::SNMP module)
|
||||
|
||||
Prior to installing the Perl module via CPAN, look for a distribution
|
||||
specific package, e.g. `libnet-snmp-perl` on Debian/Ubuntu or `perl-Net-SNMP`
|
||||
on RHEL/CentOS.
|
||||
on RHEL.
|
||||
|
||||
|
||||
#### Optional: Custom Path <a id="service-monitoring-plugins-custom-path"></a>
|
||||
@ -281,10 +281,10 @@ that [it works](05-service-monitoring.md#service-monitoring-plugins-it-works). T
|
||||
`--help` parameter to see the actual parameters (docs might be outdated).
|
||||
|
||||
```
|
||||
./check_systemd.py --help
|
||||
./check_systemd --help
|
||||
|
||||
usage: check_systemd.py [-h] [-c SECONDS] [-e UNIT | -u UNIT] [-v] [-V]
|
||||
[-w SECONDS]
|
||||
usage: check_systemd [-h] [-c SECONDS] [-e UNIT | -u UNIT] [-v] [-V]
|
||||
[-w SECONDS]
|
||||
|
||||
...
|
||||
|
||||
@ -319,14 +319,14 @@ Start with the basic plugin call without any parameters.
|
||||
|
||||
```
|
||||
object CheckCommand "systemd" { // Plugin name without 'check_' prefix
|
||||
command = [ PluginContribDir + "/check_systemd.py" ] // Use the 'PluginContribDir' constant, see the contributed ITL commands
|
||||
command = [ PluginContribDir + "/check_systemd" ] // Use the 'PluginContribDir' constant, see the contributed ITL commands
|
||||
}
|
||||
```
|
||||
|
||||
Run a config validation to see if that works, `icinga2 daemon -C`
|
||||
|
||||
Next, analyse the plugin parameters. Plugins with a good help output show
|
||||
optional parameters in square brackes. This is the case for all parameters
|
||||
optional parameters in square brackets. This is the case for all parameters
|
||||
for this plugin. If there are required parameters, use the `required` key
|
||||
inside the argument.
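
Building on the command object above, a first `arguments` sketch could look like this (the custom variable names are made up, and `required` is shown only to illustrate the key -- all of this plugin's parameters are optional):

```
object CheckCommand "systemd" {
  command = [ PluginContribDir + "/check_systemd" ]

  arguments = {
    "-c" = {
      value = "$systemd_critical$"
      description = "Startup time critical threshold in seconds"
    }
    "-u" = {
      value = "$systemd_unit$"
      required = true // illustrative: set this only if the parameter is actually mandatory
    }
  }
}
```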
|
||||
|
||||
@ -689,7 +689,7 @@ liters (l) | ml, l, hl
|
||||
|
||||
The UoM "c" represents a continuous counter (e.g. interface traffic counters).
|
||||
|
||||
Unknown UoMs are discarted (as if none was given).
|
||||
Unknown UoMs are discarded (as if none was given).
|
||||
A value without any UoM may be an integer or floating point number
|
||||
for any type (processes, users, etc.).
|
||||
|
||||
|
@ -264,7 +264,7 @@ The setup wizard will ensure that the following steps are taken:
|
||||
* Update the [ApiListener](06-distributed-monitoring.md#distributed-monitoring-apilistener) and [constants](04-configuration.md#constants-conf) configuration.
|
||||
* Update the [icinga2.conf](04-configuration.md#icinga2-conf) to disable the `conf.d` inclusion, and add the `api-users.conf` file inclusion.
|
||||
|
||||
Here is an example of a master setup for the `icinga2-master1.localdomain` node on CentOS 7:
|
||||
Here is an example of a master setup for the `icinga2-master1.localdomain` node:
|
||||
|
||||
```
|
||||
[root@icinga2-master1.localdomain /]# icinga2 node wizard
|
||||
@ -1031,9 +1031,7 @@ in `/etc/icinga2/icinga2.conf`.
|
||||
> Defaults to disabled.
|
||||
|
||||
Now it is time to validate the configuration and to restart the Icinga 2 daemon
|
||||
on both nodes.
|
||||
|
||||
Example on CentOS 7:
|
||||
on both nodes:
|
||||
|
||||
```
|
||||
[root@icinga2-agent1.localdomain /]# icinga2 daemon -C
|
||||
@ -1112,7 +1110,8 @@ Save the changes and validate the configuration on the master node:
|
||||
```
|
||||
[root@icinga2-master1.localdomain /]# icinga2 daemon -C
|
||||
```
|
||||
Restart the Icinga 2 daemon (example for CentOS 7):
|
||||
|
||||
Restart the Icinga 2 daemon:
|
||||
|
||||
```
|
||||
[root@icinga2-master1.localdomain /]# systemctl restart icinga2
|
||||
@ -1221,9 +1220,7 @@ object ApiListener "api" {
|
||||
```
|
||||
|
||||
Now it is time to validate the configuration and to restart the Icinga 2 daemon
|
||||
on both nodes.
|
||||
|
||||
Example on CentOS 7:
|
||||
on both nodes:
|
||||
|
||||
```
|
||||
[root@icinga2-satellite1.localdomain /]# icinga2 daemon -C
|
||||
@ -1285,7 +1282,7 @@ Save the changes and validate the configuration on the master node:
|
||||
[root@icinga2-master1.localdomain /]# icinga2 daemon -C
|
||||
```
|
||||
|
||||
Restart the Icinga 2 daemon (example for CentOS 7):
|
||||
Restart the Icinga 2 daemon:
|
||||
|
||||
```
|
||||
[root@icinga2-master1.localdomain /]# systemctl restart icinga2
|
||||
@ -2208,7 +2205,7 @@ object Zone "icinga2-agent2.localdomain" {
|
||||
The two agent nodes do not need to know about each other. The only important thing
|
||||
is that they know about the parent zone (the satellite) and their endpoint members (and optionally the global zone).
|
||||
|
||||
> **Tipp**
|
||||
> **Tip**
|
||||
>
|
||||
> In the example above we've specified the `host` attribute in the agent endpoint configuration. In this mode,
|
||||
> the satellites actively connect to the agents. This costs some resources on the satellite -- if you prefer to
|
||||
@ -3134,7 +3131,7 @@ object Endpoint "icinga2-master2.localdomain" {
|
||||
> **Note**
|
||||
>
|
||||
> This is required if you decide to change an already running single endpoint production
|
||||
> environment into a HA-enabled cluster zone with two endpoints.
|
||||
> environment into an HA-enabled cluster zone with two endpoints.
|
||||
> The [initial setup](06-distributed-monitoring.md#distributed-monitoring-scenarios-ha-master-clients)
|
||||
> with 2 HA masters doesn't require this step.
|
||||
|
||||
@ -3230,6 +3227,53 @@ information/pki: Writing certificate to file 'icinga2-satellite1.localdomain.crt
|
||||
|
||||
Copy and move these certificates to the respective instances e.g. with SSH/SCP.
|
||||
|
||||
#### External CA/PKI
|
||||
|
||||
Icinga works best with its own certificates.
|
||||
The commands described above take care of the optimal certificate properties.
|
||||
Also, Icinga renews them periodically at runtime to avoid expiry.
|
||||
But you can also provide your own certificates,
|
||||
just like for any other application which uses TLS.
|
||||
|
||||
!!! warning
|
||||
|
||||
The only serious reasons to generate your own certificates are company policies.
|
||||
You are responsible for making Icinga work with your certificates,
|
||||
as well as for [expiry monitoring](10-icinga-template-library.md#plugin-check-command-ssl_cert)
|
||||
and renewal.
|
||||
|
||||
Especially `icinga2 pki` CLI commands do not expect such certificates.
|
||||
|
||||
Also, do not provide your custom CA private key to Icinga 2!
|
||||
Otherwise, it will automatically renew leaf certificates
|
||||
with its hardcoded properties, not your custom ones.
|
||||
|
||||
The CA certificate must be located in `/var/lib/icinga2/certs/ca.crt`.
|
||||
The basic requirements for all leaf certificates are:
|
||||
|
||||
* Located in `/var/lib/icinga2/certs/NODENAME.crt`
|
||||
and `/var/lib/icinga2/certs/NODENAME.key`
|
||||
* Subject with CN matching the endpoint name
|
||||
* A DNS SAN matching the endpoint name
|
||||
|
||||
Pretty much everything else is limited only by your company policy
|
||||
and the OpenSSL versions your Icinga nodes use. E.g. the following works:
|
||||
|
||||
* Custom key sizes, e.g. 2048 bits
|
||||
* Custom key types, e.g. ECC
|
||||
* Any number of intermediate CAs (but see limitations below)
|
||||
* Multiple trusted root CAs in `/var/lib/icinga2/certs/ca.crt`
|
||||
* Different root CAs per cluster subtree, as long as each node trusts the
|
||||
certificate issuers of all nodes it's directly connected to
|
||||
|
||||
Intermediate CA restrictions:
|
||||
|
||||
* Each side has to provide its intermediate CAs along with the leaf certificate
|
||||
in `/var/lib/icinga2/certs/NODENAME.crt`, ordered from leaf to root.
|
||||
* Intermediate CAs may not be used directly as root CAs. To trust only specific
|
||||
intermediate CAs, cross-sign them with themselves, so that you get equal
|
||||
certificates except that they're self-signed. Use them as root CAs in Icinga.
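
As a sketch, a leaf certificate meeting the CN and SAN requirements above could be issued by an existing company CA (`ca.crt`/`ca.key`, managed outside of Icinga) with OpenSSL; `icinga2-agent1.localdomain` is a placeholder endpoint name:

```bash
openssl req -new -newkey rsa:2048 -nodes \
  -keyout icinga2-agent1.localdomain.key \
  -subj "/CN=icinga2-agent1.localdomain" \
  -out icinga2-agent1.localdomain.csr

openssl x509 -req -in icinga2-agent1.localdomain.csr \
  -CA ca.crt -CAkey ca.key -CAcreateserial -days 397 \
  -extfile <(printf 'subjectAltName=DNS:icinga2-agent1.localdomain') \
  -out icinga2-agent1.localdomain.crt

# Deploy to the node's certificate directory.
cp ca.crt /var/lib/icinga2/certs/ca.crt
cp icinga2-agent1.localdomain.crt icinga2-agent1.localdomain.key /var/lib/icinga2/certs/
```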
|
||||
|
||||
## Automation <a id="distributed-monitoring-automation"></a>
|
||||
|
||||
These hints should get you started with your own automation tools (Puppet, Ansible, Chef, Salt, etc.)
|
||||
|
@ -484,7 +484,7 @@ host or service is considered flapping until it drops below the low flapping thr
|
||||
The attribute `flapping_ignore_states` allows to ignore state changes to specified states during the flapping calculation.
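
A minimal sketch of enabling flapping detection on a service while ignoring UNKNOWN results (the thresholds are example values):

```
object Service "http" {
  host_name = "icinga2-agent1.localdomain"
  check_command = "http"

  enable_flapping = true
  flapping_threshold_high = 30.0
  flapping_threshold_low = 25.0
  flapping_ignore_states = [ "Unknown" ]
}
```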
|
||||
|
||||
`FlappingStart` and `FlappingEnd` notifications will be sent out accordingly, if configured. See the chapter on
|
||||
[notifications](alert-notifications) for details
|
||||
[notifications](03-monitoring-basics.md#notifications) for details
|
||||
|
||||
> Note: There is no distinctions between hard and soft states with flapping. All state changes count and notifications
|
||||
> will be sent out regardless of the objects state.
|
||||
@ -1181,7 +1181,7 @@ to represent its internal state. The following types are exposed via the [API](1
|
||||
performance\_data | Array | Array of [performance data values](08-advanced-topics.md#advanced-value-types-perfdatavalue).
|
||||
check\_source | String | Name of the node executing the check.
|
||||
scheduling\_source | String | Name of the node scheduling the check.
|
||||
state | Number | The current state (0 = OK, 1 = WARNING, 2 = CRITICAL, 3 = UNKNOWN).
|
||||
state | Number | Current state according to the [check result state mapping](03-monitoring-basics.md#check-result-state-mapping).
|
||||
command | Value | Array of command with shell-escaped arguments or command line string.
|
||||
execution\_start | Timestamp | Check execution start time (as a UNIX timestamp).
|
||||
execution\_end | Timestamp | Check execution end time (as a UNIX timestamp).
|
||||
|
@ -1134,7 +1134,7 @@ for a more secure configuration is provided by the [Mozilla Wiki](https://wiki.m
|
||||
Ensure to use the same configuration for both attributes on **all** endpoints to avoid communication problems which
|
||||
requires to use `cipher_list` compatible with the endpoint using the oldest version of the OpenSSL library. If using
|
||||
other tools to connect to the API ensure also compatibility with them as this setting affects not only inter-cluster
|
||||
communcation but also the REST API.
|
||||
communication but also the REST API.
|
||||
|
||||
### CheckerComponent <a id="objecttype-checkercomponent"></a>
|
||||
|
||||
@ -1181,7 +1181,7 @@ Configuration Attributes:
|
||||
|
||||
### ElasticsearchWriter <a id="objecttype-elasticsearchwriter"></a>
|
||||
|
||||
Writes check result metrics and performance data to an Elasticsearch instance.
|
||||
Writes check result metrics and performance data to an Elasticsearch or OpenSearch instance.
|
||||
This configuration object is available as [elasticsearch feature](14-features.md#elasticsearch-writer).
|
||||
|
||||
Example:
|
||||
@ -1194,6 +1194,10 @@ object ElasticsearchWriter "elasticsearch" {
|
||||
|
||||
enable_send_perfdata = true
|
||||
|
||||
host_tags_template = {
|
||||
os_name = "$host.vars.os$"
|
||||
}
|
||||
|
||||
flush_threshold = 1024
|
||||
flush_interval = 10
|
||||
}
|
||||
@ -1207,7 +1211,7 @@ Configuration Attributes:
|
||||
--------------------------|-----------------------|----------------------------------
|
||||
host | String | **Required.** Elasticsearch host address. Defaults to `127.0.0.1`.
|
||||
port | Number | **Required.** Elasticsearch port. Defaults to `9200`.
|
||||
index | String | **Required.** Elasticsearch index name. Defaults to `icinga2`.
|
||||
index | String | **Required.** Prefix for the index names. Defaults to `icinga2`.
|
||||
enable\_send\_perfdata | Boolean | **Optional.** Send parsed performance data metrics for check results. Defaults to `false`.
|
||||
flush\_interval | Duration | **Optional.** How long to buffer data points before transferring to Elasticsearch. Defaults to `10s`.
|
||||
flush\_threshold | Number | **Optional.** How many data points to buffer before forcing a transfer to Elasticsearch. Defaults to `1024`.
|
||||
@ -1215,6 +1219,8 @@ Configuration Attributes:
|
||||
password | String | **Optional.** Basic auth password if Elasticsearch is hidden behind an HTTP proxy.
|
||||
enable\_tls | Boolean | **Optional.** Whether to use a TLS stream. Defaults to `false`. Requires an HTTP proxy.
|
||||
insecure\_noverify | Boolean | **Optional.** Disable TLS peer verification.
|
||||
host\_tags\_template | Dictionary | **Optional.** Allows to apply additional tags to the Elasticsearch host entries.
|
||||
service\_tags\_template | Dictionary | **Optional.** Allows to apply additional tags to the Elasticsearch service entries.
|
||||
ca\_path | String | **Optional.** Path to CA certificate to validate the remote host. Requires `enable_tls` set to `true`.
|
||||
cert\_path | String | **Optional.** Path to host certificate to present to the remote host for mutual verification. Requires `enable_tls` set to `true`.
|
||||
key\_path | String | **Optional.** Path to host key to accompany the cert\_path. Requires `enable_tls` set to `true`.
|
||||
@ -1223,6 +1229,11 @@ Configuration Attributes:
|
||||
Note: If `flush_threshold` is set too low, this will force the feature to flush all data to Elasticsearch too often.
|
||||
Experiment with the setting, if you are processing more than 1024 metrics per second or similar.
|
||||
|
||||
> **Note**
|
||||
>
|
||||
> Be aware that `enable_send_perfdata` will create a new field mapping in the index for each performance data metric in a check plugin.
|
||||
> Elasticsearch/OpenSearch have a maximum number of fields in an index. The default value is usually 1000 fields. See [mapping settings limit](https://www.elastic.co/guide/en/elasticsearch/reference/8.18/mapping-settings-limit.html)
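
If you hit that limit, one option is to raise it for the affected indices, e.g. (a sketch; adjust the index pattern, URL and credentials to your environment):

```bash
curl -X PUT 'http://127.0.0.1:9200/icinga2-*/_settings' \
  -H 'Content-Type: application/json' \
  -d '{ "index.mapping.total_fields.limit": 2000 }'
```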
|
||||
|
||||
Basic auth is supported with the `username` and `password` attributes. This requires an
|
||||
HTTP proxy (Nginx, etc.) in front of the Elasticsearch instance. Check [this blogpost](https://blog.netways.de/2017/09/14/secure-elasticsearch-and-kibana-with-an-nginx-http-proxy/)
|
||||
for an example.
|
||||
@ -1387,7 +1398,9 @@ Configuration Attributes:
|
||||
host | String | **Optional.** Redis host. Defaults to `127.0.0.1`.
|
||||
port | Number | **Optional.** Redis port. Defaults to `6380` since the Redis server provided by the `icingadb-redis` package listens on that port.
|
||||
path | String | **Optional.** Redis unix socket path. Can be used instead of `host` and `port` attributes.
|
||||
username | String | **Optional.** Redis auth username. Only possible if Redis ACLs are used. Requires `password` to be set as well.
|
||||
password | String | **Optional.** Redis auth password.
|
||||
db\_index | Number | **Optional.** Redis logical database by its number. Defaults to `0`.
|
||||
enable\_tls | Boolean | **Optional.** Whether to use TLS.
|
||||
cert\_path | String | **Optional.** Path to the certificate.
|
||||
key\_path | String | **Optional.** Path to the private key.
|
||||
@ -1672,11 +1685,11 @@ Configuration Attributes:
|
||||
flush\_threshold | Number | **Optional.** How many data points to buffer before forcing a transfer to InfluxDB. Defaults to `1024`.
|
||||
enable\_ha | Boolean | **Optional.** Enable the high availability functionality. Only valid in a [cluster setup](06-distributed-monitoring.md#distributed-monitoring-high-availability-features). Defaults to `false`.
|
||||
|
||||
Note: If `flush_threshold` is set too low, this will always force the feature to flush all data
|
||||
to InfluxDB. Experiment with the setting, if you are processing more than 1024 metrics per second
|
||||
or similar.
|
||||
|
||||
|
||||
> **Note**
|
||||
>
|
||||
> If `flush_threshold` is set too low, this will always force the feature to flush all data
|
||||
> to InfluxDB. Experiment with the setting, if you are processing more than 1024 metrics per second
|
||||
> or similar.
|
||||
|
||||
### Influxdb2Writer <a id="objecttype-influxdb2writer"></a>
|
||||
|
||||
|
File diff suppressed because it is too large
@ -13,18 +13,18 @@ options.
|
||||
|
||||
```
|
||||
# icinga2
|
||||
icinga2 - The Icinga 2 network monitoring daemon (version: v2.11.0)
|
||||
icinga2 - The Icinga 2 network monitoring daemon (version: v2.14.4)
|
||||
|
||||
Usage:
|
||||
icinga2 <command> [<arguments>]
|
||||
|
||||
Supported commands:
|
||||
* api setup (setup for API)
|
||||
* ca list (lists all certificate signing requests)
|
||||
* ca list (lists pending certificate signing requests)
|
||||
* ca remove (removes an outstanding certificate request)
|
||||
* ca restore (restores a removed certificate request)
|
||||
* ca remove (removes an outstanding certificate request)
|
||||
* ca sign (signs an outstanding certificate request)
|
||||
* console (Icinga debug console)
|
||||
* console (Icinga console)
|
||||
* daemon (starts Icinga 2)
|
||||
* feature disable (disables specified feature)
|
||||
* feature enable (enables specified feature)
|
||||
@ -48,8 +48,6 @@ Global options:
|
||||
--color use VT100 color codes even when stdout is not a
|
||||
terminal
|
||||
-D [ --define ] arg define a constant
|
||||
-a [ --app ] arg application library name (default: icinga)
|
||||
-l [ --library ] arg load a library
|
||||
-I [ --include ] arg add include search directory
|
||||
-x [ --log-level ] arg specify the log level for the console log.
|
||||
The valid value is either debug, notice,
|
||||
@ -57,6 +55,8 @@ Global options:
|
||||
-X [ --script-debugger ] whether to enable the script debugger
|
||||
|
||||
Report bugs at <https://github.com/Icinga/icinga2>
|
||||
Get support: <https://icinga.com/support/>
|
||||
Documentation: <https://icinga.com/docs/>
|
||||
Icinga home page: <https://icinga.com/>
|
||||
```
|
||||
|
||||
@ -73,7 +73,7 @@ RPM and Debian packages install the bash completion files into
|
||||
|
||||
You need to install the `bash-completion` package if not already installed.
|
||||
|
||||
RHEL/CentOS/Fedora:
|
||||
RHEL/Fedora:
|
||||
|
||||
```bash
|
||||
yum install bash-completion
|
||||
@ -102,18 +102,6 @@ source /etc/bash-completion.d/icinga2
|
||||
|
||||
## Icinga 2 CLI Global Options <a id="cli-commands-global-options"></a>
|
||||
|
||||
### Application Type
|
||||
|
||||
By default the `icinga2` binary loads the `icinga` library. A different application type
|
||||
can be specified with the `--app` command-line option.
|
||||
Note: This is not needed by the average Icinga user, only developers.
|
||||
|
||||
### Libraries
|
||||
|
||||
Instead of loading libraries using the [`library` config directive](17-language-reference.md#library)
|
||||
you can also use the `--library` command-line option.
|
||||
Note: This is not needed by the average Icinga user, only developers.
|
||||
|
||||
### Constants
|
||||
|
||||
[Global constants](17-language-reference.md#constants) can be set using the `--define` command-line option.
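
For example, a configuration validation run with an overridden constant could look like this (the value is illustrative):

```bash
icinga2 daemon -C --define PluginDir=/opt/monitoring-plugins
```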
|
||||
@ -144,7 +132,7 @@ Provides helper functions to enable and setup the
|
||||
|
||||
```
|
||||
# icinga2 api setup --help
|
||||
icinga2 - The Icinga 2 network monitoring daemon (version: v2.11.0)
|
||||
icinga2 - The Icinga 2 network monitoring daemon (version: v2.14.4)
|
||||
|
||||
Usage:
|
||||
icinga2 api setup [<arguments>]
|
||||
@ -176,20 +164,20 @@ Icinga home page: <https://icinga.com/>
|
||||
|
||||
List and manage incoming certificate signing requests. More details
|
||||
can be found in the [signing methods](06-distributed-monitoring.md#distributed-monitoring-setup-sign-certificates-master)
|
||||
chapter. This CLI command is available since v2.8.
|
||||
chapter.
|
||||
|
||||
```
|
||||
# icinga2 ca --help
|
||||
icinga2 - The Icinga 2 network monitoring daemon (version: v2.11.0)
|
||||
icinga2 - The Icinga 2 network monitoring daemon (version: v2.14.4)
|
||||
|
||||
Usage:
|
||||
icinga2 <command> [<arguments>]
|
||||
|
||||
Supported commands:
|
||||
* ca list (lists all certificate signing requests)
|
||||
* ca sign (signs an outstanding certificate request)
|
||||
* ca restore (restores a removed certificate request)
|
||||
* ca list (lists pending certificate signing requests)
|
||||
* ca remove (removes an outstanding certificate request)
|
||||
* ca restore (restores a removed certificate request)
|
||||
* ca sign (signs an outstanding certificate request)
|
||||
|
||||
Global options:
|
||||
-h [ --help ] show this help message
|
||||
@ -197,8 +185,6 @@ Global options:
|
||||
--color use VT100 color codes even when stdout is not a
|
||||
terminal
|
||||
-D [ --define ] arg define a constant
|
||||
-a [ --app ] arg application library name (default: icinga)
|
||||
-l [ --library ] arg load a library
|
||||
-I [ --include ] arg add include search directory
|
||||
-x [ --log-level ] arg specify the log level for the console log.
|
||||
The valid value is either debug, notice,
|
||||
@ -206,6 +192,8 @@ Global options:
|
||||
-X [ --script-debugger ] whether to enable the script debugger
|
||||
|
||||
Report bugs at <https://github.com/Icinga/icinga2>
|
||||
Get support: <https://icinga.com/support/>
|
||||
Documentation: <https://icinga.com/docs/>
|
||||
Icinga home page: <https://icinga.com/>
|
||||
```
|
||||
|
||||
@ -213,8 +201,8 @@ Icinga home page: <https://icinga.com/>
|
||||
### CLI command: Ca List <a id="cli-command-ca-list"></a>
|
||||
|
||||
```
|
||||
icinga2 ca list --help
|
||||
icinga2 - The Icinga 2 network monitoring daemon (version: v2.11.0)
|
||||
# icinga2 ca list --help
|
||||
icinga2 - The Icinga 2 network monitoring daemon (version: v2.14.4)
|
||||
|
||||
Usage:
|
||||
icinga2 ca list [<arguments>]
|
||||
@ -249,11 +237,14 @@ Icinga home page: <https://icinga.com/>
|
||||
## CLI command: Console <a id="cli-command-console"></a>
|
||||
|
||||
The CLI command `console` can be used to debug and evaluate Icinga 2 config expressions,
|
||||
e.g. to test [functions](17-language-reference.md#functions) in your local sandbox.
|
||||
e.g., to test [functions](17-language-reference.md#functions) in your local sandbox.
|
||||
|
||||
This command can be executed by any user and does not require access to the Icinga 2 configuration.
|
||||
|
||||
```
|
||||
$ icinga2 console
|
||||
Icinga 2 (version: v2.11.0)
|
||||
# icinga2 console
|
||||
Icinga 2 (version: v2.14.4)
|
||||
Type $help to view available commands.
|
||||
<1> => function test(name) {
|
||||
<1> .. log("Hello " + name)
|
||||
<1> .. }
|
||||
@ -268,7 +259,7 @@ Further usage examples can be found in the [library reference](18-library-refere
|
||||
|
||||
```
|
||||
# icinga2 console --help
|
||||
icinga2 - The Icinga 2 network monitoring daemon (version: v2.11.0)
|
||||
icinga2 - The Icinga 2 network monitoring daemon (version: v2.14.4)
|
||||
|
||||
Usage:
|
||||
icinga2 console [<arguments>]
|
||||
@ -281,8 +272,6 @@ Global options:
|
||||
--color use VT100 color codes even when stdout is not a
|
||||
terminal
|
||||
-D [ --define ] arg define a constant
|
||||
-a [ --app ] arg application library name (default: icinga)
|
||||
-l [ --library ] arg load a library
|
||||
-I [ --include ] arg add include search directory
|
||||
-x [ --log-level ] arg specify the log level for the console log.
|
||||
The valid value is either debug, notice,
|
||||
@ -297,11 +286,13 @@ Command options:
|
||||
--sandbox enable sandbox mode
|
||||
|
||||
Report bugs at <https://github.com/Icinga/icinga2>
|
||||
Get support: <https://icinga.com/support/>
|
||||
Documentation: <https://icinga.com/docs/>
|
||||
Icinga home page: <https://icinga.com/>
|
||||
```
|
||||
|
||||
|
||||
On operating systems without the `libedit` library installed there is no
|
||||
On operating systems without the `libedit` library installed, there is no
|
||||
support for line-editing or a command history. However you can
|
||||
use the `rlwrap` program if you require those features:
|
||||
|
||||
@ -311,7 +302,7 @@ rlwrap icinga2 console
|
||||
|
||||
The debug console can be used to connect to a running Icinga 2 instance using
|
||||
the [REST API](12-icinga2-api.md#icinga2-api). [API permissions](12-icinga2-api.md#icinga2-api-permissions)
|
||||
are required for executing config expressions and auto-completion.
|
||||
for `console` are required for executing config expressions and auto-completion.
|
||||
|
||||
> **Note**
|
||||
>
|
||||
@ -323,20 +314,20 @@ are required for executing config expressions and auto-completion.
|
||||
|
||||
You can specify the API URL using the `--connect` parameter.
|
||||
|
||||
Although the password can be specified there process arguments on UNIX platforms are
|
||||
usually visible to other users (e.g. through `ps`). In order to securely specify the
|
||||
user credentials the debug console supports two environment variables:
|
||||
Although the password can be specified there, process arguments are usually
|
||||
visible to other users (e.g. through `ps`). In order to securely specify the
|
||||
user credentials, the debug console supports two environment variables:
|
||||
|
||||
Environment variable | Description
|
||||
---------------------|-------------
|
||||
ICINGA2_API_USERNAME | The API username.
|
||||
ICINGA2_API_PASSWORD | The API password.
|
||||
|
||||
Here's an example:
|
||||
Here is an example:
|
||||
|
||||
```
|
||||
$ ICINGA2_API_PASSWORD=icinga icinga2 console --connect 'https://root@localhost:5665/'
|
||||
Icinga 2 (version: v2.11.0)
|
||||
Icinga 2 (version: v2.14.4)
|
||||
<1> =>
|
||||
```
|
||||
|
||||
@ -383,7 +374,7 @@ The `--syntax-only` option can be used in combination with `--eval` or `--file`
|
||||
to check a script for syntax errors. In this mode the script is parsed to identify
|
||||
syntax errors but not evaluated.
|
||||
|
||||
Here's an example that retrieves the command that was used by Icinga to check the `icinga2-agent1.localdomain` host:
|
||||
Here is an example that retrieves the command that was used by Icinga to check the `icinga2-agent1.localdomain` host:
|
||||
|
||||
```
|
||||
$ ICINGA2_API_PASSWORD=icinga icinga2 console --connect 'https://root@localhost:5665/' --eval 'get_host("icinga2-agent1.localdomain").last_check_result.command' | python -m json.tool
|
||||
@ -405,7 +396,7 @@ Furthermore it allows to run the [configuration validation](11-cli-commands.md#c
|
||||
|
||||
```
|
||||
# icinga2 daemon --help
|
||||
icinga2 - The Icinga 2 network monitoring daemon (version: v2.11.0)
|
||||
icinga2 - The Icinga 2 network monitoring daemon (version: v2.14.4)
|
||||
|
||||
Usage:
|
||||
icinga2 daemon [<arguments>]
|
||||
@ -418,8 +409,6 @@ Global options:
|
||||
--color use VT100 color codes even when stdout is not a
|
||||
terminal
|
||||
-D [ --define ] arg define a constant
|
||||
-a [ --app ] arg application library name (default: icinga)
|
||||
-l [ --library ] arg load a library
|
||||
-I [ --include ] arg add include search directory
|
||||
-x [ --log-level ] arg specify the log level for the console log.
|
||||
The valid value is either debug, notice,
|
||||
@ -430,7 +419,8 @@ Command options:
|
||||
-c [ --config ] arg parse a configuration file
|
||||
-z [ --no-config ] start without a configuration file
|
||||
-C [ --validate ] exit after validating the configuration
|
||||
--dump-objects write icinga2.debug cache file for icinga2 object list
|
||||
--dump-objects write icinga2.debug cache file for icinga2 object
|
||||
list
|
||||
-e [ --errorlog ] arg log fatal errors to the specified log file (only
|
||||
works in combination with --daemonize or
|
||||
--close-stdio)
|
||||
@ -438,6 +428,8 @@ Command options:
|
||||
--close-stdio do not log to stdout (or stderr) after startup
|
||||
|
||||
Report bugs at <https://github.com/Icinga/icinga2>
|
||||
Get support: <https://icinga.com/support/>
|
||||
Documentation: <https://icinga.com/docs/>
|
||||
Icinga home page: <https://icinga.com/>
|
||||
```
|
||||
|
||||
@ -476,8 +468,8 @@ The `feature list` command shows which features are currently enabled:
|
||||
|
||||
```
|
||||
# icinga2 feature list
|
||||
Disabled features: compatlog debuglog gelf ido-pgsql influxdb livestatus opentsdb perfdata statusdata syslog
|
||||
Enabled features: api checker command graphite ido-mysql mainlog notification
|
||||
Disabled features: debuglog elasticsearch gelf ido-mysql ido-pgsql influxdb influxdb2 journald opentsdb perfdata syslog
|
||||
Enabled features: api checker graphite icingadb mainlog notification
|
||||
```
|
||||
|
||||
## CLI command: Node <a id="cli-command-node"></a>
|
||||
@ -529,7 +521,7 @@ More information can be found in the [troubleshooting](15-troubleshooting.md#tro
|
||||
|
||||
```
|
||||
# icinga2 object --help
|
||||
icinga2 - The Icinga 2 network monitoring daemon (version: v2.11.0)
|
||||
icinga2 - The Icinga 2 network monitoring daemon (version: v2.14.4)
|
||||
|
||||
Usage:
|
||||
icinga2 <command> [<arguments>]
|
||||
@ -543,8 +535,6 @@ Global options:
|
||||
--color use VT100 color codes even when stdout is not a
|
||||
terminal
|
||||
-D [ --define ] arg define a constant
|
||||
-a [ --app ] arg application library name (default: icinga)
|
||||
-l [ --library ] arg load a library
|
||||
-I [ --include ] arg add include search directory
|
||||
-x [ --log-level ] arg specify the log level for the console log.
|
||||
The valid value is either debug, notice,
|
||||
@ -552,6 +542,8 @@ Global options:
|
||||
-X [ --script-debugger ] whether to enable the script debugger
|
||||
|
||||
Report bugs at <https://github.com/Icinga/icinga2>
|
||||
Get support: <https://icinga.com/support/>
|
||||
Documentation: <https://icinga.com/docs/>
|
||||
Icinga home page: <https://icinga.com/>
|
||||
```
|
||||
|
||||
@ -571,7 +563,7 @@ You will need them in the [distributed monitoring chapter](06-distributed-monito
|
||||
|
||||
```
|
||||
# icinga2 pki --help
|
||||
icinga2 - The Icinga 2 network monitoring daemon (version: v2.12.0)
|
||||
icinga2 - The Icinga 2 network monitoring daemon (version: v2.14.4)
|
||||
|
||||
Usage:
|
||||
icinga2 <command> [<arguments>]
|
||||
@ -591,8 +583,6 @@ Global options:
|
||||
--color use VT100 color codes even when stdout is not a
|
||||
terminal
|
||||
-D [ --define ] arg define a constant
|
||||
-a [ --app ] arg application library name (default: icinga)
|
||||
-l [ --library ] arg load a library
|
||||
-I [ --include ] arg add include search directory
|
||||
-x [ --log-level ] arg specify the log level for the console log.
|
||||
The valid value is either debug, notice,
|
||||
@ -600,6 +590,8 @@ Global options:
|
||||
-X [ --script-debugger ] whether to enable the script debugger
|
||||
|
||||
Report bugs at <https://github.com/Icinga/icinga2>
|
||||
Get support: <https://icinga.com/support/>
|
||||
Documentation: <https://icinga.com/docs/>
|
||||
Icinga home page: <https://icinga.com/>
|
||||
```
|
||||
|
||||
@ -609,7 +601,7 @@ Lists all configured variables (constants) in a similar fashion like [object lis
|
||||
|
||||
```
|
||||
# icinga2 variable --help
|
||||
icinga2 - The Icinga 2 network monitoring daemon (version: v2.11.0)
|
||||
icinga2 - The Icinga 2 network monitoring daemon (version: v2.14.4)
|
||||
|
||||
Usage:
|
||||
icinga2 <command> [<arguments>]
|
||||
@ -624,8 +616,6 @@ Global options:
|
||||
--color use VT100 color codes even when stdout is not a
|
||||
terminal
|
||||
-D [ --define ] arg define a constant
|
||||
-a [ --app ] arg application library name (default: icinga)
|
||||
-l [ --library ] arg load a library
|
||||
-I [ --include ] arg add include search directory
|
||||
-x [ --log-level ] arg specify the log level for the console log.
|
||||
The valid value is either debug, notice,
|
||||
@ -633,6 +623,8 @@ Global options:
|
||||
-X [ --script-debugger ] whether to enable the script debugger
|
||||
|
||||
Report bugs at <https://github.com/Icinga/icinga2>
|
||||
Get support: <https://icinga.com/support/>
|
||||
Documentation: <https://icinga.com/docs/>
|
||||
Icinga home page: <https://icinga.com/>
|
||||
```
|
||||
|
||||
@ -651,8 +643,8 @@ You can view a list of enabled and disabled features:
|
||||
|
||||
```
|
||||
# icinga2 feature list
|
||||
Disabled features: api command compatlog debuglog graphite icingastatus ido-mysql ido-pgsql livestatus notification perfdata statusdata syslog
|
||||
Enabled features: checker mainlog notification
|
||||
Disabled features: debuglog elasticsearch gelf ido-mysql ido-pgsql influxdb influxdb2 journald opentsdb perfdata syslog
|
||||
Enabled features: api checker graphite icingadb mainlog notification
|
||||
```
|
||||
|
||||
Using the `icinga2 feature enable` command you can enable features:
|
||||
@ -675,10 +667,9 @@ restart Icinga 2. You will need to restart Icinga 2 using the init script
|
||||
after enabling or disabling features.
|
||||
|
||||
|
||||
|
||||
## Configuration Validation <a id="config-validation"></a>
|
||||
|
||||
Once you've edited the configuration files make sure to tell Icinga 2 to validate
|
||||
Once you have edited the configuration, make sure to tell Icinga 2 to validate
|
||||
the configuration changes. Icinga 2 will log any configuration error including
|
||||
a hint on the file, the line number and the affected configuration line itself.
|
||||
|
||||
@ -716,12 +707,12 @@ to read the [troubleshooting](15-troubleshooting.md#troubleshooting) chapter.
|
||||
You can also use the [CLI command](11-cli-commands.md#cli-command-object) `icinga2 object list`
|
||||
after validation passes to analyze object attributes, inheritance or created
|
||||
objects by apply rules.
|
||||
Find more on troubleshooting with `object list` in [this chapter](15-troubleshooting.md#troubleshooting-list-configuration-objects).
|
||||
Find more on troubleshooting with `icinga2 object list` in [this chapter](15-troubleshooting.md#troubleshooting-list-configuration-objects).
|
||||
|
||||
|
||||
## Reload on Configuration Changes <a id="config-change-reload"></a>
|
||||
|
||||
Every time you have changed your configuration you should first tell Icinga 2
|
||||
Every time you have changed your configuration, you should first tell Icinga 2
|
||||
to [validate](11-cli-commands.md#config-validation). If there are no validation errors, you can
|
||||
safely reload the Icinga 2 daemon.
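
A typical sequence on systemd-based systems could look like this (a sketch):

```bash
icinga2 daemon -C && systemctl reload icinga2
```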
|
||||
|
||||
|
@ -115,7 +115,7 @@ You can also use [jq](https://stedolan.github.io/jq/) or `python -m json.tool`
|
||||
in combination with curl on the CLI.
|
||||
|
||||
```bash
|
||||
curl ... | jq
|
||||
curl ... | jq
|
||||
curl ... | python -m json.tool
|
||||
```
|
||||
|
||||
@ -288,6 +288,7 @@ Available permissions for specific URL endpoints:
|
||||
config/query | /v1/config | No | 1
|
||||
config/modify | /v1/config | No | 512
|
||||
console | /v1/console | No | 1
|
||||
debug | /v1/debug | No | 1
|
||||
events/<type> | /v1/events | No | 1
|
||||
objects/query/<type> | /v1/objects | Yes | 1
|
||||
objects/create/<type> | /v1/objects | No | 1
|
||||
@ -497,7 +498,7 @@ The example below is not valid:
|
||||
-d '{ "type": "Host", "filter": ""linux-servers" in host.groups" }'
|
||||
```
|
||||
|
||||
The double quotes need to be escaped with a preceeding backslash:
|
||||
The double quotes need to be escaped with a preceding backslash:
|
||||
|
||||
```
|
||||
-d '{ "type": "Host", "filter": "\"linux-servers\" in host.groups" }'
|
||||
@ -565,7 +566,7 @@ created by the API.
|
||||
### Querying Objects <a id="icinga2-api-config-objects-query"></a>
|
||||
|
||||
You can request information about configuration objects by sending
|
||||
a `GET` query to the `/v1/objects/<type>` URL endpoint. `<type` has
|
||||
a `GET` query to the `/v1/objects/<type>` URL endpoint. `<type>` has
|
||||
to be replaced with the plural name of the object type you are interested
|
||||
in:
|
||||
|
||||
@ -1008,7 +1009,7 @@ curl -k -s -S -i -u root:icinga -H 'Accept: application/json' \
|
||||
There are several actions available for Icinga 2 provided by the `/v1/actions`
|
||||
URL endpoint. You can run actions by sending a `POST` request.
|
||||
|
||||
The following actions are also used by [Icinga Web 2](https://icinga.com/products/icinga-web-2/):
|
||||
The following actions are also used by [Icinga Web 2](https://icinga.com/docs/icinga-web/latest/):
|
||||
|
||||
* sending check results to Icinga from scripts, remote agents, etc.
|
||||
* scheduling downtimes from external scripts or cronjobs
|
||||
@ -1072,7 +1073,7 @@ Send a `POST` request to the URL endpoint `/v1/actions/process-check-result`.
|
||||
exit\_status | Number | **Required.** For services: 0=OK, 1=WARNING, 2=CRITICAL, 3=UNKNOWN, for hosts: 0=UP, 1=DOWN.
|
||||
plugin\_output | String | **Required.** One or more lines of the plugin main output. Does **not** contain the performance data.
|
||||
performance\_data | Array<code>|</code>String | **Optional.** The performance data as array of strings. The raw performance data string can be used too.
|
||||
check\_command | Array<code>|</code>String | **Optional.** The first entry should be the check commands path, then one entry for each command line option followed by an entry for each of its argument. Alternativly a single string can be used.
|
||||
check\_command | Array<code>|</code>String | **Optional.** The first entry should be the check commands path, then one entry for each command line option followed by an entry for each of its argument. Alternatively a single string can be used.
|
||||
check\_source | String | **Optional.** Usually the name of the `command_endpoint`
|
||||
execution\_start | Timestamp | **Optional.** The timestamp where a script/process started its execution.
|
||||
execution\_end | Timestamp | **Optional.** The timestamp where a script/process ended its execution. This timestamp is used in features to determine e.g. the metric timestamp.
|
||||
@ -1657,14 +1658,14 @@ Send a `POST` request to the URL endpoint `/v1/actions/execute-command`.
|
||||
--------------|------------|--------------
|
||||
ttl | Number | **Required.** The time to live of the execution expressed in seconds.
|
||||
command_type | String | **Optional.** The command type: `CheckCommand` or `EventCommand` or `NotificationCommand`. Default: `EventCommand`
|
||||
command | String | **Optional.** The command to execute. Its type must the same as `command_type`. It can be a macro string. Default: depending on the `command_type` it's either `$check_command$`, `$event_command$` or `$notification_command$`
|
||||
command | String | **Optional.** The command to execute. Its type must the same as `command_type`. It can be a macro string. Default: depending on the `command_type` it's either `$check_command$`, `$event_command$` or `$notification_command$`
|
||||
endpoint | String | **Optional.** The endpoint to execute the command on. It can be a macro string. Default: `$command_endpoint$`.
|
||||
macros | Dictionary | **Optional.** Macro overrides. Default: `{}`
|
||||
user | String | **Optional.** The user used for the notification command.
|
||||
user | String | **Optional.** The user used for the notification command.
|
||||
notification | String | **Optional.** The notification used for the notification command.
|
||||
|
||||
|
||||
Example:
|
||||
|
||||
|
||||
```bash
|
||||
curl -k -s -S -i -u root:icinga -H 'Accept: application/json' \
|
||||
-X POST 'https://localhost:5665/v1/actions/execute-command' \
|
||||
@ -2018,7 +2019,7 @@ validate the configuration asynchronously and populate a status log which
|
||||
can be fetched in a separated request. Once the validation succeeds,
|
||||
a reload is triggered by default.
|
||||
|
||||
This functionality was primarly developed for the [Icinga Director](https://icinga.com/docs/director/latest/)
|
||||
This functionality was primarily developed for the [Icinga Director](https://icinga.com/docs/director/latest/)
|
||||
but can be used with your own deployments too. It also solves the problem
|
||||
with certain runtime objects (zones, endpoints) and can be used to
|
||||
deploy global templates in [global cluster zones](06-distributed-monitoring.md#distributed-monitoring-global-zone-config-sync).
|
||||
@ -2373,7 +2374,7 @@ Creation, modification and deletion of templates at runtime is not supported.
|
||||
### Querying Templates <a id="icinga2-api-config-templates-query"></a>
|
||||
|
||||
You can request information about configuration templates by sending
|
||||
a `GET` query to the `/v1/templates/<type>` URL endpoint. `<type` has
|
||||
a `GET` query to the `/v1/templates/<type>` URL endpoint. `<type>` has
|
||||
to be replaced with the plural name of the object type you are interested
|
||||
in:
|
||||
|
||||
@ -2528,6 +2529,72 @@ curl -k -s -S -i -u root:icinga -H 'Accept: application/json' \
|
||||
}
|
||||
```
|
||||
|
||||
## Memory Usage Analysis <a id="icinga2-api-memory"></a>
|
||||
|
||||
The GNU libc function `malloc_info(3)` provides memory allocation and usage
|
||||
statistics of Icinga 2 itself. You can call it directly by sending a `GET`
|
||||
request to the URL endpoint `/v1/debug/malloc_info`.
|
||||
|
||||
The [API permission](12-icinga2-api.md#icinga2-api-permissions) `debug` is required.
|
||||
|
||||
Example:
|
||||
|
||||
```bash
|
||||
curl -k -s -S -i -u root:icinga https://localhost:5665/v1/debug/malloc_info
|
||||
```
|
||||
|
||||
In contrast to other API endpoints, the response is not JSON,
|
||||
but the raw XML output from `malloc_info(3)`. See also the
|
||||
[glibc malloc(3) internals](https://sourceware.org/glibc/wiki/MallocInternals).
|
||||
|
||||
```xml
|
||||
<malloc version="1">
|
||||
<heap nr="0">
|
||||
<sizes>
|
||||
<size from="33" to="48" total="96" count="2"/>
|
||||
<size from="49" to="64" total="192" count="3"/>
|
||||
<size from="65" to="80" total="80" count="1"/>
|
||||
<unsorted from="84817" to="84817" total="84817" count="1"/>
|
||||
</sizes>
|
||||
<total type="fast" count="6" size="368"/>
|
||||
<total type="rest" count="2" size="859217"/>
|
||||
<system type="current" size="7409664"/>
|
||||
<system type="max" size="7409664"/>
|
||||
<aspace type="total" size="7409664"/>
|
||||
<aspace type="mprotect" size="7409664"/>
|
||||
</heap>
|
||||
<!-- ... -->
|
||||
<heap nr="30">
|
||||
<sizes>
|
||||
<size from="17" to="32" total="96" count="3"/>
|
||||
<size from="33" to="48" total="576" count="12"/>
|
||||
<size from="49" to="64" total="64" count="1"/>
|
||||
<size from="97" to="112" total="3584" count="32"/>
|
||||
<size from="49" to="49" total="98" count="2"/>
|
||||
<size from="81" to="81" total="810" count="10"/>
|
||||
<size from="257" to="257" total="2827" count="11"/>
|
||||
<size from="689" to="689" total="689" count="1"/>
|
||||
<size from="705" to="705" total="705" count="1"/>
|
||||
<unsorted from="81" to="81" total="81" count="1"/>
|
||||
</sizes>
|
||||
<total type="fast" count="48" size="4320"/>
|
||||
<total type="rest" count="27" size="118618"/>
|
||||
<system type="current" size="135168"/>
|
||||
<system type="max" size="135168"/>
|
||||
<aspace type="total" size="135168"/>
|
||||
<aspace type="mprotect" size="135168"/>
|
||||
<aspace type="subheaps" size="1"/>
|
||||
</heap>
|
||||
<total type="fast" count="938" size="79392"/>
|
||||
<total type="rest" count="700" size="4409469"/>
|
||||
<total type="mmap" count="0" size="0"/>
|
||||
<system type="current" size="15114240"/>
|
||||
<system type="max" size="15114240"/>
|
||||
<aspace type="total" size="15114240"/>
|
||||
<aspace type="mprotect" size="15114240"/>
|
||||
</malloc>
|
||||
```
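
If you just want a quick figure instead of the whole tree, the XML can be post-processed on the shell. The following is a minimal sketch, assuming `xmllint` from libxml2 is available; it extracts the overall current heap size reported at the end of the document:

```bash
# Fetch the malloc_info XML and pull out the top-level current heap size.
curl -k -s -u root:icinga https://localhost:5665/v1/debug/malloc_info \
  | xmllint --xpath '/malloc/system[@type="current"]/@size' -
```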
|
||||
|
||||
## API Clients <a id="icinga2-api-clients"></a>
|
||||
|
||||
After its initial release in 2015, community members
|
||||
@ -2571,7 +2638,7 @@ Name | Language | Description
|
||||
[BitBar for OSX](https://getbitbar.com/plugins/Dev/Icinga2/icinga2.24m.py) | Python | macOS tray app for highlighting the host/service status
|
||||
[Icinga 2 Multistatus](https://chrome.google.com/webstore/detail/icinga-multi-status/khabbhcojgkibdeipanmiphceeoiijal/related) | - | Chrome Extension
|
||||
[Naglite4](https://github.com/wftech/icinga2-naglite4) | Python | Naglite3 rewrite using the Icinga 2 REST API.
|
||||
[icinga-telegram-bot](https://github.com/joni1993/icinga-telegram-bot) | Python | Telegram Bot using the Icinga 2 REST API
|
||||
[icinga-telegram-bot](https://github.com/joni1993/icinga-telegram-bot) | Python | Telegram Bot using the Icinga 2 REST API
|
||||
|
||||
### Manage Objects <a id="icinga2-api-clients-management"></a>
|
||||
|
||||
@ -2632,7 +2699,7 @@ The following languages are covered:
|
||||
* [Golang](12-icinga2-api.md#icinga2-api-clients-programmatic-examples-golang)
|
||||
* [Powershell](12-icinga2-api.md#icinga2-api-clients-programmatic-examples-powershell)
|
||||
|
||||
The [request method](icinga2-api-requests) is `POST` using [X-HTTP-Method-Override: GET](12-icinga2-api.md#icinga2-api-requests-method-override)
|
||||
The [request method](#icinga2-api-requests) is `POST` using [X-HTTP-Method-Override: GET](12-icinga2-api.md#icinga2-api-requests-method-override)
|
||||
which allows you to send a JSON request body. The examples request specific service
|
||||
attributes joined with host attributes. `attrs` and `joins` are therefore specified
|
||||
as array.
|
||||
|
@ -32,7 +32,7 @@ vim /etc/icinga2/conf.d/templates.conf
|
||||
|
||||
Install the package `nano-icinga2` with your distribution's package manager.
|
||||
|
||||
**Note:** On Debian, Ubuntu and Raspbian, the syntax files are installed with the `icinga2-common` package already.
|
||||
**Note:** On Debian, Ubuntu and Raspberry Pi OS, the syntax files are installed with the `icinga2-common` package already.
|
||||
|
||||
Copy the `/etc/nanorc` sample file to your home directory.
|
||||
|
||||
@ -71,9 +71,6 @@ via email.
|
||||
|
||||

|
||||
|
||||
Follow along in this [hands-on blog post](https://icinga.com/2019/06/17/icinga-reporting-hands-on/).
|
||||
|
||||
|
||||
## Graphs and Metrics <a id="addons-graphs-metrics"></a>
|
||||
|
||||
### Graphite <a id="addons-graphing-graphite"></a>
|
||||
@ -125,7 +122,7 @@ icinga2 feature enable influxdb2
|
||||
|
||||
A popular frontend for InfluxDB is for example [Grafana](https://grafana.org).
|
||||
|
||||
Integration in Icinga Web 2 is possible by installing the community [Grafana module](https://github.com/Mikesch-mp/icingaweb2-module-grafana).
|
||||
Integration in Icinga Web 2 is possible by installing the community [Grafana module](https://github.com/NETWAYS/icingaweb2-module-grafana).
|
||||
|
||||

|
||||
|
||||
@ -185,7 +182,7 @@ in a tree or list overview and can be added to any dashboard.
|
||||
|
||||

|
||||
|
||||
Read more [here](https://icinga.com/products/icinga-business-process-modelling/).
|
||||
Read more [here](https://icinga.com/docs/icinga-business-process-modeling/latest/).
|
||||
|
||||
### Certificate Monitoring <a id="addons-visualization-certificate-monitoring"></a>
|
||||
|
||||
@ -194,8 +191,7 @@ actions and view all details at a glance.
|
||||
|
||||

|
||||
|
||||
Read more [here](https://icinga.com/products/icinga-certificate-monitoring/)
|
||||
and [here](https://icinga.com/2019/06/03/monitoring-automation-with-icinga-certificate-monitoring/).
|
||||
Read more [here](https://icinga.com/products/icinga-certificate-monitoring/).
|
||||
|
||||
### Dashing Dashboard <a id="addons-visualization-dashing-dashboard"></a>
|
||||
|
||||
@ -204,7 +200,7 @@ on top of Dashing and uses the [REST API](12-icinga2-api.md#icinga2-api) to visu
|
||||
on with your monitoring. It combines several popular widgets and provides development
|
||||
instructions for your own implementation.
|
||||
|
||||
The dashboard also allows to embed the [Icinga Web 2](https://icinga.com/products/icinga-web-2/)
|
||||
The dashboard also allows to embed the [Icinga Web 2](https://icinga.com/docs/icinga-web/latest/)
|
||||
host and service problem lists as an iframe.
|
||||
|
||||

|
||||
@ -234,10 +230,6 @@ There's a variety of resources available, for example different notification scr
|
||||
* Ticket systems
|
||||
* etc.
|
||||
|
||||
Blog posts and howtos:
|
||||
|
||||
* [Environmental Monitoring and Alerting](https://icinga.com/2019/09/02/environmental-monitoring-and-alerting-via-text-message/)
|
||||
|
||||
Additionally external services can be [integrated with Icinga 2](https://icinga.com/products/integrations/):
|
||||
|
||||
* [Pagerduty](https://icinga.com/products/integrations/pagerduty/)
|
||||
|
@ -52,7 +52,7 @@ Icinga DB is a set of components for publishing, synchronizing and
|
||||
visualizing monitoring data in the Icinga ecosystem, consisting of:
|
||||
|
||||
* Icinga 2 with its `icingadb` feature enabled,
|
||||
responsible for publishing monitoring data to a Redis server, i.e. configuration and its runtime updates,
|
||||
responsible for publishing monitoring data to a Redis server, i.e. configuration and its runtime updates,
|
||||
check results, state changes, downtimes, acknowledgements, notifications, and other events such as flapping
|
||||
* The [Icinga DB daemon](https://icinga.com/docs/icinga-db),
|
||||
which synchronizes the data between the Redis server and a database
|
||||
@ -106,7 +106,7 @@ The current naming schema is defined as follows. The [Icinga Web 2 Graphite modu
|
||||
depends on this schema.
|
||||
|
||||
The default prefix for hosts and services is configured using
|
||||
[runtime macros](03-monitoring-basics.md#runtime-macros)like this:
|
||||
[runtime macros](03-monitoring-basics.md#runtime-macros) like this:
|
||||
|
||||
```
|
||||
icinga2.$host.name$.host.$host.check_command$
|
||||
@ -147,7 +147,7 @@ parsed from plugin output:
|
||||
|
||||
Note that labels may contain dots (`.`) allowing to
|
||||
add more subsequent levels inside the Graphite tree.
|
||||
`::` adds support for [multi performance labels](http://my-plugin.de/wiki/projects/check_multi/configuration/performance)
|
||||
`::` adds support for [multi performance labels](https://github.com/flackem/check_multi/blob/next/doc/configuration/performance.md)
|
||||
and is therefore replaced by `.`.
|
||||
|
||||
By enabling `enable_send_thresholds` Icinga 2 automatically adds the following threshold metrics:
|
||||
@ -246,7 +246,7 @@ resolved, it will be dropped and not sent to the target host.
|
||||
|
||||
Backslashes are allowed in tag keys, tag values and field keys, however they are also
|
||||
escape characters when followed by a space or comma, but cannot be escaped themselves.
|
||||
As a result all trailling slashes in these fields are replaced with an underscore. This
|
||||
As a result all trailing slashes in these fields are replaced with an underscore. This
|
||||
predominantly affects Windows paths e.g. `C:\` becomes `C:_`.
|
||||
|
||||
The database/bucket is assumed to exist so this object will make no attempt to create it currently.
|
||||
@ -335,16 +335,14 @@ More integrations:
|
||||
#### Elasticsearch Writer <a id="elasticsearch-writer"></a>
|
||||
|
||||
This feature forwards check results, state changes and notification events
|
||||
to an [Elasticsearch](https://www.elastic.co/products/elasticsearch) installation over its HTTP API.
|
||||
to an [Elasticsearch](https://www.elastic.co/products/elasticsearch) or an [OpenSearch](https://opensearch.org/) installation over its HTTP API.
|
||||
|
||||
The check results include parsed performance data metrics if enabled.
|
||||
|
||||
> **Note**
|
||||
>
|
||||
> Elasticsearch 5.x or 6.x are required. This feature has been successfully tested with
|
||||
> Elasticsearch 5.6.7 and 6.3.1.
|
||||
|
||||
|
||||
> Elasticsearch 7.x, 8.x or Opensearch 2.12.x are required. This feature has been successfully tested with
|
||||
> Elasticsearch 7.17.10, 8.8.1 and OpenSearch 2.13.0.
|
||||
|
||||
Enable the feature and restart Icinga 2.
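
For example (a sketch, assuming the default feature files shipped with the packages):

```bash
# Enable the Elasticsearch writer feature and restart the daemon.
icinga2 feature enable elasticsearch
systemctl restart icinga2
```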
|
||||
|
||||
@ -366,7 +364,8 @@ The following event types are written to Elasticsearch:
|
||||
* icinga2.event.notification
|
||||
|
||||
Performance data metrics must be explicitly enabled with the `enable_send_perfdata`
|
||||
attribute.
|
||||
attribute. Be aware that this will create a new field mapping in the index for each performance data metric in a check plugin.
|
||||
See: [ElasticsearchWriter](09-object-types.md#objecttype-elasticsearchwriter)
|
||||
|
||||
Metric values are stored like this:
|
||||
|
||||
@ -385,7 +384,7 @@ The following characters are escaped in perfdata labels:
|
||||
|
||||
Note that perfdata labels may contain dots (`.`) allowing to
|
||||
add more subsequent levels inside the tree.
|
||||
`::` adds support for [multi performance labels](http://my-plugin.de/wiki/projects/check_multi/configuration/performance)
|
||||
`::` adds support for [multi performance labels](https://github.com/flackem/check_multi/blob/next/doc/configuration/performance.md)
|
||||
and is therefore replaced by `.`.
|
||||
|
||||
Icinga 2 automatically adds the following threshold metrics
|
||||
@ -398,6 +397,28 @@ check_result.perfdata.<perfdata-label>.warn
|
||||
check_result.perfdata.<perfdata-label>.crit
|
||||
```
|
||||
|
||||
Additionally it is possible to configure custom tags that are applied to the metrics via `host_tags_template` or `service_tags_template`.
|
||||
Depending on whether the write event was triggered on a service or host object, additional tags are added to the Elasticsearch entries.
|
||||
|
||||
A host metrics entry configured with the following `host_tags_template`:
|
||||
|
||||
```
|
||||
host_tags_template = {
|
||||
|
||||
os_name = "$host.vars.os$"
|
||||
custom_label = "A Custom Label"
|
||||
list = [ "$host.groups$", "$host.vars.foo$" ]
|
||||
}
|
||||
```
|
||||
|
||||
Will in addition to the above mentioned lines also contain:
|
||||
|
||||
```
|
||||
os_name = "Linux"
|
||||
custom_label = "A Custom Label"
|
||||
list = [ "group-A;linux-servers", "bar" ]
|
||||
```
|
||||
|
||||
#### Elasticsearch in Cluster HA Zones <a id="elasticsearch-writer-cluster-ha"></a>
|
||||
|
||||
The Elasticsearch feature supports [high availability](06-distributed-monitoring.md#distributed-monitoring-high-availability-features)
|
||||
@ -422,11 +443,11 @@ or Logstash for additional filtering.
|
||||
|
||||
#### GELF Writer <a id="gelfwriter"></a>
|
||||
|
||||
The `Graylog Extended Log Format` (short: [GELF](https://docs.graylog.org/en/latest/pages/gelf.html))
|
||||
The `Graylog Extended Log Format` (short: GELF)
|
||||
can be used to send application logs directly to a TCP socket.
|
||||
|
||||
While it has been specified by the [Graylog](https://www.graylog.org) project as their
|
||||
[input resource standard](https://docs.graylog.org/en/latest/pages/sending_data.html), other tools such as
|
||||
[input resource standard](https://go2docs.graylog.org/current/getting_in_log_data/inputs.htm), other tools such as
|
||||
[Logstash](https://www.elastic.co/products/logstash) also support `GELF` as
|
||||
[input type](https://www.elastic.co/guide/en/logstash/current/plugins-inputs-gelf.html).
|
||||
|
||||
@ -554,7 +575,7 @@ with the following tags
|
||||
Functionality exists to modify the built in OpenTSDB metric names that the plugin
|
||||
writes to. By default this is `icinga.host` and `icinga.service.<servicename>`.
|
||||
|
||||
These prefixes can be modified as necessary to any arbitary string. The prefix
|
||||
These prefixes can be modified as necessary to any arbitrary string. The prefix
|
||||
configuration also supports Icinga macros, so if you rather use `<checkcommand>`
|
||||
or any other variable instead of `<servicename>` you may do so.
|
||||
|
||||
@ -815,16 +836,6 @@ apt-get install icinga2-ido-mysql
|
||||
default. You can skip the automated setup and install/upgrade the
|
||||
database manually if you prefer.
|
||||
|
||||
###### CentOS 7
|
||||
|
||||
!!! info
|
||||
|
||||
Note that installing `icinga2-ido-mysql` is only supported on CentOS 7 as CentOS 8 is EOL.
|
||||
|
||||
```bash
|
||||
yum install icinga2-ido-mysql
|
||||
```
|
||||
|
||||
###### RHEL 8
|
||||
|
||||
```bash
|
||||
@ -914,16 +925,6 @@ apt-get install icinga2-ido-pgsql
|
||||
You can skip the automated setup and install/upgrade the database manually
|
||||
if you prefer that.
|
||||
|
||||
###### CentOS 7
|
||||
|
||||
!!! info
|
||||
|
||||
Note that installing `icinga2-ido-pgsql` is only supported on CentOS 7 as CentOS 8 is EOL.
|
||||
|
||||
```bash
|
||||
yum install icinga2-ido-pgsql
|
||||
```
|
||||
|
||||
###### RHEL 8
|
||||
|
||||
```bash
|
||||
@ -1118,7 +1119,7 @@ As with any application database, there are ways to optimize and tune the databa
|
||||
|
||||
General tips for performance tuning:
|
||||
|
||||
* [MariaDB KB](https://mariadb.com/kb/en/library/optimization-and-tuning/)
|
||||
* [MariaDB KB](https://mariadb.com/docs/server/ha-and-performance/optimization-and-tuning)
|
||||
* [PostgreSQL Wiki](https://wiki.postgresql.org/wiki/Performance_Optimization)
|
||||
|
||||
Re-creation of indexes, changed column values, etc. will increase the database size. Ensure to
|
||||
@ -1235,7 +1236,7 @@ on the [Icinga 1.x documentation](https://docs.icinga.com/latest/en/extcommands2
|
||||
> This feature is DEPRECATED and may be removed in future releases.
|
||||
> Check the [roadmap](https://github.com/Icinga/icinga2/milestones).
|
||||
|
||||
The [MK Livestatus](https://mathias-kettner.de/checkmk_livestatus.html) project
|
||||
The [MK Livestatus](https://exchange.nagios.org/directory/Documentation/MK-Livestatus/details) project
|
||||
implements a query protocol that lets users query their Icinga instance for
|
||||
status information. It can also be used to send commands.
|
||||
|
||||
|
@ -19,8 +19,8 @@ findings and details please.
|
||||
* `icinga2 --version`
|
||||
* `icinga2 feature list`
|
||||
* `icinga2 daemon -C`
|
||||
* [Icinga Web 2](https://icinga.com/products/icinga-web-2/) version (screenshot from System - About)
|
||||
* [Icinga Web 2 modules](https://icinga.com/products/icinga-web-2-modules/) e.g. the Icinga Director (optional)
|
||||
* [Icinga Web 2](https://icinga.com/docs/icinga-web/latest/) version (screenshot from System - About)
|
||||
* Icinga Web 2 modules e.g. the Icinga Director (optional)
|
||||
* Configuration insights:
|
||||
* Provide complete configuration snippets explaining your problem in detail
|
||||
* Your [icinga2.conf](04-configuration.md#icinga2-conf) file
|
||||
@ -42,7 +42,7 @@ is also key to identify bottlenecks and issues.
|
||||
>
|
||||
> [Monitor Icinga 2](08-advanced-topics.md#monitoring-icinga) and use the hints for further analysis.
|
||||
|
||||
* Analyze the system's performance and dentify bottlenecks and issues.
|
||||
* Analyze the system's performance and identify bottlenecks and issues.
|
||||
* Collect details about all applications (e.g. Icinga 2, MySQL, Apache, Graphite, Elastic, etc.).
|
||||
* If data is exchanged via network (e.g. central MySQL cluster) ensure to monitor the bandwidth capabilities too.
|
||||
* Add graphs from Grafana or Graphite as screenshots to your issue description
|
||||
@ -176,6 +176,64 @@ C:\> cd C:\ProgramData\icinga2\var\log\icinga2
|
||||
C:\ProgramData\icinga2\var\log\icinga2> Get-Content .\debug.log -tail 10 -wait
|
||||
```
|
||||
|
||||
### Enable/Disable Debug Output on the fly <a id="troubleshooting-enable-disable-debug-output-api"></a>
|
||||
|
||||
The `debuglog` feature can also be created and deleted at runtime without having to restart Icinga 2.
|
||||
Technically, this is possible because this feature is a [FileLogger](09-object-types.md#objecttype-filelogger)
|
||||
that can be managed through the [API](12-icinga2-api.md#icinga2-api-config-objects).
|
||||
|
||||
This is a good alternative to `icinga2 feature enable debuglog` as object
|
||||
creation/deletion via API happens immediately and requires no restart.
|
||||
|
||||
The above matters in setups large enough for the reload to take a while.
|
||||
Such setups in particular produce a lot of debug log output until the debug logger is disabled again.
|
||||
|
||||
!!! info
|
||||
|
||||
In case of [an HA zone](06-distributed-monitoring.md#distributed-monitoring-scenarios-ha-master-agents),
|
||||
the following API examples toggle the feature on both nodes.
|
||||
|
||||
#### Enable Debug Output on the fly <a id="troubleshooting-enable-debug-output-api"></a>
|
||||
|
||||
```bash
|
||||
curl -k -s -S -i -u root:icinga -H 'Accept: application/json' \
|
||||
-X PUT 'https://localhost:5665/v1/objects/fileloggers/on-the-fly-debug-file' \
|
||||
-d '{ "attrs": { "severity": "debug", "path": "/var/log/icinga2/on-the-fly-debug.log" }, "pretty": true }'
|
||||
```
|
||||
|
||||
```json
|
||||
{
|
||||
"results": [
|
||||
{
|
||||
"code": 200.0,
|
||||
"status": "Object was created."
|
||||
}
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
#### Disable Debug Output on the fly <a id="troubleshooting-disable-debug-output-api"></a>
|
||||
|
||||
This works only for debug loggers enabled on the fly as above!
|
||||
|
||||
```bash
|
||||
curl -k -s -S -i -u root:icinga -H 'Accept: application/json' \
|
||||
-X DELETE 'https://localhost:5665/v1/objects/fileloggers/on-the-fly-debug-file?pretty=1'
|
||||
```
|
||||
|
||||
```json
|
||||
{
|
||||
"results": [
|
||||
{
|
||||
"code": 200.0,
|
||||
"name": "on-the-fly-debug-file",
|
||||
"status": "Object was deleted.",
|
||||
"type": "FileLogger"
|
||||
}
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
## Icinga starts/restarts/reloads very slowly
|
||||
|
||||
### Try swapping out the allocator
|
||||
@ -814,7 +872,7 @@ trying because you probably have a problem that requires manual intervention.
|
||||
|
||||
### Late Check Results <a id="late-check-results"></a>
|
||||
|
||||
[Icinga Web 2](https://icinga.com/products/icinga-web-2/) provides
|
||||
[Icinga Web 2](https://icinga.com/docs/icinga-web/latest/) provides
|
||||
a dashboard overview for `overdue checks`.
|
||||
|
||||
The REST API provides the [status](12-icinga2-api.md#icinga2-api-status) URL endpoint with some generic metrics
|
||||
@ -829,8 +887,7 @@ You can also calculate late check results via the REST API:
|
||||
* Fetch the `last_check` timestamp from each object
|
||||
* Compare the timestamp with the current time and add `check_interval` multiple times (change it to see which results are really late, like five times check_interval)
|
||||
|
||||
You can use the [icinga2 console](11-cli-commands.md#cli-command-console) to connect to the instance, fetch all data
|
||||
and calculate the differences. More infos can be found in [this blogpost](https://icinga.com/2016/08/11/analyse-icinga-2-problems-using-the-console-api/).
|
||||
You can use the [icinga2 console](11-cli-commands.md#cli-command-console) to connect to the instance, fetch all data and calculate the differences.
|
||||
|
||||
```
|
||||
# ICINGA2_API_USERNAME=root ICINGA2_API_PASSWORD=icinga icinga2 console --connect 'https://localhost:5665/'
|
||||
@ -878,7 +935,7 @@ actively attempts to schedule and execute checks. Otherwise the node does not fe
|
||||
}
|
||||
```
|
||||
|
||||
You may ask why this analysis is important? Fair enough - if the numbers are not inverted in a HA zone
|
||||
You may ask why this analysis is important? Fair enough - if the numbers are not inverted in an HA zone
|
||||
with two members, this may give a hint that the cluster nodes are in a split-brain scenario, or you've
|
||||
found a bug in the cluster.
|
||||
|
||||
@ -1640,6 +1697,9 @@ Typical errors are:
|
||||
* The api feature doesn't [accept config](06-distributed-monitoring.md#distributed-monitoring-top-down-config-sync). This is logged into `/var/lib/icinga2/icinga2.log`.
|
||||
* The received configuration zone is not configured in [zones.conf](04-configuration.md#zones-conf) and Icinga denies it. This is logged into `/var/lib/icinga2/icinga2.log`.
|
||||
* The satellite/agent has local configuration in `/etc/icinga2/zones.d` and thinks it is authoritative for this zone. It then denies the received update. Purge the content from `/etc/icinga2/zones.d`, `/var/lib/icinga2/api/zones/*` and restart Icinga to fix this.
|
||||
* Configuration parts stored outside of `/etc/icinga2/zones.d` on the master, for example a constant in `/etc/icinga2/constants.conf`, are then missing on the satellite/agent.
|
||||
|
||||
Note that if set up, the [built-in icinga CheckCommand](10-icinga-template-library.md#icinga) will notify you in case the config sync wasn't successful.
|
||||
|
||||
#### New configuration does not trigger a reload <a id="troubleshooting-cluster-config-sync-no-reload"></a>
|
||||
|
||||
|
@ -8,6 +8,28 @@ Specific version upgrades are described below. Please note that version
|
||||
updates are incremental. An upgrade from v2.6 to v2.8 requires you to
|
||||
follow the instructions for v2.7 too.
|
||||
|
||||
## Upgrading to v2.15 <a id="upgrading-to-2-15"></a>
|
||||
|
||||
### Icinga DB <a id="upgrading-to-2-15-icingadb"></a>
|
||||
|
||||
Version 2.15.0 of Icinga 2 is released alongside Icinga DB 1.4.0 and Icinga DB
|
||||
Web 1.2.0. A change to the internal communication API requires these updates to
|
||||
be applied together. To put it simply, Icinga 2.15.0 needs Icinga DB 1.4.0 or
|
||||
later.
|
||||
|
||||
### REST API Attribute Filter <a id="upgrading-to-2-15-attrs"></a>
|
||||
|
||||
When [querying objects](12-icinga2-api.md#icinga2-api-config-objects-query)
|
||||
using the API, specifying `{"attrs":[]}` now returns the objects with no
|
||||
attributes. Not supplying the parameter or using `{"attrs":null}` still returns
|
||||
the unfiltered list of all attributes.
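
For example, the following request, using the conventions from the [API chapter](12-icinga2-api.md#icinga2-api-requests-method-override), returns each host with an empty `attrs` dictionary:

```bash
# Query all hosts but suppress every attribute via "attrs": [].
curl -k -s -S -i -u root:icinga -H 'Accept: application/json' \
 -H 'X-HTTP-Method-Override: GET' -X POST \
 'https://localhost:5665/v1/objects/hosts' \
 -d '{ "attrs": [], "pretty": true }'
```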
|
||||
|
||||
### Removed DSL Functions <a id="upgrading-to-2-15-dsl"></a>
|
||||
|
||||
The undocumented `Checkable#process_check_result` and `System#track_parents`
|
||||
functions were removed from the Icinga 2 config language (the
|
||||
`process-check-result` API action is unaffected by this).
|
||||
|
||||
## Upgrading to v2.14 <a id="upgrading-to-2-14"></a>
|
||||
|
||||
### Dependencies and Redundancy Groups <a id="upgrading-to-2-14-dependencies"></a>
|
||||
@ -106,7 +128,7 @@ have been removed from the command and documentation.
|
||||
### Bugfixes for 2.11 <a id="upgrading-to-2-11-bugfixes"></a>
|
||||
|
||||
2.11.1 on agents/satellites fixes a problem where 2.10.x as config master would send out an unwanted config marker file,
|
||||
thus rendering the agent to think it is autoritative for the config, and never accepting any new
|
||||
thus rendering the agent to think it is authoritative for the config, and never accepting any new
|
||||
config files for the zone(s). **If your config master is 2.11.x already, you are not affected by this problem.**
|
||||
|
||||
In order to fix this, upgrade to at least 2.11.1, and purge away the local config sync storage once, then restart.
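
On an affected agent or satellite this roughly translates to the following sketch; the path is the config sync storage also mentioned in the troubleshooting chapter, so double-check it matches your setup before deleting anything:

```bash
# Purge the locally synced zone configuration once, then restart.
rm -rf /var/lib/icinga2/api/zones/*
systemctl restart icinga2
```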
|
||||
@ -368,7 +390,7 @@ This affects the following features:
|
||||
The reconnect failover has been improved, and the default `failover_timeout`
|
||||
for the DB IDO features has been lowered from 60 to 30 seconds.
|
||||
Object authority updates (required for balancing in the cluster) happen
|
||||
more frequenty (was 30, is 10 seconds).
|
||||
more frequently (was 30, is 10 seconds).
|
||||
Also the cold startup without object authority updates has been reduced
|
||||
from 60 to 30 seconds. This is to allow cluster reconnects (lowered from 60s to 10s in 2.10)
|
||||
before actually considering a failover/split brain scenario.
|
||||
|
@ -97,6 +97,7 @@ Character | Escape sequence
|
||||
--------------------------|------------------------------------
|
||||
" | \\"
|
||||
\\ | \\\\
|
||||
$ | $$
|
||||
<TAB> | \\t
|
||||
<CARRIAGE-RETURN> | \\r
|
||||
<LINE-FEED> | \\n
|
||||
@ -107,6 +108,10 @@ In addition to these pre-defined escape sequences you can specify
|
||||
arbitrary ASCII characters using the backslash character (\\) followed
|
||||
by an ASCII character in octal encoding.
|
||||
|
||||
In Icinga 2, the `$` character is reserved for resolving [runtime macros](03-monitoring-basics.md#runtime-macros).
|
||||
However, in situations where a string that isn't intended to be used as a runtime macro contains the `$` character,
|
||||
it is necessary to escape it with another `$` character.
|
||||
|
||||
### Multi-line String Literals <a id="multiline-string-literals"></a>
|
||||
|
||||
Strings spanning multiple lines can be specified by enclosing them in
|
||||
@ -661,7 +666,7 @@ setting the `check_command` attribute or custom variables as command parameters.
|
||||
and afterwards the `assign where` and `ignore where` conditions are evaluated.
|
||||
|
||||
It is not necessary to check attributes referenced in the `for loop` expression
|
||||
for their existance using an additional `assign where` condition.
|
||||
for their existence using an additional `assign where` condition.
|
||||
|
||||
More usage examples are documented in the [monitoring basics](03-monitoring-basics.md#using-apply-for)
|
||||
chapter.
|
||||
|
@ -1648,9 +1648,9 @@ Example:
|
||||
function set_x(val) {
|
||||
this.x = val
|
||||
}
|
||||
|
||||
|
||||
dict = {}
|
||||
|
||||
|
||||
set_x.call(dict, 7) /* Invokes set_x using `dict` as `this` */
|
||||
```
|
||||
|
||||
@ -1671,7 +1671,7 @@ Example:
|
||||
function set_x(val) {
|
||||
this.x = val
|
||||
}
|
||||
|
||||
|
||||
var dict = {}
|
||||
|
||||
var args = [ 7 ]
|
||||
|
@ -204,7 +204,7 @@ You can read the full story [here](https://github.com/Icinga/icinga2/issues/7309
|
||||
|
||||
With 2.11 you'll now see 3 processes:
|
||||
|
||||
- The umbrella process which takes care about signal handling and process spawning/stopping
|
||||
- The umbrella process which takes care of signal handling and process spawning/stopping
|
||||
- The main process with the check scheduler, notifications, etc.
|
||||
- The execution helper process
|
||||
|
||||
@ -622,25 +622,23 @@ The algorithm works like this:
|
||||
|
||||
* Determine whether this instance is assigned to a local zone and endpoint.
|
||||
* Collects all endpoints in this zone if they are connected.
|
||||
* If there's two endpoints, but only us seeing ourselves and the application start is less than 60 seconds in the past, do nothing (wait for cluster reconnect to take place, grace period).
|
||||
* If there's two endpoints, but only us seeing ourselves and the application start is less than
|
||||
30 seconds in the past, do nothing (wait for cluster reconnect to take place, grace period).
|
||||
* Sort the collected endpoints by name.
|
||||
* Iterate over all config types and their respective objects
|
||||
* Ignore !active objects
|
||||
* Ignore objects which are !HARunOnce. This means, they can run multiple times in a zone and don't need an authority update.
|
||||
* If this instance doesn't have a local zone, set authority to true. This is for non-clustered standalone environments where everything belongs to this instance.
|
||||
* Calculate the object authority based on the connected endpoint names.
|
||||
* Set the authority (true or false)
|
||||
* Ignore !active objects
|
||||
* Ignore objects which are !HARunOnce. This means, they can run multiple times in a zone and don't need an authority update.
|
||||
* If this instance doesn't have a local zone, set authority to true. This is for non-clustered standalone environments where everything belongs to this instance.
|
||||
* Calculate the object authority based on the connected endpoint names.
|
||||
* Set the authority (true or false)
|
||||
|
||||
The object authority calculation works "offline" without any message exchange.
|
||||
Each instance alculates the SDBM hash of the config object name, puts that in contrast
|
||||
modulo the connected endpoints size.
|
||||
This index is used to lookup the corresponding endpoint in the connected endpoints array,
|
||||
including the local endpoint. Whether the local endpoint is equal to the selected endpoint,
|
||||
or not, this sets the authority to `true` or `false`.
|
||||
|
||||
```cpp
|
||||
authority = endpoints[Utility::SDBM(object->GetName()) % endpoints.size()] == my_endpoint;
|
||||
```
|
||||
Each instance calculates the SDBM hash of the config object name. However, for objects bound to some
|
||||
host, i.e. the object name is composed of `<host_name>!<object_name>`, the SDBM hash is calculated based
|
||||
on the host name only instead of the full object name. That way, each child object like services, downtimes,
|
||||
etc. will be assigned to the same endpoint as the host object itself. The resulting hash modulo (`%`) the number of
|
||||
connected endpoints produces the index of the endpoint which is authoritative for this config object. If the
|
||||
endpoint at this index is equal to the local endpoint, the authority is set to `true`, otherwise it is set to `false`.
|
||||
|
||||
`ConfigObject::SetAuthority(bool authority)` triggers the following events:
|
||||
|
||||
@ -651,7 +649,7 @@ authority = endpoints[Utility::SDBM(object->GetName()) % endpoints.size()] == my
|
||||
that by querying the `paused` attribute for all objects via REST API
|
||||
or debug console on both endpoints.
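
A sketch of such a query against one of the endpoints, following the request conventions from the API chapter (attribute selection is illustrative):

```bash
# paused == false means this endpoint currently holds the authority for the object.
curl -k -s -u root:icinga -H 'Accept: application/json' \
 -H 'X-HTTP-Method-Override: GET' -X POST \
 'https://localhost:5665/v1/objects/services' \
 -d '{ "attrs": [ "paused" ], "pretty": true }'
```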
|
||||
|
||||
Endpoints inside a HA zone calculate the object authority independent from each other.
|
||||
Endpoints inside an HA zone calculate the object authority independent from each other.
|
||||
This object authority is important for selected features explained below.
|
||||
|
||||
Since features are configuration objects too, you must ensure that all nodes
|
||||
@ -1887,7 +1885,7 @@ source | String | The execution UUID
|
||||
|
||||
Special handling, calls `ClusterEvents::EnqueueCheck()` for command endpoint checks.
|
||||
This function enqueues check tasks into a queue which is controlled in `RemoteCheckThreadProc()`.
|
||||
If the `endpoint` parameter is specified and is not equal to the local endpoint then the message is forwarded to the correct endpoint zone.
|
||||
If the `endpoint` parameter is specified and is not equal to the local endpoint then the message is forwarded to the correct endpoint zone.
|
||||
|
||||
##### Permissions
|
||||
|
||||
@ -1932,7 +1930,7 @@ executions | Dictionary | Executions to be updated
|
||||
##### Functions
|
||||
|
||||
**Event Sender:** `ClusterEvents::ExecutedCommandAPIHandler`, `ClusterEvents::UpdateExecutionsAPIHandler`, `ApiActions::ExecuteCommand`
|
||||
**Event Receiver:** `ClusterEvents::UpdateExecutionsAPIHandler`
|
||||
**Event Receiver:** `ClusterEvents::UpdateExecutionsAPIHandler`
|
||||
|
||||
##### Permissions
|
||||
|
||||
@ -1962,7 +1960,7 @@ Key | Type | Description
|
||||
host | String | Host name.
|
||||
service | String | Service name.
|
||||
execution | String | The execution ID executed.
|
||||
exitStatus | Number | The command exit status.
|
||||
exitStatus | Number | The command exit status.
|
||||
output | String | The command output.
|
||||
start | Number | The unix timestamp at the start of the command execution
|
||||
end | Number | The unix timestamp at the end of the command execution
|
||||
@ -1970,7 +1968,7 @@ end | Number | The unix timestamp at the end of the command ex
|
||||
##### Functions
|
||||
|
||||
**Event Sender:** `ClusterEvents::ExecuteCheckFromQueue`, `ClusterEvents::ExecuteCommandAPIHandler`
|
||||
**Event Receiver:** `ClusterEvents::ExecutedCommandAPIHandler`
|
||||
**Event Receiver:** `ClusterEvents::ExecutedCommandAPIHandler`
|
||||
|
||||
##### Permissions
|
||||
|
||||
|
@ -48,7 +48,7 @@ or `icinga2-ido-mysql`.
|
||||
Distribution | Command
|
||||
-------------------|------------------------------------------
|
||||
Debian/Ubuntu | `apt-get install icinga2-dbg`
|
||||
RHEL/CentOS | `yum install icinga2-debuginfo`
|
||||
RHEL | `yum install icinga2-debuginfo`
|
||||
Fedora | `dnf install icinga2-debuginfo icinga2-bin-debuginfo icinga2-ido-mysql-debuginfo`
|
||||
SLES/openSUSE | `zypper install icinga2-bin-debuginfo icinga2-ido-mysql-debuginfo`
|
||||
|
||||
@ -65,7 +65,7 @@ Install GDB in your development environment.
|
||||
Distribution | Command
|
||||
-------------------|------------------------------------------
|
||||
Debian/Ubuntu | `apt-get install gdb`
|
||||
RHEL/CentOS | `yum install gdb`
|
||||
RHEL | `yum install gdb`
|
||||
Fedora | `dnf install gdb`
|
||||
SLES/openSUSE | `zypper install gdb`
|
||||
|
||||
@ -267,73 +267,130 @@ $3 = std::vector of length 11, capacity 16 = {{static NPos = 1844674407370955161
|
||||
|
||||
### Core Dump <a id="development-debug-core-dump"></a>
|
||||
|
||||
When the Icinga 2 daemon crashes with a `SIGSEGV` signal
|
||||
a core dump file should be written. This will help
|
||||
developers to analyze and fix the problem.
|
||||
When the Icinga 2 daemon is terminated by `SIGSEGV` or `SIGABRT`, a core dump file
|
||||
should be written. This will help developers to analyze and fix the problem.
|
||||
|
||||
#### Core Dump File Size Limit <a id="development-debug-core-dump-limit"></a>
|
||||
#### Core Dump Kernel Pattern <a id="development-debug-core-dump-format"></a>
|
||||
|
||||
This requires setting the core dump file size to `unlimited`.
|
||||
Core dumps are generated according to the format specified in
|
||||
`/proc/sys/kernel/core_pattern`. This can either be a path relative to the
|
||||
directory the program was started in, an absolute path or a pipe to a different
|
||||
program.
|
||||
|
||||
For more information see the [core(5)](https://man7.org/linux/man-pages/man5/core.5.html) man page.
|
||||
|
||||
##### Systemd
|
||||
#### Systemd Coredumpctl <a id="development-debug-core-dump-systemd"></a>
|
||||
|
||||
Most distributions offer systemd's coredumpctl either by default or as a package.
|
||||
Distributions that offer it by default include RHEL and SLES; on others, like
Debian or Ubuntu, it can be installed via the `systemd-coredump` package.
|
||||
When set up correctly, `core_pattern` will look something like this:
|
||||
```
|
||||
systemctl edit icinga2.service
|
||||
|
||||
[Service]
|
||||
...
|
||||
LimitCORE=infinity
|
||||
|
||||
systemctl daemon-reload
|
||||
|
||||
systemctl restart icinga2
|
||||
# cat /proc/sys/kernel/core_pattern
|
||||
|/usr/lib/systemd/systemd-coredump %P %u %g %s %t %c %h
|
||||
```
|
||||
|
||||
##### Init Script
|
||||
You can look at the generated core dumps with the `coredumpctl list` command.
|
||||
You can show information, including a stack trace using
|
||||
`coredumpctl show icinga2 -1` and retrieve the actual core dump file with
|
||||
`coredumpctl dump icinga2 -1 --output <file>`.
|
||||
|
||||
For further information on how to configure and use coredumpctl, read the man pages
|
||||
[coredumpctl(1)](https://man7.org/linux/man-pages/man1/coredumpctl.1.html) and
|
||||
[coredump.conf(5)](https://man7.org/linux/man-pages/man5/coredump.conf.5.html).
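
Put together, a typical inspection session could look like this (a sketch; the output path is arbitrary):

```bash
coredumpctl list icinga2                                # all recorded icinga2 crashes
coredumpctl show icinga2 -1                             # details and stack trace of the newest one
coredumpctl dump icinga2 -1 --output /tmp/icinga2.core  # extract the core file, e.g. for gdb
```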
|
||||
|
||||
#### Ubuntu Apport <a id="development-debug-core-dump-apport"></a>
|
||||
|
||||
Ubuntu uses their own application `apport` to record core dumps. When it is
|
||||
enabled, your `core_pattern` will look like this:
|
||||
```
|
||||
vim /etc/init.d/icinga2
|
||||
...
|
||||
ulimit -c unlimited
|
||||
|
||||
service icinga2 restart
|
||||
# cat /proc/sys/kernel/core_pattern
|
||||
|/usr/share/apport/apport -p%p -s%s -c%c -d%d -P%P -u%u -g%g -- %E
|
||||
```
|
||||
|
||||
##### Verify
|
||||
|
||||
Verify that the Icinga 2 process core file size limit is set to `unlimited`.
|
||||
Apport is unsuitable for development work, because by default it only works
|
||||
with Ubuntu packages and it has a rather complicated interface for retrieving
|
||||
the core dump. So unless you rely on Apport for some other workflow, systemd's
|
||||
coredumpctl is a much better option and is available on Ubuntu in the
|
||||
`systemd-coredump` package that can replace Apport on your system with no
|
||||
further setup required.
|
||||
|
||||
If you still want to use Apport however, to set it up to work with unpackaged programs,
|
||||
add the following (create the file if it doesn't exist) to `/etc/apport/settings`:
|
||||
```
|
||||
for pid in $(pidof icinga2); do cat /proc/$pid/limits; done
|
||||
|
||||
...
|
||||
Max core file size unlimited unlimited bytes
|
||||
[main]
|
||||
unpackaged=true
|
||||
```
|
||||
and restart Apport:
|
||||
```
|
||||
systemctl restart apport.service
|
||||
```
|
||||
|
||||
When the program crashes you can then find an Apport crash report in `/var/crash/`
|
||||
that you can read with the interactive `apport-cli` command. To extract the core
|
||||
dump you run `apport-unpack /var/crash/<crash-file> <output-dir>` which then
|
||||
saves a `<output-dir>/CoreDump` file that contains the actual core dump.
|
||||
|
||||
#### Core Dump Kernel Format <a id="development-debug-core-dump-format"></a>
|
||||
#### Directly to a File <a id="development-debug-core-dump-direct"></a>
|
||||
|
||||
The Icinga 2 daemon runs with the SUID bit set. Therefore you need
|
||||
to explicitly enable core dumps for SUID on Linux.
|
||||
If coredumpctl is not available, simply writing the core dump directly to a file
|
||||
is also sufficient. You can set up your `core_pattern` to write a file to a
|
||||
suitable path:
|
||||
|
||||
```bash
|
||||
sysctl -w fs.suid_dumpable=2
|
||||
```
|
||||
|
||||
Adjust the coredump kernel format and file location on Linux:
|
||||
|
||||
```bash
|
||||
sysctl -w kernel.core_pattern=/var/lib/cores/core.%e.%p
|
||||
|
||||
sysctl -w kernel.core_pattern=/var/lib/cores/core.%e.%p.%h.%t
|
||||
install -m 1777 -d /var/lib/cores
|
||||
```
|
||||
|
||||
MacOS:
|
||||
If you want to make this setting permanent you can also add a file to
|
||||
`/etc/sysctl.d`, named something like `80-coredumps.conf`:
|
||||
```
|
||||
kernel.core_pattern = /var/lib/cores/core.%e.%p.%h.%t
|
||||
```
|
||||
|
||||
This will create core dump files in `/var/lib/cores` where `%e` is the truncated
|
||||
name of the program, `%p` is the program's PID, `%h` is the hostname, and `%t` a
|
||||
timestamp.
|
||||
|
||||
Note that unlike the other methods this requires the core size limit to be set
|
||||
for the process. When starting Icinga 2 via systemd you can set it to unlimited
|
||||
by adding the following to `/etc/systemd/system/icinga2.service.d/limits.conf`:
|
||||
```
|
||||
[Service]
|
||||
LimitCORE=infinity
|
||||
```
|
||||
|
||||
Then reload and restart icinga:
|
||||
```bash
|
||||
systemctl daemon-reload
|
||||
systemctl restart icinga2.service
|
||||
```
|
||||
|
||||
Alternatively you edit and reload in one step:
|
||||
```bash
|
||||
systemctl edit --drop-in=limits icinga2.service
|
||||
```
|
||||
|
||||
When using an init script or starting manually, you need to run `ulimit -c unlimited`
|
||||
before starting the program:
|
||||
```bash
|
||||
ulimit -c unlimited
|
||||
./icinga2 daemon
|
||||
```
|
||||
|
||||
To verify that the limit has been set to `unlimited` run the following:
|
||||
```bash
|
||||
for pid in $(pidof icinga2); do cat /proc/$pid/limits; done
|
||||
```
|
||||
And look for the line:
|
||||
```
|
||||
Max core file size unlimited unlimited bytes
|
||||
```
|
||||
|
||||
#### MacOS <a id="development-debug-core-dump-macos"></a>
|
||||
|
||||
```bash
|
||||
sysctl -w kern.corefile=/cores/core.%P
|
||||
|
||||
chmod 777 /cores
|
||||
```
|
||||
|
||||
@ -537,7 +594,7 @@ packages.
|
||||
If you encounter a problem, please [open a new issue](https://github.com/Icinga/icinga2/issues/new/choose)
|
||||
on GitHub and mention that you're testing the snapshot packages.
|
||||
|
||||
#### RHEL/CentOS <a id="development-tests-snapshot-packages-rhel"></a>
|
||||
#### RHEL <a id="development-tests-snapshot-packages-rhel"></a>
|
||||
|
||||
2.11+ requires the EPEL repository for Boost 1.66+.
|
||||
|
||||
@ -683,7 +740,7 @@ these tools:
|
||||
- vim
|
||||
- CLion (macOS, Linux)
|
||||
- MS Visual Studio (Windows)
|
||||
- Atom
|
||||
- Emacs
|
||||
|
||||
Editors differ on the functionality. The more helpers you get for C++ development,
|
||||
the faster your development workflow will be.
|
||||
@ -741,12 +798,12 @@ perfdata | Performance data related, including Graphite, Elastic, etc.
|
||||
db\_ido | IDO database abstraction layer.
|
||||
db\_ido\_mysql | IDO database driver for MySQL.
|
||||
db\_ido\_pgsql | IDO database driver for PgSQL.
|
||||
mysql\_shin | Library stub for linking against the MySQL client libraries.
|
||||
mysql\_shim | Library stub for linking against the MySQL client libraries.
|
||||
pgsql\_shim | Library stub for linking against the PgSQL client libraries.
|
||||
|
||||
#### Class Compiler <a id="development-develop-design-patterns-class-compiler"></a>
|
||||
|
||||
Another thing you will recognize are the `.ti` files which are compiled
|
||||
Something else you might notice are the `.ti` files which are compiled
|
||||
by our own class compiler into actual source code. The meta language allows
|
||||
developers to easily add object attributes and specify their behaviour.
|
||||
|
||||
@ -792,17 +849,18 @@ The most common benefits:
|
||||
|
||||
#### Unity Builds <a id="development-develop-builds-unity-builds"></a>
|
||||
|
||||
Another thing you should be aware of: Unity builds on and off.
|
||||
You should be aware that by default unity builds are enabled. You can turn them
|
||||
off by setting the `ICINGA2_UNITY_BUILD` CMake option to `OFF`.
|
||||
|
||||
Typically, we already use caching mechanisms to reduce recompile time with ccache.
|
||||
For release builds, there's always a new build needed as the difference is huge compared
|
||||
to a previous (major) release.
|
||||
|
||||
Therefore we've invented the Unity builds, which basically concatenates all source files
|
||||
into one big library source code file. The compiler then doesn't need to load the many small
|
||||
files but compiles and links this huge one.
|
||||
Unity builds basically concatenate all source files into one big library source code file.
|
||||
The compiler then doesn't need to load many small files, each with all of their includes,
|
||||
but compiles and links only a few huge ones.
|
||||
|
||||
Unity builds require more memory which is why you should disable them for development
|
||||
However, unity builds require more memory which is why you should disable them for development
|
||||
builds in small sized VMs (Linux, Windows) and also Docker containers.
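
For a local development build this typically boils down to something like the following sketch (run from a build directory next to the sources; adjust options to your setup):

```bash
# Debug build with unity builds disabled to keep memory usage low.
mkdir -p debug && cd debug
cmake .. -DCMAKE_BUILD_TYPE=Debug -DICINGA2_UNITY_BUILD=OFF
make -j2
```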
|
||||
|
||||
There's a couple of header files which are included everywhere. If you touch/edit them,
|
||||
@ -1228,7 +1286,7 @@ every second.
|
||||
|
||||
Avoid log messages which could irritate the user. During
|
||||
implementation, developers can change log levels to better
|
||||
see what's going one, but remember to change this back to `debug`
|
||||
see what's going on, but remember to change this back to `debug`
|
||||
or remove it entirely.
|
||||
|
||||
|
||||
@ -1332,9 +1390,6 @@ autocmd BufWinLeave * call clearmatches()
|
||||
|
||||
### Linux Dev Environment <a id="development-linux-dev-env"></a>
|
||||
|
||||
Based on CentOS 7, we have an early draft available inside the Icinga Vagrant boxes:
|
||||
[centos7-dev](https://github.com/Icinga/icinga-vagrant/tree/master/centos7-dev).
|
||||
|
||||
If you're compiling Icinga 2 natively without any virtualization layer in between,
|
||||
this usually is faster. This is also the reason why developers on macOS prefer native builds
|
||||
over Linux or Windows VMs. Don't forget to test the actual code on Linux later! Socket specific
|
||||
@ -1357,21 +1412,20 @@ mkdir -p release debug
|
||||
Proceed with the specific distribution examples below. Keep in mind that these instructions
|
||||
are best effort and sometimes out-of-date. Git Master may contain updates.
|
||||
|
||||
* [CentOS 7](21-development.md#development-linux-dev-env-centos)
|
||||
* [Fedora 40](21-development.md#development-linux-dev-env-fedora)
|
||||
* [Debian 10 Buster](21-development.md#development-linux-dev-env-debian)
|
||||
* [Ubuntu 18 Bionic](21-development.md#development-linux-dev-env-ubuntu)
|
||||
|
||||
|
||||
#### CentOS 7 <a id="development-linux-dev-env-centos"></a>
|
||||
#### Fedora 40 <a id="development-linux-dev-env-fedora"></a>
|
||||
|
||||
```bash
|
||||
yum -y install gdb vim git bash-completion htop centos-release-scl
|
||||
yum -y install gdb vim git bash-completion htop
|
||||
|
||||
yum -y install rpmdevtools ccache \
|
||||
cmake make devtoolset-11-gcc-c++ flex bison \
|
||||
openssl-devel boost169-devel systemd-devel \
|
||||
cmake make gcc-c++ flex bison \
|
||||
openssl-devel boost-devel systemd-devel \
|
||||
mysql-devel postgresql-devel libedit-devel \
|
||||
devtoolset-11-libstdc++-devel
|
||||
libstdc++-devel
|
||||
|
||||
groupadd icinga
|
||||
groupadd icingacmd
|
||||
@ -1389,47 +1443,42 @@ slower but allows for better debugging insights.
|
||||
For benchmarks, change `CMAKE_BUILD_TYPE` to `RelWithDebInfo` and
|
||||
build inside the `release` directory.
|
||||
|
||||
First, off export some generics for Boost.
|
||||
First, override the default prefix path.
|
||||
|
||||
```bash
|
||||
export I2_BOOST="-DBoost_NO_BOOST_CMAKE=TRUE -DBoost_NO_SYSTEM_PATHS=TRUE -DBOOST_LIBRARYDIR=/usr/lib64/boost169 -DBOOST_INCLUDEDIR=/usr/include/boost169 -DBoost_ADDITIONAL_VERSIONS='1.69;1.69.0'"
|
||||
export I2_GENERIC="-DCMAKE_INSTALL_PREFIX=/usr/local/icinga2"
|
||||
```
|
||||
|
||||
Second, add the prefix path to it.
|
||||
|
||||
```bash
|
||||
export I2_GENERIC="$I2_BOOST -DCMAKE_INSTALL_PREFIX=/usr/local/icinga2"
|
||||
```
|
||||
|
||||
Third, define the two build types with their specific CMake variables.
|
||||
Second, define the two build types with their specific CMake variables.
|
||||
|
||||
```bash
|
||||
export I2_DEBUG="-DCMAKE_BUILD_TYPE=Debug -DICINGA2_UNITY_BUILD=OFF $I2_GENERIC"
|
||||
export I2_RELEASE="-DCMAKE_BUILD_TYPE=RelWithDebInfo -DICINGA2_WITH_TESTS=ON -DICINGA2_UNITY_BUILD=ON $I2_GENERIC"
|
||||
```
|
||||
|
||||
Fourth, depending on your likings, you may add a bash alias for building,
|
||||
Third, depending on your likings, you may use a bash alias for building,
|
||||
or invoke the commands inside:
|
||||
|
||||
```bash
|
||||
alias i2_debug="cd /root/icinga2; mkdir -p debug; cd debug; scl enable devtoolset-11 -- cmake $I2_DEBUG ..; make -j2; sudo make -j2 install; cd .."
|
||||
alias i2_release="cd /root/icinga2; mkdir -p release; cd release; scl enable devtoolset-11 -- cmake $I2_RELEASE ..; make -j2; sudo make -j2 install; cd .."
|
||||
alias i2_debug="cd /root/icinga2; mkdir -p debug; cd debug; cmake $I2_DEBUG ..; make -j2; sudo make -j2 install; cd .."
|
||||
alias i2_release="cd /root/icinga2; mkdir -p release; cd release; cmake $I2_RELEASE ..; make -j2; sudo make -j2 install; cd .."
|
||||
```
|
||||
|
||||
This is taken from the [centos7-dev](https://github.com/Icinga/icinga-vagrant/tree/master/centos7-dev) Vagrant box.
|
||||
|
||||
```bash
|
||||
i2_debug
|
||||
```
|
||||
|
||||
The source installation doesn't set proper permissions, this is
|
||||
handled in the package builds which are officially supported.
|
||||
|
||||
```bash
|
||||
chown -R icinga:icinga /usr/local/icinga2/var/
|
||||
chown -R icinga:icinga /usr/local/icinga2/{etc,var}/
|
||||
|
||||
/usr/local/icinga2/lib/icinga2/prepare-dirs /usr/local/icinga2/etc/sysconfig/icinga2
|
||||
/usr/local/icinga2/sbin/icinga2 api setup
|
||||
vim /usr/local/icinga2/etc/icinga2/conf.d/api-users.conf
|
||||
|
||||
/usr/local/icinga2/lib/icinga2/sbin/icinga2 daemon
|
||||
/usr/local/icinga2/lib64/icinga2/sbin/icinga2 daemon
|
||||
```
|
||||
|
||||
#### Debian 10 <a id="development-linux-dev-env-debian"></a>
|
||||
@ -1476,7 +1525,7 @@ The source installation doesn't set proper permissions, this is
|
||||
handled in the package builds which are officially supported.
|
||||
|
||||
```bash
|
||||
chown -R icinga:icinga /usr/local/icinga2/var/
|
||||
chown -R icinga:icinga /usr/local/icinga2/{etc,var}/
|
||||
|
||||
/usr/local/icinga2/lib/icinga2/prepare-dirs /usr/local/icinga2/etc/sysconfig/icinga2
|
||||
/usr/local/icinga2/sbin/icinga2 api setup
|
||||
@ -1540,7 +1589,7 @@ The source installation doesn't set proper permissions, this is
|
||||
handled in the package builds which are officially supported.
|
||||
|
||||
```bash
|
||||
chown -R icinga:icinga /usr/local/icinga2/var/
|
||||
chown -R icinga:icinga /usr/local/icinga2/{etc,var}/
|
||||
|
||||
/usr/local/icinga2/lib/icinga2/prepare-dirs /usr/local/icinga2/etc/sysconfig/icinga2
|
||||
/usr/local/icinga2/sbin/icinga2 api setup
|
||||
@ -1745,10 +1794,12 @@ and don't care for the details,
|
||||
|
||||
1. ensure there are 35 GB free space on C:
|
||||
2. run the following in an administrative Powershell:
|
||||
1. `Enable-WindowsOptionalFeature -FeatureName "NetFx3" -Online`
|
||||
(reboot when asked!)
|
||||
2. `powershell -NoProfile -ExecutionPolicy Bypass -Command "Invoke-Expression (New-Object Net.WebClient).DownloadString('https://raw.githubusercontent.com/Icinga/icinga2/master/doc/win-dev.ps1')"`
|
||||
(will take some time)
|
||||
1. Windows Server only:
|
||||
`Enable-WindowsOptionalFeature -FeatureName NetFx3ServerFeatures -Online`
|
||||
2. `Enable-WindowsOptionalFeature -FeatureName NetFx3 -Online`
|
||||
(reboot when asked!)
|
||||
3. `powershell -NoProfile -ExecutionPolicy Bypass -Command "Invoke-Expression (New-Object Net.WebClient).DownloadString('https://raw.githubusercontent.com/Icinga/icinga2/master/doc/win-dev.ps1')"`
|
||||
(will take some time)
|
||||
|
||||
This installs everything needed for cloning and building Icinga 2
|
||||
on the command line (Powershell) as follows:
|
||||
@ -1935,7 +1986,7 @@ Download the [boost-binaries](https://sourceforge.net/projects/boost/files/boost
|
||||
- 64 for 64 bit builds
|
||||
|
||||
```
|
||||
https://sourceforge.net/projects/boost/files/boost-binaries/1.82.0/boost_1_85_0-msvc-14.2-64.exe/download
|
||||
https://sourceforge.net/projects/boost/files/boost-binaries/1.85.0/boost_1_85_0-msvc-14.2-64.exe/download
|
||||
```
|
||||
|
||||
Run the installer and leave the default installation path in `C:\local\boost_1_85_0`.
|
||||
@ -2203,7 +2254,7 @@ Icinga application using a dist tarball (including notes for distributions):
|
||||
* Debian/Ubuntu: libpq-dev
|
||||
* postgresql-dev on Alpine
|
||||
* libedit (CLI console)
|
||||
* RHEL/Fedora: libedit-devel on CentOS (RHEL requires rhel-7-server-optional-rpms)
|
||||
* RHEL/Fedora: libedit-devel (RHEL requires rhel-7-server-optional-rpms)
|
||||
* Debian/Ubuntu/Alpine: libedit-dev
|
||||
* Termcap (only required if libedit doesn't already link against termcap/ncurses)
|
||||
* RHEL/Fedora: libtermcap-devel
|
||||
@ -2269,7 +2320,7 @@ cmake .. -DCMAKE_INSTALL_PREFIX=/tmp/icinga2
|
||||
|
||||
### CMake Variables <a id="development-package-builds-cmake-variables"></a>
|
||||
|
||||
In addition to `CMAKE_INSTALL_PREFIX` here are most of the supported Icinga-specific cmake variables.
|
||||
In addition to `CMAKE_INSTALL_PREFIX` here are most of the supported Icinga-specific CMake variables.
|
||||
|
||||
For all variables regarding default paths in CMake, see
|
||||
[GNUInstallDirs](https://cmake.org/cmake/help/latest/module/GNUInstallDirs.html).
|
||||
@ -2283,12 +2334,12 @@ Also see `CMakeLists.txt` for details.
|
||||
* `ICINGA2_CONFIGDIR`: Main config directory; defaults to `CMAKE_INSTALL_SYSCONFDIR/icinga2` usually `/etc/icinga2`
|
||||
* `ICINGA2_CACHEDIR`: Directory for cache files; defaults to `CMAKE_INSTALL_LOCALSTATEDIR/cache/icinga2` usually `/var/cache/icinga2`
|
||||
* `ICINGA2_DATADIR`: Data directory for the daemon; defaults to `CMAKE_INSTALL_LOCALSTATEDIR/lib/icinga2` usually `/var/lib/icinga2`
|
||||
* `ICINGA2_LOGDIR`: Logfiles of the daemon; defaults to `CMAKE_INSTALL_LOCALSTATEDIR/log/icinga2 usually `/var/log/icinga2`
|
||||
* `ICINGA2_LOGDIR`: Logfiles of the daemon; defaults to `CMAKE_INSTALL_LOCALSTATEDIR/log/icinga2` usually `/var/log/icinga2`
|
||||
* `ICINGA2_SPOOLDIR`: Spooling directory ; defaults to `CMAKE_INSTALL_LOCALSTATEDIR/spool/icinga2` usually `/var/spool/icinga2`
|
||||
* `ICINGA2_INITRUNDIR`: Runtime data for the init system; defaults to `CMAKE_INSTALL_LOCALSTATEDIR/run/icinga2` usually `/run/icinga2`
|
||||
* `ICINGA2_GIT_VERSION_INFO`: Whether to use Git to determine the version number; defaults to `ON`
|
||||
* `ICINGA2_USER`: The user Icinga 2 should run as; defaults to `icinga`
|
||||
* `ICINGA2_GROUP`: The group Icinga 2 should run as; defaults to `icinga`
|
||||
* `ICINGA2_USER`: The user or user-id Icinga 2 should run as; defaults to `icinga`
|
||||
* `ICINGA2_GROUP`: The group or group-id Icinga 2 should run as; defaults to `icinga`
|
||||
* `ICINGA2_COMMAND_GROUP`: The command group Icinga 2 should use; defaults to `icingacmd`
|
||||
* `ICINGA2_SYSCONFIGFILE`: Where to put the config file the initscript/systemd pulls it's dirs from;
|
||||
* defaults to `CMAKE_INSTALL_PREFIX/etc/sysconfig/icinga2`
|
||||
@ -2343,7 +2394,7 @@ for implementation details.
|
||||
|
||||
CMake determines the Icinga 2 version number using `git describe` if the
|
||||
source directory is contained in a Git repository. Otherwise the version number
|
||||
is extracted from the [ICINGA2_VERSION](ICINGA2_VERSION) file. This behavior can be
|
||||
is extracted from the `ICINGA2_VERSION` file. This behavior can be
|
||||
overridden by creating a file called `icinga-version.h.force` in the source
|
||||
directory. Alternatively the `-DICINGA2_GIT_VERSION_INFO=OFF` option for CMake
|
||||
can be used to disable the usage of `git describe`.
|
||||
@ -2351,7 +2402,7 @@ can be used to disable the usage of `git describe`.
|
||||
|
||||
### Building RPMs <a id="development-package-builds-rpms"></a>
|
||||
|
||||
#### Build Environment on RHEL, CentOS, Fedora, Amazon Linux
|
||||
#### Build Environment on RHEL, Fedora, Amazon Linux
|
||||
|
||||
Setup your build environment:
|
||||
|
||||
@ -2407,7 +2458,7 @@ spectool -g ../SPECS/icinga2.spec
|
||||
cd $HOME/rpmbuild
|
||||
```
|
||||
|
||||
Install the build dependencies. Example for CentOS 7:
|
||||
Install the build dependencies:
|
||||
|
||||
```bash
|
||||
yum -y install libedit-devel ncurses-devel gcc-c++ libstdc++-devel openssl-devel \
|
||||
@ -2436,21 +2487,9 @@ rpmbuild -ba SPECS/icinga2.spec
|
||||
The following packages are required to build the SELinux policy module:
|
||||
|
||||
* checkpolicy
|
||||
* selinux-policy (selinux-policy on CentOS 6, selinux-policy-devel on CentOS 7)
|
||||
* selinux-policy-devel
|
||||
* selinux-policy-doc
|
||||
|
||||
##### RHEL/CentOS 7
|
||||
|
||||
The RedHat Developer Toolset is required for building Icinga 2 beforehand.
|
||||
This contains a C++ compiler which supports C++17 features.
|
||||
|
||||
```bash
|
||||
yum install centos-release-scl
|
||||
```
|
||||
|
||||
Dependencies to devtools-11 are used in the RPM SPEC, so the correct tools
|
||||
should be used for building.
|
||||
|
||||
##### Amazon Linux
|
||||
|
||||
If you prefer to build packages offline, a suitable Vagrant box is located
|
||||
@ -2541,7 +2580,7 @@ chmod +x /etc/init.d/icinga2
|
||||
|
||||
Icinga 2 reads a single configuration file which is used to specify all
|
||||
configuration settings (global settings, hosts, services, etc.). The
|
||||
configuration format is explained in detail in the [doc/](doc/) directory.
|
||||
configuration format is explained in detail in the `doc/` directory.
|
||||
|
||||
By default `make install` installs example configuration files in
|
||||
`/usr/local/etc/icinga2` unless you have specified a different prefix or
|
||||
|
@ -116,19 +116,19 @@ The policy provides a role `icinga2adm_r` for confining an user which enables an
|
||||
|
||||
SELinux is based on the least level of access required for a service to run. Using booleans you can grant more access in a defined way. The Icinga 2 policy package provides the following booleans.
|
||||
|
||||
**icinga2_can_connect_all**
|
||||
**icinga2_can_connect_all**
|
||||
|
||||
Having this boolean enabled allows icinga2 to connect to all ports. This can be necessary if you use features which connect to unconfined services, for example the [influxdb writer](14-features.md#influxdb-writer).
|
||||
|
||||
**icinga2_run_sudo**
|
||||
**icinga2_run_sudo**
|
||||
|
||||
To allow Icinga 2 executing plugins via sudo you can toogle this boolean. It is disabled by default, resulting in error messages like `execvpe(sudo) failed: Permission denied`.
|
||||
To allow Icinga 2 executing plugins via sudo you can toggle this boolean. It is disabled by default, resulting in error messages like `execvpe(sudo) failed: Permission denied`.
|
||||
|
||||
**httpd_can_write_icinga2_command**
|
||||
**httpd_can_write_icinga2_command**
|
||||
|
||||
To allow httpd to write to the command pipe of icinga2 this boolean has to be enabled. This is enabled by default, if not needed you can disable it for more security.
|
||||
|
||||
**httpd_can_connect_icinga2_api**
|
||||
**httpd_can_connect_icinga2_api**
|
||||
|
||||
Enabling this boolean allows httpd to connect to the API of icinga2 (Ports labeled `icinga2_port_t`). This is enabled by default, if not needed you can disable it for more security.
|
||||
|
||||
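To inspect the current state of these booleans on a system with the policy module installed, the standard SELinux tooling can be used (a sketch; requires root and the usual `policycoreutils` utilities):

```bash
# Show the current value of each boolean provided by the Icinga 2 policy package.
getsebool icinga2_can_connect_all icinga2_run_sudo \
  httpd_can_write_icinga2_command httpd_can_connect_icinga2_api
```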
@ -204,7 +204,7 @@ If you restart the daemon now it will successfully connect to graphite.
|
||||
|
||||
#### Running plugins requiring sudo <a id="selinux-policy-examples-sudo"></a>
|
||||
|
||||
Some plugins require privileged access to the system and are designied to be executed via `sudo` to get these privileges.
|
||||
Some plugins require privileged access to the system and are designed to be executed via `sudo` to get these privileges.
|
||||
|
||||
In this case it is the CheckCommand [running_kernel](10-icinga-template-library.md#plugin-contrib-command-running_kernel) which is set to use `sudo`.
|
||||
|
||||
@ -219,7 +219,7 @@ In this case it is the CheckCommand [running_kernel](10-icinga-template-library.
|
||||
assign where host.name == NodeName
|
||||
}
|
||||
|
||||
Having this Service defined will result in a UNKNOWN state and the error message `execvpe(sudo) failed: Permission denied` because SELinux dening the execution.
|
||||
Having this Service defined will result in an UNKNOWN state and the error message `execvpe(sudo) failed: Permission denied` because SELinux denies the execution.
|
||||
|
||||
Switching the boolean `icinga2_run_sudo` to allow the execution will result in the check being executed successfully (see the `setsebool` example below).
|
||||
|
||||
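A sketch of switching that boolean persistently with `setsebool`; the `-P` flag writes the change to the policy store so it survives reboots:

```bash
# Allow Icinga 2 to execute plugins via sudo; use "off" to revert.
setsebool -P icinga2_run_sudo on
```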
@ -229,7 +229,7 @@ Switching the boolean `icinga2_run_sudo` to allow the execution will result in t
|
||||
#### Confining a user <a id="selinux-policy-examples-user"></a>
|
||||
|
||||
If you want to have an administrative account capable of only managing icinga2 and not the complete system, you can restrict the privileges by confining
|
||||
this user. This is completly optional!
|
||||
this user. This is completely optional!
|
||||
|
||||
Start by adding the Icinga 2 administrator role `icinga2adm_r` to the administrative SELinux user `staff_u`.
|
||||
|
||||
@ -295,7 +295,7 @@ Failed to issue method call: Access denied
|
||||
|
||||
If you experience any problems while running in enforcing mode try to reproduce it in permissive mode. If the problem persists it is not related to SELinux because in permissive mode SELinux will not deny anything.
|
||||
|
||||
After some feedback Icinga 2 is now running in a enforced domain, but still adds also some rules for other necessary services so no problems should occure at all. But you can help to enhance the policy by testing Icinga 2 running confined by SELinux.
|
||||
After some feedback Icinga 2 is now running in an enforced domain, but still also adds some rules for other necessary services so no problems should occur at all. But you can help to enhance the policy by testing Icinga 2 running confined by SELinux.
|
||||
|
||||
Please add the following information to [bug reports](https://icinga.com/community/):
|
||||
|
||||
|
@ -1,4 +1,8 @@
|
||||
# Migration from Icinga 1.x <a id="migration"></a>
|
||||
# Migration from Icinga 1.x or Nagios <a id="migration"></a>
|
||||
|
||||
!!! note
|
||||
|
||||
Icinga 1.x was originally a fork of Nagios. The information provided here also applies to Nagios.
|
||||
|
||||
## Configuration Migration <a id="configuration-migration"></a>
|
||||
|
||||
@ -804,7 +808,7 @@ define service {
|
||||
}
|
||||
```
|
||||
|
||||
Icinga 2 supports objects and (global) variables, but does not make a difference
|
||||
Icinga 2 supports objects and (global) variables, but does not make a difference
|
||||
between the main configuration file or any other included file.
|
||||
|
||||
icinga2.conf:
|
||||
|
@ -692,4 +692,3 @@ the [servicegroups](24-appendix.md#schema-livestatus-servicegroups-table-attribu
|
||||
|
||||
All [services](24-appendix.md#schema-livestatus-services-table-attributes) table attributes grouped with
|
||||
the [hostgroups](24-appendix.md#schema-livestatus-hostgroups-table-attributes) table prefixed with `hostgroup_`.
|
||||
|
||||
|
@ -11,10 +11,10 @@ function ThrowOnNativeFailure {
|
||||
}
|
||||
|
||||
|
||||
$VsVersion = 2019
|
||||
$MsvcVersion = '14.2'
|
||||
$BoostVersion = @(1, 85, 0)
|
||||
$OpensslVersion = '3_0_14'
|
||||
$VsVersion = 2022
|
||||
$MsvcVersion = '14.3'
|
||||
$BoostVersion = @(1, 89, 0)
|
||||
$OpensslVersion = '3_0_16'
|
||||
|
||||
switch ($Env:BITS) {
|
||||
32 { }
|
||||
@ -74,7 +74,6 @@ try {
|
||||
if (-not $Env:GITHUB_ACTIONS) {
|
||||
choco install -y `
|
||||
"visualstudio${VsVersion}community" `
|
||||
"visualstudio${VsVersion}-workload-netcoretools" `
|
||||
"visualstudio${VsVersion}-workload-vctools" `
|
||||
"visualstudio${VsVersion}-workload-manageddesktop" `
|
||||
"visualstudio${VsVersion}-workload-nativedesktop" `
|
||||
@ -83,6 +82,7 @@ if (-not $Env:GITHUB_ACTIONS) {
|
||||
git `
|
||||
cmake `
|
||||
winflexbison3 `
|
||||
netfx-4.6-devpack `
|
||||
windows-sdk-8.1 `
|
||||
wixtoolset
|
||||
ThrowOnNativeFailure
|
||||
|
@ -165,13 +165,15 @@ if [ -n "$MAILFROM" ] ; then
|
||||
|
||||
## Debian/Ubuntu use mailutils which requires `-a` to append the header
|
||||
if [ -f /etc/debian_version ]; then
|
||||
/usr/bin/printf "%b" "$NOTIFICATION_MESSAGE" | $MAILBIN -a "From: $MAILFROM" -s "$SUBJECT" $USEREMAIL
|
||||
/usr/bin/printf "%b" "$NOTIFICATION_MESSAGE" | tr -d '\015' \
|
||||
| $MAILBIN -a "From: $MAILFROM" -s "$SUBJECT" $USEREMAIL
|
||||
## Other distributions (RHEL/SUSE/etc.) prefer mailx which sets a sender address with `-r`
|
||||
else
|
||||
/usr/bin/printf "%b" "$NOTIFICATION_MESSAGE" | $MAILBIN -r "$MAILFROM" -s "$SUBJECT" $USEREMAIL
|
||||
/usr/bin/printf "%b" "$NOTIFICATION_MESSAGE" | tr -d '\015' \
|
||||
| $MAILBIN -r "$MAILFROM" -s "$SUBJECT" $USEREMAIL
|
||||
fi
|
||||
|
||||
else
|
||||
/usr/bin/printf "%b" "$NOTIFICATION_MESSAGE" \
|
||||
/usr/bin/printf "%b" "$NOTIFICATION_MESSAGE" | tr -d '\015' \
|
||||
| $MAILBIN -s "$SUBJECT" $USEREMAIL
|
||||
fi
|
||||
|
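The added `tr -d '\015'` strips carriage returns from the generated message before it reaches the mail binary, so CRLF line endings cannot mangle multi-line bodies. A quick stand-alone check of the same pipeline (the message text here is made up):

```bash
# The octal dump shows that no \r characters remain in the piped body.
printf "Host is DOWN\r\nAddress: 192.0.2.10\r\n" | tr -d '\015' | od -c
```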
@ -178,13 +178,15 @@ if [ -n "$MAILFROM" ] ; then
|
||||
|
||||
## Debian/Ubuntu use mailutils which requires `-a` to append the header
|
||||
if [ -f /etc/debian_version ]; then
|
||||
/usr/bin/printf "%b" "$NOTIFICATION_MESSAGE" | $MAILBIN -a "From: $MAILFROM" -s "$SUBJECT" $USEREMAIL
|
||||
/usr/bin/printf "%b" "$NOTIFICATION_MESSAGE" | tr -d '\015' \
|
||||
| $MAILBIN -a "From: $MAILFROM" -s "$SUBJECT" $USEREMAIL
|
||||
## Other distributions (RHEL/SUSE/etc.) prefer mailx which sets a sender address with `-r`
|
||||
else
|
||||
/usr/bin/printf "%b" "$NOTIFICATION_MESSAGE" | $MAILBIN -r "$MAILFROM" -s "$SUBJECT" $USEREMAIL
|
||||
/usr/bin/printf "%b" "$NOTIFICATION_MESSAGE" | tr -d '\015' \
|
||||
| $MAILBIN -r "$MAILFROM" -s "$SUBJECT" $USEREMAIL
|
||||
fi
|
||||
|
||||
else
|
||||
/usr/bin/printf "%b" "$NOTIFICATION_MESSAGE" \
|
||||
/usr/bin/printf "%b" "$NOTIFICATION_MESSAGE" | tr -d '\015' \
|
||||
| $MAILBIN -s "$SUBJECT" $USEREMAIL
|
||||
fi
|
||||
|
@ -19,7 +19,7 @@ set_target_properties (
|
||||
FOLDER Lib
|
||||
)
|
||||
|
||||
include_directories(${Boost_INCLUDE_DIRS})
|
||||
include_directories(SYSTEM ${Boost_INCLUDE_DIRS})
|
||||
|
||||
if(ICINGA2_WITH_CHECKER)
|
||||
list(APPEND icinga_app_SOURCES $<TARGET_OBJECTS:checker>)
|
||||
@ -95,6 +95,8 @@ install(
|
||||
RUNTIME DESTINATION ${InstallPath}
|
||||
)
|
||||
|
||||
install(CODE "file(MAKE_DIRECTORY \"\$ENV{DESTDIR}${ICINGA2_FULL_LOGDIR}\")")
|
||||
install(CODE "file(MAKE_DIRECTORY \"\$ENV{DESTDIR}${ICINGA2_FULL_DATADIR}\")")
|
||||
install(CODE "file(MAKE_DIRECTORY \"\$ENV{DESTDIR}${ICINGA2_FULL_INITRUNDIR}\")")
|
||||
if(NOT WIN32)
|
||||
install(CODE "file(MAKE_DIRECTORY \"\$ENV{DESTDIR}${ICINGA2_FULL_LOGDIR}\")")
|
||||
install(CODE "file(MAKE_DIRECTORY \"\$ENV{DESTDIR}${ICINGA2_FULL_DATADIR}\")")
|
||||
install(CODE "file(MAKE_DIRECTORY \"\$ENV{DESTDIR}${ICINGA2_FULL_INITRUNDIR}\")")
|
||||
endif()
|
||||
|
@ -401,7 +401,7 @@ static int Main()
|
||||
#endif /* _WIN32 */
|
||||
|
||||
if (vm.count("define")) {
|
||||
for (const String& define : vm["define"].as<std::vector<std::string> >()) {
|
||||
for (String define : vm["define"].as<std::vector<std::string>>()) {
|
||||
String key, value;
|
||||
size_t pos = define.FindFirstOf('=');
|
||||
if (pos != String::NPos) {
|
||||
@ -420,12 +420,10 @@ static int Main()
|
||||
|
||||
for (size_t i = 1; i < keyTokens.size(); i++) {
|
||||
std::unique_ptr<IndexerExpression> indexerExpr{new IndexerExpression(std::move(expr), MakeLiteral(keyTokens[i]))};
|
||||
indexerExpr->SetOverrideFrozen();
|
||||
expr = std::move(indexerExpr);
|
||||
}
|
||||
|
||||
std::unique_ptr<SetExpression> setExpr{new SetExpression(std::move(expr), OpSetLiteral, MakeLiteral(value))};
|
||||
setExpr->SetOverrideFrozen();
|
||||
|
||||
try {
|
||||
ScriptFrame frame(true);
|
||||
@ -460,7 +458,7 @@ static int Main()
|
||||
ConfigCompiler::AddIncludeSearchDir(Configuration::IncludeConfDir);
|
||||
|
||||
if (!autocomplete && vm.count("include")) {
|
||||
for (const String& includePath : vm["include"].as<std::vector<std::string> >()) {
|
||||
for (String includePath : vm["include"].as<std::vector<std::string>>()) {
|
||||
ConfigCompiler::AddIncludeSearchDir(includePath);
|
||||
}
|
||||
}
|
||||
@ -571,42 +569,60 @@ static int Main()
|
||||
} else if (command && command->GetImpersonationLevel() == ImpersonateIcinga) {
|
||||
String group = Configuration::RunAsGroup;
|
||||
String user = Configuration::RunAsUser;
|
||||
gid_t gid = 0;
|
||||
|
||||
errno = 0;
|
||||
struct group *gr = getgrnam(group.CStr());
|
||||
|
||||
if (!gr) {
|
||||
if (errno == 0) {
|
||||
Log(LogCritical, "cli")
|
||||
<< "Invalid group specified: " << group;
|
||||
return EXIT_FAILURE;
|
||||
} else {
|
||||
Log(LogCritical, "cli")
|
||||
<< "getgrnam() failed with error code " << errno << ", \"" << Utility::FormatErrorNumber(errno) << "\"";
|
||||
try {
|
||||
gid = boost::lexical_cast<gid_t>(group);
|
||||
} catch (const boost::bad_lexical_cast&) {
|
||||
struct group* gr = getgrnam(group.CStr());
|
||||
if (!gr) {
|
||||
if (errno == 0) {
|
||||
Log(LogCritical, "cli")
|
||||
<< "Invalid group specified: " << group;
|
||||
} else {
|
||||
Log(LogCritical, "cli")
|
||||
<< "getgrnam() failed with error code " << errno << ", \"" << Utility::FormatErrorNumber(errno) << "\"";
|
||||
}
|
||||
return EXIT_FAILURE;
|
||||
}
|
||||
|
||||
gid = gr->gr_gid;
|
||||
}
|
||||
|
||||
if (getgid() != gr->gr_gid) {
|
||||
if (getgid() != gid) {
|
||||
if (!vm.count("reload-internal") && setgroups(0, nullptr) < 0) {
|
||||
Log(LogCritical, "cli")
|
||||
<< "setgroups() failed with error code " << errno << ", \"" << Utility::FormatErrorNumber(errno) << "\"";
|
||||
Log(LogCritical, "cli")
|
||||
<< "Please re-run this command as a privileged user or using the \"" << user << "\" account.";
|
||||
<< "Please rerun this command as a privileged user or using the \"" << user << "\" account.";
|
||||
return EXIT_FAILURE;
|
||||
}
|
||||
|
||||
if (setgid(gr->gr_gid) < 0) {
|
||||
if (setgid(gid) < 0) {
|
||||
Log(LogCritical, "cli")
|
||||
<< "setgid() failed with error code " << errno << ", \"" << Utility::FormatErrorNumber(errno) << "\"";
|
||||
Log(LogCritical, "cli")
|
||||
<< "Please rerun this command as a privileged user or using the \"" << user << "\" account.";
|
||||
return EXIT_FAILURE;
|
||||
}
|
||||
}
|
||||
|
||||
errno = 0;
|
||||
struct passwd *pw = getpwnam(user.CStr());
|
||||
std::optional<uid_t> uid;
|
||||
struct passwd *pw = nullptr;
|
||||
|
||||
if (!pw) {
|
||||
errno = 0;
|
||||
try {
|
||||
uid = boost::lexical_cast<uid_t>(user);
|
||||
pw = getpwuid(*uid);
|
||||
} catch (const boost::bad_lexical_cast&) {
|
||||
pw = getpwnam(user.CStr());
|
||||
if (pw) {
|
||||
uid = pw->pw_uid;
|
||||
}
|
||||
}
|
||||
|
||||
if (!uid) {
|
||||
if (errno == 0) {
|
||||
Log(LogCritical, "cli")
|
||||
<< "Invalid user specified: " << user;
|
||||
@ -619,20 +635,22 @@ static int Main()
|
||||
}
|
||||
|
||||
// also activate the additional groups the configured user is member of
|
||||
if (getuid() != pw->pw_uid) {
|
||||
if (!vm.count("reload-internal") && initgroups(user.CStr(), pw->pw_gid) < 0) {
|
||||
if (getuid() != *uid) {
|
||||
// initgroups() is only called when either getpwuid() or getpwnam() returned a valid user entry.
|
||||
// Otherwise it makes no sense to set any additional groups.
|
||||
if (!vm.count("reload-internal") && pw && initgroups(user.CStr(), pw->pw_gid) < 0) {
|
||||
Log(LogCritical, "cli")
|
||||
<< "initgroups() failed with error code " << errno << ", \"" << Utility::FormatErrorNumber(errno) << "\"";
|
||||
Log(LogCritical, "cli")
|
||||
<< "Please re-run this command as a privileged user or using the \"" << user << "\" account.";
|
||||
<< "Please rerun this command as a privileged user or using the \"" << user << "\" account.";
|
||||
return EXIT_FAILURE;
|
||||
}
|
||||
|
||||
if (setuid(pw->pw_uid) < 0) {
|
||||
if (setuid(*uid) < 0) {
|
||||
Log(LogCritical, "cli")
|
||||
<< "setuid() failed with error code " << errno << ", \"" << Utility::FormatErrorNumber(errno) << "\"";
|
||||
Log(LogCritical, "cli")
|
||||
<< "Please re-run this command as a privileged user or using the \"" << user << "\" account.";
|
||||
<< "Please rerun this command as a privileged user or using the \"" << user << "\" account.";
|
||||
return EXIT_FAILURE;
|
||||
}
|
||||
}
|
||||
|
@ -19,6 +19,10 @@ set_target_properties(
|
||||
FOLDER Bin
|
||||
OUTPUT_NAME icinga2-installer
|
||||
LINK_FLAGS "/SUBSYSTEM:WINDOWS"
|
||||
|
||||
# Use a statically-linked runtime library as this binary is run during the installation process where the other DLLs
|
||||
# may not have been installed already and the system-provided version may be too old.
|
||||
MSVC_RUNTIME_LIBRARY "MultiThreaded$<$<CONFIG:Debug>:Debug>"
|
||||
)
|
||||
|
||||
target_link_libraries(icinga-installer shlwapi)
|
||||
|
@ -24,6 +24,10 @@ template CheckCommand "ping-common" {
|
||||
value = "$ping_address$"
|
||||
description = "host to ping"
|
||||
}
|
||||
"--extra-opts" = {
|
||||
value = "$ping_extra_opts$"
|
||||
description = "Read extra plugin options from an ini file."
|
||||
}
|
||||
"-w" = {
|
||||
value = "$ping_wrta$,$ping_wpl$%"
|
||||
description = "warning threshold pair"
|
||||
@ -101,6 +105,10 @@ template CheckCommand "fping-common" {
|
||||
]
|
||||
|
||||
arguments = {
|
||||
"--extra-opts" = {
|
||||
value = "$fping_extra_opts$"
|
||||
description = "Read extra plugin options from an ini file."
|
||||
}
|
||||
"-w" = {
|
||||
value = "$fping_wrta$,$fping_wpl$%"
|
||||
description = "warning threshold pair"
|
||||
@ -143,6 +151,13 @@ template CheckCommand "fping-common" {
|
||||
vars.fping_interval = 500
|
||||
}
|
||||
|
||||
object CheckCommand "fping" {
|
||||
import "fping-common"
|
||||
import "ipv4-or-ipv6"
|
||||
|
||||
vars.fping_address = "$check_address$"
|
||||
}
|
||||
|
||||
object CheckCommand "fping4" {
|
||||
import "fping-common"
|
||||
|
||||
@ -169,6 +184,10 @@ object CheckCommand "tcp" {
|
||||
value = "$tcp_address$"
|
||||
description = "Host name, IP Address, or unix socket (must be an absolute path)."
|
||||
}
|
||||
"--extra-opts" = {
|
||||
value = "$tcp_extra_opts$"
|
||||
description = "Read extra plugin options from an ini file."
|
||||
}
|
||||
"-p" = {
|
||||
value = "$tcp_port$"
|
||||
description = "The TCP port number."
|
||||
@ -276,6 +295,10 @@ object CheckCommand "ssl" {
|
||||
value = "$ssl_address$"
|
||||
description = "Host address"
|
||||
}
|
||||
"--extra-opts" = {
|
||||
value = "$ssl_extra_opts$"
|
||||
description = "Read extra plugin options from an ini file."
|
||||
}
|
||||
"-p" = {
|
||||
value = "$ssl_port$"
|
||||
description ="TCP port (default: 443)"
|
||||
@ -321,6 +344,10 @@ object CheckCommand "udp" {
|
||||
]
|
||||
|
||||
arguments = {
|
||||
"--extra-opts" = {
|
||||
value = "$udp_extra_opts$"
|
||||
description = "Read extra plugin options from an ini file."
|
||||
}
|
||||
"-s" = {
|
||||
value = "$udp_send$"
|
||||
required = true
|
||||
@ -360,6 +387,11 @@ object CheckCommand "http" {
|
||||
value = "$http_vhost$"
|
||||
description = "Host name argument for servers using host headers (virtual host)"
|
||||
}
|
||||
"--extra-opts" = {
|
||||
set_if = {{ string(macro("$http_extra_opts$")) != "" }}
|
||||
value = "$http_extra_opts$"
|
||||
description = "Read extra plugin options from an ini file"
|
||||
}
|
||||
"-I" = {
|
||||
set_if = {{ string(macro("$http_address$")) != "" }}
|
||||
value = "$http_address$"
|
||||
@ -419,12 +451,16 @@ object CheckCommand "http" {
|
||||
}
|
||||
"--sni" = {
|
||||
set_if = "$http_sni$"
|
||||
description = "Enable SSL/TLS hostname extension support (SNI)"
|
||||
description = "Enable SSL/TLS hostname extension support (SNI). This is (normally) the default in modern setups"
|
||||
}
|
||||
"-C" = {
|
||||
value = "$http_certificate$"
|
||||
description = "Minimum number of days a certificate has to be valid. This parameter explicitely sets the port to 443 and ignores the URL if passed."
|
||||
}
|
||||
"--continue-after-certificate" = {
|
||||
set_if = "$http_certificate_continue$"
|
||||
description = "Allows the HTTP check to continue after performing the certificate check. Does nothing unless -C is used"
|
||||
}
|
||||
"-J" = {
|
||||
value = "$http_clientcert$"
|
||||
description = "Name of file contains the client certificate (PEM format)"
|
||||
@ -557,6 +593,212 @@ object CheckCommand "http" {
|
||||
vars.http_verbose = false
|
||||
}
|
||||
|
||||
object CheckCommand "curl" {
|
||||
import "ipv4-or-ipv6"
|
||||
|
||||
command = [ PluginDir + "/check_curl" ]
|
||||
|
||||
arguments += {
|
||||
"--extra-opts" = {
|
||||
value = "$curl_extra_opts$"
|
||||
description = "Read options from an ini file"
|
||||
}
|
||||
"-H" = {
|
||||
value = "$curl_vhost$"
|
||||
description = "Host name argument for servers using host headers (virtual host). Append a port to include it in the header (eg: example.com:5000)"
|
||||
}
|
||||
"-I" = {
|
||||
value = "$curl_ip$"
|
||||
set_if = {{ string(macro("$curl_ip$")) != "" }}
|
||||
description = "IP address or name (use numeric address if possible to bypass DNS lookup)."
|
||||
}
|
||||
"-p" = {
|
||||
value = "$curl_port$"
|
||||
description = "Port number (default: 80)"
|
||||
}
|
||||
"-4" = {
|
||||
set_if = "$curl_ipv4$"
|
||||
description = "Force `check_curl` to use IPv4 instead of choosing automatically"
|
||||
}
|
||||
"-6" = {
|
||||
set_if = "$curl_ipv6$"
|
||||
description = "Force `check_curl` to use IPv6 instead of choosing automatically"
|
||||
}
|
||||
"(-S w/ value)" = {
|
||||
set_if = {{ macro("$curl_tls$") && string(macro("$curl_tls_version$")) != "" }}
|
||||
key = "-S"
|
||||
value = "$curl_tls_version$"
|
||||
description = "Connect via SSL. Port defaults to 443. VERSION is optional, and prevents auto-negotiation"
|
||||
}
|
||||
"(-S w/o value)" = {
|
||||
set_if = {{ macro("$curl_tls$") && string(macro("$curl_tls_version$")) == "" }}
|
||||
key = "-S"
|
||||
description = "Connect via SSL. Port defaults to 443. VERSION is optional, and prevents auto-negotiation"
|
||||
}
|
||||
"--sni" = {
|
||||
set_if = "$curl_sni$"
|
||||
description = "Enable SSL/TLS hostname extension support (SNI). Default if TLS version > 1.0"
|
||||
}
|
||||
"-C" = {
|
||||
value = "$curl_certificate_valid_days_min_warning$,$curl_certificate_valid_days_min_critical$"
|
||||
description = "Minimum number of days a certificate has to be valid."
|
||||
}
|
||||
"--continue-after-certificate" = {
|
||||
value = "$curl_continue_after_certificate$"
|
||||
description = "Allows the HTTP check to continue after performing the certificate check. Does nothing unless -C is used."
|
||||
}
|
||||
"-J" = {
|
||||
value = "$curl_client_certificate_file$"
|
||||
description = "Name of file that contains the client certificate (PEM format) to be used in establishing the SSL session"
|
||||
}
|
||||
"-K" = {
|
||||
value = "$curl_client_certificate_key_file$"
|
||||
description = "Name of file containing the private key (PEM format) matching the client certificate"
|
||||
}
|
||||
"--ca-cert" = {
|
||||
value = "$curl_ca_cert_file$"
|
||||
description = "CA certificate file to verify peer against"
|
||||
}
|
||||
"-D" = {
|
||||
set_if = "$curl_verify_peer_cert$"
|
||||
description = "Verify the peer's SSL certificate and hostname"
|
||||
}
|
||||
"-e" = {
|
||||
value = "$curl_expect_string$"
|
||||
description = "Comma-delimited list of strings, at least one of them is expected in the first (status) line of the server response (default: HTTP/), If specified skips all other status line logic (ex: 3xx, 4xx, 5xx processing)"
|
||||
}
|
||||
"-d" = {
|
||||
value = "$curl_expect_header_string$"
|
||||
description = "String to expect in the response headers"
|
||||
}
|
||||
"-s" = {
|
||||
value = "$curl_expect_content_string$"
|
||||
description = "String to expect in the content"
|
||||
}
|
||||
"-u" = {
|
||||
value = "$curl_url$"
|
||||
description = "URL to GET or POST (default: /)"
|
||||
}
|
||||
"-P" = {
|
||||
value = "$curl_post_data$"
|
||||
description = "URL encoded http POST data"
|
||||
}
|
||||
"-j" = {
|
||||
value = "$curl_http_method$"
|
||||
description = "Set HTTP method (for example: HEAD, OPTIONS, TRACE, PUT, DELETE, CONNECT)"
|
||||
}
|
||||
"-N" = {
|
||||
value = "$curl_no_body$"
|
||||
description = "Don't wait for document body: stop reading after headers. (Note that this still does an HTTP GET or POST, not a HEAD.)"
|
||||
}
|
||||
"-M" = {
|
||||
value = "$curl_max_age$"
|
||||
description = "Warn if document is more than SECONDS old. the number can also be of the form '10m' for minutes, '10h' for hours, or '10d' for days."
|
||||
}
|
||||
"-T" = {
|
||||
value = "$curl_content_type$"
|
||||
description = "specify Content-Type header media type when POSTing"
|
||||
}
|
||||
"-l" = {
|
||||
value = "$curl_linespan$"
|
||||
description = "Allow regex to span newlines (must precede -r or -R)"
|
||||
}
|
||||
"-r" = {
|
||||
value = "$curl_ereg$"
|
||||
description = "Search page for regex STRING"
|
||||
}
|
||||
"-R" = {
|
||||
value = "$curl_eregi$"
|
||||
description = "Search page for case-insensitive regex STRING"
|
||||
}
|
||||
"--invert-regex" = {
|
||||
set_if = "$curl_invert_regex$"
|
||||
description = "When using regex, return CRITICAL if found, OK if not"
|
||||
}
|
||||
"--state-regex" = {
|
||||
value = "$curl_state_regex$"
|
||||
description = "Return STATE if regex is found, OK if not"
|
||||
}
|
||||
"-a" = {
|
||||
value = "$curl_authorization$"
|
||||
description = "Username:password on sites with basic authentication"
|
||||
}
|
||||
"-b" = {
|
||||
value = "$curl_proxy_authorization$"
|
||||
description = "Username:password on proxy-servers with basic authentication"
|
||||
}
|
||||
"-A" = {
|
||||
value = "$curl_user_agent$"
|
||||
description = "String to be sent in http header as 'User Agent'"
|
||||
}
|
||||
"-k" = {
|
||||
value = "$curl_header$"
|
||||
repeat_key = true
|
||||
description = "Any other tags to be sent in http header. Use multiple times for additional headers"
|
||||
}
|
||||
"-E" = {
|
||||
set_if = "$curl_extended_perfdata$"
|
||||
description = "Print additional performance data"
|
||||
}
|
||||
"-B" = {
|
||||
set_if = "$curl_show_body$"
|
||||
description = "Print body content below status line"
|
||||
}
|
||||
"-L" = {
|
||||
set_if = "$curl_link$"
|
||||
description = "Wrap output in HTML link (obsoleted by urlize)"
|
||||
}
|
||||
"-f" = {
|
||||
value = "$curl_onredirect$"
|
||||
description = "Options: <ok|warning|critical|follow|sticky|stickyport|curl> How to handle redirected pages."
|
||||
}
|
||||
"--max-redirs" = {
|
||||
value = "$curl_max_redirs$"
|
||||
description = "Maximal number of redirects (default: 15)"
|
||||
}
|
||||
"-m" = {
|
||||
value = "$curl_pagesize$"
|
||||
description = "Minimum page size required (bytes) : Maximum page size required (bytes)"
|
||||
}
|
||||
"--http-version" = {
|
||||
value = "$curl_http_version$"
|
||||
description = "Connect via specific HTTP protocol. 1.0 = HTTP/1.0, 1.1 = HTTP/1.1, 2.0 = HTTP/2 (HTTP/2 will fail without -S)"
|
||||
}
|
||||
"--enable-automatic-decompression" = {
|
||||
set_if = "$curl_enable_automatic_decompression$"
|
||||
description = "Enable automatic decompression of body (CURLOPT_ACCEPT_ENCODING)."
|
||||
}
|
||||
"--haproxy-protocol" = {
|
||||
set_if = "$curl_haproxy_protocol$"
|
||||
description = "Send HAProxy proxy protocol v1 header (CURLOPT_HAPROXYPROTOCOL)"
|
||||
}
|
||||
"--cookie-jar" = {
|
||||
value = "$curl_cookie_jar_file$"
|
||||
description = "Store cookies in the cookie jar file and send them out when requested."
|
||||
}
|
||||
"-w" = {
|
||||
value = "$curl_warning$"
|
||||
description = "Response time to result in warning status (seconds)"
|
||||
}
|
||||
"-c" = {
|
||||
value = "$curl_critical$"
|
||||
description = "Response time to result in critical status (seconds)"
|
||||
}
|
||||
"-t" = {
|
||||
value = "$curl_timeout$"
|
||||
description = "Seconds before connection times out (default: 10)"
|
||||
}
|
||||
}
|
||||
|
||||
vars.curl_ip = "$check_address$"
|
||||
vars.curl_link = false
|
||||
vars.curl_invert_regex = false
|
||||
vars.curl_show_body = false
|
||||
vars.curl_extended_perfdata = false
|
||||
vars.check_ipv4 = "$curl_ipv4$"
|
||||
vars.check_ipv6 = "$curl_ipv6$"
|
||||
}
|
||||
|
||||
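For reference, the plugin wrapped by the `curl` CheckCommand above can be exercised manually with the same switches; the plugin path and the target host/address are assumptions for illustration only:

```bash
# -I: numeric address, -H: virtual host header, -u: URL, -S: use TLS,
# -w/-c: warning/critical response time in seconds.
/usr/lib/nagios/plugins/check_curl -I 192.0.2.10 -H www.example.com -u / -S -w 2 -c 5
```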
object CheckCommand "ftp" {
|
||||
import "ipv4-or-ipv6"
|
||||
|
||||
@ -567,6 +809,10 @@ object CheckCommand "ftp" {
|
||||
value = "$ftp_address$"
|
||||
description = "The host's address. Defaults to $address$ or $address6$ if the address attribute is not set."
|
||||
}
|
||||
"--extra-opts" = {
|
||||
value = "$ftp_extra_opts$"
|
||||
description = "Read extra plugin options from an ini file."
|
||||
}
|
||||
"-p" = {
|
||||
value = "$ftp_port$"
|
||||
description = "The FTP port number. Defaults to none"
|
||||
@ -670,6 +916,10 @@ object CheckCommand "smtp" {
|
||||
value = "$smtp_address$"
|
||||
description = "Host name, IP Address, or unix socket (must be an absolute path)"
|
||||
}
|
||||
"--extra-opts" = {
|
||||
value = "$smtp_extra_opts$"
|
||||
description = "Read extra plugin options from an ini file."
|
||||
}
|
||||
"-p" = {
|
||||
value = "$smtp_port$"
|
||||
description = "Port number (default: 25)"
|
||||
@ -755,6 +1005,10 @@ object CheckCommand "ssmtp" {
|
||||
value = "$ssmtp_address$"
|
||||
description = "Host name, IP Address, or unix socket (must be an absolute path)"
|
||||
}
|
||||
"--extra-opts" = {
|
||||
value = "$ssmtp_extra_opts$"
|
||||
description = "Read extra plugin options from an ini file."
|
||||
}
|
||||
"-p" = {
|
||||
value = "$ssmtp_port$"
|
||||
description = "Port number (default: none)"
|
||||
@ -844,6 +1098,10 @@ object CheckCommand "imap" {
|
||||
value = "$imap_address$"
|
||||
description = "Host name, IP Address, or unix socket (must be an absolute path)"
|
||||
}
|
||||
"--extra-opts" = {
|
||||
value = "$imap_extra_opts$"
|
||||
description = "Read extra plugin options from an ini file."
|
||||
}
|
||||
"-p" = {
|
||||
value = "$imap_port$"
|
||||
description = "Port number (default: none)"
|
||||
@ -933,6 +1191,10 @@ object CheckCommand "simap" {
|
||||
value = "$simap_address$"
|
||||
description = "Host name, IP Address, or unix socket (must be an absolute path)"
|
||||
}
|
||||
"--extra-opts" = {
|
||||
value = "$simap_extra_opts$"
|
||||
description = "Read extra plugin options from an ini file."
|
||||
}
|
||||
"-p" = {
|
||||
value = "$simap_port$"
|
||||
description = "Port number (default: none)"
|
||||
@ -1022,6 +1284,10 @@ object CheckCommand "pop" {
|
||||
value = "$pop_address$"
|
||||
description = "Host name, IP Address, or unix socket (must be an absolute path)"
|
||||
}
|
||||
"--extra-opts" = {
|
||||
value = "$pop_extra_opts$"
|
||||
description = "Read extra plugin options from an ini file."
|
||||
}
|
||||
"-p" = {
|
||||
value = "$pop_port$"
|
||||
description = "Port number (default: none)"
|
||||
@ -1111,6 +1377,10 @@ object CheckCommand "spop" {
|
||||
value = "$spop_address$"
|
||||
description = "Host name, IP Address, or unix socket (must be an absolute path)"
|
||||
}
|
||||
"--extra-opts" = {
|
||||
value = "$spop_extra_opts$"
|
||||
description = "Read extra plugin options from an ini file."
|
||||
}
|
||||
"-p" = {
|
||||
value = "$spop_port$"
|
||||
description = "Port number (default: none)"
|
||||
@ -1200,6 +1470,10 @@ object CheckCommand "ntp_time" {
|
||||
value = "$ntp_address$"
|
||||
description = "Host name, IP Address, or unix socket (must be an absolute path)"
|
||||
}
|
||||
"--extra-opts" = {
|
||||
value = "$ntp_extra_opts$"
|
||||
description = "Read extra plugin options from an ini file."
|
||||
}
|
||||
"-p" = {
|
||||
value = "$ntp_port$"
|
||||
description = "Port number (default: 123)"
|
||||
@ -1249,6 +1523,10 @@ object CheckCommand "ntp_peer" {
|
||||
value = "$ntp_address$"
|
||||
description = "Host name, IP Address, or unix socket (must be an absolute path)"
|
||||
}
|
||||
"--extra-opts" = {
|
||||
value = "$ntp_extra_opts$"
|
||||
description = "Read extra plugin options from an ini file."
|
||||
}
|
||||
"-p" = {
|
||||
value = "$ntp_port$"
|
||||
description = "Port number (default: 123)"
|
||||
@ -1314,6 +1592,10 @@ object CheckCommand "ssh" {
|
||||
command = [ PluginDir + "/check_ssh" ]
|
||||
|
||||
arguments = {
|
||||
"--extra-opts" = {
|
||||
value = "$ssh_extra_opts$"
|
||||
description = "Read extra plugin options from an ini file."
|
||||
}
|
||||
"-p" = {
|
||||
value = "$ssh_port$"
|
||||
description = "Port number (default: 22)"
|
||||
@ -1335,6 +1617,14 @@ object CheckCommand "ssh" {
|
||||
set_if = "$ssh_ipv6$"
|
||||
description = "Use IPv6 connection"
|
||||
}
|
||||
"-r" = {
|
||||
value = "$ssh_remote_version$"
|
||||
description = "Alert if string doesn't match expected server version (ex: OpenSSH_3.9p1)"
|
||||
}
|
||||
"-P" = {
|
||||
value = "$ssh_remote_protocol$"
|
||||
description = "Alert if protocol doesn't match expected protocol version (ex: 2.0)"
|
||||
}
|
||||
}
|
||||
|
||||
vars.ssh_address = "$check_address$"
|
||||
@ -1346,6 +1636,13 @@ object CheckCommand "disk" {
|
||||
command = [ PluginDir + "/check_disk" ]
|
||||
|
||||
arguments = {
|
||||
/*
|
||||
"-C" (disk_clear) is missing on purpose, since there is no useful use case possible the way check_disk is mapped here
|
||||
*/
|
||||
"--extra-opts" = {
|
||||
value = "$disk_extra_opts$"
|
||||
description = "Read extra plugin options from an ini file."
|
||||
}
|
||||
"-w" = {
|
||||
value = "$disk_wfree$"
|
||||
description = "Exit with WARNING status if less than INTEGER units of disk are free or Exit with WARNING status if less than PERCENT of disk space is free"
|
||||
@ -1372,6 +1669,10 @@ object CheckCommand "disk" {
|
||||
description = "Display inode usage in perfdata"
|
||||
set_if = "$disk_inode_perfdata$"
|
||||
}
|
||||
"--inode-perfdata" = {
|
||||
description = "Enable performance data for inode-based statistics (nagios-plugins)"
|
||||
set_if = "$disk_np_inode_perfdata$"
|
||||
}
|
||||
"-p" = {
|
||||
value = "$disk_partitions$"
|
||||
description = "Path or partition (may be repeated)"
|
||||
@ -1391,10 +1692,6 @@ object CheckCommand "disk" {
|
||||
key = "-x"
|
||||
value = "$disk_partition_excluded$"
|
||||
}
|
||||
"-C" = {
|
||||
set_if = "$disk_clear$"
|
||||
description = "Clear thresholds"
|
||||
}
|
||||
"-E" = {
|
||||
set_if = "$disk_exact_match$"
|
||||
description = "For paths or partitions specified with -p, only check for exact paths"
|
||||
@ -1553,6 +1850,10 @@ object CheckCommand "users" {
|
||||
command = [ PluginDir + "/check_users" ]
|
||||
|
||||
arguments = {
|
||||
"--extra-opts" = {
|
||||
value = "$users_extra_opts$"
|
||||
description = "Read extra plugin options from an ini file."
|
||||
}
|
||||
"-w" = {
|
||||
value = "$users_wgreater$"
|
||||
description = "Set WARNING status if more than INTEGER users are logged in"
|
||||
@ -1571,6 +1872,10 @@ object CheckCommand "procs" {
|
||||
command = [ PluginDir + "/check_procs" ]
|
||||
|
||||
arguments = {
|
||||
"--extra-opts" = {
|
||||
value = "$procs_extra_opts$"
|
||||
description = "Read extra plugin options from an ini file."
|
||||
}
|
||||
"-w" = {
|
||||
value = "$procs_warning$"
|
||||
description = "Generate warning state if metric is outside this range"
|
||||
@ -1627,6 +1932,10 @@ object CheckCommand "procs" {
|
||||
value = "$procs_command$"
|
||||
description = "Only scan for exact matches of COMMAND (without path)"
|
||||
}
|
||||
"-X" = {
|
||||
value = "$procs_exclude_process$"
|
||||
description = "Exclude processes which match this comma separated list"
|
||||
}
|
||||
"-k" = {
|
||||
set_if = "$procs_nokthreads$"
|
||||
description = "Only scan for non kernel threads"
|
||||
@ -1643,6 +1952,10 @@ object CheckCommand "swap" {
|
||||
command = [ PluginDir + "/check_swap" ]
|
||||
|
||||
arguments = {
|
||||
"--extra-opts" = {
|
||||
value = "$swap_extra_opts$"
|
||||
description = "Read extra plugin options from an ini file."
|
||||
}
|
||||
"-w" = {{
|
||||
if (macro("$swap_integer$")) {
|
||||
return macro("$swap_wfree$")
|
||||
@ -1677,6 +1990,10 @@ object CheckCommand "load" {
|
||||
command = [ PluginDir + "/check_load" ]
|
||||
|
||||
arguments = {
|
||||
"--extra-opts" = {
|
||||
value = "$load_extra_opts$"
|
||||
description = "Read extra plugin options from an ini file."
|
||||
}
|
||||
"-w" = {
|
||||
value = "$load_wload1$,$load_wload5$,$load_wload15$"
|
||||
description = "Exit with WARNING status if load average exceeds WLOADn"
|
||||
@ -1689,6 +2006,10 @@ object CheckCommand "load" {
|
||||
set_if = "$load_percpu$"
|
||||
description = "Divide the load averages by the number of CPUs (when possible)"
|
||||
}
|
||||
"-n" = {
|
||||
value = "$load_procs_to_show$"
|
||||
description = "Number of processes to show when printing the top consuming processes. (Default value is 0)"
|
||||
}
|
||||
}
|
||||
|
||||
vars.load_wload1 = 5.0
|
||||
@ -1710,6 +2031,10 @@ object CheckCommand "snmp" {
|
||||
value = "$snmp_address$"
|
||||
description = "Host name, IP Address, or unix socket (must be an absolute path)"
|
||||
}
|
||||
"--extra-opts" = {
|
||||
value = "$snmp_extra_opts$"
|
||||
description = "Read extra plugin options from an ini file."
|
||||
}
|
||||
"-o" = {
|
||||
value = "$snmp_oid$"
|
||||
description = "Object identifier(s) or SNMP variables whose value you wish to query"
|
||||
@ -1770,6 +2095,10 @@ object CheckCommand "snmp" {
|
||||
value = "$snmp_miblist$"
|
||||
description = "List of MIBS to be loaded (default = none if using numeric OIDs or 'ALL' for symbolic OIDs.)"
|
||||
}
|
||||
"-M" = {
|
||||
value = "$snmp_multiplier$"
|
||||
description = "Multiplies current value, 0 < n < 1 works as divider, defaults to 1"
|
||||
}
|
||||
"--rate-multiplier" = {
|
||||
value = "$snmp_rate_multiplier$"
|
||||
description = "Converts rate per second. For example, set to 60 to convert to per minute"
|
||||
@ -1822,6 +2151,10 @@ object CheckCommand "snmpv3" {
|
||||
value = "$snmpv3_address$"
|
||||
description = "Host name, IP Address, or unix socket (must be an absolute path)"
|
||||
}
|
||||
"--extra-opts" = {
|
||||
value = "$snmpv3_extra_opts$"
|
||||
description = "Read extra plugin options from an ini file."
|
||||
}
|
||||
"-p" = {
|
||||
value = "$snmpv3_port$"
|
||||
description = "Port number"
|
||||
@ -1898,6 +2231,10 @@ object CheckCommand "snmpv3" {
|
||||
value = "$snmpv3_miblist$"
|
||||
description = "List of SNMP MIBs for translating OIDs between numeric and textual representation"
|
||||
}
|
||||
"-M" = {
|
||||
value = "$snmpv3_multiplier$"
|
||||
description = "Multiplies current value, 0 < n < 1 works as divider, defaults to 1"
|
||||
}
|
||||
"-u" = {
|
||||
value = "$snmpv3_units$"
|
||||
description = "Units label(s) for output data (e.g., 'sec.')"
|
||||
@ -2003,6 +2340,10 @@ object CheckCommand "dhcp" {
|
||||
command = [ PluginDir + "/check_dhcp" ]
|
||||
|
||||
arguments = {
|
||||
"--extra-opts" = {
|
||||
value = "$dhcp_extra_opts$"
|
||||
description = "Read extra plugin options from an ini file."
|
||||
}
|
||||
"-s" = {
|
||||
value = "$dhcp_serverip$"
|
||||
description = "IP address of DHCP server that we must hear from"
|
||||
@ -2042,6 +2383,10 @@ object CheckCommand "dns" {
|
||||
value = "$dns_lookup$"
|
||||
description = "The name or address you want to query."
|
||||
}
|
||||
"--extra-opts" = {
|
||||
value = "$dns_extra_opts$"
|
||||
description = "Read extra plugin options from an ini file."
|
||||
}
|
||||
"-s" = {
|
||||
value = "$dns_server$"
|
||||
description = "Optional DNS server you want to use for the lookup."
|
||||
@ -2094,6 +2439,10 @@ object CheckCommand "dig" {
|
||||
value = "$dig_server$"
|
||||
description = "Host name, IP Address, or unix socket (must be an absolute path)"
|
||||
}
|
||||
"--extra-opts" = {
|
||||
value = "$dig_extra_opts$"
|
||||
description = "Read extra plugin options from an ini file."
|
||||
}
|
||||
"-p" = {
|
||||
value = "$dig_port$"
|
||||
description = "Port number (default: 53)"
|
||||
@ -2152,6 +2501,10 @@ object CheckCommand "nscp" {
|
||||
value = "$nscp_address$"
|
||||
description = "Name of the host to check"
|
||||
}
|
||||
"--extra-opts" = {
|
||||
value = "$nscp_extra_opts$"
|
||||
description = "Read extra plugin options from an ini file."
|
||||
}
|
||||
"-p" = {
|
||||
value = "$nscp_port$"
|
||||
description = "Optional port number (default: 1248)"
|
||||
@ -2203,6 +2556,10 @@ object CheckCommand "by_ssh" {
|
||||
value = "$by_ssh_address$"
|
||||
description = "Host name, IP Address, or unix socket (must be an absolute path)"
|
||||
}
|
||||
"--extra-opts" = {
|
||||
value = "$by_ssh_extra_opts$"
|
||||
description = "Read extra plugin options from an ini file."
|
||||
}
|
||||
"-p" = {
|
||||
value = "$by_ssh_port$"
|
||||
description = "Port number (default: none)"
|
||||
@ -2280,6 +2637,10 @@ object CheckCommand "ups" {
|
||||
description = "Address of the upsd server"
|
||||
required = true
|
||||
}
|
||||
"--extra-opts" = {
|
||||
value = "$ups_extra_opts$"
|
||||
description = "Read extra plugin options from an ini file."
|
||||
}
|
||||
"-u" = {
|
||||
value = "$ups_name$"
|
||||
description = "Name of the UPS to monitor"
|
||||
@ -2417,6 +2778,10 @@ object CheckCommand "hpjd" {
|
||||
value = "$hpjd_address$"
|
||||
description = "Host address"
|
||||
}
|
||||
"--extra-opts" = {
|
||||
value = "$hpjd_extra_opts$"
|
||||
description = "Read extra plugin options from an ini file."
|
||||
}
|
||||
"-C" = {
|
||||
value = "$hpjd_community$"
|
||||
description = "The SNMP community name (default=public)"
|
||||
@ -2440,6 +2805,10 @@ object CheckCommand "icmp" {
|
||||
order = 1
|
||||
description = "Host address"
|
||||
}
|
||||
"--extra-opts" = {
|
||||
value = "$icmp_extra_opts$"
|
||||
description = "Read extra plugin options from an ini file."
|
||||
}
|
||||
"-w" = {
|
||||
value = "$icmp_wrta$,$icmp_wpl$%"
|
||||
description = "warning threshold (currently 200.000ms,10%)"
|
||||
@ -2499,6 +2868,10 @@ object CheckCommand "ldap" {
|
||||
value = "$ldap_address$"
|
||||
description = "Host name, IP Address, or unix socket (must be an absolute path)"
|
||||
}
|
||||
"--extra-opts" = {
|
||||
value = "$ldap_extra_opts$"
|
||||
description = "Read extra plugin options from an ini file."
|
||||
}
|
||||
"-p" = {
|
||||
value = "$ldap_port$"
|
||||
description = "Port number (default: 389)"
|
||||
@ -2578,6 +2951,10 @@ object CheckCommand "clamd" {
|
||||
description = "The host's address or unix socket (must be an absolute path)."
|
||||
required = true
|
||||
}
|
||||
"--extra-opts" = {
|
||||
value = "$clamd_extra_opts$"
|
||||
description = "Read extra plugin options from an ini file."
|
||||
}
|
||||
"-p" = {
|
||||
value = "$clamd_port$"
|
||||
description = "Port number (default: none)."
|
||||
@ -2722,6 +3099,10 @@ object CheckCommand "pgsql" {
|
||||
value = "$pgsql_hostname$"
|
||||
description = "Host name, IP Address, or unix socket (must be an absolute path)"
|
||||
}
|
||||
"--extra-opts" = {
|
||||
value = "$pgsql_extra_opts$"
|
||||
description = "Read extra plugin options from an ini file."
|
||||
}
|
||||
"-P" = {
|
||||
value = "$pgsql_port$"
|
||||
description = "Port number (default: 5432)"
|
||||
@ -2786,6 +3167,10 @@ object CheckCommand "mysql" {
|
||||
value = "$mysql_hostname$"
|
||||
description = "Host name, IP Address, or unix socket (must be an absolute path)"
|
||||
}
|
||||
"--extra-opts" = {
|
||||
value = "$mysql_extra_opts$"
|
||||
description = "Read extra plugin options from an ini file."
|
||||
}
|
||||
"-P" = {
|
||||
value = "$mysql_port$"
|
||||
description = "Port number (default: 3306)"
|
||||
@ -2947,6 +3332,10 @@ object CheckCommand "smart" {
|
||||
command = [ PluginDir + "/check_ide_smart" ]
|
||||
|
||||
arguments = {
|
||||
"--extra-opts" = {
|
||||
value = "$smart_extra_opts$"
|
||||
description = "Read extra plugin options from an ini file."
|
||||
}
|
||||
"-d" = {
|
||||
value = "$smart_device$"
|
||||
description = "Name of a local hard drive to monitor"
|
||||
@ -3009,6 +3398,10 @@ object CheckCommand "game" {
|
||||
command = [ PluginDir + "/check_game" ]
|
||||
|
||||
arguments = {
|
||||
"--extra-opts" = {
|
||||
value = "$game_extra_opts$"
|
||||
description = "Read extra plugin options from an ini file."
|
||||
}
|
||||
"-P" = {
|
||||
value = "$game_port$"
|
||||
description = "Port to connect to"
|
||||
@ -3062,6 +3455,10 @@ object CheckCommand "mysql_query" {
|
||||
value = "$mysql_query_hostname$"
|
||||
description = "Host name, IP Address, or unix socket (must be an absolute path)"
|
||||
}
|
||||
"--extra-opts" = {
|
||||
value = "$mysql_query_extra_opts$"
|
||||
description = "Read extra plugin options from an ini file."
|
||||
}
|
||||
"-P" = {
|
||||
value = "$mysql_query_port$"
|
||||
description = "Port number (default: 3306)"
|
||||
@ -3115,6 +3512,10 @@ object CheckCommand "radius" {
|
||||
value = "$radius_address$",
|
||||
description = "Host name, IP Address, or unix socket (must be an absolute path)"
|
||||
}
|
||||
"--extra-opts" = {
|
||||
value = "$radius_extra_opts$"
|
||||
description = "Read extra plugin options from an ini file."
|
||||
}
|
||||
"-F" = {
|
||||
value = "$radius_config_file$",
|
||||
description = "Configuration file"
|
||||
|
@ -1,42 +1,10 @@
|
||||
/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
|
||||
|
||||
object CheckCommand "systemd" {
|
||||
command = [ PluginContribDir + "/check_systemd.py" ]
|
||||
command = [ PluginContribDir + "/check_systemd" ]
|
||||
|
||||
arguments = {
|
||||
"--unit" = {
|
||||
value = "$systemd_unit$"
|
||||
description = "Name of the systemd unit that is being tested."
|
||||
}
|
||||
"--exclude" = {
|
||||
value = "$systemd_exclude_unit$"
|
||||
description = "Exclude a systemd unit from the checks. This option can be applied multiple times. Also supports regular expressions."
|
||||
repeat_key = true
|
||||
}
|
||||
"--no-startup-time" = {
|
||||
set_if = "$systemd_no_startup_time$"
|
||||
description = "Don’t check the startup time. Using this option the options `systemd_warning` and `systemd_critical` have no effect. (Default: `false`)"
|
||||
}
|
||||
"--warning" = {
|
||||
value = "$systemd_warning$"
|
||||
description = "Startup time in seconds to result in a warning status. (Default: `60s`)"
|
||||
}
|
||||
"--critical" = {
|
||||
value = "$systemd_critical$"
|
||||
description = "Startup time in seconds to result in a critical status. (Default: `120s`)"
|
||||
}
|
||||
"--dead-timers" = {
|
||||
set_if = "$systemd_dead_timers$"
|
||||
description = "Detect dead / inactive timers. (Default: `false`)"
|
||||
}
|
||||
"--dead-timers-warning" = {
|
||||
value = "$systemd_dead_timers_warning$"
|
||||
description = "Time ago in seconds for dead / inactive timers to trigger a warning state (by default 6 days)."
|
||||
}
|
||||
"--dead-timers-critical" = {
|
||||
value = "$systemd_dead_timers_critical$"
|
||||
description = "Time ago in seconds for dead / inactive timers to trigger a critical state (by default 7 days)."
|
||||
}
|
||||
/* General options */
|
||||
"-v" = {
|
||||
set_if = {{ macro("$systemd_verbose_level$") == 1 }}
|
||||
description = "Increase verbosity level (Accepted values: `1`, `2` or `3`). Defaults to none."
|
||||
@ -47,5 +15,85 @@ object CheckCommand "systemd" {
|
||||
"-vvv" = {
|
||||
set_if = {{ macro("$systemd_verbose_level$") == 3 }}
|
||||
}
|
||||
|
||||
/* Options related to unit selection */
|
||||
"--ignore-inactive-state" = {
|
||||
set_if = "$systemd_ignore_inactive_state$"
|
||||
description = "Ignore an inactive state on a specific unit. Only affective if used with `systemd_unit`."
|
||||
}
|
||||
"--include" = {
|
||||
value = "$systemd_include$"
|
||||
description = "Include systemd units to the checks, regular expressions are supported. This option can be applied multiple times."
|
||||
repeat_key = true
|
||||
}
|
||||
"--unit" = {
|
||||
value = "$systemd_unit$"
|
||||
description = "Name of the systemd unit that is being tested."
|
||||
}
|
||||
"--include-type" = {
|
||||
value = "$systemd_include_type$"
|
||||
description = "Unit types to be tested (for example: `service`, `timer`). This option can be applied multiple times."
|
||||
repeat_key = true
|
||||
}
|
||||
"--exclude" = {
|
||||
value = "$systemd_exclude_unit$"
|
||||
description = "Exclude a systemd unit from the checks, regular expressions are supported. This option can be applied multiple times."
|
||||
repeat_key = true
|
||||
}
|
||||
"--exclude-unit" = {
|
||||
value = "$systemd_exclude_unit_name$"
|
||||
description = "Exclude a systemd unit from the checks. This option can be applied multiple times."
|
||||
repeat_key = true
|
||||
}
|
||||
"--exclude-type" = {
|
||||
value = "$systemd_exclude_type$"
|
||||
description = "Exclude a systemd unit type (for example: `service`, `timer`)"
|
||||
}
|
||||
"--state" = {
|
||||
value = "$systemd_state$"
|
||||
description = "Specify the active state that the systemd unit must have (for example: `active`, `inactive`)"
|
||||
}
|
||||
|
||||
/* Timers related options */
|
||||
"--dead-timers" = {
|
||||
set_if = "$systemd_dead_timers$"
|
||||
description = "Detect dead / inactive timers, see `systemd_dead_timers_{warning,critical}`. (Default `false`)"
|
||||
}
|
||||
"--dead-timers-warning" = {
|
||||
value = "$systemd_dead_timers_warning$"
|
||||
description = "Time ago in seconds for dead / inactive timers to trigger a warning state. (Default 6 days)"
|
||||
}
|
||||
"--dead-timers-critical" = {
|
||||
value = "$systemd_dead_timers_critical$"
|
||||
description = "Time ago in seconds for dead / inactive timers to trigger a critical state. (Default 7 days)"
|
||||
}
|
||||
|
||||
/* Startup time related options */
|
||||
"--no-startup-time" = {
|
||||
set_if = "$systemd_no_startup_time$"
|
||||
description = "Don't check the startup time. Using this option, the options `systemd_{warning,critical}` have no effect. (Default `false`)"
|
||||
}
|
||||
"--warning" = {
|
||||
value = "$systemd_warning$"
|
||||
description = "Startup time in seconds to result in a warning status. (Default 60 seconds)"
|
||||
}
|
||||
"--critical" = {
|
||||
value = "$systemd_critical$"
|
||||
description = "Startup time in seconds to result in a critical status. (Default 120 seconds)"
|
||||
}
|
||||
|
||||
/* Monitoring data acquisition */
|
||||
"--dbus" = {
|
||||
set_if = "$systemd_dbus$"
|
||||
description = "Use systemd's D-Bus API instead of parsing command output. Only partially implemented!"
|
||||
}
|
||||
"--cli" = {
|
||||
set_if = "$systemd_cli$"
|
||||
description = "Use text output from parsing command output. (Default)"
|
||||
}
|
||||
"--user" = {
|
||||
set_if = "$systemd_user$"
|
||||
description = "Also show user (systemctl --user) units."
|
||||
}
|
||||
}
|
||||
}
|
||||
|
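A manual invocation of the renamed plugin using a couple of the options mapped above (plugin path and unit name are assumptions):

```bash
# Check a single unit and skip the startup-time check.
/usr/lib/nagios/plugins/check_systemd --unit icinga2.service --no-startup-time
```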
@ -50,6 +50,10 @@ template CheckCommand "vmware-esx-command" {
|
||||
username=<username> \
|
||||
password=<password>"
|
||||
}
|
||||
"--maintenance_mode_state" = {
|
||||
value = "$vmware_maintenance_mode_state$"
|
||||
description = "Set status in case ESX host is in maintenace mode. Possible Values are: ok or OK, CRITICAL or critical or CRIT or crit, WARNING or warning or WARN or warn. Default is UNKNOWN because you do not know the real state. Values are case insensitive."
|
||||
}
|
||||
}
|
||||
|
||||
vars.vmware_timeout = "90"
|
||||
@ -421,6 +425,10 @@ object CheckCommand "vmware-esx-soap-host-net" {
|
||||
"--isregexp" = {
|
||||
set_if = "$vmware_isregexp$"
|
||||
}
|
||||
"--unplugged_nics_state" = {
|
||||
value = "$vmware_unplugged_nics_state$"
|
||||
description = "Sets status for unplugged nics (Possible values are: [OK | ok] or [CRITICAL | critical | CRIT | crit] or [WARNING | warning | WARN | warn]. Default is WARNING. Values are case insensitive.)"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@ -467,6 +475,10 @@ object CheckCommand "vmware-esx-soap-host-net-nic" {
|
||||
"--isregexp" = {
|
||||
set_if = "$vmware_isregexp$"
|
||||
}
|
||||
"--unplugged_nics_state" = {
|
||||
value = "$vmware_unplugged_nics_state$"
|
||||
description = "Sets status for unplugged nics (Possible values are: [OK | ok] or [CRITICAL | critical | CRIT | crit] or [WARNING | warning | WARN | warn]. Default is WARNING. Values are case insensitive.)"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -582,7 +582,19 @@ object CheckCommand "ssl_cert" {
|
||||
value = "$ssl_cert_maximum_validity$"
|
||||
description = "The maximum validity of the certificate in days (default: 397)"
|
||||
}
|
||||
|
||||
"--dane" = {
|
||||
value = "$ssl_cert_dane$"
|
||||
description = "verify that valid DANE records exist (since OpenSSL 1.1.0)"
|
||||
repeat_key = false
|
||||
}
|
||||
"--ignore-maximum-validity" = {
|
||||
description = "Ignore the certificate maximum validity"
|
||||
set_if = "$ssl_cert_ignore_maximum_validity$"
|
||||
}
|
||||
"--long-output" = {
|
||||
description = "Append the specified comma separated (no spaces) list of attributes to the plugin output on additional lines"
|
||||
value = "$ssl_cert_long_output$"
|
||||
}
|
||||
}
|
||||
|
||||
vars.ssl_cert_address = "$check_address$"
|
||||
|
@ -37,7 +37,9 @@ set(base_SOURCES
|
||||
fifo.cpp fifo.hpp
|
||||
filelogger.cpp filelogger.hpp filelogger-ti.hpp
|
||||
function.cpp function.hpp function-ti.hpp function-script.cpp functionwrapper.hpp
|
||||
generator.hpp
|
||||
initialize.cpp initialize.hpp
|
||||
intrusive-ptr.hpp
|
||||
io-engine.cpp io-engine.hpp
|
||||
journaldlogger.cpp journaldlogger.hpp journaldlogger-ti.hpp
|
||||
json.cpp json.hpp json-script.cpp
|
||||
@ -86,6 +88,7 @@ set(base_SOURCES
|
||||
unixsocket.cpp unixsocket.hpp
|
||||
utility.cpp utility.hpp
|
||||
value.cpp value.hpp value-operators.cpp
|
||||
wait-group.cpp wait-group.hpp
|
||||
win32.hpp
|
||||
workqueue.cpp workqueue.hpp
|
||||
)
|
||||
@ -130,7 +133,7 @@ if(HAVE_SYSTEMD)
|
||||
find_path(SYSTEMD_INCLUDE_DIR
|
||||
NAMES systemd/sd-daemon.h
|
||||
HINTS ${SYSTEMD_ROOT_DIR})
|
||||
include_directories(${SYSTEMD_INCLUDE_DIR})
|
||||
include_directories(SYSTEM ${SYSTEMD_INCLUDE_DIR})
|
||||
set_property(
|
||||
SOURCE ${CMAKE_CURRENT_SOURCE_DIR}/journaldlogger.cpp
|
||||
APPEND PROPERTY COMPILE_DEFINITIONS
|
||||
@ -140,13 +143,13 @@ endif()
|
||||
|
||||
add_library(base OBJECT ${base_SOURCES})
|
||||
|
||||
include_directories(${icinga2_SOURCE_DIR}/third-party/execvpe)
|
||||
include_directories(SYSTEM ${icinga2_SOURCE_DIR}/third-party/execvpe)
|
||||
link_directories(${icinga2_BINARY_DIR}/third-party/execvpe)
|
||||
|
||||
include_directories(${icinga2_SOURCE_DIR}/third-party/mmatch)
|
||||
include_directories(SYSTEM ${icinga2_SOURCE_DIR}/third-party/mmatch)
|
||||
link_directories(${icinga2_BINARY_DIR}/third-party/mmatch)
|
||||
|
||||
include_directories(${icinga2_SOURCE_DIR}/third-party/socketpair)
|
||||
include_directories(SYSTEM ${icinga2_SOURCE_DIR}/third-party/socketpair)
|
||||
link_directories(${icinga2_BINARY_DIR}/third-party/socketpair)
|
||||
|
||||
set_target_properties (
|
||||
@ -154,7 +157,9 @@ set_target_properties (
|
||||
FOLDER Lib
|
||||
)
|
||||
|
||||
install(CODE "file(MAKE_DIRECTORY \"\$ENV{DESTDIR}${ICINGA2_FULL_CACHEDIR}\")")
|
||||
install(CODE "file(MAKE_DIRECTORY \"\$ENV{DESTDIR}${ICINGA2_FULL_LOGDIR}/crash\")")
|
||||
if(NOT WIN32)
|
||||
install(CODE "file(MAKE_DIRECTORY \"\$ENV{DESTDIR}${ICINGA2_FULL_CACHEDIR}\")")
|
||||
install(CODE "file(MAKE_DIRECTORY \"\$ENV{DESTDIR}${ICINGA2_FULL_LOGDIR}/crash\")")
|
||||
endif()
|
||||
|
||||
set(CPACK_NSIS_EXTRA_INSTALL_COMMANDS "${CPACK_NSIS_EXTRA_INSTALL_COMMANDS}" PARENT_SCOPE)
|
||||
|
@ -776,6 +776,12 @@ void Application::SigAbrtHandler(int)
|
||||
}
|
||||
|
||||
AttachDebugger(fname, interactive_debugger);
|
||||
|
||||
#ifdef __linux__
|
||||
prctl(PR_SET_DUMPABLE, 1);
|
||||
#endif /* __linux __ */
|
||||
|
||||
abort();
|
||||
}
|
||||
|
||||
#ifdef _WIN32
|
||||
|
@ -45,13 +45,12 @@ Value Array::Get(SizeType index) const
|
||||
*
|
||||
* @param index The index.
|
||||
* @param value The value.
|
||||
* @param overrideFrozen Whether to allow modifying frozen arrays.
|
||||
*/
|
||||
void Array::Set(SizeType index, const Value& value, bool overrideFrozen)
|
||||
void Array::Set(SizeType index, const Value& value)
|
||||
{
|
||||
ObjectLock olock(this);
|
||||
|
||||
if (m_Frozen && !overrideFrozen)
|
||||
if (m_Frozen)
|
||||
BOOST_THROW_EXCEPTION(std::invalid_argument("Value in array must not be modified."));
|
||||
|
||||
m_Data.at(index) = value;
|
||||
@ -62,13 +61,12 @@ void Array::Set(SizeType index, const Value& value, bool overrideFrozen)
|
||||
*
|
||||
* @param index The index.
|
||||
* @param value The value.
|
||||
* @param overrideFrozen Whether to allow modifying frozen arrays.
|
||||
*/
|
||||
void Array::Set(SizeType index, Value&& value, bool overrideFrozen)
|
||||
void Array::Set(SizeType index, Value&& value)
|
||||
{
|
||||
ObjectLock olock(this);
|
||||
|
||||
if (m_Frozen && !overrideFrozen)
|
||||
if (m_Frozen)
|
||||
BOOST_THROW_EXCEPTION(std::invalid_argument("Array must not be modified."));
|
||||
|
||||
m_Data.at(index).Swap(value);
|
||||
@ -78,13 +76,12 @@ void Array::Set(SizeType index, Value&& value, bool overrideFrozen)
|
||||
* Adds a value to the array.
|
||||
*
|
||||
* @param value The value.
|
||||
* @param overrideFrozen Whether to allow modifying frozen arrays.
|
||||
*/
|
||||
void Array::Add(Value value, bool overrideFrozen)
|
||||
void Array::Add(Value value)
|
||||
{
|
||||
ObjectLock olock(this);
|
||||
|
||||
if (m_Frozen && !overrideFrozen)
|
||||
if (m_Frozen)
|
||||
BOOST_THROW_EXCEPTION(std::invalid_argument("Array must not be modified."));
|
||||
|
||||
m_Data.push_back(std::move(value));
|
||||
@ -99,7 +96,7 @@ void Array::Add(Value value, bool overrideFrozen)
|
||||
*/
|
||||
Array::Iterator Array::Begin()
|
||||
{
|
||||
ASSERT(OwnsLock());
|
||||
ASSERT(Frozen() || OwnsLock());
|
||||
|
||||
return m_Data.begin();
|
||||
}
|
||||
@ -113,7 +110,7 @@ Array::Iterator Array::Begin()
|
||||
*/
|
||||
Array::Iterator Array::End()
|
||||
{
|
||||
ASSERT(OwnsLock());
|
||||
ASSERT(Frozen() || OwnsLock());
|
||||
|
||||
return m_Data.end();
|
||||
}
|
||||
@ -148,15 +145,14 @@ bool Array::Contains(const Value& value) const
|
||||
*
|
||||
* @param index The index
|
||||
* @param value The value to add
|
||||
* @param overrideFrozen Whether to allow modifying frozen arrays.
|
||||
*/
|
||||
void Array::Insert(SizeType index, Value value, bool overrideFrozen)
|
||||
void Array::Insert(SizeType index, Value value)
|
||||
{
|
||||
ObjectLock olock(this);
|
||||
|
||||
ASSERT(index <= m_Data.size());
|
||||
|
||||
if (m_Frozen && !overrideFrozen)
|
||||
if (m_Frozen)
|
||||
BOOST_THROW_EXCEPTION(std::invalid_argument("Array must not be modified."));
|
||||
|
||||
m_Data.insert(m_Data.begin() + index, std::move(value));
|
||||
@ -166,13 +162,12 @@ void Array::Insert(SizeType index, Value value, bool overrideFrozen)
|
||||
* Removes the specified index from the array.
|
||||
*
|
||||
* @param index The index.
|
||||
* @param overrideFrozen Whether to allow modifying frozen arrays.
|
||||
*/
|
||||
void Array::Remove(SizeType index, bool overrideFrozen)
|
||||
void Array::Remove(SizeType index)
|
||||
{
|
||||
ObjectLock olock(this);
|
||||
|
||||
if (m_Frozen && !overrideFrozen)
|
||||
if (m_Frozen)
|
||||
BOOST_THROW_EXCEPTION(std::invalid_argument("Array must not be modified."));
|
||||
|
||||
if (index >= m_Data.size())
|
||||
@ -185,43 +180,42 @@ void Array::Remove(SizeType index, bool overrideFrozen)
|
||||
* Removes the item specified by the iterator from the array.
|
||||
*
|
||||
* @param it The iterator.
|
||||
* @param overrideFrozen Whether to allow modifying frozen arrays.
|
||||
*/
|
||||
void Array::Remove(Array::Iterator it, bool overrideFrozen)
|
||||
void Array::Remove(Array::Iterator it)
|
||||
{
|
||||
ASSERT(OwnsLock());
|
||||
|
||||
if (m_Frozen && !overrideFrozen)
|
||||
if (m_Frozen)
|
||||
BOOST_THROW_EXCEPTION(std::invalid_argument("Array must not be modified."));
|
||||
|
||||
m_Data.erase(it);
|
||||
}
|
||||
|
||||
void Array::Resize(SizeType newSize, bool overrideFrozen)
|
||||
void Array::Resize(SizeType newSize)
|
||||
{
|
||||
ObjectLock olock(this);
|
||||
|
||||
if (m_Frozen && !overrideFrozen)
|
||||
if (m_Frozen)
|
||||
BOOST_THROW_EXCEPTION(std::invalid_argument("Array must not be modified."));
|
||||
|
||||
m_Data.resize(newSize);
|
||||
}
|
||||
|
||||
void Array::Clear(bool overrideFrozen)
|
||||
void Array::Clear()
|
||||
{
|
||||
ObjectLock olock(this);
|
||||
|
||||
if (m_Frozen && !overrideFrozen)
|
||||
if (m_Frozen)
|
||||
BOOST_THROW_EXCEPTION(std::invalid_argument("Array must not be modified."));
|
||||
|
||||
m_Data.clear();
|
||||
}
|
||||
|
||||
void Array::Reserve(SizeType newSize, bool overrideFrozen)
|
||||
void Array::Reserve(SizeType newSize)
|
||||
{
|
||||
ObjectLock olock(this);
|
||||
|
||||
if (m_Frozen && !overrideFrozen)
|
||||
if (m_Frozen)
|
||||
BOOST_THROW_EXCEPTION(std::invalid_argument("Array must not be modified."));
|
||||
|
||||
m_Data.reserve(newSize);
|
||||
@ -280,11 +274,11 @@ Array::Ptr Array::Reverse() const
|
||||
return result;
|
||||
}
|
||||
|
||||
void Array::Sort(bool overrideFrozen)
|
||||
void Array::Sort()
|
||||
{
|
||||
ObjectLock olock(this);
|
||||
|
||||
if (m_Frozen && !overrideFrozen)
|
||||
if (m_Frozen)
|
||||
BOOST_THROW_EXCEPTION(std::invalid_argument("Array must not be modified."));
|
||||
|
||||
std::sort(m_Data.begin(), m_Data.end());
|
||||
@ -333,7 +327,26 @@ Array::Ptr Array::Unique() const
|
||||
void Array::Freeze()
|
||||
{
|
||||
ObjectLock olock(this);
|
||||
m_Frozen = true;
|
||||
m_Frozen.store(true, std::memory_order_release);
|
||||
}
|
||||
|
||||
bool Array::Frozen() const
|
||||
{
|
||||
return m_Frozen.load(std::memory_order_acquire);
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns an already locked ObjectLock if the array is frozen.
|
||||
* Otherwise, returns an unlocked object lock.
|
||||
*
|
||||
* @returns An object lock.
|
||||
*/
|
||||
ObjectLock Array::LockIfRequired()
|
||||
{
|
||||
if (Frozen()) {
|
||||
return ObjectLock(this, std::defer_lock);
|
||||
}
|
||||
return ObjectLock(this);
|
||||
}
|
||||
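Freeze() above publishes the frozen flag with memory_order_release and Frozen() reads it with memory_order_acquire, which is what lets Begin()/End() and LockIfRequired() skip the object lock once the array is frozen. A minimal standalone sketch of that pairing, using only the standard library (FreezableVector is a made-up name, not an Icinga type):

#include <atomic>
#include <cstddef>
#include <mutex>
#include <stdexcept>
#include <vector>

class FreezableVector
{
public:
	void Add(int value)
	{
		std::lock_guard<std::mutex> lock(m_Mutex);
		if (m_Frozen.load(std::memory_order_acquire))
			throw std::logic_error("container must not be modified");
		m_Data.push_back(value);
	}

	void Freeze()
	{
		std::lock_guard<std::mutex> lock(m_Mutex);
		// Release store: everything written before the freeze happens-before
		// any acquire load that observes frozen == true.
		m_Frozen.store(true, std::memory_order_release);
	}

	std::size_t Size() const
	{
		if (m_Frozen.load(std::memory_order_acquire))
			return m_Data.size(); // frozen: immutable, safe to read without the mutex
		std::lock_guard<std::mutex> lock(m_Mutex);
		return m_Data.size();
	}

private:
	mutable std::mutex m_Mutex;
	std::atomic<bool> m_Frozen{false};
	std::vector<int> m_Data;
};

int main()
{
	FreezableVector v;
	v.Add(1);
	v.Freeze();
	return v.Size() == 1 ? 0 : 1;
}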
|
||||
Value Array::GetFieldByName(const String& field, bool sandboxed, const DebugInfo& debugInfo) const
|
||||
@ -354,7 +367,7 @@ Value Array::GetFieldByName(const String& field, bool sandboxed, const DebugInfo
|
||||
return Get(index);
|
||||
}
|
||||
|
||||
void Array::SetFieldByName(const String& field, const Value& value, bool overrideFrozen, const DebugInfo& debugInfo)
|
||||
void Array::SetFieldByName(const String& field, const Value& value, const DebugInfo& debugInfo)
|
||||
{
|
||||
ObjectLock olock(this);
|
||||
|
||||
@ -364,9 +377,9 @@ void Array::SetFieldByName(const String& field, const Value& value, bool overrid
|
||||
BOOST_THROW_EXCEPTION(ScriptError("Array index '" + Convert::ToString(index) + "' is out of bounds.", debugInfo));
|
||||
|
||||
if (static_cast<size_t>(index) >= GetLength())
|
||||
Resize(index + 1, overrideFrozen);
|
||||
Resize(index + 1);
|
||||
|
||||
Set(index, value, overrideFrozen);
|
||||
Set(index, value);
|
||||
}
|
||||
|
||||
Array::Iterator icinga::begin(const Array::Ptr& x)
|
||||
|
@ -4,6 +4,7 @@
|
||||
#define ARRAY_H
|
||||
|
||||
#include "base/i2-base.hpp"
|
||||
#include "base/atomic.hpp"
|
||||
#include "base/objectlock.hpp"
|
||||
#include "base/value.hpp"
|
||||
#include <boost/range/iterator.hpp>
|
||||
@ -38,9 +39,9 @@ public:
|
||||
Array(std::initializer_list<Value> init);
|
||||
|
||||
Value Get(SizeType index) const;
|
||||
void Set(SizeType index, const Value& value, bool overrideFrozen = false);
|
||||
void Set(SizeType index, Value&& value, bool overrideFrozen = false);
|
||||
void Add(Value value, bool overrideFrozen = false);
|
||||
void Set(SizeType index, const Value& value);
|
||||
void Set(SizeType index, Value&& value);
|
||||
void Add(Value value);
|
||||
|
||||
Iterator Begin();
|
||||
Iterator End();
|
||||
@ -48,14 +49,14 @@ public:
|
||||
size_t GetLength() const;
|
||||
bool Contains(const Value& value) const;
|
||||
|
||||
void Insert(SizeType index, Value value, bool overrideFrozen = false);
|
||||
void Remove(SizeType index, bool overrideFrozen = false);
|
||||
void Remove(Iterator it, bool overrideFrozen = false);
|
||||
void Insert(SizeType index, Value value);
|
||||
void Remove(SizeType index);
|
||||
void Remove(Iterator it);
|
||||
|
||||
void Resize(SizeType newSize, bool overrideFrozen = false);
|
||||
void Clear(bool overrideFrozen = false);
|
||||
void Resize(SizeType newSize);
|
||||
void Clear();
|
||||
|
||||
void Reserve(SizeType newSize, bool overrideFrozen = false);
|
||||
void Reserve(SizeType newSize);
|
||||
|
||||
void CopyTo(const Array::Ptr& dest) const;
|
||||
Array::Ptr ShallowClone() const;
|
||||
@ -91,20 +92,22 @@ public:
|
||||
|
||||
Array::Ptr Reverse() const;
|
||||
|
||||
void Sort(bool overrideFrozen = false);
|
||||
void Sort();
|
||||
|
||||
String ToString() const override;
|
||||
Value Join(const Value& separator) const;
|
||||
|
||||
Array::Ptr Unique() const;
|
||||
void Freeze();
|
||||
bool Frozen() const;
|
||||
ObjectLock LockIfRequired();
|
||||
|
||||
Value GetFieldByName(const String& field, bool sandboxed, const DebugInfo& debugInfo) const override;
|
||||
void SetFieldByName(const String& field, const Value& value, bool overrideFrozen, const DebugInfo& debugInfo) override;
|
||||
void SetFieldByName(const String& field, const Value& value, const DebugInfo& debugInfo) override;
|
||||
|
||||
private:
|
||||
std::vector<Value> m_Data; /**< The data for the array. */
|
||||
bool m_Frozen{false};
|
||||
Atomic<bool> m_Frozen{false};
|
||||
};
|
||||
|
||||
Array::Iterator begin(const Array::Ptr& x);
|
||||
|
@ -12,7 +12,12 @@ namespace icinga
|
||||
{
|
||||
|
||||
/**
|
||||
* Extends std::atomic with an atomic constructor.
|
||||
* Like std::atomic, but enforces usage of its only safe constructor.
|
||||
*
|
||||
* "The default-initialized std::atomic<T> does not contain a T object,
|
||||
* and its only valid uses are destruction and
|
||||
* initialization by std::atomic_init, see LWG issue 2334."
|
||||
* -- https://en.cppreference.com/w/cpp/atomic/atomic/atomic
|
||||
*
|
||||
* @ingroup base
|
||||
*/
|
||||
@ -20,24 +25,12 @@ template<class T>
|
||||
class Atomic : public std::atomic<T> {
|
||||
public:
|
||||
/**
|
||||
* Like std::atomic#atomic, but operates atomically
|
||||
* The only safe constructor of std::atomic#atomic
|
||||
*
|
||||
* @param desired Initial value
|
||||
*/
|
||||
inline Atomic(T desired)
|
||||
inline Atomic(T desired) : std::atomic<T>(desired)
|
||||
{
|
||||
this->store(desired);
|
||||
}
|
||||
|
||||
/**
|
||||
* Like std::atomic#atomic, but operates atomically
|
||||
*
|
||||
* @param desired Initial value
|
||||
* @param order Initial store operation's memory order
|
||||
*/
|
||||
inline Atomic(T desired, std::memory_order order)
|
||||
{
|
||||
this->store(desired, order);
|
||||
}
|
||||
};
|
||||
|
||||
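The rewritten constructor above initializes the base std::atomic directly because, before C++20, a default-constructed std::atomic<T> contains no valid T at all. A standalone sketch of the same guard, under an illustrative name (InitializedAtomic is not the Icinga class):

#include <atomic>

template<class T>
class InitializedAtomic : public std::atomic<T>
{
public:
	InitializedAtomic() = delete; // never leave the contained value indeterminate
	InitializedAtomic(T desired) : std::atomic<T>(desired) {}
};

int main()
{
	InitializedAtomic<bool> frozen(false);
	frozen.store(true, std::memory_order_release);
	return frozen.load(std::memory_order_acquire) ? 0 : 1;
}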
|
@ -9,11 +9,13 @@
|
||||
#include "base/dictionary.hpp"
|
||||
#include <shared_mutex>
|
||||
#include <unordered_map>
|
||||
#include <boost/signals2.hpp>
|
||||
|
||||
namespace icinga
|
||||
{
|
||||
|
||||
class ConfigObject;
|
||||
class ConfigItems;
|
||||
|
||||
class ConfigType
|
||||
{
|
||||
@ -48,6 +50,13 @@ for (const auto& object : objects) {
|
||||
|
||||
int GetObjectCount() const;
|
||||
|
||||
/**
|
||||
* Signal that allows hooking into the config loading process just before ConfigObject::OnAllConfigLoaded() is
|
||||
* called for a bunch of objects. A vector of pointers to these objects is passed as an argument. All elements
|
||||
* are of the object type the signal is called on.
|
||||
*/
|
||||
boost::signals2::signal<void (const ConfigItems&)> BeforeOnAllConfigLoaded;
|
||||
|
||||
private:
|
||||
typedef std::unordered_map<String, intrusive_ptr<ConfigObject> > ObjectMap;
|
||||
typedef std::vector<intrusive_ptr<ConfigObject> > ObjectVector;
|
||||
|
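The BeforeOnAllConfigLoaded member added above is an ordinary boost::signals2 signal that observers connect to and that is fired once per batch of objects. A small standalone sketch of that hook pattern, with std::string standing in for the ConfigObject pointers (all names here are illustrative):

#include <boost/signals2.hpp>
#include <iostream>
#include <string>
#include <vector>

int main()
{
	boost::signals2::signal<void (const std::vector<std::string>&)> beforeLoaded;

	// An observer hooks into the loading process for the whole batch at once.
	beforeLoaded.connect([](const std::vector<std::string>& batch) {
		std::cout << "about to finish loading " << batch.size() << " objects\n";
	});

	std::vector<std::string> batch{"host1", "host2"};
	beforeLoaded(batch); // fire the signal once per batch, not per object
}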
@ -35,7 +35,7 @@ DateTime::DateTime(const std::vector<Value>& args)
|
||||
|
||||
tms.tm_isdst = -1;
|
||||
|
||||
m_Value = mktime(&tms);
|
||||
m_Value = Utility::TmToTimestamp(&tms);
|
||||
} else if (args.size() == 1)
|
||||
m_Value = args[0];
|
||||
else
|
||||
|
@ -22,6 +22,8 @@ public:
|
||||
{
|
||||
}
|
||||
|
||||
Defer() = default;
|
||||
|
||||
Defer(const Defer&) = delete;
|
||||
Defer(Defer&&) = delete;
|
||||
Defer& operator=(const Defer&) = delete;
|
||||
@ -39,6 +41,11 @@ public:
|
||||
}
|
||||
}
|
||||
|
||||
inline void SetFunc(std::function<void()> func)
|
||||
{
|
||||
m_Func = std::move(func);
|
||||
}
|
||||
|
||||
inline
|
||||
void Cancel()
|
||||
{
|
||||
|
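Defer gains a default constructor and a SetFunc() setter above; the class itself is a scope guard that runs a callable on destruction unless Cancel() was called first. A rough standalone equivalent (simplified; the real destructor additionally guards against exceptions escaping):

#include <functional>
#include <iostream>

class Defer
{
public:
	Defer() = default;
	explicit Defer(std::function<void()> func) : m_Func(std::move(func)) {}
	Defer(const Defer&) = delete;
	Defer& operator=(const Defer&) = delete;

	~Defer()
	{
		if (m_Func)
			m_Func(); // runs at scope exit unless cancelled
	}

	void SetFunc(std::function<void()> func) { m_Func = std::move(func); }
	void Cancel() { m_Func = nullptr; }

private:
	std::function<void()> m_Func;
};

int main()
{
	Defer d([] { std::cout << "cleanup runs at scope exit\n"; });

	Defer cancelled([] { std::cout << "never printed\n"; });
	cancelled.Cancel();
}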
@ -5,46 +5,68 @@
|
||||
using namespace icinga;
|
||||
|
||||
std::mutex DependencyGraph::m_Mutex;
|
||||
std::map<Object *, std::map<Object *, int> > DependencyGraph::m_Dependencies;
|
||||
DependencyGraph::DependencyMap DependencyGraph::m_Dependencies;
|
||||
|
||||
void DependencyGraph::AddDependency(Object *parent, Object *child)
|
||||
void DependencyGraph::AddDependency(ConfigObject* child, ConfigObject* parent)
|
||||
{
|
||||
std::unique_lock<std::mutex> lock(m_Mutex);
|
||||
m_Dependencies[child][parent]++;
|
||||
if (auto [it, inserted] = m_Dependencies.insert(Edge(parent, child)); !inserted) {
|
||||
m_Dependencies.modify(it, [](Edge& e) { e.count++; });
|
||||
}
|
||||
}
|
||||
|
||||
void DependencyGraph::RemoveDependency(Object *parent, Object *child)
|
||||
void DependencyGraph::RemoveDependency(ConfigObject* child, ConfigObject* parent)
|
||||
{
|
||||
std::unique_lock<std::mutex> lock(m_Mutex);
|
||||
|
||||
auto& refs = m_Dependencies[child];
|
||||
auto it = refs.find(parent);
|
||||
|
||||
if (it == refs.end())
|
||||
return;
|
||||
|
||||
it->second--;
|
||||
|
||||
if (it->second == 0)
|
||||
refs.erase(it);
|
||||
|
||||
if (refs.empty())
|
||||
m_Dependencies.erase(child);
|
||||
}
|
||||
|
||||
std::vector<Object::Ptr> DependencyGraph::GetParents(const Object::Ptr& child)
|
||||
{
|
||||
std::vector<Object::Ptr> objects;
|
||||
|
||||
std::unique_lock<std::mutex> lock(m_Mutex);
|
||||
auto it = m_Dependencies.find(child.get());
|
||||
|
||||
if (it != m_Dependencies.end()) {
|
||||
typedef std::pair<Object *, int> kv_pair;
|
||||
for (const kv_pair& kv : it->second) {
|
||||
objects.emplace_back(kv.first);
|
||||
if (auto it(m_Dependencies.find(Edge(parent, child))); it != m_Dependencies.end()) {
|
||||
if (it->count > 1) {
|
||||
// Remove a duplicate edge from child to node, i.e. decrement the corresponding counter.
|
||||
m_Dependencies.modify(it, [](Edge& e) { e.count--; });
|
||||
} else {
|
||||
// Remove the last edge from child to node (decrementing the counter would set it to 0),
|
||||
// thus remove that connection from the data structure completely.
|
||||
m_Dependencies.erase(it);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns all the parent objects of the given child object.
|
||||
*
|
||||
* @param child The child object.
|
||||
*
|
||||
* @returns A list of the parent objects.
|
||||
*/
|
||||
std::vector<ConfigObject::Ptr> DependencyGraph::GetParents(const ConfigObject::Ptr& child)
|
||||
{
|
||||
std::vector<ConfigObject::Ptr> objects;
|
||||
|
||||
std::unique_lock lock(m_Mutex);
|
||||
auto [begin, end] = m_Dependencies.get<2>().equal_range(child.get());
|
||||
std::transform(begin, end, std::back_inserter(objects), [](const Edge& edge) {
|
||||
return edge.parent;
|
||||
});
|
||||
|
||||
return objects;
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns all the dependent objects of the given parent object.
|
||||
*
|
||||
* @param parent The parent object.
|
||||
*
|
||||
* @returns A list of the dependent objects.
|
||||
*/
|
||||
std::vector<ConfigObject::Ptr> DependencyGraph::GetChildren(const ConfigObject::Ptr& parent)
|
||||
{
|
||||
std::vector<ConfigObject::Ptr> objects;
|
||||
|
||||
std::unique_lock lock(m_Mutex);
|
||||
auto [begin, end] = m_Dependencies.get<1>().equal_range(parent.get());
|
||||
std::transform(begin, end, std::back_inserter(objects), [](const Edge& edge) {
|
||||
return edge.child;
|
||||
});
|
||||
|
||||
return objects;
|
||||
}
|
||||
|
@ -4,8 +4,10 @@
|
||||
#define DEPENDENCYGRAPH_H
|
||||
|
||||
#include "base/i2-base.hpp"
|
||||
#include "base/object.hpp"
|
||||
#include <map>
|
||||
#include "base/configobject.hpp"
|
||||
#include <boost/multi_index_container.hpp>
|
||||
#include <boost/multi_index/hashed_index.hpp>
|
||||
#include <boost/multi_index/member.hpp>
|
||||
#include <mutex>
|
||||
|
||||
namespace icinga {
|
||||
@ -18,15 +20,84 @@ namespace icinga {
|
||||
class DependencyGraph
|
||||
{
|
||||
public:
|
||||
static void AddDependency(Object *parent, Object *child);
|
||||
static void RemoveDependency(Object *parent, Object *child);
|
||||
static std::vector<Object::Ptr> GetParents(const Object::Ptr& child);
|
||||
static void AddDependency(ConfigObject* child, ConfigObject* parent);
|
||||
static void RemoveDependency(ConfigObject* child, ConfigObject* parent);
|
||||
static std::vector<ConfigObject::Ptr> GetParents(const ConfigObject::Ptr& child);
|
||||
static std::vector<ConfigObject::Ptr> GetChildren(const ConfigObject::Ptr& parent);
|
||||
|
||||
private:
|
||||
DependencyGraph();
|
||||
|
||||
/**
|
||||
* Represents an undirected dependency edge between two objects.
|
||||
*
|
||||
* It allows traversing the graph in both directions, i.e. from parent to child and vice versa.
|
||||
*/
|
||||
struct Edge
|
||||
{
|
||||
ConfigObject* parent; // The parent object of the child one.
|
||||
ConfigObject* child; // The dependent object of the parent.
|
||||
// Counter for the number of parent <-> child edges to allow duplicates.
|
||||
int count;
|
||||
|
||||
Edge(ConfigObject* parent, ConfigObject* child, int count = 1): parent(parent), child(child), count(count)
|
||||
{
|
||||
}
|
||||
|
||||
struct Hash
|
||||
{
|
||||
/**
|
||||
* Generates a unique hash of the given Edge object.
|
||||
*
|
||||
* Note, the hash value is generated only by combining the hash values of the parent and child pointers.
|
||||
*
|
||||
* @param edge The Edge object to be hashed.
|
||||
*
|
||||
* @return size_t The resulting hash value of the given object.
|
||||
*/
|
||||
size_t operator()(const Edge& edge) const
|
||||
{
|
||||
size_t seed = 0;
|
||||
boost::hash_combine(seed, edge.parent);
|
||||
boost::hash_combine(seed, edge.child);
|
||||
|
||||
return seed;
|
||||
}
|
||||
};
|
||||
|
||||
struct Equal
|
||||
{
|
||||
/**
|
||||
* Compares whether the two Edge objects contain the same parent and child pointers.
|
||||
*
|
||||
* Note, the member property count is not taken into account for equality checks.
|
||||
*
|
||||
* @param a The first Edge object to compare.
|
||||
* @param b The second Edge object to compare.
|
||||
*
|
||||
* @return bool Returns true if the two objects are equal, false otherwise.
|
||||
*/
|
||||
bool operator()(const Edge& a, const Edge& b) const
|
||||
{
|
||||
return a.parent == b.parent && a.child == b.child;
|
||||
}
|
||||
};
|
||||
};
|
||||
|
||||
using DependencyMap = boost::multi_index_container<
|
||||
Edge, // The value type we want to store in the container.
|
||||
boost::multi_index::indexed_by<
|
||||
// The first indexer is used for lookups by the Edge from child to parent, thus it
|
||||
// needs its own hash function and comparison predicate.
|
||||
boost::multi_index::hashed_unique<boost::multi_index::identity<Edge>, Edge::Hash, Edge::Equal>,
|
||||
// These two indexers are used for lookups by the parent and child pointers.
|
||||
boost::multi_index::hashed_non_unique<boost::multi_index::member<Edge, ConfigObject*, &Edge::parent>>,
|
||||
boost::multi_index::hashed_non_unique<boost::multi_index::member<Edge, ConfigObject*, &Edge::child>>
|
||||
>
|
||||
>;
|
||||
|
||||
static std::mutex m_Mutex;
|
||||
static std::map<Object *, std::map<Object *, int> > m_Dependencies;
|
||||
static DependencyMap m_Dependencies;
|
||||
};
|
||||
|
||||
}
|
||||
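The DependencyMap above replaces the nested std::map with one boost::multi_index_container: a hashed_unique index identifies an edge by its (parent, child) pair, and two hashed_non_unique member indexes back the GetChildren()/GetParents() lookups. A self-contained sketch of the same layout, with plain int standing in for ConfigObject* and helper names chosen only for illustration:

#include <boost/functional/hash.hpp>
#include <boost/multi_index_container.hpp>
#include <boost/multi_index/hashed_index.hpp>
#include <boost/multi_index/identity.hpp>
#include <boost/multi_index/member.hpp>
#include <cstddef>
#include <iostream>

struct Edge
{
	int parent;
	int child;
	int count;

	Edge(int parent, int child, int count = 1) : parent(parent), child(child), count(count) {}

	struct Hash
	{
		std::size_t operator()(const Edge& e) const
		{
			std::size_t seed = 0;
			boost::hash_combine(seed, e.parent);
			boost::hash_combine(seed, e.child);
			return seed;
		}
	};

	struct Equal
	{
		bool operator()(const Edge& a, const Edge& b) const
		{
			return a.parent == b.parent && a.child == b.child; // count is ignored
		}
	};
};

using EdgeMap = boost::multi_index_container<
	Edge,
	boost::multi_index::indexed_by<
		boost::multi_index::hashed_unique<boost::multi_index::identity<Edge>, Edge::Hash, Edge::Equal>,
		boost::multi_index::hashed_non_unique<boost::multi_index::member<Edge, int, &Edge::parent>>,
		boost::multi_index::hashed_non_unique<boost::multi_index::member<Edge, int, &Edge::child>>
	>
>;

void AddEdge(EdgeMap& edges, int parent, int child)
{
	// Insert a new edge, or bump the counter if this (parent, child) pair already exists.
	if (auto [it, inserted] = edges.insert(Edge(parent, child)); !inserted)
		edges.modify(it, [](Edge& e) { e.count++; });
}

int main()
{
	EdgeMap edges;
	AddEdge(edges, 1, 10);
	AddEdge(edges, 1, 11);
	AddEdge(edges, 1, 10); // duplicate edge: count becomes 2

	// Index #1 is keyed by parent: list all children of parent 1.
	auto [begin, end] = edges.get<1>().equal_range(1);
	for (auto it = begin; it != end; ++it)
		std::cout << it->parent << " -> " << it->child << " (x" << it->count << ")\n";
}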
|
@ -1,7 +1,6 @@
|
||||
/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
|
||||
|
||||
#include "base/dictionary.hpp"
|
||||
#include "base/objectlock.hpp"
|
||||
#include "base/debug.hpp"
|
||||
#include "base/primitivetype.hpp"
|
||||
#include "base/configwriter.hpp"
|
||||
@ -86,14 +85,13 @@ const Value * Dictionary::GetRef(const String& key) const
|
||||
*
|
||||
* @param key The key.
|
||||
* @param value The value.
|
||||
* @param overrideFrozen Whether to allow modifying frozen dictionaries.
|
||||
*/
|
||||
void Dictionary::Set(const String& key, Value value, bool overrideFrozen)
|
||||
void Dictionary::Set(const String& key, Value value)
|
||||
{
|
||||
ObjectLock olock(this);
|
||||
std::unique_lock<std::shared_timed_mutex> lock (m_DataMutex);
|
||||
|
||||
if (m_Frozen && !overrideFrozen)
|
||||
if (m_Frozen)
|
||||
BOOST_THROW_EXCEPTION(std::invalid_argument("Value in dictionary must not be modified."));
|
||||
|
||||
m_Data[key] = std::move(value);
|
||||
@ -133,7 +131,7 @@ bool Dictionary::Contains(const String& key) const
|
||||
*/
|
||||
Dictionary::Iterator Dictionary::Begin()
|
||||
{
|
||||
ASSERT(OwnsLock());
|
||||
ASSERT(Frozen() || OwnsLock());
|
||||
|
||||
return m_Data.begin();
|
||||
}
|
||||
@ -147,7 +145,7 @@ Dictionary::Iterator Dictionary::Begin()
|
||||
*/
|
||||
Dictionary::Iterator Dictionary::End()
|
||||
{
|
||||
ASSERT(OwnsLock());
|
||||
ASSERT(Frozen() || OwnsLock());
|
||||
|
||||
return m_Data.end();
|
||||
}
|
||||
@ -277,7 +275,26 @@ String Dictionary::ToString() const
|
||||
void Dictionary::Freeze()
|
||||
{
|
||||
ObjectLock olock(this);
|
||||
m_Frozen = true;
|
||||
m_Frozen.store(true, std::memory_order_release);
|
||||
}
|
||||
|
||||
bool Dictionary::Frozen() const
|
||||
{
|
||||
return m_Frozen.load(std::memory_order_acquire);
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns an already locked ObjectLock if the dictionary is frozen.
|
||||
* Otherwise, returns an unlocked object lock.
|
||||
*
|
||||
* @returns An object lock.
|
||||
*/
|
||||
ObjectLock Dictionary::LockIfRequired()
|
||||
{
|
||||
if (Frozen()) {
|
||||
return ObjectLock(this, std::defer_lock);
|
||||
}
|
||||
return ObjectLock(this);
|
||||
}
|
||||
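Dictionary::LockIfRequired() above hands the caller either an engaged or a deferred ObjectLock, so one code path works for both mutable and frozen dictionaries. The same idea expressed with standard library types only (Container and its members are made-up names for illustration; the real flag is atomic):

#include <iostream>
#include <mutex>

class Container
{
public:
	std::unique_lock<std::mutex> LockIfRequired()
	{
		if (m_Frozen)
			return std::unique_lock<std::mutex>(m_Mutex, std::defer_lock); // returned unlocked
		return std::unique_lock<std::mutex>(m_Mutex); // returned locked
	}

	void Freeze() { m_Frozen = true; }

private:
	std::mutex m_Mutex;
	bool m_Frozen = false;
};

int main()
{
	Container c;
	{
		auto lock = c.LockIfRequired();
		std::cout << "mutable: lock held = " << lock.owns_lock() << "\n"; // 1
	}
	c.Freeze();
	auto lock = c.LockIfRequired();
	std::cout << "frozen: lock held = " << lock.owns_lock() << "\n"; // 0
}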
|
||||
Value Dictionary::GetFieldByName(const String& field, bool, const DebugInfo& debugInfo) const
|
||||
@ -290,9 +307,9 @@ Value Dictionary::GetFieldByName(const String& field, bool, const DebugInfo& deb
|
||||
return GetPrototypeField(const_cast<Dictionary *>(this), field, false, debugInfo);
|
||||
}
|
||||
|
||||
void Dictionary::SetFieldByName(const String& field, const Value& value, bool overrideFrozen, const DebugInfo&)
|
||||
void Dictionary::SetFieldByName(const String& field, const Value& value, const DebugInfo&)
|
||||
{
|
||||
Set(field, value, overrideFrozen);
|
||||
Set(field, value);
|
||||
}
|
||||
|
||||
bool Dictionary::HasOwnField(const String& field) const
|
||||
|
@ -4,7 +4,9 @@
|
||||
#define DICTIONARY_H
|
||||
|
||||
#include "base/i2-base.hpp"
|
||||
#include "base/atomic.hpp"
|
||||
#include "base/object.hpp"
|
||||
#include "base/objectlock.hpp"
|
||||
#include "base/value.hpp"
|
||||
#include <boost/range/iterator.hpp>
|
||||
#include <map>
|
||||
@ -43,7 +45,7 @@ public:
|
||||
Value Get(const String& key) const;
|
||||
bool Get(const String& key, Value *result) const;
|
||||
const Value * GetRef(const String& key) const;
|
||||
void Set(const String& key, Value value, bool overrideFrozen = false);
|
||||
void Set(const String& key, Value value);
|
||||
bool Contains(const String& key) const;
|
||||
|
||||
Iterator Begin();
|
||||
@ -69,16 +71,18 @@ public:
|
||||
String ToString() const override;
|
||||
|
||||
void Freeze();
|
||||
bool Frozen() const;
|
||||
ObjectLock LockIfRequired();
|
||||
|
||||
Value GetFieldByName(const String& field, bool sandboxed, const DebugInfo& debugInfo) const override;
|
||||
void SetFieldByName(const String& field, const Value& value, bool overrideFrozen, const DebugInfo& debugInfo) override;
|
||||
void SetFieldByName(const String& field, const Value& value, const DebugInfo& debugInfo) override;
|
||||
bool HasOwnField(const String& field) const override;
|
||||
bool GetOwnField(const String& field, Value *result) const override;
|
||||
|
||||
private:
|
||||
std::map<String, Value> m_Data; /**< The data for the dictionary. */
|
||||
mutable std::shared_timed_mutex m_DataMutex;
|
||||
bool m_Frozen{false};
|
||||
Atomic<bool> m_Frozen{false};
|
||||
};
|
||||
|
||||
Dictionary::Iterator begin(const Dictionary::Ptr& x);
|
||||
|
48
lib/base/generator.hpp
Normal file
@ -0,0 +1,48 @@
/* Icinga 2 | (c) 2025 Icinga GmbH | GPLv2+ */

#pragma once

#include "base/i2-base.hpp"
#include "base/value.hpp"
#include <optional>

namespace icinga
{

/**
 * ValueGenerator is a class that defines a generator function type for producing Values on demand.
 *
 * This class is used to create generator functions that can yield any values that can be represented by the
 * Icinga Value type. The generator function is exhausted when it returns `std::nullopt`, indicating that there
 * are no more values to produce. Subsequent calls to `Next()` will always return `std::nullopt` after exhaustion.
 *
 * @ingroup base
 */
class ValueGenerator final : public Object
{
public:
	DECLARE_PTR_TYPEDEFS(ValueGenerator);

	/**
	 * Generates a Value using the provided generator function.
	 *
	 * The generator function should return an `std::optional<Value>` which contains the produced Value or
	 * `std::nullopt` when there are no more values to produce. After the generator function returns `std::nullopt`,
	 * the generator is considered exhausted, and further calls to `Next()` will always return `std::nullopt`.
	 */
	using GenFunc = std::function<std::optional<Value>()>;

	explicit ValueGenerator(GenFunc generator): m_Generator(std::move(generator))
	{
	}

	std::optional<Value> Next() const
	{
		return m_Generator();
	}

private:
	GenFunc m_Generator; // The generator function that produces Values.
};

}
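A standalone sketch of the generator contract documented above, using int in place of icinga::Value; MakeRangeGenerator is an illustrative helper, not part of the header:

#include <functional>
#include <iostream>
#include <optional>

using GenFunc = std::function<std::optional<int>()>;

GenFunc MakeRangeGenerator(int from, int to)
{
	// Yields from, from+1, ..., to-1, then std::nullopt forever (exhausted).
	return [i = from, to]() mutable -> std::optional<int> {
		if (i >= to)
			return std::nullopt;
		return i++;
	};
}

int main()
{
	auto gen = MakeRangeGenerator(0, 3);
	while (auto v = gen())       // consume until the generator is exhausted
		std::cout << *v << "\n"; // prints 0, 1, 2
}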
@ -23,6 +23,7 @@ enum class InitializePriority {
|
||||
RegisterBuiltinTypes,
|
||||
RegisterFunctions,
|
||||
RegisterTypes,
|
||||
SortTypes,
|
||||
EvaluateConfigFragments,
|
||||
Default,
|
||||
FreezeNamespaces,
|
||||
|
22
lib/base/intrusive-ptr.hpp
Normal file
@ -0,0 +1,22 @@
/* Icinga 2 | (c) 2025 Icinga GmbH | GPLv2+ */

#pragma once

#include "base/i2-base.hpp"
#include <memory>
#include <boost/smart_ptr/intrusive_ptr.hpp>
#include <boost/version.hpp>

// std::hash is only implemented starting from Boost 1.74. Implement it ourselves for older versions to allow using
// boost::intrusive_ptr inside std::unordered_set<> or as the key of std::unordered_map<>.
// https://github.com/boostorg/smart_ptr/commit/5a18ffdc5609a0e64b63e47cb81c4f0847e0c087
#if BOOST_VERSION < 107400
template<class T>
struct std::hash<boost::intrusive_ptr<T>>
{
	std::size_t operator()(const boost::intrusive_ptr<T>& ptr) const noexcept
	{
		return std::hash<T*>{}(ptr.get());
	}
};
#endif /* BOOST_VERSION < 107400 */
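The specialization above simply delegates to the raw-pointer hash of the address the smart pointer holds. The same pattern for an arbitrary pointer wrapper, shown standalone (Handle is a made-up stand-in for boost::intrusive_ptr):

#include <cstddef>
#include <functional>
#include <unordered_set>

template<class T>
class Handle
{
public:
	explicit Handle(T* p = nullptr) : m_Ptr(p) {}
	T* get() const noexcept { return m_Ptr; }
	bool operator==(const Handle& other) const noexcept { return m_Ptr == other.m_Ptr; }

private:
	T* m_Ptr;
};

namespace std
{

template<class T>
struct hash<Handle<T>>
{
	std::size_t operator()(const Handle<T>& h) const noexcept
	{
		return std::hash<T*>{}(h.get()); // hash the address the wrapper holds
	}
};

}

int main()
{
	int a = 1, b = 2;
	std::unordered_set<Handle<int>> handles{Handle<int>(&a), Handle<int>(&b)};
	return handles.count(Handle<int>(&a)) == 1 ? 0 : 1;
}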
@ -124,31 +124,63 @@ void IoEngine::RunEventLoop()
|
||||
}
|
||||
}
|
||||
|
||||
AsioConditionVariable::AsioConditionVariable(boost::asio::io_context& io, bool init)
|
||||
AsioEvent::AsioEvent(boost::asio::io_context& io, bool init)
|
||||
: m_Timer(io)
|
||||
{
|
||||
m_Timer.expires_at(init ? boost::posix_time::neg_infin : boost::posix_time::pos_infin);
|
||||
}
|
||||
|
||||
void AsioConditionVariable::Set()
|
||||
void AsioEvent::Set()
|
||||
{
|
||||
m_Timer.expires_at(boost::posix_time::neg_infin);
|
||||
}
|
||||
|
||||
void AsioConditionVariable::Clear()
|
||||
void AsioEvent::Clear()
|
||||
{
|
||||
m_Timer.expires_at(boost::posix_time::pos_infin);
|
||||
}
|
||||
|
||||
void AsioConditionVariable::Wait(boost::asio::yield_context yc)
|
||||
void AsioEvent::Wait(boost::asio::yield_context yc)
|
||||
{
|
||||
boost::system::error_code ec;
|
||||
m_Timer.async_wait(yc[ec]);
|
||||
}
|
||||
|
||||
AsioDualEvent::AsioDualEvent(boost::asio::io_context& io, bool init)
|
||||
: m_IsTrue(io, init), m_IsFalse(io, !init)
|
||||
{
|
||||
}
|
||||
|
||||
void AsioDualEvent::Set()
|
||||
{
|
||||
m_IsTrue.Set();
|
||||
m_IsFalse.Clear();
|
||||
}
|
||||
|
||||
void AsioDualEvent::Clear()
|
||||
{
|
||||
m_IsTrue.Clear();
|
||||
m_IsFalse.Set();
|
||||
}
|
||||
|
||||
void AsioDualEvent::WaitForSet(boost::asio::yield_context yc)
|
||||
{
|
||||
m_IsTrue.Wait(std::move(yc));
|
||||
}
|
||||
|
||||
void AsioDualEvent::WaitForClear(boost::asio::yield_context yc)
|
||||
{
|
||||
m_IsFalse.Wait(std::move(yc));
|
||||
}
|
||||
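AsioEvent above drives its whole state through a deadline_timer expiry: +infinity means cleared (waits stay pending), and moving the expiry to -infinity means set (pending waits complete at once through cancellation, and later waits return immediately). A standalone, callback-based sketch of that trick:

#include <boost/asio.hpp>
#include <iostream>

int main()
{
	boost::asio::io_context io;

	boost::asio::deadline_timer event(io);
	event.expires_at(boost::posix_time::pos_infin); // "cleared": a wait never fires on its own

	// Waiter: resumes only once the event is set. Changing the expiry completes this wait
	// with operation_aborted, which is why the error code is simply ignored here
	// (AsioEvent::Wait does the same).
	event.async_wait([](const boost::system::error_code&) {
		std::cout << "event was set\n";
	});

	// Setter: after 100 ms, "set" the event by moving its expiry into the past.
	boost::asio::deadline_timer delay(io, boost::posix_time::milliseconds(100));
	delay.async_wait([&event](const boost::system::error_code&) {
		event.expires_at(boost::posix_time::neg_infin);
	});

	io.run();
}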
|
||||
/**
|
||||
* Cancels any pending timeout callback.
|
||||
*
|
||||
* Must be called in the strand in which the callback was scheduled!
|
||||
*/
|
||||
void Timeout::Cancel()
|
||||
{
|
||||
m_Cancelled.store(true);
|
||||
m_Cancelled->store(true);
|
||||
|
||||
boost::system::error_code ec;
|
||||
m_Timer.cancel(ec);
|
||||
|
@ -3,10 +3,12 @@
|
||||
#ifndef IO_ENGINE_H
|
||||
#define IO_ENGINE_H
|
||||
|
||||
#include "base/atomic.hpp"
|
||||
#include "base/debug.hpp"
|
||||
#include "base/exception.hpp"
|
||||
#include "base/lazy-init.hpp"
|
||||
#include "base/logger.hpp"
|
||||
#include "base/shared-object.hpp"
|
||||
#include "base/shared.hpp"
|
||||
#include <atomic>
|
||||
#include <exception>
|
||||
#include <memory>
|
||||
@ -14,11 +16,16 @@
|
||||
#include <utility>
|
||||
#include <vector>
|
||||
#include <stdexcept>
|
||||
#include <boost/context/fixedsize_stack.hpp>
|
||||
#include <boost/exception/all.hpp>
|
||||
#include <boost/asio/deadline_timer.hpp>
|
||||
#include <boost/asio/io_context.hpp>
|
||||
#include <boost/asio/spawn.hpp>
|
||||
|
||||
#if BOOST_VERSION >= 108700
|
||||
# include <boost/asio/detached.hpp>
|
||||
#endif // BOOST_VERSION >= 108700
|
||||
|
||||
namespace icinga
|
||||
{
|
||||
|
||||
@ -98,24 +105,32 @@ public:
|
||||
|
||||
template <typename Handler, typename Function>
|
||||
static void SpawnCoroutine(Handler& h, Function f) {
|
||||
|
||||
boost::asio::spawn(h,
|
||||
[f](boost::asio::yield_context yc) {
|
||||
|
||||
auto wrapper = [f = std::move(f)](boost::asio::yield_context yc) {
|
||||
try {
|
||||
f(yc);
|
||||
} catch (const std::exception& ex) {
|
||||
Log(LogCritical, "IoEngine") << "Exception in coroutine: " << DiagnosticInformation(ex);
|
||||
} catch (...) {
|
||||
try {
|
||||
f(yc);
|
||||
} catch (const boost::coroutines::detail::forced_unwind &) {
|
||||
// Required for proper stack unwinding when coroutines are destroyed.
|
||||
// https://github.com/boostorg/coroutine/issues/39
|
||||
throw;
|
||||
} catch (const std::exception& ex) {
|
||||
Log(LogCritical, "IoEngine") << "Exception in coroutine: " << DiagnosticInformation(ex);
|
||||
} catch (...) {
|
||||
Log(LogCritical, "IoEngine", "Exception in coroutine!");
|
||||
} catch (...) {
|
||||
}
|
||||
},
|
||||
boost::coroutines::attributes(GetCoroutineStackSize()) // Set a pre-defined stack size.
|
||||
|
||||
// Required for proper stack unwinding when coroutines are destroyed.
|
||||
// https://github.com/boostorg/coroutine/issues/39
|
||||
throw;
|
||||
}
|
||||
};
|
||||
|
||||
#if BOOST_VERSION >= 108700
|
||||
boost::asio::spawn(h,
|
||||
std::allocator_arg, boost::context::fixedsize_stack(GetCoroutineStackSize()),
|
||||
std::move(wrapper),
|
||||
boost::asio::detached
|
||||
);
|
||||
#else // BOOST_VERSION >= 108700
|
||||
boost::asio::spawn(h, std::move(wrapper), boost::coroutines::attributes(GetCoroutineStackSize()));
|
||||
#endif // BOOST_VERSION >= 108700
|
||||
}
|
||||
|
||||
static inline
|
||||
@ -143,14 +158,14 @@ class TerminateIoThread : public std::exception
|
||||
};
|
||||
|
||||
/**
|
||||
* Condition variable which doesn't block I/O threads
|
||||
* Awaitable flag which doesn't block I/O threads, inspired by threading.Event from Python
|
||||
*
|
||||
* @ingroup base
|
||||
*/
|
||||
class AsioConditionVariable
|
||||
class AsioEvent
|
||||
{
|
||||
public:
|
||||
AsioConditionVariable(boost::asio::io_context& io, bool init = false);
|
||||
AsioEvent(boost::asio::io_context& io, bool init = false);
|
||||
|
||||
void Set();
|
||||
void Clear();
|
||||
@ -161,53 +176,102 @@ private:
|
||||
};
|
||||
|
||||
/**
|
||||
* I/O timeout emulator
|
||||
* Like AsioEvent, which only allows waiting for an event to be set, but additionally supports waiting for clearing
|
||||
*
|
||||
* @ingroup base
|
||||
*/
|
||||
class Timeout : public SharedObject
|
||||
class AsioDualEvent
|
||||
{
|
||||
public:
|
||||
DECLARE_PTR_TYPEDEFS(Timeout);
|
||||
AsioDualEvent(boost::asio::io_context& io, bool init = false);
|
||||
|
||||
template<class Executor, class TimeoutFromNow, class OnTimeout>
|
||||
Timeout(boost::asio::io_context& io, Executor& executor, TimeoutFromNow timeoutFromNow, OnTimeout onTimeout)
|
||||
: m_Timer(io)
|
||||
void Set();
|
||||
void Clear();
|
||||
|
||||
void WaitForSet(boost::asio::yield_context yc);
|
||||
void WaitForClear(boost::asio::yield_context yc);
|
||||
|
||||
private:
|
||||
AsioEvent m_IsTrue, m_IsFalse;
|
||||
};
|
||||
|
||||
/**
|
||||
* I/O timeout emulator
|
||||
*
|
||||
* This class provides a workaround for Boost.ASIO's lack of built-in timeout support.
|
||||
* While Boost.ASIO handles asynchronous operations, it does not natively support timeouts for these operations.
|
||||
* This class uses a boost::asio::deadline_timer to emulate a timeout by scheduling a callback to be triggered
|
||||
* after a specified duration, effectively adding timeout behavior where none exists.
|
||||
* The callback is executed within the provided strand, ensuring thread-safety.
|
||||
*
|
||||
* The constructor returns immediately after scheduling the timeout callback.
|
||||
* The callback itself is invoked asynchronously when the timeout occurs.
|
||||
* This allows the caller to continue execution while the timeout is running in the background.
|
||||
*
|
||||
* The class provides a Cancel() method to unschedule any pending callback. If the callback has already been run,
|
||||
* calling Cancel() has no effect. This method can be used to abort the timeout early if the monitored operation
|
||||
* completes before the callback has been run. The Timeout destructor also automatically cancels any pending callback.
|
||||
* A callback is considered pending even if the timeout has already expired,
|
||||
* but the callback has not been executed yet due to a busy strand.
|
||||
*
|
||||
* @ingroup base
|
||||
*/
|
||||
class Timeout
|
||||
{
|
||||
public:
|
||||
using Timer = boost::asio::deadline_timer;
|
||||
|
||||
/**
|
||||
* Schedules onTimeout to be triggered after timeoutFromNow on strand.
|
||||
*
|
||||
* @param strand The strand in which the callback will be executed.
|
||||
* The caller must also run in this strand, as well as Cancel() and the destructor!
|
||||
* @param timeoutFromNow The duration after which the timeout callback will be triggered.
|
||||
* @param onTimeout The callback to invoke when the timeout occurs.
|
||||
*/
|
||||
template<class OnTimeout>
|
||||
Timeout(boost::asio::io_context::strand& strand, const Timer::duration_type& timeoutFromNow, OnTimeout onTimeout)
|
||||
: m_Timer(strand.context(), timeoutFromNow), m_Cancelled(Shared<Atomic<bool>>::Make(false))
|
||||
{
|
||||
Ptr keepAlive (this);
|
||||
VERIFY(strand.running_in_this_thread());
|
||||
|
||||
m_Cancelled.store(false);
|
||||
m_Timer.expires_from_now(std::move(timeoutFromNow));
|
||||
|
||||
IoEngine::SpawnCoroutine(executor, [this, keepAlive, onTimeout](boost::asio::yield_context yc) {
|
||||
if (m_Cancelled.load()) {
|
||||
return;
|
||||
}
|
||||
|
||||
{
|
||||
boost::system::error_code ec;
|
||||
|
||||
m_Timer.async_wait(yc[ec]);
|
||||
|
||||
if (ec) {
|
||||
return;
|
||||
m_Timer.async_wait(boost::asio::bind_executor(
|
||||
strand, [cancelled = m_Cancelled, onTimeout = std::move(onTimeout)](boost::system::error_code ec) {
|
||||
if (!ec && !cancelled->load()) {
|
||||
onTimeout();
|
||||
}
|
||||
}
|
||||
));
|
||||
}
|
||||
|
||||
if (m_Cancelled.load()) {
|
||||
return;
|
||||
}
|
||||
Timeout(const Timeout&) = delete;
|
||||
Timeout(Timeout&&) = delete;
|
||||
Timeout& operator=(const Timeout&) = delete;
|
||||
Timeout& operator=(Timeout&&) = delete;
|
||||
|
||||
auto f (onTimeout);
|
||||
f(std::move(yc));
|
||||
});
|
||||
/**
|
||||
* Cancels any pending timeout callback.
|
||||
*
|
||||
* Must be called in the strand in which the callback was scheduled!
|
||||
*/
|
||||
~Timeout()
|
||||
{
|
||||
Cancel();
|
||||
}
|
||||
|
||||
void Cancel();
|
||||
|
||||
private:
|
||||
boost::asio::deadline_timer m_Timer;
|
||||
std::atomic<bool> m_Cancelled;
|
||||
Timer m_Timer;
|
||||
|
||||
/**
|
||||
* Indicates whether the Timeout has been cancelled.
|
||||
*
|
||||
* This must be Shared<> between the lambda in the constructor and Cancel() for the case
|
||||
* the destructor calls Cancel() while the lambda is already queued in the strand.
|
||||
* The whole Timeout instance can't be kept alive by the lambda because this would delay the destructor.
|
||||
*/
|
||||
Shared<Atomic<bool>>::Ptr m_Cancelled;
|
||||
};
|
||||
|
||||
}
|
||||
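A standalone sketch of the timeout emulation described above, using only Boost.ASIO and the standard library. The names mirror the pattern but this is not the Icinga implementation; in particular, the real class shares its cancellation flag via Shared<Atomic<bool>> rather than std::shared_ptr<std::atomic<bool>>:

#include <atomic>
#include <boost/asio.hpp>
#include <functional>
#include <iostream>
#include <memory>

class Timeout
{
public:
	Timeout(boost::asio::io_context::strand& strand,
		const boost::posix_time::time_duration& after,
		std::function<void()> onTimeout)
		: m_Timer(strand.context(), after), m_Cancelled(std::make_shared<std::atomic<bool>>(false))
	{
		// The flag is shared with the handler so Cancel() still works
		// when the handler is already queued in the strand.
		m_Timer.async_wait(boost::asio::bind_executor(strand,
			[cancelled = m_Cancelled, onTimeout = std::move(onTimeout)](boost::system::error_code ec) {
				if (!ec && !cancelled->load())
					onTimeout();
			}));
	}

	~Timeout() { Cancel(); }

	void Cancel()
	{
		m_Cancelled->store(true); // also suppresses a callback that is already queued
		m_Timer.cancel();
	}

private:
	boost::asio::deadline_timer m_Timer;
	std::shared_ptr<std::atomic<bool>> m_Cancelled;
};

int main()
{
	boost::asio::io_context io;
	boost::asio::io_context::strand strand(io);

	Timeout t(strand, boost::posix_time::seconds(1), [] { std::cout << "timed out\n"; });

	// Cancel inside the strand before the deadline: "timed out" is never printed.
	boost::asio::post(strand, [&t] { t.Cancel(); });
	io.run();
}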
|
@ -2,22 +2,324 @@
|
||||
|
||||
#include "base/json.hpp"
|
||||
#include "base/debug.hpp"
|
||||
#include "base/namespace.hpp"
|
||||
#include "base/dictionary.hpp"
|
||||
#include "base/array.hpp"
|
||||
#include "base/namespace.hpp"
|
||||
#include "base/objectlock.hpp"
|
||||
#include "base/convert.hpp"
|
||||
#include "base/utility.hpp"
|
||||
#include <bitset>
|
||||
#include <boost/exception_ptr.hpp>
|
||||
#include <cstdint>
|
||||
#include <json.hpp>
|
||||
#include <boost/numeric/conversion/cast.hpp>
|
||||
#include <stack>
|
||||
#include <utility>
|
||||
#include <vector>
|
||||
|
||||
using namespace icinga;
|
||||
|
||||
JsonEncoder::JsonEncoder(std::string& output, bool prettify)
|
||||
: JsonEncoder{nlohmann::detail::output_adapter<char>(output), prettify}
|
||||
{
|
||||
}
|
||||
|
||||
JsonEncoder::JsonEncoder(std::basic_ostream<char>& stream, bool prettify)
|
||||
: JsonEncoder{nlohmann::detail::output_adapter<char>(stream), prettify}
|
||||
{
|
||||
}
|
||||
|
||||
JsonEncoder::JsonEncoder(nlohmann::detail::output_adapter_t<char> w, bool prettify)
|
||||
: m_Pretty(prettify), m_Writer(std::move(w)), m_Flusher{m_Writer}
|
||||
{
|
||||
}
|
||||
|
||||
/**
|
||||
* Encodes a single value into JSON and writes it to the underlying output stream.
|
||||
*
|
||||
* This method is the main entry point for encoding JSON data. It takes a value of any type that can
|
||||
* be represented by our @c Value class recursively and encodes it into JSON in an efficient manner.
|
||||
* If prettifying is enabled, the JSON output will be formatted with indentation and newlines for better
|
||||
* readability, and the final JSON will also be terminated by a newline character.
|
||||
*
|
||||
* @note If the used output adapter performs asynchronous I/O operations (it's derived from @c AsyncJsonWriter),
|
||||
* please provide a @c boost::asio::yield_context object to allow the encoder to flush the output stream in a
|
||||
* safe manner. The encoder will try to regularly give the output stream a chance to flush its data when it is
|
||||
* safe to do so, but for this to work, there must be a valid yield context provided. Otherwise, the encoder
|
||||
* will not attempt to flush the output stream at all, which may lead to huge memory consumption when encoding
|
||||
* large JSON objects or arrays.
|
||||
*
|
||||
* @param value The value to be JSON serialized.
|
||||
* @param yc The optional yield context for asynchronous operations. If provided, it allows the encoder
|
||||
* to flush the output stream safely when it has not acquired any object lock on the parent containers.
|
||||
*/
|
||||
void JsonEncoder::Encode(const Value& value, boost::asio::yield_context* yc)
|
||||
{
|
||||
switch (value.GetType()) {
|
||||
case ValueEmpty:
|
||||
Write("null");
|
||||
break;
|
||||
case ValueBoolean:
|
||||
Write(value.ToBool() ? "true" : "false");
|
||||
break;
|
||||
case ValueString:
|
||||
EncodeNlohmannJson(value.Get<String>());
|
||||
break;
|
||||
case ValueNumber:
|
||||
EncodeNumber(value.Get<double>());
|
||||
break;
|
||||
case ValueObject: {
|
||||
const auto& obj = value.Get<Object::Ptr>();
|
||||
const auto& type = obj->GetReflectionType();
|
||||
if (type == Namespace::TypeInstance) {
|
||||
static constexpr auto extractor = [](const NamespaceValue& v) -> const Value& { return v.Val; };
|
||||
EncodeObject(static_pointer_cast<Namespace>(obj), extractor, yc);
|
||||
} else if (type == Dictionary::TypeInstance) {
|
||||
static constexpr auto extractor = [](const Value& v) -> const Value& { return v; };
|
||||
EncodeObject(static_pointer_cast<Dictionary>(obj), extractor, yc);
|
||||
} else if (type == Array::TypeInstance) {
|
||||
EncodeArray(static_pointer_cast<Array>(obj), yc);
|
||||
} else if (auto gen(dynamic_pointer_cast<ValueGenerator>(obj)); gen) {
|
||||
EncodeValueGenerator(gen, yc);
|
||||
} else {
|
||||
// Some other non-serializable object type!
|
||||
EncodeNlohmannJson(obj->ToString());
|
||||
}
|
||||
break;
|
||||
}
|
||||
default:
|
||||
VERIFY(!"Invalid variant type.");
|
||||
}
|
||||
|
||||
// If we are at the top level of the JSON object and prettifying is enabled, we need to end
|
||||
// the JSON with a newline character to ensure that the output is properly formatted.
|
||||
if (m_Indent == 0 && m_Pretty) {
|
||||
Write("\n");
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Encodes an Array object into JSON and writes it to the output stream.
|
||||
*
|
||||
* @param array The Array object to be serialized into JSON.
|
||||
* @param yc The optional yield context for asynchronous operations. If provided, it allows the encoder
|
||||
* to flush the output stream safely when it has not acquired any object lock.
|
||||
*/
|
||||
void JsonEncoder::EncodeArray(const Array::Ptr& array, boost::asio::yield_context* yc)
|
||||
{
|
||||
BeginContainer('[');
|
||||
auto olock = array->LockIfRequired();
|
||||
if (olock) {
|
||||
yc = nullptr; // We've acquired an object lock, never allow asynchronous operations.
|
||||
}
|
||||
|
||||
bool isEmpty = true;
|
||||
for (const auto& item : array) {
|
||||
WriteSeparatorAndIndentStrIfNeeded(!isEmpty);
|
||||
isEmpty = false;
|
||||
Encode(item, yc);
|
||||
m_Flusher.FlushIfSafe(yc);
|
||||
}
|
||||
EndContainer(']', isEmpty);
|
||||
}
|
||||
|
||||
/**
|
||||
* Encodes a ValueGenerator object into JSON and writes it to the output stream.
|
||||
*
|
||||
* This will iterate through the generator, encoding each value it produces until it is exhausted.
|
||||
*
|
||||
* @param generator The ValueGenerator object to be serialized into JSON.
|
||||
* @param yc The optional yield context for asynchronous operations. If provided, it allows the encoder
|
||||
* to flush the output stream safely when it has not acquired any object lock on the parent containers.
|
||||
*/
|
||||
void JsonEncoder::EncodeValueGenerator(const ValueGenerator::Ptr& generator, boost::asio::yield_context* yc)
|
||||
{
|
||||
BeginContainer('[');
|
||||
bool isEmpty = true;
|
||||
while (auto result = generator->Next()) {
|
||||
WriteSeparatorAndIndentStrIfNeeded(!isEmpty);
|
||||
isEmpty = false;
|
||||
Encode(*result, yc);
|
||||
m_Flusher.FlushIfSafe(yc);
|
||||
}
|
||||
EndContainer(']', isEmpty);
|
||||
}
|
||||
|
||||
/**
|
||||
* Encodes an Icinga 2 object (Namespace or Dictionary) into JSON and writes it to @c m_Writer.
|
||||
*
|
||||
* @tparam Iterable Type of the container (Namespace or Dictionary).
|
||||
* @tparam ValExtractor Type of the value extractor function used to extract values from the container's iterator.
|
||||
*
|
||||
* @param container The container to JSON serialize.
|
||||
* @param extractor The value extractor function used to extract values from the container's iterator.
|
||||
* @param yc The optional yield context for asynchronous operations. It will only be set when the encoder
|
||||
* has not acquired any object lock on the parent containers, allowing safe asynchronous operations.
|
||||
*/
|
||||
template<typename Iterable, typename ValExtractor>
|
||||
void JsonEncoder::EncodeObject(const Iterable& container, const ValExtractor& extractor, boost::asio::yield_context* yc)
|
||||
{
|
||||
static_assert(std::is_same_v<Iterable, Namespace::Ptr> || std::is_same_v<Iterable, Dictionary::Ptr>,
|
||||
"Container must be a Namespace or Dictionary");
|
||||
|
||||
BeginContainer('{');
|
||||
auto olock = container->LockIfRequired();
|
||||
if (olock) {
|
||||
yc = nullptr; // We've acquired an object lock, never allow asynchronous operations.
|
||||
}
|
||||
|
||||
bool isEmpty = true;
|
||||
for (const auto& [key, val] : container) {
|
||||
WriteSeparatorAndIndentStrIfNeeded(!isEmpty);
|
||||
isEmpty = false;
|
||||
|
||||
EncodeNlohmannJson(key);
|
||||
Write(m_Pretty ? ": " : ":");
|
||||
|
||||
Encode(extractor(val), yc);
|
||||
m_Flusher.FlushIfSafe(yc);
|
||||
}
|
||||
EndContainer('}', isEmpty);
|
||||
}
|
||||
|
||||
/**
|
||||
* Dumps a nlohmann::json object to the output stream using the serializer.
|
||||
*
|
||||
* This function uses the @c nlohmann::detail::serializer to dump the provided @c nlohmann::json
|
||||
* object to the output stream managed by the @c JsonEncoder. Strings will be properly escaped, and
|
||||
* if any invalid UTF-8 sequences are encountered, it will replace them with the Unicode replacement
|
||||
* character (U+FFFD).
|
||||
*
|
||||
* @param json The nlohmann::json object to encode.
|
||||
*/
|
||||
void JsonEncoder::EncodeNlohmannJson(const nlohmann::json& json) const
|
||||
{
|
||||
nlohmann::detail::serializer<nlohmann::json> s(m_Writer, ' ', nlohmann::json::error_handler_t::replace);
|
||||
s.dump(json, m_Pretty, true, 0, 0);
|
||||
}
|
||||
|
||||
/**
|
||||
* Encodes a double value into JSON format and writes it to the output stream.
|
||||
*
|
||||
* This function checks if the double value can be safely cast to an integer or unsigned integer type
|
||||
* without loss of precision. If it can, it will serialize it as such; otherwise, it will serialize
|
||||
* it as a double. This is particularly useful for ensuring that values like 0.0 are serialized as 0,
|
||||
* which can be important for compatibility with clients like Icinga DB that expect integers in such cases.
|
||||
*
|
||||
* @param value The double value to encode as JSON.
|
||||
*/
|
||||
void JsonEncoder::EncodeNumber(double value) const
|
||||
{
|
||||
try {
|
||||
if (value < 0) {
|
||||
if (auto ll(boost::numeric_cast<nlohmann::json::number_integer_t>(value)); ll == value) {
|
||||
EncodeNlohmannJson(ll);
|
||||
return;
|
||||
}
|
||||
} else if (auto ull(boost::numeric_cast<nlohmann::json::number_unsigned_t>(value)); ull == value) {
|
||||
EncodeNlohmannJson(ull);
|
||||
return;
|
||||
}
|
||||
// If we reach this point, the value cannot be safely cast to a signed or unsigned integer
|
||||
// type because it would otherwise lose its precision. If the value was just too large to fit
|
||||
// into the above types, then boost will throw an exception and end up in the below catch block.
|
||||
// So, in either case, serialize the number as-is without any casting.
|
||||
} catch (const boost::bad_numeric_cast&) {}
|
||||
|
||||
EncodeNlohmannJson(value);
|
||||
}
|
||||
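EncodeNumber() above only emits an integer when the double survives a lossless round trip through boost::numeric_cast; out-of-range values throw and fall back to the plain double path. A standalone sketch of that decision (PrintAsJsonNumber is an illustrative name, with long long / unsigned long long standing in for nlohmann's integer types):

#include <boost/numeric/conversion/cast.hpp>
#include <iostream>

void PrintAsJsonNumber(double value)
{
	try {
		if (value < 0) {
			if (auto ll = boost::numeric_cast<long long>(value); ll == value) {
				std::cout << ll << "\n"; // e.g. -3.0 -> "-3"
				return;
			}
		} else if (auto ull = boost::numeric_cast<unsigned long long>(value); ull == value) {
			std::cout << ull << "\n"; // e.g. 0.0 -> "0"
			return;
		}
	} catch (const boost::bad_numeric_cast&) {
		// Out of integer range: keep the double representation.
	}
	std::cout << value << "\n"; // e.g. 0.5 -> "0.5", 1e300 -> "1e+300"
}

int main()
{
	PrintAsJsonNumber(0.0);
	PrintAsJsonNumber(-3.0);
	PrintAsJsonNumber(0.5);
	PrintAsJsonNumber(1e300);
}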
|
||||
/**
|
||||
* Writes a string to the underlying output stream.
|
||||
*
|
||||
* This function writes the provided string view directly to the output stream without any additional formatting.
|
||||
*
|
||||
* @param sv The string view to write to the output stream.
|
||||
*/
|
||||
void JsonEncoder::Write(const std::string_view& sv) const
|
||||
{
|
||||
m_Writer->write_characters(sv.data(), sv.size());
|
||||
}
|
||||
|
||||
/**
|
||||
* Begins a JSON container (object or array) by writing the opening character and adjusting the
|
||||
* indentation level if pretty-printing is enabled.
|
||||
*
|
||||
* @param openChar The character that opens the container (either '{' for objects or '[' for arrays).
|
||||
*/
|
||||
void JsonEncoder::BeginContainer(char openChar)
|
||||
{
|
||||
if (m_Pretty) {
|
||||
m_Indent += m_IndentSize;
|
||||
if (m_IndentStr.size() < m_Indent) {
|
||||
m_IndentStr.resize(m_IndentStr.size() * 2, ' ');
|
||||
}
|
||||
}
|
||||
m_Writer->write_character(openChar);
|
||||
}
|
||||
|
||||
/**
|
||||
* Ends a JSON container (object or array) by writing the closing character and adjusting the
|
||||
* indentation level if pretty-printing is enabled.
|
||||
*
|
||||
* @param closeChar The character that closes the container (either '}' for objects or ']' for arrays).
|
||||
* @param isContainerEmpty Whether the container is empty, used to determine if a newline should be written.
|
||||
*/
|
||||
void JsonEncoder::EndContainer(char closeChar, bool isContainerEmpty)
|
||||
{
|
||||
if (m_Pretty) {
|
||||
ASSERT(m_Indent >= m_IndentSize); // Ensure we don't underflow the indent size.
|
||||
m_Indent -= m_IndentSize;
|
||||
if (!isContainerEmpty) {
|
||||
Write("\n");
|
||||
m_Writer->write_characters(m_IndentStr.c_str(), m_Indent);
|
||||
}
|
||||
}
|
||||
m_Writer->write_character(closeChar);
|
||||
}
|
||||
|
||||
/**
|
||||
* Writes a separator (comma) and an indentation string if pretty-printing is enabled.
|
||||
*
|
||||
* This function is used to separate items in a JSON array or object and to maintain the correct indentation level.
|
||||
*
|
||||
* @param emitComma Whether to emit a comma. This is typically true for all but the first item in a container.
|
||||
*/
|
||||
void JsonEncoder::WriteSeparatorAndIndentStrIfNeeded(bool emitComma) const
|
||||
{
|
||||
if (emitComma) {
|
||||
Write(",");
|
||||
}
|
||||
if (m_Pretty) {
|
||||
Write("\n");
|
||||
m_Writer->write_characters(m_IndentStr.c_str(), m_Indent);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Wraps any writer of type @c nlohmann::detail::output_adapter_t<char> into a Flusher
|
||||
*
|
||||
* @param w The writer to wrap.
|
||||
*/
|
||||
JsonEncoder::Flusher::Flusher(const nlohmann::detail::output_adapter_t<char>& w)
|
||||
: m_AsyncWriter(dynamic_cast<AsyncJsonWriter*>(w.get()))
|
||||
{
|
||||
}
|
||||
|
||||
/**
|
||||
* Flushes the underlying writer if it supports that operation and is safe to do so.
|
||||
*
|
||||
* Safe flushing means that it only performs the flush operation if the @c JsonEncoder has not acquired
|
||||
* any object lock so far. This is to ensure that the stream can safely perform asynchronous operations
|
||||
* without risking undefined behaviour due to coroutines being suspended while the stream is being flushed.
|
||||
*
|
||||
* When the @c yc parameter is provided, it indicates that it's safe to perform asynchronous operations,
|
||||
* and the function will attempt to flush if the writer is an instance of @c AsyncJsonWriter. Otherwise,
|
||||
* this function does nothing.
|
||||
*
|
||||
* @param yc The yield context to use for asynchronous operations.
|
||||
*/
|
||||
void JsonEncoder::Flusher::FlushIfSafe(boost::asio::yield_context* yc) const
|
||||
{
|
||||
if (yc && m_AsyncWriter) {
|
||||
m_AsyncWriter->MayFlush(*yc);
|
||||
}
|
||||
}
|
||||
|
||||
class JsonSax : public nlohmann::json_sax<nlohmann::json>
|
||||
{
|
||||
public:
|
||||
@ -45,165 +347,25 @@ private:
|
||||
void FillCurrentTarget(Value value);
|
||||
};
|
||||
|
||||
const char l_Null[] = "null";
|
||||
const char l_False[] = "false";
|
||||
const char l_True[] = "true";
|
||||
const char l_Indent[] = " ";
|
||||
|
||||
// https://github.com/nlohmann/json/issues/1512
|
||||
template<bool prettyPrint>
|
||||
class JsonEncoder
|
||||
String icinga::JsonEncode(const Value& value, bool prettify)
|
||||
{
|
||||
public:
|
||||
void Null();
|
||||
void Boolean(bool value);
|
||||
void NumberFloat(double value);
|
||||
void Strng(String value);
|
||||
void StartObject();
|
||||
void Key(String value);
|
||||
void EndObject();
|
||||
void StartArray();
|
||||
void EndArray();
|
||||
|
||||
String GetResult();
|
||||
|
||||
private:
|
||||
std::vector<char> m_Result;
|
||||
String m_CurrentKey;
|
||||
std::stack<std::bitset<2>> m_CurrentSubtree;
|
||||
|
||||
void AppendChar(char c);
|
||||
|
||||
template<class Iterator>
|
||||
void AppendChars(Iterator begin, Iterator end);
|
||||
|
||||
void AppendJson(nlohmann::json json);
|
||||
|
||||
void BeforeItem();
|
||||
|
||||
void FinishContainer(char terminator);
|
||||
};
|
||||
|
||||
template<bool prettyPrint>
|
||||
void Encode(JsonEncoder<prettyPrint>& stateMachine, const Value& value);
|
||||
|
||||
template<bool prettyPrint>
|
||||
inline
|
||||
void EncodeNamespace(JsonEncoder<prettyPrint>& stateMachine, const Namespace::Ptr& ns)
|
||||
{
|
||||
stateMachine.StartObject();
|
||||
|
||||
ObjectLock olock(ns);
|
||||
for (const Namespace::Pair& kv : ns) {
|
||||
stateMachine.Key(Utility::ValidateUTF8(kv.first));
|
||||
Encode(stateMachine, kv.second.Val);
|
||||
}
|
||||
|
||||
stateMachine.EndObject();
|
||||
std::string output;
|
||||
JsonEncoder encoder(output, prettify);
|
||||
encoder.Encode(value);
|
||||
return String(std::move(output));
|
||||
}
|
||||
|
||||
template<bool prettyPrint>
|
||||
inline
|
||||
void EncodeDictionary(JsonEncoder<prettyPrint>& stateMachine, const Dictionary::Ptr& dict)
|
||||
/**
|
||||
* Serializes an Icinga Value into a JSON object and writes it to the given output stream.
|
||||
*
|
||||
* @param value The value to be JSON serialized.
|
||||
* @param os The output stream to write the JSON data to.
|
||||
* @param prettify Whether to pretty print the serialized JSON.
|
||||
*/
|
||||
void icinga::JsonEncode(const Value& value, std::ostream& os, bool prettify)
|
||||
{
|
||||
stateMachine.StartObject();
|
||||
|
||||
ObjectLock olock(dict);
|
||||
for (const Dictionary::Pair& kv : dict) {
|
||||
stateMachine.Key(Utility::ValidateUTF8(kv.first));
|
||||
Encode(stateMachine, kv.second);
|
||||
}
|
||||
|
||||
stateMachine.EndObject();
|
||||
}
|
||||
|
||||
template<bool prettyPrint>
|
||||
inline
|
||||
void EncodeArray(JsonEncoder<prettyPrint>& stateMachine, const Array::Ptr& arr)
|
||||
{
|
||||
stateMachine.StartArray();
|
||||
|
||||
ObjectLock olock(arr);
|
||||
for (const Value& value : arr) {
|
||||
Encode(stateMachine, value);
|
||||
}
|
||||
|
||||
stateMachine.EndArray();
|
||||
}
|
||||
|
||||
template<bool prettyPrint>
|
||||
void Encode(JsonEncoder<prettyPrint>& stateMachine, const Value& value)
|
||||
{
|
||||
switch (value.GetType()) {
|
||||
case ValueNumber:
|
||||
stateMachine.NumberFloat(value.Get<double>());
|
||||
break;
|
||||
|
||||
case ValueBoolean:
|
||||
stateMachine.Boolean(value.ToBool());
|
||||
break;
|
||||
|
||||
case ValueString:
|
||||
stateMachine.Strng(Utility::ValidateUTF8(value.Get<String>()));
|
||||
break;
|
||||
|
||||
case ValueObject:
|
||||
{
|
||||
const Object::Ptr& obj = value.Get<Object::Ptr>();
|
||||
|
||||
{
|
||||
Namespace::Ptr ns = dynamic_pointer_cast<Namespace>(obj);
|
||||
if (ns) {
|
||||
EncodeNamespace(stateMachine, ns);
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
{
|
||||
Dictionary::Ptr dict = dynamic_pointer_cast<Dictionary>(obj);
|
||||
if (dict) {
|
||||
EncodeDictionary(stateMachine, dict);
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
{
|
||||
Array::Ptr arr = dynamic_pointer_cast<Array>(obj);
|
||||
if (arr) {
|
||||
EncodeArray(stateMachine, arr);
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
// obj is most likely a function => "Object of type 'Function'"
|
||||
Encode(stateMachine, obj->ToString());
|
||||
break;
|
||||
}
|
||||
|
||||
case ValueEmpty:
|
||||
stateMachine.Null();
|
||||
break;
|
||||
|
||||
default:
|
||||
VERIFY(!"Invalid variant type.");
|
||||
}
|
||||
}
|
||||
|
||||
String icinga::JsonEncode(const Value& value, bool pretty_print)
|
||||
{
|
||||
if (pretty_print) {
|
||||
JsonEncoder<true> stateMachine;
|
||||
|
||||
Encode(stateMachine, value);
|
||||
|
||||
return stateMachine.GetResult() + "\n";
|
||||
} else {
|
||||
JsonEncoder<false> stateMachine;
|
||||
|
||||
Encode(stateMachine, value);
|
||||
|
||||
return stateMachine.GetResult();
|
||||
}
|
||||
JsonEncoder encoder(os, prettify);
|
||||
encoder.Encode(value);
|
||||
}
|
||||
|
||||
Value icinga::JsonDecode(const String& data)
|
||||
@ -349,177 +511,3 @@ void JsonSax::FillCurrentTarget(Value value)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
template<bool prettyPrint>
|
||||
inline
|
||||
void JsonEncoder<prettyPrint>::Null()
|
||||
{
|
||||
BeforeItem();
|
||||
AppendChars((const char*)l_Null, (const char*)l_Null + 4);
|
||||
}
|
||||
|
||||
template<bool prettyPrint>
|
||||
inline
|
||||
void JsonEncoder<prettyPrint>::Boolean(bool value)
|
||||
{
|
||||
BeforeItem();
|
||||
|
||||
if (value) {
|
||||
AppendChars((const char*)l_True, (const char*)l_True + 4);
|
||||
} else {
|
||||
AppendChars((const char*)l_False, (const char*)l_False + 5);
|
||||
}
|
||||
}
|
||||
|
||||
template<bool prettyPrint>
|
||||
inline
|
||||
void JsonEncoder<prettyPrint>::NumberFloat(double value)
|
||||
{
|
||||
BeforeItem();
|
||||
|
||||
// Make sure 0.0 is serialized as 0, so e.g. Icinga DB can parse it as int.
|
||||
if (value < 0) {
|
||||
long long i = value;
|
||||
|
||||
if (i == value) {
|
||||
AppendJson(i);
|
||||
} else {
|
||||
AppendJson(value);
|
||||
}
|
||||
} else {
|
||||
unsigned long long i = value;
|
||||
|
||||
if (i == value) {
|
||||
AppendJson(i);
|
||||
} else {
|
||||
AppendJson(value);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
template<bool prettyPrint>
|
||||
inline
|
||||
void JsonEncoder<prettyPrint>::Strng(String value)
|
||||
{
|
||||
BeforeItem();
|
||||
AppendJson(std::move(value));
|
||||
}
|
||||
|
||||
template<bool prettyPrint>
|
||||
inline
|
||||
void JsonEncoder<prettyPrint>::StartObject()
|
||||
{
|
||||
BeforeItem();
|
||||
AppendChar('{');
|
||||
|
||||
m_CurrentSubtree.push(2);
|
||||
}
|
||||
|
||||
template<bool prettyPrint>
|
||||
inline
|
||||
void JsonEncoder<prettyPrint>::Key(String value)
|
||||
{
|
||||
m_CurrentKey = std::move(value);
|
||||
}
|
||||
|
||||
template<bool prettyPrint>
|
||||
inline
|
||||
void JsonEncoder<prettyPrint>::EndObject()
|
||||
{
|
||||
FinishContainer('}');
|
||||
}
|
||||
|
||||
template<bool prettyPrint>
|
||||
inline
|
||||
void JsonEncoder<prettyPrint>::StartArray()
|
||||
{
|
||||
BeforeItem();
|
||||
AppendChar('[');
|
||||
|
||||
m_CurrentSubtree.push(0);
|
||||
}
|
||||
|
||||
template<bool prettyPrint>
|
||||
inline
|
||||
void JsonEncoder<prettyPrint>::EndArray()
|
||||
{
|
||||
FinishContainer(']');
|
||||
}
|
||||
|
||||
template<bool prettyPrint>
|
||||
inline
|
||||
String JsonEncoder<prettyPrint>::GetResult()
|
||||
{
|
||||
return String(m_Result.begin(), m_Result.end());
|
||||
}
|
||||
|
||||
template<bool prettyPrint>
|
||||
inline
|
||||
void JsonEncoder<prettyPrint>::AppendChar(char c)
|
||||
{
|
||||
m_Result.emplace_back(c);
|
||||
}
|
||||
|
||||
template<bool prettyPrint>
|
||||
template<class Iterator>
|
||||
inline
|
||||
void JsonEncoder<prettyPrint>::AppendChars(Iterator begin, Iterator end)
|
||||
{
|
||||
m_Result.insert(m_Result.end(), begin, end);
|
||||
}
|
||||
|
||||
template<bool prettyPrint>
|
||||
inline
|
||||
void JsonEncoder<prettyPrint>::AppendJson(nlohmann::json json)
|
||||
{
|
||||
nlohmann::detail::serializer<nlohmann::json>(nlohmann::detail::output_adapter<char>(m_Result), ' ').dump(std::move(json), prettyPrint, true, 0);
|
||||
}
|
||||
|
||||
template<bool prettyPrint>
|
||||
inline
|
||||
void JsonEncoder<prettyPrint>::BeforeItem()
|
||||
{
|
||||
if (!m_CurrentSubtree.empty()) {
|
||||
auto& node (m_CurrentSubtree.top());
|
||||
|
||||
if (node[0]) {
|
||||
AppendChar(',');
|
||||
} else {
|
||||
node[0] = true;
|
||||
}
|
||||
|
||||
if (prettyPrint) {
|
||||
AppendChar('\n');
|
||||
|
||||
for (auto i (m_CurrentSubtree.size()); i; --i) {
|
||||
AppendChars((const char*)l_Indent, (const char*)l_Indent + 4);
|
||||
}
|
||||
}
|
||||
|
||||
if (node[1]) {
|
||||
AppendJson(std::move(m_CurrentKey));
|
||||
AppendChar(':');
|
||||
|
||||
if (prettyPrint) {
|
||||
AppendChar(' ');
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
template<bool prettyPrint>
|
||||
inline
|
||||
void JsonEncoder<prettyPrint>::FinishContainer(char terminator)
|
||||
{
|
||||
if (prettyPrint && m_CurrentSubtree.top()[0]) {
|
||||
AppendChar('\n');
|
||||
|
||||
for (auto i (m_CurrentSubtree.size() - 1u); i; --i) {
|
||||
AppendChars((const char*)l_Indent, (const char*)l_Indent + 4);
|
||||
}
|
||||
}
|
||||
|
||||
AppendChar(terminator);
|
||||
|
||||
m_CurrentSubtree.pop();
|
||||
}
|
||||
|
@ -4,14 +4,121 @@
|
||||
#define JSON_H
|
||||
|
||||
#include "base/i2-base.hpp"
|
||||
#include "base/array.hpp"
|
||||
#include "base/generator.hpp"
|
||||
#include <boost/asio/spawn.hpp>
|
||||
#include <json.hpp>
|
||||
|
||||
namespace icinga
|
||||
{
|
||||
|
||||
/**
|
||||
* AsyncJsonWriter allows writing JSON data to any output stream asynchronously.
|
||||
*
|
||||
* All users of this class must ensure that the underlying output stream will not perform any asynchronous I/O
|
||||
* operations when the @c write_character() or @c write_characters() methods are called. They shall only perform
|
||||
* such operations when the @c JsonEncoder allows them to do so by calling the @c MayFlush() method.
|
||||
*
|
||||
* @ingroup base
|
||||
*/
|
||||
class AsyncJsonWriter : public nlohmann::detail::output_adapter_protocol<char>
|
||||
{
|
||||
public:
|
||||
/**
|
||||
* It instructs the underlying output stream to write any buffered data to wherever it is supposed to go.
|
||||
*
|
||||
* The @c JsonEncoder allows the stream to even perform asynchronous operations in a safe manner by calling
|
||||
* this method with a dedicated @c boost::asio::yield_context object. The stream must not perform any async
|
||||
* I/O operations triggered by methods other than this one. Any attempt to do so will result in undefined behavior.
|
||||
*
|
||||
* However, this doesn't necessarily force the stream to actually flush its data immediately; it's up
|
||||
* to the implementation to do whatever it needs to. The encoder just gives it a chance to do so by calling
|
||||
* this method.
|
||||
*
|
||||
* @param yield The yield context to use for asynchronous operations.
|
||||
*/
|
||||
virtual void MayFlush(boost::asio::yield_context& yield) = 0;
|
||||
};
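A minimal sketch of a conforming writer, assuming only the interface shown above (the std::string buffer is illustrative, not the actual implementation): it buffers synchronously in write_character()/write_characters() and defers any real I/O to MayFlush().

class ExampleJsonWriter : public AsyncJsonWriter
{
public:
	void write_character(char c) override { m_Buffer += c; }
	void write_characters(const char* s, std::size_t length) override { m_Buffer.append(s, length); }

	void MayFlush(boost::asio::yield_context& yield) override
	{
		// Hypothetical: hand m_Buffer to an Asio stream here (e.g. async_write with yield), then clear it.
		// No other method of this class may trigger asynchronous I/O.
		m_Buffer.clear();
	}

private:
	std::string m_Buffer;
};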
|
||||
|
||||
class String;
|
||||
class Value;
|
||||
|
||||
String JsonEncode(const Value& value, bool pretty_print = false);
|
||||
/**
|
||||
* JSON encoder.
|
||||
*
|
||||
* This class can be used to encode Icinga Value types into JSON format and write them to an output stream.
|
||||
* The supported stream types include any @c std::ostream like objects and our own @c AsyncJsonWriter, which
|
||||
* allows writing JSON data to an Asio stream asynchronously. The nlohmann/json library already provides
|
||||
* full support for the former stream type, while the latter is our own implementation and satisfies the
|
||||
* @c nlohmann::detail::output_adapter_protocol<> interface as well.
|
||||
*
|
||||
* The JSON encoder generates most of the low level JSON tokens, but it still relies on the already existing
|
||||
* @c nlohmann::detail::serializer<> class to dump numbers and ASCII validated JSON strings. This means that the
|
||||
* encoder doesn't perform any kind of JSON validation or escaping on its own, but simply delegates all this kind
|
||||
* of work to serializer<>.
|
||||
*
|
||||
* The generated JSON can be either prettified or compact, depending on your needs. The prettified JSON object
|
||||
* is indented with 4 spaces and grows linearly with the depth of the object tree.
|
||||
*
|
||||
* @ingroup base
|
||||
*/
|
||||
class JsonEncoder
|
||||
{
|
||||
public:
|
||||
explicit JsonEncoder(std::string& output, bool prettify = false);
|
||||
explicit JsonEncoder(std::basic_ostream<char>& stream, bool prettify = false);
|
||||
explicit JsonEncoder(nlohmann::detail::output_adapter_t<char> w, bool prettify = false);
|
||||
|
||||
void Encode(const Value& value, boost::asio::yield_context* yc = nullptr);
|
||||
|
||||
private:
|
||||
void EncodeArray(const Array::Ptr& array, boost::asio::yield_context* yc);
|
||||
void EncodeValueGenerator(const ValueGenerator::Ptr& generator, boost::asio::yield_context* yc);
|
||||
|
||||
template<typename Iterable, typename ValExtractor>
|
||||
void EncodeObject(const Iterable& container, const ValExtractor& extractor, boost::asio::yield_context* yc);
|
||||
|
||||
void EncodeNlohmannJson(const nlohmann::json& json) const;
|
||||
void EncodeNumber(double value) const;
|
||||
|
||||
void Write(const std::string_view& sv) const;
|
||||
void BeginContainer(char openChar);
|
||||
void EndContainer(char closeChar, bool isContainerEmpty = false);
|
||||
void WriteSeparatorAndIndentStrIfNeeded(bool emitComma) const;
|
||||
|
||||
// The number of spaces to use for indentation in prettified JSON.
|
||||
static constexpr uint8_t m_IndentSize = 4;
|
||||
|
||||
bool m_Pretty; // Whether to pretty-print the JSON output.
|
||||
unsigned m_Indent{0}; // The current indentation level for pretty-printing.
|
||||
/**
|
||||
* Pre-allocate for 8 levels of indentation for pretty-printing.
|
||||
*
|
||||
* This is used to avoid reallocating the string on every indent level change.
|
||||
* The size of this string is dynamically adjusted if the indentation level exceeds its initial size at some point.
|
||||
*/
|
||||
std::string m_IndentStr{8*m_IndentSize, ' '};
|
||||
|
||||
// The output stream adapter for writing JSON data. This can be either a std::ostream or an Asio stream adapter.
|
||||
nlohmann::detail::output_adapter_t<char> m_Writer;
|
||||
|
||||
/**
|
||||
* This class wraps any @c nlohmann::detail::output_adapter_t<char> writer and provides a method to flush it as
|
||||
* required. Only @c AsyncJsonWriter supports the flush operation; however, this class is also safe to use with
|
||||
* other writer types and the flush method does nothing for them.
|
||||
*/
|
||||
class Flusher {
|
||||
public:
|
||||
explicit Flusher(const nlohmann::detail::output_adapter_t<char>& w);
|
||||
void FlushIfSafe(boost::asio::yield_context* yc) const;
|
||||
|
||||
private:
|
||||
AsyncJsonWriter* m_AsyncWriter;
|
||||
} m_Flusher;
|
||||
};
|
||||
|
||||
String JsonEncode(const Value& value, bool prettify = false);
|
||||
void JsonEncode(const Value& value, std::ostream& os, bool prettify = false);
|
||||
Value JsonDecode(const String& data);
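A usage sketch of the declarations above (illustrative only; `dict` stands for any icinga::Value the caller already has): the std::ostream overload and the JsonEncoder class are interchangeable for synchronous output.

std::ostringstream out;
JsonEncode(dict, out, true);        // pretty-printed, written directly to the stream

JsonEncoder encoder(out, false);    // or drive the encoder yourself
encoder.Encode(dict);

String compact = JsonEncode(dict);  // compact string form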
|
||||
|
||||
}
|
||||
|
@ -27,6 +27,7 @@ template Log& Log::operator<<(const int&);
|
||||
template Log& Log::operator<<(const unsigned long&);
|
||||
template Log& Log::operator<<(const long&);
|
||||
template Log& Log::operator<<(const double&);
|
||||
template Log& Log::operator<<(const char*&);
|
||||
|
||||
REGISTER_TYPE(Logger);
|
||||
|
||||
@ -246,21 +247,25 @@ void Logger::UpdateMinLogSeverity()
|
||||
Log::Log(LogSeverity severity, String facility, const String& message)
|
||||
: Log(severity, std::move(facility))
|
||||
{
|
||||
if (!m_IsNoOp) {
|
||||
m_Buffer << message;
|
||||
}
|
||||
*this << message;
|
||||
}
|
||||
|
||||
Log::Log(LogSeverity severity, String facility)
|
||||
: m_Severity(severity), m_Facility(std::move(facility)), m_IsNoOp(severity < Logger::GetMinLogSeverity())
|
||||
{ }
|
||||
{
|
||||
// Only fully initialize the object if it's actually going to be logged.
|
||||
if (severity >= Logger::GetMinLogSeverity()) {
|
||||
m_Severity = severity;
|
||||
m_Facility = std::move(facility);
|
||||
m_Buffer.emplace();
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Writes the message to the application's log.
|
||||
*/
|
||||
Log::~Log()
|
||||
{
|
||||
if (m_IsNoOp) {
|
||||
if (!m_Buffer) {
|
||||
return;
|
||||
}
|
||||
|
||||
@ -270,7 +275,7 @@ Log::~Log()
|
||||
entry.Facility = m_Facility;
|
||||
|
||||
{
|
||||
auto msg (m_Buffer.str());
|
||||
auto msg (m_Buffer->str());
|
||||
msg.erase(msg.find_last_not_of("\n") + 1u);
|
||||
|
||||
entry.Message = std::move(msg);
|
||||
@ -315,12 +320,3 @@ Log::~Log()
|
||||
}
|
||||
#endif /* _WIN32 */
|
||||
}
|
||||
|
||||
Log& Log::operator<<(const char *val)
|
||||
{
|
||||
if (!m_IsNoOp) {
|
||||
m_Buffer << val;
|
||||
}
|
||||
|
||||
return *this;
|
||||
}
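With this change a Log() statement below the configured minimum severity never constructs the ostringstream; the streamed arguments are still evaluated, only the formatting into the buffer is skipped. A hedged usage sketch (the "Example" facility and the `attempt` variable are stand-ins):

Log(LogDebug, "Example")
	<< "retrying connection, attempt " << attempt;  // formatted only if LogDebug is currently logged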
|
||||
|
@ -6,6 +6,7 @@
|
||||
#include "base/atomic.hpp"
|
||||
#include "base/i2-base.hpp"
|
||||
#include "base/logger-ti.hpp"
|
||||
#include <optional>
|
||||
#include <set>
|
||||
#include <sstream>
|
||||
|
||||
@ -119,19 +120,23 @@ public:
|
||||
~Log();
|
||||
|
||||
template<typename T>
|
||||
Log& operator<<(const T& val)
|
||||
Log& operator<<(T&& val)
|
||||
{
|
||||
m_Buffer << val;
|
||||
if (m_Buffer) {
|
||||
*m_Buffer << std::forward<T>(val);
|
||||
}
|
||||
|
||||
return *this;
|
||||
}
|
||||
|
||||
Log& operator<<(const char *val);
|
||||
|
||||
private:
|
||||
LogSeverity m_Severity;
|
||||
String m_Facility;
|
||||
std::ostringstream m_Buffer;
|
||||
bool m_IsNoOp;
|
||||
/**
|
||||
* Stream for incrementally generating the log message. If the message will be discarded as its level currently
|
||||
* isn't logged, it will be empty as the stream doesn't need to be initialized in this case.
|
||||
*/
|
||||
std::optional<std::ostringstream> m_Buffer;
|
||||
};
|
||||
|
||||
extern template Log& Log::operator<<(const Value&);
|
||||
@ -143,6 +148,7 @@ extern template Log& Log::operator<<(const int&);
|
||||
extern template Log& Log::operator<<(const unsigned long&);
|
||||
extern template Log& Log::operator<<(const long&);
|
||||
extern template Log& Log::operator<<(const double&);
|
||||
extern template Log& Log::operator<<(const char*&);
|
||||
|
||||
}
|
||||
|
||||
|
@ -1,7 +1,6 @@
|
||||
/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
|
||||
|
||||
#include "base/namespace.hpp"
|
||||
#include "base/objectlock.hpp"
|
||||
#include "base/debug.hpp"
|
||||
#include "base/primitivetype.hpp"
|
||||
#include "base/debuginfo.hpp"
|
||||
@ -119,7 +118,26 @@ void Namespace::Remove(const String& field)
|
||||
void Namespace::Freeze() {
|
||||
ObjectLock olock(this);
|
||||
|
||||
m_Frozen = true;
|
||||
m_Frozen.store(true, std::memory_order_release);
|
||||
}
|
||||
|
||||
bool Namespace::Frozen() const
|
||||
{
|
||||
return m_Frozen.load(std::memory_order_acquire);
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns an already locked ObjectLock if the namespace is not frozen.
|
||||
* Otherwise, returns an unlocked object lock.
|
||||
*
|
||||
* @returns An object lock.
|
||||
*/
|
||||
ObjectLock Namespace::LockIfRequired()
|
||||
{
|
||||
if (Frozen()) {
|
||||
return ObjectLock(this, std::defer_lock);
|
||||
}
|
||||
return ObjectLock(this);
|
||||
}
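A sketch of the intended call pattern (assuming `ns` is a Namespace::Ptr): for a frozen namespace the returned lock stays unlocked because no further writes can happen, while an unfrozen namespace is locked for the whole iteration.

ObjectLock olock (ns->LockIfRequired());

for (auto it = ns->Begin(); it != ns->End(); ++it) {
	// safe both with and without the lock actually being held
}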
|
||||
|
||||
std::shared_lock<std::shared_timed_mutex> Namespace::ReadLockUnlessFrozen() const
|
||||
@ -143,13 +161,8 @@ Value Namespace::GetFieldByName(const String& field, bool, const DebugInfo& debu
|
||||
return GetPrototypeField(const_cast<Namespace *>(this), field, false, debugInfo); /* Ignore indexer not found errors similar to the Dictionary class. */
|
||||
}
|
||||
|
||||
void Namespace::SetFieldByName(const String& field, const Value& value, bool overrideFrozen, const DebugInfo& debugInfo)
|
||||
void Namespace::SetFieldByName(const String& field, const Value& value, const DebugInfo& debugInfo)
|
||||
{
|
||||
// The override frozen parameter is mandated by the interface but ignored here. If the namespace is frozen, this
|
||||
// disables locking for read operations, so it must not be modified again to ensure the consistency of the internal
|
||||
// data structures.
|
||||
(void) overrideFrozen;
|
||||
|
||||
Set(field, value, false, debugInfo);
|
||||
}
|
||||
|
||||
@ -165,14 +178,14 @@ bool Namespace::GetOwnField(const String& field, Value *result) const
|
||||
|
||||
Namespace::Iterator Namespace::Begin()
|
||||
{
|
||||
ASSERT(OwnsLock());
|
||||
ASSERT(Frozen() || OwnsLock());
|
||||
|
||||
return m_Data.begin();
|
||||
}
|
||||
|
||||
Namespace::Iterator Namespace::End()
|
||||
{
|
||||
ASSERT(OwnsLock());
|
||||
ASSERT(Frozen() || OwnsLock());
|
||||
|
||||
return m_Data.end();
|
||||
}
|
||||
|
@ -5,6 +5,7 @@
|
||||
|
||||
#include "base/i2-base.hpp"
|
||||
#include "base/object.hpp"
|
||||
#include "base/objectlock.hpp"
|
||||
#include "base/shared-object.hpp"
|
||||
#include "base/value.hpp"
|
||||
#include "base/debuginfo.hpp"
|
||||
@ -73,6 +74,8 @@ public:
|
||||
bool Contains(const String& field) const;
|
||||
void Remove(const String& field);
|
||||
void Freeze();
|
||||
bool Frozen() const;
|
||||
ObjectLock LockIfRequired();
|
||||
|
||||
Iterator Begin();
|
||||
Iterator End();
|
||||
@ -80,7 +83,7 @@ public:
|
||||
size_t GetLength() const;
|
||||
|
||||
Value GetFieldByName(const String& field, bool sandboxed, const DebugInfo& debugInfo) const override;
|
||||
void SetFieldByName(const String& field, const Value& value, bool overrideFrozen, const DebugInfo& debugInfo) override;
|
||||
void SetFieldByName(const String& field, const Value& value, const DebugInfo& debugInfo) override;
|
||||
bool HasOwnField(const String& field) const override;
|
||||
bool GetOwnField(const String& field, Value *result) const override;
|
||||
|
||||
|
@ -125,7 +125,7 @@ Value Object::GetFieldByName(const String& field, bool sandboxed, const DebugInf
|
||||
return GetField(fid);
|
||||
}
|
||||
|
||||
void Object::SetFieldByName(const String& field, const Value& value, bool overrideFrozen, const DebugInfo& debugInfo)
|
||||
void Object::SetFieldByName(const String& field, const Value& value, const DebugInfo& debugInfo)
|
||||
{
|
||||
Type::Ptr type = GetReflectionType();
|
||||
|
||||
@ -201,14 +201,14 @@ Value icinga::GetPrototypeField(const Value& context, const String& field, bool
|
||||
}
|
||||
|
||||
#ifdef I2_LEAK_DEBUG
|
||||
void icinga::TypeAddObject(Object *object)
|
||||
void icinga::TypeAddObject(const Object *object)
|
||||
{
|
||||
std::unique_lock<std::mutex> lock(l_ObjectCountLock);
|
||||
String typeName = Utility::GetTypeName(typeid(*object));
|
||||
l_ObjectCounts[typeName]++;
|
||||
}
|
||||
|
||||
void icinga::TypeRemoveObject(Object *object)
|
||||
void icinga::TypeRemoveObject(const Object *object)
|
||||
{
|
||||
std::unique_lock<std::mutex> lock(l_ObjectCountLock);
|
||||
String typeName = Utility::GetTypeName(typeid(*object));
|
||||
@ -239,7 +239,7 @@ INITIALIZE_ONCE([]() {
|
||||
});
|
||||
#endif /* I2_LEAK_DEBUG */
|
||||
|
||||
void icinga::intrusive_ptr_add_ref(Object *object)
|
||||
void icinga::intrusive_ptr_add_ref(const Object *object)
|
||||
{
|
||||
#ifdef I2_LEAK_DEBUG
|
||||
if (object->m_References.fetch_add(1) == 0u)
|
||||
@ -249,7 +249,7 @@ void icinga::intrusive_ptr_add_ref(Object *object)
|
||||
#endif /* I2_LEAK_DEBUG */
|
||||
}
|
||||
|
||||
void icinga::intrusive_ptr_release(Object *object)
|
||||
void icinga::intrusive_ptr_release(const Object *object)
|
||||
{
|
||||
auto previous (object->m_References.fetch_sub(1));
|
||||
|
||||
|
@ -5,6 +5,7 @@
|
||||
|
||||
#include "base/i2-base.hpp"
|
||||
#include "base/debug.hpp"
|
||||
#include "base/intrusive-ptr.hpp"
|
||||
#include <boost/smart_ptr/intrusive_ptr.hpp>
|
||||
#include <atomic>
|
||||
#include <cstddef>
|
||||
@ -27,10 +28,11 @@ class String;
|
||||
struct DebugInfo;
|
||||
class ValidationUtils;
|
||||
|
||||
extern Value Empty;
|
||||
extern const Value Empty;
|
||||
|
||||
#define DECLARE_PTR_TYPEDEFS(klass) \
|
||||
typedef intrusive_ptr<klass> Ptr
|
||||
typedef intrusive_ptr<klass> Ptr; \
|
||||
typedef intrusive_ptr<const klass> ConstPtr
|
||||
|
||||
#define IMPL_TYPE_LOOKUP_SUPER() \
|
||||
|
||||
@ -170,7 +172,7 @@ public:
|
||||
virtual void SetField(int id, const Value& value, bool suppress_events = false, const Value& cookie = Empty);
|
||||
virtual Value GetField(int id) const;
|
||||
virtual Value GetFieldByName(const String& field, bool sandboxed, const DebugInfo& debugInfo) const;
|
||||
virtual void SetFieldByName(const String& field, const Value& value, bool overrideFrozen, const DebugInfo& debugInfo);
|
||||
virtual void SetFieldByName(const String& field, const Value& value, const DebugInfo& debugInfo);
|
||||
virtual bool HasOwnField(const String& field) const;
|
||||
virtual bool GetOwnField(const String& field, Value *result) const;
|
||||
virtual void ValidateField(int id, const Lazy<Value>& lvalue, const ValidationUtils& utils);
|
||||
@ -191,7 +193,7 @@ private:
|
||||
Object(const Object& other) = delete;
|
||||
Object& operator=(const Object& rhs) = delete;
|
||||
|
||||
std::atomic<uint_fast64_t> m_References;
|
||||
mutable std::atomic<uint_fast64_t> m_References;
|
||||
mutable std::recursive_mutex m_Mutex;
|
||||
|
||||
#ifdef I2_DEBUG
|
||||
@ -201,17 +203,17 @@ private:
|
||||
|
||||
friend struct ObjectLock;
|
||||
|
||||
friend void intrusive_ptr_add_ref(Object *object);
|
||||
friend void intrusive_ptr_release(Object *object);
|
||||
friend void intrusive_ptr_add_ref(const Object *object);
|
||||
friend void intrusive_ptr_release(const Object *object);
|
||||
};
|
||||
|
||||
Value GetPrototypeField(const Value& context, const String& field, bool not_found_error, const DebugInfo& debugInfo);
|
||||
|
||||
void TypeAddObject(Object *object);
|
||||
void TypeRemoveObject(Object *object);
|
||||
void TypeAddObject(const Object *object);
|
||||
void TypeRemoveObject(const Object *object);
|
||||
|
||||
void intrusive_ptr_add_ref(Object *object);
|
||||
void intrusive_ptr_release(Object *object);
|
||||
void intrusive_ptr_add_ref(const Object *object);
|
||||
void intrusive_ptr_release(const Object *object);
|
||||
|
||||
template<typename T>
|
||||
class ObjectImpl
|
||||
|
@ -18,6 +18,18 @@ ObjectLock::ObjectLock(const Object::Ptr& object)
|
||||
{
|
||||
}
|
||||
|
||||
/**
|
||||
* Constructs a lock for the given object without locking it immediately.
|
||||
*
|
||||
* The user must call Lock() explicitly when needed.
|
||||
*
|
||||
* @param object The object to lock.
|
||||
*/
|
||||
ObjectLock::ObjectLock(const Object::Ptr& object, std::defer_lock_t)
|
||||
: m_Object(object.get()), m_Locked(false)
|
||||
{
|
||||
}
|
||||
|
||||
ObjectLock::ObjectLock(const Object *object)
|
||||
: m_Object(object), m_Locked(false)
|
||||
{
|
||||
@ -53,3 +65,15 @@ void ObjectLock::Unlock()
|
||||
m_Locked = false;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns true if the object is locked, false otherwise.
|
||||
*
|
||||
* This operator allows using ObjectLock in boolean contexts.
|
||||
*
|
||||
* @returns true if the object is locked, false otherwise.
|
||||
*/
|
||||
ObjectLock::operator bool() const
|
||||
{
|
||||
return m_Locked;
|
||||
}
|
||||
|
@ -15,6 +15,7 @@ struct ObjectLock
|
||||
{
|
||||
public:
|
||||
ObjectLock(const Object::Ptr& object);
|
||||
ObjectLock(const Object::Ptr& object, std::defer_lock_t);
|
||||
ObjectLock(const Object *object);
|
||||
|
||||
ObjectLock(const ObjectLock&) = delete;
|
||||
@ -25,6 +26,8 @@ public:
|
||||
void Lock();
|
||||
void Unlock();
|
||||
|
||||
operator bool() const;
|
||||
|
||||
private:
|
||||
const Object *m_Object{nullptr};
|
||||
bool m_Locked{false};
|
||||
|
@ -259,6 +259,10 @@ PerfdataValue::Ptr PerfdataValue::Parse(const String& perfdata)
|
||||
|
||||
double value = Convert::ToDouble(tokens[0].SubStr(0, pos));
|
||||
|
||||
if (!std::isfinite(value)) {
|
||||
BOOST_THROW_EXCEPTION(std::invalid_argument("Invalid performance data value: " + perfdata + " is outside of any reasonable range"));
|
||||
}
|
||||
|
||||
bool counter = false;
|
||||
String unit;
|
||||
Value warn, crit, min, max;
|
||||
@ -266,6 +270,11 @@ PerfdataValue::Ptr PerfdataValue::Parse(const String& perfdata)
|
||||
if (pos != String::NPos)
|
||||
unit = tokens[0].SubStr(pos, String::NPos);
|
||||
|
||||
// UoM.Out is an empty string for "c". So set counter before parsing.
|
||||
if (unit == "c") {
|
||||
counter = true;
|
||||
}
|
||||
|
||||
double base;
|
||||
|
||||
{
|
||||
@ -291,10 +300,6 @@ PerfdataValue::Ptr PerfdataValue::Parse(const String& perfdata)
|
||||
}
|
||||
}
|
||||
|
||||
if (unit == "c") {
|
||||
counter = true;
|
||||
}
|
||||
|
||||
warn = ParseWarnCritMinMaxToken(tokens, 1, "warning");
|
||||
crit = ParseWarnCritMinMaxToken(tokens, 2, "critical");
|
||||
min = ParseWarnCritMinMaxToken(tokens, 3, "minimum");
|
||||
|
@ -643,8 +643,7 @@ void Process::IOThreadProc(int tid)
|
||||
#endif /* _WIN32 */
|
||||
|
||||
int i = 1;
|
||||
typedef std::pair<ProcessHandle, Process::Ptr> kv_pair;
|
||||
for (const kv_pair& kv : l_Processes[tid]) {
|
||||
for (auto& kv : l_Processes[tid]) {
|
||||
const Process::Ptr& process = kv.second;
|
||||
#ifdef _WIN32
|
||||
handles[i] = kv.first;
|
||||
@ -1087,7 +1086,9 @@ bool Process::DoEvents()
|
||||
Log(LogWarning, "Process")
|
||||
<< "Couldn't kill the process group " << m_PID << " (" << PrettyPrintArguments(m_Arguments)
|
||||
<< "): [errno " << error << "] " << strerror(error);
|
||||
could_not_kill = true;
|
||||
if (error != ESRCH) {
|
||||
could_not_kill = true;
|
||||
}
|
||||
}
|
||||
#endif /* _WIN32 */
|
||||
|
||||
|
@ -24,7 +24,7 @@ Value Reference::Get() const
|
||||
|
||||
void Reference::Set(const Value& value)
|
||||
{
|
||||
m_Parent->SetFieldByName(m_Index, value, false, DebugInfo());
|
||||
m_Parent->SetFieldByName(m_Index, value, DebugInfo());
|
||||
}
|
||||
|
||||
Object::Ptr Reference::GetParent() const
|
||||
|
@ -23,16 +23,6 @@ class Registry
|
||||
public:
|
||||
typedef std::map<String, T> ItemMap;
|
||||
|
||||
void RegisterIfNew(const String& name, const T& item)
|
||||
{
|
||||
std::unique_lock<std::mutex> lock(m_Mutex);
|
||||
|
||||
if (m_Items.find(name) != m_Items.end())
|
||||
return;
|
||||
|
||||
RegisterInternal(name, item, lock);
|
||||
}
|
||||
|
||||
void Register(const String& name, const T& item)
|
||||
{
|
||||
std::unique_lock<std::mutex> lock(m_Mutex);
|
||||
@ -40,38 +30,6 @@ public:
|
||||
RegisterInternal(name, item, lock);
|
||||
}
|
||||
|
||||
void Unregister(const String& name)
|
||||
{
|
||||
size_t erased;
|
||||
|
||||
{
|
||||
std::unique_lock<std::mutex> lock(m_Mutex);
|
||||
erased = m_Items.erase(name);
|
||||
}
|
||||
|
||||
if (erased > 0)
|
||||
OnUnregistered(name);
|
||||
}
|
||||
|
||||
void Clear()
|
||||
{
|
||||
typename Registry<U, T>::ItemMap items;
|
||||
|
||||
{
|
||||
std::unique_lock<std::mutex> lock(m_Mutex);
|
||||
items = m_Items;
|
||||
}
|
||||
|
||||
for (const auto& kv : items) {
|
||||
OnUnregistered(kv.first);
|
||||
}
|
||||
|
||||
{
|
||||
std::unique_lock<std::mutex> lock(m_Mutex);
|
||||
m_Items.clear();
|
||||
}
|
||||
}
|
||||
|
||||
T GetItem(const String& name) const
|
||||
{
|
||||
std::unique_lock<std::mutex> lock(m_Mutex);
|
||||
|
@ -11,7 +11,6 @@
|
||||
#include "base/objectlock.hpp"
|
||||
#include "base/configtype.hpp"
|
||||
#include "base/application.hpp"
|
||||
#include "base/dependencygraph.hpp"
|
||||
#include "base/initialize.hpp"
|
||||
#include "base/namespace.hpp"
|
||||
#include "config/configitem.hpp"
|
||||
@ -49,7 +48,6 @@ REGISTER_SAFE_FUNCTION(System, basename, &Utility::BaseName, "path");
|
||||
REGISTER_SAFE_FUNCTION(System, dirname, &Utility::DirName, "path");
|
||||
REGISTER_SAFE_FUNCTION(System, getenv, &ScriptUtils::GetEnv, "value");
|
||||
REGISTER_SAFE_FUNCTION(System, msi_get_component_path, &ScriptUtils::MsiGetComponentPathShim, "component");
|
||||
REGISTER_SAFE_FUNCTION(System, track_parents, &ScriptUtils::TrackParents, "child");
|
||||
REGISTER_SAFE_FUNCTION(System, escape_shell_cmd, &Utility::EscapeShellCmd, "cmd");
|
||||
REGISTER_SAFE_FUNCTION(System, escape_shell_arg, &Utility::EscapeShellArg, "arg");
|
||||
#ifdef _WIN32
|
||||
@ -124,7 +122,7 @@ bool ScriptUtils::Regex(const std::vector<Value>& args)
|
||||
if (texts->GetLength() == 0)
|
||||
return false;
|
||||
|
||||
for (const String& text : texts) {
|
||||
for (String text : texts) {
|
||||
bool res = false;
|
||||
try {
|
||||
boost::smatch what;
|
||||
@ -177,7 +175,7 @@ bool ScriptUtils::Match(const std::vector<Value>& args)
|
||||
if (texts->GetLength() == 0)
|
||||
return false;
|
||||
|
||||
for (const String& text : texts) {
|
||||
for (String text : texts) {
|
||||
bool res = Utility::Match(pattern, text);
|
||||
|
||||
if (mode == MatchAny && res)
|
||||
@ -223,7 +221,7 @@ bool ScriptUtils::CidrMatch(const std::vector<Value>& args)
|
||||
if (ips->GetLength() == 0)
|
||||
return false;
|
||||
|
||||
for (const String& ip : ips) {
|
||||
for (String ip : ips) {
|
||||
bool res = Utility::CidrMatch(pattern, ip);
|
||||
|
||||
if (mode == MatchAny && res)
|
||||
@ -518,11 +516,6 @@ String ScriptUtils::MsiGetComponentPathShim(const String& component)
|
||||
#endif /* _WIN32 */
|
||||
}
|
||||
|
||||
Array::Ptr ScriptUtils::TrackParents(const Object::Ptr& child)
|
||||
{
|
||||
return Array::FromVector(DependencyGraph::GetParents(child));
|
||||
}
|
||||
|
||||
double ScriptUtils::Ptr(const Object::Ptr& object)
|
||||
{
|
||||
return reinterpret_cast<intptr_t>(object.get());
|
||||
|
@ -39,7 +39,6 @@ public:
|
||||
static Array::Ptr GetObjects(const Type::Ptr& type);
|
||||
static void Assert(const Value& arg);
|
||||
static String MsiGetComponentPathShim(const String& component);
|
||||
static Array::Ptr TrackParents(const Object::Ptr& parent);
|
||||
static double Ptr(const Object::Ptr& object);
|
||||
static Value Glob(const std::vector<Value>& args);
|
||||
static Value GlobRecursive(const std::vector<Value>& args);
|
||||
|
@ -12,8 +12,8 @@ namespace icinga
|
||||
|
||||
class SharedObject;
|
||||
|
||||
inline void intrusive_ptr_add_ref(SharedObject *object);
|
||||
inline void intrusive_ptr_release(SharedObject *object);
|
||||
inline void intrusive_ptr_add_ref(const SharedObject *object);
|
||||
inline void intrusive_ptr_release(const SharedObject *object);
|
||||
|
||||
/**
|
||||
* Seamless and polymorphic base for any class to create shared pointers of.
|
||||
@ -23,45 +23,30 @@ inline void intrusive_ptr_release(SharedObject *object);
|
||||
*/
|
||||
class SharedObject
|
||||
{
|
||||
friend void intrusive_ptr_add_ref(SharedObject *object);
|
||||
friend void intrusive_ptr_release(SharedObject *object);
|
||||
friend void intrusive_ptr_add_ref(const SharedObject *object);
|
||||
friend void intrusive_ptr_release(const SharedObject *object);
|
||||
|
||||
protected:
|
||||
inline SharedObject() : m_References(0)
|
||||
{
|
||||
}
|
||||
|
||||
inline SharedObject(const SharedObject&) : SharedObject()
|
||||
{
|
||||
}
|
||||
|
||||
inline SharedObject(SharedObject&&) : SharedObject()
|
||||
{
|
||||
}
|
||||
|
||||
inline SharedObject& operator=(const SharedObject&)
|
||||
{
|
||||
return *this;
|
||||
}
|
||||
|
||||
inline SharedObject& operator=(SharedObject&&)
|
||||
{
|
||||
return *this;
|
||||
}
|
||||
SharedObject(const SharedObject&) = delete;
|
||||
SharedObject& operator=(const SharedObject&) = delete;
|
||||
|
||||
inline virtual
|
||||
~SharedObject() = default;
|
||||
|
||||
private:
|
||||
Atomic<uint_fast64_t> m_References;
|
||||
mutable Atomic<uint_fast64_t> m_References;
|
||||
};
|
||||
|
||||
inline void intrusive_ptr_add_ref(SharedObject *object)
|
||||
inline void intrusive_ptr_add_ref(const SharedObject *object)
|
||||
{
|
||||
object->m_References.fetch_add(1);
|
||||
}
|
||||
|
||||
inline void intrusive_ptr_release(SharedObject *object)
|
||||
inline void intrusive_ptr_release(const SharedObject *object)
|
||||
{
|
||||
if (object->m_References.fetch_sub(1) == 1u) {
|
||||
delete object;
|
||||
|
@ -4,6 +4,7 @@
|
||||
#define SHARED_H
|
||||
|
||||
#include "base/atomic.hpp"
|
||||
#include "base/intrusive-ptr.hpp"
|
||||
#include <boost/smart_ptr/intrusive_ptr.hpp>
|
||||
#include <cstdint>
|
||||
#include <utility>
|
||||
@ -15,13 +16,13 @@ template<class T>
|
||||
class Shared;
|
||||
|
||||
template<class T>
|
||||
inline void intrusive_ptr_add_ref(Shared<T> *object)
|
||||
inline void intrusive_ptr_add_ref(const Shared<T> *object)
|
||||
{
|
||||
object->m_References.fetch_add(1);
|
||||
}
|
||||
|
||||
template<class T>
|
||||
inline void intrusive_ptr_release(Shared<T> *object)
|
||||
inline void intrusive_ptr_release(const Shared<T> *object)
|
||||
{
|
||||
if (object->m_References.fetch_sub(1) == 1u) {
|
||||
delete object;
|
||||
@ -37,11 +38,12 @@ inline void intrusive_ptr_release(Shared<T> *object)
|
||||
template<class T>
|
||||
class Shared : public T
|
||||
{
|
||||
friend void intrusive_ptr_add_ref<>(Shared<T> *object);
|
||||
friend void intrusive_ptr_release<>(Shared<T> *object);
|
||||
friend void intrusive_ptr_add_ref<>(const Shared<T> *object);
|
||||
friend void intrusive_ptr_release<>(const Shared<T> *object);
|
||||
|
||||
public:
|
||||
typedef boost::intrusive_ptr<Shared> Ptr;
|
||||
typedef boost::intrusive_ptr<const Shared> ConstPtr;
|
||||
|
||||
/**
|
||||
* Like std::make_shared, but for this class.
|
||||
@ -93,7 +95,7 @@ public:
|
||||
}
|
||||
|
||||
private:
|
||||
Atomic<uint_fast64_t> m_References;
|
||||
mutable Atomic<uint_fast64_t> m_References;
|
||||
};
|
||||
|
||||
}
|
||||
|
@ -33,7 +33,7 @@ String::String(const String& other)
|
||||
: m_Data(other)
|
||||
{ }
|
||||
|
||||
String::String(String&& other)
|
||||
String::String(String&& other) noexcept
|
||||
: m_Data(std::move(other.m_Data))
|
||||
{ }
|
||||
|
||||
@ -47,7 +47,7 @@ String::String(Value&& other)
|
||||
String& String::operator=(Value&& other)
|
||||
{
|
||||
if (other.IsString())
|
||||
m_Data = std::move(other.Get<String>());
|
||||
*this = std::move(other.Get<String>()); // Will atomically bind to the move assignment operator below.
|
||||
else
|
||||
*this = static_cast<String>(other);
|
||||
|
||||
@ -66,7 +66,7 @@ String& String::operator=(const String& rhs)
|
||||
return *this;
|
||||
}
|
||||
|
||||
String& String::operator=(String&& rhs)
|
||||
String& String::operator=(String&& rhs) noexcept
|
||||
{
|
||||
m_Data = std::move(rhs.m_Data);
|
||||
return *this;
|
||||
|
@ -44,7 +44,7 @@ public:
|
||||
String(std::string data);
|
||||
String(String::SizeType n, char c);
|
||||
String(const String& other);
|
||||
String(String&& other);
|
||||
String(String&& other) noexcept;
|
||||
|
||||
#ifndef _MSC_VER
|
||||
String(Value&& other);
|
||||
@ -56,7 +56,7 @@ public:
|
||||
{ }
|
||||
|
||||
String& operator=(const String& rhs);
|
||||
String& operator=(String&& rhs);
|
||||
String& operator=(String&& rhs) noexcept;
|
||||
String& operator=(Value&& rhs);
|
||||
String& operator=(const std::string& rhs);
|
||||
String& operator=(const char *rhs);
|
||||
|
@ -41,8 +41,7 @@ void Connect(Socket& socket, const String& node, const String& service)
|
||||
using boost::asio::ip::tcp;
|
||||
|
||||
tcp::resolver resolver (IoEngine::Get().GetIoContext());
|
||||
tcp::resolver::query query (node, service);
|
||||
auto result (resolver.resolve(query));
|
||||
auto result (resolver.resolve(node.GetData(), service.GetData()));
|
||||
auto current (result.begin());
|
||||
|
||||
for (;;) {
|
||||
@ -72,8 +71,7 @@ void Connect(Socket& socket, const String& node, const String& service, boost::a
|
||||
using boost::asio::ip::tcp;
|
||||
|
||||
tcp::resolver resolver (IoEngine::Get().GetIoContext());
|
||||
tcp::resolver::query query (node, service);
|
||||
auto result (resolver.async_resolve(query, yc));
|
||||
auto result (resolver.async_resolve(node.GetData(), service.GetData(), yc));
|
||||
auto current (result.begin());
|
||||
|
||||
for (;;) {
|
||||
|
@ -7,6 +7,8 @@
|
||||
#include "base/logger.hpp"
|
||||
#include "base/configuration.hpp"
|
||||
#include "base/convert.hpp"
|
||||
#include "base/defer.hpp"
|
||||
#include "base/io-engine.hpp"
|
||||
#include <boost/asio/ssl/context.hpp>
|
||||
#include <boost/asio/ssl/verify_context.hpp>
|
||||
#include <boost/asio/ssl/verify_mode.hpp>
|
||||
@ -18,14 +20,48 @@
|
||||
|
||||
using namespace icinga;
|
||||
|
||||
bool UnbufferedAsioTlsStream::IsVerifyOK() const
|
||||
/**
|
||||
* Checks whether the TLS handshake was completed with a valid peer certificate.
|
||||
*
|
||||
* @return true if the peer presented a valid certificate, false otherwise
|
||||
*/
|
||||
bool UnbufferedAsioTlsStream::IsVerifyOK()
|
||||
{
|
||||
return m_VerifyOK;
|
||||
if (!SSL_is_init_finished(native_handle())) {
|
||||
// handshake was not completed
|
||||
return false;
|
||||
}
|
||||
|
||||
if (GetPeerCertificate() == nullptr) {
|
||||
// no peer certificate was sent
|
||||
return false;
|
||||
}
|
||||
|
||||
return SSL_get_verify_result(native_handle()) == X509_V_OK;
|
||||
}
|
||||
|
||||
String UnbufferedAsioTlsStream::GetVerifyError() const
|
||||
/**
|
||||
* Returns a human-readable error string for situations where IsVerifyOK() returns false.
|
||||
*
|
||||
* If the handshake was completed and a peer certificate was provided,
|
||||
* the string additionally contains the OpenSSL verification error code.
|
||||
*
|
||||
* @return string containing the error message
|
||||
*/
|
||||
String UnbufferedAsioTlsStream::GetVerifyError()
|
||||
{
|
||||
return m_VerifyError;
|
||||
if (!SSL_is_init_finished(native_handle())) {
|
||||
return "handshake not completed";
|
||||
}
|
||||
|
||||
if (GetPeerCertificate() == nullptr) {
|
||||
return "no peer certificate provided";
|
||||
}
|
||||
|
||||
std::ostringstream buf;
|
||||
long err = SSL_get_verify_result(native_handle());
|
||||
buf << "code " << err << ": " << X509_verify_cert_error_string(err);
|
||||
return buf.str();
|
||||
}
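A sketch of the caller side after the handshake (the facility name and the `stream` variable are illustrative): since the verify callback now accepts every certificate, it is the caller's job to check the result explicitly.

if (!stream->IsVerifyOK()) {
	Log(LogWarning, "TlsExample")
		<< "Peer verification failed: " << stream->GetVerifyError();
	// e.g. only allow certificate signing requests on such connections
}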
|
||||
|
||||
std::shared_ptr<X509> UnbufferedAsioTlsStream::GetPeerCertificate()
|
||||
@ -43,17 +79,17 @@ void UnbufferedAsioTlsStream::BeforeHandshake(handshake_type type)
|
||||
|
||||
set_verify_mode(ssl::verify_peer | ssl::verify_client_once);
|
||||
|
||||
set_verify_callback([this](bool preverified, ssl::verify_context& ctx) {
|
||||
if (!preverified) {
|
||||
m_VerifyOK = false;
|
||||
|
||||
std::ostringstream msgbuf;
|
||||
int err = X509_STORE_CTX_get_error(ctx.native_handle());
|
||||
|
||||
msgbuf << "code " << err << ": " << X509_verify_cert_error_string(err);
|
||||
m_VerifyError = msgbuf.str();
|
||||
}
|
||||
set_verify_callback([](bool preverified, ssl::verify_context& ctx) {
|
||||
(void) preverified;
|
||||
(void) ctx;
|
||||
|
||||
/* Continue the handshake even if an invalid peer certificate was presented. The verification result has to be
|
||||
* checked using the IsVerifyOK() method.
|
||||
*
|
||||
* Such connections are used for the initial enrollment of nodes where they use a self-signed certificate to
|
||||
* send a certificate request and receive their valid certificate after approval (manually by the administrator
|
||||
* or using a certificate ticket).
|
||||
*/
|
||||
return true;
|
||||
});
|
||||
|
||||
@ -69,3 +105,62 @@ void UnbufferedAsioTlsStream::BeforeHandshake(handshake_type type)
|
||||
}
|
||||
#endif /* SSL_CTRL_SET_TLSEXT_HOSTNAME */
|
||||
}
|
||||
|
||||
/**
|
||||
* Forcefully close the connection, typically (details are up to the operating system) using a TCP RST.
|
||||
*/
|
||||
void AsioTlsStream::ForceDisconnect()
|
||||
{
|
||||
if (!lowest_layer().is_open()) {
|
||||
// Already disconnected, nothing to do.
|
||||
return;
|
||||
}
|
||||
|
||||
boost::system::error_code ec;
|
||||
|
||||
// Close the socket. In case the connection wasn't shut down cleanly by GracefulDisconnect(), the operating system
|
||||
// will typically terminate the connection with a TCP RST. Otherwise, this just releases the file descriptor.
|
||||
lowest_layer().close(ec);
|
||||
}
|
||||
|
||||
/**
|
||||
* Try to cleanly shut down the connection. This involves sending a TLS close_notify shutdown alert and terminating the
|
||||
* underlying TCP connection. Sending these additional messages can block, hence the method takes a yield context and
|
||||
* internally implements a timeout of 10 seconds for the operation, after which the connection is forcefully terminated
|
||||
* using ForceDisconnect().
|
||||
*
|
||||
* @param strand Asio strand used for other operations on this connection.
|
||||
* @param yc Yield context for Asio coroutines
|
||||
*/
|
||||
void AsioTlsStream::GracefulDisconnect(boost::asio::io_context::strand& strand, boost::asio::yield_context& yc)
|
||||
{
|
||||
if (!lowest_layer().is_open()) {
|
||||
// Already disconnected, nothing to do.
|
||||
return;
|
||||
}
|
||||
|
||||
{
|
||||
Timeout shutdownTimeout (strand, boost::posix_time::seconds(10),
|
||||
[this] {
|
||||
// Forcefully terminate the connection if async_shutdown() blocked more than 10 seconds.
|
||||
ForceDisconnect();
|
||||
}
|
||||
);
|
||||
|
||||
// Close the TLS connection, effectively uses SSL_shutdown() to send a close_notify shutdown alert to the peer.
|
||||
boost::system::error_code ec;
|
||||
next_layer().async_shutdown(yc[ec]);
|
||||
}
|
||||
|
||||
if (!lowest_layer().is_open()) {
|
||||
// Connection got closed in the meantime, most likely by the timeout, so nothing more to do.
|
||||
return;
|
||||
}
|
||||
|
||||
// Shut down the TCP connection.
|
||||
boost::system::error_code ec;
|
||||
lowest_layer().shutdown(lowest_layer_type::shutdown_both, ec);
|
||||
|
||||
// Clean up the connection (closes the file descriptor).
|
||||
ForceDisconnect();
|
||||
}
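Typical use, sketched under the assumption that `stream`, `strand` and `yc` are whatever the surrounding connection code already has: prefer the clean shutdown inside a coroutine and rely on its internal timeout, falling back to ForceDisconnect() outside coroutine context.

// inside boost::asio::spawn(strand, [stream](boost::asio::yield_context yc) { ... })
stream->GracefulDisconnect(strand, yc);

// outside a coroutine, e.g. in an error path
stream->ForceDisconnect();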
|
||||
|
@ -70,12 +70,12 @@ class UnbufferedAsioTlsStream : public AsioTcpTlsStream
|
||||
public:
|
||||
inline
|
||||
UnbufferedAsioTlsStream(UnbufferedAsioTlsStreamParams& init)
|
||||
: AsioTcpTlsStream(init.IoContext, init.SslContext), m_VerifyOK(true), m_Hostname(init.Hostname)
|
||||
: AsioTcpTlsStream(init.IoContext, init.SslContext), m_Hostname(init.Hostname)
|
||||
{
|
||||
}
|
||||
|
||||
bool IsVerifyOK() const;
|
||||
String GetVerifyError() const;
|
||||
bool IsVerifyOK();
|
||||
String GetVerifyError();
|
||||
std::shared_ptr<X509> GetPeerCertificate();
|
||||
|
||||
template<class... Args>
|
||||
@ -97,8 +97,6 @@ public:
|
||||
}
|
||||
|
||||
private:
|
||||
bool m_VerifyOK;
|
||||
String m_VerifyError;
|
||||
String m_Hostname;
|
||||
|
||||
void BeforeHandshake(handshake_type type);
|
||||
@ -113,6 +111,9 @@ public:
|
||||
{
|
||||
}
|
||||
|
||||
void ForceDisconnect();
|
||||
void GracefulDisconnect(boost::asio::io_context::strand& strand, boost::asio::yield_context& yc);
|
||||
|
||||
private:
|
||||
inline
|
||||
AsioTlsStream(UnbufferedAsioTlsStreamParams init)
|
||||
|
@ -93,7 +93,9 @@ static void InitSslContext(const Shared<boost::asio::ssl::context>::Ptr& context
|
||||
|
||||
flags |= SSL_OP_CIPHER_SERVER_PREFERENCE;
|
||||
|
||||
#if OPENSSL_VERSION_NUMBER < 0x10100000L
|
||||
#ifdef LIBRESSL_VERSION_NUMBER
|
||||
flags |= SSL_OP_NO_CLIENT_RENEGOTIATION;
|
||||
#elif OPENSSL_VERSION_NUMBER < 0x10100000L
|
||||
SSL_CTX_set_info_callback(sslContext, [](const SSL* ssl, int where, int) {
|
||||
if (where & SSL_CB_HANDSHAKE_DONE) {
|
||||
ssl->s3->flags |= SSL3_FLAGS_NO_RENEGOTIATE_CIPHERS;
|
||||
@ -983,27 +985,47 @@ String BinaryToHex(const unsigned char* data, size_t length) {
|
||||
|
||||
bool VerifyCertificate(const std::shared_ptr<X509> &caCertificate, const std::shared_ptr<X509> &certificate, const String& crlFile)
|
||||
{
|
||||
X509_STORE *store = X509_STORE_new();
|
||||
return VerifyCertificate(caCertificate.get(), certificate.get(), crlFile);
|
||||
}
|
||||
|
||||
bool VerifyCertificate(X509* caCertificate, X509* certificate, const String& crlFile)
|
||||
{
|
||||
#if OPENSSL_VERSION_NUMBER < 0x10100000L
|
||||
/*
|
||||
* OpenSSL older than version 1.1.0 stored a valid flag in the struct behind X509* which leads to certain validation
|
||||
* steps to be skipped on subsequent verification operations. If a certificate is verified multiple times with a
|
||||
* different configuration, for example with different trust anchors, this can result in the certificate
|
||||
* incorrectly being treated as valid.
|
||||
*
|
||||
* This issue is worked around by serializing and deserializing the certificate which creates a new struct instance
|
||||
* with the valid flag cleared, hence performing the full validation.
|
||||
*
|
||||
* The flag in question was removed in OpenSSL 1.1.0, so this extra step isn't necessary for more recent versions:
|
||||
* https://github.com/openssl/openssl/commit/0e76014e584ba78ef1d6ecb4572391ef61c4fb51
|
||||
*/
|
||||
std::shared_ptr<X509> copy = StringToCertificate(CertificateToString(certificate));
|
||||
VERIFY(copy.get() != certificate);
|
||||
certificate = copy.get();
|
||||
#endif
|
||||
|
||||
std::unique_ptr<X509_STORE, decltype(&X509_STORE_free)> store{X509_STORE_new(), &X509_STORE_free};
|
||||
|
||||
if (!store)
|
||||
return false;
|
||||
|
||||
X509_STORE_add_cert(store, caCertificate.get());
|
||||
X509_STORE_add_cert(store.get(), caCertificate);
|
||||
|
||||
if (!crlFile.IsEmpty()) {
|
||||
AddCRLToSSLContext(store, crlFile);
|
||||
AddCRLToSSLContext(store.get(), crlFile);
|
||||
}
|
||||
|
||||
X509_STORE_CTX *csc = X509_STORE_CTX_new();
|
||||
X509_STORE_CTX_init(csc, store, certificate.get(), nullptr);
|
||||
std::unique_ptr<X509_STORE_CTX, decltype(&X509_STORE_CTX_free)> csc{X509_STORE_CTX_new(), &X509_STORE_CTX_free};
|
||||
X509_STORE_CTX_init(csc.get(), store.get(), certificate, nullptr);
|
||||
|
||||
int rc = X509_verify_cert(csc);
|
||||
|
||||
X509_STORE_CTX_free(csc);
|
||||
X509_STORE_free(store);
|
||||
int rc = X509_verify_cert(csc.get());
|
||||
|
||||
if (rc == 0) {
|
||||
int err = X509_STORE_CTX_get_error(csc);
|
||||
int err = X509_STORE_CTX_get_error(csc.get());
|
||||
|
||||
BOOST_THROW_EXCEPTION(openssl_error()
|
||||
<< boost::errinfo_api_function("X509_verify_cert")
|
||||
|
@ -79,6 +79,7 @@ String RandomString(int length);
|
||||
String BinaryToHex(const unsigned char* data, size_t length);
|
||||
|
||||
bool VerifyCertificate(const std::shared_ptr<X509>& caCertificate, const std::shared_ptr<X509>& certificate, const String& crlFile);
|
||||
bool VerifyCertificate(X509* caCertificate, X509* certificate, const String& crlFile);
|
||||
bool IsCa(const std::shared_ptr<X509>& cacert);
|
||||
int GetCertificateVersion(const std::shared_ptr<X509>& cert);
|
||||
String GetSignatureAlgorithm(const std::shared_ptr<X509>& cert);
|
||||
|
@ -1,9 +1,13 @@
|
||||
/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
|
||||
|
||||
#include "base/type.hpp"
|
||||
#include "base/atomic.hpp"
|
||||
#include "base/configobject.hpp"
|
||||
#include "base/debug.hpp"
|
||||
#include "base/scriptglobal.hpp"
|
||||
#include "base/namespace.hpp"
|
||||
#include "base/objectlock.hpp"
|
||||
#include <functional>
|
||||
|
||||
using namespace icinga;
|
||||
|
||||
@ -32,6 +36,43 @@ INITIALIZE_ONCE_WITH_PRIORITY([]() {
|
||||
Type::Register(type);
|
||||
}, InitializePriority::RegisterTypeType);
|
||||
|
||||
static std::vector<Type::Ptr> l_SortedByLoadDependencies;
|
||||
static Atomic l_SortingByLoadDependenciesDone (false);
|
||||
|
||||
INITIALIZE_ONCE_WITH_PRIORITY([] {
|
||||
std::unordered_set<Type*> visited;
|
||||
|
||||
std::function<void(Type*)> visit;
|
||||
// Please note that this callback does not detect any cyclic load dependencies;
|
||||
// instead, it relies on the "sort_by_load_after" unit test to fail.
|
||||
visit = ([&visit, &visited](Type* type) {
|
||||
if (visited.find(type) != visited.end()) {
|
||||
return;
|
||||
}
|
||||
visited.emplace(type);
|
||||
|
||||
for (auto dependency : type->GetLoadDependencies()) {
|
||||
visit(dependency);
|
||||
}
|
||||
|
||||
// We have managed to reach the final/top node in this dependency graph,
|
||||
// so let's place it at its final position; its dependencies were already appended before it.
|
||||
l_SortedByLoadDependencies.emplace_back(type);
|
||||
});
|
||||
|
||||
// Sort the types by their load_after dependencies in a Depth-First search manner.
|
||||
for (const Type::Ptr& type : Type::GetAllTypes()) {
|
||||
// Note that only those types that are assignable to the dynamic ConfigObject type can have "load_after"
|
||||
// dependencies, otherwise they are just some Icinga 2 primitive types such as Number, String, etc. and
|
||||
// we need to ignore them.
|
||||
if (ConfigObject::TypeInstance->IsAssignableFrom(type)) {
|
||||
visit(type.get());
|
||||
}
|
||||
}
|
||||
|
||||
l_SortingByLoadDependenciesDone.store(true);
|
||||
}, InitializePriority::SortTypes);
|
||||
|
||||
String Type::ToString() const
|
||||
{
|
||||
return "type '" + GetName() + "'";
|
||||
@ -72,6 +113,12 @@ std::vector<Type::Ptr> Type::GetAllTypes()
|
||||
return types;
|
||||
}
|
||||
|
||||
const std::vector<Type::Ptr>& Type::GetConfigTypesSortedByLoadDependencies()
|
||||
{
|
||||
VERIFY(l_SortingByLoadDependenciesDone.load());
|
||||
return l_SortedByLoadDependencies;
|
||||
}
|
||||
|
||||
String Type::GetPluralName() const
|
||||
{
|
||||
String name = GetName();
|
||||
|
@ -83,6 +83,21 @@ public:
|
||||
static Type::Ptr GetByName(const String& name);
|
||||
static std::vector<Type::Ptr> GetAllTypes();
|
||||
|
||||
/**
|
||||
* Returns a list of config types sorted by their "load_after" dependencies.
|
||||
*
|
||||
* All dependencies of a given type are listed at a lower index than that of the type itself. In other words,
|
||||
* if a `Service` type load depends on the `Host` and `ApiListener` types, the Host and ApiListener types are
|
||||
* guaranteed to appear first on the list. Nevertheless, the order of the Host and ApiListener types themselves
|
||||
* is arbitrary if the two types are not dependent.
|
||||
*
|
||||
* It should be noted that this method will fail fatally when used prior to the completion
|
||||
* of namespace initialization.
|
||||
*
|
||||
* @return std::vector<Type::Ptr>
|
||||
*/
|
||||
static const std::vector<Ptr>& GetConfigTypesSortedByLoadDependencies();
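A consumer sketch (hypothetical loop, relying only on the ordering guarantee documented above): processing types in this order ensures every "load_after" dependency has been handled before its dependents.

for (const Type::Ptr& type : Type::GetConfigTypesSortedByLoadDependencies()) {
	// all load_after dependencies of `type` were already visited at this point
}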
|
||||
|
||||
void SetField(int id, const Value& value, bool suppress_events = false, const Value& cookie = Empty) override;
|
||||
Value GetField(int id) const override;
|
||||
|
||||
|
@ -4,6 +4,7 @@
|
||||
#include "base/utility.hpp"
|
||||
#include "base/convert.hpp"
|
||||
#include "base/application.hpp"
|
||||
#include "base/defer.hpp"
|
||||
#include "base/logger.hpp"
|
||||
#include "base/exception.hpp"
|
||||
#include "base/socket.hpp"
|
||||
@ -19,6 +20,7 @@
|
||||
#include <boost/thread/tss.hpp>
|
||||
#include <boost/algorithm/string/trim.hpp>
|
||||
#include <boost/algorithm/string/replace.hpp>
|
||||
#include <boost/numeric/conversion/cast.hpp>
|
||||
#include <boost/uuid/uuid_io.hpp>
|
||||
#include <boost/uuid/uuid_generators.hpp>
|
||||
#include <boost/regex.hpp>
|
||||
@ -798,43 +800,67 @@ void Utility::RenameFile(const String& source, const String& target)
|
||||
#endif /* _WIN32 */
|
||||
}
|
||||
|
||||
/*
|
||||
* Set file permissions
|
||||
/**
|
||||
* Set the ownership of the specified file to the given user and group.
|
||||
*
|
||||
* In case of an error, false is returned and the error is logged.
|
||||
*
|
||||
* @note This operation will fail if the program is not run as root or the given user is
|
||||
* not already the owner and member of the given group.
|
||||
*
|
||||
* @param file The path to the file as a string
|
||||
* @param user Either the username or their UID as a string
|
||||
* @param group Either the group's name or its GID as a string
|
||||
*
|
||||
* @return 'true' if the operation was successful, 'false' if an error occurred.
|
||||
*/
|
||||
bool Utility::SetFileOwnership(const String& file, const String& user, const String& group)
|
||||
{
|
||||
#ifndef _WIN32
|
||||
errno = 0;
|
||||
struct passwd *pw = getpwnam(user.CStr());
|
||||
uid_t uid = 0;
|
||||
try {
|
||||
uid = boost::lexical_cast<uid_t>(user);
|
||||
} catch (const boost::bad_lexical_cast&) {
|
||||
errno = 0;
|
||||
struct passwd* pw = getpwnam(user.CStr());
|
||||
|
||||
if (!pw) {
|
||||
if (errno == 0) {
|
||||
Log(LogCritical, "cli")
|
||||
<< "Invalid user specified: " << user;
|
||||
return false;
|
||||
} else {
|
||||
Log(LogCritical, "cli")
|
||||
<< "getpwnam() failed with error code " << errno << ", \"" << Utility::FormatErrorNumber(errno) << "\"";
|
||||
if (!pw) {
|
||||
if (errno == 0) {
|
||||
Log(LogCritical, "cli")
|
||||
<< "Invalid user specified: " << user;
|
||||
} else {
|
||||
Log(LogCritical, "cli") << "getpwnam() failed with error code " << errno << ", \""
|
||||
<< Utility::FormatErrorNumber(errno) << "\"";
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
uid = pw->pw_uid;
|
||||
}
|
||||
|
||||
errno = 0;
|
||||
struct group *gr = getgrnam(group.CStr());
|
||||
|
||||
if (!gr) {
|
||||
if (errno == 0) {
|
||||
Log(LogCritical, "cli")
|
||||
<< "Invalid group specified: " << group;
|
||||
return false;
|
||||
} else {
|
||||
Log(LogCritical, "cli")
|
||||
<< "getgrnam() failed with error code " << errno << ", \"" << Utility::FormatErrorNumber(errno) << "\"";
|
||||
gid_t gid = 0;
|
||||
try {
|
||||
gid = boost::lexical_cast<gid_t>(group);
|
||||
} catch (const boost::bad_lexical_cast&) {
|
||||
errno = 0;
|
||||
struct group* gr = getgrnam(group.CStr());
|
||||
|
||||
if (!gr) {
|
||||
if (errno == 0) {
|
||||
Log(LogCritical, "cli")
|
||||
<< "Invalid group specified: " << group;
|
||||
} else {
|
||||
Log(LogCritical, "cli") << "getgrnam() failed with error code " << errno << ", \""
|
||||
<< Utility::FormatErrorNumber(errno) << "\"";
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
gid = gr->gr_gid;
|
||||
}
|
||||
|
||||
if (chown(file.CStr(), pw->pw_uid, gr->gr_gid) < 0) {
|
||||
if (chown(file.CStr(), uid, gid) < 0) {
|
||||
Log(LogCritical, "cli")
|
||||
<< "chown() failed with error code " << errno << ", \"" << Utility::FormatErrorNumber(errno) << "\"";
|
||||
return false;
|
||||
@ -1049,22 +1075,19 @@ String Utility::FormatDuration(double duration)
|
||||
return NaturalJoin(tokens);
|
||||
}
|
||||
|
||||
String Utility::FormatDateTime(const char *format, double ts)
|
||||
String Utility::FormatDateTime(const char* format, double ts)
|
||||
{
|
||||
char timestamp[128];
|
||||
auto tempts = (time_t)ts; /* We don't handle sub-second timestamps here just yet. */
|
||||
// Sub-second precision is removed, strftime() has no format specifiers for that anyway.
|
||||
auto tempts = boost::numeric_cast<time_t>(ts);
|
||||
tm tmthen;
|
||||
|
||||
#ifdef _MSC_VER
|
||||
tm *temp = localtime(&tempts);
|
||||
|
||||
if (!temp) {
|
||||
errno_t err = localtime_s(&tmthen, &tempts);
|
||||
if (err) {
|
||||
BOOST_THROW_EXCEPTION(posix_error()
|
||||
<< boost::errinfo_api_function("localtime")
|
||||
<< boost::errinfo_errno(errno));
|
||||
<< boost::errinfo_api_function("localtime_s")
|
||||
<< boost::errinfo_errno(err));
|
||||
}
|
||||
|
||||
tmthen = *temp;
|
||||
#else /* _MSC_VER */
|
||||
if (!localtime_r(&tempts, &tmthen)) {
|
||||
BOOST_THROW_EXCEPTION(posix_error()
|
||||
@ -1073,9 +1096,61 @@ String Utility::FormatDateTime(const char *format, double ts)
|
||||
}
|
||||
#endif /* _MSC_VER */
|
||||
|
||||
strftime(timestamp, sizeof(timestamp), format, &tmthen);
|
||||
return FormatDateTime(format, &tmthen);
|
||||
}
|
||||
|
||||
return timestamp;
|
||||
String Utility::FormatDateTime(const char* format, const tm* t) {
|
||||
/* Known limitations of the implementation: Only works if the result is at most 127 bytes, otherwise returns an
|
||||
* empty string. An empty string is also returned in all other error cases as proper error handling for strftime()
|
||||
* is impossible.
|
||||
*
|
||||
* From strftime(3):
|
||||
*
|
||||
* If the output string would exceed max bytes, errno is not set. This makes it impossible to distinguish this
|
||||
* error case from cases where the format string legitimately produces a zero-length output string. POSIX.1-2001
|
||||
* does not specify any errno settings for strftime().
|
||||
*
|
||||
* https://manpages.debian.org/bookworm/manpages-dev/strftime.3.en.html#BUGS
|
||||
*
|
||||
* There's also std::put_time() from C++ which works with an ostream and does not have a fixed size output buffer
|
||||
* and should allow using the error handling of the ostream. However, there seems to be an unfortunate implementation
|
||||
* of this on some Windows versions where passing an invalid format string results in std::bad_alloc and the process
|
||||
* allocating more and more memory before throwing the exception. In case someone in the future wants to try
|
||||
* std::put_time() again: better build packages for Windows and test them across all supported versions.
|
||||
* Hypothesis: it's implemented using a fixed output buffer and retrying with a larger buffer on error, assuming
|
||||
* the error was due to the buffer being too small.
|
||||
*/
|
||||
|
||||
#ifdef _MSC_VER
|
||||
/* On Windows, the strftime() function family invokes an invalid parameter handler when the format string is
|
||||
* invalid (see the "Remarks" section in their documentation). std::put_time() shows the same behavior as it
|
||||
* uses _wcsftime_l() internally. The default invalid parameter handler may terminate the process, which can
|
||||
* be a problem given that the format string can be specified by the user from the Icinga DSL.
|
||||
*
|
||||
* Thus, temporarily set a thread-local no-op handler to disable the default one allowing the program to
|
||||
* continue. This then simply results in the function returning an error which then results in an exception as
|
||||
* we ask the stream to throw one.
|
||||
*
|
||||
* See also:
|
||||
* https://learn.microsoft.com/en-us/cpp/c-runtime-library/reference/strftime-wcsftime-strftime-l-wcsftime-l?view=msvc-170
|
||||
* https://learn.microsoft.com/en-us/cpp/c-runtime-library/parameter-validation?view=msvc-170
|
||||
* https://learn.microsoft.com/en-us/cpp/c-runtime-library/reference/set-invalid-parameter-handler-set-thread-local-invalid-parameter-handler?view=msvc-170
|
||||
*/
|
||||
|
||||
auto oldHandler = _set_thread_local_invalid_parameter_handler(
|
||||
[](const wchar_t*, const wchar_t*, const wchar_t*, unsigned int, uintptr_t) {
|
||||
// Intentionally do nothing to continue executing.
|
||||
});
|
||||
|
||||
Defer resetHandler([oldHandler]() {
|
||||
_set_thread_local_invalid_parameter_handler(oldHandler);
|
||||
});
|
||||
#endif /* _MSC_VER */
|
||||
|
||||
char buf[128];
|
||||
size_t n = strftime(buf, sizeof(buf), format, t);
|
||||
// On error, n == 0 and an empty string is returned.
|
||||
return std::string(buf, n);
|
||||
}
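A usage sketch of the two overloads (format string and timestamp are illustrative, localtime_r is the POSIX variant): both return an empty String on any strftime() failure, so callers should treat "" as the error case.

time_t ts = time(nullptr);
tm now{};
localtime_r(&ts, &now);  // POSIX; use localtime_s on Windows

String a = Utility::FormatDateTime("%Y-%m-%d %H:%M:%S", static_cast<double>(ts));
String b = Utility::FormatDateTime("%Y-%m-%d %H:%M:%S", &now);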
|
||||
|
String Utility::FormatErrorNumber(int code) {
@ -1598,37 +1673,8 @@ static bool ReleaseHelper(String *platformName, String *platformVersion)
	return true;
}

	/* You are using a distribution which supports LSB. */
	FILE *fp = popen("type lsb_release >/dev/null 2>&1 && lsb_release -s -i 2>&1", "r");

	if (fp) {
		std::ostringstream msgbuf;
		char line[1024];
		while (fgets(line, sizeof(line), fp))
			msgbuf << line;
		int status = pclose(fp);
		if (WEXITSTATUS(status) == 0) {
			if (platformName)
				*platformName = msgbuf.str();
		}
	}

	fp = popen("type lsb_release >/dev/null 2>&1 && lsb_release -s -r 2>&1", "r");

	if (fp) {
		std::ostringstream msgbuf;
		char line[1024];
		while (fgets(line, sizeof(line), fp))
			msgbuf << line;
		int status = pclose(fp);
		if (WEXITSTATUS(status) == 0) {
			if (platformVersion)
				*platformVersion = msgbuf.str();
		}
	}

	/* OS X */
	fp = popen("type sw_vers >/dev/null 2>&1 && sw_vers -productName 2>&1", "r");
	FILE* fp = popen("type sw_vers >/dev/null 2>&1 && sw_vers -productName 2>&1", "r");

	if (fp) {
		std::ostringstream msgbuf;
@ -1664,43 +1710,6 @@ static bool ReleaseHelper(String *platformName, String *platformVersion)
		}
	}

	/* Centos/RHEL < 7 */
	release.close();
	release.open("/etc/redhat-release");
	if (release.is_open()) {
		std::string release_line;
		getline(release, release_line);

		String info = release_line;

		/* example: Red Hat Enterprise Linux Server release 6.7 (Santiago) */
		if (platformName)
			*platformName = info.SubStr(0, info.Find("release") - 1);

		if (platformVersion)
			*platformVersion = info.SubStr(info.Find("release") + 8);

		return true;
	}

	/* sles 11 sp3, opensuse w/e */
	release.close();
	release.open("/etc/SuSE-release");
	if (release.is_open()) {
		std::string release_line;
		getline(release, release_line);

		String info = release_line;

		if (platformName)
			*platformName = info.SubStr(0, info.FindFirstOf(" "));

		if (platformVersion)
			*platformVersion = info.SubStr(info.FindFirstOf(" ") + 1);

		return true;
	}

	/* Just give up */
	return false;
#endif /* _WIN32 */
@ -1973,3 +1982,51 @@ bool Utility::ComparePasswords(const String& enteredPassword, const String& actu
	return result;
}

/**
 * Normalizes the given struct tm like mktime() from libc does with some exception for DST handling: If the given time
 * exists twice on a day, the instance in the DST timezone is picked. If the time does not actually exist on a day, it's
 * interpreted using the UTC offset of the standard timezone and then normalized.
 *
 * This is done in order to provide consistent behavior across operating systems. Historically, Icinga 2 just relied on
 * whatever mktime() of the operating system did and this function mimics what glibc does as that's what most systems
 * use.
 *
 * @param t tm struct to be normalized
 * @return time_t representing the timestamp given by t
 */
time_t Utility::NormalizeTm(tm *t)
{
	// If tm_isdst already specifies the timezone (0 or 1), just use the mktime() behavior.
	if (t->tm_isdst >= 0) {
		return mktime(t);
	}

	const tm copy = *t;

	t->tm_isdst = 1;
	time_t result = mktime(t);
	if (result != -1 && t->tm_isdst == 1) {
		return result;
	}

	// Restore the original input. mktime() can (and does) change more fields than just tm_isdst by converting from
	// daylight saving time to standard time (it moves the contents by (typically) an hour, which can move across
	// days/weeks/months/years changing all other fields).
	*t = copy;

	t->tm_isdst = 0;
	return mktime(t);
}

/**
 * Returns the same as NormalizeTm() but takes a const pointer as argument and thus does not modify it.
 *
 * @param t struct tm to convert to time_t
 * @return time_t representing the timestamp given by t
 */
time_t Utility::TmToTimestamp(const tm *t)
{
	tm copy = *t;
	return NormalizeTm(&copy);
}
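The DST rules described in the comment above are easiest to see with a concrete ambiguous wall-clock time. The following sketch is illustrative only and assumes a glibc-style system that honors the TZ environment variable; the Europe/Berlin example date and the include path are assumptions, not part of this change.

// Sketch: on 2024-10-27 in Europe/Berlin, 02:30 occurs twice (once in CEST,
// once in CET). With tm_isdst == -1, NormalizeTm() is documented to pick the
// DST (CEST, UTC+2) instance, i.e. 00:30 UTC rather than 01:30 UTC.
#include "base/utility.hpp"
#include <cstdio>
#include <cstdlib>
#include <ctime>

using namespace icinga;

int main()
{
	setenv("TZ", "Europe/Berlin", 1);
	tzset();

	tm t = {};
	t.tm_year = 2024 - 1900;
	t.tm_mon = 10 - 1; // October
	t.tm_mday = 27;
	t.tm_hour = 2;
	t.tm_min = 30;
	t.tm_isdst = -1;   // let NormalizeTm() decide

	time_t ts = Utility::NormalizeTm(&t);
	std::printf("%lld\n", static_cast<long long>(ts));
}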
@ -77,7 +77,8 @@ public:
	static String Join(const Array::Ptr& tokens, char separator, bool escapeSeparator = true);

	static String FormatDuration(double duration);
	static String FormatDateTime(const char *format, double ts);
	static String FormatDateTime(const char* format, double ts);
	static String FormatDateTime(const char* format, const tm* t);
	static String FormatErrorNumber(int code);

#ifndef _WIN32
@ -184,6 +185,9 @@ public:
		return in.SubStr(0, maxLength - sha1HexLength - strlen(trunc)) + trunc + SHA1(in);
	}

	static time_t NormalizeTm(tm *t);
	static time_t TmToTimestamp(const tm *t);

private:
	Utility();
@ -9,11 +9,15 @@ using namespace icinga;

template class boost::variant<boost::blank, double, bool, String, Object::Ptr>;
template const double& Value::Get<double>() const;
template double& Value::Get<double>();
template const bool& Value::Get<bool>() const;
template bool& Value::Get<bool>();
template const String& Value::Get<String>() const;
template String& Value::Get<String>();
template const Object::Ptr& Value::Get<Object::Ptr>() const;
template Object::Ptr& Value::Get<Object::Ptr>();

Value icinga::Empty;
const Value icinga::Empty;

Value::Value(std::nullptr_t)
{ }
@ -140,16 +140,26 @@ public:
		return boost::get<T>(m_Value);
	}

	template<typename T>
	T& Get()
	{
		return boost::get<T>(m_Value);
	}

private:
	boost::variant<boost::blank, double, bool, String, Object::Ptr> m_Value;
};

extern template const double& Value::Get<double>() const;
extern template double& Value::Get<double>();
extern template const bool& Value::Get<bool>() const;
extern template bool& Value::Get<bool>();
extern template const String& Value::Get<String>() const;
extern template String& Value::Get<String>();
extern template const Object::Ptr& Value::Get<Object::Ptr>() const;
extern template Object::Ptr& Value::Get<Object::Ptr>();

extern Value Empty;
extern const Value Empty;

Value operator+(const Value& lhs, const char *rhs);
Value operator+(const char *lhs, const Value& rhs);
43
lib/base/wait-group.cpp
Normal file
43
lib/base/wait-group.cpp
Normal file
@ -0,0 +1,43 @@
/* Icinga 2 | (c) 2025 Icinga GmbH | GPLv2+ */

#include "base/wait-group.hpp"

using namespace icinga;

bool StoppableWaitGroup::try_lock_shared()
{
	std::unique_lock lock (m_Mutex);

	if (m_Stopped) {
		return false;
	}

	++m_SharedLocks;
	return true;
}

void StoppableWaitGroup::unlock_shared()
{
	std::unique_lock lock (m_Mutex);

	if (!--m_SharedLocks && m_Stopped) {
		lock.unlock();
		m_CV.notify_all();
	}
}

bool StoppableWaitGroup::IsLockable() const
{
	return !m_Stopped.load(std::memory_order_relaxed);
}

/**
 * Disallow new shared locks, wait for all existing ones.
 */
void StoppableWaitGroup::Join()
{
	std::unique_lock lock (m_Mutex);

	m_Stopped.store(true, std::memory_order_relaxed);
	m_CV.wait(lock, [this] { return !m_SharedLocks; });
}
60
lib/base/wait-group.hpp
Normal file
60
lib/base/wait-group.hpp
Normal file
@ -0,0 +1,60 @@
/* Icinga 2 | (c) 2025 Icinga GmbH | GPLv2+ */

#pragma once

#include "base/object.hpp"
#include "base/atomic.hpp"
#include <condition_variable>
#include <cstdint>
#include <mutex>

namespace icinga
{

/**
 * A synchronization interface that allows concurrent shared locking.
 *
 * @ingroup base
 */
class WaitGroup : public Object
{
public:
	DECLARE_PTR_TYPEDEFS(WaitGroup);

	virtual bool try_lock_shared() = 0;
	virtual void unlock_shared() = 0;

	virtual bool IsLockable() const = 0;
};

/**
 * A thread-safe wait group that can be stopped to prevent further shared locking.
 *
 * @ingroup base
 */
class StoppableWaitGroup : public WaitGroup
{
public:
	DECLARE_PTR_TYPEDEFS(StoppableWaitGroup);

	StoppableWaitGroup() = default;
	StoppableWaitGroup(const StoppableWaitGroup&) = delete;
	StoppableWaitGroup(StoppableWaitGroup&&) = delete;
	StoppableWaitGroup& operator=(const StoppableWaitGroup&) = delete;
	StoppableWaitGroup& operator=(StoppableWaitGroup&&) = delete;

	bool try_lock_shared() override;
	void unlock_shared() override;

	bool IsLockable() const override;

	void Join();

private:
	std::mutex m_Mutex;
	std::condition_variable m_CV;
	uint_fast32_t m_SharedLocks = 0;
	Atomic<bool> m_Stopped = false;
};

}
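To make the intended use of the new wait group concrete, here is a hypothetical caller-side sketch: request handlers hold a shared lock while they work, and shutdown calls Join() to refuse new work and wait for in-flight handlers. HandleRequest() and Shutdown() are made-up names; a real caller may prefer an RAII wrapper such as std::shared_lock constructed with std::try_to_lock so the shared lock is released even if the work throws.

// Hypothetical usage sketch of StoppableWaitGroup (not part of this change).
#include "base/wait-group.hpp"

using namespace icinga;

void HandleRequest(const StoppableWaitGroup::Ptr& wg)
{
	if (!wg->try_lock_shared()) {
		// The group was already stopped; refuse new work.
		return;
	}

	// ... do the actual work while holding the shared lock ...

	wg->unlock_shared();
}

void Shutdown(const StoppableWaitGroup::Ptr& wg)
{
	// Disallow new shared locks and wait until all current holders have released theirs.
	wg->Join();
	// From here on, no handler is still running.
}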
@ -5,7 +5,7 @@

#define WIN32_LEAN_AND_MEAN
#ifndef _WIN32_WINNT
#define _WIN32_WINNT _WIN32_WINNT_VISTA
#define _WIN32_WINNT _WIN32_WINNT_WIN7
#endif /* _WIN32_WINNT */
#define NOMINMAX
#include <winsock2.h>
Some files were not shown because too many files have changed in this diff.