Merge pull request #6061 from gunnarbeutner/feature/remove-jenkins-scripts

Remove jenkins test scripts
This commit is contained in:
Michael Friedrich 2018-02-02 11:26:03 +01:00 committed by GitHub
commit 6a8a6903de
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
28 changed files with 0 additions and 3055 deletions

View File

@ -1,43 +0,0 @@
Set of scripts to set up and test a virtual demo machine
========================================================
This directory contains a few scripts primarily used by build.icinga.com.
* bootstrap-vm.sh
Ensures that all required software is installed and its configuration
is applied to the VM. (Usually not of interest for the typical user.)
* run_tests.sh
This is a wrapper script intended to be run manually by a user. (Note
that you need to start this project's vagrant box for this to work!)
* run_tests.py
The actual test-runner. Accepts two options (-C|--config, -O|--output) and
expects one or more filenames or -patterns that should be run on the VM.
* run_tests.conf
The default configuration file for the test-runner. (Used when running
the wrapper script or when no custom configuration file is passed to the
test-runner.)
Format:
- commands: This section is mandatory and contains the commands to use.
- settings: This section is mandatory and defines settings that are applied to
all tests.
- setups: This section is optional and contains setup routines that should
be run before (setup) and after (teardown) any matching test is
executed. (Note that only one setup can be effective at a time.)
Example:
"^v[1-9]\.test$": {
"setup": {
"copy": ["source >> target"], // Files that should be copied.
// Note that these files remain
// if not removed explicitly
"clean": ["target"], // Files to delete from the system
"exec": ["cmd1", "cmd2"] // Commands to execute on the system
},
"teardown": {
// The same kind of instructions as above can be added here
}
}

View File

@ -1,10 +0,0 @@
#!/bin/sh
# Verify that the Apache httpd service is running; print a test-result
# line and exit 0 (running) or 1 (not running).
#
# Fixed: the original used 'status &> /dev/null', which is a bashism.
# Under a POSIX /bin/sh that parses as "background the command, then
# open an empty redirection", so $? was always 0 and the check could
# never fail.  Use explicit stdout/stderr redirection instead.
if sudo service httpd status > /dev/null 2>&1; then
    echo '[OK] httpd is running'
    exit 0
else
    echo '[FAIL] httpd is not running'
    exit 1
fi

View File

@ -1,34 +0,0 @@
#!/bin/sh
# Bootstrap a freshly provisioned test VM so it can run this project's
# test suite: creates the vagrant user, installs SSH access and applies
# the puppet configuration.  Intended to be invoked by automation
# (build.icinga.com), not by hand -- hence the --force guard below.
if [ "$1" != "--force" ]; then
echo 'This script is NOT intended to be ran by an individual user.' \
'If you are not human, pass "--force" as the first option to it!'
exit 1
fi
# Usage: bootstrap-vm.sh --force <user> <host>
if [ $# -lt 3 ]; then
echo 'Too few arguments. You need to pass "--force <user> <host>"' \
'to run this script.'
exit 1
fi
user=$2
host=$3
# Key-based authentication only; fail fast instead of prompting.
SSH_OPTIONS="-o PasswordAuthentication=no"
SSH="ssh $SSH_OPTIONS $user@$host"
$SSH "mkdir /vagrant"
# TODO clone git and use the icinga2x puppet modules
git clone git://git.icinga.com/icinga-vagrant.git
scp -qr icinga-vagrant/icinga2x/.vagrant-puppet $user@$host:/vagrant
rm -rf icinga-vagrant
# Create the vagrant user on the VM and authorize our local public key.
$SSH "useradd vagrant"
$SSH "su -c 'mkdir -p -m 0700 ~/.ssh' vagrant"
$SSH "su -c \"echo '`cat ~/.ssh/id_rsa.pub`' >> ~/.ssh/authorized_keys\" vagrant"
# NOTE(review): assumes the package mirror is reachable at 10.10.27.1 --
# confirm before reusing outside the build.icinga.com environment.
$SSH "echo '10.10.27.1 packages.icinga.com' >> /etc/hosts"
# NOTE(review): the two quoted strings below are passed to ssh as two
# separate arguments; ssh joins its arguments with spaces, so the remote
# shell still sees one complete puppet command line.
$SSH "puppet apply --modulepath=/vagrant/.vagrant-puppet/modules" \
" /vagrant/.vagrant-puppet/manifests/default.pp"
exit 0

View File

@ -1,95 +0,0 @@
#!/usr/bin/env python
from __future__ import unicode_literals
import os
import sys
import time
import utils
STATE_OK = 0
TYPE_PASSIVE_CHECK = 1
CHECK_INTERVAL = 300 # seconds
CHECKRESULT_READ_INTERVAL = 5 # seconds
CHECKRESULT_LOCATION = '/tmp/icinga2/checkresults'
CHECKRESULT_TEMPLATE = """
host_name=%(hostname)s
service_description=%(servicename)s
check_type=%(check_type)s
check_options=0
scheduled_check=0
reschedule_check=0
latency=0
start_time=%(start_time)s
finish_time=%(finish_time)s
early_timeout=0
exited_ok=%(excited_ok)s
return_code=%(return_code)s
output=%(output)s
"""
def main():
run_query = lambda q: utils.run_mysql_query(q, b'/usr/bin/mysql')
# We need to wait a bit first as Icinga processes a
# checkresult only if its newer than the last check
query = 'select unix_timestamp(s.last_check) as last_check ' \
'from icinga_servicestatus as s ' \
'inner join icinga_services as c ' \
'on s.service_object_id = c.service_object_id ' \
"where c.display_name = 'PassiveService1'"
state_time = float(next(iter(run_query(query)), {}).get('last_check', '0'))
if state_time == 0:
utils.Logger.fail('"PassiveService1" seems not'
' to have been checked yet\n')
return 1
if (state_time + CHECK_INTERVAL) - time.time() < 30:
time.sleep(45)
# Now pass the checkresult in
resultfile_path = os.path.join(CHECKRESULT_LOCATION, 'cfoobar')
with open(resultfile_path, 'w') as f:
f.write(CHECKRESULT_TEMPLATE % {
'hostname': 'nsca-ng',
'servicename': 'PassiveService1',
'check_type': TYPE_PASSIVE_CHECK,
'start_time': time.time(),
'finish_time': time.time(),
'excited_ok': '1',
'return_code': STATE_OK,
'output': 'Passing in CheckResult header files works!'
})
utils.Logger.debug('Written file: {0}\n'.format(resultfile_path))
# And notfiy Icinga that the file has been completely written...
resultfileok_path = os.path.join(CHECKRESULT_LOCATION, 'cfoobar.ok')
with open(resultfileok_path, 'w') as f:
pass
utils.Logger.debug('Written file: {0}\n'.format(resultfileok_path))
# Lastly check whether the service changed its state
time.sleep(CHECKRESULT_READ_INTERVAL * 2)
query = 'select s.output ' \
'from icinga_servicestatus as s ' \
'inner join icinga_services as c ' \
'on s.service_object_id = c.service_object_id ' \
"where c.display_name = 'PassiveService1'"
output = next(iter(run_query(query)), {}).get('output', '')
if output != 'Passing in CheckResult header files works!':
utils.Logger.fail('Checkresult header files seem '
'not to be processed properly\n')
return 1
utils.Logger.ok('Checkresult header files are processed properly\n')
return 0
if __name__ == '__main__':
sys.exit(main())

View File

@ -1,277 +0,0 @@
#!/usr/bin/env python
from __future__ import unicode_literals
import os
import sys
import time
import socket
import utils
# Path of Icinga 2's Livestatus UNIX socket.
LIVESTATUS_PATH = '/var/run/icinga2/cmd/livestatus'

# Livestatus host columns listed in the exact order in which the
# "test_event" eventcommand writes the corresponding macros.  `None`
# marks macro positions that have no Livestatus column and are skipped
# during the comparison in main().
LS_HOST_COLUMNS = [
    'name',
    'name',
    'display_name',
    'display_name',
    None,
    'state',
    'state_type',
    'current_attempt',
    'max_check_attempts',
    None,
    'last_state',
    None,
    'last_state_change',
    None,
    'latency',
    'execution_time',
    'plugin_output',
    None,
    'last_check',
    'address',
    'address6'
]

# Same idea as LS_HOST_COLUMNS, but for the service macros.
LS_SVC_COLUMNS = [
    'description',
    'display_name',
    'display_name',
    None,
    'state',
    'state_type',
    'current_attempt',
    'max_check_attempts',
    None,
    'last_state',
    None,
    'last_state_change',
    None,
    'latency',
    'execution_time',
    'plugin_output',
    'perf_data',
    'last_check',
    'host_num_services',
    'host_num_services_ok',
    'host_num_services_warn',
    'host_num_services_unknown',
    'host_num_services_crit'
]

# Textual state types (as the macros emit them) mapped to the numeric
# representation that Livestatus reports.
STATE_MAP = {
    'SOFT': 0,
    'HARD': 1
}
def send_command(command):
    """Submit an external command via Livestatus.

    Returns the (normally empty) query result, or None when Livestatus
    rejects the command; the error is reported on stderr in that case.
    """
    timestamp = int(time.time())
    try:
        return send_query('COMMAND [{0}] {1}'.format(timestamp, command))
    except utils.LiveStatusError as error:
        sys.stderr.write('Failed to execute command: {0}\n\n{1}\n'
                         ''.format(command, error))
def send_query(query):
    """Run a Livestatus query and return its rows as a list of dicts.

    Column headers are requested explicitly; the first response row is
    the header and provides the dict keys for all remaining rows.
    Returns an empty list when there is no response.
    """
    response = LIVESTATUS.query(query + '\nColumnHeaders: on')
    if response:
        # Fixed: dropped the unused `result = {}` placeholder that was
        # assigned alongside the header and never read.
        header = response.pop(0)
        return [dict((header[i], v) for i, v in enumerate(r)) for r in response]
    return []
def get_one(query):
    """Return the first result row of *query*, or {} if there is none."""
    rows = send_query(query)
    return rows[0] if rows else {}
def get_event_output():
    """Return the eventcommand's output fields, or None if unavailable.

    The test eventcommand dumps its macro values to /tmp/test_event.out;
    read that file, split it on '|' and delete it so a later call cannot
    pick up stale data.  Returns None when the file cannot be read.

    Fixed: the original tracked cleanup with a `remove` flag that stayed
    unassigned on unexpected exceptions, raising UnboundLocalError from
    the finally block and masking the real error.
    """
    try:
        with open('/tmp/test_event.out') as f:
            content = f.read()
    except (IOError, OSError):
        return None
    # Only remove the file after it was read successfully (as before).
    os.system('sudo rm -f /tmp/test_event.out')
    return content.rstrip().split('|')
def convert_output(value):
    """Cast a macro value string to int or float where possible.

    Non-numeric values are mapped through STATE_MAP (textual state types
    to their numeric form) and otherwise returned unchanged.
    """
    for cast in (int, float):
        try:
            return cast(value)
        except ValueError:
            continue
    return STATE_MAP.get(value, value)
def validate_time_format(inputstr, formatstr):
    """Return True if *inputstr* parses via time.strptime(*formatstr*)."""
    try:
        time.strptime(inputstr, formatstr)
        return True
    except ValueError:
        return False
def main():
    """Exercise eventhandler assignment, triggering and macro expansion.

    Assigns the "test_event" eventcommand to host "localhost", forces a
    negative check result to trigger it, then compares every macro the
    command dumped against the values Livestatus reports.  Finally the
    disable/enable/remove external commands are verified.  Returns 0 on
    success, 1 on any failure.
    """
    send_command('CHANGE_HOST_EVENT_HANDLER;localhost;test_event')
    host_info = get_one('GET hosts\nFilter: name = localhost'
                        '\nColumns: event_handler')
    if host_info.get('event_handler') != 'test_event':
        utils.Logger.fail('Could not assign eventcommand "test_event"'
                          ' to host "localhost"\n')
        return 1
    utils.Logger.ok('Successfully assigned an eventcommand'
                    ' to host "localhost"\n')
    # A DOWN result triggers the eventhandler, which writes its macro
    # dump to /tmp/test_event.out (read back via get_event_output()).
    send_command('PROCESS_HOST_CHECK_RESULT;localhost;1;A negative result to'
                 ' trigger an eventhandler|some interesting perfdata!')
    event_output = get_event_output()
    if not event_output:
        send_command('CHANGE_HOST_EVENT_HANDLER;localhost;')
        utils.Logger.fail('Could not trigger the eventcommand\n')
        return 1
    utils.Logger.ok('Successfully triggered the eventcommand\n')
    failure = False
    utils.Logger.info('Checking host macros...\n')
    host_info = get_one('GET hosts\nFilter: name = localhost\nColumns: {0}'
                        ''.format(' '.join(c for c in LS_HOST_COLUMNS if c)))
    # Field 0 is "$$HOSTNAME$HOSTNAME$": an escaped dollar sign followed
    # by the macro, so it must equal the host name twice.
    if event_output[0] != host_info['name']*2:
        failure = True
        utils.Logger.fail('Escaping environment variables '
                          'seems not to properly working\n')
        utils.Logger.fail(' Expected: {0!r} Got: {1!r}\n'
                          ''.format(host_info['name']*2, event_output[0]))
    else:
        utils.Logger.ok('Escaped environment variables'
                        ' are properly processed\n')
    # Compare each host macro (skipping None placeholders) against the
    # value Livestatus reports for the corresponding column.
    for i, column in enumerate(LS_HOST_COLUMNS[1:], 1):
        if column is not None:
            macro_name, _, macro_value = event_output[i].partition('=')
            output_value = convert_output(macro_value)
            if output_value != host_info[column]:
                failure = True
                utils.Logger.fail('Macro "{0}" returns an incorrect value. '
                                  'Expected "{2}" but got "{1}"\n'
                                  ''.format(macro_name, output_value,
                                            host_info[column]))
            else:
                utils.Logger.ok('Macro "{0}" returns the correct value\n'
                                ''.format(macro_name))
    utils.Logger.info('Checking service macros...\n')
    svc_info = get_one('GET services\nFilter: description = ping4\nColumns: {0}'
                       ''.format(' '.join(c for c in LS_SVC_COLUMNS if c)))
    # The service macros follow the host macros in the dump, hence the
    # enumerate offset of len(LS_HOST_COLUMNS).
    for i, column in enumerate(LS_SVC_COLUMNS, len(LS_HOST_COLUMNS)):
        if column is not None:
            macro_name, _, macro_value = event_output[i].partition('=')
            output_value = convert_output(macro_value)
            if output_value != svc_info[column]:
                failure = True
                utils.Logger.fail('Macro "{0}" returns an incorrect value. '
                                  'Expected "{2}" but got "{1}"\n'
                                  ''.format(macro_name, output_value,
                                            svc_info[column]))
            else:
                utils.Logger.ok('Macro "{0}" returns the correct value\n'
                                ''.format(macro_name))
    utils.Logger.info('Checking global macros...\n')
    # The last six fields are TIMET, LONGDATETIME, SHORTDATETIME, DATE,
    # TIME and the custom command macro, in that order.
    timet = convert_output(event_output[-6].partition('=')[2])
    if not isinstance(timet, int):
        failure = True
        utils.Logger.fail('Macro "TIMET" does not return a timestamp. '
                          'Expected int but got: {0!r}\n'.format(timet))
    else:
        utils.Logger.ok('Macro "TIMET" returns the correct value\n')
    longdatetime = event_output[-5].partition('=')[2]
    longdatetime_format = '%Y-%m-%d %H:%M:%S +0000'
    if not validate_time_format(longdatetime, longdatetime_format):
        failure = True
        utils.Logger.fail('Macro "LONGDATETIME" returns an incorrect value.'
                          ' Expected value of format "{0}" but got "{1}"\n'
                          ''.format(longdatetime_format, longdatetime))
    else:
        utils.Logger.ok('Macro "LONGDATETIME" returns the correct value\n')
    shortdatetime = event_output[-4].partition('=')[2]
    shortdatetime_format = '%Y-%m-%d %H:%M:%S'
    if not validate_time_format(shortdatetime, shortdatetime_format):
        failure = True
        utils.Logger.fail('Macro "SHORTDATETIME" returns an incorrect value.'
                          ' Expected value of format "{0}" but got "{1}"\n'
                          ''.format(shortdatetime_format, shortdatetime))
    else:
        utils.Logger.ok('Macro "SHORTDATETIME" returns the correct value\n')
    m_date = event_output[-3].partition('=')[2]
    m_date_format = '%Y-%m-%d'
    if not validate_time_format(m_date, m_date_format):
        failure = True
        utils.Logger.fail('Macro "DATE" returns an incorrect value. '
                          'Expected value of format "{0}" but got "{1}"\n'
                          ''.format(m_date_format, m_date))
    else:
        utils.Logger.ok('Macro "DATE" returns the correct value\n')
    m_time = event_output[-2].partition('=')[2]
    m_time_format = '%H:%M:%S +0000'
    if not validate_time_format(m_time, m_time_format):
        failure = True
        utils.Logger.fail('Macro "TIME" returns an incorrect value. '
                          'Expected value of format "{0}" but got "{1}"\n'
                          ''.format(m_time_format, m_time))
    else:
        utils.Logger.ok('Macro "TIME" returns the correct value\n')
    utils.Logger.info('Checking command macros...\n')
    # custom_macro is defined as 1337 on the eventcommand itself.
    if convert_output(event_output[-1].partition('=')[2]) != 1337:
        failure = True
        utils.Logger.fail('The command macro "custom_macro"'
                          ' is not being substituted\n')
    else:
        utils.Logger.ok('The command macro "custom_macro"'
                        ' is correctly substituted\n')
    # A positive result must NOT trigger the handler once it is disabled.
    send_command('DISABLE_HOST_EVENT_HANDLER;localhost')
    send_command('PROCESS_HOST_CHECK_RESULT;localhost;0;A positive result that'
                 ' should not trigger an eventhandler')
    if get_event_output():
        failure = True
        utils.Logger.fail('Could not disable the eventcommand\n')
    else:
        utils.Logger.ok('Successfully disabled the eventcommand\n')
    send_command('ENABLE_HOST_EVENT_HANDLER;localhost')
    host_info = get_one('GET hosts\nFilter: name = localhost'
                        '\nColumns: event_handler_enabled')
    if host_info['event_handler_enabled'] != 1:
        failure = True
        utils.Logger.fail('Could not re-enable the eventcommand\n')
    else:
        utils.Logger.ok('Successfully re-enabled the eventcommand\n')
    # Finally detach the eventcommand again to restore the initial state.
    send_command('CHANGE_HOST_EVENT_HANDLER;localhost;')
    host_info = get_one('GET hosts\nFilter: name = localhost'
                        '\nColumns: event_handler')
    if host_info['event_handler']:
        failure = True
        utils.Logger.fail('Could not remove eventcommand "test_event"'
                          ' assigned to host "localhost"\n')
    else:
        utils.Logger.ok('Successfully removed the eventcommand'
                        ' assigned to host "localhost"\n')
    return 1 if failure else 0
if __name__ == '__main__':
    # Connect once; every query issued by main() reuses this
    # module-level Livestatus socket.
    try:
        with utils.LiveStatusSocket(LIVESTATUS_PATH) as LIVESTATUS:
            sys.exit(main())
    # NOTE(review): Python 2-only "except ..., e" syntax and unicode().
    except (OSError, IOError, socket.error), e:
        utils.Logger.error('Could not connect to Livestatus socket: {0} ({1})'
                           '\n'.format(LIVESTATUS_PATH, unicode(e)))

View File

@ -1,12 +0,0 @@
#!/bin/sh
# Verify that the Icinga 2 external command pipe exists.
# Exits 0 when the pipe is present, 1 otherwise.
commandpipe_path="/var/run/icinga2/cmd/icinga2.cmd"
# Quote the expansion so the test stays well-formed even if the path
# ever contains whitespace.
if [ -e "$commandpipe_path" ];
then
    echo "[OK] Icinga2 commandpipe found ($commandpipe_path)"
    exit 0
else
    echo "[FAIL] Icinga2 commandpipe not found ($commandpipe_path)"
    exit 1
fi

File diff suppressed because it is too large Load Diff

View File

@ -1,5 +0,0 @@
// Load the compat library that provides the CheckResultReader type.
library "compat"

// Pick up Nagios-style checkresult files written to spool_dir; used by
// the passive checkresult test, which drops result files there.
object CheckResultReader "reader" {
  spool_dir = "/tmp/icinga2/checkresults"
}

View File

@ -1,61 +0,0 @@
// Eventcommand used by the livestatus eventhandler test: dumps every
// supported host, service and global macro (plus one custom macro) as a
// single '|'-separated line to /tmp/test_event.out, which the test then
// parses positionally.
// NOTE(review): the "LASTEHOSTSTATEID" label below looks like a typo of
// LASTHOSTSTATEID; the test compares values by position, not by label,
// so it is harmless -- confirm before renaming.
object EventCommand "test_event" {
  import "plugin-event-command",

  // The lines between {{{ and }}} are the literal shell command text
  // and must keep their exact layout (backslash-newline continuations).
  command = {{{echo "\
$$HOSTNAME$HOSTNAME$\
|HOSTNAME=$HOSTNAME$\
|HOSTDISPLAYNAME=$HOSTDISPLAYNAME$\
|HOSTALIAS=$HOSTALIAS$\
|HOSTSTATE=$HOSTSTATE$\
|HOSTSTATEID=$HOSTSTATEID$\
|HOSTSTATETYPE=$HOSTSTATETYPE$\
|HOSTATTEMPT=$HOSTATTEMPT$\
|MAXHOSTATTEMPT=$MAXHOSTATTEMPT$\
|LASTHOSTSTATE=$LASTHOSTSTATE$\
|LASTEHOSTSTATEID=$LASTHOSTSTATEID$\
|LASTHOSTSTATETYPE=$LASTHOSTSTATETYPE$\
|LASTHOSTSTATECHANGE=$LASTHOSTSTATECHANGE$\
|HOSTDURATIONSEC=$HOSTDURATIONSEC$\
|HOSTLATENCY=$HOSTLATENCY$\
|HOSTEXECUTIONTIME=$HOSTEXECUTIONTIME$\
|HOSTOUTPUT=$HOSTOUTPUT$\
|HOSTPERFDATA=$HOSTPERFDATA$\
|LASTHOSTCHECK=$LASTHOSTCHECK$\
|HOSTADDRESS=$HOSTADDRESS$\
|HOSTADDRESS6=$HOSTADDRESS6$\
|SERVICEDESC=$SERVICEDESC$\
|SERVICEDISPLAYNAME=$SERVICEDISPLAYNAME$\
|SERVICECHECKCOMMAND=$SERVICECHECKCOMMAND$\
|SERVICESTATE=$SERVICESTATE$\
|SERVICESTATEID=$SERVICESTATEID$\
|SERVICESTATETYPE=$SERVICESTATETYPE$\
|SERVICEATTEMPT=$SERVICEATTEMPT$\
|MAXSERVICEATTEMPT=$MAXSERVICEATTEMPT$\
|LASTSERVICESTATE=$LASTSERVICESTATE$\
|LASTSERVICESTATEID=$LASTSERVICESTATEID$\
|LASTSERVICESTATETYPE=$LASTSERVICESTATETYPE$\
|LASTSERVICESTATECHANGE=$LASTSERVICESTATECHANGE$\
|SERVICEDURATIONSEC=$SERVICEDURATIONSEC$\
|SERVICELATENCY=$SERVICELATENCY$\
|SERVICEEXECUTIONTIME=$SERVICEEXECUTIONTIME$\
|SERVICEOUTPUT=$SERVICEOUTPUT$\
|SERVICEPERFDATA=$SERVICEPERFDATA$\
|LASTSERVICECHECK=$LASTSERVICECHECK$\
|TOTALHOSTSERVICES=$TOTALHOSTSERVICES$\
|TOTALHOSTSERVICESOK=$TOTALHOSTSERVICESOK$\
|TOTALHOSTSERVICESWARNING=$TOTALHOSTSERVICESWARNING$\
|TOTALHOSTSERVICESUNKNOWN=$TOTALHOSTSERVICESUNKNOWN$\
|TOTALHOSTSERVICESCRITICAL=$TOTALHOSTSERVICESCRITICAL$\
|TIMET=$TIMET$\
|LONGDATETIME=$LONGDATETIME$\
|SHORTDATETIME=$SHORTDATETIME$\
|DATE=$DATE$\
|TIME=$TIME$\
|custom_macro=$custom_macro$\
" > /tmp/test_event.out}}},

  // Only HOSTNAME is exported into the environment; the test verifies
  // that the escaped "$$HOSTNAME" prefix is substituted correctly.
  export_macros = ["HOSTNAME"],
  macros = {
    custom_macro = 1337
  }
}

View File

@ -1,33 +0,0 @@
/**
 * A new service group required by external_commands.test::test_servicegroup_commands
 */
object ServiceGroup "aservicegroup" {
  display_name = "aServiceGroup"
}

/**
 * The two default hostgroups
 */
object HostGroup "linux-servers" {
  display_name = "Linux Servers"
}

object HostGroup "windows-servers" {
  display_name = "Windows Servers"
}

/**
 * This template is essentially the same as the default one but with a servicegroup added
 */
template Service "generic-service" {
  max_check_attempts = 3,
  check_interval = 5m,
  retry_interval = 1m,
  enable_perfdata = true,
  groups = ["aservicegroup"],

  // Wire every generic service into the mail notification used by the
  // notification tests.
  notifications["mail-icingaadmin"] = {
    templates = [ "mail-notification" ],
    user_groups = [ "icingaadmins" ]
  }
}

View File

@ -1,15 +0,0 @@
/**
 * This is a copy of the default configuration file "ido-mysql.conf" with the "categories" attribute added
 */
library "db_ido_mysql"

// Connection settings for the local MySQL IDO database.  The explicit
// "categories" list enables every data category the test suite inspects
// (configuration, state, history, log entries, ...).
object IdoMysqlConnection "ido-mysql" {
  user = "icinga",
  password = "icinga",
  host = "localhost",
  database = "icinga",
  categories = (DbCatCheck | DbCatConfig | DbCatState | DbCatAcknowledgement |
DbCatComment | DbCatDowntime | DbCatEventHandler | DbCatExternalCommand | DbCatFlapping |
DbCatLog | DbCatNotification | DbCatProgramStatus | DbCatRetention | DbCatStateHistory)
}

View File

@ -1,46 +0,0 @@
/**
 * This template is essentially the same as the default one but with a customised notification_interval
 */
template Notification "mail-notification" {
  notification_command = "mail-service-notification",
  // Notify only for non-OK states...
  notification_state_filter = (StateFilterWarning |
                               StateFilterCritical |
                               StateFilterUnknown),
  // ...and for every notification type the test suite exercises.
  notification_type_filter = (NotificationFilterProblem |
                              NotificationFilterAcknowledgement |
                              NotificationFilterRecovery |
                              NotificationFilterCustom |
                              NotificationFilterFlappingStart |
                              NotificationFilterFlappingEnd |
                              NotificationFilterDowntimeStart |
                              NotificationFilterDowntimeEnd |
                              NotificationFilterDowntimeRemoved),
  notification_period = "24x7",
  notification_interval = 10
}

/**
 * 1:1 copy of the default command
 */
object NotificationCommand "mail-service-notification" {
  import "plugin-notification-command",

  command = [ (SysconfDir + "/icinga2/scripts/mail-notification.sh") ],
  // Macros made available to the notification script's environment.
  export_macros = [
    "NOTIFICATIONTYPE",
    "SERVICEDESC",
    "HOSTALIAS",
    "HOSTADDRESS",
    "SERVICESTATE",
    "LONGDATETIME",
    "SERVICEOUTPUT",
    "NOTIFICATIONAUTHORNAME",
    "NOTIFICATIONCOMMENT",
    "HOSTDISPLAYNAME",
    "SERVICEDISPLAYNAME",
    "USEREMAIL"
  ]
}

View File

@ -1,229 +0,0 @@
from __future__ import unicode_literals
import sys
from datetime import datetime, timedelta
import utils
CHECK_INTERVAL = 10  # minutes; The actual interval is 5 minutes but as other
                     # tests might restart Icinga we need to take any
                     # rescheduling into account

# Every IDO table name carries this prefix.
TABLE_PREFIX = 'icinga_'

# All tables (without prefix) that the IDO database scheme must provide;
# validate_tables() checks each one for existence.
TABLES = [
    # Central tables
    'instances',
    'objects',
    # Debugging tables
    'conninfo',
    # Historical tables
    'acknowledgements',
    'commenthistory',
    'contactnotifications',
    'dbversion',
    'downtimehistory',
    'eventhandlers',
    'externalcommands',
    'flappinghistory',
    'hostchecks',
    'logentries',
    'notifications',
    'processevents',
    'servicechecks',
    'statehistory',
    'systemcommands',
    # Current status tables
    'comments',
    'customvariablestatus',
    'hoststatus',
    'programstatus',
    'runtimevariables',
    'scheduleddowntime',
    'servicestatus',
    'contactstatus',
    # Configuration tables
    'commands',
    'configfiles',
    'configfilevariables',
    'contact_addresses',
    'contact_notificationcommands',
    'contactgroup_members',
    'contactgroups',
    'contactnotificationmethods',
    'contacts',
    'customvariables',
    'host_contactgroups',
    'host_contacts',
    'host_parenthosts',
    'hostdependencies',
    'hostescalation_contactgroups',
    'hostescalation_contacts',
    'hostescalations',
    'hostgroup_members',
    'hostgroups',
    'hosts',
    'service_contactgroups',
    'service_contacts',
    'servicedependencies',
    'serviceescalation_contactgroups',
    'serviceescalation_contacts',
    'serviceescalations',
    'servicegroup_members',
    'servicegroups',
    'services',
    'timeperiod_timeranges',
    'timeperiods'
]

# Hosts and their services as defined by the example configuration the
# test VM ships (plus the NSCA passive-check examples).
EXAMPLE_CONFIG = {
    'localhost': ['disk', 'http', 'icinga', 'load', 'ping4',
                  'ping6', 'procs', 'ssh', 'users'],
    'nsca-ng': ['PassiveService1', 'PassiveService2']
}
def validate_tables(tables):
    """Check that every expected IDO table appears in *tables*.

    Logs one OK/FAIL line per table and returns True only when the
    database scheme is complete.
    """
    utils.Logger.info('Checking database scheme... (tables)\n')
    success = True
    for table_name in (TABLE_PREFIX + suffix for suffix in TABLES):
        if table_name not in tables:
            utils.Logger.fail('Could not find table "{0}" in database\n'
                              ''.format(table_name))
            success = False
        else:
            utils.Logger.ok('Found table "{0}" in database\n'
                            ''.format(table_name))
    return success
def verify_host_config(config_data):
    """Check that every example host appears in the given "hosts" rows.

    Returns True when all hosts from EXAMPLE_CONFIG are present.
    """
    utils.Logger.info('Checking example host configuration...\n')
    success = True
    for hostname in EXAMPLE_CONFIG:
        found = any(row['alias'] == hostname for row in config_data)
        if found:
            utils.Logger.ok('Found host "{0}"\n'.format(hostname))
        else:
            utils.Logger.fail('Could not find host "{0}"\n'.format(hostname))
            success = False
    return success
def verify_service_config(config_data):
    """Check that every example service appears in the "services" rows.

    Returns True when each (host, service) pair from EXAMPLE_CONFIG is
    present in *config_data*.
    """
    utils.Logger.info('Checking example service configuration...\n')
    success = True
    for hostname, servicenames in EXAMPLE_CONFIG.iteritems():
        for servicename in servicenames:
            found = any(row['alias'] == hostname and
                        row['display_name'] == servicename
                        for row in config_data)
            if found:
                utils.Logger.ok('Found service "{0}" on host "{1}"\n'
                                ''.format(servicename, hostname))
            else:
                utils.Logger.fail('Could not find service "{0}" on host "{1}"\n'
                                  ''.format(servicename, hostname))
                success = False
    return success
def check_last_host_status_update(check_info):
    """Check that the example hosts' status updates behave as expected.

    "localhost" must have been checked within the last CHECK_INTERVAL
    minutes (plus 10 seconds of grace); "nsca-ng" has no check service
    and must never have been actively checked.  Other hosts are skipped.
    Returns True on success.
    """
    utils.Logger.info('Checking last host status updates...\n')
    failures = False
    for host_info in check_info:
        if host_info['alias'] == 'localhost':
            last_check = datetime.fromtimestamp(float(host_info['last_check']))
            if datetime.now() - last_check > timedelta(minutes=CHECK_INTERVAL,
                                                       seconds=10):
                utils.Logger.fail('The last status update of host "{0}" was'
                                  ' more than {1} minutes ago\n'
                                  ''.format(host_info['alias'], CHECK_INTERVAL))
                failures = True
            else:
                utils.Logger.ok('Host "{0}" is being updated\n'
                                ''.format(host_info['alias']))
        elif host_info['alias'] == 'nsca-ng':
            if float(host_info['last_check']) > 0:
                # Fixed: this failure message was the only one without a
                # trailing newline, garbling subsequent log output.
                utils.Logger.fail('The host "{0}" was checked even'
                                  ' though it has no check service\n'
                                  ''.format(host_info['alias']))
                failures = True
            else:
                utils.Logger.ok('Host "{0}" is not being checked because '
                                'there is no check service\n'
                                ''.format(host_info['alias']))
        else:
            utils.Logger.info('Skipping host "{0}"\n'
                              ''.format(host_info['alias']))
    return not failures
def check_last_service_status_update(check_info):
    """Return True when all example services are checked as scheduled.

    A service counts as stale when its last check is older than
    CHECK_INTERVAL minutes (plus 10 seconds of grace); services not in
    EXAMPLE_CONFIG are skipped.
    """
    utils.Logger.info('Checking last service status updates...\n')
    failures = False
    for svc_info in check_info:
        name = svc_info['display_name']
        host = svc_info['alias']
        if name not in EXAMPLE_CONFIG.get(host, []):
            utils.Logger.info('Skipping service "{0}" on host "{1}"\n'
                              ''.format(name, host))
            continue
        last_check = datetime.fromtimestamp(float(svc_info['last_check']))
        stale = datetime.now() - last_check > timedelta(minutes=CHECK_INTERVAL,
                                                        seconds=10)
        if stale:
            utils.Logger.fail('The last status update of service "{0}" on '
                              'host "{1}" was more than {2} minutes ago\n'
                              ''.format(name, host, CHECK_INTERVAL))
            failures = True
        else:
            utils.Logger.ok('Service "{0}" on host "{1}" is being updated\n'
                            ''.format(name, host))
    return not failures
def check_logentries(logentry_info):
    """Check the status log of host "localhost".

    Returns True if the newest log entry belongs to "localhost" and was
    written within 10 seconds of its very last hard state change.
    """
    utils.Logger.info('Checking status log for host "localhost"...\n')
    if not logentry_info or logentry_info[0]['alias'] != 'localhost':
        utils.Logger.fail('No logs found in the IDO for host "localhost"\n')
        return False
    newest = logentry_info[0]
    entry_time = datetime.fromtimestamp(float(newest['entry_time']))
    state_time = datetime.fromtimestamp(float(newest['state_time']))
    if entry_time - state_time > timedelta(seconds=10):
        utils.Logger.fail('The last hard state of host "localhost"'
                          ' seems not to have been logged\n')
        return False
    utils.Logger.ok('The last hard state of host "localhost"'
                    ' was properly logged\n')
    return True

View File

@ -1,263 +0,0 @@
from __future__ import unicode_literals
import os
import sys
import time
import json
import socket
import subprocess
# Public API of this helper module.
# NOTE(review): Logger, LiveStatusError and the parser helpers are
# accessed by the test scripts as utils.<name>, so their omission from
# __all__ only affects "from utils import *" -- confirm before relying
# on star-imports of this module.
__all__ = ['parse_statusdata', 'run_mysql_query', 'run_pgsql_query',
           'LiveStatusSocket']

# Command-line parameters and output separators for the mysql/psql CLI
# clients (bytes, as they are passed straight to subprocess.Popen).
MYSQL_PARAMS = b"-t -D icinga -u icinga --password=icinga -e".split()
MYSQL_SEPARATOR = '|'
PGSQL_PARAMS = b"-nq -U icinga -d icinga -c".split()
PGSQL_SEPARATOR = '|'
PGSQL_ENVIRONMENT = {
    b'PGPASSWORD': b'icinga'
}
def parse_statusdata(data, intelligent_cast=True):
    """Parse Nagios status.dat-style content into a dict.

    Maps each section type (the word before "{") to a list of dicts
    holding that section's key=value entries.  Values are cast to
    int/float where possible when *intelligent_cast* is true.

    Fixed: the per-section accumulator is now reset after each closing
    brace; previously every appended section shared one dict, so all
    entries kept mutating each other.
    """
    parsed_data, data_type, type_data = {}, '', {}
    for line in (l for l in data.split(os.linesep)
                 if l and not l.startswith('#')):
        if '{' in line:
            data_type = line.partition('{')[0].strip()
        elif '}' in line:
            parsed_data.setdefault(data_type, []).append(type_data)
            type_data = {}  # start a fresh dict for the next section
        else:
            key, _, value = line.partition('=')
            if intelligent_cast:
                value = _cast_status_value(value)
            type_data[key.strip()] = value
    return parsed_data
def _cast_status_value(value):
try:
return int(value)
except ValueError:
try:
return float(value)
except ValueError:
return value
def run_mysql_query(query, path):
    """Execute *query* via the mysql command-line client at *path*.

    Returns the parsed result rows (see _parse_mysql_result).
    """
    argv = [path] + MYSQL_PARAMS + [query.encode('utf-8')]
    proc = subprocess.Popen(argv, stdout=subprocess.PIPE)
    Logger.debug('Sent MYSQL query: {0!r}\n'.format(query))
    lines = [raw.decode('utf-8') for raw in proc.stdout.readlines()]
    Logger.debug('Received MYSQL resultset: {0!r}\n'
                 ''.format(''.join(lines)), True)
    return _parse_mysql_result(lines)
def _parse_mysql_result(resultset):
    """Turn "mysql -t" table output into a list of row dicts.

    The first separator-containing line provides the column names; the
    string 'NULL' becomes None.  The leading '|' and trailing ' |\\n' of
    each row line are sliced off before splitting.
    """
    header = None
    rows = []
    for line in resultset:
        if MYSQL_SEPARATOR not in line:
            continue
        columns = [col.strip() for col in line[1:-3].split(MYSQL_SEPARATOR)]
        if header is None:
            header = columns
        else:
            rows.append(dict((header[i], None if col == 'NULL' else col)
                             for i, col in enumerate(columns)))
    return rows
def run_pgsql_query(query, path):
    """Execute *query* via the psql command-line client at *path*.

    Returns the parsed result rows (see _parse_pgsql_result); the
    password is supplied through the process environment.
    """
    argv = [path] + PGSQL_PARAMS + [query.encode('utf-8')]
    proc = subprocess.Popen(argv, stdout=subprocess.PIPE,
                            env=PGSQL_ENVIRONMENT)
    Logger.debug('Sent PostgreSQL query: {0!r}\n'.format(query))
    lines = [raw.decode('utf-8') for raw in proc.stdout.readlines()]
    Logger.debug('Received PostgreSQL resultset: {0!r}\n'
                 ''.format(''.join(lines)), True)
    return _parse_pgsql_result(lines)
def _parse_pgsql_result(resultset):
    """Turn psql table output into a list of row dicts.

    The first separator-containing line provides the column names used
    as dict keys for all following rows.
    """
    header = None
    rows = []
    for line in resultset:
        if PGSQL_SEPARATOR not in line:
            continue
        columns = [col.strip() for col in line.split(PGSQL_SEPARATOR)]
        if header is None:
            header = columns
        else:
            rows.append(dict((header[i], col)
                             for i, col in enumerate(columns)))
    return rows
class LiveStatusError(Exception):
    """Raised when Livestatus answers with a non-200 status code.

    Instantiated with (statuscode, response) -- see LiveStatusSocket.query.
    """
    pass
class LiveStatusSocket(object):
    """Thin client for Icinga 2's Livestatus UNIX socket.

    Usable as a context manager; query() sends one request and returns
    the decoded (JSON) response body.
    """

    # Appended to every query: keep the connection open, request JSON
    # output and the fixed-size 16 byte response header.
    options = [
        'KeepAlive: on',
        'OutputFormat: json',
        'ResponseHeader: fixed16'
    ]

    def __init__(self, path):
        # path: filesystem path of the Livestatus UNIX socket
        self.path = path
        self._connected = False

    def __enter__(self):
        self.connect()
        return self

    def __exit__(self, exc_type, exc_value, tb):
        self.close()

    def connect(self):
        """Create and connect the UNIX stream socket."""
        self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
        Logger.debug('Opened UNIX stream socket\n', True)
        self.sock.connect(self.path)
        Logger.debug('Connected to Livestatus socket: {0}\n'.format(self.path),
                     True)
        self._connected = True

    def reconnect(self, timeout=30):
        """Retry connect() once per second for up to *timeout* seconds."""
        Logger.debug('Reconnecting to Livestatus socket\n', True)
        start = time.time()
        while not self._connected and time.time() - start < timeout:
            try:
                self.connect()
            except socket.error, error:
                Logger.debug('Could not connect: {0}\n'.format(error), True)
                # Icinga2 does some "magic" with the socket during startup
                # which causes random errors being raised (EACCES, ENOENT, ..)
                # so we just ignore them until the timeout is reached
                time.sleep(1)
        if not self._connected:
            # Raise the very last exception once the timeout is reached
            # NOTE(review): a bare `raise` outside an except block
            # re-raises the most recent exception on Python 2 only.
            raise

    def close(self):
        """Shut down and close the socket if it is connected."""
        if self._connected:
            self.sock.shutdown(socket.SHUT_RDWR)
            Logger.debug('Shutted down Livestatus connection\n', True)
            self.sock.close()
            Logger.debug('Closed Livestatus socket\n', True)
            self._connected = False

    def query(self, command):
        """Send *command*; return the body or raise LiveStatusError."""
        self.send(command)
        statuscode, response = self.recv()
        if statuscode != 200:
            raise LiveStatusError(statuscode, response)
        return response

    def send(self, query):
        """Write *query* plus the standard options to the socket."""
        if not self._connected:
            raise RuntimeError('Tried to write to closed socket')
        full_query = '\n'.join([query] + self.options)
        self.sock.sendall((full_query + '\n\n').encode('utf-8'))
        Logger.debug('Sent Livestatus query: {0!r}\n'.format(full_query))

    def recv(self):
        """Read one fixed16-framed response; return (statuscode, body).

        The body is JSON-decoded when possible, otherwise returned as
        the decoded text (or b'' for an empty response).
        """
        if not self._connected:
            raise RuntimeError('Tried to read from closed socket')
        response = b''
        # fixed16 header: 3-digit status code plus padded payload length.
        response_header = self.sock.recv(16)
        response_code = int(response_header[:3])
        response_length = int(response_header[3:].strip())
        if response_length > 0:
            # Keep reading until the advertised payload is complete.
            while len(response) < response_length:
                response += self.sock.recv(response_length - len(response))
            response = response.decode('utf-8')
            try:
                response = json.loads(response)
            except ValueError:
                pass
        Logger.debug('Received Livestatus response: {0!r} (Header was: {1!r})'
                     '\n'.format(response, response_header), True)
        return response_code, response
class Logger(object):
    """Verbosity-aware test logger writing tagged lines to stdout.

    The verbosity is read once from a ``--verbosity=N`` command-line
    argument (which is removed from sys.argv) and defaults to 1.
    """

    INFO = 1
    OK = 2
    FAIL = 3
    ERROR = 4
    DEBUG_STD = 5
    DEBUG_EXT = 6

    VERBOSITY = None

    @classmethod
    def permitted(cls, severity):
        """Return whether *severity* may be emitted at this verbosity."""
        if cls.VERBOSITY is None:
            cls.VERBOSITY = next((int(sys.argv.pop(i).partition('=')[2])
                                  for i, a in enumerate(sys.argv)
                                  if a.startswith('--verbosity=')), 1)
        thresholds = {cls.INFO: 1, cls.OK: 1, cls.FAIL: 1, cls.ERROR: 1,
                      cls.DEBUG_STD: 2, cls.DEBUG_EXT: 3}
        return severity in thresholds and cls.VERBOSITY >= thresholds[severity]

    @staticmethod
    def write(text, stderr=False):
        """Write *text* to stdout (or stderr) and flush immediately."""
        stream = sys.stderr if stderr else sys.stdout
        stream.write(text)
        stream.flush()

    @classmethod
    def log(cls, severity, text):
        """Emit *text* with the severity's prefix; return True if shown."""
        prefixes = {
            cls.INFO: '\x00[INFO] ',
            cls.ERROR: '\x00[ERROR] ',
            cls.FAIL: '\x00[FAIL] ',
            cls.OK: '\x00[OK] ',
            cls.DEBUG_STD: '\x00\x00[DEBUG] ',
            cls.DEBUG_EXT: '\x00\x00\x00\x00[DEBUG] ',
        }
        if severity not in prefixes or not cls.permitted(severity):
            return False
        cls.write('{0}{1}'.format(prefixes[severity], text))
        return True

    @classmethod
    def info(cls, text):
        """Log *text* at INFO level."""
        return cls.log(cls.INFO, text)

    @classmethod
    def error(cls, text):
        """Log *text* at ERROR level."""
        return cls.log(cls.ERROR, text)

    @classmethod
    def fail(cls, text):
        """Log *text* at FAIL level."""
        return cls.log(cls.FAIL, text)

    @classmethod
    def ok(cls, text):
        """Log *text* at OK level."""
        return cls.log(cls.OK, text)

    @classmethod
    def debug(cls, text, extended=False):
        """Log *text* at DEBUG level (extended = higher verbosity)."""
        return cls.log(cls.DEBUG_EXT if extended else cls.DEBUG_STD, text)

View File

@ -1,38 +0,0 @@
#!/bin/sh
# Wait until the IDO database (MySQL or PostgreSQL, selected by $1)
# contains host entries, polling once per second for up to TIMEOUT
# tries.  Prints a diagnostic on stderr when the timeout is reached.
TIMEOUT=30
case $1 in
    mysql)
        TYPE='MySQL'
        CMD='/usr/bin/mysql -t -D icinga -u icinga --password=icinga -e'
        ;;
    pgsql)
        TYPE='PostgreSQL'
        CMD='/usr/bin/psql -nq -U icinga -d icinga -c'
        export PGPASSWORD='icinga'
        ;;
    *)
        echo "No IDO type specifier given!"
        exit 1
        ;;
esac
tries=1
while true
do
    out="`$CMD 'select * from icinga_hosts'`"
    # Fixed: '[ ... == ... ]' is a bashism that breaks under POSIX sh
    # (e.g. dash); use '=' and quote the expansions.
    if [ "$tries" -lt "$TIMEOUT" ] && [ "$out" = "" ];
    then
        sleep 1
        tries=$(($tries + 1))
    else
        if [ "$tries" -eq "$TIMEOUT" ];
        then
            echo "IDO ($TYPE) does not have any hosts or is not responding" >&2
        fi
        break
    fi
done

View File

@ -1,10 +0,0 @@
#!/bin/sh
# Verify that the icinga2 service is running; print a test-result line
# and exit 0 (running) or 1 (not running).
#
# Fixed: the original used 'status &> /dev/null', which is a bashism.
# Under a POSIX /bin/sh that backgrounds the command, so $? was always
# 0 and the check could never fail.  Redirect explicitly instead.
if sudo service icinga2 status > /dev/null 2>&1; then
    echo '[OK] icinga2 is running'
    exit 0
else
    echo '[FAIL] icinga2 is not running'
    exit 1
fi

View File

@ -1,69 +0,0 @@
#!/usr/bin/env python
from __future__ import unicode_literals
import sys
import utils
import ido_tests
def main():
    """Validate the IDO MySQL backend: schema, config dump and status data.

    Returns 0 on success, 1 as soon as a check fails that later checks
    depend on, or 1 at the end if any status/log check failed.
    """
    failures = False
    run_query = lambda q: utils.run_mysql_query(q, b'/usr/bin/mysql')

    # Schema check first; without the tables nothing else can work.
    if not ido_tests.validate_tables([d['Tables_in_icinga']
                                      for d in run_query('show tables')]):
        return 1  # Bail out as we cannot proceed without any data

    host_info = run_query('select * from icinga_hosts')
    if not ido_tests.verify_host_config(host_info):
        return 1  # Bail out as we cannot proceed without any data

    service_info = run_query(
        'select c2.alias, c1.* from icinga_services as c1 '
        'inner join icinga_hosts as c2'
        ' on c1.host_object_id = c2.host_object_id'
        )
    if not ido_tests.verify_service_config(service_info):
        return 1  # Bail out as we cannot proceed without any data

    hostchecks_data = run_query(
        'select c.alias, unix_timestamp(s.last_check) as last_check'
        ' from icinga_hoststatus as s '
        'inner join icinga_hosts as c'
        ' on s.host_object_id = c.host_object_id'
        )
    if not ido_tests.check_last_host_status_update(hostchecks_data):
        failures = True

    servicechecks_data = run_query(
        'select c2.alias, c1.display_name, unix_timestamp(s.last_check) as last_check'
        ' from icinga_servicestatus as s '
        'inner join icinga_services as c1'
        ' on s.service_object_id = c1.service_object_id '
        'inner join icinga_hosts as c2'
        ' on c1.host_object_id = c2.host_object_id'
        )
    if not ido_tests.check_last_service_status_update(servicechecks_data):
        failures = True

    # The max() aggregates need an explicit GROUP BY: MySQL rejects the
    # ungrouped form under ONLY_FULL_GROUP_BY (the default since 5.7),
    # and this keeps the query consistent with the PostgreSQL variant
    # of this test.
    logentry_info = run_query(
        'select hosts.alias,'
        ' max(unix_timestamp(logs.entry_time)) as entry_time,'
        ' max(unix_timestamp(hist.state_time)) as state_time'
        ' from icinga_logentries as logs '
        'inner join icinga_hosts as hosts'
        ' on logs.object_id = hosts.host_object_id and hosts.alias = "localhost" '
        'inner join icinga_statehistory as hist'
        ' on hist.object_id = hosts.host_object_id and hist.state_type = 1 '
        'group by hosts.alias'
        )
    if not ido_tests.check_logentries(logentry_info):
        failures = True

    return 1 if failures else 0


if __name__ == '__main__':
    sys.exit(main())

View File

@ -1,71 +0,0 @@
#!/usr/bin/env python
from __future__ import unicode_literals
import sys
import utils
import ido_tests
def main():
    """Run sanity checks against the IDO PostgreSQL backend.

    Returns 0 on success, 1 as soon as a prerequisite check fails, or 1
    at the end if any status/log check failed.
    """
    query = lambda sql: utils.run_pgsql_query(sql, b'/usr/bin/psql')

    # "\dt" lists all relations; keep only real tables for the schema check.
    table_names = [row['Name'] for row in query('\\dt')
                   if row['Type'] == 'table']
    if not ido_tests.validate_tables(table_names):
        return 1  # Bail out as we cannot proceed without any data

    if not ido_tests.verify_host_config(query('select * from icinga_hosts')):
        return 1  # Bail out as we cannot proceed without any data

    services_sql = (
        'select c2.alias, c1.* from icinga_services as c1 '
        'inner join icinga_hosts as c2'
        ' on c1.host_object_id = c2.host_object_id'
    )
    if not ido_tests.verify_service_config(query(services_sql)):
        return 1  # Bail out as we cannot proceed without any data

    check_failed = False

    hoststatus_sql = (
        'select c.alias, unix_timestamp(s.last_check) as last_check'
        ' from icinga_hoststatus as s '
        'inner join icinga_hosts as c'
        ' on s.host_object_id = c.host_object_id'
    )
    if not ido_tests.check_last_host_status_update(query(hoststatus_sql)):
        check_failed = True

    servicestatus_sql = (
        'select c2.alias, c1.display_name, unix_timestamp(s.last_check) as last_check'
        ' from icinga_servicestatus as s '
        'inner join icinga_services as c1'
        ' on s.service_object_id = c1.service_object_id '
        'inner join icinga_hosts as c2'
        ' on c1.host_object_id = c2.host_object_id'
    )
    if not ido_tests.check_last_service_status_update(query(servicestatus_sql)):
        check_failed = True

    logentries_sql = (
        'select hosts.alias,'
        ' max(unix_timestamp(logs.entry_time)) as entry_time,'
        ' max(unix_timestamp(hist.state_time)) as state_time'
        ' from icinga_logentries as logs '
        'inner join icinga_hosts as hosts'
        ' on logs.object_id = hosts.host_object_id '
        'inner join icinga_statehistory as hist'
        ' on hist.object_id = hosts.host_object_id '
        "where hosts.alias = 'localhost' and hist.state_type = 1 "
        'group by hosts.alias'
    )
    if not ido_tests.check_logentries(query(logentries_sql)):
        check_failed = True

    return 1 if check_failed else 0


if __name__ == '__main__':
    sys.exit(main())

View File

@ -1,19 +0,0 @@
#!/bin/sh
# Ensure the Icinga2 Livestatus socket exists, enabling the feature and
# restarting the daemon on demand.
livestatus_path="/var/run/icinga2/cmd/livestatus"

# Quote the variable in tests to stay robust against word-splitting.
if [ ! -e "$livestatus_path" ];
then
    sudo icinga2 feature enable livestatus 1> /dev/null
    sudo service icinga2 restart 1> /dev/null
    sleep 1

    if [ ! -e "$livestatus_path" ];
    then
        echo "[FAIL] Icinga2 Livestatus socket not found ($livestatus_path)"
        exit 1
    fi
fi

echo "[OK] Icinga2 Livestatus socket found ($livestatus_path)"
exit 0

View File

@ -1,12 +0,0 @@
#!/bin/sh
# Check that the Icinga2 log file exists (root-readable dir, hence sudo test).
logfile_path="/var/log/icinga2/icinga2.log"

# Quote the variable in tests to stay robust against word-splitting.
if sudo test -f "$logfile_path";
then
    echo "[OK] Icinga2 log file found ($logfile_path)"
    exit 0
else
    echo "[FAIL] Icinga2 log file not found ($logfile_path)"
    exit 1
fi

View File

@ -1,10 +0,0 @@
#!/bin/sh
# Verify that mysqld is running; "&>" is bash-only under #!/bin/sh,
# so use the portable POSIX redirection instead.
if sudo service mysqld status > /dev/null 2>&1; then
    echo '[OK] mysqld is running'
    exit 0
else
    echo '[FAIL] mysqld is not running'
    exit 1
fi

View File

@ -1,29 +0,0 @@
#!/bin/sh
# Exercise the NSCA interface: restart icinga2 via an external command and
# submit passive check results for a host and a service.

pid_before_restart=`ps -U icinga | grep icinga2 | awk '{print $1}'`
echo 'RESTART_PROCESS' | sudo send_nsca -C
sleep 3
pid_after_restart=`ps -U icinga | grep icinga2 | awk '{print $1}'`

# Compare the pid lists as quoted strings: they may be empty or contain
# several tokens, either of which breaks an unquoted numeric -eq test.
if [ "$pid_after_restart" = "$pid_before_restart" ]; then
    echo "[FAIL] Failed to send 'RESTART_PROCESS' to icinga2"
    exit 1
else
    echo "[OK] Successfully sent 'RESTART_PROCESS' to icinga2"
fi

printf "localhost\t0\tA passive result returning OK\n" | sudo send_nsca
if [ $? -gt 0 ]; then
    echo "[FAIL] Failed to send passive check result for host 'localhost'"
    exit 1
else
    echo "[OK] Successfully sent a passive check result for host 'localhost'"
fi

printf "localhost\tdisk\t2\tA passive result not returning OK\n" | sudo send_nsca
if [ $? -gt 0 ]; then
    echo "[FAIL] Failed to send passive check result for service 'disk' on host 'localhost'"
    exit 1
else
    echo "[OK] Successfully sent a passive check result for service 'disk' on host 'localhost'"
fi

View File

@ -1,10 +0,0 @@
#!/bin/sh
# Verify that postgresql is running; "&>" is bash-only under #!/bin/sh,
# so use the portable POSIX redirection instead.
if sudo service postgresql status > /dev/null 2>&1; then
    echo '[OK] postgresql is running'
    exit 0
else
    echo '[FAIL] postgresql is not running'
    exit 1
fi

View File

@ -1,12 +0,0 @@
#!/bin/sh
# Check that the Icinga2 pidfile exists.
pidfile_path="/var/run/icinga2/icinga2.pid"

# Quote the variable in tests to stay robust against word-splitting.
if [ -f "$pidfile_path" ];
then
    echo "[OK] Icinga2 pidfile found ($pidfile_path)"
    exit 0
else
    echo "[FAIL] Icinga2 pidfile not found ($pidfile_path)"
    exit 1
fi

View File

@ -1,101 +0,0 @@
{
"commands": {
"copy": "scp -qF ssh_config {0} default:{1}",
"exec": "ssh -F ssh_config default '{0}'",
"clean": "ssh -F ssh_config default 'rm -f {0}'"
},
"settings": {
"test_root": "/tmp"
},
"setups": {
"^ido_[a-z]{2}sql.test$": {
"setup": {
"copy": [
"files/ido_tests.py >> /tmp/ido_tests.py",
"files/utils.py >> /tmp/utils.py"
]
},
"teardown": {
"clean": [
"/tmp/ido_tests.py*",
"/tmp/utils.py*"
]
}
},
"checkresult.test": {
"setup": {
"copy": [
"files/configs/checkresult.conf >> /tmp/checkresult.conf",
"files/wait_for_ido.sh >> /tmp/wait_for_ido.sh",
"files/utils.py >> /tmp/utils.py"
],
"exec": [
"sudo mv /tmp/checkresult.conf /etc/icinga2/conf.d/",
"mkdir -p -m 0777 /tmp/icinga2/checkresults",
"sudo service icinga2 restart",
"/tmp/wait_for_ido.sh mysql"
]
},
"teardown": {
"clean": ["/tmp/utils.py*"],
"exec": [
"sudo rm /etc/icinga2/conf.d/checkresult.conf",
"sudo service icinga2 restart",
"rmdir /tmp/icinga2/checkresults",
"/tmp/wait_for_ido.sh mysql",
"/tmp/wait_for_ido.sh pgsql && rm /tmp/wait_for_ido.sh"
]
}
},
"external_commands.test": {
"setup": {
"copy": [
"files/utils.py >> /tmp/utils.py",
"files/configs/notifications.conf >> /tmp/notifications.conf",
"files/configs/ido_checkresults.conf >> /tmp/ido_checkresults.conf",
"files/configs/groups.conf >> /tmp/groups.conf"
],
"exec": [
"sudo mv /etc/icinga2/conf.d/generic-service.conf /etc/icinga2/conf.d/generic-service.conf.bak",
"sudo mv /etc/icinga2/conf.d/notifications.conf /etc/icinga2/conf.d/notifications.conf.bak",
"sudo mv /etc/icinga2/conf.d/groups.conf /etc/icinga2/conf.d/groups.conf.bak",
"sudo mv /tmp/groups.conf /etc/icinga2/conf.d/",
"sudo mv /tmp/ido_checkresults.conf /etc/icinga2/conf.d/",
"sudo mv /tmp/notifications.conf /etc/icinga2/conf.d/",
"sudo service icinga2 restart"
]
},
"teardown": {
"clean": ["/tmp/utils.py*"],
"exec": [
"sudo rm /etc/icinga2/conf.d/groups.conf",
"sudo mv /etc/icinga2/conf.d/groups.conf.bak /etc/icinga2/conf.d/groups.conf",
"sudo mv /etc/icinga2/conf.d/generic-service.conf.bak /etc/icinga2/conf.d/generic-service.conf",
"sudo rm /etc/icinga2/conf.d/ido_checkresults.conf",
"sudo rm /etc/icinga2/conf.d/notifications.conf",
"sudo mv /etc/icinga2/conf.d/notifications.conf.bak /etc/icinga2/conf.d/notifications.conf",
"sudo service icinga2 restart"
]
}
},
"eventhandler.test": {
"setup": {
"copy": [
"files/utils.py >> /tmp/utils.py",
"files/configs/eventhandler.conf >> /tmp/eventhandler.conf"
],
"exec": [
"sudo mv /tmp/eventhandler.conf /etc/icinga2/conf.d/",
"sudo service icinga2 restart"
]
},
"teardown": {
"clean": ["/tmp/utils.py*"],
"exec": [
"sudo rm /etc/icinga2/conf.d/eventhandler.conf",
"sudo service icinga2 restart"
]
}
}
}
}

View File

@ -1,277 +0,0 @@
#!/usr/bin/env python
from __future__ import unicode_literals
import os
import re
import sys
import json
import glob
import subprocess
from optparse import OptionParser
from xml.dom.minidom import getDOMImplementation
# subprocess.DEVNULL only exists on Python 3.3+; fall back to an explicitly
# opened /dev/null handle on older interpreters (the rest of this file uses
# Python 2 idioms such as dict.iteritems()).
try:
    from subprocess import DEVNULL
except ImportError:
    DEVNULL = open(os.devnull, 'w')
class Logger(object):
    """ANSI-coloured console logger for the local test-runner.

    Output is gated by the class-wide VERBOSITY level; ERROR messages go
    to stderr, everything else to stdout.  Extended debug output is
    truncated to OUTPUT_LENGTH bytes below verbosity 4.
    """

    INFO = 1
    OK = 2
    FAIL = 3
    ERROR = 4
    DEBUG_STD = 5
    DEBUG_EXT = 6

    VERBOSITY = 0
    OUTPUT_LENGTH = 1024

    @staticmethod
    def write(text, stderr=False):
        # Encode explicitly; remote test output may contain non-ASCII.
        stream = sys.stderr if stderr else sys.stdout
        stream.write(text.encode('utf-8'))
        stream.flush()

    @classmethod
    def set_verbosity(cls, verbosity):
        cls.VERBOSITY = verbosity

    @classmethod
    def log(cls, severity, text):
        """Emit *text* at *severity*; return True if it was written."""
        if severity == cls.DEBUG_EXT:
            if cls.VERBOSITY < 3:
                return False
            if cls.VERBOSITY < 4 and len(text) > cls.OUTPUT_LENGTH:
                # Keep low-verbosity extended debug output readable.
                text = text[:cls.OUTPUT_LENGTH] + \
                    '... (Truncated to {0} bytes)\n'.format(cls.OUTPUT_LENGTH)
            cls.write('\033[1;90m[DEBUG]\033[1;0m {0}'.format(text))
            return True

        # severity -> (minimum verbosity, message template, write to stderr?)
        dispatch = {
            cls.INFO: (1, '\033[1;94m[INFO]\033[1;0m {0}', False),
            cls.ERROR: (1, '\033[1;33m[ERROR]\033[1;0m {0}', True),
            cls.FAIL: (1, '\033[1;31m[FAIL] {0}\033[1;0m', False),
            cls.OK: (1, '\033[1;32m[OK]\033[1;0m {0}', False),
            cls.DEBUG_STD: (2, '\033[1;90m[DEBUG]\033[1;0m {0}', False),
        }
        try:
            threshold, template, to_stderr = dispatch[severity]
        except KeyError:
            return False
        if cls.VERBOSITY < threshold:
            return False
        cls.write(template.format(text), to_stderr)
        return True

    @classmethod
    def info(cls, text):
        return cls.log(cls.INFO, text)

    @classmethod
    def error(cls, text):
        return cls.log(cls.ERROR, text)

    @classmethod
    def fail(cls, text):
        return cls.log(cls.FAIL, text)

    @classmethod
    def ok(cls, text):
        return cls.log(cls.OK, text)

    @classmethod
    def debug(cls, text, extended=False):
        return cls.log(cls.DEBUG_EXT if extended else cls.DEBUG_STD, text)
class TestSuite(object):
    """Remote test driver.

    Copies each registered test script to the VM (via the command
    templates from the config file), applies any matching setup routine,
    runs the test, applies the teardown routine, and collects per-test
    totals, failures and captured output for the XML report.

    NOTE(review): relies on dict.iteritems(), so this is Python 2 code.
    """

    def __init__(self, configpath):
        self._tests = []    # paths of test scripts registered via add_test()
        self._results = {}  # test name -> result dict (see _run_test)
        self.load_config(configpath)

    def add_test(self, filepath):
        # Register a test script for the next run().
        self._tests.append(filepath)

    def load_config(self, filepath):
        # The JSON config provides the command templates ("commands"),
        # global "settings" and optional per-test "setups" routines.
        with open(filepath) as f:
            self._config = json.load(f)

    def get_report(self):
        """Render the collected results as a JUnit-style XML document."""
        dom = getDOMImplementation()
        document = dom.createDocument(None, 'testsuite', None)
        xml_root = document.documentElement
        for name, info in self._results.iteritems():
            testresult = document.createElement('testcase')
            testresult.setAttribute('classname', 'vm')
            testresult.setAttribute('name', name)
            totaltests = document.createElement('tests')
            totaltests.appendChild(document.createTextNode(str(info['total'])))
            testresult.appendChild(totaltests)
            failedtests = document.createElement('failures')
            failedtests.appendChild(document.createTextNode(str(info['failures'])))
            testresult.appendChild(failedtests)
            systemout = document.createElement('system-out')
            systemout.appendChild(document.createTextNode(info['stdout']))
            testresult.appendChild(systemout)
            systemerr = document.createElement('system-err')
            systemerr.appendChild(document.createTextNode(info['stderr']))
            testresult.appendChild(systemerr)
            # A non-zero exit code of the remote test becomes an explicit
            # <failure> element.
            if info['returncode'] != 0:
                failure = document.createElement('failure')
                failure.setAttribute('type', 'returncode')
                failure.appendChild(document.createTextNode(
                    'code: {0}'.format(info['returncode'])))
                testresult.appendChild(failure)
            xml_root.appendChild(testresult)
        return document.toxml()

    def run(self):
        """Execute every registered test, with setup/teardown around each."""
        for path in self._tests:
            test_name = os.path.basename(path)
            Logger.debug('Copying test "{0}" to remote machine\n'.format(test_name))
            self._copy_test(path)
            self._apply_setup_routines(test_name, 'setup')
            note_printed = Logger.info('Running test "{0}"...\n'.format(test_name))
            result = self._run_test(path)
            Logger.info('Test "{0}" has finished (Total tests: {1}, Failures: {2})\n'
                        ''.format(test_name, result['total'], result['failures']))
            self._apply_setup_routines(test_name, 'teardown')
            Logger.debug('Removing test "{0}" from remote machine\n'.format(test_name))
            self._remove_test(test_name)
            self._results[test_name] = result
            if note_printed:
                Logger.write('\n')

    def _apply_setup_routines(self, test_name, context):
        # Find the first "setups" entry whose key (a regex) matches the
        # test name and apply its copy/clean/exec instructions for the
        # given context ('setup' or 'teardown').
        instructions = next((t[1].get(context)
                             for t in self._config.get('setups', {}).iteritems()
                             if re.match(t[0], test_name)), None)
        if instructions is not None:
            note_printed = Logger.info('Applying {0} routines for test "{1}" .. '
                                       ''.format(context, test_name))
            for instruction in instructions.get('copy', []):
                # copy instructions have the form "source >> destination"
                source, _, destination = instruction.partition('>>')
                self._copy_file(source.strip(), destination.strip())
            for filepath in instructions.get('clean', []):
                self._remove_file(filepath)
            for command in instructions.get('exec', []):
                self._exec_command(command)
            if note_printed:
                Logger.write('Done\n')

    def _remove_file(self, path):
        # Delete a file on the VM via the configured "clean" command.
        command = self._config['commands']['clean'].format(path)
        rc = subprocess.call(command, stdout=DEVNULL, shell=True)
        if rc != 0:
            Logger.error('Cannot remove file "{0}" ({1})\n'.format(path, rc))

    def _exec_command(self, command):
        # Run an arbitrary command on the VM via the "exec" template.
        command = self._config['commands']['exec'].format(command)
        rc = subprocess.call(command, stdout=DEVNULL, shell=True)
        if rc != 0:
            Logger.error('Command "{0}" exited with exit code "{1}"\n' \
                         ''.format(command, rc))

    def _copy_file(self, source, destination):
        # Transfer a local file to the VM via the "copy" template.
        command = self._config['commands']['copy'].format(source, destination)
        rc = subprocess.call(command, stdout=DEVNULL, shell=True)
        if rc != 0:
            Logger.error('Cannot copy file "{0}" to "{1}" ({2})\n' \
                         ''.format(source, destination, rc))

    def _copy_test(self, path):
        # Tests are staged in the configured remote test_root directory.
        self._copy_file(path, os.path.join(self._config['settings']['test_root'],
                                           os.path.basename(path)))

    def _remove_test(self, test_name):
        test_root = self._config['settings']['test_root']
        self._remove_file(os.path.join(test_root, test_name))

    def _run_test(self, path):
        """Run one staged test remotely; return totals, output and exit code."""
        command = self._config['commands']['exec']
        target = os.path.join(self._config['settings']['test_root'],
                              os.path.basename(path))
        options = ['--verbosity={0}'.format(Logger.VERBOSITY)]
        p = subprocess.Popen(command.format(' '.join([target] + options)),
                             stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                             shell=True)
        output, test_count, failed_tests = self._watch_output(p.stdout)
        return {
            'total': test_count,
            'failures': failed_tests,
            'stdout': output,
            'stderr': p.stderr.read().decode('utf-8'),
            'returncode': p.wait()
        }

    def _watch_output(self, pipe):
        # Relay the remote test's output line by line, counting [OK] and
        # [FAIL] markers.  The number of '\x00' bytes in a line selects
        # the debug depth (see the Logger in the test utilities).
        output, total, failures = '', 0, 0
        while True:
            line = pipe.readline().decode('utf-8')
            if not line:
                break
            verbosity_level = line.count('\x00')
            line = line[verbosity_level:]
            if line.startswith('[ERROR] '):
                Logger.error(line[8:])
            elif line.startswith('[DEBUG] '):
                Logger.debug(line[8:], verbosity_level == 4)
            elif line.startswith('[FAIL] '):
                Logger.fail(line[7:])
                failures += 1
                total += 1
            elif line.startswith('[OK] '):
                Logger.ok(line[5:])
                total += 1
            else:
                Logger.info(line.replace('[INFO] ', ''))
            output += line
        return (output, total, failures)
def parse_commandline():
    """Define the test-runner's CLI and parse sys.argv.

    Returns the (options, arguments) pair produced by optparse.
    """
    cli = OptionParser(version='0.5')
    cli.add_option('-C', '--config', default="run_tests.conf",
                   help='The path to the config file to use [%default]')
    cli.add_option('-R', '--results',
                   help='The file where to store the test results')
    # -v and -q are counters; their difference becomes the verbosity.
    cli.add_option('-v', '--verbose', action='count', default=1,
                   help='Be more verbose (Maximum output: -vvv)')
    cli.add_option('-q', '--quiet', action='count', default=0,
                   help='Be less verbose')
    return cli.parse_args()
def main():
    """Entry point: run every test file matching the given patterns.

    Always returns 0; individual test failures are only reflected in
    the optional results report.
    """
    options, patterns = parse_commandline()
    suite = TestSuite(options.config)

    # Expand shell globs ourselves so quoted patterns also work.
    for match in (m for pattern in patterns for m in glob.glob(pattern)):
        suite.add_test(match)

    Logger.set_verbosity(options.verbose - options.quiet)
    suite.run()

    if options.results is not None:
        with open(options.results, 'w') as f:
            f.write(suite.get_report().encode('utf-8'))

    return 0


if __name__ == '__main__':
    sys.exit(main())

View File

@ -1,5 +0,0 @@
#!/bin/sh
# Wrapper for run_tests.py: generate an ssh_config for the vagrant box,
# run all *.test files, then clean up.
vagrant ssh-config > ssh_config
# Quote "$@" so caller-supplied arguments survive word-splitting, and
# preserve the runner's exit status across the cleanup step.
./run_tests.py "$@" *.test
rc=$?
rm -f ssh_config
exit $rc

View File

@ -1,54 +0,0 @@
#!/bin/bash
statusdata_path="/var/cache/icinga2/status.dat"
objectscache_path="/var/cache/icinga2/objects.cache"
if [ ! -f $statusdata_path ];
then
sudo icinga2 feature enable statusdata 1> /dev/null
sudo service icinga2 restart 1> /dev/null
n=0
while [ $n -lt 3 ]
do
sleep 15
if [ -f $statusdata_path ];
then
break
fi
n=$(( $n + 1))
done
if [ $n -eq 3 ];
then
echo "[FAIL] Icinga2 status.dat not found ($statusdata_path)"
exit 1
fi
fi
echo "[OK] Icinga2 status.dat found ($statusdata_path)"
if [ -f $objectscache_path ];
then
echo "[OK] Icinga2 objects.cache found ($objectscache_path)"
else
echo "[FAIL] Icinga2 objects.cache not found ($objectscache_path)"
exit 1
fi
status_time=$(stat --format="%Y" $statusdata_path)
now=$(date +"%s")
sleep $(((15 + 5) - ($now - $status_time)))
new_status_time=$(stat --format="%Y" $statusdata_path)
if [ $new_status_time -eq $status_time ];
then
echo "[FAIL] Icinga2 status.dat is not being updated (Last update: $(date -r $statusdata_path '+%x %X'))"
exit 1
else
echo "[OK] Icinga2 status.dat is being updated (Last update: $(date -r $statusdata_path '+%x %X'))"
fi