2008-01-08 Sancho Lerena <slerena@gmail.com>

* Config.pm: Updated version to 1.3.1-dev
        
        * DB.pm: Added generic db function give_db_free(). Keepalive modules are
        managed differently. They now have type 100 and update only the tagente_estado
        table, discarding historic data, but are managed like other modules. Update data (OK)
        when updating tagent_access. Old Keepalive modules are deprecated and not used,
        since they were not working from version 1.2 onward :(

        * pandora_snmpconsole: Some small fixes from unknown user.

        * pandora_server: Ignore files with size 0 (expected to solve problems with 
        zero-byte BADXML files, needs more testing...). Keepalive modules are now
        managed in a different way. This works for me but needs more testing.

        * pandora_network: Minimal optimization in locking for SNMP threads.



git-svn-id: https://svn.code.sf.net/p/pandora/code/trunk@687 c3f86ba8-e40f-0410-aaad-9ba5e7f4b01f
This commit is contained in:
slerena 2008-01-08 18:32:03 +00:00
parent 3176799f8b
commit 26f657af3d
6 changed files with 167 additions and 111 deletions

View File

@ -1,3 +1,21 @@
2008-01-08 Sancho Lerena <slerena@gmail.com>
* Config.pm: Updated version to 1.3.1-dev
* DB.pm: Added generic db function give_db_free(). keepalive modules area
managed differently. Now have type 100 and update only tagente_estado table
discarding historic data, but are managed like other module. Update data (OK)
when updating tagent_access. Old Keepalive modules are deprecated and not used
since from 1.2 version was not working :(
* pandora_snmpconsole: Some small fixes from unknown user.
* pandora_server: Ignore files with size 0 (expect to solve problems with
zerobyte BADXML files, need more testing...). Keepalive module management
are now managed in a different way. This works for me but needs more testing.
* pandora_network: Minimal optimization in locking for SNMP threads.
2007-12-19 Sancho Lerena <slerena@gmail.com>
* bin/pandora_network: Fixed bug #1854340, problem with file descriptors

View File

@ -448,9 +448,9 @@ sub pandora_query_snmp (%$$$$) {
}
}
# Locking for SNMP call. SNMP is not thread safe !!
my $OIDLIST = new SNMP::VarList([$perl_oid]);
{
lock $snmp_lock;
my $OIDLIST = new SNMP::VarList([$perl_oid]);
# Pass the VarList to getnext building an array of the output
my @OIDINFO = $SESSION->getnext($OIDLIST);
$output = $OIDINFO[0];
@ -493,7 +493,7 @@ sub exec_network_module {
my $flag;
my @sql_data;
if ((!defined($id_agente_modulo)) || ($id_agente_modulo eq "")){
return;
return 0;
}
my $query_sql = "SELECT * FROM tagente_modulo WHERE id_agente_modulo = $id_agente_modulo";
my $exec_sql = $dbh->prepare($query_sql);
@ -527,7 +527,7 @@ sub exec_network_module {
my $module_result = 1; # Fail by default
my $module_data = 0;
if ((defined($ip_target)) && ($ip_target ne "")) {
if ((defined($ip_target)) && ($ip_target ne "")) {
# ICMP Modules
# ------------

View File

@ -36,7 +36,7 @@ use PandoraFMS::Tools;
use PandoraFMS::DB;
# FLUSH in each IO, only for DEBUG, very slow !
$| = 0;
$| = 1;
my %pa_config;
@ -69,6 +69,7 @@ sub pandora_dataserver {
my $file_data;
my $file_md5;
my @file_list;
my $file_size;
my $onefile; # Each item of incoming directory
my $agent_filename;
my $dbh = DBI->connect("DBI:mysql:$pa_config->{'dbname'}:$pa_config->{'dbhost'}:3306",$pa_config->{"dbuser"}, $pa_config->{"dbpass"},{ RaiseError => 1, AutoCommit => 1 });
@ -76,68 +77,69 @@ sub pandora_dataserver {
while ( 1 ) { # Pandora module processor main loop
opendir(DIR, $pa_config->{'incomingdir'} ) or die "[FATAL] Cannot open Incoming data directory at $pa_config->{'incomingdir'}: $!";
while (defined($onefile = readdir(DIR))){
push @file_list,$onefile; # Push in a stack all directory entries for this loop
}
while (defined($onefile = pop @file_list)) { # Begin to process files
threads->yield;
$file_data = "$pa_config->{'incomingdir'}/$onefile";
next if $onefile =~ /^\.\.?$/; # Skip . and .. directory
# First filter any file that doesnt like ".data"
if ( $onefile =~ /([\-\:\;\.\,\_\s\a\*\=\(\)a-zA-Z0-9]*).data\z/ ) {
$agent_filename = $1;
$file_md5 = "$pa_config->{'incomingdir'}/$agent_filename.checksum";
# If check is disabled, ignore if file_md5 exists
if (( -e $file_md5 ) or ($pa_config->{'pandora_check'} == 0)){
# Verify integrity
my $check_result;
$check_result = md5check ($file_data,$file_md5);
if (($pa_config->{'pandora_check'} == 0) || ($check_result == 1)){
# PERL cannot "free" memory on user demmand, so
# we are declaring $config hash reference in inner loop
# to force PERL system to realloc memory in each loop.
# In Pandora 1.1 in "standard" PERL Implementations, we could
# have a memory leak problem. This is solved now :-)
# Source : http://www.rocketaware.com/perl/perlfaq3/
# Procesa_Datos its the main function to process datafile
my $config; # Hash Reference, used to store XML data
# But first we needed to verify integrity of data file
if ($pa_config->{'pandora_check'} == 1){
logger ($pa_config, "Integrity of Datafile using MD5 is verified: $file_data",3);
}
eval { # XML Processing error catching procedure. Critical due XML was no validated
logger ($pa_config, "Ready to parse $file_data",4);
$config = XMLin($file_data, forcearray=>'module');
};
if ($@) {
logger ($pa_config, "[ERROR] Error processing XML contents in $file_data",0);
logger ($pa_config, "[ERROR] $@", 0);
copy ($file_data,$file_data."_BADXML");
if (($pa_config->{'pandora_check'} == 1) && ( -e $file_md5 )) {
copy ($file_md5,$file_md5."_BADCHECKSUM");
}
if (($onefile =~ /^[a-zA-Z0-9]*/) && ( ((stat($pa_config->{'incomingdir'}."/".$onefile))[7]) > 0 )) {
push @file_list,$onefile; # Push in a stack all directory entries for this loop
}
procesa_datos ($pa_config, $config, $dbh);
undef $config;
# If _everything_ its ok..
# delete files
unlink ($file_data);
if ( -e $file_md5 ) {
unlink ($file_md5);
}
} else { # md5 check fails
logger ( $pa_config, "[ERROR] MD5 Checksum failed! for $file_data",0);
# delete files
unlink ($file_data);
if ( -e $file_md5 ) {
unlink ($file_md5);
}
}
} # No checksum file, ignore file
}
}
}
while (defined($onefile = pop @file_list)) { # Begin to process files
threads->yield;
$file_data = "$pa_config->{'incomingdir'}/$onefile";
#next if $onefile =~ /^\.\.?$/; # Skip . and .. directory
# First filter any file that doesnt like ".data"
if ( $onefile =~ /([\-\:\;\.\,\_\s\a\*\=\(\)a-zA-Z0-9]*).data\z/ ) {
$agent_filename = $1;
$file_md5 = "$pa_config->{'incomingdir'}/$agent_filename.checksum";
# If check is disabled, ignore if file_md5 exists
if (( -e $file_md5 ) or ($pa_config->{'pandora_check'} == 0)){
# Verify integrity
my $check_result;
$check_result = md5check ($file_data,$file_md5);
if (($pa_config->{'pandora_check'} == 0) || ($check_result == 1)){
# PERL cannot "free" memory on user demmand, so
# we are declaring $config hash reference in inner loop
# to force PERL system to realloc memory in each loop.
# In Pandora 1.1 in "standard" PERL Implementations, we could
# have a memory leak problem. This is solved now :-)
# Source : http://www.rocketaware.com/perl/perlfaq3/
# Procesa_Datos its the main function to process datafile
my $config; # Hash Reference, used to store XML data
# But first we needed to verify integrity of data file
if ($pa_config->{'pandora_check'} == 1){
logger ($pa_config, "Integrity of Datafile using MD5 is verified: $file_data",3);
}
eval { # XML Processing error catching procedure. Critical due XML was no validated
logger ($pa_config, "Ready to parse $file_data",4);
$config = XMLin($file_data, forcearray=>'module');
};
if ($@) {
logger ($pa_config, "[ERROR] Error processing XML contents in $file_data",0);
logger ($pa_config, "[ERROR] $@", 0);
copy ($file_data,$file_data."_BADXML");
if (($pa_config->{'pandora_check'} == 1) && ( -e $file_md5 )) {
copy ($file_md5,$file_md5."_BADCHECKSUM");
}
}
procesa_datos ($pa_config, $config, $dbh);
undef $config;
# If _everything_ its ok..
# delete files
unlink ($file_data);
if ( -e $file_md5 ) {
unlink ($file_md5);
}
} else { # md5 check fails
logger ( $pa_config, "[ERROR] MD5 Checksum failed! for $file_data",0);
# delete files
unlink ($file_data);
if ( -e $file_md5 ) {
unlink ($file_md5);
}
}
} # No checksum file, ignore file
}
}
closedir(DIR);
threads->yield;
threads->yield;
sleep $pa_config->{"server_threshold"};
}
} # End of main loop function
@ -170,27 +172,34 @@ sub keep_alive_check {
my $dbh = $_[1];
my $timestamp = &UnixDate ("today", "%Y-%m-%d %H:%M:%S");
my $query_idag = "SELECT tagente_modulo.id_agente_modulo, tagente_modulo.id_tipo_modulo, tagente_modulo.nombre, tagente_estado.datos FROM tagente_modulo, talerta_agente_modulo, tagente_estado WHERE tagente_modulo.id_agente_modulo = talerta_agente_modulo.id_agente_modulo AND talerta_agente_modulo.disable = 0 AND tagente_modulo.id_tipo_modulo = -1 AND tagente_estado.id_agente_modulo = tagente_modulo.id_agente_modulo";
my $s_idag = $dbh->prepare($query_idag);
$s_idag ->execute;
my $utimestamp = &UnixDate ("today", "%s");
my $query_idag = "SELECT tagente_modulo.id_agente_modulo, tagente_estado.utimestamp, tagente_estado.id_agente, tagente.intervalo, tagente.nombre, tagente_modulo.nombre FROM tagente_modulo, talerta_agente_modulo, tagente_estado, tagente WHERE tagente_modulo.id_agente_modulo = talerta_agente_modulo.id_agente_modulo AND talerta_agente_modulo.disable = 0 AND tagente_modulo.id_tipo_modulo = 100 AND tagente_estado.id_agente_modulo = tagente_modulo.id_agente_modulo AND tagente.id_agente = tagente_estado.id_agente AND tagente_estado.datos != 0";
my $s_idag = $dbh->prepare($query_idag);
$s_idag ->execute;
my $id_agent_module;
my $module_utimestamp;
my $id_agent;
my $interval;
my $agent_name;
my $module_name;
# data needed in loop (we'll reuse it)
my @data;
my $nombre_agente;
my $id_agente_modulo;
my $tipo_modulo;
my $nombre_modulo;
my $datos;
my @data;
if ($s_idag->rows != 0) {
while (@data = $s_idag->fetchrow_array()) {
threads->yield;
$id_agente_modulo = $data[0];
$nombre_agente = dame_nombreagente_agentemodulo ($pa_config, $id_agente_modulo, $dbh);
$nombre_modulo = $data[2];
$datos = $data[3];
$tipo_modulo = $data[1];
pandora_calcula_alerta ($pa_config, $timestamp, $nombre_agente, $tipo_modulo, $nombre_modulo, $datos, $dbh);
$id_agent_module = $data[0];
$module_utimestamp = $data[1];
$id_agent = $data[2];
$interval = $data[3];
$agent_name = $data[4];
$module_name = $data[5];
# Agent down - Keepalive utimestamp too low (2x)
if (($module_utimestamp + ($interval * 2)) < $utimestamp){
pandora_writestate ($pa_config, $agent_name, "keep_alive", $module_name, 0, 0, $dbh, 1);
}
}
}
$s_idag->finish();

View File

@ -147,7 +147,7 @@ sub pandora_snmptrapd {
} else { # not custom OID type, deleting old values in these vars
$custom_oid="";
$custom_type="";
$custom_value="type_desc";
$custom_value=$type_desc; # Bug fixed, 080108 by anonymous
}
$sql_insert = "insert into ttrap (timestamp, source, oid, type, value, oid_custom, value_custom, type_custom) values ('$timestamp', '$source', '$oid', $type, '$value', '$custom_oid', '$custom_value', '$custom_type')";
logger ($pa_config,"Received SNMP Trap from $source",2);

View File

@ -34,8 +34,8 @@ our @EXPORT = qw( pandora_help_screen
# There is no global vars, all variables (setup) passed as hash reference
# version: Defines actual version of Pandora Server for this module only
my $pandora_version = "1.4-dev";
my $pandora_build="PS071219";
my $pandora_version = "1.3.1dev";
my $pandora_build="PS080108";
our $VERSION = $pandora_version." ".$pandora_build;
# Setup hash

View File

@ -485,33 +485,43 @@ sub pandora_accessupdate (%$$) {
my $id_agent = $_[1];
my $dbh = $_[2];
if ($id_agent != -1){
my $intervalo = dame_intervalo ($pa_config, $id_agent, $dbh);
my $timestamp = &UnixDate("today","%Y-%m-%d %H:%M:%S");
my $temp = $intervalo / 2;
my $fecha_limite = DateCalc($timestamp,"- $temp seconds",\$err);
$fecha_limite = &UnixDate($fecha_limite,"%Y-%m-%d %H:%M:%S");
# Fecha limite has limit date, if there are records below this date
# we cannot insert any data in Database. We use a limit based on agent_interval / 2
# So if an agent has interval 300, could have a max of 24 records per hour in access_table
# This is to do not saturate database with access records (because if you hace a network module with interval 30, you have
# a new record each 30 seconds !
# Compare with tagente.ultimo_contacto (tagent_lastcontact in english), so this will have
# the latest update for this agent
my $query = "select count(*) from tagent_access where id_agent = $id_agent and timestamp > '$fecha_limite'";
my $query_exec = $dbh->prepare($query);
my @data_row;
$query_exec ->execute;
@data_row = $query_exec->fetchrow_array();
$temp = $data_row[0];
$query_exec->finish();
if ( $temp == 0) { # We need update access time
my $query2 = "insert into tagent_access (id_agent, timestamp) VALUES ($id_agent,'$timestamp')";
$dbh->do($query2);
logger($pa_config,"Updating tagent_access for agent id $id_agent",9);
}
}
if ($id_agent != -1){
my $intervalo = dame_intervalo ($pa_config, $id_agent, $dbh);
my $timestamp = &UnixDate("today","%Y-%m-%d %H:%M:%S");
my $temp = $intervalo / 2;
my $fecha_limite = DateCalc($timestamp,"- $temp seconds",\$err);
$fecha_limite = &UnixDate($fecha_limite,"%Y-%m-%d %H:%M:%S");
# Fecha limite has limit date, if there are records below this date
# we cannot insert any data in Database. We use a limit based on agent_interval / 2
# So if an agent has interval 300, could have a max of 24 records per hour in access_table
# This is to do not saturate database with access records (because if you hace a network module with interval 30, you have
# a new record each 30 seconds !
# Compare with tagente.ultimo_contacto (tagent_lastcontact in english), so this will have
# the latest update for this agent
my $query = "select count(*) from tagent_access where id_agent = $id_agent and timestamp > '$fecha_limite'";
my $query_exec = $dbh->prepare($query);
my @data_row;
$query_exec ->execute;
@data_row = $query_exec->fetchrow_array();
$temp = $data_row[0];
$query_exec->finish();
if ( $temp == 0) { # We need update access time
my $query2 = "insert into tagent_access (id_agent, timestamp) VALUES ($id_agent,'$timestamp')";
$dbh->do($query2);
logger($pa_config,"Updating tagent_access for agent id $id_agent",9);
}
# Update keepalive module (if present)
my $id_agent_module = give_db_free ("SELECT id_agente_modulo FROM tagente_modulo WHERE id_agente = $id_agent AND id_tipo_modulo = 100", $dbh);
if ($id_agent_module ne -1){
my $utimestamp = &UnixDate ("today", "%s");
# Status = 0 is monitor OK
$query2 = "UPDATE tagente_estado SET datos = 1, estado = 0, timestamp = '$timestamp', cambio = 0, last_try= '$timestamp', utimestamp = $utimestamp WHERE id_agente_modulo = $id_agent_module";
$dbh->do ($query2);
}
}
}
##########################################################################
@ -1151,7 +1161,7 @@ sub dame_agente_id (%$$) {
my @data;
$agent_name = sqlWrap ($agent_name);
# Calculate agent ID using select by its name
my $query_idag = "SELECT id_agente FROM tagente WHERE nombre = $agent_name";
my $query_idag = "SELECT id_agente FROM tagente WHERE nombre = $agent_name OR direccion = $agent_name"; # Fixed 080108 by anon (used on snmpconsole...).
my $s_idag = $dbh->prepare($query_idag);
$s_idag ->execute;
if ($s_idag->rows == 0) {
@ -1661,6 +1671,25 @@ sub give_db_value ($$$$$) {
return -1;
}
# ---------------------------------------------------------------
# Execute an arbitrary SQL SELECT and return the first column of
# the first row, or -1 if the query matched no rows.
#
# Parameters: $condition - complete SQL statement to run. NOTE:
#             this sub performs no parameter binding, so the
#             caller must quote/escape any interpolated values
#             (SQL-injection risk if fed untrusted data).
#             $dbh       - connected DBI database handle.
# ---------------------------------------------------------------
sub give_db_free ($$) {
    my ($condition, $dbh) = @_;

    my $sth = $dbh->prepare($condition);
    $sth->execute;
    # NOTE(review): per DBI docs, rows() is only reliable for
    # SELECT with drivers that store the full result set (as
    # DBD::mysql does by default) - kept to preserve behavior.
    my $result = -1;
    if ($sth->rows != 0) {
        my @data = $sth->fetchrow_array();
        $result = $data[0];
    }
    # Release the statement handle on every path (the original
    # leaked it when no rows matched).
    $sth->finish();
    return $result;
}
# End of function declaration
# End of defined Code