#!/usr/bin/env python
#
# This file is part of Jobmonarch
#
# Copyright (C) 2006 Ramon Bastiaans
#
# Jobmonarch is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Jobmonarch is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
# SVN $Id: jobarchived.py 500 2008-03-05 14:05:48Z bastiaans $
#
import getopt, syslog, ConfigParser, sys

VERSION = '0.3.1'

def usage( ver ):

    print 'jobarchived %s' %VERSION

    if ver:
        return 0

    print
    print 'Purpose:'
    print '  The Job Archive Daemon (jobarchived) stores batch job information in a SQL database'
    print '  and node statistics in a RRD archive'
    print
    print 'Usage: jobarchived [OPTIONS]'
    print
    print '  -c, --config=FILE   The configuration file to use (default: /etc/jobarchived.conf)'
    print '  -p, --pidfile=FILE  Use pid file to store the process id'
    print '  -h, --help          Print help and exit'
    print '  -v, --version       Print version and exit'
    print
def processArgs( args ):

    SHORT_L = 'p:hvc:'
    LONG_L  = [ 'help', 'config=', 'pidfile=', 'version' ]

    config_filename = '/etc/jobarchived.conf'

    global PIDFILE

    PIDFILE = None

    try:

        opts, args = getopt.getopt( args, SHORT_L, LONG_L )

    except getopt.error, detail:

        print detail
        sys.exit( 1 )

    for opt, value in opts:

        if opt in [ '--config', '-c' ]:

            config_filename = value

        if opt in [ '--pidfile', '-p' ]:

            PIDFILE = value

        if opt in [ '--help', '-h' ]:

            usage( False )
            sys.exit( 0 )

        if opt in [ '--version', '-v' ]:

            usage( True )
            sys.exit( 0 )

    try:
        return loadConfig( config_filename )

    except ConfigParser.NoOptionError, detail:

        print detail
        sys.exit( 1 )
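# Example invocation (illustrative, with hypothetical paths):
#
#   jobarchived -c /etc/jobarchived.conf -p /var/run/jobarchived.pid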
def loadConfig( filename ):

    def getlist( cfg_string ):

        my_list = [ ]

        for item_txt in cfg_string.split( ',' ):

            sep_char = None

            item_txt = item_txt.strip()

            for s_char in [ "'", '"' ]:

                if item_txt.find( s_char ) != -1:

                    if item_txt.count( s_char ) != 2:

                        print 'Missing quote: %s' %item_txt
                        sys.exit( 1 )

                    else:

                        sep_char = s_char
                        break

            if sep_char:

                item_txt = item_txt.split( sep_char )[1]

            my_list.append( item_txt )

        return my_list
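    # Illustrative note (not in the original source): for a hypothetical
    # config value such as "'LISA Cluster', 'Test Cluster'", getlist()
    # above returns [ 'LISA Cluster', 'Test Cluster' ].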
    cfg = ConfigParser.ConfigParser()

    cfg.read( filename )

    global DEBUG_LEVEL, USE_SYSLOG, SYSLOG_LEVEL, SYSLOG_FACILITY, GMETAD_CONF, ARCHIVE_XMLSOURCE
    global ARCHIVE_DATASOURCES, ARCHIVE_PATH, ARCHIVE_HOURS_PER_RRD, ARCHIVE_EXCLUDE_METRICS
    global JOB_SQL_DBASE, DAEMONIZE, RRDTOOL, JOB_TIMEOUT, MODRRDTOOL

    ARCHIVE_PATH          = cfg.get( 'DEFAULT', 'ARCHIVE_PATH' )

    ARCHIVE_HOURS_PER_RRD = cfg.getint( 'DEFAULT', 'ARCHIVE_HOURS_PER_RRD' )

    DEBUG_LEVEL           = cfg.getint( 'DEFAULT', 'DEBUG_LEVEL' )

    USE_SYSLOG            = cfg.getboolean( 'DEFAULT', 'USE_SYSLOG' )

    SYSLOG_LEVEL          = cfg.getint( 'DEFAULT', 'SYSLOG_LEVEL' )

    MODRRDTOOL = False
    try:
        global rrdtool
        import rrdtool

        MODRRDTOOL = True

    except ImportError:

        MODRRDTOOL = False

        print "ERROR: py-rrdtool import FAILED: falling back to DEPRECATED use of the rrdtool binary. This will slow down jobarchived significantly!"

    RRDTOOL = cfg.get( 'DEFAULT', 'RRDTOOL' )
    try:

        SYSLOG_FACILITY = eval( 'syslog.LOG_' + cfg.get( 'DEFAULT', 'SYSLOG_FACILITY' ) )

    except AttributeError, detail:

        print 'Unknown syslog facility'
        sys.exit( 1 )

    GMETAD_CONF             = cfg.get( 'DEFAULT', 'GMETAD_CONF' )

    ARCHIVE_XMLSOURCE       = cfg.get( 'DEFAULT', 'ARCHIVE_XMLSOURCE' )

    ARCHIVE_DATASOURCES     = getlist( cfg.get( 'DEFAULT', 'ARCHIVE_DATASOURCES' ) )

    ARCHIVE_EXCLUDE_METRICS = getlist( cfg.get( 'DEFAULT', 'ARCHIVE_EXCLUDE_METRICS' ) )

    JOB_SQL_DBASE           = cfg.get( 'DEFAULT', 'JOB_SQL_DBASE' )

    JOB_TIMEOUT             = cfg.getint( 'DEFAULT', 'JOB_TIMEOUT' )

    DAEMONIZE               = cfg.getboolean( 'DEFAULT', 'DAEMONIZE' )

    return True
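# Illustrative sketch (not part of the original source): a minimal
# jobarchived.conf covering the options loadConfig() reads above.
# Every value below is a hypothetical example; the JOB_SQL_DBASE
# 'host/dbname' form is inferred from how it is split elsewhere in
# this file.
#
#   [DEFAULT]
#   DEBUG_LEVEL             : 1
#   DAEMONIZE               : 1
#   USE_SYSLOG              : 1
#   SYSLOG_LEVEL            : 0
#   SYSLOG_FACILITY         : DAEMON
#   GMETAD_CONF             : /etc/gmetad.conf
#   ARCHIVE_XMLSOURCE       : localhost:8651
#   ARCHIVE_DATASOURCES     : "LISA Cluster"
#   ARCHIVE_PATH            : /data/jobarchive
#   ARCHIVE_HOURS_PER_RRD   : 12
#   ARCHIVE_EXCLUDE_METRICS : "bytes_in", "bytes_out"
#   JOB_SQL_DBASE           : localhost/jobarchive
#   JOB_TIMEOUT             : 36
#   RRDTOOL                 : /usr/bin/rrdtool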
# XML data types that we do not store
#
UNSUPPORTED_ARCHIVE_TYPES = [ 'string' ]

# Maximum time (in seconds) a parse thread may run
#
PARSE_TIMEOUT = 60

# Maximum time (in seconds) a store thread may run
#
STORE_TIMEOUT = 360

"""
The Job Archiving Daemon
"""
from types import *

import xml.sax, xml.sax.handler, socket, string, os, os.path, time, thread, threading, random, re

try:
    from pyPgSQL import PgSQL

except ImportError, details:

    print "FATAL ERROR: pyPgSQL python module not found"
    sys.exit( 1 )
# Original from Andre van der Vlies <andre@vandervlies.xs4all.nl> for MySQL.
# Changed and added some more functions for postgres.
#
#
# Changed by: Bas van der Vlies <basv@sara.nl>
#
# SARA API for Postgres Database
#
# Changed by: Ramon Bastiaans for Job Monarch
#
class InitVars:
    Vars = {}

    def __init__(self, **key_arg):
        for (key, value) in key_arg.items():
            if value:
                self.Vars[key] = value
            else:
                self.Vars[key] = None

    def __call__(self, *key):
        key = "%s" % key
        return self.Vars[key]

    def __getitem__(self, key):
        return self.Vars[key]

    def __repr__(self):
        return repr(self.Vars)

    def keys(self):
        barf = map(None, self.Vars.keys())
        return barf

    def values(self):
        barf = map(None, self.Vars.values())
        return barf

    def has_key(self, key):
        if self.Vars.has_key(key):
            return 1
        else:
            return 0
class DBError(Exception):
    def __init__(self, msg=''):
        self.msg = msg
        Exception.__init__(self, msg)
    def __repr__(self):
        return self.msg
    __str__ = __repr__
#
# Class to connect to a database
# and return the query result as a list or dictionary.
#
class DB:
    def __init__(self, db_vars):

        self.dict = db_vars

        if self.dict.has_key('User'):
            self.user = self.dict['User']
        else:
            self.user = 'postgres'

        if self.dict.has_key('Host'):
            self.host = self.dict['Host']
        else:
            self.host = 'localhost'

        if self.dict.has_key('Password'):
            self.passwd = self.dict['Password']
        else:
            self.passwd = ''

        if self.dict.has_key('DataBaseName'):
            self.db = self.dict['DataBaseName']
        else:
            self.db = 'uva_cluster_db'

        # connect_string = 'host:port:database:user:password'
        dsn = "%s::%s:%s:%s" %(self.host, self.db, self.user, self.passwd)

        try:
            self.SQL = PgSQL.connect(dsn)
        except PgSQL.Error, details:
            str = "%s" %details
            raise DBError(str)

    def __repr__(self):
        return repr(self.result)

    def __nonzero__(self):
        return not(self.result == None)

    def __len__(self):
        return len(self.result)

    def __getitem__(self,i):
        return self.result[i]

    def __getslice__(self,i,j):
        return self.result[i:j]

    def Get(self, q_str):
        c = self.SQL.cursor()
        try:
            c.execute(q_str)
            result = c.fetchall()
        except PgSQL.Error, details:
            c.close()
            str = "%s" %details
            raise DBError(str)

        c.close()
        return result

    def Set(self, q_str):
        c = self.SQL.cursor()
        try:
            c.execute(q_str)
            result = c.oidValue

        except PgSQL.Error, details:
            c.close()
            str = "%s" %details
            raise DBError(str)

        c.close()
        return result

    def Commit(self):
        self.SQL.commit()
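# Usage sketch (not in the original source; the hostname, database name
# and credentials below are hypothetical):
#
#   db = DB( InitVars( DataBaseName='jobarchive', User='monarch',
#                      Host='dbhost', Password='secret' ) )
#   rows = db.Get( "SELECT job_id FROM jobs" )       # SELECTs: fetchall() list
#   db.Set( "DELETE FROM jobs WHERE job_id = '42'" ) # mutations, then:
#   db.Commit()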
class DataSQLStore:

    db_vars = None
    dbc     = None

    def __init__( self, hostname, database ):

        self.db_vars = InitVars(DataBaseName=database,
                User='root',
                Host=hostname,
                Password='',
                Dictionary='true')

        try:
            self.dbc = DB(self.db_vars)
        except DBError, details:
            debug_msg( 0, 'FATAL ERROR: Unable to connect to database!: ' +str(details) )
            sys.exit(1)

    def setDatabase(self, statement):
        ret = self.doDatabase('set', statement)
        return ret

    def getDatabase(self, statement):
        ret = self.doDatabase('get', statement)
        return ret

    def doDatabase(self, type, statement):

        debug_msg( 10, 'doDatabase(): %s: %s' %(type, statement) )
        try:
            if type == 'set':
                result = self.dbc.Set( statement )
                self.dbc.Commit()
            elif type == 'get':
                result = self.dbc.Get( statement )

        except DBError, detail:
            operation = statement.split(' ')[0]
            debug_msg( 0, 'FATAL ERROR: ' +operation+ ' on database failed while doing ['+statement+'] full msg: '+str(detail) )
            sys.exit(1)

        debug_msg( 10, 'doDatabase(): result: %s' %(result) )
        return result
    def getJobNodeId( self, job_id, node_id ):

        id = self.getDatabase( "SELECT job_id,node_id FROM job_nodes WHERE job_id = '%s' AND node_id = '%s'" %(job_id, node_id) )
        if len( id ) > 0:

            if len( id[0] ) > 0 and id[0] != '':

                return 1

        return 0

    def getNodeId( self, hostname ):

        id = self.getDatabase( "SELECT node_id FROM nodes WHERE node_hostname = '%s'" %hostname )

        if len( id ) > 0:

            id = id[0][0]

            return id
        else:
            return None

    def getNodeIds( self, hostnames ):

        ids = [ ]

        for node in hostnames:

            id = self.getNodeId( node )

            if id:
                ids.append( id )

        return ids

    def getJobId( self, jobid ):

        id = self.getDatabase( "SELECT job_id FROM jobs WHERE job_id = '%s'" %jobid )

        if id:
            id = id[0][0]

            return id
        else:
            return None

    def addJob( self, job_id, jobattrs ):

        if not self.getJobId( job_id ):

            self.mutateJob( 'insert', job_id, jobattrs )
        else:
            self.mutateJob( 'update', job_id, jobattrs )
    def mutateJob( self, action, job_id, jobattrs ):

        job_values = [ 'name', 'queue', 'owner', 'requested_time', 'requested_memory', 'ppn', 'status', 'start_timestamp', 'stop_timestamp' ]

        insert_col_str = 'job_id'
        insert_val_str = "'%s'" %job_id
        update_str     = None

        debug_msg( 10, 'mutateJob(): %s %s' %(action,job_id))

        ids = [ ]

        for valname, value in jobattrs.items():

            if valname in job_values and value != '':

                column_name = 'job_' + valname

                if action == 'insert':

                    if not insert_col_str:
                        insert_col_str = column_name
                    else:
                        insert_col_str = insert_col_str + ',' + column_name

                    if not insert_val_str:
                        insert_val_str = value
                    else:
                        insert_val_str = insert_val_str + ",'%s'" %value

                elif action == 'update':

                    if not update_str:
                        update_str = "%s='%s'" %(column_name, value)
                    else:
                        update_str = update_str + ",%s='%s'" %(column_name, value)

            elif valname == 'nodes' and value:

                node_valid = 1

                if len(value) == 1:

                    if jobattrs['status'] == 'Q':

                        node_valid = 0

                    else:

                        node_valid = 0

                        for node_char in str(value[0]):

                            if string.find( string.digits, node_char ) != -1 and not node_valid:

                                node_valid = 1

                if node_valid:

                    ids = self.addNodes( value, jobattrs['domain'] )

        if action == 'insert':

            self.setDatabase( "INSERT INTO jobs ( %s ) VALUES ( %s )" %( insert_col_str, insert_val_str ) )

        elif action == 'update':

            self.setDatabase( "UPDATE jobs SET %s WHERE job_id=%s" %(update_str, job_id) )

        if len( ids ) > 0:
            self.addJobNodes( job_id, ids )
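    # Illustrative note (not in the original source): for an 'insert' with
    # hypothetical jobattrs { 'owner': 'jdoe', 'queue': 'batch' } and
    # job_id '123', mutateJob() above builds and executes:
    #
    #   INSERT INTO jobs ( job_id,job_owner,job_queue ) VALUES ( '123','jdoe','batch' )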
    def addNodes( self, hostnames, domain ):

        ids = [ ]

        for node in hostnames:

            node = '%s.%s' %( node, domain )
            id   = self.getNodeId( node )

            if not id:
                self.setDatabase( "INSERT INTO nodes ( node_hostname ) VALUES ( '%s' )" %node )
                id = self.getNodeId( node )

            ids.append( id )

        return ids

    def addJobNodes( self, jobid, nodes ):

        for node in nodes:

            if not self.getJobNodeId( jobid, node ):

                self.addJobNode( jobid, node )

    def addJobNode( self, jobid, nodeid ):

        self.setDatabase( "INSERT INTO job_nodes (job_id,node_id) VALUES ( %s,%s )" %(jobid, nodeid) )

    def storeJobInfo( self, jobid, jobattrs ):

        self.addJob( jobid, jobattrs )
    def checkStaleJobs( self ):

        # Locate all jobs in the database that are not set to finished
        #
        q = "SELECT * from jobs WHERE job_status != 'F'"

        r = self.getDatabase( q )

        if len( r ) == 0:

            return None

        cleanjobs   = [ ]
        timeoutjobs = [ ]

        jobtimeout_sec = JOB_TIMEOUT * (60 * 60)
        cur_time       = time.time()

        for row in r:

            job_id              = row[0]
            job_requested_time  = row[4]
            job_status          = row[7]
            job_start_timestamp = row[8]

            # If it was set to queued and we never saw it start,
            # there's no point in keeping it around
            #
            if job_status == 'Q' or not job_start_timestamp:

                cleanjobs.append( job_id )

            else:

                start_timestamp = int( job_start_timestamp )

                # If it has been set to running for longer than JOB_TIMEOUT,
                # close the job: it probably finished while we were not running
                #
                if ( cur_time - start_timestamp ) > jobtimeout_sec:

                    if job_requested_time:

                        rtime_epoch = reqtime2epoch( job_requested_time )
                    else:
                        rtime_epoch = None

                    timeoutjobs.append( (job_id, job_start_timestamp, rtime_epoch) )

        debug_msg( 1, 'Found ' + str( len( cleanjobs ) ) + ' stale jobs in database: deleting entries' )

        # Purge these from the database
        #
        for j in cleanjobs:

            q = "DELETE FROM jobs WHERE job_id = '" + str( j ) + "'"
            self.setDatabase( q )

        debug_msg( 1, 'Found ' + str( len( timeoutjobs ) ) + ' timed out jobs in database: closing entries' )

        # Close these jobs in the database:
        # update the stop_timestamp to start_timestamp + requested wallclock
        # and set the state to finished
        #
        for j in timeoutjobs:

            ( i, s, r ) = j

            if r:
                new_end_timestamp = int( s ) + r

                q = "UPDATE jobs SET job_status='F',job_stop_timestamp = '" + str( new_end_timestamp ) + "' WHERE job_id = '" + str(i) + "'"
                self.setDatabase( q )
class RRDMutator:
    """A class for performing RRD mutations"""

    binary = None

    def __init__( self, binary=None ):
        """Set an alternate binary if supplied"""

        if binary:
            self.binary = binary

    def create( self, filename, args ):
        """Create a new rrd with args"""

        global MODRRDTOOL

        if MODRRDTOOL:
            return self.perform( 'create', filename, args )
        else:
            return self.perform( 'create', '"' + filename + '"', args )

    def update( self, filename, args ):
        """Update a rrd with args"""

        global MODRRDTOOL

        if MODRRDTOOL:
            return self.perform( 'update', filename, args )
        else:
            return self.perform( 'update', '"' + filename + '"', args )

    def grabLastUpdate( self, filename ):
        """Determine the last update time of filename rrd"""

        global MODRRDTOOL

        last_update = 0

        # Use the py-rrdtool module if it's available on this system
        #
        if MODRRDTOOL:

            debug_msg( 8, 'rrdtool.info( ' + filename + ' )' )

            rrd_header = { }

            try:
                rrd_header = rrdtool.info( filename )
            except rrdtool.error, msg:
                debug_msg( 8, str( msg ) )

            if rrd_header.has_key( 'last_update' ):
                return rrd_header['last_update']
            else:
                return 0

        # For backwards compatibility: use the rrdtool binary if py-rrdtool is unavailable
        # DEPRECATED (slow!)
        #
        else:
            debug_msg( 8, self.binary + ' info ' + filename )

            my_pipe = os.popen( self.binary + ' info "' + filename + '"' )

            for line in my_pipe.readlines():

                if line.find( 'last_update') != -1:

                    last_update = line.split( ' = ' )[1]

            if my_pipe:

                my_pipe.close()

            if last_update:
                return last_update
            else:
                return 0


    def perform( self, action, filename, args ):
        """Perform action on rrd filename with args"""

        global MODRRDTOOL

        arg_string = None

        if type( args ) is not ListType:
            debug_msg( 8, 'Arguments need to be of type List' )
            return 1

        for arg in args:

            if not arg_string:

                arg_string = arg
            else:
                arg_string = arg_string + ' ' + arg

        if MODRRDTOOL:

            debug_msg( 8, 'rrdtool.' + action + "( " + filename + ' ' + arg_string + ")" )

            try:
                debug_msg( 8, "filename '" + str(filename) + "' type "+ str(type(filename)) + " args " + str( args ) )

                if action == 'create':

                    rrdtool.create( str( filename ), *args )

                elif action == 'update':

                    rrdtool.update( str( filename ), *args )

            except rrdtool.error, msg:

                error_msg = str( msg )
                debug_msg( 8, error_msg )
                return 1

        else:

            debug_msg( 8, self.binary + ' ' + action + ' ' + filename + ' ' + arg_string )

            cmd   = os.popen( self.binary + ' ' + action + ' ' + filename + ' ' + arg_string )
            lines = cmd.readlines()

            cmd.close()

            for line in lines:

                if line.find( 'ERROR' ) != -1:

                    error_msg = string.join( line.split( ' ' )[1:] )
                    debug_msg( 8, error_msg )
                    return 1

        return 0
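# Usage sketch (not in the original source; the rrd path and DS/RRA
# definitions below are hypothetical):
#
#   rrdm = RRDMutator()          # or RRDMutator( '/usr/bin/rrdtool' )
#   rrdm.create( '/data/jobarchive/c1/node1/load_one.rrd',
#                [ 'DS:sum:GAUGE:120:U:U', 'RRA:AVERAGE:0.5:1:576' ] )
#   rrdm.update( '/data/jobarchive/c1/node1/load_one.rrd', [ 'N:0.42' ] )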
class XMLProcessor:
    """Skeleton class for XML processors"""

    def run( self ):
        """Do main processing of XML here"""

        pass
class TorqueXMLProcessor( XMLProcessor ):
    """Main class for processing XML and acting on it"""

    def __init__( self, XMLSource, DataStore ):
        """Setup initial XML connection and handlers"""

        self.myXMLSource  = XMLSource
        self.myXMLHandler = TorqueXMLHandler( DataStore )
        self.myXMLError   = XMLErrorHandler()

        self.config = GangliaConfigParser( GMETAD_CONF )

    def run( self ):
        """Main XML processing"""

        debug_msg( 1, 'torque_xml_thread(): started.' )

        while( 1 ):

            #self.myXMLSource = self.mXMLGatherer.getFileObject()
            debug_msg( 1, 'torque_xml_thread(): Retrieving XML data..' )

            my_data = self.myXMLSource.getData()

            debug_msg( 1, 'torque_xml_thread(): Done retrieving.' )

            if my_data:
                debug_msg( 1, 'torque_xml_thread(): Parsing XML..' )

                xml.sax.parseString( my_data, self.myXMLHandler, self.myXMLError )

                debug_msg( 1, 'torque_xml_thread(): Done parsing.' )

            debug_msg( 1, 'torque_xml_thread(): Sleeping.. (%ss)' %(str( self.config.getLowestInterval() ) ) )
            time.sleep( float( self.config.getLowestInterval() ) )
class TorqueXMLHandler( xml.sax.handler.ContentHandler ):
    """Parse Torque's jobinfo XML from our plugin"""

    jobAttrs = { }

    def __init__( self, datastore ):

        #self.ds = DataSQLStore( JOB_SQL_DBASE.split( '/' )[0], JOB_SQL_DBASE.split( '/' )[1] )
        self.ds             = datastore
        self.jobs_processed = [ ]
        self.jobs_to_store  = [ ]

    def startDocument( self ):

        self.heartbeat = 0
        self.elementct = 0

    def startElement( self, name, attrs ):
        """
        This XML will be all gmetric XML
        so there will be no specific start/end element
        just one XML statement with all info
        """

        jobinfo = { }

        self.elementct += 1

        if name == 'CLUSTER':

            self.clustername = str( attrs.get( 'NAME', "" ) )

        elif name == 'METRIC' and self.clustername in ARCHIVE_DATASOURCES:

            metricname = str( attrs.get( 'NAME', "" ) )

            if metricname == 'MONARCH-HEARTBEAT':
                self.heartbeat = str( attrs.get( 'VAL', "" ) )

            elif metricname.find( 'MONARCH-JOB' ) != -1:

                job_id = metricname.split( 'MONARCH-JOB-' )[1].split( '-' )[0]
                val    = str( attrs.get( 'VAL', "" ) )

                if not job_id in self.jobs_processed:

                    self.jobs_processed.append( job_id )

                check_change = 0

                if self.jobAttrs.has_key( job_id ):

                    check_change = 1

                valinfo = val.split( ' ' )

                for myval in valinfo:

                    if len( myval.split( '=' ) ) > 1:

                        valname = myval.split( '=' )[0]
                        value   = myval.split( '=' )[1]

                        if valname == 'nodes':
                            value = value.split( ';' )

                        jobinfo[ valname ] = value

                if check_change:
                    if self.jobinfoChanged( self.jobAttrs, job_id, jobinfo ) and self.jobAttrs[ job_id ]['status'] in [ 'R', 'Q' ]:
                        self.jobAttrs[ job_id ]['stop_timestamp'] = ''
                        self.jobAttrs[ job_id ]                   = self.setJobAttrs( self.jobAttrs[ job_id ], jobinfo )
                        if not job_id in self.jobs_to_store:
                            self.jobs_to_store.append( job_id )

                        debug_msg( 10, 'jobinfo for job %s has changed' %job_id )
                else:
                    self.jobAttrs[ job_id ] = jobinfo

                    if not job_id in self.jobs_to_store:
                        self.jobs_to_store.append( job_id )

                    debug_msg( 10, 'jobinfo for job %s has changed' %job_id )
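    # Illustrative note (not in the original source): as parsed above, the
    # VAL attribute of a MONARCH-JOB metric is a space-separated list of
    # key=value pairs, with nodes further separated by ';'. A hypothetical
    # example:
    #
    #   name=myrun queue=batch owner=jdoe status=R nodes=node1;node2 reported=1204725948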
    def endDocument( self ):
        """When all metrics have gone, check if any jobs have finished"""

        debug_msg( 1, "XML: Processed "+str(self.elementct)+ " elements - found "+str(len(self.jobs_to_store))+" (updated) jobs" )

        if self.heartbeat:
            for jobid, jobinfo in self.jobAttrs.items():

                # This is an old job, not in the current jobinfo list anymore:
                # it must have finished, since we _did_ get a new heartbeat
                #
                mytime = int( jobinfo['reported'] ) + int( jobinfo['poll_interval'] )

                if (mytime < self.heartbeat) and (jobid not in self.jobs_processed) and (jobinfo['status'] == 'R'):

                    if not jobid in self.jobs_processed:
                        self.jobs_processed.append( jobid )

                    self.jobAttrs[ jobid ]['status']         = 'F'
                    self.jobAttrs[ jobid ]['stop_timestamp'] = str( self.heartbeat )

                    if not jobid in self.jobs_to_store:
                        self.jobs_to_store.append( jobid )

            debug_msg( 1, 'torque_xml_thread(): Storing..' )

            for jobid in self.jobs_to_store:
                if self.jobAttrs[ jobid ]['status'] in [ 'R', 'F' ]:

                    self.ds.storeJobInfo( jobid, self.jobAttrs[ jobid ] )

                    if self.jobAttrs[ jobid ]['status'] == 'F':
                        del self.jobAttrs[ jobid ]

            debug_msg( 1, 'torque_xml_thread(): Done storing.' )

            self.jobs_processed = [ ]
            self.jobs_to_store  = [ ]
    def setJobAttrs( self, old, new ):
        """
        Set new job attributes in old, without losing
        existing fields that the new attributes lack
        """

        for valname, value in new.items():
            old[ valname ] = value

        return old


    def jobinfoChanged( self, jobattrs, jobid, jobinfo ):
        """
        Check if jobinfo has changed from jobattrs[jobid]:
        a changed value only counts if its report time is newer
        than the previous one and recent (equal to the heartbeat)
        """

        ignore_changes = [ 'reported' ]

        if jobattrs.has_key( jobid ):

            for valname, value in jobinfo.items():

                if valname not in ignore_changes:

                    if jobattrs[ jobid ].has_key( valname ):

                        if value != jobattrs[ jobid ][ valname ]:

                            if jobinfo['reported'] > jobattrs[ jobid ][ 'reported' ] and jobinfo['reported'] == self.heartbeat:
                                return True

                    else:
                        return True

        return False
class GangliaXMLHandler( xml.sax.handler.ContentHandler ):
    """Parse Ganglia's XML"""

    def __init__( self, config, datastore ):
        """Setup initial variables and gather info on existing rrd archive"""

        self.config   = config
        self.clusters = { }
        self.ds       = datastore

        debug_msg( 1, 'Checking database..' )

        global DEBUG_LEVEL

        if DEBUG_LEVEL <= 2:
            self.ds.checkStaleJobs()

        debug_msg( 1, 'Check done.' )
        debug_msg( 1, 'Checking rrd archive..' )
        self.gatherClusters()
        debug_msg( 1, 'Check done.' )
    def gatherClusters( self ):
        """Find all existing clusters in the archive dir"""

        archive_dir = check_dir(ARCHIVE_PATH)

        hosts = [ ]

        if os.path.exists( archive_dir ):

            dirlist = os.listdir( archive_dir )

            for cfgcluster in ARCHIVE_DATASOURCES:

                if cfgcluster not in dirlist:

                    # Autocreate a directory for this cluster
                    # assume it is new
                    #
                    cluster_dir = '%s/%s' %( check_dir(ARCHIVE_PATH), cfgcluster )

                    os.mkdir( cluster_dir )

                    dirlist.append( cfgcluster )

            for item in dirlist:

                clustername = item

                if not self.clusters.has_key( clustername ) and clustername in ARCHIVE_DATASOURCES:

                    self.clusters[ clustername ] = RRDHandler( self.config, clustername )

        debug_msg( 9, "Found "+str(len(self.clusters.keys()))+" clusters" )
    def startElement( self, name, attrs ):
        """Memorize appropriate data from xml start tags"""

        if name == 'GANGLIA_XML':

            self.XMLSource      = str( attrs.get( 'SOURCE', "" ) )
            self.gangliaVersion = str( attrs.get( 'VERSION', "" ) )

            debug_msg( 10, 'Found XML data: source %s version %s' %( self.XMLSource, self.gangliaVersion ) )

        elif name == 'GRID':

            self.gridName = str( attrs.get( 'NAME', "" ) )
            self.time     = str( attrs.get( 'LOCALTIME', "" ) )

            debug_msg( 10, '`-Grid found: %s' %( self.gridName ) )

        elif name == 'CLUSTER':

            self.clusterName = str( attrs.get( 'NAME', "" ) )
            self.time        = str( attrs.get( 'LOCALTIME', "" ) )

            if not self.clusters.has_key( self.clusterName ) and self.clusterName in ARCHIVE_DATASOURCES:

                self.clusters[ self.clusterName ] = RRDHandler( self.config, self.clusterName )

            debug_msg( 10, ' |-Cluster found: %s' %( self.clusterName ) )

        elif name == 'HOST' and self.clusterName in ARCHIVE_DATASOURCES:

            self.hostName     = str( attrs.get( 'NAME', "" ) )
            self.hostIp       = str( attrs.get( 'IP', "" ) )
            self.hostReported = str( attrs.get( 'REPORTED', "" ) )

            debug_msg( 10, ' | |-Host found: %s - ip %s reported %s' %( self.hostName, self.hostIp, self.hostReported ) )

        elif name == 'METRIC' and self.clusterName in ARCHIVE_DATASOURCES:

            type = str( attrs.get( 'TYPE', "" ) )

            exclude_metric = False

            for ex_metricstr in ARCHIVE_EXCLUDE_METRICS:

                orig_name = str( attrs.get( 'NAME', "" ) )

                if string.lower( orig_name ) == string.lower( ex_metricstr ):

                    exclude_metric = True

                elif re.match( ex_metricstr, orig_name ):

                    exclude_metric = True

            if type not in UNSUPPORTED_ARCHIVE_TYPES and not exclude_metric:

                myMetric         = { }
                myMetric['name'] = str( attrs.get( 'NAME', "" ) )
                myMetric['val']  = str( attrs.get( 'VAL', "" ) )
                myMetric['time'] = self.hostReported

                self.clusters[ self.clusterName ].memMetric( self.hostName, myMetric )

                debug_msg( 11, ' | | |-metric: %s:%s' %( myMetric['name'], myMetric['val'] ) )
    def storeMetrics( self ):
        """Store metrics of each cluster rrd handler"""

        for clustername, rrdh in self.clusters.items():

            ret = rrdh.storeMetrics()

            if ret:
                debug_msg( 9, 'An error occurred while storing metrics for cluster %s' %clustername )
                return 1

        return 0
class XMLErrorHandler( xml.sax.handler.ErrorHandler ):

    def error( self, exception ):
        """Recoverable error"""

        debug_msg( 0, 'Recoverable XML error ' + str( exception ) + ' ignored.' )

    def fatalError( self, exception ):
        """Non-recoverable error"""

        exception_str = str( exception )

        # Ignore 'no element found' errors
        if exception_str.find( 'no element found' ) != -1:
            debug_msg( 0, 'No XML data found: Socket not (re)connected or datasource not available.' )
            return 0

        debug_msg( 0, 'FATAL ERROR: Non-recoverable XML error ' + str( exception ) )
        sys.exit( 1 )

    def warning( self, exception ):
        """Warning"""

        debug_msg( 0, 'Warning ' + str( exception ) )
class XMLGatherer:
    """Setup a connection and file object to Ganglia's XML"""

    s    = None
    fd   = None
    data = None
    slot = None

    # Time since the last update
    #
    LAST_UPDATE = 0

    # Minimum interval between updates
    #
    MIN_UPDATE_INT = 10

    # Is an update occurring now
    #
    update_now = False

    def __init__( self, host, port ):
        """Store host and port for connection"""

        self.host = host
        self.port = port
        self.slot = threading.Lock()

        self.retrieveData()
    def retrieveData( self ):
        """Setup connection to XML source"""

        self.update_now = True

        self.slot.acquire()

        self.data = None

        for res in socket.getaddrinfo( self.host, self.port, socket.AF_UNSPEC, socket.SOCK_STREAM ):

            af, socktype, proto, canonname, sa = res

            try:

                self.s = socket.socket( af, socktype, proto )

            except ( socket.error, socket.gaierror, socket.herror, socket.timeout ), msg:

                self.s = None
                continue

            try:

                self.s.connect( sa )

            except ( socket.error, socket.gaierror, socket.herror, socket.timeout ), msg:

                self.disconnect()
                continue

            break

        if self.s is None:

            debug_msg( 0, 'FATAL ERROR: Could not open socket or unable to connect to datasource!' )

            # Release the lock here too, or the next update would block forever
            #
            self.slot.release()

            self.update_now = False
            #sys.exit( 1 )

        else:
            #self.s.send( '\n' )

            my_fp   = self.s.makefile( 'r' )
            my_data = my_fp.readlines()
            my_data = string.join( my_data, '' )

            self.data = my_data

            self.LAST_UPDATE = time.time()

            self.slot.release()

            self.update_now = False
    def disconnect( self ):
        """Close socket"""

        if self.s:
            #self.s.shutdown( 2 )
            self.s.close()
            self.s = None

    def __del__( self ):
        """Kill the socket before we leave"""

        self.disconnect()

    def reGetData( self ):
        """Reconnect"""

        while self.update_now:

            # Must be another update in progress:
            # Wait until the update is complete
            #
            time.sleep( 1 )

        if self.s:
            self.disconnect()

        self.retrieveData()

    def getData( self ):

        """Return the XML data"""

        # If more than MIN_UPDATE_INT seconds have passed since the last
        # data update, refresh the XML first before returning it
        #

        cur_time = time.time()

        if ( cur_time - self.LAST_UPDATE ) > self.MIN_UPDATE_INT:

            self.reGetData()

        while self.update_now:

            # Must be another update in progress:
            # Wait until the update is complete
            #
            time.sleep( 1 )

        return self.data
    def makeFileDescriptor( self ):
        """Make a file descriptor that points to our socket connection"""

        self.reGetData()

        if self.s:
            self.fd = self.s.makefile( 'r' )

    def getFileObject( self ):
        """Connect, and return a file object"""

        self.makeFileDescriptor()

        if self.fd:
            return self.fd
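# Usage sketch (not in the original source; the host is hypothetical,
# 8651 is gmetad's usual interactive XML port):
#
#   gatherer = XMLGatherer( 'localhost', 8651 )
#   xml_data = gatherer.getData()  # refreshed at most every MIN_UPDATE_INT seconds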
class GangliaXMLProcessor( XMLProcessor ):
    """Main class for processing XML and acting on it"""

    def __init__( self, XMLSource, DataStore ):
        """Setup initial XML connection and handlers"""

        self.config = GangliaConfigParser( GMETAD_CONF )

        #self.myXMLGatherer = XMLGatherer( ARCHIVE_XMLSOURCE.split( ':' )[0], ARCHIVE_XMLSOURCE.split( ':' )[1] )
        #self.myXMLSource   = self.myXMLGatherer.getFileObject()
        self.myXMLSource  = XMLSource
        self.ds           = DataStore
        self.myXMLHandler = GangliaXMLHandler( self.config, self.ds )
        self.myXMLError   = XMLErrorHandler()
    def run( self ):
        """Main XML processing; start a xml and storethread"""

        xml_thread   = threading.Thread( None, self.processXML, 'xmlthread' )
        store_thread = threading.Thread( None, self.storeMetrics, 'storethread' )

        while( 1 ):

            if not xml_thread.isAlive():
                # Gather XML at the same interval as gmetad

                # threaded call to: self.processXML()
                #
                try:
                    xml_thread = threading.Thread( None, self.processXML, 'xml_thread' )
                    xml_thread.start()
                except thread.error, msg:
                    debug_msg( 0, 'ERROR: Unable to start xml_thread!: '+str(msg))
                    #return 1

            if not store_thread.isAlive():
                # Store metrics every .. sec

                # threaded call to: self.storeMetrics()
                #
                try:
                    store_thread = threading.Thread( None, self.storeMetrics, 'store_thread' )
                    store_thread.start()
                except thread.error, msg:
                    debug_msg( 0, 'ERROR: Unable to start store_thread!: '+str(msg))
                    #return 1

            # Just sleep a sec here, to prevent the daemon from going mad. We're all threads here anyway
            time.sleep( 1 )
    def storeMetrics( self ):
        """Store metrics retained in memory to disk"""

        global DEBUG_LEVEL

        # Store metrics somewhere between every 360 and 640 seconds
        #
        if DEBUG_LEVEL > 2:
            #STORE_INTERVAL = 60
            STORE_INTERVAL = random.randint( 360, 640 )
        else:
            STORE_INTERVAL = random.randint( 360, 640 )

        try:
            store_metric_thread = threading.Thread( None, self.storeThread, 'store_metric_thread' )
            store_metric_thread.start()
        except thread.error, msg:
            debug_msg( 0, 'ERROR: Unable to start ganglia_store_thread()!: '+str(msg) )
            return 1

        debug_msg( 1, 'ganglia_store_thread(): started.' )

        debug_msg( 1, 'ganglia_store_thread(): Sleeping.. (%ss)' %STORE_INTERVAL )
        time.sleep( STORE_INTERVAL )
        debug_msg( 1, 'ganglia_store_thread(): Done sleeping.' )

        if store_metric_thread.isAlive():

            debug_msg( 1, 'ganglia_store_thread(): storemetricthread() still running, waiting to finish..' )
            store_metric_thread.join( STORE_TIMEOUT ) # Maximum time for the storing thread to finish
            debug_msg( 1, 'ganglia_store_thread(): Done waiting.' )

        debug_msg( 1, 'ganglia_store_thread(): finished.' )

        return 0
    def storeThread( self ):
        """Actual metric storing thread"""

        debug_msg( 1, 'ganglia_store_metric_thread(): started.' )
        debug_msg( 1, 'ganglia_store_metric_thread(): Storing data..' )

        ret = self.myXMLHandler.storeMetrics()
        if ret > 0:
            debug_msg( 0, 'ganglia_store_metric_thread(): UNKNOWN ERROR %s while storing Metrics!' %str(ret) )

        debug_msg( 1, 'ganglia_store_metric_thread(): Done storing.' )
        debug_msg( 1, 'ganglia_store_metric_thread(): finished.' )

        return 0
    def processXML( self ):
        """Process XML"""

        try:
            parsethread = threading.Thread( None, self.parseThread, 'parsethread' )
            parsethread.start()
        except thread.error, msg:
            debug_msg( 0, 'ERROR: Unable to start ganglia_xml_thread()!: ' + str(msg) )
            return 1

        debug_msg( 1, 'ganglia_xml_thread(): started.' )

        debug_msg( 1, 'ganglia_xml_thread(): Sleeping.. (%ss)' %self.config.getLowestInterval() )
        time.sleep( float( self.config.getLowestInterval() ) )
        debug_msg( 1, 'ganglia_xml_thread(): Done sleeping.' )

        if parsethread.isAlive():

            debug_msg( 1, 'ganglia_xml_thread(): parsethread() still running, waiting (%ss) to finish..' %PARSE_TIMEOUT )
            parsethread.join( PARSE_TIMEOUT ) # Maximum time for the XML thread to finish
            debug_msg( 1, 'ganglia_xml_thread(): Done waiting.' )

        debug_msg( 1, 'ganglia_xml_thread(): finished.' )

        return 0
    def parseThread( self ):
        """Actual parsing thread"""

        debug_msg( 1, 'ganglia_parse_thread(): started.' )
        debug_msg( 1, 'ganglia_parse_thread(): Retrieving XML data..' )

        my_data = self.myXMLSource.getData()

        debug_msg( 1, 'ganglia_parse_thread(): Done retrieving.' )

        if my_data:
            debug_msg( 1, 'ganglia_parse_thread(): Parsing XML..' )
            xml.sax.parseString( my_data, self.myXMLHandler, self.myXMLError )
            debug_msg( 1, 'ganglia_parse_thread(): Done parsing.' )

        debug_msg( 1, 'ganglia_parse_thread(): finished.' )

        return 0
class GangliaConfigParser:

    sources = [ ]

    def __init__( self, config ):
        """Parse some values from our gmetad's config, such as the polling interval"""

        self.config = config
        self.parseValues()

    def parseValues( self ):
        """Parse certain values from gmetad.conf"""

        readcfg = open( self.config, 'r' )

        for line in readcfg.readlines():

            if line.count( '"' ) > 1:

                if line.find( 'data_source' ) != -1 and line[0] != '#':

                    source         = { }
                    source['name'] = line.split( '"' )[1]
                    source_words   = line.split( '"' )[2].split( ' ' )

                    for word in source_words:

                        valid_interval = 1

                        for letter in word:

                            if letter not in string.digits:

                                valid_interval = 0

                        if valid_interval and len(word) > 0:

                            source['interval'] = word
                            debug_msg( 9, 'polling interval for %s = %s' %(source['name'], source['interval'] ) )

                    # No interval found, use Ganglia's default
                    if not source.has_key( 'interval' ):
                        source['interval'] = 15
                        debug_msg( 9, 'polling interval for %s defaulted to 15' %(source['name']) )

                    self.sources.append( source )
| 1478 | def getInterval( self, source_name ): |
---|
[63] | 1479 | """Return interval for source_name""" |
---|
[32] | 1480 | |
---|
[9] | 1481 | for source in self.sources: |
---|
[32] | 1482 | |
---|
[12] | 1483 | if source['name'] == source_name: |
---|
[32] | 1484 | |
---|
[9] | 1485 | return source['interval'] |
---|
[32] | 1486 | |
---|
[9] | 1487 | return None |
---|
| 1488 | |
---|
[34] | 1489 | def getLowestInterval( self ): |
---|
[63] | 1490 | """Return the lowest interval of all clusters""" |
---|
[34] | 1491 | |
---|
| 1492 | lowest_interval = 0 |
---|
| 1493 | |
---|
| 1494 | for source in self.sources: |
---|
| 1495 | |
---|
| 1496 | if not lowest_interval or source['interval'] <= lowest_interval: |
---|
| 1497 | |
---|
| 1498 | lowest_interval = source['interval'] |
---|
| 1499 | |
---|
| 1500 | # Return 15 when nothing is found, so that the daemon won't go insane with 0 sec delays |
---|
| 1501 | if lowest_interval: |
---|
| 1502 | return lowest_interval |
---|
| 1503 | else: |
---|
| 1504 | return 15 |
---|
| 1505 | |
---|
[9] | 1506 | class RRDHandler: |
---|
[63] | 1507 | """Class for handling RRD activity""" |
---|
[9] | 1508 | |
---|
[32] | 1509 | myMetrics = { } |
---|
[40] | 1510 | lastStored = { } |
---|
[47] | 1511 | timeserials = { } |
---|
[36] | 1512 | slot = None |
---|
[32] | 1513 | |
---|
[33] | 1514 | def __init__( self, config, cluster ): |
---|
[63] | 1515 | """Setup initial variables""" |
---|
[78] | 1516 | |
---|
[455] | 1517 | global MODRRDTOOL |
---|
| 1518 | |
---|
[292] | 1519 | self.block = 0 |
---|
| 1520 | self.cluster = cluster |
---|
| 1521 | self.config = config |
---|
| 1522 | self.slot = threading.Lock() |
---|
| 1523 | |
---|
[455] | 1524 | if MODRRDTOOL: |
---|
| 1525 | |
---|
| 1526 | self.rrdm = RRDMutator() |
---|
| 1527 | else: |
---|
| 1528 | self.rrdm = RRDMutator( RRDTOOL ) |
---|
| 1529 | |
---|
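        # Note: RRDMutator is defined earlier in this file. The argument-less
        # form is presumed to use the rrdtool Python bindings directly, while
        # the fallback shells out to the external binary found at RRDTOOL.
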
        global DEBUG_LEVEL

        if DEBUG_LEVEL <= 2:
            self.gatherLastUpdates()

    def gatherLastUpdates( self ):
        """Populate the lastStored list, containing timestamps of all last updates"""

        cluster_dir = '%s/%s' %( check_dir(ARCHIVE_PATH), self.cluster )

        hosts = [ ]

        if os.path.exists( cluster_dir ):

            dirlist = os.listdir( cluster_dir )

            for dir in dirlist:

                hosts.append( dir )

        for host in hosts:

            host_dir = cluster_dir + '/' + host
            dirlist = os.listdir( host_dir )

            for dir in dirlist:

                if not self.timeserials.has_key( host ):

                    self.timeserials[ host ] = [ ]

                self.timeserials[ host ].append( dir )

            last_serial = self.getLastRrdTimeSerial( host )

            if last_serial:

                metric_dir = cluster_dir + '/' + host + '/' + last_serial

                if os.path.exists( metric_dir ):

                    dirlist = os.listdir( metric_dir )

                    for file in dirlist:

                        metricname = file.split( '.rrd' )[0]

                        if not self.lastStored.has_key( host ):

                            self.lastStored[ host ] = { }

                        self.lastStored[ host ][ metricname ] = self.rrdm.grabLastUpdate( metric_dir + '/' + file )

    def getClusterName( self ):
        """Return clustername"""

        return self.cluster

    def memMetric( self, host, metric ):
        """Store metric from host in memory"""

        # <ATOMIC>
        #
        self.slot.acquire()

        if self.myMetrics.has_key( host ):

            if self.myMetrics[ host ].has_key( metric['name'] ):

                for mymetric in self.myMetrics[ host ][ metric['name'] ]:

                    if mymetric['time'] == metric['time']:

                        # Already have this metric, abort
                        self.slot.release()
                        return 1
            else:
                self.myMetrics[ host ][ metric['name'] ] = [ ]
        else:
            self.myMetrics[ host ] = { }
            self.myMetrics[ host ][ metric['name'] ] = [ ]

        # Push new metric onto stack
        # atomic code; only 1 thread at a time may access the stack

        self.myMetrics[ host ][ metric['name'] ].append( metric )

        self.slot.release()
        #
        # </ATOMIC>

    def makeUpdateList( self, host, metriclist ):
        """
        Make a list of update values for rrdupdate,
        but only those that we didn't store before
        """

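        # Each element in the returned list has the form rrdtool expects
        # for an update:
        #
        #   <timestamp>:<value>    e.g. 1204722348:0.02  (illustrative only)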
        update_list = [ ]
        metric = None

        while len( metriclist ) > 0:

            metric = metriclist.pop( 0 )

            if self.checkStoreMetric( host, metric ):

                u_val = str( metric['time'] ) + ':' + str( metric['val'] )
                update_list.append( u_val )

        return update_list

    def checkStoreMetric( self, host, metric ):
        """Check if the supplied metric is newer than the last one stored"""

        if self.lastStored.has_key( host ):

            if self.lastStored[ host ].has_key( metric['name'] ):

                # Times may be strings or ints depending on where they came
                # from; compare numerically
                if int( metric['time'] ) <= int( self.lastStored[ host ][ metric['name'] ] ):

                    # This is old
                    return 0

        return 1

    def memLastUpdate( self, host, metricname, metriclist ):
        """
        Memorize the time of the latest metric from metriclist,
        but only if it wasn't already memorized
        """

        if not self.lastStored.has_key( host ):
            self.lastStored[ host ] = { }

        last_update_time = 0

        for metric in metriclist:

            if metric['name'] == metricname:

                if int( metric['time'] ) > int( last_update_time ):

                    last_update_time = metric['time']

        if self.lastStored[ host ].has_key( metricname ):

            if int( last_update_time ) <= int( self.lastStored[ host ][ metricname ] ):
                return 1

        self.lastStored[ host ][ metricname ] = last_update_time

    def storeMetrics( self ):
        """
        Store all metrics from memory to disk,
        writing to the RRDs in the appropriate timeperiod directory
        """

        debug_msg( 5, "Entering storeMetrics()")

        count_values = 0
        count_metrics = 0
        count_bytes = 0

        for hostname, mymetrics in self.myMetrics.items():

            for metricname, mymetric in mymetrics.items():

                count_metrics += 1

                for dmetric in mymetric:

                    count_values += 1

                    # len() counts characters; these are ascii strings, so bytes
                    count_bytes += len( dmetric['time'] )
                    count_bytes += len( dmetric['val'] )

        count_bits = count_bytes * 8

        debug_msg( 5, "size of cluster '" + self.cluster + "': " +
            str( len( self.myMetrics.keys() ) ) + " hosts " +
            str( count_metrics ) + " metrics " + str( count_values ) + " values " +
            str( count_bits ) + " bits " + str( count_bytes ) + " bytes " )

        for hostname, mymetrics in self.myMetrics.items():

            for metricname, mymetric in mymetrics.items():

                metrics_to_store = [ ]

                # Pop metrics from stack for storing until none is left
                # atomic code: only 1 thread at a time may access myMetrics

                # <ATOMIC>
                #
                self.slot.acquire()

                while len( self.myMetrics[ hostname ][ metricname ] ) > 0:

                    try:
                        metrics_to_store.append( self.myMetrics[ hostname ][ metricname ].pop( 0 ) )
                    except IndexError, msg:

                        # Somehow sometimes myMetrics[ hostname ][ metricname ]
                        # is still len 0 when the pop is executed.
                        # Just ignore IndexErrors..
                        pass

                self.slot.release()
                #
                # </ATOMIC>

                # Create a mapping table, each metric to the period where it should be stored
                #
                metric_serial_table = self.determineSerials( hostname, metricname, metrics_to_store )

                update_rets = [ ]

                for period, pmetric in metric_serial_table.items():

                    create_ret = self.createCheck( hostname, metricname, period )

                    update_ret = self.update( hostname, metricname, period, pmetric )

                    if update_ret == 0:

                        debug_msg( 9, 'stored metric %s for %s' %( hostname, metricname ) )
                    else:
                        debug_msg( 9, 'metric update failed' )

                    update_rets.append( create_ret )
                    update_rets.append( update_ret )

                # Let's ignore errors here for now, we need to make sure the
                # last update time is correct!
                #
                #if not (1) in update_rets:

                self.memLastUpdate( hostname, metricname, metrics_to_store )

        debug_msg( 5, "Leaving storeMetrics()")

    def makeTimeSerial( self ):
        """Generate a time serial: seconds since epoch"""

        mytime = int( time.time() )

        return mytime

    def makeRrdPath( self, host, metricname, timeserial ):
        """Make a RRD location/path and filename"""

        rrd_dir = '%s/%s/%s/%s' %( check_dir(ARCHIVE_PATH), self.cluster, host, timeserial )
        rrd_file = '%s/%s.rrd' %( rrd_dir, metricname )

        return rrd_dir, rrd_file

    def getLastRrdTimeSerial( self, host ):
        """Find the last timeserial (directory) for this host"""

        newest_timeserial = 0

        for dir in self.timeserials[ host ]:

            valid_dir = 1

            for letter in dir:
                if letter not in string.digits:
                    valid_dir = 0

            if valid_dir:
                timeserial = dir
                # Compare numerically: these are epoch timestamps as strings
                if int( timeserial ) > int( newest_timeserial ):
                    newest_timeserial = timeserial

        if newest_timeserial:
            return newest_timeserial
        else:
            return 0

    def determinePeriod( self, host, check_serial ):
        """Determine to which period (directory) this time(serial) belongs"""

        period_serial = 0

        if self.timeserials.has_key( host ):

            for serial in self.timeserials[ host ]:

                # Compare numerically here too: serials are strings
                if int( check_serial ) >= int( serial ) and int( period_serial ) < int( serial ):

                    period_serial = serial

        return period_serial

    def determineSerials( self, host, metricname, metriclist ):
        """
        Determine the correct serial and corresponding rrd to store
        for a list of metrics
        """

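        # A "period" is the timeserial directory a metric falls into. Each
        # period spans at most ARCHIVE_HOURS_PER_RRD hours: a metric further
        # than that past its period's start gets a fresh period, named after
        # the metric's own timestamp.
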
        metric_serial_table = { }

        for metric in metriclist:

            if metric['name'] == metricname:

                period = self.determinePeriod( host, metric['time'] )

                archive_secs = ARCHIVE_HOURS_PER_RRD * (60 * 60)

                if (int( metric['time'] ) - int( period ) ) > archive_secs:

                    # This one should get its own new period
                    period = metric['time']

                    if not self.timeserials.has_key( host ):
                        self.timeserials[ host ] = [ ]

                    self.timeserials[ host ].append( period )

                if not metric_serial_table.has_key( period ):

                    metric_serial_table[ period ] = [ ]

                metric_serial_table[ period ].append( metric )

        return metric_serial_table

    def createCheck( self, host, metricname, timeserial ):
        """Check if an rrd already exists for this metric, create it if not"""

        debug_msg( 9, 'rrdcreate: using timeserial %s for %s/%s' %( timeserial, host, metricname ) )

        rrd_dir, rrd_file = self.makeRrdPath( host, metricname, timeserial )

        if not os.path.exists( rrd_dir ):

            try:
                os.makedirs( rrd_dir )

            except OSError, msg:

                # 'File exists' just means another thread beat us to it,
                # which is harmless
                if str( msg ).find( 'File exists' ) != -1:
                    pass
                else:
                    print msg
                    return

            debug_msg( 9, 'created dir %s' %( str(rrd_dir) ) )

        if not os.path.exists( rrd_file ):

            interval = self.config.getInterval( self.cluster )
            heartbeat = 8 * int( interval )

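            # A heartbeat of 8 polling intervals means rrdtool tolerates up
            # to 8 missed polls before recording UNKNOWN for a step. The RRA
            # below keeps ARCHIVE_HOURS_PER_RRD * 240 rows of unconsolidated
            # (one-step) averages; note the hard-coded 240 rows/hour spans
            # exactly ARCHIVE_HOURS_PER_RRD hours only at the 15 s default step.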
            params = [ ]

            params.append( '--step' )
            params.append( str( interval ) )

            params.append( '--start' )
            params.append( str( int( timeserial ) - 1 ) )

            params.append( 'DS:sum:GAUGE:%d:U:U' %heartbeat )
            params.append( 'RRA:AVERAGE:0.5:1:%s' %(ARCHIVE_HOURS_PER_RRD * 240) )

            self.rrdm.create( str(rrd_file), params )

            debug_msg( 9, 'created rrd %s' %( str(rrd_file) ) )

    def update( self, host, metricname, timeserial, metriclist ):
        """
        Update rrd file for host with metricname
        in directory timeserial with metriclist
        """

        debug_msg( 9, 'rrdupdate: using timeserial %s for %s/%s' %( timeserial, host, metricname ) )

        rrd_dir, rrd_file = self.makeRrdPath( host, metricname, timeserial )

        update_list = self.makeUpdateList( host, metriclist )

        if len( update_list ) > 0:
            ret = self.rrdm.update( str(rrd_file), update_list )

            if ret:
                return 1

            debug_msg( 9, 'updated rrd %s with %s' %( str(rrd_file), string.join( update_list ) ) )

        return 0

def daemon():
    """Daemonized threading"""

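    # Classic UNIX double fork: the first fork lets the parent exit, so the
    # child is adopted by init; setsid() then makes it a session leader with
    # no controlling terminal; the second fork guarantees it can never
    # reacquire one. What remains is a fully detached daemon process.
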
    # Fork the first child
    #
    pid = os.fork()

    if pid > 0:

        sys.exit(0) # end parent

    # creates a session and sets the process group ID
    #
    os.setsid()

    # Fork the second child
    #
    pid = os.fork()

    if pid > 0:

        sys.exit(0) # end parent

    write_pidfile()

    # Go to the root directory and set the umask
    #
    os.chdir('/')
    os.umask(0)

    sys.stdin.close()
    sys.stdout.close()
    sys.stderr.close()

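    # With fds 0-2 now closed, os.open() returns the lowest free descriptor
    # (0), so /dev/null becomes stdin; dup2() points stdout and stderr at it.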
    os.open('/dev/null', os.O_RDWR)
    os.dup2(0, 1)
    os.dup2(0, 2)

    run()

def run():
    """Threading start"""

    config = GangliaConfigParser( GMETAD_CONF )
    s_timeout = int( config.getLowestInterval() - 1 )

    socket.setdefaulttimeout( s_timeout )

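    # Presumably: with the default socket timeout set just under the shortest
    # polling interval, a gmetad that stops responding cannot block a poll
    # past the start of the next cycle.
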
    myXMLSource = XMLGatherer( ARCHIVE_XMLSOURCE.split( ':' )[0], ARCHIVE_XMLSOURCE.split( ':' )[1] )
    myDataStore = DataSQLStore( JOB_SQL_DBASE.split( '/' )[0], JOB_SQL_DBASE.split( '/' )[1] )

    myTorqueProcessor = TorqueXMLProcessor( myXMLSource, myDataStore )
    myGangliaProcessor = GangliaXMLProcessor( myXMLSource, myDataStore )

    try:
        torque_xml_thread = threading.Thread( None, myTorqueProcessor.run, 'torque_proc_thread' )
        ganglia_xml_thread = threading.Thread( None, myGangliaProcessor.run, 'ganglia_proc_thread' )

        torque_xml_thread.start()
        ganglia_xml_thread.start()

    except thread.error, msg:
        debug_msg( 0, 'FATAL ERROR: Unable to start main threads!: ' + str(msg) )
        syslog.closelog()
        sys.exit(1)

    debug_msg( 0, 'main threading started.' )

def main():
    """Program startup"""

    global DAEMONIZE, USE_SYSLOG

    if not processArgs( sys.argv[1:] ):
        sys.exit( 1 )

    if DAEMONIZE and USE_SYSLOG:
        syslog.openlog( 'jobarchived', syslog.LOG_NOWAIT, SYSLOG_FACILITY )

    if DAEMONIZE:
        daemon()
    else:
        run()

#
# Global functions
#

def check_dir( directory ):
    """Strip a trailing '/' from directory, if present, so paths can be joined cleanly"""

    if directory[-1] == '/':
        directory = directory[:-1]

    return directory

def reqtime2epoch( rtime ):
    """Convert a requested-time string in HH:MM:SS format to a number of seconds"""

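    # Worked example: '02:30:10' -> 10 + (30 * 60) + (2 * 60 * 60) = 9010 seconds
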
    ( hours, minutes, seconds ) = rtime.split( ':' )

    etime = int(seconds)
    etime = etime + ( int(minutes) * 60 )
    etime = etime + ( int(hours) * 60 * 60 )

    return etime

def debug_msg( level, msg ):
    """Print or syslog msg, but only when level fits within the configured verbosity"""

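    # Lower levels are more important: a level 0 message passes any
    # non-negative DEBUG_LEVEL or SYSLOG_LEVEL threshold checked below.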
    if not DAEMONIZE and DEBUG_LEVEL >= level:
        sys.stderr.write( printTime() + ' - ' + msg + '\n' )

    if DAEMONIZE and USE_SYSLOG and SYSLOG_LEVEL >= level:
        syslog.syslog( msg )

def printTime( ):
    """Return the current time in human readable format"""

    return time.strftime("%a %d %b %Y %H:%M:%S")

def write_pidfile():
    """Write our process id to PIDFILE, if one was configured"""

    if PIDFILE:

        pid = os.getpid()

        pidfile = open( PIDFILE, 'w' )

        pidfile.write( str( pid ) )
        pidfile.close()

# Ooohh, someone started me! Let's go..
#
if __name__ == '__main__':
    main()