#!/usr/bin/env python
#
# This file is part of Jobmonarch
#
# Copyright (C) 2006 Ramon Bastiaans
#
# Jobmonarch is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Jobmonarch is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
# SVN $Id: jobarchived.py 379 2007-06-30 14:13:34Z bastiaans $
#

DEFAULT_SEARCH_PATH = '/usr/share/jobarchived'

import sys

if DEFAULT_SEARCH_PATH not in sys.path:

    sys.path.append( DEFAULT_SEARCH_PATH )

import getopt, syslog, ConfigParser

def processArgs( args ):

    SHORT_L = 'c:'
    LONG_L  = 'config='

    config_filename = None

    try:
        opts, args = getopt.getopt( args, SHORT_L, LONG_L )

    except getopt.error, detail:

        print detail
        sys.exit( 1 )

    for opt, value in opts:

        if opt in [ '--config', '-c' ]:

            config_filename = value

    if not config_filename:

        config_filename = '/etc/jobarchived.conf'

    try:
        return loadConfig( config_filename )

    except ConfigParser.NoOptionError, detail:

        print detail
        sys.exit( 1 )

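# Example invocation, for illustration only (the default config path is the
# one hard-coded above):
#
#   ./jobarchived.py -c /etc/jobarchived.conf
#   ./jobarchived.py --config=/etc/jobarchived.conf
#
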
def loadConfig( filename ):

    def getlist( cfg_string ):
        """Parse a comma-separated (optionally quoted) config value into a list"""

        my_list = [ ]

        for item_txt in cfg_string.split( ',' ):

            sep_char = None

            item_txt = item_txt.strip()

            for s_char in [ "'", '"' ]:

                if item_txt.find( s_char ) != -1:

                    if item_txt.count( s_char ) != 2:

                        print 'Missing quote: %s' %item_txt
                        sys.exit( 1 )

                    else:

                        sep_char = s_char
                        break

            if sep_char:

                item_txt = item_txt.split( sep_char )[1]

            my_list.append( item_txt )

        return my_list
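    # Illustrative behaviour of getlist() (not executed; values fabricated):
    #
    #   getlist( '"Cluster A" , "Cluster B"' )  ->  [ 'Cluster A', 'Cluster B' ]
    #   getlist( 'foo,bar' )                    ->  [ 'foo', 'bar' ]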

    cfg = ConfigParser.ConfigParser()

    cfg.read( filename )

    global DEBUG_LEVEL, USE_SYSLOG, SYSLOG_LEVEL, SYSLOG_FACILITY, GMETAD_CONF, ARCHIVE_XMLSOURCE, ARCHIVE_DATASOURCES, ARCHIVE_PATH, ARCHIVE_HOURS_PER_RRD, ARCHIVE_EXCLUDE_METRICS, JOB_SQL_DBASE, DAEMONIZE, RRDTOOL, JOB_TIMEOUT, MODRRDTOOL

    ARCHIVE_PATH          = cfg.get( 'DEFAULT', 'ARCHIVE_PATH' )

    ARCHIVE_HOURS_PER_RRD = cfg.getint( 'DEFAULT', 'ARCHIVE_HOURS_PER_RRD' )

    DEBUG_LEVEL           = cfg.getint( 'DEFAULT', 'DEBUG_LEVEL' )

    USE_SYSLOG            = cfg.getboolean( 'DEFAULT', 'USE_SYSLOG' )

    SYSLOG_LEVEL          = cfg.getint( 'DEFAULT', 'SYSLOG_LEVEL' )

    MODRRDTOOL            = False

    try:
        import rrdtool

        MODRRDTOOL = True

    except ImportError:

        MODRRDTOOL = False

        debug_msg( 0, "ERROR: py-rrdtool import FAILED: falling back to DEPRECATED use of rrdtool binary. This will slow down jobarchived significantly!" )

    try:
        SYSLOG_FACILITY = eval( 'syslog.LOG_' + cfg.get( 'DEFAULT', 'SYSLOG_FACILITY' ) )

    except AttributeError, detail:

        print 'Unknown syslog facility'
        sys.exit( 1 )

    GMETAD_CONF             = cfg.get( 'DEFAULT', 'GMETAD_CONF' )

    ARCHIVE_XMLSOURCE       = cfg.get( 'DEFAULT', 'ARCHIVE_XMLSOURCE' )

    ARCHIVE_DATASOURCES     = getlist( cfg.get( 'DEFAULT', 'ARCHIVE_DATASOURCES' ) )

    ARCHIVE_EXCLUDE_METRICS = getlist( cfg.get( 'DEFAULT', 'ARCHIVE_EXCLUDE_METRICS' ) )

    JOB_SQL_DBASE           = cfg.get( 'DEFAULT', 'JOB_SQL_DBASE' )

    JOB_TIMEOUT             = cfg.getint( 'DEFAULT', 'JOB_TIMEOUT' )

    DAEMONIZE               = cfg.getboolean( 'DEFAULT', 'DAEMONIZE' )

    RRDTOOL                 = cfg.get( 'DEFAULT', 'RRDTOOL' )

    return True

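# For illustration, a sketch of a jobarchived.conf that loadConfig() above
# would accept. The option names are exactly those read above; the values are
# made-up examples, not shipped defaults:
#
#   [DEFAULT]
#   DEBUG_LEVEL             : 1
#   USE_SYSLOG              : 1
#   SYSLOG_LEVEL            : 0
#   SYSLOG_FACILITY         : DAEMON
#   GMETAD_CONF             : /etc/gmetad.conf
#   ARCHIVE_XMLSOURCE       : localhost:8651
#   ARCHIVE_DATASOURCES     : "My Cluster"
#   ARCHIVE_PATH            : /var/lib/jobarchived/rrds
#   ARCHIVE_HOURS_PER_RRD   : 12
#   ARCHIVE_EXCLUDE_METRICS : "^multicpu_.*"
#   JOB_SQL_DBASE           : localhost/jobarchive
#   JOB_TIMEOUT             : 72
#   DAEMONIZE               : 1
#   RRDTOOL                 : /usr/bin/rrdtool
#
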
# What XML data types not to store
#
UNSUPPORTED_ARCHIVE_TYPES = [ 'string' ]

# Maximum time (in seconds) a parsethread may run
#
PARSE_TIMEOUT = 60

# Maximum time (in seconds) a storethread may run
#
STORE_TIMEOUT = 360

"""
The Job Archiving Daemon
"""

from types import *

import xml.sax, xml.sax.handler, socket, string, os, os.path, time, thread, threading, random, re
import rrdtool
from pyPgSQL import PgSQL

# Original from Andre van der Vlies <andre@vandervlies.xs4all.nl> for MySQL. Changed
# and added some more functions for postgres.
#
#
# Changed by: Bas van der Vlies <basv@sara.nl>
#
# SARA API for Postgres Database
#
# Changed by: Ramon Bastiaans for Job Monarch
#

class InitVars:

    Vars = {}

    def __init__(self, **key_arg):
        for (key, value) in key_arg.items():
            if value:
                self.Vars[key] = value
            else:
                self.Vars[key] = None

    def __call__(self, *key):
        key = "%s" % key
        return self.Vars[key]

    def __getitem__(self, key):
        return self.Vars[key]

    def __repr__(self):
        return repr(self.Vars)

    def keys(self):
        barf = map(None, self.Vars.keys())
        return barf

    def values(self):
        barf = map(None, self.Vars.values())
        return barf

    def has_key(self, key):
        if self.Vars.has_key(key):
            return 1
        else:
            return 0

class DBError(Exception):

    def __init__(self, msg=''):
        self.msg = msg
        Exception.__init__(self, msg)

    def __repr__(self):
        return self.msg

    __str__ = __repr__

#
# Class to connect to a database
# and return the query in a list or dictionary.
#
class DB:

    def __init__(self, db_vars):

        self.dict = db_vars

        if self.dict.has_key('User'):
            self.user = self.dict['User']
        else:
            self.user = 'postgres'

        if self.dict.has_key('Host'):
            self.host = self.dict['Host']
        else:
            self.host = 'localhost'

        if self.dict.has_key('Password'):
            self.passwd = self.dict['Password']
        else:
            self.passwd = ''

        if self.dict.has_key('DataBaseName'):
            self.db = self.dict['DataBaseName']
        else:
            self.db = 'uva_cluster_db'

        # connect_string = 'host:port:database:user:password:'
        dsn = "%s::%s:%s:%s" %(self.host, self.db, self.user, self.passwd)

        try:
            self.SQL = PgSQL.connect(dsn)
        except PgSQL.Error, details:
            detail_str = "%s" %details
            raise DBError(detail_str)

    def __repr__(self):
        return repr(self.result)

    def __nonzero__(self):
        return not(self.result == None)

    def __len__(self):
        return len(self.result)

    def __getitem__(self,i):
        return self.result[i]

    def __getslice__(self,i,j):
        return self.result[i:j]

    def Get(self, q_str):
        c = self.SQL.cursor()
        try:
            c.execute(q_str)
            result = c.fetchall()
        except PgSQL.Error, details:
            c.close()
            detail_str = "%s" %details
            raise DBError(detail_str)

        c.close()
        return result

    def Set(self, q_str):
        c = self.SQL.cursor()
        try:
            c.execute(q_str)
            result = c.oidValue

        except PgSQL.Error, details:
            c.close()
            detail_str = "%s" %details
            raise DBError(detail_str)

        c.close()
        return result

    def Commit(self):
        self.SQL.commit()

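# Minimal usage sketch of the DB wrapper above (illustrative values only;
# DataSQLStore below is the real consumer):
#
#   db   = DB( InitVars( DataBaseName='jobarchive', User='monarch', Host='localhost', Password='' ) )
#   rows = db.Get( "SELECT job_id FROM jobs" )          # list of result rows
#   oid  = db.Set( "DELETE FROM jobs WHERE job_id = '1'" )
#   db.Commit()
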
class DataSQLStore:

    db_vars = None
    dbc     = None

    def __init__( self, hostname, database ):

        self.db_vars = InitVars(DataBaseName=database,
                User='root',
                Host=hostname,
                Password='',
                Dictionary='true')

        try:
            self.dbc = DB(self.db_vars)
        except DBError, details:
            debug_msg( 0, 'FATAL ERROR: Unable to connect to database!: ' +str(details) )
            sys.exit(1)

    def setDatabase(self, statement):
        ret = self.doDatabase('set', statement)
        return ret

    def getDatabase(self, statement):
        ret = self.doDatabase('get', statement)
        return ret

    def doDatabase(self, type, statement):

        debug_msg( 10, 'doDatabase(): %s: %s' %(type, statement) )
        try:
            if type == 'set':
                result = self.dbc.Set( statement )
                self.dbc.Commit()
            elif type == 'get':
                result = self.dbc.Get( statement )

        except DBError, detail:
            operation = statement.split(' ')[0]
            debug_msg( 0, 'FATAL ERROR: ' +operation+ ' on database failed while doing ['+statement+'] full msg: '+str(detail) )
            sys.exit(1)

        debug_msg( 10, 'doDatabase(): result: %s' %(result) )
        return result

    def getJobNodeId( self, job_id, node_id ):

        id = self.getDatabase( "SELECT job_id,node_id FROM job_nodes WHERE job_id = '%s' AND node_id = '%s'" %(job_id, node_id) )

        if len( id ) > 0:

            if len( id[0] ) > 0 and id[0] != '':

                return 1

        return 0

    def getNodeId( self, hostname ):

        id = self.getDatabase( "SELECT node_id FROM nodes WHERE node_hostname = '%s'" %hostname )

        if len( id ) > 0:

            id = id[0][0]

            return id
        else:
            return None

    def getNodeIds( self, hostnames ):

        ids = [ ]

        for node in hostnames:

            id = self.getNodeId( node )

            if id:
                ids.append( id )

        return ids

    def getJobId( self, jobid ):

        id = self.getDatabase( "SELECT job_id FROM jobs WHERE job_id = '%s'" %jobid )

        if id:
            id = id[0][0]

            return id
        else:
            return None

    def addJob( self, job_id, jobattrs ):

        if not self.getJobId( job_id ):

            self.mutateJob( 'insert', job_id, jobattrs )
        else:
            self.mutateJob( 'update', job_id, jobattrs )

    def mutateJob( self, action, job_id, jobattrs ):

        job_values = [ 'name', 'queue', 'owner', 'requested_time', 'requested_memory', 'ppn', 'status', 'start_timestamp', 'stop_timestamp' ]

        insert_col_str = 'job_id'
        insert_val_str = "'%s'" %job_id
        update_str     = None

        debug_msg( 10, 'mutateJob(): %s %s' %(action,job_id))

        ids = [ ]

        for valname, value in jobattrs.items():

            if valname in job_values and value != '':

                column_name = 'job_' + valname

                if action == 'insert':

                    if not insert_col_str:
                        insert_col_str = column_name
                    else:
                        insert_col_str = insert_col_str + ',' + column_name

                    if not insert_val_str:
                        insert_val_str = value
                    else:
                        insert_val_str = insert_val_str + ",'%s'" %value

                elif action == 'update':

                    if not update_str:
                        update_str = "%s='%s'" %(column_name, value)
                    else:
                        update_str = update_str + ",%s='%s'" %(column_name, value)

            elif valname == 'nodes' and value:

                node_valid = 1

                if len(value) == 1:

                    if jobattrs['status'] == 'Q':

                        node_valid = 0

                    else:

                        node_valid = 0

                        for node_char in str(value[0]):

                            if string.find( string.digits, node_char ) != -1 and not node_valid:

                                node_valid = 1

                if node_valid:

                    ids = self.addNodes( value, jobattrs['domain'] )

        if action == 'insert':

            self.setDatabase( "INSERT INTO jobs ( %s ) VALUES ( %s )" %( insert_col_str, insert_val_str ) )

        elif action == 'update':

            self.setDatabase( "UPDATE jobs SET %s WHERE job_id=%s" %(update_str, job_id) )

        if len( ids ) > 0:
            self.addJobNodes( job_id, ids )

    def addNodes( self, hostnames, domain ):

        ids = [ ]

        for node in hostnames:

            node = '%s.%s' %( node, domain )
            id   = self.getNodeId( node )

            if not id:
                self.setDatabase( "INSERT INTO nodes ( node_hostname ) VALUES ( '%s' )" %node )
                id = self.getNodeId( node )

            ids.append( id )

        return ids

    def addJobNodes( self, jobid, nodes ):

        for node in nodes:

            if not self.getJobNodeId( jobid, node ):

                self.addJobNode( jobid, node )

    def addJobNode( self, jobid, nodeid ):

        self.setDatabase( "INSERT INTO job_nodes (job_id,node_id) VALUES ( %s,%s )" %(jobid, nodeid) )

    def storeJobInfo( self, jobid, jobattrs ):

        self.addJob( jobid, jobattrs )

    def checkStaleJobs( self ):

        q = "SELECT * from jobs WHERE job_status != 'F'"

        r = self.getDatabase( q )

        if len( r ) == 0:

            return None

        cleanjobs   = [ ]
        timeoutjobs = [ ]

        jobtimeout_sec = JOB_TIMEOUT * (60 * 60)
        cur_time       = time.time()

        for row in r:

            job_id              = row[0]
            job_requested_time  = row[4]
            job_status          = row[7]
            job_start_timestamp = row[8]

            if job_status == 'Q' or not job_start_timestamp:

                cleanjobs.append( job_id )

            else:

                start_timestamp = int( job_start_timestamp )

                if ( cur_time - start_timestamp ) > jobtimeout_sec:

                    if job_requested_time:

                        rtime_epoch = reqtime2epoch( job_requested_time )
                    else:
                        rtime_epoch = None

                    timeoutjobs.append( (job_id, job_start_timestamp, rtime_epoch) )

        debug_msg( 1, 'Found ' + str( len( cleanjobs ) ) + ' stale jobs in database: deleting entries' )

        for j in cleanjobs:

            q = "DELETE FROM jobs WHERE job_id = '" + str( j ) + "'"
            self.setDatabase( q )

        debug_msg( 1, 'Found ' + str( len( timeoutjobs ) ) + ' timed out jobs in database: closing entries' )

        for j in timeoutjobs:

            ( i, s, r ) = j

            if r:
                new_end_timestamp = int( s ) + r

                q = "UPDATE jobs SET job_stop_timestamp = '" + str( new_end_timestamp ) + "' WHERE job_id = '" + str(i) + "'"
                self.setDatabase( q )

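# Sketch of the jobattrs dictionary that storeJobInfo() above expects, as
# assembled by TorqueXMLHandler below. Field names follow the job_values list
# in mutateJob(); the values here are fabricated:
#
#   jobattrs = { 'name'            : 'myjob',
#                'queue'           : 'batch',
#                'owner'           : 'someuser',
#                'requested_time'  : '72:00:00',
#                'requested_memory': '1024mb',
#                'ppn'             : '2',
#                'status'          : 'R',
#                'start_timestamp' : '1183000000',
#                'stop_timestamp'  : '',
#                'nodes'           : [ 'node1', 'node2' ],
#                'domain'          : 'mycluster.org' }
#
#   ds = DataSQLStore( JOB_SQL_DBASE.split( '/' )[0], JOB_SQL_DBASE.split( '/' )[1] )
#   ds.storeJobInfo( '1234', jobattrs )
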
class RRDMutator:
    """A class for performing RRD mutations"""

    binary = None

    def __init__( self, binary=None ):
        """Set alternate binary if supplied"""

        if binary:
            self.binary = binary

    def create( self, filename, args ):
        """Create a new rrd with args"""

        global MODRRDTOOL

        if MODRRDTOOL:
            return self.perform( 'create', filename, args )
        else:
            return self.perform( 'create', '"' + filename + '"', args )

    def update( self, filename, args ):
        """Update a rrd with args"""

        global MODRRDTOOL

        if MODRRDTOOL:
            return self.perform( 'update', filename, args )
        else:
            return self.perform( 'update', '"' + filename + '"', args )

    def grabLastUpdate( self, filename ):
        """Determine the last update time of filename rrd"""

        global MODRRDTOOL

        last_update = 0

        if MODRRDTOOL:

            debug_msg( 8, 'rrdtool.info( ' + filename + ' )' )

            rrd_header = { }

            try:
                rrd_header = rrdtool.info( filename )
            except rrdtool.error, msg:
                debug_msg( 8, str( msg ) )

            if rrd_header.has_key( 'last_update' ):
                return rrd_header[ 'last_update' ]
            else:
                return 0

        else:
            debug_msg( 8, self.binary + ' info ' + filename )

            my_pipe = os.popen( self.binary + ' info "' + filename + '"' )

            for line in my_pipe.readlines():

                if line.find( 'last_update') != -1:

                    last_update = line.split( ' = ' )[1]

            if my_pipe:

                my_pipe.close()

            if last_update:
                return last_update
            else:
                return 0

    def perform( self, action, filename, args ):
        """Perform action on rrd filename with args"""

        global MODRRDTOOL

        arg_string = None

        if type( args ) is not ListType:
            debug_msg( 8, 'Arguments need to be of type List' )
            return 1

        for arg in args:

            if not arg_string:

                arg_string = arg
            else:
                arg_string = arg_string + ' ' + arg

        if MODRRDTOOL:

            debug_msg( 8, 'rrdtool.' + action + "( " + filename + ' ' + arg_string + ")" )

            try:
                debug_msg( 8, "filename '" + str(filename) + "' type "+ str(type(filename)) + " args " + str( args ) )

                if action == 'create':

                    rrdtool.create( str( filename ), *args )

                elif action == 'update':

                    rrdtool.update( str( filename ), *args )

            except rrdtool.error, msg:

                error_msg = str( msg )
                debug_msg( 8, error_msg )
                return 1

        else:

            debug_msg( 8, self.binary + ' ' + action + ' ' + filename + ' ' + arg_string )

            cmd   = os.popen( self.binary + ' ' + action + ' ' + filename + ' ' + arg_string )
            lines = cmd.readlines()

            cmd.close()

            for line in lines:

                if line.find( 'ERROR' ) != -1:

                    error_msg = string.join( line.split( ' ' )[1:] )
                    debug_msg( 8, error_msg )
                    return 1

        return 0

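# Illustrative use of the RRDMutator wrapper above. The DS/RRA arguments are
# plain rrdtool syntax and are made-up here, not the definitions this daemon
# creates for its archives:
#
#   rrdm = RRDMutator( RRDTOOL )
#   rrdm.create( '/tmp/example.rrd', [ '--step', '15', '--start', '0', 'DS:sum:GAUGE:30:U:U', 'RRA:AVERAGE:0.5:1:2880' ] )
#   rrdm.update( '/tmp/example.rrd', [ 'N:42' ] )
#   last = rrdm.grabLastUpdate( '/tmp/example.rrd' )
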
class XMLProcessor:
    """Skeleton class for XML processors"""

    def run( self ):
        """Do main processing of XML here"""

        pass

class TorqueXMLProcessor( XMLProcessor ):
    """Main class for processing XML and acting with it"""

    def __init__( self, XMLSource, DataStore ):
        """Setup initial XML connection and handlers"""

        #self.myXMLGatherer = XMLGatherer( ARCHIVE_XMLSOURCE.split( ':' )[0], ARCHIVE_XMLSOURCE.split( ':' )[1] )
        #self.myXMLSource   = self.myXMLGatherer.getFileObject()
        self.myXMLSource  = XMLSource
        self.myXMLHandler = TorqueXMLHandler( DataStore )
        self.myXMLError   = XMLErrorHandler()

        self.config = GangliaConfigParser( GMETAD_CONF )

    def run( self ):
        """Main XML processing"""

        debug_msg( 1, 'torque_xml_thread(): started.' )

        while( 1 ):

            #self.myXMLSource = self.myXMLGatherer.getFileObject()
            debug_msg( 1, 'torque_xml_thread(): Parsing..' )

            my_data = self.myXMLSource.getData()

            try:
                xml.sax.parseString( my_data, self.myXMLHandler, self.myXMLError )
            except socket.error, msg:
                debug_msg( 0, 'ERROR: Socket error in connect to datasource!: %s' %msg )

            debug_msg( 1, 'torque_xml_thread(): Done parsing.' )
            debug_msg( 1, 'torque_xml_thread(): Sleeping.. (%ss)' %(str( self.config.getLowestInterval() ) ) )

            # getLowestInterval() may return the interval as a string; cast it
            time.sleep( float( self.config.getLowestInterval() ) )

class TorqueXMLHandler( xml.sax.handler.ContentHandler ):
    """Parse Torque's jobinfo XML from our plugin"""

    jobAttrs = { }

    def __init__( self, datastore ):

        #self.ds = DataSQLStore( JOB_SQL_DBASE.split( '/' )[0], JOB_SQL_DBASE.split( '/' )[1] )
        self.ds             = datastore
        self.jobs_processed = [ ]
        self.jobs_to_store  = [ ]

    def startDocument( self ):

        self.heartbeat = 0
        self.elementct = 0

    def startElement( self, name, attrs ):
        """
        This XML will be all gmetric XML
        so there will be no specific start/end element
        just one XML statement with all info
        """

        jobinfo = { }

        self.elementct += 1

        if name == 'CLUSTER':

            self.clustername = str( attrs.get( 'NAME', "" ) )

        elif name == 'METRIC' and self.clustername in ARCHIVE_DATASOURCES:

            metricname = str( attrs.get( 'NAME', "" ) )

            if metricname == 'MONARCH-HEARTBEAT':
                self.heartbeat = str( attrs.get( 'VAL', "" ) )

            elif metricname.find( 'MONARCH-JOB' ) != -1:

                job_id = metricname.split( 'MONARCH-JOB-' )[1].split( '-' )[0]
                val    = str( attrs.get( 'VAL', "" ) )

                if not job_id in self.jobs_processed:

                    self.jobs_processed.append( job_id )

                check_change = 0

                if self.jobAttrs.has_key( job_id ):

                    check_change = 1

                valinfo = val.split( ' ' )

                for myval in valinfo:

                    if len( myval.split( '=' ) ) > 1:

                        valname = myval.split( '=' )[0]
                        value   = myval.split( '=' )[1]

                        if valname == 'nodes':
                            value = value.split( ';' )

                        jobinfo[ valname ] = value

                if check_change:
                    if self.jobinfoChanged( self.jobAttrs, job_id, jobinfo ) and self.jobAttrs[ job_id ]['status'] in [ 'R', 'Q' ]:
                        self.jobAttrs[ job_id ]['stop_timestamp'] = ''
                        self.jobAttrs[ job_id ] = self.setJobAttrs( self.jobAttrs[ job_id ], jobinfo )
                        if not job_id in self.jobs_to_store:
                            self.jobs_to_store.append( job_id )

                        debug_msg( 10, 'jobinfo for job %s has changed' %job_id )
                else:
                    self.jobAttrs[ job_id ] = jobinfo

                    if not job_id in self.jobs_to_store:
                        self.jobs_to_store.append( job_id )

                    debug_msg( 10, 'new job %s stored' %job_id )

    def endDocument( self ):
        """When all metrics have gone, check if any jobs have finished"""

        debug_msg( 1, "XML: Processed "+str(self.elementct)+ " elements - found "+str(len(self.jobs_to_store))+" (updated) jobs" )

        if self.heartbeat:
            for jobid, jobinfo in self.jobAttrs.items():

                # This is an old job, not in the current jobinfo list anymore;
                # it must have finished, since we _did_ get a new heartbeat
                #
                mytime = int( jobinfo['reported'] ) + int( jobinfo['poll_interval'] )

                if (mytime < self.heartbeat) and (jobid not in self.jobs_processed) and (jobinfo['status'] == 'R'):

                    if not jobid in self.jobs_processed:
                        self.jobs_processed.append( jobid )

                    self.jobAttrs[ jobid ]['status']         = 'F'
                    self.jobAttrs[ jobid ]['stop_timestamp'] = str( self.heartbeat )

                    if not jobid in self.jobs_to_store:
                        self.jobs_to_store.append( jobid )

            debug_msg( 1, 'torque_xml_thread(): Storing..' )

            for jobid in self.jobs_to_store:

                if self.jobAttrs[ jobid ]['status'] in [ 'R', 'F' ]:

                    self.ds.storeJobInfo( jobid, self.jobAttrs[ jobid ] )

                    if self.jobAttrs[ jobid ]['status'] == 'F':
                        del self.jobAttrs[ jobid ]

            debug_msg( 1, 'torque_xml_thread(): Done storing.' )

            self.jobs_processed = [ ]
            self.jobs_to_store  = [ ]

    def setJobAttrs( self, old, new ):
        """
        Set new job attributes in old, but do not lose existing fields
        that the new attributes don't have
        """

        for valname, value in new.items():
            old[ valname ] = value

        return old

    def jobinfoChanged( self, jobattrs, jobid, jobinfo ):
        """
        Check if jobinfo has changed from jobattrs[jobid]:
        a change only counts if its report time is later than the previous one
        and the report time is recent (i.e. equal to the current heartbeat)
        """

        ignore_changes = [ 'reported' ]

        if jobattrs.has_key( jobid ):

            for valname, value in jobinfo.items():

                if valname not in ignore_changes:

                    if jobattrs[ jobid ].has_key( valname ):

                        if value != jobattrs[ jobid ][ valname ]:

                            if jobinfo['reported'] > jobattrs[ jobid ][ 'reported' ] and jobinfo['reported'] == self.heartbeat:
                                return True

                    else:
                        return True

        return False

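# The gmetric XML consumed by TorqueXMLHandler above looks roughly like this
# (a sketch with fabricated values):
#
#   <METRIC NAME="MONARCH-HEARTBEAT" VAL="1183000000" ... />
#   <METRIC NAME="MONARCH-JOB-1234-0"
#           VAL="name=myjob queue=batch owner=someuser requested_time=72:00:00 status=R start_timestamp=1183000000 nodes=node1;node2 domain=mycluster.org reported=1183000000 poll_interval=15" ... />
#
# startElement() splits the metric NAME on 'MONARCH-JOB-' to get the job id,
# splits VAL on spaces and '=' into the jobinfo dict, and splits the 'nodes'
# value on ';'.
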
class GangliaXMLHandler( xml.sax.handler.ContentHandler ):
    """Parse Ganglia's XML"""

    def __init__( self, config, datastore ):
        """Setup initial variables and gather info on existing rrd archive"""

        self.config   = config
        self.clusters = { }
        self.ds       = datastore

        debug_msg( 1, 'Checking database..' )

        global DEBUG_LEVEL

        if DEBUG_LEVEL <= 2:
            self.ds.checkStaleJobs()

        debug_msg( 1, 'Check done.' )
        debug_msg( 1, 'Checking rrd archive..' )
        self.gatherClusters()
        debug_msg( 1, 'Check done.' )

    def gatherClusters( self ):
        """Find all existing clusters in archive dir"""

        archive_dir = check_dir(ARCHIVE_PATH)

        hosts = [ ]

        if os.path.exists( archive_dir ):

            dirlist = os.listdir( archive_dir )

            for cfgcluster in ARCHIVE_DATASOURCES:

                if cfgcluster not in dirlist:

                    # Autocreate a directory for this cluster
                    # assume it is new
                    #
                    cluster_dir = '%s/%s' %( check_dir(ARCHIVE_PATH), cfgcluster )

                    os.mkdir( cluster_dir )

                    dirlist.append( cfgcluster )

            for item in dirlist:

                clustername = item

                if not self.clusters.has_key( clustername ) and clustername in ARCHIVE_DATASOURCES:

                    self.clusters[ clustername ] = RRDHandler( self.config, clustername )

        debug_msg( 9, "Found "+str(len(self.clusters.keys()))+" clusters" )

    def startElement( self, name, attrs ):
        """Memorize appropriate data from xml start tags"""

        if name == 'GANGLIA_XML':

            self.XMLSource      = str( attrs.get( 'SOURCE', "" ) )
            self.gangliaVersion = str( attrs.get( 'VERSION', "" ) )

            debug_msg( 10, 'Found XML data: source %s version %s' %( self.XMLSource, self.gangliaVersion ) )

        elif name == 'GRID':

            self.gridName = str( attrs.get( 'NAME', "" ) )
            self.time     = str( attrs.get( 'LOCALTIME', "" ) )

            debug_msg( 10, '`-Grid found: %s' %( self.gridName ) )

        elif name == 'CLUSTER':

            self.clusterName = str( attrs.get( 'NAME', "" ) )
            self.time        = str( attrs.get( 'LOCALTIME', "" ) )

            if not self.clusters.has_key( self.clusterName ) and self.clusterName in ARCHIVE_DATASOURCES:

                self.clusters[ self.clusterName ] = RRDHandler( self.config, self.clusterName )

            debug_msg( 10, ' |-Cluster found: %s' %( self.clusterName ) )

        elif name == 'HOST' and self.clusterName in ARCHIVE_DATASOURCES:

            self.hostName     = str( attrs.get( 'NAME', "" ) )
            self.hostIp       = str( attrs.get( 'IP', "" ) )
            self.hostReported = str( attrs.get( 'REPORTED', "" ) )

            debug_msg( 10, ' | |-Host found: %s - ip %s reported %s' %( self.hostName, self.hostIp, self.hostReported ) )

        elif name == 'METRIC' and self.clusterName in ARCHIVE_DATASOURCES:

            type = str( attrs.get( 'TYPE', "" ) )

            exclude_metric = False

            for ex_metricstr in ARCHIVE_EXCLUDE_METRICS:

                orig_name = str( attrs.get( 'NAME', "" ) )

                if string.lower( orig_name ) == string.lower( ex_metricstr ):

                    exclude_metric = True

                elif re.match( ex_metricstr, orig_name ):

                    exclude_metric = True

            if type not in UNSUPPORTED_ARCHIVE_TYPES and not exclude_metric:

                myMetric         = { }
                myMetric['name'] = str( attrs.get( 'NAME', "" ) )
                myMetric['val']  = str( attrs.get( 'VAL', "" ) )
                myMetric['time'] = self.hostReported

                self.clusters[ self.clusterName ].memMetric( self.hostName, myMetric )

                debug_msg( 11, ' | | |-metric: %s:%s' %( myMetric['name'], myMetric['val'] ) )

    def storeMetrics( self ):
        """Store metrics of each cluster rrd handler"""

        for clustername, rrdh in self.clusters.items():

            ret = rrdh.storeMetrics()

            if ret:
                debug_msg( 9, 'An error occurred while storing metrics for cluster %s' %clustername )
                return 1

        return 0

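# For reference, the Ganglia XML stream parsed by GangliaXMLHandler above is
# shaped like this (a sketch; attribute values are fabricated):
#
#   <GANGLIA_XML VERSION="3.0.4" SOURCE="gmetad">
#     <GRID NAME="mygrid" LOCALTIME="1183000000">
#       <CLUSTER NAME="My Cluster" LOCALTIME="1183000000">
#         <HOST NAME="node1" IP="10.0.0.1" REPORTED="1183000000">
#           <METRIC NAME="cpu_user" VAL="1.3" TYPE="float" ... />
#         </HOST>
#       </CLUSTER>
#     </GRID>
#   </GANGLIA_XML>
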
class XMLErrorHandler( xml.sax.handler.ErrorHandler ):

    def error( self, exception ):
        """Recoverable error"""

        debug_msg( 0, 'Recoverable XML error ' + str( exception ) + ' ignored.' )

    def fatalError( self, exception ):
        """Non-recoverable error"""

        exception_str = str( exception )

        # Ignore 'no element found' errors
        if exception_str.find( 'no element found' ) != -1:
            debug_msg( 0, 'No XML data found: Socket not (re)connected or datasource not available.' )
            return 0

        debug_msg( 0, 'FATAL ERROR: Non-recoverable XML error ' + str( exception ) )
        sys.exit( 1 )

    def warning( self, exception ):
        """Warning"""

        debug_msg( 0, 'Warning ' + str( exception ) )

class XMLGatherer:
    """Setup a connection and file object to Ganglia's XML"""

    s    = None
    fd   = None
    data = None
    slot = None

    # Time since the last update
    #
    LAST_UPDATE = 0

    # Minimum interval between updates
    #
    MIN_UPDATE_INT = 10

    # Is an update occurring now
    #
    update_now = False

    def __init__( self, host, port ):
        """Store host and port for connection"""

        self.host = host
        self.port = port
        self.slot = threading.Lock()

        self.retrieveData()

    def retrieveData( self ):
        """Setup connection to XML source"""

        self.update_now = True

        self.slot.acquire()

        for res in socket.getaddrinfo( self.host, self.port, socket.AF_UNSPEC, socket.SOCK_STREAM ):

            af, socktype, proto, canonname, sa = res

            try:

                self.s = socket.socket( af, socktype, proto )

            except socket.error, msg:

                self.s = None
                continue

            try:

                self.s.connect( sa )

            except socket.error, msg:

                self.disconnect()
                continue

            break

        if self.s is None:

            debug_msg( 0, 'FATAL ERROR: Could not open socket or unable to connect to datasource!' )
            self.update_now = False
            sys.exit( 1 )

        else:
            #self.s.send( '\n' )

            my_fp   = self.s.makefile( 'r' )
            my_data = my_fp.readlines()
            my_data = string.join( my_data, '' )

            self.data = my_data

            self.LAST_UPDATE = time.time()

        self.slot.release()

        self.update_now = False

    def disconnect( self ):
        """Close socket"""

        if self.s:
            #self.s.shutdown( 2 )
            self.s.close()
            self.s = None

    def __del__( self ):
        """Kill the socket before we leave"""

        self.disconnect()

    def reGetData( self ):
        """Reconnect"""

        while self.update_now:

            # Must be another update in progress:
            # Wait until the update is complete
            #
            time.sleep( 1 )

        if self.s:
            self.disconnect()

        self.retrieveData()

    def getData( self ):
        """Return the XML data"""

        # If more than MIN_UPDATE_INT seconds passed since last data update
        # update the XML first before returning it
        #

        cur_time = time.time()

        if ( cur_time - self.LAST_UPDATE ) > self.MIN_UPDATE_INT:

            self.reGetData()

        while self.update_now:

            # Must be another update in progress:
            # Wait until the update is complete
            #
            time.sleep( 1 )

        return self.data

    def makeFileDescriptor( self ):
        """Make file descriptor that points to our socket connection"""

        # reconnect first (reGetData() re-establishes the socket)
        self.reGetData()

        if self.s:
            self.fd = self.s.makefile( 'r' )

    def getFileObject( self ):
        """Connect, and return a file object"""

        self.makeFileDescriptor()

        if self.fd:
            return self.fd

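# Typical wiring of the gatherer and the two processors, mirroring the
# commented-out lines in the constructors (a sketch; the actual startup code
# lives further down in this file):
#
#   myXMLSource = XMLGatherer( ARCHIVE_XMLSOURCE.split( ':' )[0], ARCHIVE_XMLSOURCE.split( ':' )[1] )
#   myDataStore = DataSQLStore( JOB_SQL_DBASE.split( '/' )[0], JOB_SQL_DBASE.split( '/' )[1] )
#
#   TorqueXMLProcessor( myXMLSource, myDataStore ).run()    # in one thread
#   GangliaXMLProcessor( myXMLSource, myDataStore ).run()   # in another
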
class GangliaXMLProcessor( XMLProcessor ):
    """Main class for processing XML and acting with it"""

    def __init__( self, XMLSource, DataStore ):
        """Setup initial XML connection and handlers"""

        self.config = GangliaConfigParser( GMETAD_CONF )

        #self.myXMLGatherer = XMLGatherer( ARCHIVE_XMLSOURCE.split( ':' )[0], ARCHIVE_XMLSOURCE.split( ':' )[1] )
        #self.myXMLSource   = self.myXMLGatherer.getFileObject()
        self.myXMLSource  = XMLSource
        self.ds           = DataStore
        self.myXMLHandler = GangliaXMLHandler( self.config, self.ds )
        self.myXMLError   = XMLErrorHandler()

    def run( self ):
        """Main XML processing; start an xml thread and a storethread"""

        xml_thread   = threading.Thread( None, self.processXML, 'xmlthread' )
        store_thread = threading.Thread( None, self.storeMetrics, 'storethread' )

        while( 1 ):

            if not xml_thread.isAlive():
                # Gather XML at the same interval as gmetad

                # threaded call to: self.processXML()
                #
                try:
                    xml_thread = threading.Thread( None, self.processXML, 'xml_thread' )
                    xml_thread.start()
                except thread.error, msg:
                    debug_msg( 0, 'ERROR: Unable to start xml_thread!: '+str(msg))
                    #return 1

            if not store_thread.isAlive():
                # Store metrics every .. sec

                # threaded call to: self.storeMetrics()
                #
                try:
                    store_thread = threading.Thread( None, self.storeMetrics, 'store_thread' )
                    store_thread.start()
                except thread.error, msg:
                    debug_msg( 0, 'ERROR: Unable to start store_thread!: '+str(msg))
                    #return 1

            # Just sleep a sec here, to prevent daemon from going mad. We're all threads here anyway
            time.sleep( 1 )

    def storeMetrics( self ):
        """Store metrics retained in memory to disk"""

        global DEBUG_LEVEL

        # Store metrics somewhere between every 360 and 640 seconds
        #
        if DEBUG_LEVEL > 2:
            #STORE_INTERVAL = 60
            STORE_INTERVAL = random.randint( 360, 640 )
        else:
            STORE_INTERVAL = random.randint( 360, 640 )

        try:
            store_metric_thread = threading.Thread( None, self.storeThread, 'store_metric_thread' )
            store_metric_thread.start()
        except thread.error, msg:
            debug_msg( 0, 'ERROR: Unable to start ganglia_store_thread()!: '+str(msg) )
            return 1

        debug_msg( 1, 'ganglia_store_thread(): started.' )

        debug_msg( 1, 'ganglia_store_thread(): Sleeping.. (%ss)' %STORE_INTERVAL )
        time.sleep( STORE_INTERVAL )
        debug_msg( 1, 'ganglia_store_thread(): Done sleeping.' )

        if store_metric_thread.isAlive():

            debug_msg( 1, 'ganglia_store_thread(): storemetricthread() still running, waiting to finish..' )
            store_metric_thread.join( STORE_TIMEOUT )   # Maximum time for storing thread to finish
            debug_msg( 1, 'ganglia_store_thread(): Done waiting.' )

        debug_msg( 1, 'ganglia_store_thread(): finished.' )

        return 0

    def storeThread( self ):
        """Actual metric storing thread"""

        debug_msg( 1, 'ganglia_store_metric_thread(): started.' )
        debug_msg( 1, 'ganglia_store_metric_thread(): Storing data..' )

        ret = self.myXMLHandler.storeMetrics()

        if ret > 0:
            debug_msg( 0, 'ganglia_store_metric_thread(): UNKNOWN ERROR %s while storing Metrics!' %str(ret) )

        debug_msg( 1, 'ganglia_store_metric_thread(): Done storing.' )
        debug_msg( 1, 'ganglia_store_metric_thread(): finished.' )

        return 0

    def processXML( self ):
        """Process XML"""

        try:
            parsethread = threading.Thread( None, self.parseThread, 'parsethread' )
            parsethread.start()
        except thread.error, msg:
            debug_msg( 0, 'ERROR: Unable to start ganglia_xml_thread()!: ' + str(msg) )
            return 1

        debug_msg( 1, 'ganglia_xml_thread(): started.' )

        debug_msg( 1, 'ganglia_xml_thread(): Sleeping.. (%ss)' %self.config.getLowestInterval() )
        time.sleep( float( self.config.getLowestInterval() ) )
        debug_msg( 1, 'ganglia_xml_thread(): Done sleeping.' )

        if parsethread.isAlive():

            debug_msg( 1, 'ganglia_xml_thread(): parsethread() still running, waiting (%ss) to finish..' %PARSE_TIMEOUT )
            parsethread.join( PARSE_TIMEOUT )   # Maximum time for XML thread to finish
            debug_msg( 1, 'ganglia_xml_thread(): Done waiting.' )

        debug_msg( 1, 'ganglia_xml_thread(): finished.' )

        return 0

    def parseThread( self ):
        """Actual parsing thread"""

        debug_msg( 1, 'ganglia_parse_thread(): started.' )
        debug_msg( 1, 'ganglia_parse_thread(): Parsing XML..' )

        #self.myXMLSource = self.myXMLGatherer.getFileObject()

        my_data = self.myXMLSource.getData()

        #print my_data

        try:
            xml.sax.parseString( my_data, self.myXMLHandler, self.myXMLError )
        except socket.error, msg:
            debug_msg( 0, 'ERROR: Socket error in connect to datasource!: %s' %msg )

        debug_msg( 1, 'ganglia_parse_thread(): Done parsing.' )
        debug_msg( 1, 'ganglia_parse_thread(): finished.' )

        return 0

class GangliaConfigParser:

    sources = [ ]

    def __init__( self, config ):
        """Parse some stuff from our gmetad's config, such as polling interval"""

        self.config = config
        self.parseValues()

    def parseValues( self ):
        """Parse certain values from gmetad.conf"""

        readcfg = open( self.config, 'r' )

        for line in readcfg.readlines():

            if line.count( '"' ) > 1:

                if line.find( 'data_source' ) != -1 and line[0] != '#':

                    source         = { }
                    source['name'] = line.split( '"' )[1]
                    source_words   = line.split( '"' )[2].split( ' ' )

                    for word in source_words:

                        valid_interval = 1

                        for letter in word:

                            if letter not in string.digits:

                                valid_interval = 0

                        if valid_interval and len(word) > 0:

                            source['interval'] = word
                            debug_msg( 9, 'polling interval for %s = %s' %(source['name'], source['interval'] ) )

                    # No interval found, use Ganglia's default
                    if not source.has_key( 'interval' ):
                        source['interval'] = 15
                        debug_msg( 9, 'polling interval for %s defaulted to 15' %(source['name']) )

                    self.sources.append( source )

        readcfg.close()

    def getInterval( self, source_name ):
        """Return interval for source_name"""

        for source in self.sources:

            if source['name'] == source_name:

                return source['interval']

        return None

    def getLowestInterval( self ):
        """Return the lowest interval of all clusters"""

        lowest_interval = 0

        for source in self.sources:

            if not lowest_interval or source['interval'] <= lowest_interval:

                lowest_interval = source['interval']

        # Return 15 when nothing is found, so that the daemon won't go insane with 0 sec delays
        if lowest_interval:
            return lowest_interval
        else:
            return 15

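# Example gmetad.conf line that parseValues() above understands (illustrative):
#
#   data_source "My Cluster" 15 localhost:8649
#
# yields source['name'] = 'My Cluster' and source['interval'] = '15' (kept as
# a string, hence the float() casts at the time.sleep() call sites).
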
class RRDHandler:
    """Class for handling RRD activity"""

    myMetrics   = { }
    lastStored  = { }
    timeserials = { }
    slot        = None

    def __init__( self, config, cluster ):
        """Setup initial variables"""

        self.block   = 0
        self.cluster = cluster
        self.config  = config
        self.slot    = threading.Lock()
        self.rrdm    = RRDMutator( RRDTOOL )

        global DEBUG_LEVEL

        if DEBUG_LEVEL <= 2:
            self.gatherLastUpdates()

    def gatherLastUpdates( self ):
        """Populate the lastStored list, containing timestamps of all last updates"""

        cluster_dir = '%s/%s' %( check_dir(ARCHIVE_PATH), self.cluster )

        hosts = [ ]

        if os.path.exists( cluster_dir ):

            dirlist = os.listdir( cluster_dir )

            for dir in dirlist:

                hosts.append( dir )

        for host in hosts:

            host_dir = cluster_dir + '/' + host
            dirlist  = os.listdir( host_dir )

            for dir in dirlist:

                if not self.timeserials.has_key( host ):

                    self.timeserials[ host ] = [ ]

                self.timeserials[ host ].append( dir )

            last_serial = self.getLastRrdTimeSerial( host )

            if last_serial:

                metric_dir = cluster_dir + '/' + host + '/' + last_serial

                if os.path.exists( metric_dir ):

                    dirlist = os.listdir( metric_dir )

                    for file in dirlist:

                        metricname = file.split( '.rrd' )[0]

                        if not self.lastStored.has_key( host ):

                            self.lastStored[ host ] = { }

                        self.lastStored[ host ][ metricname ] = self.rrdm.grabLastUpdate( metric_dir + '/' + file )

    def getClusterName( self ):
        """Return clustername"""

        return self.cluster

    def memMetric( self, host, metric ):
        """Store metric from host in memory"""

        # <ATOMIC>
        #
        self.slot.acquire()

        if self.myMetrics.has_key( host ):

            if self.myMetrics[ host ].has_key( metric['name'] ):

                for mymetric in self.myMetrics[ host ][ metric['name'] ]:

                    if mymetric['time'] == metric['time']:

                        # Already have this metric, abort
                        self.slot.release()
                        return 1
            else:
                self.myMetrics[ host ][ metric['name'] ] = [ ]
        else:
            self.myMetrics[ host ] = { }
            self.myMetrics[ host ][ metric['name'] ] = [ ]

        # Push new metric onto stack
        # atomic code; only 1 thread at a time may access the stack

        self.myMetrics[ host ][ metric['name'] ].append( metric )

        self.slot.release()
        #
        # </ATOMIC>

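    # For reference: a metric, as memMetric() receives it from the XML
    # gathering thread, is a dict with at least 'name', 'time' and 'val'
    # keys, e.g. (values hypothetical):
    #
    #   { 'name': 'load_one', 'time': '1183212345', 'val': '0.25' }
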
    def makeUpdateList( self, host, metriclist ):
        """
        Make a list of update values for rrdupdate
        but only those that we didn't store before
        """

        update_list = [ ]
        metric      = None

        while len( metriclist ) > 0:

            metric = metriclist.pop( 0 )

            if self.checkStoreMetric( host, metric ):

                u_val = str( metric['time'] ) + ':' + str( metric['val'] )
                update_list.append( u_val )

        return update_list

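    # Each entry in update_list follows rrdtool's 'timestamp:value' update
    # syntax, e.g. '1183212345:0.25' (values hypothetical).
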
    def checkStoreMetric( self, host, metric ):
        """Check if the supplied metric is newer than the last one stored"""

        if self.lastStored.has_key( host ):

            if self.lastStored[ host ].has_key( metric['name'] ):

                if metric['time'] <= self.lastStored[ host ][ metric['name'] ]:

                    # This is old
                    return 0

        return 1

    def memLastUpdate( self, host, metricname, metriclist ):
        """
        Memorize the time of the latest metric from metriclist
        but only if it wasn't already memorized
        """

        if not self.lastStored.has_key( host ):
            self.lastStored[ host ] = { }

        last_update_time = 0

        for metric in metriclist:

            if metric['name'] == metricname:

                if metric['time'] > last_update_time:

                    last_update_time = metric['time']

        if self.lastStored[ host ].has_key( metricname ):

            if last_update_time <= self.lastStored[ host ][ metricname ]:
                return 1

        self.lastStored[ host ][ metricname ] = last_update_time

    def storeMetrics( self ):
        """
        Store all metrics from memory to disk
        and do it to the RRD's in appropriate timeperiod directory
        """

        debug_msg( 5, "Entering storeMetrics()" )

        count_values  = 0
        count_metrics = 0
        count_bytes   = 0

        for hostname, mymetrics in self.myMetrics.items():

            for metricname, mymetric in mymetrics.items():

                count_metrics += 1

                for dmetric in mymetric:

                    count_values += 1

                    # One byte per character of the queued timestamp and value strings
                    count_bytes += len( str( dmetric['time'] ) )
                    count_bytes += len( str( dmetric['val'] ) )

        debug_msg( 5, "size of cluster '" + self.cluster + "': " +
            str( len( self.myMetrics.keys() ) ) + " hosts " +
            str( count_metrics ) + " metrics " + str( count_values ) + " values " +
            str( count_bytes ) + " bytes" )

        for hostname, mymetrics in self.myMetrics.items():

            for metricname, mymetric in mymetrics.items():

                metrics_to_store = [ ]

                # Pop metrics from stack for storing until none is left
                # atomic code: only 1 thread at a time may access myMetrics

                # <ATOMIC>
                #
                self.slot.acquire()

                while len( self.myMetrics[ hostname ][ metricname ] ) > 0:

                    try:
                        metrics_to_store.append( self.myMetrics[ hostname ][ metricname ].pop( 0 ) )

                    except IndexError, msg:

                        # Somehow sometimes myMetrics[ hostname ][ metricname ]
                        # is still len 0 when the statement is executed.
                        # Just ignore IndexErrors..
                        pass

                self.slot.release()
                #
                # </ATOMIC>

                # Create a mapping table, each metric to the period where it should be stored
                #
                metric_serial_table = self.determineSerials( hostname, metricname, metrics_to_store )

                update_rets = [ ]

                for period, pmetric in metric_serial_table.items():

                    create_ret = self.createCheck( hostname, metricname, period )

                    update_ret = self.update( hostname, metricname, period, pmetric )

                    if update_ret == 0:

                        debug_msg( 9, 'stored metric %s for %s' %( metricname, hostname ) )
                    else:
                        debug_msg( 9, 'metric update failed' )

                    update_rets.append( create_ret )
                    update_rets.append( update_ret )

                # Let's ignore errors here for now, we need to make sure last update time
                # is correct!
                #
                #if not (1) in update_rets:

                self.memLastUpdate( hostname, metricname, metrics_to_store )

        debug_msg( 5, "Leaving storeMetrics()" )

    def makeTimeSerial( self ):
        """Generate a time serial: seconds since epoch"""

        mytime = int( time.time() )

        return mytime

    def makeRrdPath( self, host, metricname, timeserial ):
        """Make a RRD location/path and filename"""

        rrd_dir  = '%s/%s/%s/%s' %( check_dir( ARCHIVE_PATH ), self.cluster, host, timeserial )
        rrd_file = '%s/%s.rrd' %( rrd_dir, metricname )

        return rrd_dir, rrd_file

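    # The resulting layout under ARCHIVE_PATH is one RRD per metric, grouped
    # per cluster, host and timeserial; e.g. (path components hypothetical):
    #
    #   <ARCHIVE_PATH>/mycluster/node01/1183212345/load_one.rrd
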
    def getLastRrdTimeSerial( self, host ):
        """Find the last timeserial (directory) for this host"""

        newest_timeserial = 0

        for serial_dir in self.timeserials[ host ]:

            valid_dir = 1

            for letter in serial_dir:
                if letter not in string.digits:
                    valid_dir = 0

            if valid_dir:
                # Compare numerically: timeserials are epoch timestamps used as directory names
                if not newest_timeserial or int( serial_dir ) > int( newest_timeserial ):
                    newest_timeserial = serial_dir

        if newest_timeserial:
            return newest_timeserial
        else:
            return 0

    def determinePeriod( self, host, check_serial ):
        """Determine to which period (directory) this time(serial) belongs"""

        period_serial = 0

        if self.timeserials.has_key( host ):

            for serial in self.timeserials[ host ]:

                # Numeric comparison: serials may be stored as strings or ints
                if int( check_serial ) >= int( serial ) and int( period_serial ) < int( serial ):

                    period_serial = serial

        return period_serial

    def determineSerials( self, host, metricname, metriclist ):
        """
        Determine the correct serial and corresponding rrd to store
        for a list of metrics
        """

        metric_serial_table = { }

        for metric in metriclist:

            if metric['name'] == metricname:

                period = self.determinePeriod( host, metric['time'] )

                archive_secs = ARCHIVE_HOURS_PER_RRD * (60 * 60)

                if ( int( metric['time'] ) - int( period ) ) > archive_secs:

                    # This one should get its own new period
                    period = metric['time']

                    if not self.timeserials.has_key( host ):
                        self.timeserials[ host ] = [ ]

                    self.timeserials[ host ].append( period )

                if not metric_serial_table.has_key( period ):

                    metric_serial_table[ period ] = [ ]

                metric_serial_table[ period ].append( metric )

        return metric_serial_table

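    # The returned table maps each period (timeserial) to the metrics that
    # belong in that period's RRD; e.g. (timestamps hypothetical):
    #
    #   { '1183212345': [ metric1, metric2 ], '1183298745': [ metric3 ] }
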
    def createCheck( self, host, metricname, timeserial ):
        """Check if an rrd already exists for this metric, create if not"""

        debug_msg( 9, 'rrdcreate: using timeserial %s for %s/%s' %( timeserial, host, metricname ) )

        rrd_dir, rrd_file = self.makeRrdPath( host, metricname, timeserial )

        if not os.path.exists( rrd_dir ):

            try:
                os.makedirs( rrd_dir )

            except OSError, msg:

                if str( msg ).find( 'File exists' ) != -1:

                    # Ignore exists errors
                    pass

                else:

                    print msg
                    return

            debug_msg( 9, 'created dir %s' %( str(rrd_dir) ) )

        if not os.path.exists( rrd_file ):

            interval  = self.config.getInterval( self.cluster )
            heartbeat = 8 * int( interval )

            params = [ ]

            params.append( '--step' )
            params.append( str( interval ) )

            params.append( '--start' )
            params.append( str( int( timeserial ) - 1 ) )

            params.append( 'DS:sum:GAUGE:%d:U:U' %heartbeat )
            params.append( 'RRA:AVERAGE:0.5:1:%s' %(ARCHIVE_HOURS_PER_RRD * 240) )

            self.rrdm.create( str(rrd_file), params )

            debug_msg( 9, 'created rrd %s' %( str(rrd_file) ) )

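    # For a 20 second polling interval, the create above roughly amounts to
    # the following rrdtool invocation (filename and start time hypothetical):
    #
    #   rrdtool create load_one.rrd --step 20 --start 1183212344 \
    #       DS:sum:GAUGE:160:U:U RRA:AVERAGE:0.5:1:<ARCHIVE_HOURS_PER_RRD * 240>
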
    def update( self, host, metricname, timeserial, metriclist ):
        """
        Update rrd file for host with metricname
        in directory timeserial with metriclist
        """

        debug_msg( 9, 'rrdupdate: using timeserial %s for %s/%s' %( timeserial, host, metricname ) )

        rrd_dir, rrd_file = self.makeRrdPath( host, metricname, timeserial )

        update_list = self.makeUpdateList( host, metriclist )

        if len( update_list ) > 0:
            ret = self.rrdm.update( str(rrd_file), update_list )

            if ret:
                return 1

            debug_msg( 9, 'updated rrd %s with %s' %( str(rrd_file), string.join( update_list ) ) )

        return 0

def daemon():
    """Detach from the terminal and become a daemon, then start the threads"""

    # Fork the first child
    #
    pid = os.fork()

    if pid > 0:

        sys.exit(0)  # end parent

    # creates a session and sets the process group ID
    #
    os.setsid()

    # Fork the second child, so we can never reacquire a controlling terminal
    #
    pid = os.fork()

    if pid > 0:

        sys.exit(0)  # end parent

    # Go to the root directory and set the umask
    #
    os.chdir('/')
    os.umask(0)

    # Close the standard streams; the open() below then reuses fd 0 (the
    # lowest free descriptor) for /dev/null, and dup2() copies it to fds 1 and 2
    sys.stdin.close()
    sys.stdout.close()
    sys.stderr.close()

    os.open('/dev/null', os.O_RDWR)
    os.dup2(0, 1)
    os.dup2(0, 2)

    run()

def run():
    """Start the XML gathering and processing threads"""

    myXMLSource = XMLGatherer( ARCHIVE_XMLSOURCE.split( ':' )[0], ARCHIVE_XMLSOURCE.split( ':' )[1] )
    myDataStore = DataSQLStore( JOB_SQL_DBASE.split( '/' )[0], JOB_SQL_DBASE.split( '/' )[1] )

    myTorqueProcessor  = TorqueXMLProcessor( myXMLSource, myDataStore )
    myGangliaProcessor = GangliaXMLProcessor( myXMLSource, myDataStore )

    try:
        torque_xml_thread  = threading.Thread( None, myTorqueProcessor.run, 'torque_proc_thread' )
        ganglia_xml_thread = threading.Thread( None, myGangliaProcessor.run, 'ganglia_proc_thread' )

        torque_xml_thread.start()
        ganglia_xml_thread.start()

    except thread.error, msg:

        debug_msg( 0, 'FATAL ERROR: Unable to start main threads!: ' + str(msg) )
        syslog.closelog()
        sys.exit(1)

    debug_msg( 0, 'main threading started.' )

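# ARCHIVE_XMLSOURCE is expected to be a 'host:port' pair (the XML source to
# gather from) and JOB_SQL_DBASE a 'host/database' pair, as split in run()
# above; e.g. (values hypothetical):
#
#   ARCHIVE_XMLSOURCE : localhost:8651
#   JOB_SQL_DBASE     : localhost/jobarchive
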
def main():
    """Program startup"""

    global DAEMONIZE, USE_SYSLOG

    if not processArgs( sys.argv[1:] ):
        sys.exit( 1 )

    if DAEMONIZE and USE_SYSLOG:
        syslog.openlog( 'jobarchived', syslog.LOG_NOWAIT, SYSLOG_FACILITY )

    if DAEMONIZE:
        daemon()
    else:
        run()

#
# Global functions
#

def check_dir( directory ):
    """Return directory without a trailing '/', stripping one if present"""

    if directory[-1] == '/':
        directory = directory[:-1]

    return directory

def reqtime2epoch( rtime ):
    """Convert a requested time string of 'HH:MM:SS' to a number of seconds"""

    (hours, minutes, seconds) = rtime.split( ':' )

    etime = int(seconds)
    etime = etime + ( int(minutes) * 60 )
    etime = etime + ( int(hours) * 60 * 60 )

    return etime

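# A quick worked example: reqtime2epoch( '02:30:10' ) returns
# 10 + (30 * 60) + (2 * 60 * 60) = 9010 seconds.
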
def debug_msg( level, msg ):
    """Print msg to stderr and/or syslog, but only if level fits the configured verbosity"""

    if not DAEMONIZE and DEBUG_LEVEL >= level:
        sys.stderr.write( printTime() + ' - ' + msg + '\n' )

    if DAEMONIZE and USE_SYSLOG and SYSLOG_LEVEL >= level:
        syslog.syslog( msg )

def printTime( ):
    """Return current time in human readable format"""

    return time.strftime("%a %d %b %Y %H:%M:%S")

# Ooohh, someone started me! Let's go..
if __name__ == '__main__':
    main()