#!/usr/bin/env python
#
# This file is part of Jobmonarch
#
# Copyright (C) 2006-2013 Ramon Bastiaans
#
# Jobmonarch is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Jobmonarch is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
# SVN $Id: jobarchived.py 949 2014-01-20 16:04:55Z ramonb $
#

import getopt, syslog, ConfigParser, sys

VERSION='__VERSION__'

def usage( ver ):

    print 'jobarchived %s' %VERSION

    if ver:
        return 0

    print
    print 'Purpose:'
    print '  The Job Archive Daemon (jobarchived) stores batch job information in a SQL database'
    print '  and node statistics in an RRD archive'
    print
    print 'Usage: jobarchived [OPTIONS]'
    print
    print '  -c, --config=FILE   The configuration file to use (default: /etc/jobarchived.conf)'
    print '  -p, --pidfile=FILE  Use pid file to store the process id'
    print '  -h, --help          Print help and exit'
    print '  -v, --version       Print version and exit'
    print

def processArgs( args ):

    SHORT_L = 'p:hvc:'
    LONG_L  = [ 'help', 'config=', 'pidfile=', 'version' ]

    config_filename = '/etc/jobarchived.conf'

    global PIDFILE

    PIDFILE = None

    try:

        opts, args = getopt.getopt( args, SHORT_L, LONG_L )

    except getopt.error, detail:

        print detail
        sys.exit(1)

    for opt, value in opts:

        if opt in [ '--config', '-c' ]:

            config_filename = value

        if opt in [ '--pidfile', '-p' ]:

            PIDFILE = value

        if opt in [ '--help', '-h' ]:

            usage( False )
            sys.exit( 0 )

        if opt in [ '--version', '-v' ]:

            usage( True )
            sys.exit( 0 )

    try:
        return loadConfig( config_filename )

    except ConfigParser.NoOptionError, detail:

        print detail
        sys.exit( 1 )

def loadConfig( filename ):

    def getlist( cfg_string ):

        my_list = [ ]

        for item_txt in cfg_string.split( ',' ):

            sep_char = None

            item_txt = item_txt.strip()

            for s_char in [ "'", '"' ]:

                if item_txt.find( s_char ) != -1:

                    if item_txt.count( s_char ) != 2:

                        print 'Missing quote: %s' %item_txt
                        sys.exit( 1 )

                    else:

                        sep_char = s_char
                        break

            if sep_char:

                item_txt = item_txt.split( sep_char )[1]

            my_list.append( item_txt )

        return my_list

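    # For illustration: getlist() splits a comma-separated config value into a
    # Python list, stripping optional single or double quotes around each item.
    # Hypothetical example:
    #
    #   getlist( "'clusterA', \"clusterB\", clusterC" )
    #   ->  [ 'clusterA', 'clusterB', 'clusterC' ]
    #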
    cfg = ConfigParser.ConfigParser()

    cfg.read( filename )

    global DEBUG_LEVEL, USE_SYSLOG, SYSLOG_LEVEL, SYSLOG_FACILITY, GMETAD_CONF, ARCHIVE_XMLSOURCE
    global ARCHIVE_DATASOURCES, ARCHIVE_PATH, ARCHIVE_HOURS_PER_RRD, ARCHIVE_EXCLUDE_METRICS
    global JOB_SQL_DBASE, DAEMONIZE, RRDTOOL, JOB_TIMEOUT, MODRRDTOOL, JOB_SQL_PASSWORD, JOB_SQL_USER

    ARCHIVE_PATH = cfg.get( 'DEFAULT', 'ARCHIVE_PATH' )

    ARCHIVE_HOURS_PER_RRD = cfg.getint( 'DEFAULT', 'ARCHIVE_HOURS_PER_RRD' )

    DEBUG_LEVEL = cfg.getint( 'DEFAULT', 'DEBUG_LEVEL' )

    USE_SYSLOG = cfg.getboolean( 'DEFAULT', 'USE_SYSLOG' )

    SYSLOG_LEVEL = cfg.getint( 'DEFAULT', 'SYSLOG_LEVEL' )

    MODRRDTOOL = False

    try:
        global rrdtool
        import rrdtool

        MODRRDTOOL = True

    except ImportError:

        MODRRDTOOL = False

        print "ERROR: py-rrdtool import FAILED: falling back to DEPRECATED use of rrdtool binary. This will slow down jobarchived significantly!"

    RRDTOOL = cfg.get( 'DEFAULT', 'RRDTOOL' )

    try:

        SYSLOG_FACILITY = getattr( syslog, 'LOG_' + cfg.get( 'DEFAULT', 'SYSLOG_FACILITY' ) )

    except AttributeError, detail:

        print 'Unknown syslog facility'
        sys.exit( 1 )

    GMETAD_CONF = cfg.get( 'DEFAULT', 'GMETAD_CONF' )

    ARCHIVE_XMLSOURCE = cfg.get( 'DEFAULT', 'ARCHIVE_XMLSOURCE' )

    ARCHIVE_DATASOURCES = getlist( cfg.get( 'DEFAULT', 'ARCHIVE_DATASOURCES' ) )

    ARCHIVE_EXCLUDE_METRICS = getlist( cfg.get( 'DEFAULT', 'ARCHIVE_EXCLUDE_METRICS' ) )

    JOB_SQL_DBASE    = cfg.get( 'DEFAULT', 'JOB_SQL_DBASE' )
    JOB_SQL_USER     = cfg.get( 'DEFAULT', 'JOB_SQL_USER' )
    JOB_SQL_PASSWORD = cfg.get( 'DEFAULT', 'JOB_SQL_PASSWORD' )

    JOB_TIMEOUT = cfg.getint( 'DEFAULT', 'JOB_TIMEOUT' )

    DAEMONIZE = cfg.getboolean( 'DEFAULT', 'DAEMONIZE' )

    return True

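# For reference, a minimal hypothetical /etc/jobarchived.conf covering the
# options read by loadConfig() above; all values are examples only and depend
# on the local Ganglia and batch setup:
#
#   [DEFAULT]
#   ARCHIVE_PATH            : /var/lib/jobarchived/rrds
#   ARCHIVE_HOURS_PER_RRD   : 12
#   ARCHIVE_XMLSOURCE       : 127.0.0.1:8651
#   ARCHIVE_DATASOURCES     : "my cluster"
#   ARCHIVE_EXCLUDE_METRICS : "multicpu_.*"
#   GMETAD_CONF             : /etc/ganglia/gmetad.conf
#   RRDTOOL                 : /usr/bin/rrdtool
#   JOB_SQL_DBASE           : localhost/jobarchive
#   JOB_SQL_USER            : monarch
#   JOB_SQL_PASSWORD        : secret
#   JOB_TIMEOUT             : 72
#   DEBUG_LEVEL             : 1
#   USE_SYSLOG              : yes
#   SYSLOG_LEVEL            : 0
#   SYSLOG_FACILITY         : DAEMON
#   DAEMONIZE               : yes
#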
# What XML data types not to store
#
UNSUPPORTED_ARCHIVE_TYPES = [ 'string' ]

# Maximum time (in seconds) a parsethread may run
#
PARSE_TIMEOUT = 60

# Maximum time (in seconds) a storethread may run
#
STORE_TIMEOUT = 360

"""
The Job Archiving Daemon
"""

from types import *

import xml.sax, xml.sax.handler, socket, string, os, os.path, time, thread, threading, random, re

try:
    import psycopg2

except ImportError, details:

    print "FATAL ERROR: psycopg2 python module not found"
    sys.exit( 1 )

class InitVars:
    Vars = {}

    def __init__(self, **key_arg):
        for (key, value) in key_arg.items():
            if value:
                self.Vars[key] = value
            else:
                self.Vars[key] = None

    def __call__(self, *key):
        key = "%s" % key
        return self.Vars[key]

    def __getitem__(self, key):
        return self.Vars[key]

    def __repr__(self):
        return repr(self.Vars)

    def keys(self):
        barf = map(None, self.Vars.keys())
        return barf

    def values(self):
        barf = map(None, self.Vars.values())
        return barf

    def has_key(self, key):
        if self.Vars.has_key(key):
            return 1
        else:
            return 0

class DBError(Exception):
    def __init__(self, msg=''):
        self.msg = msg
        Exception.__init__(self, msg)
    def __repr__(self):
        return self.msg
    __str__ = __repr__

#
# Class to connect to a database
# and return the query in a list or dictionary.
#
class DB:
    def __init__(self, db_vars):

        self.dict   = db_vars
        self.result = None    # guard for the container methods below

        if self.dict.has_key('User'):
            self.user = self.dict['User']
        else:
            self.user = 'postgres'

        if self.dict.has_key('Host'):
            self.host = self.dict['Host']
        else:
            self.host = 'localhost'

        if self.dict.has_key('Password'):
            self.passwd = self.dict['Password']
        else:
            self.passwd = ''

        if self.dict.has_key('DataBaseName'):
            self.db = self.dict['DataBaseName']
        else:
            self.db = 'jobarchive'

        # connect_string = 'host:port:database:user:password'
        dsn = "host='%s' dbname='%s' user='%s' password='%s'" %(self.host, self.db, self.user, self.passwd)

        try:
            self.SQL = psycopg2.connect(dsn)
        except psycopg2.Error, details:
            error_str = "%s" %details
            raise DBError(error_str)

    def __repr__(self):
        return repr(self.result)

    def __nonzero__(self):
        return not(self.result == None)

    def __len__(self):
        return len(self.result)

    def __getitem__(self,i):
        return self.result[i]

    def __getslice__(self,i,j):
        return self.result[i:j]

    def Get(self, q_str):
        c = self.SQL.cursor()
        try:
            c.execute(q_str)
            result = c.fetchall()
        except psycopg2.Error, details:
            c.close()
            error_str = "%s" %details
            raise DBError(error_str)

        c.close()
        return result

    def Set(self, q_str):
        c = self.SQL.cursor()
        try:
            c.execute(q_str)

        except psycopg2.Error, details:
            c.close()
            error_str = "%s" %details
            raise DBError(error_str)

        c.close()
        return True

    def Commit(self):

        return self.SQL.commit()

    def Rollback( self ):

        return self.SQL.rollback()

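# Illustrative (hypothetical) use of the DB wrapper above; the real connection
# parameters come from jobarchived.conf via DataSQLStore below:
#
#   db   = DB( InitVars( DataBaseName='jobarchive', User='monarch',
#                        Host='localhost', Password='secret' ) )
#   rows = db.Get( "SELECT job_id FROM jobs WHERE job_status != 'F'" )
#   db.Set( "UPDATE jobs SET job_status = 'F' WHERE job_id = '123'" )
#   db.Commit()
#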
class DataSQLStore:

    db_vars = None
    dbc     = None

    def __init__( self, hostname, database ):

        global JOB_SQL_USER, JOB_SQL_PASSWORD

        self.db_vars = InitVars(DataBaseName=database,
                                User=JOB_SQL_USER,
                                Host=hostname,
                                Password=JOB_SQL_PASSWORD,
                                Dictionary='true')

        try:
            self.dbc = DB(self.db_vars)
        except DBError, details:
            debug_msg( 0, 'FATAL ERROR: Unable to connect to database!: ' +str(details) )
            sys.exit(1)

    def setDatabase(self, statement):

        ret = self.doDatabase('set', statement)
        return ret

    def getDatabase(self, statement):

        ret = self.doDatabase('get', statement)
        return ret

    def doCommit( self ):

        return self.dbc.Commit()

    def doRollback( self ):

        return self.dbc.Rollback()

    def doDatabase(self, type, statement):

        debug_msg( 10, 'doDatabase(): %s: %s' %(type, statement) )
        try:
            if type == 'set':
                result = self.dbc.Set( statement )
            elif type == 'get':
                result = self.dbc.Get( statement )

        except DBError, detail:
            operation = statement.split(' ')[0]
            debug_msg( 0, 'ERROR: ' +operation+ ' on database failed while doing ['+statement+'] full msg: '+str(detail) )
            return False

        debug_msg( 10, 'doDatabase(): result: %s' %(result) )
        return result

    def getJobNodeId( self, job_id, node_id ):

        id = self.getDatabase( "SELECT job_id,node_id FROM job_nodes WHERE job_id = '%s' AND node_id = '%s'" %(job_id, node_id) )
        if not id:
            return False

        if len( id ) > 0:

            if len( id[0] ) > 0 and id[0] != '':

                return True

        return False

    def getNodeId( self, hostname ):

        id = self.getDatabase( "SELECT node_id FROM nodes WHERE node_hostname = '%s'" %hostname )

        if len( id ) > 0:

            id = id[0][0]

            return id
        else:
            return None

    def getNodeIds( self, hostnames ):

        ids = [ ]

        for node in hostnames:

            id = self.getNodeId( node )

            if id:
                ids.append( id )

        return ids

    def getJobId( self, jobid ):

        id = self.getDatabase( "SELECT job_id FROM jobs WHERE job_id = '%s'" %jobid )

        if id:
            id = id[0][0]

            return id
        else:
            return None

    def addJob( self, job_id, jobattrs ):

        if not self.getJobId( job_id ):

            return self.mutateJob( 'insert', job_id, jobattrs )
        else:
            return self.mutateJob( 'update', job_id, jobattrs )

    def mutateJob( self, action, job_id, jobattrs ):

        job_values = [ 'name', 'queue', 'owner', 'requested_time', 'requested_memory', 'ppn', 'status', 'start_timestamp', 'stop_timestamp' ]

        insert_col_str = 'job_id'
        insert_val_str = "'%s'" %job_id
        update_str     = None

        debug_msg( 10, 'mutateJob(): %s %s' %(action, job_id) )

        ids = [ ]

        for valname, value in jobattrs.items():

            if valname in job_values and value != '':

                column_name = 'job_' + valname

                if action == 'insert':

                    if not insert_col_str:
                        insert_col_str = column_name
                    else:
                        insert_col_str = insert_col_str + ',' + column_name

                    if not insert_val_str:
                        insert_val_str = value
                    else:
                        insert_val_str = insert_val_str + ",'%s'" %value

                elif action == 'update':

                    if not update_str:
                        update_str = "%s='%s'" %(column_name, value)
                    else:
                        update_str = update_str + ",%s='%s'" %(column_name, value)

            elif valname == 'nodes' and value:

                node_valid = 1

                if len(value) == 1:

                    if jobattrs['status'] == 'Q':

                        node_valid = 0

                    else:

                        node_valid = 0

                        for node_char in str(value[0]):

                            if string.find( string.digits, node_char ) != -1 and not node_valid:

                                node_valid = 1

                if node_valid:

                    ids = self.addNodes( value, jobattrs['domain'] )

        if action == 'insert':

            db_ret = self.setDatabase( "INSERT INTO jobs ( %s ) VALUES ( %s )" %( insert_col_str, insert_val_str ) )

        elif action == 'update':

            db_ret = self.setDatabase( "UPDATE jobs SET %s WHERE job_id='%s'" %(update_str, job_id) )

        if len( ids ) > 0:
            self.addJobNodes( job_id, ids )

        return db_ret

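    # For example, with hypothetical values, mutateJob() above generates SQL like:
    #
    #   insert: INSERT INTO jobs ( job_id,job_name,job_owner ) VALUES ( '123','myjob','alice' )
    #   update: UPDATE jobs SET job_name='myjob',job_owner='alice' WHERE job_id='123'
    #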
    def addNodes( self, hostnames, domain ):

        ids = [ ]

        for node in hostnames:

            node = '%s.%s' %( node, domain )
            id   = self.getNodeId( node )

            if not id:
                self.setDatabase( "INSERT INTO nodes ( node_hostname ) VALUES ( '%s' )" %node )
                id = self.getNodeId( node )

            ids.append( id )

        return ids

    def addJobNodes( self, jobid, nodes ):

        for node in nodes:

            if not self.getJobNodeId( jobid, node ):

                self.addJobNode( jobid, node )

    def addJobNode( self, jobid, nodeid ):

        self.setDatabase( "INSERT INTO job_nodes (job_id,node_id) VALUES ( '%s',%s )" %(jobid, nodeid) )

    def storeJobInfo( self, jobid, jobattrs ):

        return self.addJob( jobid, jobattrs )

    def checkTimedoutJobs( self ):

        debug_msg( 1, 'Housekeeping: checking database for timed out jobs..' )

        # Locate all jobs in the database that are not set to finished
        #
        q = "SELECT * from jobs WHERE job_status != 'F'"

        r = self.getDatabase( q )

        if len( r ) == 0:

            return None

        timeoutjobs = [ ]

        jobtimeout_sec = JOB_TIMEOUT * (60 * 60)
        cur_time       = time.time()

        for row in r:

            job_id              = row[0]
            job_requested_time  = row[4]
            job_status          = row[7]
            job_start_timestamp = row[8]

            # Only consider jobs that are marked running and that we
            # actually saw start
            #
            if job_status == 'R' and job_start_timestamp:

                start_timestamp = int( job_start_timestamp )

                # If it was set to running longer than JOB_TIMEOUT
                # close the job: it probably finished while we were not running
                #
                if ( cur_time - start_timestamp ) > jobtimeout_sec:

                    if job_requested_time:

                        rtime_epoch = reqtime2epoch( job_requested_time )
                    else:
                        rtime_epoch = None

                    timeoutjobs.append( (job_id, job_start_timestamp, rtime_epoch) )

        debug_msg( 1, 'Housekeeping: Found ' + str( len( timeoutjobs ) ) + ' timed out jobs in database: closing entries' )

        ret_jobids_clean = [ ]

        # Close these jobs in the database:
        # update the stop_timestamp to: start_timestamp + requested wallclock
        # and set state: finished
        #
        for j in timeoutjobs:

            ( i, s, r ) = j

            if r:
                new_end_timestamp = int( s ) + r

                q = "UPDATE jobs SET job_status='F',job_stop_timestamp = '" + str( new_end_timestamp ) + "' WHERE job_id = '" + str(i) + "'"
                self.setDatabase( q )
            else:

                # Requested walltime unknown: cannot guess an end time, so delete them
                q = "DELETE FROM jobs WHERE job_id = '" + str( i ) + "'"
                self.setDatabase( q )

            ret_jobids_clean.append( i )

        debug_msg( 1, 'Housekeeping: done.' )

        return ret_jobids_clean

    def checkStaleJobs( self ):

        debug_msg( 1, 'Housekeeping: checking database for stale jobs..' )

        # Locate all jobs in the database that are not set to finished
        #
        q = "SELECT * from jobs WHERE job_status != 'F'"

        r = self.getDatabase( q )

        if len( r ) == 0:

            return None

        cleanjobs = [ ]

        cur_time = time.time()

        for row in r:

            job_id              = row[0]
            job_requested_time  = row[4]
            job_status          = row[7]
            job_start_timestamp = row[8]

            # If it was set to queued and we didn't see it start,
            # there's no point in keeping it around
            #
            if job_status == 'Q' or not job_start_timestamp:

                cleanjobs.append( job_id )

        debug_msg( 1, 'Housekeeping: Found ' + str( len( cleanjobs ) ) + ' stale jobs in database: deleting entries' )

        # Purge these from the database
        #
        for j in cleanjobs:

            q = "DELETE FROM jobs WHERE job_id = '" + str( j ) + "'"
            self.setDatabase( q )

        debug_msg( 1, 'Housekeeping: done.' )

        return cleanjobs

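# Typical (hypothetical) use of DataSQLStore by the XML handlers below:
#
#   ds = DataSQLStore( 'localhost', 'jobarchive' )
#   ds.storeJobInfo( '123456', { 'name': 'myjob', 'queue': 'batch',
#                                'owner': 'alice', 'status': 'R' } )
#   ds.doCommit()
#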
class RRDMutator:
    """A class for performing RRD mutations"""

    binary = None

    def __init__( self, binary=None ):
        """Set alternate binary if supplied"""

        if binary:
            self.binary = binary

    def create( self, filename, args ):
        """Create a new rrd with args"""

        global MODRRDTOOL

        if MODRRDTOOL:
            return self.perform( 'create', filename, args )
        else:
            return self.perform( 'create', '"' + filename + '"', args )

    def update( self, filename, args ):
        """Update a rrd with args"""

        global MODRRDTOOL

        if MODRRDTOOL:
            return self.perform( 'update', filename, args )
        else:
            return self.perform( 'update', '"' + filename + '"', args )

    def grabLastUpdate( self, filename ):
        """Determine the last update time of filename rrd"""

        global MODRRDTOOL

        last_update = 0

        # Use the py-rrdtool module if it's available on this system
        #
        if MODRRDTOOL:

            debug_msg( 8, 'rrdtool.info( ' + filename + ' )' )

            rrd_header = { }

            try:
                rrd_header = rrdtool.info( filename )
            except rrdtool.error, msg:
                debug_msg( 8, str( msg ) )

            if rrd_header.has_key( 'last_update' ):
                return rrd_header[ 'last_update' ]
            else:
                return 0

        # For backwards compatibility: use the rrdtool binary if py-rrdtool is unavailable
        # DEPRECATED (slow!)
        #
        else:
            debug_msg( 8, self.binary + ' info ' + filename )

            my_pipe = os.popen( self.binary + ' info "' + filename + '"' )

            for line in my_pipe.readlines():

                if line.find( 'last_update') != -1:

                    last_update = line.split( ' = ' )[1].strip()

            if my_pipe:

                my_pipe.close()

            if last_update:
                return last_update
            else:
                return 0


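    # In the binary fallback above, 'rrdtool info' prints a line such as
    # (hypothetical timestamp):
    #
    #   last_update = 1389456000
    #
    # from which grabLastUpdate() takes the epoch value after the ' = ' separator.
    #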
    def perform( self, action, filename, args ):
        """Perform action on rrd filename with args"""

        global MODRRDTOOL

        arg_string = None

        if type( args ) is not ListType:
            debug_msg( 8, 'Arguments need to be of type List' )
            return 1

        for arg in args:

            if not arg_string:

                arg_string = arg
            else:
                arg_string = arg_string + ' ' + arg

        if MODRRDTOOL:

            debug_msg( 8, 'rrdtool.' + action + "( " + filename + ' ' + arg_string + ")" )

            try:
                debug_msg( 8, "filename '" + str(filename) + "' type "+ str(type(filename)) + " args " + str( args ) )

                if action == 'create':

                    rrdtool.create( str( filename ), *args )

                elif action == 'update':

                    rrdtool.update( str( filename ), *args )

            except rrdtool.error, msg:

                error_msg = str( msg )
                debug_msg( 8, error_msg )
                return 1

        else:

            debug_msg( 8, self.binary + ' ' + action + ' ' + filename + ' ' + arg_string )

            cmd   = os.popen( self.binary + ' ' + action + ' ' + filename + ' ' + arg_string )
            lines = cmd.readlines()

            cmd.close()

            for line in lines:

                if line.find( 'ERROR' ) != -1:

                    error_msg = string.join( line.split( ' ' )[1:] )
                    debug_msg( 8, error_msg )
                    return 1

        return 0

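    # Example (hypothetical filename and arguments) of how perform() is driven
    # through the update() wrapper when the py-rrdtool module is loaded:
    #
    #   rrdm = RRDMutator()
    #   rrdm.update( 'node1/cpu_user.rrd', [ '1389456000:42.0' ] )
    #   # -> rrdtool.update( 'node1/cpu_user.rrd', '1389456000:42.0' )
    #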
class XMLProcessor:
    """Skeleton class for XML processors"""

    def run( self ):
        """Do main processing of XML here"""

        pass

class JobXMLProcessor( XMLProcessor ):
    """Main class for processing XML and acting on it"""

    def __init__( self, XMLSource, DataStore ):
        """Setup initial XML connection and handlers"""

        self.myXMLSource  = XMLSource
        self.myXMLHandler = JobXMLHandler( DataStore )
        self.myXMLError   = XMLErrorHandler()

        self.config = GangliaConfigParser( GMETAD_CONF )

        self.kill_thread = False

    def killThread( self ):

        self.kill_thread = True

    def run( self ):
        """Main XML processing"""

        debug_msg( 1, 'job_xml_thread(): started.' )

        while( 1 ):

            debug_msg( 1, 'job_xml_thread(): Retrieving XML data..' )

            my_data = self.myXMLSource.getData()

            debug_msg( 1, 'job_xml_thread(): Done retrieving: data size %d' %len(my_data) )

            if my_data:
                debug_msg( 1, 'job_xml_thread(): Parsing XML..' )

                xml.sax.parseString( my_data, self.myXMLHandler, self.myXMLError )

                if self.myXMLError.isFatal():

                    sys.exit( 1 )

                debug_msg( 1, 'job_xml_thread(): Done parsing.' )
            else:
                debug_msg( 1, 'job_xml_thread(): Got no data.' )

            if self.kill_thread:

                debug_msg( 1, 'job_xml_thread(): killed.' )
                return None

            debug_msg( 1, 'job_xml_thread(): Sleeping.. (%ss)' %(str( self.config.getLowestInterval() ) ) )
            time.sleep( self.config.getLowestInterval() )

class JobXMLHandler( xml.sax.handler.ContentHandler ):
    """Parse job info XML from our monarch plugin"""

    def __init__( self, datastore ):

        self.ds             = datastore
        self.jobs_processed = [ ]
        self.jobs_to_store  = [ ]
        self.jobAttrs       = { }
        self.jobAttrsSaved  = { }

        self.iteration = 0

        self.ds.checkTimedoutJobs()
        self.ds.checkStaleJobs()

        debug_msg( 1, "XML: Handler created" )

    def startDocument( self ):

        self.jobs_processed = [ ]
        self.heartbeat      = 0
        self.elementct      = 0
        self.iteration      = self.iteration + 1

        if self.iteration > 20:

            timedout_jobs  = self.ds.checkTimedoutJobs()
            self.iteration = 0

            if timedout_jobs != None:

                for j in timedout_jobs:

                    del self.jobAttrs[ j ]
                    del self.jobAttrsSaved[ j ]

        debug_msg( 1, "XML: Start document: iteration %s" %str(self.iteration) )

    def startElement( self, name, attrs ):
        """
        This XML will be all gmetric XML,
        so there will be no specific start/end element:
        just one XML statement with all info
        """

        global ARCHIVE_DATASOURCES

        jobinfo = { }

        self.elementct += 1

        if name == 'CLUSTER':

            self.clustername = str( attrs.get( 'NAME', "" ) )

        elif name == 'METRIC' and self.clustername in ARCHIVE_DATASOURCES:

            metricname = str( attrs.get( 'NAME', "" ) )

            if metricname == 'zplugin_monarch_heartbeat':

                self.heartbeat = str( attrs.get( 'VAL', "" ) )

            elif metricname.find( 'zplugin_monarch_job' ) != -1:

                job_id = metricname.split( 'zplugin_monarch_job_' )[1].split( '_' )[1]
                val    = str( attrs.get( 'VAL', "" ) )

                valinfo = val.split( ' ' )

                for myval in valinfo:

                    if len( myval.split( '=' ) ) > 1:

                        valname = myval.split( '=' )[0]
                        value   = myval.split( '=' )[1]

                        if valname == 'nodes':

                            value = value.split( ';' )

                        jobinfo[ valname ] = value

                self.jobAttrs[ job_id ] = jobinfo

                self.jobs_processed.append( job_id )

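    # For example, a hypothetical monarch job metric:
    #
    #   NAME = 'zplugin_monarch_job_0_123456'
    #   VAL  = 'name=myjob queue=batch owner=alice status=R nodes=node1;node2'
    #
    # is parsed above into job_id '123456' and jobinfo:
    #
    #   { 'name': 'myjob', 'queue': 'batch', 'owner': 'alice',
    #     'status': 'R', 'nodes': [ 'node1', 'node2' ] }
    #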
    def endDocument( self ):
        """When all metrics have gone, check if any jobs have finished"""

        jobs_finished = [ ]

        debug_msg( 1, "XML: Processed "+str(self.elementct)+ " elements - found "+str(len(self.jobs_processed))+" jobs" )

        if self.heartbeat == 0:
            return None

        for jobid, jobinfo in self.jobAttrs.items():

            if jobinfo['reported'] != self.heartbeat:

                if (jobinfo['status'] != 'R'):
                    debug_msg( 1, 'job %s report time %s does not match current heartbeat %s : ignoring job' %(jobid, jobinfo['reported'], self.heartbeat ) )
                    del self.jobAttrs[ jobid ]

                    if jobid in self.jobs_to_store:
                        self.jobs_to_store.remove( jobid )

                    continue

                elif jobid not in self.jobs_processed:

                    # Was running previous heartbeat but not anymore: must be finished
                    self.jobAttrs[ jobid ]['status']         = 'F'
                    self.jobAttrs[ jobid ]['stop_timestamp'] = str( self.heartbeat )
                    debug_msg( 1, 'job %s appears to have finished' %jobid )

                    jobs_finished.append( jobid )

                    if not jobid in self.jobs_to_store:
                        self.jobs_to_store.append( jobid )

                    continue

            elif self.jobAttrsSaved.has_key( jobid ):

                # This should pretty much never happen, but hey let's be careful
                # Perhaps if someone altered their job while in queue with qalter

                if self.jobinfoChanged( jobid, jobinfo ):

                    self.jobAttrs[ jobid ]['stop_timestamp'] = ''
                    self.jobAttrs[ jobid ] = self.setJobAttrs( self.jobAttrs[ jobid ], jobinfo )

                    if not jobid in self.jobs_to_store:

                        self.jobs_to_store.append( jobid )

                    debug_msg( 10, 'jobinfo for job %s has changed' %jobid )
            else:
                debug_msg( 10, 'new job %s' %jobid )

                if not jobid in self.jobs_to_store:

                    self.jobs_to_store.append( jobid )

        debug_msg( 1, 'job_xml_thread(): Found %s updated jobs.' %len(self.jobs_to_store) )

        failed_store  = [ ]
        success_store = [ ]

        if len( self.jobs_to_store ) > 0:

            debug_msg( 1, 'job_xml_thread(): Storing jobs to database..' )

            for n in range( 0, len(self.jobs_to_store ) ):

                if len( self.jobs_to_store ) == 0:
                    break

                jobid = self.jobs_to_store.pop( 0 )

                db_ok = self.ds.storeJobInfo( jobid, self.jobAttrs[ jobid ] )

                if not db_ok:

                    self.ds.doRollback()
                    failed_store.append( jobid )
                    continue

                self.ds.doCommit()
                success_store.append( jobid )

                if not jobid in jobs_finished:

                    self.jobAttrsSaved[ jobid ] = self.jobAttrs[ jobid ]

                elif self.jobAttrsSaved.has_key( jobid ):

                    del self.jobAttrsSaved[ jobid ]

                if self.jobAttrs[ jobid ]['status'] == 'F':

                    del self.jobAttrs[ jobid ]

            result_str = 'successfully stored: %s jobs' %str(len(success_store))

            if len( failed_store ) > 0:
                result_str = result_str + ' - failed to store: %s jobs - deferred to next interval' %str(len(failed_store))

            debug_msg( 1, 'job_xml_thread(): Done storing. %s' %result_str )

        else:
            debug_msg( 1, 'job_xml_thread(): No jobs to store.' )

        self.jobs_processed = [ ]

        # TODO: once in a while check database AND self.jobAttrsSaved for stale jobs

    def setJobAttrs( self, old, new ):
        """
        Set new job attributes in old, without losing the
        existing fields that new doesn't have
        """

        for valname, value in new.items():
            old[ valname ] = value

        return old


    def jobinfoChanged( self, jobid, jobinfo ):
        """
        Check if jobinfo has changed from the saved jobAttrsSaved[jobid],
        provided its report time is newer than the previous one
        and recent (equal to the current heartbeat)
        """

        ignore_changes = [ 'reported' ]

        if self.jobAttrsSaved.has_key( jobid ):

            for valname, value in jobinfo.items():

                if valname not in ignore_changes:

                    if self.jobAttrsSaved[ jobid ].has_key( valname ):

                        if value != self.jobAttrsSaved[ jobid ][ valname ]:

                            if jobinfo['reported'] > self.jobAttrsSaved[ jobid ][ 'reported' ]:

                                debug_msg( 1, "job %s field '%s' changed since saved from: %s to: %s" %( jobid, valname, self.jobAttrsSaved[ jobid ][ valname ], value ) )

                                return True

                    else:
                        debug_msg( 1, "job %s did not have field '%s'" %( jobid, valname ) )
                        return True

        return False

class GangliaXMLHandler( xml.sax.handler.ContentHandler ):
    """Parse Ganglia's XML"""

    def __init__( self, config, datastore ):
        """Setup initial variables and gather info on existing rrd archive"""

        self.config   = config
        self.clusters = { }
        self.ds       = datastore

        debug_msg( 1, 'Housekeeping: checking RRD archive (may take a while)..' )
        self.gatherClusters()
        debug_msg( 1, 'Housekeeping: RRD check complete.' )

    def gatherClusters( self ):
        """Find all existing clusters in archive dir"""

        global ARCHIVE_DATASOURCES

        archive_dir = check_dir(ARCHIVE_PATH)

        hosts = [ ]

        if os.path.exists( archive_dir ):

            dirlist = os.listdir( archive_dir )

            for cfgcluster in ARCHIVE_DATASOURCES:

                if cfgcluster not in dirlist:

                    # Autocreate a directory for this cluster
                    # assume it is new
                    #
                    cluster_dir = '%s/%s' %( check_dir(ARCHIVE_PATH), cfgcluster )

                    os.mkdir( cluster_dir )

                    dirlist.append( cfgcluster )

            for item in dirlist:

                clustername = item

                if not self.clusters.has_key( clustername ) and clustername in ARCHIVE_DATASOURCES:

                    self.clusters[ clustername ] = RRDHandler( self.config, clustername )

                    debug_msg( 9, 'Found cluster dir: %s' %( clustername ) )

        debug_msg( 9, "Found "+str(len(self.clusters.keys()))+" cluster dirs" )

    def startElement( self, name, attrs ):
        """Memorize appropriate data from xml start tags"""

        global ARCHIVE_DATASOURCES

        if name == 'GANGLIA_XML':

            self.XMLSource      = str( attrs.get( 'SOURCE', "" ) )
            self.gangliaVersion = str( attrs.get( 'VERSION', "" ) )

            debug_msg( 10, 'Found XML data: source %s version %s' %( self.XMLSource, self.gangliaVersion ) )

        elif name == 'GRID':

            self.gridName = str( attrs.get( 'NAME', "" ) )
            self.time     = str( attrs.get( 'LOCALTIME', "" ) )

            debug_msg( 10, '`-Grid found: %s' %( self.gridName ) )

        elif name == 'CLUSTER':

            self.clusterName = str( attrs.get( 'NAME', "" ) )
            self.time        = str( attrs.get( 'LOCALTIME', "" ) )

            if not self.clusters.has_key( self.clusterName ) and self.clusterName in ARCHIVE_DATASOURCES:

                self.clusters[ self.clusterName ] = RRDHandler( self.config, self.clusterName )

            debug_msg( 10, ' |-Cluster found: %s' %( self.clusterName ) )

        elif name == 'HOST' and self.clusterName in ARCHIVE_DATASOURCES:

            self.hostName     = str( attrs.get( 'NAME', "" ) )
            self.hostIp       = str( attrs.get( 'IP', "" ) )
            self.hostReported = str( attrs.get( 'REPORTED', "" ) )

            debug_msg( 10, ' | |-Host found: %s - ip %s reported %s' %( self.hostName, self.hostIp, self.hostReported ) )

        elif name == 'METRIC' and self.clusterName in ARCHIVE_DATASOURCES:

            type = str( attrs.get( 'TYPE', "" ) )

            exclude_metric = False

            for ex_metricstr in ARCHIVE_EXCLUDE_METRICS:

                orig_name = str( attrs.get( 'NAME', "" ) )

                if string.lower( orig_name ) == string.lower( ex_metricstr ):

                    exclude_metric = True

                elif re.match( ex_metricstr, orig_name ):

                    exclude_metric = True

            if type not in UNSUPPORTED_ARCHIVE_TYPES and not exclude_metric:

                myMetric         = { }
                myMetric['name'] = str( attrs.get( 'NAME', "" ) )
                myMetric['val']  = str( attrs.get( 'VAL', "" ) )
                myMetric['time'] = self.hostReported

                self.clusters[ self.clusterName ].memMetric( self.hostName, myMetric )

                debug_msg( 9, 'added metric %s from host %s to cluster %s' %( myMetric['name'], self.hostName, self.clusterName ) )
                debug_msg( 11, ' | | |-metric: %s:%s' %( myMetric['name'], myMetric['val'] ) )

    def storeMetrics( self ):
        """Store metrics of each cluster rrd handler"""

        for clustername, rrdh in self.clusters.items():

            ret = rrdh.storeMetrics()

            if ret:
                debug_msg( 9, 'An error occurred while storing metrics for cluster %s' %clustername )
                return 1

        return 0

class XMLErrorHandler( xml.sax.handler.ErrorHandler ):

    def __init__( self ):

        self.me_fatal = False

    def error( self, exception ):
        """Recoverable error"""

        debug_msg( 0, 'Recoverable XML error ' + str( exception ) + ' ignored.' )

    def fatalError( self, exception ):
        """Non-recoverable error"""

        exception_str = str( exception )

        # Ignore 'no element found' errors
        if exception_str.find( 'no element found' ) != -1:

            debug_msg( 0, 'No XML data found: Socket not (re)connected or datasource not available.' )
            return 0

        self.me_fatal = True

        debug_msg( 0, 'FATAL ERROR: Non-recoverable XML error ' + str( exception ) )

    def isFatal( self ):

        return self.me_fatal

    def warning( self, exception ):
        """Warning"""

        debug_msg( 0, 'Warning ' + str( exception ) )

class XMLGatherer:
    """Setup a connection and file object to Ganglia's XML"""

    s    = None
    fd   = None
    data = None
    slot = None

    # Time since the last update
    #
    LAST_UPDATE = 0

    # Minimum interval between updates
    #
    MIN_UPDATE_INT = 10

    # Is an update occurring now
    #
    update_now = False

    def __init__( self, host, port ):
        """Store host and port for connection"""

        self.host = host
        self.port = port
        self.slot = threading.Lock()

        self.retrieveData()

    def retrieveData( self ):
        """Setup connection to XML source"""

        self.update_now = True

        self.slot.acquire()

        self.data = None

        for res in socket.getaddrinfo( self.host, self.port, socket.AF_UNSPEC, socket.SOCK_STREAM ):

            af, socktype, proto, canonname, sa = res

            try:

                self.s = socket.socket( af, socktype, proto )

            except ( socket.error, socket.gaierror, socket.herror, socket.timeout ), msg:

                self.s = None
                continue

            try:

                self.s.connect( sa )

            except ( socket.error, socket.gaierror, socket.herror, socket.timeout ), msg:

                self.disconnect()
                continue

            break

        if self.s is None:

            debug_msg( 0, 'FATAL ERROR: Could not open socket or unable to connect to datasource!' )
            self.update_now = False
            #sys.exit( 1 )

        else:
            #self.s.send( '\n' )

            my_fp   = self.s.makefile( 'r' )
            my_data = my_fp.readlines()
            my_data = string.join( my_data, '' )

            self.data = my_data

            self.LAST_UPDATE = time.time()

        self.slot.release()

        self.update_now = False

    def disconnect( self ):
        """Close socket"""

        if self.s:
            #self.s.shutdown( 2 )
            self.s.close()
            self.s = None

    def __del__( self ):
        """Kill the socket before we leave"""

        self.disconnect()

    def reGetData( self ):
        """Reconnect"""

        while self.update_now:

            # Must be another update in progress:
            # Wait until the update is complete
            #
            time.sleep( 1 )

        if self.s:
            self.disconnect()

        self.retrieveData()

    def getData( self ):

        """Return the XML data"""

        # If more than MIN_UPDATE_INT seconds have passed since the last data
        # update, refresh the XML before returning it
        #

        cur_time = time.time()

        if ( cur_time - self.LAST_UPDATE ) > self.MIN_UPDATE_INT:

            self.reGetData()

        while self.update_now:

            # Must be another update in progress:
            # Wait until the update is complete
            #
            time.sleep( 1 )

        return self.data

    def makeFileDescriptor( self ):
        """Make file descriptor that points to our socket connection"""

        self.reGetData()

        if self.s:
            self.fd = self.s.makefile( 'r' )

    def getFileObject( self ):
        """Connect, and return a file object"""

        self.makeFileDescriptor()

        if self.fd:
            return self.fd

class GangliaXMLProcessor( XMLProcessor ):
    """Main class for processing XML and acting on it"""

    def __init__( self, XMLSource, DataStore ):
        """Setup initial XML connection and handlers"""

        self.config       = GangliaConfigParser( GMETAD_CONF )
        self.myXMLSource  = XMLSource
        self.ds           = DataStore
        self.myXMLHandler = GangliaXMLHandler( self.config, self.ds )
        self.myXMLError   = XMLErrorHandler()

    def run( self ):
        """Main XML processing; start a xml and storethread"""

        xml_thread   = threading.Thread( None, self.processXML, 'xmlthread' )
        store_thread = threading.Thread( None, self.storeMetrics, 'storethread' )

        while( 1 ):

            if not xml_thread.isAlive():
                # Gather XML at the same interval as gmetad

                # threaded call to: self.processXML()
                #
                try:
                    xml_thread = threading.Thread( None, self.processXML, 'xml_thread' )
                    xml_thread.start()
                except thread.error, msg:
                    debug_msg( 0, 'ERROR: Unable to start xml_thread!: '+str(msg))
                    #return 1

            if not store_thread.isAlive():
                # Store metrics every .. sec

                # threaded call to: self.storeMetrics()
                #
                try:
                    store_thread = threading.Thread( None, self.storeMetrics, 'store_thread' )
                    store_thread.start()
                except thread.error, msg:
                    debug_msg( 0, 'ERROR: Unable to start store_thread!: '+str(msg))
                    #return 1

            # Just sleep a sec here, to prevent the daemon from going mad. We're all threads here anyway
            time.sleep( 1 )

    def storeMetrics( self ):
        """Store metrics retained in memory to disk"""

        global DEBUG_LEVEL

        # Store metrics somewhere between every 300 and 600 seconds
        #
        if DEBUG_LEVEL >= 1:
            STORE_INTERVAL = 60
        else:
            STORE_INTERVAL = random.randint( 300, 600 )

        try:
            store_metric_thread = threading.Thread( None, self.storeThread, 'store_metric_thread' )
            store_metric_thread.start()
        except thread.error, msg:
            debug_msg( 0, 'ERROR: Unable to start ganglia_store_thread()!: '+str(msg) )
            return 1

        debug_msg( 1, 'ganglia_store_thread(): started.' )

        debug_msg( 1, 'ganglia_store_thread(): Sleeping.. (%ss)' %STORE_INTERVAL )
        time.sleep( STORE_INTERVAL )
        debug_msg( 1, 'ganglia_store_thread(): Done sleeping.' )

        if store_metric_thread.isAlive():

            debug_msg( 1, 'ganglia_store_thread(): storemetricthread() still running, waiting to finish..' )
            store_metric_thread.join( STORE_TIMEOUT ) # Maximum time for the storing thread to finish

            if store_metric_thread.isAlive():

                debug_msg( 1, 'ganglia_store_thread(): Done waiting: storemetricthread() still running :( now what?' )
            else:
                debug_msg( 1, 'ganglia_store_thread(): Done waiting: storemetricthread() has finished' )

        debug_msg( 1, 'ganglia_store_thread(): finished.' )

        return 0

    def storeThread( self ):
        """Actual metric storing thread"""

        debug_msg( 1, 'ganglia_store_metric_thread(): started.' )
        debug_msg( 1, 'ganglia_store_metric_thread(): Storing data..' )

        ret = self.myXMLHandler.storeMetrics()
        if ret > 0:
            debug_msg( 0, 'ganglia_store_metric_thread(): UNKNOWN ERROR %s while storing Metrics!' %str(ret) )

        debug_msg( 1, 'ganglia_store_metric_thread(): Done storing.' )
        debug_msg( 1, 'ganglia_store_metric_thread(): finished.' )

        return 0

    def processXML( self ):
        """Process XML"""

        try:
            parsethread = threading.Thread( None, self.parseThread, 'parsethread' )
            parsethread.start()
        except thread.error, msg:
            debug_msg( 0, 'ERROR: Unable to start ganglia_xml_thread()!: ' + str(msg) )
            return 1

        debug_msg( 1, 'ganglia_xml_thread(): started.' )

        debug_msg( 1, 'ganglia_xml_thread(): Sleeping.. (%ss)' %self.config.getLowestInterval() )
        time.sleep( float( self.config.getLowestInterval() ) )
        debug_msg( 1, 'ganglia_xml_thread(): Done sleeping.' )

        if parsethread.isAlive():

            debug_msg( 1, 'ganglia_xml_thread(): parsethread() still running, waiting (%ss) to finish..' %PARSE_TIMEOUT )
            parsethread.join( PARSE_TIMEOUT ) # Maximum time for the XML thread to finish

            if parsethread.isAlive():
                debug_msg( 1, 'ganglia_xml_thread(): Done waiting: parsethread() still running :( now what?' )
            else:
                debug_msg( 1, 'ganglia_xml_thread(): Done waiting: parsethread() finished' )

        debug_msg( 1, 'ganglia_xml_thread(): finished.' )

        return 0

    def parseThread( self ):
        """Actual parsing thread"""

        debug_msg( 1, 'ganglia_parse_thread(): started.' )
        debug_msg( 1, 'ganglia_parse_thread(): Retrieving XML data..' )

        my_data = self.myXMLSource.getData()

        debug_msg( 1, 'ganglia_parse_thread(): Done retrieving: data size %d' %len(my_data) )

        if my_data:
            debug_msg( 1, 'ganglia_parse_thread(): Parsing XML..' )
            xml.sax.parseString( my_data, self.myXMLHandler, self.myXMLError )
            debug_msg( 1, 'ganglia_parse_thread(): Done parsing.' )

        debug_msg( 1, 'ganglia_parse_thread(): finished.' )

        return 0

class GangliaConfigParser:

    sources = [ ]

    def __init__( self, config ):
        """Parse some stuff from our gmetad's config, such as polling interval"""

        self.config = config
        self.parseValues()

    def parseValues( self ):
        """Parse certain values from gmetad.conf"""

        readcfg = open( self.config, 'r' )

        for line in readcfg.readlines():

            if line.count( '"' ) > 1:

                if line.find( 'data_source' ) != -1 and line[0] != '#':

                    source         = { }
                    source['name'] = line.split( '"' )[1]

                    # strip the space after the closing quote, so that the
                    # first word really is the polling interval
                    source_value_words = line.split( '"' )[2].strip().split( ' ' )

                    check_interval = source_value_words[0]

                    try:

                        source['interval'] = int( check_interval )
                        debug_msg( 9, 'polling interval for %s = %s' %(source['name'], str( source['interval'] ) ) )
                    except ValueError:

                        source['interval'] = 15
                        debug_msg( 9, 'polling interval for %s defaulted to 15' %(source['name']) )

                    self.sources.append( source )

        readcfg.close()

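    # parseValues() recognizes gmetad.conf entries such as (hypothetical):
    #
    #   data_source "my cluster" 15 localhost:8649
    #
    # yielding source['name'] = 'my cluster' and source['interval'] = 15
    # (or the default of 15 when no integer interval is present).
    #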
    def clusterExists( self, source_name ):

        for source in self.sources:

            if source['name'] == source_name:

                return True

        return False

    def getInterval( self, source_name ):
        """Return interval for source_name"""

        for source in self.sources:

            if source['name'] == source_name:

                return source['interval']

        return None

    def getLowestInterval( self ):
        """Return the lowest interval of all clusters"""

        lowest_interval = 0

        for source in self.sources:

            if not lowest_interval or source['interval'] <= lowest_interval:

                lowest_interval = source['interval']

        # Return 15 when nothing is found, so that the daemon won't go insane with 0 sec delays
        if lowest_interval:
            return lowest_interval
        else:
            return 15

class RRDHandler:
    """Class for handling RRD activity"""

    def __init__( self, config, cluster ):
        """Setup initial variables"""

        global MODRRDTOOL

        self.block = 0
        self.cluster = cluster
        self.config = config
        self.slot = threading.Lock()
        self.myMetrics = { }
        self.lastStored = { }
        self.timeserials = { }

        if MODRRDTOOL:
            # the python rrdtool module is available
            self.rrdm = RRDMutator()
        else:
            # fall back to invoking the rrdtool binary
            self.rrdm = RRDMutator( RRDTOOL )

        global DEBUG_LEVEL

        if DEBUG_LEVEL <= 2:
            self.gatherLastUpdates()

    def gatherLastUpdates( self ):
        """Populate the lastStored list, containing timestamps of all last updates"""

        cluster_dir = '%s/%s' %( check_dir(ARCHIVE_PATH), self.cluster )

        hosts = [ ]

        if os.path.exists( cluster_dir ):

            # every subdirectory of the cluster dir is a host
            hosts = os.listdir( cluster_dir )

        for host in hosts:

            host_dir = cluster_dir + '/' + host
            dirlist = os.listdir( host_dir )

            for dirname in dirlist:

                if not self.timeserials.has_key( host ):

                    self.timeserials[ host ] = [ ]

                self.timeserials[ host ].append( dirname )

            last_serial = self.getLastRrdTimeSerial( host )

            if last_serial:

                metric_dir = cluster_dir + '/' + host + '/' + last_serial

                if os.path.exists( metric_dir ):

                    dirlist = os.listdir( metric_dir )

                    for filename in dirlist:

                        metricname = filename.split( '.rrd' )[0]

                        if not self.lastStored.has_key( host ):

                            self.lastStored[ host ] = { }

                        self.lastStored[ host ][ metricname ] = self.rrdm.grabLastUpdate( metric_dir + '/' + filename )

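    # The on-disk layout scanned above, as produced by makeRrdPath() further
    # down (cluster, host and serial names are illustrative only):
    #
    #   <ARCHIVE_PATH>/<cluster>/<host>/<timeserial>/<metric>.rrd
    #   e.g. /data/archive/mycluster/node01/1389120000/load_one.rrd
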
    def getClusterName( self ):
        """Return clustername"""

        return self.cluster

    def memMetric( self, host, metric ):
        """Store metric from host in memory"""

        # <ATOMIC>
        #
        self.slot.acquire()

        if self.myMetrics.has_key( host ):

            if self.myMetrics[ host ].has_key( metric['name'] ):

                for mymetric in self.myMetrics[ host ][ metric['name'] ]:

                    if mymetric['time'] == metric['time']:

                        # Already have this metric, abort
                        self.slot.release()
                        return 1
            else:
                self.myMetrics[ host ][ metric['name'] ] = [ ]
        else:
            self.myMetrics[ host ] = { }
            self.myMetrics[ host ][ metric['name'] ] = [ ]

        # Push new metric onto stack
        # atomic code; only 1 thread at a time may access the stack

        self.myMetrics[ host ][ metric['name'] ].append( metric )

        self.slot.release()
        #
        # </ATOMIC>

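    # The in-memory structure that memMetric() builds (values below are
    # illustrative, not taken from a real run):
    #
    #   self.myMetrics = {
    #       'node01': {
    #           'load_one': [ { 'name': 'load_one', 'time': '1389123456', 'val': '0.95' }, ... ],
    #       },
    #   }
    #
    # Each per-metric list acts as a FIFO: memMetric() appends to it and
    # storeMetrics() pops from index 0, both under the same lock.
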
    def makeUpdateList( self, host, metriclist ):
        """
        Make a list of update values for rrdupdate
        but only those that we didn't store before
        """

        update_list = [ ]
        metric = None

        while len( metriclist ) > 0:

            metric = metriclist.pop( 0 )

            if self.checkStoreMetric( host, metric ):

                u_val = str( metric['time'] ) + ':' + str( metric['val'] )
                update_list.append( u_val )

        return update_list

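    # rrdupdate expects one "<timestamp>:<value>" string per datapoint, so a
    # returned update_list might look like this (hypothetical values):
    #
    #   [ '1389123456:0.95', '1389123471:0.97' ]
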
    def checkStoreMetric( self, host, metric ):
        """Check if the supplied metric is newer than the last one stored"""

        if self.lastStored.has_key( host ):

            if self.lastStored[ host ].has_key( metric['name'] ):

                # compare numerically: metric times may arrive as strings
                if int( metric['time'] ) <= int( self.lastStored[ host ][ metric['name'] ] ):

                    # This is old
                    return 0

        return 1

    def memLastUpdate( self, host, metricname, metriclist ):
        """
        Memorize the time of the latest metric from metriclist
        but only if it wasn't already memorized
        """

        if not self.lastStored.has_key( host ):
            self.lastStored[ host ] = { }

        last_update_time = 0

        for metric in metriclist:

            if metric['name'] == metricname:

                # compare numerically: metric times may arrive as strings
                if int( metric['time'] ) > int( last_update_time ):

                    last_update_time = metric['time']

        if self.lastStored[ host ].has_key( metricname ):

            if int( last_update_time ) <= int( self.lastStored[ host ][ metricname ] ):
                return 1

        self.lastStored[ host ][ metricname ] = last_update_time

    def storeMetrics( self ):
        """
        Store all metrics from memory to disk
        and do it to the RRD's in appropriate timeperiod directory
        """

        debug_msg( 5, "Entering storeMetrics()")

        count_values = 0
        count_metrics = 0
        count_bits = 0

        for hostname, mymetrics in self.myMetrics.items():

            for metricname, mymetric in mymetrics.items():

                count_metrics += 1

                for dmetric in mymetric:

                    count_values += 1

                    # each character of the time and value strings is one
                    # byte, i.e. 8 bits, of payload
                    count_bits += 8 * len( dmetric['time'] )
                    count_bits += 8 * len( dmetric['val'] )

        count_bytes = count_bits / 8

        debug_msg( 5, "size of cluster '" + self.cluster + "': " +
            str( len( self.myMetrics.keys() ) ) + " hosts " +
            str( count_metrics ) + " metrics " + str( count_values ) + " values " +
            str( count_bits ) + " bits " + str( count_bytes ) + " bytes " )

        for hostname, mymetrics in self.myMetrics.items():

            for metricname, mymetric in mymetrics.items():

                metrics_to_store = [ ]

                # Pop metrics from stack for storing until none is left
                # atomic code: only 1 thread at a time may access myMetrics

                # <ATOMIC>
                #
                self.slot.acquire()

                while len( self.myMetrics[ hostname ][ metricname ] ) > 0:

                    try:
                        metrics_to_store.append( self.myMetrics[ hostname ][ metricname ].pop( 0 ) )
                    except IndexError, msg:

                        # Somehow sometimes myMetrics[ hostname ][ metricname ]
                        # is still len 0 when the pop is executed.
                        # Just ignore IndexErrors..
                        pass

                self.slot.release()
                #
                # </ATOMIC>

                # Create a mapping table, each metric to the period where it should be stored
                #
                metric_serial_table = self.determineSerials( hostname, metricname, metrics_to_store )

                update_rets = [ ]

                for period, pmetric in metric_serial_table.items():

                    create_ret = self.createCheck( hostname, metricname, period )

                    update_ret = self.update( hostname, metricname, period, pmetric )

                    if update_ret == 0:

                        debug_msg( 9, 'stored metric %s for %s' %( metricname, hostname ) )
                    else:
                        debug_msg( 9, 'metric update failed' )

                    update_rets.append( create_ret )
                    update_rets.append( update_ret )

                # Ignore errors here for now; we need to make sure the last
                # update time is correct!
                #
                #if not (1) in update_rets:

                self.memLastUpdate( hostname, metricname, metrics_to_store )

        debug_msg( 5, "Leaving storeMetrics()")

    def makeTimeSerial( self ):
        """Generate a time serial. Seconds since epoch"""

        # Seconds since epoch
        mytime = int( time.time() )

        return mytime

    def makeRrdPath( self, host, metricname, timeserial ):
        """Make a RRD location/path and filename"""

        rrd_dir = '%s/%s/%s/%s' %( check_dir(ARCHIVE_PATH), self.cluster, host, timeserial )
        rrd_file = '%s/%s.rrd' %( rrd_dir, metricname )

        return rrd_dir, rrd_file

    def getLastRrdTimeSerial( self, host ):
        """Find the last timeserial (directory) for this host"""

        newest_timeserial = 0

        for dirname in self.timeserials[ host ]:

            # skip directories whose name is not purely numeric
            if not dirname.isdigit():
                continue

            # compare numerically: serials are stored as strings
            if int( dirname ) > int( newest_timeserial ):
                newest_timeserial = dirname

        if newest_timeserial:
            return newest_timeserial
        else:
            return 0

    def determinePeriod( self, host, check_serial ):
        """Determine to which period (directory) this time(serial) belongs"""

        period_serial = 0

        if self.timeserials.has_key( host ):

            for serial in self.timeserials[ host ]:

                # compare numerically: serials may be stored as strings
                if int( check_serial ) >= int( serial ) and int( period_serial ) < int( serial ):

                    period_serial = serial

        return period_serial

    def determineSerials( self, host, metricname, metriclist ):
        """
        Determine the correct serial and corresponding rrd to store
        for a list of metrics
        """

        metric_serial_table = { }

        for metric in metriclist:

            if metric['name'] == metricname:

                period = self.determinePeriod( host, metric['time'] )

                archive_secs = ARCHIVE_HOURS_PER_RRD * (60 * 60)

                if (int( metric['time'] ) - int( period ) ) > archive_secs:

                    # This one should get its own new period
                    period = metric['time']

                    if not self.timeserials.has_key( host ):
                        self.timeserials[ host ] = [ ]

                    self.timeserials[ host ].append( period )

                if not metric_serial_table.has_key( period ):

                    metric_serial_table[ period ] = [ ]

                metric_serial_table[ period ].append( metric )

        return metric_serial_table

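    # Worked example (hypothetical numbers): with ARCHIVE_HOURS_PER_RRD = 12,
    # archive_secs is 12 * 3600 = 43200. A metric stamped 1389160000 whose
    # latest known period for the host is 1389120000 lies 40000s into that
    # period, so it is filed under 1389120000; a metric stamped 1389170000
    # (50000s in) exceeds 43200s and starts a new period named after its own
    # timestamp.
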
    def createCheck( self, host, metricname, timeserial ):
        """Check if an rrd already exists for this metric, create if not"""

        debug_msg( 9, 'rrdcreate: using timeserial %s for %s/%s' %( timeserial, host, metricname ) )

        rrd_dir, rrd_file = self.makeRrdPath( host, metricname, timeserial )

        if not os.path.exists( rrd_dir ):

            try:
                os.makedirs( rrd_dir )

            except OSError, msg:

                if str( msg ).find( 'File exists' ) != -1:

                    # Ignore exists errors
                    pass

                else:

                    print msg
                    return

            debug_msg( 9, 'created dir %s' %( str(rrd_dir) ) )

        if not os.path.exists( rrd_file ):

            interval = self.config.getInterval( self.cluster )
            heartbeat = 8 * int( interval )

            params = [ ]

            params.append( '--step' )
            params.append( str( interval ) )

            params.append( '--start' )
            params.append( str( int( timeserial ) - 1 ) )

            params.append( 'DS:sum:GAUGE:%d:U:U' %heartbeat )
            params.append( 'RRA:AVERAGE:0.5:1:%s' %(ARCHIVE_HOURS_PER_RRD * 240) )

            self.rrdm.create( str(rrd_file), params )

            debug_msg( 9, 'created rrd %s' %( str(rrd_file) ) )

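    # For a cluster polled every 15 seconds and ARCHIVE_HOURS_PER_RRD = 12,
    # the create above is equivalent to this rrdtool invocation (path and
    # timeserial are hypothetical):
    #
    #   rrdtool create /data/archive/mycluster/node01/1389120000/load_one.rrd \
    #       --step 15 --start 1389119999 \
    #       DS:sum:GAUGE:120:U:U \
    #       RRA:AVERAGE:0.5:1:2880
    #
    # Note the row count: ARCHIVE_HOURS_PER_RRD * 240 rows covers exactly
    # ARCHIVE_HOURS_PER_RRD hours only at a 15s step (3600 / 15 = 240 rows
    # per hour); slower polling intervals simply retain more hours.
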
    def update( self, host, metricname, timeserial, metriclist ):
        """
        Update rrd file for host with metricname
        in directory timeserial with metriclist
        """

        debug_msg( 9, 'rrdupdate: using timeserial %s for %s/%s' %( timeserial, host, metricname ) )

        rrd_dir, rrd_file = self.makeRrdPath( host, metricname, timeserial )

        update_list = self.makeUpdateList( host, metriclist )

        if len( update_list ) > 0:
            ret = self.rrdm.update( str(rrd_file), update_list )

            if ret:
                return 1

            debug_msg( 9, 'updated rrd %s with %s' %( str(rrd_file), string.join( update_list ) ) )

        return 0

def daemon():
    """daemonized threading"""

    # Fork the first child
    #
    pid = os.fork()

    if pid > 0:

        sys.exit(0) # end parent

    # creates a session and sets the process group ID
    #
    os.setsid()

    # Fork the second child
    #
    pid = os.fork()

    if pid > 0:

        sys.exit(0) # end parent

    write_pidfile()

    # Go to the root directory and set the umask
    #
    os.chdir('/')
    os.umask(0)

    sys.stdin.close()
    sys.stdout.close()
    sys.stderr.close()

    os.open('/dev/null', os.O_RDWR)
    os.dup2(0, 1)
    os.dup2(0, 2)

    run()

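# The double fork above is the classic UNIX daemonization recipe: the first
# fork lets setsid() create a new session detached from the controlling
# terminal, and the second ensures the daemon is no longer a session leader
# and so can never reacquire one. Closing fds 0-2 and then opening /dev/null
# (which takes the lowest free descriptor, 0) before dup2()'ing it onto 1
# and 2 leaves all three standard streams pointed at /dev/null.
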
def run():
    """Threading start"""

    global ARCHIVE_DATASOURCES

    config = GangliaConfigParser( GMETAD_CONF )

    for ds in ARCHIVE_DATASOURCES:

        if not config.clusterExists( ds ):

            print "FATAL ERROR: Data source with name '%s' not found in %s" %( ds, GMETAD_CONF )
            sys.exit( 1 )

    s_timeout = int( config.getLowestInterval() - 1 )

    socket.setdefaulttimeout( s_timeout )

    myXMLSource = XMLGatherer( ARCHIVE_XMLSOURCE.split( ':' )[0], ARCHIVE_XMLSOURCE.split( ':' )[1] )
    myDataStore = DataSQLStore( JOB_SQL_DBASE.split( '/' )[0], JOB_SQL_DBASE.split( '/' )[1] )

    myJobProcessor = JobXMLProcessor( myXMLSource, myDataStore )
    myGangliaProcessor = GangliaXMLProcessor( myXMLSource, myDataStore )

    try:
        job_xml_thread = threading.Thread( None, myJobProcessor.run, 'job_proc_thread' )
        ganglia_xml_thread = threading.Thread( None, myGangliaProcessor.run, 'ganglia_proc_thread' )

        job_xml_thread.start()
        ganglia_xml_thread.start()

    except thread.error, msg:
        debug_msg( 0, 'FATAL ERROR: Unable to start main threads!: '+ str(msg) )
        syslog.closelog()
        sys.exit(1)

    debug_msg( 0, 'main threading started.' )

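# The two connection strings split above are host:port and host/database
# pairs; plausible values (illustrative, not taken from a real config):
#
#   ARCHIVE_XMLSOURCE = 'localhost:8651'        # gmetad xml_port (default 8651)
#   JOB_SQL_DBASE     = 'localhost/jobarchive'  # SQL host / database name
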
def main():
    """Program startup"""

    global DAEMONIZE, USE_SYSLOG

    if not processArgs( sys.argv[1:] ):
        sys.exit( 1 )

    if( DAEMONIZE and USE_SYSLOG ):
        syslog.openlog( 'jobarchived', syslog.LOG_NOWAIT, SYSLOG_FACILITY )

    if DAEMONIZE:
        daemon()
    else:
        run()

#
# Global functions
#

def check_dir( directory ):
    """Return directory as a proper directory path, i.e. without a trailing '/'"""

    if directory[-1] == '/':
        directory = directory[:-1]

    return directory

def reqtime2epoch( rtime ):
    """Convert a requested time string 'HH:MM:SS' to a number of seconds"""

    (hours, minutes, seconds ) = rtime.split( ':' )

    etime = int(seconds)
    etime = etime + ( int(minutes) * 60 )
    etime = etime + ( int(hours) * 60 * 60 )

    return etime

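# Worked example: reqtime2epoch( '02:30:15' ) returns
# 15 + 30 * 60 + 2 * 3600 = 9015 seconds.
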
def debug_msg( level, msg ):
    """Only print msg if the current debug level is high enough"""

    if (not DAEMONIZE and DEBUG_LEVEL >= level):
        sys.stderr.write( printTime() + ' - ' + msg + '\n' )

    if (DAEMONIZE and USE_SYSLOG and SYSLOG_LEVEL >= level):
        syslog.syslog( msg )

def printTime( ):
    """Return current time in human readable format"""

    return time.strftime("%a %d %b %Y %H:%M:%S")

def write_pidfile():

    # Write pidfile if PIDFILE is set
    if PIDFILE:

        pid = os.getpid()

        pidfile = open(PIDFILE, 'w')

        pidfile.write( str( pid ) )
        pidfile.close()

# Ooohh, someone started me! Let's go..
#
if __name__ == '__main__':
    main()