#!/usr/bin/env python
#
# This file is part of Jobmonarch
#
# Copyright (C) 2006-2007 Ramon Bastiaans
#
# Jobmonarch is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Jobmonarch is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
# SVN $Id: jobmond.py 476 2008-02-22 11:00:59Z bastiaans $
#
import sys, getopt, ConfigParser, time, os, socket, string, re
import xdrlib, syslog, xml, xml.sax
from xml.sax import saxutils, make_parser
from xml.sax.handler import feature_namespaces

VERSION = '0.3'

def usage( ver ):

    print 'jobmond %s' %VERSION

    if ver:
        return 0

    print
    print 'Purpose:'
    print '  The Job Monitoring Daemon (jobmond) reports batch job information and statistics'
    print '  to Ganglia, which can be viewed with the Job Monarch web frontend'
    print
    print 'Usage:   jobmond [OPTIONS]'
    print
    print '  -c, --config=FILE   The configuration file to use (default: /etc/jobmond.conf)'
    print '  -p, --pidfile=FILE  Use pid file to store the process id'
    print '  -h, --help          Print help and exit'
    print '  -v, --version       Print version and exit'
    print

def processArgs( args ):

    SHORT_L = 'p:hvc:'
    LONG_L  = [ 'help', 'config=', 'pidfile=', 'version' ]

    global PIDFILE
    PIDFILE = None

    config_filename = '/etc/jobmond.conf'

    try:
        opts, args = getopt.getopt( args, SHORT_L, LONG_L )

    except getopt.GetoptError, detail:

        print detail
        usage( False )
        sys.exit( 1 )

    for opt, value in opts:

        if opt in [ '--config', '-c' ]:

            config_filename = value

        if opt in [ '--pidfile', '-p' ]:

            PIDFILE = value

        if opt in [ '--help', '-h' ]:

            usage( False )
            sys.exit( 0 )

        if opt in [ '--version', '-v' ]:

            usage( True )
            sys.exit( 0 )

    return loadConfig( config_filename )

def loadConfig( filename ):

    def getlist( cfg_string ):

        my_list = [ ]

        for item_txt in cfg_string.split( ',' ):

            sep_char = None

            item_txt = item_txt.strip()

            for s_char in [ "'", '"' ]:

                if item_txt.find( s_char ) != -1:

                    if item_txt.count( s_char ) != 2:

                        print 'Missing quote: %s' %item_txt
                        sys.exit( 1 )

                    else:

                        sep_char = s_char
                        break

            if sep_char:

                item_txt = item_txt.split( sep_char )[1]

            my_list.append( item_txt )

        return my_list
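
    # A minimal illustration (not part of the original source) of what
    # getlist() accepts: comma-separated values, optionally quoted, e.g.
    #
    #   getlist( 'short, "long", \'parallel\'' )  ->  [ 'short', 'long', 'parallel' ]
    #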

    cfg = ConfigParser.ConfigParser()

    cfg.read( filename )

    global DEBUG_LEVEL, DAEMONIZE, BATCH_SERVER, BATCH_POLL_INTERVAL
    global GMOND_CONF, DETECT_TIME_DIFFS, BATCH_HOST_TRANSLATE
    global BATCH_API, QUEUE, GMETRIC_TARGET, USE_SYSLOG
    global SYSLOG_LEVEL, SYSLOG_FACILITY, GMETRIC_BINARY

    DEBUG_LEVEL = cfg.getint( 'DEFAULT', 'DEBUG_LEVEL' )

    DAEMONIZE = cfg.getboolean( 'DEFAULT', 'DAEMONIZE' )

    SYSLOG_LEVEL    = -1
    SYSLOG_FACILITY = None

    try:
        USE_SYSLOG = cfg.getboolean( 'DEFAULT', 'USE_SYSLOG' )

    except ConfigParser.NoOptionError:

        USE_SYSLOG = True

        debug_msg( 0, 'ERROR: no option USE_SYSLOG found: assuming yes' )

    if USE_SYSLOG:

        try:
            SYSLOG_LEVEL = cfg.getint( 'DEFAULT', 'SYSLOG_LEVEL' )

        except ConfigParser.NoOptionError:

            debug_msg( 0, 'ERROR: no option SYSLOG_LEVEL found: assuming level 0' )
            SYSLOG_LEVEL = 0

        try:
            SYSLOG_FACILITY = eval( 'syslog.LOG_' + cfg.get( 'DEFAULT', 'SYSLOG_FACILITY' ) )

        except ConfigParser.NoOptionError:

            SYSLOG_FACILITY = syslog.LOG_DAEMON

            debug_msg( 0, 'ERROR: no option SYSLOG_FACILITY found: assuming facility DAEMON' )

    # If the batch API has to be guessed from old-style (TORQUE_*) options,
    # the guess is remembered here
    #
    api_guess = None

    try:
        BATCH_SERVER = cfg.get( 'DEFAULT', 'BATCH_SERVER' )

    except ConfigParser.NoOptionError:

        # Backwards compatibility for old configs
        #
        BATCH_SERVER = cfg.get( 'DEFAULT', 'TORQUE_SERVER' )
        api_guess    = 'pbs'

    try:
        BATCH_POLL_INTERVAL = cfg.getint( 'DEFAULT', 'BATCH_POLL_INTERVAL' )

    except ConfigParser.NoOptionError:

        # Backwards compatibility for old configs
        #
        BATCH_POLL_INTERVAL = cfg.getint( 'DEFAULT', 'TORQUE_POLL_INTERVAL' )
        api_guess           = 'pbs'

    try:
        GMOND_CONF = cfg.get( 'DEFAULT', 'GMOND_CONF' )

    except ConfigParser.NoOptionError:

        GMOND_CONF = None

    try:
        GMETRIC_BINARY = cfg.get( 'DEFAULT', 'GMETRIC_BINARY' )

    except ConfigParser.NoOptionError:

        GMETRIC_BINARY = '/usr/bin/gmetric'

    DETECT_TIME_DIFFS = cfg.getboolean( 'DEFAULT', 'DETECT_TIME_DIFFS' )

    BATCH_HOST_TRANSLATE = getlist( cfg.get( 'DEFAULT', 'BATCH_HOST_TRANSLATE' ) )

    try:
        BATCH_API = cfg.get( 'DEFAULT', 'BATCH_API' )

    except ConfigParser.NoOptionError, detail:

        if BATCH_SERVER and api_guess:

            BATCH_API = api_guess
        else:
            debug_msg( 0, "FATAL ERROR: BATCH_API not set and can't make a guess" )
            sys.exit( 1 )

    try:
        QUEUE = getlist( cfg.get( 'DEFAULT', 'QUEUE' ) )

    except ConfigParser.NoOptionError, detail:

        QUEUE = None

    try:
        GMETRIC_TARGET = cfg.get( 'DEFAULT', 'GMETRIC_TARGET' )

    except ConfigParser.NoOptionError:

        GMETRIC_TARGET = None

        if not GMOND_CONF:

            debug_msg( 0, "FATAL ERROR: GMETRIC_TARGET and GMOND_CONF are both not set! Set at least one!" )
            sys.exit( 1 )
        else:

            debug_msg( 0, "ERROR: GMETRIC_TARGET not set: internal Gmetric handling disabled. Falling back to DEPRECATED use of gmond.conf/gmetric binary. This will slow down jobmond significantly!" )

    return True
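
# A minimal example of the [DEFAULT] section such a jobmond.conf might
# contain (illustrative values only, not shipped with the original source):
#
#   [DEFAULT]
#   DEBUG_LEVEL          : 0
#   DAEMONIZE            : True
#   BATCH_API            : pbs
#   BATCH_SERVER         : localhost
#   BATCH_POLL_INTERVAL  : 30
#   GMETRIC_TARGET       : 239.2.11.71:8649
#   DETECT_TIME_DIFFS    : True
#   BATCH_HOST_TRANSLATE : '/\.cluster$//'
#   QUEUE                : 'short', 'long'
#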

METRIC_MAX_VAL_LEN = 900

class DataProcessor:

    """Class for processing of data"""

    binary = None

    def __init__( self, binary=None ):

        """Remember alternate binary location if supplied"""

        global GMETRIC_BINARY

        if binary:
            self.binary = binary

        if not self.binary:
            self.binary = GMETRIC_BINARY

        # Timeout (DMAX) for our metrics
        #
        # From ganglia's documentation:
        #
        # 'A metric will be deleted DMAX seconds after it is received, and
        #  DMAX=0 means eternal life.'

        self.dmax = str( int( int( BATCH_POLL_INTERVAL ) * 2 ) )
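
        # Worked example (assuming BATCH_POLL_INTERVAL = 30): dmax becomes
        # '60', so a job metric only expires after two consecutive polls
        # have failed to refresh it.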

        if GMOND_CONF:

            # GMOND_CONF is guaranteed to be set here; keep the original
            # fallback to /etc/gmond.conf for safety
            #
            try:
                gmond_file = GMOND_CONF

            except NameError:
                gmond_file = '/etc/gmond.conf'

            if not os.path.exists( gmond_file ):
                debug_msg( 0, 'FATAL ERROR: ' + gmond_file + ' does not exist' )
                sys.exit( 1 )

            incompatible = self.checkGmetricVersion()

            if incompatible:

                debug_msg( 0, 'Gmetric version not compatible, please upgrade to at least 3.0.1' )
                sys.exit( 1 )
    def checkGmetricVersion( self ):

        """
        Check that the version of gmetric is at least 3.0.1,
        as required by the syntax we use
        """

        global METRIC_MAX_VAL_LEN

        incompatible = 0

        gfp   = os.popen( self.binary + ' --version' )
        lines = gfp.readlines()

        gfp.close()

        for line in lines:

            line = line.split( ' ' )

            if len( line ) == 2 and str( line ).find( 'gmetric' ) != -1:

                gmetric_version = line[1].split( '\n' )[0]

                version_major = int( gmetric_version.split( '.' )[0] )
                version_minor = int( gmetric_version.split( '.' )[1] )
                version_patch = int( gmetric_version.split( '.' )[2] )

                incompatible = 0

                if version_major < 3:

                    incompatible = 1

                elif version_major == 3:

                    if version_minor == 0:

                        if version_patch < 1:

                            incompatible = 1

                        # Gmetric 3.0.1 and 3.0.2 had a bug in the max
                        # metric length
                        #
                        if version_patch < 3:

                            METRIC_MAX_VAL_LEN = 900

                        elif version_patch >= 3:

                            METRIC_MAX_VAL_LEN = 1400

        return incompatible
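
    # Note: this parser assumes `gmetric --version' prints a single line of
    # the form 'gmetric 3.0.3' (two space-separated fields); any other
    # layout leaves incompatible at 0 and METRIC_MAX_VAL_LEN at its default.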

    def multicastGmetric( self, metricname, metricval, valtype='string', units='' ):

        """Call gmetric binary and multicast"""

        cmd = self.binary

        if GMETRIC_TARGET:

            GMETRIC_TARGET_HOST = GMETRIC_TARGET.split( ':' )[0]
            GMETRIC_TARGET_PORT = GMETRIC_TARGET.split( ':' )[1]

            metric_debug = "[gmetric] name: %s - val: %s - dmax: %s" %( str( metricname ), str( metricval ), str( self.dmax ) )

            debug_msg( 10, printTime() + ' ' + metric_debug )

            gm = Gmetric( GMETRIC_TARGET_HOST, GMETRIC_TARGET_PORT )

            gm.send( str( metricname ), str( metricval ), str( self.dmax ), valtype, units )

        else:
            try:
                cmd = cmd + ' -c' + GMOND_CONF

            except NameError:

                debug_msg( 10, 'Assuming /etc/gmond.conf for gmetric cmd (omitting)' )

            cmd = cmd + ' -n' + str( metricname ) + ' -v"' + str( metricval ) + '" -t' + str( valtype ) + ' -d' + str( self.dmax )

            if len( units ) > 0:

                cmd = cmd + ' -u"' + units + '"'

            debug_msg( 10, printTime() + ' ' + cmd )

            os.system( cmd )
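
    # For illustration (hypothetical values): with GMOND_CONF set to
    # '/etc/gmond.conf', the fallback branch above builds a command such as
    #
    #   /usr/bin/gmetric -c/etc/gmond.conf -nMONARCH-RJ -v"5" -tuint32 -d60 -u"jobs"
    #
    # which is then executed through os.system().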

class DataGatherer:

    """Skeleton class for batch system DataGatherer"""

    def printJobs( self, jobs ):

        """Print a jobinfo overview"""

        for name, attrs in jobs.items():

            print 'job %s' %(name)

            for name, val in attrs.items():

                print '\t%s = %s' %( name, val )

    def printJob( self, jobs, job_id ):

        """Print job with job_id from jobs"""

        print 'job %s' %(job_id)

        for name, val in jobs[ job_id ].items():

            print '\t%s = %s' %( name, val )

    def daemon( self ):

        """Run as daemon forever"""

        # Fork the first child
        #
        pid = os.fork()
        if pid > 0:
            sys.exit(0)  # end parent

        # Create a new session and set the process group ID
        #
        os.setsid()

        # Fork the second child, so the daemon can never reacquire
        # a controlling terminal
        #
        pid = os.fork()
        if pid > 0:
            sys.exit(0)  # end parent

        write_pidfile()

        # Go to the root directory and set the umask
        #
        os.chdir('/')
        os.umask(0)

        # Close and redirect the standard file descriptors to /dev/null
        #
        sys.stdin.close()
        sys.stdout.close()
        sys.stderr.close()

        os.open('/dev/null', os.O_RDWR)
        os.dup2(0, 1)
        os.dup2(0, 2)

        self.run()

    def run( self ):

        """Main thread"""

        while ( 1 ):

            self.getJobData()
            self.submitJobData()
            time.sleep( BATCH_POLL_INTERVAL )

class SgeQstatXMLParser(xml.sax.handler.ContentHandler):

    """Babu Sundaram's experimental SGE qstat XML parser"""

    def __init__(self, qstatinxml):

        self.qstatfile      = qstatinxml
        self.attribs        = {}
        self.value          = ''
        self.jobID          = ''
        self.currentJobInfo = ''
        self.job_list       = []
        self.EOFFlag        = 0
        self.jobinfoCount   = 0

    def startElement(self, name, attrs):

        if name == 'job_list':
            self.currentJobInfo = 'Status=' + attrs.get('state', 'Unknown') + ' '
        elif name == 'job_info':
            self.job_list = []
            self.jobinfoCount += 1

    def characters(self, ch):

        self.value = self.value + ch

    def endElement(self, name):

        if len(self.value.strip()) > 0 :

            self.currentJobInfo += name + '=' + self.value.strip() + ' '
        elif name != 'job_list':

            self.currentJobInfo += name + '=Unknown '

        if name == 'JB_job_number':

            self.jobID = self.value.strip()
            self.job_list.append(self.jobID)

        if name == 'job_list':

            if self.attribs.has_key(self.jobID) == False:
                self.attribs[self.jobID] = self.currentJobInfo
            elif self.attribs.has_key(self.jobID) and self.attribs[self.jobID] != self.currentJobInfo:
                self.attribs[self.jobID] = self.currentJobInfo
            self.currentJobInfo = ''
            self.jobID          = ''

        elif name == 'job_info' and self.jobinfoCount == 2:

            # The second closing <job_info> marks the end of the qstat
            # output: drop jobs that are no longer in the queue
            #
            deljobs = []
            for id in self.attribs:
                try:
                    self.job_list.index(str(id))
                except ValueError:
                    deljobs.append(id)
            for i in deljobs:
                del self.attribs[i]
            deljobs = []
            self.jobinfoCount = 0

        self.value = ''
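
# For reference, an illustrative (not vendor-verified) sketch of the
# `qstat -xml' structure this parser walks: a root <job_info> holding a
# <queue_info> section for running jobs and a nested <job_info> section
# for pending ones:
#
#   <job_info>
#     <queue_info>
#       <job_list state="running">
#         <JB_job_number>1234</JB_job_number>
#         ...
#       </job_list>
#     </queue_info>
#     <job_info>
#       <job_list state="pending">
#         ...
#       </job_list>
#     </job_info>
#   </job_info>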

class SgeDataGatherer(DataGatherer):

    jobs               = { }
    SGE_QSTAT_XML_FILE = '/tmp/.jobmonarch.sge.qstat'

    def __init__( self ):
        """Setup appropriate variables"""

        self.jobs       = { }
        self.timeoffset = 0
        self.dp         = DataProcessor()
        self.initSgeJobInfo()

    def initSgeJobInfo( self ):
        """This is outside the scope of DRMAA; get the current jobs in SGE.
        This is a hack, because we can't get info about jobs beyond
        those in the current DRMAA session"""

        self.qstatparser = SgeQstatXMLParser( self.SGE_QSTAT_XML_FILE )

        # Obtain the qstat information from SGE in XML format
        # This would change to DRMAA-specific calls from 6.0u9

    def getJobData(self):
        """Gather all data on current jobs in SGE"""

        # Get the information about the current jobs in the SGE queue
        #
        info = os.popen("qstat -ext -xml").readlines()

        f = open(self.SGE_QSTAT_XML_FILE, 'w')
        for line in info:
            f.write(line)
        f.close()

        # Parse the input
        #
        f = open(self.qstatparser.qstatfile, 'r')
        xml.sax.parse(f, self.qstatparser)
        f.close()

        self.cur_time = time.time()

        return self.qstatparser.attribs

    def submitJobData(self):
        """Submit job info list"""

        self.dp.multicastGmetric( 'MONARCH-HEARTBEAT', str( int( int( self.cur_time ) + int( self.timeoffset ) ) ) )

        # Now let's spread the knowledge
        #
        metric_increment = 0

        for jobid, jobattrs in self.qstatparser.attribs.items():

            self.dp.multicastGmetric( 'MONARCH-JOB-' + jobid + '-' + str(metric_increment), jobattrs )

class PbsDataGatherer( DataGatherer ):

    """This is the DataGatherer for PBS and Torque"""

    global PBSQuery

    def __init__( self ):

        """Setup appropriate variables"""

        self.jobs       = { }
        self.timeoffset = 0
        self.dp         = DataProcessor()

        self.initPbsQuery()

    def initPbsQuery( self ):

        self.pq = None

        if( BATCH_SERVER ):

            self.pq = PBSQuery( BATCH_SERVER )
        else:
            self.pq = PBSQuery()

    def getAttr( self, attrs, name ):

        """Return certain attribute from dictionary, if it exists"""

        if attrs.has_key( name ):

            return attrs[ name ]
        else:
            return ''

    def jobDataChanged( self, jobs, job_id, attrs ):

        """Check if job with attrs and job_id in jobs has changed"""

        if jobs.has_key( job_id ):

            oldData = jobs[ job_id ]
        else:
            return 1

        for name, val in attrs.items():

            if oldData.has_key( name ):

                if oldData[ name ] != attrs[ name ]:

                    return 1

            else:
                return 1

        return 0
    def getJobData( self ):

        """Gather all data on current jobs in Torque"""

        joblist       = {}
        self.cur_time = 0

        try:
            joblist       = self.pq.getjobs()
            self.cur_time = time.time()

        except PBSError, detail:

            debug_msg( 10, "Caught PBS unavailable, skipping until next polling interval: " + str( detail ) )
            return None

        jobs_processed = [ ]

        my_domain = string.join( socket.getfqdn().split( '.' )[1:], '.' )

        for name, attrs in joblist.items():

            job_id = name.split( '.' )[0]

            name  = self.getAttr( attrs, 'Job_Name' )
            queue = self.getAttr( attrs, 'queue' )

            if QUEUE:

                # QUEUE is a list of queue names to monitor: skip any job
                # that is not in one of them
                #
                if queue not in QUEUE:

                    continue

            owner            = self.getAttr( attrs, 'Job_Owner' ).split( '@' )[0]
            requested_time   = self.getAttr( attrs, 'Resource_List.walltime' )
            requested_memory = self.getAttr( attrs, 'Resource_List.mem' )

            mynoderequest = self.getAttr( attrs, 'Resource_List.nodes' )

            ppn = ''

            if mynoderequest.find( ':' ) != -1 and mynoderequest.find( 'ppn' ) != -1:

                mynoderequest_fields = mynoderequest.split( ':' )

                for mynoderequest_field in mynoderequest_fields:

                    if mynoderequest_field.find( 'ppn' ) != -1:

                        ppn = mynoderequest_field.split( 'ppn=' )[1]

            status = self.getAttr( attrs, 'job_state' )

            if status in [ 'Q', 'R' ]:

                jobs_processed.append( job_id )

            queued_timestamp = self.getAttr( attrs, 'ctime' )

            if status == 'R':

                start_timestamp = self.getAttr( attrs, 'mtime' )
                nodes           = self.getAttr( attrs, 'exec_host' ).split( '+' )

                nodeslist = [ ]

                for node in nodes:

                    host = node.split( '/' )[0]

                    host_domain = string.join( host.split( '.' )[1:], '.' )

                    if host_domain == my_domain:

                        host = host.split( '.' )[0]

                    if nodeslist.count( host ) == 0:

                        for translate_pattern in BATCH_HOST_TRANSLATE:

                            if translate_pattern.find( '/' ) != -1:

                                translate_orig = translate_pattern.split( '/' )[1]
                                translate_new  = translate_pattern.split( '/' )[2]

                                host = re.sub( translate_orig, translate_new, host )

                        if not host in nodeslist:

                            nodeslist.append( host )
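
                    # BATCH_HOST_TRANSLATE entries are sed-style '/from/to/'
                    # regex pairs; e.g. (illustrative) '/\.cluster$//'
                    # strips a '.cluster' suffix from each reported hostname.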

                if DETECT_TIME_DIFFS:

                    # If a job's start time is later than our current time,
                    # the Torque server's clock must be ahead of our local
                    # clock: remember the offset
                    #
                    if int( start_timestamp ) > int( int( self.cur_time ) + int( self.timeoffset ) ):

                        self.timeoffset = int( int(start_timestamp) - int(self.cur_time) )

            elif status == 'Q':

                # 'mynoderequest' can be a string in the following syntax,
                # according to the Torque Administrator's manual:
                #
                # {<node_count> | <hostname>}[:ppn=<ppn>][:<property>[:<property>]...][+ ...]
                # {<node_count> | <hostname>}[:ppn=<ppn>][:<property>[:<property>]...][+ ...]
                # etc.
                #
                # For now we only count the number of nodes requested and
                # ignore the properties
                #
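                # Illustrative examples (not from the original source) of
                # how the counting below treats such strings:
                #
                #   '2:ppn=4'             -> 2 nodes
                #   'node01+node02'       -> 2 nodes
                #   '2:ppn=2+node03:fast' -> 3 nodes
                #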

                start_timestamp = ''
                count_mynodes   = 0

                for node in mynoderequest.split( '+' ):

                    # Just grab the {node_count|hostname} part and ignore
                    # any properties
                    #
                    nodepart = node.split( ':' )[0]

                    # Let's assume a node_count value
                    #
                    numeric_node = 1

                    # Chop the value up into characters
                    #
                    for letter in nodepart:

                        # If this char is not a digit (0-9), this must be
                        # a hostname
                        #
                        if letter not in string.digits:

                            numeric_node = 0

                    # If this is a hostname, just count it as one (1) node
                    #
                    if not numeric_node:

                        count_mynodes = count_mynodes + 1
                    else:

                        # If this is a number, it must be the node_count:
                        # increase our count by its value
                        #
                        try:
                            count_mynodes = count_mynodes + int( nodepart )

                        except ValueError, detail:

                            # When we arrive here, something must be bugged
                            # or very confused. THIS SHOULD NOT HAPPEN!
                            #
                            debug_msg( 10, str( detail ) )
                            debug_msg( 10, "Encountered weird node in Resources_List?!" )
                            debug_msg( 10, 'nodepart = ' + str( nodepart ) )
                            debug_msg( 10, 'job = ' + str( name ) )
                            debug_msg( 10, 'attrs = ' + str( attrs ) )

                nodeslist = str( count_mynodes )
            else:
                start_timestamp = ''
                nodeslist       = ''

            myAttrs = { }

            myAttrs[ 'name' ]             = str( name )
            myAttrs[ 'queue' ]            = str( queue )
            myAttrs[ 'owner' ]            = str( owner )
            myAttrs[ 'requested_time' ]   = str( requested_time )
            myAttrs[ 'requested_memory' ] = str( requested_memory )
            myAttrs[ 'ppn' ]              = str( ppn )
            myAttrs[ 'status' ]           = str( status )
            myAttrs[ 'start_timestamp' ]  = str( start_timestamp )
            myAttrs[ 'queued_timestamp' ] = str( queued_timestamp )
            myAttrs[ 'reported' ]         = str( int( int( self.cur_time ) + int( self.timeoffset ) ) )
            myAttrs[ 'nodes' ]            = nodeslist
            myAttrs[ 'domain' ]           = string.join( socket.getfqdn().split( '.' )[1:], '.' )
            myAttrs[ 'poll_interval' ]    = str( BATCH_POLL_INTERVAL )

            if self.jobDataChanged( self.jobs, job_id, myAttrs ) and myAttrs['status'] in [ 'R', 'Q' ]:

                self.jobs[ job_id ] = myAttrs

        for id, attrs in self.jobs.items():

            if id not in jobs_processed:

                # This one isn't there anymore; toedeledoki!
                #
                del self.jobs[ id ]

    def submitJobData( self ):

        """Submit job info list"""

        self.dp.multicastGmetric( 'MONARCH-HEARTBEAT', str( int( int( self.cur_time ) + int( self.timeoffset ) ) ) )

        running_jobs = 0
        queued_jobs  = 0

        # Count how many running/queued jobs we found
        #
        for jobid, jobattrs in self.jobs.items():

            if jobattrs[ 'status' ] == 'Q':

                queued_jobs += 1

            elif jobattrs[ 'status' ] == 'R':

                running_jobs += 1

        # Report running/queued jobs as separate metrics for a nice RRD graph
        #
        self.dp.multicastGmetric( 'MONARCH-RJ', str( running_jobs ), 'uint32', 'jobs' )
        self.dp.multicastGmetric( 'MONARCH-QJ', str( queued_jobs ), 'uint32', 'jobs' )

        # Now let's spread the knowledge
        #
        for jobid, jobattrs in self.jobs.items():

            # Make gmetric values for each job: respect the max gmetric
            # value length
            #
            gmetric_val      = self.compileGmetricVal( jobid, jobattrs )
            metric_increment = 0

            # If we have more job info than the max gmetric value length
            # allows, split it up amongst multiple metrics
            #
            for val in gmetric_val:

                self.dp.multicastGmetric( 'MONARCH-JOB-' + jobid + '-' + str(metric_increment), val )

                # Increase the follow number if this jobinfo is split up
                # amongst more than 1 gmetric
                #
                metric_increment = metric_increment + 1

    def compileGmetricVal( self, jobid, jobattrs ):

        """Create a val string for gmetric of jobinfo"""

        gval_lists = [ ]
        mystr      = None
        val_list   = { }

        for val_name, val_value in jobattrs.items():

            # These are our own metric names, i.e.: status, start_timestamp, etc
            #
            val_list_names_len = len( string.join( val_list.keys() ) ) + len(val_list.keys())

            # These are their corresponding values
            #
            val_list_vals_len = len( string.join( val_list.values() ) ) + len(val_list.values())

            if val_name == 'nodes' and jobattrs['status'] == 'R':

                node_str = None

                for node in val_value:

                    if node_str:

                        node_str = node_str + ';' + node
                    else:
                        node_str = node

                    # Make sure that, if we add this new info, the total
                    # metric's value length does not exceed METRIC_MAX_VAL_LEN
                    #
                    if (val_list_names_len + len(val_name) ) + (val_list_vals_len + len(node_str) ) > METRIC_MAX_VAL_LEN:

                        # It's too big, we need to make a new gmetric for
                        # the additional info
                        #
                        val_list[ val_name ] = node_str

                        gval_lists.append( val_list )

                        val_list = { }
                        node_str = None

                # Only flush the remainder if there is one; otherwise we
                # would emit a 'nodes=None' entry
                #
                if node_str:

                    val_list[ val_name ] = node_str

                    gval_lists.append( val_list )

                    val_list = { }

            elif val_value != '':

                # Make sure that, if we add this new info, the total
                # metric's value length does not exceed METRIC_MAX_VAL_LEN
                #
                if (val_list_names_len + len(val_name) ) + (val_list_vals_len + len(str(val_value)) ) > METRIC_MAX_VAL_LEN:

                    # It's too big, we need to make a new gmetric for
                    # the additional info
                    #
                    gval_lists.append( val_list )

                    val_list = { }

                val_list[ val_name ] = val_value

        if len( val_list ) > 0:

            gval_lists.append( val_list )

        str_list = [ ]

        # Now append the value names and values together, i.e.: stop_timestamp=value, etc
        #
        for val_list in gval_lists:

            my_val_str = None

            for val_name, val_value in val_list.items():

                if my_val_str:

                    my_val_str = my_val_str + ' ' + val_name + '=' + val_value
                else:
                    my_val_str = val_name + '=' + val_value

            str_list.append( my_val_str )

        return str_list
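
    # For illustration (hypothetical values): compileGmetricVal() returns
    # something like
    #
    #   [ 'name=myjob queue=short owner=jdoe status=R nodes=node01;node02 ...' ]
    #
    # with additional list entries only when the combined string would
    # exceed METRIC_MAX_VAL_LEN.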

#
# Gmetric by Nick Galbreath - nickg(a.t)modp(d.o.t)com
# Version 1.0 - 21-April-2007
# http://code.google.com/p/embeddedgmetric/
#
# Modified by: Ramon Bastiaans
# For the Job Monarch Project, see: https://subtrac.sara.nl/oss/jobmonarch/
#
# added: DEFAULT_TYPE for Gmetrics
# added: checkHostProtocol to determine if target is multicast or not
# changed: allow default for Gmetric constructor
# changed: allow defaults for all send() values except dmax
#

GMETRIC_DEFAULT_TYPE  = 'string'
GMETRIC_DEFAULT_HOST  = '127.0.0.1'
GMETRIC_DEFAULT_PORT  = '8649'
GMETRIC_DEFAULT_UNITS = ''

class Gmetric:

    global GMETRIC_DEFAULT_HOST, GMETRIC_DEFAULT_PORT

    slope    = { 'zero' : 0, 'positive' : 1, 'negative' : 2, 'both' : 3, 'unspecified' : 4 }
    type     = ( '', 'string', 'uint16', 'int16', 'uint32', 'int32', 'float', 'double', 'timestamp' )
    protocol = ( 'udp', 'multicast' )

    def __init__( self, host=GMETRIC_DEFAULT_HOST, port=GMETRIC_DEFAULT_PORT ):

        global GMETRIC_DEFAULT_TYPE

        self.prot   = self.checkHostProtocol( host )
        self.msg    = xdrlib.Packer()
        self.socket = socket.socket( socket.AF_INET, socket.SOCK_DGRAM )

        if self.prot not in self.protocol:

            raise ValueError( "Protocol must be one of: " + str( self.protocol ) )

        if self.prot == 'multicast':

            # Set multicast options
            #
            self.socket.setsockopt( socket.IPPROTO_IP, socket.IP_MULTICAST_TTL, 20 )

        self.hostport = ( host, int( port ) )
        self.slopestr = 'both'
        self.tmax     = 60
    def checkHostProtocol( self, ip ):

        """Detect if an IP address is a multicast address"""

        # IPv4 multicast addresses range from 224.0.0.0 through
        # 239.255.255.255, i.e. the first octet is in the range 224-239
        #
        try:
            first_octet = int( ip.split( '.' )[0] )

        except ValueError:

            # Not a dotted-quad address (e.g. a hostname): regular udp
            #
            return 'udp'

        if first_octet >= 224 and first_octet <= 239:

            return 'multicast'
        else:
            return 'udp'
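
    # Example behaviour (illustrative): checkHostProtocol( '239.2.11.71' )
    # returns 'multicast', while checkHostProtocol( '127.0.0.1' ) returns 'udp'.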

    def send( self, name, value, dmax, typestr = '', units = '' ):

        if len( units ) == 0:
            units = GMETRIC_DEFAULT_UNITS

        if len( typestr ) == 0:
            typestr = GMETRIC_DEFAULT_TYPE

        msg = self.makexdr( name, value, typestr, units, self.slopestr, self.tmax, dmax )

        return self.socket.sendto( msg, self.hostport )

    def makexdr( self, name, value, typestr, unitstr, slopestr, tmax, dmax ):

        if slopestr not in self.slope:

            raise ValueError( "Slope must be one of: " + str( self.slope.keys() ) )

        if typestr not in self.type:

            raise ValueError( "Type must be one of: " + str( self.type ) )

        if len( name ) == 0:

            raise ValueError( "Name must be non-empty" )

        self.msg.reset()
        self.msg.pack_int( 0 )
        self.msg.pack_string( typestr )
        self.msg.pack_string( name )
        self.msg.pack_string( str( value ) )
        self.msg.pack_string( unitstr )
        self.msg.pack_int( self.slope[ slopestr ] )
        self.msg.pack_uint( int( tmax ) )
        self.msg.pack_uint( int( dmax ) )

        return self.msg.get_buffer()
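
# A minimal usage sketch (illustrative values, not from the original source):
#
#   gm = Gmetric( '239.2.11.71', '8649' )
#   gm.send( 'MONARCH-HEARTBEAT', '1203678000', '60' )
#
# This packs one XDR metric message and sends it as a single UDP datagram.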

def printTime( ):

    """Return current time/date in human-readable format for log/debug"""

    return time.strftime("%a, %d %b %Y %H:%M:%S")

def debug_msg( level, msg ):

    """Print msg if at or above current debug level"""

    global DAEMONIZE, DEBUG_LEVEL, SYSLOG_LEVEL

    if (not DAEMONIZE and DEBUG_LEVEL >= level):
        sys.stderr.write( msg + '\n' )

    if (DAEMONIZE and USE_SYSLOG and SYSLOG_LEVEL >= level):
        syslog.syslog( msg )

def write_pidfile():

    # Write pidfile if PIDFILE is set
    #
    if PIDFILE:

        pid = os.getpid()

        pidfile = open( PIDFILE, 'w' )

        pidfile.write( str( pid ) )
        pidfile.close()

def main():

    """Application start"""

    global PBSQuery, PBSError
    global SYSLOG_FACILITY, USE_SYSLOG, BATCH_API, DAEMONIZE

    if not processArgs( sys.argv[1:] ):

        sys.exit( 1 )

    # Load the appropriate DataGatherer depending on which BATCH_API is set,
    # and any required modules for the Gatherer
    #
    if BATCH_API == 'pbs':

        try:
            from PBSQuery import PBSQuery, PBSError

        except ImportError:

            debug_msg( 0, "FATAL ERROR: BATCH_API set to 'pbs' but python module 'pbs_python' is not installed" )
            sys.exit( 1 )

        gather = PbsDataGatherer()

    elif BATCH_API == 'sge':

        debug_msg( 0, "FATAL ERROR: BATCH_API 'sge' implementation is currently broken, check future releases" )

        sys.exit( 1 )

        # Unreachable until the SGE implementation is fixed
        #
        gather = SgeDataGatherer()

    else:
        debug_msg( 0, "FATAL ERROR: unknown BATCH_API '" + BATCH_API + "' is not supported" )

        sys.exit( 1 )

    if( DAEMONIZE and USE_SYSLOG ):

        syslog.openlog( 'jobmond', syslog.LOG_NOWAIT, SYSLOG_FACILITY )

    if DAEMONIZE:

        gather.daemon()
    else:
        gather.run()

# wh00t? someone started me! :)
#
if __name__ == '__main__':
    main()