#!/usr/bin/env python

import sys, getopt, ConfigParser

def processArgs( args ):

    SHORT_L = 'c:'
    LONG_L = 'config='

    config_filename = None

    try:

        opts, args = getopt.getopt( args, SHORT_L, LONG_L )

    except getopt.error, detail:

        print detail
        sys.exit(1)

    for opt, value in opts:

        if opt in [ '--config', '-c' ]:

            config_filename = value

    if not config_filename:

        config_filename = '/etc/jobmond.conf'

    return loadConfig( config_filename )

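# A minimal usage sketch (the script name below is an assumption, not taken
# from this file):
#
#   ./jobmond.py -c /path/to/jobmond.conf
#   ./jobmond.py --config /path/to/jobmond.conf
#
# Without -c/--config, processArgs() falls back to /etc/jobmond.conf.
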
def loadConfig( filename ):

    def getlist( cfg_string ):

        my_list = [ ]

        for item_txt in cfg_string.split( ',' ):

            sep_char = None

            item_txt = item_txt.strip()

            for s_char in [ "'", '"' ]:

                if item_txt.find( s_char ) != -1:

                    if item_txt.count( s_char ) != 2:

                        print 'Missing quote: %s' %item_txt
                        sys.exit( 1 )

                    else:

                        sep_char = s_char
                        break

            if sep_char:

                item_txt = item_txt.split( sep_char )[1]

            my_list.append( item_txt )

        return my_list

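    # Illustrative example of what getlist() does (the input string is made
    # up): a quoted, comma-separated value such as
    #
    #   '"old-prefix/new-prefix", "other"'
    #
    # becomes [ 'old-prefix/new-prefix', 'other' ], i.e. the surrounding
    # quotes are stripped from each item.
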
    cfg = ConfigParser.ConfigParser()

    cfg.read( filename )

    global DEBUG_LEVEL, DAEMONIZE, TORQUE_SERVER, TORQUE_POLL_INTERVAL, GMOND_CONF, DETECT_TIME_DIFFS, BATCH_HOST_TRANSLATE

    DEBUG_LEVEL = cfg.getint( 'DEFAULT', 'DEBUG_LEVEL' )

    DAEMONIZE = cfg.getboolean( 'DEFAULT', 'DAEMONIZE' )

    TORQUE_SERVER = cfg.get( 'DEFAULT', 'TORQUE_SERVER' )

    TORQUE_POLL_INTERVAL = cfg.getint( 'DEFAULT', 'TORQUE_POLL_INTERVAL' )

    GMOND_CONF = cfg.get( 'DEFAULT', 'GMOND_CONF' )

    DETECT_TIME_DIFFS = cfg.getboolean( 'DEFAULT', 'DETECT_TIME_DIFFS' )

    BATCH_HOST_TRANSLATE = getlist( cfg.get( 'DEFAULT', 'BATCH_HOST_TRANSLATE' ) )

    return True

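# A sketch of the config file that loadConfig() expects; the values below are
# invented examples, only the option names come from the code above:
#
#   [DEFAULT]
#   DEBUG_LEVEL          : 10
#   DAEMONIZE            : 1
#   TORQUE_SERVER        : localhost
#   TORQUE_POLL_INTERVAL : 15
#   GMOND_CONF           : /etc/gmond.conf
#   DETECT_TIME_DIFFS    : 1
#   BATCH_HOST_TRANSLATE : "/torque-name/ganglia-name/"
#
# BATCH_HOST_TRANSLATE items are '/original/replacement/' regex pairs, parsed
# by getlist() and applied with re.sub() in DataGatherer.getJobData().
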
from PBSQuery import PBSQuery

import time, os, socket, string, re

class DataProcessor:
    """Class for processing of data"""

    binary = '/usr/bin/gmetric'

    def __init__( self, binary=None ):
        """Remember alternate binary location if supplied"""

        if binary:
            self.binary = binary

        # Timeout for XML
        #
        # From ganglia's documentation:
        #
        # 'A metric will be deleted DMAX seconds after it is received, and
        # DMAX=0 means eternal life.'

        self.dmax = str( int( int( TORQUE_POLL_INTERVAL ) + 2 ) )

        try:
            gmond_file = GMOND_CONF

        except NameError:
            gmond_file = '/etc/gmond.conf'

        if not os.path.exists( gmond_file ):
            debug_msg( 0, gmond_file + ' does not exist' )
            sys.exit( 1 )

        incompatible = self.checkGmetricVersion()

        if incompatible:
            debug_msg( 0, 'Gmetric version not compatible, please upgrade to at least 3.0.1' )
            sys.exit( 1 )

    def checkGmetricVersion( self ):
        """
        Check that the gmetric version is at least 3.0.1,
        which is required for the syntax we use
        """

        incompatible = 0    # default if no version line is found

        for line in os.popen( self.binary + ' --version' ).readlines():

            line = line.split( ' ' )

            if len( line ) == 2 and str(line).find( 'gmetric' ) != -1:

                gmetric_version = line[1].split( '\n' )[0]

                version_major = int( gmetric_version.split( '.' )[0] )
                version_minor = int( gmetric_version.split( '.' )[1] )
                version_patch = int( gmetric_version.split( '.' )[2] )

                incompatible = 0

                if version_major < 3:

                    incompatible = 1

                elif version_major == 3:

                    if version_minor == 0:

                        if version_patch < 1:

                            incompatible = 1

        return incompatible

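    # For illustration: 'gmetric --version' is expected to print a line like
    # 'gmetric 3.0.1' (assumed output format), which the parsing above turns
    # into major=3, minor=0, patch=1.  Anything older than 3.0.1 is reported
    # as incompatible.
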
    def multicastGmetric( self, metricname, metricval, valtype='string' ):
        """Call gmetric binary and multicast"""

        cmd = self.binary

        try:
            cmd = cmd + ' -c' + GMOND_CONF
        except NameError:
            debug_msg( 10, 'Assuming /etc/gmond.conf for gmetric cmd (omitting -c)' )

        cmd = cmd + ' -n' + str( metricname ) + ' -v"' + str( metricval ) + '" -t' + str( valtype ) + ' -d' + str( self.dmax )

        debug_msg( 10, printTime() + ' ' + cmd )
        os.system( cmd )

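    # A sketch of the command this builds (paths and values are examples, not
    # captured from a real run):
    #
    #   /usr/bin/gmetric -c/etc/gmond.conf -nMONARCH-HEARTBEAT \
    #       -v"1234567890" -tstring -d17
    #
    # dmax is TORQUE_POLL_INTERVAL + 2, so with a 15 second poll interval a
    # metric expires 17 seconds after it was last received.
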
class DataGatherer:

    jobs = { }

    def __init__( self ):
        """Set up appropriate variables"""

        self.jobs = { }
        self.timeoffset = 0
        self.dp = DataProcessor()
        self.initPbsQuery()

    def initPbsQuery( self ):

        self.pq = None
        if TORQUE_SERVER:
            self.pq = PBSQuery( TORQUE_SERVER )
        else:
            self.pq = PBSQuery()

    def getAttr( self, attrs, name ):
        """Return the named attribute from the dictionary if it exists, else ''"""

        if attrs.has_key( name ):
            return attrs[name]
        else:
            return ''

    def jobDataChanged( self, jobs, job_id, attrs ):
        """Check if job with attrs and job_id in jobs has changed"""

        if jobs.has_key( job_id ):
            oldData = jobs[ job_id ]
        else:
            return 1

        for name, val in attrs.items():

            if oldData.has_key( name ):

                if oldData[ name ] != attrs[ name ]:

                    return 1

            else:
                return 1

        return 0

    def getJobData( self, known_jobs ):
        """Gather all data on current jobs in Torque"""

        if len( known_jobs ) > 0:
            jobs = known_jobs
        else:
            jobs = { }

        #self.initPbsQuery()

        #print self.pq.getnodes()

        joblist = self.pq.getjobs()

        self.cur_time = time.time()

        jobs_processed = [ ]

        #self.printJobs( joblist )

        for name, attrs in joblist.items():

            job_id = name.split( '.' )[0]

            jobs_processed.append( job_id )

            name = self.getAttr( attrs, 'Job_Name' )
            queue = self.getAttr( attrs, 'queue' )
            owner = self.getAttr( attrs, 'Job_Owner' ).split( '@' )[0]
            requested_time = self.getAttr( attrs, 'Resource_List.walltime' )
            requested_memory = self.getAttr( attrs, 'Resource_List.mem' )

            mynoderequest = self.getAttr( attrs, 'Resource_List.nodes' )

            if mynoderequest.find( ':' ) != -1 and mynoderequest.find( 'ppn' ) != -1:
                ppn = mynoderequest.split( ':' )[1].split( 'ppn=' )[1]
            else:
                ppn = ''

            status = self.getAttr( attrs, 'job_state' )

            if status == 'R':
                start_timestamp = self.getAttr( attrs, 'mtime' )
                nodes = self.getAttr( attrs, 'exec_host' ).split( '+' )

                nodeslist = [ ]

                for node in nodes:
                    host = node.split( '/' )[0]

                    if nodeslist.count( host ) == 0:

                        for translate_pattern in BATCH_HOST_TRANSLATE:

                            if translate_pattern.find( '/' ) != -1:

                                translate_orig = translate_pattern.split( '/' )[1]
                                translate_new = translate_pattern.split( '/' )[2]

                                host = re.sub( translate_orig, translate_new, host )

                        if not host in nodeslist:

                            nodeslist.append( host )

                if DETECT_TIME_DIFFS:

                    # If a job's start time is later than our current time,
                    # the Torque server's clock must be ahead of our local
                    # clock.

                    if int(start_timestamp) > int( int(self.cur_time) + int(self.timeoffset) ):

                        self.timeoffset = int( int(start_timestamp) - int(self.cur_time) )

            elif status == 'Q':
                start_timestamp = ''
                count_mynodes = 0
                numeric_node = 1

                for node in mynoderequest.split( '+' ):

                    nodepart = node.split( ':' )[0]

                    for letter in nodepart:

                        if letter not in string.digits:

                            numeric_node = 0

                    if not numeric_node:
                        count_mynodes = count_mynodes + 1
                    else:
                        count_mynodes = count_mynodes + int( nodepart )

                nodeslist = count_mynodes
            else:
                start_timestamp = ''
                nodeslist = ''

            myAttrs = { }
            myAttrs['name'] = name
            myAttrs['queue'] = queue
            myAttrs['owner'] = owner
            myAttrs['requested_time'] = requested_time
            myAttrs['requested_memory'] = requested_memory
            myAttrs['ppn'] = ppn
            myAttrs['status'] = status
            myAttrs['start_timestamp'] = start_timestamp
            myAttrs['reported'] = str( int( int( self.cur_time ) + int( self.timeoffset ) ) )
            myAttrs['nodes'] = nodeslist
            myAttrs['domain'] = string.join( socket.getfqdn().split( '.' )[1:], '.' )
            myAttrs['poll_interval'] = TORQUE_POLL_INTERVAL

            if self.jobDataChanged( jobs, job_id, myAttrs ) and myAttrs['status'] in [ 'R', 'Q' ]:
                jobs[ job_id ] = myAttrs

                #debug_msg( 10, printTime() + ' job %s state changed' %(job_id) )

        for id, attrs in jobs.items():

            if id not in jobs_processed:

                # This one isn't there anymore; goodbye!
                #
                del jobs[ id ]

        return jobs

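    # For illustration, a running job ends up in the jobs dictionary roughly
    # like this (all values invented):
    #
    #   jobs['1234'] = { 'name': 'my_run', 'queue': 'batch', 'owner': 'alice',
    #                    'requested_time': '02:00:00', 'requested_memory': '2gb',
    #                    'ppn': '2', 'status': 'R', 'start_timestamp': '1234567890',
    #                    'reported': '1234567900', 'nodes': [ 'node01', 'node02' ],
    #                    'domain': 'example.org', 'poll_interval': 15 }
    #
    # For a queued ('Q') job, 'nodes' holds the requested node count instead
    # of a host list.
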
    def submitJobData( self, jobs ):
        """Submit job info list"""

        self.dp.multicastGmetric( 'MONARCH-HEARTBEAT', str( int( int( self.cur_time ) + int( self.timeoffset ) ) ) )

        # Now let's spread the knowledge
        #
        for jobid, jobattrs in jobs.items():

            gmetric_val = self.compileGmetricVal( jobid, jobattrs )

            for val in gmetric_val:
                self.dp.multicastGmetric( 'MONARCH-JOB-' + jobid, val )

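    # Each polling round thus multicasts one MONARCH-HEARTBEAT metric (the
    # current timestamp, corrected by any detected clock offset) plus one or
    # more MONARCH-JOB-<jobid> metrics carrying the job attributes.
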
    def makeNodeString( self, nodelist ):
        """Make one big string of all hosts"""

        node_str = None

        for node in nodelist:
            if not node_str:
                node_str = node
            else:
                node_str = node_str + ';' + node

        return node_str

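    # Example: makeNodeString( [ 'node01', 'node02' ] ) returns
    # 'node01;node02'.
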
    def compileGmetricVal( self, jobid, jobattrs ):
        """Create the gmetric value string(s) for a job's info"""

        appendList = [ ]
        appendList.append( 'name=' + jobattrs['name'] )
        appendList.append( 'queue=' + jobattrs['queue'] )
        appendList.append( 'owner=' + jobattrs['owner'] )
        appendList.append( 'requested_time=' + jobattrs['requested_time'] )

        if jobattrs['requested_memory'] != '':
            appendList.append( 'requested_memory=' + jobattrs['requested_memory'] )

        if jobattrs['ppn'] != '':
            appendList.append( 'ppn=' + jobattrs['ppn'] )

        appendList.append( 'status=' + jobattrs['status'] )

        if jobattrs['start_timestamp'] != '':
            appendList.append( 'start_timestamp=' + jobattrs['start_timestamp'] )

        appendList.append( 'reported=' + jobattrs['reported'] )
        appendList.append( 'poll_interval=' + str( jobattrs['poll_interval'] ) )
        appendList.append( 'domain=' + jobattrs['domain'] )

        if jobattrs['status'] == 'R':
            if len( jobattrs['nodes'] ) > 0:
                appendList.append( 'nodes=' + self.makeNodeString( jobattrs['nodes'] ) )
        elif jobattrs['status'] == 'Q':
            appendList.append( 'nodes=' + str(jobattrs['nodes']) )

        return self.makeAppendLists( appendList )

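    # Continuing the invented example job, the resulting value string looks
    # roughly like:
    #
    #   'name=my_run queue=batch owner=alice requested_time=02:00:00 ...
    #    reported=1234567900 poll_interval=15 domain=example.org
    #    nodes=node01;node02'
    #
    # split over several strings by makeAppendLists() if it gets too long.
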
    def makeAppendLists( self, append_list ):
        """
        Divide all values from append_list over strings that stay
        below the maximum message size (see checkValAppendMaxSize)
        """

        app_lists = [ ]

        mystr = None

        for val in append_list:

            if not mystr:
                mystr = val
            else:
                if not self.checkValAppendMaxSize( mystr, val ):
                    mystr = mystr + ' ' + val
                else:
                    # Too big: start a new append string
                    app_lists.append( mystr )
                    mystr = val

        app_lists.append( mystr )

        return app_lists

    def checkValAppendMaxSize( self, val, text ):
        """Check whether appending text to val would exceed the maximum message size"""

        # A UDP datagram that fits in a single Ethernet frame is at most
        # about 1500 bytes; after the headers and gmetric overhead roughly
        # 1300 bytes remain, and we stay well below that with a 900 byte cap.
        #
        if len( val + text ) > 900:
            return 1
        else:
            return 0

    def printJobs( self, jobs ):
        """Print a jobinfo overview"""

        for name, attrs in jobs.items():

            print 'job %s' %(name)

            for name, val in attrs.items():

                print '\t%s = %s' %( name, val )

    def printJob( self, jobs, job_id ):
        """Print job with job_id from jobs"""

        print 'job %s' %(job_id)

        for name, val in jobs[ job_id ].items():

            print '\t%s = %s' %( name, val )

    def daemon( self ):
        """Run as daemon forever"""

        # Fork the first child
        #
        pid = os.fork()
        if pid > 0:
            sys.exit(0)    # end parent

        # creates a session and sets the process group ID
        #
        os.setsid()

        # Fork the second child
        #
        pid = os.fork()
        if pid > 0:
            sys.exit(0)    # end parent

        # Go to the root directory and set the umask
        #
        os.chdir('/')
        os.umask(0)

        sys.stdin.close()
        sys.stdout.close()
        sys.stderr.close()

        os.open('/dev/null', 0)
        os.dup(0)
        os.dup(0)

        self.run()

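    # The double fork plus setsid() above is the standard UNIX daemonizing
    # recipe: it detaches the process from the controlling terminal, and
    # stdin, stdout and stderr are then pointed at /dev/null.
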
    def run( self ):
        """Main thread"""

        while ( 1 ):

            self.jobs = self.getJobData( self.jobs )
            self.submitJobData( self.jobs )
            time.sleep( TORQUE_POLL_INTERVAL )

def printTime( ):
    """Return the current time/date in human readable format for log/debug"""

    return time.strftime("%a, %d %b %Y %H:%M:%S")

def debug_msg( level, msg ):
    """Print msg to stderr if its level does not exceed the current DEBUG_LEVEL"""

    if (DEBUG_LEVEL >= level):
        sys.stderr.write( msg + '\n' )

def main():
    """Application start"""

    if not processArgs( sys.argv[1:] ):
        sys.exit( 1 )

    gather = DataGatherer()
    if DAEMONIZE:
        gather.daemon()
    else:
        gather.run()

# w00t someone started me
#
if __name__ == '__main__':
    main()