#!/usr/bin/env python

# Specify debugging level here:
#
# 10 = gmetric commands
DEBUG_LEVEL = 0

# Whether or not to run as a daemon in background
#
DAEMONIZE = 1

# Which Torque server to monitor
#
TORQUE_SERVER = 'localhost'

# How many seconds to wait between polling of jobs
#
# this directly affects how accurately the
# end time of a job can be determined
#
TORQUE_POLL_INTERVAL = 10

# Alternate location of gmond.conf
#
# Default: /etc/gmond.conf
#
#GMOND_CONF = '/etc/gmond.conf'

# Whether or not to detect differences between
# the Torque server's time and local time.
#
# Ideally both machines (if not the same)
# should have the same time (via ntp or whatever)
#
DETECT_TIME_DIFFS = 1

# PBSQuery comes with the pbs_python bindings for Torque/PBS
from PBSQuery import PBSQuery
import sys
import time
import os
import socket
import string

class DataProcessor:
    """Class for processing of data"""

    binary = '/usr/bin/gmetric'

    def __init__( self, binary=None ):
        """Remember alternate binary location if supplied"""

        if binary:
            self.binary = binary

        # Timeout for XML
        #
        # From ganglia's documentation:
        #
        # 'A metric will be deleted DMAX seconds after it is received, and
        # DMAX=0 means eternal life.'

        self.dmax = str( int( TORQUE_POLL_INTERVAL ) + 2 )
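
        # With the default TORQUE_POLL_INTERVAL of 10 above, dmax becomes
        # '12': each metric outlives the poll interval by two seconds, so
        # it is refreshed before expiring while a job is still present,
        # and vanishes from ganglia soon after the job is gone.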

        try:
            gmond_file = GMOND_CONF
        except NameError:
            gmond_file = '/etc/gmond.conf'

        if not os.path.exists( gmond_file ):
            debug_msg( 0, gmond_file + ' does not exist' )
            sys.exit( 1 )

        incompatible = self.checkGmetricVersion()

        if incompatible:
            debug_msg( 0, 'Gmetric version not compatible, please upgrade to at least 3.0.1' )
            sys.exit( 1 )

    def checkGmetricVersion( self ):
        """
        Check that the gmetric version is at least 3.0.1,
        which the syntax we use requires
        """
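
        # The parsing below assumes `gmetric --version` prints a line of
        # the form (two space-separated fields, version as major.minor.patch):
        #
        #   gmetric 3.0.1
        #
        # Output may vary between builds; any other line is simply skipped.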

        # Assume compatible unless a version line proves otherwise;
        # without this, no matching line would leave us undefined below
        incompatible = 0

        for line in os.popen( self.binary + ' --version' ).readlines():

            line = line.split( ' ' )

            if len( line ) == 2 and str( line ).find( 'gmetric' ) != -1:

                gmetric_version = line[1].split( '\n' )[0]

                version_major = int( gmetric_version.split( '.' )[0] )
                version_minor = int( gmetric_version.split( '.' )[1] )
                version_patch = int( gmetric_version.split( '.' )[2] )

                # Anything older than 3.0.1 lacks the options we use
                if ( version_major, version_minor, version_patch ) < ( 3, 0, 1 ):
                    incompatible = 1

        return incompatible

    def multicastGmetric( self, metricname, metricval, valtype='string' ):
        """Call gmetric binary and multicast"""

        cmd = self.binary

        try:
            cmd = cmd + ' -c' + GMOND_CONF
        except NameError:
            debug_msg( 10, 'Assuming /etc/gmond.conf for gmetric cmd (omitting -c)' )

        cmd = cmd + ' -n' + str( metricname ) + ' -v"' + str( metricval ) + '" -t' + str( valtype ) + ' -d' + str( self.dmax )
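
        # The composed command ends up looking something like this
        # (illustrative values; -c only appears when GMOND_CONF is set):
        #
        #   /usr/bin/gmetric -c/etc/gmond.conf -nTOGA-JOB-1234 \
        #       -v"name=myjob queue=batch" -tstring -d12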

        debug_msg( 10, printTime() + ' ' + cmd )
        os.system( cmd )

class DataGatherer:
    """Gathers job info from Torque and submits it through DataProcessor"""

    jobs = { }

    def __init__( self ):
        """Setup appropriate variables"""

        self.jobs = { }
        self.timeoffset = 0
        self.dp = DataProcessor()
        self.initPbsQuery()

    def initPbsQuery( self ):
        """Set up the PBSQuery connection to the Torque server"""

        self.pq = None
        if TORQUE_SERVER:
            self.pq = PBSQuery( TORQUE_SERVER )
        else:
            self.pq = PBSQuery()
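
        # Note: called without an argument, PBSQuery connects to the
        # local machine's default Torque server (pbs_python behaviour,
        # as far as we rely on it here); with TORQUE_SERVER set to
        # 'localhost' above, that branch is normally not reached.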

    def getAttr( self, attrs, name ):
        """Return certain attribute from dictionary, if it exists"""

        if attrs.has_key( name ):
            return attrs[ name ]
        else:
            return ''

    def jobDataChanged( self, jobs, job_id, attrs ):
        """Check if job with attrs and job_id in jobs has changed"""

        if jobs.has_key( job_id ):
            oldData = jobs[ job_id ]
        else:
            return 1

        for name, val in attrs.items():

            if oldData.has_key( name ):

                if oldData[ name ] != val:
                    return 1

            else:
                return 1

        return 0

    def getJobData( self, known_jobs ):
        """Gather all data on current jobs in Torque"""

        if len( known_jobs ) > 0:
            jobs = known_jobs
        else:
            jobs = { }

        #self.initPbsQuery()

        #print self.pq.getnodes()

        joblist = self.pq.getjobs()

        self.cur_time = time.time()

        jobs_processed = [ ]

        #self.printJobs( joblist )

        for name, attrs in joblist.items():

            job_id = name.split( '.' )[0]

            jobs_processed.append( job_id )

            name = self.getAttr( attrs, 'Job_Name' )
            queue = self.getAttr( attrs, 'queue' )
            owner = self.getAttr( attrs, 'Job_Owner' ).split( '@' )[0]
            requested_time = self.getAttr( attrs, 'Resource_List.walltime' )
            requested_memory = self.getAttr( attrs, 'Resource_List.mem' )

            mynoderequest = self.getAttr( attrs, 'Resource_List.nodes' )

            if mynoderequest.find( ':' ) != -1 and mynoderequest.find( 'ppn' ) != -1:
                ppn = mynoderequest.split( ':' )[1].split( 'ppn=' )[1]
            else:
                ppn = ''
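
            # Resource_List.nodes examples and what the split above yields
            # (illustrative requests):
            #
            #   '2:ppn=4'       -> ppn = '4'
            #   'node01+node02' -> ppn = ''   (no ':' and no 'ppn')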

            status = self.getAttr( attrs, 'job_state' )

            if status == 'R':
                start_timestamp = self.getAttr( attrs, 'mtime' )
                nodes = self.getAttr( attrs, 'exec_host' ).split( '+' )

                nodeslist = [ ]

                for node in nodes:
                    host = node.split( '/' )[0]

                    if nodeslist.count( host ) == 0:
                        nodeslist.append( host )

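                # exec_host lists one entry per allocated cpu, e.g.
                # 'node1/0+node1/1+node2/0'; the loop above reduced
                # that to unique hostnames: [ 'node1', 'node2' ]
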
                if DETECT_TIME_DIFFS:

                    # If a job's start is later than our current date,
                    # that must mean the Torque server's time is later
                    # than our local time.

                    if int( start_timestamp ) > int( self.cur_time ) + int( self.timeoffset ):

                        self.timeoffset = int( start_timestamp ) - int( self.cur_time )

            elif status == 'Q':
                start_timestamp = ''
                count_mynodes = 0
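
                # A queued job has no exec_host yet, so estimate the node
                # count from the request itself, e.g. (illustrative):
                #
                #   '2:ppn=4+1:ppn=2' -> 2 + 1 = 3 nodes
                #   'fatnode'         -> not numeric, so counted as 1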

                for node in mynoderequest.split( '+' ):

                    nodepart = node.split( ':' )[0]

                    # Reset per node part: a non-numeric part earlier in
                    # the request must not taint the numeric ones
                    numeric_node = 1

                    for letter in nodepart:

                        if letter not in string.digits:

                            numeric_node = 0

                    if not numeric_node:
                        count_mynodes = count_mynodes + 1
                    else:
                        count_mynodes = count_mynodes + int( nodepart )

                nodeslist = count_mynodes
            else:
                start_timestamp = ''
                nodeslist = ''

            myAttrs = { }
            myAttrs['name'] = name
            myAttrs['queue'] = queue
            myAttrs['owner'] = owner
            myAttrs['requested_time'] = requested_time
            myAttrs['requested_memory'] = requested_memory
            myAttrs['ppn'] = ppn
            myAttrs['status'] = status
            myAttrs['start_timestamp'] = start_timestamp
            myAttrs['reported'] = str( int( self.cur_time ) + int( self.timeoffset ) )
            myAttrs['nodes'] = nodeslist
            myAttrs['domain'] = string.join( socket.getfqdn().split( '.' )[1:], '.' )
            myAttrs['poll_interval'] = TORQUE_POLL_INTERVAL

            if self.jobDataChanged( jobs, job_id, myAttrs ) and myAttrs['status'] in [ 'R', 'Q' ]:
                jobs[ job_id ] = myAttrs

                #debug_msg( 10, printTime() + ' job %s state changed' %(job_id) )

        for job_id, attrs in jobs.items():

            if job_id not in jobs_processed:

                # This one isn't there anymore; bye bye!
                #
                del jobs[ job_id ]

        return jobs

    def submitJobData( self, jobs ):
        """Submit job info list"""

        self.dp.multicastGmetric( 'TOGA-HEARTBEAT', str( int( self.cur_time ) + int( self.timeoffset ) ) )

        # Now let's spread the knowledge
        #
        for jobid, jobattrs in jobs.items():

            gmetric_val = self.compileGmetricVal( jobid, jobattrs )

            for val in gmetric_val:
                self.dp.multicastGmetric( 'TOGA-JOB-' + jobid, val )

    def makeNodeString( self, nodelist ):
        """Make one big string of all hosts"""
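
        # e.g. makeNodeString( [ 'node1', 'node2' ] ) -> 'node1;node2'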

        node_str = None

        for node in nodelist:
            if not node_str:
                node_str = node
            else:
                node_str = node_str + ';' + node

        return node_str

    def compileGmetricVal( self, jobid, jobattrs ):
        """Create a val string for gmetric of jobinfo"""
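
        # The result is a list of space-separated key=value strings,
        # e.g. (illustrative):
        #
        #   [ 'name=myjob queue=batch owner=jdoe requested_time=01:00:00 status=R ...' ]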

        appendList = [ ]
        appendList.append( 'name=' + jobattrs['name'] )
        appendList.append( 'queue=' + jobattrs['queue'] )
        appendList.append( 'owner=' + jobattrs['owner'] )
        appendList.append( 'requested_time=' + jobattrs['requested_time'] )

        if jobattrs['requested_memory'] != '':
            appendList.append( 'requested_memory=' + jobattrs['requested_memory'] )

        if jobattrs['ppn'] != '':
            appendList.append( 'ppn=' + jobattrs['ppn'] )

        appendList.append( 'status=' + jobattrs['status'] )

        if jobattrs['start_timestamp'] != '':
            appendList.append( 'start_timestamp=' + jobattrs['start_timestamp'] )

        appendList.append( 'reported=' + jobattrs['reported'] )
        appendList.append( 'poll_interval=' + str( jobattrs['poll_interval'] ) )
        appendList.append( 'domain=' + jobattrs['domain'] )

        if jobattrs['status'] == 'R':
            if len( jobattrs['nodes'] ) > 0:
                appendList.append( 'nodes=' + self.makeNodeString( jobattrs['nodes'] ) )
        elif jobattrs['status'] == 'Q':
            appendList.append( 'nodes=' + str( jobattrs['nodes'] ) )

        return self.makeAppendLists( appendList )

    def makeAppendLists( self, append_list ):
        """
        Divide all values from append_list over strings with a maximum
        size of 1400
        """
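
        # For example, if the key=value pairs together exceed 1400 bytes,
        # the result is several strings, each multicast as its own
        # gmetric value (illustrative):
        #
        #   [ 'name=... queue=... owner=...', 'nodes=node1;node2;...' ]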

        app_lists = [ ]

        mystr = None

        for val in append_list:

            if not mystr:
                mystr = val
            else:
                if not self.checkValAppendMaxSize( mystr, val ):
                    mystr = mystr + ' ' + val
                else:
                    # Too big, start a new append list
                    app_lists.append( mystr )
                    mystr = val

        app_lists.append( mystr )

        return app_lists

    def checkValAppendMaxSize( self, val, text ):
        """Check if val + text size is not above 1400 (max msg size)"""

        # Max frame size of a udp datagram is 1500 bytes
        # removing misc header and gmetric stuff leaves about 1400 bytes
        #
        if len( val + text ) > 1400:
            return 1
        else:
            return 0

    def printJobs( self, jobs ):
        """Print a jobinfo overview"""

        for name, attrs in jobs.items():

            print 'job %s' %(name)

            for name, val in attrs.items():

                print '\t%s = %s' %( name, val )

    def printJob( self, jobs, job_id ):
        """Print job with job_id from jobs"""

        print 'job %s' %(job_id)

        for name, val in jobs[ job_id ].items():

            print '\t%s = %s' %( name, val )

    def daemon( self ):
        """Run as daemon forever"""
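
        # Classic double fork: the first fork lets the parent exit so we
        # are no process group leader, setsid() then detaches us from the
        # controlling terminal, and the second fork makes sure we can
        # never reacquire one.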

        # Fork the first child
        #
        pid = os.fork()
        if pid > 0:
            sys.exit(0)  # end parent

        # creates a session and sets the process group ID
        #
        os.setsid()

        # Fork the second child
        #
        pid = os.fork()
        if pid > 0:
            sys.exit(0)  # end parent

        # Go to the root directory and set the umask
        #
        os.chdir('/')
        os.umask(0)

        # Point stdin/stdout/stderr at /dev/null
        #
        sys.stdin.close()
        sys.stdout.close()
        sys.stderr.close()

        os.open('/dev/null', 0)
        os.dup(0)
        os.dup(0)

        self.run()

    def run( self ):
        """Main thread"""

        while ( 1 ):

            self.jobs = self.getJobData( self.jobs )
            self.submitJobData( self.jobs )
            time.sleep( TORQUE_POLL_INTERVAL )

def printTime( ):
    """Print current time/date in human readable format for log/debug"""

    return time.strftime("%a, %d %b %Y %H:%M:%S")

def debug_msg( level, msg ):
    """Print msg if at or above current debug level"""

    if (DEBUG_LEVEL >= level):
        sys.stderr.write( msg + '\n' )

def main():
    """Application start"""

    gather = DataGatherer()
    if DAEMONIZE:
        gather.daemon()
    else:
        gather.run()

# w00t someone started me
#
if __name__ == '__main__':
    main()