#!/usr/bin/env python
#
# This file is part of Jobmonarch
#
# Copyright (C) 2006-2007  Ramon Bastiaans
# Copyright (C) 2007, 2009  Dave Love  (SGE code)
#
# Jobmonarch is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Jobmonarch is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
# SVN $Id: jobmond.py 656 2010-08-13 13:40:48Z ramonb $
#

import sys, getopt, ConfigParser, time, os, socket, string, re
import xdrlib, socket, syslog, xml, xml.sax
from xml.sax.handler import feature_namespaces
from collections import deque

VERSION='0.3.1+SVN'

def usage( ver ):

    print 'jobmond %s' %VERSION

    if ver:
        return 0

    print
    print 'Purpose:'
    print '  The Job Monitoring Daemon (jobmond) reports batch job information and statistics'
    print '  to Ganglia, which can be viewed with the Job Monarch web frontend'
    print
    print 'Usage:   jobmond [OPTIONS]'
    print
    print '  -c, --config=FILE    The configuration file to use (default: /etc/jobmond.conf)'
    print '  -p, --pidfile=FILE   Use pid file to store the process id'
    print '  -h, --help           Print help and exit'
    print '  -v, --version        Print version and exit'
    print

def processArgs( args ):

    SHORT_L = 'p:hvc:'
    LONG_L  = [ 'help', 'config=', 'pidfile=', 'version' ]

    global PIDFILE
    PIDFILE = None

    config_filename = '/etc/jobmond.conf'

    try:

        opts, args = getopt.getopt( args, SHORT_L, LONG_L )

    except getopt.GetoptError, detail:

        print detail
        usage( False )
        sys.exit( 1 )

    for opt, value in opts:

        if opt in [ '--config', '-c' ]:

            config_filename = value

        if opt in [ '--pidfile', '-p' ]:

            PIDFILE = value

        if opt in [ '--help', '-h' ]:

            usage( False )
            sys.exit( 0 )

        if opt in [ '--version', '-v' ]:

            usage( True )
            sys.exit( 0 )

    return loadConfig( config_filename )

# Fixme: This doesn't DTRT with commented-out bits of the file.  E.g.
# it picked up a commented-out `mcast_join' and tried to use a
# multicast channel when it shouldn't have done.
class GangliaConfigParser:

    def __init__( self, config_file ):

        self.config_file = config_file

        if not os.path.exists( self.config_file ):

            debug_msg( 0, "FATAL ERROR: gmond config '" + self.config_file + "' not found!" )
            sys.exit( 1 )

    def removeQuotes( self, value ):

        clean_value = value
        clean_value = clean_value.replace( "'", "" )
        clean_value = clean_value.replace( '"', '' )
        clean_value = clean_value.strip()

        return clean_value

    def getVal( self, section, valname ):

        cfg_fp        = open( self.config_file )
        section_start = False
        section_found = False
        value         = None

        for line in cfg_fp.readlines():

            if line.find( section ) != -1:

                section_found = True

            if line.find( '{' ) != -1 and section_found:

                section_start = True

            if line.find( '}' ) != -1 and section_found:

                section_start = False
                section_found = False

            if line.find( valname ) != -1 and section_start:

                value = string.join( line.split( '=' )[1:], '' ).strip()

        cfg_fp.close()

        return value

    def getInt( self, section, valname ):

        value = self.getVal( section, valname )

        if not value:
            return False

        value = self.removeQuotes( value )

        return int( value )

    def getStr( self, section, valname ):

        value = self.getVal( section, valname )

        if not value:
            return False

        value = self.removeQuotes( value )

        return str( value )

def findGmetric():

    for dir in os.path.expandvars( '$PATH' ).split( ':' ):

        guess = '%s/%s' %( dir, 'gmetric' )

        if os.path.exists( guess ):

            return guess

    return False

def loadConfig( filename ):

    def getlist( cfg_string ):

        my_list = [ ]

        for item_txt in cfg_string.split( ',' ):

            sep_char = None

            item_txt = item_txt.strip()

            for s_char in [ "'", '"' ]:

                if item_txt.find( s_char ) != -1:

                    if item_txt.count( s_char ) != 2:

                        print 'Missing quote: %s' %item_txt
                        sys.exit( 1 )

                    else:

                        sep_char = s_char
                        break

            if sep_char:

                item_txt = item_txt.split( sep_char )[1]

            my_list.append( item_txt )

        return my_list

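    # For illustration (values are made up): getlist( "'short', 'medium', long" )
    # yields [ 'short', 'medium', 'long' ]; quoted items have their quotes stripped,
    # unquoted items are only whitespace-trimmed.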
    cfg = ConfigParser.ConfigParser()

    cfg.read( filename )

    global DEBUG_LEVEL, DAEMONIZE, BATCH_SERVER, BATCH_POLL_INTERVAL
    global GMOND_CONF, DETECT_TIME_DIFFS, BATCH_HOST_TRANSLATE
    global BATCH_API, QUEUE, GMETRIC_TARGET, USE_SYSLOG
    global SYSLOG_LEVEL, SYSLOG_FACILITY, GMETRIC_BINARY

    DEBUG_LEVEL = cfg.getint( 'DEFAULT', 'DEBUG_LEVEL' )

    DAEMONIZE = cfg.getboolean( 'DEFAULT', 'DAEMONIZE' )

    SYSLOG_LEVEL    = -1
    SYSLOG_FACILITY = None

    try:
        USE_SYSLOG = cfg.getboolean( 'DEFAULT', 'USE_SYSLOG' )

    except ConfigParser.NoOptionError:

        USE_SYSLOG = True

        debug_msg( 0, 'ERROR: no option USE_SYSLOG found: assuming yes' )

    if USE_SYSLOG:

        try:
            SYSLOG_LEVEL = cfg.getint( 'DEFAULT', 'SYSLOG_LEVEL' )

        except ConfigParser.NoOptionError:

            debug_msg( 0, 'ERROR: no option SYSLOG_LEVEL found: assuming level 0' )
            SYSLOG_LEVEL = 0

        try:

            SYSLOG_FACILITY = eval( 'syslog.LOG_' + cfg.get( 'DEFAULT', 'SYSLOG_FACILITY' ) )

        except ConfigParser.NoOptionError:

            SYSLOG_FACILITY = syslog.LOG_DAEMON

            debug_msg( 0, 'ERROR: no option SYSLOG_FACILITY found: assuming facility DAEMON' )

    try:

        BATCH_SERVER = cfg.get( 'DEFAULT', 'BATCH_SERVER' )

    except ConfigParser.NoOptionError:

        # Backwards compatibility for old configs
        #
        BATCH_SERVER = cfg.get( 'DEFAULT', 'TORQUE_SERVER' )
        api_guess    = 'pbs'

    try:

        BATCH_POLL_INTERVAL = cfg.getint( 'DEFAULT', 'BATCH_POLL_INTERVAL' )

    except ConfigParser.NoOptionError:

        # Backwards compatibility for old configs
        #
        BATCH_POLL_INTERVAL = cfg.getint( 'DEFAULT', 'TORQUE_POLL_INTERVAL' )
        api_guess           = 'pbs'

    try:

        GMOND_CONF = cfg.get( 'DEFAULT', 'GMOND_CONF' )

    except ConfigParser.NoOptionError:

        # Not specified: assume /etc/gmond.conf
        #
        GMOND_CONF = '/etc/gmond.conf'

    ganglia_cfg = GangliaConfigParser( GMOND_CONF )

    # Let's try to find the GMETRIC_TARGET ourselves first from GMOND_CONF
    #
    gmetric_dest_ip = ganglia_cfg.getStr( 'udp_send_channel', 'mcast_join' )

    if not gmetric_dest_ip:

        # Maybe unicast target then
        #
        gmetric_dest_ip = ganglia_cfg.getStr( 'udp_send_channel', 'host' )

    gmetric_dest_port = ganglia_cfg.getStr( 'udp_send_channel', 'port' )

    if gmetric_dest_ip and gmetric_dest_port:

        GMETRIC_TARGET = '%s:%s' %( gmetric_dest_ip, gmetric_dest_port )
    else:

        debug_msg( 0, "WARNING: Can't parse udp_send_channel from: '%s'" %GMOND_CONF )

        # Couldn't figure it out: let's see if it's in our jobmond.conf
        #
        try:

            GMETRIC_TARGET = cfg.get( 'DEFAULT', 'GMETRIC_TARGET' )

        # Guess not: now just give up
        #
        except ConfigParser.NoOptionError:

            GMETRIC_TARGET = None

            debug_msg( 0, "ERROR: GMETRIC_TARGET not set: internal Gmetric handling aborted. Falling back to DEPRECATED use of gmond.conf/gmetric binary. This will slow down jobmond significantly!" )

            gmetric_bin = findGmetric()

            if gmetric_bin:

                GMETRIC_BINARY = gmetric_bin
            else:
                debug_msg( 0, "WARNING: Can't find gmetric binary anywhere in $PATH" )

                try:

                    GMETRIC_BINARY = cfg.get( 'DEFAULT', 'GMETRIC_BINARY' )

                except ConfigParser.NoOptionError:

                    debug_msg( 0, "FATAL ERROR: GMETRIC_BINARY not set and not in $PATH" )
                    sys.exit( 1 )

    DETECT_TIME_DIFFS = cfg.getboolean( 'DEFAULT', 'DETECT_TIME_DIFFS' )

    BATCH_HOST_TRANSLATE = getlist( cfg.get( 'DEFAULT', 'BATCH_HOST_TRANSLATE' ) )

    try:

        BATCH_API = cfg.get( 'DEFAULT', 'BATCH_API' )

    except ConfigParser.NoOptionError, detail:

        if BATCH_SERVER and api_guess:

            BATCH_API = api_guess
        else:
            debug_msg( 0, "FATAL ERROR: BATCH_API not set and can't make guess" )
            sys.exit( 1 )

    try:

        QUEUE = getlist( cfg.get( 'DEFAULT', 'QUEUE' ) )

    except ConfigParser.NoOptionError, detail:

        QUEUE = None

    return True

def fqdn_parts (fqdn):

    """Return pair of host and domain for fully-qualified domain name arg."""

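    # For example (illustrative values): fqdn_parts("node01.example.com") returns
    # ("node01", "example.com"), and fqdn_parts("localhost") returns ("localhost", "").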
    parts = fqdn.split (".")

    return (parts[0], string.join(parts[1:], "."))

METRIC_MAX_VAL_LEN = 900

class DataProcessor:

    """Class for processing of data"""

    binary = None

    def __init__( self, binary=None ):

        """Remember alternate binary location if supplied"""

        global GMETRIC_BINARY

        if binary:
            self.binary = binary

        if not self.binary:
            self.binary = GMETRIC_BINARY

        # Timeout for XML
        #
        # From ganglia's documentation:
        #
        #   'A metric will be deleted DMAX seconds after it is received, and
        #   DMAX=0 means eternal life.'

        self.dmax = str( int( int( BATCH_POLL_INTERVAL ) * 2 ) )

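        # So dmax is twice the polling interval: with BATCH_POLL_INTERVAL = 30
        # (an example value, not a default), dmax becomes "60" and Ganglia forgets
        # a job metric roughly two missed polling rounds after we stop sending it.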
        if GMOND_CONF:

            incompatible = self.checkGmetricVersion()

            if incompatible:

                debug_msg( 0, 'Gmetric version not compatible, please upgrade to at least 3.0.1' )
                sys.exit( 1 )

    def checkGmetricVersion( self ):

        """
        Check version of gmetric is at least 3.0.1
        for the syntax we use
        """

        global METRIC_MAX_VAL_LEN

        incompatible = 0

        gfp   = os.popen( self.binary + ' --version' )
        lines = gfp.readlines()

        gfp.close()

        for line in lines:

            line = line.split( ' ' )

            if len( line ) == 2 and str( line ).find( 'gmetric' ) != -1:

                gmetric_version = line[1].split( '\n' )[0]

                version_major = int( gmetric_version.split( '.' )[0] )
                version_minor = int( gmetric_version.split( '.' )[1] )
                version_patch = int( gmetric_version.split( '.' )[2] )

                incompatible = 0

                if version_major < 3:

                    incompatible = 1

                elif version_major == 3:

                    if version_minor == 0:

                        if version_patch < 1:

                            incompatible = 1

                        # Gmetric 3.0.1 up to (but not including) 3.0.3 had a smaller
                        # maximum metric value length
                        #
                        if version_patch < 3:

                            METRIC_MAX_VAL_LEN = 900

                        elif version_patch >= 3:

                            METRIC_MAX_VAL_LEN = 1400

        return incompatible

    def multicastGmetric( self, metricname, metricval, valtype='string', units='' ):

        """Call gmetric binary and multicast"""

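        # GMETRIC_TARGET is expected in 'host:port' form, judging from the split
        # below: e.g. a multicast channel such as '239.2.11.71:8649' or a unicast
        # gmond host like 'gmond.example.com:8649' (both hypothetical examples).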
        cmd = self.binary

        if GMETRIC_TARGET:

            GMETRIC_TARGET_HOST = GMETRIC_TARGET.split( ':' )[0]
            GMETRIC_TARGET_PORT = GMETRIC_TARGET.split( ':' )[1]

            metric_debug = "[gmetric] name: %s - val: %s - dmax: %s" %( str( metricname ), str( metricval ), str( self.dmax ) )

            debug_msg( 10, printTime() + ' ' + metric_debug)

            gm = Gmetric( GMETRIC_TARGET_HOST, GMETRIC_TARGET_PORT )

            gm.send( str( metricname ), str( metricval ), str( self.dmax ), valtype, units )

        else:
            try:
                cmd = cmd + ' -c' + GMOND_CONF

            except NameError:

                debug_msg( 10, 'Assuming /etc/gmond.conf for gmetric cmd (omitting)' )

            cmd = cmd + ' -n' + str( metricname )+ ' -v"' + str( metricval )+ '" -t' + str( valtype ) + ' -d' + str( self.dmax )

            if len( units ) > 0:

                cmd = cmd + ' -u"' + units + '"'

            debug_msg( 10, printTime() + ' ' + cmd )

            os.system( cmd )

class DataGatherer:

    """Skeleton class for batch system DataGatherer"""

    def printJobs( self, jobs ):

        """Print a jobinfo overview"""

        for name, attrs in self.jobs.items():

            print 'job %s' %(name)

            for name, val in attrs.items():

                print '\t%s = %s' %( name, val )

    def printJob( self, jobs, job_id ):

        """Print job with job_id from jobs"""

        print 'job %s' %(job_id)

        for name, val in jobs[ job_id ].items():

            print '\t%s = %s' %( name, val )

    def getAttr( self, attrs, name ):

        """Return a certain attribute from a dictionary, if it exists"""

        if attrs.has_key( name ):

            return attrs[ name ]
        else:
            return ''

    def jobDataChanged( self, jobs, job_id, attrs ):

        """Check if job with attrs and job_id in jobs has changed"""

        if jobs.has_key( job_id ):

            oldData = jobs[ job_id ]
        else:
            return 1

        for name, val in attrs.items():

            if oldData.has_key( name ):

                if oldData[ name ] != attrs[ name ]:

                    return 1

            else:
                return 1

        return 0

    def submitJobData( self ):

        """Submit job info list"""

        global BATCH_API

        self.dp.multicastGmetric( 'MONARCH-HEARTBEAT', str( int( int( self.cur_time ) + int( self.timeoffset ) ) ) )

        running_jobs = 0
        queued_jobs  = 0

        # Count how many running/queued jobs we found
        #
        for jobid, jobattrs in self.jobs.items():

            if jobattrs[ 'status' ] == 'Q':

                queued_jobs += 1

            elif jobattrs[ 'status' ] == 'R':

                running_jobs += 1

        # Report running/queued jobs as separate metrics for a nice RRD graph
        #
        self.dp.multicastGmetric( 'MONARCH-RJ', str( running_jobs ), 'uint32', 'jobs' )
        self.dp.multicastGmetric( 'MONARCH-QJ', str( queued_jobs ), 'uint32', 'jobs' )

        # Report down/offline nodes in batch (PBS only ATM)
        #
        if BATCH_API == 'pbs':

            domain = fqdn_parts( socket.getfqdn() )[1]

            downed_nodes  = list()
            offline_nodes = list()

            l = ['state']

            for name, node in self.pq.getnodes().items():

                if ( node[ 'state' ].find( "down" ) != -1 ):

                    downed_nodes.append( name )

                if ( node[ 'state' ].find( "offline" ) != -1 ):

                    offline_nodes.append( name )

            downnodeslist    = do_nodelist( downed_nodes )
            offlinenodeslist = do_nodelist( offline_nodes )

            down_str = 'nodes=%s domain=%s reported=%s' %( string.join( downnodeslist, ';' ), domain, str( int( int( self.cur_time ) + int( self.timeoffset ) ) ) )
            offl_str = 'nodes=%s domain=%s reported=%s' %( string.join( offlinenodeslist, ';' ), domain, str( int( int( self.cur_time ) + int( self.timeoffset ) ) ) )
            self.dp.multicastGmetric( 'MONARCH-DOWN' , down_str )
            self.dp.multicastGmetric( 'MONARCH-OFFLINE', offl_str )

        # Now let's spread the knowledge
        #
        for jobid, jobattrs in self.jobs.items():

            # Make gmetric values for each job: respect max gmetric value length
            #
            gmetric_val      = self.compileGmetricVal( jobid, jobattrs )
            metric_increment = 0

            # If we have more job info than the max gmetric value length allows, split it up
            # amongst multiple metrics
            #
            for val in gmetric_val:

                self.dp.multicastGmetric( 'MONARCH-JOB-' + jobid + '-' + str(metric_increment), val )

                # Increase follow number if this jobinfo is split up amongst more than 1 gmetric
                #
                metric_increment = metric_increment + 1

    def compileGmetricVal( self, jobid, jobattrs ):

        """Create a val string for gmetric of jobinfo"""

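        # The end result is one or more space-separated 'name=value' strings, for
        # example (hypothetical job): "name=run42 owner=alice queue=short status=R
        # nodes=n1;n2"; a new string is started whenever METRIC_MAX_VAL_LEN would be
        # exceeded.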
        gval_lists = [ ]
        val_list   = { }

        for val_name, val_value in jobattrs.items():

            # These are our own metric names, i.e.: status, start_timestamp, etc
            #
            val_list_names_len = len( string.join( val_list.keys() ) ) + len(val_list.keys())

            # These are their corresponding values
            #
            val_list_vals_len = len( string.join( val_list.values() ) ) + len(val_list.values())

            if val_name == 'nodes' and jobattrs['status'] == 'R':

                node_str = None

                for node in val_value:

                    if node_str:

                        node_str = node_str + ';' + node
                    else:
                        node_str = node

                    # Make sure that if we add this new info, the total metric's value length does not exceed METRIC_MAX_VAL_LEN
                    #
                    if (val_list_names_len + len(val_name) ) + (val_list_vals_len + len(node_str) ) > METRIC_MAX_VAL_LEN:

                        # It's too big, we need to make a new gmetric for the additional info
                        #
                        val_list[ val_name ] = node_str

                        gval_lists.append( val_list )

                        val_list = { }
                        node_str = None

                val_list[ val_name ] = node_str

                gval_lists.append( val_list )

                val_list = { }

            elif val_value != '':

                # Make sure that if we add this new info, the total metric's value length does not exceed METRIC_MAX_VAL_LEN
                #
                if (val_list_names_len + len(val_name) ) + (val_list_vals_len + len(str(val_value)) ) > METRIC_MAX_VAL_LEN:

                    # It's too big, we need to make a new gmetric for the additional info
                    #
                    gval_lists.append( val_list )

                    val_list = { }

                val_list[ val_name ] = val_value

        if len( val_list ) > 0:

            gval_lists.append( val_list )

        str_list = [ ]

        # Now append the value names and values together, i.e.: stop_timestamp=value, etc
        #
        for val_list in gval_lists:

            my_val_str = None

            for val_name, val_value in val_list.items():

                if type(val_value) == list:

                    val_value = string.join( val_value, ',' )

                if my_val_str:

                    try:
                        # fixme: It's getting
                        # ('nodes', None) items
                        my_val_str = my_val_str + ' ' + val_name + '=' + val_value
                    except:
                        pass

                else:
                    my_val_str = val_name + '=' + val_value

            str_list.append( my_val_str )

        return str_list

    def daemon( self ):

        """Run as daemon forever"""

        # Fork the first child
        #
        pid = os.fork()
        if pid > 0:
            sys.exit(0)  # end parent

        # creates a session and sets the process group ID
        #
        os.setsid()

        # Fork the second child
        #
        pid = os.fork()
        if pid > 0:
            sys.exit(0)  # end parent

        write_pidfile()

        # Go to the root directory and set the umask
        #
        os.chdir('/')
        os.umask(0)

        sys.stdin.close()
        sys.stdout.close()
        sys.stderr.close()

        os.open('/dev/null', os.O_RDWR)
        os.dup2(0, 1)
        os.dup2(0, 2)

        self.run()

    def run( self ):

        """Main thread"""

        while ( 1 ):

            self.getJobData()
            self.submitJobData()
            time.sleep( BATCH_POLL_INTERVAL )

# SGE code by Dave Love <fx@gnu.org>.  Tested with SGE 6.0u8 and 6.0u11.  May
# work with SGE 6.1 (else should be easily fixable), but definitely doesn't
# with 6.2.  See also the fixmes.

class NoJobs (Exception):
    """Exception raised by empty job list in qstat output."""
    pass

class SgeQstatXMLParser(xml.sax.handler.ContentHandler):
    """SAX handler for XML output from Sun Grid Engine's `qstat'."""

    def __init__(self):
        self.value = ""
        self.joblist = []
        self.job = {}
        self.queue = ""
        self.in_joblist = False
        self.lrequest = False
        self.eltq = deque()
        xml.sax.handler.ContentHandler.__init__(self)

    # The structure of the output is as follows (for SGE 6.0).  It's
    # similar for 6.1, but radically different for 6.2, and is
    # undocumented generally.  Unfortunately it's voluminous, and probably
    # doesn't scale to large clusters/queues.

    # <detailed_job_info  xmlns:xsd="http://www.w3.org/2001/XMLSchema">
    #   <djob_info>
    #     <qmaster_response>  <!-- job -->
    #       ...
    #       <JB_ja_template>
    #         <ulong_sublist>
    #         ...             <!-- start_time, state ... -->
    #         </ulong_sublist>
    #       </JB_ja_template>
    #       <JB_ja_tasks>
    #         <ulong_sublist>
    #         ...             <!-- task info
    #         </ulong_sublist>
    #         ...
    #       </JB_ja_tasks>
    #       ...
    #     </qmaster_response>
    #   </djob_info>
    #   <messages>
    #   ...

    # NB.  We might treat each task as a separate job, like
    # straight qstat output, but the web interface expects jobs to
    # be identified by integers, not, say, <job number>.<task>.

    # So, I lied.  If the job list is empty, we get invalid XML
    # like this, which we need to defend against:

    # <unknown_jobs  xmlns:xsd="http://www.w3.org/2001/XMLSchema">
    #   <>
    #     <ST_name>*</ST_name>
    #   </>
    # </unknown_jobs>

    def startElement(self, name, attrs):
        self.value = ""
        if name == "djob_info":  # job list
            self.in_joblist = True
        # The job container is "qmaster_response" in SGE 6.0
        # and 6.1, but "element" in 6.2.  This is only the very
        # start of what's necessary for 6.2, though (sigh).
        elif (name == "qmaster_response" or name == "element") \
                and self.eltq[-1] == "djob_info":  # job
            self.job = {"job_state": "U", "slots": 0,
                        "nodes": [], "queued_timestamp": "",
                        "queue": "", "ppn": "0", "RN_max": 0,
                        # fixme in endElement
                        "requested_memory": 0, "requested_time": 0
                        }
            self.joblist.append(self.job)
        elif name == "qstat_l_requests":  # resource request
            self.lrequest = True
        elif name == "unknown_jobs":
            raise NoJobs
        self.eltq.append (name)

    def characters(self, ch):
        self.value += ch

    def endElement(self, name):
        """Snarf job elements contents into job dictionary.
           Translate keys if appropriate."""

        name_trans = {
            "JB_job_number": "number",
            "JB_job_name": "name", "JB_owner": "owner",
            "queue_name": "queue", "JAT_start_time": "start_timestamp",
            "JB_submission_time": "queued_timestamp"
            }
        value = self.value
        self.eltq.pop ()

        if name == "djob_info":
            self.in_joblist = False
            self.job = {}
        elif name == "JAT_master_queue":
            self.job["queue"] = value.split("@")[0]
        elif name == "JG_qhostname":
            if not (value in self.job["nodes"]):
                self.job["nodes"].append(value)
        elif name == "JG_slots":  # slots in use
            self.job["slots"] += int(value)
        elif name == "RN_max":  # requested slots (tasks or parallel)
            self.job["RN_max"] = max (self.job["RN_max"],
                                      int(value))
        elif name == "JAT_state":  # job state (bitwise or)
            value = int (value)
            # Status values from sge_jobL.h
            #define JIDLE                   0x00000000
            #define JHELD                   0x00000010
            #define JMIGRATING              0x00000020
            #define JQUEUED                 0x00000040
            #define JRUNNING                0x00000080
            #define JSUSPENDED              0x00000100
            #define JTRANSFERING            0x00000200
            #define JDELETED                0x00000400
            #define JWAITING                0x00000800
            #define JEXITING                0x00001000
            #define JWRITTEN                0x00002000
            #define JSUSPENDED_ON_THRESHOLD 0x00010000
            #define JFINISHED               0x00010000
            if value & 0x80:
                self.job["status"] = "R"
            elif value & 0x40:
                self.job["status"] = "Q"
            else:
                self.job["status"] = "O"  # `other'
        elif name == "CE_name" and self.lrequest and self.value in \
                ("h_cpu", "s_cpu", "cpu", "h_core", "s_core"):
            # We're in a container for an interesting resource
            # request; record which type.
            self.lrequest = self.value
        elif name == "CE_doubleval" and self.lrequest:
            # If we're in a container for an interesting
            # resource request, use the maximum of the hard
            # and soft requests to record the requested CPU
            # or core.  Fixme: I'm not sure if this logic is
            # right.
            if self.lrequest in ("h_core", "s_core"):
                self.job["requested_memory"] = \
                    max (float (value),
                         self.job["requested_memory"])
            # Fixme: Check what cpu means, c.f. [hs]_cpu.
            elif self.lrequest in ("h_cpu", "s_cpu", "cpu"):
                self.job["requested_time"] = \
                    max (float (value),
                         self.job["requested_time"])
        elif name == "qstat_l_requests":
            self.lrequest = False
        elif self.job and self.in_joblist:
            if name in name_trans:
                name = name_trans[name]
            self.job[name] = value

# Abstracted from PBS original.
# Fixme: Is it worth (or appropriate for PBS) sorting the result?
#
def do_nodelist( nodes ):

    """Translate node list as appropriate."""

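    # BATCH_HOST_TRANSLATE entries are applied below with re.sub(); judging from the
    # split( '/' ) calls they are assumed to be sed-style patterns such as
    # 's/^node/n/' (hypothetical example), rewriting each short hostname in turn.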
    nodeslist = [ ]
    my_domain = fqdn_parts( socket.getfqdn() )[1]

    for node in nodes:

        host = node.split( '/' )[0]  # not relevant for SGE
        h, host_domain = fqdn_parts(host)

        if host_domain == my_domain:

            host = h

        if nodeslist.count( host ) == 0:

            for translate_pattern in BATCH_HOST_TRANSLATE:

                if translate_pattern.find( '/' ) != -1:

                    translate_orig = \
                        translate_pattern.split( '/' )[1]
                    translate_new = \
                        translate_pattern.split( '/' )[2]
                    host = re.sub( translate_orig,
                                   translate_new, host )
            if not host in nodeslist:
                nodeslist.append( host )
    return nodeslist

class SgeDataGatherer(DataGatherer):

    jobs = {}

    def __init__( self ):
        self.jobs = {}
        self.timeoffset = 0
        self.dp = DataProcessor()

    def getJobData( self ):
        """Gather all data on current jobs in SGE"""

        import popen2

        self.cur_time = 0
        queues = ""
        if QUEUE:  # only for specific queues
            # Fixme:  assumes queue names don't contain single
            # quote or comma.  Don't know what the SGE rules are.
            queues = " -q '" + string.join (QUEUE, ",") + "'"
        # Note the comment in SgeQstatXMLParser about scaling with
        # this method of getting data.  I haven't found a better one.
        # Output with args `-xml -ext -f -r' is easier to parse
        # in some ways, harder in others, but it doesn't provide
        # the submission time (at least SGE 6.0).  The pipeline
        # into sed corrects bogus XML observed with a configuration
        # of SGE 6.0u8, which otherwise causes the parsing to hang.
        piping = popen2.Popen3("qstat -u '*' -j '*' -xml | \
sed -e 's/reported usage>/reported_usage>/g' -e 's;<\/*JATASK:.*>;;'" \
                               + queues, True)
        qstatparser = SgeQstatXMLParser()
        parse_err = 0
        try:
            xml.sax.parse(piping.fromchild, qstatparser)
        except NoJobs:
            pass
        except:
            parse_err = 1
        if piping.wait():
            debug_msg(10,
                      "qstat error, skipping until next polling interval: "
                      + piping.childerr.readline())
            return None
        elif parse_err:
            debug_msg(10, "Bad XML output from qstat")
            sys.exit(1)
        for f in piping.fromchild, piping.tochild, piping.childerr:
            f.close()
        self.cur_time = time.time()
        jobs_processed = []
        for job in qstatparser.joblist:
            job_id = job["number"]
            if job["status"] in [ 'Q', 'R' ]:
                jobs_processed.append(job_id)
            if job["status"] == "R":
                job["nodes"] = do_nodelist (job["nodes"])
                # Fixme: why is job["nodes"] sometimes null?
                try:
                    # Fixme: Is this sensible?  The
                    # PBS-type PPN isn't something you use
                    # with SGE.
                    job["ppn"] = float(job["slots"]) / \
                        len(job["nodes"])
                except:
                    job["ppn"] = 0
                if DETECT_TIME_DIFFS:
                    # If a job start is later than our
                    # current date, that must mean
                    # the SGE server's time is later
                    # than our local time.
                    start_timestamp = \
                        int (job["start_timestamp"])
                    if start_timestamp > \
                            int(self.cur_time) + \
                            int(self.timeoffset):

                        self.timeoffset = \
                            start_timestamp - \
                            int(self.cur_time)
            else:
                # fixme: Not sure what this should be:
                job["ppn"] = job["RN_max"]
                job["nodes"] = "1"

            myAttrs = {}
            for attr in ["name", "queue", "owner",
                         "requested_time", "status",
                         "requested_memory", "ppn",
                         "start_timestamp", "queued_timestamp"]:
                myAttrs[attr] = str(job[attr])
            myAttrs["nodes"] = job["nodes"]
            myAttrs["reported"] = str(int(self.cur_time) + \
                                      int(self.timeoffset))
            myAttrs["domain"] = fqdn_parts(socket.getfqdn())[1]
            myAttrs["poll_interval"] = str(BATCH_POLL_INTERVAL)

            if self.jobDataChanged(self.jobs, job_id, myAttrs) \
                    and myAttrs["status"] in ["R", "Q"]:
                self.jobs[job_id] = myAttrs
        for id, attrs in self.jobs.items():
            if id not in jobs_processed:
                del self.jobs[id]

# LSF code by Mahmoud Hanafi <hanafim@users.sourceforge.nt>
# Requires LSFObject http://sourceforge.net/projects/lsfobject
#
class LsfDataGatherer(DataGatherer):

    """This is the DataGatherer for LSF"""

    global lsfObject

    def __init__( self ):

        self.jobs = { }
        self.timeoffset = 0
        self.dp = DataProcessor()
        self.initLsfQuery()

    def _countDuplicatesInList( self, dupedList ):

        countDupes = { }

        for item in dupedList:

            if not countDupes.has_key( item ):

                countDupes[ item ] = 1
            else:
                countDupes[ item ] = countDupes[ item ] + 1

        dupeCountList = [ ]

        for item, count in countDupes.items():

            dupeCountList.append( ( item, count ) )

        return dupeCountList
    #
    #lst = ['I1','I2','I1','I3','I4','I4','I7','I7','I7','I7','I7']
    #print _countDuplicatesInList(lst)
    #[('I1', 2), ('I3', 1), ('I2', 1), ('I4', 2), ('I7', 5)]
    ########################

    def initLsfQuery( self ):
        self.pq = None
        self.pq = lsfObject.jobInfoEntObject()

    def getJobData( self, known_jobs="" ):
        """Gather all data on current jobs in LSF"""
        if len( known_jobs ) > 0:
            jobs = known_jobs
        else:
            jobs = { }
        joblist = {}
        joblist = self.pq.getJobInfo()
        nodelist = ''

        self.cur_time = time.time()

        jobs_processed = [ ]

        for name, attrs in joblist.items():
            job_id = str(name)
            jobs_processed.append( job_id )
            name = self.getAttr( attrs, 'jobName' )
            queue = self.getAttr( self.getAttr( attrs, 'submit') , 'queue' )
            owner = self.getAttr( attrs, 'user' )

            ### THIS IS THE rLimit List index values
            #define LSF_RLIMIT_CPU      0   /* cpu time in milliseconds */
            #define LSF_RLIMIT_FSIZE    1   /* maximum file size */
            #define LSF_RLIMIT_DATA     2   /* data size */
            #define LSF_RLIMIT_STACK    3   /* stack size */
            #define LSF_RLIMIT_CORE     4   /* core file size */
            #define LSF_RLIMIT_RSS      5   /* resident set size */
            #define LSF_RLIMIT_NOFILE   6   /* open files */
            #define LSF_RLIMIT_OPEN_MAX 7   /* (from HP-UX) */
            #define LSF_RLIMIT_VMEM     8   /* maximum swap mem */
            #define LSF_RLIMIT_SWAP     8
            #define LSF_RLIMIT_RUN      9   /* max wall-clock time limit */
            #define LSF_RLIMIT_PROCESS  10  /* process number limit */
            #define LSF_RLIMIT_THREAD   11  /* thread number limit (introduced in LSF6.0) */
            #define LSF_RLIM_NLIMITS    12  /* number of resource limits */

            requested_time = self.getAttr( self.getAttr( attrs, 'submit') , 'rLimits' )[9]
            if requested_time == -1:
                requested_time = ""
            requested_memory = self.getAttr( self.getAttr( attrs, 'submit') , 'rLimits' )[8]
            if requested_memory == -1:
                requested_memory = ""
            # This tries to get proc per node. We don't support this right now
            ppn = 0 #self.getAttr( self.getAttr( attrs, 'SubmitList') , 'numProessors' )
            requested_cpus = self.getAttr( self.getAttr( attrs, 'submit') , 'numProcessors' )
            if requested_cpus == None or requested_cpus == "":
                requested_cpus = 1

            if QUEUE:
                for q in QUEUE:
                    if q == queue:
                        display_queue = 1
                        break
                    else:
                        display_queue = 0
                        continue
                if display_queue == 0:
                    continue

            runState = self.getAttr( attrs, 'status' )
            if runState == 4:
                status = 'R'
            else:
                status = 'Q'
            queued_timestamp = self.getAttr( attrs, 'submitTime' )

            if status == 'R':
                start_timestamp = self.getAttr( attrs, 'startTime' )
                nodesCpu = dict(self._countDuplicatesInList(self.getAttr( attrs, 'exHosts' )))
                nodelist = nodesCpu.keys()

                if DETECT_TIME_DIFFS:

                    # If a job start is later than our current date,
                    # that must mean the LSF server's time is later
                    # than our local time.

                    if int(start_timestamp) > int( int(self.cur_time) + int(self.timeoffset) ):

                        self.timeoffset = int( int(start_timestamp) - int(self.cur_time) )

            elif status == 'Q':
                start_timestamp = ''
                count_mynodes = 0
                numeric_node = 1
                nodelist = ''

            myAttrs = { }
            if name == "":
                myAttrs['name'] = "none"
            else:
                myAttrs['name'] = name

            myAttrs[ 'owner' ]            = owner
            myAttrs[ 'requested_time' ]   = str(requested_time)
            myAttrs[ 'requested_memory' ] = str(requested_memory)
            myAttrs[ 'requested_cpus' ]   = str(requested_cpus)
            myAttrs[ 'ppn' ]              = str( ppn )
            myAttrs[ 'status' ]           = status
            myAttrs[ 'start_timestamp' ]  = str(start_timestamp)
            myAttrs[ 'queue' ]            = str(queue)
            myAttrs[ 'queued_timestamp' ] = str(queued_timestamp)
            myAttrs[ 'reported' ]         = str( int( int( self.cur_time ) + int( self.timeoffset ) ) )
            myAttrs[ 'nodes' ]            = do_nodelist( nodelist )
            myAttrs[ 'domain' ]           = fqdn_parts( socket.getfqdn() )[1]
            myAttrs[ 'poll_interval' ]    = str(BATCH_POLL_INTERVAL)

            if self.jobDataChanged( jobs, job_id, myAttrs ) and myAttrs['status'] in [ 'R', 'Q' ]:
                jobs[ job_id ] = myAttrs

                debug_msg( 10, printTime() + ' job %s state changed' %(job_id) )

        for id, attrs in jobs.items():
            if id not in jobs_processed:
                # This one isn't there anymore
                #
                del jobs[ id ]
        self.jobs=jobs


class PbsDataGatherer( DataGatherer ):

    """This is the DataGatherer for PBS and Torque"""

    global PBSQuery, PBSError

    def __init__( self ):

        """Setup appropriate variables"""

        self.jobs = { }
        self.timeoffset = 0
        self.dp = DataProcessor()

        self.initPbsQuery()

    def initPbsQuery( self ):

        self.pq = None

        if( BATCH_SERVER ):

            self.pq = PBSQuery( BATCH_SERVER )
        else:
            self.pq = PBSQuery()

        try:
            self.pq.old_data_structure()

        except AttributeError:

            # pbs_query is older
            #
            pass

    def getJobData( self ):

        """Gather all data on current jobs in Torque"""

        joblist = {}
        self.cur_time = 0

        try:
            joblist = self.pq.getjobs()
            self.cur_time = time.time()

        except PBSError, detail:

            debug_msg( 10, "Caught PBS unavailable, skipping until next polling interval: " + str( detail ) )
            return None

        jobs_processed = [ ]

        for name, attrs in joblist.items():
            display_queue = 1
            job_id = name.split( '.' )[0]

            name = self.getAttr( attrs, 'Job_Name' )
            queue = self.getAttr( attrs, 'queue' )

            if QUEUE:
                for q in QUEUE:
                    if q == queue:
                        display_queue = 1
                        break
                    else:
                        display_queue = 0
                        continue
                if display_queue == 0:
                    continue


            owner = self.getAttr( attrs, 'Job_Owner' ).split( '@' )[0]
            requested_time = self.getAttr( attrs, 'Resource_List.walltime' )
            requested_memory = self.getAttr( attrs, 'Resource_List.mem' )

            mynoderequest = self.getAttr( attrs, 'Resource_List.nodes' )

            ppn = ''

            if mynoderequest.find( ':' ) != -1 and mynoderequest.find( 'ppn' ) != -1:

                mynoderequest_fields = mynoderequest.split( ':' )

                for mynoderequest_field in mynoderequest_fields:

                    if mynoderequest_field.find( 'ppn' ) != -1:

                        ppn = mynoderequest_field.split( 'ppn=' )[1]

            status = self.getAttr( attrs, 'job_state' )

            if status in [ 'Q', 'R' ]:

                jobs_processed.append( job_id )

            queued_timestamp = self.getAttr( attrs, 'ctime' )

            if status == 'R':

                start_timestamp = self.getAttr( attrs, 'mtime' )
                nodes = self.getAttr( attrs, 'exec_host' ).split( '+' )

                nodeslist = do_nodelist( nodes )

                if DETECT_TIME_DIFFS:

                    # If a job start is later than our current date,
                    # that must mean the Torque server's time is later
                    # than our local time.

                    if int( start_timestamp ) > int( int( self.cur_time ) + int( self.timeoffset ) ):

                        self.timeoffset = int( int(start_timestamp) - int(self.cur_time) )

            elif status == 'Q':

                # 'mynoderequest' can be a string in the following syntax according to the
                # Torque Administrator's manual:
                #
                # {<node_count> | <hostname>}[:ppn=<ppn>][:<property>[:<property>]...][+ ...]
                # {<node_count> | <hostname>}[:ppn=<ppn>][:<property>[:<property>]...][+ ...]
                # etc
                #

                #
                # For now we only count the number of nodes requested and ignore any properties
                #

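                # For example (hypothetical request): '2:ppn=4:bigmem+storage01:ppn=2'
                # is counted below as 3 nodes: 2 from the numeric part plus 1 for the
                # hostname part.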
[354] | 1383 | start_timestamp = '' |
---|
| 1384 | count_mynodes = 0 |
---|
| 1385 | |
---|
[133] | 1386 | for node in mynoderequest.split( '+' ): |
---|
[67] | 1387 | |
---|
[451] | 1388 | # Just grab the {node_count|hostname} part and ignore properties |
---|
| 1389 | # |
---|
[354] | 1390 | nodepart = node.split( ':' )[0] |
---|
[67] | 1391 | |
---|
[451] | 1392 | # Let's assume a node_count value |
---|
| 1393 | # |
---|
| 1394 | numeric_node = 1 |
---|
| 1395 | |
---|
| 1396 | # Chop the value up into characters |
---|
| 1397 | # |
---|
[133] | 1398 | for letter in nodepart: |
---|
[67] | 1399 | |
---|
[451] | 1400 | # If this char is not a digit (0-9), this must be a hostname |
---|
| 1401 | # |
---|
[133] | 1402 | if letter not in string.digits: |
---|
| 1403 | |
---|
[354] | 1404 | numeric_node = 0 |
---|
[133] | 1405 | |
---|
[451] | 1406 | # If this is a hostname, just count this as one (1) node |
---|
| 1407 | # |
---|
[133] | 1408 | if not numeric_node: |
---|
[354] | 1409 | |
---|
| 1410 | count_mynodes = count_mynodes + 1 |
---|
[133] | 1411 | else: |
---|
[451] | 1412 | |
---|
| 1413 | # If this a number, it must be the node_count |
---|
| 1414 | # and increase our count with it's value |
---|
| 1415 | # |
---|
[327] | 1416 | try: |
---|
[354] | 1417 | count_mynodes = count_mynodes + int( nodepart ) |
---|
| 1418 | |
---|
[327] | 1419 | except ValueError, detail: |
---|
[354] | 1420 | |
---|
[451] | 1421 | # When we arrive here I must be bugged or very confused |
---|
| 1422 | # THIS SHOULD NOT HAPPEN! |
---|
| 1423 | # |
---|
[327] | 1424 | debug_msg( 10, str( detail ) ) |
---|
| 1425 | debug_msg( 10, "Encountered weird node in Resources_List?!" ) |
---|
| 1426 | debug_msg( 10, 'nodepart = ' + str( nodepart ) ) |
---|
| 1427 | debug_msg( 10, 'job = ' + str( name ) ) |
---|
| 1428 | debug_msg( 10, 'attrs = ' + str( attrs ) ) |
---|
[133] | 1429 | |
---|
[354] | 1430 | nodeslist = str( count_mynodes ) |
---|
[172] | 1431 | else: |
---|
[354] | 1432 | start_timestamp = '' |
---|
| 1433 | nodeslist = '' |
---|
[133] | 1434 | |
---|
[354] | 1435 | myAttrs = { } |
---|
[26] | 1436 | |
---|
[471] | 1437 | myAttrs[ 'name' ] = str( name ) |
---|
[354] | 1438 | myAttrs[ 'queue' ] = str( queue ) |
---|
| 1439 | myAttrs[ 'owner' ] = str( owner ) |
---|
| 1440 | myAttrs[ 'requested_time' ] = str( requested_time ) |
---|
| 1441 | myAttrs[ 'requested_memory' ] = str( requested_memory ) |
---|
| 1442 | myAttrs[ 'ppn' ] = str( ppn ) |
---|
| 1443 | myAttrs[ 'status' ] = str( status ) |
---|
| 1444 | myAttrs[ 'start_timestamp' ] = str( start_timestamp ) |
---|
| 1445 | myAttrs[ 'queued_timestamp' ] = str( queued_timestamp ) |
---|
| 1446 | myAttrs[ 'reported' ] = str( int( int( self.cur_time ) + int( self.timeoffset ) ) ) |
---|
| 1447 | myAttrs[ 'nodes' ] = nodeslist |
---|
[507] | 1448 | myAttrs[ 'domain' ] = fqdn_parts( socket.getfqdn() )[1] |
---|
[354] | 1449 | myAttrs[ 'poll_interval' ] = str( BATCH_POLL_INTERVAL ) |
---|
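# All values are stored as strings; a resulting job record might look like
# this (purely hypothetical values):
#
#   { 'name' : 'myjob', 'queue' : 'batch', 'owner' : 'alice', 'status' : 'R',
#     'ppn' : '4', 'requested_time' : '72:00:00', 'poll_interval' : '45', ... }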
| 1450 | |
---|
[348] | 1451 | if self.jobDataChanged( self.jobs, job_id, myAttrs ) and myAttrs['status'] in [ 'R', 'Q' ]: |
---|
[61] | 1452 | |
---|
[354] | 1453 | self.jobs[ job_id ] = myAttrs |
---|
[26] | 1454 | |
---|
[348] | 1455 | for id, attrs in self.jobs.items(): |
---|
[76] | 1456 | |
---|
| 1457 | if id not in jobs_processed: |
---|
| 1458 | |
---|
| 1459 | # This one isn't there anymore; bye bye! |
---|
| 1460 | # |
---|
[348] | 1461 | del self.jobs[ id ] |
---|
[76] | 1462 | |
---|
[363] | 1463 | # |
---|
| 1464 | # Gmetric by Nick Galbreath - nickg(a.t)modp(d.o.t)com |
---|
| 1465 | # Version 1.0 - 21-April-2007 |
---|
| 1466 | # http://code.google.com/p/embeddedgmetric/ |
---|
| 1467 | # |
---|
| 1468 | # Modified by: Ramon Bastiaans |
---|
| 1469 | # For the Job Monarch Project, see: https://subtrac.sara.nl/oss/jobmonarch/ |
---|
| 1470 | # |
---|
| 1471 | # added: DEFAULT_TYPE for Gmetric metrics |
---|
| 1472 | # added: checkHostProtocol to determine if target is multicast or not |
---|
| 1473 | # changed: allow default for Gmetric constructor |
---|
| 1474 | # changed: allow defaults for all send() values except dmax |
---|
| 1475 | # |
---|
| 1476 | |
---|
[362] | 1477 | GMETRIC_DEFAULT_TYPE = 'string' |
---|
| 1478 | GMETRIC_DEFAULT_HOST = '127.0.0.1' |
---|
| 1479 | GMETRIC_DEFAULT_PORT = '8649' |
---|
[431] | 1480 | GMETRIC_DEFAULT_UNITS = '' |
---|
[362] | 1481 | |
---|
| 1482 | class Gmetric: |
---|
| 1483 | |
---|
| 1484 | global GMETRIC_DEFAULT_HOST, GMETRIC_DEFAULT_PORT |
---|
| 1485 | |
---|
| 1486 | slope = { 'zero' : 0, 'positive' : 1, 'negative' : 2, 'both' : 3, 'unspecified' : 4 } |
---|
| 1487 | type = ( '', 'string', 'uint16', 'int16', 'uint32', 'int32', 'float', 'double', 'timestamp' ) |
---|
| 1488 | protocol = ( 'udp', 'multicast' ) |
---|
| 1489 | |
---|
| 1490 | def __init__( self, host=GMETRIC_DEFAULT_HOST, port=GMETRIC_DEFAULT_PORT ): |
---|
| 1491 | |
---|
| 1492 | global GMETRIC_DEFAULT_TYPE |
---|
| 1493 | |
---|
| 1494 | self.prot = self.checkHostProtocol( host ) |
---|
| 1495 | self.msg = xdrlib.Packer() |
---|
| 1496 | self.socket = socket.socket( socket.AF_INET, socket.SOCK_DGRAM ) |
---|
| 1497 | |
---|
| 1498 | if self.prot not in self.protocol: |
---|
| 1499 | |
---|
| 1500 | raise ValueError( "Protocol must be one of: " + str( self.protocol ) ) |
---|
| 1501 | |
---|
| 1502 | if self.prot == 'multicast': |
---|
| 1503 | |
---|
[471] | 1504 | # Set multicast options |
---|
| 1505 | # |
---|
[362] | 1506 | self.socket.setsockopt( socket.IPPROTO_IP, socket.IP_MULTICAST_TTL, 20 ) |
---|
| 1507 | |
---|
| 1508 | self.hostport = ( host, int( port ) ) |
---|
| 1509 | self.slopestr = 'both' |
---|
| 1510 | self.tmax = 60 |
---|
| 1511 | |
---|
| 1512 | def checkHostProtocol( self, ip ): |
---|
| 1513 | |
---|
[471] | 1514 | """Detect if a ip adress is a multicast address""" |
---|
| 1515 | |
---|
[362] | 1516 | MULTICAST_ADDRESS_MIN = ( 224, 0, 0, 0 ) |
---|
| 1517 | MULTICAST_ADDRESS_MAX = ( 239, 255, 255, 255 ) |
---|
| 1518 | |
---|
| 1519 | ip_fields = tuple( [ int( n ) for n in ip.split( '.' ) if n.isdigit() ] ) |
---|
| 1520 | |
---|
| 1521 | if len( ip_fields ) == 4 and MULTICAST_ADDRESS_MIN <= ip_fields <= MULTICAST_ADDRESS_MAX: |
---|
| 1522 | |
---|
| 1523 | return 'multicast' |
---|
| 1524 | else: |
---|
| 1525 | return 'udp' |
---|
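# With the numeric tuple comparison above, for example (hypothetical arguments):
#
#   checkHostProtocol( '239.2.11.71' )         ->  'multicast'
#   checkHostProtocol( '127.0.0.1' )           ->  'udp'
#   checkHostProtocol( 'ganglia.example.org' ) ->  'udp'   (not a dotted quad)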
| 1526 | |
---|
[431] | 1527 | def send( self, name, value, dmax, typestr = '', units = '' ): |
---|
[362] | 1528 | |
---|
[409] | 1529 | if len( units ) == 0: |
---|
[431] | 1530 | units = GMETRIC_DEFAULT_UNITS |
---|
[471] | 1531 | |
---|
[431] | 1532 | if len( typestr ) == 0: |
---|
| 1533 | typestr = GMETRIC_DEFAULT_TYPE |
---|
[362] | 1534 | |
---|
[424] | 1535 | msg = self.makexdr( name, value, typestr, units, self.slopestr, self.tmax, dmax ) |
---|
[409] | 1536 | |
---|
[362] | 1537 | return self.socket.sendto( msg, self.hostport ) |
---|
| 1538 | |
---|
| 1539 | def makexdr( self, name, value, typestr, unitstr, slopestr, tmax, dmax ): |
---|
| 1540 | |
---|
| 1541 | if slopestr not in self.slope: |
---|
| 1542 | |
---|
| 1543 | raise ValueError( "Slope must be one of: " + str( self.slope.keys() ) ) |
---|
| 1544 | |
---|
| 1545 | if typestr not in self.type: |
---|
| 1546 | |
---|
| 1547 | raise ValueError( "Type must be one of: " + str( self.type ) ) |
---|
| 1548 | |
---|
| 1549 | if len( name ) == 0: |
---|
| 1550 | |
---|
| 1551 | raise ValueError( "Name must be non-empty" ) |
---|
| 1552 | |
---|
| 1553 | self.msg.reset() |
---|
| 1554 | self.msg.pack_int( 0 ) |
---|
| 1555 | self.msg.pack_string( typestr ) |
---|
| 1556 | self.msg.pack_string( name ) |
---|
| 1557 | self.msg.pack_string( str( value ) ) |
---|
| 1558 | self.msg.pack_string( unitstr ) |
---|
| 1559 | self.msg.pack_int( self.slope[ slopestr ] ) |
---|
| 1560 | self.msg.pack_uint( int( tmax ) ) |
---|
| 1561 | self.msg.pack_uint( int( dmax ) ) |
---|
| 1562 | |
---|
| 1563 | return self.msg.get_buffer() |
---|
| 1564 | |
---|
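# A minimal usage sketch for the Gmetric class above; the metric name, value,
# units and target address are made-up examples, not values jobmond itself sends:
#
#   g = Gmetric( '239.2.11.71', 8649 )
#   g.send( 'example_metric', '42', dmax = 300, typestr = 'uint32', units = 'jobs' )
#
# send() falls back to GMETRIC_DEFAULT_TYPE and GMETRIC_DEFAULT_UNITS when
# typestr or units is left empty, and makexdr() packs the metric into an XDR
# message before it is written to the UDP (unicast or multicast) socket.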
[26] | 1565 | def printTime( ): |
---|
[354] | 1566 | |
---|
[65] | 1567 | """Print current time/date in human readable format for log/debug""" |
---|
[26] | 1568 | |
---|
| 1569 | return time.strftime("%a, %d %b %Y %H:%M:%S") |
---|
| 1570 | |
---|
| 1571 | def debug_msg( level, msg ): |
---|
[354] | 1572 | |
---|
[65] | 1573 | """Print msg if at or above current debug level""" |
---|
[26] | 1574 | |
---|
[377] | 1575 | global DAEMONIZE, DEBUG_LEVEL, SYSLOG_LEVEL, USE_SYSLOG |
---|
| 1576 | |
---|
[373] | 1577 | if (not DAEMONIZE and DEBUG_LEVEL >= level): |
---|
| 1578 | sys.stderr.write( msg + '\n' ) |
---|
[26] | 1579 | |
---|
[373] | 1580 | if (DAEMONIZE and USE_SYSLOG and SYSLOG_LEVEL >= level): |
---|
| 1581 | syslog.syslog( msg ) |
---|
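# For example: with DAEMONIZE off and DEBUG_LEVEL = 10, debug_msg( 10, ... )
# writes to stderr while debug_msg( 11, ... ) is suppressed; when daemonized
# with USE_SYSLOG enabled, messages go to syslog instead, gated by SYSLOG_LEVEL.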
| 1582 | |
---|
[307] | 1583 | def write_pidfile(): |
---|
| 1584 | |
---|
[471] | 1585 | # Write pidfile if PIDFILE is set |
---|
| 1586 | # |
---|
[307] | 1587 | if PIDFILE: |
---|
| 1588 | |
---|
[354] | 1589 | pid = os.getpid() |
---|
| 1590 | |
---|
[471] | 1591 | pidfile = open( PIDFILE, 'w' ) |
---|
[354] | 1592 | |
---|
| 1593 | pidfile.write( str( pid ) ) |
---|
[307] | 1594 | pidfile.close() |
---|
| 1595 | |
---|
[23] | 1596 | def main(): |
---|
[354] | 1597 | |
---|
[65] | 1598 | """Application start""" |
---|
[23] | 1599 | |
---|
[524] | 1600 | global PBSQuery, PBSError, lsfObject |
---|
[373] | 1601 | global SYSLOG_FACILITY, USE_SYSLOG, BATCH_API, DAEMONIZE |
---|
[256] | 1602 | |
---|
[212] | 1603 | if not processArgs( sys.argv[1:] ): |
---|
[354] | 1604 | |
---|
[212] | 1605 | sys.exit( 1 ) |
---|
| 1606 | |
---|
[471] | 1607 | # Load the appropriate DataGatherer for the configured BATCH_API, |
---|
| 1608 | # along with any modules that Gatherer requires |
---|
| 1609 | # |
---|
[256] | 1610 | if BATCH_API == 'pbs': |
---|
| 1611 | |
---|
| 1612 | try: |
---|
[282] | 1613 | from PBSQuery import PBSQuery, PBSError |
---|
[256] | 1614 | |
---|
| 1615 | except ImportError: |
---|
| 1616 | |
---|
[373] | 1617 | debug_msg( 0, "FATAL ERROR: BATCH_API set to 'pbs' but python module 'pbs_python' is not installed" ) |
---|
[256] | 1618 | sys.exit( 1 ) |
---|
| 1619 | |
---|
| 1620 | gather = PbsDataGatherer() |
---|
| 1621 | |
---|
| 1622 | elif BATCH_API == 'sge': |
---|
| 1623 | |
---|
[507] | 1624 | # Tested with SGE 6.0u11. |
---|
[524] | 1625 | # |
---|
| 1626 | gather = SgeDataGatherer() |
---|
[368] | 1627 | |
---|
[524] | 1628 | elif BATCH_API == 'lsf': |
---|
[368] | 1629 | |
---|
[524] | 1630 | try: |
---|
| 1631 | from lsfObject import lsfObject |
---|
| 1632 | except ImportError: |
---|
| 1633 | debug_msg( 0, "FATAL ERROR: BATCH_API set to 'lsf' but python module 'lsfObject' is not installed" ) |
---|
| 1634 | sys.exit( 1 ) |
---|
[256] | 1635 | |
---|
[524] | 1636 | gather = LsfDataGatherer() |
---|
| 1637 | |
---|
[256] | 1638 | else: |
---|
[373] | 1639 | debug_msg( 0, "FATAL ERROR: unknown BATCH_API '" + BATCH_API + "' is not supported" ) |
---|
[354] | 1640 | |
---|
[256] | 1641 | sys.exit( 1 ) |
---|
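# In short, the configured BATCH_API value selects the DataGatherer class:
#
#   BATCH_API = 'pbs'  ->  PbsDataGatherer  (requires the pbs_python module)
#   BATCH_API = 'sge'  ->  SgeDataGatherer
#   BATCH_API = 'lsf'  ->  LsfDataGatherer  (requires the lsfObject module)
#
# Any other value is a fatal configuration error.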
| 1642 | |
---|
[373] | 1643 | if( DAEMONIZE and USE_SYSLOG ): |
---|
| 1644 | |
---|
| 1645 | syslog.openlog( 'jobmond', syslog.LOG_NOWAIT, SYSLOG_FACILITY ) |
---|
| 1646 | |
---|
[26] | 1647 | if DAEMONIZE: |
---|
[354] | 1648 | |
---|
[26] | 1649 | gather.daemon() |
---|
| 1650 | else: |
---|
| 1651 | gather.run() |
---|
[23] | 1652 | |
---|
[256] | 1653 | # wh00t? someone started me! :) |
---|
[65] | 1654 | # |
---|
[23] | 1655 | if __name__ == '__main__': |
---|
| 1656 | main() |
---|