[23] | 1 | #!/usr/bin/env python |
---|
[225] | 2 | # |
---|
| 3 | # This file is part of Jobmonarch |
---|
| 4 | # |
---|
[691] | 5 | # Copyright (C) 2006-2013 Ramon Bastiaans |
---|
[623] | 6 | # Copyright (C) 2007, 2009 Dave Love (SGE code) |
---|
[225] | 7 | # |
---|
| 8 | # Jobmonarch is free software; you can redistribute it and/or modify |
---|
| 9 | # it under the terms of the GNU General Public License as published by |
---|
| 10 | # the Free Software Foundation; either version 2 of the License, or |
---|
| 11 | # (at your option) any later version. |
---|
| 12 | # |
---|
| 13 | # Jobmonarch is distributed in the hope that it will be useful, |
---|
| 14 | # but WITHOUT ANY WARRANTY; without even the implied warranty of |
---|
| 15 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
---|
| 16 | # GNU General Public License for more details. |
---|
| 17 | # |
---|
| 18 | # You should have received a copy of the GNU General Public License |
---|
| 19 | # along with this program; if not, write to the Free Software |
---|
| 20 | # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA |
---|
| 21 | # |
---|
[228] | 22 | # SVN $Id: jobmond.py 694 2013-03-20 13:00:23Z ramonb $ |
---|
[227] | 23 | # |
---|
[23] | 24 | |
---|
[694] | 25 | # vi :set ts=4 |
---|
| 26 | |
---|
[471] | 27 | import sys, getopt, ConfigParser, time, os, socket, string, re |
---|
| 28 | import xdrlib, socket, syslog, xml, xml.sax |
---|
[318] | 29 | from xml.sax.handler import feature_namespaces |
---|
[623] | 30 | from collections import deque |
---|
[318] | 31 | |
---|
[691] | 32 | VERSION='0.4+SVN' |
---|
[307] | 33 | |
---|
[471] | 34 | def usage( ver ): |
---|
| 35 | |
---|
[691] | 36 | print 'jobmond %s' %VERSION |
---|
[471] | 37 | |
---|
[691] | 38 | if ver: |
---|
| 39 | return 0 |
---|
[471] | 40 | |
---|
[691] | 41 | print |
---|
| 42 | print 'Purpose:' |
---|
| 43 | print ' The Job Monitoring Daemon (jobmond) reports batch jobs information and statistics' |
---|
| 44 | print ' to Ganglia, which can be viewed with Job Monarch web frontend' |
---|
| 45 | print |
---|
| 46 | print 'Usage: jobmond [OPTIONS]' |
---|
| 47 | print |
---|
| 48 | print ' -c, --config=FILE The configuration file to use (default: /etc/jobmond.conf)' |
---|
| 49 | print ' -p, --pidfile=FILE Use pid file to store the process id' |
---|
| 50 | print ' -h, --help Print help and exit' |
---|
| 51 | print ' -v, --version Print version and exit' |
---|
| 52 | print |
---|
[307] | 53 | |
---|
[212] | 54 | def processArgs( args ): |
---|
[26] | 55 | |
---|
[691] | 56 | SHORT_L = 'p:hvc:' |
---|
| 57 | LONG_L = [ 'help', 'config=', 'pidfile=', 'version' ] |
---|
[165] | 58 | |
---|
[691] | 59 | global PIDFILE |
---|
| 60 | PIDFILE = None |
---|
[61] | 61 | |
---|
[691] | 62 | config_filename = '/etc/jobmond.conf' |
---|
[354] | 63 | |
---|
[691] | 64 | try: |
---|
[68] | 65 | |
---|
[691] | 66 | opts, args = getopt.getopt( args, SHORT_L, LONG_L ) |
---|
[185] | 67 | |
---|
[691] | 68 | except getopt.GetoptError, detail: |
---|
[212] | 69 | |
---|
[691] | 70 | print detail |
---|
| 71 | usage() |
---|
| 72 | sys.exit( 1 ) |
---|
[212] | 73 | |
---|
[691] | 74 | for opt, value in opts: |
---|
[212] | 75 | |
---|
[691] | 76 | if opt in [ '--config', '-c' ]: |
---|
| 77 | |
---|
| 78 | config_filename = value |
---|
[212] | 79 | |
---|
[691] | 80 | if opt in [ '--pidfile', '-p' ]: |
---|
[212] | 81 | |
---|
[691] | 82 | PIDFILE = value |
---|
| 83 | |
---|
| 84 | if opt in [ '--help', '-h' ]: |
---|
[307] | 85 | |
---|
[691] | 86 | usage( False ) |
---|
| 87 | sys.exit( 0 ) |
---|
[212] | 88 | |
---|
[691] | 89 | if opt in [ '--version', '-v' ]: |
---|
[471] | 90 | |
---|
[691] | 91 | usage( True ) |
---|
| 92 | sys.exit( 0 ) |
---|
[471] | 93 | |
---|
[691] | 94 | return loadConfig( config_filename ) |
---|
[212] | 95 | |
---|
[623] | 96 | # Fixme: This doesn't DTRT with commented-out bits of the file. E.g. |
---|
| 97 | # it picked up a commented-out `mcast_join' and tried to use a |
---|
| 98 | # multicast channel when it shouldn't have done. |
---|
[520] | 99 | class GangliaConfigParser: |
---|
| 100 | |
---|
[691] | 101 | def __init__( self, config_file ): |
---|
[520] | 102 | |
---|
[691] | 103 | self.config_file = config_file |
---|
[520] | 104 | |
---|
[691] | 105 | if not os.path.exists( self.config_file ): |
---|
[520] | 106 | |
---|
[691] | 107 | debug_msg( 0, "FATAL ERROR: gmond config '" + self.config_file + "' not found!" ) |
---|
| 108 | sys.exit( 1 ) |
---|
[520] | 109 | |
---|
[691] | 110 | def removeQuotes( self, value ): |
---|
[520] | 111 | |
---|
[691] | 112 | clean_value = value |
---|
| 113 | clean_value = clean_value.replace( "'", "" ) |
---|
| 114 | clean_value = clean_value.replace( '"', '' ) |
---|
| 115 | clean_value = clean_value.strip() |
---|
[520] | 116 | |
---|
[691] | 117 | return clean_value |
---|
[520] | 118 | |
---|
[691] | 119 | def getVal( self, section, valname ): |
---|
[520] | 120 | |
---|
[691] | 121 | cfg_fp = open( self.config_file ) |
---|
| 122 | section_start = False |
---|
| 123 | section_found = False |
---|
| 124 | value = None |
---|
[520] | 125 | |
---|
[691] | 126 | for line in cfg_fp.readlines(): |
---|
[520] | 127 | |
---|
[691] | 128 | if line.find( section ) != -1: |
---|
[520] | 129 | |
---|
[691] | 130 | section_found = True |
---|
[520] | 131 | |
---|
[691] | 132 | if line.find( '{' ) != -1 and section_found: |
---|
[520] | 133 | |
---|
[691] | 134 | section_start = True |
---|
[520] | 135 | |
---|
[691] | 136 | if line.find( '}' ) != -1 and section_found: |
---|
[520] | 137 | |
---|
[691] | 138 | section_start = False |
---|
| 139 | section_found = False |
---|
[520] | 140 | |
---|
[691] | 141 | if line.find( valname ) != -1 and section_start: |
---|
[520] | 142 | |
---|
[691] | 143 | value = string.join( line.split( '=' )[1:], '' ).strip() |
---|
[520] | 144 | |
---|
[691] | 145 | cfg_fp.close() |
---|
[520] | 146 | |
---|
[691] | 147 | return value |
---|
[520] | 148 | |
---|
[691] | 149 | def getInt( self, section, valname ): |
---|
[520] | 150 | |
---|
[691] | 151 | value = self.getVal( section, valname ) |
---|
[520] | 152 | |
---|
[691] | 153 | if not value: |
---|
| 154 | return False |
---|
[520] | 155 | |
---|
[691] | 156 | value = self.removeQuotes( value ) |
---|
[520] | 157 | |
---|
[691] | 158 | return int( value ) |
---|
[520] | 159 | |
---|
[691] | 160 | def getStr( self, section, valname ): |
---|
[520] | 161 | |
---|
[691] | 162 | value = self.getVal( section, valname ) |
---|
[520] | 163 | |
---|
[691] | 164 | if not value: |
---|
| 165 | return False |
---|
[520] | 166 | |
---|
[691] | 167 | value = self.removeQuotes( value ) |
---|
[520] | 168 | |
---|
[691] | 169 | return str( value ) |
---|
[520] | 170 | |
---|
| 171 | def findGmetric(): |
---|
| 172 | |
---|
[691] | 173 | for dir in os.path.expandvars( '$PATH' ).split( ':' ): |
---|
[520] | 174 | |
---|
[691] | 175 | guess = '%s/%s' %( dir, 'gmetric' ) |
---|
[520] | 176 | |
---|
[691] | 177 | if os.path.exists( guess ): |
---|
[520] | 178 | |
---|
[691] | 179 | return guess |
---|
[520] | 180 | |
---|
[691] | 181 | return False |
---|
[520] | 182 | |
---|
[212] | 183 | def loadConfig( filename ): |
---|
| 184 | |
---|
[691] | 185 | def getlist( cfg_string ): |
---|
[215] | 186 | |
---|
[691] | 187 | my_list = [ ] |
---|
[215] | 188 | |
---|
[691] | 189 | for item_txt in cfg_string.split( ',' ): |
---|
[215] | 190 | |
---|
[691] | 191 | sep_char = None |
---|
[215] | 192 | |
---|
[691] | 193 | item_txt = item_txt.strip() |
---|
[215] | 194 | |
---|
[691] | 195 | for s_char in [ "'", '"' ]: |
---|
[215] | 196 | |
---|
[691] | 197 | if item_txt.find( s_char ) != -1: |
---|
[215] | 198 | |
---|
[691] | 199 | if item_txt.count( s_char ) != 2: |
---|
[215] | 200 | |
---|
[691] | 201 | print 'Missing quote: %s' %item_txt |
---|
| 202 | sys.exit( 1 ) |
---|
[215] | 203 | |
---|
[691] | 204 | else: |
---|
[215] | 205 | |
---|
[691] | 206 | sep_char = s_char |
---|
| 207 | break |
---|
[215] | 208 | |
---|
[691] | 209 | if sep_char: |
---|
[215] | 210 | |
---|
[691] | 211 | item_txt = item_txt.split( sep_char )[1] |
---|
[215] | 212 | |
---|
[691] | 213 | my_list.append( item_txt ) |
---|
[215] | 214 | |
---|
[691] | 215 | return my_list |
---|
[215] | 216 | |
---|
[691] | 217 | cfg = ConfigParser.ConfigParser() |
---|
[212] | 218 | |
---|
[691] | 219 | cfg.read( filename ) |
---|
[212] | 220 | |
---|
[691] | 221 | global DEBUG_LEVEL, DAEMONIZE, BATCH_SERVER, BATCH_POLL_INTERVAL |
---|
| 222 | global GMOND_CONF, DETECT_TIME_DIFFS, BATCH_HOST_TRANSLATE |
---|
| 223 | global BATCH_API, QUEUE, GMETRIC_TARGET, USE_SYSLOG |
---|
| 224 | global SYSLOG_LEVEL, SYSLOG_FACILITY, GMETRIC_BINARY |
---|
[212] | 225 | |
---|
[691] | 226 | DEBUG_LEVEL = cfg.getint( 'DEFAULT', 'DEBUG_LEVEL' ) |
---|
[212] | 227 | |
---|
[691] | 228 | DAEMONIZE = cfg.getboolean( 'DEFAULT', 'DAEMONIZE' ) |
---|
[212] | 229 | |
---|
[691] | 230 | SYSLOG_LEVEL = -1 |
---|
| 231 | SYSLOG_FACILITY = None |
---|
[377] | 232 | |
---|
[691] | 233 | try: |
---|
| 234 | USE_SYSLOG = cfg.getboolean( 'DEFAULT', 'USE_SYSLOG' ) |
---|
[212] | 235 | |
---|
[691] | 236 | except ConfigParser.NoOptionError: |
---|
[373] | 237 | |
---|
[691] | 238 | USE_SYSLOG = True |
---|
[373] | 239 | |
---|
[691] | 240 | debug_msg( 0, 'ERROR: no option USE_SYSLOG found: assuming yes' ) |
---|
[373] | 241 | |
---|
[691] | 242 | if USE_SYSLOG: |
---|
[373] | 243 | |
---|
[691] | 244 | try: |
---|
| 245 | SYSLOG_LEVEL = cfg.getint( 'DEFAULT', 'SYSLOG_LEVEL' ) |
---|
[373] | 246 | |
---|
[691] | 247 | except ConfigParser.NoOptionError: |
---|
[373] | 248 | |
---|
[691] | 249 | debug_msg( 0, 'ERROR: no option SYSLOG_LEVEL found: assuming level 0' ) |
---|
| 250 | SYSLOG_LEVEL = 0 |
---|
[373] | 251 | |
---|
[691] | 252 | try: |
---|
[373] | 253 | |
---|
[691] | 254 | SYSLOG_FACILITY = eval( 'syslog.LOG_' + cfg.get( 'DEFAULT', 'SYSLOG_FACILITY' ) ) |
---|
[373] | 255 | |
---|
[691] | 256 | except ConfigParser.NoOptionError: |
---|
[373] | 257 | |
---|
[691] | 258 | SYSLOG_FACILITY = syslog.LOG_DAEMON |
---|
[373] | 259 | |
---|
[691] | 260 | debug_msg( 0, 'ERROR: no option SYSLOG_FACILITY found: assuming facility DAEMON' ) |
---|
[373] | 261 | |
---|
[691] | 262 | try: |
---|
[373] | 263 | |
---|
[691] | 264 | BATCH_SERVER = cfg.get( 'DEFAULT', 'BATCH_SERVER' ) |
---|
[212] | 265 | |
---|
[691] | 266 | except ConfigParser.NoOptionError: |
---|
[265] | 267 | |
---|
[691] | 268 | # Backwards compatibility for old configs |
---|
| 269 | # |
---|
[265] | 270 | |
---|
[691] | 271 | BATCH_SERVER = cfg.get( 'DEFAULT', 'TORQUE_SERVER' ) |
---|
| 272 | api_guess = 'pbs' |
---|
| 273 | |
---|
| 274 | try: |
---|
| 275 | |
---|
| 276 | BATCH_POLL_INTERVAL = cfg.getint( 'DEFAULT', 'BATCH_POLL_INTERVAL' ) |
---|
[265] | 277 | |
---|
[691] | 278 | except ConfigParser.NoOptionError: |
---|
[265] | 279 | |
---|
[691] | 280 | # Backwards compatibility for old configs |
---|
| 281 | # |
---|
[265] | 282 | |
---|
[691] | 283 | BATCH_POLL_INTERVAL = cfg.getint( 'DEFAULT', 'TORQUE_POLL_INTERVAL' ) |
---|
| 284 | api_guess = 'pbs' |
---|
| 285 | |
---|
| 286 | try: |
---|
[212] | 287 | |
---|
[691] | 288 | GMOND_CONF = cfg.get( 'DEFAULT', 'GMOND_CONF' ) |
---|
[353] | 289 | |
---|
[691] | 290 | except ConfigParser.NoOptionError: |
---|
[353] | 291 | |
---|
[691] | 292 | # Not specified: assume /etc/gmond.conf |
---|
| 293 | # |
---|
| 294 | GMOND_CONF = '/etc/gmond.conf' |
---|
[353] | 295 | |
---|
[691] | 296 | ganglia_cfg = GangliaConfigParser( GMOND_CONF ) |
---|
[449] | 297 | |
---|
[691] | 298 | # Let's try to find the GMETRIC_TARGET ourselves first from GMOND_CONF |
---|
| 299 | # |
---|
| 300 | gmetric_dest_ip = ganglia_cfg.getStr( 'udp_send_channel', 'mcast_join' ) |
---|
[449] | 301 | |
---|
[691] | 302 | if not gmetric_dest_ip: |
---|
[449] | 303 | |
---|
[691] | 304 | # Maybe unicast target then |
---|
| 305 | # |
---|
| 306 | gmetric_dest_ip = ganglia_cfg.getStr( 'udp_send_channel', 'host' ) |
---|
[449] | 307 | |
---|
[691] | 308 | gmetric_dest_port = ganglia_cfg.getStr( 'udp_send_channel', 'port' ) |
---|
[520] | 309 | |
---|
[691] | 310 | if gmetric_dest_ip and gmetric_dest_port: |
---|
[520] | 311 | |
---|
[691] | 312 | GMETRIC_TARGET = '%s:%s' %( gmetric_dest_ip, gmetric_dest_port ) |
---|
| 313 | else: |
---|
[520] | 314 | |
---|
[691] | 315 | debug_msg( 0, "WARNING: Can't parse udp_send_channel from: '%s'" %GMOND_CONF ) |
---|
[520] | 316 | |
---|
[691] | 317 | # Couldn't figure it out: let's see if it's in our jobmond.conf |
---|
| 318 | # |
---|
| 319 | try: |
---|
[520] | 320 | |
---|
[691] | 321 | GMETRIC_TARGET = cfg.get( 'DEFAULT', 'GMETRIC_TARGET' ) |
---|
[520] | 322 | |
---|
[691] | 323 | # Guess not: now just give up |
---|
| 324 | # |
---|
| 325 | except ConfigParser.NoOptionError: |
---|
[520] | 326 | |
---|
[691] | 327 | GMETRIC_TARGET = None |
---|
[520] | 328 | |
---|
[691] | 329 | debug_msg( 0, "ERROR: GMETRIC_TARGET not set: internal Gmetric handling aborted. Failing back to DEPRECATED use of gmond.conf/gmetric binary. This will slow down jobmond significantly!" ) |
---|
[520] | 330 | |
---|
[691] | 331 | gmetric_bin = findGmetric() |
---|
[520] | 332 | |
---|
[691] | 333 | if gmetric_bin: |
---|
[520] | 334 | |
---|
[691] | 335 | GMETRIC_BINARY = gmetric_bin |
---|
| 336 | else: |
---|
| 337 | debug_msg( 0, "WARNING: Can't find gmetric binary anywhere in $PATH" ) |
---|
[520] | 338 | |
---|
[691] | 339 | try: |
---|
[520] | 340 | |
---|
[691] | 341 | GMETRIC_BINARY = cfg.get( 'DEFAULT', 'GMETRIC_BINARY' ) |
---|
[520] | 342 | |
---|
[691] | 343 | except ConfigParser.NoOptionError: |
---|
[520] | 344 | |
---|
[691] | 345 | debug_msg( 0, "FATAL ERROR: GMETRIC_BINARY not set and not in $PATH" ) |
---|
| 346 | sys.exit( 1 ) |
---|
[520] | 347 | |
---|
[691] | 348 | DETECT_TIME_DIFFS = cfg.getboolean( 'DEFAULT', 'DETECT_TIME_DIFFS' ) |
---|
[212] | 349 | |
---|
[691] | 350 | BATCH_HOST_TRANSLATE = getlist( cfg.get( 'DEFAULT', 'BATCH_HOST_TRANSLATE' ) ) |
---|
[215] | 351 | |
---|
[691] | 352 | try: |
---|
[256] | 353 | |
---|
[691] | 354 | BATCH_API = cfg.get( 'DEFAULT', 'BATCH_API' ) |
---|
[266] | 355 | |
---|
[691] | 356 | except ConfigParser.NoOptionError, detail: |
---|
[266] | 357 | |
---|
[691] | 358 | if BATCH_SERVER and api_guess: |
---|
[354] | 359 | |
---|
[691] | 360 | BATCH_API = api_guess |
---|
| 361 | else: |
---|
| 362 | debug_msg( 0, "FATAL ERROR: BATCH_API not set and can't make guess" ) |
---|
| 363 | sys.exit( 1 ) |
---|
[317] | 364 | |
---|
[691] | 365 | try: |
---|
[317] | 366 | |
---|
[691] | 367 | QUEUE = getlist( cfg.get( 'DEFAULT', 'QUEUE' ) ) |
---|
[317] | 368 | |
---|
[691] | 369 | except ConfigParser.NoOptionError, detail: |
---|
[317] | 370 | |
---|
[691] | 371 | QUEUE = None |
---|
[353] | 372 | |
---|
[691] | 373 | return True |
---|
[212] | 374 | |
---|
[507] | 375 | def fqdn_parts (fqdn): |
---|
[520] | 376 | |
---|
[691] | 377 | """Return pair of host and domain for fully-qualified domain name arg.""" |
---|
[520] | 378 | |
---|
[691] | 379 | parts = fqdn.split (".") |
---|
[520] | 380 | |
---|
[691] | 381 | return (parts[0], string.join(parts[1:], ".")) |
---|
[507] | 382 | |
---|
[253] | 383 | METRIC_MAX_VAL_LEN = 900 |
---|
| 384 | |
---|
[61] | 385 | class DataProcessor: |
---|
[355] | 386 | |
---|
[691] | 387 | """Class for processing of data""" |
---|
[61] | 388 | |
---|
[691] | 389 | binary = None |
---|
[61] | 390 | |
---|
[691] | 391 | def __init__( self, binary=None ): |
---|
[355] | 392 | |
---|
[691] | 393 | """Remember alternate binary location if supplied""" |
---|
[61] | 394 | |
---|
[691] | 395 | global GMETRIC_BINARY, GMOND_CONF |
---|
[449] | 396 | |
---|
[691] | 397 | if binary: |
---|
| 398 | self.binary = binary |
---|
[61] | 399 | |
---|
[691] | 400 | if not self.binary: |
---|
| 401 | self.binary = GMETRIC_BINARY |
---|
[449] | 402 | |
---|
[691] | 403 | # Timeout for XML |
---|
| 404 | # |
---|
| 405 | # From ganglia's documentation: |
---|
| 406 | # |
---|
| 407 | # 'A metric will be deleted DMAX seconds after it is received, and |
---|
| 408 | # DMAX=0 means eternal life.' |
---|
[61] | 409 | |
---|
[691] | 410 | self.dmax = str( int( int( BATCH_POLL_INTERVAL ) * 2 ) ) |
---|
[80] | 411 | |
---|
[691] | 412 | if GMOND_CONF: |
---|
[354] | 413 | |
---|
[691] | 414 | incompatible = self.checkGmetricVersion() |
---|
[61] | 415 | |
---|
[691] | 416 | if incompatible: |
---|
[355] | 417 | |
---|
[692] | 418 | debug_msg( 0, 'Gmetric version not compatible, please upgrade to at least 3.4.0' ) |
---|
[691] | 419 | sys.exit( 1 ) |
---|
[65] | 420 | |
---|
[691] | 421 | def checkGmetricVersion( self ): |
---|
[355] | 422 | |
---|
[691] | 423 | """ |
---|
[692] | 424 | Check version of gmetric is at least 3.4.0 |
---|
[691] | 425 | for the syntax we use |
---|
| 426 | """ |
---|
[65] | 427 | |
---|
[691] | 428 | global METRIC_MAX_VAL_LEN, GMETRIC_TARGET |
---|
[255] | 429 | |
---|
[691] | 430 | incompatible = 0 |
---|
[341] | 431 | |
---|
[691] | 432 | gfp = os.popen( self.binary + ' --version' ) |
---|
[692] | 433 | lines = gfp.readlines() |
---|
[65] | 434 | |
---|
[691] | 435 | gfp.close() |
---|
[355] | 436 | |
---|
[691] | 437 | for line in lines: |
---|
[355] | 438 | |
---|
[691] | 439 | line = line.split( ' ' ) |
---|
[65] | 440 | |
---|
[691] | 441 | if len( line ) == 2 and str( line ).find( 'gmetric' ) != -1: |
---|
| 442 | |
---|
| 443 | gmetric_version = line[1].split( '\n' )[0] |
---|
[65] | 444 | |
---|
[691] | 445 | version_major = int( gmetric_version.split( '.' )[0] ) |
---|
| 446 | version_minor = int( gmetric_version.split( '.' )[1] ) |
---|
| 447 | version_patch = int( gmetric_version.split( '.' )[2] ) |
---|
[65] | 448 | |
---|
[691] | 449 | incompatible = 0 |
---|
[65] | 450 | |
---|
[691] | 451 | if version_major < 3: |
---|
[65] | 452 | |
---|
[691] | 453 | incompatible = 1 |
---|
| 454 | |
---|
| 455 | elif version_major == 3: |
---|
[65] | 456 | |
---|
[692] | 457 | if version_minor < 4: |
---|
[65] | 458 | |
---|
[692] | 459 | incompatible = 1 |
---|
[65] | 460 | |
---|
[692] | 461 | else: |
---|
[255] | 462 | |
---|
[692] | 463 | METRIC_MAX_VAL_LEN = 1400 |
---|
[255] | 464 | |
---|
[691] | 465 | return incompatible |
---|
[65] | 466 | |
---|
[691] | 467 | def multicastGmetric( self, metricname, metricval, valtype='string', units='' ): |
---|
[355] | 468 | |
---|
[691] | 469 | """Call gmetric binary and multicast""" |
---|
[65] | 470 | |
---|
[691] | 471 | cmd = self.binary |
---|
[65] | 472 | |
---|
[691] | 473 | if GMETRIC_TARGET: |
---|
[61] | 474 | |
---|
[691] | 475 | GMETRIC_TARGET_HOST = GMETRIC_TARGET.split( ':' )[0] |
---|
| 476 | GMETRIC_TARGET_PORT = GMETRIC_TARGET.split( ':' )[1] |
---|
[353] | 477 | |
---|
[691] | 478 | metric_debug = "[gmetric] name: %s - val: %s - dmax: %s" %( str( metricname ), str( metricval ), str( self.dmax ) ) |
---|
[353] | 479 | |
---|
[691] | 480 | debug_msg( 10, printTime() + ' ' + metric_debug) |
---|
[353] | 481 | |
---|
[691] | 482 | gm = Gmetric( GMETRIC_TARGET_HOST, GMETRIC_TARGET_PORT ) |
---|
[353] | 483 | |
---|
[691] | 484 | gm.send( str( metricname ), str( metricval ), str( self.dmax ), valtype, units ) |
---|
[353] | 485 | |
---|
[691] | 486 | else: |
---|
| 487 | try: |
---|
| 488 | cmd = cmd + ' -c' + GMOND_CONF |
---|
[353] | 489 | |
---|
[691] | 490 | except NameError: |
---|
[353] | 491 | |
---|
[691] | 492 | debug_msg( 10, 'Assuming /etc/gmond.conf for gmetric cmd (omitting)' ) |
---|
[353] | 493 | |
---|
[691] | 494 | cmd = cmd + ' -n' + str( metricname )+ ' -v"' + str( metricval )+ '" -t' + str( valtype ) + ' -d' + str( self.dmax ) |
---|
[353] | 495 | |
---|
[691] | 496 | if len( units ) > 0: |
---|
[409] | 497 | |
---|
[691] | 498 | cmd = cmd + ' -u"' + units + '"' |
---|
[409] | 499 | |
---|
[691] | 500 | debug_msg( 10, printTime() + ' ' + cmd ) |
---|
[353] | 501 | |
---|
[691] | 502 | os.system( cmd ) |
---|
[353] | 503 | |
---|
[318] | 504 | class DataGatherer: |
---|
[23] | 505 | |
---|
[691] | 506 | """Skeleton class for batch system DataGatherer""" |
---|
[256] | 507 | |
---|
[691] | 508 | def printJobs( self, jobs ): |
---|
[355] | 509 | |
---|
[691] | 510 | """Print a jobinfo overview""" |
---|
[318] | 511 | |
---|
[691] | 512 | for name, attrs in self.jobs.items(): |
---|
[318] | 513 | |
---|
[691] | 514 | print 'job %s' %(name) |
---|
[318] | 515 | |
---|
[691] | 516 | for name, val in attrs.items(): |
---|
[318] | 517 | |
---|
[691] | 518 | print '\t%s = %s' %( name, val ) |
---|
[318] | 519 | |
---|
[691] | 520 | def printJob( self, jobs, job_id ): |
---|
[355] | 521 | |
---|
[691] | 522 | """Print job with job_id from jobs""" |
---|
[318] | 523 | |
---|
[691] | 524 | print 'job %s' %(job_id) |
---|
[318] | 525 | |
---|
[691] | 526 | for name, val in jobs[ job_id ].items(): |
---|
[318] | 527 | |
---|
[691] | 528 | print '\t%s = %s' %( name, val ) |
---|
[318] | 529 | |
---|
[691] | 530 | def getAttr( self, attrs, name ): |
---|
[507] | 531 | |
---|
[691] | 532 | """Return certain attribute from dictionary, if exists""" |
---|
[507] | 533 | |
---|
[691] | 534 | if attrs.has_key( name ): |
---|
[507] | 535 | |
---|
[691] | 536 | return attrs[ name ] |
---|
| 537 | else: |
---|
| 538 | return '' |
---|
[507] | 539 | |
---|
[691] | 540 | def jobDataChanged( self, jobs, job_id, attrs ): |
---|
[507] | 541 | |
---|
[691] | 542 | """Check if job with attrs and job_id in jobs has changed""" |
---|
[507] | 543 | |
---|
[691] | 544 | if jobs.has_key( job_id ): |
---|
[507] | 545 | |
---|
[691] | 546 | oldData = jobs[ job_id ] |
---|
| 547 | else: |
---|
| 548 | return 1 |
---|
[507] | 549 | |
---|
[691] | 550 | for name, val in attrs.items(): |
---|
[507] | 551 | |
---|
[691] | 552 | if oldData.has_key( name ): |
---|
[507] | 553 | |
---|
[691] | 554 | if oldData[ name ] != attrs[ name ]: |
---|
[507] | 555 | |
---|
[691] | 556 | return 1 |
---|
[507] | 557 | |
---|
[691] | 558 | else: |
---|
| 559 | return 1 |
---|
[507] | 560 | |
---|
[691] | 561 | return 0 |
---|
[507] | 562 | |
---|
[691] | 563 | def submitJobData( self ): |
---|
[507] | 564 | |
---|
[691] | 565 | """Submit job info list""" |
---|
[507] | 566 | |
---|
[691] | 567 | global BATCH_API |
---|
[512] | 568 | |
---|
[691] | 569 | self.dp.multicastGmetric( 'MONARCH-HEARTBEAT', str( int( int( self.cur_time ) + int( self.timeoffset ) ) ) ) |
---|
[507] | 570 | |
---|
[691] | 571 | running_jobs = 0 |
---|
| 572 | queued_jobs = 0 |
---|
[507] | 573 | |
---|
[691] | 574 | # Count how many running/queued jobs we found |
---|
| 575 | # |
---|
| 576 | for jobid, jobattrs in self.jobs.items(): |
---|
[507] | 577 | |
---|
[691] | 578 | if jobattrs[ 'status' ] == 'Q': |
---|
[507] | 579 | |
---|
[691] | 580 | queued_jobs += 1 |
---|
[507] | 581 | |
---|
[691] | 582 | elif jobattrs[ 'status' ] == 'R': |
---|
[507] | 583 | |
---|
[691] | 584 | running_jobs += 1 |
---|
[507] | 585 | |
---|
[691] | 586 | # Report running/queued jobs as seperate metric for a nice RRD graph |
---|
| 587 | # |
---|
| 588 | self.dp.multicastGmetric( 'MONARCH-RJ', str( running_jobs ), 'uint32', 'jobs' ) |
---|
| 589 | self.dp.multicastGmetric( 'MONARCH-QJ', str( queued_jobs ), 'uint32', 'jobs' ) |
---|
[507] | 590 | |
---|
[691] | 591 | # Report down/offline nodes in batch (PBS only ATM) |
---|
| 592 | # |
---|
| 593 | if BATCH_API == 'pbs': |
---|
[512] | 594 | |
---|
[691] | 595 | domain = fqdn_parts( socket.getfqdn() )[1] |
---|
[514] | 596 | |
---|
[691] | 597 | downed_nodes = list() |
---|
| 598 | offline_nodes = list() |
---|
| 599 | |
---|
| 600 | l = ['state'] |
---|
| 601 | |
---|
| 602 | for name, node in self.pq.getnodes().items(): |
---|
[512] | 603 | |
---|
[691] | 604 | if ( node[ 'state' ].find( "down" ) != -1 ): |
---|
[512] | 605 | |
---|
[691] | 606 | downed_nodes.append( name ) |
---|
[512] | 607 | |
---|
[691] | 608 | if ( node[ 'state' ].find( "offline" ) != -1 ): |
---|
[512] | 609 | |
---|
[691] | 610 | offline_nodes.append( name ) |
---|
[512] | 611 | |
---|
[691] | 612 | downnodeslist = do_nodelist( downed_nodes ) |
---|
| 613 | offlinenodeslist = do_nodelist( offline_nodes ) |
---|
[512] | 614 | |
---|
[691] | 615 | down_str = 'nodes=%s domain=%s reported=%s' %( string.join( downnodeslist, ';' ), domain, str( int( int( self.cur_time ) + int( self.timeoffset ) ) ) ) |
---|
| 616 | offl_str = 'nodes=%s domain=%s reported=%s' %( string.join( offlinenodeslist, ';' ), domain, str( int( int( self.cur_time ) + int( self.timeoffset ) ) ) ) |
---|
| 617 | self.dp.multicastGmetric( 'MONARCH-DOWN' , down_str ) |
---|
| 618 | self.dp.multicastGmetric( 'MONARCH-OFFLINE', offl_str ) |
---|
[514] | 619 | |
---|
[691] | 620 | # Now let's spread the knowledge |
---|
| 621 | # |
---|
| 622 | for jobid, jobattrs in self.jobs.items(): |
---|
[507] | 623 | |
---|
[691] | 624 | # Make gmetric values for each job: respect max gmetric value length |
---|
| 625 | # |
---|
| 626 | gmetric_val = self.compileGmetricVal( jobid, jobattrs ) |
---|
| 627 | metric_increment = 0 |
---|
[507] | 628 | |
---|
[691] | 629 | # If we have more job info than max gmetric value length allows, split it up |
---|
| 630 | # amongst multiple metrics |
---|
| 631 | # |
---|
| 632 | for val in gmetric_val: |
---|
[507] | 633 | |
---|
[691] | 634 | self.dp.multicastGmetric( 'MONARCH-JOB-' + jobid + '-' + str(metric_increment), val ) |
---|
[507] | 635 | |
---|
[691] | 636 | # Increase follow number if this jobinfo is split up amongst more than 1 gmetric |
---|
| 637 | # |
---|
| 638 | metric_increment = metric_increment + 1 |
---|
[507] | 639 | |
---|
[691] | 640 | def compileGmetricVal( self, jobid, jobattrs ): |
---|
[507] | 641 | |
---|
[691] | 642 | """Create a val string for gmetric of jobinfo""" |
---|
[507] | 643 | |
---|
[691] | 644 | gval_lists = [ ] |
---|
| 645 | val_list = { } |
---|
[507] | 646 | |
---|
[691] | 647 | for val_name, val_value in jobattrs.items(): |
---|
[507] | 648 | |
---|
[691] | 649 | # These are our own metric names, i.e.: status, start_timestamp, etc |
---|
| 650 | # |
---|
| 651 | val_list_names_len = len( string.join( val_list.keys() ) ) + len(val_list.keys()) |
---|
[507] | 652 | |
---|
[691] | 653 | # These are their corresponding values |
---|
| 654 | # |
---|
| 655 | val_list_vals_len = len( string.join( val_list.values() ) ) + len(val_list.values()) |
---|
[507] | 656 | |
---|
[691] | 657 | if val_name == 'nodes' and jobattrs['status'] == 'R': |
---|
[507] | 658 | |
---|
[691] | 659 | node_str = None |
---|
[507] | 660 | |
---|
[691] | 661 | for node in val_value: |
---|
[507] | 662 | |
---|
[691] | 663 | if node_str: |
---|
[507] | 664 | |
---|
[691] | 665 | node_str = node_str + ';' + node |
---|
| 666 | else: |
---|
| 667 | node_str = node |
---|
[507] | 668 | |
---|
[691] | 669 | # Make sure if we add this new info, that the total metric's value length does not exceed METRIC_MAX_VAL_LEN |
---|
| 670 | # |
---|
| 671 | if (val_list_names_len + len(val_name) ) + (val_list_vals_len + len(node_str) ) > METRIC_MAX_VAL_LEN: |
---|
[507] | 672 | |
---|
[691] | 673 | # It's too big, we need to make a new gmetric for the additional info |
---|
| 674 | # |
---|
| 675 | val_list[ val_name ] = node_str |
---|
[507] | 676 | |
---|
[691] | 677 | gval_lists.append( val_list ) |
---|
[507] | 678 | |
---|
[691] | 679 | val_list = { } |
---|
| 680 | node_str = None |
---|
[507] | 681 | |
---|
[691] | 682 | val_list[ val_name ] = node_str |
---|
[507] | 683 | |
---|
[691] | 684 | gval_lists.append( val_list ) |
---|
[507] | 685 | |
---|
[691] | 686 | val_list = { } |
---|
[507] | 687 | |
---|
[691] | 688 | elif val_value != '': |
---|
[507] | 689 | |
---|
[691] | 690 | # Make sure if we add this new info, that the total metric's value length does not exceed METRIC_MAX_VAL_LEN |
---|
| 691 | # |
---|
| 692 | if (val_list_names_len + len(val_name) ) + (val_list_vals_len + len(str(val_value)) ) > METRIC_MAX_VAL_LEN: |
---|
[507] | 693 | |
---|
[691] | 694 | # It's too big, we need to make a new gmetric for the additional info |
---|
| 695 | # |
---|
| 696 | gval_lists.append( val_list ) |
---|
[507] | 697 | |
---|
[691] | 698 | val_list = { } |
---|
[507] | 699 | |
---|
[691] | 700 | val_list[ val_name ] = val_value |
---|
[507] | 701 | |
---|
[691] | 702 | if len( val_list ) > 0: |
---|
[507] | 703 | |
---|
[691] | 704 | gval_lists.append( val_list ) |
---|
[507] | 705 | |
---|
[691] | 706 | str_list = [ ] |
---|
[507] | 707 | |
---|
[691] | 708 | # Now append the value names and values together, i.e.: stop_timestamp=value, etc |
---|
| 709 | # |
---|
| 710 | for val_list in gval_lists: |
---|
[507] | 711 | |
---|
[691] | 712 | my_val_str = None |
---|
[507] | 713 | |
---|
[691] | 714 | for val_name, val_value in val_list.items(): |
---|
[507] | 715 | |
---|
[691] | 716 | if type(val_value) == list: |
---|
[579] | 717 | |
---|
[691] | 718 | val_value = val_value.join( ',' ) |
---|
[579] | 719 | |
---|
[691] | 720 | if my_val_str: |
---|
[507] | 721 | |
---|
[691] | 722 | try: |
---|
| 723 | # fixme: It's getting |
---|
| 724 | # ('nodes', None) items |
---|
| 725 | my_val_str = my_val_str + ' ' + val_name + '=' + val_value |
---|
| 726 | except: |
---|
| 727 | pass |
---|
[623] | 728 | |
---|
[691] | 729 | else: |
---|
| 730 | my_val_str = val_name + '=' + val_value |
---|
[507] | 731 | |
---|
[691] | 732 | str_list.append( my_val_str ) |
---|
[507] | 733 | |
---|
[691] | 734 | return str_list |
---|
[507] | 735 | |
---|
[691] | 736 | def daemon( self ): |
---|
[355] | 737 | |
---|
[691] | 738 | """Run as daemon forever""" |
---|
[256] | 739 | |
---|
[691] | 740 | # Fork the first child |
---|
| 741 | # |
---|
| 742 | pid = os.fork() |
---|
| 743 | if pid > 0: |
---|
| 744 | sys.exit(0) # end parent |
---|
[256] | 745 | |
---|
[691] | 746 | # creates a session and sets the process group ID |
---|
| 747 | # |
---|
| 748 | os.setsid() |
---|
[318] | 749 | |
---|
[691] | 750 | # Fork the second child |
---|
| 751 | # |
---|
| 752 | pid = os.fork() |
---|
| 753 | if pid > 0: |
---|
| 754 | sys.exit(0) # end parent |
---|
[318] | 755 | |
---|
[691] | 756 | write_pidfile() |
---|
[318] | 757 | |
---|
[691] | 758 | # Go to the root directory and set the umask |
---|
| 759 | # |
---|
| 760 | os.chdir('/') |
---|
| 761 | os.umask(0) |
---|
[318] | 762 | |
---|
[691] | 763 | sys.stdin.close() |
---|
| 764 | sys.stdout.close() |
---|
| 765 | sys.stderr.close() |
---|
[318] | 766 | |
---|
[691] | 767 | os.open('/dev/null', os.O_RDWR) |
---|
| 768 | os.dup2(0, 1) |
---|
| 769 | os.dup2(0, 2) |
---|
[318] | 770 | |
---|
[691] | 771 | self.run() |
---|
[318] | 772 | |
---|
[691] | 773 | def run( self ): |
---|
[355] | 774 | |
---|
[691] | 775 | """Main thread""" |
---|
[256] | 776 | |
---|
[691] | 777 | while ( 1 ): |
---|
| 778 | |
---|
| 779 | self.getJobData() |
---|
| 780 | self.submitJobData() |
---|
| 781 | time.sleep( BATCH_POLL_INTERVAL ) |
---|
[256] | 782 | |
---|
[623] | 783 | # SGE code by Dave Love <fx@gnu.org>. Tested with SGE 6.0u8 and 6.0u11. May |
---|
| 784 | # work with SGE 6.1 (else should be easily fixable), but definitely doesn't |
---|
| 785 | # with 6.2. See also the fixmes. |
---|
[256] | 786 | |
---|
[507] | 787 | class NoJobs (Exception): |
---|
[691] | 788 | """Exception raised by empty job list in qstat output.""" |
---|
| 789 | pass |
---|
[256] | 790 | |
---|
[507] | 791 | class SgeQstatXMLParser(xml.sax.handler.ContentHandler): |
---|
[691] | 792 | """SAX handler for XML output from Sun Grid Engine's `qstat'.""" |
---|
[318] | 793 | |
---|
[691] | 794 | def __init__(self): |
---|
| 795 | self.value = "" |
---|
| 796 | self.joblist = [] |
---|
| 797 | self.job = {} |
---|
| 798 | self.queue = "" |
---|
| 799 | self.in_joblist = False |
---|
| 800 | self.lrequest = False |
---|
| 801 | self.eltq = deque() |
---|
| 802 | xml.sax.handler.ContentHandler.__init__(self) |
---|
[318] | 803 | |
---|
[691] | 804 | # The structure of the output is as follows (for SGE 6.0). It's |
---|
| 805 | # similar for 6.1, but radically different for 6.2, and is |
---|
| 806 | # undocumented generally. Unfortunately it's voluminous, and probably |
---|
| 807 | # doesn't scale to large clusters/queues. |
---|
[318] | 808 | |
---|
[691] | 809 | # <detailed_job_info xmlns:xsd="http://www.w3.org/2001/XMLSchema"> |
---|
| 810 | # <djob_info> |
---|
| 811 | # <qmaster_response> <!-- job --> |
---|
| 812 | # ... |
---|
| 813 | # <JB_ja_template> |
---|
| 814 | # <ulong_sublist> |
---|
| 815 | # ... <!-- start_time, state ... --> |
---|
| 816 | # </ulong_sublist> |
---|
| 817 | # </JB_ja_template> |
---|
| 818 | # <JB_ja_tasks> |
---|
| 819 | # <ulong_sublist> |
---|
| 820 | # ... <!-- task info |
---|
| 821 | # </ulong_sublist> |
---|
| 822 | # ... |
---|
| 823 | # </JB_ja_tasks> |
---|
| 824 | # ... |
---|
| 825 | # </qmaster_response> |
---|
| 826 | # </djob_info> |
---|
| 827 | # <messages> |
---|
| 828 | # ... |
---|
[318] | 829 | |
---|
[691] | 830 | # NB. We might treat each task as a separate job, like |
---|
| 831 | # straight qstat output, but the web interface expects jobs to |
---|
| 832 | # be identified by integers, not, say, <job number>.<task>. |
---|
[318] | 833 | |
---|
[691] | 834 | # So, I lied. If the job list is empty, we get invalid XML |
---|
| 835 | # like this, which we need to defend against: |
---|
[318] | 836 | |
---|
[691] | 837 | # <unknown_jobs xmlns:xsd="http://www.w3.org/2001/XMLSchema"> |
---|
| 838 | # <> |
---|
| 839 | # <ST_name>*</ST_name> |
---|
| 840 | # </> |
---|
| 841 | # </unknown_jobs> |
---|
[318] | 842 | |
---|
[691] | 843 | def startElement(self, name, attrs): |
---|
| 844 | self.value = "" |
---|
| 845 | if name == "djob_info": # job list |
---|
| 846 | self.in_joblist = True |
---|
| 847 | # The job container is "qmaster_response" in SGE 6.0 |
---|
| 848 | # and 6.1, but "element" in 6.2. This is only the very |
---|
| 849 | # start of what's necessary for 6.2, though (sigh). |
---|
| 850 | elif (name == "qmaster_response" or name == "element") \ |
---|
| 851 | and self.eltq[-1] == "djob_info": # job |
---|
| 852 | self.job = {"job_state": "U", "slots": 0, |
---|
| 853 | "nodes": [], "queued_timestamp": "", |
---|
| 854 | "queued_timestamp": "", "queue": "", |
---|
| 855 | "ppn": "0", "RN_max": 0, |
---|
| 856 | # fixme in endElement |
---|
| 857 | "requested_memory": 0, "requested_time": 0 |
---|
| 858 | } |
---|
| 859 | self.joblist.append(self.job) |
---|
| 860 | elif name == "qstat_l_requests": # resource request |
---|
| 861 | self.lrequest = True |
---|
| 862 | elif name == "unknown_jobs": |
---|
| 863 | raise NoJobs |
---|
| 864 | self.eltq.append (name) |
---|
[318] | 865 | |
---|
[691] | 866 | def characters(self, ch): |
---|
| 867 | self.value += ch |
---|
[318] | 868 | |
---|
[691] | 869 | def endElement(self, name): |
---|
| 870 | """Snarf job elements contents into job dictionary. |
---|
| 871 | Translate keys if appropriate.""" |
---|
[318] | 872 | |
---|
[691] | 873 | name_trans = { |
---|
| 874 | "JB_job_number": "number", |
---|
| 875 | "JB_job_name": "name", "JB_owner": "owner", |
---|
| 876 | "queue_name": "queue", "JAT_start_time": "start_timestamp", |
---|
| 877 | "JB_submission_time": "queued_timestamp" |
---|
| 878 | } |
---|
| 879 | value = self.value |
---|
| 880 | self.eltq.pop () |
---|
[318] | 881 | |
---|
[691] | 882 | if name == "djob_info": |
---|
| 883 | self.in_joblist = False |
---|
| 884 | self.job = {} |
---|
| 885 | elif name == "JAT_master_queue": |
---|
| 886 | self.job["queue"] = value.split("@")[0] |
---|
| 887 | elif name == "JG_qhostname": |
---|
| 888 | if not (value in self.job["nodes"]): |
---|
| 889 | self.job["nodes"].append(value) |
---|
| 890 | elif name == "JG_slots": # slots in use |
---|
| 891 | self.job["slots"] += int(value) |
---|
| 892 | elif name == "RN_max": # requested slots (tasks or parallel) |
---|
| 893 | self.job["RN_max"] = max (self.job["RN_max"], |
---|
| 894 | int(value)) |
---|
| 895 | elif name == "JAT_state": # job state (bitwise or) |
---|
| 896 | value = int (value) |
---|
| 897 | # Status values from sge_jobL.h |
---|
| 898 | #define JIDLE 0x00000000 |
---|
| 899 | #define JHELD 0x00000010 |
---|
| 900 | #define JMIGRATING 0x00000020 |
---|
| 901 | #define JQUEUED 0x00000040 |
---|
| 902 | #define JRUNNING 0x00000080 |
---|
| 903 | #define JSUSPENDED 0x00000100 |
---|
| 904 | #define JTRANSFERING 0x00000200 |
---|
| 905 | #define JDELETED 0x00000400 |
---|
| 906 | #define JWAITING 0x00000800 |
---|
| 907 | #define JEXITING 0x00001000 |
---|
| 908 | #define JWRITTEN 0x00002000 |
---|
| 909 | #define JSUSPENDED_ON_THRESHOLD 0x00010000 |
---|
| 910 | #define JFINISHED 0x00010000 |
---|
| 911 | if value & 0x80: |
---|
| 912 | self.job["status"] = "R" |
---|
| 913 | elif value & 0x40: |
---|
| 914 | self.job["status"] = "Q" |
---|
| 915 | else: |
---|
| 916 | self.job["status"] = "O" # `other' |
---|
| 917 | elif name == "CE_name" and self.lrequest and self.value in \ |
---|
| 918 | ("h_cpu", "s_cpu", "cpu", "h_core", "s_core"): |
---|
| 919 | # We're in a container for an interesting resource |
---|
| 920 | # request; record which type. |
---|
| 921 | self.lrequest = self.value |
---|
| 922 | elif name == "CE_doubleval" and self.lrequest: |
---|
| 923 | # if we're in a container for an interesting |
---|
| 924 | # resource request, use the maxmimum of the hard |
---|
| 925 | # and soft requests to record the requested CPU |
---|
| 926 | # or core. Fixme: I'm not sure if this logic is |
---|
| 927 | # right. |
---|
| 928 | if self.lrequest in ("h_core", "s_core"): |
---|
| 929 | self.job["requested_memory"] = \ |
---|
| 930 | max (float (value), |
---|
| 931 | self.job["requested_memory"]) |
---|
| 932 | # Fixme: Check what cpu means, c.f [hs]_cpu. |
---|
| 933 | elif self.lrequest in ("h_cpu", "s_cpu", "cpu"): |
---|
| 934 | self.job["requested_time"] = \ |
---|
| 935 | max (float (value), |
---|
| 936 | self.job["requested_time"]) |
---|
| 937 | elif name == "qstat_l_requests": |
---|
| 938 | self.lrequest = False |
---|
| 939 | elif self.job and self.in_joblist: |
---|
| 940 | if name in name_trans: |
---|
| 941 | name = name_trans[name] |
---|
| 942 | self.job[name] = value |
---|
[318] | 943 | |
---|
[507] | 944 | # Abstracted from PBS original. |
---|
| 945 | # Fixme: Is it worth (or appropriate for PBS) sorting the result? |
---|
[520] | 946 | # |
---|
| 947 | def do_nodelist( nodes ): |
---|
| 948 | |
---|
[691] | 949 | """Translate node list as appropriate.""" |
---|
[520] | 950 | |
---|
[691] | 951 | nodeslist = [ ] |
---|
| 952 | my_domain = fqdn_parts( socket.getfqdn() )[1] |
---|
[520] | 953 | |
---|
[691] | 954 | for node in nodes: |
---|
[520] | 955 | |
---|
[691] | 956 | host = node.split( '/' )[0] # not relevant for SGE |
---|
| 957 | h, host_domain = fqdn_parts(host) |
---|
[520] | 958 | |
---|
[691] | 959 | if host_domain == my_domain: |
---|
[520] | 960 | |
---|
[691] | 961 | host = h |
---|
[520] | 962 | |
---|
[691] | 963 | if nodeslist.count( host ) == 0: |
---|
[520] | 964 | |
---|
[691] | 965 | for translate_pattern in BATCH_HOST_TRANSLATE: |
---|
[520] | 966 | |
---|
[691] | 967 | if translate_pattern.find( '/' ) != -1: |
---|
[520] | 968 | |
---|
[691] | 969 | translate_orig = \ |
---|
| 970 | translate_pattern.split( '/' )[1] |
---|
| 971 | translate_new = \ |
---|
| 972 | translate_pattern.split( '/' )[2] |
---|
| 973 | host = re.sub( translate_orig, |
---|
| 974 | translate_new, host ) |
---|
| 975 | if not host in nodeslist: |
---|
| 976 | nodeslist.append( host ) |
---|
| 977 | return nodeslist |
---|
[318] | 978 | |
---|
| 979 | class SgeDataGatherer(DataGatherer): |
---|
| 980 | |
---|
[691] | 981 | jobs = {} |
---|
[61] | 982 | |
---|
[691] | 983 | def __init__( self ): |
---|
| 984 | self.jobs = {} |
---|
| 985 | self.timeoffset = 0 |
---|
| 986 | self.dp = DataProcessor() |
---|
[318] | 987 | |
---|
[691] | 988 | def getJobData( self ): |
---|
| 989 | """Gather all data on current jobs in SGE""" |
---|
[318] | 990 | |
---|
[691] | 991 | import popen2 |
---|
[318] | 992 | |
---|
[691] | 993 | self.cur_time = 0 |
---|
| 994 | queues = "" |
---|
| 995 | if QUEUE: # only for specific queues |
---|
| 996 | # Fixme: assumes queue names don't contain single |
---|
| 997 | # quote or comma. Don't know what the SGE rules are. |
---|
| 998 | queues = " -q '" + string.join (QUEUE, ",") + "'" |
---|
| 999 | # Note the comment in SgeQstatXMLParser about scaling with |
---|
| 1000 | # this method of getting data. I haven't found better one. |
---|
| 1001 | # Output with args `-xml -ext -f -r' is easier to parse |
---|
| 1002 | # in some ways, harder in others, but it doesn't provide |
---|
| 1003 | # the submission time (at least SGE 6.0). The pipeline |
---|
| 1004 | # into sed corrects bogus XML observed with a configuration |
---|
| 1005 | # of SGE 6.0u8, which otherwise causes the parsing to hang. |
---|
| 1006 | piping = popen2.Popen3("qstat -u '*' -j '*' -xml | \ |
---|
[623] | 1007 | sed -e 's/reported usage>/reported_usage>/g' -e 's;<\/*JATASK:.*>;;'" \ |
---|
[691] | 1008 | + queues, True) |
---|
| 1009 | qstatparser = SgeQstatXMLParser() |
---|
| 1010 | parse_err = 0 |
---|
| 1011 | try: |
---|
| 1012 | xml.sax.parse(piping.fromchild, qstatparser) |
---|
| 1013 | except NoJobs: |
---|
| 1014 | pass |
---|
| 1015 | except: |
---|
| 1016 | parse_err = 1 |
---|
| 1017 | if piping.wait(): |
---|
| 1018 | debug_msg(10, |
---|
| 1019 | "qstat error, skipping until next polling interval: " |
---|
| 1020 | + piping.childerr.readline()) |
---|
| 1021 | return None |
---|
| 1022 | elif parse_err: |
---|
| 1023 | debug_msg(10, "Bad XML output from qstat"()) |
---|
| 1024 | exit (1) |
---|
| 1025 | for f in piping.fromchild, piping.tochild, piping.childerr: |
---|
| 1026 | f.close() |
---|
| 1027 | self.cur_time = time.time() |
---|
| 1028 | jobs_processed = [] |
---|
| 1029 | for job in qstatparser.joblist: |
---|
| 1030 | job_id = job["number"] |
---|
| 1031 | if job["status"] in [ 'Q', 'R' ]: |
---|
| 1032 | jobs_processed.append(job_id) |
---|
| 1033 | if job["status"] == "R": |
---|
| 1034 | job["nodes"] = do_nodelist (job["nodes"]) |
---|
| 1035 | # Fixme: why is job["nodes"] sometimes null? |
---|
| 1036 | try: |
---|
| 1037 | # Fixme: Is this sensible? The |
---|
| 1038 | # PBS-type PPN isn't something you use |
---|
| 1039 | # with SGE. |
---|
| 1040 | job["ppn"] = float(job["slots"]) / \ |
---|
| 1041 | len(job["nodes"]) |
---|
| 1042 | except: |
---|
| 1043 | job["ppn"] = 0 |
---|
| 1044 | if DETECT_TIME_DIFFS: |
---|
| 1045 | # If a job start is later than our |
---|
| 1046 | # current date, that must mean |
---|
| 1047 | # the SGE server's time is later |
---|
| 1048 | # than our local time. |
---|
| 1049 | start_timestamp = \ |
---|
| 1050 | int (job["start_timestamp"]) |
---|
| 1051 | if start_timestamp > \ |
---|
| 1052 | int(self.cur_time) + \ |
---|
| 1053 | int(self.timeoffset): |
---|
[318] | 1054 | |
---|
[691] | 1055 | self.timeoffset = \ |
---|
| 1056 | start_timestamp - \ |
---|
| 1057 | int(self.cur_time) |
---|
| 1058 | else: |
---|
| 1059 | # fixme: Note sure what this should be: |
---|
| 1060 | job["ppn"] = job["RN_max"] |
---|
| 1061 | job["nodes"] = "1" |
---|
[318] | 1062 | |
---|
[691] | 1063 | myAttrs = {} |
---|
| 1064 | for attr in ["name", "queue", "owner", |
---|
| 1065 | "requested_time", "status", |
---|
| 1066 | "requested_memory", "ppn", |
---|
| 1067 | "start_timestamp", "queued_timestamp"]: |
---|
| 1068 | myAttrs[attr] = str(job[attr]) |
---|
| 1069 | myAttrs["nodes"] = job["nodes"] |
---|
| 1070 | myAttrs["reported"] = str(int(self.cur_time) + \ |
---|
| 1071 | int(self.timeoffset)) |
---|
| 1072 | myAttrs["domain"] = fqdn_parts(socket.getfqdn())[1] |
---|
| 1073 | myAttrs["poll_interval"] = str(BATCH_POLL_INTERVAL) |
---|
[318] | 1074 | |
---|
[691] | 1075 | if self.jobDataChanged(self.jobs, job_id, myAttrs) \ |
---|
| 1076 | and myAttrs["status"] in ["R", "Q"]: |
---|
| 1077 | self.jobs[job_id] = myAttrs |
---|
| 1078 | for id, attrs in self.jobs.items(): |
---|
| 1079 | if id not in jobs_processed: |
---|
| 1080 | del self.jobs[id] |
---|
[318] | 1081 | |
---|
[524] | 1082 | # LSF code by Mahmoud Hanafi <hanafim@users.sourceforge.nt> |
---|
| 1083 | # Requres LSFObject http://sourceforge.net/projects/lsfobject |
---|
| 1084 | # |
---|
| 1085 | class LsfDataGatherer(DataGatherer): |
---|
[525] | 1086 | |
---|
[691] | 1087 | """This is the DataGatherer for LSf""" |
---|
[524] | 1088 | |
---|
[691] | 1089 | global lsfObject |
---|
[524] | 1090 | |
---|
[691] | 1091 | def __init__( self ): |
---|
[525] | 1092 | |
---|
[691] | 1093 | self.jobs = { } |
---|
| 1094 | self.timeoffset = 0 |
---|
| 1095 | self.dp = DataProcessor() |
---|
| 1096 | self.initLsfQuery() |
---|
[524] | 1097 | |
---|
[691] | 1098 | def _countDuplicatesInList( self, dupedList ): |
---|
[525] | 1099 | |
---|
[691] | 1100 | countDupes = { } |
---|
[525] | 1101 | |
---|
[691] | 1102 | for item in dupedList: |
---|
[525] | 1103 | |
---|
[691] | 1104 | if not countDupes.has_key( item ): |
---|
[525] | 1105 | |
---|
[691] | 1106 | countDupes[ item ] = 1 |
---|
| 1107 | else: |
---|
| 1108 | countDupes[ item ] = countDupes[ item ] + 1 |
---|
[525] | 1109 | |
---|
[691] | 1110 | dupeCountList = [ ] |
---|
[525] | 1111 | |
---|
[691] | 1112 | for item, count in countDupes.items(): |
---|
[525] | 1113 | |
---|
[691] | 1114 | dupeCountList.append( ( item, count ) ) |
---|
[525] | 1115 | |
---|
[691] | 1116 | return dupeCountList |
---|
[524] | 1117 | # |
---|
| 1118 | #lst = ['I1','I2','I1','I3','I4','I4','I7','I7','I7','I7','I7'] |
---|
| 1119 | #print _countDuplicatesInList(lst) |
---|
| 1120 | #[('I1', 2), ('I3', 1), ('I2', 1), ('I4', 2), ('I7', 5)] |
---|
| 1121 | ######################## |
---|
| 1122 | |
---|
[691] | 1123 | def initLsfQuery( self ): |
---|
| 1124 | self.pq = None |
---|
| 1125 | self.pq = lsfObject.jobInfoEntObject() |
---|
[524] | 1126 | |
---|
[691] | 1127 | def getJobData( self, known_jobs="" ): |
---|
| 1128 | """Gather all data on current jobs in LSF""" |
---|
| 1129 | if len( known_jobs ) > 0: |
---|
| 1130 | jobs = known_jobs |
---|
| 1131 | else: |
---|
| 1132 | jobs = { } |
---|
| 1133 | joblist = {} |
---|
| 1134 | joblist = self.pq.getJobInfo() |
---|
| 1135 | nodelist = '' |
---|
[524] | 1136 | |
---|
[691] | 1137 | self.cur_time = time.time() |
---|
[524] | 1138 | |
---|
[691] | 1139 | jobs_processed = [ ] |
---|
[524] | 1140 | |
---|
[691] | 1141 | for name, attrs in joblist.items(): |
---|
| 1142 | job_id = str(name) |
---|
| 1143 | jobs_processed.append( job_id ) |
---|
| 1144 | name = self.getAttr( attrs, 'jobName' ) |
---|
| 1145 | queue = self.getAttr( self.getAttr( attrs, 'submit') , 'queue' ) |
---|
| 1146 | owner = self.getAttr( attrs, 'user' ) |
---|
[524] | 1147 | |
---|
| 1148 | ### THIS IS THE rLimit List index values |
---|
[691] | 1149 | #define LSF_RLIMIT_CPU 0 /* cpu time in milliseconds */ |
---|
| 1150 | #define LSF_RLIMIT_FSIZE 1 /* maximum file size */ |
---|
| 1151 | #define LSF_RLIMIT_DATA 2 /* data size */ |
---|
| 1152 | #define LSF_RLIMIT_STACK 3 /* stack size */ |
---|
| 1153 | #define LSF_RLIMIT_CORE 4 /* core file size */ |
---|
| 1154 | #define LSF_RLIMIT_RSS 5 /* resident set size */ |
---|
| 1155 | #define LSF_RLIMIT_NOFILE 6 /* open files */ |
---|
| 1156 | #define LSF_RLIMIT_OPEN_MAX 7 /* (from HP-UX) */ |
---|
| 1157 | #define LSF_RLIMIT_VMEM 8 /* maximum swap mem */ |
---|
[524] | 1158 | #define LSF_RLIMIT_SWAP 8 |
---|
[691] | 1159 | #define LSF_RLIMIT_RUN 9 /* max wall-clock time limit */ |
---|
| 1160 | #define LSF_RLIMIT_PROCESS 10 /* process number limit */ |
---|
| 1161 | #define LSF_RLIMIT_THREAD 11 /* thread number limit (introduced in LSF6.0) */ |
---|
| 1162 | #define LSF_RLIM_NLIMITS 12 /* number of resource limits */ |
---|
[524] | 1163 | |
---|
[691] | 1164 | requested_time = self.getAttr( self.getAttr( attrs, 'submit') , 'rLimits' )[9] |
---|
| 1165 | if requested_time == -1: |
---|
| 1166 | requested_time = "" |
---|
| 1167 | requested_memory = self.getAttr( self.getAttr( attrs, 'submit') , 'rLimits' )[8] |
---|
| 1168 | if requested_memory == -1: |
---|
| 1169 | requested_memory = "" |
---|
[524] | 1170 | # This tries to get proc per node. We don't support this right now |
---|
[691] | 1171 | ppn = 0 #self.getAttr( self.getAttr( attrs, 'SubmitList') , 'numProessors' ) |
---|
| 1172 | requested_cpus = self.getAttr( self.getAttr( attrs, 'submit') , 'numProcessors' ) |
---|
| 1173 | if requested_cpus == None or requested_cpus == "": |
---|
| 1174 | requested_cpus = 1 |
---|
[524] | 1175 | |
---|
[691] | 1176 | if QUEUE: |
---|
| 1177 | for q in QUEUE: |
---|
| 1178 | if q == queue: |
---|
| 1179 | display_queue = 1 |
---|
| 1180 | break |
---|
| 1181 | else: |
---|
| 1182 | display_queue = 0 |
---|
| 1183 | continue |
---|
| 1184 | if display_queue == 0: |
---|
| 1185 | continue |
---|
[524] | 1186 | |
---|
[691] | 1187 | runState = self.getAttr( attrs, 'status' ) |
---|
| 1188 | if runState == 4: |
---|
| 1189 | status = 'R' |
---|
| 1190 | else: |
---|
| 1191 | status = 'Q' |
---|
| 1192 | queued_timestamp = self.getAttr( attrs, 'submitTime' ) |
---|
[524] | 1193 | |
---|
[691] | 1194 | if status == 'R': |
---|
| 1195 | start_timestamp = self.getAttr( attrs, 'startTime' ) |
---|
| 1196 | nodesCpu = dict(self._countDuplicatesInList(self.getAttr( attrs, 'exHosts' ))) |
---|
| 1197 | nodelist = nodesCpu.keys() |
---|
[524] | 1198 | |
---|
[691] | 1199 | if DETECT_TIME_DIFFS: |
---|
[524] | 1200 | |
---|
[691] | 1201 | # If a job start if later than our current date, |
---|
| 1202 | # that must mean the Torque server's time is later |
---|
| 1203 | # than our local time. |
---|
[524] | 1204 | |
---|
[691] | 1205 | if int(start_timestamp) > int( int(self.cur_time) + int(self.timeoffset) ): |
---|
[524] | 1206 | |
---|
[691] | 1207 | self.timeoffset = int( int(start_timestamp) - int(self.cur_time) ) |
---|
[524] | 1208 | |
---|
[691] | 1209 | elif status == 'Q': |
---|
| 1210 | start_timestamp = '' |
---|
| 1211 | count_mynodes = 0 |
---|
| 1212 | numeric_node = 1 |
---|
| 1213 | nodelist = '' |
---|
[524] | 1214 | |
---|
[691] | 1215 | myAttrs = { } |
---|
| 1216 | if name == "": |
---|
| 1217 | myAttrs['name'] = "none" |
---|
| 1218 | else: |
---|
| 1219 | myAttrs['name'] = name |
---|
[524] | 1220 | |
---|
[691] | 1221 | myAttrs[ 'owner' ] = owner |
---|
| 1222 | myAttrs[ 'requested_time' ] = str(requested_time) |
---|
| 1223 | myAttrs[ 'requested_memory' ] = str(requested_memory) |
---|
| 1224 | myAttrs[ 'requested_cpus' ] = str(requested_cpus) |
---|
| 1225 | myAttrs[ 'ppn' ] = str( ppn ) |
---|
| 1226 | myAttrs[ 'status' ] = status |
---|
| 1227 | myAttrs[ 'start_timestamp' ] = str(start_timestamp) |
---|
| 1228 | myAttrs[ 'queue' ] = str(queue) |
---|
| 1229 | myAttrs[ 'queued_timestamp' ] = str(queued_timestamp) |
---|
| 1230 | myAttrs[ 'reported' ] = str( int( int( self.cur_time ) + int( self.timeoffset ) ) ) |
---|
| 1231 | myAttrs[ 'nodes' ] = do_nodelist( nodelist ) |
---|
| 1232 | myAttrs[ 'domain' ] = fqdn_parts( socket.getfqdn() )[1] |
---|
| 1233 | myAttrs[ 'poll_interval' ] = str(BATCH_POLL_INTERVAL) |
---|
[524] | 1234 | |
---|
[691] | 1235 | if self.jobDataChanged( jobs, job_id, myAttrs ) and myAttrs['status'] in [ 'R', 'Q' ]: |
---|
| 1236 | jobs[ job_id ] = myAttrs |
---|
[524] | 1237 | |
---|
[691] | 1238 | debug_msg( 10, printTime() + ' job %s state changed' %(job_id) ) |
---|
[524] | 1239 | |
---|
[691] | 1240 | for id, attrs in jobs.items(): |
---|
| 1241 | if id not in jobs_processed: |
---|
| 1242 | # This one isn't there anymore |
---|
| 1243 | # |
---|
| 1244 | del jobs[ id ] |
---|
| 1245 | self.jobs=jobs |
---|
[524] | 1246 | |
---|
| 1247 | |
---|
[355] | 1248 | class PbsDataGatherer( DataGatherer ): |
---|
[318] | 1249 | |
---|
[691] | 1250 | """This is the DataGatherer for PBS and Torque""" |
---|
[318] | 1251 | |
---|
[691] | 1252 | global PBSQuery, PBSError |
---|
[256] | 1253 | |
---|
[691] | 1254 | def __init__( self ): |
---|
[354] | 1255 | |
---|
[691] | 1256 | """Setup appropriate variables""" |
---|
[23] | 1257 | |
---|
[691] | 1258 | self.jobs = { } |
---|
| 1259 | self.timeoffset = 0 |
---|
| 1260 | self.dp = DataProcessor() |
---|
[354] | 1261 | |
---|
[691] | 1262 | self.initPbsQuery() |
---|
[23] | 1263 | |
---|
[691] | 1264 | def initPbsQuery( self ): |
---|
[91] | 1265 | |
---|
[691] | 1266 | self.pq = None |
---|
[354] | 1267 | |
---|
[691] | 1268 | if( BATCH_SERVER ): |
---|
[354] | 1269 | |
---|
[691] | 1270 | self.pq = PBSQuery( BATCH_SERVER ) |
---|
| 1271 | else: |
---|
| 1272 | self.pq = PBSQuery() |
---|
[91] | 1273 | |
---|
[691] | 1274 | try: |
---|
| 1275 | self.pq.old_data_structure() |
---|
[656] | 1276 | |
---|
[691] | 1277 | except AttributeError: |
---|
[656] | 1278 | |
---|
[691] | 1279 | # pbs_query is older |
---|
| 1280 | # |
---|
| 1281 | pass |
---|
[656] | 1282 | |
---|
[691] | 1283 | def getJobData( self ): |
---|
[354] | 1284 | |
---|
[691] | 1285 | """Gather all data on current jobs in Torque""" |
---|
[26] | 1286 | |
---|
[691] | 1287 | joblist = {} |
---|
| 1288 | self.cur_time = 0 |
---|
[349] | 1289 | |
---|
[691] | 1290 | try: |
---|
| 1291 | joblist = self.pq.getjobs() |
---|
| 1292 | self.cur_time = time.time() |
---|
[354] | 1293 | |
---|
[691] | 1294 | except PBSError, detail: |
---|
[354] | 1295 | |
---|
[691] | 1296 | debug_msg( 10, "Caught PBS unavailable, skipping until next polling interval: " + str( detail ) ) |
---|
| 1297 | return None |
---|
[354] | 1298 | |
---|
[691] | 1299 | jobs_processed = [ ] |
---|
[26] | 1300 | |
---|
[691] | 1301 | for name, attrs in joblist.items(): |
---|
| 1302 | display_queue = 1 |
---|
| 1303 | job_id = name.split( '.' )[0] |
---|
[26] | 1304 | |
---|
[691] | 1305 | name = self.getAttr( attrs, 'Job_Name' ) |
---|
| 1306 | queue = self.getAttr( attrs, 'queue' ) |
---|
[317] | 1307 | |
---|
[691] | 1308 | if QUEUE: |
---|
| 1309 | for q in QUEUE: |
---|
| 1310 | if q == queue: |
---|
| 1311 | display_queue = 1 |
---|
| 1312 | break |
---|
| 1313 | else: |
---|
| 1314 | display_queue = 0 |
---|
| 1315 | continue |
---|
| 1316 | if display_queue == 0: |
---|
| 1317 | continue |
---|
[317] | 1318 | |
---|
| 1319 | |
---|
[691] | 1320 | owner = self.getAttr( attrs, 'Job_Owner' ).split( '@' )[0] |
---|
| 1321 | requested_time = self.getAttr( attrs, 'Resource_List.walltime' ) |
---|
| 1322 | requested_memory = self.getAttr( attrs, 'Resource_List.mem' ) |
---|
[95] | 1323 | |
---|
[691] | 1324 | mynoderequest = self.getAttr( attrs, 'Resource_List.nodes' ) |
---|
[95] | 1325 | |
---|
[691] | 1326 | ppn = '' |
---|
[281] | 1327 | |
---|
[691] | 1328 | if mynoderequest.find( ':' ) != -1 and mynoderequest.find( 'ppn' ) != -1: |
---|
[95] | 1329 | |
---|
[691] | 1330 | mynoderequest_fields = mynoderequest.split( ':' ) |
---|
[281] | 1331 | |
---|
[691] | 1332 | for mynoderequest_field in mynoderequest_fields: |
---|
[281] | 1333 | |
---|
[691] | 1334 | if mynoderequest_field.find( 'ppn' ) != -1: |
---|
[281] | 1335 | |
---|
[691] | 1336 | ppn = mynoderequest_field.split( 'ppn=' )[1] |
---|
[281] | 1337 | |
---|
[691] | 1338 | status = self.getAttr( attrs, 'job_state' ) |
---|
[25] | 1339 | |
---|
[691] | 1340 | if status in [ 'Q', 'R' ]: |
---|
[450] | 1341 | |
---|
[691] | 1342 | jobs_processed.append( job_id ) |
---|
[450] | 1343 | |
---|
[691] | 1344 | queued_timestamp = self.getAttr( attrs, 'ctime' ) |
---|
[243] | 1345 | |
---|
[691] | 1346 | if status == 'R': |
---|
[133] | 1347 | |
---|
[691] | 1348 | start_timestamp = self.getAttr( attrs, 'mtime' ) |
---|
| 1349 | nodes = self.getAttr( attrs, 'exec_host' ).split( '+' ) |
---|
[133] | 1350 | |
---|
[691] | 1351 | nodeslist = do_nodelist( nodes ) |
---|
[354] | 1352 | |
---|
[691] | 1353 | if DETECT_TIME_DIFFS: |
---|
[185] | 1354 | |
---|
[691] | 1355 | # If a job start if later than our current date, |
---|
| 1356 | # that must mean the Torque server's time is later |
---|
| 1357 | # than our local time. |
---|
| 1358 | |
---|
| 1359 | if int( start_timestamp ) > int( int( self.cur_time ) + int( self.timeoffset ) ): |
---|
[185] | 1360 | |
---|
[691] | 1361 | self.timeoffset = int( int(start_timestamp) - int(self.cur_time) ) |
---|
[185] | 1362 | |
---|
[691] | 1363 | elif status == 'Q': |
---|
[95] | 1364 | |
---|
[691] | 1365 | # 'mynodequest' can be a string in the following syntax according to the |
---|
| 1366 | # Torque Administator's manual: |
---|
| 1367 | # |
---|
| 1368 | # {<node_count> | <hostname>}[:ppn=<ppn>][:<property>[:<property>]...][+ ...] |
---|
| 1369 | # {<node_count> | <hostname>}[:ppn=<ppn>][:<property>[:<property>]...][+ ...] |
---|
| 1370 | # etc |
---|
| 1371 | # |
---|
[451] | 1372 | |
---|
[691] | 1373 | # |
---|
| 1374 | # For now we only count the amount of nodes request and ignore properties |
---|
| 1375 | # |
---|
[451] | 1376 | |
---|
[691] | 1377 | start_timestamp = '' |
---|
| 1378 | count_mynodes = 0 |
---|
[354] | 1379 | |
---|
[691] | 1380 | for node in mynoderequest.split( '+' ): |
---|
[67] | 1381 | |
---|
[691] | 1382 | # Just grab the {node_count|hostname} part and ignore properties |
---|
| 1383 | # |
---|
| 1384 | nodepart = node.split( ':' )[0] |
---|
[67] | 1385 | |
---|
[691] | 1386 | # Let's assume a node_count value |
---|
| 1387 | # |
---|
| 1388 | numeric_node = 1 |
---|
[451] | 1389 | |
---|
[691] | 1390 | # Chop the value up into characters |
---|
| 1391 | # |
---|
| 1392 | for letter in nodepart: |
---|
[67] | 1393 | |
---|
[691] | 1394 | # If this char is not a digit (0-9), this must be a hostname |
---|
| 1395 | # |
---|
| 1396 | if letter not in string.digits: |
---|
[133] | 1397 | |
---|
[691] | 1398 | numeric_node = 0 |
---|
[133] | 1399 | |
---|
[691] | 1400 | # If this is a hostname, just count this as one (1) node |
---|
| 1401 | # |
---|
| 1402 | if not numeric_node: |
---|
[354] | 1403 | |
---|
[691] | 1404 | count_mynodes = count_mynodes + 1 |
---|
| 1405 | else: |
---|
[451] | 1406 | |
---|
[691] | 1407 | # If this a number, it must be the node_count |
---|
| 1408 | # and increase our count with it's value |
---|
| 1409 | # |
---|
| 1410 | try: |
---|
| 1411 | count_mynodes = count_mynodes + int( nodepart ) |
---|
[354] | 1412 | |
---|
[691] | 1413 | except ValueError, detail: |
---|
[354] | 1414 | |
---|
[691] | 1415 | # When we arrive here I must be bugged or very confused |
---|
| 1416 | # THIS SHOULD NOT HAPPEN! |
---|
| 1417 | # |
---|
| 1418 | debug_msg( 10, str( detail ) ) |
---|
| 1419 | debug_msg( 10, "Encountered weird node in Resources_List?!" ) |
---|
| 1420 | debug_msg( 10, 'nodepart = ' + str( nodepart ) ) |
---|
| 1421 | debug_msg( 10, 'job = ' + str( name ) ) |
---|
| 1422 | debug_msg( 10, 'attrs = ' + str( attrs ) ) |
---|
| 1423 | |
---|
| 1424 | nodeslist = str( count_mynodes ) |
---|
| 1425 | else: |
---|
| 1426 | start_timestamp = '' |
---|
| 1427 | nodeslist = '' |
---|
[133] | 1428 | |
---|
[691] | 1429 | myAttrs = { } |
---|
[26] | 1430 | |
---|
[691] | 1431 | myAttrs[ 'name' ] = str( name ) |
---|
| 1432 | myAttrs[ 'queue' ] = str( queue ) |
---|
| 1433 | myAttrs[ 'owner' ] = str( owner ) |
---|
| 1434 | myAttrs[ 'requested_time' ] = str( requested_time ) |
---|
| 1435 | myAttrs[ 'requested_memory' ] = str( requested_memory ) |
---|
| 1436 | myAttrs[ 'ppn' ] = str( ppn ) |
---|
| 1437 | myAttrs[ 'status' ] = str( status ) |
---|
| 1438 | myAttrs[ 'start_timestamp' ] = str( start_timestamp ) |
---|
| 1439 | myAttrs[ 'queued_timestamp' ] = str( queued_timestamp ) |
---|
| 1440 | myAttrs[ 'reported' ] = str( int( int( self.cur_time ) + int( self.timeoffset ) ) ) |
---|
| 1441 | myAttrs[ 'nodes' ] = nodeslist |
---|
| 1442 | myAttrs[ 'domain' ] = fqdn_parts( socket.getfqdn() )[1] |
---|
| 1443 | myAttrs[ 'poll_interval' ] = str( BATCH_POLL_INTERVAL ) |
---|
[354] | 1444 | |
---|
[691] | 1445 | if self.jobDataChanged( self.jobs, job_id, myAttrs ) and myAttrs['status'] in [ 'R', 'Q' ]: |
---|
[61] | 1446 | |
---|
[691] | 1447 | self.jobs[ job_id ] = myAttrs |
---|
[26] | 1448 | |
---|
[691] | 1449 | for id, attrs in self.jobs.items(): |
---|
[76] | 1450 | |
---|
[691] | 1451 | if id not in jobs_processed: |
---|
[76] | 1452 | |
---|
[691] | 1453 | # This one isn't there anymore; toedeledoki! |
---|
| 1454 | # |
---|
| 1455 | del self.jobs[ id ] |
---|
[76] | 1456 | |
---|
[363] | 1457 | # |
---|
| 1458 | # Gmetric by Nick Galbreath - nickg(a.t)modp(d.o.t)com |
---|
| 1459 | # Version 1.0 - 21-April2-2007 |
---|
| 1460 | # http://code.google.com/p/embeddedgmetric/ |
---|
| 1461 | # |
---|
| 1462 | # Modified by: Ramon Bastiaans |
---|
| 1463 | # For the Job Monarch Project, see: https://subtrac.sara.nl/oss/jobmonarch/ |
---|
| 1464 | # |
---|
| 1465 | # added: DEFAULT_TYPE for Gmetric's |
---|
| 1466 | # added: checkHostProtocol to determine if target is multicast or not |
---|
| 1467 | # changed: allow default for Gmetric constructor |
---|
| 1468 | # changed: allow defaults for all send() values except dmax |
---|
| 1469 | # |
---|
| 1470 | |
---|
[362] | 1471 | GMETRIC_DEFAULT_TYPE = 'string' |
---|
| 1472 | GMETRIC_DEFAULT_HOST = '127.0.0.1' |
---|
| 1473 | GMETRIC_DEFAULT_PORT = '8649' |
---|
[691] | 1474 | GMETRIC_DEFAULT_UNITS = '' |
---|
[362] | 1475 | |
---|
| 1476 | class Gmetric: |
---|
| 1477 | |
---|
[691] | 1478 | global GMETRIC_DEFAULT_HOST, GMETRIC_DEFAULT_PORT |
---|
[362] | 1479 | |
---|
[691] | 1480 | slope = { 'zero' : 0, 'positive' : 1, 'negative' : 2, 'both' : 3, 'unspecified' : 4 } |
---|
| 1481 | type = ( '', 'string', 'uint16', 'int16', 'uint32', 'int32', 'float', 'double', 'timestamp' ) |
---|
| 1482 | protocol = ( 'udp', 'multicast' ) |
---|
[362] | 1483 | |
---|
[691] | 1484 | def __init__( self, host=GMETRIC_DEFAULT_HOST, port=GMETRIC_DEFAULT_PORT ): |
---|
| 1485 | |
---|
| 1486 | global GMETRIC_DEFAULT_TYPE |
---|
[362] | 1487 | |
---|
[691] | 1488 | self.prot = self.checkHostProtocol( host ) |
---|
| 1489 | self.msg = xdrlib.Packer() |
---|
| 1490 | self.socket = socket.socket( socket.AF_INET, socket.SOCK_DGRAM ) |
---|
[362] | 1491 | |
---|
[691] | 1492 | if self.prot not in self.protocol: |
---|
[362] | 1493 | |
---|
[691] | 1494 | raise ValueError( "Protocol must be one of: " + str( self.protocol ) ) |
---|
[362] | 1495 | |
---|
[691] | 1496 | if self.prot == 'multicast': |
---|
[362] | 1497 | |
---|
[691] | 1498 | # Set multicast options |
---|
| 1499 | # |
---|
| 1500 | self.socket.setsockopt( socket.IPPROTO_IP, socket.IP_MULTICAST_TTL, 20 ) |
---|
[362] | 1501 | |
---|
[691] | 1502 | self.hostport = ( host, int( port ) ) |
---|
| 1503 | self.slopestr = 'both' |
---|
| 1504 | self.tmax = 60 |
---|
[362] | 1505 | |
---|
[691] | 1506 | def checkHostProtocol( self, ip ): |
---|
[362] | 1507 | |
---|
[691] | 1508 | """Detect if a ip adress is a multicast address""" |
---|
[471] | 1509 | |
---|
[691] | 1510 | MULTICAST_ADDRESS_MIN = ( "224", "0", "0", "0" ) |
---|
| 1511 | MULTICAST_ADDRESS_MAX = ( "239", "255", "255", "255" ) |
---|
[362] | 1512 | |
---|
[691] | 1513 | ip_fields = ip.split( '.' ) |
---|
[362] | 1514 | |
---|
[691] | 1515 | if ip_fields >= MULTICAST_ADDRESS_MIN and ip_fields <= MULTICAST_ADDRESS_MAX: |
---|
[362] | 1516 | |
---|
[691] | 1517 | return 'multicast' |
---|
| 1518 | else: |
---|
| 1519 | return 'udp' |
---|
[362] | 1520 | |
---|
[691] | 1521 | def send( self, name, value, dmax, typestr = '', units = '' ): |
---|
[362] | 1522 | |
---|
[691] | 1523 | if len( units ) == 0: |
---|
| 1524 | units = GMETRIC_DEFAULT_UNITS |
---|
[471] | 1525 | |
---|
[691] | 1526 | if len( typestr ) == 0: |
---|
| 1527 | typestr = GMETRIC_DEFAULT_TYPE |
---|
[362] | 1528 | |
---|
[691] | 1529 | msg = self.makexdr( name, value, typestr, units, self.slopestr, self.tmax, dmax ) |
---|
[409] | 1530 | |
---|
[691] | 1531 | return self.socket.sendto( msg, self.hostport ) |
---|
[362] | 1532 | |
---|
[691] | 1533 | def makexdr( self, name, value, typestr, unitstr, slopestr, tmax, dmax ): |
---|
[362] | 1534 | |
---|
[691] | 1535 | if slopestr not in self.slope: |
---|
[362] | 1536 | |
---|
[691] | 1537 | raise ValueError( "Slope must be one of: " + str( self.slope.keys() ) ) |
---|
[362] | 1538 | |
---|
[691] | 1539 | if typestr not in self.type: |
---|
[362] | 1540 | |
---|
[691] | 1541 | raise ValueError( "Type must be one of: " + str( self.type ) ) |
---|
[362] | 1542 | |
---|
[691] | 1543 | if len( name ) == 0: |
---|
[362] | 1544 | |
---|
[691] | 1545 | raise ValueError( "Name must be non-empty" ) |
---|
[362] | 1546 | |
---|
[691] | 1547 | self.msg.reset() |
---|
| 1548 | self.msg.pack_int( 0 ) |
---|
| 1549 | self.msg.pack_string( typestr ) |
---|
| 1550 | self.msg.pack_string( name ) |
---|
| 1551 | self.msg.pack_string( str( value ) ) |
---|
| 1552 | self.msg.pack_string( unitstr ) |
---|
| 1553 | self.msg.pack_int( self.slope[ slopestr ] ) |
---|
| 1554 | self.msg.pack_uint( int( tmax ) ) |
---|
| 1555 | self.msg.pack_uint( int( dmax ) ) |
---|
[362] | 1556 | |
---|
[691] | 1557 | return self.msg.get_buffer() |
---|
[362] | 1558 | |
---|
[26] | 1559 | def printTime( ): |
---|
[354] | 1560 | |
---|
[691] | 1561 | """Print current time/date in human readable format for log/debug""" |
---|
[26] | 1562 | |
---|
[691] | 1563 | return time.strftime("%a, %d %b %Y %H:%M:%S") |
---|
[26] | 1564 | |
---|
| 1565 | def debug_msg( level, msg ): |
---|
[354] | 1566 | |
---|
[691] | 1567 | """Print msg if at or above current debug level""" |
---|
[26] | 1568 | |
---|
[691] | 1569 | global DAEMONIZE, DEBUG_LEVEL, SYSLOG_LEVEL |
---|
[377] | 1570 | |
---|
[691] | 1571 | if (not DAEMONIZE and DEBUG_LEVEL >= level): |
---|
| 1572 | sys.stderr.write( msg + '\n' ) |
---|
[26] | 1573 | |
---|
[691] | 1574 | if (DAEMONIZE and USE_SYSLOG and SYSLOG_LEVEL >= level): |
---|
| 1575 | syslog.syslog( msg ) |
---|
[373] | 1576 | |
---|
[307] | 1577 | def write_pidfile(): |
---|
| 1578 | |
---|
[691] | 1579 | # Write pidfile if PIDFILE is set |
---|
| 1580 | # |
---|
| 1581 | if PIDFILE: |
---|
[307] | 1582 | |
---|
[691] | 1583 | pid = os.getpid() |
---|
[354] | 1584 | |
---|
[691] | 1585 | pidfile = open( PIDFILE, 'w' ) |
---|
[354] | 1586 | |
---|
[691] | 1587 | pidfile.write( str( pid ) ) |
---|
| 1588 | pidfile.close() |
---|
[307] | 1589 | |
---|
[23] | 1590 | def main(): |
---|
[354] | 1591 | |
---|
[691] | 1592 | """Application start""" |
---|
[23] | 1593 | |
---|
[691] | 1594 | global PBSQuery, PBSError, lsfObject |
---|
| 1595 | global SYSLOG_FACILITY, USE_SYSLOG, BATCH_API, DAEMONIZE |
---|
[256] | 1596 | |
---|
[691] | 1597 | if not processArgs( sys.argv[1:] ): |
---|
[354] | 1598 | |
---|
[691] | 1599 | sys.exit( 1 ) |
---|
[212] | 1600 | |
---|
[691] | 1601 | # Load appropriate DataGatherer depending on which BATCH_API is set |
---|
| 1602 | # and any required modules for the Gatherer |
---|
| 1603 | # |
---|
| 1604 | if BATCH_API == 'pbs': |
---|
[256] | 1605 | |
---|
[691] | 1606 | try: |
---|
| 1607 | from PBSQuery import PBSQuery, PBSError |
---|
[256] | 1608 | |
---|
[691] | 1609 | except ImportError: |
---|
[256] | 1610 | |
---|
[691] | 1611 | debug_msg( 0, "FATAL ERROR: BATCH_API set to 'pbs' but python module 'pbs_python' is not installed" ) |
---|
| 1612 | sys.exit( 1 ) |
---|
[256] | 1613 | |
---|
[691] | 1614 | gather = PbsDataGatherer() |
---|
[256] | 1615 | |
---|
[691] | 1616 | elif BATCH_API == 'sge': |
---|
[256] | 1617 | |
---|
[691] | 1618 | # Tested with SGE 6.0u11. |
---|
| 1619 | # |
---|
| 1620 | gather = SgeDataGatherer() |
---|
[368] | 1621 | |
---|
[691] | 1622 | elif BATCH_API == 'lsf': |
---|
[368] | 1623 | |
---|
[691] | 1624 | try: |
---|
| 1625 | from lsfObject import lsfObject |
---|
| 1626 | except: |
---|
| 1627 | debug_msg(0, "fatal error: BATCH_API set to 'lsf' but python module is not found or installed") |
---|
| 1628 | sys.exit( 1) |
---|
[256] | 1629 | |
---|
[691] | 1630 | gather = LsfDataGatherer() |
---|
[524] | 1631 | |
---|
[691] | 1632 | else: |
---|
| 1633 | debug_msg( 0, "FATAL ERROR: unknown BATCH_API '" + BATCH_API + "' is not supported" ) |
---|
[354] | 1634 | |
---|
[691] | 1635 | sys.exit( 1 ) |
---|
[256] | 1636 | |
---|
[691] | 1637 | if( DAEMONIZE and USE_SYSLOG ): |
---|
[373] | 1638 | |
---|
[691] | 1639 | syslog.openlog( 'jobmond', syslog.LOG_NOWAIT, SYSLOG_FACILITY ) |
---|
[373] | 1640 | |
---|
[691] | 1641 | if DAEMONIZE: |
---|
[354] | 1642 | |
---|
[691] | 1643 | gather.daemon() |
---|
| 1644 | else: |
---|
| 1645 | gather.run() |
---|
[23] | 1646 | |
---|
[256] | 1647 | # wh00t? someone started me! :) |
---|
[65] | 1648 | # |
---|
[23] | 1649 | if __name__ == '__main__': |
---|
[691] | 1650 | main() |
---|