main.py revision 4f2469c40c9020af2cf39ec0af518b6caba0a588
#!/usr/bin/env python

"""
lit - LLVM Integrated Tester.

See lit.pod for more information.
"""

import math, os, platform, random, re, sys, time, threading, traceback

import ProgressBar
import TestRunner
import Util

import LitConfig
import Test

import lit.discovery

class TestingProgressDisplay:
    """Serializes and renders per-test progress output.

    update() may be called concurrently from several Tester threads; a lock
    protects the console and (except in the quiet fast path) the completion
    counter.
    """

    def __init__(self, opts, numTests, progressBar=None):
        self.opts = opts
        self.numTests = numTests
        self.current = None
        self.lock = threading.Lock()
        self.progressBar = progressBar
        self.completed = 0

    def update(self, test):
        # Avoid locking overhead in quiet mode: nothing is printed for a
        # non-failure, so an (intentionally) unlocked increment suffices.
        if self.opts.quiet and not test.result.isFailure:
            self.completed += 1
            return

        # Output lock: serialize console writes from worker threads.
        with self.lock:
            self.handleUpdate(test)

    def finish(self):
        # Leave the console in a clean state once all tests are done.
        if self.progressBar:
            self.progressBar.clear()
        elif self.opts.quiet:
            pass
        elif self.opts.succinct:
            sys.stdout.write('\n')

    def handleUpdate(self, test):
        """Print one test's result line; caller must hold the output lock."""
        self.completed += 1
        if self.progressBar:
            self.progressBar.update(float(self.completed)/self.numTests,
                                    test.getFullName())

        # In succinct mode only failures get a full result line.
        if self.opts.succinct and not test.result.isFailure:
            return

        if self.progressBar:
            self.progressBar.clear()

        print('%s: %s (%d of %d)' % (test.result.name, test.getFullName(),
                                     self.completed, self.numTests))

        if test.result.isFailure and self.opts.showOutput:
            print("%s TEST '%s' FAILED %s" % ('*'*20, test.getFullName(),
                                              '*'*20))
            print(test.output)
            print("*" * 20)

        sys.stdout.flush()

class TestProvider:
    """Hands out tests to worker threads, honoring --max-time and cancel()."""

    def __init__(self, tests, maxTime):
        self.maxTime = maxTime
        self.iter = iter(tests)
        self.lock = threading.Lock()
        self.startTime = time.time()
        self.canceled = False

    def cancel(self):
        # Stop handing out further tests (used by the Windows Ctrl-C handler).
        with self.lock:
            self.canceled = True

    def get(self):
        """Return the next test to run, or None when done/canceled/timed out."""
        # Check if we have run out of time.
        if self.maxTime is not None:
            if time.time() - self.startTime > self.maxTime:
                return None

        # Otherwise take the next test.
        with self.lock:
            if self.canceled:
                return None
            try:
                # was self.iter.next(): Python 2-only; next() works on 2.6+/3.
                return next(self.iter)
            except StopIteration:
                return None

class Tester(threading.Thread):
    """Worker thread: repeatedly pulls a test from the provider and runs it."""

    def __init__(self, litConfig, provider, display):
        threading.Thread.__init__(self)
        self.litConfig = litConfig
        self.provider = provider
        self.display = display

    def run(self):
        while True:
            item = self.provider.get()
            if item is None:
                break
            self.runTest(item)

    def runTest(self, test):
        """Execute one test via its format, record the result and timing."""
        result = None
        startTime = time.time()
        try:
            result, output = test.config.test_format.execute(test,
                                                             self.litConfig)
        except KeyboardInterrupt:
            # This is a sad hack. Unfortunately subprocess goes
            # bonkers with ctrl-c and we start forking merrily.
            print('\nCtrl-C detected, goodbye.')
            os.kill(0, 9)
        except Exception:
            # was a bare 'except:', which would also swallow SystemExit.
            if self.litConfig.debug:
                raise
            result = Test.UNRESOLVED
            output = 'Exception during script execution:\n'
            output += traceback.format_exc()
            output += '\n'
        elapsed = time.time() - startTime

        test.setResult(result, output, elapsed)
        self.display.update(test)

def runTests(numThreads, litConfig, provider, display):
    """Run all tests from provider using numThreads worker threads."""
    # If only using one testing thread, don't use threads at all; this lets us
    # profile, among other things.
    if numThreads == 1:
        t = Tester(litConfig, provider, display)
        t.run()
        return

    # Otherwise spin up the testing threads and wait for them to finish.
    testers = [Tester(litConfig, provider, display)
               for i in range(numThreads)]
    for t in testers:
        t.start()
    try:
        for t in testers:
            t.join()
    except KeyboardInterrupt:
        sys.exit(2)

def main(builtinParameters=None):
    """Entry point: parse options, discover tests, run them, report results.

    builtinParameters: optional dict of predefined --param values; it is
    copied, never mutated (was a mutable default argument).
    """
    if builtinParameters is None:
        builtinParameters = {}

    # Bump the GIL check interval, its more important to get any one thread to
    # a blocking operation (hopefully exec) than to try and unblock other
    # threads.
    #
    # FIXME: This is a hack.  (setcheckinterval was removed in Python 3.9,
    # hence the guard.)
    if hasattr(sys, 'setcheckinterval'):
        sys.setcheckinterval(1000)

    from optparse import OptionParser, OptionGroup
    parser = OptionParser("usage: %prog [options] {file-or-path}")

    parser.add_option("-j", "--threads", dest="numThreads", metavar="N",
                      help="Number of testing threads",
                      type=int, action="store", default=None)
    parser.add_option("", "--config-prefix", dest="configPrefix",
                      metavar="NAME", help="Prefix for 'lit' config files",
                      action="store", default=None)
    parser.add_option("", "--param", dest="userParameters",
                      metavar="NAME=VAL",
                      help="Add 'NAME' = 'VAL' to the user defined parameters",
                      type=str, action="append", default=[])

    group = OptionGroup(parser, "Output Format")
    # FIXME: I find these names very confusing, although I like the
    # functionality.
    group.add_option("-q", "--quiet", dest="quiet",
                     help="Suppress no error output",
                     action="store_true", default=False)
    group.add_option("-s", "--succinct", dest="succinct",
                     help="Reduce amount of output",
                     action="store_true", default=False)
    group.add_option("-v", "--verbose", dest="showOutput",
                     help="Show all test output",
                     action="store_true", default=False)
    group.add_option("", "--no-progress-bar", dest="useProgressBar",
                     help="Do not use curses based progress bar",
                     action="store_false", default=True)
    parser.add_option_group(group)

    group = OptionGroup(parser, "Test Execution")
    group.add_option("", "--path", dest="path",
                     help="Additional paths to add to testing environment",
                     action="append", type=str, default=[])
    group.add_option("", "--vg", dest="useValgrind",
                     help="Run tests under valgrind",
                     action="store_true", default=False)
    group.add_option("", "--vg-leak", dest="valgrindLeakCheck",
                     help="Check for memory leaks under valgrind",
                     action="store_true", default=False)
    group.add_option("", "--vg-arg", dest="valgrindArgs", metavar="ARG",
                     help="Specify an extra argument for valgrind",
                     type=str, action="append", default=[])
    group.add_option("", "--time-tests", dest="timeTests",
                     help="Track elapsed wall time for each test",
                     action="store_true", default=False)
    parser.add_option_group(group)

    group = OptionGroup(parser, "Test Selection")
    group.add_option("", "--max-tests", dest="maxTests", metavar="N",
                     help="Maximum number of tests to run",
                     action="store", type=int, default=None)
    group.add_option("", "--max-time", dest="maxTime", metavar="N",
                     help="Maximum time to spend testing (in seconds)",
                     action="store", type=float, default=None)
    group.add_option("", "--shuffle", dest="shuffle",
                     help="Run tests in random order",
                     action="store_true", default=False)
    group.add_option("", "--filter", dest="filter", metavar="REGEX",
                     help=("Only run tests with paths matching the given "
                           "regular expression"),
                     action="store", default=None)
    parser.add_option_group(group)

    group = OptionGroup(parser, "Debug and Experimental Options")
    group.add_option("", "--debug", dest="debug",
                     help="Enable debugging (for 'lit' development)",
                     action="store_true", default=False)
    group.add_option("", "--show-suites", dest="showSuites",
                     help="Show discovered test suites",
                     action="store_true", default=False)
    group.add_option("", "--show-tests", dest="showTests",
                     help="Show all discovered tests",
                     action="store_true", default=False)
    group.add_option("", "--repeat", dest="repeatTests", metavar="N",
                     help="Repeat tests N times (for timing)",
                     action="store", default=None, type=int)
    parser.add_option_group(group)

    (opts, args) = parser.parse_args()

    if not args:
        parser.error('No inputs specified')

    if opts.numThreads is None:
        # Python <2.5 has a race condition causing lit to always fail with
        # numThreads>1 (http://bugs.python.org/issue1731717).  It has not
        # been seen with 2.5.2 and later, so only enable multiple threads by
        # default there.
        if sys.hexversion >= 0x2050200:
            opts.numThreads = Util.detectCPUs()
        else:
            opts.numThreads = 1

    inputs = args

    # Create the user defined parameters.
    userParams = dict(builtinParameters)
    for entry in opts.userParameters:
        if '=' not in entry:
            name, val = entry, ''
        else:
            name, val = entry.split('=', 1)
        userParams[name] = val

    # Create the global config object.
    litConfig = LitConfig.LitConfig(progname = os.path.basename(sys.argv[0]),
                                    path = opts.path,
                                    quiet = opts.quiet,
                                    useValgrind = opts.useValgrind,
                                    valgrindLeakCheck = opts.valgrindLeakCheck,
                                    valgrindArgs = opts.valgrindArgs,
                                    debug = opts.debug,
                                    isWindows = (platform.system()=='Windows'),
                                    params = userParams,
                                    config_prefix = opts.configPrefix)

    tests = lit.discovery.find_tests_for_inputs(litConfig, inputs)

    if opts.showSuites or opts.showTests:
        # Aggregate the tests by suite.
        suitesAndTests = {}
        for t in tests:
            if t.suite not in suitesAndTests:
                suitesAndTests[t.suite] = []
            suitesAndTests[t.suite].append(t)
        # sorted() instead of items()+list.sort(): dict.items() is a view on
        # Python 3 and has no sort method.
        suitesAndTests = sorted(suitesAndTests.items(),
                                key = lambda item: item[0].name)

        # Show the suites, if requested.
        if opts.showSuites:
            print('-- Test Suites --')
            for ts, ts_tests in suitesAndTests:
                print(' %s - %d tests' % (ts.name, len(ts_tests)))
                print(' Source Root: %s' % ts.source_root)
                print(' Exec Root : %s' % ts.exec_root)

        # Show the tests, if requested.
        if opts.showTests:
            print('-- Available Tests --')
            for ts, ts_tests in suitesAndTests:
                ts_tests.sort(key = lambda test: test.path_in_suite)
                for test in ts_tests:
                    print(' %s' % (test.getFullName(),))

    # Select and order the tests.
    numTotalTests = len(tests)

    # First, select based on the filter expression if given.
    if opts.filter:
        try:
            rex = re.compile(opts.filter)
        except re.error:
            # was a bare 'except:'; only a bad pattern should trigger this.
            parser.error("invalid regular expression for --filter: %r" % (
                opts.filter))
        tests = [t for t in tests
                 if rex.search(t.getFullName())]

    # Then select the order.
    if opts.shuffle:
        random.shuffle(tests)
    else:
        tests.sort(key = lambda t: t.getFullName())

    # Finally limit the number of tests, if desired.
    if opts.maxTests is not None:
        tests = tests[:opts.maxTests]

    # Don't create more threads than tests.
    opts.numThreads = min(len(tests), opts.numThreads)

    extra = ''
    if len(tests) != numTotalTests:
        extra = ' of %d' % numTotalTests
    header = '-- Testing: %d%s tests, %d threads --'%(len(tests),extra,
                                                      opts.numThreads)

    if opts.repeatTests:
        tests = [t.copyWithIndex(i)
                 for t in tests
                 for i in range(opts.repeatTests)]

    progressBar = None
    if not opts.quiet:
        if opts.succinct and opts.useProgressBar:
            try:
                tc = ProgressBar.TerminalController()
                progressBar = ProgressBar.ProgressBar(tc, header)
            except ValueError:
                # No capable terminal; fall back to a simple textual bar.
                print(header)
                progressBar = ProgressBar.SimpleProgressBar('Testing: ')
        else:
            print(header)

    startTime = time.time()
    display = TestingProgressDisplay(opts, len(tests), progressBar)
    provider = TestProvider(tests, opts.maxTime)

    # On Windows, route console Ctrl events to provider.cancel() so workers
    # stop cleanly (win32api is optional; silently skip when unavailable).
    try:
        import win32api
    except ImportError:
        pass
    else:
        def console_ctrl_handler(type):
            provider.cancel()
            return True
        win32api.SetConsoleCtrlHandler(console_ctrl_handler, True)

    runTests(opts.numThreads, litConfig, provider, display)
    display.finish()

    if not opts.quiet:
        print('Testing Time: %.2fs'%(time.time() - startTime))

    # Update results for any tests which weren't run.
    for t in tests:
        if t.result is None:
            t.setResult(Test.UNRESOLVED, '', 0.0)

    # List test results organized by kind.
    hasFailures = False
    byCode = {}
    for t in tests:
        if t.result not in byCode:
            byCode[t.result] = []
        byCode[t.result].append(t)
        if t.result.isFailure:
            hasFailures = True

    # FIXME: Show unresolved and (optionally) unsupported tests.
    for title, code in (('Unexpected Passing Tests', Test.XPASS),
                        ('Failing Tests', Test.FAIL)):
        elts = byCode.get(code)
        if not elts:
            continue
        print('*'*20)
        print('%s (%d):' % (title, len(elts)))
        for t in elts:
            print(' %s' % t.getFullName())
        print('')   # was a bare 'print': a no-op expression on Python 3

    if opts.timeTests:
        # Collate, in case we repeated tests.
        times = {}
        for t in tests:
            key = t.getFullName()
            times[key] = times.get(key, 0.) + t.elapsed

        byTime = list(times.items())
        byTime.sort(key = lambda item: item[1])
        if byTime:
            Util.printHistogram(byTime, title='Tests')

    for name, code in (('Expected Passes ', Test.PASS),
                       ('Expected Failures ', Test.XFAIL),
                       ('Unsupported Tests ', Test.UNSUPPORTED),
                       ('Unresolved Tests ', Test.UNRESOLVED),
                       ('Unexpected Passes ', Test.XPASS),
                       ('Unexpected Failures', Test.FAIL),):
        if opts.quiet and not code.isFailure:
            continue
        N = len(byCode.get(code, []))
        if N:
            print(' %s: %d' % (name, N))

    # If we encountered any additional errors, exit abnormally.
    if litConfig.numErrors:
        # was 'print >>sys.stderr, ...' (Python 2-only syntax).
        sys.stderr.write('\n%d error(s), exiting.\n' % litConfig.numErrors)
        sys.exit(2)

    # Warn about warnings.
    if litConfig.numWarnings:
        sys.stderr.write('\n%d warning(s) in tests.\n' % litConfig.numWarnings)

    if hasFailures:
        sys.exit(1)
    sys.exit(0)

if __name__ == '__main__':
    main()