main.py revision 41169f2a0e341f48d71acc0997ec954ee70289fd
#!/usr/bin/env python

"""
lit - LLVM Integrated Tester.

See lit.pod for more information.
"""

import math, os, platform, random, re, sys, time, threading, traceback

import ProgressBar
import TestRunner
import Util

from TestingConfig import TestingConfig
import LitConfig
import Test

# Configuration files to look for when discovering test suites. These can be
# overridden with --config-prefix.
#
# FIXME: Rename to 'config.lit', 'site.lit', and 'local.lit' ?
gConfigName = 'lit.cfg'
gSiteConfigName = 'lit.site.cfg'

kLocalConfigName = 'lit.local.cfg'

class TestingProgressDisplay:
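    """Render test results as they complete, across multiple worker threads.

    update() serializes output with a lock (skipped in quiet mode, where
    passing tests produce no output) so that per-test result lines and any
    failure output are not interleaved between threads.
    """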
    def __init__(self, opts, numTests, progressBar=None):
        self.opts = opts
        self.numTests = numTests
        self.current = None
        self.lock = threading.Lock()
        self.progressBar = progressBar
        self.completed = 0

    def update(self, test):
        # Avoid locking overhead in quiet mode
        if self.opts.quiet and not test.result.isFailure:
            self.completed += 1
            return

        # Output lock.
        self.lock.acquire()
        try:
            self.handleUpdate(test)
        finally:
            self.lock.release()

    def finish(self):
        if self.progressBar:
            self.progressBar.clear()
        elif self.opts.quiet:
            pass
        elif self.opts.succinct:
            sys.stdout.write('\n')

    def handleUpdate(self, test):
        self.completed += 1
        if self.progressBar:
            self.progressBar.update(float(self.completed)/self.numTests,
                                    test.getFullName())

        if self.opts.succinct and not test.result.isFailure:
            return

        if self.progressBar:
            self.progressBar.clear()

        print '%s: %s (%d of %d)' % (test.result.name, test.getFullName(),
                                     self.completed, self.numTests)

        if test.result.isFailure and self.opts.showOutput:
            print "%s TEST '%s' FAILED %s" % ('*'*20, test.getFullName(),
                                              '*'*20)
            print test.output
            print "*" * 20

        sys.stdout.flush()

class TestProvider:
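    """Hand out tests to Tester threads, one at a time.

    get() may be called concurrently; it also enforces the --max-time
    deadline by returning None once maxTime seconds have elapsed, which
    tells the calling thread to stop.
    """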
    def __init__(self, tests, maxTime):
        self.maxTime = maxTime
        self.iter = iter(tests)
        self.lock = threading.Lock()
        self.startTime = time.time()

    def get(self):
        # Check if we have run out of time.
        if self.maxTime is not None:
            if time.time() - self.startTime > self.maxTime:
                return None

        # Otherwise take the next test.
        self.lock.acquire()
        try:
            try:
                item = self.iter.next()
            except StopIteration:
                item = None
        finally:
            # Release the lock even if iter.next() raises something other
            # than StopIteration.
            self.lock.release()
        return item

class Tester(threading.Thread):
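    """Worker thread: pulls tests from a TestProvider, runs each one through
    its suite's test format, and reports the result to the display."""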
    def __init__(self, litConfig, provider, display):
        threading.Thread.__init__(self)
        self.litConfig = litConfig
        self.provider = provider
        self.display = display

    def run(self):
        while 1:
            item = self.provider.get()
            if item is None:
                break
            self.runTest(item)

    def runTest(self, test):
        result = None
        startTime = time.time()
        try:
            result, output = test.config.test_format.execute(test,
                                                             self.litConfig)
        except KeyboardInterrupt:
            # This is a sad hack. Unfortunately subprocess goes
            # bonkers with ctrl-c and we start forking merrily.
            print '\nCtrl-C detected, goodbye.'
            os.kill(0,9)
        except:
            if self.litConfig.debug:
                raise
            result = Test.UNRESOLVED
            output = 'Exception during script execution:\n'
            output += traceback.format_exc()
            output += '\n'
        elapsed = time.time() - startTime

        test.setResult(result, output, elapsed)
        self.display.update(test)

def dirContainsTestSuite(path):
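    """Return the config file path if 'path' is a test suite root, i.e. it
    contains a site config or a lit config; otherwise return None."""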
    cfgpath = os.path.join(path, gSiteConfigName)
    if os.path.exists(cfgpath):
        return cfgpath
    cfgpath = os.path.join(path, gConfigName)
    if os.path.exists(cfgpath):
        return cfgpath

def getTestSuite(item, litConfig, cache):
    """getTestSuite(item, litConfig, cache) -> (suite, relative_path)

    Find the test suite containing @arg item.

    @retval (None, ...) - Indicates no test suite contains @arg item.
    @retval (suite, relative_path) - The suite that @arg item is in, and its
    relative path inside that suite.
    """
    def search1(path):
        # Check for a site config or a lit config.
        cfgpath = dirContainsTestSuite(path)

        # If we didn't find a config file, keep looking.
        if not cfgpath:
            parent,base = os.path.split(path)
            if parent == path:
                return (None, ())

            ts, relative = search(parent)
            return (ts, relative + (base,))

        # We found a config file, load it.
        if litConfig.debug:
            litConfig.note('loading suite config %r' % cfgpath)

        cfg = TestingConfig.frompath(cfgpath, None, litConfig, mustExist = True)
        source_root = os.path.realpath(cfg.test_source_root or path)
        exec_root = os.path.realpath(cfg.test_exec_root or path)
        return Test.TestSuite(cfg.name, source_root, exec_root, cfg), ()

    def search(path):
        # Check for an already instantiated test suite.
        res = cache.get(path)
        if res is None:
            cache[path] = res = search1(path)
        return res

    # Canonicalize the path.
    item = os.path.realpath(item)

    # Skip files and virtual components.
    components = []
    while not os.path.isdir(item):
        parent,base = os.path.split(item)
        if parent == item:
            return (None, ())
        components.append(base)
        item = parent
    components.reverse()

    ts, relative = search(item)
    return ts, tuple(relative + tuple(components))

def getLocalConfig(ts, path_in_suite, litConfig, cache):
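    """Return the local configuration (kLocalConfigName) for a directory in
    the test suite, refining a clone of the parent directory's config.
    Results are memoized in 'cache', keyed by (suite, path_in_suite)."""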
    def search1(path_in_suite):
        # Get the parent config.
        if not path_in_suite:
            parent = ts.config
        else:
            parent = search(path_in_suite[:-1])

        # Load the local configuration.
        source_path = ts.getSourcePath(path_in_suite)
        cfgpath = os.path.join(source_path, kLocalConfigName)
        if litConfig.debug:
            litConfig.note('loading local config %r' % cfgpath)
        return TestingConfig.frompath(cfgpath, parent, litConfig,
                                      mustExist = False,
                                      config = parent.clone(cfgpath))

    def search(path_in_suite):
        key = (ts, path_in_suite)
        res = cache.get(key)
        if res is None:
            cache[key] = res = search1(path_in_suite)
        return res

    return search(path_in_suite)

def getTests(path, litConfig, testSuiteCache, localConfigCache):
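    """Resolve 'path' to its test suite and return (suite, test iterator).

    Returns ((), ()) and emits a warning if no suite contains 'path'."""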
    # Find the test suite for this input and its relative path.
    ts,path_in_suite = getTestSuite(path, litConfig, testSuiteCache)
    if ts is None:
        litConfig.warning('unable to find test suite for %r' % path)
        return (),()

    if litConfig.debug:
        litConfig.note('resolved input %r to %r::%r' % (path, ts.name,
                                                        path_in_suite))

    return ts, getTestsInSuite(ts, path_in_suite, litConfig,
                               testSuiteCache, localConfigCache)

def getTestsInSuite(ts, path_in_suite, litConfig,
                    testSuiteCache, localConfigCache):
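    """Generator yielding the tests under path_in_suite.

    A file path yields a single test; a directory loads its local config,
    asks the test format for the tests it contains, and then recurses into
    subdirectories, switching suites when a nested suite's config is found.
    """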
    # Check that the source path exists (errors here are reported by the
    # caller).
    source_path = ts.getSourcePath(path_in_suite)
    if not os.path.exists(source_path):
        return

    # Check if the user named a test directly.
    if not os.path.isdir(source_path):
        lc = getLocalConfig(ts, path_in_suite[:-1], litConfig, localConfigCache)
        yield Test.Test(ts, path_in_suite, lc)
        return

    # Otherwise we have a directory to search for tests, start by getting the
    # local configuration.
    lc = getLocalConfig(ts, path_in_suite, litConfig, localConfigCache)

    # Search for tests.
    if lc.test_format is not None:
        for res in lc.test_format.getTestsInDirectory(ts, path_in_suite,
                                                      litConfig, lc):
            yield res

    # Search subdirectories.
    for filename in os.listdir(source_path):
        # FIXME: This doesn't belong here?
        if filename in ('Output', '.svn') or filename in lc.excludes:
            continue

        # Ignore non-directories.
        file_sourcepath = os.path.join(source_path, filename)
        if not os.path.isdir(file_sourcepath):
            continue

        # Check for nested test suites, first in the execpath in case there is a
        # site configuration and then in the source path.
        file_execpath = ts.getExecPath(path_in_suite + (filename,))
        if dirContainsTestSuite(file_execpath):
            sub_ts, subiter = getTests(file_execpath, litConfig,
                                       testSuiteCache, localConfigCache)
        elif dirContainsTestSuite(file_sourcepath):
            sub_ts, subiter = getTests(file_sourcepath, litConfig,
                                       testSuiteCache, localConfigCache)
        else:
            # Otherwise, continue loading from inside this test suite.
            subiter = getTestsInSuite(ts, path_in_suite + (filename,),
                                      litConfig, testSuiteCache,
                                      localConfigCache)
            sub_ts = None

        N = 0
        for res in subiter:
            N += 1
            yield res
        if sub_ts and not N:
            litConfig.warning('test suite %r contained no tests' % sub_ts.name)

def runTests(numThreads, litConfig, provider, display):
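    """Run every test handed out by 'provider' on 'numThreads' threads,
    reporting results through 'display'."""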
    # If only using one testing thread, don't use threads at all; this lets us
    # profile, among other things.
    if numThreads == 1:
        t = Tester(litConfig, provider, display)
        t.run()
        return

    # Otherwise spin up the testing threads and wait for them to finish.
    testers = [Tester(litConfig, provider, display)
               for i in range(numThreads)]
    for t in testers:
        t.start()
    try:
        for t in testers:
            t.join()
    except KeyboardInterrupt:
        sys.exit(2)

def load_test_suite(inputs):
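    """Discover the lit tests under 'inputs' and wrap each one in a
    LitTestCase, returning a unittest.TestSuite that runs them in order."""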
    import unittest

    # Create the global config object.
    litConfig = LitConfig.LitConfig(progname = 'lit',
                                    path = [],
                                    quiet = False,
                                    useValgrind = False,
                                    valgrindLeakCheck = False,
                                    valgrindArgs = [],
                                    noExecute = False,
                                    ignoreStdErr = False,
                                    debug = False,
                                    isWindows = (platform.system()=='Windows'),
                                    params = {})

    # Load the tests from the inputs.
    tests = []
    testSuiteCache = {}
    localConfigCache = {}
    for input in inputs:
        prev = len(tests)
        tests.extend(getTests(input, litConfig,
                              testSuiteCache, localConfigCache)[1])
        if prev == len(tests):
            litConfig.warning('input %r contained no tests' % input)

    # If there were any errors during test discovery, exit now.
    if litConfig.numErrors:
        print >>sys.stderr, '%d errors, exiting.' % litConfig.numErrors
        sys.exit(2)

    # Return a unittest test suite which just runs the tests in order.
    def get_test_fn(test):
        return unittest.FunctionTestCase(
            lambda: test.config.test_format.execute(
                test, litConfig),
            description = test.getFullName())

    from LitTestCase import LitTestCase
    return unittest.TestSuite([LitTestCase(test, litConfig) for test in tests])

def main(builtinParameters = {}):
    # Bump the GIL check interval; it's more important to get any one thread to a
    # blocking operation (hopefully exec) than to try and unblock other threads.
    #
    # FIXME: This is a hack.
    import sys
    sys.setcheckinterval(1000)

    global options
    from optparse import OptionParser, OptionGroup
    parser = OptionParser("usage: %prog [options] {file-or-path}")

    parser.add_option("-j", "--threads", dest="numThreads", metavar="N",
                      help="Number of testing threads",
                      type=int, action="store", default=None)
    parser.add_option("", "--config-prefix", dest="configPrefix",
                      metavar="NAME", help="Prefix for 'lit' config files",
                      action="store", default=None)
    parser.add_option("", "--param", dest="userParameters",
                      metavar="NAME=VAL",
                      help="Add 'NAME' = 'VAL' to the user defined parameters",
                      type=str, action="append", default=[])

    group = OptionGroup(parser, "Output Format")
    # FIXME: I find these names very confusing, although I like the
    # functionality.
    group.add_option("-q", "--quiet", dest="quiet",
                     help="Suppress all output except for test failures",
                     action="store_true", default=False)
    group.add_option("-s", "--succinct", dest="succinct",
                     help="Reduce amount of output",
                     action="store_true", default=False)
    group.add_option("-v", "--verbose", dest="showOutput",
                     help="Show all test output",
                     action="store_true", default=False)
    group.add_option("", "--no-progress-bar", dest="useProgressBar",
                     help="Do not use curses based progress bar",
                     action="store_false", default=True)
    parser.add_option_group(group)

    group = OptionGroup(parser, "Test Execution")
    group.add_option("", "--path", dest="path",
                     help="Additional paths to add to testing environment",
                     action="append", type=str, default=[])
    group.add_option("", "--vg", dest="useValgrind",
                     help="Run tests under valgrind",
                     action="store_true", default=False)
    group.add_option("", "--vg-leak", dest="valgrindLeakCheck",
                     help="Check for memory leaks under valgrind",
                     action="store_true", default=False)
    group.add_option("", "--vg-arg", dest="valgrindArgs", metavar="ARG",
                     help="Specify an extra argument for valgrind",
                     type=str, action="append", default=[])
    group.add_option("", "--time-tests", dest="timeTests",
                     help="Track elapsed wall time for each test",
                     action="store_true", default=False)
    group.add_option("", "--no-execute", dest="noExecute",
                     help="Don't execute any tests (assume PASS)",
                     action="store_true", default=False)
    parser.add_option_group(group)

    group = OptionGroup(parser, "Test Selection")
    group.add_option("", "--max-tests", dest="maxTests", metavar="N",
                     help="Maximum number of tests to run",
                     action="store", type=int, default=None)
    group.add_option("", "--max-time", dest="maxTime", metavar="N",
                     help="Maximum time to spend testing (in seconds)",
                     action="store", type=float, default=None)
    group.add_option("", "--shuffle", dest="shuffle",
                     help="Run tests in random order",
                     action="store_true", default=False)
    group.add_option("", "--filter", dest="filter", metavar="EXPRESSION",
                     help=("Only run tests with paths matching the given "
                           "regular expression"),
                     action="store", default=None)
    parser.add_option_group(group)

    group = OptionGroup(parser, "Debug and Experimental Options")
    group.add_option("", "--debug", dest="debug",
                     help="Enable debugging (for 'lit' development)",
                     action="store_true", default=False)
    group.add_option("", "--show-suites", dest="showSuites",
                     help="Show discovered test suites",
                     action="store_true", default=False)
    group.add_option("", "--repeat", dest="repeatTests", metavar="N",
                     help="Repeat tests N times (for timing)",
                     action="store", default=None, type=int)
    parser.add_option_group(group)

    (opts, args) = parser.parse_args()

    if not args:
        parser.error('No inputs specified')

    if opts.configPrefix is not None:
        global gConfigName, gSiteConfigName, kLocalConfigName
        gConfigName = '%s.cfg' % opts.configPrefix
        gSiteConfigName = '%s.site.cfg' % opts.configPrefix
        kLocalConfigName = '%s.local.cfg' % opts.configPrefix

    if opts.numThreads is None:
        # Python <2.5 has a race condition causing lit to always fail with
        # numThreads>1: http://bugs.python.org/issue1731717
        # I haven't seen this bug occur with 2.5.2 and later, so only enable
        # multiple threads by default there.
        if sys.hexversion >= 0x2050200:
            opts.numThreads = Util.detectCPUs()
        else:
            opts.numThreads = 1

    inputs = args

    # Create the user defined parameters.
    userParams = dict(builtinParameters)
    for entry in opts.userParameters:
        if '=' not in entry:
            name,val = entry,''
        else:
            name,val = entry.split('=', 1)
        userParams[name] = val

    # Create the global config object.
    litConfig = LitConfig.LitConfig(progname = os.path.basename(sys.argv[0]),
                                    path = opts.path,
                                    quiet = opts.quiet,
                                    useValgrind = opts.useValgrind,
                                    valgrindLeakCheck = opts.valgrindLeakCheck,
                                    valgrindArgs = opts.valgrindArgs,
                                    noExecute = opts.noExecute,
                                    ignoreStdErr = False,
                                    debug = opts.debug,
                                    isWindows = (platform.system()=='Windows'),
                                    params = userParams)

    # Expand '@...' form in inputs.
    actual_inputs = []
    for input in inputs:
        if os.path.exists(input) or not input.startswith('@'):
            actual_inputs.append(input)
        else:
            f = open(input[1:])
            try:
                for ln in f:
                    ln = ln.strip()
                    if ln:
                        actual_inputs.append(ln)
            finally:
                f.close()

    # Load the tests from the inputs.
    tests = []
    testSuiteCache = {}
    localConfigCache = {}
    for input in actual_inputs:
        prev = len(tests)
        tests.extend(getTests(input, litConfig,
                              testSuiteCache, localConfigCache)[1])
        if prev == len(tests):
            litConfig.warning('input %r contained no tests' % input)

    # If there were any errors during test discovery, exit now.
    if litConfig.numErrors:
        print >>sys.stderr, '%d errors, exiting.' % litConfig.numErrors
        sys.exit(2)

    if opts.showSuites:
        suitesAndTests = dict([(ts,[])
                               for ts,_ in testSuiteCache.values()
                               if ts])
        for t in tests:
            suitesAndTests[t.suite].append(t)

        print '-- Test Suites --'
        suitesAndTests = suitesAndTests.items()
        suitesAndTests.sort(key = lambda (ts,_): ts.name)
        for ts,ts_tests in suitesAndTests:
            print '  %s - %d tests' %(ts.name, len(ts_tests))
            print '    Source Root: %s' % ts.source_root
            print '    Exec Root  : %s' % ts.exec_root

    # Select and order the tests.
    numTotalTests = len(tests)

    # First, select based on the filter expression if given.
    if opts.filter:
        try:
            rex = re.compile(opts.filter)
        except:
            parser.error("invalid regular expression for --filter: %r" % (
                    opts.filter))
        tests = [t for t in tests
                 if rex.search(t.getFullName())]

    # Then select the order.
    if opts.shuffle:
        random.shuffle(tests)
    else:
        tests.sort(key = lambda t: t.getFullName())

    # Finally limit the number of tests, if desired.
    if opts.maxTests is not None:
        tests = tests[:opts.maxTests]

    # Don't create more threads than tests.
    opts.numThreads = min(len(tests), opts.numThreads)

    extra = ''
    if len(tests) != numTotalTests:
        extra = ' of %d' % numTotalTests
    header = '-- Testing: %d%s tests, %d threads --'%(len(tests),extra,
                                                      opts.numThreads)

    if opts.repeatTests:
        tests = [t.copyWithIndex(i)
                 for t in tests
                 for i in range(opts.repeatTests)]

    progressBar = None
    if not opts.quiet:
        if opts.succinct and opts.useProgressBar:
            try:
                tc = ProgressBar.TerminalController()
                progressBar = ProgressBar.ProgressBar(tc, header)
            except ValueError:
                print header
                progressBar = ProgressBar.SimpleProgressBar('Testing: ')
        else:
            print header

    startTime = time.time()
    display = TestingProgressDisplay(opts, len(tests), progressBar)
    provider = TestProvider(tests, opts.maxTime)
    runTests(opts.numThreads, litConfig, provider, display)
    display.finish()

    if not opts.quiet:
        print 'Testing Time: %.2fs'%(time.time() - startTime)

    # Update results for any tests which weren't run.
    for t in tests:
        if t.result is None:
            t.setResult(Test.UNRESOLVED, '', 0.0)

    # List test results organized by kind.
    hasFailures = False
    byCode = {}
    for t in tests:
        if t.result not in byCode:
            byCode[t.result] = []
        byCode[t.result].append(t)
        if t.result.isFailure:
            hasFailures = True

    # FIXME: Show unresolved and (optionally) unsupported tests.
    for title,code in (('Unexpected Passing Tests', Test.XPASS),
                       ('Failing Tests', Test.FAIL)):
        elts = byCode.get(code)
        if not elts:
            continue
        print '*'*20
        print '%s (%d):' % (title, len(elts))
        for t in elts:
            print '    %s' % t.getFullName()
        print

    if opts.timeTests:
        # Collate, in case we repeated tests.
        times = {}
        for t in tests:
            key = t.getFullName()
            times[key] = times.get(key, 0.) + t.elapsed

        byTime = list(times.items())
        byTime.sort(key = lambda (name,elapsed): elapsed)
        if byTime:
            Util.printHistogram(byTime, title='Tests')

    for name,code in (('Expected Passes    ', Test.PASS),
                      ('Expected Failures  ', Test.XFAIL),
                      ('Unsupported Tests  ', Test.UNSUPPORTED),
                      ('Unresolved Tests   ', Test.UNRESOLVED),
                      ('Unexpected Passes  ', Test.XPASS),
                      ('Unexpected Failures', Test.FAIL),):
        if opts.quiet and not code.isFailure:
            continue
        N = len(byCode.get(code,[]))
        if N:
            print '  %s: %d' % (name,N)

    # If we encountered any additional errors, exit abnormally.
    if litConfig.numErrors:
        print >>sys.stderr, '\n%d error(s), exiting.' % litConfig.numErrors
        sys.exit(2)

    # Warn about warnings.
    if litConfig.numWarnings:
        print >>sys.stderr, '\n%d warning(s) in tests.' % litConfig.numWarnings

    if hasFailures:
        sys.exit(1)
    sys.exit(0)

if __name__=='__main__':
    main()