1#!/usr/bin/env python
2
3"""
4lit - LLVM Integrated Tester.
5
6See lit.pod for more information.
7"""
8
9from __future__ import absolute_import
10import math, os, platform, random, re, sys, time
11
12import lit.ProgressBar
13import lit.LitConfig
14import lit.Test
15import lit.run
16import lit.util
17import lit.discovery
18
19class TestingProgressDisplay(object):
20    def __init__(self, opts, numTests, progressBar=None):
21        self.opts = opts
22        self.numTests = numTests
23        self.current = None
24        self.progressBar = progressBar
25        self.completed = 0
26
27    def finish(self):
28        if self.progressBar:
29            self.progressBar.clear()
30        elif self.opts.quiet:
31            pass
32        elif self.opts.succinct:
33            sys.stdout.write('\n')
34
35    def update(self, test):
36        self.completed += 1
37
38        if self.opts.incremental:
39            update_incremental_cache(test)
40
41        if self.progressBar:
42            self.progressBar.update(float(self.completed)/self.numTests,
43                                    test.getFullName())
44
45        if not test.result.code.isFailure and \
46                (self.opts.quiet or self.opts.succinct):
47            return
48
49        if self.progressBar:
50            self.progressBar.clear()
51
52        # Show the test result line.
53        test_name = test.getFullName()
54        print('%s: %s (%d of %d)' % (test.result.code.name, test_name,
55                                     self.completed, self.numTests))
56
57        # Show the test failure output, if requested.
58        if test.result.code.isFailure and self.opts.showOutput:
59            print("%s TEST '%s' FAILED %s" % ('*'*20, test.getFullName(),
60                                              '*'*20))
61            print(test.result.output)
62            print("*" * 20)
63
64        # Report test metrics, if present.
65        if test.result.metrics:
66            print("%s TEST '%s' RESULTS %s" % ('*'*10, test.getFullName(),
67                                               '*'*10))
68            items = sorted(test.result.metrics.items())
69            for metric_name, value in items:
70                print('%s: %s ' % (metric_name, value.format()))
71            print("*" * 10)
72
73        # Ensure the output is flushed.
74        sys.stdout.flush()
75
76def write_test_results(run, lit_config, testing_time, output_path):
77    try:
78        import json
79    except ImportError:
80        lit_config.fatal('test output unsupported with Python 2.5')
81
82    # Construct the data we will write.
83    data = {}
84    # Encode the current lit version as a schema version.
85    data['__version__'] = lit.__versioninfo__
86    data['elapsed'] = testing_time
87    # FIXME: Record some information on the lit configuration used?
88    # FIXME: Record information from the individual test suites?
89
90    # Encode the tests.
91    data['tests'] = tests_data = []
92    for test in run.tests:
93        test_data = {
94            'name' : test.getFullName(),
95            'code' : test.result.code.name,
96            'output' : test.result.output,
97            'elapsed' : test.result.elapsed }
98
99        # Add test metrics, if present.
100        if test.result.metrics:
101            test_data['metrics'] = metrics_data = {}
102            for key, value in test.result.metrics.items():
103                metrics_data[key] = value.todata()
104
105        tests_data.append(test_data)
106
107    # Write the output.
108    f = open(output_path, 'w')
109    try:
110        json.dump(data, f, indent=2, sort_keys=True)
111        f.write('\n')
112    finally:
113        f.close()
114
115def update_incremental_cache(test):
116    if not test.result.code.isFailure:
117        return
118    fname = test.getFilePath()
119    os.utime(fname, None)
120
121def sort_by_incremental_cache(run):
122    def sortIndex(test):
123        fname = test.getFilePath()
124        try:
125            return -os.path.getmtime(fname)
126        except:
127            return 0
128    run.tests.sort(key = lambda t: sortIndex(t))
129
130def main(builtinParameters = {}):
131    # Use processes by default on Unix platforms.
132    isWindows = platform.system() == 'Windows'
133    useProcessesIsDefault = not isWindows
134
135    global options
136    from optparse import OptionParser, OptionGroup
137    parser = OptionParser("usage: %prog [options] {file-or-path}")
138
139    parser.add_option("", "--version", dest="show_version",
140                      help="Show version and exit",
141                      action="store_true", default=False)
142    parser.add_option("-j", "--threads", dest="numThreads", metavar="N",
143                      help="Number of testing threads",
144                      type=int, action="store", default=None)
145    parser.add_option("", "--config-prefix", dest="configPrefix",
146                      metavar="NAME", help="Prefix for 'lit' config files",
147                      action="store", default=None)
148    parser.add_option("", "--param", dest="userParameters",
149                      metavar="NAME=VAL",
150                      help="Add 'NAME' = 'VAL' to the user defined parameters",
151                      type=str, action="append", default=[])
152
153    group = OptionGroup(parser, "Output Format")
154    # FIXME: I find these names very confusing, although I like the
155    # functionality.
156    group.add_option("-q", "--quiet", dest="quiet",
157                     help="Suppress no error output",
158                     action="store_true", default=False)
159    group.add_option("-s", "--succinct", dest="succinct",
160                     help="Reduce amount of output",
161                     action="store_true", default=False)
162    group.add_option("-v", "--verbose", dest="showOutput",
163                     help="Show all test output",
164                     action="store_true", default=False)
165    group.add_option("-o", "--output", dest="output_path",
166                     help="Write test results to the provided path",
167                     action="store", type=str, metavar="PATH")
168    group.add_option("", "--no-progress-bar", dest="useProgressBar",
169                     help="Do not use curses based progress bar",
170                     action="store_false", default=True)
171    parser.add_option_group(group)
172
173    group = OptionGroup(parser, "Test Execution")
174    group.add_option("", "--path", dest="path",
175                     help="Additional paths to add to testing environment",
176                     action="append", type=str, default=[])
177    group.add_option("", "--vg", dest="useValgrind",
178                     help="Run tests under valgrind",
179                     action="store_true", default=False)
180    group.add_option("", "--vg-leak", dest="valgrindLeakCheck",
181                     help="Check for memory leaks under valgrind",
182                     action="store_true", default=False)
183    group.add_option("", "--vg-arg", dest="valgrindArgs", metavar="ARG",
184                     help="Specify an extra argument for valgrind",
185                     type=str, action="append", default=[])
186    group.add_option("", "--time-tests", dest="timeTests",
187                     help="Track elapsed wall time for each test",
188                     action="store_true", default=False)
189    group.add_option("", "--no-execute", dest="noExecute",
190                     help="Don't execute any tests (assume PASS)",
191                     action="store_true", default=False)
192    parser.add_option_group(group)
193
194    group = OptionGroup(parser, "Test Selection")
195    group.add_option("", "--max-tests", dest="maxTests", metavar="N",
196                     help="Maximum number of tests to run",
197                     action="store", type=int, default=None)
198    group.add_option("", "--max-time", dest="maxTime", metavar="N",
199                     help="Maximum time to spend testing (in seconds)",
200                     action="store", type=float, default=None)
201    group.add_option("", "--shuffle", dest="shuffle",
202                     help="Run tests in random order",
203                     action="store_true", default=False)
204    group.add_option("-i", "--incremental", dest="incremental",
205                     help="Run modified and failing tests first (updates "
206                     "mtimes)",
207                     action="store_true", default=False)
208    group.add_option("", "--filter", dest="filter", metavar="REGEX",
209                     help=("Only run tests with paths matching the given "
210                           "regular expression"),
211                     action="store", default=None)
212    parser.add_option_group(group)
213
214    group = OptionGroup(parser, "Debug and Experimental Options")
215    group.add_option("", "--debug", dest="debug",
216                      help="Enable debugging (for 'lit' development)",
217                      action="store_true", default=False)
218    group.add_option("", "--show-suites", dest="showSuites",
219                      help="Show discovered test suites",
220                      action="store_true", default=False)
221    group.add_option("", "--show-tests", dest="showTests",
222                      help="Show all discovered tests",
223                      action="store_true", default=False)
224    group.add_option("", "--use-processes", dest="useProcesses",
225                      help="Run tests in parallel with processes (not threads)",
226                      action="store_true", default=useProcessesIsDefault)
227    group.add_option("", "--use-threads", dest="useProcesses",
228                      help="Run tests in parallel with threads (not processes)",
229                      action="store_false", default=useProcessesIsDefault)
230    parser.add_option_group(group)
231
232    (opts, args) = parser.parse_args()
233
234    if opts.show_version:
235        print("lit %s" % (lit.__version__,))
236        return
237
238    if not args:
239        parser.error('No inputs specified')
240
241    if opts.numThreads is None:
242# Python <2.5 has a race condition causing lit to always fail with numThreads>1
243# http://bugs.python.org/issue1731717
244# I haven't seen this bug occur with 2.5.2 and later, so only enable multiple
245# threads by default there.
246       if sys.hexversion >= 0x2050200:
247               opts.numThreads = lit.util.detectCPUs()
248       else:
249               opts.numThreads = 1
250
251    inputs = args
252
253    # Create the user defined parameters.
254    userParams = dict(builtinParameters)
255    for entry in opts.userParameters:
256        if '=' not in entry:
257            name,val = entry,''
258        else:
259            name,val = entry.split('=', 1)
260        userParams[name] = val
261
262    # Create the global config object.
263    litConfig = lit.LitConfig.LitConfig(
264        progname = os.path.basename(sys.argv[0]),
265        path = opts.path,
266        quiet = opts.quiet,
267        useValgrind = opts.useValgrind,
268        valgrindLeakCheck = opts.valgrindLeakCheck,
269        valgrindArgs = opts.valgrindArgs,
270        noExecute = opts.noExecute,
271        debug = opts.debug,
272        isWindows = isWindows,
273        params = userParams,
274        config_prefix = opts.configPrefix)
275
276    # Perform test discovery.
277    run = lit.run.Run(litConfig,
278                      lit.discovery.find_tests_for_inputs(litConfig, inputs))
279
280    if opts.showSuites or opts.showTests:
281        # Aggregate the tests by suite.
282        suitesAndTests = {}
283        for t in run.tests:
284            if t.suite not in suitesAndTests:
285                suitesAndTests[t.suite] = []
286            suitesAndTests[t.suite].append(t)
287        suitesAndTests = list(suitesAndTests.items())
288        suitesAndTests.sort(key = lambda item: item[0].name)
289
290        # Show the suites, if requested.
291        if opts.showSuites:
292            print('-- Test Suites --')
293            for ts,ts_tests in suitesAndTests:
294                print('  %s - %d tests' %(ts.name, len(ts_tests)))
295                print('    Source Root: %s' % ts.source_root)
296                print('    Exec Root  : %s' % ts.exec_root)
297
298        # Show the tests, if requested.
299        if opts.showTests:
300            print('-- Available Tests --')
301            for ts,ts_tests in suitesAndTests:
302                ts_tests.sort(key = lambda test: test.path_in_suite)
303                for test in ts_tests:
304                    print('  %s' % (test.getFullName(),))
305
306        # Exit.
307        sys.exit(0)
308
309    # Select and order the tests.
310    numTotalTests = len(run.tests)
311
312    # First, select based on the filter expression if given.
313    if opts.filter:
314        try:
315            rex = re.compile(opts.filter)
316        except:
317            parser.error("invalid regular expression for --filter: %r" % (
318                    opts.filter))
319        run.tests = [t for t in run.tests
320                     if rex.search(t.getFullName())]
321
322    # Then select the order.
323    if opts.shuffle:
324        random.shuffle(run.tests)
325    elif opts.incremental:
326        sort_by_incremental_cache(run)
327    else:
328        run.tests.sort(key = lambda t: t.getFullName())
329
330    # Finally limit the number of tests, if desired.
331    if opts.maxTests is not None:
332        run.tests = run.tests[:opts.maxTests]
333
334    # Don't create more threads than tests.
335    opts.numThreads = min(len(run.tests), opts.numThreads)
336
337    extra = ''
338    if len(run.tests) != numTotalTests:
339        extra = ' of %d' % numTotalTests
340    header = '-- Testing: %d%s tests, %d threads --'%(len(run.tests), extra,
341                                                      opts.numThreads)
342
343    progressBar = None
344    if not opts.quiet:
345        if opts.succinct and opts.useProgressBar:
346            try:
347                tc = lit.ProgressBar.TerminalController()
348                progressBar = lit.ProgressBar.ProgressBar(tc, header)
349            except ValueError:
350                print(header)
351                progressBar = lit.ProgressBar.SimpleProgressBar('Testing: ')
352        else:
353            print(header)
354
355    startTime = time.time()
356    display = TestingProgressDisplay(opts, len(run.tests), progressBar)
357    try:
358        run.execute_tests(display, opts.numThreads, opts.maxTime,
359                          opts.useProcesses)
360    except KeyboardInterrupt:
361        sys.exit(2)
362    display.finish()
363
364    testing_time = time.time() - startTime
365    if not opts.quiet:
366        print('Testing Time: %.2fs' % (testing_time,))
367
368    # Write out the test data, if requested.
369    if opts.output_path is not None:
370        write_test_results(run, litConfig, testing_time, opts.output_path)
371
372    # List test results organized by kind.
373    hasFailures = False
374    byCode = {}
375    for test in run.tests:
376        if test.result.code not in byCode:
377            byCode[test.result.code] = []
378        byCode[test.result.code].append(test)
379        if test.result.code.isFailure:
380            hasFailures = True
381
382    # Print each test in any of the failing groups.
383    for title,code in (('Unexpected Passing Tests', lit.Test.XPASS),
384                       ('Failing Tests', lit.Test.FAIL),
385                       ('Unresolved Tests', lit.Test.UNRESOLVED)):
386        elts = byCode.get(code)
387        if not elts:
388            continue
389        print('*'*20)
390        print('%s (%d):' % (title, len(elts)))
391        for test in elts:
392            print('    %s' % test.getFullName())
393        sys.stdout.write('\n')
394
395    if opts.timeTests and run.tests:
396        # Order by time.
397        test_times = [(test.getFullName(), test.result.elapsed)
398                      for test in run.tests]
399        lit.util.printHistogram(test_times, title='Tests')
400
401    for name,code in (('Expected Passes    ', lit.Test.PASS),
402                      ('Expected Failures  ', lit.Test.XFAIL),
403                      ('Unsupported Tests  ', lit.Test.UNSUPPORTED),
404                      ('Unresolved Tests   ', lit.Test.UNRESOLVED),
405                      ('Unexpected Passes  ', lit.Test.XPASS),
406                      ('Unexpected Failures', lit.Test.FAIL),):
407        if opts.quiet and not code.isFailure:
408            continue
409        N = len(byCode.get(code,[]))
410        if N:
411            print('  %s: %d' % (name,N))
412
413    # If we encountered any additional errors, exit abnormally.
414    if litConfig.numErrors:
415        sys.stderr.write('\n%d error(s), exiting.\n' % litConfig.numErrors)
416        sys.exit(2)
417
418    # Warn about warnings.
419    if litConfig.numWarnings:
420        sys.stderr.write('\n%d warning(s) in tests.\n' % litConfig.numWarnings)
421
422    if hasFailures:
423        sys.exit(1)
424    sys.exit(0)
425
426if __name__=='__main__':
427    main()
428