unittest_suite.py revision 363cdb56d1b5326f020020365754be944c6d78a7
#!/usr/bin/python -u

import os, sys, unittest, optparse
import common
from autotest_lib.utils import parallel


root = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))

parser = optparse.OptionParser()
parser.add_option("-r", action="store", type="string", dest="start",
                  default='',
                  help="directory under the autotest root in which to start "
                       "searching for unittests")
parser.add_option("--full", action="store_true", dest="full", default=False,
                  help="run the full suite, including the long-running tests "
                       "listed in LONG_TESTS")
parser.add_option("--debug", action="store_true", dest="debug", default=False,
                  help="run test modules one at a time, leaving their output "
                       "on stdout/stderr")


# Tests that are too slow for the default run; they are only included
# when --full is given.
LONG_TESTS = set((
    'monitor_db_unittest.py',
    'barrier_unittest.py',
    'migrate_unittest.py',
    'frontend_unittest.py',
    'client_compilation_unittest.py',
    'csv_encoder_unittest.py',
    ))

modules = []


def lister(full, dirname, files):
    # os.path.walk callback: collect *_unittest.py files that live inside
    # python packages, recording each as a module path under autotest_lib.
    if not os.path.exists(os.path.join(dirname, '__init__.py')):
        return
    for f in files:
        if f.endswith('_unittest.py'):
            if not full and f in LONG_TESTS:
                continue
            # Use splitext rather than .strip('.py'), which would also eat
            # any leading or trailing '.', 'p' and 'y' characters.
            temp = os.path.splitext(os.path.join(dirname, f))[0]
            # e.g. <root>/client/bin/job_unittest ->
            # ['autotest_lib', 'client', 'bin', 'job_unittest']
            mod_name = ['autotest_lib'] + temp[len(root)+1:].split('/')
            modules.append(mod_name)


def run_test(mod_name):
    # Import a single unittest module and run its tests, raising if any
    # test errors or fails.
    if not options.debug:
        parallel.redirect_io()

    print "Running %s" % '.'.join(mod_name)
    mod = common.setup_modules.import_module(mod_name[-1],
                                             '.'.join(mod_name[:-1]))
    test = unittest.defaultTestLoader.loadTestsFromModule(mod)
    suite = unittest.TestSuite(test)
    runner = unittest.TextTestRunner(verbosity=2)
    result = runner.run(suite)
    if result.errors or result.failures:
        raise Exception("%s failed" % '.'.join(mod_name))


def run_tests(start, full=False):
    os.path.walk(start, lister, full)

    functions = {}
    for module in modules:
        # Create a function that'll test a particular module.  module=module
        # is a hack to force python to evaluate the params now (see the note
        # on late binding after this listing).  We then rename the function
        # to make error reporting nicer.
        run_module = lambda module=module: run_test(module)
        name = '.'.join(module)
        run_module.__name__ = name
        functions[run_module] = set()

    try:
        dargs = {}
        if options.debug:
            dargs['max_simultaneous_procs'] = 1
        pe = parallel.ParallelExecute(functions, **dargs)
        pe.run_until_completion()
    except parallel.ParallelError, err:
        return err.errors
    return []


def main():
    global options, args
    options, args = parser.parse_args()
    if args:
        # parser.error() prints usage and exits, so nothing more is needed.
        parser.error('Unexpected argument(s): %s' % args)

    # Strip the arguments off the command line, so that the unit tests do not
    # see them.
    sys.argv = [sys.argv[0]]

    errors = run_tests(os.path.join(root, options.start), options.full)
    if errors:
        print "%d tests resulted in an error/failure:" % len(errors)
        for error in errors:
            print "\t%s" % error
        sys.exit(1)
    else:
        print "All passed!"
        sys.exit(0)


if __name__ == "__main__":
    main()
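A note on the module=module idiom in run_tests(): Python closures bind names late, so a bare "lambda: run_test(module)" created inside the loop would look module up only when the callback finally runs, and every callback would therefore test the last module discovered. Passing the value as a default argument snapshots it at definition time. A minimal standalone sketch of the difference (not part of the file above):

    mods = ['a', 'b', 'c']
    late = [lambda: m for m in mods]       # each f() looks m up at call time
    bound = [lambda m=m: m for m in mods]  # each f() keeps its own value of m

    print [f() for f in late]    # prints ['c', 'c', 'c']
    print [f() for f in bound]   # prints ['a', 'b', 'c']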
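For reference, some typical invocations; the utils/ prefix is an assumption based on root pointing one directory above this script (in an autotest checkout the file would live at utils/unittest_suite.py), and the "client" argument is purely illustrative:

    utils/unittest_suite.py                    # short suite, modules in parallel
    utils/unittest_suite.py --full             # also run the LONG_TESTS modules
    utils/unittest_suite.py --debug -r client  # serial, output left visible

The -r value is joined onto the autotest root, so it names the subdirectory in which the search for *_unittest.py files begins.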