chrome_tests.py revision d3868032626d59662ff73b372b5d584c1d144c53
1#!/usr/bin/env python 2# Copyright (c) 2012 The Chromium Authors. All rights reserved. 3# Use of this source code is governed by a BSD-style license that can be 4# found in the LICENSE file. 5 6''' Runs various chrome tests through valgrind_test.py.''' 7 8import glob 9import logging 10import multiprocessing 11import optparse 12import os 13import stat 14import sys 15 16import logging_utils 17import path_utils 18 19import common 20import valgrind_test 21 22class TestNotFound(Exception): pass 23 24class MultipleGTestFiltersSpecified(Exception): pass 25 26class BuildDirNotFound(Exception): pass 27 28class BuildDirAmbiguous(Exception): pass 29 30class ChromeTests: 31 SLOW_TOOLS = ["memcheck", "tsan", "tsan_rv", "drmemory"] 32 LAYOUT_TESTS_DEFAULT_CHUNK_SIZE = 500 33 34 def __init__(self, options, args, test): 35 if ':' in test: 36 (self._test, self._gtest_filter) = test.split(':', 1) 37 else: 38 self._test = test 39 self._gtest_filter = options.gtest_filter 40 41 if self._test not in self._test_list: 42 raise TestNotFound("Unknown test: %s" % test) 43 44 if options.gtest_filter and options.gtest_filter != self._gtest_filter: 45 raise MultipleGTestFiltersSpecified("Can not specify both --gtest_filter " 46 "and --test %s" % test) 47 48 self._options = options 49 self._args = args 50 51 script_dir = path_utils.ScriptDir() 52 # Compute the top of the tree (the "source dir") from the script dir (where 53 # this script lives). We assume that the script dir is in tools/valgrind/ 54 # relative to the top of the tree. 
55 self._source_dir = os.path.dirname(os.path.dirname(script_dir)) 56 # since this path is used for string matching, make sure it's always 57 # an absolute Unix-style path 58 self._source_dir = os.path.abspath(self._source_dir).replace('\\', '/') 59 valgrind_test_script = os.path.join(script_dir, "valgrind_test.py") 60 self._command_preamble = ["--source_dir=%s" % (self._source_dir)] 61 62 if not self._options.build_dir: 63 dirs = [ 64 os.path.join(self._source_dir, "xcodebuild", "Debug"), 65 os.path.join(self._source_dir, "out", "Debug"), 66 os.path.join(self._source_dir, "build", "Debug"), 67 ] 68 build_dir = [d for d in dirs if os.path.isdir(d)] 69 if len(build_dir) > 1: 70 raise BuildDirAmbiguous("Found more than one suitable build dir:\n" 71 "%s\nPlease specify just one " 72 "using --build_dir" % ", ".join(build_dir)) 73 elif build_dir: 74 self._options.build_dir = build_dir[0] 75 else: 76 self._options.build_dir = None 77 78 if self._options.build_dir: 79 build_dir = os.path.abspath(self._options.build_dir) 80 self._command_preamble += ["--build_dir=%s" % (self._options.build_dir)] 81 82 def _EnsureBuildDirFound(self): 83 if not self._options.build_dir: 84 raise BuildDirNotFound("Oops, couldn't find a build dir, please " 85 "specify it manually using --build_dir") 86 87 def _DefaultCommand(self, tool, exe=None, valgrind_test_args=None): 88 '''Generates the default command array that most tests will use.''' 89 if exe and common.IsWindows(): 90 exe += '.exe' 91 92 cmd = list(self._command_preamble) 93 94 # Find all suppressions matching the following pattern: 95 # tools/valgrind/TOOL/suppressions[_PLATFORM].txt 96 # and list them with --suppressions= prefix. 
97 script_dir = path_utils.ScriptDir() 98 tool_name = tool.ToolName(); 99 suppression_file = os.path.join(script_dir, tool_name, "suppressions.txt") 100 if os.path.exists(suppression_file): 101 cmd.append("--suppressions=%s" % suppression_file) 102 # Platform-specific suppression 103 for platform in common.PlatformNames(): 104 platform_suppression_file = \ 105 os.path.join(script_dir, tool_name, 'suppressions_%s.txt' % platform) 106 if os.path.exists(platform_suppression_file): 107 cmd.append("--suppressions=%s" % platform_suppression_file) 108 109 if self._options.valgrind_tool_flags: 110 cmd += self._options.valgrind_tool_flags.split(" ") 111 if self._options.keep_logs: 112 cmd += ["--keep_logs"] 113 if valgrind_test_args != None: 114 for arg in valgrind_test_args: 115 cmd.append(arg) 116 if exe: 117 self._EnsureBuildDirFound() 118 cmd.append(os.path.join(self._options.build_dir, exe)) 119 # Valgrind runs tests slowly, so slow tests hurt more; show elapased time 120 # so we can find the slowpokes. 121 cmd.append("--gtest_print_time") 122 if self._options.gtest_repeat: 123 cmd.append("--gtest_repeat=%s" % self._options.gtest_repeat) 124 return cmd 125 126 def Run(self): 127 ''' Runs the test specified by command-line argument --test ''' 128 logging.info("running test %s" % (self._test)) 129 return self._test_list[self._test](self) 130 131 def _AppendGtestFilter(self, tool, name, cmd): 132 '''Append an appropriate --gtest_filter flag to the googletest binary 133 invocation. 134 If the user passed his own filter mentioning only one test, just use it. 135 Othewise, filter out tests listed in the appropriate gtest_exclude files. 136 ''' 137 if (self._gtest_filter and 138 ":" not in self._gtest_filter and 139 "?" 
not in self._gtest_filter and 140 "*" not in self._gtest_filter): 141 cmd.append("--gtest_filter=%s" % self._gtest_filter) 142 return 143 144 filters = [] 145 gtest_files_dir = os.path.join(path_utils.ScriptDir(), "gtest_exclude") 146 147 gtest_filter_files = [ 148 os.path.join(gtest_files_dir, name + ".gtest-%s.txt" % tool.ToolName())] 149 # Use ".gtest.txt" files only for slow tools, as they now contain 150 # Valgrind- and Dr.Memory-specific filters. 151 # TODO(glider): rename the files to ".gtest_slow.txt" 152 if tool.ToolName() in ChromeTests.SLOW_TOOLS: 153 gtest_filter_files += [os.path.join(gtest_files_dir, name + ".gtest.txt")] 154 for platform_suffix in common.PlatformNames(): 155 gtest_filter_files += [ 156 os.path.join(gtest_files_dir, name + ".gtest_%s.txt" % platform_suffix), 157 os.path.join(gtest_files_dir, name + ".gtest-%s_%s.txt" % \ 158 (tool.ToolName(), platform_suffix))] 159 logging.info("Reading gtest exclude filter files:") 160 for filename in gtest_filter_files: 161 # strip the leading absolute path (may be very long on the bot) 162 # and the following / or \. 163 readable_filename = filename.replace("\\", "/") # '\' on Windows 164 readable_filename = readable_filename.replace(self._source_dir, "")[1:] 165 if not os.path.exists(filename): 166 logging.info(" \"%s\" - not found" % readable_filename) 167 continue 168 logging.info(" \"%s\" - OK" % readable_filename) 169 f = open(filename, 'r') 170 for line in f.readlines(): 171 if line.startswith("#") or line.startswith("//") or line.isspace(): 172 continue 173 line = line.rstrip() 174 test_prefixes = ["FLAKY", "FAILS"] 175 for p in test_prefixes: 176 # Strip prefixes from the test names. 177 line = line.replace(".%s_" % p, ".") 178 # Exclude the original test name. 179 filters.append(line) 180 if line[-2:] != ".*": 181 # List all possible prefixes if line doesn't end with ".*". 182 for p in test_prefixes: 183 filters.append(line.replace(".", ".%s_" % p)) 184 # Get rid of duplicates. 
185 filters = set(filters) 186 gtest_filter = self._gtest_filter 187 if len(filters): 188 if gtest_filter: 189 gtest_filter += ":" 190 if gtest_filter.find("-") < 0: 191 gtest_filter += "-" 192 else: 193 gtest_filter = "-" 194 gtest_filter += ":".join(filters) 195 if gtest_filter: 196 cmd.append("--gtest_filter=%s" % gtest_filter) 197 198 @staticmethod 199 def ShowTests(): 200 test_to_names = {} 201 for name, test_function in ChromeTests._test_list.iteritems(): 202 test_to_names.setdefault(test_function, []).append(name) 203 204 name_to_aliases = {} 205 for names in test_to_names.itervalues(): 206 names.sort(key=lambda name: len(name)) 207 name_to_aliases[names[0]] = names[1:] 208 209 print 210 print "Available tests:" 211 print "----------------" 212 for name, aliases in sorted(name_to_aliases.iteritems()): 213 if aliases: 214 print " {} (aka {})".format(name, ', '.join(aliases)) 215 else: 216 print " {}".format(name) 217 218 def SetupLdPath(self, requires_build_dir): 219 if requires_build_dir: 220 self._EnsureBuildDirFound() 221 elif not self._options.build_dir: 222 return 223 224 # Append build_dir to LD_LIBRARY_PATH so external libraries can be loaded. 
225 if (os.getenv("LD_LIBRARY_PATH")): 226 os.putenv("LD_LIBRARY_PATH", "%s:%s" % (os.getenv("LD_LIBRARY_PATH"), 227 self._options.build_dir)) 228 else: 229 os.putenv("LD_LIBRARY_PATH", self._options.build_dir) 230 231 def SimpleTest(self, module, name, valgrind_test_args=None, cmd_args=None): 232 tool = valgrind_test.CreateTool(self._options.valgrind_tool) 233 cmd = self._DefaultCommand(tool, name, valgrind_test_args) 234 self._AppendGtestFilter(tool, name, cmd) 235 cmd.extend(['--test-tiny-timeout=1000']) 236 if cmd_args: 237 cmd.extend(cmd_args) 238 239 self.SetupLdPath(True) 240 return tool.Run(cmd, module) 241 242 def RunCmdLine(self): 243 tool = valgrind_test.CreateTool(self._options.valgrind_tool) 244 cmd = self._DefaultCommand(tool, None, self._args) 245 self.SetupLdPath(False) 246 return tool.Run(cmd, None) 247 248 def TestAppList(self): 249 return self.SimpleTest("app_list", "app_list_unittests") 250 251 def TestAsh(self): 252 return self.SimpleTest("ash", "ash_unittests") 253 254 def TestAura(self): 255 return self.SimpleTest("aura", "aura_unittests") 256 257 def TestBase(self): 258 return self.SimpleTest("base", "base_unittests") 259 260 def TestChromeOS(self): 261 return self.SimpleTest("chromeos", "chromeos_unittests") 262 263 def TestComponents(self): 264 return self.SimpleTest("components", "components_unittests") 265 266 def TestCompositor(self): 267 return self.SimpleTest("compositor", "compositor_unittests") 268 269 def TestContent(self): 270 return self.SimpleTest("content", "content_unittests") 271 272 def TestContentBrowser(self): 273 return self.SimpleTest("content", "content_browsertests") 274 275 def TestCourgette(self): 276 return self.SimpleTest("courgette", "courgette_unittests") 277 278 def TestCrypto(self): 279 return self.SimpleTest("crypto", "crypto_unittests") 280 281 def TestDevice(self): 282 return self.SimpleTest("device", "device_unittests") 283 284 def TestFFmpeg(self): 285 return self.SimpleTest("chrome", "ffmpeg_unittests") 
286 287 def TestFFmpegRegressions(self): 288 return self.SimpleTest("chrome", "ffmpeg_regression_tests") 289 290 def TestGPU(self): 291 return self.SimpleTest("gpu", "gpu_unittests") 292 293 def TestIpc(self): 294 return self.SimpleTest("ipc", "ipc_tests", 295 valgrind_test_args=["--trace_children"]) 296 297 def TestJingle(self): 298 return self.SimpleTest("chrome", "jingle_unittests") 299 300 def TestMedia(self): 301 return self.SimpleTest("chrome", "media_unittests") 302 303 def TestMessageCenter(self): 304 return self.SimpleTest("message_center", "message_center_unittests") 305 306 def TestNet(self): 307 return self.SimpleTest("net", "net_unittests") 308 309 def TestNetPerf(self): 310 return self.SimpleTest("net", "net_perftests") 311 312 def TestPPAPI(self): 313 return self.SimpleTest("chrome", "ppapi_unittests") 314 315 def TestPrinting(self): 316 return self.SimpleTest("chrome", "printing_unittests") 317 318 def TestRemoting(self): 319 return self.SimpleTest("chrome", "remoting_unittests", 320 cmd_args=[ 321 "--ui-test-action-timeout=60000", 322 "--ui-test-action-max-timeout=150000"]) 323 324 def TestSql(self): 325 return self.SimpleTest("chrome", "sql_unittests") 326 327 def TestSync(self): 328 return self.SimpleTest("chrome", "sync_unit_tests") 329 330 def TestLinuxSandbox(self): 331 return self.SimpleTest("sandbox", "sandbox_linux_unittests") 332 333 def TestUnit(self): 334 # http://crbug.com/51716 335 # Disabling all unit tests 336 # Problems reappeared after r119922 337 if common.IsMac() and (self._options.valgrind_tool == "memcheck"): 338 logging.warning("unit_tests are disabled for memcheck on MacOS.") 339 return 0; 340 return self.SimpleTest("chrome", "unit_tests") 341 342 def TestUIUnit(self): 343 return self.SimpleTest("chrome", "ui_unittests") 344 345 def TestURL(self): 346 return self.SimpleTest("chrome", "url_unittests") 347 348 def TestViews(self): 349 return self.SimpleTest("views", "views_unittests") 350 351 # Valgrind timeouts are in seconds. 
352 UI_VALGRIND_ARGS = ["--timeout=14400", "--trace_children", "--indirect"] 353 # UI test timeouts are in milliseconds. 354 UI_TEST_ARGS = ["--ui-test-action-timeout=60000", 355 "--ui-test-action-max-timeout=150000", 356 "--no-sandbox"] 357 358 # TODO(thestig) fine-tune these values. 359 # Valgrind timeouts are in seconds. 360 BROWSER_VALGRIND_ARGS = ["--timeout=50000", "--trace_children", "--indirect"] 361 # Browser test timeouts are in milliseconds. 362 BROWSER_TEST_ARGS = ["--ui-test-action-timeout=400000", 363 "--ui-test-action-max-timeout=800000", 364 "--no-sandbox"] 365 366 def TestAutomatedUI(self): 367 return self.SimpleTest("chrome", "automated_ui_tests", 368 valgrind_test_args=self.UI_VALGRIND_ARGS, 369 cmd_args=self.UI_TEST_ARGS) 370 371 def TestBrowser(self): 372 return self.SimpleTest("chrome", "browser_tests", 373 valgrind_test_args=self.BROWSER_VALGRIND_ARGS, 374 cmd_args=self.BROWSER_TEST_ARGS) 375 376 def TestInteractiveUI(self): 377 return self.SimpleTest("chrome", "interactive_ui_tests", 378 valgrind_test_args=self.UI_VALGRIND_ARGS, 379 cmd_args=self.UI_TEST_ARGS) 380 381 def TestReliability(self): 382 script_dir = path_utils.ScriptDir() 383 url_list_file = os.path.join(script_dir, "reliability", "url_list.txt") 384 return self.SimpleTest("chrome", "reliability_tests", 385 valgrind_test_args=self.UI_VALGRIND_ARGS, 386 cmd_args=(self.UI_TEST_ARGS + 387 ["--list=%s" % url_list_file])) 388 389 def TestSafeBrowsing(self): 390 return self.SimpleTest("chrome", "safe_browsing_tests", 391 valgrind_test_args=self.UI_VALGRIND_ARGS, 392 cmd_args=(["--ui-test-action-max-timeout=450000"])) 393 394 def TestSyncIntegration(self): 395 return self.SimpleTest("chrome", "sync_integration_tests", 396 valgrind_test_args=self.UI_VALGRIND_ARGS, 397 cmd_args=(["--ui-test-action-max-timeout=450000"])) 398 399 def TestLayoutChunk(self, chunk_num, chunk_size): 400 # Run tests [chunk_num*chunk_size .. (chunk_num+1)*chunk_size) from the 401 # list of tests. 
Wrap around to beginning of list at end. 402 # If chunk_size is zero, run all tests in the list once. 403 # If a text file is given as argument, it is used as the list of tests. 404 # 405 # Build the ginormous commandline in 'cmd'. 406 # It's going to be roughly 407 # python valgrind_test.py ... python run_webkit_tests.py ... 408 # but we'll use the --indirect flag to valgrind_test.py 409 # to avoid valgrinding python. 410 # Start by building the valgrind_test.py commandline. 411 tool = valgrind_test.CreateTool(self._options.valgrind_tool) 412 cmd = self._DefaultCommand(tool) 413 cmd.append("--trace_children") 414 cmd.append("--indirect_webkit_layout") 415 cmd.append("--ignore_exit_code") 416 # Now build script_cmd, the run_webkits_tests.py commandline 417 # Store each chunk in its own directory so that we can find the data later 418 chunk_dir = os.path.join("layout", "chunk_%05d" % chunk_num) 419 out_dir = os.path.join(path_utils.ScriptDir(), "latest") 420 out_dir = os.path.join(out_dir, chunk_dir) 421 if os.path.exists(out_dir): 422 old_files = glob.glob(os.path.join(out_dir, "*.txt")) 423 for f in old_files: 424 os.remove(f) 425 else: 426 os.makedirs(out_dir) 427 script = os.path.join(self._source_dir, "webkit", "tools", "layout_tests", 428 "run_webkit_tests.py") 429 # http://crbug.com/260627: After the switch to content_shell from DRT, each 430 # test now brings up 3 processes. Under Valgrind, they become memory bound 431 # and can eventually OOM if we don't reduce the total count. 432 jobs = int(multiprocessing.cpu_count() * 0.3) 433 script_cmd = ["python", script, "-v", 434 "--run-singly", # run a separate DumpRenderTree for each test 435 "--fully-parallel", 436 "--child-processes=%d" % jobs, 437 "--time-out-ms=200000", 438 "--no-retry-failures", # retrying takes too much time 439 # http://crbug.com/176908: Don't launch a browser when done. 440 "--no-show-results", 441 "--nocheck-sys-deps"] 442 # Pass build mode to run_webkit_tests.py. 
We aren't passed it directly, 443 # so parse it out of build_dir. run_webkit_tests.py can only handle 444 # the two values "Release" and "Debug". 445 # TODO(Hercules): unify how all our scripts pass around build mode 446 # (--mode / --target / --build_dir / --debug) 447 if self._options.build_dir.endswith("Debug"): 448 script_cmd.append("--debug"); 449 if (chunk_size > 0): 450 script_cmd.append("--run-chunk=%d:%d" % (chunk_num, chunk_size)) 451 if len(self._args): 452 # if the arg is a txt file, then treat it as a list of tests 453 if os.path.isfile(self._args[0]) and self._args[0][-4:] == ".txt": 454 script_cmd.append("--test-list=%s" % self._args[0]) 455 else: 456 script_cmd.extend(self._args) 457 self._AppendGtestFilter(tool, "layout", script_cmd) 458 # Now run script_cmd with the wrapper in cmd 459 cmd.extend(["--"]) 460 cmd.extend(script_cmd) 461 462 # Layout tests often times fail quickly, but the buildbot remains green. 463 # Detect this situation when running with the default chunk size. 464 if chunk_size == self.LAYOUT_TESTS_DEFAULT_CHUNK_SIZE: 465 min_runtime_in_seconds=120 466 else: 467 min_runtime_in_seconds=0 468 ret = tool.Run(cmd, "layout", min_runtime_in_seconds=min_runtime_in_seconds) 469 return ret 470 471 472 def TestLayout(self): 473 # A "chunk file" is maintained in the local directory so that each test 474 # runs a slice of the layout tests of size chunk_size that increments with 475 # each run. Since tests can be added and removed from the layout tests at 476 # any time, this is not going to give exact coverage, but it will allow us 477 # to continuously run small slices of the layout tests under valgrind rather 478 # than having to run all of them in one shot. 
479 chunk_size = self._options.num_tests 480 if (chunk_size == 0): 481 return self.TestLayoutChunk(0, 0) 482 chunk_num = 0 483 chunk_file = os.path.join("valgrind_layout_chunk.txt") 484 logging.info("Reading state from " + chunk_file) 485 try: 486 f = open(chunk_file) 487 if f: 488 str = f.read() 489 if len(str): 490 chunk_num = int(str) 491 # This should be enough so that we have a couple of complete runs 492 # of test data stored in the archive (although note that when we loop 493 # that we almost guaranteed won't be at the end of the test list) 494 if chunk_num > 10000: 495 chunk_num = 0 496 f.close() 497 except IOError, (errno, strerror): 498 logging.error("error reading from file %s (%d, %s)" % (chunk_file, 499 errno, strerror)) 500 # Save the new chunk size before running the tests. Otherwise if a 501 # particular chunk hangs the bot, the chunk number will never get 502 # incremented and the bot will be wedged. 503 logging.info("Saving state to " + chunk_file) 504 try: 505 f = open(chunk_file, "w") 506 chunk_num += 1 507 f.write("%d" % chunk_num) 508 f.close() 509 except IOError, (errno, strerror): 510 logging.error("error writing to file %s (%d, %s)" % (chunk_file, errno, 511 strerror)) 512 # Since we're running small chunks of the layout tests, it's important to 513 # mark the ones that have errors in them. These won't be visible in the 514 # summary list for long, but will be useful for someone reviewing this bot. 515 return self.TestLayoutChunk(chunk_num, chunk_size) 516 517 # The known list of tests. 518 # Recognise the original abbreviations as well as full executable names. 
  # Maps each command-line test name (short alias or full executable name)
  # to the bound ChromeTests method that runs it.
  _test_list = {
    "cmdline" : RunCmdLine,
    "app_list": TestAppList, "app_list_unittests": TestAppList,
    "ash": TestAsh, "ash_unittests": TestAsh,
    "aura": TestAura, "aura_unittests": TestAura,
    "automated_ui" : TestAutomatedUI,
    "base": TestBase, "base_unittests": TestBase,
    "browser": TestBrowser, "browser_tests": TestBrowser,
    "chromeos": TestChromeOS, "chromeos_unittests": TestChromeOS,
    "components": TestComponents,"components_unittests": TestComponents,
    "compositor": TestCompositor,"compositor_unittests": TestCompositor,
    "content": TestContent, "content_unittests": TestContent,
    "content_browsertests": TestContentBrowser,
    "courgette": TestCourgette, "courgette_unittests": TestCourgette,
    "crypto": TestCrypto, "crypto_unittests": TestCrypto,
    "device": TestDevice, "device_unittests": TestDevice,
    "ffmpeg": TestFFmpeg, "ffmpeg_unittests": TestFFmpeg,
    "ffmpeg_regression_tests": TestFFmpegRegressions,
    "gpu": TestGPU, "gpu_unittests": TestGPU,
    "ipc": TestIpc, "ipc_tests": TestIpc,
    "interactive_ui": TestInteractiveUI,
    "jingle": TestJingle, "jingle_unittests": TestJingle,
    "layout": TestLayout, "layout_tests": TestLayout,
    "webkit": TestLayout,
    "media": TestMedia, "media_unittests": TestMedia,
    "message_center": TestMessageCenter,
    "message_center_unittests" : TestMessageCenter,
    "net": TestNet, "net_unittests": TestNet,
    "net_perf": TestNetPerf, "net_perftests": TestNetPerf,
    "ppapi": TestPPAPI, "ppapi_unittests": TestPPAPI,
    "printing": TestPrinting, "printing_unittests": TestPrinting,
    "reliability": TestReliability, "reliability_tests": TestReliability,
    "remoting": TestRemoting, "remoting_unittests": TestRemoting,
    "safe_browsing": TestSafeBrowsing, "safe_browsing_tests": TestSafeBrowsing,
    "sandbox": TestLinuxSandbox, "sandbox_linux_unittests": TestLinuxSandbox,
    "sql": TestSql, "sql_unittests": TestSql,
    "sync": TestSync, "sync_unit_tests": TestSync,
    "sync_integration_tests": TestSyncIntegration,
    "sync_integration": TestSyncIntegration,
    "ui_unit": TestUIUnit, "ui_unittests": TestUIUnit,
    "unit": TestUnit, "unit_tests": TestUnit,
    "url": TestURL, "url_unittests": TestURL,
    "views": TestViews, "views_unittests": TestViews,
  }


def _main():
  """Parses the command line and runs each requested test.

  Returns the first non-zero test status, or 0 if every test passed.
  """
  parser = optparse.OptionParser("usage: %prog -b <dir> -t <test> "
                                 "[-t <test> ...]")
  # Everything after the first positional arg is forwarded to the test.
  parser.disable_interspersed_args()

  parser.add_option("", "--help-tests", dest="help_tests", action="store_true",
                    default=False, help="List all available tests")
  parser.add_option("-b", "--build_dir",
                    help="the location of the compiler output")
  parser.add_option("-t", "--test", action="append", default=[],
                    help="which test to run, supports test:gtest_filter format "
                         "as well.")
  parser.add_option("", "--baseline", action="store_true", default=False,
                    help="generate baseline data instead of validating")
  parser.add_option("", "--gtest_filter",
                    help="additional arguments to --gtest_filter")
  parser.add_option("", "--gtest_repeat",
                    help="argument for --gtest_repeat")
  parser.add_option("-v", "--verbose", action="store_true", default=False,
                    help="verbose output - enable debug log messages")
  parser.add_option("", "--tool", dest="valgrind_tool", default="memcheck",
                    help="specify a valgrind tool to run the tests under")
  parser.add_option("", "--tool_flags", dest="valgrind_tool_flags", default="",
                    help="specify custom flags for the selected valgrind tool")
  parser.add_option("", "--keep_logs", action="store_true", default=False,
                    help="store memory tool logs in the <tool>.logs directory "
                         "instead of /tmp.\nThis can be useful for tool "
                         "developers/maintainers.\nPlease note that the <tool>"
                         ".logs directory will be clobbered on tool startup.")
  parser.add_option("-n", "--num_tests", type="int",
                    default=ChromeTests.LAYOUT_TESTS_DEFAULT_CHUNK_SIZE,
                    help="for layout tests: # of subtests per run. 0 for all.")
  # TODO(thestig) Remove this if we can.
  parser.add_option("", "--gtest_color", dest="gtest_color", default="no",
                    help="dummy compatibility flag for sharding_supervisor.")

  options, args = parser.parse_args()

  if options.verbose:
    logging_utils.config_root(logging.DEBUG)
  else:
    logging_utils.config_root()

  if options.help_tests:
    ChromeTests.ShowTests()
    return 0

  if not options.test:
    parser.error("--test not specified")

  # A single --gtest_filter cannot sensibly apply to several different tests.
  if len(options.test) != 1 and options.gtest_filter:
    parser.error("--gtest_filter and multiple tests don't make sense together")

  for t in options.test:
    tests = ChromeTests(options, args, t)
    ret = tests.Run()
    # Stop at the first failing test and propagate its status.
    if ret: return ret
  return 0


if __name__ == "__main__":
  sys.exit(_main())