chrome_tests.py revision 5d1f7b1de12d16ceb2c938c56701a3e8bfa558f7
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

''' Runs various chrome tests through valgrind_test.py.'''

import glob
import logging
import multiprocessing
import optparse
import os
import stat
import sys

import logging_utils
import path_utils

import common
import valgrind_test

class TestNotFound(Exception): pass

class MultipleGTestFiltersSpecified(Exception): pass

class BuildDirNotFound(Exception): pass

class BuildDirAmbiguous(Exception): pass

class ChromeTests:
  SLOW_TOOLS = ["memcheck", "tsan", "tsan_rv", "drmemory"]
  LAYOUT_TESTS_DEFAULT_CHUNK_SIZE = 400

  def __init__(self, options, args, test):
    if ':' in test:
      (self._test, self._gtest_filter) = test.split(':', 1)
    else:
      self._test = test
      self._gtest_filter = options.gtest_filter

    if self._test not in self._test_list:
      raise TestNotFound("Unknown test: %s" % test)

    if options.gtest_filter and options.gtest_filter != self._gtest_filter:
      raise MultipleGTestFiltersSpecified("Can not specify both --gtest_filter "
                                          "and --test %s" % test)

    self._options = options
    self._args = args

    script_dir = path_utils.ScriptDir()
    # Compute the top of the tree (the "source dir") from the script dir (where
    # this script lives).  We assume that the script dir is in tools/valgrind/
    # relative to the top of the tree.
    self._source_dir = os.path.dirname(os.path.dirname(script_dir))
    # since this path is used for string matching, make sure it's always
    # an absolute Unix-style path
    self._source_dir = os.path.abspath(self._source_dir).replace('\\', '/')
    valgrind_test_script = os.path.join(script_dir, "valgrind_test.py")
    self._command_preamble = ["--source-dir=%s" % (self._source_dir)]

    if not self._options.build_dir:
      dirs = [
        os.path.join(self._source_dir, "xcodebuild", "Debug"),
        os.path.join(self._source_dir, "out", "Debug"),
        os.path.join(self._source_dir, "build", "Debug"),
      ]
      build_dir = [d for d in dirs if os.path.isdir(d)]
      if len(build_dir) > 1:
        raise BuildDirAmbiguous("Found more than one suitable build dir:\n"
                                "%s\nPlease specify just one "
                                "using --build-dir" % ", ".join(build_dir))
      elif build_dir:
        self._options.build_dir = build_dir[0]
      else:
        self._options.build_dir = None

    if self._options.build_dir:
      build_dir = os.path.abspath(self._options.build_dir)
      self._command_preamble += ["--build-dir=%s" % (self._options.build_dir)]

  def _EnsureBuildDirFound(self):
    if not self._options.build_dir:
      raise BuildDirNotFound("Oops, couldn't find a build dir, please "
                             "specify it manually using --build-dir")

  def _DefaultCommand(self, tool, exe=None, valgrind_test_args=None):
    '''Generates the default command array that most tests will use.'''
    if exe and common.IsWindows():
      exe += '.exe'

    cmd = list(self._command_preamble)

    # Find all suppressions matching the following pattern:
    # tools/valgrind/TOOL/suppressions[_PLATFORM].txt
    # and list them with --suppressions= prefix.
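    # For example, on Linux with the default memcheck tool this would
    # typically add (assuming both files exist in the checkout):
    #   --suppressions=tools/valgrind/memcheck/suppressions.txt
    #   --suppressions=tools/valgrind/memcheck/suppressions_linux.txt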
    script_dir = path_utils.ScriptDir()
    tool_name = tool.ToolName()
    suppression_file = os.path.join(script_dir, tool_name, "suppressions.txt")
    if os.path.exists(suppression_file):
      cmd.append("--suppressions=%s" % suppression_file)
    # Platform-specific suppressions.
    for platform in common.PlatformNames():
      platform_suppression_file = \
          os.path.join(script_dir, tool_name, 'suppressions_%s.txt' % platform)
      if os.path.exists(platform_suppression_file):
        cmd.append("--suppressions=%s" % platform_suppression_file)

    if self._options.valgrind_tool_flags:
      cmd += self._options.valgrind_tool_flags.split(" ")
    if self._options.keep_logs:
      cmd += ["--keep_logs"]
    if valgrind_test_args != None:
      for arg in valgrind_test_args:
        cmd.append(arg)
    if exe:
      self._EnsureBuildDirFound()
      cmd.append(os.path.join(self._options.build_dir, exe))
      # Valgrind runs tests slowly, so slow tests hurt more; show elapsed time
      # so we can find the slowpokes.
      cmd.append("--gtest_print_time")
      # The built-in test launcher for gtest-based executables runs tests in
      # multiple processes by default. Force single-process mode back on.
      cmd.append("--single-process-tests")
    if self._options.gtest_repeat:
      cmd.append("--gtest_repeat=%s" % self._options.gtest_repeat)
    if self._options.gtest_shuffle:
      cmd.append("--gtest_shuffle")
    if self._options.brave_new_test_launcher:
      cmd.append("--brave-new-test-launcher")
    if self._options.test_launcher_bot_mode:
      cmd.append("--test-launcher-bot-mode")
    return cmd

  def Run(self):
    ''' Runs the test specified by command-line argument --test '''
    logging.info("running test %s" % (self._test))
    return self._test_list[self._test](self)

  def _AppendGtestFilter(self, tool, name, cmd):
    '''Appends an appropriate --gtest_filter flag to the googletest binary
    invocation.
    If the user passed a filter mentioning only one test, just use it.
    Otherwise, filter out tests listed in the appropriate gtest_exclude files.
    '''
    if (self._gtest_filter and
        ":" not in self._gtest_filter and
        "?" not in self._gtest_filter and
        "*" not in self._gtest_filter):
      cmd.append("--gtest_filter=%s" % self._gtest_filter)
      return

    filters = []
    gtest_files_dir = os.path.join(path_utils.ScriptDir(), "gtest_exclude")

    gtest_filter_files = [
        os.path.join(gtest_files_dir, name + ".gtest-%s.txt" % tool.ToolName())]
    # Use ".gtest.txt" files only for slow tools, as they now contain
    # Valgrind- and Dr.Memory-specific filters.
    # TODO(glider): rename the files to ".gtest_slow.txt"
    if tool.ToolName() in ChromeTests.SLOW_TOOLS:
      gtest_filter_files += [os.path.join(gtest_files_dir, name + ".gtest.txt")]
    for platform_suffix in common.PlatformNames():
      gtest_filter_files += [
        os.path.join(gtest_files_dir, name + ".gtest_%s.txt" % platform_suffix),
        os.path.join(gtest_files_dir, name + ".gtest-%s_%s.txt" %
            (tool.ToolName(), platform_suffix))]
    logging.info("Reading gtest exclude filter files:")
    for filename in gtest_filter_files:
      # Strip the leading absolute path (may be very long on the bot)
      # and the following / or \.
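      # e.g. "/b/build/.../src/tools/valgrind/gtest_exclude/base_unittests.gtest.txt"
      # would be logged as "tools/valgrind/gtest_exclude/base_unittests.gtest.txt"
      # (the bot path above is purely illustrative).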
      readable_filename = filename.replace("\\", "/")  # '\' on Windows
      readable_filename = readable_filename.replace(self._source_dir, "")[1:]
      if not os.path.exists(filename):
        logging.info("  \"%s\" - not found" % readable_filename)
        continue
      logging.info("  \"%s\" - OK" % readable_filename)
      f = open(filename, 'r')
      for line in f.readlines():
        if line.startswith("#") or line.startswith("//") or line.isspace():
          continue
        line = line.rstrip()
        test_prefixes = ["FLAKY", "FAILS"]
        for p in test_prefixes:
          # Strip prefixes from the test names.
          line = line.replace(".%s_" % p, ".")
        # Exclude the original test name.
        filters.append(line)
        if line[-2:] != ".*":
          # List all possible prefixes if line doesn't end with ".*".
          for p in test_prefixes:
            filters.append(line.replace(".", ".%s_" % p))
    # Get rid of duplicates.
    filters = set(filters)
    gtest_filter = self._gtest_filter
    if len(filters):
      if gtest_filter:
        gtest_filter += ":"
        if gtest_filter.find("-") < 0:
          gtest_filter += "-"
      else:
        gtest_filter = "-"
      gtest_filter += ":".join(filters)
    if gtest_filter:
      cmd.append("--gtest_filter=%s" % gtest_filter)

  @staticmethod
  def ShowTests():
    test_to_names = {}
    for name, test_function in ChromeTests._test_list.iteritems():
      test_to_names.setdefault(test_function, []).append(name)

    name_to_aliases = {}
    for names in test_to_names.itervalues():
      names.sort(key=lambda name: len(name))
      name_to_aliases[names[0]] = names[1:]

    print
    print "Available tests:"
    print "----------------"
    for name, aliases in sorted(name_to_aliases.iteritems()):
      if aliases:
        print "  {} (aka {})".format(name, ', '.join(aliases))
      else:
        print "  {}".format(name)

  def SetupLdPath(self, requires_build_dir):
    if requires_build_dir:
      self._EnsureBuildDirFound()
    elif not self._options.build_dir:
      return

    # Append build_dir to LD_LIBRARY_PATH so external libraries can be loaded.
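    # Note: os.putenv() updates the environment inherited by child processes
    # (the valgrind wrapper and the test binary) but does not modify
    # os.environ for the current process.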
    if (os.getenv("LD_LIBRARY_PATH")):
      os.putenv("LD_LIBRARY_PATH", "%s:%s" % (os.getenv("LD_LIBRARY_PATH"),
                                              self._options.build_dir))
    else:
      os.putenv("LD_LIBRARY_PATH", self._options.build_dir)

  def SimpleTest(self, module, name, valgrind_test_args=None, cmd_args=None):
    tool = valgrind_test.CreateTool(self._options.valgrind_tool)
    cmd = self._DefaultCommand(tool, name, valgrind_test_args)
    self._AppendGtestFilter(tool, name, cmd)
    cmd.extend(['--test-tiny-timeout=1000'])
    if cmd_args:
      cmd.extend(cmd_args)

    self.SetupLdPath(True)
    return tool.Run(cmd, module)

  def RunCmdLine(self):
    tool = valgrind_test.CreateTool(self._options.valgrind_tool)
    cmd = self._DefaultCommand(tool, None, self._args)
    self.SetupLdPath(False)
    return tool.Run(cmd, None)

  def TestAppList(self):
    return self.SimpleTest("app_list", "app_list_unittests")

  def TestAsh(self):
    return self.SimpleTest("ash", "ash_unittests")

  def TestAura(self):
    return self.SimpleTest("aura", "aura_unittests")

  def TestBase(self):
    return self.SimpleTest("base", "base_unittests")

  def TestCast(self):
    return self.SimpleTest("chrome", "cast_unittests")

  def TestChromeOS(self):
    return self.SimpleTest("chromeos", "chromeos_unittests")

  def TestComponents(self):
    return self.SimpleTest("components", "components_unittests")

  def TestCompositor(self):
    return self.SimpleTest("compositor", "compositor_unittests")

  def TestContent(self):
    return self.SimpleTest("content", "content_unittests")

  def TestContentBrowser(self):
    return self.SimpleTest("content", "content_browsertests")

  def TestCourgette(self):
    return self.SimpleTest("courgette", "courgette_unittests")

  def TestCrypto(self):
    return self.SimpleTest("crypto", "crypto_unittests")

  def TestDevice(self):
    return self.SimpleTest("device", "device_unittests")

  def TestEvents(self):
    return self.SimpleTest("events", "events_unittests")

  def TestFFmpeg(self):
    return self.SimpleTest("chrome", "ffmpeg_unittests")

  def TestFFmpegRegressions(self):
    return self.SimpleTest("chrome", "ffmpeg_regression_tests")

  def TestGCM(self):
    return self.SimpleTest("gcm", "gcm_unit_tests")

  def TestGPU(self):
    return self.SimpleTest("gpu", "gpu_unittests")

  def TestIpc(self):
    return self.SimpleTest("ipc", "ipc_tests",
                           valgrind_test_args=["--trace_children"])

  def TestJingle(self):
    return self.SimpleTest("chrome", "jingle_unittests")

  def TestMedia(self):
    return self.SimpleTest("chrome", "media_unittests")

  def TestMessageCenter(self):
    return self.SimpleTest("message_center", "message_center_unittests")

  def TestNet(self):
    return self.SimpleTest("net", "net_unittests")

  def TestNetPerf(self):
    return self.SimpleTest("net", "net_perftests")

  def TestPPAPI(self):
    return self.SimpleTest("chrome", "ppapi_unittests")

  def TestPrinting(self):
    return self.SimpleTest("chrome", "printing_unittests")

  def TestRemoting(self):
    return self.SimpleTest("chrome", "remoting_unittests",
                           cmd_args=[
                               "--ui-test-action-timeout=60000",
                               "--ui-test-action-max-timeout=150000"])

  def TestSql(self):
    return self.SimpleTest("chrome", "sql_unittests")

  def TestSync(self):
    return self.SimpleTest("chrome", "sync_unit_tests")

  def TestLinuxSandbox(self):
    return self.SimpleTest("sandbox", "sandbox_linux_unittests")

  def TestUnit(self):
    # http://crbug.com/51716
    # Disabling all unit tests
    # Problems reappeared after r119922
    if common.IsMac() and (self._options.valgrind_tool == "memcheck"):
      logging.warning("unit_tests are disabled for memcheck on MacOS.")
      return 0
    return self.SimpleTest("chrome", "unit_tests")

  def TestUIUnit(self):
    return self.SimpleTest("chrome", "ui_unittests")

  def TestURL(self):
    return self.SimpleTest("chrome", "url_unittests")

  def TestViews(self):
    return self.SimpleTest("views", "views_unittests")

  # Valgrind timeouts are in seconds.
  UI_VALGRIND_ARGS = ["--timeout=14400", "--trace_children", "--indirect"]
  # UI test timeouts are in milliseconds.
  UI_TEST_ARGS = ["--ui-test-action-timeout=60000",
                  "--ui-test-action-max-timeout=150000",
                  "--no-sandbox"]

  # TODO(thestig) fine-tune these values.
  # Valgrind timeouts are in seconds.
  BROWSER_VALGRIND_ARGS = ["--timeout=50000", "--trace_children", "--indirect"]
  # Browser test timeouts are in milliseconds.
  BROWSER_TEST_ARGS = ["--ui-test-action-timeout=400000",
                       "--ui-test-action-max-timeout=800000",
                       "--no-sandbox"]

  def TestAutomatedUI(self):
    return self.SimpleTest("chrome", "automated_ui_tests",
                           valgrind_test_args=self.UI_VALGRIND_ARGS,
                           cmd_args=self.UI_TEST_ARGS)

  def TestBrowser(self):
    return self.SimpleTest("chrome", "browser_tests",
                           valgrind_test_args=self.BROWSER_VALGRIND_ARGS,
                           cmd_args=self.BROWSER_TEST_ARGS)

  def TestInteractiveUI(self):
    return self.SimpleTest("chrome", "interactive_ui_tests",
                           valgrind_test_args=self.UI_VALGRIND_ARGS,
                           cmd_args=self.UI_TEST_ARGS)

  def TestReliability(self):
    script_dir = path_utils.ScriptDir()
    url_list_file = os.path.join(script_dir, "reliability", "url_list.txt")
    return self.SimpleTest("chrome", "reliability_tests",
                           valgrind_test_args=self.UI_VALGRIND_ARGS,
                           cmd_args=(self.UI_TEST_ARGS +
                                     ["--list=%s" % url_list_file]))

  def TestSafeBrowsing(self):
    return self.SimpleTest("chrome", "safe_browsing_tests",
                           valgrind_test_args=self.UI_VALGRIND_ARGS,
                           cmd_args=(["--ui-test-action-max-timeout=450000"]))

  def TestSyncIntegration(self):
    return self.SimpleTest("chrome", "sync_integration_tests",
                           valgrind_test_args=self.UI_VALGRIND_ARGS,
                           cmd_args=(["--ui-test-action-max-timeout=450000"]))

  def TestLayoutChunk(self, chunk_num, chunk_size):
    # Run tests [chunk_num*chunk_size .. (chunk_num+1)*chunk_size) from the
    # list of tests.  Wrap around to beginning of list at end.
    # If chunk_size is zero, run all tests in the list once.
    # If a text file is given as argument, it is used as the list of tests.
    #
    # Build the ginormous commandline in 'cmd'.
    # It's going to be roughly
    #   python valgrind_test.py ... python run_webkit_tests.py ...
    # but we'll use the --indirect flag to valgrind_test.py
    # to avoid valgrinding python.
    # Start by building the valgrind_test.py commandline.
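    # A rough sketch of the resulting invocation (flags shown are illustrative
    # and depend on the options in effect):
    #   valgrind_test.py --source-dir=... --build-dir=... --trace_children \
    #     --indirect_webkit_layout --ignore_exit_code \
    #     -- python run_webkit_tests.py -v --run-singly ...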
    tool = valgrind_test.CreateTool(self._options.valgrind_tool)
    cmd = self._DefaultCommand(tool)
    cmd.append("--trace_children")
    cmd.append("--indirect_webkit_layout")
    cmd.append("--ignore_exit_code")
    # Now build script_cmd, the run_webkit_tests.py commandline.
    # Store each chunk in its own directory so that we can find the data later.
    chunk_dir = os.path.join("layout", "chunk_%05d" % chunk_num)
    out_dir = os.path.join(path_utils.ScriptDir(), "latest")
    out_dir = os.path.join(out_dir, chunk_dir)
    if os.path.exists(out_dir):
      old_files = glob.glob(os.path.join(out_dir, "*.txt"))
      for f in old_files:
        os.remove(f)
    else:
      os.makedirs(out_dir)
    script = os.path.join(self._source_dir, "webkit", "tools", "layout_tests",
                          "run_webkit_tests.py")
    # http://crbug.com/260627: After the switch to content_shell from DRT, each
    # test now brings up 3 processes.  Under Valgrind, they become memory bound
    # and can eventually OOM if we don't reduce the total count.
    # It'd be nice if content_shell automatically throttled the startup of new
    # tests if we're low on memory.
    jobs = max(1, int(multiprocessing.cpu_count() * 0.3))
    script_cmd = ["python", script, "-v",
                  "--run-singly",  # run a separate DumpRenderTree for each test
                  "--fully-parallel",
                  "--child-processes=%d" % jobs,
                  "--time-out-ms=200000",
                  "--no-retry-failures",  # retrying takes too much time
                  # http://crbug.com/176908: Don't launch a browser when done.
                  "--no-show-results",
                  "--nocheck-sys-deps"]
    # Pass build mode to run_webkit_tests.py.  We aren't passed it directly,
    # so parse it out of build_dir.  run_webkit_tests.py can only handle
    # the two values "Release" and "Debug".
    # TODO(Hercules): unify how all our scripts pass around build mode
    # (--mode / --target / --build-dir / --debug)
    if self._options.build_dir:
      build_root, mode = os.path.split(self._options.build_dir)
      script_cmd.extend(["--build-directory", build_root, "--target", mode])
    if (chunk_size > 0):
      script_cmd.append("--run-chunk=%d:%d" % (chunk_num, chunk_size))
    if len(self._args):
      # If the arg is a txt file, then treat it as a list of tests.
      if os.path.isfile(self._args[0]) and self._args[0][-4:] == ".txt":
        script_cmd.append("--test-list=%s" % self._args[0])
      else:
        script_cmd.extend(self._args)
    self._AppendGtestFilter(tool, "layout", script_cmd)
    # Now run script_cmd with the wrapper in cmd.
    cmd.extend(["--"])
    cmd.extend(script_cmd)

    # Layout tests often fail quickly, but the buildbot remains green.
    # Detect this situation when running with the default chunk size.
    if chunk_size == self.LAYOUT_TESTS_DEFAULT_CHUNK_SIZE:
      min_runtime_in_seconds = 120
    else:
      min_runtime_in_seconds = 0
    ret = tool.Run(cmd, "layout", min_runtime_in_seconds=min_runtime_in_seconds)
    return ret


  def TestLayout(self):
    # A "chunk file" is maintained in the local directory so that each test
    # runs a slice of the layout tests of size chunk_size that increments with
    # each run.  Since tests can be added and removed from the layout tests at
    # any time, this is not going to give exact coverage, but it will allow us
    # to continuously run small slices of the layout tests under valgrind rather
    # than having to run all of them in one shot.
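    # For example (illustrative numbers): if valgrind_layout_chunk.txt contains
    # "12" and chunk_size is the default 400, this run becomes chunk 13, i.e.
    # tests [13*400, 14*400) of the list, and the file is rewritten as "13".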
    chunk_size = self._options.num_tests
    if (chunk_size == 0):
      return self.TestLayoutChunk(0, 0)
    chunk_num = 0
    chunk_file = os.path.join("valgrind_layout_chunk.txt")
    logging.info("Reading state from " + chunk_file)
    try:
      f = open(chunk_file)
      if f:
        str = f.read()
        if len(str):
          chunk_num = int(str)
        # This should be enough so that we have a couple of complete runs
        # of test data stored in the archive (although note that when we loop
        # we are almost guaranteed not to be at the end of the test list).
        if chunk_num > 10000:
          chunk_num = 0
      f.close()
    except IOError, (errno, strerror):
      logging.error("error reading from file %s (%d, %s)" % (chunk_file,
                    errno, strerror))
    # Save the new chunk size before running the tests.  Otherwise if a
    # particular chunk hangs the bot, the chunk number will never get
    # incremented and the bot will be wedged.
    logging.info("Saving state to " + chunk_file)
    try:
      f = open(chunk_file, "w")
      chunk_num += 1
      f.write("%d" % chunk_num)
      f.close()
    except IOError, (errno, strerror):
      logging.error("error writing to file %s (%d, %s)" % (chunk_file, errno,
                    strerror))
    # Since we're running small chunks of the layout tests, it's important to
    # mark the ones that have errors in them.  These won't be visible in the
    # summary list for long, but will be useful for someone reviewing this bot.
    return self.TestLayoutChunk(chunk_num, chunk_size)

  # The known list of tests.
  # Recognise the original abbreviations as well as full executable names.
  _test_list = {
    "cmdline" : RunCmdLine,
    "app_list": TestAppList, "app_list_unittests": TestAppList,
    "ash": TestAsh, "ash_unittests": TestAsh,
    "aura": TestAura, "aura_unittests": TestAura,
    "automated_ui" : TestAutomatedUI,
    "base": TestBase, "base_unittests": TestBase,
    "browser": TestBrowser, "browser_tests": TestBrowser,
    "cast": TestCast, "cast_unittests": TestCast,
    "chromeos": TestChromeOS, "chromeos_unittests": TestChromeOS,
    "components": TestComponents, "components_unittests": TestComponents,
    "compositor": TestCompositor, "compositor_unittests": TestCompositor,
    "content": TestContent, "content_unittests": TestContent,
    "content_browsertests": TestContentBrowser,
    "courgette": TestCourgette, "courgette_unittests": TestCourgette,
    "crypto": TestCrypto, "crypto_unittests": TestCrypto,
    "device": TestDevice, "device_unittests": TestDevice,
    "events": TestEvents, "events_unittests": TestEvents,
    "ffmpeg": TestFFmpeg, "ffmpeg_unittests": TestFFmpeg,
    "ffmpeg_regression_tests": TestFFmpegRegressions,
    "gcm": TestGCM, "gcm_unit_tests": TestGCM,
    "gpu": TestGPU, "gpu_unittests": TestGPU,
    "ipc": TestIpc, "ipc_tests": TestIpc,
    "interactive_ui": TestInteractiveUI,
    "jingle": TestJingle, "jingle_unittests": TestJingle,
    "layout": TestLayout, "layout_tests": TestLayout,
    "webkit": TestLayout,
    "media": TestMedia, "media_unittests": TestMedia,
    "message_center": TestMessageCenter,
    "message_center_unittests" : TestMessageCenter,
    "net": TestNet, "net_unittests": TestNet,
    "net_perf": TestNetPerf, "net_perftests": TestNetPerf,
    "ppapi": TestPPAPI, "ppapi_unittests": TestPPAPI,
    "printing": TestPrinting, "printing_unittests": TestPrinting,
    "reliability": TestReliability, "reliability_tests": TestReliability,
    "remoting": TestRemoting, "remoting_unittests": TestRemoting,
TestSafeBrowsing, "safe_browsing_tests": TestSafeBrowsing, 577 "sandbox": TestLinuxSandbox, "sandbox_linux_unittests": TestLinuxSandbox, 578 "sql": TestSql, "sql_unittests": TestSql, 579 "sync": TestSync, "sync_unit_tests": TestSync, 580 "sync_integration_tests": TestSyncIntegration, 581 "sync_integration": TestSyncIntegration, 582 "ui_unit": TestUIUnit, "ui_unittests": TestUIUnit, 583 "unit": TestUnit, "unit_tests": TestUnit, 584 "url": TestURL, "url_unittests": TestURL, 585 "views": TestViews, "views_unittests": TestViews, 586 } 587 588 589def _main(): 590 parser = optparse.OptionParser("usage: %prog -b <dir> -t <test> " 591 "[-t <test> ...]") 592 593 parser.add_option("--help-tests", dest="help_tests", action="store_true", 594 default=False, help="List all available tests") 595 parser.add_option("-b", "--build-dir", 596 help="the location of the compiler output") 597 parser.add_option("--target", help="Debug or Release") 598 parser.add_option("-t", "--test", action="append", default=[], 599 help="which test to run, supports test:gtest_filter format " 600 "as well.") 601 parser.add_option("--baseline", action="store_true", default=False, 602 help="generate baseline data instead of validating") 603 parser.add_option("--gtest_filter", 604 help="additional arguments to --gtest_filter") 605 parser.add_option("--gtest_repeat", help="argument for --gtest_repeat") 606 parser.add_option("--gtest_shuffle", action="store_true", default=False, 607 help="Randomize tests' orders on every iteration.") 608 parser.add_option("-v", "--verbose", action="store_true", default=False, 609 help="verbose output - enable debug log messages") 610 parser.add_option("--tool", dest="valgrind_tool", default="memcheck", 611 help="specify a valgrind tool to run the tests under") 612 parser.add_option("--tool_flags", dest="valgrind_tool_flags", default="", 613 help="specify custom flags for the selected valgrind tool") 614 parser.add_option("--keep_logs", action="store_true", default=False, 615 help="store memory tool logs in the <tool>.logs directory " 616 "instead of /tmp.\nThis can be useful for tool " 617 "developers/maintainers.\nPlease note that the <tool>" 618 ".logs directory will be clobbered on tool startup.") 619 parser.add_option("-n", "--num_tests", type="int", 620 default=ChromeTests.LAYOUT_TESTS_DEFAULT_CHUNK_SIZE, 621 help="for layout tests: # of subtests per run. 0 for all.") 622 # TODO(thestig) Remove this if we can. 623 parser.add_option("--gtest_color", dest="gtest_color", default="no", 624 help="dummy compatibility flag for sharding_supervisor.") 625 parser.add_option("--brave-new-test-launcher", action="store_true", 626 help="run the tests with --brave-new-test-launcher") 627 parser.add_option("--test-launcher-bot-mode", action="store_true", 628 help="run the tests with --test-launcher-bot-mode") 629 630 options, args = parser.parse_args() 631 632 # Bake target into build_dir. 
  if options.target and options.build_dir:
    assert (options.target !=
            os.path.basename(os.path.dirname(options.build_dir)))
    options.build_dir = os.path.join(os.path.abspath(options.build_dir),
                                     options.target)

  if options.verbose:
    logging_utils.config_root(logging.DEBUG)
  else:
    logging_utils.config_root()

  if options.help_tests:
    ChromeTests.ShowTests()
    return 0

  if not options.test:
    parser.error("--test not specified")

  if len(options.test) != 1 and options.gtest_filter:
    parser.error("--gtest_filter and multiple tests don't make sense together")

  for t in options.test:
    tests = ChromeTests(options, args, t)
    ret = tests.Run()
    if ret: return ret
  return 0


if __name__ == "__main__":
  sys.exit(_main())
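# Example invocations (illustrative; paths and filters depend on your checkout
# and platform):
#   tools/valgrind/chrome_tests.py --build-dir=out/Debug -t base
#   tools/valgrind/chrome_tests.py -t unit:SomeSuite.SomeTest --tool=memcheck
#   tools/valgrind/chrome_tests.py --help-tests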