chrome_tests.py revision a93a17c8d99d686bd4a1511e5504e5e6cc9fcadf
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

''' Runs various chrome tests through valgrind_test.py.'''

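# Example invocation (illustrative; the flags are defined in _main below):
#   tools/valgrind/chrome_tests.py --build_dir=out/Debug --test=base \
#       --tool=memcheck
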
import glob
import logging
import optparse
import os
import stat
import sys

import logging_utils
import path_utils

import common
import valgrind_test

class TestNotFound(Exception): pass

class MultipleGTestFiltersSpecified(Exception): pass

class BuildDirNotFound(Exception): pass

class BuildDirAmbiguous(Exception): pass

class ChromeTests:
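  # Tools that run tests much more slowly than a native run; the broader
  # ".gtest.txt" exclude lists are applied only for these tools (see
  # _AppendGtestFilter below).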
  SLOW_TOOLS = ["memcheck", "tsan", "tsan_rv", "drmemory"]
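  # Default number of layout tests per chunk in TestLayout; also the default
  # value of the --num_tests flag.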
  LAYOUT_TESTS_DEFAULT_CHUNK_SIZE = 1500

  def __init__(self, options, args, test):
    if ':' in test:
      (self._test, self._gtest_filter) = test.split(':', 1)
    else:
      self._test = test
      self._gtest_filter = options.gtest_filter

    if self._test not in self._test_list:
      raise TestNotFound("Unknown test: %s" % test)

    if options.gtest_filter and options.gtest_filter != self._gtest_filter:
      raise MultipleGTestFiltersSpecified("Cannot specify both --gtest_filter "
                                          "and --test %s" % test)

    self._options = options
    self._args = args

    script_dir = path_utils.ScriptDir()
    # Compute the top of the tree (the "source dir") from the script dir (where
    # this script lives).  We assume that the script dir is in tools/valgrind/
    # relative to the top of the tree.
    self._source_dir = os.path.dirname(os.path.dirname(script_dir))
    # Since this path is used for string matching, make sure it's always
    # an absolute Unix-style path.
    self._source_dir = os.path.abspath(self._source_dir).replace('\\', '/')
    self._command_preamble = ["--source_dir=%s" % (self._source_dir)]

    if not self._options.build_dir:
      dirs = [
        os.path.join(self._source_dir, "xcodebuild", "Debug"),
        os.path.join(self._source_dir, "out", "Debug"),
        os.path.join(self._source_dir, "build", "Debug"),
      ]
      build_dir = [d for d in dirs if os.path.isdir(d)]
      if len(build_dir) > 1:
        raise BuildDirAmbiguous("Found more than one suitable build dir:\n"
                                "%s\nPlease specify just one "
                                "using --build_dir" % ", ".join(build_dir))
      elif build_dir:
        self._options.build_dir = build_dir[0]
      else:
        self._options.build_dir = None

    if self._options.build_dir:
      build_dir = os.path.abspath(self._options.build_dir)
      self._command_preamble += ["--build_dir=%s" % build_dir]

  def _EnsureBuildDirFound(self):
    if not self._options.build_dir:
      raise BuildDirNotFound("Oops, couldn't find a build dir, please "
                             "specify it manually using --build_dir")

  def _DefaultCommand(self, tool, exe=None, valgrind_test_args=None):
    '''Generates the default command array that most tests will use.'''
    if exe and common.IsWindows():
      exe += '.exe'

    cmd = list(self._command_preamble)

    # Find all suppressions matching the following pattern:
    # tools/valgrind/TOOL/suppressions[_PLATFORM].txt
    # and list them with --suppressions= prefix.
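    # For example, for memcheck on Mac this might add:
    #   --suppressions=.../tools/valgrind/memcheck/suppressions.txt
    #   --suppressions=.../tools/valgrind/memcheck/suppressions_mac.txt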
    script_dir = path_utils.ScriptDir()
    tool_name = tool.ToolName()
    suppression_file = os.path.join(script_dir, tool_name, "suppressions.txt")
    if os.path.exists(suppression_file):
      cmd.append("--suppressions=%s" % suppression_file)
    # Platform-specific suppression
    for platform in common.PlatformNames():
      platform_suppression_file = \
          os.path.join(script_dir, tool_name, 'suppressions_%s.txt' % platform)
      if os.path.exists(platform_suppression_file):
        cmd.append("--suppressions=%s" % platform_suppression_file)

    if self._options.valgrind_tool_flags:
      cmd += self._options.valgrind_tool_flags.split(" ")
    if self._options.keep_logs:
      cmd += ["--keep_logs"]
    if valgrind_test_args is not None:
      for arg in valgrind_test_args:
        cmd.append(arg)
    if exe:
      self._EnsureBuildDirFound()
      cmd.append(os.path.join(self._options.build_dir, exe))
      # Valgrind runs tests slowly, so slow tests hurt more; show elapsed time
      # so we can find the slowpokes.
      cmd.append("--gtest_print_time")
    if self._options.gtest_repeat:
      cmd.append("--gtest_repeat=%s" % self._options.gtest_repeat)
    return cmd

  def Run(self):
    ''' Runs the test specified by command-line argument --test '''
    logging.info("running test %s" % (self._test))
    return self._test_list[self._test](self)

  def _AppendGtestFilter(self, tool, name, cmd):
    '''Append an appropriate --gtest_filter flag to the googletest binary
       invocation.
       If the user passed a filter mentioning only one test, use it as-is.
       Otherwise, filter out the tests listed in the appropriate gtest_exclude
       files.
    '''
    if (self._gtest_filter and
        ":" not in self._gtest_filter and
        "?" not in self._gtest_filter and
        "*" not in self._gtest_filter):
      cmd.append("--gtest_filter=%s" % self._gtest_filter)
      return

    filters = []
    gtest_files_dir = os.path.join(path_utils.ScriptDir(), "gtest_exclude")

    gtest_filter_files = [
        os.path.join(gtest_files_dir, name + ".gtest-%s.txt" % tool.ToolName())]
    # Use ".gtest.txt" files only for slow tools, as they now contain
    # Valgrind- and Dr.Memory-specific filters.
    # TODO(glider): rename the files to ".gtest_slow.txt"
    if tool.ToolName() in ChromeTests.SLOW_TOOLS:
      gtest_filter_files += [os.path.join(gtest_files_dir, name + ".gtest.txt")]
    for platform_suffix in common.PlatformNames():
      gtest_filter_files += [
        os.path.join(gtest_files_dir, name + ".gtest_%s.txt" % platform_suffix),
        os.path.join(gtest_files_dir, name + ".gtest-%s_%s.txt" % \
            (tool.ToolName(), platform_suffix))]
    logging.info("Reading gtest exclude filter files:")
    for filename in gtest_filter_files:
      # strip the leading absolute path (may be very long on the bot)
      # and the following / or \.
      readable_filename = filename.replace("\\", "/")  # '\' on Windows
      readable_filename = readable_filename.replace(self._source_dir, "")[1:]
      if not os.path.exists(filename):
        logging.info("  \"%s\" - not found" % readable_filename)
        continue
      logging.info("  \"%s\" - OK" % readable_filename)
      with open(filename, 'r') as f:
        for line in f:
          if line.startswith("#") or line.startswith("//") or line.isspace():
            continue
          line = line.rstrip()
          test_prefixes = ["FLAKY", "FAILS"]
          for p in test_prefixes:
            # Strip prefixes from the test names.
            line = line.replace(".%s_" % p, ".")
          # Exclude the original test name.
          filters.append(line)
          if line[-2:] != ".*":
            # List all possible prefixes if line doesn't end with ".*".
            for p in test_prefixes:
              filters.append(line.replace(".", ".%s_" % p))
    # Get rid of duplicates.
    filters = set(filters)
    gtest_filter = self._gtest_filter
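    # googletest filters have the form "POSITIVE_PATTERNS[-NEGATIVE_PATTERNS]"
    # (':'-separated pattern lists), so the excluded tests are appended after a
    # '-' separator, added only if the user's filter does not already have one.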
    if len(filters):
      if gtest_filter:
        gtest_filter += ":"
        if gtest_filter.find("-") < 0:
          gtest_filter += "-"
      else:
        gtest_filter = "-"
      gtest_filter += ":".join(filters)
    if gtest_filter:
      cmd.append("--gtest_filter=%s" % gtest_filter)

  @staticmethod
  def ShowTests():
    test_to_names = {}
    for name, test_function in ChromeTests._test_list.iteritems():
      test_to_names.setdefault(test_function, []).append(name)

    name_to_aliases = {}
    for names in test_to_names.itervalues():
      names.sort(key=len)
      name_to_aliases[names[0]] = names[1:]

    print
    print "Available tests:"
    print "----------------"
    for name, aliases in sorted(name_to_aliases.iteritems()):
      if aliases:
        print "   {} (aka {})".format(name, ', '.join(aliases))
      else:
        print "   {}".format(name)

  def SetupLdPath(self, requires_build_dir):
    if requires_build_dir:
      self._EnsureBuildDirFound()
    elif not self._options.build_dir:
      return

    # Append build_dir to LD_LIBRARY_PATH so external libraries can be loaded.
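    # Note that os.putenv() only affects the environment inherited by child
    # processes (the test binaries); it does not update os.environ itself.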
    if os.getenv("LD_LIBRARY_PATH"):
      os.putenv("LD_LIBRARY_PATH", "%s:%s" % (os.getenv("LD_LIBRARY_PATH"),
                                              self._options.build_dir))
    else:
      os.putenv("LD_LIBRARY_PATH", self._options.build_dir)

  def SimpleTest(self, module, name, valgrind_test_args=None, cmd_args=None):
    tool = valgrind_test.CreateTool(self._options.valgrind_tool)
    cmd = self._DefaultCommand(tool, name, valgrind_test_args)
    self._AppendGtestFilter(tool, name, cmd)
    cmd.extend(['--test-tiny-timeout=1000'])
    if cmd_args:
      cmd.extend(cmd_args)

    self.SetupLdPath(True)
    return tool.Run(cmd, module)

  def RunCmdLine(self):
    tool = valgrind_test.CreateTool(self._options.valgrind_tool)
    cmd = self._DefaultCommand(tool, None, self._args)
    self.SetupLdPath(False)
    return tool.Run(cmd, None)

  def TestAsh(self):
    return self.SimpleTest("ash", "ash_unittests")

  def TestAura(self):
    return self.SimpleTest("aura", "aura_unittests")

  def TestBase(self):
    return self.SimpleTest("base", "base_unittests")

  def TestChromeOS(self):
    return self.SimpleTest("chromeos", "chromeos_unittests")

  def TestComponents(self):
    return self.SimpleTest("components", "components_unittests")

  def TestCompositor(self):
    return self.SimpleTest("compositor", "compositor_unittests")

  def TestContent(self):
    return self.SimpleTest("content", "content_unittests")

  def TestContentBrowser(self):
    return self.SimpleTest("content", "content_browsertests")

  def TestCourgette(self):
    return self.SimpleTest("courgette", "courgette_unittests")

  def TestCrypto(self):
    return self.SimpleTest("crypto", "crypto_unittests")

  def TestDevice(self):
    return self.SimpleTest("device", "device_unittests")

  def TestFFmpeg(self):
    return self.SimpleTest("chrome", "ffmpeg_unittests")

  def TestFFmpegRegressions(self):
    return self.SimpleTest("chrome", "ffmpeg_regression_tests")

  def TestGPU(self):
    return self.SimpleTest("gpu", "gpu_unittests")

  def TestGURL(self):
    return self.SimpleTest("chrome", "googleurl_unittests")

  def TestURL(self):
    return self.SimpleTest("chrome", "url_unittests")

  def TestIpc(self):
    return self.SimpleTest("ipc", "ipc_tests",
                           valgrind_test_args=["--trace_children"])

  def TestJingle(self):
    return self.SimpleTest("chrome", "jingle_unittests")

  def TestMedia(self):
    return self.SimpleTest("chrome", "media_unittests")

  def TestNet(self):
    return self.SimpleTest("net", "net_unittests")

  def TestPPAPI(self):
    return self.SimpleTest("chrome", "ppapi_unittests")

  def TestPrinting(self):
    return self.SimpleTest("chrome", "printing_unittests")

  def TestRemoting(self):
    return self.SimpleTest("chrome", "remoting_unittests",
                           cmd_args=[
                               "--ui-test-action-timeout=60000",
                               "--ui-test-action-max-timeout=150000"])

  def TestSql(self):
    return self.SimpleTest("chrome", "sql_unittests")

  def TestSync(self):
    return self.SimpleTest("chrome", "sync_unit_tests")

  def TestLinuxSandbox(self):
    return self.SimpleTest("sandbox", "sandbox_linux_unittests")

  def TestUnit(self):
    # http://crbug.com/51716
    # Disabling all unit tests on Mac under memcheck.
    # Problems reappeared after r119922.
    if common.IsMac() and (self._options.valgrind_tool == "memcheck"):
      logging.warning("unit_tests are disabled for memcheck on MacOS.")
      return 0
    return self.SimpleTest("chrome", "unit_tests")

  def TestUIUnit(self):
    return self.SimpleTest("chrome", "ui_unittests")

  def TestViews(self):
    return self.SimpleTest("views", "views_unittests")

  # Valgrind timeouts are in seconds.
  UI_VALGRIND_ARGS = ["--timeout=14400", "--trace_children", "--indirect"]
  # UI test timeouts are in milliseconds.
  UI_TEST_ARGS = ["--ui-test-action-timeout=60000",
                  "--ui-test-action-max-timeout=150000",
                  "--no-sandbox"]

  # TODO(thestig) fine-tune these values.
  # Valgrind timeouts are in seconds.
  BROWSER_VALGRIND_ARGS = ["--timeout=50000", "--trace_children", "--indirect"]
  # Browser test timeouts are in milliseconds.
  BROWSER_TEST_ARGS = ["--ui-test-action-timeout=200000",
                       "--ui-test-action-max-timeout=400000",
                       "--no-sandbox"]

  def TestAutomatedUI(self):
    return self.SimpleTest("chrome", "automated_ui_tests",
                           valgrind_test_args=self.UI_VALGRIND_ARGS,
                           cmd_args=self.UI_TEST_ARGS)

  def TestBrowser(self):
    return self.SimpleTest("chrome", "browser_tests",
                           valgrind_test_args=self.BROWSER_VALGRIND_ARGS,
                           cmd_args=self.BROWSER_TEST_ARGS)

  def TestInteractiveUI(self):
    return self.SimpleTest("chrome", "interactive_ui_tests",
                           valgrind_test_args=self.UI_VALGRIND_ARGS,
                           cmd_args=self.UI_TEST_ARGS)

  def TestReliability(self):
    script_dir = path_utils.ScriptDir()
    url_list_file = os.path.join(script_dir, "reliability", "url_list.txt")
    return self.SimpleTest("chrome", "reliability_tests",
                           valgrind_test_args=self.UI_VALGRIND_ARGS,
                           cmd_args=(self.UI_TEST_ARGS +
                                     ["--list=%s" % url_list_file]))

  def TestSafeBrowsing(self):
    return self.SimpleTest("chrome", "safe_browsing_tests",
                           valgrind_test_args=self.UI_VALGRIND_ARGS,
                           cmd_args=(["--ui-test-action-max-timeout=450000"]))

  def TestSyncIntegration(self):
    return self.SimpleTest("chrome", "sync_integration_tests",
                           valgrind_test_args=self.UI_VALGRIND_ARGS,
                           cmd_args=(["--ui-test-action-max-timeout=450000"]))

  def TestLayoutChunk(self, chunk_num, chunk_size):
    # Run tests [chunk_num*chunk_size .. (chunk_num+1)*chunk_size) from the
    # list of tests.  Wrap around to the beginning of the list at the end.
    # If chunk_size is zero, run all tests in the list once.
    # If a text file is given as an argument, it is used as the list of tests.
    #
    # Build the ginormous command line in 'cmd'.
    # It's going to be roughly
    #  python valgrind_test.py ... python run_webkit_tests.py ...
    # but we'll use the --indirect_webkit_layout flag to valgrind_test.py
    # to avoid valgrinding python.
    # Start by building the valgrind_test.py command line.
    tool = valgrind_test.CreateTool(self._options.valgrind_tool)
    cmd = self._DefaultCommand(tool)
    cmd.append("--trace_children")
    cmd.append("--indirect_webkit_layout")
    cmd.append("--ignore_exit_code")
    # Now build script_cmd, the run_webkit_tests.py command line.
    # Store each chunk in its own directory so that we can find the data later.
    chunk_dir = os.path.join("layout", "chunk_%05d" % chunk_num)
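    # e.g. chunk 42 ends up in .../latest/layout/chunk_00042.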
    out_dir = os.path.join(path_utils.ScriptDir(), "latest")
    out_dir = os.path.join(out_dir, chunk_dir)
    if os.path.exists(out_dir):
      old_files = glob.glob(os.path.join(out_dir, "*.txt"))
      for f in old_files:
        os.remove(f)
    else:
      os.makedirs(out_dir)
    script = os.path.join(self._source_dir, "webkit", "tools", "layout_tests",
                          "run_webkit_tests.py")
    script_cmd = ["python", script, "-v",
                  "--run-singly",  # run a separate DumpRenderTree for each test
                  "--fully-parallel",
                  "--time-out-ms=200000",
                  "--no-retry-failures",  # retrying takes too much time
                  # http://crbug.com/176908: Don't launch a browser when done.
                  "--no-show-results",
                  "--nocheck-sys-deps"]
    # Pass build mode to run_webkit_tests.py.  We aren't passed it directly,
    # so parse it out of build_dir.  run_webkit_tests.py can only handle
    # the two values "Release" and "Debug".
    # TODO(Hercules): unify how all our scripts pass around build mode
    # (--mode / --target / --build_dir / --debug)
    if self._options.build_dir.endswith("Debug"):
      script_cmd.append("--debug")
    if chunk_size > 0:
      script_cmd.append("--run-chunk=%d:%d" % (chunk_num, chunk_size))
    if len(self._args):
      # If the arg is a .txt file, then treat it as a list of tests.
      if os.path.isfile(self._args[0]) and self._args[0][-4:] == ".txt":
        script_cmd.append("--test-list=%s" % self._args[0])
      else:
        script_cmd.extend(self._args)
    self._AppendGtestFilter(tool, "layout", script_cmd)
    # Now run script_cmd with the wrapper in cmd.
    cmd.append("--")
    cmd.extend(script_cmd)

    # Layout tests oftentimes fail quickly, but the buildbot remains green.
    # Detect this situation when running with the default chunk size.
    if chunk_size == self.LAYOUT_TESTS_DEFAULT_CHUNK_SIZE:
      min_runtime_in_seconds = 120
    else:
      min_runtime_in_seconds = 0
    ret = tool.Run(cmd, "layout", min_runtime_in_seconds=min_runtime_in_seconds)
    return ret


  def TestLayout(self):
    # A "chunk file" is maintained in the local directory so that each run
    # tests a different slice of the layout tests, of size chunk_size,
    # advancing with each run.  Since tests can be added to and removed from
    # the layout tests at any time, this is not going to give exact coverage,
    # but it will allow us to continuously run small slices of the layout
    # tests under valgrind rather than having to run all of them in one shot.
    chunk_size = self._options.num_tests
    if chunk_size == 0:
      return self.TestLayoutChunk(0, 0)
    chunk_num = 0
    chunk_file = "valgrind_layout_chunk.txt"
    logging.info("Reading state from " + chunk_file)
    try:
      f = open(chunk_file)
      contents = f.read()
      if len(contents):
        chunk_num = int(contents)
      # This should be enough so that we have a couple of complete runs
      # of test data stored in the archive (although note that when we wrap
      # around, we are almost guaranteed not to be at the end of the test
      # list).
      if chunk_num > 10000:
        chunk_num = 0
      f.close()
    except IOError, (errno, strerror):
      logging.error("error reading from file %s (%d, %s)" % (chunk_file,
                    errno, strerror))
    # Save the new chunk number before running the tests. Otherwise if a
    # particular chunk hangs the bot, the chunk number will never get
    # incremented and the bot will be wedged.
    logging.info("Saving state to " + chunk_file)
    try:
      f = open(chunk_file, "w")
      chunk_num += 1
      f.write("%d" % chunk_num)
      f.close()
    except IOError, (errno, strerror):
      logging.error("error writing to file %s (%d, %s)" % (chunk_file, errno,
                    strerror))
    # Since we're running small chunks of the layout tests, it's important to
    # mark the ones that have errors in them.  These won't be visible in the
    # summary list for long, but will be useful for someone reviewing this bot.
    return self.TestLayoutChunk(chunk_num, chunk_size)

  # The known list of tests.
  # Recognise the original abbreviations as well as full executable names.
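  # A test may also be specified as "<name>:<gtest_filter>" on the command
  # line; see __init__ above.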
  _test_list = {
    "cmdline": RunCmdLine,
    "ash": TestAsh,              "ash_unittests": TestAsh,
    "aura": TestAura,            "aura_unittests": TestAura,
    "automated_ui": TestAutomatedUI,
    "base": TestBase,            "base_unittests": TestBase,
    "browser": TestBrowser,      "browser_tests": TestBrowser,
    "chromeos": TestChromeOS,    "chromeos_unittests": TestChromeOS,
    "components": TestComponents, "components_unittests": TestComponents,
    "compositor": TestCompositor, "compositor_unittests": TestCompositor,
    "content": TestContent,      "content_unittests": TestContent,
    "content_browsertests": TestContentBrowser,
    "courgette": TestCourgette,  "courgette_unittests": TestCourgette,
    "crypto": TestCrypto,        "crypto_unittests": TestCrypto,
    "device": TestDevice,        "device_unittests": TestDevice,
    "ffmpeg": TestFFmpeg,        "ffmpeg_unittests": TestFFmpeg,
    "ffmpeg_regression_tests": TestFFmpegRegressions,
    "googleurl": TestGURL,       "googleurl_unittests": TestGURL,
    "url": TestURL,              "url_unittests": TestURL,
    "gpu": TestGPU,              "gpu_unittests": TestGPU,
    "ipc": TestIpc,              "ipc_tests": TestIpc,
    "interactive_ui": TestInteractiveUI,
    "layout": TestLayout,        "layout_tests": TestLayout,
    "webkit": TestLayout,
    "media": TestMedia,          "media_unittests": TestMedia,
    "net": TestNet,              "net_unittests": TestNet,
    "jingle": TestJingle,        "jingle_unittests": TestJingle,
    "ppapi": TestPPAPI,          "ppapi_unittests": TestPPAPI,
    "printing": TestPrinting,    "printing_unittests": TestPrinting,
    "reliability": TestReliability, "reliability_tests": TestReliability,
    "remoting": TestRemoting,    "remoting_unittests": TestRemoting,
    "safe_browsing": TestSafeBrowsing, "safe_browsing_tests": TestSafeBrowsing,
    "sandbox": TestLinuxSandbox, "sandbox_linux_unittests": TestLinuxSandbox,
    "sql": TestSql,              "sql_unittests": TestSql,
    "sync": TestSync,            "sync_unit_tests": TestSync,
    "sync_integration_tests": TestSyncIntegration,
    "sync_integration": TestSyncIntegration,
    "ui_unit": TestUIUnit,       "ui_unittests": TestUIUnit,
    "unit": TestUnit,            "unit_tests": TestUnit,
    "views": TestViews,          "views_unittests": TestViews,
  }


def _main():
  parser = optparse.OptionParser("usage: %prog -b <dir> -t <test> "
                                 "[-t <test> ...]")
  parser.disable_interspersed_args()

  parser.add_option("", "--help-tests", dest="help_tests", action="store_true",
                    default=False, help="List all available tests")
  parser.add_option("-b", "--build_dir",
                    help="the location of the compiler output")
  parser.add_option("-t", "--test", action="append", default=[],
                    help="which test to run, supports test:gtest_filter format "
                         "as well.")
  parser.add_option("", "--baseline", action="store_true", default=False,
                    help="generate baseline data instead of validating")
  parser.add_option("", "--gtest_filter",
                    help="additional arguments to --gtest_filter")
  parser.add_option("", "--gtest_repeat",
                    help="argument for --gtest_repeat")
  parser.add_option("-v", "--verbose", action="store_true", default=False,
                    help="verbose output - enable debug log messages")
  parser.add_option("", "--tool", dest="valgrind_tool", default="memcheck",
                    help="specify a valgrind tool to run the tests under")
  parser.add_option("", "--tool_flags", dest="valgrind_tool_flags", default="",
                    help="specify custom flags for the selected valgrind tool")
  parser.add_option("", "--keep_logs", action="store_true", default=False,
                    help="store memory tool logs in the <tool>.logs directory "
                         "instead of /tmp.\nThis can be useful for tool "
                         "developers/maintainers.\nPlease note that the <tool>"
                         ".logs directory will be clobbered on tool startup.")
  parser.add_option("-n", "--num_tests", type="int",
                    default=ChromeTests.LAYOUT_TESTS_DEFAULT_CHUNK_SIZE,
                    help="for layout tests: # of subtests per run.  0 for all.")
  # TODO(thestig) Remove this if we can.
  parser.add_option("", "--gtest_color", dest="gtest_color", default="no",
                    help="dummy compatibility flag for sharding_supervisor.")

  options, args = parser.parse_args()

  if options.verbose:
    logging_utils.config_root(logging.DEBUG)
  else:
    logging_utils.config_root()

  if options.help_tests:
    ChromeTests.ShowTests()
    return 0

  if not options.test:
    parser.error("--test not specified")

  if len(options.test) != 1 and options.gtest_filter:
    parser.error("--gtest_filter and multiple tests don't make sense together")

  for t in options.test:
    tests = ChromeTests(options, args, t)
    ret = tests.Run()
    if ret: return ret
  return 0


if __name__ == "__main__":
  sys.exit(_main())