#!/usr/bin/env python
# chrome_tests.py revision 90dce4d38c5ff5333bea97d859d4e484e27edf0c
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

''' Runs various chrome tests through valgrind_test.py.'''
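
# Example invocations (illustrative; test names and build paths vary):
#   tools/valgrind/chrome_tests.py -b out/Debug -t base
#   tools/valgrind/chrome_tests.py -t base:FooTest.*
# Use --help-tests to list every recognized test name and alias.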

import glob
import logging
import optparse
import os
import stat
import sys

import logging_utils
import path_utils

import common
import valgrind_test

class TestNotFound(Exception): pass

class MultipleGTestFiltersSpecified(Exception): pass

class BuildDirNotFound(Exception): pass

class BuildDirAmbiguous(Exception): pass

class ChromeTests:
  SLOW_TOOLS = ["memcheck", "tsan", "tsan_rv", "drmemory"]
  LAYOUT_TESTS_DEFAULT_CHUNK_SIZE = 1500

  def __init__(self, options, args, test):
    if ':' in test:
      (self._test, self._gtest_filter) = test.split(':', 1)
    else:
      self._test = test
      self._gtest_filter = options.gtest_filter
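    # For example, "-t base:FooTest.*" (with a hypothetical filter) selects
    # the "base" test and sets self._gtest_filter to "FooTest.*".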

    if self._test not in self._test_list:
      raise TestNotFound("Unknown test: %s" % test)

    if options.gtest_filter and options.gtest_filter != self._gtest_filter:
      raise MultipleGTestFiltersSpecified("Cannot specify both --gtest_filter "
                                          "and --test %s" % test)

    self._options = options
    self._args = args

    script_dir = path_utils.ScriptDir()
    # Compute the top of the tree (the "source dir") from the script dir (where
    # this script lives).  We assume that the script dir is in tools/valgrind/
    # relative to the top of the tree.
    self._source_dir = os.path.dirname(os.path.dirname(script_dir))
    # Since this path is used for string matching, make sure it's always
    # an absolute Unix-style path.
    self._source_dir = os.path.abspath(self._source_dir).replace('\\', '/')
    valgrind_test_script = os.path.join(script_dir, "valgrind_test.py")
    self._command_preamble = ["--source_dir=%s" % (self._source_dir)]

    if not self._options.build_dir:
      dirs = [
        os.path.join(self._source_dir, "xcodebuild", "Debug"),
        os.path.join(self._source_dir, "out", "Debug"),
        os.path.join(self._source_dir, "build", "Debug"),
      ]
      build_dir = [d for d in dirs if os.path.isdir(d)]
      if len(build_dir) > 1:
        raise BuildDirAmbiguous("Found more than one suitable build dir:\n"
                                "%s\nPlease specify just one "
                                "using --build_dir" % ", ".join(build_dir))
      elif build_dir:
        self._options.build_dir = build_dir[0]
      else:
        self._options.build_dir = None

    if self._options.build_dir:
      build_dir = os.path.abspath(self._options.build_dir)
      self._command_preamble += ["--build_dir=%s" % (self._options.build_dir)]

  def _EnsureBuildDirFound(self):
    if not self._options.build_dir:
      raise BuildDirNotFound("Oops, couldn't find a build dir, please "
                             "specify it manually using --build_dir")

  def _DefaultCommand(self, tool, exe=None, valgrind_test_args=None):
    '''Generates the default command array that most tests will use.'''
    if exe and common.IsWindows():
      exe += '.exe'

    cmd = list(self._command_preamble)

    # Find all suppressions matching the following pattern:
    # tools/valgrind/TOOL/suppressions[_PLATFORM].txt
    # and list them with --suppressions= prefix.
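    # For memcheck on Linux, for instance, this picks up (when present)
    #   tools/valgrind/memcheck/suppressions.txt
    #   tools/valgrind/memcheck/suppressions_linux.txt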
    script_dir = path_utils.ScriptDir()
    tool_name = tool.ToolName()
    suppression_file = os.path.join(script_dir, tool_name, "suppressions.txt")
    if os.path.exists(suppression_file):
      cmd.append("--suppressions=%s" % suppression_file)
    # Platform-specific suppressions.
    for platform in common.PlatformNames():
      platform_suppression_file = \
          os.path.join(script_dir, tool_name, 'suppressions_%s.txt' % platform)
      if os.path.exists(platform_suppression_file):
        cmd.append("--suppressions=%s" % platform_suppression_file)

    if self._options.valgrind_tool_flags:
      cmd += self._options.valgrind_tool_flags.split(" ")
    if self._options.keep_logs:
      cmd += ["--keep_logs"]
    if valgrind_test_args is not None:
      cmd.extend(valgrind_test_args)
    if exe:
      self._EnsureBuildDirFound()
      cmd.append(os.path.join(self._options.build_dir, exe))
      # Valgrind runs tests slowly, so slow tests hurt more; show elapsed time
      # so we can find the slowpokes.
      cmd.append("--gtest_print_time")
    if self._options.gtest_repeat:
      cmd.append("--gtest_repeat=%s" % self._options.gtest_repeat)
    return cmd

  def Run(self):
    ''' Runs the test specified by command-line argument --test '''
    logging.info("running test %s" % (self._test))
    return self._test_list[self._test](self)

  def _AppendGtestFilter(self, tool, name, cmd):
    '''Append an appropriate --gtest_filter flag to the googletest binary
       invocation.
       If the user passed a filter mentioning only one test, just use it.
       Otherwise, filter out tests listed in the appropriate gtest_exclude
       files.
    '''
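    # Illustration with a hypothetical excluded test "FooTest.Bar": the
    # filters become FooTest.Bar, FooTest.FLAKY_Bar and FooTest.FAILS_Bar,
    # and the appended flag is (exclusions joined in set order)
    #   --gtest_filter=-FooTest.Bar:FooTest.FLAKY_Bar:FooTest.FAILS_Bar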
    if (self._gtest_filter and
        ":" not in self._gtest_filter and
        "?" not in self._gtest_filter and
        "*" not in self._gtest_filter):
      cmd.append("--gtest_filter=%s" % self._gtest_filter)
      return

    filters = []
    gtest_files_dir = os.path.join(path_utils.ScriptDir(), "gtest_exclude")

    gtest_filter_files = [
        os.path.join(gtest_files_dir, name + ".gtest-%s.txt" % tool.ToolName())]
    # Use ".gtest.txt" files only for slow tools, as they now contain
    # Valgrind- and Dr.Memory-specific filters.
    # TODO(glider): rename the files to ".gtest_slow.txt"
    if tool.ToolName() in ChromeTests.SLOW_TOOLS:
      gtest_filter_files += [os.path.join(gtest_files_dir, name + ".gtest.txt")]
    for platform_suffix in common.PlatformNames():
      gtest_filter_files += [
        os.path.join(gtest_files_dir, name + ".gtest_%s.txt" % platform_suffix),
        os.path.join(gtest_files_dir, name + ".gtest-%s_%s.txt" % \
            (tool.ToolName(), platform_suffix))]
    logging.info("Reading gtest exclude filter files:")
    for filename in gtest_filter_files:
      # Strip the leading absolute path (may be very long on the bot)
      # and the following / or \.
      readable_filename = filename.replace("\\", "/")  # '\' on Windows
      readable_filename = readable_filename.replace(self._source_dir, "")[1:]
      if not os.path.exists(filename):
        logging.info("  \"%s\" - not found" % readable_filename)
        continue
      logging.info("  \"%s\" - OK" % readable_filename)
      with open(filename, 'r') as f:
        for line in f:
          if line.startswith("#") or line.startswith("//") or line.isspace():
            continue
          line = line.rstrip()
          test_prefixes = ["FLAKY", "FAILS"]
          for p in test_prefixes:
            # Strip prefixes from the test names.
            line = line.replace(".%s_" % p, ".")
          # Exclude the original test name.
          filters.append(line)
          if line[-2:] != ".*":
            # List all possible prefixes if line doesn't end with ".*".
            for p in test_prefixes:
              filters.append(line.replace(".", ".%s_" % p))
    # Get rid of duplicates.
    filters = set(filters)
    gtest_filter = self._gtest_filter
    if filters:
      if gtest_filter:
        gtest_filter += ":"
        if gtest_filter.find("-") < 0:
          gtest_filter += "-"
      else:
        gtest_filter = "-"
      gtest_filter += ":".join(filters)
    if gtest_filter:
      cmd.append("--gtest_filter=%s" % gtest_filter)

  @staticmethod
  def ShowTests():
    test_to_names = {}
    for name, test_function in ChromeTests._test_list.iteritems():
      test_to_names.setdefault(test_function, []).append(name)

    name_to_aliases = {}
    for names in test_to_names.itervalues():
      names.sort(key=len)
      name_to_aliases[names[0]] = names[1:]

    print
    print "Available tests:"
    print "----------------"
    for name, aliases in sorted(name_to_aliases.iteritems()):
      if aliases:
        print "   {} (aka {})".format(name, ', '.join(aliases))
      else:
        print "   {}".format(name)

  def SetupLdPath(self, requires_build_dir):
    if requires_build_dir:
      self._EnsureBuildDirFound()
    elif not self._options.build_dir:
      return

    # Append build_dir to LD_LIBRARY_PATH so external libraries can be loaded.
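    # e.g. (illustrative) LD_LIBRARY_PATH=/usr/local/lib becomes
    # "/usr/local/lib:out/Debug" when build_dir is out/Debug.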
    if os.getenv("LD_LIBRARY_PATH"):
      os.putenv("LD_LIBRARY_PATH", "%s:%s" % (os.getenv("LD_LIBRARY_PATH"),
                                              self._options.build_dir))
    else:
      os.putenv("LD_LIBRARY_PATH", self._options.build_dir)

  def SimpleTest(self, module, name, valgrind_test_args=None, cmd_args=None):
    tool = valgrind_test.CreateTool(self._options.valgrind_tool)
    cmd = self._DefaultCommand(tool, name, valgrind_test_args)
    self._AppendGtestFilter(tool, name, cmd)
    cmd.append('--test-tiny-timeout=1000')
    if cmd_args:
      cmd.extend(cmd_args)

    self.SetupLdPath(True)
    return tool.Run(cmd, module)

  def RunCmdLine(self):
    tool = valgrind_test.CreateTool(self._options.valgrind_tool)
    cmd = self._DefaultCommand(tool, None, self._args)
    self.SetupLdPath(False)
    return tool.Run(cmd, None)

  def TestAsh(self):
    return self.SimpleTest("ash", "ash_unittests")

  def TestAura(self):
    return self.SimpleTest("aura", "aura_unittests")

  def TestBase(self):
    return self.SimpleTest("base", "base_unittests")

  def TestChromeOS(self):
    return self.SimpleTest("chromeos", "chromeos_unittests")

  def TestComponents(self):
    return self.SimpleTest("components", "components_unittests")

  def TestCompositor(self):
    return self.SimpleTest("compositor", "compositor_unittests")

  def TestContent(self):
    return self.SimpleTest("content", "content_unittests")

  def TestContentBrowser(self):
    return self.SimpleTest("content", "content_browsertests")

  def TestCourgette(self):
    return self.SimpleTest("courgette", "courgette_unittests")

  def TestCrypto(self):
    return self.SimpleTest("crypto", "crypto_unittests")

  def TestDevice(self):
    return self.SimpleTest("device", "device_unittests")

  def TestFFmpeg(self):
    return self.SimpleTest("chrome", "ffmpeg_unittests")

  def TestFFmpegRegressions(self):
    return self.SimpleTest("chrome", "ffmpeg_regression_tests")

  def TestGPU(self):
    return self.SimpleTest("gpu", "gpu_unittests")

  def TestGURL(self):
    return self.SimpleTest("chrome", "googleurl_unittests")

  def TestURL(self):
    return self.SimpleTest("chrome", "url_unittests")

  def TestIpc(self):
    return self.SimpleTest("ipc", "ipc_tests",
                           valgrind_test_args=["--trace_children"])

  def TestJingle(self):
    return self.SimpleTest("chrome", "jingle_unittests")

  def TestMedia(self):
    return self.SimpleTest("chrome", "media_unittests")

  def TestNet(self):
    return self.SimpleTest("net", "net_unittests")

  def TestNetPerf(self):
    return self.SimpleTest("net", "net_perftests")

  def TestPPAPI(self):
    return self.SimpleTest("chrome", "ppapi_unittests")

  def TestPrinting(self):
    return self.SimpleTest("chrome", "printing_unittests")

  def TestRemoting(self):
    return self.SimpleTest("chrome", "remoting_unittests",
                           cmd_args=[
                               "--ui-test-action-timeout=60000",
                               "--ui-test-action-max-timeout=150000"])

  def TestSql(self):
    return self.SimpleTest("chrome", "sql_unittests")

  def TestSync(self):
    return self.SimpleTest("chrome", "sync_unit_tests")

  def TestLinuxSandbox(self):
    return self.SimpleTest("sandbox", "sandbox_linux_unittests")

  def TestUnit(self):
    # http://crbug.com/51716
    # Disabling all unit tests
    # Problems reappeared after r119922
    if common.IsMac() and self._options.valgrind_tool == "memcheck":
      logging.warning("unit_tests are disabled for memcheck on MacOS.")
      return 0
    return self.SimpleTest("chrome", "unit_tests")

  def TestUIUnit(self):
    return self.SimpleTest("chrome", "ui_unittests")

  def TestViews(self):
    return self.SimpleTest("views", "views_unittests")

  # Valgrind timeouts are in seconds.
  UI_VALGRIND_ARGS = ["--timeout=14400", "--trace_children", "--indirect"]
  # UI test timeouts are in milliseconds.
  UI_TEST_ARGS = ["--ui-test-action-timeout=60000",
                  "--ui-test-action-max-timeout=150000",
                  "--no-sandbox"]

  # TODO(thestig) fine-tune these values.
  # Valgrind timeouts are in seconds.
  BROWSER_VALGRIND_ARGS = ["--timeout=50000", "--trace_children", "--indirect"]
  # Browser test timeouts are in milliseconds.
  BROWSER_TEST_ARGS = ["--ui-test-action-timeout=200000",
                       "--ui-test-action-max-timeout=400000",
                       "--no-sandbox"]

  def TestAutomatedUI(self):
    return self.SimpleTest("chrome", "automated_ui_tests",
                           valgrind_test_args=self.UI_VALGRIND_ARGS,
                           cmd_args=self.UI_TEST_ARGS)

  def TestBrowser(self):
    return self.SimpleTest("chrome", "browser_tests",
                           valgrind_test_args=self.BROWSER_VALGRIND_ARGS,
                           cmd_args=self.BROWSER_TEST_ARGS)

  def TestInteractiveUI(self):
    return self.SimpleTest("chrome", "interactive_ui_tests",
                           valgrind_test_args=self.UI_VALGRIND_ARGS,
                           cmd_args=self.UI_TEST_ARGS)

  def TestReliability(self):
    script_dir = path_utils.ScriptDir()
    url_list_file = os.path.join(script_dir, "reliability", "url_list.txt")
    return self.SimpleTest("chrome", "reliability_tests",
                           valgrind_test_args=self.UI_VALGRIND_ARGS,
                           cmd_args=(self.UI_TEST_ARGS +
                                     ["--list=%s" % url_list_file]))

  def TestSafeBrowsing(self):
    return self.SimpleTest("chrome", "safe_browsing_tests",
                           valgrind_test_args=self.UI_VALGRIND_ARGS,
                           cmd_args=["--ui-test-action-max-timeout=450000"])

  def TestSyncIntegration(self):
    return self.SimpleTest("chrome", "sync_integration_tests",
                           valgrind_test_args=self.UI_VALGRIND_ARGS,
                           cmd_args=["--ui-test-action-max-timeout=450000"])

  def TestLayoutChunk(self, chunk_num, chunk_size):
    # Run tests [chunk_num*chunk_size .. (chunk_num+1)*chunk_size) from the
    # list of tests.  Wrap around to beginning of list at end.
    # If chunk_size is zero, run all tests in the list once.
    # If a text file is given as argument, it is used as the list of tests.
    #
    # Build the ginormous commandline in 'cmd'.
    # It's going to be roughly
    #  python valgrind_test.py ... python run_webkit_tests.py ...
    # but we'll use the --indirect flag to valgrind_test.py
    # to avoid valgrinding python.
    # Start by building the valgrind_test.py commandline.
406    # Start by building the valgrind_test.py commandline.
407    tool = valgrind_test.CreateTool(self._options.valgrind_tool)
408    cmd = self._DefaultCommand(tool)
409    cmd.append("--trace_children")
410    cmd.append("--indirect_webkit_layout")
411    cmd.append("--ignore_exit_code")
412    # Now build script_cmd, the run_webkits_tests.py commandline
413    # Store each chunk in its own directory so that we can find the data later
414    chunk_dir = os.path.join("layout", "chunk_%05d" % chunk_num)
415    out_dir = os.path.join(path_utils.ScriptDir(), "latest")
416    out_dir = os.path.join(out_dir, chunk_dir)
417    if os.path.exists(out_dir):
418      old_files = glob.glob(os.path.join(out_dir, "*.txt"))
419      for f in old_files:
420        os.remove(f)
421    else:
422      os.makedirs(out_dir)
423    script = os.path.join(self._source_dir, "webkit", "tools", "layout_tests",
424                          "run_webkit_tests.py")
425    script_cmd = ["python", script, "-v",
426                  "--run-singly",  # run a separate DumpRenderTree for each test
427                  "--fully-parallel",
428                  "--time-out-ms=200000",
429                  "--no-retry-failures",  # retrying takes too much time
430                  # http://crbug.com/176908: Don't launch a browser when done.
431                  "--no-show-results",
432                  "--nocheck-sys-deps"]
433    # Pass build mode to run_webkit_tests.py.  We aren't passed it directly,
434    # so parse it out of build_dir.  run_webkit_tests.py can only handle
435    # the two values "Release" and "Debug".
436    # TODO(Hercules): unify how all our scripts pass around build mode
437    # (--mode / --target / --build_dir / --debug)
438    if self._options.build_dir.endswith("Debug"):
439      script_cmd.append("--debug");
440    if (chunk_size > 0):
441      script_cmd.append("--run-chunk=%d:%d" % (chunk_num, chunk_size))
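      # e.g. chunk_num=3 with the default chunk_size of 1500 yields
      # --run-chunk=3:1500, i.e. tests 4500..5999 of the layout test list
      # (wrapping around at the end of the list).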
    if len(self._args):
      # If the arg is a .txt file, treat it as a list of tests.
      if os.path.isfile(self._args[0]) and self._args[0].endswith(".txt"):
        script_cmd.append("--test-list=%s" % self._args[0])
      else:
        script_cmd.extend(self._args)
    self._AppendGtestFilter(tool, "layout", script_cmd)
    # Now run script_cmd with the wrapper in cmd.
    cmd.append("--")
    cmd.extend(script_cmd)

    # Layout tests often fail quickly, but the buildbot remains green.
    # Detect this situation when running with the default chunk size.
    if chunk_size == self.LAYOUT_TESTS_DEFAULT_CHUNK_SIZE:
      min_runtime_in_seconds = 120
    else:
      min_runtime_in_seconds = 0
    return tool.Run(cmd, "layout", min_runtime_in_seconds=min_runtime_in_seconds)


  def TestLayout(self):
    # A "chunk file" is maintained in the local directory so that each run
    # covers a slice of the layout tests, of size chunk_size, that advances
    # with each run.  Since tests can be added and removed from the layout
    # tests at any time, this is not going to give exact coverage, but it will
    # allow us to continuously run small slices of the layout tests under
    # valgrind rather than having to run all of them in one shot.
    chunk_size = self._options.num_tests
    if chunk_size == 0:
      return self.TestLayoutChunk(0, 0)
    chunk_num = 0
    chunk_file = "valgrind_layout_chunk.txt"
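    # The chunk file holds a single integer (e.g. "42", illustrative): the
    # index of the most recently started chunk.  It is incremented and written
    # back before each run.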
    logging.info("Reading state from " + chunk_file)
    try:
      with open(chunk_file) as f:
        contents = f.read()
        if len(contents):
          chunk_num = int(contents)
        # This should be enough so that we have a couple of complete runs
        # of test data stored in the archive (although note that when we wrap
        # around, we are almost guaranteed not to be at the end of the test
        # list).
        if chunk_num > 10000:
          chunk_num = 0
    except IOError, (errno, strerror):
      logging.error("error reading from file %s (%d, %s)" % (chunk_file,
                    errno, strerror))
    # Save the new chunk number before running the tests. Otherwise, if a
    # particular chunk hangs the bot, the chunk number will never get
    # incremented and the bot will be wedged.
    logging.info("Saving state to " + chunk_file)
    try:
      with open(chunk_file, "w") as f:
        chunk_num += 1
        f.write("%d" % chunk_num)
    except IOError, (errno, strerror):
      logging.error("error writing to file %s (%d, %s)" % (chunk_file, errno,
                    strerror))
    # Since we're running small chunks of the layout tests, it's important to
    # mark the ones that have errors in them.  These won't be visible in the
    # summary list for long, but will be useful for someone reviewing this bot.
    return self.TestLayoutChunk(chunk_num, chunk_size)

  # The known list of tests.
  # Recognise the original abbreviations as well as full executable names.
  _test_list = {
    "cmdline": RunCmdLine,
    "ash": TestAsh,              "ash_unittests": TestAsh,
    "aura": TestAura,            "aura_unittests": TestAura,
    "automated_ui": TestAutomatedUI,
    "base": TestBase,            "base_unittests": TestBase,
    "browser": TestBrowser,      "browser_tests": TestBrowser,
    "chromeos": TestChromeOS,    "chromeos_unittests": TestChromeOS,
    "components": TestComponents, "components_unittests": TestComponents,
    "compositor": TestCompositor, "compositor_unittests": TestCompositor,
    "content": TestContent,      "content_unittests": TestContent,
    "content_browsertests": TestContentBrowser,
    "courgette": TestCourgette,  "courgette_unittests": TestCourgette,
    "crypto": TestCrypto,        "crypto_unittests": TestCrypto,
    "device": TestDevice,        "device_unittests": TestDevice,
    "ffmpeg": TestFFmpeg,        "ffmpeg_unittests": TestFFmpeg,
    "ffmpeg_regression_tests": TestFFmpegRegressions,
    "googleurl": TestGURL,       "googleurl_unittests": TestGURL,
    "url": TestURL,              "url_unittests": TestURL,
    "gpu": TestGPU,              "gpu_unittests": TestGPU,
    "ipc": TestIpc,              "ipc_tests": TestIpc,
    "interactive_ui": TestInteractiveUI,
    "layout": TestLayout,        "layout_tests": TestLayout,
    "webkit": TestLayout,
    "media": TestMedia,          "media_unittests": TestMedia,
    "net": TestNet,              "net_unittests": TestNet,
    "net_perf": TestNetPerf,     "net_perftests": TestNetPerf,
    "jingle": TestJingle,        "jingle_unittests": TestJingle,
    "ppapi": TestPPAPI,          "ppapi_unittests": TestPPAPI,
    "printing": TestPrinting,    "printing_unittests": TestPrinting,
    "reliability": TestReliability, "reliability_tests": TestReliability,
    "remoting": TestRemoting,    "remoting_unittests": TestRemoting,
    "safe_browsing": TestSafeBrowsing, "safe_browsing_tests": TestSafeBrowsing,
    "sandbox": TestLinuxSandbox, "sandbox_linux_unittests": TestLinuxSandbox,
    "sql": TestSql,              "sql_unittests": TestSql,
    "sync": TestSync,            "sync_unit_tests": TestSync,
    "sync_integration_tests": TestSyncIntegration,
    "sync_integration": TestSyncIntegration,
    "ui_unit": TestUIUnit,       "ui_unittests": TestUIUnit,
    "unit": TestUnit,            "unit_tests": TestUnit,
    "views": TestViews,          "views_unittests": TestViews,
  }


def _main():
  parser = optparse.OptionParser("usage: %prog -b <dir> -t <test> "
                                 "[-t <test> ...]")
  parser.disable_interspersed_args()

  parser.add_option("", "--help-tests", dest="help_tests", action="store_true",
                    default=False, help="List all available tests")
  parser.add_option("-b", "--build_dir",
                    help="the location of the compiler output")
  parser.add_option("-t", "--test", action="append", default=[],
                    help="which test to run, supports test:gtest_filter format "
                         "as well.")
  parser.add_option("", "--baseline", action="store_true", default=False,
                    help="generate baseline data instead of validating")
  parser.add_option("", "--gtest_filter",
                    help="additional arguments to --gtest_filter")
  parser.add_option("", "--gtest_repeat",
                    help="argument for --gtest_repeat")
  parser.add_option("-v", "--verbose", action="store_true", default=False,
                    help="verbose output - enable debug log messages")
  parser.add_option("", "--tool", dest="valgrind_tool", default="memcheck",
                    help="specify a valgrind tool to run the tests under")
  parser.add_option("", "--tool_flags", dest="valgrind_tool_flags", default="",
                    help="specify custom flags for the selected valgrind tool")
  parser.add_option("", "--keep_logs", action="store_true", default=False,
                    help="store memory tool logs in the <tool>.logs directory "
                         "instead of /tmp.\nThis can be useful for tool "
                         "developers/maintainers.\nPlease note that the <tool>"
                         ".logs directory will be clobbered on tool startup.")
  parser.add_option("-n", "--num_tests", type="int",
                    default=ChromeTests.LAYOUT_TESTS_DEFAULT_CHUNK_SIZE,
                    help="for layout tests: # of subtests per run.  0 for all.")
  # TODO(thestig) Remove this if we can.
  parser.add_option("", "--gtest_color", dest="gtest_color", default="no",
                    help="dummy compatibility flag for sharding_supervisor.")

  options, args = parser.parse_args()

  if options.verbose:
    logging_utils.config_root(logging.DEBUG)
  else:
    logging_utils.config_root()

  if options.help_tests:
    ChromeTests.ShowTests()
    return 0

  if not options.test:
    parser.error("--test not specified")

  if len(options.test) != 1 and options.gtest_filter:
    parser.error("--gtest_filter and multiple tests don't make sense together")

  for t in options.test:
    tests = ChromeTests(options, args, t)
    ret = tests.Run()
    if ret: return ret
  return 0


if __name__ == "__main__":
  sys.exit(_main())