#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

''' Runs various chrome tests through valgrind_test.py.'''

import glob
import logging
import multiprocessing
import optparse
import os
import sys

import logging_utils
import path_utils

import common
import valgrind_test

class TestNotFound(Exception): pass

class MultipleGTestFiltersSpecified(Exception): pass

class BuildDirNotFound(Exception): pass

class BuildDirAmbiguous(Exception): pass

class ChromeTests:
  SLOW_TOOLS = ["memcheck", "tsan", "tsan_rv", "drmemory"]
  LAYOUT_TESTS_DEFAULT_CHUNK_SIZE = 500

  def __init__(self, options, args, test):
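    # A test spec may bundle its own gtest filter after a colon; for example
    # (hypothetical filter) "-t base:FooTest.*" runs the "base" test with
    # --gtest_filter=FooTest.*, while a bare "-t base" falls back to the
    # --gtest_filter option.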
    if ':' in test:
      (self._test, self._gtest_filter) = test.split(':', 1)
    else:
      self._test = test
      self._gtest_filter = options.gtest_filter

    if self._test not in self._test_list:
      raise TestNotFound("Unknown test: %s" % test)

    if options.gtest_filter and options.gtest_filter != self._gtest_filter:
      raise MultipleGTestFiltersSpecified("Cannot specify both --gtest_filter "
                                          "and --test %s" % test)

    self._options = options
    self._args = args

    script_dir = path_utils.ScriptDir()
    # Compute the top of the tree (the "source dir") from the script dir (where
    # this script lives).  We assume that the script dir is in tools/valgrind/
    # relative to the top of the tree.
    self._source_dir = os.path.dirname(os.path.dirname(script_dir))
    # Since this path is used for string matching, make sure it's always
    # an absolute Unix-style path.
    self._source_dir = os.path.abspath(self._source_dir).replace('\\', '/')
    valgrind_test_script = os.path.join(script_dir, "valgrind_test.py")
    self._command_preamble = ["--source-dir=%s" % self._source_dir]

    if not self._options.build_dir:
      dirs = [
        os.path.join(self._source_dir, "xcodebuild", "Debug"),
        os.path.join(self._source_dir, "out", "Debug"),
        os.path.join(self._source_dir, "build", "Debug"),
      ]
      build_dir = [d for d in dirs if os.path.isdir(d)]
      if len(build_dir) > 1:
        raise BuildDirAmbiguous("Found more than one suitable build dir:\n"
                                "%s\nPlease specify just one "
                                "using --build-dir" % ", ".join(build_dir))
      elif build_dir:
        self._options.build_dir = build_dir[0]
      else:
        self._options.build_dir = None

    if self._options.build_dir:
      build_dir = os.path.abspath(self._options.build_dir)
      self._command_preamble += ["--build-dir=%s" % build_dir]

  def _EnsureBuildDirFound(self):
    if not self._options.build_dir:
      raise BuildDirNotFound("Oops, couldn't find a build dir, please "
                             "specify it manually using --build-dir")

  def _DefaultCommand(self, tool, exe=None, valgrind_test_args=None):
    '''Generates the default command array that most tests will use.'''
    if exe and common.IsWindows():
      exe += '.exe'

    cmd = list(self._command_preamble)

    # Find all suppressions matching the following pattern:
    # tools/valgrind/TOOL/suppressions[_PLATFORM].txt
    # and list them with --suppressions= prefix.
    script_dir = path_utils.ScriptDir()
    tool_name = tool.ToolName()
    suppression_file = os.path.join(script_dir, tool_name, "suppressions.txt")
    if os.path.exists(suppression_file):
      cmd.append("--suppressions=%s" % suppression_file)
    # Platform-specific suppressions.
    for platform in common.PlatformNames():
      platform_suppression_file = \
          os.path.join(script_dir, tool_name, 'suppressions_%s.txt' % platform)
      if os.path.exists(platform_suppression_file):
        cmd.append("--suppressions=%s" % platform_suppression_file)
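    # For example, for the memcheck tool on Linux with both files present,
    # this would append (paths hypothetical):
    #   --suppressions=tools/valgrind/memcheck/suppressions.txt
    #   --suppressions=tools/valgrind/memcheck/suppressions_linux.txt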

    if self._options.valgrind_tool_flags:
      cmd += self._options.valgrind_tool_flags.split(" ")
    if self._options.keep_logs:
      cmd += ["--keep_logs"]
    if valgrind_test_args is not None:
      for arg in valgrind_test_args:
        cmd.append(arg)
    if exe:
      self._EnsureBuildDirFound()
      cmd.append(os.path.join(self._options.build_dir, exe))
      # Valgrind runs tests slowly, so slow tests hurt more; show elapsed time
      # so we can find the slowpokes.
      cmd.append("--gtest_print_time")
    if self._options.gtest_repeat:
      cmd.append("--gtest_repeat=%s" % self._options.gtest_repeat)
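    # At this point cmd might look roughly like this (hypothetical paths
    # and tool):
    #   --source-dir=/src/chromium --build-dir=/src/chromium/out/Debug
    #   --suppressions=tools/valgrind/memcheck/suppressions.txt
    #   out/Debug/base_unittests --gtest_print_time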
    return cmd

  def Run(self):
    ''' Runs the test specified by command-line argument --test '''
    logging.info("running test %s" % (self._test))
    return self._test_list[self._test](self)

  def _AppendGtestFilter(self, tool, name, cmd):
    '''Append an appropriate --gtest_filter flag to the googletest binary
       invocation.
       If the user passed a filter naming only one test, use it as-is.
       Otherwise, filter out tests listed in the appropriate gtest_exclude
       files.
    '''
    if (self._gtest_filter and
        ":" not in self._gtest_filter and
        "?" not in self._gtest_filter and
        "*" not in self._gtest_filter):
      cmd.append("--gtest_filter=%s" % self._gtest_filter)
      return

    filters = []
    gtest_files_dir = os.path.join(path_utils.ScriptDir(), "gtest_exclude")

    gtest_filter_files = [
        os.path.join(gtest_files_dir, name + ".gtest-%s.txt" % tool.ToolName())]
    # Use ".gtest.txt" files only for slow tools, as they now contain
    # Valgrind- and Dr.Memory-specific filters.
    # TODO(glider): rename the files to ".gtest_slow.txt"
    if tool.ToolName() in ChromeTests.SLOW_TOOLS:
      gtest_filter_files += [os.path.join(gtest_files_dir, name + ".gtest.txt")]
    for platform_suffix in common.PlatformNames():
      gtest_filter_files += [
        os.path.join(gtest_files_dir, name + ".gtest_%s.txt" % platform_suffix),
        os.path.join(gtest_files_dir, name + ".gtest-%s_%s.txt" % \
            (tool.ToolName(), platform_suffix))]
    logging.info("Reading gtest exclude filter files:")
    for filename in gtest_filter_files:
      # Strip the leading absolute path (may be very long on the bot)
      # and the following / or \.
      readable_filename = filename.replace("\\", "/")  # '\' on Windows
      readable_filename = readable_filename.replace(self._source_dir, "")[1:]
      if not os.path.exists(filename):
        logging.info("  \"%s\" - not found" % readable_filename)
        continue
      logging.info("  \"%s\" - OK" % readable_filename)
      f = open(filename, 'r')
      for line in f.readlines():
        if line.startswith("#") or line.startswith("//") or line.isspace():
          continue
        line = line.rstrip()
        test_prefixes = ["FLAKY", "FAILS"]
        for p in test_prefixes:
          # Strip prefixes from the test names.
          line = line.replace(".%s_" % p, ".")
        # Exclude the original test name.
        filters.append(line)
        if not line.endswith(".*"):
          # List all possible prefixes if line doesn't end with ".*".
          for p in test_prefixes:
            filters.append(line.replace(".", ".%s_" % p))
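    # For example (hypothetical test name), an exclude line "FooTest.Bar"
    # yields the filters FooTest.Bar, FooTest.FLAKY_Bar and FooTest.FAILS_Bar,
    # so the test stays excluded under any of its historical prefixes.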
    # Get rid of duplicates.
    filters = set(filters)
    gtest_filter = self._gtest_filter
    if filters:
      if gtest_filter:
        gtest_filter += ":"
        if "-" not in gtest_filter:
          gtest_filter += "-"
      else:
        gtest_filter = "-"
      gtest_filter += ":".join(filters)
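      # The result is an exclusion list appended to any user filter, e.g.
      # (hypothetical names) "MyTest.*-FooTest.Bar:FooTest.FLAKY_Bar".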
    if gtest_filter:
      cmd.append("--gtest_filter=%s" % gtest_filter)

  @staticmethod
  def ShowTests():
    test_to_names = {}
    for name, test_function in ChromeTests._test_list.iteritems():
      test_to_names.setdefault(test_function, []).append(name)

    name_to_aliases = {}
    for names in test_to_names.itervalues():
      names.sort(key=lambda name: len(name))
      name_to_aliases[names[0]] = names[1:]

    print
    print "Available tests:"
    print "----------------"
    for name, aliases in sorted(name_to_aliases.iteritems()):
      if aliases:
        print "   {} (aka {})".format(name, ', '.join(aliases))
      else:
        print "   {}".format(name)

  def SetupLdPath(self, requires_build_dir):
    if requires_build_dir:
      self._EnsureBuildDirFound()
    elif not self._options.build_dir:
      return

    # Append build_dir to LD_LIBRARY_PATH so external libraries can be loaded.
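    # For example (hypothetical value), LD_LIBRARY_PATH=/usr/lib becomes
    # /usr/lib:out/Debug so the dynamic linker also searches the build dir.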
    if os.getenv("LD_LIBRARY_PATH"):
      os.putenv("LD_LIBRARY_PATH", "%s:%s" % (os.getenv("LD_LIBRARY_PATH"),
                                              self._options.build_dir))
    else:
      os.putenv("LD_LIBRARY_PATH", self._options.build_dir)

  def SimpleTest(self, module, name, valgrind_test_args=None, cmd_args=None):
    tool = valgrind_test.CreateTool(self._options.valgrind_tool)
    cmd = self._DefaultCommand(tool, name, valgrind_test_args)
    self._AppendGtestFilter(tool, name, cmd)
    cmd.extend(['--test-tiny-timeout=1000'])
    if cmd_args:
      cmd.extend(cmd_args)

    self.SetupLdPath(True)
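    # For example, TestBase() below boils down to roughly (hypothetical
    # paths): running out/Debug/base_unittests under the selected tool with
    # --gtest_print_time and --test-tiny-timeout=1000 appended.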
    return tool.Run(cmd, module)

  def RunCmdLine(self):
    tool = valgrind_test.CreateTool(self._options.valgrind_tool)
    cmd = self._DefaultCommand(tool, None, self._args)
    self.SetupLdPath(False)
    return tool.Run(cmd, None)

  def TestAppList(self):
    return self.SimpleTest("app_list", "app_list_unittests")

  def TestAsh(self):
    return self.SimpleTest("ash", "ash_unittests")

  def TestAura(self):
    return self.SimpleTest("aura", "aura_unittests")

  def TestBase(self):
    return self.SimpleTest("base", "base_unittests")

  def TestChromeOS(self):
    return self.SimpleTest("chromeos", "chromeos_unittests")

  def TestComponents(self):
    return self.SimpleTest("components", "components_unittests")

  def TestCompositor(self):
    return self.SimpleTest("compositor", "compositor_unittests")

  def TestContent(self):
    return self.SimpleTest("content", "content_unittests")

  def TestContentBrowser(self):
    return self.SimpleTest("content", "content_browsertests")

  def TestCourgette(self):
    return self.SimpleTest("courgette", "courgette_unittests")

  def TestCrypto(self):
    return self.SimpleTest("crypto", "crypto_unittests")

  def TestDevice(self):
    return self.SimpleTest("device", "device_unittests")

  def TestFFmpeg(self):
    return self.SimpleTest("chrome", "ffmpeg_unittests")

  def TestFFmpegRegressions(self):
    return self.SimpleTest("chrome", "ffmpeg_regression_tests")

  def TestGPU(self):
    return self.SimpleTest("gpu", "gpu_unittests")

  def TestIpc(self):
    return self.SimpleTest("ipc", "ipc_tests",
                           valgrind_test_args=["--trace_children"])

  def TestJingle(self):
    return self.SimpleTest("chrome", "jingle_unittests")

  def TestMedia(self):
    return self.SimpleTest("chrome", "media_unittests")

  def TestMessageCenter(self):
    return self.SimpleTest("message_center", "message_center_unittests")

  def TestNet(self):
    return self.SimpleTest("net", "net_unittests")

  def TestNetPerf(self):
    return self.SimpleTest("net", "net_perftests")

  def TestPPAPI(self):
    return self.SimpleTest("chrome", "ppapi_unittests")

  def TestPrinting(self):
    return self.SimpleTest("chrome", "printing_unittests")

  def TestRemoting(self):
    return self.SimpleTest("chrome", "remoting_unittests",
                           cmd_args=[
                               "--ui-test-action-timeout=60000",
                               "--ui-test-action-max-timeout=150000"])

  def TestSql(self):
    return self.SimpleTest("chrome", "sql_unittests")

  def TestSync(self):
    return self.SimpleTest("chrome", "sync_unit_tests")

  def TestLinuxSandbox(self):
    return self.SimpleTest("sandbox", "sandbox_linux_unittests")

  def TestUnit(self):
    # http://crbug.com/51716
    # unit_tests are disabled on Mac under memcheck; the problems
    # reappeared after r119922.
    if common.IsMac() and (self._options.valgrind_tool == "memcheck"):
      logging.warning("unit_tests are disabled for memcheck on MacOS.")
      return 0
    return self.SimpleTest("chrome", "unit_tests")

  def TestUIUnit(self):
    return self.SimpleTest("chrome", "ui_unittests")

  def TestURL(self):
    return self.SimpleTest("chrome", "url_unittests")

  def TestViews(self):
    return self.SimpleTest("views", "views_unittests")

  # Valgrind timeouts are in seconds.
  UI_VALGRIND_ARGS = ["--timeout=14400", "--trace_children", "--indirect"]
  # UI test timeouts are in milliseconds.
  UI_TEST_ARGS = ["--ui-test-action-timeout=60000",
                  "--ui-test-action-max-timeout=150000",
                  "--no-sandbox"]

  # TODO(thestig) fine-tune these values.
  # Valgrind timeouts are in seconds.
  BROWSER_VALGRIND_ARGS = ["--timeout=50000", "--trace_children", "--indirect"]
  # Browser test timeouts are in milliseconds.
  BROWSER_TEST_ARGS = ["--ui-test-action-timeout=400000",
                       "--ui-test-action-max-timeout=800000",
                       "--no-sandbox"]

  def TestAutomatedUI(self):
    return self.SimpleTest("chrome", "automated_ui_tests",
                           valgrind_test_args=self.UI_VALGRIND_ARGS,
                           cmd_args=self.UI_TEST_ARGS)

  def TestBrowser(self):
    return self.SimpleTest("chrome", "browser_tests",
                           valgrind_test_args=self.BROWSER_VALGRIND_ARGS,
                           cmd_args=self.BROWSER_TEST_ARGS)

  def TestInteractiveUI(self):
    return self.SimpleTest("chrome", "interactive_ui_tests",
                           valgrind_test_args=self.UI_VALGRIND_ARGS,
                           cmd_args=self.UI_TEST_ARGS)

  def TestReliability(self):
    script_dir = path_utils.ScriptDir()
    url_list_file = os.path.join(script_dir, "reliability", "url_list.txt")
    return self.SimpleTest("chrome", "reliability_tests",
                           valgrind_test_args=self.UI_VALGRIND_ARGS,
                           cmd_args=(self.UI_TEST_ARGS +
                                     ["--list=%s" % url_list_file]))

  def TestSafeBrowsing(self):
    return self.SimpleTest("chrome", "safe_browsing_tests",
                           valgrind_test_args=self.UI_VALGRIND_ARGS,
                           cmd_args=["--ui-test-action-max-timeout=450000"])

  def TestSyncIntegration(self):
    return self.SimpleTest("chrome", "sync_integration_tests",
                           valgrind_test_args=self.UI_VALGRIND_ARGS,
                           cmd_args=["--ui-test-action-max-timeout=450000"])

  def TestLayoutChunk(self, chunk_num, chunk_size):
    # Run tests [chunk_num*chunk_size .. (chunk_num+1)*chunk_size) from the
    # list of tests.  Wrap around to the beginning of the list at the end.
    # If chunk_size is zero, run all tests in the list once.
    # If a text file is given as an argument, it is used as the list of tests.
    #
    # Build the ginormous command line in 'cmd'.
    # It's going to be roughly
    #   python valgrind_test.py ... python run_webkit_tests.py ...
    # but we'll use the --indirect flag to valgrind_test.py
    # to avoid valgrinding python.
    # Start by building the valgrind_test.py command line.
    tool = valgrind_test.CreateTool(self._options.valgrind_tool)
    cmd = self._DefaultCommand(tool)
    cmd.append("--trace_children")
    cmd.append("--indirect_webkit_layout")
    cmd.append("--ignore_exit_code")
    # Now build script_cmd, the run_webkit_tests.py command line.
    # Store each chunk in its own directory so that we can find the data later.
    chunk_dir = os.path.join("layout", "chunk_%05d" % chunk_num)
    out_dir = os.path.join(path_utils.ScriptDir(), "latest")
    out_dir = os.path.join(out_dir, chunk_dir)
    if os.path.exists(out_dir):
      old_files = glob.glob(os.path.join(out_dir, "*.txt"))
      for f in old_files:
        os.remove(f)
    else:
      os.makedirs(out_dir)
    script = os.path.join(self._source_dir, "webkit", "tools", "layout_tests",
                          "run_webkit_tests.py")
    # http://crbug.com/260627: After the switch to content_shell from DRT, each
    # test now brings up 3 processes.  Under Valgrind, they become memory bound
    # and can eventually OOM if we don't reduce the total count.
    jobs = max(1, int(multiprocessing.cpu_count() * 0.4))
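    # E.g. on an 8-core machine: max(1, int(8 * 0.4)) == 3 child processes.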
    script_cmd = ["python", script, "-v",
                  "--run-singly",  # run a separate DumpRenderTree for each test
                  "--fully-parallel",
                  "--child-processes=%d" % jobs,
                  "--time-out-ms=200000",
                  "--no-retry-failures",  # retrying takes too much time
                  # http://crbug.com/176908: Don't launch a browser when done.
                  "--no-show-results",
                  "--nocheck-sys-deps"]
    # Pass build mode to run_webkit_tests.py.  We aren't passed it directly,
    # so parse it out of build_dir.  run_webkit_tests.py can only handle
    # the two values "Release" and "Debug".
    # TODO(Hercules): unify how all our scripts pass around build mode
    # (--mode / --target / --build-dir / --debug)
    if self._options.build_dir:
      build_root, mode = os.path.split(self._options.build_dir)
      script_cmd.extend(["--build-directory", build_root, "--target", mode])
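      # E.g. a build_dir of "out/Debug" yields
      # "--build-directory out --target Debug".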
    if chunk_size > 0:
      script_cmd.append("--run-chunk=%d:%d" % (chunk_num, chunk_size))
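    # E.g. chunk_num=3 with chunk_size=500 requests tests 1500..1999,
    # wrapping around if the list is shorter.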
    if self._args:
      # If the arg is a .txt file, treat it as a list of tests.
      if os.path.isfile(self._args[0]) and self._args[0].endswith(".txt"):
        script_cmd.append("--test-list=%s" % self._args[0])
      else:
        script_cmd.extend(self._args)
    self._AppendGtestFilter(tool, "layout", script_cmd)
    # Now run script_cmd with the wrapper in cmd.
    cmd.append("--")
    cmd.extend(script_cmd)

    # Layout tests often fail quickly, but the buildbot remains green.
    # Detect this situation when running with the default chunk size.
    if chunk_size == self.LAYOUT_TESTS_DEFAULT_CHUNK_SIZE:
      min_runtime_in_seconds = 120
    else:
      min_runtime_in_seconds = 0
    ret = tool.Run(cmd, "layout", min_runtime_in_seconds=min_runtime_in_seconds)
    return ret

  def TestLayout(self):
    # A "chunk file" is maintained in the local directory so that each test
    # runs a slice of the layout tests of size chunk_size that increments with
    # each run.  Since tests can be added and removed from the layout tests at
    # any time, this is not going to give exact coverage, but it will allow us
    # to continuously run small slices of the layout tests under valgrind rather
    # than having to run all of them in one shot.
    chunk_size = self._options.num_tests
    if chunk_size == 0:
      return self.TestLayoutChunk(0, 0)
    chunk_num = 0
    chunk_file = "valgrind_layout_chunk.txt"
    logging.info("Reading state from " + chunk_file)
    try:
      f = open(chunk_file)
      contents = f.read()
      if contents:
        chunk_num = int(contents)
      # This should be enough so that we have a couple of complete runs
      # of test data stored in the archive (although note that when we wrap
      # around, we are almost guaranteed not to be at the end of the test
      # list).
      if chunk_num > 10000:
        chunk_num = 0
      f.close()
    except IOError, (errno, strerror):
      logging.error("error reading from file %s (%d, %s)" % (chunk_file,
                    errno, strerror))
    # Save the new chunk number before running the tests. Otherwise if a
    # particular chunk hangs the bot, the chunk number will never get
    # incremented and the bot will be wedged.
    logging.info("Saving state to " + chunk_file)
    try:
      f = open(chunk_file, "w")
      chunk_num += 1
      f.write("%d" % chunk_num)
      f.close()
    except IOError, (errno, strerror):
      logging.error("error writing to file %s (%d, %s)" % (chunk_file, errno,
                    strerror))
    # Since we're running small chunks of the layout tests, it's important to
    # mark the ones that have errors in them.  These won't be visible in the
    # summary list for long, but will be useful for someone reviewing this bot.
    return self.TestLayoutChunk(chunk_num, chunk_size)

  # The known list of tests.
  # Recognise the original abbreviations as well as full executable names.
  _test_list = {
    "cmdline" : RunCmdLine,
    "app_list": TestAppList,     "app_list_unittests": TestAppList,
    "ash": TestAsh,              "ash_unittests": TestAsh,
    "aura": TestAura,            "aura_unittests": TestAura,
    "automated_ui" : TestAutomatedUI,
    "base": TestBase,            "base_unittests": TestBase,
    "browser": TestBrowser,      "browser_tests": TestBrowser,
    "chromeos": TestChromeOS,    "chromeos_unittests": TestChromeOS,
    "components": TestComponents, "components_unittests": TestComponents,
    "compositor": TestCompositor, "compositor_unittests": TestCompositor,
    "content": TestContent,      "content_unittests": TestContent,
    "content_browsertests": TestContentBrowser,
    "courgette": TestCourgette,  "courgette_unittests": TestCourgette,
    "crypto": TestCrypto,        "crypto_unittests": TestCrypto,
    "device": TestDevice,        "device_unittests": TestDevice,
    "ffmpeg": TestFFmpeg,        "ffmpeg_unittests": TestFFmpeg,
    "ffmpeg_regression_tests": TestFFmpegRegressions,
    "gpu": TestGPU,              "gpu_unittests": TestGPU,
    "ipc": TestIpc,              "ipc_tests": TestIpc,
    "interactive_ui": TestInteractiveUI,
    "jingle": TestJingle,        "jingle_unittests": TestJingle,
    "layout": TestLayout,        "layout_tests": TestLayout,
    "webkit": TestLayout,
    "media": TestMedia,          "media_unittests": TestMedia,
    "message_center": TestMessageCenter,
    "message_center_unittests" : TestMessageCenter,
    "net": TestNet,              "net_unittests": TestNet,
    "net_perf": TestNetPerf,     "net_perftests": TestNetPerf,
    "ppapi": TestPPAPI,          "ppapi_unittests": TestPPAPI,
    "printing": TestPrinting,    "printing_unittests": TestPrinting,
    "reliability": TestReliability, "reliability_tests": TestReliability,
    "remoting": TestRemoting,    "remoting_unittests": TestRemoting,
    "safe_browsing": TestSafeBrowsing, "safe_browsing_tests": TestSafeBrowsing,
    "sandbox": TestLinuxSandbox, "sandbox_linux_unittests": TestLinuxSandbox,
    "sql": TestSql,              "sql_unittests": TestSql,
    "sync": TestSync,            "sync_unit_tests": TestSync,
    "sync_integration_tests": TestSyncIntegration,
    "sync_integration": TestSyncIntegration,
    "ui_unit": TestUIUnit,       "ui_unittests": TestUIUnit,
    "unit": TestUnit,            "unit_tests": TestUnit,
    "url": TestURL,              "url_unittests": TestURL,
    "views": TestViews,          "views_unittests": TestViews,
  }
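  # For example, "--test base" and "--test base_unittests" both invoke
  # TestBase, and "--test base:FooTest.*" (hypothetical filter) narrows the
  # run further.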


def _main():
  parser = optparse.OptionParser("usage: %prog -b <dir> -t <test> "
                                 "[-t <test> ...]")

  parser.add_option("--help-tests", dest="help_tests", action="store_true",
                    default=False, help="List all available tests")
  parser.add_option("-b", "--build-dir",
                    help="the location of the compiler output")
  parser.add_option("--target", help="Debug or Release")
  parser.add_option("-t", "--test", action="append", default=[],
                    help="which test to run, supports test:gtest_filter format "
                         "as well.")
  parser.add_option("--baseline", action="store_true", default=False,
                    help="generate baseline data instead of validating")
  parser.add_option("--gtest_filter",
                    help="additional arguments to --gtest_filter")
  parser.add_option("--gtest_repeat", help="argument for --gtest_repeat")
  parser.add_option("-v", "--verbose", action="store_true", default=False,
                    help="verbose output - enable debug log messages")
  parser.add_option("--tool", dest="valgrind_tool", default="memcheck",
                    help="specify a valgrind tool to run the tests under")
  parser.add_option("--tool_flags", dest="valgrind_tool_flags", default="",
                    help="specify custom flags for the selected valgrind tool")
  parser.add_option("--keep_logs", action="store_true", default=False,
                    help="store memory tool logs in the <tool>.logs directory "
                         "instead of /tmp.\nThis can be useful for tool "
                         "developers/maintainers.\nPlease note that the <tool>"
                         ".logs directory will be clobbered on tool startup.")
  parser.add_option("-n", "--num_tests", type="int",
                    default=ChromeTests.LAYOUT_TESTS_DEFAULT_CHUNK_SIZE,
                    help="for layout tests: # of subtests per run.  0 for all.")
  # TODO(thestig) Remove this if we can.
  parser.add_option("--gtest_color", dest="gtest_color", default="no",
                    help="dummy compatibility flag for sharding_supervisor.")

  options, args = parser.parse_args()

  # Bake target into build_dir.
  if options.target and options.build_dir:
    assert not options.build_dir.endswith(options.target)
    options.build_dir = os.path.join(os.path.abspath(options.build_dir),
                                     options.target)
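    # E.g. "-b out --target Debug" bakes build_dir into "out/Debug".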

  if options.verbose:
    logging_utils.config_root(logging.DEBUG)
  else:
    logging_utils.config_root()

  if options.help_tests:
    ChromeTests.ShowTests()
    return 0

  if not options.test:
    parser.error("--test not specified")

  if len(options.test) != 1 and options.gtest_filter:
    parser.error("--gtest_filter and multiple tests don't make sense together")

  for t in options.test:
    tests = ChromeTests(options, args, t)
    ret = tests.Run()
    if ret:
      return ret
  return 0


if __name__ == "__main__":
  sys.exit(_main())