#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# chrome_tests.py revision 5821806d5e7f356e8fa4b058a389a808ea183019

''' Runs various chrome tests through valgrind_test.py.'''

import glob
import logging
import optparse
import os
import stat
import sys

import logging_utils
import path_utils

import common
import valgrind_test

class TestNotFound(Exception):
  """Raised when the requested test name is not in ChromeTests._test_list."""
  pass
class MultipleGTestFiltersSpecified(Exception):
  """Raised when a gtest filter is given both via --gtest_filter and via the
  "test:filter" syntax of --test."""
  pass
class BuildDirNotFound(Exception):
  """Raised when no build directory was specified and none could be probed."""
  pass
class BuildDirAmbiguous(Exception):
  """Raised when more than one candidate build directory exists on disk."""
  pass
class ChromeTests:
  """Runs one Chrome test suite under a memory tool via valgrind_test.py.

  The suite is selected by name; _test_list (bottom of the class) maps each
  recognised name/alias to the Test* method that builds and runs the
  valgrind_test.py command line.
  """

  # Tools slow enough that the generic ".gtest.txt" exclude files apply to
  # them (see _AppendGtestFilter).
  SLOW_TOOLS = ["memcheck", "tsan", "tsan_rv", "drmemory"]
  # Default number of layout tests per chunk in TestLayout.
  LAYOUT_TESTS_DEFAULT_CHUNK_SIZE = 1500

  def __init__(self, options, args, test):
    """Validates the requested test and precomputes the command preamble.

    Args:
      options: parsed optparse options from _main().
      args: positional arguments, forwarded to some tests.
      test: test name, optionally in "test:gtest_filter" form.

    Raises:
      TestNotFound: |test| is not a key of _test_list.
      MultipleGTestFiltersSpecified: a filter was given both via
          --gtest_filter and via the "test:filter" syntax.
      BuildDirAmbiguous: more than one default build dir exists on disk.
    """
    if ':' in test:
      (self._test, self._gtest_filter) = test.split(':', 1)
    else:
      self._test = test
      self._gtest_filter = options.gtest_filter

    if self._test not in self._test_list:
      raise TestNotFound("Unknown test: %s" % test)

    if options.gtest_filter and options.gtest_filter != self._gtest_filter:
      raise MultipleGTestFiltersSpecified("Can not specify both --gtest_filter "
                                          "and --test %s" % test)

    self._options = options
    self._args = args

    script_dir = path_utils.ScriptDir()
    # Compute the top of the tree (the "source dir") from the script dir (where
    # this script lives).  We assume that the script dir is in tools/valgrind/
    # relative to the top of the tree.
    self._source_dir = os.path.dirname(os.path.dirname(script_dir))
    # Since this path is used for string matching, make sure it's always
    # an absolute Unix-style path.
    self._source_dir = os.path.abspath(self._source_dir).replace('\\', '/')
    self._command_preamble = ["--source_dir=%s" % (self._source_dir)]

    if not self._options.build_dir:
      # No build dir given; probe the conventional output locations.
      dirs = [
        os.path.join(self._source_dir, "xcodebuild", "Debug"),
        os.path.join(self._source_dir, "out", "Debug"),
        os.path.join(self._source_dir, "build", "Debug"),
      ]
      build_dir = [d for d in dirs if os.path.isdir(d)]
      if len(build_dir) > 1:
        raise BuildDirAmbiguous("Found more than one suitable build dir:\n"
                                "%s\nPlease specify just one "
                                "using --build_dir" % ", ".join(build_dir))
      elif build_dir:
        self._options.build_dir = build_dir[0]
      else:
        self._options.build_dir = None

    if self._options.build_dir:
      self._command_preamble += ["--build_dir=%s" % (self._options.build_dir)]

  def _EnsureBuildDirFound(self):
    """Raises BuildDirNotFound unless a build dir was found or specified."""
    if not self._options.build_dir:
      raise BuildDirNotFound("Oops, couldn't find a build dir, please "
                             "specify it manually using --build_dir")

  def _DefaultCommand(self, tool, exe=None, valgrind_test_args=None):
    '''Generates the default command array that most tests will use.'''
    if exe and common.IsWindows():
      exe += '.exe'

    cmd = list(self._command_preamble)

    # Find all suppressions matching the following pattern:
    # tools/valgrind/TOOL/suppressions[_PLATFORM].txt
    # and list them with --suppressions= prefix.
    script_dir = path_utils.ScriptDir()
    tool_name = tool.ToolName()
    suppression_file = os.path.join(script_dir, tool_name, "suppressions.txt")
    if os.path.exists(suppression_file):
      cmd.append("--suppressions=%s" % suppression_file)
    # Platform-specific suppression.
    for platform in common.PlatformNames():
      platform_suppression_file = \
          os.path.join(script_dir, tool_name, 'suppressions_%s.txt' % platform)
      if os.path.exists(platform_suppression_file):
        cmd.append("--suppressions=%s" % platform_suppression_file)

    if self._options.valgrind_tool_flags:
      cmd += self._options.valgrind_tool_flags.split(" ")
    if self._options.keep_logs:
      cmd += ["--keep_logs"]
    if valgrind_test_args is not None:
      cmd.extend(valgrind_test_args)
    if exe:
      self._EnsureBuildDirFound()
      cmd.append(os.path.join(self._options.build_dir, exe))
      # Valgrind runs tests slowly, so slow tests hurt more; show elapsed time
      # so we can find the slowpokes.
      cmd.append("--gtest_print_time")
    if self._options.gtest_repeat:
      cmd.append("--gtest_repeat=%s" % self._options.gtest_repeat)
    return cmd

  def Run(self):
    ''' Runs the test specified by command-line argument --test '''
    logging.info("running test %s", self._test)
    return self._test_list[self._test](self)

  def _AppendGtestFilter(self, tool, name, cmd):
    '''Append an appropriate --gtest_filter flag to the googletest binary
       invocation.
       If the user passed a filter mentioning only one test, just use it.
       Otherwise, filter out tests listed in the appropriate gtest_exclude
       files.
    '''
    if (self._gtest_filter and
        ":" not in self._gtest_filter and
        "?" not in self._gtest_filter and
        "*" not in self._gtest_filter):
      # A single fully-specified test: pass the filter through untouched.
      cmd.append("--gtest_filter=%s" % self._gtest_filter)
      return

    filters = []
    gtest_files_dir = os.path.join(path_utils.ScriptDir(), "gtest_exclude")

    gtest_filter_files = [
        os.path.join(gtest_files_dir, name + ".gtest-%s.txt" % tool.ToolName())]
    # Use ".gtest.txt" files only for slow tools, as they now contain
    # Valgrind- and Dr.Memory-specific filters.
    # TODO(glider): rename the files to ".gtest_slow.txt"
    if tool.ToolName() in ChromeTests.SLOW_TOOLS:
      gtest_filter_files += [os.path.join(gtest_files_dir, name + ".gtest.txt")]
    for platform_suffix in common.PlatformNames():
      gtest_filter_files += [
        os.path.join(gtest_files_dir, name + ".gtest_%s.txt" % platform_suffix),
        os.path.join(gtest_files_dir, name + ".gtest-%s_%s.txt" % \
            (tool.ToolName(), platform_suffix))]
    logging.info("Reading gtest exclude filter files:")
    for filename in gtest_filter_files:
      # Strip the leading absolute path (may be very long on the bot)
      # and the following / or \.
      readable_filename = filename.replace("\\", "/")  # '\' on Windows
      readable_filename = readable_filename.replace(self._source_dir, "")[1:]
      if not os.path.exists(filename):
        logging.info("  \"%s\" - not found", readable_filename)
        continue
      logging.info("  \"%s\" - OK", readable_filename)
      # Use a context manager so the handle is always closed (the previous
      # version leaked it).
      with open(filename, 'r') as f:
        for line in f:
          if line.startswith("#") or line.startswith("//") or line.isspace():
            continue
          line = line.rstrip()
          test_prefixes = ["FLAKY", "FAILS"]
          for p in test_prefixes:
            # Strip prefixes from the test names.
            line = line.replace(".%s_" % p, ".")
          # Exclude the original test name.
          filters.append(line)
          if line[-2:] != ".*":
            # List all possible prefixes if line doesn't end with ".*".
            for p in test_prefixes:
              filters.append(line.replace(".", ".%s_" % p))
    # Get rid of duplicates; sort so the generated command line is
    # deterministic across runs (plain set iteration order is not).
    filters = sorted(set(filters))
    gtest_filter = self._gtest_filter
    if filters:
      if gtest_filter:
        gtest_filter += ":"
        if gtest_filter.find("-") < 0:
          gtest_filter += "-"
      else:
        gtest_filter = "-"
      gtest_filter += ":".join(filters)
    if gtest_filter:
      cmd.append("--gtest_filter=%s" % gtest_filter)

  @staticmethod
  def ShowTests():
    """Prints all available test names, grouping aliases of the same test."""
    test_to_names = {}
    for name, test_function in ChromeTests._test_list.items():
      test_to_names.setdefault(test_function, []).append(name)

    name_to_aliases = {}
    for names in test_to_names.values():
      # The shortest name is treated as canonical; the rest are aliases.
      names.sort(key=len)
      name_to_aliases[names[0]] = names[1:]

    print("")
    print("Available tests:")
    print("----------------")
    for name, aliases in sorted(name_to_aliases.items()):
      if aliases:
        print("   {} (aka {})".format(name, ', '.join(aliases)))
      else:
        print("   {}".format(name))

  def SetupLdPath(self, requires_build_dir):
    """Appends the build dir to LD_LIBRARY_PATH so the tests can load shared
    libraries that live next to the test binaries.

    Args:
      requires_build_dir: if True, raise BuildDirNotFound when no build dir
          is known; if False, silently do nothing in that case.
    """
    if requires_build_dir:
      self._EnsureBuildDirFound()
    elif not self._options.build_dir:
      return

    # Assign through os.environ rather than os.putenv(): putenv() does not
    # update os.environ, so a later os.getenv() would not observe the change
    # and repeated calls would clobber earlier additions.  Assigning to
    # os.environ also calls putenv() for child processes.
    existing = os.getenv("LD_LIBRARY_PATH")
    if existing:
      os.environ["LD_LIBRARY_PATH"] = "%s:%s" % (existing,
                                                 self._options.build_dir)
    else:
      os.environ["LD_LIBRARY_PATH"] = self._options.build_dir

  def SimpleTest(self, module, name, valgrind_test_args=None, cmd_args=None):
    """Runs the gtest binary |name| from |module| under the selected tool.

    Args:
      module: module name passed through to the tool for reporting.
      name: gtest executable name (".exe" is appended on Windows).
      valgrind_test_args: extra flags for valgrind_test.py itself.
      cmd_args: extra flags appended to the test binary invocation.
    """
    tool = valgrind_test.CreateTool(self._options.valgrind_tool)
    cmd = self._DefaultCommand(tool, name, valgrind_test_args)
    self._AppendGtestFilter(tool, name, cmd)
    cmd.extend(['--test-tiny-timeout=1000'])
    if cmd_args:
      cmd.extend(cmd_args)

    self.SetupLdPath(True)
    return tool.Run(cmd, module)

  def RunCmdLine(self):
    """Runs an arbitrary command line (the positional args) under the tool."""
    tool = valgrind_test.CreateTool(self._options.valgrind_tool)
    cmd = self._DefaultCommand(tool, None, self._args)
    self.SetupLdPath(False)
    return tool.Run(cmd, None)

  def TestAsh(self):
    return self.SimpleTest("ash", "ash_unittests")

  def TestAura(self):
    return self.SimpleTest("aura", "aura_unittests")

  def TestBase(self):
    return self.SimpleTest("base", "base_unittests")

  def TestChromeOS(self):
    return self.SimpleTest("chromeos", "chromeos_unittests")

  def TestCompositor(self):
    return self.SimpleTest("compositor", "compositor_unittests")

  def TestContent(self):
    return self.SimpleTest("content", "content_unittests")

  def TestContentBrowser(self):
    return self.SimpleTest("content", "content_browsertests")

  def TestCourgette(self):
    return self.SimpleTest("courgette", "courgette_unittests")

  def TestCrypto(self):
    return self.SimpleTest("crypto", "crypto_unittests")

  def TestDevice(self):
    return self.SimpleTest("device", "device_unittests")

  def TestFFmpeg(self):
    return self.SimpleTest("chrome", "ffmpeg_unittests")

  def TestFFmpegRegressions(self):
    return self.SimpleTest("chrome", "ffmpeg_regression_tests")

  def TestGPU(self):
    return self.SimpleTest("gpu", "gpu_unittests")

  def TestGURL(self):
    return self.SimpleTest("chrome", "googleurl_unittests")

  def TestIpc(self):
    return self.SimpleTest("ipc", "ipc_tests",
                           valgrind_test_args=["--trace_children"])

  def TestJingle(self):
    return self.SimpleTest("chrome", "jingle_unittests")

  def TestMedia(self):
    return self.SimpleTest("chrome", "media_unittests")

  def TestNet(self):
    return self.SimpleTest("net", "net_unittests")

  def TestPPAPI(self):
    return self.SimpleTest("chrome", "ppapi_unittests")

  def TestPrinting(self):
    return self.SimpleTest("chrome", "printing_unittests")

  def TestRemoting(self):
    return self.SimpleTest("chrome", "remoting_unittests",
                           cmd_args=[
                               "--ui-test-action-timeout=60000",
                               "--ui-test-action-max-timeout=150000"])

  def TestSql(self):
    return self.SimpleTest("chrome", "sql_unittests")

  def TestSync(self):
    return self.SimpleTest("chrome", "sync_unit_tests")

  def TestTestShell(self):
    return self.SimpleTest("webkit", "test_shell_tests")

  def TestUnit(self):
    # http://crbug.com/51716
    # Disabling all unit tests
    # Problems reappeared after r119922
    if common.IsMac() and (self._options.valgrind_tool == "memcheck"):
      logging.warning("unit_tests are disabled for memcheck on MacOS.")
      return 0
    return self.SimpleTest("chrome", "unit_tests")

  def TestUIUnit(self):
    return self.SimpleTest("chrome", "ui_unittests")

  def TestViews(self):
    return self.SimpleTest("views", "views_unittests")

  # Valgrind timeouts are in seconds.
  UI_VALGRIND_ARGS = ["--timeout=14400", "--trace_children", "--indirect"]
  # UI test timeouts are in milliseconds.
  UI_TEST_ARGS = ["--ui-test-action-timeout=60000",
                  "--ui-test-action-max-timeout=150000"]

  # TODO(thestig) fine-tune these values.
  # Valgrind timeouts are in seconds.
  BROWSER_VALGRIND_ARGS = ["--timeout=50000", "--trace_children", "--indirect"]
  # Browser test timeouts are in milliseconds.
  BROWSER_TEST_ARGS = ["--ui-test-action-timeout=200000",
                       "--ui-test-action-max-timeout=400000"]

  def TestAutomatedUI(self):
    return self.SimpleTest("chrome", "automated_ui_tests",
                           valgrind_test_args=self.UI_VALGRIND_ARGS,
                           cmd_args=self.UI_TEST_ARGS)

  def TestBrowser(self):
    return self.SimpleTest("chrome", "browser_tests",
                           valgrind_test_args=self.BROWSER_VALGRIND_ARGS,
                           cmd_args=self.BROWSER_TEST_ARGS)

  def TestInteractiveUI(self):
    return self.SimpleTest("chrome", "interactive_ui_tests",
                           valgrind_test_args=self.UI_VALGRIND_ARGS,
                           cmd_args=self.UI_TEST_ARGS)

  def TestReliability(self):
    script_dir = path_utils.ScriptDir()
    url_list_file = os.path.join(script_dir, "reliability", "url_list.txt")
    return self.SimpleTest("chrome", "reliability_tests",
                           valgrind_test_args=self.UI_VALGRIND_ARGS,
                           cmd_args=(self.UI_TEST_ARGS +
                                     ["--list=%s" % url_list_file]))

  def TestSafeBrowsing(self):
    return self.SimpleTest("chrome", "safe_browsing_tests",
                           valgrind_test_args=self.UI_VALGRIND_ARGS,
                           cmd_args=(["--ui-test-action-max-timeout=450000"]))

  def TestSyncIntegration(self):
    return self.SimpleTest("chrome", "sync_integration_tests",
                           valgrind_test_args=self.UI_VALGRIND_ARGS,
                           cmd_args=(["--ui-test-action-max-timeout=450000"]))

  def TestLayoutChunk(self, chunk_num, chunk_size):
    """Runs tests [chunk_num*chunk_size .. (chunk_num+1)*chunk_size) from the
    list of layout tests, wrapping around to the beginning of the list at the
    end.  If chunk_size is zero, runs all tests in the list once.  If a text
    file is given as argument, it is used as the list of tests.
    """
    # Build the ginormous commandline in 'cmd'.
    # It's going to be roughly
    #  python valgrind_test.py ... python run_webkit_tests.py ...
    # but we'll use the --indirect flag to valgrind_test.py
    # to avoid valgrinding python.
    # Start by building the valgrind_test.py commandline.
    tool = valgrind_test.CreateTool(self._options.valgrind_tool)
    cmd = self._DefaultCommand(tool)
    cmd.append("--trace_children")
    cmd.append("--indirect_webkit_layout")
    cmd.append("--ignore_exit_code")
    # Now build script_cmd, the run_webkit_tests.py commandline.
    # Store each chunk in its own directory so that we can find the data later.
    chunk_dir = os.path.join("layout", "chunk_%05d" % chunk_num)
    out_dir = os.path.join(path_utils.ScriptDir(), "latest")
    out_dir = os.path.join(out_dir, chunk_dir)
    if os.path.exists(out_dir):
      old_files = glob.glob(os.path.join(out_dir, "*.txt"))
      for f in old_files:
        os.remove(f)
    else:
      os.makedirs(out_dir)
    script = os.path.join(self._source_dir, "webkit", "tools", "layout_tests",
                          "run_webkit_tests.py")
    script_cmd = ["python", script, "-v",
                  "--run-singly",  # run a separate DumpRenderTree for each test
                  "--fully-parallel",
                  "--time-out-ms=200000",
                  "--noshow-results",
                  "--no-retry-failures",  # retrying takes too much time
                  "--nocheck-sys-deps"]
    # Pass build mode to run_webkit_tests.py.  We aren't passed it directly,
    # so parse it out of build_dir.  run_webkit_tests.py can only handle
    # the two values "Release" and "Debug".
    # TODO(Hercules): unify how all our scripts pass around build mode
    # (--mode / --target / --build_dir / --debug)
    if self._options.build_dir.endswith("Debug"):
      script_cmd.append("--debug")
    if chunk_size > 0:
      script_cmd.append("--run-chunk=%d:%d" % (chunk_num, chunk_size))
    if self._args:
      # If the arg is a txt file, then treat it as a list of tests.
      if os.path.isfile(self._args[0]) and self._args[0].endswith(".txt"):
        script_cmd.append("--test-list=%s" % self._args[0])
      else:
        script_cmd.extend(self._args)
    self._AppendGtestFilter(tool, "layout", script_cmd)
    # Now run script_cmd with the wrapper in cmd.
    cmd.append("--")
    cmd.extend(script_cmd)

    # Layout tests often times fail quickly, but the buildbot remains green.
    # Detect this situation when running with the default chunk size.
    if chunk_size == self.LAYOUT_TESTS_DEFAULT_CHUNK_SIZE:
      min_runtime_in_seconds = 120
    else:
      min_runtime_in_seconds = 0
    return tool.Run(cmd, "layout",
                    min_runtime_in_seconds=min_runtime_in_seconds)

  def TestLayout(self):
    """Runs one chunk of the layout tests, persisting the chunk index.

    A "chunk file" is maintained in the local directory so that each test
    runs a slice of the layout tests of size chunk_size that increments with
    each run.  Since tests can be added and removed from the layout tests at
    any time, this is not going to give exact coverage, but it will allow us
    to continuously run small slices of the layout tests under valgrind rather
    than having to run all of them in one shot.
    """
    chunk_size = self._options.num_tests
    if chunk_size == 0:
      return self.TestLayoutChunk(0, 0)
    chunk_num = 0
    chunk_file = os.path.join("valgrind_layout_chunk.txt")
    logging.info("Reading state from " + chunk_file)
    try:
      with open(chunk_file) as f:
        contents = f.read()
      if contents:
        chunk_num = int(contents)
      # This should be enough so that we have a couple of complete runs
      # of test data stored in the archive (although note that when we loop
      # that we almost guaranteed won't be at the end of the test list)
      if chunk_num > 10000:
        chunk_num = 0
    except IOError as e:
      logging.error("error reading from file %s (%d, %s)" % (chunk_file,
                    e.errno, e.strerror))
    ret = self.TestLayoutChunk(chunk_num, chunk_size)
    # Wait until after the test runs to completion to write out the new chunk
    # number.  This way, if the bot is killed, we'll start running again from
    # the current chunk rather than skipping it.
    logging.info("Saving state to " + chunk_file)
    try:
      with open(chunk_file, "w") as f:
        chunk_num += 1
        f.write("%d" % chunk_num)
    except IOError as e:
      logging.error("error writing to file %s (%d, %s)" % (chunk_file, e.errno,
                    e.strerror))
    # Since we're running small chunks of the layout tests, it's important to
    # mark the ones that have errors in them.  These won't be visible in the
    # summary list for long, but will be useful for someone reviewing this bot.
    return ret

  # The known list of tests.
  # Recognise the original abbreviations as well as full executable names.
  _test_list = {
    "cmdline" : RunCmdLine,
    "ash": TestAsh,              "ash_unittests": TestAsh,
    "aura": TestAura,            "aura_unittests": TestAura,
    "automated_ui" : TestAutomatedUI,
    "base": TestBase,            "base_unittests": TestBase,
    "browser": TestBrowser,      "browser_tests": TestBrowser,
    "chromeos": TestChromeOS,    "chromeos_unittests": TestChromeOS,
    "compositor": TestCompositor,"compositor_unittests": TestCompositor,
    "content": TestContent,      "content_unittests": TestContent,
    "content_browsertests": TestContentBrowser,
    "courgette": TestCourgette,  "courgette_unittests": TestCourgette,
    "crypto": TestCrypto,        "crypto_unittests": TestCrypto,
    "device": TestDevice,        "device_unittests": TestDevice,
    "ffmpeg": TestFFmpeg,        "ffmpeg_unittests": TestFFmpeg,
    "ffmpeg_regression_tests": TestFFmpegRegressions,
    "googleurl": TestGURL,       "googleurl_unittests": TestGURL,
    "gpu": TestGPU,              "gpu_unittests": TestGPU,
    "ipc": TestIpc,              "ipc_tests": TestIpc,
    "interactive_ui": TestInteractiveUI,
    "layout": TestLayout,        "layout_tests": TestLayout,
    "webkit": TestLayout,
    "media": TestMedia,          "media_unittests": TestMedia,
    "net": TestNet,              "net_unittests": TestNet,
    "jingle": TestJingle,        "jingle_unittests": TestJingle,
    "ppapi": TestPPAPI,          "ppapi_unittests": TestPPAPI,
    "printing": TestPrinting,    "printing_unittests": TestPrinting,
    "reliability": TestReliability, "reliability_tests": TestReliability,
    "remoting": TestRemoting,    "remoting_unittests": TestRemoting,
    "safe_browsing": TestSafeBrowsing, "safe_browsing_tests": TestSafeBrowsing,
    "sql": TestSql,              "sql_unittests": TestSql,
    "sync": TestSync,            "sync_unit_tests": TestSync,
    "sync_integration_tests": TestSyncIntegration,
    "sync_integration": TestSyncIntegration,
    "test_shell": TestTestShell, "test_shell_tests": TestTestShell,
    "ui_unit": TestUIUnit,       "ui_unittests": TestUIUnit,
    "unit": TestUnit,            "unit_tests": TestUnit,
    "views": TestViews,          "views_unittests": TestViews,
  }
541def _main():
542  parser = optparse.OptionParser("usage: %prog -b <dir> -t <test> "
543                                 "[-t <test> ...]")
544  parser.disable_interspersed_args()
545
546  parser.add_option("", "--help-tests", dest="help_tests", action="store_true",
547                    default=False, help="List all available tests")
548  parser.add_option("-b", "--build_dir",
549                    help="the location of the compiler output")
550  parser.add_option("-t", "--test", action="append", default=[],
551                    help="which test to run, supports test:gtest_filter format "
552                         "as well.")
553  parser.add_option("", "--baseline", action="store_true", default=False,
554                    help="generate baseline data instead of validating")
555  parser.add_option("", "--gtest_filter",
556                    help="additional arguments to --gtest_filter")
557  parser.add_option("", "--gtest_repeat",
558                    help="argument for --gtest_repeat")
559  parser.add_option("-v", "--verbose", action="store_true", default=False,
560                    help="verbose output - enable debug log messages")
561  parser.add_option("", "--tool", dest="valgrind_tool", default="memcheck",
562                    help="specify a valgrind tool to run the tests under")
563  parser.add_option("", "--tool_flags", dest="valgrind_tool_flags", default="",
564                    help="specify custom flags for the selected valgrind tool")
565  parser.add_option("", "--keep_logs", action="store_true", default=False,
566                    help="store memory tool logs in the <tool>.logs directory "
567                         "instead of /tmp.\nThis can be useful for tool "
568                         "developers/maintainers.\nPlease note that the <tool>"
569                         ".logs directory will be clobbered on tool startup.")
570  parser.add_option("-n", "--num_tests", type="int",
571                    default=ChromeTests.LAYOUT_TESTS_DEFAULT_CHUNK_SIZE,
572                    help="for layout tests: # of subtests per run.  0 for all.")
573  # TODO(thestig) Remove this if we can.
574  parser.add_option("", "--gtest_color", dest="gtest_color", default="no",
575                    help="dummy compatibility flag for sharding_supervisor.")
576
577  options, args = parser.parse_args()
578
579  if options.verbose:
580    logging_utils.config_root(logging.DEBUG)
581  else:
582    logging_utils.config_root()
583
584  if options.help_tests:
585    ChromeTests.ShowTests()
586    return 0
587
588  if not options.test:
589    parser.error("--test not specified")
590
591  if len(options.test) != 1 and options.gtest_filter:
592    parser.error("--gtest_filter and multiple tests don't make sense together")
593
594  for t in options.test:
595    tests = ChromeTests(options, args, t)
596    ret = tests.Run()
597    if ret: return ret
598  return 0
599
600
# Script entry point: propagate the test result as the process exit code.
if __name__ == "__main__":
  sys.exit(_main())