chrome_tests.py revision f2477e01787aa58f445919b809d89e252beef54f
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

''' Runs various chrome tests through valgrind_test.py.'''
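
# Example invocation (illustrative; the flags are parsed in _main below):
#   tools/valgrind/chrome_tests.py --build-dir=out/Debug -t base_unittests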

import glob
import logging
import multiprocessing
import optparse
import os
import stat
import sys

import logging_utils
import path_utils

import common
import valgrind_test

class TestNotFound(Exception): pass

class MultipleGTestFiltersSpecified(Exception): pass

class BuildDirNotFound(Exception): pass

class BuildDirAmbiguous(Exception): pass

class ChromeTests:
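  # Tools that are too slow to run the full test suites under instrumentation;
  # the coarse ".gtest.txt" exclude lists apply only to these (see
  # _AppendGtestFilter below).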
  SLOW_TOOLS = ["memcheck", "tsan", "tsan_rv", "drmemory"]
  LAYOUT_TESTS_DEFAULT_CHUNK_SIZE = 500

  def __init__(self, options, args, test):
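    # A test spec may embed its own filter, e.g. "base:ThreadTest.*", which is
    # equivalent to passing --test base --gtest_filter=ThreadTest.*.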
    if ':' in test:
      (self._test, self._gtest_filter) = test.split(':', 1)
    else:
      self._test = test
      self._gtest_filter = options.gtest_filter

    if self._test not in self._test_list:
      raise TestNotFound("Unknown test: %s" % test)

    if options.gtest_filter and options.gtest_filter != self._gtest_filter:
      raise MultipleGTestFiltersSpecified("Cannot specify both --gtest_filter "
                                          "and --test %s" % test)

    self._options = options
    self._args = args

    script_dir = path_utils.ScriptDir()
    # Compute the top of the tree (the "source dir") from the script dir (where
    # this script lives).  We assume that the script dir is in tools/valgrind/
    # relative to the top of the tree.
    self._source_dir = os.path.dirname(os.path.dirname(script_dir))
    # Since this path is used for string matching, make sure it's always
    # an absolute Unix-style path.
    self._source_dir = os.path.abspath(self._source_dir).replace('\\', '/')
    valgrind_test_script = os.path.join(script_dir, "valgrind_test.py")
    self._command_preamble = ["--source-dir=%s" % (self._source_dir)]

    if not self._options.build_dir:
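      # No --build-dir was given; probe the conventional output locations
      # (xcodebuild/ for Xcode, out/ or build/ for the other build systems).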
      dirs = [
        os.path.join(self._source_dir, "xcodebuild", "Debug"),
        os.path.join(self._source_dir, "out", "Debug"),
        os.path.join(self._source_dir, "build", "Debug"),
      ]
      build_dir = [d for d in dirs if os.path.isdir(d)]
      if len(build_dir) > 1:
        raise BuildDirAmbiguous("Found more than one suitable build dir:\n"
                                "%s\nPlease specify just one "
                                "using --build-dir" % ", ".join(build_dir))
      elif build_dir:
        self._options.build_dir = build_dir[0]
      else:
        self._options.build_dir = None

    if self._options.build_dir:
      build_dir = os.path.abspath(self._options.build_dir)
      self._command_preamble += ["--build-dir=%s" % build_dir]

  def _EnsureBuildDirFound(self):
    if not self._options.build_dir:
      raise BuildDirNotFound("Oops, couldn't find a build dir, please "
                             "specify it manually using --build-dir")

  def _DefaultCommand(self, tool, exe=None, valgrind_test_args=None):
    '''Generates the default command array that most tests will use.'''
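    # The result is consumed by valgrind_test.py.  An illustrative value:
    #   ["--source-dir=/path/to/src", "--build-dir=/path/to/src/out/Debug",
    #    "--suppressions=.../memcheck/suppressions.txt",
    #    "/path/to/src/out/Debug/base_unittests", "--gtest_print_time"]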
    if exe and common.IsWindows():
      exe += '.exe'

    cmd = list(self._command_preamble)

    # Find all suppressions matching the following pattern:
    # tools/valgrind/TOOL/suppressions[_PLATFORM].txt
    # and list them with --suppressions= prefix.
    script_dir = path_utils.ScriptDir()
    tool_name = tool.ToolName()
    suppression_file = os.path.join(script_dir, tool_name, "suppressions.txt")
    if os.path.exists(suppression_file):
      cmd.append("--suppressions=%s" % suppression_file)
    # Platform-specific suppressions.
    for platform in common.PlatformNames():
      platform_suppression_file = \
          os.path.join(script_dir, tool_name, 'suppressions_%s.txt' % platform)
      if os.path.exists(platform_suppression_file):
        cmd.append("--suppressions=%s" % platform_suppression_file)

    if self._options.valgrind_tool_flags:
      cmd += self._options.valgrind_tool_flags.split(" ")
    if self._options.keep_logs:
      cmd += ["--keep_logs"]
    if valgrind_test_args is not None:
      for arg in valgrind_test_args:
        cmd.append(arg)
    if exe:
      self._EnsureBuildDirFound()
      cmd.append(os.path.join(self._options.build_dir, exe))
      # Valgrind runs tests slowly, so slow tests hurt more; show elapsed time
      # so we can find the slowpokes.
      cmd.append("--gtest_print_time")
    if self._options.gtest_repeat:
      cmd.append("--gtest_repeat=%s" % self._options.gtest_repeat)
    if self._options.gtest_shuffle:
      cmd.append("--gtest_shuffle")
    return cmd

  def Run(self):
    ''' Runs the test specified by command-line argument --test. '''
    logging.info("running test %s" % (self._test))
    return self._test_list[self._test](self)

  def _AppendGtestFilter(self, tool, name, cmd):
    '''Append an appropriate --gtest_filter flag to the googletest binary
       invocation.
       If the user passed their own filter mentioning only one test, just use
       it.  Otherwise, filter out tests listed in the appropriate gtest_exclude
       files.
    '''
    if (self._gtest_filter and
        ":" not in self._gtest_filter and
        "?" not in self._gtest_filter and
        "*" not in self._gtest_filter):
      cmd.append("--gtest_filter=%s" % self._gtest_filter)
      return

    filters = []
    gtest_files_dir = os.path.join(path_utils.ScriptDir(), "gtest_exclude")
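    # Exclude-file names follow NAME.gtest[-TOOL][_PLATFORM].txt; an
    # illustrative example: "base_unittests.gtest-drmemory_win32.txt".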

    gtest_filter_files = [
        os.path.join(gtest_files_dir, name + ".gtest-%s.txt" % tool.ToolName())]
    # Use ".gtest.txt" files only for slow tools, as they now contain
    # Valgrind- and Dr.Memory-specific filters.
    # TODO(glider): rename the files to ".gtest_slow.txt"
    if tool.ToolName() in ChromeTests.SLOW_TOOLS:
      gtest_filter_files += [os.path.join(gtest_files_dir, name + ".gtest.txt")]
    for platform_suffix in common.PlatformNames():
      gtest_filter_files += [
        os.path.join(gtest_files_dir, name + ".gtest_%s.txt" % platform_suffix),
        os.path.join(gtest_files_dir, name + ".gtest-%s_%s.txt" % \
            (tool.ToolName(), platform_suffix))]
    logging.info("Reading gtest exclude filter files:")
    for filename in gtest_filter_files:
      # Strip the leading absolute path (may be very long on the bot)
      # and the following / or \.
      readable_filename = filename.replace("\\", "/")  # '\' on Windows
      readable_filename = readable_filename.replace(self._source_dir, "")[1:]
      if not os.path.exists(filename):
        logging.info("  \"%s\" - not found" % readable_filename)
        continue
      logging.info("  \"%s\" - OK" % readable_filename)
      f = open(filename, 'r')
      for line in f.readlines():
        if line.startswith("#") or line.startswith("//") or line.isspace():
          continue
        line = line.rstrip()
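        # A file entry "Suite.Test" ends up excluding "Suite.Test",
        # "Suite.FLAKY_Test", and "Suite.FAILS_Test".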
        test_prefixes = ["FLAKY", "FAILS"]
        for p in test_prefixes:
          # Strip prefixes from the test names.
          line = line.replace(".%s_" % p, ".")
        # Exclude the original test name.
        filters.append(line)
        if not line.endswith(".*"):
          # List all possible prefixes if line doesn't end with ".*".
          for p in test_prefixes:
            filters.append(line.replace(".", ".%s_" % p))
      f.close()
    # Get rid of duplicates.
    filters = set(filters)
    gtest_filter = self._gtest_filter
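    # Splice the excludes into the user's filter.  The combined value looks
    # like "Positive.Pattern*:-Excluded.One:Excluded.Two" (illustrative);
    # everything after the "-" is excluded.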
    if filters:
      if gtest_filter:
        gtest_filter += ":"
        if "-" not in gtest_filter:
          gtest_filter += "-"
      else:
        gtest_filter = "-"
      gtest_filter += ":".join(filters)
    if gtest_filter:
      cmd.append("--gtest_filter=%s" % gtest_filter)

  @staticmethod
  def ShowTests():
    test_to_names = {}
    for name, test_function in ChromeTests._test_list.iteritems():
      test_to_names.setdefault(test_function, []).append(name)

    name_to_aliases = {}
    for names in test_to_names.itervalues():
      names.sort(key=len)
      name_to_aliases[names[0]] = names[1:]

    print
    print "Available tests:"
    print "----------------"
    for name, aliases in sorted(name_to_aliases.iteritems()):
      if aliases:
        print "   {} (aka {})".format(name, ', '.join(aliases))
      else:
        print "   {}".format(name)

  def SetupLdPath(self, requires_build_dir):
    if requires_build_dir:
      self._EnsureBuildDirFound()
    elif not self._options.build_dir:
      return

    # Append build_dir to LD_LIBRARY_PATH so external libraries can be loaded.
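    # Note: os.putenv updates the environment seen by child processes (the
    # test binaries) without touching os.environ, which is all we need here.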
    if os.getenv("LD_LIBRARY_PATH"):
      os.putenv("LD_LIBRARY_PATH", "%s:%s" % (os.getenv("LD_LIBRARY_PATH"),
                                              self._options.build_dir))
    else:
      os.putenv("LD_LIBRARY_PATH", self._options.build_dir)

  def SimpleTest(self, module, name, valgrind_test_args=None, cmd_args=None):
    tool = valgrind_test.CreateTool(self._options.valgrind_tool)
    cmd = self._DefaultCommand(tool, name, valgrind_test_args)
    self._AppendGtestFilter(tool, name, cmd)
    cmd.extend(['--test-tiny-timeout=1000'])
    if cmd_args:
      cmd.extend(cmd_args)

    self.SetupLdPath(True)
    return tool.Run(cmd, module)

  def RunCmdLine(self):
    tool = valgrind_test.CreateTool(self._options.valgrind_tool)
    cmd = self._DefaultCommand(tool, None, self._args)
    self.SetupLdPath(False)
    return tool.Run(cmd, None)

  def TestAppList(self):
    return self.SimpleTest("app_list", "app_list_unittests")

  def TestAsh(self):
    return self.SimpleTest("ash", "ash_unittests")

  def TestAura(self):
    return self.SimpleTest("aura", "aura_unittests")

  def TestBase(self):
    return self.SimpleTest("base", "base_unittests")

  def TestChromeOS(self):
    return self.SimpleTest("chromeos", "chromeos_unittests")

  def TestComponents(self):
    return self.SimpleTest("components", "components_unittests")

  def TestCompositor(self):
    return self.SimpleTest("compositor", "compositor_unittests")

  def TestContent(self):
    return self.SimpleTest("content", "content_unittests")

  def TestContentBrowser(self):
    return self.SimpleTest("content", "content_browsertests")

  def TestCourgette(self):
    return self.SimpleTest("courgette", "courgette_unittests")

  def TestCrypto(self):
    return self.SimpleTest("crypto", "crypto_unittests")

  def TestDevice(self):
    return self.SimpleTest("device", "device_unittests")

  def TestEvents(self):
    return self.SimpleTest("events", "events_unittests")

  def TestFFmpeg(self):
    return self.SimpleTest("chrome", "ffmpeg_unittests")

  def TestFFmpegRegressions(self):
    return self.SimpleTest("chrome", "ffmpeg_regression_tests")

  def TestGPU(self):
    return self.SimpleTest("gpu", "gpu_unittests")

  def TestIpc(self):
    return self.SimpleTest("ipc", "ipc_tests",
                           valgrind_test_args=["--trace_children"])

  def TestJingle(self):
    return self.SimpleTest("chrome", "jingle_unittests")

  def TestMedia(self):
    return self.SimpleTest("chrome", "media_unittests")

  def TestMessageCenter(self):
    return self.SimpleTest("message_center", "message_center_unittests")

  def TestNet(self):
    return self.SimpleTest("net", "net_unittests")

  def TestNetPerf(self):
    return self.SimpleTest("net", "net_perftests")

  def TestPPAPI(self):
    return self.SimpleTest("chrome", "ppapi_unittests")

  def TestPrinting(self):
    return self.SimpleTest("chrome", "printing_unittests")

  def TestRemoting(self):
    return self.SimpleTest("chrome", "remoting_unittests",
                           cmd_args=[
                               "--ui-test-action-timeout=60000",
                               "--ui-test-action-max-timeout=150000"])

  def TestSql(self):
    return self.SimpleTest("chrome", "sql_unittests")

  def TestSync(self):
    return self.SimpleTest("chrome", "sync_unit_tests")

  def TestLinuxSandbox(self):
    return self.SimpleTest("sandbox", "sandbox_linux_unittests")

  def TestUnit(self):
    # http://crbug.com/51716
    # Disabling all unit tests
    # Problems reappeared after r119922
    if common.IsMac() and self._options.valgrind_tool == "memcheck":
      logging.warning("unit_tests are disabled for memcheck on MacOS.")
      return 0
    return self.SimpleTest("chrome", "unit_tests")

  def TestUIUnit(self):
    return self.SimpleTest("chrome", "ui_unittests")

  def TestURL(self):
    return self.SimpleTest("chrome", "url_unittests")

  def TestViews(self):
    return self.SimpleTest("views", "views_unittests")

  # Valgrind timeouts are in seconds.
  UI_VALGRIND_ARGS = ["--timeout=14400", "--trace_children", "--indirect"]
  # UI test timeouts are in milliseconds.
  UI_TEST_ARGS = ["--ui-test-action-timeout=60000",
                  "--ui-test-action-max-timeout=150000",
                  "--no-sandbox"]

  # TODO(thestig) fine-tune these values.
  # Valgrind timeouts are in seconds.
  BROWSER_VALGRIND_ARGS = ["--timeout=50000", "--trace_children", "--indirect"]
  # Browser test timeouts are in milliseconds.
  BROWSER_TEST_ARGS = ["--ui-test-action-timeout=400000",
                       "--ui-test-action-max-timeout=800000",
                       "--no-sandbox"]

  def TestAutomatedUI(self):
    return self.SimpleTest("chrome", "automated_ui_tests",
                           valgrind_test_args=self.UI_VALGRIND_ARGS,
                           cmd_args=self.UI_TEST_ARGS)

  def TestBrowser(self):
    return self.SimpleTest("chrome", "browser_tests",
                           valgrind_test_args=self.BROWSER_VALGRIND_ARGS,
                           cmd_args=self.BROWSER_TEST_ARGS)

  def TestInteractiveUI(self):
    return self.SimpleTest("chrome", "interactive_ui_tests",
                           valgrind_test_args=self.UI_VALGRIND_ARGS,
                           cmd_args=self.UI_TEST_ARGS)

  def TestReliability(self):
    script_dir = path_utils.ScriptDir()
    url_list_file = os.path.join(script_dir, "reliability", "url_list.txt")
    return self.SimpleTest("chrome", "reliability_tests",
                           valgrind_test_args=self.UI_VALGRIND_ARGS,
                           cmd_args=(self.UI_TEST_ARGS +
                                     ["--list=%s" % url_list_file]))

  def TestSafeBrowsing(self):
    return self.SimpleTest("chrome", "safe_browsing_tests",
                           valgrind_test_args=self.UI_VALGRIND_ARGS,
                           cmd_args=["--ui-test-action-max-timeout=450000"])

  def TestSyncIntegration(self):
    return self.SimpleTest("chrome", "sync_integration_tests",
                           valgrind_test_args=self.UI_VALGRIND_ARGS,
                           cmd_args=["--ui-test-action-max-timeout=450000"])

  def TestLayoutChunk(self, chunk_num, chunk_size):
    # Run tests [chunk_num*chunk_size .. (chunk_num+1)*chunk_size) from the
    # list of tests.  Wrap around to beginning of list at end.
    # If chunk_size is zero, run all tests in the list once.
    # If a text file is given as argument, it is used as the list of tests.
    #
    # Build the ginormous commandline in 'cmd'.
    # It's going to be roughly
    #  python valgrind_test.py ... python run_webkit_tests.py ...
    # but we'll use the --indirect flag to valgrind_test.py
    # to avoid valgrinding python.
    # Start by building the valgrind_test.py commandline.
    tool = valgrind_test.CreateTool(self._options.valgrind_tool)
    cmd = self._DefaultCommand(tool)
    cmd.append("--trace_children")
    cmd.append("--indirect_webkit_layout")
    cmd.append("--ignore_exit_code")
    # Now build script_cmd, the run_webkit_tests.py commandline.
    # Store each chunk in its own directory so that we can find the data later.
    chunk_dir = os.path.join("layout", "chunk_%05d" % chunk_num)
    out_dir = os.path.join(path_utils.ScriptDir(), "latest")
    out_dir = os.path.join(out_dir, chunk_dir)
    if os.path.exists(out_dir):
      old_files = glob.glob(os.path.join(out_dir, "*.txt"))
      for f in old_files:
        os.remove(f)
    else:
      os.makedirs(out_dir)
    script = os.path.join(self._source_dir, "webkit", "tools", "layout_tests",
                          "run_webkit_tests.py")
    # http://crbug.com/260627: After the switch to content_shell from DRT, each
    # test now brings up 3 processes.  Under Valgrind, they become memory bound
    # and can eventually OOM if we don't reduce the total count.
    jobs = max(1, int(multiprocessing.cpu_count() * 0.4))
    script_cmd = ["python", script, "-v",
                  "--run-singly",  # run a separate DumpRenderTree for each test
                  "--fully-parallel",
                  "--child-processes=%d" % jobs,
                  "--time-out-ms=200000",
                  "--no-retry-failures",  # retrying takes too much time
                  # http://crbug.com/176908: Don't launch a browser when done.
                  "--no-show-results",
                  "--nocheck-sys-deps"]
    # Pass build mode to run_webkit_tests.py.  We aren't passed it directly,
    # so parse it out of build_dir.  run_webkit_tests.py can only handle
    # the two values "Release" and "Debug".
    # TODO(Hercules): unify how all our scripts pass around build mode
    # (--mode / --target / --build-dir / --debug)
    if self._options.build_dir:
      build_root, mode = os.path.split(self._options.build_dir)
      script_cmd.extend(["--build-directory", build_root, "--target", mode])
    if chunk_size > 0:
      script_cmd.append("--run-chunk=%d:%d" % (chunk_num, chunk_size))
    if self._args:
      # If the arg is a txt file, then treat it as a list of tests.
      if os.path.isfile(self._args[0]) and self._args[0].endswith(".txt"):
        script_cmd.append("--test-list=%s" % self._args[0])
      else:
        script_cmd.extend(self._args)
    self._AppendGtestFilter(tool, "layout", script_cmd)
    # Now run script_cmd with the wrapper in cmd.
    cmd.extend(["--"])
    cmd.extend(script_cmd)

    # Layout tests often fail quickly, but the buildbot remains green.
    # Detect this situation when running with the default chunk size.
    if chunk_size == self.LAYOUT_TESTS_DEFAULT_CHUNK_SIZE:
      min_runtime_in_seconds = 120
    else:
      min_runtime_in_seconds = 0
    ret = tool.Run(cmd, "layout", min_runtime_in_seconds=min_runtime_in_seconds)
    return ret


  def TestLayout(self):
    # A "chunk file" is maintained in the local directory so that each run
    # covers a successive slice of the layout tests, chunk_size tests at a
    # time.  Since tests can be added and removed from the layout tests at any
    # time, this is not going to give exact coverage, but it will allow us to
    # continuously run small slices of the layout tests under valgrind rather
    # than having to run all of them in one shot.
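    # E.g. with the default chunk size of 500, run N covers layout tests
    # [500*N, 500*N + 500), wrapping around at the end of the list.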
    chunk_size = self._options.num_tests
    if chunk_size == 0:
      return self.TestLayoutChunk(0, 0)
    chunk_num = 0
    chunk_file = "valgrind_layout_chunk.txt"
    logging.info("Reading state from " + chunk_file)
    try:
      f = open(chunk_file)
      contents = f.read()
      if contents:
        chunk_num = int(contents)
      # This should be enough so that we have a couple of complete runs
      # of test data stored in the archive (although note that when we loop
      # we are almost guaranteed not to be at the end of the test list).
      if chunk_num > 10000:
        chunk_num = 0
      f.close()
    except IOError, (errno, strerror):
      logging.error("error reading from file %s (%d, %s)" % (chunk_file,
                    errno, strerror))
    # Save the new chunk number before running the tests. Otherwise if a
    # particular chunk hangs the bot, the chunk number will never get
    # incremented and the bot will be wedged.
    logging.info("Saving state to " + chunk_file)
    try:
      f = open(chunk_file, "w")
      chunk_num += 1
      f.write("%d" % chunk_num)
      f.close()
    except IOError, (errno, strerror):
      logging.error("error writing to file %s (%d, %s)" % (chunk_file, errno,
                    strerror))
    # Since we're running small chunks of the layout tests, it's important to
    # mark the ones that have errors in them.  These won't be visible in the
    # summary list for long, but will be useful for someone reviewing this bot.
    return self.TestLayoutChunk(chunk_num, chunk_size)

  # The known list of tests.
  # Recognise the original abbreviations as well as full executable names.
  _test_list = {
    "cmdline": RunCmdLine,
    "app_list": TestAppList,     "app_list_unittests": TestAppList,
    "ash": TestAsh,              "ash_unittests": TestAsh,
    "aura": TestAura,            "aura_unittests": TestAura,
    "automated_ui": TestAutomatedUI,
    "base": TestBase,            "base_unittests": TestBase,
    "browser": TestBrowser,      "browser_tests": TestBrowser,
    "chromeos": TestChromeOS,    "chromeos_unittests": TestChromeOS,
    "components": TestComponents, "components_unittests": TestComponents,
    "compositor": TestCompositor, "compositor_unittests": TestCompositor,
    "content": TestContent,      "content_unittests": TestContent,
    "content_browsertests": TestContentBrowser,
    "courgette": TestCourgette,  "courgette_unittests": TestCourgette,
    "crypto": TestCrypto,        "crypto_unittests": TestCrypto,
    "device": TestDevice,        "device_unittests": TestDevice,
    "events": TestEvents,        "events_unittests": TestEvents,
    "ffmpeg": TestFFmpeg,        "ffmpeg_unittests": TestFFmpeg,
    "ffmpeg_regression_tests": TestFFmpegRegressions,
    "gpu": TestGPU,              "gpu_unittests": TestGPU,
    "ipc": TestIpc,              "ipc_tests": TestIpc,
    "interactive_ui": TestInteractiveUI,
    "jingle": TestJingle,        "jingle_unittests": TestJingle,
    "layout": TestLayout,        "layout_tests": TestLayout,
    "webkit": TestLayout,
    "media": TestMedia,          "media_unittests": TestMedia,
    "message_center": TestMessageCenter,
    "message_center_unittests": TestMessageCenter,
    "net": TestNet,              "net_unittests": TestNet,
    "net_perf": TestNetPerf,     "net_perftests": TestNetPerf,
    "ppapi": TestPPAPI,          "ppapi_unittests": TestPPAPI,
    "printing": TestPrinting,    "printing_unittests": TestPrinting,
    "reliability": TestReliability, "reliability_tests": TestReliability,
    "remoting": TestRemoting,    "remoting_unittests": TestRemoting,
    "safe_browsing": TestSafeBrowsing, "safe_browsing_tests": TestSafeBrowsing,
    "sandbox": TestLinuxSandbox, "sandbox_linux_unittests": TestLinuxSandbox,
    "sql": TestSql,              "sql_unittests": TestSql,
    "sync": TestSync,            "sync_unit_tests": TestSync,
    "sync_integration_tests": TestSyncIntegration,
    "sync_integration": TestSyncIntegration,
    "ui_unit": TestUIUnit,       "ui_unittests": TestUIUnit,
    "unit": TestUnit,            "unit_tests": TestUnit,
    "url": TestURL,              "url_unittests": TestURL,
    "views": TestViews,          "views_unittests": TestViews,
  }


def _main():
  parser = optparse.OptionParser("usage: %prog -b <dir> -t <test> "
                                 "[-t <test> ...]")

  parser.add_option("--help-tests", dest="help_tests", action="store_true",
                    default=False, help="List all available tests")
  parser.add_option("-b", "--build-dir",
                    help="the location of the compiler output")
  parser.add_option("--target", help="Debug or Release")
  parser.add_option("-t", "--test", action="append", default=[],
                    help="which test to run, supports test:gtest_filter format "
                         "as well.")
  parser.add_option("--baseline", action="store_true", default=False,
                    help="generate baseline data instead of validating")
  parser.add_option("--gtest_filter",
                    help="additional arguments to --gtest_filter")
  parser.add_option("--gtest_repeat", help="argument for --gtest_repeat")
  parser.add_option("--gtest_shuffle", action="store_true", default=False,
                    help="Randomize the test order on every iteration.")
  parser.add_option("-v", "--verbose", action="store_true", default=False,
                    help="verbose output - enable debug log messages")
  parser.add_option("--tool", dest="valgrind_tool", default="memcheck",
                    help="specify a valgrind tool to run the tests under")
  parser.add_option("--tool_flags", dest="valgrind_tool_flags", default="",
                    help="specify custom flags for the selected valgrind tool")
  parser.add_option("--keep_logs", action="store_true", default=False,
                    help="store memory tool logs in the <tool>.logs directory "
                         "instead of /tmp.\nThis can be useful for tool "
                         "developers/maintainers.\nPlease note that the <tool>"
                         ".logs directory will be clobbered on tool startup.")
  parser.add_option("-n", "--num_tests", type="int",
                    default=ChromeTests.LAYOUT_TESTS_DEFAULT_CHUNK_SIZE,
                    help="for layout tests: # of subtests per run.  0 for all.")
  # TODO(thestig) Remove this if we can.
  parser.add_option("--gtest_color", dest="gtest_color", default="no",
                    help="dummy compatibility flag for sharding_supervisor.")

  options, args = parser.parse_args()

  # Bake target into build_dir.
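  # E.g. --build-dir=out --target=Debug yields <absolute path to out>/Debug.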
  if options.target and options.build_dir:
    assert (options.target !=
            os.path.basename(os.path.dirname(options.build_dir)))
    options.build_dir = os.path.join(os.path.abspath(options.build_dir),
                                     options.target)

  if options.verbose:
    logging_utils.config_root(logging.DEBUG)
  else:
    logging_utils.config_root()

  if options.help_tests:
    ChromeTests.ShowTests()
    return 0

  if not options.test:
    parser.error("--test not specified")

  if len(options.test) != 1 and options.gtest_filter:
    parser.error("--gtest_filter and multiple tests don't make sense together")

  for t in options.test:
    tests = ChromeTests(options, args, t)
    ret = tests.Run()
    if ret: return ret
  return 0


if __name__ == "__main__":
  sys.exit(_main())
