# chrome_tests.py revision 116680a4aac90f2aa7413d9095a592090648e557
1#!/usr/bin/env python
2# Copyright (c) 2012 The Chromium Authors. All rights reserved.
3# Use of this source code is governed by a BSD-style license that can be
4# found in the LICENSE file.
5
6''' Runs various chrome tests through valgrind_test.py.'''
7
8import glob
9import logging
10import multiprocessing
11import optparse
12import os
13import stat
14import sys
15
16import logging_utils
17import path_utils
18
19import common
20import valgrind_test
21
# Raised when --test names a test that is not in ChromeTests._test_list.
class TestNotFound(Exception): pass

# Raised when a gtest filter is supplied both inline ("test:filter") and
# via --gtest_filter.
class MultipleGTestFiltersSpecified(Exception): pass

# Raised when a build directory is required but none was found or given.
class BuildDirNotFound(Exception): pass

# Raised when more than one default build directory candidate exists.
class BuildDirAmbiguous(Exception): pass
29
class ChromeTests:
  """Dispatches a single Chromium test binary through valgrind_test.py."""
  # Tools that run tests slowly; they additionally honor the generic
  # ".gtest.txt" exclude files (see _AppendGtestFilter).
  SLOW_TOOLS = ["memcheck", "tsan", "tsan_rv", "drmemory"]
  # Default number of layout tests per chunk (see TestLayout / --num_tests).
  LAYOUT_TESTS_DEFAULT_CHUNK_SIZE = 300
33
34  def __init__(self, options, args, test):
35    if ':' in test:
36      (self._test, self._gtest_filter) = test.split(':', 1)
37    else:
38      self._test = test
39      self._gtest_filter = options.gtest_filter
40
41    if self._test not in self._test_list:
42      raise TestNotFound("Unknown test: %s" % test)
43
44    if options.gtest_filter and options.gtest_filter != self._gtest_filter:
45      raise MultipleGTestFiltersSpecified("Can not specify both --gtest_filter "
46                                          "and --test %s" % test)
47
48    self._options = options
49    self._args = args
50
51    script_dir = path_utils.ScriptDir()
52    # Compute the top of the tree (the "source dir") from the script dir (where
53    # this script lives).  We assume that the script dir is in tools/valgrind/
54    # relative to the top of the tree.
55    self._source_dir = os.path.dirname(os.path.dirname(script_dir))
56    # since this path is used for string matching, make sure it's always
57    # an absolute Unix-style path
58    self._source_dir = os.path.abspath(self._source_dir).replace('\\', '/')
59    valgrind_test_script = os.path.join(script_dir, "valgrind_test.py")
60    self._command_preamble = ["--source-dir=%s" % (self._source_dir)]
61
62    if not self._options.build_dir:
63      dirs = [
64        os.path.join(self._source_dir, "xcodebuild", "Debug"),
65        os.path.join(self._source_dir, "out", "Debug"),
66        os.path.join(self._source_dir, "build", "Debug"),
67      ]
68      build_dir = [d for d in dirs if os.path.isdir(d)]
69      if len(build_dir) > 1:
70        raise BuildDirAmbiguous("Found more than one suitable build dir:\n"
71                                "%s\nPlease specify just one "
72                                "using --build-dir" % ", ".join(build_dir))
73      elif build_dir:
74        self._options.build_dir = build_dir[0]
75      else:
76        self._options.build_dir = None
77
78    if self._options.build_dir:
79      build_dir = os.path.abspath(self._options.build_dir)
80      self._command_preamble += ["--build-dir=%s" % (self._options.build_dir)]
81
82  def _EnsureBuildDirFound(self):
83    if not self._options.build_dir:
84      raise BuildDirNotFound("Oops, couldn't find a build dir, please "
85                             "specify it manually using --build-dir")
86
87  def _DefaultCommand(self, tool, exe=None, valgrind_test_args=None):
88    '''Generates the default command array that most tests will use.'''
89    if exe and common.IsWindows():
90      exe += '.exe'
91
92    cmd = list(self._command_preamble)
93
94    # Find all suppressions matching the following pattern:
95    # tools/valgrind/TOOL/suppressions[_PLATFORM].txt
96    # and list them with --suppressions= prefix.
97    script_dir = path_utils.ScriptDir()
98    tool_name = tool.ToolName();
99    suppression_file = os.path.join(script_dir, tool_name, "suppressions.txt")
100    if os.path.exists(suppression_file):
101      cmd.append("--suppressions=%s" % suppression_file)
102    # Platform-specific suppression
103    for platform in common.PlatformNames():
104      platform_suppression_file = \
105          os.path.join(script_dir, tool_name, 'suppressions_%s.txt' % platform)
106      if os.path.exists(platform_suppression_file):
107        cmd.append("--suppressions=%s" % platform_suppression_file)
108
109    if self._options.valgrind_tool_flags:
110      cmd += self._options.valgrind_tool_flags.split(" ")
111    if self._options.keep_logs:
112      cmd += ["--keep_logs"]
113    if valgrind_test_args != None:
114      for arg in valgrind_test_args:
115        cmd.append(arg)
116    if exe:
117      self._EnsureBuildDirFound()
118      cmd.append(os.path.join(self._options.build_dir, exe))
119      # Valgrind runs tests slowly, so slow tests hurt more; show elapased time
120      # so we can find the slowpokes.
121      cmd.append("--gtest_print_time")
122      # Built-in test launcher for gtest-based executables runs tests using
123      # multiple process by default. Force the single-process mode back.
124      cmd.append("--single-process-tests")
125    if self._options.gtest_repeat:
126      cmd.append("--gtest_repeat=%s" % self._options.gtest_repeat)
127    if self._options.gtest_shuffle:
128      cmd.append("--gtest_shuffle")
129    if self._options.brave_new_test_launcher:
130      cmd.append("--brave-new-test-launcher")
131    if self._options.test_launcher_bot_mode:
132      cmd.append("--test-launcher-bot-mode")
133    return cmd
134
135  def Run(self):
136    ''' Runs the test specified by command-line argument --test '''
137    logging.info("running test %s" % (self._test))
138    return self._test_list[self._test](self)
139
140  def _AppendGtestFilter(self, tool, name, cmd):
141    '''Append an appropriate --gtest_filter flag to the googletest binary
142       invocation.
143       If the user passed his own filter mentioning only one test, just use it.
144       Othewise, filter out tests listed in the appropriate gtest_exclude files.
145    '''
146    if (self._gtest_filter and
147        ":" not in self._gtest_filter and
148        "?" not in self._gtest_filter and
149        "*" not in self._gtest_filter):
150      cmd.append("--gtest_filter=%s" % self._gtest_filter)
151      return
152
153    filters = []
154    gtest_files_dir = os.path.join(path_utils.ScriptDir(), "gtest_exclude")
155
156    gtest_filter_files = [
157        os.path.join(gtest_files_dir, name + ".gtest-%s.txt" % tool.ToolName())]
158    # Use ".gtest.txt" files only for slow tools, as they now contain
159    # Valgrind- and Dr.Memory-specific filters.
160    # TODO(glider): rename the files to ".gtest_slow.txt"
161    if tool.ToolName() in ChromeTests.SLOW_TOOLS:
162      gtest_filter_files += [os.path.join(gtest_files_dir, name + ".gtest.txt")]
163    for platform_suffix in common.PlatformNames():
164      gtest_filter_files += [
165        os.path.join(gtest_files_dir, name + ".gtest_%s.txt" % platform_suffix),
166        os.path.join(gtest_files_dir, name + ".gtest-%s_%s.txt" % \
167            (tool.ToolName(), platform_suffix))]
168    logging.info("Reading gtest exclude filter files:")
169    for filename in gtest_filter_files:
170      # strip the leading absolute path (may be very long on the bot)
171      # and the following / or \.
172      readable_filename = filename.replace("\\", "/")  # '\' on Windows
173      readable_filename = readable_filename.replace(self._source_dir, "")[1:]
174      if not os.path.exists(filename):
175        logging.info("  \"%s\" - not found" % readable_filename)
176        continue
177      logging.info("  \"%s\" - OK" % readable_filename)
178      f = open(filename, 'r')
179      for line in f.readlines():
180        if line.startswith("#") or line.startswith("//") or line.isspace():
181          continue
182        line = line.rstrip()
183        test_prefixes = ["FLAKY", "FAILS"]
184        for p in test_prefixes:
185          # Strip prefixes from the test names.
186          line = line.replace(".%s_" % p, ".")
187        # Exclude the original test name.
188        filters.append(line)
189        if line[-2:] != ".*":
190          # List all possible prefixes if line doesn't end with ".*".
191          for p in test_prefixes:
192            filters.append(line.replace(".", ".%s_" % p))
193    # Get rid of duplicates.
194    filters = set(filters)
195    gtest_filter = self._gtest_filter
196    if len(filters):
197      if gtest_filter:
198        gtest_filter += ":"
199        if gtest_filter.find("-") < 0:
200          gtest_filter += "-"
201      else:
202        gtest_filter = "-"
203      gtest_filter += ":".join(filters)
204    if gtest_filter:
205      cmd.append("--gtest_filter=%s" % gtest_filter)
206
207  @staticmethod
208  def ShowTests():
209    test_to_names = {}
210    for name, test_function in ChromeTests._test_list.iteritems():
211      test_to_names.setdefault(test_function, []).append(name)
212
213    name_to_aliases = {}
214    for names in test_to_names.itervalues():
215      names.sort(key=lambda name: len(name))
216      name_to_aliases[names[0]] = names[1:]
217
218    print
219    print "Available tests:"
220    print "----------------"
221    for name, aliases in sorted(name_to_aliases.iteritems()):
222      if aliases:
223        print "   {} (aka {})".format(name, ', '.join(aliases))
224      else:
225        print "   {}".format(name)
226
227  def SetupLdPath(self, requires_build_dir):
228    if requires_build_dir:
229      self._EnsureBuildDirFound()
230    elif not self._options.build_dir:
231      return
232
233    # Append build_dir to LD_LIBRARY_PATH so external libraries can be loaded.
234    if (os.getenv("LD_LIBRARY_PATH")):
235      os.putenv("LD_LIBRARY_PATH", "%s:%s" % (os.getenv("LD_LIBRARY_PATH"),
236                                              self._options.build_dir))
237    else:
238      os.putenv("LD_LIBRARY_PATH", self._options.build_dir)
239
240  def SimpleTest(self, module, name, valgrind_test_args=None, cmd_args=None):
241    tool = valgrind_test.CreateTool(self._options.valgrind_tool)
242    cmd = self._DefaultCommand(tool, name, valgrind_test_args)
243    self._AppendGtestFilter(tool, name, cmd)
244    cmd.extend(['--test-tiny-timeout=1000'])
245    if cmd_args:
246      cmd.extend(cmd_args)
247
248    self.SetupLdPath(True)
249    return tool.Run(cmd, module)
250
251  def RunCmdLine(self):
252    tool = valgrind_test.CreateTool(self._options.valgrind_tool)
253    cmd = self._DefaultCommand(tool, None, self._args)
254    self.SetupLdPath(False)
255    return tool.Run(cmd, None)
256
  # Thin wrappers: each maps one registered test name to a gtest binary run
  # via SimpleTest.  The first argument is the module label handed to
  # tool.Run(); the second is the executable name, which also selects the
  # gtest_exclude file prefix (see _AppendGtestFilter).
  def TestAccessibility(self):
    return self.SimpleTest("accessibility", "accessibility_unittests")

  def TestAddressInput(self):
    return self.SimpleTest("addressinput", "libaddressinput_unittests")

  def TestAngle(self):
    return self.SimpleTest("angle", "angle_unittests")

  def TestAppList(self):
    return self.SimpleTest("app_list", "app_list_unittests")

  def TestAsh(self):
    return self.SimpleTest("ash", "ash_unittests")

  def TestAshShell(self):
    # NOTE(review): module label "ash_shelf" looks like a typo for
    # "ash_shell" -- confirm before changing, as it names a log/suppression
    # grouping.
    return self.SimpleTest("ash_shelf", "ash_shell_unittests")

  def TestAura(self):
    return self.SimpleTest("aura", "aura_unittests")

  def TestBase(self):
    return self.SimpleTest("base", "base_unittests")

  def TestBlinkHeap(self):
    return self.SimpleTest("blink_heap", "blink_heap_unittests")

  def TestBlinkPlatform(self):
    return self.SimpleTest("blink_platform", "blink_platform_unittests")

  def TestCacheInvalidation(self):
    return self.SimpleTest("cacheinvalidation", "cacheinvalidation_unittests")

  def TestCast(self):
    return self.SimpleTest("chrome", "cast_unittests")

  def TestCC(self):
    return self.SimpleTest("cc", "cc_unittests")

  def TestChromeApp(self):
    return self.SimpleTest("chrome_app", "chrome_app_unittests")

  def TestChromeElf(self):
    return self.SimpleTest("chrome_elf", "chrome_elf_unittests")

  def TestChromeDriver(self):
    return self.SimpleTest("chromedriver", "chromedriver_unittests")

  def TestChromeOS(self):
    return self.SimpleTest("chromeos", "chromeos_unittests")

  def TestCloudPrint(self):
    return self.SimpleTest("cloud_print", "cloud_print_unittests")

  def TestComponents(self):
    return self.SimpleTest("components", "components_unittests")

  def TestCompositor(self):
    return self.SimpleTest("compositor", "compositor_unittests")

  def TestContent(self):
    return self.SimpleTest("content", "content_unittests")

  def TestCourgette(self):
    return self.SimpleTest("courgette", "courgette_unittests")

  def TestCrypto(self):
    return self.SimpleTest("crypto", "crypto_unittests")

  def TestDevice(self):
    return self.SimpleTest("device", "device_unittests")

  def TestDisplay(self):
    return self.SimpleTest("display", "display_unittests")

  def TestEvents(self):
    return self.SimpleTest("events", "events_unittests")

  def TestExtensions(self):
    return self.SimpleTest("extensions", "extensions_unittests")

  def TestFFmpeg(self):
    return self.SimpleTest("chrome", "ffmpeg_unittests")

  def TestFFmpegRegressions(self):
    return self.SimpleTest("chrome", "ffmpeg_regression_tests")

  def TestGCM(self):
    return self.SimpleTest("gcm", "gcm_unit_tests")

  def TestGfx(self):
    return self.SimpleTest("gfx", "gfx_unittests")

  def TestGin(self):
    return self.SimpleTest("gin", "gin_unittests")

  def TestGoogleApis(self):
    return self.SimpleTest("google_apis", "google_apis_unittests")

  def TestGPU(self):
    return self.SimpleTest("gpu", "gpu_unittests")

  def TestIpc(self):
    # ipc_tests spawns children; trace them so they run under the tool too.
    return self.SimpleTest("ipc", "ipc_tests",
                           valgrind_test_args=["--trace_children"])

  def TestInstallerUtil(self):
    return self.SimpleTest("installer_util", "installer_util_unittests")

  def TestJingle(self):
    return self.SimpleTest("chrome", "jingle_unittests")

  def TestKeyboard(self):
    return self.SimpleTest("keyboard", "keyboard_unittests")

  def TestMedia(self):
    return self.SimpleTest("chrome", "media_unittests")

  def TestMessageCenter(self):
    return self.SimpleTest("message_center", "message_center_unittests")

  def TestMojoAppsJS(self):
    return self.SimpleTest("mojo_apps_js", "mojo_apps_js_unittests")

  def TestMojoCommon(self):
    return self.SimpleTest("mojo_common", "mojo_common_unittests")

  def TestMojoJS(self):
    return self.SimpleTest("mojo_js", "mojo_js_unittests")

  def TestMojoPublicBindings(self):
    return self.SimpleTest("mojo_public_bindings",
                           "mojo_public_bindings_unittests")

  def TestMojoPublicEnv(self):
    return self.SimpleTest("mojo_public_env",
                           "mojo_public_environment_unittests")

  def TestMojoPublicSystem(self):
    return self.SimpleTest("mojo_public_system",
                           "mojo_public_system_unittests")

  def TestMojoPublicSysPerf(self):
    return self.SimpleTest("mojo_public_sysperf",
                           "mojo_public_system_perftests")

  def TestMojoPublicUtility(self):
    return self.SimpleTest("mojo_public_utility",
                           "mojo_public_utility_unittests")

  def TestMojoServiceManager(self):
    return self.SimpleTest("mojo_service_manager",
                           "mojo_service_manager_unittests")

  def TestMojoSystem(self):
    return self.SimpleTest("mojo_system", "mojo_system_unittests")

  def TestMojoViewManager(self):
    return self.SimpleTest("mojo_view_manager", "mojo_view_manager_unittests")

  def TestMojoViewManagerLib(self):
    return self.SimpleTest("mojo_view_manager_lib",
                           "mojo_view_manager_lib_unittests")

  def TestNet(self):
    return self.SimpleTest("net", "net_unittests")

  def TestNetPerf(self):
    return self.SimpleTest("net", "net_perftests")

  def TestPhoneNumber(self):
    return self.SimpleTest("phonenumber", "libphonenumber_unittests")

  def TestPPAPI(self):
    return self.SimpleTest("chrome", "ppapi_unittests")

  def TestPrinting(self):
    return self.SimpleTest("chrome", "printing_unittests")

  def TestRemoting(self):
    # UI-driven suite: pass raised action timeouts (milliseconds).
    return self.SimpleTest("chrome", "remoting_unittests",
                           cmd_args=[
                               "--ui-test-action-timeout=60000",
                               "--ui-test-action-max-timeout=150000"])

  def TestSql(self):
    return self.SimpleTest("chrome", "sql_unittests")

  def TestSync(self):
    return self.SimpleTest("chrome", "sync_unit_tests")

  def TestLinuxSandbox(self):
    return self.SimpleTest("sandbox", "sandbox_linux_unittests")
450
451  def TestUnit(self):
452    # http://crbug.com/51716
453    # Disabling all unit tests
454    # Problems reappeared after r119922
455    if common.IsMac() and (self._options.valgrind_tool == "memcheck"):
456      logging.warning("unit_tests are disabled for memcheck on MacOS.")
457      return 0;
458    return self.SimpleTest("chrome", "unit_tests")
459
  # Thin SimpleTest wrappers (module label, then executable name).
  def TestUIUnit(self):
    return self.SimpleTest("chrome", "ui_unittests")

  def TestURL(self):
    return self.SimpleTest("chrome", "url_unittests")

  def TestViews(self):
    return self.SimpleTest("views", "views_unittests")
468
469
470  # Valgrind timeouts are in seconds.
471  UI_VALGRIND_ARGS = ["--timeout=14400", "--trace_children", "--indirect"]
472  # UI test timeouts are in milliseconds.
473  UI_TEST_ARGS = ["--ui-test-action-timeout=60000",
474                  "--ui-test-action-max-timeout=150000",
475                  "--no-sandbox"]
476
477  # TODO(thestig) fine-tune these values.
478  # Valgrind timeouts are in seconds.
479  BROWSER_VALGRIND_ARGS = ["--timeout=50000", "--trace_children", "--indirect"]
480  # Browser test timeouts are in milliseconds.
481  BROWSER_TEST_ARGS = ["--ui-test-action-timeout=400000",
482                       "--ui-test-action-max-timeout=800000",
483                       "--no-sandbox"]
484
485  def TestBrowser(self):
486    return self.SimpleTest("chrome", "browser_tests",
487                           valgrind_test_args=self.BROWSER_VALGRIND_ARGS,
488                           cmd_args=self.BROWSER_TEST_ARGS)
489
490  def TestContentBrowser(self):
491    return self.SimpleTest("content", "content_browsertests",
492                           valgrind_test_args=self.BROWSER_VALGRIND_ARGS,
493                           cmd_args=self.BROWSER_TEST_ARGS)
494
495  def TestInteractiveUI(self):
496    return self.SimpleTest("chrome", "interactive_ui_tests",
497                           valgrind_test_args=self.UI_VALGRIND_ARGS,
498                           cmd_args=self.UI_TEST_ARGS)
499
500  def TestSafeBrowsing(self):
501    return self.SimpleTest("chrome", "safe_browsing_tests",
502                           valgrind_test_args=self.UI_VALGRIND_ARGS,
503                           cmd_args=(["--ui-test-action-max-timeout=450000"]))
504
505  def TestSyncIntegration(self):
506    return self.SimpleTest("chrome", "sync_integration_tests",
507                           valgrind_test_args=self.UI_VALGRIND_ARGS,
508                           cmd_args=(["--ui-test-action-max-timeout=450000"]))
509
  def TestLayoutChunk(self, chunk_num, chunk_size):
    """Runs one chunk of the layout tests under the configured tool.

    Args:
      chunk_num: index of the chunk to run; output is stored per-chunk under
          tools/valgrind/latest/layout/chunk_NNNNN.
      chunk_size: number of tests in the chunk; 0 means run the whole list.

    Returns:
      The tool's exit status.
    """
    # Run tests [chunk_num*chunk_size .. (chunk_num+1)*chunk_size) from the
    # list of tests.  Wrap around to beginning of list at end.
    # If chunk_size is zero, run all tests in the list once.
    # If a text file is given as argument, it is used as the list of tests.
    #
    # Build the ginormous commandline in 'cmd'.
    # It's going to be roughly
    #  python valgrind_test.py ... python run_webkit_tests.py ...
    # but we'll use the --indirect flag to valgrind_test.py
    # to avoid valgrinding python.
    # Start by building the valgrind_test.py commandline.
    tool = valgrind_test.CreateTool(self._options.valgrind_tool)
    cmd = self._DefaultCommand(tool)
    cmd.append("--trace_children")
    cmd.append("--indirect_webkit_layout")
    cmd.append("--ignore_exit_code")
    # Now build script_cmd, the run_webkits_tests.py commandline
    # Store each chunk in its own directory so that we can find the data later
    chunk_dir = os.path.join("layout", "chunk_%05d" % chunk_num)
    out_dir = os.path.join(path_utils.ScriptDir(), "latest")
    out_dir = os.path.join(out_dir, chunk_dir)
    if os.path.exists(out_dir):
      # Reuse the directory but clear out stale result files.
      old_files = glob.glob(os.path.join(out_dir, "*.txt"))
      for f in old_files:
        os.remove(f)
    else:
      os.makedirs(out_dir)
    script = os.path.join(self._source_dir, "webkit", "tools", "layout_tests",
                          "run_webkit_tests.py")
    # http://crbug.com/260627: After the switch to content_shell from DRT, each
    # test now brings up 3 processes.  Under Valgrind, they become memory bound
    # and can eventually OOM if we don't reduce the total count.
    # It'd be nice if content_shell automatically throttled the startup of new
    # tests if we're low on memory.
    jobs = max(1, int(multiprocessing.cpu_count() * 0.3))
    script_cmd = ["python", script, "-v",
                  # run a separate DumpRenderTree for each test
                  "--batch-size=1",
                  "--fully-parallel",
                  "--child-processes=%d" % jobs,
                  "--time-out-ms=800000",
                  "--no-retry-failures",  # retrying takes too much time
                  # http://crbug.com/176908: Don't launch a browser when done.
                  "--no-show-results",
                  "--nocheck-sys-deps"]
    # Pass build mode to run_webkit_tests.py.  We aren't passed it directly,
    # so parse it out of build_dir.  run_webkit_tests.py can only handle
    # the two values "Release" and "Debug".
    # TODO(Hercules): unify how all our scripts pass around build mode
    # (--mode / --target / --build-dir / --debug)
    if self._options.build_dir:
      build_root, mode = os.path.split(self._options.build_dir)
      script_cmd.extend(["--build-directory", build_root, "--target", mode])
    if (chunk_size > 0):
      script_cmd.append("--run-chunk=%d:%d" % (chunk_num, chunk_size))
    if len(self._args):
      # if the arg is a txt file, then treat it as a list of tests
      if os.path.isfile(self._args[0]) and self._args[0][-4:] == ".txt":
        script_cmd.append("--test-list=%s" % self._args[0])
      else:
        script_cmd.extend(self._args)
    self._AppendGtestFilter(tool, "layout", script_cmd)
    # Now run script_cmd with the wrapper in cmd
    cmd.extend(["--"])
    cmd.extend(script_cmd)

    # Layout tests often times fail quickly, but the buildbot remains green.
    # Detect this situation when running with the default chunk size.
    if chunk_size == self.LAYOUT_TESTS_DEFAULT_CHUNK_SIZE:
      min_runtime_in_seconds=120
    else:
      min_runtime_in_seconds=0
    ret = tool.Run(cmd, "layout", min_runtime_in_seconds=min_runtime_in_seconds)
    return ret
585
586
587  def TestLayout(self):
588    # A "chunk file" is maintained in the local directory so that each test
589    # runs a slice of the layout tests of size chunk_size that increments with
590    # each run.  Since tests can be added and removed from the layout tests at
591    # any time, this is not going to give exact coverage, but it will allow us
592    # to continuously run small slices of the layout tests under valgrind rather
593    # than having to run all of them in one shot.
594    chunk_size = self._options.num_tests
595    if (chunk_size == 0):
596      return self.TestLayoutChunk(0, 0)
597    chunk_num = 0
598    chunk_file = os.path.join("valgrind_layout_chunk.txt")
599    logging.info("Reading state from " + chunk_file)
600    try:
601      f = open(chunk_file)
602      if f:
603        chunk_str = f.read()
604        if len(chunk_str):
605          chunk_num = int(chunk_str)
606        # This should be enough so that we have a couple of complete runs
607        # of test data stored in the archive (although note that when we loop
608        # that we almost guaranteed won't be at the end of the test list)
609        if chunk_num > 10000:
610          chunk_num = 0
611        f.close()
612    except IOError, (errno, strerror):
613      logging.error("error reading from file %s (%d, %s)" % (chunk_file,
614                    errno, strerror))
615    # Save the new chunk size before running the tests. Otherwise if a
616    # particular chunk hangs the bot, the chunk number will never get
617    # incremented and the bot will be wedged.
618    logging.info("Saving state to " + chunk_file)
619    try:
620      f = open(chunk_file, "w")
621      chunk_num += 1
622      f.write("%d" % chunk_num)
623      f.close()
624    except IOError, (errno, strerror):
625      logging.error("error writing to file %s (%d, %s)" % (chunk_file, errno,
626                    strerror))
627    # Since we're running small chunks of the layout tests, it's important to
628    # mark the ones that have errors in them.  These won't be visible in the
629    # summary list for long, but will be useful for someone reviewing this bot.
630    return self.TestLayoutChunk(chunk_num, chunk_size)
631
  # The known list of tests.
  # Recognise the original abbreviations as well as full executable names.
  # Maps every accepted --test argument (short alias or full binary name)
  # to the bound method that runs it; __init__ validates against these keys.
  _test_list = {
    "cmdline" : RunCmdLine,
    "addressinput": TestAddressInput,
    "libaddressinput_unittests": TestAddressInput,
    "accessibility": TestAccessibility,
    "angle": TestAngle,          "angle_unittests": TestAngle,
    "app_list": TestAppList,     "app_list_unittests": TestAppList,
    "ash": TestAsh,              "ash_unittests": TestAsh,
    "ash_shell": TestAshShell,   "ash_shell_unittests": TestAshShell,
    "aura": TestAura,            "aura_unittests": TestAura,
    "base": TestBase,            "base_unittests": TestBase,
    "blink_heap": TestBlinkHeap,
    "blink_platform": TestBlinkPlatform,
    "browser": TestBrowser,      "browser_tests": TestBrowser,
    "cacheinvalidation": TestCacheInvalidation,
    "cacheinvalidation_unittests": TestCacheInvalidation,
    "cast": TestCast,            "cast_unittests": TestCast,
    "cc": TestCC,                "cc_unittests": TestCC,
    "chrome_app": TestChromeApp,
    "chrome_elf": TestChromeElf,
    "chromedriver": TestChromeDriver,
    "chromeos": TestChromeOS,    "chromeos_unittests": TestChromeOS,
    "cloud_print": TestCloudPrint,
    "cloud_print_unittests": TestCloudPrint,
    "components": TestComponents,"components_unittests": TestComponents,
    "compositor": TestCompositor,"compositor_unittests": TestCompositor,
    "content": TestContent,      "content_unittests": TestContent,
    "content_browsertests": TestContentBrowser,
    "courgette": TestCourgette,  "courgette_unittests": TestCourgette,
    "crypto": TestCrypto,        "crypto_unittests": TestCrypto,
    "device": TestDevice,        "device_unittests": TestDevice,
    "display": TestDisplay,      "display_unittests": TestDisplay,
    "events": TestEvents,        "events_unittests": TestEvents,
    "extensions": TestExtensions,
    "ffmpeg": TestFFmpeg,        "ffmpeg_unittests": TestFFmpeg,
    "ffmpeg_regression_tests": TestFFmpegRegressions,
    "gcm": TestGCM,              "gcm_unit_tests": TestGCM,
    "gin": TestGin,              "gin_unittests": TestGin,
    "gfx": TestGfx,              "gfx_unittests": TestGfx,
    "google_apis": TestGoogleApis,
    "gpu": TestGPU,              "gpu_unittests": TestGPU,
    "ipc": TestIpc,              "ipc_tests": TestIpc,
    "installer_util": TestInstallerUtil,
    "interactive_ui": TestInteractiveUI,
    "jingle": TestJingle,        "jingle_unittests": TestJingle,
    "keyboard": TestKeyboard,    "keyboard_unittests": TestKeyboard,
    "layout": TestLayout,        "layout_tests": TestLayout,
    "media": TestMedia,          "media_unittests": TestMedia,
    "message_center": TestMessageCenter,
    "message_center_unittests" : TestMessageCenter,
    "mojo_apps_js": TestMojoAppsJS,
    "mojo_common": TestMojoCommon,
    "mojo_js": TestMojoJS,
    "mojo_system": TestMojoSystem,
    "mojo_public_system": TestMojoPublicSystem,
    "mojo_public_utility": TestMojoPublicUtility,
    "mojo_public_bindings": TestMojoPublicBindings,
    "mojo_public_env": TestMojoPublicEnv,
    "mojo_public_sysperf": TestMojoPublicSysPerf,
    "mojo_service_manager": TestMojoServiceManager,
    "mojo_view_manager": TestMojoViewManager,
    "mojo_view_manager_lib": TestMojoViewManagerLib,
    "net": TestNet,              "net_unittests": TestNet,
    "net_perf": TestNetPerf,     "net_perftests": TestNetPerf,
    "phonenumber": TestPhoneNumber,
    "libphonenumber_unittests": TestPhoneNumber,
    "ppapi": TestPPAPI,          "ppapi_unittests": TestPPAPI,
    "printing": TestPrinting,    "printing_unittests": TestPrinting,
    "remoting": TestRemoting,    "remoting_unittests": TestRemoting,
    "safe_browsing": TestSafeBrowsing, "safe_browsing_tests": TestSafeBrowsing,
    "sandbox": TestLinuxSandbox, "sandbox_linux_unittests": TestLinuxSandbox,
    "sql": TestSql,              "sql_unittests": TestSql,
    "sync": TestSync,            "sync_unit_tests": TestSync,
    "sync_integration_tests": TestSyncIntegration,
    "sync_integration": TestSyncIntegration,
    "ui_unit": TestUIUnit,       "ui_unittests": TestUIUnit,
    "unit": TestUnit,            "unit_tests": TestUnit,
    "url": TestURL,              "url_unittests": TestURL,
    "views": TestViews,          "views_unittests": TestViews,
    "webkit": TestLayout,
  }
715
716
717def _main():
718  parser = optparse.OptionParser("usage: %prog -b <dir> -t <test> "
719                                 "[-t <test> ...]")
720
721  parser.add_option("--help-tests", dest="help_tests", action="store_true",
722                    default=False, help="List all available tests")
723  parser.add_option("-b", "--build-dir",
724                    help="the location of the compiler output")
725  parser.add_option("--target", help="Debug or Release")
726  parser.add_option("-t", "--test", action="append", default=[],
727                    help="which test to run, supports test:gtest_filter format "
728                         "as well.")
729  parser.add_option("--baseline", action="store_true", default=False,
730                    help="generate baseline data instead of validating")
731  parser.add_option("--gtest_filter",
732                    help="additional arguments to --gtest_filter")
733  parser.add_option("--gtest_repeat", help="argument for --gtest_repeat")
734  parser.add_option("--gtest_shuffle", action="store_true", default=False,
735                    help="Randomize tests' orders on every iteration.")
736  parser.add_option("-v", "--verbose", action="store_true", default=False,
737                    help="verbose output - enable debug log messages")
738  parser.add_option("--tool", dest="valgrind_tool", default="memcheck",
739                    help="specify a valgrind tool to run the tests under")
740  parser.add_option("--tool_flags", dest="valgrind_tool_flags", default="",
741                    help="specify custom flags for the selected valgrind tool")
742  parser.add_option("--keep_logs", action="store_true", default=False,
743                    help="store memory tool logs in the <tool>.logs directory "
744                         "instead of /tmp.\nThis can be useful for tool "
745                         "developers/maintainers.\nPlease note that the <tool>"
746                         ".logs directory will be clobbered on tool startup.")
747  parser.add_option("-n", "--num_tests", type="int",
748                    default=ChromeTests.LAYOUT_TESTS_DEFAULT_CHUNK_SIZE,
749                    help="for layout tests: # of subtests per run.  0 for all.")
750  # TODO(thestig) Remove this if we can.
751  parser.add_option("--gtest_color", dest="gtest_color", default="no",
752                    help="dummy compatibility flag for sharding_supervisor.")
753  parser.add_option("--brave-new-test-launcher", action="store_true",
754                    help="run the tests with --brave-new-test-launcher")
755  parser.add_option("--test-launcher-bot-mode", action="store_true",
756                    help="run the tests with --test-launcher-bot-mode")
757
758  options, args = parser.parse_args()
759
760  # Bake target into build_dir.
761  if options.target and options.build_dir:
762    assert (options.target !=
763            os.path.basename(os.path.dirname(options.build_dir)))
764    options.build_dir = os.path.join(os.path.abspath(options.build_dir),
765                                     options.target)
766
767  if options.verbose:
768    logging_utils.config_root(logging.DEBUG)
769  else:
770    logging_utils.config_root()
771
772  if options.help_tests:
773    ChromeTests.ShowTests()
774    return 0
775
776  if not options.test:
777    parser.error("--test not specified")
778
779  if len(options.test) != 1 and options.gtest_filter:
780    parser.error("--gtest_filter and multiple tests don't make sense together")
781
782  for t in options.test:
783    tests = ChromeTests(options, args, t)
784    ret = tests.Run()
785    if ret: return ret
786  return 0
787
788
# Script entry point: the process exit status is _main()'s return value
# (0 on success, the first failing test's code otherwise).
if __name__ == "__main__":
  sys.exit(_main())
791