1# Copyright 2016 The Chromium Authors. All rights reserved.
2# Use of this source code is governed by a BSD-style license that can be
3# found in the LICENSE file.
4
5import fnmatch
6import re
7import sys
8import json
9
10from telemetry.core import discover
11from telemetry.internal.browser import browser_options
12from telemetry.internal.platform import android_device
13from telemetry.internal.util import binary_manager
14from telemetry.testing import browser_test_context
15from telemetry.testing import serially_executed_browser_test_case
16
17import typ
18from typ import arg_parser
19
20TEST_SUFFIXES = ['*_test.py', '*_tests.py', '*_unittest.py', '*_unittests.py']
21
22
23def ProcessCommandLineOptions(test_class, typ_options, args):
24  options = browser_options.BrowserFinderOptions()
25  options.browser_type = 'any'
26  parser = options.CreateParser(test_class.__doc__)
27  test_class.AddCommandlineArgs(parser)
28  # Set the default chrome root variable. This is required for the
29  # Android browser finder to function properly.
30  if typ_options.default_chrome_root:
31    parser.set_defaults(chrome_root=typ_options.default_chrome_root)
32  finder_options, positional_args = parser.parse_args(args)
33  finder_options.positional_args = positional_args
34  # Typ parses the "verbose", or "-v", command line arguments which
35  # are supposed to control logging verbosity. Carry them over.
36  finder_options.verbosity = typ_options.verbose
37  return finder_options
38
39
40def _ValidateDistinctNames(browser_test_classes):
41  names_to_test_classes = {}
42  for cl in browser_test_classes:
43    name = cl.Name()
44    if name in names_to_test_classes:
45      raise Exception('Test name %s is duplicated between %s and %s' % (
46          name, repr(cl), repr(names_to_test_classes[name])))
47    names_to_test_classes[name] = cl
48
49
50def _TestRangeForShard(total_shards, shard_index, num_tests):
51  """Returns a 2-tuple containing the start (inclusive) and ending
52  (exclusive) indices of the tests that should be run, given that
53  |num_tests| tests are split across |total_shards| shards, and that
54  |shard_index| is currently being run.
55  """
56  assert num_tests >= 0
57  assert total_shards >= 1
58  assert shard_index >= 0 and shard_index < total_shards, (
59    'shard_index (%d) must be >= 0 and < total_shards (%d)' %
60    (shard_index, total_shards))
61  if num_tests == 0:
62    return (0, 0)
63  floored_tests_per_shard = num_tests // total_shards
64  remaining_tests = num_tests % total_shards
65  if remaining_tests == 0:
66    return (floored_tests_per_shard * shard_index,
67            floored_tests_per_shard * (1 + shard_index))
68  # More complicated. Some shards will run floored_tests_per_shard
69  # tests, and some will run 1 + floored_tests_per_shard.
70  num_earlier_shards_with_one_extra_test = min(remaining_tests, shard_index)
71  num_earlier_shards_with_no_extra_tests = max(
72    0, shard_index - num_earlier_shards_with_one_extra_test)
73  num_earlier_tests = (
74    num_earlier_shards_with_one_extra_test * (floored_tests_per_shard + 1) +
75    num_earlier_shards_with_no_extra_tests * floored_tests_per_shard)
76  tests_for_this_shard = floored_tests_per_shard
77  if shard_index < remaining_tests:
78    tests_for_this_shard += 1
79  return (num_earlier_tests, num_earlier_tests + tests_for_this_shard)
80
81
82def _MedianTestTime(test_times):
83  times = test_times.values()
84  times.sort()
85  if len(times) == 0:
86    return 0
87  halfLen = len(times) / 2
88  if len(times) % 2:
89    return times[halfLen]
90  else:
91    return 0.5 * (times[halfLen - 1] + times[halfLen])
92
93
94def _TestTime(test, test_times, default_test_time):
95  return test_times.get(test.shortName()) or default_test_time
96
97
98def _DebugShardDistributions(shards, test_times):
99  for i, s in enumerate(shards):
100    num_tests = len(s)
101    if test_times:
102      median = _MedianTestTime(test_times)
103      shard_time = 0.0
104      for t in s:
105        shard_time += _TestTime(t, test_times, median)
106      print 'shard %d: %d seconds (%d tests)' % (i, shard_time, num_tests)
107    else:
108      print 'shard %d: %d tests (unknown duration)' % (i, num_tests)
109
110
111def _SplitShardsByTime(test_cases, total_shards, test_times,
112                       debug_shard_distributions):
113  median = _MedianTestTime(test_times)
114  shards = []
115  for i in xrange(total_shards):
116    shards.append({'total_time': 0.0, 'tests': []})
117  test_cases.sort(key=lambda t: _TestTime(t, test_times, median),
118                  reverse=True)
119
120  # The greedy algorithm has been empirically tested on the WebGL 2.0
121  # conformance tests' times, and results in an essentially perfect
122  # shard distribution of 530 seconds per shard. In the same scenario,
123  # round-robin scheduling resulted in shard times spread between 502
124  # and 592 seconds, and the current alphabetical sharding resulted in
125  # shard times spread between 44 and 1591 seconds.
126
127  # Greedy scheduling. O(m*n), where m is the number of shards and n
128  # is the number of test cases.
129  for t in test_cases:
130    min_shard_index = 0
131    min_shard_time = None
132    for i in xrange(total_shards):
133      if min_shard_time is None or shards[i]['total_time'] < min_shard_time:
134        min_shard_index = i
135        min_shard_time = shards[i]['total_time']
136    shards[min_shard_index]['tests'].append(t)
137    shards[min_shard_index]['total_time'] += _TestTime(t, test_times, median)
138
139  res = [s['tests'] for s in shards]
140  if debug_shard_distributions:
141    _DebugShardDistributions(res, test_times)
142
143  return res
144
145
146def LoadTestCasesToBeRun(
147    test_class, finder_options, filter_regex_str, filter_tests_after_sharding,
148    total_shards, shard_index, test_times, debug_shard_distributions):
149  test_cases = []
150  real_regex = re.compile(filter_regex_str)
151  noop_regex = re.compile('')
152  if filter_tests_after_sharding:
153    filter_regex = noop_regex
154    post_filter_regex = real_regex
155  else:
156    filter_regex = real_regex
157    post_filter_regex = noop_regex
158
159  for t in serially_executed_browser_test_case.GenerateTestCases(
160      test_class, finder_options):
161    if filter_regex.search(t.shortName()):
162      test_cases.append(t)
163
164  if test_times:
165    # Assign tests to shards.
166    shards = _SplitShardsByTime(test_cases, total_shards, test_times,
167                                debug_shard_distributions)
168    return [t for t in shards[shard_index]
169            if post_filter_regex.search(t.shortName())]
170  else:
171    test_cases.sort(key=lambda t: t.shortName())
172    test_range = _TestRangeForShard(total_shards, shard_index, len(test_cases))
173    if debug_shard_distributions:
174      tmp_shards = []
175      for i in xrange(total_shards):
176        tmp_range = _TestRangeForShard(total_shards, i, len(test_cases))
177        tmp_shards.append(test_cases[tmp_range[0]:tmp_range[1]])
178      # Can edit the code to get 'test_times' passed in here for
179      # debugging and comparison purposes.
180      _DebugShardDistributions(tmp_shards, None)
181    return [t for t in test_cases[test_range[0]:test_range[1]]
182            if post_filter_regex.search(t.shortName())]
183
184
185def _CreateTestArgParsers():
186  parser = typ.ArgumentParser(discovery=False, reporting=True, running=True)
187  parser.add_argument('test', type=str, help='Name of the test suite to run')
188  parser.add_argument('--test-filter', type=str, default='', action='store',
189      help='Run only tests whose names match the given filter regexp.')
190  parser.add_argument(
191    '--filter-tests-after-sharding', default=False, action='store_true',
192    help=('Apply the test filter after tests are split for sharding. Useful '
193          'for reproducing bugs related to the order in which tests run.'))
194  parser.add_argument(
195      '--read-abbreviated-json-results-from', metavar='FILENAME',
196      action='store', help=(
197        'If specified, reads abbreviated results from that path in json form. '
198        'This information is used to more evenly distribute tests among '
199        'shards.'))
200  parser.add_argument('--debug-shard-distributions',
201      action='store_true', default=False,
202      help='Print debugging information about the shards\' test distributions')
203
204  parser.add_argument('--default-chrome-root', type=str, default=None)
205  parser.add_argument('--client-config', dest='client_configs',
206                      action='append', default=[])
207  parser.add_argument('--start-dir', dest='start_dirs',
208                      action='append', default=[])
209  parser.add_argument('--skip', metavar='glob', default=[],
210      action='append',
211      help=('Globs of test names to skip (defaults to %(default)s).'))
212  return parser
213
214
215def _SkipMatch(name, skipGlobs):
216  return any(fnmatch.fnmatch(name, glob) for glob in skipGlobs)
217
218
219def _GetClassifier(args):
220  def _SeriallyExecutedBrowserTestCaseClassifer(test_set, test):
221    # Do not pick up tests that do not inherit from
222    # serially_executed_browser_test_case.SeriallyExecutedBrowserTestCase
223    # class.
224    if not isinstance(test,
225        serially_executed_browser_test_case.SeriallyExecutedBrowserTestCase):
226      return
227    name = test.id()
228    if _SkipMatch(name, args.skip):
229      test_set.tests_to_skip.append(
230          typ.TestInput(name, 'skipped because matched --skip'))
231      return
232    # For now, only support running these tests serially.
233    test_set.isolated_tests.append(typ.TestInput(name))
234  return _SeriallyExecutedBrowserTestCaseClassifer
235
236
237def RunTests(args):
238  parser = _CreateTestArgParsers()
239  try:
240    options, extra_args = parser.parse_known_args(args)
241  except arg_parser._Bailout:
242    return parser.exit_status
243  binary_manager.InitDependencyManager(options.client_configs)
244
245  for start_dir in options.start_dirs:
246    modules_to_classes = discover.DiscoverClasses(
247        start_dir, options.top_level_dir,
248        base_class=serially_executed_browser_test_case.
249            SeriallyExecutedBrowserTestCase)
250    browser_test_classes = modules_to_classes.values()
251
252  _ValidateDistinctNames(browser_test_classes)
253
254  test_class = None
255  for cl in browser_test_classes:
256    if cl.Name() == options.test:
257      test_class = cl
258      break
259
260  if not test_class:
261    print 'Cannot find test class with name matching %s' % options.test
262    print 'Available tests: %s' % '\n'.join(
263        cl.Name() for cl in browser_test_classes)
264    return 1
265
266  # Create test context.
267  context = browser_test_context.TypTestContext()
268  for c in options.client_configs:
269    context.client_configs.append(c)
270  context.finder_options = ProcessCommandLineOptions(
271      test_class, options, extra_args)
272  context.test_class = test_class
273  test_times = None
274  if options.read_abbreviated_json_results_from:
275    with open(options.read_abbreviated_json_results_from, 'r') as f:
276      abbr_results = json.load(f)
277      test_times = abbr_results.get('times')
278  tests_to_run = LoadTestCasesToBeRun(
279      test_class=test_class, finder_options=context.finder_options,
280      filter_regex_str=options.test_filter,
281      filter_tests_after_sharding=options.filter_tests_after_sharding,
282      total_shards=options.total_shards, shard_index=options.shard_index,
283      test_times=test_times,
284      debug_shard_distributions=options.debug_shard_distributions)
285  for t in tests_to_run:
286    context.test_case_ids_to_run.add(t.id())
287  context.Freeze()
288  browser_test_context._global_test_context = context
289
290  # Setup typ runner.
291  runner = typ.Runner()
292
293  runner.context = context
294  runner.setup_fn = _SetUpProcess
295  runner.teardown_fn = _TearDownProcess
296
297  runner.args.jobs = options.jobs
298  runner.args.metadata = options.metadata
299  runner.args.passthrough = options.passthrough
300  runner.args.path = options.path
301  runner.args.retry_limit = options.retry_limit
302  runner.args.test_results_server = options.test_results_server
303  runner.args.test_type = options.test_type
304  runner.args.top_level_dir = options.top_level_dir
305  runner.args.write_full_results_to = options.write_full_results_to
306  runner.args.write_trace_to = options.write_trace_to
307  runner.args.list_only = options.list_only
308  runner.classifier = _GetClassifier(options)
309
310  runner.args.suffixes = TEST_SUFFIXES
311
312  # Since sharding logic is handled by browser_test_runner harness by passing
313  # browser_test_context.test_case_ids_to_run to subprocess to indicate test
314  # cases to be run, we explicitly disable sharding logic in typ.
315  runner.args.total_shards = 1
316  runner.args.shard_index = 0
317
318  runner.args.timing = True
319  runner.args.verbose = options.verbose
320  runner.win_multiprocessing = typ.WinMultiprocessing.importable
321  try:
322    ret, _, _ = runner.run()
323  except KeyboardInterrupt:
324    print >> sys.stderr, "interrupted, exiting"
325    ret = 130
326  return ret
327
328
329def _SetUpProcess(child, context):
330  del child  # Unused.
331  args = context.finder_options
332  if binary_manager.NeedsInit():
333    # On windows, typ doesn't keep the DependencyManager initialization in the
334    # child processes.
335    binary_manager.InitDependencyManager(context.client_configs)
336  if args.remote_platform_options.device == 'android':
337    android_devices = android_device.FindAllAvailableDevices(args)
338    if not android_devices:
339      raise RuntimeError("No Android device found")
340    android_devices.sort(key=lambda device: device.name)
341    args.remote_platform_options.device = (
342        android_devices[child.worker_num-1].guid)
343  browser_test_context._global_test_context = context
344  context.test_class.SetUpProcess()
345
346
347def _TearDownProcess(child, context):
348  del child, context  # Unused.
349  browser_test_context._global_test_context.test_class.TearDownProcess()
350  browser_test_context._global_test_context = None
351
352
353if __name__ == '__main__':
354  ret_code = RunTests(sys.argv[1:])
355  sys.exit(ret_code)
356