json_results.py revision 1320f92c476a1ad9d19dba2a48c72b75566198e9
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

import functools
import json
import re
import time
import unittest
import urllib2


# TODO(dpranke): This code is largely cloned from, and redundant with,
# src/mojo/tools/run_mojo_python_tests.py, and also duplicates logic
# in test-webkitpy and run-webkit-tests. We should consolidate the
# python TestResult parsing/converting/uploading code as much as possible.


def AddOptions(parser):
  parser.add_option('--metadata', action='append', default=[],
                    help=('optional key=value metadata that will be stored '
                          'in the results files (can be used for revision '
                          'numbers, etc.)'))
  parser.add_option('--write-full-results-to', metavar='FILENAME',
                    action='store',
                    help='The path to write the list of full results to.')
  parser.add_option('--builder-name',
                    help='The name of the builder as shown on the waterfall.')
  parser.add_option('--master-name',
                    help='The name of the buildbot master.')
  parser.add_option('--test-results-server', default='',
                    help=('If specified, upload full_results.json file to '
                          'this server.'))
  parser.add_option('--test-type',
                    help=('Name of test type / step on the waterfall '
                          '(e.g., "telemetry_unittests").'))

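# For illustration only: a runner that calls AddOptions() might be invoked
# like this (the script name and all values below are hypothetical, not part
# of this module):
#
#   run_tests --metadata revision=1234 \
#             --write-full-results-to=full_results.json \
#             --builder-name='Linux Tests' \
#             --master-name=chromium.linux \
#             --test-results-server=test-results.example.com \
#             --test-type=telemetry_unittests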

def ValidateArgs(parser, args):
  for val in args.metadata:
    if '=' not in val:
      parser.error('Error: malformed metadata "%s"' % val)

  if (args.test_results_server and
      (not args.builder_name or not args.master_name or not args.test_type)):
    parser.error('Error: --builder-name, --master-name, and --test-type '
                 'must be specified along with --test-results-server.')


def WriteFullResultsIfNecessary(args, full_results):
  if not args.write_full_results_to:
    return

  with open(args.write_full_results_to, 'w') as fp:
    json.dump(full_results, fp, indent=2)
    fp.write('\n')


def UploadFullResultsIfNecessary(args, full_results):
  # Returns (upload_failed, error_message). (False, '') means either that the
  # upload succeeded or that no --test-results-server was specified.
  if not args.test_results_server:
    return False, ''

  url = 'http://%s/testfile/upload' % args.test_results_server
  attrs = [('builder', args.builder_name),
           ('master', args.master_name),
           ('testtype', args.test_type)]
  content_type, data = _EncodeMultiPartFormData(attrs, full_results)
  return _UploadData(url, data, content_type)


TEST_SEPARATOR = '.'


def FullResults(args, suite, results):
  """Convert the unittest results to the Chromium JSON test result format.

  This matches run-webkit-tests (the layout tests) and the flakiness dashboard.
  """

  full_results = {}
  full_results['interrupted'] = False
  full_results['path_delimiter'] = TEST_SEPARATOR
  full_results['version'] = 3
  full_results['seconds_since_epoch'] = time.time()
  full_results['builder_name'] = args.builder_name or ''
  for md in args.metadata:
    key, val = md.split('=', 1)
    full_results[key] = val

  all_test_names = AllTestNames(suite)
  sets_of_passing_test_names = map(PassingTestNames, results)
  sets_of_failing_test_names = map(functools.partial(FailedTestNames, suite),
                                   results)

  # TODO(crbug.com/405379): This handles tests that are skipped via the
  # unittest skip decorators (like skipUnless). Tests that are skipped via
  # telemetry's decorators package are not included in the test suite at all,
  # so those need to be passed in explicitly in order to be included.
  skipped_tests = (set(all_test_names) - sets_of_passing_test_names[0]
                                       - sets_of_failing_test_names[0])

  num_tests = len(all_test_names)
  num_failures = NumFailuresAfterRetries(suite, results)
  num_skips = len(skipped_tests)
  num_passes = num_tests - num_failures - num_skips
  full_results['num_failures_by_type'] = {
      'FAIL': num_failures,
      'PASS': num_passes,
      'SKIP': num_skips,
  }

  full_results['tests'] = {}

  for test_name in all_test_names:
    if test_name in skipped_tests:
      value = {
          'expected': 'SKIP',
          'actual': 'SKIP',
      }
    else:
      value = {
          'expected': 'PASS',
          'actual': ActualResultsForTest(test_name,
                                         sets_of_failing_test_names,
                                         sets_of_passing_test_names),
      }
      if value['actual'].endswith('FAIL'):
        value['is_unexpected'] = True
    _AddPathToTrie(full_results['tests'], test_name, value)

  return full_results
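
# For illustration only: a FullResults() dictionary for a hypothetical
# two-test suite might look like the sketch below (values are made up, and
# extra top-level keys depend on the --metadata flags passed in):
#
#   {
#     'version': 3,
#     'interrupted': False,
#     'path_delimiter': '.',
#     'seconds_since_epoch': 1400000000.0,
#     'builder_name': 'Linux Tests',
#     'num_failures_by_type': {'FAIL': 1, 'PASS': 1, 'SKIP': 0},
#     'tests': {
#       'foo_test': {
#         'FooTest': {
#           'test_pass': {'expected': 'PASS', 'actual': 'PASS'},
#           'test_fail': {'expected': 'PASS', 'actual': 'FAIL FAIL FAIL',
#                         'is_unexpected': True},
#         },
#       },
#     },
#   }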


def ActualResultsForTest(test_name, sets_of_failing_test_names,
                         sets_of_passing_test_names):
  actuals = []
  for retry_num in range(len(sets_of_failing_test_names)):
    if test_name in sets_of_failing_test_names[retry_num]:
      actuals.append('FAIL')
    elif test_name in sets_of_passing_test_names[retry_num]:
      assert ((retry_num == 0) or
              (test_name in sets_of_failing_test_names[retry_num - 1])), (
              'We should not have run a test that did not fail '
              'on the previous run.')
      actuals.append('PASS')

  assert actuals, 'We did not find any result data for %s.' % test_name
  return ' '.join(actuals)
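
# A minimal sketch of the strings ActualResultsForTest() produces, one token
# per run:
#
#   - a test that passed on the first run            -> 'PASS'
#   - a test that failed once, then passed on retry  -> 'FAIL PASS'
#   - a test that failed on every run (two retries)  -> 'FAIL FAIL FAIL'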


def ExitCodeFromFullResults(full_results):
  return 1 if full_results['num_failures_by_type']['FAIL'] else 0


def AllTestNames(suite):
  test_names = []
  # _tests is protected  pylint: disable=W0212
  for test in suite._tests:
    if isinstance(test, unittest.suite.TestSuite):
      test_names.extend(AllTestNames(test))
    else:
      test_names.append(test.id())
  return test_names


def NumFailuresAfterRetries(suite, results):
  return len(FailedTestNames(suite, results[-1]))


def FailedTestNames(suite, result):
  failed_test_names = set()
  for test, error in result.failures + result.errors:
    if isinstance(test, unittest.TestCase):
      failed_test_names.add(test.id())
    elif isinstance(test, unittest.suite._ErrorHolder):  # pylint: disable=W0212
      # If there's an error in setUpClass or setUpModule, unittest gives us an
      # _ErrorHolder object whose id looks like 'setUpClass (foo.FooTest)'.
      # We can parse the object's id for the class or module that failed,
      # then find all tests in that class or module.
      match = re.match(r'setUp[a-zA-Z]+ \((.+)\)', test.id())
      assert match, "Don't know how to retry after this error:\n%s" % error
      module_or_class = match.groups()[0]
      failed_test_names |= _FindChildren(module_or_class, AllTestNames(suite))
    else:
      assert False, 'Unknown test type: %s' % test.__class__
  return failed_test_names


def _FindChildren(parent, potential_children):
  children = set()
  parent_name_parts = parent.split('.')
  for potential_child in potential_children:
    child_name_parts = potential_child.split('.')
    if parent_name_parts == child_name_parts[:len(parent_name_parts)]:
      children.add(potential_child)
  return children
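
# For example (hypothetical names), given the failed class 'foo.FooTest':
#
#   _FindChildren('foo.FooTest',
#                 ['foo.FooTest.test_a', 'foo.BarTest.test_b'])
#   -> set(['foo.FooTest.test_a'])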


def PassingTestNames(result):
  # Note that `successes` is not part of the standard unittest.TestResult
  # API; the result objects passed in are expected to collect it.
  return set(test.id() for test in result.successes)


def _AddPathToTrie(trie, path, value):
  if TEST_SEPARATOR not in path:
    trie[path] = value
    return
  directory, rest = path.split(TEST_SEPARATOR, 1)
  if directory not in trie:
    trie[directory] = {}
  _AddPathToTrie(trie[directory], rest, value)
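
# A minimal sketch of the trie this builds, for a hypothetical test id
# 'foo_test.FooTest.test_a':
#
#   trie = {}
#   _AddPathToTrie(trie, 'foo_test.FooTest.test_a', {'actual': 'PASS'})
#   # trie == {'foo_test': {'FooTest': {'test_a': {'actual': 'PASS'}}}}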


def _EncodeMultiPartFormData(attrs, full_results):
  # Cloned from webkitpy/common/net/file_uploader.py
  BOUNDARY = '-M-A-G-I-C---B-O-U-N-D-A-R-Y-'
  CRLF = '\r\n'
  lines = []

  for key, value in attrs:
    lines.append('--' + BOUNDARY)
    lines.append('Content-Disposition: form-data; name="%s"' % key)
    lines.append('')
    lines.append(value)

  lines.append('--' + BOUNDARY)
  lines.append('Content-Disposition: form-data; name="file"; '
               'filename="full_results.json"')
  lines.append('Content-Type: application/json')
  lines.append('')
  lines.append(json.dumps(full_results))

  lines.append('--' + BOUNDARY + '--')
  lines.append('')
  body = CRLF.join(lines)
  content_type = 'multipart/form-data; boundary=%s' % BOUNDARY
  return content_type, body
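
# Roughly, the encoded body looks like this (CRLF line endings; the builder
# value is hypothetical and the JSON is abbreviated):
#
#   ---M-A-G-I-C---B-O-U-N-D-A-R-Y-
#   Content-Disposition: form-data; name="builder"
#
#   Linux Tests
#   ---M-A-G-I-C---B-O-U-N-D-A-R-Y-
#   ...
#   ---M-A-G-I-C---B-O-U-N-D-A-R-Y-
#   Content-Disposition: form-data; name="file"; filename="full_results.json"
#   Content-Type: application/json
#
#   {"version": 3, ...}
#   ---M-A-G-I-C---B-O-U-N-D-A-R-Y---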


def _UploadData(url, data, content_type):
  request = urllib2.Request(url, data, {'Content-Type': content_type})
  try:
    response = urllib2.urlopen(request)
    if response.code == 200:
      return False, ''
    return True, ('Uploading the JSON results failed with %d: "%s"' %
                  (response.code, response.read()))
  except Exception as e:
    # Note that urllib2.urlopen() raises urllib2.HTTPError for non-2xx
    # responses, so most HTTP failures are reported through this branch
    # rather than via the explicit status check above.
    return True, 'Uploading the JSON results raised "%s"\n' % str(e)