json_results.py revision 5f1c94371a64b3196d4be9466099bb892df9b88e
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

import json
import time
import unittest

# TODO(dpranke): This code is largely cloned from, and redundant with,
# src/mojo/tools/run_mojo_python_tests.py, and also duplicates logic
# in test-webkitpy and run-webkit-tests. We should consolidate the
# python TestResult parsing/converting/uploading code as much as possible.

def AddOptions(parser):
  parser.add_option('--metadata', action='append', default=[],
                    help=('optional key=value metadata that will be stored '
                          'in the results files (can be used for revision '
                          'numbers, etc.)'))
  parser.add_option('--write-full-results-to', metavar='FILENAME',
                    action='store',
                    help='path to write the list of full results to.')


def ValidateArgs(parser, args):
  for val in args.metadata:
    if '=' not in val:
      parser.error('Error: malformed metadata "%s"' % val)


def WriteAndUploadResultsIfNecessary(args, test_suite, result):
  if not args.write_full_results_to:
    return

  full_results = _FullResults(test_suite, result, args.metadata)

  with open(args.write_full_results_to, 'w') as fp:
    json.dump(full_results, fp, indent=2)
    fp.write('\n')

  # TODO(dpranke): upload to test-results.appspot.com if requested as well.
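
# A minimal sketch of how a runner might wire these helpers together. The
# optparse setup and the discovery arguments below are illustrative
# assumptions, not part of this module:
#
#   import optparse
#   parser = optparse.OptionParser()
#   AddOptions(parser)
#   args, _ = parser.parse_args()
#   ValidateArgs(parser, args)
#   suite = unittest.defaultTestLoader.discover('.')
#   result = unittest.TextTestRunner().run(suite)
#   WriteAndUploadResultsIfNecessary(args, suite, result)
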

TEST_SEPARATOR = '.'


def _FullResults(suite, result, metadata):
  """Convert the unittest results to the Chromium JSON test result format.

  This matches run-webkit-tests (the layout tests) and the flakiness dashboard.
  """

  full_results = {}
  full_results['interrupted'] = False
  full_results['path_delimiter'] = TEST_SEPARATOR
  full_results['version'] = 3
  full_results['seconds_since_epoch'] = time.time()
  for md in metadata:
    key, val = md.split('=', 1)
    full_results[key] = val

  all_test_names = _AllTestNames(suite)
  failed_test_names = _FailedTestNames(result)

  full_results['num_failures_by_type'] = {
      'FAIL': len(failed_test_names),
      'PASS': len(all_test_names) - len(failed_test_names),
  }

  full_results['tests'] = {}

  for test_name in all_test_names:
    value = {}
    value['expected'] = 'PASS'
    if test_name in failed_test_names:
      value['actual'] = 'FAIL'
      value['is_unexpected'] = True
    else:
      value['actual'] = 'PASS'

    _AddPathToTrie(full_results['tests'], test_name, value)

  return full_results

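# For illustration, a run in which foo.FooTest.test_bar is the only test and
# it fails would produce a full_results dict like this (the timing value is
# made up):
#
#   {
#     'version': 3,
#     'interrupted': False,
#     'path_delimiter': '.',
#     'seconds_since_epoch': 1400000000.0,
#     'num_failures_by_type': {'FAIL': 1, 'PASS': 0},
#     'tests': {
#       'foo': {
#         'FooTest': {
#           'test_bar': {
#             'expected': 'PASS',
#             'actual': 'FAIL',
#             'is_unexpected': True,
#           }
#         }
#       }
#     }
#   }
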
def _AllTestNames(suite):
  test_names = []
  # _tests is protected; pylint: disable=W0212
  for test in suite._tests:
    if isinstance(test, unittest.suite.TestSuite):
      test_names.extend(_AllTestNames(test))
    else:
      test_names.append(test.id())
  return test_names


def _FailedTestNames(result):
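  # result.failures and result.errors are lists of (test, traceback) pairs,
  # so the traceback is discarded here and only the test id is kept.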
  return set(test.id() for test, _ in result.failures + result.errors)


def _AddPathToTrie(trie, path, value):
  if TEST_SEPARATOR not in path:
    trie[path] = value
    return
  directory, rest = path.split(TEST_SEPARATOR, 1)
  if directory not in trie:
    trie[directory] = {}
  _AddPathToTrie(trie[directory], rest, value)
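
# For example, inserting the name 'foo.FooTest.test_bar' with value v into an
# empty trie yields {'foo': {'FooTest': {'test_bar': v}}}, the nested 'tests'
# layout that the flakiness dashboard expects.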