generate_report_unittest.py revision c9df56c133c35bb5154a6c947d0986c35097c6e1
1#!/usr/bin/python2
2#
3# Copyright 2016 The Chromium OS Authors. All rights reserved.
4# Use of this source code is governed by a BSD-style license that can be
5# found in the LICENSE file.
6"""Test for generate_report.py."""
7
8from __future__ import division
9from __future__ import print_function
10
11from StringIO import StringIO
12
13import copy
14import json
15import mock
16import test_flag
17import unittest
18
19import generate_report
20import results_report
21
class _ContextualStringIO(StringIO):
  """A StringIO usable as a context manager in `with` statements.

  Python 2's StringIO.StringIO does not implement the context-manager
  protocol, so this subclass adds no-op __enter__/__exit__ hooks. The
  redundant __init__ (which only delegated to the parent) is omitted;
  construction is inherited unchanged.
  """

  def __enter__(self):
    # Hand the buffer itself to the `with` body.
    return self

  def __exit__(self, _type, _value, _traceback):
    # Implicitly returns None (falsy), so exceptions raised inside the
    # `with` body propagate to the caller.
    pass
32
33
class GenerateReportTests(unittest.TestCase):
  """Tests for generate_report.py."""

  def testCountBenchmarks(self):
    # Each benchmark maps to a list of per-label run lists; the reported
    # count is the largest number of runs under any single label.
    runs = {
        'foo': [[{}, {}, {}], [{}, {}, {}, {}]],
        'bar': [],
        'baz': [[], [{}], [{}, {}, {}]]
    }
    results = generate_report.CountBenchmarks(runs)
    expected_results = [('foo', 4), ('bar', 0), ('baz', 3)]
    self.assertItemsEqual(expected_results, results)

  def testCutResultsInPlace(self):
    bench_data = {
        'foo': [[{'a': 1, 'b': 2, 'c': 3}, {'a': 3, 'b': 2.5, 'c': 1}]],
        'bar': [[{'d': 11, 'e': 12, 'f': 13}]],
        'baz': [[{'g': 12, 'h': 13}]],
        'qux': [[{'i': 11}]],
    }
    original_bench_data = copy.deepcopy(bench_data)

    max_keys = 2
    results = generate_report.CutResultsInPlace(bench_data, max_keys=max_keys,
                                                complain_on_update=False)
    # Cuts should be in-place.
    self.assertIs(results, bench_data)
    self.assertItemsEqual(original_bench_data.keys(), bench_data.keys())
    for bench_name, original_runs in original_bench_data.iteritems():
      bench_runs = bench_data[bench_name]
      self.assertEqual(len(original_runs), len(bench_runs))
      # Order of these sub-lists shouldn't have changed.
      for original_list, new_list in zip(original_runs, bench_runs):
        self.assertEqual(len(original_list), len(new_list))
        for original_keyvals, sub_keyvals in zip(original_list, new_list):
          # sub_keyvals must be a subset of original_keyvals.
          self.assertDictContainsSubset(sub_keyvals, original_keyvals)

  def testCutResultsInPlaceLeavesRetval(self):
    # Per the expectations below, lowercase 'retval' survives even
    # max_keys=0, while 'RETVAL' (wrong case) is cut like any other key.
    bench_data = {
        'foo': [[{'retval': 0, 'a': 1}]],
        'bar': [[{'retval': 1}]],
        'baz': [[{'RETVAL': 1}]],
    }
    results = generate_report.CutResultsInPlace(bench_data, max_keys=0,
                                                complain_on_update=False)
    # Just reach into results assuming we know it otherwise outputs things
    # sanely. If it doesn't, testCutResultsInPlace should give an indication
    # as to what, exactly, is broken.
    self.assertEqual(results['foo'][0][0].items(), [('retval', 0)])
    self.assertEqual(results['bar'][0][0].items(), [('retval', 1)])
    self.assertEqual(results['baz'][0][0].items(), [])

  def _RunMainWithInput(self, args, input_obj):
    """Runs generate_report.Main with `input_obj` (JSON-encoded) as input.

    Patches PickInputFile so that passing '-i -' feeds Main a StringIO of
    the serialized object instead of touching stdin. Returns Main's result.
    """
    assert '-i' not in args
    args += ['-i', '-']
    input_buf = _ContextualStringIO(json.dumps(input_obj))
    with mock.patch('generate_report.PickInputFile', return_value=input_buf) \
        as patched_pick:
      result = generate_report.Main(args)
      patched_pick.assert_called_once_with('-')
      return result

  @mock.patch('generate_report.RunActions')
  def testMain(self, mock_run_actions):
    # Email is left out because it's a bit more difficult to test, and it'll
    # be mildly obvious if it's failing.
    args = ['--json', '--html', '--text']
    return_code = self._RunMainWithInput(args, {'platforms': [], 'data': {}})
    self.assertEqual(0, return_code)
    self.assertEqual(mock_run_actions.call_count, 1)
    # RunActions receives (ctor, name) pairs; one per requested format.
    ctors = [ctor for ctor, _ in mock_run_actions.call_args[0][0]]
    self.assertItemsEqual(ctors, [
        results_report.JSONResultsReport,
        results_report.TextResultsReport,
        results_report.HTMLResultsReport,
    ])

  @mock.patch('generate_report.RunActions')
  def testMainSelectsHTMLIfNoReportsGiven(self, mock_run_actions):
    # With no format flags at all, HTML should be the default (and only)
    # report type selected.
    args = []
    return_code = self._RunMainWithInput(args, {'platforms': [], 'data': {}})
    self.assertEqual(0, return_code)
    self.assertEqual(mock_run_actions.call_count, 1)
    ctors = [ctor for ctor, _ in mock_run_actions.call_args[0][0]]
    self.assertItemsEqual(ctors, [results_report.HTMLResultsReport])

  @mock.patch('generate_report.WriteFile')
  def testRunActionsRunsAllActionsRegardlessOfExceptions(self, mock_write_file):
    # Every action raises, yet RunActions must attempt all four and report
    # failure via its return value rather than propagating the exception.
    def raise_error(_):
      raise Exception('Oh nooo')
    actions = [
        (raise_error, 'json'),
        (raise_error, 'html'),
        (raise_error, 'text'),
        (raise_error, 'email'),
    ]
    output_prefix = '-'
    ok = generate_report.RunActions(actions, {}, output_prefix, overwrite=False,
                                    verbose=False)
    self.assertFalse(ok)
    self.assertEqual(mock_write_file.call_count, 4)
136
137
if __name__ == '__main__':
  # Put shared helpers into test mode before the suite runs; exact effects
  # live in the test_flag module (NOTE(review): presumably disables real
  # side effects in code under test -- confirm there).
  test_flag.SetTestMode(True)
  unittest.main()
141