results_report_unittest.py revision 63f13489b1a02bbbc75b46fec6ae7a817442df94
#!/usr/bin/python2
#
# Copyright 2016 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""Unittest for the results reporter."""

from __future__ import division
from __future__ import print_function

from StringIO import StringIO

import collections
import mock
import os
import test_flag
import unittest

from benchmark_run import MockBenchmarkRun
from cros_utils import logger
from experiment_factory import ExperimentFactory
from experiment_file import ExperimentFile
from machine_manager import MockCrosMachine
from machine_manager import MockMachineManager
from results_cache import MockResult
from results_report import HTMLResultsReport
from results_report import JSONResultsReport
from results_report import ParseChromeosImage
from results_report import TextResultsReport


class FreeFunctionsTest(unittest.TestCase):
34  """Tests for any free functions in results_report."""
35
36  def testParseChromeosImage(self):
37    # N.B. the cases with blank versions aren't explicitly supported by
38    # ParseChromeosImage. I'm not sure if they need to be supported, but the
39    # goal of this was to capture existing functionality as much as possible.
40    base_case = '/my/chroot/src/build/images/x86-generic/R01-1.0.date-time' \
41        '/chromiumos_test_image.bin'
42    self.assertEqual(ParseChromeosImage(base_case), ('R01-1.0', base_case))
43
44    dir_base_case = os.path.dirname(base_case)
45    self.assertEqual(ParseChromeosImage(dir_base_case), ('', dir_base_case))
46
47    buildbot_case = '/my/chroot/chroot/tmp/buildbot-build/R02-1.0.date-time' \
48        '/chromiumos_test_image.bin'
49    buildbot_img = buildbot_case.split('/chroot/tmp')[1]
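    # i.e., everything up to and including the host-side '/chroot/tmp' prefix
    # is expected to be stripped from the reported image path.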

    self.assertEqual(ParseChromeosImage(buildbot_case),
                     ('R02-1.0', buildbot_img))
    self.assertEqual(ParseChromeosImage(os.path.dirname(buildbot_case)),
                     ('', os.path.dirname(buildbot_img)))

    # Ensure we don't act completely insanely given a few mildly insane paths.
    fun_case = '/chromiumos_test_image.bin'
    self.assertEqual(ParseChromeosImage(fun_case), ('', fun_case))

    fun_case2 = 'chromiumos_test_image.bin'
    self.assertEqual(ParseChromeosImage(fun_case2), ('', fun_case2))


# There are many ways for this to be done better, but the linter complains
# about all of them (that I can think of, at least).
_fake_path_number = [0]
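# (A one-element list lets FakePath bump the counter in place, with no global
# statement needed.)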
def FakePath(ext):
  """Makes a unique path that shouldn't exist on the host system.

  Each call returns a different path, so if said path finds its way into an
  error message, it may be easier to track it to its source.
  """
  _fake_path_number[0] += 1
  prefix = '/tmp/should/not/exist/%d/' % (_fake_path_number[0], )
  return os.path.join(prefix, ext)


def MakeMockExperiment(compiler='gcc'):
79  """Mocks an experiment using the given compiler."""
80  mock_experiment_file = StringIO("""
81      board: x86-alex
82      remote: 127.0.0.1
83      perf_args: record -a -e cycles
84      benchmark: PageCycler {
85        iterations: 3
86      }
87
88      image1 {
89        chromeos_image: %s
90      }
91
92      image2 {
93        remote: 127.0.0.2
94        chromeos_image: %s
95      }
96      """ % (FakePath('cros_image1.bin'), FakePath('cros_image2.bin')))
  efile = ExperimentFile(mock_experiment_file)
  experiment = ExperimentFactory().GetExperiment(efile,
                                                 FakePath('working_directory'),
                                                 FakePath('log_dir'))
  for label in experiment.labels:
    label.compiler = compiler
  return experiment


def _InjectSuccesses(experiment, how_many, keyvals, for_benchmark=0,
                     label=None):
108  """Injects successful experiment runs (for each label) into the experiment."""
109  # Defensive copy of keyvals, so if it's modified, we'll know.
110  keyvals = dict(keyvals)
111  num_configs = len(experiment.benchmarks) * len(experiment.labels)
112  num_runs = len(experiment.benchmark_runs) // num_configs
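  # num_runs is how many iterations each (benchmark, label) pair already has;
  # the runs injected below get iteration numbers starting just past it.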

  # TODO(gbiv): Centralize the mocking of these, maybe? (It's also done in
  # benchmark_run_unittest)
  bench = experiment.benchmarks[for_benchmark]
  cache_conditions = []
  log_level = 'average'
  share_cache = ''
  locks_dir = ''
  log = logger.GetLogger()
  machine_manager = MockMachineManager(FakePath('chromeos_root'), 0,
                                       log_level, locks_dir)
  machine_manager.AddMachine('testing_machine')
  machine = next(m for m in machine_manager.GetMachines()
                 if m.name == 'testing_machine')
  for label in experiment.labels:
    def MakeSuccessfulRun(n):
      run = MockBenchmarkRun('mock_success%d' % (n, ), bench, label,
                             1 + n + num_runs, cache_conditions,
                             machine_manager, log, log_level, share_cache)
      mock_result = MockResult(log, label, log_level, machine)
      mock_result.keyvals = keyvals
      run.result = mock_result
      return run

    experiment.benchmark_runs.extend(MakeSuccessfulRun(n)
                                     for n in xrange(how_many))
  return experiment


class TextResultsReportTest(unittest.TestCase):
143  """Tests that the output of a text report contains the things we pass in.
144
145  At the moment, this doesn't care deeply about the format in which said
146  things are displayed. It just cares that they're present.
147  """
148
149  def _checkReport(self, email):
150    num_success = 2
151    success_keyvals = {'retval': 0, 'machine': 'some bot', 'a_float': 3.96}
152    experiment = _InjectSuccesses(MakeMockExperiment(), num_success,
153                                  success_keyvals)
154    text_report = TextResultsReport(experiment, email=email).GetReport()
155    self.assertIn(str(success_keyvals['a_float']), text_report)
156    self.assertIn(success_keyvals['machine'], text_report)
157    self.assertIn(MockCrosMachine.CPUINFO_STRING, text_report)
158    return text_report
159
160
161  def testOutput(self):
162    email_report = self._checkReport(email=True)
163    text_report = self._checkReport(email=False)

    # Ensure that the reports are somehow different. Otherwise, having the
    # distinction is useless.
    self.assertNotEqual(email_report, text_report)


class HTMLResultsReportTest(unittest.TestCase):
  """Tests that the output of an HTML report contains the things we pass in.

  At the moment, this doesn't care deeply about the format in which said
  things are displayed. It just cares that they're present.
  """

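  # Only the pieces of the 13-element HTML template tuple that the tests below
  # actually inspect; _TupleToTestOutput skips the rest.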
  _TestOutput = collections.namedtuple('TestOutput', ['summary_table',
                                                      'perf_html',
                                                      'charts',
                                                      'table_html',
                                                      'experiment_file'])

  @staticmethod
  def _TupleToTestOutput(to_what):
    fields = {}
    # to_what has 13 fields. So, dealing with it can be unfun.
    it = iter(to_what)
    next(it) # perf_init
    next(it) # chart_javascript
    fields['summary_table'] = next(it) # HTML summary
    next(it) # plaintext summary
    next(it) # TSV summary
    next(it) # tab menu summary
    fields['perf_html'] = next(it)
    fields['charts'] = next(it)
    fields['table_html'] = next(it)
    next(it) # full table plain text
    next(it) # full table TSV
    next(it) # full tab menu
    fields['experiment_file'] = next(it)

    remaining_fields = list(it)
    if not remaining_fields:
      return HTMLResultsReportTest._TestOutput(**fields)

    raise RuntimeError('Initialization missed field(s): %s' %
                       (remaining_fields, ))

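  # _GetOutput patches the HTML template string with a mock; the report text is
  # built by %-formatting that template, so the argument recorded by __mod__ is
  # the tuple of pieces GetReport would have rendered.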
  def _GetOutput(self, experiment):
    with mock.patch('results_report.HTMLResultsReport.HTML') as standin:
      HTMLResultsReport(experiment).GetReport()
      mod_mock = standin.__mod__
    self.assertEqual(mod_mock.call_count, 1)
    fmt_args = mod_mock.call_args[0][0]
    return self._TupleToTestOutput(fmt_args)

  def testNoSuccessOutput(self):
    output = self._GetOutput(MakeMockExperiment())
    self.assertIn('no result', output.summary_table)
    self.assertEqual(output.charts, '')

  def testSuccessfulOutput(self):
    num_success = 2
    success_keyvals = {'retval': 0, 'a_float': 3.96}
    output = self._GetOutput(_InjectSuccesses(MakeMockExperiment(), num_success,
                                              success_keyvals))

    self.assertNotIn('no result', output.summary_table)
    #self.assertIn(success_keyvals['machine'], output.summary_table)
    self.assertIn('a_float', output.summary_table)
    self.assertIn(str(success_keyvals['a_float']), output.summary_table)
    # The _ in a_float is filtered out when we're generating HTML.
    self.assertIn('afloat', output.charts)


class JSONResultsReportTest(unittest.TestCase):
  """Tests JSONResultsReport."""
  REQUIRED_REPORT_KEYS = ('date', 'time', 'board', 'label', 'chromeos_image',
                          'chromeos_version', 'chrome_version', 'compiler',
                          'test_name', 'pass')

  # JSONResultsReport.GetReport was initially made to write to disk; unless we
  # refactor it, testing is... a bit awkward.
  def _GetResultsFor(self, experiment, results_dir, date=None, time=None):
    """Gets a JSON report, given an experiment and results_dir.

    Returns [filename, result_as_python_datastructures].
    """
    # Linters complain if this isn't populated with precisely two things.
    test_results = [None, None]
    def grab_results(filename, results):
      test_results[0] = filename
      test_results[1] = results
    report = JSONResultsReport(experiment, date=date, time=time)
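    # grab_results stands in for the write-to-disk step mentioned above: it
    # just captures the file name and the results structure for inspection.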
    report.GetReport(results_dir, write_results=grab_results)
    self.assertNotIn(None, test_results)
    return test_results

  def testJSONReportOutputFileNameInfo(self):
    date, time = '1/1/2001', '01:02:03'
    results_dir = FakePath('results')
    experiment = MakeMockExperiment(compiler='gcc')
    board = experiment.labels[0].board
    out_path, _ = self._GetResultsFor(experiment, results_dir, date, time)

    self.assertTrue(out_path.startswith(results_dir))
    self.assertTrue(out_path.endswith('.json'))
    out_file = out_path[len(results_dir):]

    # The ':' characters in the time should be replaced with something
    # filename-friendly, since ':' is problematic in file names on some
    # systems. At the moment, it's replaced with '.'.
    self.assertIn(time.replace(':', '.'), out_file)
    self.assertIn(date, out_file)
    self.assertIn(board, out_file)
    self.assertIn('gcc', out_file)

    out_path, _ = self._GetResultsFor(MakeMockExperiment(compiler='llvm'),
                                      results_dir, date, time)
    self.assertIn('llvm', out_path)

    # Comments say that if *any* compiler used was LLVM, then LLVM must be in
    # the file name, instead of gcc.
    experiment = MakeMockExperiment(compiler='gcc')
    experiment.labels[len(experiment.labels)//2].compiler = 'llvm'
    out_path, _ = self._GetResultsFor(experiment, results_dir, date, time)
    self.assertIn('llvm', out_path)

  def _CheckRequiredKeys(self, test_output):
    for output in test_output:
      for key in JSONResultsReportTest.REQUIRED_REPORT_KEYS:
        self.assertIn(key, output)

  def testAllFailedJSONReportOutput(self):
    _, results = self._GetResultsFor(MakeMockExperiment(), FakePath('results'))
    self._CheckRequiredKeys(results)
    # Nothing succeeded; we don't send anything more than what's required.
    for result in results:
      self.assertItemsEqual(result.iterkeys(), self.REQUIRED_REPORT_KEYS)

  def testJSONReportOutputWithSuccesses(self):
    success_keyvals = {
        'retval': 0,
        'a_float': '2.3',
        'many_floats': [['1.0', '2.0'], ['3.0']],
        'machine': "i'm a pirate"
    }

    # 2 is arbitrary.
    num_success = 2
    experiment = _InjectSuccesses(MakeMockExperiment(), num_success,
                                  success_keyvals)
    _, results = self._GetResultsFor(experiment, FakePath('results'))
    self._CheckRequiredKeys(results)

    num_passes = num_success * len(experiment.labels)
    non_failures = [r for r in results if r['pass']]
    self.assertEqual(num_passes, len(non_failures))

    # TODO(gbiv): ...Is the 3.0 *actually* meant to be dropped?
    expected_detailed = {'a_float': 2.3, 'many_floats': [1.0, 2.0]}
    for pass_ in non_failures:
      self.assertIn('detailed_results', pass_)
      self.assertDictEqual(expected_detailed, pass_['detailed_results'])
      self.assertIn('machine', pass_)
      self.assertEqual(success_keyvals['machine'], pass_['machine'])


if __name__ == '__main__':
  test_flag.SetTestMode(True)
  unittest.main()