#!/usr/bin/env python2
#
# Copyright 2016 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""Unittest for the results reporter."""

from __future__ import division
from __future__ import print_function

from StringIO import StringIO

import collections
import mock
import os
import test_flag
import unittest

from benchmark_run import MockBenchmarkRun
from cros_utils import logger
from experiment_factory import ExperimentFactory
from experiment_file import ExperimentFile
from machine_manager import MockCrosMachine
from machine_manager import MockMachineManager
from results_cache import MockResult
from results_report import BenchmarkResults
from results_report import HTMLResultsReport
from results_report import JSONResultsReport
from results_report import ParseChromeosImage
from results_report import ParseStandardPerfReport
from results_report import TextResultsReport


class FreeFunctionsTest(unittest.TestCase):
  """Tests for any free functions in results_report."""

  def testParseChromeosImage(self):
    # N.B. the cases with blank versions aren't explicitly supported by
    # ParseChromeosImage. I'm not sure if they need to be supported, but the
    # goal of this was to capture existing functionality as much as possible.
    base_case = '/my/chroot/src/build/images/x86-generic/R01-1.0.date-time' \
        '/chromiumos_test_image.bin'
    self.assertEqual(ParseChromeosImage(base_case), ('R01-1.0', base_case))

    dir_base_case = os.path.dirname(base_case)
    self.assertEqual(ParseChromeosImage(dir_base_case), ('', dir_base_case))

    buildbot_case = '/my/chroot/chroot/tmp/buildbot-build/R02-1.0.date-time' \
        '/chromiumos_test_image.bin'
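    # Everything after the host-side '/chroot/tmp' prefix is the image path
    # as it would appear from inside the chroot.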
    buildbot_img = buildbot_case.split('/chroot/tmp')[1]

    self.assertEqual(ParseChromeosImage(buildbot_case),
                     ('R02-1.0', buildbot_img))
    self.assertEqual(ParseChromeosImage(os.path.dirname(buildbot_case)),
                     ('', os.path.dirname(buildbot_img)))

    # Ensure we don't act completely insanely given a few mildly insane paths.
    fun_case = '/chromiumos_test_image.bin'
    self.assertEqual(ParseChromeosImage(fun_case), ('', fun_case))

    fun_case2 = 'chromiumos_test_image.bin'
    self.assertEqual(ParseChromeosImage(fun_case2), ('', fun_case2))


# There are many ways for this to be done better, but the linter complains
# about all of them (that I can think of, at least).
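# (A one-element list serves as the counter so FakePath can mutate it without
# needing a global statement.)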
_fake_path_number = [0]
def FakePath(ext):
  """Makes a unique path that shouldn't exist on the host system.

  Each call returns a different path, so if said path finds its way into an
  error message, it may be easier to track it to its source.
  """
  _fake_path_number[0] += 1
  prefix = '/tmp/should/not/exist/%d/' % (_fake_path_number[0], )
  return os.path.join(prefix, ext)


def MakeMockExperiment(compiler='gcc'):
  """Mocks an experiment using the given compiler."""
  mock_experiment_file = StringIO("""
      board: x86-alex
      remote: 127.0.0.1
      perf_args: record -a -e cycles
      benchmark: PageCycler {
        iterations: 3
      }

      image1 {
        chromeos_image: %s
      }

      image2 {
        remote: 127.0.0.2
        chromeos_image: %s
      }
      """ % (FakePath('cros_image1.bin'), FakePath('cros_image2.bin')))
  efile = ExperimentFile(mock_experiment_file)
  experiment = ExperimentFactory().GetExperiment(efile,
                                                 FakePath('working_directory'),
                                                 FakePath('log_dir'))
  for label in experiment.labels:
    label.compiler = compiler
  return experiment


def _InjectSuccesses(experiment, how_many, keyvals, for_benchmark=0,
                     label=None):
  """Injects successful experiment runs (for each label) into the experiment."""
  # Defensive copy of keyvals, so if it's modified, we'll know.
  keyvals = dict(keyvals)
  num_configs = len(experiment.benchmarks) * len(experiment.labels)
  num_runs = len(experiment.benchmark_runs) // num_configs
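  # num_runs is how many runs each (benchmark, label) config already has; the
  # injected runs are numbered after those (see the 1 + n + num_runs below).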

  # TODO(gbiv): Centralize the mocking of these, maybe? (It's also done in
  # benchmark_run_unittest)
  bench = experiment.benchmarks[for_benchmark]
  cache_conditions = []
  log_level = 'average'
  share_cache = ''
  locks_dir = ''
  log = logger.GetLogger()
  machine_manager = MockMachineManager(FakePath('chromeos_root'), 0,
                                       log_level, locks_dir)
  machine_manager.AddMachine('testing_machine')
  machine = next(m for m in machine_manager.GetMachines()
                 if m.name == 'testing_machine')
  for label in experiment.labels:
    def MakeSuccessfulRun(n):
      run = MockBenchmarkRun('mock_success%d' % (n, ), bench, label,
                             1 + n + num_runs, cache_conditions,
                             machine_manager, log, log_level, share_cache)
      mock_result = MockResult(log, label, log_level, machine)
      mock_result.keyvals = keyvals
      run.result = mock_result
      return run

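    # MakeSuccessfulRun closes over the loop variable label; that's safe here
    # only because extend() consumes the generator below before the next
    # iteration rebinds label.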
    experiment.benchmark_runs.extend(MakeSuccessfulRun(n)
                                     for n in xrange(how_many))
  return experiment


class TextResultsReportTest(unittest.TestCase):
  """Tests that the output of a text report contains the things we pass in.

  At the moment, this doesn't care deeply about the format in which said
  things are displayed. It just cares that they're present.
  """

  def _checkReport(self, email):
    num_success = 2
    success_keyvals = {'retval': 0, 'machine': 'some bot', 'a_float': 3.96}
    experiment = _InjectSuccesses(MakeMockExperiment(), num_success,
                                  success_keyvals)
    text_report = TextResultsReport.FromExperiment(experiment, email=email) \
                                   .GetReport()
    self.assertIn(str(success_keyvals['a_float']), text_report)
    self.assertIn(success_keyvals['machine'], text_report)
    self.assertIn(MockCrosMachine.CPUINFO_STRING, text_report)
    return text_report

  def testOutput(self):
    email_report = self._checkReport(email=True)
    text_report = self._checkReport(email=False)

    # Ensure that the reports are somehow different. Otherwise, having the
    # distinction is useless.
    self.assertNotEqual(email_report, text_report)


class HTMLResultsReportTest(unittest.TestCase):
  """Tests that the output of an HTML report contains the things we pass in.

  At the moment, this doesn't care deeply about the format in which said
  things are displayed. It just cares that they're present.
  """

  _TestOutput = collections.namedtuple('TestOutput', ['summary_table',
                                                      'perf_html',
                                                      'chart_js',
                                                      'charts',
                                                      'full_table',
                                                      'experiment_file'])
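  # These fields are derived from the keyword arguments that
  # results_report_templates.GenerateHTMLPage is called with (see
  # _GetTestOutput below).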

  @staticmethod
  def _GetTestOutput(perf_table, chart_js, summary_table, print_table,
                     chart_divs, full_table, experiment_file):
    # N.B. Currently we don't check chart_js; it's just passed through because
    # cros lint complains otherwise.
    summary_table = print_table(summary_table, 'HTML')
    perf_html = print_table(perf_table, 'HTML')
    full_table = print_table(full_table, 'HTML')
    return HTMLResultsReportTest._TestOutput(summary_table=summary_table,
                                             perf_html=perf_html,
                                             chart_js=chart_js,
                                             charts=chart_divs,
                                             full_table=full_table,
                                             experiment_file=experiment_file)

  def _GetOutput(self, experiment=None, benchmark_results=None):
    with mock.patch('results_report_templates.GenerateHTMLPage') as standin:
      if experiment is not None:
        HTMLResultsReport.FromExperiment(experiment).GetReport()
      else:
        HTMLResultsReport(benchmark_results).GetReport()
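      # Keep a reference to the mock so its recorded call can be inspected
      # after the patch is undone.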
      mod_mock = standin
    self.assertEqual(mod_mock.call_count, 1)
    # call_args[0] is positional args, call_args[1] is kwargs.
    self.assertEqual(mod_mock.call_args[0], tuple())
    fmt_args = mod_mock.call_args[1]
    return self._GetTestOutput(**fmt_args)

  def testNoSuccessOutput(self):
    output = self._GetOutput(MakeMockExperiment())
    self.assertIn('no result', output.summary_table)
    self.assertIn('no result', output.full_table)
    self.assertEqual(output.charts, '')
    self.assertNotEqual(output.experiment_file, '')

  def testSuccessfulOutput(self):
    num_success = 2
    success_keyvals = {'retval': 0, 'a_float': 3.96}
    output = self._GetOutput(_InjectSuccesses(MakeMockExperiment(), num_success,
                                              success_keyvals))

    self.assertNotIn('no result', output.summary_table)
    #self.assertIn(success_keyvals['machine'], output.summary_table)
    self.assertIn('a_float', output.summary_table)
    self.assertIn(str(success_keyvals['a_float']), output.summary_table)
    self.assertIn('a_float', output.full_table)
    # The _ in a_float is filtered out when we're generating HTML.
    self.assertIn('afloat', output.charts)
    # And make sure we have our experiment file...
    self.assertNotEqual(output.experiment_file, '')

  def testBenchmarkResultFailure(self):
    labels = ['label1']
    benchmark_names_and_iterations = [('bench1', 1)]
    benchmark_keyvals = {'bench1': [[]]}
    results = BenchmarkResults(labels, benchmark_names_and_iterations,
                               benchmark_keyvals)
    output = self._GetOutput(benchmark_results=results)
    self.assertIn('no result', output.summary_table)
    self.assertEqual(output.charts, '')
    self.assertEqual(output.experiment_file, '')

  def testBenchmarkResultSuccess(self):
    labels = ['label1']
    benchmark_names_and_iterations = [('bench1', 1)]
    benchmark_keyvals = {'bench1': [[{'retval': 1, 'foo': 2.0}]]}
    results = BenchmarkResults(labels, benchmark_names_and_iterations,
                               benchmark_keyvals)
    output = self._GetOutput(benchmark_results=results)
    self.assertNotIn('no result', output.summary_table)
    self.assertIn('bench1', output.summary_table)
    self.assertIn('bench1', output.full_table)
    self.assertNotEqual(output.charts, '')
    self.assertEqual(output.experiment_file, '')


class JSONResultsReportTest(unittest.TestCase):
  """Tests JSONResultsReport."""

  REQUIRED_REPORT_KEYS = ('date', 'time', 'label', 'test_name', 'pass')
  EXPERIMENT_REPORT_KEYS = ('board', 'chromeos_image', 'chromeos_version',
                            'chrome_version', 'compiler')

  @staticmethod
  def _GetRequiredKeys(is_experiment):
    required_keys = JSONResultsReportTest.REQUIRED_REPORT_KEYS
    if is_experiment:
      required_keys += JSONResultsReportTest.EXPERIMENT_REPORT_KEYS
    return required_keys

  def _CheckRequiredKeys(self, test_output, is_experiment):
    required_keys = self._GetRequiredKeys(is_experiment)
    for output in test_output:
      for key in required_keys:
        self.assertIn(key, output)

  def testAllFailedJSONReportOutput(self):
    experiment = MakeMockExperiment()
    results = JSONResultsReport.FromExperiment(experiment).GetReportObject()
    self._CheckRequiredKeys(results, is_experiment=True)
    # Nothing succeeded; we don't send anything more than what's required.
    required_keys = self._GetRequiredKeys(is_experiment=True)
    for result in results:
      self.assertItemsEqual(result.iterkeys(), required_keys)

  def testJSONReportOutputWithSuccesses(self):
    success_keyvals = {
        'retval': 0,
        'a_float': '2.3',
        'many_floats': [['1.0', '2.0'], ['3.0']],
        'machine': "i'm a pirate"
    }

    # 2 is arbitrary.
    num_success = 2
    experiment = _InjectSuccesses(MakeMockExperiment(), num_success,
                                  success_keyvals)
    results = JSONResultsReport.FromExperiment(experiment).GetReportObject()
    self._CheckRequiredKeys(results, is_experiment=True)

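    # _InjectSuccesses adds num_success runs per label, so the number of
    # passing results scales with the label count.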
    num_passes = num_success * len(experiment.labels)
    non_failures = [r for r in results if r['pass']]
    self.assertEqual(num_passes, len(non_failures))

    # TODO(gbiv): ...Is the 3.0 *actually* meant to be dropped?
    expected_detailed = {'a_float': 2.3, 'many_floats': [1.0, 2.0]}
    for pass_ in non_failures:
      self.assertIn('detailed_results', pass_)
      self.assertDictEqual(expected_detailed, pass_['detailed_results'])
      self.assertIn('machine', pass_)
      self.assertEqual(success_keyvals['machine'], pass_['machine'])

  def testFailedJSONReportOutputWithoutExperiment(self):
    labels = ['label1']
    benchmark_names_and_iterations = [('bench1', 1), ('bench2', 2),
                                      ('bench3', 1), ('bench4', 0)]
    benchmark_keyvals = {
        'bench1': [[{'retval': 1, 'foo': 2.0}]],
        'bench2': [[{'retval': 1, 'foo': 4.0}, {'retval': -1, 'bar': 999}]],
        # lack of retval is considered a failure.
        'bench3': [[{}]],
        'bench4': [[]]
    }
    bench_results = BenchmarkResults(labels, benchmark_names_and_iterations,
                                     benchmark_keyvals)
    results = JSONResultsReport(bench_results).GetReportObject()
    self._CheckRequiredKeys(results, is_experiment=False)
    self.assertFalse(any(r['pass'] for r in results))

  def testJSONGetReportObeysJSONSettings(self):
    labels = ['label1']
    benchmark_names_and_iterations = [('bench1', 1)]
    # These can be anything, really. So long as they're distinctive.
    separators = (',\t\n\t', ':\t\n\t')
    benchmark_keyvals = {'bench1': [[{'retval': 0, 'foo': 2.0}]]}
    bench_results = BenchmarkResults(labels, benchmark_names_and_iterations,
                                     benchmark_keyvals)
    reporter = JSONResultsReport(bench_results,
                                 json_args={'separators': separators})
    result_str = reporter.GetReport()
    self.assertIn(separators[0], result_str)
    self.assertIn(separators[1], result_str)

  def testSuccessfulJSONReportOutputWithoutExperiment(self):
    labels = ['label1']
    benchmark_names_and_iterations = [('bench1', 1), ('bench2', 2)]
    benchmark_keyvals = {
        'bench1': [[{'retval': 0, 'foo': 2.0}]],
        'bench2': [[{'retval': 0, 'foo': 4.0}, {'retval': 0, 'bar': 999}]]
    }
    bench_results = BenchmarkResults(labels, benchmark_names_and_iterations,
                                     benchmark_keyvals)
    results = JSONResultsReport(bench_results).GetReportObject()
    self._CheckRequiredKeys(results, is_experiment=False)
    self.assertTrue(all(r['pass'] for r in results))
    # Enforce that the results have *some* deterministic order.
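    # (Results without a 'foo' keyval get the arbitrary sentinel 5.0, which
    # sorts the 'bar'-only result after both 'foo' results.)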
    keyfn = lambda r: (r['test_name'], r['detailed_results'].get('foo', 5.0))
    sorted_results = sorted(results, key=keyfn)
    detailed_results = [r['detailed_results'] for r in sorted_results]
    bench1, bench2_foo, bench2_bar = detailed_results
    self.assertEqual(bench1['foo'], 2.0)
    self.assertEqual(bench2_foo['foo'], 4.0)
    self.assertEqual(bench2_bar['bar'], 999)
    self.assertNotIn('bar', bench1)
    self.assertNotIn('bar', bench2_foo)
    self.assertNotIn('foo', bench2_bar)


class PerfReportParserTest(unittest.TestCase):
  """Tests for the perf report parser in results_report."""
  @staticmethod
  def _ReadRealPerfReport():
    my_dir = os.path.dirname(os.path.realpath(__file__))
    with open(os.path.join(my_dir, 'perf_files/perf.data.report.0')) as f:
      return f.read()

  def testParserParsesRealWorldPerfReport(self):
    report = ParseStandardPerfReport(self._ReadRealPerfReport())
    self.assertItemsEqual(['cycles', 'instructions'], report.keys())

    # Arbitrarily selected known percentages from the perf report.
    known_cycles_percentages = {
        '0xffffffffa4a1f1c9': 0.66,
        '0x0000115bb7ba9b54': 0.47,
        '0x0000000000082e08': 0.00,
        '0xffffffffa4a13e63': 0.00,
    }
    report_cycles = report['cycles']
    self.assertEqual(len(report_cycles), 214)
    for k, v in known_cycles_percentages.iteritems():
      self.assertIn(k, report_cycles)
      self.assertEqual(v, report_cycles[k])

    known_instructions_percentages = {
        '0x0000115bb6c35d7a': 1.65,
        '0x0000115bb7ba9b54': 0.67,
        '0x0000000000024f56': 0.00,
        '0xffffffffa4a0ee03': 0.00,
    }
    report_instructions = report['instructions']
    self.assertEqual(len(report_instructions), 492)
    for k, v in known_instructions_percentages.iteritems():
      self.assertIn(k, report_instructions)
      self.assertEqual(v, report_instructions[k])


if __name__ == '__main__':
  test_flag.SetTestMode(True)
  unittest.main()