results_report.py revision eb9fce674ff90a6de04827bfe1ef6e07a99c8f61
1# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
2# Use of this source code is governed by a BSD-style license that can be
3# found in the LICENSE file.
4"""A module to handle the report format."""
5from __future__ import print_function
6
7import datetime
8import itertools
9import json
10import os
11
12from cros_utils.tabulator import AmeanResult
13from cros_utils.tabulator import Cell
14from cros_utils.tabulator import CoeffVarFormat
15from cros_utils.tabulator import CoeffVarResult
16from cros_utils.tabulator import Column
17from cros_utils.tabulator import Format
18from cros_utils.tabulator import GmeanRatioResult
19from cros_utils.tabulator import LiteralResult
20from cros_utils.tabulator import MaxResult
21from cros_utils.tabulator import MinResult
22from cros_utils.tabulator import PValueFormat
23from cros_utils.tabulator import PValueResult
24from cros_utils.tabulator import RatioFormat
25from cros_utils.tabulator import RawResult
26from cros_utils.tabulator import StdResult
27from cros_utils.tabulator import TableFormatter
28from cros_utils.tabulator import TableGenerator
29from cros_utils.tabulator import TablePrinter
30from update_telemetry_defaults import TelemetryDefaults
31
32from column_chart import ColumnChart
33from results_organizer import ResultOrganizer
34from perf_table import PerfTable
35
36
def ParseChromeosImage(chromeos_image):
  """Split a chromeos_image path into its version and image parts.

  Two layouts are expected:
  1: <path-to-chroot>/src/build/images/<board>/<ChromeOS-version>.<datetime>/ \
     chromiumos_test_image.bin
  2: <path-to-chroot>/chroot/tmp/<buildbot-build>/<ChromeOS-version>/ \
      chromiumos_test_image.bin

  The returned 'chromeos_version' has the trailing '.<datetime>' extension
  (present on local builds, the first case) stripped off.  The returned
  'chromeos_image' is the whole input path, except that for downloaded
  official images (the second case) everything up to and including
  '/chroot/tmp' is chopped off to keep the image name readable.

  Args:
      chromeos_image: string containing the path to the chromeos_image that
      crosperf used for the test.

  Returns:
      version, image: The results of parsing the input string, as explained
      above.
  """
  version = ''
  if chromeos_image.endswith('/chromiumos_test_image.bin'):
    # The version is the next-to-last path component, e.g. "R45-2345.0.0" or
    # "R43-2345.0.0.date-and-time" for local builds; splitext drops the
    # date-and-time "extension".
    containing_dir = chromeos_image.split('/')[-2]
    version = os.path.splitext(containing_dir)[0]

  # If the image lives under .../chroot/tmp/..., it is an official image that
  # got downloaded; strip the download prefix to make the name clearer.
  download_prefix = '/chroot/tmp'
  image = chromeos_image
  if download_prefix in image:
    image = image.split(download_prefix, 1)[1]
  return version, image
79
80
class ResultsReport(object):
  """Base class to handle the report format.

  Builds tabulator-based summary/full/perf tables from an experiment's
  benchmark runs; subclasses render them as text, HTML, or JSON.
  """
  MAX_COLOR_CODE = 255
  # Minimum number of rows shown per perf event table.
  PERF_ROWS = 5

  def __init__(self, experiment):
    self.experiment = experiment
    self.benchmark_runs = experiment.benchmark_runs
    self.labels = experiment.labels
    self.benchmarks = experiment.benchmarks
    # The first label is treated as the comparison baseline.
    self.baseline = self.labels[0]

  def _SortByLabel(self, runs):
    """Group benchmark runs by their label name.

    Args:
      runs: iterable of benchmark_run objects (each with a .label_name).

    Returns:
      dict mapping label_name -> list of runs with that label, in input order.
    """
    labels = {}
    for benchmark_run in runs:
      labels.setdefault(benchmark_run.label_name, []).append(benchmark_run)
    return labels

  def GetFullTables(self, perf=False):
    """Return the per-iteration ('full') result tables.

    Args:
      perf: if True, build the tables from perf-report data instead of
          benchmark results.
    """
    columns = [Column(RawResult(), Format()),
               Column(MinResult(), Format()),
               Column(MaxResult(), Format()),
               Column(AmeanResult(), Format()),
               Column(StdResult(), Format(), 'StdDev'),
               Column(CoeffVarResult(), CoeffVarFormat(), 'StdDev/Mean'),
               Column(GmeanRatioResult(), RatioFormat(), 'GmeanSpeedup'),
               Column(PValueResult(), PValueFormat(), 'p-value')]
    if not perf:
      return self._GetTables(self.labels, self.benchmark_runs, columns, 'full')
    return self._GetPerfTables(self.labels, columns, 'full')

  def GetSummaryTables(self, perf=False):
    """Return the aggregated ('summary') result tables.

    Args:
      perf: if True, build the tables from perf-report data instead of
          benchmark results.
    """
    columns = [Column(AmeanResult(), Format()),
               Column(StdResult(), Format(), 'StdDev'),
               Column(CoeffVarResult(), CoeffVarFormat(), 'StdDev/Mean'),
               Column(GmeanRatioResult(), RatioFormat(), 'GmeanSpeedup'),
               Column(PValueResult(), PValueFormat(), 'p-value')]
    if not perf:
      return self._GetTables(self.labels, self.benchmark_runs, columns,
                             'summary')
    return self._GetPerfTables(self.labels, columns, 'summary')

  def _ParseColumn(self, columns, iteration):
    """Expand each RawResult column into one LiteralResult column per
    iteration; all other columns pass through unchanged."""
    new_column = []
    for column in columns:
      if column.result.__class__.__name__ != 'RawResult':
        # TODO(asharif): tabulator should support full table natively.
        new_column.append(column)
      else:
        for i in range(iteration):
          new_column.append(Column(LiteralResult(i), Format(), str(i + 1)))
    return new_column

  def _AreAllRunsEmpty(self, runs):
    """Return True iff every result dict in every label's run list is empty."""
    return all(not result_dict for label in runs for result_dict in label)

  def _GetTableHeader(self, benchmark):
    """Return a one-cell header table naming the benchmark and iterations."""
    cell = Cell()
    cell.string_value = ('Benchmark:  {0};  Iterations: {1}'
                         .format(benchmark.name, benchmark.iterations))
    cell.header = True
    return [[cell]]

  def _GetTables(self, labels, benchmark_runs, columns, table_type):
    """Build a (header table, cell table) pair per benchmark result."""
    tables = []
    ro = ResultOrganizer(benchmark_runs, labels, self.benchmarks)
    result = ro.result
    label_name = ro.labels
    for item in result:
      runs = result[item]
      # Find the benchmark object matching this result name.  If no name
      # matches, the loop leaves the last benchmark bound (pre-existing
      # behavior, kept for compatibility).
      benchmark = None
      for benchmark in self.benchmarks:
        if benchmark.name == item:
          break
      ben_table = self._GetTableHeader(benchmark)

      if self._AreAllRunsEmpty(runs):
        cell = Cell()
        cell.string_value = ('This benchmark contains no result.'
                             ' Is the benchmark name valid?')
        cell_table = [[cell]]
      else:
        tg = TableGenerator(runs, label_name)
        table = tg.GetTable()
        parsed_columns = self._ParseColumn(columns, benchmark.iterations)
        tf = TableFormatter(table, parsed_columns)
        cell_table = tf.GetCellTable(table_type)
      tables.append(ben_table)
      tables.append(cell_table)
    return tables

  def _GetPerfTables(self, labels, columns, table_type):
    """Build a header table plus one table per perf event per benchmark."""
    tables = []
    label_names = [label.name for label in labels]
    p_table = PerfTable(self.experiment, label_names)

    if not p_table.perf_data:
      return tables

    for benchmark in p_table.perf_data:
      # Find the benchmark object matching this perf entry.  If no name
      # matches, the loop leaves the last benchmark bound (pre-existing
      # behavior, kept for compatibility).
      ben = None
      for ben in self.benchmarks:
        if ben.name == benchmark:
          break

      tables.append(self._GetTableHeader(ben))
      benchmark_data = p_table.perf_data[benchmark]
      row_info = p_table.row_info[benchmark]
      for event in benchmark_data:
        tg = TableGenerator(benchmark_data[event],
                            label_names,
                            sort=TableGenerator.SORT_BY_VALUES_DESC)
        # Show at least PERF_ROWS rows, or more if the event has them.
        table = tg.GetTable(max(self.PERF_ROWS, row_info[event]))
        parsed_columns = self._ParseColumn(columns, ben.iterations)
        tf = TableFormatter(table, parsed_columns)
        tf.GenerateCellTable(table_type)
        tf.AddColumnName()
        tf.AddLabelName()
        tf.AddHeader(str(event))
        tables.append(tf.GetCellTable(table_type, headers=False))
    return tables

  def PrintTables(self, tables, out_to):
    """Render a list of cell tables in the requested output format.

    Args:
      tables: list of cell tables (or a falsy value, meaning no tables).
      out_to: one of 'HTML', 'PLAIN', 'CONSOLE', 'TSV' or 'EMAIL'.

    Returns:
      The concatenated rendered tables as a single string.

    Raises:
      ValueError: if out_to is not a recognized format.  (Previously an
          unknown format either raised NameError on the first table or
          silently reused the previous iteration's printer.)
    """
    output = ''
    if not tables:
      return output
    printer_formats = {
        'HTML': TablePrinter.HTML,
        'PLAIN': TablePrinter.PLAIN,
        'CONSOLE': TablePrinter.CONSOLE,
        'TSV': TablePrinter.TSV,
        'EMAIL': TablePrinter.EMAIL,
    }
    if out_to not in printer_formats:
      raise ValueError('Unknown output format: %s' % out_to)
    print_format = printer_formats[out_to]
    for table in tables:
      output += TablePrinter(table, print_format).Print()
    return output
236
237
class TextResultsReport(ResultsReport):
  """Class to generate text result report."""
  TEXT = """
===========================================
Results report for: '%s'
===========================================

-------------------------------------------
Summary
-------------------------------------------
%s


Number re-images: %s

-------------------------------------------
Benchmark Run Status
-------------------------------------------
%s


-------------------------------------------
Perf Data
-------------------------------------------
%s



Experiment File
-------------------------------------------
%s


CPUInfo
-------------------------------------------
%s
===========================================
"""

  def __init__(self, experiment, email=False):
    super(TextResultsReport, self).__init__(experiment)
    # When True, render tables in EMAIL format instead of CONSOLE.
    self.email = email

  def GetStatusTable(self):
    """Generate the status table by the tabulator."""
    columns = [
        Column(LiteralResult(iteration=0), Format(), 'Status'),
        Column(LiteralResult(iteration=1), Format(), 'Failing Reason'),
    ]
    # Seed with an empty row so the tabulator has a leading blank line.
    table = [['', '']]
    for run in self.benchmark_runs:
      table.append([run.name,
                    [run.timeline.GetLastEvent(), run.failure_reason]])
    return [TableFormatter(table, columns).GetCellTable('status')]

  def GetReport(self):
    """Generate the report for email and console."""
    output_type = 'EMAIL' if self.email else 'CONSOLE'
    status_table = self.GetStatusTable()
    summary_table = self.GetSummaryTables()
    # Normalize an empty perf table to None before printing.
    perf_table = self.GetSummaryTables(perf=True) or None
    experiment = self.experiment
    return self.TEXT % (
        experiment.name,
        self.PrintTables(summary_table, output_type),
        experiment.machine_manager.num_reimages,
        self.PrintTables(status_table, output_type),
        self.PrintTables(perf_table, output_type),
        experiment.experiment_file,
        experiment.machine_manager.GetAllCPUInfo(experiment.labels))
315
316
class HTMLResultsReport(ResultsReport):
  """Class to generate html result report.

  Renders the experiment's summary, perf and full tables plus per-test
  column charts into a single HTML page.  The Google Charts library is
  loaded from www.google.com/jsapi when the page is viewed.
  """

  # Page template.  The %s slots are filled, in order, with: perf-tab init
  # JS, chart-drawing JS, summary table (HTML / plain / TSV), summary tab
  # menu, perf section HTML, chart divs, full table (HTML / plain / TSV),
  # full tab menu, and the experiment file text.
  HTML = """
<html>
  <head>
    <style type="text/css">

body {
  font-family: "Lucida Sans Unicode", "Lucida Grande", Sans-Serif;
  font-size: 12px;
}

pre {
  margin: 10px;
  color: #039;
  font-size: 14px;
}

.chart {
  display: inline;
}

.hidden {
  visibility: hidden;
}

.results-section {
  border: 1px solid #b9c9fe;
  margin: 10px;
}

.results-section-title {
  background-color: #b9c9fe;
  color: #039;
  padding: 7px;
  font-size: 14px;
  width: 200px;
}

.results-section-content {
  margin: 10px;
  padding: 10px;
  overflow:auto;
}

#box-table-a {
  font-size: 12px;
  width: 480px;
  text-align: left;
  border-collapse: collapse;
}

#box-table-a th {
  padding: 6px;
  background: #b9c9fe;
  border-right: 1px solid #fff;
  border-bottom: 1px solid #fff;
  color: #039;
  text-align: center;
}

#box-table-a td {
  padding: 4px;
  background: #e8edff;
  border-bottom: 1px solid #fff;
  border-right: 1px solid #fff;
  color: #669;
  border-top: 1px solid transparent;
}

#box-table-a tr:hover td {
  background: #d0dafd;
  color: #339;
}

    </style>
    <script type='text/javascript' src='https://www.google.com/jsapi'></script>
    <script type='text/javascript'>
      google.load('visualization', '1', {packages:['corechart']});
      google.setOnLoadCallback(init);
      function init() {
        switchTab('summary', 'html');
        %s
        switchTab('full', 'html');
        drawTable();
      }
      function drawTable() {
        %s
      }
      function switchTab(table, tab) {
        document.getElementById(table + '-html').style.display = 'none';
        document.getElementById(table + '-text').style.display = 'none';
        document.getElementById(table + '-tsv').style.display = 'none';
        document.getElementById(table + '-' + tab).style.display = 'block';
      }
    </script>
  </head>

  <body>
    <div class='results-section'>
      <div class='results-section-title'>Summary Table</div>
      <div class='results-section-content'>
        <div id='summary-html'>%s</div>
        <div id='summary-text'><pre>%s</pre></div>
        <div id='summary-tsv'><pre>%s</pre></div>
      </div>
      %s
    </div>
    %s
    <div class='results-section'>
      <div class='results-section-title'>Charts</div>
      <div class='results-section-content'>%s</div>
    </div>
    <div class='results-section'>
      <div class='results-section-title'>Full Table</div>
      <div class='results-section-content'>
        <div id='full-html'>%s</div>
        <div id='full-text'><pre>%s</pre></div>
        <div id='full-tsv'><pre>%s</pre></div>
      </div>
      %s
    </div>
    <div class='results-section'>
      <div class='results-section-title'>Experiment File</div>
      <div class='results-section-content'>
        <pre>%s</pre>
    </div>
    </div>
  </body>
</html>
"""

  # Perf section template: perf table as HTML / plain / TSV plus its tab
  # menu.  Substituted into HTML above only when perf data exists.
  PERF_HTML = """
    <div class='results-section'>
      <div class='results-section-title'>Perf Table</div>
      <div class='results-section-content'>
        <div id='perf-html'>%s</div>
        <div id='perf-text'><pre>%s</pre></div>
        <div id='perf-tsv'><pre>%s</pre></div>
      </div>
      %s
    </div>
"""

  def __init__(self, experiment):
    super(HTMLResultsReport, self).__init__(experiment)

  def _GetTabMenuHTML(self, table):
    """Return the HTML/Text/TSV tab links for the named table section."""
    return """
<div class='tab-menu'>
  <a href="javascript:switchTab('%s', 'html')">HTML</a>
  <a href="javascript:switchTab('%s', 'text')">Text</a>
  <a href="javascript:switchTab('%s', 'tsv')">TSV</a>
</div>""" % (table, table, table)

  def GetReport(self):
    """Generate the full HTML report page as a string."""
    chart_javascript = ''  # NOTE(review): immediately reassigned below.
    charts = self._GetCharts(self.labels, self.benchmark_runs)
    chart_javascript = ''.join(chart.GetJavascript() for chart in charts)
    chart_divs = ''.join(chart.GetDiv() for chart in charts)

    summary_table = self.GetSummaryTables()
    full_table = self.GetFullTables()
    perf_table = self.GetSummaryTables(perf=True)
    if perf_table:
      # Perf data exists: render the perf section and switch its tab to
      # the HTML view on page load.
      perf_html = self.PERF_HTML % (self.PrintTables(perf_table, 'HTML'),
                                    self.PrintTables(perf_table, 'PLAIN'),
                                    self.PrintTables(perf_table, 'TSV'),
                                    self._GetTabMenuHTML('perf'))
      perf_init = "switchTab('perf', 'html');"
    else:
      perf_html = ''
      perf_init = ''

    return self.HTML % (
        perf_init, chart_javascript, self.PrintTables(summary_table, 'HTML'),
        self.PrintTables(summary_table, 'PLAIN'),
        self.PrintTables(summary_table, 'TSV'), self._GetTabMenuHTML('summary'),
        perf_html, chart_divs, self.PrintTables(full_table, 'HTML'),
        self.PrintTables(full_table, 'PLAIN'),
        self.PrintTables(full_table, 'TSV'), self._GetTabMenuHTML('full'),
        self.experiment.experiment_file)

  def _GetCharts(self, labels, benchmark_runs):
    """Build one ColumnChart per test row, plotting avg/min/max per label.

    Rows whose average cell holds a string value (a non-numeric result)
    are dropped rather than charted.
    """
    charts = []
    ro = ResultOrganizer(benchmark_runs, labels)
    result = ro.result
    for item, runs in result.iteritems():
      tg = TableGenerator(runs, ro.labels)
      table = tg.GetTable()
      columns = [Column(AmeanResult(), Format()), Column(MinResult(), Format()),
                 Column(MaxResult(), Format())]
      tf = TableFormatter(table, columns)
      data_table = tf.GetCellTable('full')

      # Data rows start at index 2 — the first two rows appear to be
      # header rows (TODO confirm against tabulator.GetCellTable).
      for i in range(2, len(data_table)):
        cur_row_data = data_table[i]
        test_key = cur_row_data[0].string_value
        title = '{0}: {1}'.format(item, test_key.replace('/', ''))
        chart = ColumnChart(title, 300, 200)
        chart.AddColumn('Label', 'string')
        chart.AddColumn('Average', 'number')
        chart.AddColumn('Min', 'number')
        chart.AddColumn('Max', 'number')
        chart.AddSeries('Min', 'line', 'black')
        chart.AddSeries('Max', 'line', 'black')
        # Each label contributes three consecutive cells after the test-key
        # cell: Average, Min, Max (matching the columns built above).
        cur_index = 1
        for label in ro.labels:
          chart.AddRow([label, cur_row_data[cur_index].value, cur_row_data[
              cur_index + 1].value, cur_row_data[cur_index + 2].value])
          # A string average means the value is non-numeric; discard the
          # whole chart for this row.
          if isinstance(cur_row_data[cur_index].value, str):
            chart = None
            break
          cur_index += 3
        if chart:
          charts.append(chart)
    return charts
535
536
class JSONResultsReport(ResultsReport):
  """Class that generates JSON reports.

  Flattens each (test, label, iteration) result into one JSON record and
  writes the collected records to a report_<board>_<date>_<time>.<compiler>
  .json file under the given results directory.
  """

  @staticmethod
  def _WriteResultsToFile(filename, results):
    """Write the results as JSON to the given filename."""
    with open(filename, 'w') as fp:
      json.dump(results, fp, indent=2)

  def __init__(self, experiment, date=None, time=None):
    super(JSONResultsReport, self).__init__(experiment)
    self.ro = ResultOrganizer(experiment.benchmark_runs,
                              experiment.labels,
                              experiment.benchmarks,
                              json_report=True)
    self.date = date
    self.time = time
    self.defaults = TelemetryDefaults()
    if not self.date:
      # No date supplied: stamp the report with the current local date and
      # time.  NOTE(review): this also overwrites any caller-supplied
      # `time` whenever `date` is falsy.
      timestamp = datetime.datetime.strftime(datetime.datetime.now(),
                                             '%Y-%m-%d %H:%M:%S')
      date, time = timestamp.split(' ')
      self.date = date
      self.time = time

  def GetReport(self, results_dir, write_results=None):
    """Build the JSON result records and write them under results_dir.

    Args:
      results_dir: directory in which the report file is created.
      write_results: optional callable(filename, results) used to write the
          report; defaults to JSONResultsReport._WriteResultsToFile.
    """
    if write_results is None:
      write_results = JSONResultsReport._WriteResultsToFile

    self.defaults.ReadDefaultsFile()
    final_results = []
    board = self.experiment.labels[0].board
    # Report filename suffix; flipped to 'llvm' below if any label used it.
    compiler_string = 'gcc'
    for test, test_results in self.ro.result.iteritems():
      for label, label_results in itertools.izip(self.ro.labels, test_results):
        for iter_results in label_results:
          # One JSON record per (test, label, iteration).
          json_results = {
              'date': self.date,
              'time': self.time,
              'board': board,
              'label': label
          }
          common_checksum = ''
          common_string = ''
          # Locate the experiment label matching this result's label name
          # to pull image/compiler/machine metadata from it.
          for l in self.experiment.labels:
            if l.name == label:
              img_path = os.path.realpath(os.path.expanduser(l.chromeos_image))
              ver, img = ParseChromeosImage(img_path)
              json_results['chromeos_image'] = img
              json_results['chromeos_version'] = ver
              json_results['chrome_version'] = l.chrome_version
              json_results['compiler'] = l.compiler
              # If any of the labels used the LLVM compiler, we will add
              # ".llvm" to the json report filename. (Otherwise we use .gcc).
              if 'llvm' in l.compiler:
                compiler_string = 'llvm'
              common_checksum = \
                self.experiment.machine_manager.machine_checksum[l.name]
              common_string = \
                self.experiment.machine_manager.machine_checksum_string[l.name]
              break
          else:
            # for-else: no experiment label matched this result's label.
            raise RuntimeError("Label doesn't exist in label_results?")
          json_results['test_name'] = test

          if not iter_results or iter_results['retval'] != 0:
            json_results['pass'] = False
          else:
            json_results['pass'] = True
            # Get overall results.
            if test in self.defaults.GetDefault():
              default_result_fields = self.defaults.GetDefault()[test]
              value = []
              for f in default_result_fields:
                if f in iter_results:
                  v = iter_results[f]
                  if type(v) == list:
                    v = v[0]
                  # New telemetry results format: sometimes we get a list
                  # of lists now.
                  if type(v) == list:
                    v = v[0]
                  item = (f, float(v))
                  value.append(item)
              json_results['overall_result'] = value
            # Get detailed results.
            detail_results = {}
            for k in iter_results:
              if k != 'retval':
                v = iter_results[k]
                if type(v) == list:
                  v = v[0]
                if v != 'PASS':
                  if k.find('machine') == -1:
                    # Numeric result fields go under 'detailed_results';
                    # None values are skipped entirely.
                    if v is None:
                      continue
                    if type(v) != list:
                      detail_results[k] = float(v)
                    else:
                      detail_results[k] = [float(d) for d in v]
                  else:
                    # Keys mentioning 'machine' are stored at the top level
                    # of the record, not under 'detailed_results'.
                    json_results[k] = v
            # Fall back to the label's machine info when the results did
            # not carry their own.
            if 'machine_checksum' not in json_results:
              json_results['machine_checksum'] = common_checksum
            if 'machine_string' not in json_results:
              json_results['machine_string'] = common_string
            json_results['detailed_results'] = detail_results
          final_results.append(json_results)

    # ':' is not filename-friendly; use '.' in the time portion.
    filename = 'report_%s_%s_%s.%s.json' % (
        board, self.date, self.time.replace(':', '.'), compiler_string)
    fullname = os.path.join(results_dir, filename)
    write_results(fullname, final_results)
650