# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""A module to handle the report format."""
from __future__ import print_function

import datetime
import itertools
import json
import os

from cros_utils.tabulator import AmeanResult
from cros_utils.tabulator import Cell
from cros_utils.tabulator import CoeffVarFormat
from cros_utils.tabulator import CoeffVarResult
from cros_utils.tabulator import Column
from cros_utils.tabulator import Format
from cros_utils.tabulator import GmeanRatioResult
from cros_utils.tabulator import LiteralResult
from cros_utils.tabulator import MaxResult
from cros_utils.tabulator import MinResult
from cros_utils.tabulator import PValueFormat
from cros_utils.tabulator import PValueResult
from cros_utils.tabulator import RatioFormat
from cros_utils.tabulator import RawResult
from cros_utils.tabulator import StdResult
from cros_utils.tabulator import TableFormatter
from cros_utils.tabulator import TableGenerator
from cros_utils.tabulator import TablePrinter
from update_telemetry_defaults import TelemetryDefaults

from column_chart import ColumnChart
from results_organizer import ResultOrganizer
from perf_table import PerfTable

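# Typical usage, as a sketch (assumes a fully-populated crosperf Experiment
# object, which is constructed elsewhere):
#
#   text_report = TextResultsReport(experiment, email=True).GetReport()
#   html_report = HTMLResultsReport(experiment).GetReport()
#   JSONResultsReport(experiment).GetReport(results_dir)
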
def ParseChromeosImage(chromeos_image):
  """Parse the chromeos_image string for the image and version.

  The chromeos_image string will probably be in one of two formats:
  1: <path-to-chroot>/src/build/images/<board>/<ChromeOS-version>.<datetime>/ \
     chromiumos_test_image.bin
  2: <path-to-chroot>/chroot/tmp/<buildbot-build>/<ChromeOS-version>/ \
     chromiumos_test_image.bin

  We parse these strings to find the 'chromeos_version' to store in the
  json archive (without the .datetime bit in the first case), and also
  the 'chromeos_image', which would be all of the first case, but only the
  part after '/chroot/tmp' in the second case.

  Args:
      chromeos_image: string containing the path to the chromeos_image that
          crosperf used for the test.

  Returns:
      version, image: The results of parsing the input string, as explained
          above.
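
  Example (hypothetical path; doctest-style sketch):
      >>> ParseChromeosImage(
      ...     '/scratch/chromeos/src/build/images/lumpy/'
      ...     'R45-2345.0.0.2015_02_15_1234/chromiumos_test_image.bin')
      ('R45-2345.0.0', '/scratch/chromeos/src/build/images/lumpy/R45-2345.0.0.2015_02_15_1234/chromiumos_test_image.bin')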
58  """
  # Find the ChromeOS version, e.g. R45-2345.0.0.....
  # chromeos_image should have been something like:
  # <path>/<board-trybot-release>/<chromeos-version>/chromiumos_test_image.bin
  if chromeos_image.endswith('/chromiumos_test_image.bin'):
    full_version = chromeos_image.split('/')[-2]
    # Strip the date and time off of local builds (which have the format
    # "R43-2345.0.0.date-and-time").
    version, _ = os.path.splitext(full_version)
  else:
    version = ''

  # Find the chromeos image.  If it's somewhere in .../chroot/tmp/..., then
  # it's an official image that got downloaded, so chop off the download path
  # to make the official image name more clear.
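  # For example (hypothetical path), '<chroot>/chroot/tmp/my-trybot/
  # R45-2345.0.0/chromiumos_test_image.bin' is reported as
  # '/my-trybot/R45-2345.0.0/chromiumos_test_image.bin'.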
  official_image_path = '/chroot/tmp'
  if official_image_path in chromeos_image:
    image = chromeos_image.split(official_image_path, 1)[1]
  else:
    image = chromeos_image
  return version, image


class ResultsReport(object):
  """Class to handle the report format."""
  MAX_COLOR_CODE = 255
  PERF_ROWS = 5

  def __init__(self, experiment):
    self.experiment = experiment
    self.benchmark_runs = experiment.benchmark_runs
    self.labels = experiment.labels
    self.benchmarks = experiment.benchmarks
    self.baseline = self.labels[0]

  def _SortByLabel(self, runs):
    labels = {}
    for benchmark_run in runs:
      if benchmark_run.label_name not in labels:
        labels[benchmark_run.label_name] = []
      labels[benchmark_run.label_name].append(benchmark_run)
    return labels

  def GetFullTables(self, perf=False):
    columns = [Column(RawResult(), Format()),
               Column(MinResult(), Format()),
               Column(MaxResult(), Format()),
               Column(AmeanResult(), Format()),
               Column(StdResult(), Format(), 'StdDev'),
               Column(CoeffVarResult(), CoeffVarFormat(), 'StdDev/Mean'),
               Column(GmeanRatioResult(), RatioFormat(), 'GmeanSpeedup'),
               Column(PValueResult(), PValueFormat(), 'p-value')]
    if not perf:
      return self._GetTables(self.labels, self.benchmark_runs, columns, 'full')
    return self._GetPerfTables(self.labels, columns, 'full')

  def GetSummaryTables(self, perf=False):
    columns = [Column(AmeanResult(), Format()),
               Column(StdResult(), Format(), 'StdDev'),
               Column(CoeffVarResult(), CoeffVarFormat(), 'StdDev/Mean'),
               Column(GmeanRatioResult(), RatioFormat(), 'GmeanSpeedup'),
               Column(PValueResult(), PValueFormat(), 'p-value')]
    if not perf:
      return self._GetTables(self.labels, self.benchmark_runs, columns,
                             'summary')
    return self._GetPerfTables(self.labels, columns, 'summary')

  def _ParseColumn(self, columns, iteration):
    """Expand a RawResult column into per-iteration LiteralResult columns."""
    new_column = []
    for column in columns:
      if column.result.__class__.__name__ != 'RawResult':
        # TODO(asharif): tabulator should support full table natively.
        new_column.append(column)
      else:
        for i in range(iteration):
          cc = Column(LiteralResult(i), Format(), str(i + 1))
          new_column.append(cc)
    return new_column

  def _AreAllRunsEmpty(self, runs):
    """Return True if every run dictionary under every label is empty."""
    for label in runs:
      for dictionary in label:
        if dictionary:
          return False
    return True

  def _GetTableHeader(self, benchmark):
    benchmark_info = ('Benchmark:  {0};  Iterations: {1}'
                      .format(benchmark.name, benchmark.iterations))
    cell = Cell()
    cell.string_value = benchmark_info
    cell.header = True
    return [[cell]]

  def _GetTables(self, labels, benchmark_runs, columns, table_type):
    tables = []
    ro = ResultOrganizer(benchmark_runs, labels, self.benchmarks)
    result = ro.result
    label_name = ro.labels
    for item in result:
      benchmark = None
      runs = result[item]
      for benchmark in self.benchmarks:
        if benchmark.name == item:
          break
      ben_table = self._GetTableHeader(benchmark)

      if self._AreAllRunsEmpty(runs):
        cell = Cell()
        cell.string_value = ('This benchmark contains no results.'
                             ' Is the benchmark name valid?')
        cell_table = [[cell]]
      else:
        tg = TableGenerator(runs, label_name)
        table = tg.GetTable()
        parsed_columns = self._ParseColumn(columns, benchmark.iterations)
        tf = TableFormatter(table, parsed_columns)
        cell_table = tf.GetCellTable(table_type)
      tables.append(ben_table)
      tables.append(cell_table)
    return tables

  def _GetPerfTables(self, labels, columns, table_type):
    tables = []
    label_names = [label.name for label in labels]
    p_table = PerfTable(self.experiment, label_names)

    if not p_table.perf_data:
      return tables

    for benchmark in p_table.perf_data:
      ben = None
      for ben in self.benchmarks:
        if ben.name == benchmark:
          break

      ben_table = self._GetTableHeader(ben)
      tables.append(ben_table)
      benchmark_data = p_table.perf_data[benchmark]
      row_info = p_table.row_info[benchmark]
      for event in benchmark_data:
        tg = TableGenerator(benchmark_data[event],
                            label_names,
                            sort=TableGenerator.SORT_BY_VALUES_DESC)
        # Show at least PERF_ROWS rows for each perf event.
        table = tg.GetTable(max(self.PERF_ROWS, row_info[event]))
        parsed_columns = self._ParseColumn(columns, ben.iterations)
        tf = TableFormatter(table, parsed_columns)
        tf.GenerateCellTable(table_type)
        tf.AddColumnName()
        tf.AddLabelName()
        tf.AddHeader(str(event))
        table = tf.GetCellTable(table_type, headers=False)
        tables.append(table)
    return tables

  def PrintTables(self, tables, out_to):
    output = ''
    if not tables:
      return output
    printer_style = {'HTML': TablePrinter.HTML,
                     'PLAIN': TablePrinter.PLAIN,
                     'CONSOLE': TablePrinter.CONSOLE,
                     'TSV': TablePrinter.TSV,
                     'EMAIL': TablePrinter.EMAIL}
    # Unknown formats are an error; every table uses the same printer style.
    if out_to not in printer_style:
      raise ValueError('Unknown output format: %s' % out_to)
    for table in tables:
      tp = TablePrinter(table, printer_style[out_to])
      output += tp.Print()
    return output


class TextResultsReport(ResultsReport):
  """Class to generate text result report."""
  TEXT = """
===========================================
Results report for: '%s'
===========================================

-------------------------------------------
Summary
-------------------------------------------
%s


Number of re-images: %s

-------------------------------------------
Benchmark Run Status
-------------------------------------------
%s


-------------------------------------------
Perf Data
-------------------------------------------
%s


-------------------------------------------
Experiment File
-------------------------------------------
%s


-------------------------------------------
CPUInfo
-------------------------------------------
%s
===========================================
"""

  def __init__(self, experiment, email=False):
    super(TextResultsReport, self).__init__(experiment)
    self.email = email

  def GetStatusTable(self):
    """Generate the status table by the tabulator."""
    table = [['', '']]
    columns = [Column(LiteralResult(iteration=0), Format(), 'Status'),
               Column(LiteralResult(iteration=1), Format(), 'Failing Reason')]

    for benchmark_run in self.benchmark_runs:
      status = [benchmark_run.name, [benchmark_run.timeline.GetLastEvent(),
                                     benchmark_run.failure_reason]]
      table.append(status)
    tf = TableFormatter(table, columns)
    cell_table = tf.GetCellTable('status')
    return [cell_table]

  def GetReport(self):
    """Generate the report for email and console."""
    output_type = 'EMAIL' if self.email else 'CONSOLE'
    status_table = self.GetStatusTable()
    summary_table = self.GetSummaryTables()
    perf_table = self.GetSummaryTables(perf=True)
    return self.TEXT % (
        self.experiment.name,
        self.PrintTables(summary_table, output_type),
        self.experiment.machine_manager.num_reimages,
        self.PrintTables(status_table, output_type),
        self.PrintTables(perf_table, output_type),
        self.experiment.experiment_file,
        self.experiment.machine_manager.GetAllCPUInfo(self.experiment.labels))


class HTMLResultsReport(ResultsReport):
  """Class to generate HTML result report."""

  HTML = """
<html>
  <head>
    <style type="text/css">

body {
  font-family: "Lucida Sans Unicode", "Lucida Grande", Sans-Serif;
  font-size: 12px;
}

pre {
  margin: 10px;
  color: #039;
  font-size: 14px;
}

.chart {
  display: inline;
}

.hidden {
  visibility: hidden;
}

.results-section {
  border: 1px solid #b9c9fe;
  margin: 10px;
}

.results-section-title {
  background-color: #b9c9fe;
  color: #039;
  padding: 7px;
  font-size: 14px;
  width: 200px;
}

.results-section-content {
  margin: 10px;
  padding: 10px;
  overflow: auto;
}

#box-table-a {
  font-size: 12px;
  width: 480px;
  text-align: left;
  border-collapse: collapse;
}

#box-table-a th {
  padding: 6px;
  background: #b9c9fe;
  border-right: 1px solid #fff;
  border-bottom: 1px solid #fff;
  color: #039;
  text-align: center;
}

#box-table-a td {
  padding: 4px;
  background: #e8edff;
  border-bottom: 1px solid #fff;
  border-right: 1px solid #fff;
  color: #669;
  border-top: 1px solid transparent;
}

#box-table-a tr:hover td {
  background: #d0dafd;
  color: #339;
}

400    </style>
401    <script type='text/javascript' src='https://www.google.com/jsapi'></script>
402    <script type='text/javascript'>
403      google.load('visualization', '1', {packages:['corechart']});
404      google.setOnLoadCallback(init);
405      function init() {
406        switchTab('summary', 'html');
407        %s
408        switchTab('full', 'html');
409        drawTable();
410      }
411      function drawTable() {
412        %s
413      }
414      function switchTab(table, tab) {
415        document.getElementById(table + '-html').style.display = 'none';
416        document.getElementById(table + '-text').style.display = 'none';
417        document.getElementById(table + '-tsv').style.display = 'none';
418        document.getElementById(table + '-' + tab).style.display = 'block';
419      }
420    </script>
421  </head>
422
423  <body>
424    <div class='results-section'>
425      <div class='results-section-title'>Summary Table</div>
426      <div class='results-section-content'>
427        <div id='summary-html'>%s</div>
428        <div id='summary-text'><pre>%s</pre></div>
429        <div id='summary-tsv'><pre>%s</pre></div>
430      </div>
431      %s
432    </div>
433    %s
434    <div class='results-section'>
435      <div class='results-section-title'>Charts</div>
436      <div class='results-section-content'>%s</div>
437    </div>
438    <div class='results-section'>
439      <div class='results-section-title'>Full Table</div>
440      <div class='results-section-content'>
441        <div id='full-html'>%s</div>
442        <div id='full-text'><pre>%s</pre></div>
443        <div id='full-tsv'><pre>%s</pre></div>
444      </div>
445      %s
446    </div>
447    <div class='results-section'>
448      <div class='results-section-title'>Experiment File</div>
449      <div class='results-section-content'>
450        <pre>%s</pre>
451    </div>
452    </div>
453  </body>
454</html>
455"""

  PERF_HTML = """
    <div class='results-section'>
      <div class='results-section-title'>Perf Table</div>
      <div class='results-section-content'>
        <div id='perf-html'>%s</div>
        <div id='perf-text'><pre>%s</pre></div>
        <div id='perf-tsv'><pre>%s</pre></div>
      </div>
      %s
    </div>
"""

  def __init__(self, experiment):
    super(HTMLResultsReport, self).__init__(experiment)

  def _GetTabMenuHTML(self, table):
    return """
<div class='tab-menu'>
  <a href="javascript:switchTab('%s', 'html')">HTML</a>
  <a href="javascript:switchTab('%s', 'text')">Text</a>
  <a href="javascript:switchTab('%s', 'tsv')">TSV</a>
</div>""" % (table, table, table)

  def GetReport(self):
    charts = self._GetCharts(self.labels, self.benchmark_runs)
    chart_javascript = ''.join(chart.GetJavascript() for chart in charts)
    chart_divs = ''.join(chart.GetDiv() for chart in charts)

    summary_table = self.GetSummaryTables()
    full_table = self.GetFullTables()
    perf_table = self.GetSummaryTables(perf=True)
    if perf_table:
      perf_html = self.PERF_HTML % (self.PrintTables(perf_table, 'HTML'),
                                    self.PrintTables(perf_table, 'PLAIN'),
                                    self.PrintTables(perf_table, 'TSV'),
                                    self._GetTabMenuHTML('perf'))
      perf_init = "switchTab('perf', 'html');"
    else:
      perf_html = ''
      perf_init = ''

    return self.HTML % (
        perf_init, chart_javascript, self.PrintTables(summary_table, 'HTML'),
        self.PrintTables(summary_table, 'PLAIN'),
        self.PrintTables(summary_table, 'TSV'), self._GetTabMenuHTML('summary'),
        perf_html, chart_divs, self.PrintTables(full_table, 'HTML'),
        self.PrintTables(full_table, 'PLAIN'),
        self.PrintTables(full_table, 'TSV'), self._GetTabMenuHTML('full'),
        self.experiment.experiment_file)

  def _GetCharts(self, labels, benchmark_runs):
    charts = []
    ro = ResultOrganizer(benchmark_runs, labels)
    result = ro.result
    for item in result:
      runs = result[item]
      tg = TableGenerator(runs, ro.labels)
      table = tg.GetTable()
      columns = [Column(AmeanResult(), Format()),
                 Column(MinResult(), Format()),
                 Column(MaxResult(), Format())]
      tf = TableFormatter(table, columns)
      data_table = tf.GetCellTable('full')

      # Skip the two header rows; each remaining row is one test key.
      for i in range(2, len(data_table)):
        cur_row_data = data_table[i]
        test_key = cur_row_data[0].string_value
        title = '{0}: {1}'.format(item, test_key.replace('/', ''))
        chart = ColumnChart(title, 300, 200)
        chart.AddColumn('Label', 'string')
        chart.AddColumn('Average', 'number')
        chart.AddColumn('Min', 'number')
        chart.AddColumn('Max', 'number')
        chart.AddSeries('Min', 'line', 'black')
        chart.AddSeries('Max', 'line', 'black')
        cur_index = 1
        for label in ro.labels:
          # Non-numeric values cannot be charted; drop the whole chart.
          if isinstance(cur_row_data[cur_index].value, str):
            chart = None
            break
          chart.AddRow([label,
                        cur_row_data[cur_index].value,
                        cur_row_data[cur_index + 1].value,
                        cur_row_data[cur_index + 2].value])
          cur_index += 3
        if chart:
          charts.append(chart)
    return charts


class JSONResultsReport(ResultsReport):
  """Class that generates JSON reports."""

  @staticmethod
  def _WriteResultsToFile(filename, results):
    """Write the results as JSON to the given filename."""
    with open(filename, 'w') as fp:
      json.dump(results, fp, indent=2)

  def __init__(self, experiment, date=None, time=None):
    super(JSONResultsReport, self).__init__(experiment)
    self.ro = ResultOrganizer(experiment.benchmark_runs,
                              experiment.labels,
                              experiment.benchmarks,
                              json_report=True)
    self.date = date
    self.time = time
    self.defaults = TelemetryDefaults()
    if not self.date:
      timestamp = datetime.datetime.strftime(datetime.datetime.now(),
                                             '%Y-%m-%d %H:%M:%S')
      self.date, self.time = timestamp.split(' ')

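  # Each entry that GetReport() appends to final_results is a flat dict.
  # A single record looks roughly like this (values are hypothetical):
  #
  #   {'date': '2015-02-15', 'time': '10:30:00', 'board': 'lumpy',
  #    'label': 'vanilla', 'chromeos_image': '...', 'chromeos_version': '...',
  #    'chrome_version': '...', 'compiler': 'gcc', 'test_name': 'sunspider',
  #    'pass': True, 'overall_result': [('score', 123.0)],
  #    'detailed_results': {...}, 'machine_checksum': '...',
  #    'machine_string': '...'}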
  def GetReport(self, results_dir, write_results=None):
    """Write a JSON report of all benchmark results into results_dir."""
    if write_results is None:
      write_results = JSONResultsReport._WriteResultsToFile

    self.defaults.ReadDefaultsFile()
    final_results = []
    board = self.experiment.labels[0].board
    compiler_string = 'gcc'
    for test, test_results in self.ro.result.iteritems():
      for label, label_results in itertools.izip(self.ro.labels, test_results):
        for iter_results in label_results:
          json_results = {
              'date': self.date,
              'time': self.time,
              'board': board,
              'label': label
          }
          common_checksum = ''
          common_string = ''
          for l in self.experiment.labels:
            if l.name == label:
              img_path = os.path.realpath(os.path.expanduser(l.chromeos_image))
              ver, img = ParseChromeosImage(img_path)
              json_results['chromeos_image'] = img
              json_results['chromeos_version'] = ver
              json_results['chrome_version'] = l.chrome_version
              json_results['compiler'] = l.compiler
              # If any of the labels used the LLVM compiler, we will add
              # ".llvm" to the json report filename. (Otherwise we use '.gcc'.)
              if 'llvm' in l.compiler:
                compiler_string = 'llvm'
              common_checksum = (
                  self.experiment.machine_manager.machine_checksum[l.name])
              common_string = (
                  self.experiment.machine_manager
                  .machine_checksum_string[l.name])
              break
          else:
            raise RuntimeError("Label doesn't exist in label_results?")
          json_results['test_name'] = test

          if not iter_results or iter_results['retval'] != 0:
            json_results['pass'] = False
          else:
            json_results['pass'] = True
            # Get overall results.
            if test in self.defaults.GetDefault():
              default_result_fields = self.defaults.GetDefault()[test]
              value = []
              for f in default_result_fields:
                if f in iter_results:
                  v = iter_results[f]
                  if isinstance(v, list):
                    v = v[0]
                  value.append((f, float(v)))
              json_results['overall_result'] = value
            # Get detailed results.
            detail_results = {}
            for k in iter_results:
              if k != 'retval':
                v = iter_results[k]
                if isinstance(v, list):
                  v = v[0]
                if v != 'PASS':
                  if 'machine' not in k:
                    if isinstance(v, list):
                      detail_results[k] = [float(d) for d in v]
                    else:
                      detail_results[k] = float(v)
                  else:
                    json_results[k] = v
            if 'machine_checksum' not in json_results:
              json_results['machine_checksum'] = common_checksum
            if 'machine_string' not in json_results:
              json_results['machine_string'] = common_string
            json_results['detailed_results'] = detail_results
          final_results.append(json_results)

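    # The filename encodes board, date, time, and compiler, e.g. (with
    # hypothetical values) 'report_lumpy_2015-02-15_10.30.00.llvm.json'.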
    filename = 'report_%s_%s_%s.%s.json' % (
        board, self.date, self.time.replace(':', '.'), compiler_string)
    fullname = os.path.join(results_dir, filename)
    write_results(fullname, final_results)
655