results_report.py revision 75e1ccc6513c4529e47007105b7c523755f8e0c0
1# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
2# Use of this source code is governed by a BSD-style license that can be
3# found in the LICENSE file.
4
5"""A module to handle the report format."""
6from __future__ import print_function
7
8import datetime
9import json
10import os
11
12from cros_utils.tabulator import AmeanResult
13from cros_utils.tabulator import Cell
14from cros_utils.tabulator import CoeffVarFormat
15from cros_utils.tabulator import CoeffVarResult
16from cros_utils.tabulator import Column
17from cros_utils.tabulator import Format
18from cros_utils.tabulator import GmeanRatioResult
19from cros_utils.tabulator import LiteralResult
20from cros_utils.tabulator import MaxResult
21from cros_utils.tabulator import MinResult
22from cros_utils.tabulator import PValueFormat
23from cros_utils.tabulator import PValueResult
24from cros_utils.tabulator import RatioFormat
25from cros_utils.tabulator import RawResult
26from cros_utils.tabulator import StdResult
27from cros_utils.tabulator import TableFormatter
28from cros_utils.tabulator import TableGenerator
29from cros_utils.tabulator import TablePrinter
30from update_telemetry_defaults import TelemetryDefaults
31
32from column_chart import ColumnChart
33from results_organizer import ResultOrganizer
34from perf_table import PerfTable
35
36
def ParseChromeosImage(chromeos_image):
  """Parse the chromeos_image string for the image and version.

  The chromeos_image string will probably be in one of two formats:
  1: <path-to-chroot>/src/build/images/<board>/<ChromeOS-version>.<datetime>/ \
     chromiumos_test_image.bin
  2: <path-to-chroot>/chroot/tmp/<buildbot-build>/<ChromeOS-version>/ \
      chromiumos_test_image.bin

  We parse these strings to find the 'chromeos_version' to store in the
  json archive (without the .datetime bit in the first case); and also
  the 'chromeos_image', which would be all of the first case, but only the
  part after '/chroot/tmp' in the second case.

  Args:
      chromeos_image: string containing the path to the chromeos_image that
      crosperf used for the test.

  Returns:
      version, image: The results of parsing the input string, as explained
      above.  version is '' when the path does not end in
      chromiumos_test_image.bin.
  """
  version = ''
  real_file = os.path.realpath(os.path.expanduser(chromeos_image))
  pieces = real_file.split('/')
  # Find the ChromeOS version, e.g. R45-2345.0.0....., which is the name of
  # the directory holding chromiumos_test_image.bin:
  # <path>/<board-trybot-release>/<chromeos-version>/chromiumos_test_image.bin
  if pieces[-1] == 'chromiumos_test_image.bin':
    version = pieces[-2]
    # Find the last '.' in the version and chop it off (removing the
    # .datetime piece that local builds append).  Guard against a version
    # with no '.' at all: rfind would return -1 and version[:-1] would
    # silently drop the last character of the real version.
    loc = version.rfind('.')
    if loc != -1:
      version = version[:loc]
  # Find the chromeos image.  If it's somewhere in .../chroot/tmp/..., then
  # it's an official image that got downloaded, so chop off the download path
  # to make the official image name more clear.
  loc = real_file.find('/chroot/tmp')
  if loc != -1:
    loc += len('/chroot/tmp')
    real_file = real_file[loc:]
  image = real_file
  return version, image
81
class ResultsReport(object):
  """Base class to handle the report format.

  Subclasses (text, HTML, JSON) render the same experiment data in
  different output formats.  The first label of the experiment is treated
  as the baseline for comparisons.
  """
  MAX_COLOR_CODE = 255
  # Minimum number of rows shown per perf event table.
  PERF_ROWS = 5

  def __init__(self, experiment):
    self.experiment = experiment
    self.benchmark_runs = experiment.benchmark_runs
    self.labels = experiment.labels
    self.benchmarks = experiment.benchmarks
    # The first label is the baseline against which others are compared.
    self.baseline = self.labels[0]

  def _SortByLabel(self, runs):
    """Group benchmark runs into a dict keyed by their label name."""
    labels = {}
    for benchmark_run in runs:
      labels.setdefault(benchmark_run.label_name, []).append(benchmark_run)
    return labels

  def GetFullTables(self, perf=False):
    """Build per-benchmark tables with raw per-iteration values plus stats.

    Args:
      perf: when True, build tables from perf event data instead of the
          benchmark results.

    Returns:
      A list of cell tables (alternating header table / data table).
    """
    columns = [Column(RawResult(), Format()),
               Column(MinResult(), Format()),
               Column(MaxResult(), Format()),
               Column(AmeanResult(), Format()),
               Column(StdResult(), Format(), "StdDev"),
               Column(CoeffVarResult(), CoeffVarFormat(), "StdDev/Mean"),
               Column(GmeanRatioResult(), RatioFormat(), "GmeanSpeedup"),
               Column(PValueResult(), PValueFormat(), "p-value")]
    if perf:
      return self._GetPerfTables(self.labels, columns, "full")
    return self._GetTables(self.labels, self.benchmark_runs, columns,
                           "full")

  def GetSummaryTables(self, perf=False):
    """Build per-benchmark summary tables (statistics only, no raw values).

    Args:
      perf: when True, build tables from perf event data instead of the
          benchmark results.

    Returns:
      A list of cell tables (alternating header table / data table).
    """
    columns = [Column(AmeanResult(), Format()),
               Column(StdResult(), Format(), "StdDev"),
               Column(CoeffVarResult(), CoeffVarFormat(), "StdDev/Mean"),
               Column(GmeanRatioResult(), RatioFormat(), "GmeanSpeedup"),
               Column(PValueResult(), PValueFormat(), "p-value")]
    if perf:
      return self._GetPerfTables(self.labels, columns, "summary")
    return self._GetTables(self.labels, self.benchmark_runs, columns,
                           "summary")

  def _ParseColumn(self, columns, iteration):
    """Expand any RawResult column into one literal column per iteration.

    Columns other than RawResult are passed through unchanged.
    """
    # TODO(asharif): tabulator should support full table natively.
    new_columns = []
    for column in columns:
      if column.result.__class__.__name__ == "RawResult":
        for i in range(iteration):
          new_columns.append(Column(LiteralResult(i), Format(), str(i + 1)))
      else:
        new_columns.append(column)
    return new_columns

  def _AreAllRunsEmpty(self, runs):
    """Return True iff every iteration dict of every label is empty."""
    return all(not iteration_result
               for label in runs
               for iteration_result in label)

  def _GetTableHeader(self, benchmark):
    """Build a one-cell header table naming the benchmark and iterations."""
    cell = Cell()
    cell.string_value = ("Benchmark:  {0};  Iterations: {1}"
                         .format(benchmark.name, benchmark.iterations))
    cell.header = True
    return [[cell]]

  def _GetTables(self, labels, benchmark_runs, columns, table_type):
    """Build header+data cell tables for every benchmark's results."""
    tables = []
    ro = ResultOrganizer(benchmark_runs, labels, self.benchmarks)
    result = ro.result
    label_name = ro.labels
    for item in result:
      # Look up the benchmark object by name.
      # NOTE(review): if no name matches, the last benchmark examined is
      # used silently -- presumably names always match; verify upstream.
      benchmark = None
      for benchmark in self.benchmarks:
        if benchmark.name == item:
          break
      ben_table = self._GetTableHeader(benchmark)

      if self._AreAllRunsEmpty(result[item]):
        cell = Cell()
        cell.string_value = ("This benchmark contains no result."
                             " Is the benchmark name valid?")
        cell_table = [[cell]]
      else:
        tg = TableGenerator(result[item], label_name)
        table = tg.GetTable()
        parsed_columns = self._ParseColumn(columns, benchmark.iterations)
        tf = TableFormatter(table, parsed_columns)
        cell_table = tf.GetCellTable(table_type)
      tables.append(ben_table)
      tables.append(cell_table)
    return tables

  def _GetPerfTables(self, labels, columns, table_type):
    """Build header+data cell tables from perf event data.

    Returns an empty list when the experiment produced no perf data.
    """
    tables = []
    label_names = [label.name for label in labels]
    p_table = PerfTable(self.experiment, label_names)

    if not p_table.perf_data:
      return tables

    for benchmark in p_table.perf_data:
      # Same name-lookup quirk as _GetTables: falls back to the last
      # benchmark if no name matches.
      ben = None
      for ben in self.benchmarks:
        if ben.name == benchmark:
          break

      tables.append(self._GetTableHeader(ben))
      benchmark_data = p_table.perf_data[benchmark]
      row_info = p_table.row_info[benchmark]
      for event in benchmark_data:
        tg = TableGenerator(benchmark_data[event], label_names,
                            sort=TableGenerator.SORT_BY_VALUES_DESC)
        # Show at least PERF_ROWS rows; more when the event has them.
        table = tg.GetTable(max(self.PERF_ROWS, row_info[event]))
        parsed_columns = self._ParseColumn(columns, ben.iterations)
        tf = TableFormatter(table, parsed_columns)
        tf.GenerateCellTable()
        tf.AddColumnName()
        tf.AddLabelName()
        tf.AddHeader(str(event))
        tables.append(tf.GetCellTable(table_type, headers=False))
    return tables

  def PrintTables(self, tables, out_to):
    """Render a list of cell tables in the requested output format.

    Args:
      tables: list of cell tables; a falsy value yields ''.
      out_to: one of 'HTML', 'PLAIN', 'CONSOLE', 'TSV' or 'EMAIL'.

    Returns:
      The concatenated rendered tables as a single string.

    Raises:
      ValueError: if out_to is not a recognized format.  (Previously an
          unknown format fell through an else-pass and crashed with an
          UnboundLocalError on tp.Print().)
    """
    output = ""
    if not tables:
      return output
    printer_modes = {
        "HTML": TablePrinter.HTML,
        "PLAIN": TablePrinter.PLAIN,
        "CONSOLE": TablePrinter.CONSOLE,
        "TSV": TablePrinter.TSV,
        "EMAIL": TablePrinter.EMAIL,
    }
    if out_to not in printer_modes:
      raise ValueError("Unknown output format: %s" % out_to)
    mode = printer_modes[out_to]
    for table in tables:
      output += TablePrinter(table, mode).Print()
    return output
249
250
class TextResultsReport(ResultsReport):
  """Class to generate text result report."""

  # Plain-text report skeleton.  The %s placeholders are filled by
  # GetReport() with, in order: experiment name, summary table, number of
  # re-images, status table, perf table, experiment file and CPU info.
  TEXT = """
===========================================
Results report for: '%s'
===========================================

-------------------------------------------
Summary
-------------------------------------------
%s


Number re-images: %s

-------------------------------------------
Benchmark Run Status
-------------------------------------------
%s


-------------------------------------------
Perf Data
-------------------------------------------
%s



Experiment File
-------------------------------------------
%s


CPUInfo
-------------------------------------------
%s
===========================================
"""

  def __init__(self, experiment, email=False):
    """Initialize the report.

    Args:
      experiment: the experiment to report on.
      email: when True, tables are rendered in EMAIL format; otherwise
          CONSOLE format.
    """
    super(TextResultsReport, self).__init__(experiment)
    self.email = email

  def GetStatusTable(self):
    """Generate the status table by the tabulator."""
    table = [["", ""]]
    columns = [Column(LiteralResult(iteration=0), Format(), "Status"),
               Column(LiteralResult(iteration=1), Format(), "Failing Reason")]

    for benchmark_run in self.benchmark_runs:
      status = [benchmark_run.name, [benchmark_run.timeline.GetLastEvent(),
                                     benchmark_run.failure_reason]]
      table.append(status)
    tf = TableFormatter(table, columns)
    return [tf.GetCellTable("status")]

  def GetReport(self):
    """Generate the report for email and console."""
    status_table = self.GetStatusTable()
    summary_table = self.GetSummaryTables()
    # PrintTables renders any falsy table list as '', so an empty perf
    # table naturally produces an empty Perf Data section.
    perf_table = self.GetSummaryTables(perf=True)
    # Pick the rendering format once; previously the whole 7-argument
    # format call was duplicated for the email and console cases.
    output_type = "EMAIL" if self.email else "CONSOLE"
    return self.TEXT % (self.experiment.name,
                        self.PrintTables(summary_table, output_type),
                        self.experiment.machine_manager.num_reimages,
                        self.PrintTables(status_table, output_type),
                        self.PrintTables(perf_table, output_type),
                        self.experiment.experiment_file,
                        self.experiment.machine_manager.GetAllCPUInfo(
                            self.experiment.labels))
333
334
class HTMLResultsReport(ResultsReport):
  """Class to generate html result report."""

  # Top-level page template.  The %s placeholders are filled positionally
  # by GetReport(), in this order: perf_init JS snippet, chart javascript,
  # summary table (HTML, plain, TSV), summary tab menu, perf section HTML,
  # chart divs, full table (HTML, plain, TSV), full tab menu, and the
  # experiment file text.  Keep this ordering in sync with GetReport().
  HTML = """
<html>
  <head>
    <style type="text/css">

body {
  font-family: "Lucida Sans Unicode", "Lucida Grande", Sans-Serif;
  font-size: 12px;
}

pre {
  margin: 10px;
  color: #039;
  font-size: 14px;
}

.chart {
  display: inline;
}

.hidden {
  visibility: hidden;
}

.results-section {
  border: 1px solid #b9c9fe;
  margin: 10px;
}

.results-section-title {
  background-color: #b9c9fe;
  color: #039;
  padding: 7px;
  font-size: 14px;
  width: 200px;
}

.results-section-content {
  margin: 10px;
  padding: 10px;
  overflow:auto;
}

#box-table-a {
  font-size: 12px;
  width: 480px;
  text-align: left;
  border-collapse: collapse;
}

#box-table-a th {
  padding: 6px;
  background: #b9c9fe;
  border-right: 1px solid #fff;
  border-bottom: 1px solid #fff;
  color: #039;
  text-align: center;
}

#box-table-a td {
  padding: 4px;
  background: #e8edff;
  border-bottom: 1px solid #fff;
  border-right: 1px solid #fff;
  color: #669;
  border-top: 1px solid transparent;
}

#box-table-a tr:hover td {
  background: #d0dafd;
  color: #339;
}

    </style>
    <script type='text/javascript' src='https://www.google.com/jsapi'></script>
    <script type='text/javascript'>
      google.load('visualization', '1', {packages:['corechart']});
      google.setOnLoadCallback(init);
      function init() {
        switchTab('summary', 'html');
        %s
        switchTab('full', 'html');
        drawTable();
      }
      function drawTable() {
        %s
      }
      function switchTab(table, tab) {
        document.getElementById(table + '-html').style.display = 'none';
        document.getElementById(table + '-text').style.display = 'none';
        document.getElementById(table + '-tsv').style.display = 'none';
        document.getElementById(table + '-' + tab).style.display = 'block';
      }
    </script>
  </head>

  <body>
    <div class='results-section'>
      <div class='results-section-title'>Summary Table</div>
      <div class='results-section-content'>
        <div id='summary-html'>%s</div>
        <div id='summary-text'><pre>%s</pre></div>
        <div id='summary-tsv'><pre>%s</pre></div>
      </div>
      %s
    </div>
    %s
    <div class='results-section'>
      <div class='results-section-title'>Charts</div>
      <div class='results-section-content'>%s</div>
    </div>
    <div class='results-section'>
      <div class='results-section-title'>Full Table</div>
      <div class='results-section-content'>
        <div id='full-html'>%s</div>
        <div id='full-text'><pre>%s</pre></div>
        <div id='full-tsv'><pre>%s</pre></div>
      </div>
      %s
    </div>
    <div class='results-section'>
      <div class='results-section-title'>Experiment File</div>
      <div class='results-section-content'>
        <pre>%s</pre>
    </div>
    </div>
  </body>
</html>
"""

  # Template for the optional perf section: HTML/plain/TSV renderings of
  # the perf table plus its tab menu.
  PERF_HTML = """
    <div class='results-section'>
      <div class='results-section-title'>Perf Table</div>
      <div class='results-section-content'>
        <div id='perf-html'>%s</div>
        <div id='perf-text'><pre>%s</pre></div>
        <div id='perf-tsv'><pre>%s</pre></div>
      </div>
      %s
    </div>
"""

  def __init__(self, experiment):
    super(HTMLResultsReport, self).__init__(experiment)

  def _GetTabMenuHTML(self, table):
    """Return the HTML/Text/TSV tab links for the given table id."""
    return """
<div class='tab-menu'>
  <a href="javascript:switchTab('%s', 'html')">HTML</a>
  <a href="javascript:switchTab('%s', 'text')">Text</a>
  <a href="javascript:switchTab('%s', 'tsv')">TSV</a>
</div>""" % (table, table, table)

  def GetReport(self):
    """Render the complete HTML report string."""
    # Collect the Google Charts javascript and the matching <div> anchors
    # for every chart.
    chart_javascript = ""
    charts = self._GetCharts(self.labels, self.benchmark_runs)
    for chart in charts:
      chart_javascript += chart.GetJavascript()
    chart_divs = ""
    for chart in charts:
      chart_divs += chart.GetDiv()

    summary_table = self.GetSummaryTables()
    full_table = self.GetFullTables()
    perf_table = self.GetSummaryTables(perf=True)
    # The perf section (and its tab-switch init JS) is emitted only when
    # the experiment produced perf data.
    if perf_table:
      perf_html = self.PERF_HTML % (
          self.PrintTables(perf_table, "HTML"),
          self.PrintTables(perf_table, "PLAIN"),
          self.PrintTables(perf_table, "TSV"),
          self._GetTabMenuHTML("perf")
          )
      perf_init = "switchTab('perf', 'html');"
    else:
      perf_html = ""
      perf_init = ""

    # Argument order here must match the %s order documented on HTML above.
    return self.HTML % (perf_init,
                        chart_javascript,
                        self.PrintTables(summary_table, "HTML"),
                        self.PrintTables(summary_table, "PLAIN"),
                        self.PrintTables(summary_table, "TSV"),
                        self._GetTabMenuHTML("summary"),
                        perf_html,
                        chart_divs,
                        self.PrintTables(full_table, "HTML"),
                        self.PrintTables(full_table, "PLAIN"),
                        self.PrintTables(full_table, "TSV"),
                        self._GetTabMenuHTML("full"),
                        self.experiment.experiment_file)

  def _GetCharts(self, labels, benchmark_runs):
    """Build one ColumnChart per test key with Average/Min/Max per label."""
    charts = []
    ro = ResultOrganizer(benchmark_runs, labels)
    result = ro.result
    for item in result:
      runs = result[item]
      tg = TableGenerator(runs, ro.labels)
      table = tg.GetTable()
      columns = [Column(AmeanResult(),
                        Format()),
                 Column(MinResult(),
                        Format()),
                 Column(MaxResult(),
                        Format())
                ]
      tf = TableFormatter(table, columns)
      data_table = tf.GetCellTable("full")

      # Skip the first two rows; presumably they are header rows of the
      # cell table -- TODO confirm against TableFormatter.
      for i in range(2, len(data_table)):
        cur_row_data = data_table[i]
        test_key = cur_row_data[0].string_value
        title = "{0}: {1}".format(item, test_key.replace("/", ""))
        chart = ColumnChart(title, 300, 200)
        chart.AddColumn("Label", "string")
        chart.AddColumn("Average", "number")
        chart.AddColumn("Min", "number")
        chart.AddColumn("Max", "number")
        chart.AddSeries("Min", "line", "black")
        chart.AddSeries("Max", "line", "black")
        # Each label contributes 3 consecutive cells (avg, min, max).
        cur_index = 1
        for label in ro.labels:
          chart.AddRow([label, cur_row_data[cur_index].value,
                        cur_row_data[cur_index + 1].value,
                        cur_row_data[cur_index + 2].value])
          # A string value means there is no numeric result for this
          # label; the whole chart is dropped.
          if isinstance(cur_row_data[cur_index].value, str):
            chart = None
            break
          cur_index += 3
        if chart:
          charts.append(chart)
    return charts
570
class JSONResultsReport(ResultsReport):
  """Class to generate a JSON results report archive."""

  def __init__(self, experiment, date=None, time=None):
    """Initialize the report.

    Args:
      experiment: the experiment to report on.
      date: report date as 'YYYY-MM-DD'; defaults to "now".
      time: report time as 'HH:MM:SS'; defaults to "now".
    """
    super(JSONResultsReport, self).__init__(experiment)
    self.ro = ResultOrganizer(experiment.benchmark_runs,
                              experiment.labels,
                              experiment.benchmarks,
                              json_report=True)
    self.date = date
    self.time = time
    self.defaults = TelemetryDefaults()
    # Stamp the report with "now" when either part of the timestamp is
    # missing (previously only a missing date was handled, and a missing
    # time alone crashed later on self.time.replace).
    if not self.date or not self.time:
      timestamp = datetime.datetime.strftime(datetime.datetime.now(),
                                             "%Y-%m-%d %H:%M:%S")
      date, time = timestamp.split(" ")
      self.date = date
      self.time = time

  def GetReport(self, results_dir):
    """Write the per-iteration JSON results archive into results_dir.

    The output filename encodes board, date, time and the compiler
    ('llvm' if any label used it, else 'gcc').
    """
    self.defaults.ReadDefaultsFile()
    final_results = []
    board = self.experiment.labels[0].board
    # 'gcc' unless any label was built with LLVM.  Initialized outside the
    # loops so that (a) the filename computation below never sees an
    # undefined name when there are no results, and (b) it reflects *any*
    # llvm label instead of only the last one processed (the previous
    # per-iteration reset broke the "any label" semantics).
    compiler_string = 'gcc'
    for test, test_results in self.ro.result.items():
      for i, label in enumerate(self.ro.labels):
        label_results = test_results[i]
        for iter_results in label_results:
          json_results = dict()
          json_results['date'] = self.date
          json_results['time'] = self.time
          json_results['board'] = board
          json_results['label'] = label
          common_checksum = ''
          common_string = ''
          # Fill in image/version/compiler details from the matching label.
          for lab in self.experiment.labels:
            if lab.name != label:
              continue
            ver, img = ParseChromeosImage(lab.chromeos_image)
            json_results['chromeos_image'] = img
            json_results['chromeos_version'] = ver
            json_results['chrome_version'] = lab.chrome_version
            json_results['compiler'] = lab.compiler
            # If any of the labels used the LLVM compiler, we will add
            # ".llvm" to the json report filename. (Otherwise we use .gcc).
            if 'llvm' in lab.compiler:
              compiler_string = 'llvm'
            machine_manager = self.experiment.machine_manager
            common_checksum = machine_manager.machine_checksum[lab.name]
            common_string = machine_manager.machine_checksum_string[lab.name]
            break
          json_results['test_name'] = test
          if not iter_results or iter_results['retval'] != 0:
            json_results['pass'] = False
          else:
            json_results['pass'] = True
            # Get overall results for tests that have registered defaults.
            if test in self.defaults.GetDefault():
              default_result_fields = self.defaults.GetDefault()[test]
              value = []
              for f in default_result_fields:
                v = iter_results[f]
                if isinstance(v, list):
                  v = v[0]
                value.append((f, float(v)))
              json_results['overall_result'] = value
            # Get detailed results: numeric values keyed by result name;
            # machine-related entries go straight into json_results.
            detail_results = dict()
            for k, v in iter_results.items():
              if k == 'retval':
                continue
              if isinstance(v, list):
                v = v[0]
              if v != 'PASS':
                if 'machine' not in k:
                  # NOTE(review): assumes non-machine values are numeric
                  # strings -- float() raises otherwise; verify upstream.
                  detail_results[k] = float(v)
                else:
                  json_results[k] = v
            if 'machine_checksum' not in json_results:
              json_results['machine_checksum'] = common_checksum
            if 'machine_string' not in json_results:
              json_results['machine_string'] = common_string
            json_results['detailed_results'] = detail_results
          final_results.append(json_results)

    filename = "report_%s_%s_%s.%s.json" % (board, self.date,
                                            self.time.replace(':', '.'),
                                            compiler_string)
    fullname = os.path.join(results_dir, filename)
    with open(fullname, "w") as fp:
      json.dump(final_results, fp, indent=2)
662