results_report.py revision 89d263c7cf9773129cbe8e8858ad21ea539a2ba0
1#!/usr/bin/python
2
3# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
4# Use of this source code is governed by a BSD-style license that can be
5# found in the LICENSE file.
6
7import datetime
8import json
9import os
10
11from utils.tabulator import *
12
13from update_telemetry_defaults import TelemetryDefaults
14from column_chart import ColumnChart
15from results_organizer import ResultOrganizer
16from perf_table import PerfTable
17
18
def ParseChromeosImage(chromeos_image):
  """Parse the chromeos_image string for the image and version.

  The chromeos_image string will probably be in one of two formats:
  1: <path-to-chroot>/src/build/images/<board>/<ChromeOS-version>.<datetime>/ \
     chromiumos_test_image.bin
  2: <path-to-chroot>/chroot/tmp/<buildbot-build>/<ChromeOS-version>/ \
      chromiumos_test_image.bin

  We parse these strings to find the 'chromeos_version' to store in the
  json archive (without the .datetime bit in the first case); and also
  the 'chromeos_image', which would be all of the first case, but only the
  part after '/chroot/tmp' in the second case.

  Args:
    chromeos_image:  String containing the path to the chromeos_image that
      crosperf used for the test.

  Returns:
    version, image:  The results of parsing the input string, as explained
      above.
  """
  version = ''
  real_file = os.path.realpath(os.path.expanduser(chromeos_image))
  pieces = real_file.split('/')
  # Find the Chromeos Version, e.g. R45-2345.0.0.....
  # chromeos_image should have been something like:
  # <path>/<board-trybot-release>/<chromeos-version>/chromiumos_test_image.bin"
  if pieces[-1] == "chromiumos_test_image.bin":
    version = pieces[-2]
    # Find last '.' in the version and chop it off (removing the .datetime
    # piece from local builds).  If there is no '.' at all, keep the version
    # unchanged: the old code used version[:rfind(...)] unconditionally,
    # which silently dropped the last character when rfind returned -1.
    loc = version.rfind('.')
    if loc != -1:
      version = version[:loc]
  # Find the chromeos image.  If it's somewhere in .../chroot/tmp/..., then
  # it's an official image that got downloaded, so chop off the download path
  # to make the official image name more clear.
  loc = real_file.find('/chroot/tmp')
  if loc != -1:
    loc += len('/chroot/tmp')
    real_file = real_file[loc:]
  image = real_file
  return version, image
63
class ResultsReport(object):
  """Base class for reports on an experiment's benchmark results.

  Holds the experiment's runs/labels/benchmarks and builds the summary,
  full and perf tables that subclasses render in various formats.
  """
  MAX_COLOR_CODE = 255
  # Minimum number of data rows shown for each event in a perf table.
  PERF_ROWS = 5

  def __init__(self, experiment):
    self.experiment = experiment
    self.benchmark_runs = experiment.benchmark_runs
    self.labels = experiment.labels
    self.benchmarks = experiment.benchmarks
    # The first label is the baseline the other labels compare against.
    self.baseline = self.labels[0]

  def _SortByLabel(self, runs):
    """Group benchmark runs into a dict keyed by their label name."""
    labels = {}
    for benchmark_run in runs:
      if benchmark_run.label_name not in labels:
        labels[benchmark_run.label_name] = []
      labels[benchmark_run.label_name].append(benchmark_run)
    return labels

  def GetFullTables(self, perf=False):
    """Build the full (per-iteration) result tables.

    Args:
      perf: If True, build the tables from perf data instead of
        benchmark results.

    Returns:
      A list of cell tables suitable for PrintTables().
    """
    columns = [Column(RawResult(),
                      Format()),
               Column(MinResult(),
                      Format()),
               Column(MaxResult(),
                      Format()),
               Column(AmeanResult(),
                      Format()),
               Column(StdResult(),
                      Format(), "StdDev"),
               Column(CoeffVarResult(),
                      CoeffVarFormat(), "StdDev/Mean"),
               Column(GmeanRatioResult(),
                      RatioFormat(), "GmeanSpeedup"),
               Column(PValueResult(),
                      PValueFormat(), "p-value")
              ]
    if not perf:
      return self._GetTables(self.labels, self.benchmark_runs, columns,
                             "full")
    return self._GetPerfTables(self.labels, columns, "full")

  def GetSummaryTables(self, perf=False):
    """Build the summary (aggregated) result tables.

    Args:
      perf: If True, build the tables from perf data instead of
        benchmark results.

    Returns:
      A list of cell tables suitable for PrintTables().
    """
    columns = [Column(AmeanResult(),
                      Format()),
               Column(StdResult(),
                      Format(), "StdDev"),
               Column(CoeffVarResult(),
                      CoeffVarFormat(), "StdDev/Mean"),
               Column(GmeanRatioResult(),
                      RatioFormat(), "GmeanSpeedup"),
               Column(PValueResult(),
                      PValueFormat(), "p-value")
              ]
    if not perf:
      return self._GetTables(self.labels, self.benchmark_runs, columns,
                             "summary")
    return self._GetPerfTables(self.labels, columns, "summary")

  def _ParseColumn(self, columns, iteration):
    """Expand RawResult columns into per-iteration LiteralResult columns."""
    # TODO(asharif): tabulator should support full table natively.
    new_column = []
    for column in columns:
      if column.result.__class__.__name__ != "RawResult":
        new_column.append(column)
      else:
        # Replace the single raw column with one column per iteration,
        # labeled "1" .. "<iteration>".
        for i in range(iteration):
          new_column.append(Column(LiteralResult(i), Format(), str(i + 1)))
    return new_column

  def _AreAllRunsEmpty(self, runs):
    """Return True if no run dictionary in any label contains results."""
    for label in runs:
      for dictionary in label:
        if dictionary:
          return False
    return True

  def _GetTableHeader(self, benchmark):
    """Build a one-cell header table naming the benchmark and iterations."""
    benchmark_info = ("Benchmark:  {0};  Iterations: {1}"
                      .format(benchmark.name, benchmark.iterations))
    cell = Cell()
    cell.string_value = benchmark_info
    cell.header = True
    return [[cell]]

  def _GetTables(self, labels, benchmark_runs, columns, table_type):
    """Build a header table plus a data table for every benchmark.

    Args:
      labels: The experiment's labels.
      benchmark_runs: All benchmark runs of the experiment.
      columns: Column descriptions for the tabulator.
      table_type: Passed to TableFormatter.GetCellTable ("full"/"summary").

    Returns:
      A flat list alternating header tables and cell tables.
    """
    tables = []
    ro = ResultOrganizer(benchmark_runs, labels, self.benchmarks)
    result = ro.result
    label_name = ro.labels
    for item in result:
      runs = result[item]
      # Find the benchmark object matching this result name.  NOTE: if no
      # name matches, the last benchmark examined is used (pre-existing
      # behavior, kept as-is).
      for benchmark in self.benchmarks:
        if benchmark.name == item:
          break
      ben_table = self._GetTableHeader(benchmark)

      if self._AreAllRunsEmpty(runs):
        cell = Cell()
        cell.string_value = ("This benchmark contains no result."
                             " Is the benchmark name valid?")
        cell_table = [[cell]]
      else:
        tg = TableGenerator(runs, label_name)
        table = tg.GetTable()
        parsed_columns = self._ParseColumn(columns, benchmark.iterations)
        tf = TableFormatter(table, parsed_columns)
        cell_table = tf.GetCellTable(table_type)
      tables.append(ben_table)
      tables.append(cell_table)
    return tables

  def _GetPerfTables(self, labels, columns, table_type):
    """Build per-event perf data tables for every benchmark.

    Args:
      labels: The experiment's labels.
      columns: Column descriptions for the tabulator.
      table_type: Passed to TableFormatter.GetCellTable ("full"/"summary").

    Returns:
      A flat list of header tables and per-event cell tables; empty when
      no perf data was collected.
    """
    tables = []
    label_names = [label.name for label in labels]
    p_table = PerfTable(self.experiment, label_names)

    if not p_table.perf_data:
      return tables

    for benchmark in p_table.perf_data:
      # Find the benchmark object for this perf entry (last one examined
      # is used if no name matches -- pre-existing behavior).
      ben = None
      for ben in self.benchmarks:
        if ben.name == benchmark:
          break

      ben_table = self._GetTableHeader(ben)
      tables.append(ben_table)
      benchmark_data = p_table.perf_data[benchmark]
      row_info = p_table.row_info[benchmark]
      table = []
      for event in benchmark_data:
        tg = TableGenerator(benchmark_data[event], label_names,
                            sort=TableGenerator.SORT_BY_VALUES_DESC)
        # Show at least PERF_ROWS rows, more if the event has them.
        table = tg.GetTable(max(self.PERF_ROWS, row_info[event]))
        parsed_columns = self._ParseColumn(columns, ben.iterations)
        tf = TableFormatter(table, parsed_columns)
        tf.GenerateCellTable()
        tf.AddColumnName()
        tf.AddLabelName()
        tf.AddHeader(str(event))
        table = tf.GetCellTable(table_type, headers=False)
        tables.append(table)
    return tables

  def PrintTables(self, tables, out_to):
    """Render a list of cell tables as a single string.

    Args:
      tables: The cell tables to print; None or empty yields "".
      out_to: One of "HTML", "PLAIN", "CONSOLE", "TSV" or "EMAIL".

    Returns:
      The concatenated TablePrinter output for all tables.

    Raises:
      ValueError: If out_to is not a recognized format.  (The old code
        silently fell through on unknown formats and then either raised
        NameError or reused the previous iteration's printer.)
    """
    output = ""
    if not tables:
      return output
    known_formats = {"HTML": TablePrinter.HTML,
                     "PLAIN": TablePrinter.PLAIN,
                     "CONSOLE": TablePrinter.CONSOLE,
                     "TSV": TablePrinter.TSV,
                     "EMAIL": TablePrinter.EMAIL}
    if out_to not in known_formats:
      raise ValueError("Unknown output format: %s" % out_to)
    for table in tables:
      tp = TablePrinter(table, known_formats[out_to])
      output += tp.Print()
    return output
229
230
class TextResultsReport(ResultsReport):
  """Renders the experiment results as a plain-text report.

  Used both for console output and for the body of result emails; the
  only difference between the two is the table rendering mode.
  """
  TEXT = """
===========================================
Results report for: '%s'
===========================================

-------------------------------------------
Summary
-------------------------------------------
%s


Number re-images: %s

-------------------------------------------
Benchmark Run Status
-------------------------------------------
%s


-------------------------------------------
Perf Data
-------------------------------------------
%s



Experiment File
-------------------------------------------
%s


CPUInfo
-------------------------------------------
%s
===========================================
"""

  def __init__(self, experiment, email=False):
    super(TextResultsReport, self).__init__(experiment)
    # When True, tables are rendered in EMAIL mode rather than CONSOLE.
    self.email = email

  def GetStatusTable(self):
    """Generate the status table by the tabulator."""
    columns = [Column(LiteralResult(iteration=0), Format(), "Status"),
               Column(LiteralResult(iteration=1), Format(), "Failing Reason")]

    # First row is a blank placeholder; each following row pairs a run's
    # name with its [last event, failure reason] literal values.
    table = [["", ""]]
    for run in self.benchmark_runs:
      table.append([run.name,
                    [run.timeline.GetLastEvent(), run.failure_reason]])
    cell_table = TableFormatter(table, columns).GetCellTable("status")
    return [cell_table]

  def GetReport(self):
    """Generate the report for email and console."""
    status_table = self.GetStatusTable()
    summary_table = self.GetSummaryTables()
    full_table = self.GetFullTables()
    perf_table = self.GetSummaryTables(perf=True)
    if not perf_table:
      perf_table = None
    # Console and email reports differ only in the table rendering mode.
    mode = "EMAIL" if self.email else "CONSOLE"
    return self.TEXT % (self.experiment.name,
                        self.PrintTables(summary_table, mode),
                        self.experiment.machine_manager.num_reimages,
                        self.PrintTables(status_table, mode),
                        self.PrintTables(perf_table, mode),
                        self.experiment.experiment_file,
                        self.experiment.machine_manager.GetAllCPUInfo(
                            self.experiment.labels))
313
314
class HTMLResultsReport(ResultsReport):
  """Renders the experiment results as a standalone HTML page.

  The HTML template below is filled positionally with the '%' operator in
  GetReport(); see the comment there for the exact slot order.
  """

  HTML = """
<html>
  <head>
    <style type="text/css">

body {
  font-family: "Lucida Sans Unicode", "Lucida Grande", Sans-Serif;
  font-size: 12px;
}

pre {
  margin: 10px;
  color: #039;
  font-size: 14px;
}

.chart {
  display: inline;
}

.hidden {
  visibility: hidden;
}

.results-section {
  border: 1px solid #b9c9fe;
  margin: 10px;
}

.results-section-title {
  background-color: #b9c9fe;
  color: #039;
  padding: 7px;
  font-size: 14px;
  width: 200px;
}

.results-section-content {
  margin: 10px;
  padding: 10px;
  overflow:auto;
}

#box-table-a {
  font-size: 12px;
  width: 480px;
  text-align: left;
  border-collapse: collapse;
}

#box-table-a th {
  padding: 6px;
  background: #b9c9fe;
  border-right: 1px solid #fff;
  border-bottom: 1px solid #fff;
  color: #039;
  text-align: center;
}

#box-table-a td {
  padding: 4px;
  background: #e8edff;
  border-bottom: 1px solid #fff;
  border-right: 1px solid #fff;
  color: #669;
  border-top: 1px solid transparent;
}

#box-table-a tr:hover td {
  background: #d0dafd;
  color: #339;
}

    </style>
    <script type='text/javascript' src='https://www.google.com/jsapi'></script>
    <script type='text/javascript'>
      google.load('visualization', '1', {packages:['corechart']});
      google.setOnLoadCallback(init);
      function init() {
        switchTab('summary', 'html');
        %s
        switchTab('full', 'html');
        drawTable();
      }
      function drawTable() {
        %s
      }
      function switchTab(table, tab) {
        document.getElementById(table + '-html').style.display = 'none';
        document.getElementById(table + '-text').style.display = 'none';
        document.getElementById(table + '-tsv').style.display = 'none';
        document.getElementById(table + '-' + tab).style.display = 'block';
      }
    </script>
  </head>

  <body>
    <div class='results-section'>
      <div class='results-section-title'>Summary Table</div>
      <div class='results-section-content'>
        <div id='summary-html'>%s</div>
        <div id='summary-text'><pre>%s</pre></div>
        <div id='summary-tsv'><pre>%s</pre></div>
      </div>
      %s
    </div>
    %s
    <div class='results-section'>
      <div class='results-section-title'>Charts</div>
      <div class='results-section-content'>%s</div>
    </div>
    <div class='results-section'>
      <div class='results-section-title'>Full Table</div>
      <div class='results-section-content'>
        <div id='full-html'>%s</div>
        <div id='full-text'><pre>%s</pre></div>
        <div id='full-tsv'><pre>%s</pre></div>
      </div>
      %s
    </div>
    <div class='results-section'>
      <div class='results-section-title'>Experiment File</div>
      <div class='results-section-content'>
        <pre>%s</pre>
    </div>
    </div>
  </body>
</html>
"""

  # Optional "Perf Table" section; only spliced into HTML when perf data
  # exists.  Slots: HTML table, plain-text table, TSV table, tab menu.
  PERF_HTML = """
    <div class='results-section'>
      <div class='results-section-title'>Perf Table</div>
      <div class='results-section-content'>
        <div id='perf-html'>%s</div>
        <div id='perf-text'><pre>%s</pre></div>
        <div id='perf-tsv'><pre>%s</pre></div>
      </div>
      %s
    </div>
"""

  def __init__(self, experiment):
    super(HTMLResultsReport, self).__init__(experiment)

  def _GetTabMenuHTML(self, table):
    """Build the HTML/Text/TSV tab-switcher links for a named table."""
    return """
<div class='tab-menu'>
  <a href="javascript:switchTab('%s', 'html')">HTML</a>
  <a href="javascript:switchTab('%s', 'text')">Text</a>
  <a href="javascript:switchTab('%s', 'tsv')">TSV</a>
</div>""" % (table, table, table)

  def GetReport(self):
    """Assemble and return the complete HTML report as a string.

    Template slot order: perf init JS, chart-drawing JS, summary table
    (HTML/PLAIN/TSV), summary tab menu, perf section, chart divs, full
    table (HTML/PLAIN/TSV), full tab menu, experiment file text.
    """
    chart_javascript = ""
    charts = self._GetCharts(self.labels, self.benchmark_runs)
    for chart in charts:
      chart_javascript += chart.GetJavascript()
    chart_divs = ""
    for chart in charts:
      chart_divs += chart.GetDiv()

    summary_table = self.GetSummaryTables()
    full_table = self.GetFullTables()
    perf_table = self.GetSummaryTables(perf=True)
    if perf_table:
      perf_html = self.PERF_HTML % (
          self.PrintTables(perf_table, "HTML"),
          self.PrintTables(perf_table, "PLAIN"),
          self.PrintTables(perf_table, "TSV"),
          self._GetTabMenuHTML("perf")
          )
      perf_init = "switchTab('perf', 'html');"
    else:
      # No perf data: omit the section and its init JS entirely.
      perf_html = ""
      perf_init = ""

    return self.HTML % (perf_init,
                        chart_javascript,
                        self.PrintTables(summary_table, "HTML"),
                        self.PrintTables(summary_table, "PLAIN"),
                        self.PrintTables(summary_table, "TSV"),
                        self._GetTabMenuHTML("summary"),
                        perf_html,
                        chart_divs,
                        self.PrintTables(full_table, "HTML"),
                        self.PrintTables(full_table, "PLAIN"),
                        self.PrintTables(full_table, "TSV"),
                        self._GetTabMenuHTML("full"),
                        self.experiment.experiment_file)

  def _GetCharts(self, labels, benchmark_runs):
    """Build one ColumnChart per test key, plotting Avg/Min/Max per label.

    Returns:
      A list of ColumnChart objects; charts whose data contains a string
      (i.e. non-numeric) value are dropped.
    """
    charts = []
    ro = ResultOrganizer(benchmark_runs, labels)
    result = ro.result
    for item in result:
      runs = result[item]
      tg = TableGenerator(runs, ro.labels)
      table = tg.GetTable()
      columns = [Column(AmeanResult(),
                        Format()),
                 Column(MinResult(),
                        Format()),
                 Column(MaxResult(),
                        Format())
                ]
      tf = TableFormatter(table, columns)
      data_table = tf.GetCellTable("full")

      # Iteration starts at row 2 -- presumably the first two rows are
      # headers produced by the formatter (TODO: confirm in tabulator).
      for i in range(2, len(data_table)):
        cur_row_data = data_table[i]
        test_key = cur_row_data[0].string_value
        title = "{0}: {1}".format(item, test_key.replace("/", ""))
        chart = ColumnChart(title, 300, 200)
        chart.AddColumn("Label", "string")
        chart.AddColumn("Average", "number")
        chart.AddColumn("Min", "number")
        chart.AddColumn("Max", "number")
        chart.AddSeries("Min", "line", "black")
        chart.AddSeries("Max", "line", "black")
        # Each label contributes a block of three value cells
        # (Average, Min, Max) starting at column 1.
        cur_index = 1
        for label in ro.labels:
          chart.AddRow([label, cur_row_data[cur_index].value,
                        cur_row_data[cur_index + 1].value,
                        cur_row_data[cur_index + 2].value])
          # A string value means no numeric result for this label;
          # discard the whole chart instead of plotting bogus data.
          if isinstance(cur_row_data[cur_index].value, str):
            chart = None
            break
          cur_index += 3
        if chart:
          charts.append(chart)
    return charts
549
class JSONResultsReport(ResultsReport):
  """Writes the experiment results into a JSON archive file."""

  def __init__(self, experiment, date=None, time=None):
    super(JSONResultsReport, self).__init__(experiment)
    self.ro = ResultOrganizer(experiment.benchmark_runs,
                              experiment.labels,
                              experiment.benchmarks,
                              json_report=True)
    self.date = date
    self.time = time
    self.defaults = TelemetryDefaults()
    # NOTE(review): assumes a caller supplying `date` also supplies
    # `time`; if only `date` is given, self.time stays None.
    if not self.date:
      timestamp = datetime.datetime.strftime(datetime.datetime.now(),
                                             "%Y-%m-%d %H:%M:%S")
      date, time = timestamp.split(" ")
      self.date = date
      self.time = time

  def GetReport(self, results_dir):
    """Write a report_<board>_<date>_<time>.<compiler>.json file.

    One JSON record is emitted per (test, label, iteration) with the
    board/label/image metadata plus pass/fail, overall and detailed
    results.

    Args:
      results_dir: Directory in which to write the JSON report file.
    """
    self.defaults.ReadDefaultsFile()
    final_results = []
    board = self.experiment.labels[0].board
    # If any label used the LLVM compiler the report filename gets an
    # ".llvm" suffix, otherwise ".gcc".  This must be sticky across all
    # tests/labels, so initialize it once outside the loops (the old code
    # reset it per iteration, so the filename only reflected the last
    # iteration's label -- and raised NameError when there were no
    # results at all).
    compiler_string = 'gcc'
    for test, test_results in self.ro.result.items():
      for i, label in enumerate(self.ro.labels):
        label_results = test_results[i]
        for iter_results in label_results:
          json_results = dict()
          json_results['date'] = self.date
          json_results['time'] = self.time
          json_results['board'] = board
          json_results['label'] = label
          common_checksum = ''
          common_string = ''
          # Pull image/version/compiler metadata from the matching label.
          for l in self.experiment.labels:
            if l.name == label:
              ver, img = ParseChromeosImage(l.chromeos_image)
              json_results['chromeos_image'] = img
              json_results['chromeos_version'] = ver
              json_results['chrome_version'] = l.chrome_version
              json_results['compiler'] = l.compiler
              if 'llvm' in l.compiler:
                compiler_string = 'llvm'
              common_checksum = \
                self.experiment.machine_manager.machine_checksum[l.name]
              common_string = \
                self.experiment.machine_manager.machine_checksum_string[l.name]
              break
          json_results['test_name'] = test
          if not iter_results or iter_results['retval'] != 0:
            json_results['pass'] = False
          else:
            json_results['pass'] = True
            # Get overall results.
            if test in self.defaults._defaults:
              default_result_fields = self.defaults._defaults[test]
              value = []
              for f in default_result_fields:
                v = iter_results[f]
                if isinstance(v, list):
                  v = v[0]
                value.append((f, float(v)))
              json_results['overall_result'] = value
            # Get detailed results.
            detail_results = dict()
            for k in iter_results.keys():
              if k != 'retval':
                v = iter_results[k]
                if isinstance(v, list):
                  v = v[0]
                if v != 'PASS':
                  # Machine-related entries are kept verbatim at the top
                  # level; everything else is a numeric result.
                  if k.find('machine') == -1:
                    detail_results[k] = float(v)
                  else:
                    json_results[k] = v
            if 'machine_checksum' not in json_results:
              json_results['machine_checksum'] = common_checksum
            if 'machine_string' not in json_results:
              json_results['machine_string'] = common_string
            json_results['detailed_results'] = detail_results
          final_results.append(json_results)

    filename = "report_%s_%s_%s.%s.json" % (board, self.date,
                                            self.time.replace(':', '.'),
                                            compiler_string)
    fullname = os.path.join(results_dir, filename)
    with open(fullname, "w") as fp:
      json.dump(final_results, fp, indent=2)
642