results_report.py revision 74d85cf83860538fcf57c413cf8ae416baa0ef51
1# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
2# Use of this source code is governed by a BSD-style license that can be
3# found in the LICENSE file.
4"""A module to handle the report format."""
5from __future__ import print_function
6
7import datetime
8import json
9import os
10
11from cros_utils.tabulator import AmeanResult
12from cros_utils.tabulator import Cell
13from cros_utils.tabulator import CoeffVarFormat
14from cros_utils.tabulator import CoeffVarResult
15from cros_utils.tabulator import Column
16from cros_utils.tabulator import Format
17from cros_utils.tabulator import GmeanRatioResult
18from cros_utils.tabulator import LiteralResult
19from cros_utils.tabulator import MaxResult
20from cros_utils.tabulator import MinResult
21from cros_utils.tabulator import PValueFormat
22from cros_utils.tabulator import PValueResult
23from cros_utils.tabulator import RatioFormat
24from cros_utils.tabulator import RawResult
25from cros_utils.tabulator import StdResult
26from cros_utils.tabulator import TableFormatter
27from cros_utils.tabulator import TableGenerator
28from cros_utils.tabulator import TablePrinter
29from update_telemetry_defaults import TelemetryDefaults
30
31from column_chart import ColumnChart
32from results_organizer import ResultOrganizer
33from perf_table import PerfTable
34
35
def ParseChromeosImage(chromeos_image):
  """Parse the chromeos_image string for the image and version.

  The chromeos_image string will probably be in one of two formats:
  1: <path-to-chroot>/src/build/images/<board>/<ChromeOS-version>.<datetime>/ \
     chromiumos_test_image.bin
  2: <path-to-chroot>/chroot/tmp/<buildbot-build>/<ChromeOS-version>/ \
      chromiumos_test_image.bin

  We parse these strings to find the 'chromeos_version' to store in the
  json archive (without the .datetime bit in the first case); and also
  the 'chromeos_image', which would be all of the first case, but only the
  part after '/chroot/tmp' in the second case.

  Args:
      chromeos_image: string containing the path to the chromeos_image that
      crosperf used for the test.

  Returns:
      version, image: The results of parsing the input string, as explained
      above.  version is '' when the path does not end in
      'chromiumos_test_image.bin'.
  """
  version = ''
  real_file = os.path.realpath(os.path.expanduser(chromeos_image))
  pieces = real_file.split('/')
  # Find the Chromeos Version, e.g. R45-2345.0.0.....
  # chromeos_image should have been something like:
  # <path>/<board-trybot-release>/<chromeos-version>/chromiumos_test_image.bin"
  if pieces[-1] == 'chromiumos_test_image.bin':
    # realpath always yields an absolute path, so there are >= 2 pieces.
    version = pieces[-2]
    # Find last '.' in the version and chop it off (removing the .datetime
    # piece from local builds).  A version containing no '.' is kept intact;
    # previously rfind's -1 caused the last character to be dropped.
    loc = version.rfind('.')
    if loc != -1:
      version = version[:loc]
  # Find the chromeos image.  If it's somewhere in .../chroot/tmp/..., then
  # it's an official image that got downloaded, so chop off the download path
  # to make the official image name more clear.
  loc = real_file.find('/chroot/tmp')
  if loc != -1:
    loc += len('/chroot/tmp')
    real_file = real_file[loc:]
  image = real_file
  return version, image
80
81
class ResultsReport(object):
  """Class to handle the report format.

  Builds tabulator cell tables from an experiment's benchmark runs;
  subclasses render those tables as text, HTML or JSON.
  """
  MAX_COLOR_CODE = 255
  # Minimum number of rows shown in each perf-event table.
  PERF_ROWS = 5

  def __init__(self, experiment):
    self.experiment = experiment
    self.benchmark_runs = experiment.benchmark_runs
    self.labels = experiment.labels
    self.benchmarks = experiment.benchmarks
    # The first label acts as the baseline for ratio/p-value columns.
    self.baseline = self.labels[0]

  def _SortByLabel(self, runs):
    """Group benchmark runs into a dict keyed by their label name."""
    labels = {}
    for benchmark_run in runs:
      labels.setdefault(benchmark_run.label_name, []).append(benchmark_run)
    return labels

  def GetFullTables(self, perf=False):
    """Return per-iteration ('full') tables; perf tables when |perf|."""
    columns = [
        Column(RawResult(), Format()),
        Column(MinResult(), Format()),
        Column(MaxResult(), Format()),
        Column(AmeanResult(), Format()),
        Column(StdResult(), Format(), 'StdDev'),
        Column(CoeffVarResult(), CoeffVarFormat(), 'StdDev/Mean'),
        Column(GmeanRatioResult(), RatioFormat(), 'GmeanSpeedup'),
        Column(PValueResult(), PValueFormat(), 'p-value'),
    ]
    if perf:
      return self._GetPerfTables(self.labels, columns, 'full')
    return self._GetTables(self.labels, self.benchmark_runs, columns, 'full')

  def GetSummaryTables(self, perf=False):
    """Return aggregate-only tables; perf tables when |perf|."""
    columns = [
        Column(AmeanResult(), Format()),
        Column(StdResult(), Format(), 'StdDev'),
        Column(CoeffVarResult(), CoeffVarFormat(), 'StdDev/Mean'),
        Column(GmeanRatioResult(), RatioFormat(), 'GmeanSpeedup'),
        Column(PValueResult(), PValueFormat(), 'p-value'),
    ]
    if perf:
      return self._GetPerfTables(self.labels, columns, 'summary')
    return self._GetTables(self.labels, self.benchmark_runs, columns,
                           'summary')

  def _ParseColumn(self, columns, iteration):
    """Expand each RawResult column into one LiteralResult column per
    iteration; all other columns are passed through unchanged."""
    new_column = []
    for column in columns:
      if column.result.__class__.__name__ != 'RawResult':
        #TODO(asharif): tabulator should support full table natively.
        new_column.append(column)
      else:
        for i in range(iteration):
          new_column.append(Column(LiteralResult(i), Format(), str(i + 1)))
    return new_column

  def _AreAllRunsEmpty(self, runs):
    """Return True when no run in |runs| produced a non-empty result."""
    return all(not result_dict for label in runs for result_dict in label)

  def _GetTableHeader(self, benchmark):
    """Build the single-cell header table naming |benchmark|."""
    benchmark_info = ('Benchmark:  {0};  Iterations: {1}'
                      .format(benchmark.name, benchmark.iterations))
    cell = Cell()
    cell.string_value = benchmark_info
    cell.header = True
    return [[cell]]

  def _GetTables(self, labels, benchmark_runs, columns, table_type):
    """Build a header table + data table pair for every benchmark."""
    tables = []
    ro = ResultOrganizer(benchmark_runs, labels, self.benchmarks)
    result = ro.result
    label_name = ro.labels
    for item in result:
      benchmark = None
      runs = result[item]
      # Find the Benchmark object matching this result key.
      for benchmark in self.benchmarks:
        if benchmark.name == item:
          break
      ben_table = self._GetTableHeader(benchmark)

      if self._AreAllRunsEmpty(runs):
        cell = Cell()
        cell.string_value = ('This benchmark contains no result.'
                             ' Is the benchmark name valid?')
        cell_table = [[cell]]
      else:
        tg = TableGenerator(runs, label_name)
        table = tg.GetTable()
        parsed_columns = self._ParseColumn(columns, benchmark.iterations)
        tf = TableFormatter(table, parsed_columns)
        cell_table = tf.GetCellTable(table_type)
      tables.append(ben_table)
      tables.append(cell_table)
    return tables

  def _GetPerfTables(self, labels, columns, table_type):
    """Build a header table + one table per perf event per benchmark."""
    tables = []
    label_names = [label.name for label in labels]
    p_table = PerfTable(self.experiment, label_names)

    if not p_table.perf_data:
      return tables

    for benchmark in p_table.perf_data:
      ben = None
      # Find the Benchmark object matching this perf-data key.
      for ben in self.benchmarks:
        if ben.name == benchmark:
          break

      ben_table = self._GetTableHeader(ben)
      tables.append(ben_table)
      benchmark_data = p_table.perf_data[benchmark]
      row_info = p_table.row_info[benchmark]
      table = []
      for event in benchmark_data:
        tg = TableGenerator(benchmark_data[event],
                            label_names,
                            sort=TableGenerator.SORT_BY_VALUES_DESC)
        # Show at least PERF_ROWS rows, more if the event has them.
        table = tg.GetTable(max(self.PERF_ROWS, row_info[event]))
        parsed_columns = self._ParseColumn(columns, ben.iterations)
        tf = TableFormatter(table, parsed_columns)
        tf.GenerateCellTable()
        tf.AddColumnName()
        tf.AddLabelName()
        tf.AddHeader(str(event))
        table = tf.GetCellTable(table_type, headers=False)
        tables.append(table)
    return tables

  def PrintTables(self, tables, out_to):
    """Render every table in |tables| with the style named by |out_to|.

    Args:
      tables: list of cell tables (may be None or empty).
      out_to: one of 'HTML', 'PLAIN', 'CONSOLE', 'TSV', 'EMAIL'.

    Returns:
      The concatenated rendered tables; '' when there are no tables.

    Raises:
      ValueError: for an unknown |out_to| style.  (Previously an unknown
      style fell through to an unbound local variable, or silently reused
      the printer from the prior iteration.)
    """
    if not tables:
      return ''
    style_map = {
        'HTML': TablePrinter.HTML,
        'PLAIN': TablePrinter.PLAIN,
        'CONSOLE': TablePrinter.CONSOLE,
        'TSV': TablePrinter.TSV,
        'EMAIL': TablePrinter.EMAIL,
    }
    if out_to not in style_map:
      raise ValueError('Unknown output style: %r' % (out_to,))
    style = style_map[out_to]
    output = ''
    for table in tables:
      output += TablePrinter(table, style).Print()
    return output
237
238
class TextResultsReport(ResultsReport):
  """Class to generate text result report."""
  TEXT = """
===========================================
Results report for: '%s'
===========================================

-------------------------------------------
Summary
-------------------------------------------
%s


Number re-images: %s

-------------------------------------------
Benchmark Run Status
-------------------------------------------
%s


-------------------------------------------
Perf Data
-------------------------------------------
%s



Experiment File
-------------------------------------------
%s


CPUInfo
-------------------------------------------
%s
===========================================
"""

  def __init__(self, experiment, email=False):
    """Remember whether output is destined for email or the console."""
    super(TextResultsReport, self).__init__(experiment)
    self.email = email

  def GetStatusTable(self):
    """Generate the status table by the tabulator."""
    status_column = Column(LiteralResult(iteration=0), Format(), 'Status')
    reason_column = Column(LiteralResult(iteration=1), Format(),
                           'Failing Reason')
    rows = [['', '']]
    for run in self.benchmark_runs:
      rows.append([run.name,
                   [run.timeline.GetLastEvent(), run.failure_reason]])
    formatter = TableFormatter(rows, [status_column, reason_column])
    return [formatter.GetCellTable('status')]

  def GetReport(self):
    """Generate the report for email and console."""
    status_table = self.GetStatusTable()
    summary_table = self.GetSummaryTables()
    perf_table = self.GetSummaryTables(perf=True) or None
    # Email output uses the EMAIL printer style, console the CONSOLE one;
    # the template and its arguments are otherwise identical.
    out_style = 'EMAIL' if self.email else 'CONSOLE'
    machine_manager = self.experiment.machine_manager
    return self.TEXT % (self.experiment.name,
                        self.PrintTables(summary_table, out_style),
                        machine_manager.num_reimages,
                        self.PrintTables(status_table, out_style),
                        self.PrintTables(perf_table, out_style),
                        self.experiment.experiment_file,
                        machine_manager.GetAllCPUInfo(self.experiment.labels))
323
324
class HTMLResultsReport(ResultsReport):
  """Class to generate html result report.

  The page is assembled by positional '%' substitution into the HTML and
  PERF_HTML templates below, so the argument order in GetReport must match
  the order of the '%s' placeholders in the templates exactly.
  """

  # Whole-page template.  Placeholders, in order: perf-tab init JS, chart
  # JS, summary table (html/text/tsv), summary tab menu, perf section,
  # chart divs, full table (html/text/tsv), full tab menu, experiment file.
  HTML = """
<html>
  <head>
    <style type="text/css">

body {
  font-family: "Lucida Sans Unicode", "Lucida Grande", Sans-Serif;
  font-size: 12px;
}

pre {
  margin: 10px;
  color: #039;
  font-size: 14px;
}

.chart {
  display: inline;
}

.hidden {
  visibility: hidden;
}

.results-section {
  border: 1px solid #b9c9fe;
  margin: 10px;
}

.results-section-title {
  background-color: #b9c9fe;
  color: #039;
  padding: 7px;
  font-size: 14px;
  width: 200px;
}

.results-section-content {
  margin: 10px;
  padding: 10px;
  overflow:auto;
}

#box-table-a {
  font-size: 12px;
  width: 480px;
  text-align: left;
  border-collapse: collapse;
}

#box-table-a th {
  padding: 6px;
  background: #b9c9fe;
  border-right: 1px solid #fff;
  border-bottom: 1px solid #fff;
  color: #039;
  text-align: center;
}

#box-table-a td {
  padding: 4px;
  background: #e8edff;
  border-bottom: 1px solid #fff;
  border-right: 1px solid #fff;
  color: #669;
  border-top: 1px solid transparent;
}

#box-table-a tr:hover td {
  background: #d0dafd;
  color: #339;
}

    </style>
    <script type='text/javascript' src='https://www.google.com/jsapi'></script>
    <script type='text/javascript'>
      google.load('visualization', '1', {packages:['corechart']});
      google.setOnLoadCallback(init);
      function init() {
        switchTab('summary', 'html');
        %s
        switchTab('full', 'html');
        drawTable();
      }
      function drawTable() {
        %s
      }
      function switchTab(table, tab) {
        document.getElementById(table + '-html').style.display = 'none';
        document.getElementById(table + '-text').style.display = 'none';
        document.getElementById(table + '-tsv').style.display = 'none';
        document.getElementById(table + '-' + tab).style.display = 'block';
      }
    </script>
  </head>

  <body>
    <div class='results-section'>
      <div class='results-section-title'>Summary Table</div>
      <div class='results-section-content'>
        <div id='summary-html'>%s</div>
        <div id='summary-text'><pre>%s</pre></div>
        <div id='summary-tsv'><pre>%s</pre></div>
      </div>
      %s
    </div>
    %s
    <div class='results-section'>
      <div class='results-section-title'>Charts</div>
      <div class='results-section-content'>%s</div>
    </div>
    <div class='results-section'>
      <div class='results-section-title'>Full Table</div>
      <div class='results-section-content'>
        <div id='full-html'>%s</div>
        <div id='full-text'><pre>%s</pre></div>
        <div id='full-tsv'><pre>%s</pre></div>
      </div>
      %s
    </div>
    <div class='results-section'>
      <div class='results-section-title'>Experiment File</div>
      <div class='results-section-content'>
        <pre>%s</pre>
    </div>
    </div>
  </body>
</html>
"""

  # Perf section template; only included when perf data exists.
  # Placeholders: perf table as html/text/tsv, then the perf tab menu.
  PERF_HTML = """
    <div class='results-section'>
      <div class='results-section-title'>Perf Table</div>
      <div class='results-section-content'>
        <div id='perf-html'>%s</div>
        <div id='perf-text'><pre>%s</pre></div>
        <div id='perf-tsv'><pre>%s</pre></div>
      </div>
      %s
    </div>
"""

  def __init__(self, experiment):
    super(HTMLResultsReport, self).__init__(experiment)

  def _GetTabMenuHTML(self, table):
    """Return the HTML/Text/TSV tab-switcher links for table id |table|."""
    return """
<div class='tab-menu'>
  <a href="javascript:switchTab('%s', 'html')">HTML</a>
  <a href="javascript:switchTab('%s', 'text')">Text</a>
  <a href="javascript:switchTab('%s', 'tsv')">TSV</a>
</div>""" % (table, table, table)

  def GetReport(self):
    """Render the complete HTML report page as a string."""
    # Charts contribute both inline JavaScript and <div> anchors.
    chart_javascript = ''
    charts = self._GetCharts(self.labels, self.benchmark_runs)
    for chart in charts:
      chart_javascript += chart.GetJavascript()
    chart_divs = ''
    for chart in charts:
      chart_divs += chart.GetDiv()

    summary_table = self.GetSummaryTables()
    full_table = self.GetFullTables()
    perf_table = self.GetSummaryTables(perf=True)
    # The perf section (and its tab-init JS) only appears with perf data.
    if perf_table:
      perf_html = self.PERF_HTML % (self.PrintTables(perf_table, 'HTML'),
                                    self.PrintTables(perf_table, 'PLAIN'),
                                    self.PrintTables(perf_table, 'TSV'),
                                    self._GetTabMenuHTML('perf'))
      perf_init = "switchTab('perf', 'html');"
    else:
      perf_html = ''
      perf_init = ''

    return self.HTML % (
        perf_init, chart_javascript, self.PrintTables(summary_table, 'HTML'),
        self.PrintTables(summary_table, 'PLAIN'),
        self.PrintTables(summary_table, 'TSV'), self._GetTabMenuHTML('summary'),
        perf_html, chart_divs, self.PrintTables(full_table, 'HTML'),
        self.PrintTables(full_table, 'PLAIN'),
        self.PrintTables(full_table, 'TSV'), self._GetTabMenuHTML('full'),
        self.experiment.experiment_file)

  def _GetCharts(self, labels, benchmark_runs):
    """Build one ColumnChart per (benchmark, test key) with Avg/Min/Max
    per label.  Rows whose values are strings produce no chart."""
    charts = []
    ro = ResultOrganizer(benchmark_runs, labels)
    result = ro.result
    for item in result:
      runs = result[item]
      tg = TableGenerator(runs, ro.labels)
      table = tg.GetTable()
      columns = [Column(AmeanResult(), Format()), Column(MinResult(), Format()),
                 Column(MaxResult(), Format())]
      tf = TableFormatter(table, columns)
      data_table = tf.GetCellTable('full')

      # Data rows start at index 2 -- presumably the first two rows are
      # header/label rows from GetCellTable; TODO confirm against tabulator.
      for i in range(2, len(data_table)):
        cur_row_data = data_table[i]
        test_key = cur_row_data[0].string_value
        title = '{0}: {1}'.format(item, test_key.replace('/', ''))
        chart = ColumnChart(title, 300, 200)
        chart.AddColumn('Label', 'string')
        chart.AddColumn('Average', 'number')
        chart.AddColumn('Min', 'number')
        chart.AddColumn('Max', 'number')
        chart.AddSeries('Min', 'line', 'black')
        chart.AddSeries('Max', 'line', 'black')
        cur_index = 1
        # Each label occupies three consecutive cells: Average, Min, Max.
        for label in ro.labels:
          chart.AddRow([label, cur_row_data[cur_index].value, cur_row_data[
              cur_index + 1].value, cur_row_data[cur_index + 2].value])
          # NOTE(review): the row is added before this string check, but a
          # string value discards the whole chart anyway, so nothing leaks.
          if isinstance(cur_row_data[cur_index].value, str):
            chart = None
            break
          cur_index += 3
        if chart:
          charts.append(chart)
    return charts
546    return charts
547
548
class JSONResultsReport(ResultsReport):
  """Class to generate a JSON report of the experiment results."""

  def __init__(self, experiment, date=None, time=None):
    """Set up the result organizer and the report timestamp.

    Args:
      experiment: the Experiment object to report on.
      date: 'YYYY-mm-dd' string for the report; defaults to now.
      time: 'HH:MM:SS' string for the report; defaults to now.
    """
    super(JSONResultsReport, self).__init__(experiment)
    self.ro = ResultOrganizer(experiment.benchmark_runs,
                              experiment.labels,
                              experiment.benchmarks,
                              json_report=True)
    self.date = date
    self.time = time
    self.defaults = TelemetryDefaults()
    # Fill in whichever of date/time the caller did not supply.  (Previously
    # a supplied date with a missing time left self.time as None, which
    # crashed later when building the report filename.)
    if not self.date or not self.time:
      timestamp = datetime.datetime.strftime(datetime.datetime.now(),
                                             '%Y-%m-%d %H:%M:%S')
      date, time = timestamp.split(' ')
      self.date = self.date or date
      self.time = self.time or time

  def GetReport(self, results_dir):
    """Write report_<board>_<date>_<time>.<compiler>.json into |results_dir|.

    One JSON record is emitted per (test, label, iteration), carrying the
    image/version info for that label plus the overall and detailed numeric
    results of the iteration.
    """
    self.defaults.ReadDefaultsFile()
    final_results = []
    board = self.experiment.labels[0].board
    # If any of the labels used the LLVM compiler, we add ".llvm" to the
    # json report filename; otherwise ".gcc".  (Previously this was
    # recomputed inside the result loop, so the filename only reflected the
    # last label processed -- and the variable was unbound when the
    # experiment produced no results at all.)
    compiler_string = 'gcc'
    for label in self.experiment.labels:
      if 'llvm' in label.compiler:
        compiler_string = 'llvm'
        break
    for test, test_results in self.ro.result.items():
      for i, label in enumerate(self.ro.labels):
        for iter_results in test_results[i]:
          json_results = {
              'date': self.date,
              'time': self.time,
              'board': board,
              'label': label,
              'test_name': test,
          }
          common_checksum = ''
          common_string = ''
          # Copy image/version/compiler info from the matching label.
          for l in self.experiment.labels:
            if l.name == label:
              ver, img = ParseChromeosImage(l.chromeos_image)
              json_results['chromeos_image'] = img
              json_results['chromeos_version'] = ver
              json_results['chrome_version'] = l.chrome_version
              json_results['compiler'] = l.compiler
              common_checksum = (
                  self.experiment.machine_manager.machine_checksum[l.name])
              common_string = (self.experiment.machine_manager
                               .machine_checksum_string[l.name])
              break
          if not iter_results or iter_results['retval'] != 0:
            json_results['pass'] = False
          else:
            json_results['pass'] = True
            # Get overall results: only the default result fields configured
            # for this telemetry test, if any.
            if test in self.defaults.GetDefault():
              default_result_fields = self.defaults.GetDefault()[test]
              value = []
              for f in default_result_fields:
                if f in iter_results:
                  v = iter_results[f]
                  if isinstance(v, list):
                    v = v[0]
                  value.append((f, float(v)))
              json_results['overall_result'] = value
            # Get detailed results: every numeric field except 'retval';
            # 'machine...' entries are copied through verbatim.
            detail_results = {}
            for k, v in iter_results.items():
              if k == 'retval':
                continue
              if isinstance(v, list):
                v = v[0]
              if v == 'PASS':
                continue
              if 'machine' in k:
                json_results[k] = v
              elif isinstance(v, list):
                detail_results[k] = [float(d) for d in v]
              else:
                detail_results[k] = float(v)
            if 'machine_checksum' not in json_results:
              json_results['machine_checksum'] = common_checksum
            if 'machine_string' not in json_results:
              json_results['machine_string'] = common_string
            json_results['detailed_results'] = detail_results
          final_results.append(json_results)

    filename = 'report_%s_%s_%s.%s.json' % (
        board, self.date, self.time.replace(':', '.'), compiler_string)
    fullname = os.path.join(results_dir, filename)
    with open(fullname, 'w') as fp:
      json.dump(final_results, fp, indent=2)
644