results_report.py revision afb8cc77e82c35faedfe541d097fc01fd1d7ca3d
# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""A module to handle the report format."""
from __future__ import print_function

import datetime
import itertools
import json
import os

from cros_utils.tabulator import AmeanResult
from cros_utils.tabulator import Cell
from cros_utils.tabulator import CoeffVarFormat
from cros_utils.tabulator import CoeffVarResult
from cros_utils.tabulator import Column
from cros_utils.tabulator import Format
from cros_utils.tabulator import GmeanRatioResult
from cros_utils.tabulator import LiteralResult
from cros_utils.tabulator import MaxResult
from cros_utils.tabulator import MinResult
from cros_utils.tabulator import PValueFormat
from cros_utils.tabulator import PValueResult
from cros_utils.tabulator import RatioFormat
from cros_utils.tabulator import RawResult
from cros_utils.tabulator import StdResult
from cros_utils.tabulator import TableFormatter
from cros_utils.tabulator import TableGenerator
from cros_utils.tabulator import TablePrinter
from update_telemetry_defaults import TelemetryDefaults

from column_chart import ColumnChart
from results_organizer import OrganizeResults
from perf_table import PerfTable


def ParseChromeosImage(chromeos_image):
  """Parse the chromeos_image string for the image and version.

  The chromeos_image string will probably be in one of two formats:
  1: <path-to-chroot>/src/build/images/<board>/<ChromeOS-version>.<datetime>/ \
     chromiumos_test_image.bin
  2: <path-to-chroot>/chroot/tmp/<buildbot-build>/<ChromeOS-version>/ \
     chromiumos_test_image.bin

  We parse these strings to find the 'chromeos_version' to store in the
  json archive (without the .datetime bit in the first case); and also
  the 'chromeos_image', which would be all of the first case, but only the
  part after '/chroot/tmp' in the second case.

  Args:
    chromeos_image: string containing the path to the chromeos_image that
      crosperf used for the test.

  Returns:
    version, image: The results of parsing the input string, as explained
      above.
  """
  # Find the ChromeOS version, e.g. R45-2345.0.0.....
  # chromeos_image should have been something like:
  # <path>/<board-trybot-release>/<chromeos-version>/chromiumos_test_image.bin
  if chromeos_image.endswith('/chromiumos_test_image.bin'):
    full_version = chromeos_image.split('/')[-2]
    # Strip the date and time off of local builds (which have the format
    # "R43-2345.0.0.date-and-time").
    version, _ = os.path.splitext(full_version)
  else:
    version = ''

  # Find the chromeos image. If it's somewhere in .../chroot/tmp/..., then
  # it's an official image that got downloaded, so chop off the download path
  # to make the official image name more clear.
  official_image_path = '/chroot/tmp'
  if official_image_path in chromeos_image:
    image = chromeos_image.split(official_image_path, 1)[1]
  else:
    image = chromeos_image
  return version, image


class ResultsReport(object):
  """Class to handle the report format."""
  MAX_COLOR_CODE = 255
  PERF_ROWS = 5

  def __init__(self, experiment):
    self.experiment = experiment
    self.benchmark_runs = experiment.benchmark_runs
    self.labels = experiment.labels
    self.benchmarks = experiment.benchmarks
    self.baseline = self.labels[0]

  def _SortByLabel(self, runs):
    """Group benchmark runs into a dict keyed by label name."""
    labels = {}
    for benchmark_run in runs:
      if benchmark_run.label_name not in labels:
        labels[benchmark_run.label_name] = []
      labels[benchmark_run.label_name].append(benchmark_run)
    return labels

  def GetFullTables(self, perf=False):
    """Build the full result tables (perf tables when perf is True)."""
    columns = [Column(RawResult(), Format()),
               Column(MinResult(), Format()),
               Column(MaxResult(), Format()),
               Column(AmeanResult(), Format()),
               Column(StdResult(), Format(), 'StdDev'),
               Column(CoeffVarResult(), CoeffVarFormat(), 'StdDev/Mean'),
               Column(GmeanRatioResult(), RatioFormat(), 'GmeanSpeedup'),
               Column(PValueResult(), PValueFormat(), 'p-value')]
    if not perf:
      return self._GetTables(self.labels, self.benchmark_runs, columns, 'full')
    return self._GetPerfTables(self.labels, columns, 'full')

  def GetSummaryTables(self, perf=False):
    """Build the summary tables (perf tables when perf is True)."""
    columns = [Column(AmeanResult(), Format()),
               Column(StdResult(), Format(), 'StdDev'),
               Column(CoeffVarResult(), CoeffVarFormat(), 'StdDev/Mean'),
               Column(GmeanRatioResult(), RatioFormat(), 'GmeanSpeedup'),
               Column(PValueResult(), PValueFormat(), 'p-value')]
    if not perf:
      return self._GetTables(self.labels, self.benchmark_runs, columns,
                             'summary')
    return self._GetPerfTables(self.labels, columns, 'summary')

  def _ParseColumn(self, columns, iteration):
    """Expand the RawResult column into one literal column per iteration."""
    new_column = []
    for column in columns:
      if column.result.__class__.__name__ != 'RawResult':
        # TODO(asharif): tabulator should support full table natively.
        new_column.append(column)
      else:
        for i in range(iteration):
          cc = Column(LiteralResult(i), Format(), str(i + 1))
          new_column.append(cc)
    return new_column

  def _AreAllRunsEmpty(self, runs):
    for label in runs:
      for dictionary in label:
        if dictionary:
          return False
    return True

  def _GetTableHeader(self, benchmark):
    benchmark_info = ('Benchmark: {0}; Iterations: {1}'
                      .format(benchmark.name, benchmark.iterations))
    cell = Cell()
    cell.string_value = benchmark_info
    cell.header = True
    return [[cell]]

  def _GetTables(self, labels, benchmark_runs, columns, table_type):
    tables = []
    result = OrganizeResults(benchmark_runs, labels, self.benchmarks)
    label_name = [label.name for label in labels]
    for item in result:
      benchmark = None
      runs = result[item]
      for benchmark in self.benchmarks:
        if benchmark.name == item:
          break
      ben_table = self._GetTableHeader(benchmark)

      if self._AreAllRunsEmpty(runs):
        cell = Cell()
        cell.string_value = ('This benchmark contains no result.'
                             ' Is the benchmark name valid?')
        cell_table = [[cell]]
      else:
        tg = TableGenerator(runs, label_name)
        table = tg.GetTable()
        parsed_columns = self._ParseColumn(columns, benchmark.iterations)
        tf = TableFormatter(table, parsed_columns)
        cell_table = tf.GetCellTable(table_type)
      tables.append(ben_table)
      tables.append(cell_table)
    return tables

  def _GetPerfTables(self, labels, columns, table_type):
    tables = []
    label_names = [label.name for label in labels]
    p_table = PerfTable(self.experiment, label_names)

    if not p_table.perf_data:
      return tables

    for benchmark in p_table.perf_data:
      ben = None
      for ben in self.benchmarks:
        if ben.name == benchmark:
          break

      ben_table = self._GetTableHeader(ben)
      tables.append(ben_table)
      benchmark_data = p_table.perf_data[benchmark]
      row_info = p_table.row_info[benchmark]
      table = []
      for event in benchmark_data:
        tg = TableGenerator(benchmark_data[event],
                            label_names,
                            sort=TableGenerator.SORT_BY_VALUES_DESC)
        table = tg.GetTable(max(self.PERF_ROWS, row_info[event]))
        parsed_columns = self._ParseColumn(columns, ben.iterations)
        tf = TableFormatter(table, parsed_columns)
        tf.GenerateCellTable(table_type)
        tf.AddColumnName()
        tf.AddLabelName()
        tf.AddHeader(str(event))
        table = tf.GetCellTable(table_type, headers=False)
        tables.append(table)
    return tables

  def PrintTables(self, tables, out_to):
    """Render the given cell tables in the requested output format."""
    output = ''
    if not tables:
      return output
    for table in tables:
      if out_to == 'HTML':
        tp = TablePrinter(table, TablePrinter.HTML)
      elif out_to == 'PLAIN':
        tp = TablePrinter(table, TablePrinter.PLAIN)
      elif out_to == 'CONSOLE':
        tp = TablePrinter(table, TablePrinter.CONSOLE)
      elif out_to == 'TSV':
        tp = TablePrinter(table, TablePrinter.TSV)
      elif out_to == 'EMAIL':
        tp = TablePrinter(table, TablePrinter.EMAIL)
      else:
        raise ValueError('Unknown output format: %s' % out_to)
      output += tp.Print()
    return output


class TextResultsReport(ResultsReport):
  """Class to generate text result report."""
  TEXT = """
===========================================
Results report for: '%s'
===========================================

-------------------------------------------
Summary
-------------------------------------------
%s


Number re-images: %s

-------------------------------------------
Benchmark Run Status
-------------------------------------------
%s


-------------------------------------------
Perf Data
-------------------------------------------
%s



Experiment File
-------------------------------------------
%s


CPUInfo
-------------------------------------------
%s
===========================================
"""

  def __init__(self, experiment, email=False):
    super(TextResultsReport, self).__init__(experiment)
    self.email = email

  def GetStatusTable(self):
    """Generate the status table by the tabulator."""
    table = [['', '']]
    columns = [Column(LiteralResult(iteration=0), Format(), 'Status'),
               Column(LiteralResult(iteration=1), Format(), 'Failing Reason')]

    for benchmark_run in self.benchmark_runs:
      status = [benchmark_run.name, [benchmark_run.timeline.GetLastEvent(),
                                     benchmark_run.failure_reason]]
      table.append(status)
    tf = TableFormatter(table, columns)
    cell_table = tf.GetCellTable('status')
    return [cell_table]

  def GetReport(self):
    """Generate the report for email and console."""
    status_table = self.GetStatusTable()
    summary_table = self.GetSummaryTables()
    perf_table = self.GetSummaryTables(perf=True)
    if not perf_table:
      perf_table = None
    output_type = 'EMAIL' if self.email else 'CONSOLE'
    return self.TEXT % (
        self.experiment.name, self.PrintTables(summary_table, output_type),
        self.experiment.machine_manager.num_reimages,
        self.PrintTables(status_table, output_type),
        self.PrintTables(perf_table, output_type),
        self.experiment.experiment_file,
        self.experiment.machine_manager.GetAllCPUInfo(self.experiment.labels))


class HTMLResultsReport(ResultsReport):
  """Class to generate html result report."""

  HTML = """
<html>
  <head>
    <style type="text/css">

body {
  font-family: "Lucida Sans Unicode", "Lucida Grande", Sans-Serif;
  font-size: 12px;
}

pre {
  margin: 10px;
  color: #039;
  font-size: 14px;
}

.chart {
  display: inline;
}

.hidden {
  visibility: hidden;
}

.results-section {
  border: 1px solid #b9c9fe;
  margin: 10px;
}

.results-section-title {
  background-color: #b9c9fe;
  color: #039;
  padding: 7px;
  font-size: 14px;
  width: 200px;
}

.results-section-content {
  margin: 10px;
  padding: 10px;
  overflow:auto;
}

#box-table-a {
  font-size: 12px;
  width: 480px;
  text-align: left;
  border-collapse: collapse;
}

#box-table-a th {
  padding: 6px;
  background: #b9c9fe;
  border-right: 1px solid #fff;
  border-bottom: 1px solid #fff;
  color: #039;
  text-align: center;
}

#box-table-a td {
  padding: 4px;
  background: #e8edff;
  border-bottom: 1px solid #fff;
  border-right: 1px solid #fff;
  color: #669;
  border-top: 1px solid transparent;
}

#box-table-a tr:hover td {
  background: #d0dafd;
  color: #339;
}

    </style>
    <script type='text/javascript' src='https://www.google.com/jsapi'></script>
    <script type='text/javascript'>
      google.load('visualization', '1', {packages:['corechart']});
      google.setOnLoadCallback(init);
      function init() {
        switchTab('summary', 'html');
        %s
        switchTab('full', 'html');
        drawTable();
      }
      function drawTable() {
        %s
      }
      function switchTab(table, tab) {
        document.getElementById(table + '-html').style.display = 'none';
        document.getElementById(table + '-text').style.display = 'none';
        document.getElementById(table + '-tsv').style.display = 'none';
        document.getElementById(table + '-' + tab).style.display = 'block';
      }
    </script>
  </head>

  <body>
    <div class='results-section'>
      <div class='results-section-title'>Summary Table</div>
      <div class='results-section-content'>
        <div id='summary-html'>%s</div>
        <div id='summary-text'><pre>%s</pre></div>
        <div id='summary-tsv'><pre>%s</pre></div>
      </div>
      %s
    </div>
    %s
    <div class='results-section'>
      <div class='results-section-title'>Charts</div>
      <div class='results-section-content'>%s</div>
    </div>
    <div class='results-section'>
      <div class='results-section-title'>Full Table</div>
      <div class='results-section-content'>
        <div id='full-html'>%s</div>
        <div id='full-text'><pre>%s</pre></div>
        <div id='full-tsv'><pre>%s</pre></div>
      </div>
      %s
    </div>
    <div class='results-section'>
      <div class='results-section-title'>Experiment File</div>
      <div class='results-section-content'>
        <pre>%s</pre>
      </div>
    </div>
  </body>
</html>
"""

  PERF_HTML = """
    <div class='results-section'>
      <div class='results-section-title'>Perf Table</div>
      <div class='results-section-content'>
        <div id='perf-html'>%s</div>
        <div id='perf-text'><pre>%s</pre></div>
        <div id='perf-tsv'><pre>%s</pre></div>
      </div>
      %s
    </div>
"""

  def __init__(self, experiment):
    super(HTMLResultsReport, self).__init__(experiment)

  def _GetTabMenuHTML(self, table):
    return """
<div class='tab-menu'>
  <a href="javascript:switchTab('%s', 'html')">HTML</a>
  <a href="javascript:switchTab('%s', 'text')">Text</a>
  <a href="javascript:switchTab('%s', 'tsv')">TSV</a>
</div>""" % (table, table, table)

  def GetReport(self):
    """Generate the full HTML report."""
    charts = self._GetCharts(self.labels, self.benchmark_runs)
    chart_javascript = ''.join(chart.GetJavascript() for chart in charts)
    chart_divs = ''.join(chart.GetDiv() for chart in charts)

    summary_table = self.GetSummaryTables()
    full_table = self.GetFullTables()
    perf_table = self.GetSummaryTables(perf=True)
    if perf_table:
      perf_html = self.PERF_HTML % (self.PrintTables(perf_table, 'HTML'),
                                    self.PrintTables(perf_table, 'PLAIN'),
                                    self.PrintTables(perf_table, 'TSV'),
                                    self._GetTabMenuHTML('perf'))
      perf_init = "switchTab('perf', 'html');"
    else:
      perf_html = ''
      perf_init = ''

    return self.HTML % (
        perf_init, chart_javascript, self.PrintTables(summary_table, 'HTML'),
        self.PrintTables(summary_table, 'PLAIN'),
        self.PrintTables(summary_table, 'TSV'),
        self._GetTabMenuHTML('summary'), perf_html, chart_divs,
        self.PrintTables(full_table, 'HTML'),
        self.PrintTables(full_table, 'PLAIN'),
        self.PrintTables(full_table, 'TSV'), self._GetTabMenuHTML('full'),
        self.experiment.experiment_file)

  def _GetCharts(self, labels, benchmark_runs):
    charts = []
    result = OrganizeResults(benchmark_runs, labels)
    label_names = [label.name for label in labels]
    for item, runs in result.iteritems():
      tg = TableGenerator(runs, label_names)
      table = tg.GetTable()
      columns = [Column(AmeanResult(), Format()),
                 Column(MinResult(), Format()),
                 Column(MaxResult(), Format())]
      tf = TableFormatter(table, columns)
      data_table = tf.GetCellTable('full')

      for i in range(2, len(data_table)):
        cur_row_data = data_table[i]
        test_key = cur_row_data[0].string_value
        title = '{0}: {1}'.format(item, test_key.replace('/', ''))
        chart = ColumnChart(title, 300, 200)
        chart.AddColumn('Label', 'string')
        chart.AddColumn('Average', 'number')
        chart.AddColumn('Min', 'number')
        chart.AddColumn('Max', 'number')
        chart.AddSeries('Min', 'line', 'black')
        chart.AddSeries('Max', 'line', 'black')
        cur_index = 1
        for label in label_names:
          chart.AddRow([label, cur_row_data[cur_index].value,
                        cur_row_data[cur_index + 1].value,
                        cur_row_data[cur_index + 2].value])
          if isinstance(cur_row_data[cur_index].value, str):
            chart = None
            break
          cur_index += 3
        if chart:
          charts.append(chart)
    return charts


class JSONResultsReport(ResultsReport):
  """Class that generates JSON reports."""

  @staticmethod
  def _WriteResultsToFile(filename, results):
    """Write the results as JSON to the given filename."""
    with open(filename, 'w') as fp:
      json.dump(results, fp, indent=2)

  def __init__(self, experiment, date=None, time=None):
    super(JSONResultsReport, self).__init__(experiment)
    self.label_names = [label.name for label in experiment.labels]
    self.organized_result = OrganizeResults(experiment.benchmark_runs,
                                            experiment.labels,
                                            experiment.benchmarks,
                                            json_report=True)
    self.date = date
    self.time = time
    self.defaults = TelemetryDefaults()
    if not self.date:
      timestamp = datetime.datetime.strftime(datetime.datetime.now(),
                                             '%Y-%m-%d %H:%M:%S')
      date, time = timestamp.split(' ')
      self.date = date
      self.time = time

  def GetReport(self, results_dir, write_results=None):
    """Generate the JSON report and write it into results_dir."""
    if write_results is None:
      write_results = JSONResultsReport._WriteResultsToFile

    self.defaults.ReadDefaultsFile()
    final_results = []
    board = self.experiment.labels[0].board
    compiler_string = 'gcc'
    for test, test_results in self.organized_result.iteritems():
      for label, label_results in itertools.izip(self.label_names,
                                                 test_results):
        for iter_results in label_results:
          json_results = {
              'date': self.date,
              'time': self.time,
              'board': board,
              'label': label
          }
          common_checksum = ''
          common_string = ''
          for l in self.experiment.labels:
            if l.name == label:
              img_path = os.path.realpath(os.path.expanduser(l.chromeos_image))
              ver, img = ParseChromeosImage(img_path)
              json_results['chromeos_image'] = img
              json_results['chromeos_version'] = ver
              json_results['chrome_version'] = l.chrome_version
              json_results['compiler'] = l.compiler
              # If any of the labels used the LLVM compiler, we will add
              # ".llvm" to the json report filename. (Otherwise we use .gcc).
              if 'llvm' in l.compiler:
                compiler_string = 'llvm'
              common_checksum = \
                self.experiment.machine_manager.machine_checksum[l.name]
              common_string = \
                self.experiment.machine_manager.machine_checksum_string[l.name]
              break
          else:
            raise RuntimeError("Label doesn't exist in label_results?")
          json_results['test_name'] = test

          if not iter_results or iter_results['retval'] != 0:
            json_results['pass'] = False
          else:
            json_results['pass'] = True
            # Get overall results.
            if test in self.defaults.GetDefault():
              default_result_fields = self.defaults.GetDefault()[test]
              value = []
              for f in default_result_fields:
                if f in iter_results:
                  v = iter_results[f]
                  if type(v) == list:
                    v = v[0]
                    # New telemetry results format: sometimes we get a list
                    # of lists now.
                    if type(v) == list:
                      v = v[0]
                  item = (f, float(v))
                  value.append(item)
              json_results['overall_result'] = value
            # Get detailed results.
            detail_results = {}
            for k in iter_results:
              if k != 'retval':
                v = iter_results[k]
                if type(v) == list:
                  v = v[0]
                if v != 'PASS':
                  if k.find('machine') == -1:
                    if v is None:
                      continue
                    if type(v) != list:
                      detail_results[k] = float(v)
                    else:
                      detail_results[k] = [float(d) for d in v]
                  else:
                    json_results[k] = v
            if 'machine_checksum' not in json_results:
              json_results['machine_checksum'] = common_checksum
            if 'machine_string' not in json_results:
              json_results['machine_string'] = common_string
            json_results['detailed_results'] = detail_results
          final_results.append(json_results)

    filename = 'report_%s_%s_%s.%s.json' % (
        board, self.date, self.time.replace(':', '.'), compiler_string)
    fullname = os.path.join(results_dir, filename)
    write_results(fullname, final_results)
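

# ---------------------------------------------------------------------------
# Usage sketch (not part of the original module): a minimal illustration of
# how ParseChromeosImage treats the two path formats described in its
# docstring. The paths and the 'lumpy' board below are hypothetical, and
# running the module still requires the crosperf environment so that the
# imports at the top resolve.
if __name__ == '__main__':
  # Format 1: a local build. The date/time suffix is split off the version
  # directory by os.path.splitext, and the image path is returned unchanged.
  local_path = ('/home/me/chromeos/src/build/images/lumpy/'
                'R45-2345.0.0.2015_06_01_120000/chromiumos_test_image.bin')
  print(ParseChromeosImage(local_path))
  # -> ('R45-2345.0.0', local_path)

  # Format 2: a downloaded buildbot image under chroot/tmp. Everything up to
  # and including '/chroot/tmp' is chopped off of the image path; splitext
  # still strips the final '.'-separated piece from the version directory.
  official_path = ('/home/me/chromeos/chroot/tmp/lumpy-release/'
                   'R45-2345.0.0/chromiumos_test_image.bin')
  print(ParseChromeosImage(official_path))
  # -> (version, '/lumpy-release/R45-2345.0.0/chromiumos_test_image.bin')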