gen_bench_expectations.py revision 5f640a30b92b7cf926ff1168b386882182bcb3bb
#!/usr/bin/env python
# Copyright (c) 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

""" Generate bench_expectations file from a given set of bench data files. """

import argparse
import bench_util
import os
import re
import sys

# Parameters for calculating bench ranges.
RANGE_RATIO_UPPER = 1.5  # Ratio of range for upper bounds.
RANGE_RATIO_LOWER = 2.0  # Ratio of range for lower bounds.
ERR_RATIO = 0.08  # Further widens the range by the ratio of average value.
ERR_UB = 1.0  # Adds an absolute upper error to cope with small benches.
ERR_LB = 1.5  # Adds an absolute lower error to cope with small benches.

# List of bench configs to monitor. Ignore all other configs.
CONFIGS_TO_INCLUDE = ['simple_viewport_1000x1000',
                      'simple_viewport_1000x1000_angle',
                      'simple_viewport_1000x1000_gpu',
                      'simple_viewport_1000x1000_scalar_1.100000',
                      'simple_viewport_1000x1000_scalar_1.100000_gpu',
                     ]

# List of flaky entries that should be excluded. Each entry is defined by a list
# of 3 strings, corresponding to the substrings of [bench, config, builder] to
# search for. A bench expectations line is excluded when each of the 3 strings
# in the list is a substring of the corresponding element of the given line. For
# instance, ['desk_yahooanswers', 'gpu', 'Ubuntu'] will skip expectation entries
# of SKP benchs whose name contains 'desk_yahooanswers' on all gpu-related
# configs of all Ubuntu builders.
ENTRIES_TO_EXCLUDE = [
    ]


def compute_ranges(benches):
  """Given a list of bench numbers, calculate the alert range.

  The range is anchored at [min, max] of the observed values, widened
  asymmetrically by the observed spread (RANGE_RATIO_LOWER / RANGE_RATIO_UPPER),
  by a fraction of the average (ERR_RATIO), and by absolute slack
  (ERR_LB / ERR_UB) so that very small benches still get a usable range.

  Args:
    benches: a non-empty list of float bench values.

  Returns:
    a list of float [lower_bound, upper_bound].

  Raises:
    ValueError: if benches is empty (from min()/max()).
  """
  minimum = min(benches)
  maximum = max(benches)
  diff = maximum - minimum
  # float() guards against Python 2 integer division if callers pass ints.
  avg = sum(benches) / float(len(benches))

  return [minimum - diff * RANGE_RATIO_LOWER - avg * ERR_RATIO - ERR_LB,
          maximum + diff * RANGE_RATIO_UPPER + avg * ERR_RATIO + ERR_UB]


def create_expectations_dict(revision_data_points, builder):
  """Convert list of bench data points into a dictionary of expectations data.

  Only walltime data points (time_type == '') on configs listed in
  CONFIGS_TO_INCLUDE are kept; entries matching ENTRIES_TO_EXCLUDE are skipped.

  Args:
    revision_data_points: a list of BenchDataPoint objects.
    builder: string of the corresponding buildbot builder name.

  Returns:
    a dictionary of this form:
        keys = tuple of (config, bench) strings.
        values = list of float [expected, lower_bound, upper_bound] for the
            key.

  Raises:
    Exception: if two data points map to the same (config, bench) key.
  """
  bench_dict = {}
  for point in revision_data_points:
    if (point.time_type or  # Not walltime which has time_type ''
        point.config not in CONFIGS_TO_INCLUDE):
      continue
    to_skip = False
    for bench_substr, config_substr, builder_substr in ENTRIES_TO_EXCLUDE:
      if (bench_substr in point.bench and config_substr in point.config and
          builder_substr in builder):
        to_skip = True
        break
    if to_skip:
      continue
    key = (point.config, point.bench)
    if key in bench_dict:
      raise Exception('Duplicate bench entry: ' + str(key))
    bench_dict[key] = [point.time] + compute_ranges(point.per_iter_time)

  return bench_dict


def main():
  """Reads bench data points, then calculate and export expectations.
  """
  parser = argparse.ArgumentParser()
  parser.add_argument(
      '-a', '--representation_alg', default='25th',
      help='bench representation algorithm to use, see bench_util.py.')
  parser.add_argument(
      '-b', '--builder', required=True,
      help='name of the builder whose bench ranges we are computing.')
  parser.add_argument(
      '-d', '--input_dir', required=True,
      help='a directory containing bench data files.')
  parser.add_argument(
      '-o', '--output_file', required=True,
      help='file path and name for storing the output bench expectations.')
  parser.add_argument(
      '-r', '--git_revision', required=True,
      help='the git hash to indicate the revision of input data to use.')
  args = parser.parse_args()

  builder = args.builder

  data_points = bench_util.parse_skp_bench_data(
      args.input_dir, args.git_revision, args.representation_alg)

  expectations_dict = create_expectations_dict(data_points, builder)

  out_lines = []
  # sorted() works on both Python 2 and 3; dict.keys() is not sortable
  # in-place on Python 3, where it returns a view instead of a list.
  for (config, bench) in sorted(expectations_dict.keys()):
    (expected, lower_bound, upper_bound) = expectations_dict[(config, bench)]
    out_lines.append('%(bench)s_%(config)s_,%(builder)s-%(representation)s,'
        '%(expected)s,%(lower_bound)s,%(upper_bound)s' % {
            'bench': bench,
            'config': config,
            'builder': builder,
            'representation': args.representation_alg,
            'expected': expected,
            'lower_bound': lower_bound,
            'upper_bound': upper_bound})

  with open(args.output_file, 'w') as file_handle:
    file_handle.write('\n'.join(out_lines))


if __name__ == "__main__":
  main()