gen_bench_expectations.py revision 10dc56c46899952b761817323fba9ee690b5bbd3
#!/usr/bin/env python
# Copyright (c) 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

""" Generate bench_expectations file from a given set of bench data files. """

import argparse
import bench_util
import os
import re
import sys

# Parameters for calculating bench ranges.
RANGE_RATIO_UPPER = 1.5  # Ratio of range for upper bounds.
RANGE_RATIO_LOWER = 2.0  # Ratio of range for lower bounds.
ERR_RATIO = 0.08  # Further widens the range by the ratio of average value.
ERR_UB = 1.0  # Adds an absolute upper error to cope with small benches.
ERR_LB = 1.5

# List of bench configs to monitor. Ignore all other configs.
CONFIGS_TO_INCLUDE = ['simple_viewport_1000x1000',
                      'simple_viewport_1000x1000_angle',
                      'simple_viewport_1000x1000_gpu',
                      'simple_viewport_1000x1000_scalar_1.100000',
                      'simple_viewport_1000x1000_scalar_1.100000_gpu',
                     ]

# List of flaky entries that should be excluded. Each entry is defined by a list
# of 3 strings, corresponding to the substrings of [bench, config, builder] to
# search for. A bench expectations line is excluded when each of the 3 strings
# in the list is a substring of the corresponding element of the given line. For
# instance, ['desk_yahooanswers', 'gpu', 'Ubuntu'] will skip expectation entries
# of SKP benchs whose name contains 'desk_yahooanswers' on all gpu-related
# configs of all Ubuntu builders.
ENTRIES_TO_EXCLUDE = [
                     ]


def compute_ranges(benches):
    """Given a list of bench numbers, calculate the alert range.

    Args:
      benches: a list of float bench values.

    Returns:
      a list of float [lower_bound, upper_bound].
    """
    avg = sum(benches) / len(benches)
    # Variance via E[X^2] - E[X]^2. Floating-point rounding can make this
    # difference dip slightly below zero for near-constant inputs, so abs()
    # guards the sqrt; the extra 0.05*|avg| term widens the band so that
    # benches with (near-)zero variance still get a usable nonzero range.
    mean_of_squares = sum(bench ** 2 for bench in benches) / len(benches)
    std_dev = (abs(mean_of_squares - avg ** 2) + 0.05 * abs(avg)) ** 0.5

    # If the results are normally distributed, 2 standard deviations captures
    # something like ~95% of the possible range of results.
    return [avg - 2 * std_dev, avg + 2 * std_dev]


def create_expectations_dict(revision_data_points, builder):
    """Convert list of bench data points into a dictionary of expectations data.

    Args:
      revision_data_points: a list of BenchDataPoint objects.
      builder: string of the corresponding buildbot builder name.

    Returns:
      a dictionary of this form:
        keys = tuple of (config, bench) strings.
        values = list of float [expected, lower_bound, upper_bound] for the key.

    Raises:
      Exception: if two data points map to the same (config, bench) key.
    """
    bench_dict = {}
    for point in revision_data_points:
        # Only keep walltime entries (time_type '') for monitored configs.
        if (point.time_type or
            point.config not in CONFIGS_TO_INCLUDE):
            continue
        # Skip entries known to be flaky on this builder.
        to_skip = False
        for bench_substr, config_substr, builder_substr in ENTRIES_TO_EXCLUDE:
            if (bench_substr in point.bench and config_substr in point.config and
                builder_substr in builder):
                to_skip = True
                break
        if to_skip:
            continue
        key = (point.config, point.bench)
        if key in bench_dict:
            raise Exception('Duplicate bench entry: ' + str(key))
        bench_dict[key] = [point.time] + compute_ranges(point.per_iter_time)

    return bench_dict


def main():
    """Reads bench data points, then calculate and export expectations.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '-a', '--representation_alg', default='25th',
        help='bench representation algorithm to use, see bench_util.py.')
    parser.add_argument(
        '-b', '--builder', required=True,
        help='name of the builder whose bench ranges we are computing.')
    parser.add_argument(
        '-d', '--input_dir', required=True,
        help='a directory containing bench data files.')
    parser.add_argument(
        '-o', '--output_file', required=True,
        help='file path and name for storing the output bench expectations.')
    parser.add_argument(
        '-r', '--git_revision', required=True,
        help='the git hash to indicate the revision of input data to use.')
    args = parser.parse_args()

    builder = args.builder

    data_points = bench_util.parse_skp_bench_data(
        args.input_dir, args.git_revision, args.representation_alg)

    expectations_dict = create_expectations_dict(data_points, builder)

    out_lines = []
    # sorted() works on both Python 2 and 3; dict.keys() is a view (not a
    # sortable list) on Python 3, so the old keys.sort() idiom would break.
    for (config, bench) in sorted(expectations_dict):
        (expected, lower_bound, upper_bound) = expectations_dict[(config, bench)]
        out_lines.append('%(bench)s_%(config)s_,%(builder)s-%(representation)s,'
                         '%(expected)s,%(lower_bound)s,%(upper_bound)s' % {
                             'bench': bench,
                             'config': config,
                             'builder': builder,
                             'representation': args.representation_alg,
                             'expected': expected,
                             'lower_bound': lower_bound,
                             'upper_bound': upper_bound})

    with open(args.output_file, 'w') as file_handle:
        file_handle.write('\n'.join(out_lines))


if __name__ == "__main__":
    main()