experiment_factory.py revision e82513b0aec27bf5d3ca51789edc48dde5ee439b
# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""A module to generate experiments."""

from __future__ import print_function
import os
import re
import socket

from benchmark import Benchmark
import config
from experiment import Experiment
from label import Label
from label import MockLabel
from results_cache import CacheConditions
import test_flag
import file_lock_machine

# Users may want to run Telemetry tests either individually, or in
# specified sets. Here we define sets of tests that users may want
# to run together.

telemetry_perfv2_tests = [
    'dromaeo.domcoreattr', 'dromaeo.domcoremodify', 'dromaeo.domcorequery',
    'dromaeo.domcoretraverse', 'kraken', 'octane', 'robohornet_pro', 'sunspider'
]

telemetry_pagecycler_tests = [
    'page_cycler_v2.intl_ar_fa_he',
    'page_cycler_v2.intl_es_fr_pt-BR',
    'page_cycler_v2.intl_hi_ru',
    'page_cycler_v2.intl_ja_zh',
    'page_cycler_v2.intl_ko_th_vi',
    # 'page_cycler_v2.morejs',
    # 'page_cycler_v2.moz',
    # 'page_cycler_v2.netsim.top_10',
    'page_cycler_v2.tough_layout_cases',
    'page_cycler_v2.typical_25'
]

telemetry_toolchain_old_perf_tests = [
    'dromaeo.domcoremodify', 'page_cycler_v2.intl_es_fr_pt-BR',
    'page_cycler_v2.intl_hi_ru', 'page_cycler_v2.intl_ja_zh',
    'page_cycler_v2.intl_ko_th_vi', 'page_cycler_v2.netsim.top_10',
    'page_cycler_v2.typical_25', 'robohornet_pro', 'spaceport',
    'tab_switching.top_10'
]
telemetry_toolchain_perf_tests = [
    'octane',
    'kraken',
    'speedometer',
    'dromaeo.domcoreattr',
    'dromaeo.domcoremodify',
    'smoothness.tough_webgl_cases',
]

# 'page_cycler_v2.typical_25']
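
# For reference, a benchmark block in an experiment file selects one of the
# sets above by name. The snippet below is only an illustrative sketch of
# that syntax (see experiment_file.py and the bundled example experiment
# files for the authoritative format):
#
#   benchmark: all_toolchain_perf {
#     suite: telemetry_Crosperf
#     iterations: 3
#   }
#
# With suite 'telemetry_Crosperf' and test_name 'all_toolchain_perf',
# GetExperiment() below expands this single block into every entry of
# telemetry_toolchain_perf_tests plus the non-telemetry
# graphics_WebGLAquarium benchmark.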


class ExperimentFactory(object):
  """Factory class for building an Experiment, given an ExperimentFile as input.

  This factory is currently hardcoded to produce an experiment for running
  ChromeOS benchmarks, but the idea is that in the future, other types
  of experiments could be produced.
  """

  def AppendBenchmarkSet(self, benchmarks, benchmark_list, test_args,
                         iterations, rm_chroot_tmp, perf_args, suite,
                         show_all_results, retries, run_local):
    """Add all the tests in a set to the benchmarks list."""
    for test_name in benchmark_list:
      telemetry_benchmark = Benchmark(test_name, test_name, test_args,
                                      iterations, rm_chroot_tmp, perf_args,
                                      suite, show_all_results, retries,
                                      run_local)
      benchmarks.append(telemetry_benchmark)

  def GetExperiment(self, experiment_file, working_directory, log_dir):
    """Construct an experiment from an experiment file."""
    global_settings = experiment_file.GetGlobalSettings()
    experiment_name = global_settings.GetField('name')
    board = global_settings.GetField('board')
    remote = global_settings.GetField('remote')
    # Strip any double or single quotes that the user may have added around
    # the entries in the remote list.
    new_remote = []
    if remote:
      for i in remote:
        c = re.sub('["\']', '', i)
        new_remote.append(c)
    remote = new_remote
    chromeos_root = global_settings.GetField('chromeos_root')
    rm_chroot_tmp = global_settings.GetField('rm_chroot_tmp')
    perf_args = global_settings.GetField('perf_args')
    acquire_timeout = global_settings.GetField('acquire_timeout')
    cache_dir = global_settings.GetField('cache_dir')
    cache_only = global_settings.GetField('cache_only')
    config.AddConfig('no_email', global_settings.GetField('no_email'))
    share_cache = global_settings.GetField('share_cache')
    results_dir = global_settings.GetField('results_dir')
    use_file_locks = global_settings.GetField('use_file_locks')
    locks_dir = global_settings.GetField('locks_dir')
    # If we pass a blank locks_dir to the Experiment, it will use the AFE
    # server lock mechanism. So if the user specified use_file_locks but did
    # not specify a locks dir, set the locks dir to the default locks dir in
    # file_lock_machine.
    if use_file_locks and not locks_dir:
      locks_dir = file_lock_machine.Machine.LOCKS_DIR
    chrome_src = global_settings.GetField('chrome_src')
    show_all_results = global_settings.GetField('show_all_results')
    log_level = global_settings.GetField('logging_level')
    if log_level not in ('quiet', 'average', 'verbose'):
      log_level = 'verbose'
    # Default cache hit conditions: a cache file must exist, and the image
    # checksum stored in the cache must match the computed checksum of the
    # image.
    cache_conditions = [
        CacheConditions.CACHE_FILE_EXISTS, CacheConditions.CHECKSUMS_MATCH
    ]
    if global_settings.GetField('rerun_if_failed'):
      cache_conditions.append(CacheConditions.RUN_SUCCEEDED)
    if global_settings.GetField('rerun'):
      cache_conditions.append(CacheConditions.FALSE)
    if global_settings.GetField('same_machine'):
      cache_conditions.append(CacheConditions.SAME_MACHINE_MATCH)
    if global_settings.GetField('same_specs'):
      cache_conditions.append(CacheConditions.MACHINES_MATCH)

    # Construct benchmarks.
    # Some fields are common with global settings. The values are
    # inherited and/or merged with the global settings values.
    benchmarks = []
    all_benchmark_settings = experiment_file.GetSettings('benchmark')
    for benchmark_settings in all_benchmark_settings:
      benchmark_name = benchmark_settings.name
      test_name = benchmark_settings.GetField('test_name')
      if not test_name:
        test_name = benchmark_name
      test_args = benchmark_settings.GetField('test_args')
      iterations = benchmark_settings.GetField('iterations')
      suite = benchmark_settings.GetField('suite')
      retries = benchmark_settings.GetField('retries')
      run_local = benchmark_settings.GetField('run_local')

      if suite == 'telemetry_Crosperf':
        if test_name == 'all_perfv2':
          self.AppendBenchmarkSet(benchmarks, telemetry_perfv2_tests, test_args,
                                  iterations, rm_chroot_tmp, perf_args, suite,
                                  show_all_results, retries, run_local)
        elif test_name == 'all_pagecyclers':
          self.AppendBenchmarkSet(benchmarks, telemetry_pagecycler_tests,
                                  test_args, iterations, rm_chroot_tmp,
                                  perf_args, suite, show_all_results, retries,
                                  run_local)
        elif test_name == 'all_toolchain_perf':
          self.AppendBenchmarkSet(benchmarks, telemetry_toolchain_perf_tests,
                                  test_args, iterations, rm_chroot_tmp,
                                  perf_args, suite, show_all_results, retries,
                                  run_local)
          # Add non-telemetry toolchain-perf benchmarks:
          benchmarks.append(
              Benchmark(
                  'graphics_WebGLAquarium',
                  'graphics_WebGLAquarium',
                  '',
                  iterations,
                  rm_chroot_tmp,
                  perf_args,
                  '',
                  show_all_results,
                  retries,
                  run_local=False))
        elif test_name == 'all_toolchain_perf_old':
          self.AppendBenchmarkSet(benchmarks,
                                  telemetry_toolchain_old_perf_tests, test_args,
                                  iterations, rm_chroot_tmp, perf_args, suite,
                                  show_all_results, retries, run_local)
        else:
          benchmark = Benchmark(test_name, test_name, test_args, iterations,
                                rm_chroot_tmp, perf_args, suite,
                                show_all_results, retries, run_local)
          benchmarks.append(benchmark)
      else:
        # Add the single benchmark.
        benchmark = Benchmark(
            benchmark_name,
            test_name,
            test_args,
            iterations,
            rm_chroot_tmp,
            perf_args,
            suite,
            show_all_results,
            retries,
            run_local=False)
        benchmarks.append(benchmark)

    if not benchmarks:
      raise RuntimeError('No benchmarks specified')

    # Construct labels.
    # Some fields are common with global settings. The values are
    # inherited and/or merged with the global settings values.
    labels = []
    all_label_settings = experiment_file.GetSettings('label')
    all_remote = list(remote)
    for label_settings in all_label_settings:
      label_name = label_settings.name
      image = label_settings.GetField('chromeos_image')
      chromeos_root = label_settings.GetField('chromeos_root')
      my_remote = label_settings.GetField('remote')
      compiler = label_settings.GetField('compiler')
      new_remote = []
      if my_remote:
        for i in my_remote:
          c = re.sub('["\']', '', i)
          new_remote.append(c)
      my_remote = new_remote
      if image == '':
        build = label_settings.GetField('build')
        if len(build) == 0:
          raise RuntimeError("Can not have empty 'build' field!")
        image = label_settings.GetXbuddyPath(build, board, chromeos_root,
                                             log_level)

      cache_dir = label_settings.GetField('cache_dir')
      chrome_src = label_settings.GetField('chrome_src')

      # TODO(yunlian): We should consolidate code in machine_manager.py
      # to determine whether we are running from within Google or not.
      if ('corp.google.com' in socket.gethostname() and
          (not my_remote or my_remote == remote and
           global_settings.GetField('board') != board)):
        my_remote = self.GetDefaultRemotes(board)
      if global_settings.GetField('same_machine') and len(my_remote) > 1:
        raise RuntimeError('Only one remote is allowed when same_machine '
                           'is turned on')
      all_remote += my_remote
      image_args = label_settings.GetField('image_args')
      if test_flag.GetTestMode():
        # pylint: disable=too-many-function-args
        label = MockLabel(label_name, image, chromeos_root, board, my_remote,
                          image_args, cache_dir, cache_only, log_level,
                          compiler, chrome_src)
      else:
        label = Label(label_name, image, chromeos_root, board, my_remote,
                      image_args, cache_dir, cache_only, log_level, compiler,
                      chrome_src)
      labels.append(label)

    if not labels:
      raise RuntimeError('No labels specified')

    email = global_settings.GetField('email')
    all_remote += list(set(my_remote))
    all_remote = list(set(all_remote))
    experiment = Experiment(experiment_name, all_remote, working_directory,
                            chromeos_root, cache_conditions, labels, benchmarks,
                            experiment_file.Canonicalize(), email,
                            acquire_timeout, log_dir, log_level, share_cache,
                            results_dir, locks_dir)

    return experiment

  def GetDefaultRemotes(self, board):
    default_remotes_file = os.path.join(
        os.path.dirname(__file__), 'default_remotes')
    try:
      with open(default_remotes_file) as f:
        for line in f:
          key, v = line.split(':')
          if key.strip() == board:
            remotes = v.strip().split()
            if remotes:
              return remotes
            else:
              raise RuntimeError('There is no remote for {0}'.format(board))
    except IOError:
      # TODO: rethrow instead of throwing different exception.
      raise RuntimeError('IOError while reading file {0}'
                         .format(default_remotes_file))
    else:
      raise RuntimeError('There is no remote for {0}'.format(board))
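
# ---------------------------------------------------------------------------
# Illustrative usage sketch. crosperf.py is the real entry point; the
# ExperimentFile import and constructor shown here are assumptions about the
# sibling experiment_file module, not part of this module's tested surface.
#
#   from experiment_file import ExperimentFile
#
#   with open('my_experiment.exp') as f:
#     exp_file = ExperimentFile(f)
#   experiment = ExperimentFactory().GetExperiment(
#       exp_file, working_directory='/tmp/crosperf', log_dir='/tmp/crosperf')
# ---------------------------------------------------------------------------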