experiment_factory.py revision 1a224369afcbfd0276f4c7bdc625dec7f7b30d01
#!/usr/bin/python

# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""A module to generate experiments."""

import os
import re
import socket

from benchmark import Benchmark
import config
from experiment import Experiment
from label import Label
from label import MockLabel
from results_cache import CacheConditions
import test_flag

# Users may want to run Telemetry tests either individually, or in
# specified sets. Here we define sets of tests that users may want
# to run together.

telemetry_perfv2_tests = ['dromaeo.domcoreattr',
                          'dromaeo.domcoremodify',
                          'dromaeo.domcorequery',
                          'dromaeo.domcoretraverse',
                          'kraken',
                          # The following benchmark is extremely flaky, so omit it for now.
                          # 'memory.top_25',
                          'octane',
                          'robohornet_pro',
                          # The following benchmark is broken (and hanging) for now, so omit it.
                          # 'smoothness.top_25',
                          'sunspider',
                         ]

telemetry_pagecycler_tests = ['page_cycler.indexed_db.basic_insert',
                              'page_cycler.bloat',
                              'page_cycler.dhtml',
                              'page_cycler.intl_ar_fa_he',
                              'page_cycler.intl_es_fr_pt-BR',
                              'page_cycler.intl_hi_ru',
                              'page_cycler.intl_ja_zh',
                              'page_cycler.intl_ko_th_vi',
                              'page_cycler.morejs',
                              'page_cycler.moz',
                              'page_cycler.netsim.top_10',
                              'page_cycler.tough_layout_cases',
                              'page_cycler.typical_25',
                             ]

telemetry_toolchain_perf_tests = ['canvasmark',
                                  'jsgamebench',
                                  'dromaeo.domcoremodify',
                                  'page_cycler.bloat',
                                  'page_cycler.intl_es_fr_pt-BR',
                                  'page_cycler.intl_hi_ru',
                                  'page_cycler.intl_ja_zh',
                                  'page_cycler.intl_ko_th_vi',
                                  'page_cycler.netsim.top_10',
                                  'page_cycler.typical_25',
                                  'peacekeeper.html',
                                  'robohornet_pro',
                                  'spaceport',
                                  'tab_switching.top_10',
                                 ]


class ExperimentFactory(object):
  """Factory class for building an Experiment, given an ExperimentFile as input.

  This factory is currently hardcoded to produce an experiment for running
  ChromeOS benchmarks, but the idea is that in the future, other types
  of experiments could be produced.
  """

  def _AppendBenchmarkSet(self, benchmarks, benchmark_list, test_args,
                          iterations, rm_chroot_tmp, perf_args, suite,
                          show_all_results):
    """Add all the tests in a set to the benchmarks list."""
    for test_name in benchmark_list:
      telemetry_benchmark = Benchmark(test_name, test_name, test_args,
                                      iterations, rm_chroot_tmp, perf_args,
                                      suite, show_all_results)
      benchmarks.append(telemetry_benchmark)

  def GetExperiment(self, experiment_file, working_directory, log_dir):
    """Construct an experiment from an experiment file."""
    global_settings = experiment_file.GetGlobalSettings()
    experiment_name = global_settings.GetField("name")
    board = global_settings.GetField("board")
    remote = global_settings.GetField("remote")
    # Remove any " or ' characters from the remote entries if the user
    # added them to the remote string.
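    # For example (illustrative values only), a remote written in the
    # experiment file as '"chromeos-machine1.example.com"' or "'192.168.0.5'"
    # is reduced here to the bare hostname or IP address.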
    new_remote = []
    for i in remote:
      c = re.sub('["\']', '', i)
      new_remote.append(c)
    remote = new_remote
    chromeos_root = global_settings.GetField("chromeos_root")
    rm_chroot_tmp = global_settings.GetField("rm_chroot_tmp")
    perf_args = global_settings.GetField("perf_args")
    acquire_timeout = global_settings.GetField("acquire_timeout")
    cache_dir = global_settings.GetField("cache_dir")
    cache_only = global_settings.GetField("cache_only")
    config.AddConfig("no_email", global_settings.GetField("no_email"))
    share_cache = global_settings.GetField("share_cache")
    results_dir = global_settings.GetField("results_dir")
    chrome_src = global_settings.GetField("chrome_src")
    show_all_results = global_settings.GetField("show_all_results")
    log_level = global_settings.GetField("logging_level")
    if log_level not in ("quiet", "average", "verbose"):
      log_level = "verbose"
    # Default cache hit conditions: the image checksum in the cache and the
    # computed checksum of the image must match, and a cache file must exist.
    cache_conditions = [CacheConditions.CACHE_FILE_EXISTS,
                        CacheConditions.CHECKSUMS_MATCH]
    if global_settings.GetField("rerun_if_failed"):
      cache_conditions.append(CacheConditions.RUN_SUCCEEDED)
    if global_settings.GetField("rerun"):
      cache_conditions.append(CacheConditions.FALSE)
    if global_settings.GetField("same_machine"):
      cache_conditions.append(CacheConditions.SAME_MACHINE_MATCH)
    if global_settings.GetField("same_specs"):
      cache_conditions.append(CacheConditions.MACHINES_MATCH)

    # Construct benchmarks.
    benchmarks = []
    all_benchmark_settings = experiment_file.GetSettings("benchmark")
    for benchmark_settings in all_benchmark_settings:
      benchmark_name = benchmark_settings.name
      test_name = benchmark_settings.GetField("test_name")
      if not test_name:
        test_name = benchmark_name
      test_args = benchmark_settings.GetField("test_args")
      iterations = benchmark_settings.GetField("iterations")
      suite = benchmark_settings.GetField("suite")

      if suite == 'telemetry_Crosperf':
        if test_name == 'all_perfv2':
          self._AppendBenchmarkSet(benchmarks, telemetry_perfv2_tests,
                                   test_args, iterations, rm_chroot_tmp,
                                   perf_args, suite, show_all_results)
        elif test_name == 'all_pagecyclers':
          self._AppendBenchmarkSet(benchmarks, telemetry_pagecycler_tests,
                                   test_args, iterations, rm_chroot_tmp,
                                   perf_args, suite, show_all_results)
        elif test_name == 'all_toolchain_perf':
          self._AppendBenchmarkSet(benchmarks, telemetry_toolchain_perf_tests,
                                   test_args, iterations, rm_chroot_tmp,
                                   perf_args, suite, show_all_results)
        else:
          benchmark = Benchmark(test_name, test_name, test_args,
                                iterations, rm_chroot_tmp, perf_args, suite,
                                show_all_results)
          benchmarks.append(benchmark)
      else:
        # Add the single benchmark.
        benchmark = Benchmark(benchmark_name, test_name, test_args,
                              iterations, rm_chroot_tmp, perf_args, suite,
                              show_all_results)
        benchmarks.append(benchmark)

    # Construct labels.
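    # Each "label" section of the experiment file describes one image to be
    # tested. A label block might look roughly like the following; this is
    # an illustrative sketch only, since the exact experiment-file syntax is
    # defined by the experiment file parser, not by this module:
    #
    #   label: vanilla_image {
    #     chromeos_image: /path/to/chromiumos_test_image.bin
    #     remote: my-test-machine
    #   }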
    labels = []
    all_label_settings = experiment_file.GetSettings("label")
    all_remote = list(remote)
    for label_settings in all_label_settings:
      label_name = label_settings.name
      image = label_settings.GetField("chromeos_image")
      chromeos_root = label_settings.GetField("chromeos_root")
      my_remote = label_settings.GetField("remote")
      new_remote = []
      for i in my_remote:
        c = re.sub('["\']', '', i)
        new_remote.append(c)
      my_remote = new_remote
      if image == "":
        build = label_settings.GetField("build")
        image = label_settings.GetXbuddyPath(build, board, chromeos_root,
                                             log_level)

      cache_dir = label_settings.GetField("cache_dir")
      chrome_src = label_settings.GetField("chrome_src")

      # TODO(yunlian): We should consolidate code in machine_manager.py
      # to determine whether we are running from within Google or not.
      if ("corp.google.com" in socket.gethostname() and
          (not my_remote
           or my_remote == remote
           and global_settings.GetField("board") != board)):
        my_remote = self.GetDefaultRemotes(board)
      if global_settings.GetField("same_machine") and len(my_remote) > 1:
        raise Exception("Only one remote is allowed when same_machine "
                        "is turned on")
      all_remote += my_remote
      image_args = label_settings.GetField("image_args")
      if test_flag.GetTestMode():
        label = MockLabel(label_name, image, chromeos_root, board, my_remote,
                          image_args, cache_dir, cache_only, chrome_src)
      else:
        label = Label(label_name, image, chromeos_root, board, my_remote,
                      image_args, cache_dir, cache_only, chrome_src)
      labels.append(label)

    email = global_settings.GetField("email")
    all_remote += list(set(my_remote))
    all_remote = list(set(all_remote))
    experiment = Experiment(experiment_name, all_remote,
                            working_directory, chromeos_root,
                            cache_conditions, labels, benchmarks,
                            experiment_file.Canonicalize(),
                            email, acquire_timeout, log_dir, log_level,
                            share_cache,
                            results_dir)

    return experiment

  def GetDefaultRemotes(self, board):
    default_remotes_file = os.path.join(os.path.dirname(__file__),
                                        "default_remotes")
    try:
      with open(default_remotes_file) as f:
        for line in f:
          key, v = line.split(":")
          if key.strip() == board:
            remotes = v.strip().split(" ")
            if remotes:
              return remotes
            else:
              raise Exception("There is no remote for {0}".format(board))
    except IOError:
      raise Exception("IOError while reading file {0}"
                      .format(default_remotes_file))
    else:
      raise Exception("There is no remote for {0}".format(board))
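
# A minimal usage sketch: the factory is driven by an ExperimentFile object.
# The ExperimentFile import and constructor shown below are assumptions based
# on the calls made above (GetGlobalSettings, GetSettings, Canonicalize),
# not something defined in this file, and the paths are placeholders.
#
#   from experiment_file import ExperimentFile
#
#   with open("my_experiment.exp") as f:
#     experiment_file = ExperimentFile(f)
#   experiment = ExperimentFactory().GetExperiment(
#       experiment_file,
#       working_directory="/tmp/crosperf",
#       log_dir="/tmp/crosperf/logs")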