experiment_factory.py revision 7f20acb9c9036015a665a14f27633c639e9b9d91
#!/usr/bin/python

# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""A module to generate experiments."""

import os
import re
import socket

from benchmark import Benchmark
import config
from experiment import Experiment
from label import Label
from label import MockLabel
from results_cache import CacheConditions
import test_flag
import file_lock_machine

# Users may want to run Telemetry tests either individually, or in
# specified sets.  Here we define sets of tests that users may want
# to run together.

telemetry_perfv2_tests = [ 'dromaeo.domcoreattr',
                           'dromaeo.domcoremodify',
                           'dromaeo.domcorequery',
                           'dromaeo.domcoretraverse',
                           'kraken',
# The following benchmark is extremely flaky, so omit it for now.
#                           'memory.top_25',
                           'octane',
                           'robohornet_pro',
# The following benchmark is broken (and hanging) for now, so omit it.
#                           'smoothness.top_25',
                           'sunspider',
                           ]

telemetry_pagecycler_tests = [
                               'page_cycler.intl_ar_fa_he',
                               'page_cycler.intl_es_fr_pt-BR',
                               'page_cycler.intl_hi_ru',
                               'page_cycler.intl_ja_zh',
                               'page_cycler.intl_ko_th_vi',
                               'page_cycler.morejs',
                               'page_cycler.moz',
                               'page_cycler.netsim.top_10',
                               'page_cycler.tough_layout_cases',
                               'page_cycler.typical_25',
# The following benchmarks are now deprecated in Telemetry:
#                               'page_cycler.indexed_db.basic_insert',
#                               'page_cycler.bloat',
                               ]

telemetry_toolchain_old_perf_tests = [
                               'dromaeo.domcoremodify',
                               'page_cycler.intl_es_fr_pt-BR',
                               'page_cycler.intl_hi_ru',
                               'page_cycler.intl_ja_zh',
                               'page_cycler.intl_ko_th_vi',
                               'page_cycler.netsim.top_10',
                               'page_cycler.typical_25',
                               'robohornet_pro',
                               'spaceport',
                               'tab_switching.top_10',
# The following benchmarks are now deprecated in Telemetry:
#                               'canvasmark',
#                               'jsgamebench',
#                               'page_cycler.bloat',
#                               'peacekeeper.html',
                               ]

telemetry_toolchain_perf_tests = [
                               'octane',
                               'kraken',
                               'speedometer',
                               'dromaeo.domcoreattr',
                               'dromaeo.domcoremodify',
                               'smoothness.tough_webgl_cases',
                               'page_cycler.typical_25',
                               'media.tough_video_cases',
                               ]

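# A set defined above is selected in an experiment file by using the set name
# as a benchmark's test_name under the telemetry_Crosperf suite, as handled
# in GetExperiment below. A minimal sketch (field values are illustrative):
#
#   benchmark: all_toolchain_perf {
#     suite: telemetry_Crosperf
#     iterations: 5
#   }
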
class ExperimentFactory(object):
  """Factory class for building an Experiment, given an ExperimentFile as input.

  This factory is currently hardcoded to produce an experiment for running
  ChromeOS benchmarks, but the idea is that in the future, other types
  of experiments could be produced.
  """

  def _AppendBenchmarkSet(self, benchmarks, benchmark_list, test_args,
                          iterations, rm_chroot_tmp, perf_args, suite,
                          show_all_results, retries, run_local):
    """Add all the tests in a set to the benchmarks list."""
    for test_name in benchmark_list:
      telemetry_benchmark = Benchmark(test_name, test_name, test_args,
                                      iterations, rm_chroot_tmp, perf_args,
                                      suite, show_all_results, retries,
                                      run_local)
      benchmarks.append(telemetry_benchmark)


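  # A minimal sketch of how this helper is used (argument values here are
  # hypothetical; the parameter order matches the method definition above):
  #
  #   factory = ExperimentFactory()
  #   benchmarks = []
  #   factory._AppendBenchmarkSet(benchmarks, telemetry_perfv2_tests,
  #                               test_args='', iterations=3,
  #                               rm_chroot_tmp=False, perf_args='',
  #                               suite='telemetry_Crosperf',
  #                               show_all_results=False, retries=1,
  #                               run_local=False)
  #   # benchmarks now holds one Benchmark per entry in telemetry_perfv2_tests.
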
  def GetExperiment(self, experiment_file, working_directory, log_dir):
    """Construct an experiment from an experiment file."""
    global_settings = experiment_file.GetGlobalSettings()
    experiment_name = global_settings.GetField("name")
    board = global_settings.GetField("board")
    remote = global_settings.GetField("remote")
    # Strip any double or single quotes the user may have added around
    # entries in the remote list.
    new_remote = []
    for i in remote:
      c = re.sub('["\']', '', i)
      new_remote.append(c)
    remote = new_remote
    chromeos_root = global_settings.GetField("chromeos_root")
    rm_chroot_tmp = global_settings.GetField("rm_chroot_tmp")
    perf_args = global_settings.GetField("perf_args")
    acquire_timeout = global_settings.GetField("acquire_timeout")
    cache_dir = global_settings.GetField("cache_dir")
    cache_only = global_settings.GetField("cache_only")
    config.AddConfig("no_email", global_settings.GetField("no_email"))
    share_cache = global_settings.GetField("share_cache")
    results_dir = global_settings.GetField("results_dir")
    use_file_locks = global_settings.GetField("use_file_locks")
    locks_dir = global_settings.GetField("locks_dir")
    # If we pass a blank locks_dir to the Experiment, it will use the AFE
    # server lock mechanism.  So if the user specified use_file_locks but did
    # not specify a locks dir, set the locks dir to the default locks dir in
    # file_lock_machine.
    if use_file_locks and not locks_dir:
      locks_dir = file_lock_machine.Machine.LOCKS_DIR
    chrome_src = global_settings.GetField("chrome_src")
    show_all_results = global_settings.GetField("show_all_results")
    log_level = global_settings.GetField("logging_level")
    if log_level not in ("quiet", "average", "verbose"):
      log_level = "verbose"
    # Default cache hit conditions: a cache file must exist, and the image
    # checksum stored in the cache must match the computed checksum of the
    # image.
    cache_conditions = [CacheConditions.CACHE_FILE_EXISTS,
                        CacheConditions.CHECKSUMS_MATCH]
    if global_settings.GetField("rerun_if_failed"):
      cache_conditions.append(CacheConditions.RUN_SUCCEEDED)
    if global_settings.GetField("rerun"):
      cache_conditions.append(CacheConditions.FALSE)
    if global_settings.GetField("same_machine"):
      cache_conditions.append(CacheConditions.SAME_MACHINE_MATCH)
    if global_settings.GetField("same_specs"):
      cache_conditions.append(CacheConditions.MACHINES_MATCH)

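    # Illustrative example (field values assumed, not taken from a real
    # experiment file): with "rerun: True" in the global settings,
    # cache_conditions becomes [CACHE_FILE_EXISTS, CHECKSUMS_MATCH, FALSE];
    # CacheConditions.FALSE presumably defeats every cache lookup, so all
    # benchmark runs are re-executed rather than served from the cache.
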
    # Construct benchmarks.
    # Some fields are common with global settings. The values are
    # inherited and/or merged with the global settings values.
    benchmarks = []
    all_benchmark_settings = experiment_file.GetSettings("benchmark")
    for benchmark_settings in all_benchmark_settings:
      benchmark_name = benchmark_settings.name
      test_name = benchmark_settings.GetField("test_name")
      if not test_name:
        test_name = benchmark_name
      test_args = benchmark_settings.GetField("test_args")
      iterations = benchmark_settings.GetField("iterations")
      suite = benchmark_settings.GetField("suite")
      retries = benchmark_settings.GetField("retries")
      run_local = benchmark_settings.GetField("run_local")

      if suite == 'telemetry_Crosperf':
        if test_name == 'all_perfv2':
          self._AppendBenchmarkSet(benchmarks, telemetry_perfv2_tests,
                                   test_args, iterations, rm_chroot_tmp,
                                   perf_args, suite, show_all_results, retries,
                                   run_local)
        elif test_name == 'all_pagecyclers':
          self._AppendBenchmarkSet(benchmarks, telemetry_pagecycler_tests,
                                   test_args, iterations, rm_chroot_tmp,
                                   perf_args, suite, show_all_results, retries,
                                   run_local)
        elif test_name == 'all_toolchain_perf':
          self._AppendBenchmarkSet(benchmarks, telemetry_toolchain_perf_tests,
                                   test_args, iterations, rm_chroot_tmp,
                                   perf_args, suite, show_all_results, retries,
                                   run_local)
          # Add non-telemetry toolchain-perf benchmarks:
          benchmarks.append(Benchmark('graphics_WebGLAquarium',
                                      'graphics_WebGLAquarium', '', iterations,
                                      rm_chroot_tmp, perf_args, '',
                                      show_all_results, retries,
                                      run_local=False))
        elif test_name == 'all_toolchain_perf_pure_telemetry':
          self._AppendBenchmarkSet(benchmarks, telemetry_toolchain_perf_tests,
                                   test_args, iterations, rm_chroot_tmp,
                                   perf_args, suite, show_all_results, retries,
                                   run_local)
        elif test_name == 'all_toolchain_perf_old':
          self._AppendBenchmarkSet(benchmarks,
                                   telemetry_toolchain_old_perf_tests,
                                   test_args, iterations, rm_chroot_tmp,
                                   perf_args, suite, show_all_results, retries,
                                   run_local)
        else:
          benchmark = Benchmark(test_name, test_name, test_args,
                                iterations, rm_chroot_tmp, perf_args, suite,
                                show_all_results, retries, run_local)
          benchmarks.append(benchmark)
      else:
        # Add the single benchmark.
        benchmark = Benchmark(benchmark_name, test_name, test_args,
                              iterations, rm_chroot_tmp, perf_args, suite,
                              show_all_results, retries, run_local)
        benchmarks.append(benchmark)

    # Construct labels.
    # Some fields are common with global settings. The values are
    # inherited and/or merged with the global settings values.
    labels = []
    all_label_settings = experiment_file.GetSettings("label")
    all_remote = list(remote)
    for label_settings in all_label_settings:
      label_name = label_settings.name
      image = label_settings.GetField("chromeos_image")
      chromeos_root = label_settings.GetField("chromeos_root")
      my_remote = label_settings.GetField("remote")
      compiler = label_settings.GetField("compiler")
      new_remote = []
      for i in my_remote:
        c = re.sub('["\']', '', i)
        new_remote.append(c)
      my_remote = new_remote
      if image == "":
        build = label_settings.GetField("build")
        if len(build) == 0:
          raise Exception("Cannot have an empty 'build' field!")
        image = label_settings.GetXbuddyPath(build, board, chromeos_root,
                                             log_level)

      cache_dir = label_settings.GetField("cache_dir")
      chrome_src = label_settings.GetField("chrome_src")

      # TODO(yunlian): We should consolidate code in machine_manager.py
      # to determine whether we are running from within Google or not.
      if ("corp.google.com" in socket.gethostname() and
          (not my_remote or
           (my_remote == remote and
            global_settings.GetField("board") != board))):
        my_remote = self.GetDefaultRemotes(board)
      if global_settings.GetField("same_machine") and len(my_remote) > 1:
        raise Exception("Only one remote is allowed when same_machine "
                        "is turned on")
      all_remote += my_remote
      image_args = label_settings.GetField("image_args")
      if test_flag.GetTestMode():
        label = MockLabel(label_name, image, chromeos_root, board, my_remote,
                          image_args, cache_dir, cache_only, compiler,
                          chrome_src)
      else:
        label = Label(label_name, image, chromeos_root, board, my_remote,
                      image_args, cache_dir, cache_only, log_level, compiler,
                      chrome_src)
      labels.append(label)

    email = global_settings.GetField("email")
    all_remote = list(set(all_remote))
    experiment = Experiment(experiment_name, all_remote,
                            working_directory, chromeos_root,
                            cache_conditions, labels, benchmarks,
                            experiment_file.Canonicalize(),
                            email, acquire_timeout, log_dir, log_level,
                            share_cache,
                            results_dir, locks_dir)

    return experiment

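  # A minimal sketch of the experiment-file shape GetExperiment consumes.
  # The field names come from the GetField calls above; the board, remote,
  # and image values are hypothetical:
  #
  #   name: example_experiment
  #   board: lumpy
  #   remote: 192.168.0.10
  #
  #   benchmark: octane {
  #     suite: telemetry_Crosperf
  #     iterations: 3
  #   }
  #
  #   label: vanilla_image {
  #     chromeos_image: /path/to/chromiumos_test_image.bin
  #   }
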
  def GetDefaultRemotes(self, board):
    default_remotes_file = os.path.join(os.path.dirname(__file__),
                                        "default_remotes")
    try:
      with open(default_remotes_file) as f:
        for line in f:
          key, v = line.split(":")
          if key.strip() == board:
            # split() with no argument drops empty strings, so a board
            # entry with no machines listed yields an empty list here.
            remotes = v.split()
            if remotes:
              return remotes
            else:
              raise Exception("There is no remote for {0}".format(board))
    except IOError:
      raise Exception("IOError while reading file {0}"
                      .format(default_remotes_file))
    else:
      raise Exception("There is no remote for {0}".format(board))
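
# The "default_remotes" file parsed above is assumed to map one board per
# line to a space-separated list of machines (the hostnames below are
# hypothetical):
#
#   lumpy: chromeos2-row1-rack4-host1.cros chromeos2-row1-rack4-host2.cros
#   daisy: chromeos2-row2-rack7-host3.cros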
291