experiment_factory.py revision 1489d6440dc37585f3bb9325f3d12fbaa5a37b2b
#!/usr/bin/python

# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""A module to generate experiments."""

import os
import re
import socket

from benchmark import Benchmark
import config
from experiment import Experiment
from label import Label
from label import MockLabel
from results_cache import CacheConditions
import test_flag
import file_lock_machine

# Users may want to run Telemetry tests either individually or in
# specified sets.  Here we define sets of tests that users may want
# to run together.
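# In GetExperiment below, these sets are selected with the special test names
# 'all_perfv2', 'all_pagecyclers', 'all_toolchain_perf' and
# 'all_toolchain_perf_old' when the suite is 'telemetry_Crosperf'.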

telemetry_perfv2_tests = [ 'dromaeo.domcoreattr',
                           'dromaeo.domcoremodify',
                           'dromaeo.domcorequery',
                           'dromaeo.domcoretraverse',
                           'kraken',
# The following benchmark is extremely flaky, so omit it for now.
#                           'memory.top_25',
                           'octane',
                           'robohornet_pro',
# The following benchmark is broken (and hanging) for now, so omit it.
#                           'smoothness.top_25',
                           'sunspider',
                           ]

telemetry_pagecycler_tests = [
                               'page_cycler.intl_ar_fa_he',
                               'page_cycler.intl_es_fr_pt-BR',
                               'page_cycler.intl_hi_ru',
                               'page_cycler.intl_ja_zh',
                               'page_cycler.intl_ko_th_vi',
                               'page_cycler.morejs',
                               'page_cycler.moz',
                               'page_cycler.netsim.top_10',
                               'page_cycler.tough_layout_cases',
                               'page_cycler.typical_25',
# The following benchmarks are now deprecated in Telemetry:
#                               'page_cycler.indexed_db.basic_insert',
#                               'page_cycler.bloat',
                               ]

telemetry_toolchain_old_perf_tests = [
                               'dromaeo.domcoremodify',
                               'page_cycler.intl_es_fr_pt-BR',
                               'page_cycler.intl_hi_ru',
                               'page_cycler.intl_ja_zh',
                               'page_cycler.intl_ko_th_vi',
                               'page_cycler.netsim.top_10',
                               'page_cycler.typical_25',
                               'robohornet_pro',
                               'spaceport',
                               'tab_switching.top_10',
# The following benchmarks are now deprecated in Telemetry:
#                               'canvasmark',
#                               'jsgamebench',
#                               'page_cycler.bloat',
#                               'peacekeeper.html',
                               ]

telemetry_toolchain_perf_tests = [
                               'octane',
                               'kraken',
                               'speedometer',
                               'dromaeo.domcoreattr',
                               'dromaeo.domcoremodify',
                               'smoothness.tough_webgl_cases',
                               'page_cycler.typical_25',
                               'media.tough_video_cases',
                               ]

class ExperimentFactory(object):
  """Factory class for building an Experiment, given an ExperimentFile as input.

  This factory is currently hardcoded to produce an experiment for running
  ChromeOS benchmarks, but the idea is that in the future, other types
  of experiments could be produced.
  """

  def _AppendBenchmarkSet(self, benchmarks, benchmark_list, test_args,
                          iterations, rm_chroot_tmp, perf_args, suite,
                          show_all_results, retries, run_local):
    """Add all the tests in a set to the benchmarks list."""
    for test_name in benchmark_list:
      telemetry_benchmark = Benchmark(test_name, test_name, test_args,
                                      iterations, rm_chroot_tmp, perf_args,
                                      suite, show_all_results, retries,
                                      run_local)
      benchmarks.append(telemetry_benchmark)

  def GetExperiment(self, experiment_file, working_directory, log_dir):
    """Construct an experiment from an experiment file."""
    global_settings = experiment_file.GetGlobalSettings()
    experiment_name = global_settings.GetField("name")
    board = global_settings.GetField("board")
    remote = global_settings.GetField("remote")
    # Strip any stray " or ' characters from the remote entries, in case the
    # user quoted them in the experiment file.
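    # For example (hypothetical value), a remote written as '"my-machine"' or
    # "'my-machine'" becomes plain my-machine.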
    new_remote = []
    for i in remote:
      c = re.sub('["\']', '', i)
      new_remote.append(c)
    remote = new_remote
    chromeos_root = global_settings.GetField("chromeos_root")
    rm_chroot_tmp = global_settings.GetField("rm_chroot_tmp")
    perf_args = global_settings.GetField("perf_args")
    acquire_timeout = global_settings.GetField("acquire_timeout")
    cache_dir = global_settings.GetField("cache_dir")
    cache_only = global_settings.GetField("cache_only")
    config.AddConfig("no_email", global_settings.GetField("no_email"))
    share_cache = global_settings.GetField("share_cache")
    results_dir = global_settings.GetField("results_dir")
    use_file_locks = global_settings.GetField("use_file_locks")
    locks_dir = global_settings.GetField("locks_dir")
    # If we pass a blank locks_dir to the Experiment, it will use the AFE
    # server lock mechanism.  So if the user specified use_file_locks but did
    # not specify a locks dir, set the locks dir to the default locks dir in
    # file_lock_machine.
    if use_file_locks and not locks_dir:
      locks_dir = file_lock_machine.Machine.LOCKS_DIR
    chrome_src = global_settings.GetField("chrome_src")
    show_all_results = global_settings.GetField("show_all_results")
    log_level = global_settings.GetField("logging_level")
    if log_level not in ("quiet", "average", "verbose"):
      log_level = "verbose"
    # Default cache hit conditions. The image checksum in the cache and the
    # computed checksum of the image must match. Also a cache file must exist.
    cache_conditions = [CacheConditions.CACHE_FILE_EXISTS,
                        CacheConditions.CHECKSUMS_MATCH]
    if global_settings.GetField("rerun_if_failed"):
      cache_conditions.append(CacheConditions.RUN_SUCCEEDED)
    if global_settings.GetField("rerun"):
      cache_conditions.append(CacheConditions.FALSE)
    if global_settings.GetField("same_machine"):
      cache_conditions.append(CacheConditions.SAME_MACHINE_MATCH)
    if global_settings.GetField("same_specs"):
      cache_conditions.append(CacheConditions.MACHINES_MATCH)
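    # The optional flags above tighten the hit criteria: "rerun_if_failed"
    # additionally requires that the cached run succeeded, "rerun" forces a
    # cache miss, and "same_machine"/"same_specs" also require a matching
    # machine or machine specification.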

    # Construct benchmarks.
    # Some fields are common with global settings. The values are
    # inherited and/or merged with the global settings values.
    benchmarks = []
    all_benchmark_settings = experiment_file.GetSettings("benchmark")
    for benchmark_settings in all_benchmark_settings:
      benchmark_name = benchmark_settings.name
      test_name = benchmark_settings.GetField("test_name")
      if not test_name:
        test_name = benchmark_name
      test_args = benchmark_settings.GetField("test_args")
      iterations = benchmark_settings.GetField("iterations")
      suite = benchmark_settings.GetField("suite")
      retries = benchmark_settings.GetField("retries")
      run_local = benchmark_settings.GetField("run_local")

      if suite == 'telemetry_Crosperf':
        if test_name == 'all_perfv2':
          self._AppendBenchmarkSet(benchmarks, telemetry_perfv2_tests,
                                   test_args, iterations, rm_chroot_tmp,
                                   perf_args, suite, show_all_results, retries,
                                   run_local)
        elif test_name == 'all_pagecyclers':
          self._AppendBenchmarkSet(benchmarks, telemetry_pagecycler_tests,
                                   test_args, iterations, rm_chroot_tmp,
                                   perf_args, suite, show_all_results, retries,
                                   run_local)
        elif test_name == 'all_toolchain_perf':
          self._AppendBenchmarkSet(benchmarks, telemetry_toolchain_perf_tests,
                                   test_args, iterations, rm_chroot_tmp,
                                   perf_args, suite, show_all_results, retries,
                                   run_local)
          # Add non-telemetry toolchain-perf benchmarks:
          benchmarks.append(Benchmark('graphics_WebGLAquarium',
                                      'graphics_WebGLAquarium', '', iterations,
                                      rm_chroot_tmp, perf_args, '',
                                      show_all_results, retries,
                                      run_local=False))
        elif test_name == 'all_toolchain_perf_old':
          self._AppendBenchmarkSet(benchmarks,
                                   telemetry_toolchain_old_perf_tests,
                                   test_args, iterations, rm_chroot_tmp,
                                   perf_args, suite, show_all_results, retries,
                                   run_local)
        else:
          benchmark = Benchmark(test_name, test_name, test_args,
                                iterations, rm_chroot_tmp, perf_args, suite,
                                show_all_results, retries, run_local)
          benchmarks.append(benchmark)
      else:
        # Add the single benchmark.
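        # Note that for suites other than telemetry_Crosperf, run_local is
        # forced to False regardless of the benchmark's run_local field.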
        benchmark = Benchmark(benchmark_name, test_name, test_args,
                              iterations, rm_chroot_tmp, perf_args, suite,
                              show_all_results, retries, run_local=False)
        benchmarks.append(benchmark)

    # Construct labels.
    # Some fields are common with global settings. The values are
    # inherited and/or merged with the global settings values.
    labels = []
    all_label_settings = experiment_file.GetSettings("label")
    all_remote = list(remote)
    for label_settings in all_label_settings:
      label_name = label_settings.name
      image = label_settings.GetField("chromeos_image")
      chromeos_root = label_settings.GetField("chromeos_root")
      my_remote = label_settings.GetField("remote")
      compiler = label_settings.GetField("compiler")
      new_remote = []
      for i in my_remote:
        c = re.sub('["\']', '', i)
        new_remote.append(c)
      my_remote = new_remote
      if image == "":
        build = label_settings.GetField("build")
        if len(build) == 0:
          raise Exception("Can not have empty 'build' field!")
        image = label_settings.GetXbuddyPath(build, board, chromeos_root,
                                             log_level)

      cache_dir = label_settings.GetField("cache_dir")
      chrome_src = label_settings.GetField("chrome_src")

      # TODO(yunlian): We should consolidate code in machine_manager.py
      # to determine whether we are running from within Google or not.
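      # On a corp host, fall back to the per-board default remotes when the
      # label specifies no remotes of its own, or when it merely repeats the
      # global remotes while the boards differ.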
      if ("corp.google.com" in socket.gethostname() and
          (not my_remote
           or my_remote == remote
           and global_settings.GetField("board") != board)):
        my_remote = self.GetDefaultRemotes(board)
      if global_settings.GetField("same_machine") and len(my_remote) > 1:
        raise Exception("Only one remote is allowed when same_machine "
                        "is turned on")
      all_remote += my_remote
      image_args = label_settings.GetField("image_args")
      if test_flag.GetTestMode():
        label = MockLabel(label_name, image, chromeos_root, board, my_remote,
                          image_args, cache_dir, cache_only, compiler,
                          chrome_src)
      else:
        label = Label(label_name, image, chromeos_root, board, my_remote,
                      image_args, cache_dir, cache_only, log_level, compiler,
                      chrome_src)
      labels.append(label)

    email = global_settings.GetField("email")
    all_remote += list(set(my_remote))
    all_remote = list(set(all_remote))
    experiment = Experiment(experiment_name, all_remote,
                            working_directory, chromeos_root,
                            cache_conditions, labels, benchmarks,
                            experiment_file.Canonicalize(),
                            email, acquire_timeout, log_dir, log_level,
                            share_cache,
                            results_dir, locks_dir)

    return experiment

  def GetDefaultRemotes(self, board):
    default_remotes_file = os.path.join(os.path.dirname(__file__),
                                        "default_remotes")
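    # Each line of the file is expected to map a board name to a
    # space-separated list of machines, e.g. (hypothetical hosts):
    #   some_board: remote1.example.com remote2.example.com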
    try:
      with open(default_remotes_file) as f:
        for line in f:
          key, v = line.split(":")
          if key.strip() == board:
            remotes = v.strip().split(" ")
            if remotes:
              return remotes
            else:
              raise Exception("There is no remote for {0}".format(board))
    except IOError:
      raise Exception("IOError while reading file {0}"
                      .format(default_remotes_file))
    else:
      raise Exception("There is no remote for {0}".format(board))
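
# Typical usage (sketch): the experiment file object comes from crosperf's
# experiment-file parser; everything here other than ExperimentFactory and
# GetExperiment is an assumption shown for illustration only.
#
#   factory = ExperimentFactory()
#   experiment = factory.GetExperiment(experiment_file, working_directory,
#                                      log_dir)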