experiment_factory.py revision bc2d3d13d2e1928217140a76acdf9f9917b1fa30
#!/usr/bin/python

# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""A module to generate experiments."""

import os
import re
import socket

from benchmark import Benchmark
import config
from experiment import Experiment
from label import Label
from label import MockLabel
from results_cache import CacheConditions
import test_flag
import file_lock_machine

# Users may want to run Telemetry tests either individually, or in
# specified sets.  Here we define sets of tests that users may want
# to run together.
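#
# For example, an experiment file can request an entire set by using the
# set's name as the benchmark/test name (illustrative snippet; the exact
# syntax is defined by the experiment-file parser, not by this module):
#
#   benchmark: all_perfv2 {
#     suite: telemetry_Crosperf
#   }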

telemetry_perfv2_tests = [ 'dromaeo.domcoreattr',
                           'dromaeo.domcoremodify',
                           'dromaeo.domcorequery',
                           'dromaeo.domcoretraverse',
                           'kraken',
# The following benchmark is extremely flaky, so omit it for now.
#                           'memory.top_25',
                           'octane',
                           'robohornet_pro',
# The following benchmark is broken (and hanging) for now, so omit it.
#                           'smoothness.top_25',
                           'sunspider',
                           ]

telemetry_pagecycler_tests = [
                               'page_cycler.dhtml',
                               'page_cycler.intl_ar_fa_he',
                               'page_cycler.intl_es_fr_pt-BR',
                               'page_cycler.intl_hi_ru',
                               'page_cycler.intl_ja_zh',
                               'page_cycler.intl_ko_th_vi',
                               'page_cycler.morejs',
                               'page_cycler.moz',
                               'page_cycler.netsim.top_10',
                               'page_cycler.tough_layout_cases',
                               'page_cycler.typical_25',
# The following benchmarks are now deprecated in Telemetry:
#                               'page_cycler.indexed_db.basic_insert',
#                               'page_cycler.bloat',
                               ]

telemetry_toolchain_old_perf_tests = [
                               'dromaeo.domcoremodify',
                               'page_cycler.intl_es_fr_pt-BR',
                               'page_cycler.intl_hi_ru',
                               'page_cycler.intl_ja_zh',
                               'page_cycler.intl_ko_th_vi',
                               'page_cycler.netsim.top_10',
                               'page_cycler.typical_25',
                               'robohornet_pro',
                               'spaceport',
                               'tab_switching.top_10',
# The following benchmarks are now deprecated in Telemetry:
#                               'canvasmark',
#                               'jsgamebench',
#                               'page_cycler.bloat',
#                               'peacekeeper.html',
                               ]

telemetry_toolchain_perf_tests = [
                               'octane',
                               'kraken',
                               'speedometer',
                               'dromaeo.domcoreattr',
                               'dromaeo.domcoremodify',
                               'smoothness.tough_webgl_cases',
                               'page_cycler.typical_25',
                               'media.tough_video_cases',
                               ]

class ExperimentFactory(object):
  """Factory class for building an Experiment, given an ExperimentFile as input.

  This factory is currently hardcoded to produce an experiment for running
  ChromeOS benchmarks, but the idea is that in the future, other types
  of experiments could be produced.
  """

  def _AppendBenchmarkSet(self, benchmarks, benchmark_list, test_args,
                          iterations, rm_chroot_tmp, perf_args, suite,
                          show_all_results, retries, run_local):
    """Add all the tests in a set to the benchmarks list."""
    for test_name in benchmark_list:
      telemetry_benchmark = Benchmark(test_name, test_name, test_args,
                                      iterations, rm_chroot_tmp, perf_args,
                                      suite, show_all_results, retries,
                                      run_local)
      benchmarks.append(telemetry_benchmark)


  def GetExperiment(self, experiment_file, working_directory, log_dir):
    """Construct an experiment from an experiment file."""
    global_settings = experiment_file.GetGlobalSettings()
    experiment_name = global_settings.GetField("name")
    board = global_settings.GetField("board")
    remote = global_settings.GetField("remote")
    # Strip any stray quotation marks the user may have added around the
    # remote machine names.
    remote = [re.sub('["\']', '', r) for r in remote]
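    # E.g. a remote given as '"chromeos-test1"' in the experiment file
    # becomes 'chromeos-test1' (hypothetical machine name).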
    chromeos_root = global_settings.GetField("chromeos_root")
    rm_chroot_tmp = global_settings.GetField("rm_chroot_tmp")
    perf_args = global_settings.GetField("perf_args")
    acquire_timeout = global_settings.GetField("acquire_timeout")
    cache_dir = global_settings.GetField("cache_dir")
    cache_only = global_settings.GetField("cache_only")
    config.AddConfig("no_email", global_settings.GetField("no_email"))
    share_cache = global_settings.GetField("share_cache")
    results_dir = global_settings.GetField("results_dir")
    use_file_locks = global_settings.GetField("use_file_locks")
    locks_dir = global_settings.GetField("locks_dir")
    # If we pass a blank locks_dir to the Experiment, it will use the AFE
    # server lock mechanism.  So if the user specified use_file_locks but did
    # not specify a locks dir, set the locks dir to the default locks dir in
    # file_lock_machine.
    if use_file_locks and not locks_dir:
      locks_dir = file_lock_machine.Machine.LOCKS_DIR
    chrome_src = global_settings.GetField("chrome_src")
    show_all_results = global_settings.GetField("show_all_results")
    log_level = global_settings.GetField("logging_level")
    if log_level not in ("quiet", "average", "verbose"):
      log_level = "verbose"
    # Default cache hit conditions. The image checksum in the cache and the
    # computed checksum of the image must match. Also a cache file must exist.
    cache_conditions = [CacheConditions.CACHE_FILE_EXISTS,
                        CacheConditions.CHECKSUMS_MATCH]
    if global_settings.GetField("rerun_if_failed"):
      cache_conditions.append(CacheConditions.RUN_SUCCEEDED)
    if global_settings.GetField("rerun"):
      cache_conditions.append(CacheConditions.FALSE)
    if global_settings.GetField("same_machine"):
      cache_conditions.append(CacheConditions.SAME_MACHINE_MATCH)
    if global_settings.GetField("same_specs"):
      cache_conditions.append(CacheConditions.MACHINES_MATCH)
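    # For example, setting rerun in the experiment file appends
    # CacheConditions.FALSE which, as its name suggests, never matches, so
    # every cache lookup misses and all benchmark runs are re-executed.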

    # Construct benchmarks.
    # Some fields are common with global settings. The values are
    # inherited and/or merged with the global settings values.
    benchmarks = []
    all_benchmark_settings = experiment_file.GetSettings("benchmark")
    for benchmark_settings in all_benchmark_settings:
      benchmark_name = benchmark_settings.name
      test_name = benchmark_settings.GetField("test_name")
      if not test_name:
        test_name = benchmark_name
      test_args = benchmark_settings.GetField("test_args")
      iterations = benchmark_settings.GetField("iterations")
      suite = benchmark_settings.GetField("suite")
      retries = benchmark_settings.GetField("retries")
      run_local = benchmark_settings.GetField("run_local")

      if suite == 'telemetry_Crosperf':
        if test_name == 'all_perfv2':
          self._AppendBenchmarkSet(benchmarks, telemetry_perfv2_tests,
                                   test_args, iterations, rm_chroot_tmp,
                                   perf_args, suite, show_all_results, retries,
                                   run_local)
        elif test_name == 'all_pagecyclers':
          self._AppendBenchmarkSet(benchmarks, telemetry_pagecycler_tests,
                                   test_args, iterations, rm_chroot_tmp,
                                   perf_args, suite, show_all_results, retries,
                                   run_local)
        elif test_name == 'all_toolchain_perf':
          self._AppendBenchmarkSet(benchmarks, telemetry_toolchain_perf_tests,
                                   test_args, iterations, rm_chroot_tmp,
                                   perf_args, suite, show_all_results, retries,
                                   run_local)
          # Add non-telemetry toolchain-perf benchmarks:
          benchmarks.append(Benchmark('graphics_WebGLAquarium',
                                      'graphics_WebGLAquarium', '', iterations,
                                      rm_chroot_tmp, perf_args, '',
                                      show_all_results, retries,
                                      run_local=False))
        elif test_name == 'all_toolchain_perf_old':
          self._AppendBenchmarkSet(benchmarks,
                                   telemetry_toolchain_old_perf_tests,
                                   test_args, iterations, rm_chroot_tmp,
                                   perf_args, suite, show_all_results, retries,
                                   run_local)
        else:
          benchmark = Benchmark(test_name, test_name, test_args,
                                iterations, rm_chroot_tmp, perf_args, suite,
                                show_all_results, retries, run_local)
          benchmarks.append(benchmark)
      else:
        # Add the single benchmark.
        benchmark = Benchmark(benchmark_name, test_name, test_args,
                              iterations, rm_chroot_tmp, perf_args, suite,
                              show_all_results, retries, run_local)
        benchmarks.append(benchmark)

    # Construct labels.
    # Some fields are common with global settings. The values are
    # inherited and/or merged with the global settings values.
    labels = []
    all_label_settings = experiment_file.GetSettings("label")
    all_remote = list(remote)
    for label_settings in all_label_settings:
      label_name = label_settings.name
      image = label_settings.GetField("chromeos_image")
      chromeos_root = label_settings.GetField("chromeos_root")
      my_remote = label_settings.GetField("remote")
      # Strip stray quotation marks from the label's remotes as well.
      my_remote = [re.sub('["\']', '', r) for r in my_remote]
      if image == "":
        build = label_settings.GetField("build")
        if len(build) == 0:
          raise Exception("Cannot have an empty 'build' field!")
        image = label_settings.GetXbuddyPath(build, board, chromeos_root,
                                             log_level)

      cache_dir = label_settings.GetField("cache_dir")
      chrome_src = label_settings.GetField("chrome_src")

      # TODO(yunlian): We should consolidate code in machine_manager.py
      # to determine whether we are running from within Google or not.
      if ("corp.google.com" in socket.gethostname() and
          (not my_remote or
           (my_remote == remote and
            global_settings.GetField("board") != board))):
        my_remote = self.GetDefaultRemotes(board)
      if global_settings.GetField("same_machine") and len(my_remote) > 1:
        raise Exception("Only one remote is allowed when same_machine "
                        "is turned on")
      all_remote += my_remote
      image_args = label_settings.GetField("image_args")
      if test_flag.GetTestMode():
        label = MockLabel(label_name, image, chromeos_root, board, my_remote,
                          image_args, cache_dir, cache_only, chrome_src)
      else:
        label = Label(label_name, image, chromeos_root, board, my_remote,
                      image_args, cache_dir, cache_only, chrome_src)
      labels.append(label)

    email = global_settings.GetField("email")
    all_remote = list(set(all_remote))
    experiment = Experiment(experiment_name, all_remote,
                            working_directory, chromeos_root,
                            cache_conditions, labels, benchmarks,
                            experiment_file.Canonicalize(),
                            email, acquire_timeout, log_dir, log_level,
                            share_cache,
                            results_dir, locks_dir)

    return experiment

  def GetDefaultRemotes(self, board):
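    """Return the default remote machines for |board|.

    Each line of the default_remotes file next to this module is expected to
    map a board name to a space-separated list of machines, e.g.
    (hypothetical values):

      lumpy: 123.45.67.89 123.45.67.90
    """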
    default_remotes_file = os.path.join(os.path.dirname(__file__),
                                        "default_remotes")
    try:
      with open(default_remotes_file) as f:
        for line in f:
          key, v = line.split(":")
          if key.strip() == board:
            remotes = v.strip().split()
            if remotes:
              return remotes
            else:
              raise Exception("There is no remote for {0}".format(board))
    except IOError:
      raise Exception("IOError while reading file {0}"
                      .format(default_remotes_file))
    else:
      raise Exception("There is no remote for {0}".format(board))
285