experiment_factory.py revision 0c84ea75c90b494df6b1ea6923a48611be81b96c
#!/usr/bin/python

# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""A module to generate experiments."""

import os
import re
import socket

from benchmark import Benchmark
import config
from experiment import Experiment
from label import Label
from label import MockLabel
from results_cache import CacheConditions
import test_flag
import file_lock_machine

# Users may want to run Telemetry tests either individually, or in
# specified sets.  Here we define sets of tests that users may want
# to run together.

telemetry_perfv2_tests = [ 'dromaeo.domcoreattr',
                           'dromaeo.domcoremodify',
                           'dromaeo.domcorequery',
                           'dromaeo.domcoretraverse',
                           'kraken',
# The following benchmark is extremely flaky, so omit it for now.
#                           'memory.top_25',
                           'octane',
                           'robohornet_pro',
# The following benchmark is broken (and hanging) for now, so omit it.
#                           'smoothness.top_25',
                           'sunspider',
                           ]

telemetry_pagecycler_tests = [
                               'page_cycler.dhtml',
                               'page_cycler.intl_ar_fa_he',
                               'page_cycler.intl_es_fr_pt-BR',
                               'page_cycler.intl_hi_ru',
                               'page_cycler.intl_ja_zh',
                               'page_cycler.intl_ko_th_vi',
                               'page_cycler.morejs',
                               'page_cycler.moz',
                               'page_cycler.netsim.top_10',
                               'page_cycler.tough_layout_cases',
                               'page_cycler.typical_25',
# The following benchmarks are now deprecated in Telemetry:
#                               'page_cycler.indexed_db.basic_insert',
#                               'page_cycler.bloat',
                               ]

telemetry_toolchain_old_perf_tests = [
                               'dromaeo.domcoremodify',
                               'page_cycler.intl_es_fr_pt-BR',
                               'page_cycler.intl_hi_ru',
                               'page_cycler.intl_ja_zh',
                               'page_cycler.intl_ko_th_vi',
                               'page_cycler.netsim.top_10',
                               'page_cycler.typical_25',
                               'robohornet_pro',
                               'spaceport',
                               'tab_switching.top_10',
# The following benchmarks are now deprecated in Telemetry:
#                               'canvasmark',
#                               'jsgamebench',
#                               'page_cycler.bloat',
#                               'peacekeeper.html',
                               ]

telemetry_toolchain_perf_tests = [
                               'octane',
                               'kraken',
                               'speedometer',
                               'dromaeo.domcoreattr',
                               'dromaeo.domcoremodify',
                               'smoothness.tough_webgl_cases',
                               'page_cycler.typical_25',
                               'media.tough_video_cases',
                               ]
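
# Each set above is selected in an experiment file by using its name as the
# benchmark's test_name with suite 'telemetry_Crosperf': 'all_perfv2',
# 'all_pagecyclers', 'all_toolchain_perf', or 'all_toolchain_perf_old'
# (see the dispatch in GetExperiment below).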

class ExperimentFactory(object):
  """Factory class for building an Experiment, given an ExperimentFile as input.

  This factory is currently hardcoded to produce an experiment for running
  ChromeOS benchmarks, but the idea is that in the future, other types
  of experiments could be produced.
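
  Example (a minimal sketch; experiment_file is assumed to be a parsed
  ExperimentFile, and the directory arguments here are illustrative
  values, not ones defined by this module):

    factory = ExperimentFactory()
    experiment = factory.GetExperiment(experiment_file,
                                       working_directory='/tmp/crosperf',
                                       log_dir='/tmp/crosperf/logs')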
  """

  def _AppendBenchmarkSet(self, benchmarks, benchmark_list, test_args,
                          iterations, rm_chroot_tmp, perf_args, suite,
                          show_all_results, retries):
    """Add all the tests in a set to the benchmarks list."""
    for test_name in benchmark_list:
      telemetry_benchmark = Benchmark(test_name, test_name, test_args,
                                      iterations, rm_chroot_tmp, perf_args,
                                      suite, show_all_results, retries)
      benchmarks.append(telemetry_benchmark)


  def GetExperiment(self, experiment_file, working_directory, log_dir):
    """Construct an experiment from an experiment file."""
    global_settings = experiment_file.GetGlobalSettings()
    experiment_name = global_settings.GetField("name")
    board = global_settings.GetField("board")
    remote = global_settings.GetField("remote")
    # Strip any double or single quotes from the remote names, in case the
    # user added them to the remote string (e.g. '"host1"' becomes 'host1').
    new_remote = []
    for i in remote:
      c = re.sub('["\']', '', i)
      new_remote.append(c)
    remote = new_remote
    chromeos_root = global_settings.GetField("chromeos_root")
    rm_chroot_tmp = global_settings.GetField("rm_chroot_tmp")
    perf_args = global_settings.GetField("perf_args")
    acquire_timeout = global_settings.GetField("acquire_timeout")
    cache_dir = global_settings.GetField("cache_dir")
    cache_only = global_settings.GetField("cache_only")
    config.AddConfig("no_email", global_settings.GetField("no_email"))
    share_cache = global_settings.GetField("share_cache")
    results_dir = global_settings.GetField("results_dir")
    use_file_locks = global_settings.GetField("use_file_locks")
    locks_dir = global_settings.GetField("locks_dir")
    # If we pass a blank locks_dir to the Experiment, it will use the AFE
    # server lock mechanism.  So if the user specified use_file_locks but did
    # not specify a locks dir, set the locks dir to the default locks dir in
    # file_lock_machine.
    if use_file_locks and not locks_dir:
      locks_dir = file_lock_machine.Machine.LOCKS_DIR
    chrome_src = global_settings.GetField("chrome_src")
    show_all_results = global_settings.GetField("show_all_results")
    log_level = global_settings.GetField("logging_level")
    if log_level not in ("quiet", "average", "verbose"):
      log_level = "verbose"
    # Default cache hit conditions: a cache file must exist, and the image
    # checksum in the cache must match the computed checksum of the image.
    cache_conditions = [CacheConditions.CACHE_FILE_EXISTS,
                        CacheConditions.CHECKSUMS_MATCH]
    if global_settings.GetField("rerun_if_failed"):
      cache_conditions.append(CacheConditions.RUN_SUCCEEDED)
    if global_settings.GetField("rerun"):
      cache_conditions.append(CacheConditions.FALSE)
    if global_settings.GetField("same_machine"):
      cache_conditions.append(CacheConditions.SAME_MACHINE_MATCH)
    if global_settings.GetField("same_specs"):
      cache_conditions.append(CacheConditions.MACHINES_MATCH)
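    # Note: for 'rerun', CacheConditions.FALSE is a condition that never
    # holds, so cache lookups always miss and every benchmark is re-executed
    # (an inference from the condition names; see results_cache for details).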

    # Construct benchmarks.
    # Some fields are common with global settings. The values are
    # inherited and/or merged with the global settings values.
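    # In the experiment file these appear as blocks like the following
    # (a hypothetical sketch of the syntax, not taken from this module):
    #   benchmark: octane {
    #     suite: telemetry_Crosperf
    #     iterations: 3
    #   }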
    benchmarks = []
    all_benchmark_settings = experiment_file.GetSettings("benchmark")
    for benchmark_settings in all_benchmark_settings:
      benchmark_name = benchmark_settings.name
      test_name = benchmark_settings.GetField("test_name")
      if not test_name:
        test_name = benchmark_name
      test_args = benchmark_settings.GetField("test_args")
      iterations = benchmark_settings.GetField("iterations")
      suite = benchmark_settings.GetField("suite")
      retries = benchmark_settings.GetField("retries")

      if suite == 'telemetry_Crosperf':
        if test_name == 'all_perfv2':
          self._AppendBenchmarkSet(benchmarks, telemetry_perfv2_tests,
                                   test_args, iterations, rm_chroot_tmp,
                                   perf_args, suite, show_all_results, retries)
        elif test_name == 'all_pagecyclers':
          self._AppendBenchmarkSet(benchmarks, telemetry_pagecycler_tests,
                                   test_args, iterations, rm_chroot_tmp,
                                   perf_args, suite, show_all_results, retries)
        elif test_name == 'all_toolchain_perf':
          self._AppendBenchmarkSet(benchmarks, telemetry_toolchain_perf_tests,
                                   test_args, iterations, rm_chroot_tmp,
                                   perf_args, suite, show_all_results, retries)
          # Add non-telemetry toolchain-perf benchmarks:
          benchmarks.append(Benchmark('graphics_WebGLAquarium',
                                      'graphics_WebGLAquarium', '', iterations,
                                      rm_chroot_tmp, perf_args, '',
                                      show_all_results, retries))
        elif test_name == 'all_toolchain_perf_old':
          self._AppendBenchmarkSet(benchmarks,
                                   telemetry_toolchain_old_perf_tests,
                                   test_args, iterations, rm_chroot_tmp,
                                   perf_args, suite, show_all_results, retries)
        else:
          benchmark = Benchmark(test_name, test_name, test_args,
                                iterations, rm_chroot_tmp, perf_args, suite,
                                show_all_results, retries)
          benchmarks.append(benchmark)
      else:
        # Add the single benchmark.
        benchmark = Benchmark(benchmark_name, test_name, test_args,
                              iterations, rm_chroot_tmp, perf_args, suite,
                              show_all_results, retries)
        benchmarks.append(benchmark)

    # Construct labels.
    # Some fields are common with global settings. The values are
    # inherited and/or merged with the global settings values.
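    # Labels are defined similarly in the experiment file (again a
    # hypothetical sketch of the syntax):
    #   label: my_label {
    #     chromeos_image: /path/to/chromiumos_test_image.bin
    #   }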
    labels = []
    all_label_settings = experiment_file.GetSettings("label")
    all_remote = list(remote)
    for label_settings in all_label_settings:
      label_name = label_settings.name
      image = label_settings.GetField("chromeos_image")
      chromeos_root = label_settings.GetField("chromeos_root")
      my_remote = label_settings.GetField("remote")
      new_remote = []
      for i in my_remote:
        c = re.sub('["\']', '', i)
        new_remote.append(c)
      my_remote = new_remote
      if image == "":
        build = label_settings.GetField("build")
        if len(build) == 0:
          raise Exception("Cannot have an empty 'build' field!")
        image = label_settings.GetXbuddyPath(build, board, chromeos_root,
                                             log_level)

      cache_dir = label_settings.GetField("cache_dir")
      chrome_src = label_settings.GetField("chrome_src")
      # TODO(yunlian): We should consolidate the code in machine_manager.py
      # to determine whether we are running from within Google or not.
      if ("corp.google.com" in socket.gethostname() and
          (not my_remote or
           (my_remote == remote and
            global_settings.GetField("board") != board))):
        my_remote = self.GetDefaultRemotes(board)
      if global_settings.GetField("same_machine") and len(my_remote) > 1:
        raise Exception("Only one remote is allowed when same_machine "
                        "is turned on")
      all_remote += my_remote
      image_args = label_settings.GetField("image_args")
      if test_flag.GetTestMode():
        label = MockLabel(label_name, image, chromeos_root, board, my_remote,
                          image_args, cache_dir, cache_only, chrome_src)
      else:
        label = Label(label_name, image, chromeos_root, board, my_remote,
                      image_args, cache_dir, cache_only, chrome_src)
      labels.append(label)

    email = global_settings.GetField("email")
    # Each label's remotes were already accumulated in the loop above;
    # just deduplicate the final list.
    all_remote = list(set(all_remote))
    experiment = Experiment(experiment_name, all_remote,
                            working_directory, chromeos_root,
                            cache_conditions, labels, benchmarks,
                            experiment_file.Canonicalize(),
                            email, acquire_timeout, log_dir, log_level,
                            share_cache,
                            results_dir, locks_dir)

    return experiment

  def GetDefaultRemotes(self, board):
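    """Return the default remote machine names for |board|.

    Reads the sibling 'default_remotes' file, which maps one board per line
    to its machines, e.g. (a hypothetical entry; the real boards and hosts
    live in that file):

        some-board: remote-host1 remote-host2
    """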
    default_remotes_file = os.path.join(os.path.dirname(__file__),
                                        "default_remotes")
    try:
      with open(default_remotes_file) as f:
        for line in f:
          key, v = line.split(":")
          if key.strip() == board:
            remotes = v.strip().split(" ")
            if remotes:
              return remotes
            else:
              raise Exception("There is no remote for {0}".format(board))
    except IOError:
      raise Exception("IOError while reading file {0}"
                      .format(default_remotes_file))
    else:
      raise Exception("There is no remote for {0}".format(board))
279