experiment_factory.py revision 6a7dfb33f661a0e0193716a3fcc46a6984486401
# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""A module to generate experiments."""

from __future__ import print_function
import os
import re
import socket

from benchmark import Benchmark
import config
from experiment import Experiment
from label import Label
from label import MockLabel
from results_cache import CacheConditions
import test_flag
import file_lock_machine

# Users may want to run Telemetry tests either individually or in
# specified sets.  Here we define sets of tests that users may want
# to run together.
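#
# A benchmark block in an experiment file can then pull in a whole set via
# the special test_name values handled in GetExperiment below; a sketch of
# the experiment-file syntax (field values are illustrative):
#
#   benchmark: all_toolchain_perf {
#     suite: telemetry_Crosperf
#     iterations: 3
#   }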

telemetry_perfv2_tests = ['dromaeo.domcoreattr',
                          'dromaeo.domcoremodify',
                          'dromaeo.domcorequery',
                          'dromaeo.domcoretraverse',
                          'kraken',
                          'octane',
                          'robohornet_pro',
                          'sunspider',
                         ]

telemetry_pagecycler_tests = ['page_cycler.intl_ar_fa_he',
                              'page_cycler.intl_es_fr_pt-BR',
                              'page_cycler.intl_hi_ru',
                              'page_cycler.intl_ja_zh',
                              'page_cycler.intl_ko_th_vi',
                              'page_cycler.morejs',
                              'page_cycler.moz',
                              'page_cycler.netsim.top_10',
                              'page_cycler.tough_layout_cases',
                              'page_cycler.typical_25',
                             ]

telemetry_toolchain_old_perf_tests = ['dromaeo.domcoremodify',
                                      'page_cycler.intl_es_fr_pt-BR',
                                      'page_cycler.intl_hi_ru',
                                      'page_cycler.intl_ja_zh',
                                      'page_cycler.intl_ko_th_vi',
                                      'page_cycler.netsim.top_10',
                                      'page_cycler.typical_25',
                                      'robohornet_pro',
                                      'spaceport',
                                      'tab_switching.top_10',
                                     ]

telemetry_toolchain_perf_tests = ['octane',
                                  'kraken',
                                  'speedometer',
                                  'dromaeo.domcoreattr',
                                  'dromaeo.domcoremodify',
                                  'smoothness.tough_webgl_cases',
                                  'page_cycler.typical_25',
                                  'media.tough_video_cases',
                                 ]

class ExperimentFactory(object):
  """Factory class for building an Experiment, given an ExperimentFile as input.

  This factory is currently hardcoded to produce an experiment for running
  ChromeOS benchmarks, but the idea is that in the future, other types
  of experiments could be produced.
  """
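
  # A minimal usage sketch (ExperimentFile is defined in experiment_file.py;
  # the directory arguments here are illustrative):
  #
  #   factory = ExperimentFactory()
  #   experiment = factory.GetExperiment(experiment_file,
  #                                      working_directory='/tmp/crosperf',
  #                                      log_dir='/tmp/crosperf/logs')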

  def _AppendBenchmarkSet(self, benchmarks, benchmark_list, test_args,
                          iterations, rm_chroot_tmp, perf_args, suite,
                          show_all_results, retries, run_local):
    """Add all the tests in a set to the benchmarks list."""
    for test_name in benchmark_list:
      telemetry_benchmark = Benchmark(test_name, test_name, test_args,
                                      iterations, rm_chroot_tmp, perf_args,
                                      suite, show_all_results, retries,
                                      run_local)
      benchmarks.append(telemetry_benchmark)

  def GetExperiment(self, experiment_file, working_directory, log_dir):
    """Construct an experiment from an experiment file."""
    global_settings = experiment_file.GetGlobalSettings()
    experiment_name = global_settings.GetField("name")
    board = global_settings.GetField("board")
    remote = global_settings.GetField("remote")
    # Strip any stray quote characters from the remote entries, in case the
    # user quoted them in the experiment file.
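    # (e.g. '"172.17.128.34"' becomes '172.17.128.34'; the address is
    # illustrative).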
    new_remote = []
    if remote:
      for i in remote:
        c = re.sub('["\']', '', i)
        new_remote.append(c)
    remote = new_remote
    chromeos_root = global_settings.GetField("chromeos_root")
    rm_chroot_tmp = global_settings.GetField("rm_chroot_tmp")
    perf_args = global_settings.GetField("perf_args")
    acquire_timeout = global_settings.GetField("acquire_timeout")
    cache_dir = global_settings.GetField("cache_dir")
    cache_only = global_settings.GetField("cache_only")
    config.AddConfig("no_email", global_settings.GetField("no_email"))
    share_cache = global_settings.GetField("share_cache")
    results_dir = global_settings.GetField("results_dir")
    use_file_locks = global_settings.GetField("use_file_locks")
    locks_dir = global_settings.GetField("locks_dir")
    # If we pass a blank locks_dir to the Experiment, it will use the AFE
    # server lock mechanism.  So if the user specified use_file_locks but did
    # not specify a locks dir, set the locks dir to the default locks dir in
    # file_lock_machine.
    if use_file_locks and not locks_dir:
      locks_dir = file_lock_machine.Machine.LOCKS_DIR
    chrome_src = global_settings.GetField("chrome_src")
    show_all_results = global_settings.GetField("show_all_results")
    log_level = global_settings.GetField("logging_level")
    if log_level not in ("quiet", "average", "verbose"):
      log_level = "verbose"
    # Default cache hit conditions. The image checksum in the cache and the
    # computed checksum of the image must match. Also, a cache file must
    # exist.
    cache_conditions = [CacheConditions.CACHE_FILE_EXISTS,
                        CacheConditions.CHECKSUMS_MATCH]
    if global_settings.GetField("rerun_if_failed"):
      cache_conditions.append(CacheConditions.RUN_SUCCEEDED)
    if global_settings.GetField("rerun"):
      cache_conditions.append(CacheConditions.FALSE)
    if global_settings.GetField("same_machine"):
      cache_conditions.append(CacheConditions.SAME_MACHINE_MATCH)
    if global_settings.GetField("same_specs"):
      cache_conditions.append(CacheConditions.MACHINES_MATCH)
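    # ('rerun' appends CacheConditions.FALSE, a condition that can never be
    # satisfied, so every benchmark runs again even when a cached result
    # exists.)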

    # Construct benchmarks.
    # Some fields are common with global settings. The values are
    # inherited and/or merged with the global settings values.
    benchmarks = []
    all_benchmark_settings = experiment_file.GetSettings("benchmark")
    for benchmark_settings in all_benchmark_settings:
      benchmark_name = benchmark_settings.name
      test_name = benchmark_settings.GetField("test_name")
      if not test_name:
        test_name = benchmark_name
      test_args = benchmark_settings.GetField("test_args")
      iterations = benchmark_settings.GetField("iterations")
      suite = benchmark_settings.GetField("suite")
      retries = benchmark_settings.GetField("retries")
      run_local = benchmark_settings.GetField("run_local")
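
      # For the telemetry_Crosperf suite, a few special test_name values
      # expand into the predefined test sets at the top of this file.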
      if suite == 'telemetry_Crosperf':
        if test_name == 'all_perfv2':
          self._AppendBenchmarkSet(benchmarks, telemetry_perfv2_tests,
                                   test_args, iterations, rm_chroot_tmp,
                                   perf_args, suite, show_all_results, retries,
                                   run_local)
        elif test_name == 'all_pagecyclers':
          self._AppendBenchmarkSet(benchmarks, telemetry_pagecycler_tests,
                                   test_args, iterations, rm_chroot_tmp,
                                   perf_args, suite, show_all_results, retries,
                                   run_local)
        elif test_name == 'all_toolchain_perf':
          self._AppendBenchmarkSet(benchmarks, telemetry_toolchain_perf_tests,
                                   test_args, iterations, rm_chroot_tmp,
                                   perf_args, suite, show_all_results, retries,
                                   run_local)
          # Add non-telemetry toolchain-perf benchmarks:
          benchmarks.append(Benchmark('graphics_WebGLAquarium',
                                      'graphics_WebGLAquarium', '', iterations,
                                      rm_chroot_tmp, perf_args, '',
                                      show_all_results, retries,
                                      run_local=False))
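          # (run_local is a Telemetry-specific knob, so it is pinned to False
          # for this non-telemetry hardware test.)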
        elif test_name == 'all_toolchain_perf_old':
          self._AppendBenchmarkSet(benchmarks,
                                   telemetry_toolchain_old_perf_tests,
                                   test_args, iterations, rm_chroot_tmp,
                                   perf_args, suite, show_all_results, retries,
                                   run_local)
        else:
          benchmark = Benchmark(test_name, test_name, test_args,
                                iterations, rm_chroot_tmp, perf_args, suite,
                                show_all_results, retries, run_local)
          benchmarks.append(benchmark)
      else:
        # Add the single benchmark.
        benchmark = Benchmark(benchmark_name, test_name, test_args,
                              iterations, rm_chroot_tmp, perf_args, suite,
                              show_all_results, retries, run_local=False)
        benchmarks.append(benchmark)

    if not benchmarks:
      raise RuntimeError("No benchmarks specified")

    # Construct labels.
    # Some fields are common with global settings. The values are
    # inherited and/or merged with the global settings values.
    labels = []
    all_label_settings = experiment_file.GetSettings("label")
    all_remote = list(remote)
    for label_settings in all_label_settings:
      label_name = label_settings.name
      image = label_settings.GetField("chromeos_image")
      chromeos_root = label_settings.GetField("chromeos_root")
      my_remote = label_settings.GetField("remote")
      compiler = label_settings.GetField("compiler")
      new_remote = []
      if my_remote:
        for i in my_remote:
          c = re.sub('["\']', '', i)
          new_remote.append(c)
      my_remote = new_remote
      if image == "":
        build = label_settings.GetField("build")
        if not build:
          raise RuntimeError("Cannot have an empty 'build' field!")
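        # 'build' is an xbuddy-style build id (e.g.
        # 'lumpy-release/R34-5417.0.0', an illustrative value) that
        # GetXbuddyPath resolves to an image path.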
        image = label_settings.GetXbuddyPath(build, board, chromeos_root,
                                             log_level)

      cache_dir = label_settings.GetField("cache_dir")
      chrome_src = label_settings.GetField("chrome_src")

      # TODO(yunlian): We should consolidate code in machine_manager.py
      # to determine whether we are running from within Google or not.
      if ("corp.google.com" in socket.gethostname() and
          (not my_remote or
           (my_remote == remote and
            global_settings.GetField("board") != board))):
        my_remote = self.GetDefaultRemotes(board)
      if global_settings.GetField("same_machine") and len(my_remote) > 1:
        raise RuntimeError("Only one remote is allowed when same_machine "
                           "is turned on")
      all_remote += my_remote
      image_args = label_settings.GetField("image_args")
      if test_flag.GetTestMode():
        # pylint: disable=too-many-function-args
        label = MockLabel(label_name, image, chromeos_root, board, my_remote,
                          image_args, cache_dir, cache_only, log_level,
                          compiler, chrome_src)
      else:
        label = Label(label_name, image, chromeos_root, board, my_remote,
                      image_args, cache_dir, cache_only, log_level, compiler,
                      chrome_src)
      labels.append(label)

    if not labels:
      raise RuntimeError("No labels specified")

    email = global_settings.GetField("email")
    # Each label's remotes were already appended above; dedupe the final list.
    all_remote = list(set(all_remote))
    experiment = Experiment(experiment_name, all_remote,
                            working_directory, chromeos_root,
                            cache_conditions, labels, benchmarks,
                            experiment_file.Canonicalize(),
                            email, acquire_timeout, log_dir, log_level,
                            share_cache,
                            results_dir, locks_dir)

    return experiment

  def GetDefaultRemotes(self, board):
    default_remotes_file = os.path.join(os.path.dirname(__file__),
                                        "default_remotes")
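    # The default_remotes file is expected to map each board to a
    # space-separated list of machines, one board per line, e.g.
    # (hostnames are illustrative):
    #   lumpy: chromeos1-row1-rack1-host1.cros chromeos1-row1-rack1-host2.cros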
    try:
      with open(default_remotes_file) as f:
        for line in f:
          key, v = line.split(":")
          if key.strip() == board:
            remotes = v.strip().split(" ")
            if remotes:
              return remotes
            else:
              raise RuntimeError("There is no remote for {0}".format(board))
    except IOError:
      # TODO: Re-raise instead of raising a different exception.
      raise RuntimeError("IOError while reading file {0}"
                         .format(default_remotes_file))
    else:
      raise RuntimeError("There is no remote for {0}".format(board))
282