experiment_factory.py revision e627fd61c2edba668eb2af8221892286b13f05a3
# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""A module to generate experiments."""

from __future__ import print_function
import os
import re
import socket

from benchmark import Benchmark
import config
from experiment import Experiment
from label import Label
from label import MockLabel
from results_cache import CacheConditions
import test_flag
import file_lock_machine
# Users may want to run Telemetry tests either individually or in
# specified sets.  Here we define sets of tests that users may want
# to run together.

telemetry_perfv2_tests = ['dromaeo.domcoreattr',
                          'dromaeo.domcoremodify',
                          'dromaeo.domcorequery',
                          'dromaeo.domcoretraverse',
                          'kraken',
                          'octane',
                          'robohornet_pro',
                          'sunspider',
                         ]

telemetry_pagecycler_tests = ['page_cycler.intl_ar_fa_he',
                              'page_cycler.intl_es_fr_pt-BR',
                              'page_cycler.intl_hi_ru',
                              'page_cycler.intl_ja_zh',
                              'page_cycler.intl_ko_th_vi',
                              'page_cycler.morejs',
                              'page_cycler.moz',
                              'page_cycler.netsim.top_10',
                              'page_cycler.tough_layout_cases',
                              'page_cycler.typical_25',
                             ]

telemetry_toolchain_old_perf_tests = ['dromaeo.domcoremodify',
                                      'page_cycler.intl_es_fr_pt-BR',
                                      'page_cycler.intl_hi_ru',
                                      'page_cycler.intl_ja_zh',
                                      'page_cycler.intl_ko_th_vi',
                                      'page_cycler.netsim.top_10',
                                      'page_cycler.typical_25',
                                      'robohornet_pro',
                                      'spaceport',
                                      'tab_switching.top_10',
                                     ]

telemetry_toolchain_perf_tests = ['octane',
                                  'kraken',
                                  'speedometer',
                                  'dromaeo.domcoreattr',
                                  'dromaeo.domcoremodify',
                                  'smoothness.tough_webgl_cases',
                                  'page_cycler.typical_25',
                                  'media.tough_video_cases',
                                 ]
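
# A benchmark block in a crosperf experiment file can name one of these sets
# instead of a single test, and GetExperiment below will expand it; the
# recognized set names are all_perfv2, all_pagecyclers, all_toolchain_perf,
# and all_toolchain_perf_old.  A minimal sketch (the iteration count is
# illustrative):
#
#   benchmark: all_toolchain_perf {
#     suite: telemetry_Crosperf
#     iterations: 3
#   }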

class ExperimentFactory(object):
  """Factory class for building an Experiment, given an ExperimentFile as input.

  This factory is currently hardcoded to produce an experiment for running
  ChromeOS benchmarks, but the idea is that in the future, other types
  of experiments could be produced.
  """

  def _AppendBenchmarkSet(self, benchmarks, benchmark_list, test_args,
                          iterations, rm_chroot_tmp, perf_args, suite,
                          show_all_results, retries, run_local):
    """Add all the tests in a set to the benchmarks list."""
    for test_name in benchmark_list:
      telemetry_benchmark = Benchmark(test_name, test_name, test_args,
                                      iterations, rm_chroot_tmp, perf_args,
                                      suite, show_all_results, retries,
                                      run_local)
      benchmarks.append(telemetry_benchmark)

  def GetExperiment(self, experiment_file, working_directory, log_dir):
    """Construct an experiment from an experiment file."""
    global_settings = experiment_file.GetGlobalSettings()
    experiment_name = global_settings.GetField("name")
    board = global_settings.GetField("board")
    remote = global_settings.GetField("remote")
    # Strip any stray " or ' characters that the user may have added around
    # entries in the remote list.
    new_remote = []
    for i in remote:
      c = re.sub('["\']', '', i)
      new_remote.append(c)
    remote = new_remote
    chromeos_root = global_settings.GetField("chromeos_root")
    rm_chroot_tmp = global_settings.GetField("rm_chroot_tmp")
    perf_args = global_settings.GetField("perf_args")
    acquire_timeout = global_settings.GetField("acquire_timeout")
    cache_dir = global_settings.GetField("cache_dir")
    cache_only = global_settings.GetField("cache_only")
    config.AddConfig("no_email", global_settings.GetField("no_email"))
    share_cache = global_settings.GetField("share_cache")
    results_dir = global_settings.GetField("results_dir")
    use_file_locks = global_settings.GetField("use_file_locks")
    locks_dir = global_settings.GetField("locks_dir")
    # If we pass a blank locks_dir to the Experiment, it will use the AFE
    # server lock mechanism.  So if the user specified use_file_locks but did
    # not specify a locks dir, set the locks dir to the default locks dir in
    # file_lock_machine.
    if use_file_locks and not locks_dir:
      locks_dir = file_lock_machine.Machine.LOCKS_DIR
    chrome_src = global_settings.GetField("chrome_src")
    show_all_results = global_settings.GetField("show_all_results")
    log_level = global_settings.GetField("logging_level")
    if log_level not in ("quiet", "average", "verbose"):
      log_level = "verbose"
    # Default cache hit conditions: a cache file must exist, and the image
    # checksum in the cache must match the computed checksum of the image.
    cache_conditions = [CacheConditions.CACHE_FILE_EXISTS,
                        CacheConditions.CHECKSUMS_MATCH]
    if global_settings.GetField("rerun_if_failed"):
      cache_conditions.append(CacheConditions.RUN_SUCCEEDED)
    if global_settings.GetField("rerun"):
      cache_conditions.append(CacheConditions.FALSE)
    if global_settings.GetField("same_machine"):
      cache_conditions.append(CacheConditions.SAME_MACHINE_MATCH)
    if global_settings.GetField("same_specs"):
      cache_conditions.append(CacheConditions.MACHINES_MATCH)

    # Construct benchmarks.
    # Some fields are common with global settings. The values are
    # inherited and/or merged with the global settings values.
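    #
    # For reference, a single-benchmark block in the experiment file might
    # look like this (a minimal sketch; the benchmark name and iteration
    # count are hypothetical):
    #
    #   benchmark: octane {
    #     suite: telemetry_Crosperf
    #     iterations: 5
    #   }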
    benchmarks = []
    all_benchmark_settings = experiment_file.GetSettings("benchmark")
    for benchmark_settings in all_benchmark_settings:
      benchmark_name = benchmark_settings.name
      test_name = benchmark_settings.GetField("test_name")
      if not test_name:
        test_name = benchmark_name
      test_args = benchmark_settings.GetField("test_args")
      iterations = benchmark_settings.GetField("iterations")
      suite = benchmark_settings.GetField("suite")
      retries = benchmark_settings.GetField("retries")
      run_local = benchmark_settings.GetField("run_local")

      if suite == 'telemetry_Crosperf':
        if test_name == 'all_perfv2':
          self._AppendBenchmarkSet(benchmarks, telemetry_perfv2_tests,
                                   test_args, iterations, rm_chroot_tmp,
                                   perf_args, suite, show_all_results, retries,
                                   run_local)
        elif test_name == 'all_pagecyclers':
          self._AppendBenchmarkSet(benchmarks, telemetry_pagecycler_tests,
                                   test_args, iterations, rm_chroot_tmp,
                                   perf_args, suite, show_all_results, retries,
                                   run_local)
        elif test_name == 'all_toolchain_perf':
          self._AppendBenchmarkSet(benchmarks, telemetry_toolchain_perf_tests,
                                   test_args, iterations, rm_chroot_tmp,
                                   perf_args, suite, show_all_results, retries,
                                   run_local)
          # Add non-telemetry toolchain-perf benchmarks:
          benchmarks.append(Benchmark('graphics_WebGLAquarium',
                                      'graphics_WebGLAquarium', '', iterations,
                                      rm_chroot_tmp, perf_args, '',
                                      show_all_results, retries,
                                      run_local=False))
        elif test_name == 'all_toolchain_perf_old':
          self._AppendBenchmarkSet(benchmarks,
                                   telemetry_toolchain_old_perf_tests,
                                   test_args, iterations, rm_chroot_tmp,
                                   perf_args, suite, show_all_results, retries,
                                   run_local)
        else:
          benchmark = Benchmark(test_name, test_name, test_args,
                                iterations, rm_chroot_tmp, perf_args, suite,
                                show_all_results, retries, run_local)
          benchmarks.append(benchmark)
      else:
        # Add the single benchmark.
        benchmark = Benchmark(benchmark_name, test_name, test_args,
                              iterations, rm_chroot_tmp, perf_args, suite,
                              show_all_results, retries, run_local=False)
        benchmarks.append(benchmark)

    if not benchmarks:
      raise RuntimeError("No benchmarks specified")

    # Construct labels.
    # Some fields are common with global settings. The values are
    # inherited and/or merged with the global settings values.
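    #
    # For reference, a label block in the experiment file might look like
    # this (a minimal sketch; the label name, build spec, and remote host
    # are hypothetical):
    #
    #   label: my_image {
    #     build: <xbuddy build spec>
    #     remote: test-machine.example.com
    #   }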
    labels = []
    all_label_settings = experiment_file.GetSettings("label")
    all_remote = list(remote)
    for label_settings in all_label_settings:
      label_name = label_settings.name
      image = label_settings.GetField("chromeos_image")
      chromeos_root = label_settings.GetField("chromeos_root")
      my_remote = label_settings.GetField("remote")
      compiler = label_settings.GetField("compiler")
      # Strip any stray " or ' characters from the label's remote list, as
      # was done for the global remote list above.
      new_remote = []
      if my_remote:
        for i in my_remote:
          c = re.sub('["\']', '', i)
          new_remote.append(c)
      my_remote = new_remote
      if image == "":
        build = label_settings.GetField("build")
        if not build:
          raise RuntimeError("Cannot have an empty 'build' field!")
        image = label_settings.GetXbuddyPath(build, board, chromeos_root,
                                             log_level)

      cache_dir = label_settings.GetField("cache_dir")
      chrome_src = label_settings.GetField("chrome_src")

      # TODO(yunlian): We should consolidate the code in machine_manager.py
      # to determine whether we are running from within Google or not.
      if ("corp.google.com" in socket.gethostname() and
          (not my_remote or
           (my_remote == remote and
            global_settings.GetField("board") != board))):
        my_remote = self.GetDefaultRemotes(board)
      if global_settings.GetField("same_machine") and len(my_remote) > 1:
        raise RuntimeError("Only one remote is allowed when same_machine "
                           "is turned on")
      all_remote += my_remote
      image_args = label_settings.GetField("image_args")
      if test_flag.GetTestMode():
        # pylint: disable=too-many-function-args
        label = MockLabel(label_name, image, chromeos_root, board, my_remote,
                          image_args, cache_dir, cache_only, log_level,
                          compiler, chrome_src)
      else:
        label = Label(label_name, image, chromeos_root, board, my_remote,
                      image_args, cache_dir, cache_only, log_level, compiler,
                      chrome_src)
      labels.append(label)

    if not labels:
      raise RuntimeError("No labels specified")

    email = global_settings.GetField("email")
    # Each label's remotes were already accumulated into all_remote in the
    # loop above; deduplicate the combined list.
    all_remote = list(set(all_remote))
    experiment = Experiment(experiment_name, all_remote,
                            working_directory, chromeos_root,
                            cache_conditions, labels, benchmarks,
                            experiment_file.Canonicalize(),
                            email, acquire_timeout, log_dir, log_level,
                            share_cache,
                            results_dir, locks_dir)

    return experiment

  def GetDefaultRemotes(self, board):
    default_remotes_file = os.path.join(os.path.dirname(__file__),
                                        "default_remotes")
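    # Each line of the default_remotes file is expected to map a board to a
    # space-separated list of machines, e.g. (hypothetical hostnames):
    #
    #   lumpy: remote1.example.com remote2.example.com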
    try:
      with open(default_remotes_file) as f:
        for line in f:
          # Skip blank lines so a trailing newline does not break parsing.
          if not line.strip():
            continue
          key, v = line.split(":", 1)
          if key.strip() == board:
            remotes = v.strip().split()
            if remotes:
              return remotes
            else:
              raise RuntimeError("There is no remote for {0}".format(board))
    except IOError:
      # TODO: Re-raise instead of raising a different exception type.
      raise RuntimeError("IOError while reading file {0}"
                         .format(default_remotes_file))
    else:
      raise RuntimeError("There is no remote for {0}".format(board))
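
# Example usage (a minimal sketch: the crosperf driver normally parses the
# experiment file and constructs these objects itself, so the file name and
# directories below are hypothetical):
#
#   from experiment_file import ExperimentFile
#
#   with open('toolchain.exp') as f:
#     experiment_file = ExperimentFile(f)
#   experiment = ExperimentFactory().GetExperiment(
#       experiment_file, working_directory='/tmp/crosperf',
#       log_dir='/tmp/crosperf/logs')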