# experiment_factory.py revision 88272d479f2761cc1906fea564c73033f77a6270
1# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
2# Use of this source code is governed by a BSD-style license that can be
3# found in the LICENSE file.
4"""A module to generate experiments."""
5
6from __future__ import print_function
7import os
8import re
9import socket
10
11from benchmark import Benchmark
12import config
13from experiment import Experiment
14from label import Label
15from label import MockLabel
16from results_cache import CacheConditions
17import test_flag
18import file_lock_machine
19
# Predefined sets of Telemetry tests.  Users may run Telemetry tests one at
# a time, or request one of these named groups from an experiment file (see
# ExperimentFactory.GetExperiment, which maps the 'all_*' aliases to these
# lists).

telemetry_perfv2_tests = [
    'dromaeo.domcoreattr',
    'dromaeo.domcoremodify',
    'dromaeo.domcorequery',
    'dromaeo.domcoretraverse',
    'kraken',
    'octane',
    'robohornet_pro',
    'sunspider',
]

telemetry_pagecycler_tests = [
    'page_cycler.intl_ar_fa_he',
    'page_cycler.intl_es_fr_pt-BR',
    'page_cycler.intl_hi_ru',
    'page_cycler.intl_ja_zh',
    'page_cycler.intl_ko_th_vi',
    'page_cycler.morejs',
    'page_cycler.moz',
    'page_cycler.netsim.top_10',
    'page_cycler.tough_layout_cases',
    'page_cycler.typical_25',
]

telemetry_toolchain_old_perf_tests = [
    'dromaeo.domcoremodify',
    'page_cycler.intl_es_fr_pt-BR',
    'page_cycler.intl_hi_ru',
    'page_cycler.intl_ja_zh',
    'page_cycler.intl_ko_th_vi',
    'page_cycler.netsim.top_10',
    'page_cycler.typical_25',
    'robohornet_pro',
    'spaceport',
    'tab_switching.top_10',
]

telemetry_toolchain_perf_tests = [
    'octane',
    'kraken',
    'speedometer',
    'dromaeo.domcoreattr',
    'dromaeo.domcoremodify',
    'smoothness.tough_webgl_cases',
    'page_cycler.typical_25',
    'media.tough_video_cases',
]
62
63
class ExperimentFactory(object):
  """Factory class for building an Experiment, given an ExperimentFile as input.

  This factory is currently hardcoded to produce an experiment for running
  ChromeOS benchmarks, but the idea is that in the future, other types
  of experiments could be produced.
  """

  def _StripQuotes(self, remotes):
    """Return a copy of |remotes| with all ' and " characters removed.

    Users sometimes quote the entries in the 'remote' field of an experiment
    file; the quotes are not part of the machine name, so strip them.

    Args:
      remotes: A list of remote machine name strings; may be empty or None.

    Returns:
      A new list with quote characters removed, or [] if |remotes| is falsy.
    """
    if not remotes:
      return []
    return [re.sub('["\']', '', r) for r in remotes]

  def AppendBenchmarkSet(self, benchmarks, benchmark_list, test_args,
                         iterations, rm_chroot_tmp, perf_args, suite,
                         show_all_results, retries, run_local):
    """Add all the tests in a set to the benchmarks list.

    Each name in |benchmark_list| is used as both the benchmark name and the
    test name of a new Benchmark, which is appended to |benchmarks| in place.
    All other arguments are passed through to the Benchmark constructor
    unchanged.
    """
    for test_name in benchmark_list:
      telemetry_benchmark = Benchmark(
          test_name, test_name, test_args, iterations, rm_chroot_tmp, perf_args,
          suite, show_all_results, retries, run_local)
      benchmarks.append(telemetry_benchmark)

  def GetExperiment(self, experiment_file, working_directory, log_dir):
    """Construct an experiment from an experiment file.

    Args:
      experiment_file: An ExperimentFile providing global, benchmark and
        label settings sections.
      working_directory: Directory in which the experiment will run.
      log_dir: Directory for the experiment's logs.

    Returns:
      A fully constructed Experiment object.

    Raises:
      RuntimeError: if no benchmarks or labels are specified, if a label has
        neither an image nor a build, or if more than one remote is given
        while 'same_machine' is turned on.
    """
    global_settings = experiment_file.GetGlobalSettings()
    experiment_name = global_settings.GetField('name')
    board = global_settings.GetField('board')
    # Users may quote the remote names; the quotes are not part of the name.
    remote = self._StripQuotes(global_settings.GetField('remote'))
    chromeos_root = global_settings.GetField('chromeos_root')
    rm_chroot_tmp = global_settings.GetField('rm_chroot_tmp')
    perf_args = global_settings.GetField('perf_args')
    acquire_timeout = global_settings.GetField('acquire_timeout')
    cache_dir = global_settings.GetField('cache_dir')
    cache_only = global_settings.GetField('cache_only')
    config.AddConfig('no_email', global_settings.GetField('no_email'))
    share_cache = global_settings.GetField('share_cache')
    results_dir = global_settings.GetField('results_dir')
    use_file_locks = global_settings.GetField('use_file_locks')
    locks_dir = global_settings.GetField('locks_dir')
    # If we pass a blank locks_dir to the Experiment, it will use the AFE
    # server lock mechanism.  So if the user specified use_file_locks, but did
    # not specify a locks dir, set the locks dir to the default locks dir in
    # file_lock_machine.
    if use_file_locks and not locks_dir:
      locks_dir = file_lock_machine.Machine.LOCKS_DIR
    chrome_src = global_settings.GetField('chrome_src')
    show_all_results = global_settings.GetField('show_all_results')
    log_level = global_settings.GetField('logging_level')
    if log_level not in ('quiet', 'average', 'verbose'):
      log_level = 'verbose'
    # Default cache hit conditions. The image checksum in the cache and the
    # computed checksum of the image must match. Also a cache file must exist.
    cache_conditions = [CacheConditions.CACHE_FILE_EXISTS,
                        CacheConditions.CHECKSUMS_MATCH]
    if global_settings.GetField('rerun_if_failed'):
      cache_conditions.append(CacheConditions.RUN_SUCCEEDED)
    if global_settings.GetField('rerun'):
      cache_conditions.append(CacheConditions.FALSE)
    if global_settings.GetField('same_machine'):
      cache_conditions.append(CacheConditions.SAME_MACHINE_MATCH)
    if global_settings.GetField('same_specs'):
      cache_conditions.append(CacheConditions.MACHINES_MATCH)

    # Construct benchmarks.
    # Some fields are common with global settings. The values are
    # inherited and/or merged with the global settings values.
    benchmarks = []
    all_benchmark_settings = experiment_file.GetSettings('benchmark')
    for benchmark_settings in all_benchmark_settings:
      benchmark_name = benchmark_settings.name
      test_name = benchmark_settings.GetField('test_name')
      # The test name defaults to the benchmark name when not given.
      if not test_name:
        test_name = benchmark_name
      test_args = benchmark_settings.GetField('test_args')
      iterations = benchmark_settings.GetField('iterations')
      suite = benchmark_settings.GetField('suite')
      retries = benchmark_settings.GetField('retries')
      run_local = benchmark_settings.GetField('run_local')

      if suite == 'telemetry_Crosperf':
        # Some test names are aliases for predefined sets of tests, declared
        # at the top of this module.
        if test_name == 'all_perfv2':
          self.AppendBenchmarkSet(benchmarks, telemetry_perfv2_tests,
                                  test_args, iterations, rm_chroot_tmp,
                                  perf_args, suite, show_all_results, retries,
                                  run_local)
        elif test_name == 'all_pagecyclers':
          self.AppendBenchmarkSet(benchmarks, telemetry_pagecycler_tests,
                                  test_args, iterations, rm_chroot_tmp,
                                  perf_args, suite, show_all_results, retries,
                                  run_local)
        elif test_name == 'all_toolchain_perf':
          self.AppendBenchmarkSet(benchmarks, telemetry_toolchain_perf_tests,
                                  test_args, iterations, rm_chroot_tmp,
                                  perf_args, suite, show_all_results, retries,
                                  run_local)
          # Add non-telemetry toolchain-perf benchmarks:
          benchmarks.append(Benchmark('graphics_WebGLAquarium',
                                      'graphics_WebGLAquarium',
                                      '',
                                      iterations,
                                      rm_chroot_tmp,
                                      perf_args,
                                      '',
                                      show_all_results,
                                      retries,
                                      run_local=False))
        elif test_name == 'all_toolchain_perf_old':
          self.AppendBenchmarkSet(
              benchmarks, telemetry_toolchain_old_perf_tests, test_args,
              iterations, rm_chroot_tmp, perf_args, suite, show_all_results,
              retries, run_local)
        else:
          benchmark = Benchmark(test_name, test_name, test_args, iterations,
                                rm_chroot_tmp, perf_args, suite,
                                show_all_results, retries, run_local)
          benchmarks.append(benchmark)
      else:
        # Add the single benchmark.
        # NOTE(review): run_local is forced to False here; it appears to be
        # honored only for telemetry_Crosperf suites -- confirm before
        # changing.
        benchmark = Benchmark(benchmark_name,
                              test_name,
                              test_args,
                              iterations,
                              rm_chroot_tmp,
                              perf_args,
                              suite,
                              show_all_results,
                              retries,
                              run_local=False)
        benchmarks.append(benchmark)

    if not benchmarks:
      raise RuntimeError('No benchmarks specified')

    # Construct labels.
    # Some fields are common with global settings. The values are
    # inherited and/or merged with the global settings values.
    labels = []
    all_label_settings = experiment_file.GetSettings('label')
    all_remote = list(remote)
    for label_settings in all_label_settings:
      label_name = label_settings.name
      image = label_settings.GetField('chromeos_image')
      chromeos_root = label_settings.GetField('chromeos_root')
      my_remote = self._StripQuotes(label_settings.GetField('remote'))
      compiler = label_settings.GetField('compiler')
      # Without an explicit image, derive one from the 'build' field.
      if image == '':
        build = label_settings.GetField('build')
        if len(build) == 0:
          raise RuntimeError("Can not have empty 'build' field!")
        image = label_settings.GetXbuddyPath(build, board, chromeos_root,
                                             log_level)

      cache_dir = label_settings.GetField('cache_dir')
      chrome_src = label_settings.GetField('chrome_src')

      # TODO(yunlian): We should consolidate code in machine_manager.py
      # to determine whether we are running from within google or not.
      # NOTE: the parentheses below make the pre-existing 'and'-over-'or'
      # precedence explicit; the grouping is unchanged.
      if ('corp.google.com' in socket.gethostname() and
          (not my_remote or
           (my_remote == remote and
            global_settings.GetField('board') != board))):
        my_remote = self.GetDefaultRemotes(board)
      if global_settings.GetField('same_machine') and len(my_remote) > 1:
        raise RuntimeError('Only one remote is allowed when same_machine '
                           'is turned on')
      all_remote += my_remote
      image_args = label_settings.GetField('image_args')
      if test_flag.GetTestMode():
        # pylint: disable=too-many-function-args
        label = MockLabel(label_name, image, chromeos_root, board, my_remote,
                          image_args, cache_dir, cache_only, log_level,
                          compiler, chrome_src)
      else:
        label = Label(label_name, image, chromeos_root, board, my_remote,
                      image_args, cache_dir, cache_only, log_level, compiler,
                      chrome_src)
      labels.append(label)

    if not labels:
      raise RuntimeError('No labels specified')

    email = global_settings.GetField('email')
    # Every label's remotes were already appended inside the loop above;
    # just de-duplicate the accumulated list here.
    all_remote = list(set(all_remote))
    experiment = Experiment(experiment_name, all_remote, working_directory,
                            chromeos_root, cache_conditions, labels, benchmarks,
                            experiment_file.Canonicalize(), email,
                            acquire_timeout, log_dir, log_level, share_cache,
                            results_dir, locks_dir)

    return experiment

  def GetDefaultRemotes(self, board):
    """Look up the default remote machines for |board|.

    Reads the 'default_remotes' file that lives next to this module; each
    line has the form 'board: machine1 machine2 ...'.

    Args:
      board: The board name to look up.

    Returns:
      A non-empty list of remote machine names.

    Raises:
      RuntimeError: if the file cannot be read or it contains no remotes
        for |board|.
    """
    default_remotes_file = os.path.join(
        os.path.dirname(__file__), 'default_remotes')
    try:
      with open(default_remotes_file) as f:
        for line in f:
          # maxsplit=1 tolerates values that themselves contain ':'.
          key, v = line.split(':', 1)
          if key.strip() == board:
            # split() (no argument) returns [] for an all-whitespace value,
            # unlike split(' ') which would return [''].
            remotes = v.split()
            if remotes:
              return remotes
            raise RuntimeError('There is no remote for {0}'.format(board))
    except IOError:
      # TODO: rethrow instead of throwing different exception.
      raise RuntimeError('IOError while reading file {0}'
                         .format(default_remotes_file))
    raise RuntimeError('There is no remote for {0}'.format(board))
284