# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""A module to generate experiments."""

from __future__ import print_function
import os
import re
import socket

from benchmark import Benchmark
import config
from experiment import Experiment
from label import Label
from label import MockLabel
from results_cache import CacheConditions
import test_flag
import file_lock_machine

# Users may want to run Telemetry tests either individually or in specified
# sets.  Here we define sets of tests that users may want to run together
# (see the example stanza after the set definitions below).

telemetry_perfv2_tests = [
    'dromaeo.domcoreattr', 'dromaeo.domcoremodify', 'dromaeo.domcorequery',
    'dromaeo.domcoretraverse', 'kraken', 'octane', 'robohornet_pro', 'sunspider'
]

telemetry_pagecycler_tests = [
    'page_cycler_v2.intl_ar_fa_he',
    'page_cycler_v2.intl_es_fr_pt-BR',
    'page_cycler_v2.intl_hi_ru',
    'page_cycler_v2.intl_ja_zh',
    'page_cycler_v2.intl_ko_th_vi',
    # 'page_cycler_v2.morejs',
    # 'page_cycler_v2.moz',
    # 'page_cycler_v2.netsim.top_10',
    'page_cycler_v2.tough_layout_cases',
    'page_cycler_v2.typical_25'
]

telemetry_toolchain_old_perf_tests = [
    'dromaeo.domcoremodify', 'page_cycler_v2.intl_es_fr_pt-BR',
    'page_cycler_v2.intl_hi_ru', 'page_cycler_v2.intl_ja_zh',
    'page_cycler_v2.intl_ko_th_vi', 'page_cycler_v2.netsim.top_10',
    'page_cycler_v2.typical_25', 'robohornet_pro', 'spaceport',
    'tab_switching.top_10'
]

telemetry_toolchain_perf_tests = [
    'octane',
    'kraken',
    'speedometer',
    'dromaeo.domcoreattr',
    'dromaeo.domcoremodify',
    'smoothness.tough_webgl_cases',
]

graphics_perf_tests = [
    'graphics_GLBench',
    'graphics_GLMark2',
    'graphics_SanAngeles',
    'graphics_WebGLAquarium',
    'graphics_WebGLPerformance',
]

telemetry_crosbolt_perf_tests = [
    'octane',
    'kraken',
    'speedometer',
    'jetstream',
    'startup.cold.blank_page',
    'smoothness.top_25_smooth',
]

crosbolt_perf_tests = [
    'graphics_WebGLAquarium',
    'video_PlaybackPerf.h264',
    'video_PlaybackPerf.vp9',
    'video_WebRtcPerf',
    'BootPerfServerCrosPerf',
    'power_Resume',
    'build_RootFilesystemSize',
    # 'cheets_AntutuTest',
    # 'cheets_PerfBootServer',
    # 'cheets_CandyCrushTest',
    # 'cheets_LinpackTest',
]
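
# GetExperiment() below expands a benchmark whose test_name matches one of the
# set names ('all_perfv2', 'all_pagecyclers', 'all_toolchain_perf',
# 'all_toolchain_perf_old', 'all_graphics_perf', 'all_crosbolt_perf') into the
# corresponding list of tests defined above.  A rough sketch of an
# experiment-file stanza that selects a whole set (the exact syntax is defined
# by ExperimentFile, so treat this as illustrative only):
#
#   benchmark: all_toolchain_perf {
#     suite: telemetry_Crosperf
#     iterations: 3
#   }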


class ExperimentFactory(object):
  """Factory class for building an Experiment, given an ExperimentFile as input.

  This factory is currently hardcoded to produce an experiment for running
  ChromeOS benchmarks, but the idea is that in the future, other types
  of experiments could be produced.
  """
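
  # Typical usage (a sketch; constructing the ExperimentFile and driving the
  # resulting Experiment happen elsewhere in crosperf):
  #
  #   factory = ExperimentFactory()
  #   experiment = factory.GetExperiment(experiment_file, working_directory,
  #                                      log_dir)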

  def AppendBenchmarkSet(self, benchmarks, benchmark_list, test_args,
                         iterations, rm_chroot_tmp, perf_args, suite,
                         show_all_results, retries, run_local):
    """Add all the tests in a set to the benchmarks list."""
    for test_name in benchmark_list:
      telemetry_benchmark = Benchmark(test_name, test_name, test_args,
                                      iterations, rm_chroot_tmp, perf_args,
                                      suite, show_all_results, retries,
                                      run_local)
      benchmarks.append(telemetry_benchmark)

  def GetExperiment(self, experiment_file, working_directory, log_dir):
    """Construct an experiment from an experiment file."""
    global_settings = experiment_file.GetGlobalSettings()
    experiment_name = global_settings.GetField('name')
    board = global_settings.GetField('board')
    remote = global_settings.GetField('remote')
    # Strip any double or single quotes the user may have put around entries
    # in the remote list, e.g. '"my-machine"' becomes 'my-machine'.
    new_remote = []
    if remote:
      for i in remote:
        c = re.sub('["\']', '', i)
        new_remote.append(c)
    remote = new_remote
    chromeos_root = global_settings.GetField('chromeos_root')
    rm_chroot_tmp = global_settings.GetField('rm_chroot_tmp')
    perf_args = global_settings.GetField('perf_args')
    acquire_timeout = global_settings.GetField('acquire_timeout')
    cache_dir = global_settings.GetField('cache_dir')
    cache_only = global_settings.GetField('cache_only')
    config.AddConfig('no_email', global_settings.GetField('no_email'))
    share_cache = global_settings.GetField('share_cache')
    results_dir = global_settings.GetField('results_dir')
    use_file_locks = global_settings.GetField('use_file_locks')
    locks_dir = global_settings.GetField('locks_dir')
    # If we pass a blank locks_dir to the Experiment, it will use the AFE
    # server locking mechanism.  So if the user requested use_file_locks but
    # did not specify a locks dir, fall back to the default locks dir in
    # file_lock_machine.
    if use_file_locks and not locks_dir:
      locks_dir = file_lock_machine.Machine.LOCKS_DIR
    chrome_src = global_settings.GetField('chrome_src')
    show_all_results = global_settings.GetField('show_all_results')
    log_level = global_settings.GetField('logging_level')
    if log_level not in ('quiet', 'average', 'verbose'):
      log_level = 'verbose'
    # Default cache hit conditions. The image checksum in the cache and the
    # computed checksum of the image must match. Also a cache file must exist.
    cache_conditions = [
        CacheConditions.CACHE_FILE_EXISTS, CacheConditions.CHECKSUMS_MATCH
    ]
    if global_settings.GetField('rerun_if_failed'):
      cache_conditions.append(CacheConditions.RUN_SUCCEEDED)
    if global_settings.GetField('rerun'):
      cache_conditions.append(CacheConditions.FALSE)
    if global_settings.GetField('same_machine'):
      cache_conditions.append(CacheConditions.SAME_MACHINE_MATCH)
    if global_settings.GetField('same_specs'):
      cache_conditions.append(CacheConditions.MACHINES_MATCH)

    # Construct benchmarks.
    # Some fields are common with global settings. The values are
    # inherited and/or merged with the global settings values.
    benchmarks = []
    all_benchmark_settings = experiment_file.GetSettings('benchmark')
    for benchmark_settings in all_benchmark_settings:
      benchmark_name = benchmark_settings.name
      test_name = benchmark_settings.GetField('test_name')
      if not test_name:
        test_name = benchmark_name
      test_args = benchmark_settings.GetField('test_args')
      iterations = benchmark_settings.GetField('iterations')
      suite = benchmark_settings.GetField('suite')
      retries = benchmark_settings.GetField('retries')
      run_local = benchmark_settings.GetField('run_local')

      if suite == 'telemetry_Crosperf':
        if test_name == 'all_perfv2':
          self.AppendBenchmarkSet(benchmarks, telemetry_perfv2_tests, test_args,
                                  iterations, rm_chroot_tmp, perf_args, suite,
                                  show_all_results, retries, run_local)
        elif test_name == 'all_pagecyclers':
          self.AppendBenchmarkSet(benchmarks, telemetry_pagecycler_tests,
                                  test_args, iterations, rm_chroot_tmp,
                                  perf_args, suite, show_all_results, retries,
                                  run_local)
        elif test_name == 'all_toolchain_perf':
          self.AppendBenchmarkSet(benchmarks, telemetry_toolchain_perf_tests,
                                  test_args, iterations, rm_chroot_tmp,
                                  perf_args, suite, show_all_results, retries,
                                  run_local)
          # Add non-telemetry toolchain-perf benchmarks:
          benchmarks.append(
              Benchmark(
                  'graphics_WebGLAquarium',
                  'graphics_WebGLAquarium',
                  '',
                  iterations,
                  rm_chroot_tmp,
                  perf_args,
                  '',
                  show_all_results,
                  retries,
                  run_local=False))
        elif test_name == 'all_toolchain_perf_old':
          self.AppendBenchmarkSet(benchmarks,
                                  telemetry_toolchain_old_perf_tests, test_args,
                                  iterations, rm_chroot_tmp, perf_args, suite,
                                  show_all_results, retries, run_local)
        else:
          benchmark = Benchmark(test_name, test_name, test_args, iterations,
                                rm_chroot_tmp, perf_args, suite,
                                show_all_results, retries, run_local)
          benchmarks.append(benchmark)
      else:
        if test_name == 'all_graphics_perf':
          self.AppendBenchmarkSet(benchmarks,
                                  graphics_perf_tests, '',
                                  iterations, rm_chroot_tmp, perf_args, '',
                                  show_all_results, retries, run_local=False)
        elif test_name == 'all_crosbolt_perf':
          self.AppendBenchmarkSet(benchmarks,
                                  telemetry_crosbolt_perf_tests, test_args,
                                  iterations, rm_chroot_tmp, perf_args,
                                  'telemetry_Crosperf', show_all_results,
                                  retries, run_local)
          self.AppendBenchmarkSet(benchmarks,
                                  crosbolt_perf_tests, '',
                                  iterations, rm_chroot_tmp, perf_args, '',
                                  show_all_results, retries, run_local=False)
        else:
          # Add the single benchmark.
          benchmark = Benchmark(
              benchmark_name,
              test_name,
              test_args,
              iterations,
              rm_chroot_tmp,
              perf_args,
              suite,
              show_all_results,
              retries,
              run_local=False)
          benchmarks.append(benchmark)

    if not benchmarks:
      raise RuntimeError('No benchmarks specified')

    # Construct labels.
    # Some fields are common with global settings. The values are
    # inherited and/or merged with the global settings values.
    labels = []
    all_label_settings = experiment_file.GetSettings('label')
    all_remote = list(remote)
    for label_settings in all_label_settings:
      label_name = label_settings.name
      image = label_settings.GetField('chromeos_image')
      autotest_path = label_settings.GetField('autotest_path')
      chromeos_root = label_settings.GetField('chromeos_root')
      my_remote = label_settings.GetField('remote')
      compiler = label_settings.GetField('compiler')
      new_remote = []
      if my_remote:
        for i in my_remote:
          c = re.sub('["\']', '', i)
          new_remote.append(c)
      my_remote = new_remote
      if image == '':
        build = label_settings.GetField('build')
        if len(build) == 0:
          raise RuntimeError("Cannot have an empty 'build' field!")
        image, autotest_path = label_settings.GetXbuddyPath(build,
                                                            autotest_path,
                                                            board,
                                                            chromeos_root,
                                                            log_level)

      cache_dir = label_settings.GetField('cache_dir')
      chrome_src = label_settings.GetField('chrome_src')

      # TODO(yunlian): We should consolidate the code in machine_manager.py
      # that determines whether we are running from within Google or not.
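      # Fall back to the checked-in default remotes (see GetDefaultRemotes)
      # when running from a corp host and this label does not provide its own
      # remotes.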
      if ('corp.google.com' in socket.gethostname() and
          (not my_remote or my_remote == remote and
           global_settings.GetField('board') != board)):
        my_remote = self.GetDefaultRemotes(board)
      if global_settings.GetField('same_machine') and len(my_remote) > 1:
        raise RuntimeError('Only one remote is allowed when same_machine '
                           'is turned on')
      all_remote += my_remote
      image_args = label_settings.GetField('image_args')
      if test_flag.GetTestMode():
        # pylint: disable=too-many-function-args
        label = MockLabel(label_name, image, autotest_path, chromeos_root,
                          board, my_remote, image_args, cache_dir, cache_only,
                          log_level, compiler, chrome_src)
      else:
        label = Label(label_name, image, autotest_path, chromeos_root, board,
                      my_remote, image_args, cache_dir, cache_only, log_level,
                      compiler, chrome_src)
      labels.append(label)

    if not labels:
      raise RuntimeError('No labels specified')

    email = global_settings.GetField('email')
    all_remote += list(set(my_remote))
    all_remote = list(set(all_remote))
    experiment = Experiment(experiment_name, all_remote, working_directory,
                            chromeos_root, cache_conditions, labels, benchmarks,
                            experiment_file.Canonicalize(), email,
                            acquire_timeout, log_dir, log_level, share_cache,
                            results_dir, locks_dir)

    return experiment

  def GetDefaultRemotes(self, board):
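    """Look up the default remote machines for |board|.

    The 'default_remotes' file next to this module is expected (based on the
    parsing below) to contain one 'board: machine1 machine2 ...' entry per
    line.
    """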
    default_remotes_file = os.path.join(
        os.path.dirname(__file__), 'default_remotes')
    try:
      with open(default_remotes_file) as f:
        for line in f:
          key, v = line.split(':')
          if key.strip() == board:
            remotes = v.strip().split()
            if remotes:
              return remotes
            else:
              raise RuntimeError('There is no remote for {0}'.format(board))
    except IOError:
      # TODO: rethrow instead of throwing different exception.
      raise RuntimeError('IOError while reading file {0}'
                         .format(default_remotes_file))
    else:
      raise RuntimeError('There is no remote for {0}'.format(board))
