experiment_factory.py revision 5c09fc2966ac49263ce7154c2905f2a86aeda297
#!/usr/bin/python

# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""A module to generate experiments."""

import os
import re
import socket

from benchmark import Benchmark
import config
from experiment import Experiment
from label import Label
from label import MockLabel
from results_cache import CacheConditions
import test_flag
# Users may want to run Telemetry tests either individually or in
# specified sets.  Here we define sets of tests that users may want
# to run together; an example experiment-file stanza that selects one
# of these sets follows the set definitions below.

telemetry_perfv2_tests = ['dromaeo.domcoreattr',
                          'dromaeo.domcoremodify',
                          'dromaeo.domcorequery',
                          'dromaeo.domcoretraverse',
                          'kraken',
                          # The following benchmark is extremely flaky, so
                          # omit it for now.
                          # 'memory.top_25',
                          'octane',
                          'robohornet_pro',
                          # The following benchmark is broken (and hanging)
                          # for now, so omit it.
                          # 'smoothness.top_25',
                          'sunspider',
                         ]

telemetry_pagecycler_tests = ['page_cycler.dhtml',
                              'page_cycler.intl_ar_fa_he',
                              'page_cycler.intl_es_fr_pt-BR',
                              'page_cycler.intl_hi_ru',
                              'page_cycler.intl_ja_zh',
                              'page_cycler.intl_ko_th_vi',
                              'page_cycler.morejs',
                              'page_cycler.moz',
                              'page_cycler.netsim.top_10',
                              'page_cycler.tough_layout_cases',
                              'page_cycler.typical_25',
                              # The following benchmarks are now deprecated
                              # in Telemetry:
                              # 'page_cycler.indexed_db.basic_insert',
                              # 'page_cycler.bloat',
                             ]

telemetry_toolchain_perf_tests = ['dromaeo.domcoremodify',
                                  'page_cycler.intl_es_fr_pt-BR',
                                  'page_cycler.intl_hi_ru',
                                  'page_cycler.intl_ja_zh',
                                  'page_cycler.intl_ko_th_vi',
                                  'page_cycler.netsim.top_10',
                                  'page_cycler.typical_25',
                                  'robohornet_pro',
                                  'spaceport',
                                  'tab_switching.top_10',
                                  # The following benchmarks are now
                                  # deprecated in Telemetry:
                                  # 'canvasmark',
                                  # 'jsgamebench',
                                  # 'page_cycler.bloat',
                                  # 'peacekeeper.html',
                                 ]
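
# A benchmark stanza in an experiment file can select one of the sets above
# by naming it as the test under the telemetry_Crosperf suite.  A minimal
# sketch (field values are illustrative):
#
#   benchmark: all_perfv2 {
#     suite: telemetry_Crosperf
#     iterations: 3
#   }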

class ExperimentFactory(object):
  """Factory class for building an Experiment, given an ExperimentFile as input.

  This factory is currently hardcoded to produce an experiment for running
  ChromeOS benchmarks, but the idea is that in the future, other types
  of experiments could be produced.
  """
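
  # A minimal usage sketch (hypothetical driver code; ExperimentFile comes
  # from the surrounding crosperf modules, and the paths are illustrative):
  #
  #   factory = ExperimentFactory()
  #   experiment = factory.GetExperiment(experiment_file,
  #                                      working_directory="/tmp/crosperf",
  #                                      log_dir="/tmp/crosperf/logs")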

  def _AppendBenchmarkSet(self, benchmarks, benchmark_list, test_args,
                          iterations, rm_chroot_tmp, perf_args, suite,
                          show_all_results):
    """Add all the tests in a set to the benchmarks list."""
    for test_name in benchmark_list:
      telemetry_benchmark = Benchmark(test_name, test_name, test_args,
                                      iterations, rm_chroot_tmp, perf_args,
                                      suite, show_all_results)
      benchmarks.append(telemetry_benchmark)


  def GetExperiment(self, experiment_file, working_directory, log_dir):
    """Construct an experiment from an experiment file."""
    global_settings = experiment_file.GetGlobalSettings()
    experiment_name = global_settings.GetField("name")
    board = global_settings.GetField("board")
    remote = global_settings.GetField("remote")
    # Strip any double or single quotes that the user may have added
    # around entries in the remote list.
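    # For example, an entry written as '"my-machine.example.com"' becomes
    # 'my-machine.example.com' (the hostname is illustrative).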
    new_remote = []
    for i in remote:
      c = re.sub('["\']', '', i)
      new_remote.append(c)
    remote = new_remote
    chromeos_root = global_settings.GetField("chromeos_root")
    rm_chroot_tmp = global_settings.GetField("rm_chroot_tmp")
    perf_args = global_settings.GetField("perf_args")
    acquire_timeout = global_settings.GetField("acquire_timeout")
    cache_dir = global_settings.GetField("cache_dir")
    cache_only = global_settings.GetField("cache_only")
    config.AddConfig("no_email", global_settings.GetField("no_email"))
    share_cache = global_settings.GetField("share_cache")
    results_dir = global_settings.GetField("results_dir")
    locks_dir = global_settings.GetField("locks_dir")
    chrome_src = global_settings.GetField("chrome_src")
    show_all_results = global_settings.GetField("show_all_results")
    log_level = global_settings.GetField("logging_level")
    if log_level not in ("quiet", "average", "verbose"):
      log_level = "verbose"
    # Default cache hit conditions. The image checksum in the cache and the
    # computed checksum of the image must match. Also a cache file must exist.
    cache_conditions = [CacheConditions.CACHE_FILE_EXISTS,
                        CacheConditions.CHECKSUMS_MATCH]
    if global_settings.GetField("rerun_if_failed"):
      cache_conditions.append(CacheConditions.RUN_SUCCEEDED)
    if global_settings.GetField("rerun"):
      cache_conditions.append(CacheConditions.FALSE)
    if global_settings.GetField("same_machine"):
      cache_conditions.append(CacheConditions.SAME_MACHINE_MATCH)
    if global_settings.GetField("same_specs"):
      cache_conditions.append(CacheConditions.MACHINES_MATCH)
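    # For instance, with 'rerun' set the list gains CacheConditions.FALSE,
    # which presumably never matches, so every benchmark is re-run rather
    # than served from the cache.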

    # Construct benchmarks.
    benchmarks = []
    all_benchmark_settings = experiment_file.GetSettings("benchmark")
    for benchmark_settings in all_benchmark_settings:
      benchmark_name = benchmark_settings.name
      test_name = benchmark_settings.GetField("test_name")
      if not test_name:
        test_name = benchmark_name
      test_args = benchmark_settings.GetField("test_args")
      iterations = benchmark_settings.GetField("iterations")
      suite = benchmark_settings.GetField("suite")

      if suite == 'telemetry_Crosperf':
        if test_name == 'all_perfv2':
          self._AppendBenchmarkSet(benchmarks, telemetry_perfv2_tests,
                                   test_args, iterations, rm_chroot_tmp,
                                   perf_args, suite, show_all_results)
        elif test_name == 'all_pagecyclers':
          self._AppendBenchmarkSet(benchmarks, telemetry_pagecycler_tests,
                                   test_args, iterations, rm_chroot_tmp,
                                   perf_args, suite, show_all_results)
        elif test_name == 'all_toolchain_perf':
          self._AppendBenchmarkSet(benchmarks, telemetry_toolchain_perf_tests,
                                   test_args, iterations, rm_chroot_tmp,
                                   perf_args, suite, show_all_results)
        else:
          benchmark = Benchmark(test_name, test_name, test_args,
                                iterations, rm_chroot_tmp, perf_args, suite,
                                show_all_results)
          benchmarks.append(benchmark)
      else:
        # Add the single benchmark.
        benchmark = Benchmark(benchmark_name, test_name, test_args,
                              iterations, rm_chroot_tmp, perf_args, suite,
                              show_all_results)
        benchmarks.append(benchmark)

    # Construct labels.
    labels = []
    all_label_settings = experiment_file.GetSettings("label")
    all_remote = list(remote)
    for label_settings in all_label_settings:
      label_name = label_settings.name
      image = label_settings.GetField("chromeos_image")
      chromeos_root = label_settings.GetField("chromeos_root")
      my_remote = label_settings.GetField("remote")
      new_remote = []
      for i in my_remote:
        c = re.sub('["\']', '', i)
        new_remote.append(c)
      my_remote = new_remote
      if image == "":
        build = label_settings.GetField("build")
        if len(build) == 0:
          raise Exception("Cannot have an empty 'build' field!")
        image = label_settings.GetXbuddyPath(build, board, chromeos_root,
                                             log_level)
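      # A build value such as 'lumpy-release/R34-5417.0.0' (illustrative)
      # would be resolved to an xbuddy path for the matching image.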

      cache_dir = label_settings.GetField("cache_dir")
      chrome_src = label_settings.GetField("chrome_src")

      # TODO(yunlian): We should consolidate code in machine_manager.py
      # to determine whether we are running from within Google or not.
      if ("corp.google.com" in socket.gethostname() and
          (not my_remote
           or my_remote == remote
           and global_settings.GetField("board") != board)):
        my_remote = self.GetDefaultRemotes(board)
      if global_settings.GetField("same_machine") and len(my_remote) > 1:
        raise Exception("Only one remote is allowed when same_machine "
                        "is turned on.")
      all_remote += my_remote
      image_args = label_settings.GetField("image_args")
      if test_flag.GetTestMode():
        label = MockLabel(label_name, image, chromeos_root, board, my_remote,
                          image_args, cache_dir, cache_only, chrome_src)
      else:
        label = Label(label_name, image, chromeos_root, board, my_remote,
                      image_args, cache_dir, cache_only, chrome_src)
      labels.append(label)

    email = global_settings.GetField("email")
    # Each label's remotes were already appended inside the loop above;
    # deduplicate the combined list.
    all_remote = list(set(all_remote))
    experiment = Experiment(experiment_name, all_remote,
                            working_directory, chromeos_root,
                            cache_conditions, labels, benchmarks,
                            experiment_file.Canonicalize(),
                            email, acquire_timeout, log_dir, log_level,
                            share_cache,
                            results_dir, locks_dir)

    return experiment

  def GetDefaultRemotes(self, board):
    """Return the default remote machines for |board|."""
    default_remotes_file = os.path.join(os.path.dirname(__file__),
                                        "default_remotes")
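    # Each line of the file is expected to map a board name to its machines,
    # e.g. (hypothetical contents):
    #   lumpy: chromeos-lab1-host1 chromeos-lab1-host2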
    try:
      with open(default_remotes_file) as f:
        for line in f:
          key, v = line.split(":")
          if key.strip() == board:
            # split() with no argument drops surrounding whitespace, so an
            # empty value yields an empty list rather than [''].
            remotes = v.strip().split()
            if remotes:
              return remotes
            else:
              raise Exception("There is no remote for {0}".format(board))
    except IOError:
      raise Exception("IOError while reading file {0}"
                      .format(default_remotes_file))
    else:
      raise Exception("There is no remote for {0}".format(board))
246