experiment_factory.py revision 0e2d9a0bafc6bff6a53ca4ddf779715994f28ea8
1#!/usr/bin/python
2
3# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
4# Use of this source code is governed by a BSD-style license that can be
5# found in the LICENSE file.
6
7"""A module to generate experiments."""
8
9import os
10import re
11import socket
12
13from benchmark import Benchmark
14import config
15from experiment import Experiment
16from label import Label
17from label import MockLabel
18from results_cache import CacheConditions
19import test_flag
20
21# Users may want to run Telemetry tests either individually, or in
22# specified sets.  Here we define sets of tests that users may want
23# to run together.
24
# Benchmarks in the 'all_perfv2' set.  'memory.top_25' (extremely flaky)
# and 'smoothness.top_25' (currently broken/hanging) are deliberately
# omitted from this list.
telemetry_perfv2_tests = [
    'dromaeo.domcoreattr',
    'dromaeo.domcoremodify',
    'dromaeo.domcorequery',
    'dromaeo.domcoretraverse',
    'kraken',
    'octane',
    'robohornet_pro',
    'sunspider',
]
38
# Benchmarks in the 'all_pagecyclers' set.  The now-deprecated Telemetry
# benchmarks 'page_cycler.indexed_db.basic_insert' and 'page_cycler.bloat'
# are deliberately omitted.
telemetry_pagecycler_tests = [
    'page_cycler.dhtml',
    'page_cycler.intl_ar_fa_he',
    'page_cycler.intl_es_fr_pt-BR',
    'page_cycler.intl_hi_ru',
    'page_cycler.intl_ja_zh',
    'page_cycler.intl_ko_th_vi',
    'page_cycler.morejs',
    'page_cycler.moz',
    'page_cycler.netsim.top_10',
    'page_cycler.tough_layout_cases',
    'page_cycler.typical_25',
]
55
# Benchmarks in the 'all_toolchain_perf' set.  The now-deprecated Telemetry
# benchmarks 'canvasmark', 'jsgamebench', 'page_cycler.bloat' and
# 'peacekeeper.html' are deliberately omitted.
telemetry_toolchain_perf_tests = [
    'dromaeo.domcoremodify',
    'page_cycler.intl_es_fr_pt-BR',
    'page_cycler.intl_hi_ru',
    'page_cycler.intl_ja_zh',
    'page_cycler.intl_ko_th_vi',
    'page_cycler.netsim.top_10',
    'page_cycler.typical_25',
    'robohornet_pro',
    'spaceport',
    'tab_switching.top_10',
]
73
class ExperimentFactory(object):
  """Factory class for building an Experiment, given an ExperimentFile as input.

  This factory is currently hardcoded to produce an experiment for running
  ChromeOS benchmarks, but the idea is that in the future, other types
  of experiments could be produced.
  """

  def _StripQuotes(self, remote_list):
    """Return a copy of remote_list with all double/single quotes removed.

    Users sometimes wrap the remote machine names in quotes in the
    experiment file; the quotes are not part of the host names.
    """
    return [re.sub('["\']', '', r) for r in remote_list]

  def _AppendBenchmarkSet(self, benchmarks, benchmark_list, test_args,
                          iterations, rm_chroot_tmp, perf_args, suite,
                          show_all_results):
    """Add all the tests in a set to the benchmarks list."""
    for test_name in benchmark_list:
      telemetry_benchmark = Benchmark(test_name, test_name, test_args,
                                      iterations, rm_chroot_tmp, perf_args,
                                      suite, show_all_results)
      benchmarks.append(telemetry_benchmark)

  def GetExperiment(self, experiment_file, working_directory, log_dir):
    """Construct an experiment from an experiment file.

    Args:
      experiment_file: ExperimentFile providing global, benchmark and
        label settings.
      working_directory: Directory in which the experiment will run.
      log_dir: Directory for the experiment's log output.

    Returns:
      A fully-populated Experiment object.

    Raises:
      Exception: if a label has both an empty image and an empty 'build'
        field, or if 'same_machine' is set with more than one remote.
    """
    global_settings = experiment_file.GetGlobalSettings()
    experiment_name = global_settings.GetField("name")
    board = global_settings.GetField("board")
    # Remove any stray quotes users may have put around the remote names.
    remote = self._StripQuotes(global_settings.GetField("remote"))
    chromeos_root = global_settings.GetField("chromeos_root")
    rm_chroot_tmp = global_settings.GetField("rm_chroot_tmp")
    perf_args = global_settings.GetField("perf_args")
    acquire_timeout = global_settings.GetField("acquire_timeout")
    cache_dir = global_settings.GetField("cache_dir")
    cache_only = global_settings.GetField("cache_only")
    config.AddConfig("no_email", global_settings.GetField("no_email"))
    share_cache = global_settings.GetField("share_cache")
    results_dir = global_settings.GetField("results_dir")
    chrome_src = global_settings.GetField("chrome_src")
    show_all_results = global_settings.GetField("show_all_results")
    log_level = global_settings.GetField("logging_level")
    if log_level not in ("quiet", "average", "verbose"):
      log_level = "verbose"
    # Default cache hit conditions. The image checksum in the cache and the
    # computed checksum of the image must match. Also a cache file must exist.
    cache_conditions = [CacheConditions.CACHE_FILE_EXISTS,
                        CacheConditions.CHECKSUMS_MATCH]
    if global_settings.GetField("rerun_if_failed"):
      cache_conditions.append(CacheConditions.RUN_SUCCEEDED)
    if global_settings.GetField("rerun"):
      cache_conditions.append(CacheConditions.FALSE)
    if global_settings.GetField("same_machine"):
      cache_conditions.append(CacheConditions.SAME_MACHINE_MATCH)
    if global_settings.GetField("same_specs"):
      cache_conditions.append(CacheConditions.MACHINES_MATCH)

    # Construct benchmarks.
    benchmarks = []
    all_benchmark_settings = experiment_file.GetSettings("benchmark")
    for benchmark_settings in all_benchmark_settings:
      benchmark_name = benchmark_settings.name
      test_name = benchmark_settings.GetField("test_name")
      if not test_name:
        test_name = benchmark_name
      test_args = benchmark_settings.GetField("test_args")
      iterations = benchmark_settings.GetField("iterations")
      suite = benchmark_settings.GetField("suite")

      if suite == 'telemetry_Crosperf':
        # The special 'all_*' test names expand into the predefined sets of
        # Telemetry benchmarks declared at the top of this module.
        if test_name == 'all_perfv2':
          self._AppendBenchmarkSet(benchmarks, telemetry_perfv2_tests,
                                   test_args, iterations, rm_chroot_tmp,
                                   perf_args, suite, show_all_results)
        elif test_name == 'all_pagecyclers':
          self._AppendBenchmarkSet(benchmarks, telemetry_pagecycler_tests,
                                   test_args, iterations, rm_chroot_tmp,
                                   perf_args, suite, show_all_results)
        elif test_name == 'all_toolchain_perf':
          self._AppendBenchmarkSet(benchmarks, telemetry_toolchain_perf_tests,
                                   test_args, iterations, rm_chroot_tmp,
                                   perf_args, suite, show_all_results)
        else:
          benchmark = Benchmark(test_name, test_name, test_args,
                                iterations, rm_chroot_tmp, perf_args, suite,
                                show_all_results)
          benchmarks.append(benchmark)
      else:
        # Add the single benchmark.
        benchmark = Benchmark(benchmark_name, test_name, test_args,
                              iterations, rm_chroot_tmp, perf_args, suite,
                              show_all_results)
        benchmarks.append(benchmark)

    # Construct labels.
    labels = []
    all_label_settings = experiment_file.GetSettings("label")
    all_remote = list(remote)
    for label_settings in all_label_settings:
      label_name = label_settings.name
      image = label_settings.GetField("chromeos_image")
      chromeos_root = label_settings.GetField("chromeos_root")
      my_remote = self._StripQuotes(label_settings.GetField("remote"))
      if image == "":
        build = label_settings.GetField("build")
        if len(build) == 0:
          raise Exception("Can not have empty 'build' field!")
        image = label_settings.GetXbuddyPath(build, board, chromeos_root,
                                             log_level)

      cache_dir = label_settings.GetField("cache_dir")
      chrome_src = label_settings.GetField("chrome_src")

      # TODO(yunlian): We should consolidate code in machine_manager.py
      # to determine whether we are running from within google or not.
      # Parentheses below make the original and/or precedence explicit
      # ('and' binds tighter than 'or'); the condition is unchanged.
      if ("corp.google.com" in socket.gethostname() and
          (not my_remote or
           (my_remote == remote and
            global_settings.GetField("board") != board))):
        my_remote = self.GetDefaultRemotes(board)
      if global_settings.GetField("same_machine") and len(my_remote) > 1:
        raise Exception("Only one remote is allowed when same_machine "
                        "is turned on")
      all_remote += my_remote
      image_args = label_settings.GetField("image_args")
      if test_flag.GetTestMode():
        label = MockLabel(label_name, image, chromeos_root, board, my_remote,
                          image_args, cache_dir, cache_only, chrome_src)
      else:
        label = Label(label_name, image, chromeos_root, board, my_remote,
                      image_args, cache_dir, cache_only, chrome_src)
      labels.append(label)

    email = global_settings.GetField("email")
    # Each label's remotes were already accumulated into all_remote inside
    # the loop above; re-adding the last label's remotes here (as an earlier
    # revision did) was redundant and raised a NameError when the experiment
    # file defined no labels.  Deduplicate before building the Experiment.
    all_remote = list(set(all_remote))
    experiment = Experiment(experiment_name, all_remote,
                            working_directory, chromeos_root,
                            cache_conditions, labels, benchmarks,
                            experiment_file.Canonicalize(),
                            email, acquire_timeout, log_dir, log_level,
                            share_cache,
                            results_dir)

    return experiment

  def GetDefaultRemotes(self, board):
    """Look up the default remote machines for |board|.

    Reads the 'default_remotes' file that lives next to this module; each
    line has the form '<board>: <machine> <machine> ...'.

    Returns:
      A non-empty list of machine names for the given board.

    Raises:
      Exception: if the file cannot be read, or it has no (non-empty)
        entry for the board.
    """
    default_remotes_file = os.path.join(os.path.dirname(__file__),
                                        "default_remotes")
    try:
      with open(default_remotes_file) as f:
        for line in f:
          key, v = line.split(":")
          if key.strip() == board:
            # split() (rather than split(" ")) collapses repeated whitespace
            # and yields an empty list for a blank value, so a board listed
            # with no machines is correctly reported as an error below.
            remotes = v.strip().split()
            if remotes:
              return remotes
            else:
              raise Exception("There is no remote for {0}".format(board))
    except IOError:
      raise Exception("IOError while reading file {0}"
                      .format(default_remotes_file))
    else:
      raise Exception("There is no remote for {0}".format(board))
245