experiment_factory.py revision 2250df9b2fb7ffe523b4f90e06b0d7602b83876f
1#!/usr/bin/python
2
3# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
4# Use of this source code is governed by a BSD-style license that can be
5# found in the LICENSE file.
6
"""A module to generate experiments."""
8
9import os
10import re
11import socket
12
13from benchmark import Benchmark
14import config
15from experiment import Experiment
16from label import Label
17from label import MockLabel
18from results_cache import CacheConditions
19import test_flag
20
21# Users may want to run Telemetry tests either individually, or in
22# specified sets.  Here we define sets of tests that users may want
23# to run together.
24
# Telemetry tests that make up the 'all_perfv2' set.
telemetry_perfv2_tests = [
    'dromaeo.domcoreattr',
    'dromaeo.domcoremodify',
    'dromaeo.domcorequery',
    'dromaeo.domcoretraverse',
    'kraken',
    # The following benchmark is extremely flaky, so omit it for now.
    # 'memory.top_25',
    'octane',
    'robohornet_pro',
    # The following benchmark is broken (and hanging) for now, so omit it.
    # 'smoothness.top_25',
    'sunspider',
]
38
# Telemetry tests that make up the 'all_pagecyclers' set.
telemetry_pagecycler_tests = [
    'page_cycler.indexed_db.basic_insert',
    'page_cycler.bloat',
    'page_cycler.dhtml',
    'page_cycler.intl_ar_fa_he',
    'page_cycler.intl_es_fr_pt-BR',
    'page_cycler.intl_hi_ru',
    'page_cycler.intl_ja_zh',
    'page_cycler.intl_ko_th_vi',
    'page_cycler.morejs',
    'page_cycler.moz',
    'page_cycler.netsim.top_10',
    'page_cycler.tough_layout_cases',
    'page_cycler.typical_25',
]
53
# Telemetry tests that make up the 'all_toolchain_perf' set.
telemetry_toolchain_perf_tests = [
    'canvasmark',
    'jsgamebench',
    'dromaeo.domcoremodify',
    'page_cycler.bloat',
    'page_cycler.intl_es_fr_pt-BR',
    'page_cycler.intl_hi_ru',
    'page_cycler.intl_ja_zh',
    'page_cycler.intl_ko_th_vi',
    'page_cycler.netsim.top_10',
    'page_cycler.typical_25',
    'peacekeeper.html',
    'robohornet_pro',
    'spaceport',
    'tab_switching.top_10',
]
69
class ExperimentFactory(object):
  """Factory class for building an Experiment, given an ExperimentFile as input.

  This factory is currently hardcoded to produce an experiment for running
  ChromeOS benchmarks, but the idea is that in the future, other types
  of experiments could be produced.
  """

  @staticmethod
  def _StripQuotes(remotes):
    """Return a copy of remotes with any stray " or ' characters removed.

    Users sometimes quote the machine names in the experiment file; the
    quotes are not part of the machine name.
    """
    return [re.sub('["\']', '', r) for r in remotes]

  def _AppendBenchmarkSet(self, benchmarks, benchmark_list, test_args,
                          iterations, rm_chroot_tmp, perf_args, suite,
                          show_all_results):
    """Add all the tests in a set to the benchmarks list."""
    for test_name in benchmark_list:
      telemetry_benchmark = Benchmark(test_name, test_name, test_args,
                                      iterations, rm_chroot_tmp, perf_args,
                                      suite, show_all_results)
      benchmarks.append(telemetry_benchmark)

  def GetExperiment(self, experiment_file, working_directory, log_dir):
    """Construct an experiment from an experiment file.

    Args:
      experiment_file: An ExperimentFile whose global, benchmark and label
        settings describe the experiment to build.
      working_directory: Directory in which the experiment will run.
      log_dir: Directory for the experiment's logs.

    Returns:
      A fully-populated Experiment object.

    Raises:
      Exception: If 'same_machine' is set but a label resolves to more than
        one remote machine.
    """
    global_settings = experiment_file.GetGlobalSettings()
    experiment_name = global_settings.GetField("name")
    board = global_settings.GetField("board")
    remote = self._StripQuotes(global_settings.GetField("remote"))
    chromeos_root = global_settings.GetField("chromeos_root")
    rm_chroot_tmp = global_settings.GetField("rm_chroot_tmp")
    perf_args = global_settings.GetField("perf_args")
    acquire_timeout = global_settings.GetField("acquire_timeout")
    cache_dir = global_settings.GetField("cache_dir")
    config.AddConfig("no_email", global_settings.GetField("no_email"))
    share_users = global_settings.GetField("share_users")
    results_dir = global_settings.GetField("results_dir")
    chrome_src = global_settings.GetField("chrome_src")
    show_all_results = global_settings.GetField("show_all_results")
    log_level = global_settings.GetField("logging_level")
    # Fall back to the most verbose logging for unknown levels.
    if log_level not in ("quiet", "average", "verbose"):
      log_level = "verbose"

    # Default cache hit conditions. The image checksum in the cache and the
    # computed checksum of the image must match. Also a cache file must exist.
    cache_conditions = [CacheConditions.CACHE_FILE_EXISTS,
                        CacheConditions.CHECKSUMS_MATCH]
    if global_settings.GetField("rerun_if_failed"):
      cache_conditions.append(CacheConditions.RUN_SUCCEEDED)
    if global_settings.GetField("rerun"):
      cache_conditions.append(CacheConditions.FALSE)
    if global_settings.GetField("same_machine"):
      cache_conditions.append(CacheConditions.SAME_MACHINE_MATCH)
    if global_settings.GetField("same_specs"):
      cache_conditions.append(CacheConditions.MACHINES_MATCH)

    # Construct benchmarks.
    benchmarks = []
    all_benchmark_settings = experiment_file.GetSettings("benchmark")
    for benchmark_settings in all_benchmark_settings:
      benchmark_name = benchmark_settings.name
      test_name = benchmark_settings.GetField("test_name")
      if not test_name:
        test_name = benchmark_name
      test_args = benchmark_settings.GetField("test_args")
      iterations = benchmark_settings.GetField("iterations")
      suite = benchmark_settings.GetField("suite")

      if suite == 'telemetry_Crosperf':
        # The 'all_*' pseudo-test names expand into the predefined sets of
        # Telemetry tests declared at module level.
        if test_name == 'all_perfv2':
          self._AppendBenchmarkSet(benchmarks, telemetry_perfv2_tests,
                                   test_args, iterations, rm_chroot_tmp,
                                   perf_args, suite, show_all_results)
        elif test_name == 'all_pagecyclers':
          self._AppendBenchmarkSet(benchmarks, telemetry_pagecycler_tests,
                                   test_args, iterations, rm_chroot_tmp,
                                   perf_args, suite, show_all_results)
        elif test_name == 'all_toolchain_perf':
          self._AppendBenchmarkSet(benchmarks, telemetry_toolchain_perf_tests,
                                   test_args, iterations, rm_chroot_tmp,
                                   perf_args, suite, show_all_results)
        else:
          benchmark = Benchmark(test_name, test_name, test_args,
                                iterations, rm_chroot_tmp, perf_args, suite,
                                show_all_results)
          benchmarks.append(benchmark)
      else:
        # Add the single benchmark.
        benchmark = Benchmark(benchmark_name, test_name, test_args,
                              iterations, rm_chroot_tmp, perf_args, suite,
                              show_all_results)
        benchmarks.append(benchmark)

    # Construct labels.
    labels = []
    all_label_settings = experiment_file.GetSettings("label")
    all_remote = list(remote)
    for label_settings in all_label_settings:
      label_name = label_settings.name
      image = label_settings.GetField("chromeos_image")
      chromeos_root = label_settings.GetField("chromeos_root")
      my_remote = self._StripQuotes(label_settings.GetField("remote"))
      # No explicit image: derive one from the build via xbuddy.
      if image == "":
        build = label_settings.GetField("build")
        image = label_settings.GetXbuddyPath(build, board, chromeos_root,
                                             log_level)

      cache_dir = label_settings.GetField("cache_dir")
      chrome_src = label_settings.GetField("chrome_src")

      # TODO(yunlian): We should consolidate code in machine_manager.py
      # to determine whether we are running from within google or not.
      # NOTE(review): 'and' binds tighter than 'or', so the inner condition
      # groups as: (not my_remote) or (my_remote == remote and the label's
      # board differs) -- confirm that grouping is intended before changing.
      if ("corp.google.com" in socket.gethostname() and
          (not my_remote
           or my_remote == remote
           and global_settings.GetField("board") != board)):
        my_remote = self.GetDefaultRemotes(board)
      if global_settings.GetField("same_machine") and len(my_remote) > 1:
        raise Exception("Only one remote is allowed when same_machine "
                        "is turned on")
      all_remote += my_remote
      image_args = label_settings.GetField("image_args")
      if test_flag.GetTestMode():
        label = MockLabel(label_name, image, chromeos_root, board, my_remote,
                          image_args, cache_dir, chrome_src)
      else:
        label = Label(label_name, image, chromeos_root, board, my_remote,
                      image_args, cache_dir, chrome_src)
      labels.append(label)

    email = global_settings.GetField("email")
    # The loop above already accumulated every label's remotes into
    # all_remote; re-adding the last label's remotes here (as an earlier
    # revision did) was redundant and raised NameError when the experiment
    # file defined no labels.  Deduplicate before building the Experiment.
    all_remote = list(set(all_remote))
    experiment = Experiment(experiment_name, all_remote,
                            working_directory, chromeos_root,
                            cache_conditions, labels, benchmarks,
                            experiment_file.Canonicalize(),
                            email, acquire_timeout, log_dir, log_level,
                            share_users,
                            results_dir)

    return experiment

  def GetDefaultRemotes(self, board):
    """Look up the default remote machines for a board.

    Reads the 'default_remotes' file that lives next to this module; each
    line has the form '<board>: <machine> <machine> ...'.

    Args:
      board: The board name to look up.

    Returns:
      A list of machine names for the board.

    Raises:
      Exception: If the file cannot be read or no entry matches the board.
    """
    default_remotes_file = os.path.join(os.path.dirname(__file__),
                                        "default_remotes")
    try:
      with open(default_remotes_file) as f:
        for line in f:
          key, v = line.split(":")
          if key.strip() == board:
            remotes = v.strip().split(" ")
            if remotes:
              return remotes
            else:
              raise Exception("There is not remote for {0}".format(board))
    except IOError:
      raise Exception("IOError while reading file {0}"
                      .format(default_remotes_file))
    else:
      # The file was read cleanly but no line matched the board.
      raise Exception("There is not remote for {0}".format(board))
238