experiment_factory.py revision 43f1a45c8dddfc4ff8c9dfcd87070811abf936dd
#!/usr/bin/python

# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""A module to generate experiments."""

import os
import re
import socket

from benchmark import Benchmark
import config
from experiment import Experiment
from label import Label
from label import MockLabel
from results_cache import CacheConditions
import test_flag

# Users may want to run Telemetry tests either individually or in
# specified sets.  Here we define sets of tests that users may want
# to run together.
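# Each set below is expanded by ExperimentFactory.GetExperiment: when a
# benchmark's test_name is 'all_perfv2', 'all_pagecyclers', or
# 'all_toolchain_perf', the corresponding list is appended to the
# experiment's benchmarks via _AppendBenchmarkSet.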

telemetry_perfv2_tests = [ 'dromaeo.domcoreattr',
                           'dromaeo.domcoremodify',
                           'dromaeo.domcorequery',
                           'dromaeo.domcoretraverse',
                           'kraken',
# The following benchmark is extremely flaky, so omit it for now.
#                           'memory.top_25',
                           'octane',
                           'robohornet_pro',
# The following benchmark is broken (and hanging) for now, so omit it.
#                           'smoothness.top_25',
                           'sunspider',
                           ]

telemetry_pagecycler_tests = [ 'page_cycler.indexed_db.basic_insert',
                               'page_cycler.bloat',
                               'page_cycler.dhtml',
                               'page_cycler.intl_ar_fa_he',
                               'page_cycler.intl_es_fr_pt-BR',
                               'page_cycler.intl_hi_ru',
                               'page_cycler.intl_ja_zh',
                               'page_cycler.intl_ko_th_vi',
                               'page_cycler.morejs',
                               'page_cycler.moz',
                               'page_cycler.netsim.top_10',
                               'page_cycler.tough_layout_cases',
                               'page_cycler.typical_25',
                               ]

telemetry_toolchain_perf_tests = [ 'canvasmark',
                                   'jsgamebench',
                                   'dromaeo.domcoremodify',
                                   'page_cycler.bloat',
                                   'page_cycler.intl_es_fr_pt-BR',
                                   'page_cycler.intl_hi_ru',
                                   'page_cycler.intl_ja_zh',
                                   'page_cycler.intl_ko_th_vi',
                                   'page_cycler.netsim.top_10',
                                   'page_cycler.typical_25',
                                   'peacekeeper.html',
                                   'robohornet_pro',
                                   'spaceport',
                                   'tab_switching.top_10',
                                   ]

class ExperimentFactory(object):
  """Factory class for building an Experiment, given an ExperimentFile as input.

  This factory is currently hardcoded to produce an experiment for running
  ChromeOS benchmarks, but the idea is that in the future, other types
  of experiments could be produced.
  """
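
  # A minimal usage sketch (hypothetical paths; in crosperf the
  # ExperimentFile instance is parsed from a user-supplied experiment file):
  #
  #   factory = ExperimentFactory()
  #   experiment = factory.GetExperiment(experiment_file,
  #                                      working_directory='/tmp/crosperf',
  #                                      log_dir='/tmp/crosperf/logs')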

  def _AppendBenchmarkSet(self, benchmarks, benchmark_list, test_args,
                          iterations, outlier_range, key_results_only,
                          rm_chroot_tmp, perf_args, suite, use_test_that,
                          show_all_results):
    """Add all the tests in a set to the benchmarks list."""
    for test_name in benchmark_list:
      telemetry_benchmark = Benchmark(test_name, test_name, test_args,
                                      iterations, outlier_range,
                                      key_results_only, rm_chroot_tmp,
                                      perf_args, suite, use_test_that,
                                      show_all_results)
      benchmarks.append(telemetry_benchmark)


  def GetExperiment(self, experiment_file, working_directory, log_dir):
    """Construct an experiment from an experiment file."""
    global_settings = experiment_file.GetGlobalSettings()
    experiment_name = global_settings.GetField("name")
    remote = global_settings.GetField("remote")
    # Strip any double or single quotes the user may have put around the
    # remote names.
    new_remote = []
    for i in remote:
      c = re.sub('["\']', '', i)
      new_remote.append(c)
    remote = new_remote
    chromeos_root = global_settings.GetField("chromeos_root")
    rm_chroot_tmp = global_settings.GetField("rm_chroot_tmp")
    key_results_only = global_settings.GetField("key_results_only")
    acquire_timeout = global_settings.GetField("acquire_timeout")
    cache_dir = global_settings.GetField("cache_dir")
    config.AddConfig("no_email", global_settings.GetField("no_email"))
    share_users = global_settings.GetField("share_users")
    results_dir = global_settings.GetField("results_dir")
    chrome_src = global_settings.GetField("chrome_src")
    build = global_settings.GetField("build")
    use_test_that = global_settings.GetField("use_test_that")
    show_all_results = global_settings.GetField("show_all_results")
    # Default cache hit conditions: a cache file must exist, and the image
    # checksum in the cache must match the computed checksum of the image.
    cache_conditions = [CacheConditions.CACHE_FILE_EXISTS,
                        CacheConditions.CHECKSUMS_MATCH]
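    # The settings below tighten the default conditions.  In particular,
    # "rerun" appends CacheConditions.FALSE, a condition that never holds,
    # so a rerun experiment always bypasses the cache and re-executes.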
    if global_settings.GetField("rerun_if_failed"):
      cache_conditions.append(CacheConditions.RUN_SUCCEEDED)
    if global_settings.GetField("rerun"):
      cache_conditions.append(CacheConditions.FALSE)
    if global_settings.GetField("same_machine"):
      cache_conditions.append(CacheConditions.SAME_MACHINE_MATCH)
    if global_settings.GetField("same_specs"):
      cache_conditions.append(CacheConditions.MACHINES_MATCH)

    # Construct benchmarks.
    benchmarks = []
    all_benchmark_settings = experiment_file.GetSettings("benchmark")
    for benchmark_settings in all_benchmark_settings:
      benchmark_name = benchmark_settings.name
      test_name = benchmark_settings.GetField("test_name")
      if not test_name:
        test_name = benchmark_name
      test_args = benchmark_settings.GetField("test_args")
      iterations = benchmark_settings.GetField("iterations")
      outlier_range = benchmark_settings.GetField("outlier_range")
      perf_args = benchmark_settings.GetField("perf_args")
      rm_chroot_tmp = benchmark_settings.GetField("rm_chroot_tmp")
      key_results_only = benchmark_settings.GetField("key_results_only")
      suite = benchmark_settings.GetField("suite")
      use_test_that = benchmark_settings.GetField("use_test_that")
      show_all_results = benchmark_settings.GetField("show_all_results")
      log_level = benchmark_settings.GetField("logging_level")
      if log_level not in ("quiet", "average", "verbose"):
        log_level = "verbose"

      if suite == 'telemetry_Crosperf':
        if test_name == 'all_perfv2':
          self._AppendBenchmarkSet(benchmarks, telemetry_perfv2_tests,
                                   test_args, iterations, outlier_range,
                                   key_results_only, rm_chroot_tmp,
                                   perf_args, suite, use_test_that,
                                   show_all_results)
        elif test_name == 'all_pagecyclers':
          self._AppendBenchmarkSet(benchmarks, telemetry_pagecycler_tests,
                                   test_args, iterations, outlier_range,
                                   key_results_only, rm_chroot_tmp,
                                   perf_args, suite, use_test_that,
                                   show_all_results)
        elif test_name == 'all_toolchain_perf':
          self._AppendBenchmarkSet(benchmarks, telemetry_toolchain_perf_tests,
                                   test_args, iterations, outlier_range,
                                   key_results_only, rm_chroot_tmp,
                                   perf_args, suite, use_test_that,
                                   show_all_results)
        else:
          benchmark = Benchmark(test_name, test_name, test_args,
                                iterations, outlier_range,
                                key_results_only, rm_chroot_tmp,
                                perf_args, suite, use_test_that,
                                show_all_results)
          benchmarks.append(benchmark)
      else:
        # Add the single benchmark.
        benchmark = Benchmark(benchmark_name, test_name, test_args,
                              iterations, outlier_range,
                              key_results_only, rm_chroot_tmp,
                              perf_args, suite, use_test_that,
                              show_all_results)
        benchmarks.append(benchmark)

    # Construct labels.
    labels = []
    all_label_settings = experiment_file.GetSettings("label")
    all_remote = list(remote)
    for label_settings in all_label_settings:
      label_name = label_settings.name
      board = label_settings.GetField("board")
      image = label_settings.GetField("chromeos_image")
      chromeos_root = label_settings.GetField("chromeos_root")
      if image == "":
        build = label_settings.GetField("build")
        image = label_settings.GetXbuddyPath(build, board, chromeos_root,
                                             log_level)
      my_remote = label_settings.GetField("remote")
      # Strip any quotes around the per-label remote names as well.
      new_remote = []
      for i in my_remote:
        c = re.sub('["\']', '', i)
        new_remote.append(c)
      my_remote = new_remote

      image_md5sum = label_settings.GetField("md5sum")
      cache_dir = label_settings.GetField("cache_dir")
      chrome_src = label_settings.GetField("chrome_src")

      # TODO(yunlian): We should consolidate the code in machine_manager.py
      # to determine whether or not we are running from within Google.
      if ("corp.google.com" in socket.gethostname() and
          (not my_remote
           or my_remote == remote
           and global_settings.GetField("board") != board)):
        my_remote = self.GetDefaultRemotes(board)
      if global_settings.GetField("same_machine") and len(my_remote) > 1:
        raise Exception("Only one remote is allowed when same_machine "
                        "is turned on")
      all_remote += my_remote
      image_args = label_settings.GetField("image_args")
      if test_flag.GetTestMode():
        label = MockLabel(label_name, image, chromeos_root, board, my_remote,
                          image_args, image_md5sum, cache_dir, chrome_src)
      else:
        label = Label(label_name, image, chromeos_root, board, my_remote,
                      image_args, image_md5sum, cache_dir, chrome_src)
      labels.append(label)

    email = global_settings.GetField("email")
    all_remote = list(set(all_remote))
    experiment = Experiment(experiment_name, all_remote,
                            working_directory, chromeos_root,
                            cache_conditions, labels, benchmarks,
                            experiment_file.Canonicalize(),
                            email, acquire_timeout, log_dir, log_level,
                            share_users,
                            results_dir)

    return experiment

  def GetDefaultRemotes(self, board):
    default_remotes_file = os.path.join(os.path.dirname(__file__),
                                        "default_remotes")
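    # The default_remotes file is assumed to hold one board per line in the
    # form "<board>: <remote1> <remote2> ..." (inferred from the parsing
    # below).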
    try:
      with open(default_remotes_file) as f:
        for line in f:
          key, v = line.split(":")
          if key.strip() == board:
            remotes = v.strip().split(" ")
            if remotes:
              return remotes
            else:
              raise Exception("There is no remote for {0}".format(board))
    except IOError:
      raise Exception("IOError while reading file {0}"
                      .format(default_remotes_file))
    else:
      raise Exception("There is no remote for {0}".format(board))
259