experiment_factory.py revision 0cc4e7790afbd514675801a1ffb90517c147270f
#!/usr/bin/python

# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

7"""A module to generate experments."""

import os
import re
import socket

from benchmark import Benchmark
import config
from experiment import Experiment
from label import Label
from label import MockLabel
from results_cache import CacheConditions
import test_flag

# Users may want to run Telemetry tests either individually, or in
# specified sets.  Here we define sets of tests that users may want
# to run together.
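# In GetExperiment() below, the test_name values 'all_perfv2',
# 'all_pagecyclers', and 'all_toolchain_perf' expand to these sets.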

telemetry_perfv2_tests = [ 'dromaeo.domcoreattr',
                           'dromaeo.domcoremodify',
                           'dromaeo.domcorequery',
                           'dromaeo.domcoretraverse',
                           'kraken',
# The following benchmark is extremely flaky, so omit it for now.
#                           'memory.top_25',
                           'octane',
                           'robohornet_pro',
                           'smoothness.top_25',
                           'sunspider',
                           ]

telemetry_pagecycler_tests = [ 'page_cycler.indexed_db.basic_insert',
                               'page_cycler.bloat',
                               'page_cycler.dhtml',
                               'page_cycler.intl_ar_fa_he',
                               'page_cycler.intl_es_fr_pt-BR',
                               'page_cycler.intl_hi_ru',
                               'page_cycler.intl_ja_zh',
                               'page_cycler.intl_ko_th_vi',
                               'page_cycler.morejs',
                               'page_cycler.moz',
                               'page_cycler.netsim.top_10',
                               'page_cycler.tough_layout_cases',
                               'page_cycler.typical_25',
                               ]

telemetry_toolchain_perf_tests = [ 'canvasmark',
                                   'jsgamebench',
                                   'dromaeo.domcoremodify',
                                   'page_cycler.intl_es_fr_pt-BR',
                                   'page_cycler.intl_hi_ru',
                                   'page_cycler.intl_ja_zh',
                                   'page_cycler.intl_ko_th_vi',
                                   'page_cycler.netsim.top_10',
                                   'page_cycler.typical_25',
                                   'peacekeeper.html',
                                   'robohornet_pro',
                                   'spaceport',
                                   'tab_switching.top_10',
                                   ]

class ExperimentFactory(object):
  """Factory class for building an Experiment, given an ExperimentFile as input.

  This factory is currently hardcoded to produce an experiment for running
  ChromeOS benchmarks, but the idea is that in the future, other types
  of experiments could be produced.
  """

  def _AppendBenchmarkSet(self, benchmarks, benchmark_list, test_args,
                          iterations, outlier_range, key_results_only,
                          rm_chroot_tmp, perf_args, suite, use_test_that,
                          show_all_results):
    """Add all the tests in a set to the benchmarks list."""
    for test_name in benchmark_list:
      telemetry_benchmark = Benchmark(test_name, test_name, test_args,
                                      iterations, outlier_range,
                                      key_results_only, rm_chroot_tmp,
                                      perf_args, suite, use_test_that,
                                      show_all_results)
      benchmarks.append(telemetry_benchmark)


  def GetExperiment(self, experiment_file, working_directory, log_dir):
    """Construct an experiment from an experiment file."""
    global_settings = experiment_file.GetGlobalSettings()
    experiment_name = global_settings.GetField("name")
    remote = global_settings.GetField("remote")
    # Strip any single or double quotes the user may have put around the
    # remote names.
    new_remote = []
    for i in remote:
      c = re.sub('["\']', '', i)
      new_remote.append(c)
    remote = new_remote
    chromeos_root = global_settings.GetField("chromeos_root")
    rm_chroot_tmp = global_settings.GetField("rm_chroot_tmp")
    key_results_only = global_settings.GetField("key_results_only")
    acquire_timeout = global_settings.GetField("acquire_timeout")
    cache_dir = global_settings.GetField("cache_dir")
    config.AddConfig("no_email", global_settings.GetField("no_email"))
    share_users = global_settings.GetField("share_users")
    results_dir = global_settings.GetField("results_dir")
    chrome_src = global_settings.GetField("chrome_src")
    build = global_settings.GetField("build")
    use_test_that = global_settings.GetField("use_test_that")
    show_all_results = global_settings.GetField("show_all_results")
    # Default cache hit conditions. The image checksum in the cache and the
    # computed checksum of the image must match. Also a cache file must exist.
    cache_conditions = [CacheConditions.CACHE_FILE_EXISTS,
                        CacheConditions.CHECKSUMS_MATCH]
    if global_settings.GetField("rerun_if_failed"):
      cache_conditions.append(CacheConditions.RUN_SUCCEEDED)
    if global_settings.GetField("rerun"):
      cache_conditions.append(CacheConditions.FALSE)
    if global_settings.GetField("same_machine"):
      cache_conditions.append(CacheConditions.SAME_MACHINE_MATCH)
    if global_settings.GetField("same_specs"):
      cache_conditions.append(CacheConditions.MACHINES_MATCH)

    # Construct benchmarks.
    benchmarks = []
    all_benchmark_settings = experiment_file.GetSettings("benchmark")
    for benchmark_settings in all_benchmark_settings:
      benchmark_name = benchmark_settings.name
      test_name = benchmark_settings.GetField("test_name")
      if not test_name:
        test_name = benchmark_name
      test_args = benchmark_settings.GetField("test_args")
      iterations = benchmark_settings.GetField("iterations")
      outlier_range = benchmark_settings.GetField("outlier_range")
      perf_args = benchmark_settings.GetField("perf_args")
      rm_chroot_tmp = benchmark_settings.GetField("rm_chroot_tmp")
      key_results_only = benchmark_settings.GetField("key_results_only")
      suite = benchmark_settings.GetField("suite")
      use_test_that = benchmark_settings.GetField("use_test_that")
      show_all_results = benchmark_settings.GetField("show_all_results")

      if suite == 'telemetry_Crosperf':
        if test_name == 'all_perfv2':
          self._AppendBenchmarkSet(benchmarks, telemetry_perfv2_tests,
                                   test_args, iterations, outlier_range,
                                   key_results_only, rm_chroot_tmp,
                                   perf_args, suite, use_test_that,
                                   show_all_results)
        elif test_name == 'all_pagecyclers':
          self._AppendBenchmarkSet(benchmarks, telemetry_pagecycler_tests,
                                   test_args, iterations, outlier_range,
                                   key_results_only, rm_chroot_tmp,
                                   perf_args, suite, use_test_that,
                                   show_all_results)
        elif test_name == 'all_toolchain_perf':
          self._AppendBenchmarkSet(benchmarks, telemetry_toolchain_perf_tests,
                                   test_args, iterations, outlier_range,
                                   key_results_only, rm_chroot_tmp,
                                   perf_args, suite, use_test_that,
                                   show_all_results)
        else:
          benchmark = Benchmark(test_name, test_name, test_args,
                                iterations, outlier_range,
                                key_results_only, rm_chroot_tmp,
                                perf_args, suite, use_test_that,
                                show_all_results)
          benchmarks.append(benchmark)
      else:
        # Add the single benchmark.
        benchmark = Benchmark(benchmark_name, test_name, test_args,
                              iterations, outlier_range,
                              key_results_only, rm_chroot_tmp,
                              perf_args, suite, use_test_that,
                              show_all_results)
        benchmarks.append(benchmark)

    # Construct labels.
    labels = []
    all_label_settings = experiment_file.GetSettings("label")
    all_remote = list(remote)
    for label_settings in all_label_settings:
      label_name = label_settings.name
      board = label_settings.GetField("board")
      image = label_settings.GetField("chromeos_image")
      if image == "":
        build = label_settings.GetField("build")
        image = label_settings.GetXbuddyPath(build, board)
      chromeos_root = label_settings.GetField("chromeos_root")
      my_remote = label_settings.GetField("remote")
      # Strip quotes around the per-label remote names, as done for the
      # global remotes above.
      new_remote = []
      for i in my_remote:
        c = re.sub('["\']', '', i)
        new_remote.append(c)
      my_remote = new_remote

      image_md5sum = label_settings.GetField("md5sum")
      cache_dir = label_settings.GetField("cache_dir")
      chrome_src = label_settings.GetField("chrome_src")

      # TODO(yunlian): We should consolidate code in machine_manager.py
      # to determine whether we are running from within Google or not.
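      # Fall back to the checked-in default remotes for this board when we
      # appear to be running on a corp host and the label either specifies
      # no remotes of its own, or its remotes match the global remotes while
      # its board differs from the global board.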
      if ("corp.google.com" in socket.gethostname() and
          (not my_remote or
           (my_remote == remote and
            global_settings.GetField("board") != board))):
        my_remote = self.GetDefaultRemotes(board)
      if global_settings.GetField("same_machine") and len(my_remote) > 1:
        raise Exception("Only one remote is allowed when same_machine "
                        "is turned on")
      all_remote += my_remote
      image_args = label_settings.GetField("image_args")
      if test_flag.GetTestMode():
        label = MockLabel(label_name, image, chromeos_root, board, my_remote,
                          image_args, image_md5sum, cache_dir, chrome_src)
      else:
        label = Label(label_name, image, chromeos_root, board, my_remote,
                      image_args, image_md5sum, cache_dir, chrome_src)
      labels.append(label)

    email = global_settings.GetField("email")
    all_remote = list(set(all_remote))
    experiment = Experiment(experiment_name, all_remote,
                            working_directory, chromeos_root,
                            cache_conditions, labels, benchmarks,
                            experiment_file.Canonicalize(),
                            email, acquire_timeout, log_dir, share_users,
                            results_dir)

    return experiment

  def GetDefaultRemotes(self, board):
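    """Return the list of default remote machines for |board|.

    The remotes come from the "default_remotes" file that lives next to this
    module; based on the parsing below, each line is expected to look roughly
    like "board: remote1 remote2 ...".  Raises an Exception if the file cannot
    be read or if no remotes are listed for the board.
    """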
    default_remotes_file = os.path.join(os.path.dirname(__file__),
                                        "default_remotes")
    try:
      with open(default_remotes_file) as f:
        for line in f:
          key, v = line.split(":")
          if key.strip() == board:
            remotes = v.strip().split(" ")
            if remotes:
              return remotes
            else:
              raise Exception("There is no remote for {0}".format(board))
    except IOError:
      raise Exception("IOError while reading file {0}"
                      .format(default_remotes_file))
    else:
      raise Exception("There is no remote for {0}".format(board))
252