experiment_factory.py revision 126e0c31bf6b973c500f86296391b954ccbae218
#!/usr/bin/python

# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""A module to generate experiments."""

import os
import re
import socket

from benchmark import Benchmark
import config
from experiment import Experiment
from label import Label
from label import MockLabel
from results_cache import CacheConditions
import test_flag

# Users may want to run Telemetry tests either individually, or in
# specified sets.  Here we define sets of tests that users may want
# to run together.

telemetry_perfv2_tests = [ 'dromaeo.domcoreattr',
                           'dromaeo.domcoremodify',
                           'dromaeo.domcorequery',
                           'dromaeo.domcoretraverse',
                           'kraken',
# The following benchmark is extremely flaky, so omit it for now.
#                          'memory.top_25',
                           'octane',
                           'robohornet_pro',
                           'smoothness.top_25',
                           'sunspider',
                           ]

telemetry_pagecycler_tests = [ 'page_cycler.indexed_db.basic_insert',
                               'page_cycler.bloat',
                               'page_cycler.dhtml',
                               'page_cycler.intl_ar_fa_he',
                               'page_cycler.intl_es_fr_pt-BR',
                               'page_cycler.intl_hi_ru',
                               'page_cycler.intl_ja_zh',
                               'page_cycler.intl_ko_th_vi',
                               'page_cycler.morejs',
                               'page_cycler.moz',
                               'page_cycler.netsim.top_10',
                               'page_cycler.tough_layout_cases',
                               'page_cycler.typical_25',
                               ]

telemetry_toolchain_perf_tests = [ 'canvasmark',
                                   'jsgamebench',
                                   'dromaeo.domcoremodify',
                                   'page_cycler.intl_es_fr_pt-BR',
                                   'page_cycler.intl_hi_ru',
                                   'page_cycler.intl_ja_zh',
                                   'page_cycler.intl_ko_th_vi',
                                   'page_cycler.netsim.top_10',
                                   'page_cycler.typical_25',
                                   'peacekeeper.html',
                                   'robohornet_pro',
                                   'spaceport',
                                   'tab_switching.top_10',
                                   ]

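# In GetExperiment below, the special test_name values 'all_perfv2',
# 'all_pagecyclers', and 'all_toolchain_perf' expand into the corresponding
# test sets defined above, so a single benchmark entry can run a whole set.
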
class ExperimentFactory(object):
  """Factory class for building an Experiment, given an ExperimentFile as input.

  This factory is currently hardcoded to produce an experiment for running
  ChromeOS benchmarks, but the idea is that in the future, other types
  of experiments could be produced.
  """
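  # A minimal usage sketch (the directory paths are illustrative, and an
  # ExperimentFile is assumed to have been parsed elsewhere):
  #
  #   factory = ExperimentFactory()
  #   experiment = factory.GetExperiment(experiment_file,
  #                                      working_directory='/tmp/crosperf',
  #                                      log_dir='/tmp/crosperf/logs')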

  def _AppendBenchmarkSet(self, benchmarks, benchmark_list, test_args,
                          iterations, outlier_range, key_results_only,
                          rm_chroot_tmp, perf_args, suite, use_test_that,
                          show_all_results):
    """Add all the tests in a set to the benchmarks list."""
    for test_name in benchmark_list:
      telemetry_benchmark = Benchmark(test_name, test_name, test_args,
                                      iterations, outlier_range,
                                      key_results_only, rm_chroot_tmp,
                                      perf_args, suite, use_test_that,
                                      show_all_results)
      benchmarks.append(telemetry_benchmark)


  def GetExperiment(self, experiment_file, working_directory, log_dir):
    """Construct an experiment from an experiment file."""
    global_settings = experiment_file.GetGlobalSettings()
    experiment_name = global_settings.GetField("name")
    remote = global_settings.GetField("remote")
    # Strip any double or single quotes that the user may have added
    # around the remote names.
    new_remote = []
    for i in remote:
      c = re.sub('["\']', '', i)
      new_remote.append(c)
    remote = new_remote
    chromeos_root = global_settings.GetField("chromeos_root")
    rm_chroot_tmp = global_settings.GetField("rm_chroot_tmp")
    key_results_only = global_settings.GetField("key_results_only")
    acquire_timeout = global_settings.GetField("acquire_timeout")
    cache_dir = global_settings.GetField("cache_dir")
    config.AddConfig("no_email", global_settings.GetField("no_email"))
    share_users = global_settings.GetField("share_users")
    results_dir = global_settings.GetField("results_dir")
    chrome_src = global_settings.GetField("chrome_src")
    use_test_that = global_settings.GetField("use_test_that")
    show_all_results = global_settings.GetField("show_all_results")
    # Default cache hit conditions: a cache file must exist, and the image
    # checksum in the cache must match the computed checksum of the image.
    cache_conditions = [CacheConditions.CACHE_FILE_EXISTS,
                        CacheConditions.CHECKSUMS_MATCH]
    if global_settings.GetField("rerun_if_failed"):
      cache_conditions.append(CacheConditions.RUN_SUCCEEDED)
    if global_settings.GetField("rerun"):
      cache_conditions.append(CacheConditions.FALSE)
    if global_settings.GetField("same_machine"):
      cache_conditions.append(CacheConditions.SAME_MACHINE_MATCH)
    if global_settings.GetField("same_specs"):
      cache_conditions.append(CacheConditions.MACHINES_MATCH)
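    # As the condition names suggest, CacheConditions.FALSE never matches, so
    # "rerun" forces every test to run again, while "rerun_if_failed" only
    # invalidates cached results from runs that did not succeed.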

    # Construct benchmarks.
    benchmarks = []
    all_benchmark_settings = experiment_file.GetSettings("benchmark")
    for benchmark_settings in all_benchmark_settings:
      benchmark_name = benchmark_settings.name
      test_name = benchmark_settings.GetField("test_name")
      if not test_name:
        test_name = benchmark_name
      test_args = benchmark_settings.GetField("test_args")
      iterations = benchmark_settings.GetField("iterations")
      outlier_range = benchmark_settings.GetField("outlier_range")
      perf_args = benchmark_settings.GetField("perf_args")
      rm_chroot_tmp = benchmark_settings.GetField("rm_chroot_tmp")
      key_results_only = benchmark_settings.GetField("key_results_only")
      suite = benchmark_settings.GetField("suite")
      use_test_that = benchmark_settings.GetField("use_test_that")
      show_all_results = benchmark_settings.GetField("show_all_results")

      if suite == 'telemetry_Crosperf':
        if test_name == 'all_perfv2':
          self._AppendBenchmarkSet(benchmarks, telemetry_perfv2_tests,
                                   test_args, iterations, outlier_range,
                                   key_results_only, rm_chroot_tmp,
                                   perf_args, suite, use_test_that,
                                   show_all_results)
        elif test_name == 'all_pagecyclers':
          self._AppendBenchmarkSet(benchmarks, telemetry_pagecycler_tests,
                                   test_args, iterations, outlier_range,
                                   key_results_only, rm_chroot_tmp,
                                   perf_args, suite, use_test_that,
                                   show_all_results)
        elif test_name == 'all_toolchain_perf':
          self._AppendBenchmarkSet(benchmarks, telemetry_toolchain_perf_tests,
                                   test_args, iterations, outlier_range,
                                   key_results_only, rm_chroot_tmp,
                                   perf_args, suite, use_test_that,
                                   show_all_results)
        else:
          benchmark = Benchmark(test_name, test_name, test_args,
                                iterations, outlier_range,
                                key_results_only, rm_chroot_tmp,
                                perf_args, suite, use_test_that,
                                show_all_results)
          benchmarks.append(benchmark)
      else:
        # Add the single benchmark.
        benchmark = Benchmark(benchmark_name, test_name, test_args,
                              iterations, outlier_range,
                              key_results_only, rm_chroot_tmp,
                              perf_args, suite, use_test_that,
                              show_all_results)
        benchmarks.append(benchmark)

    # Construct labels.
    labels = []
    all_label_settings = experiment_file.GetSettings("label")
    all_remote = list(remote)
    for label_settings in all_label_settings:
      label_name = label_settings.name
      image = label_settings.GetField("chromeos_image")
      chromeos_root = label_settings.GetField("chromeos_root")
      board = label_settings.GetField("board")
      my_remote = label_settings.GetField("remote")
      new_remote = []
      for i in my_remote:
        c = re.sub('["\']', '', i)
        new_remote.append(c)
      my_remote = new_remote

      image_md5sum = label_settings.GetField("md5sum")
      cache_dir = label_settings.GetField("cache_dir")
      chrome_src = label_settings.GetField("chrome_src")

      # TODO(yunlian): We should consolidate code in machine_manager.py
      # to determine whether we are running from within Google or not.
      if ("corp.google.com" in socket.gethostname() and
          (not my_remote
           or (my_remote == remote
               and global_settings.GetField("board") != board))):
        my_remote = self.GetDefaultRemotes(board)
      if global_settings.GetField("same_machine") and len(my_remote) > 1:
        raise Exception("Only one remote is allowed when same_machine "
                        "is turned on")
      all_remote += my_remote
      image_args = label_settings.GetField("image_args")
      if test_flag.GetTestMode():
        label = MockLabel(label_name, image, chromeos_root, board, my_remote,
                          image_args, image_md5sum, cache_dir, chrome_src)
      else:
        label = Label(label_name, image, chromeos_root, board, my_remote,
                      image_args, image_md5sum, cache_dir, chrome_src)
      labels.append(label)

    email = global_settings.GetField("email")
    all_remote = list(set(all_remote))
    experiment = Experiment(experiment_name, all_remote,
                            working_directory, chromeos_root,
                            cache_conditions, labels, benchmarks,
                            experiment_file.Canonicalize(),
                            email, acquire_timeout, log_dir, share_users,
                            results_dir)

    return experiment

  def GetDefaultRemotes(self, board):
    """Look up the default remote machines for |board| in default_remotes."""
    default_remotes_file = os.path.join(os.path.dirname(__file__),
                                        "default_remotes")
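    # Each line of default_remotes is expected to map a board name to a
    # space-separated list of machines, e.g. (hostnames are illustrative):
    #
    #   lumpy: remote1.example.com remote2.example.com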
    try:
      with open(default_remotes_file) as f:
        for line in f:
          key, v = line.split(":")
          if key.strip() == board:
            # split() (unlike split(" ")) returns an empty list for an empty
            # value, so a board listed with no machines is reported below.
            remotes = v.strip().split()
            if remotes:
              return remotes
            else:
              raise Exception("There is no remote for {0}".format(board))
    except IOError:
      raise Exception("IOError while reading file {0}"
                      .format(default_remotes_file))
    else:
      raise Exception("There is no remote for {0}".format(board))
248