1# Copyright 2012 The Chromium Authors. All rights reserved.
2# Use of this source code is governed by a BSD-style license that can be
3# found in the LICENSE file.
4
5import argparse
6import logging
7import sys
8
9from telemetry import benchmark
10from telemetry import story
11from telemetry.core import discover
12from telemetry.core import util
13from telemetry.internal.browser import browser_options
14from telemetry.internal.results import results_options
15from telemetry.internal import story_runner
16from telemetry.internal.util import binary_manager
17from telemetry.page import legacy_page_test
18from telemetry.util import matching
19from telemetry.util import wpr_modes
20from telemetry.web_perf import timeline_based_measurement
21from telemetry.web_perf import timeline_based_page_test
22
# Log line format passed to logging.basicConfig in Main(); the log level
# itself is configured elsewhere (via browser_options), not by this format.
DEFAULT_LOG_FORMAT = (
  '(%(levelname)s) %(asctime)s %(module)s.%(funcName)s:%(lineno)d  '
  '%(message)s')
26
27
class RecorderPageTest(legacy_page_test.LegacyPageTest):
  """Page test used while recording WPR archives.

  Optionally wraps an inner page test (the one created by the benchmark
  being recorded, if any) and forwards each lifecycle hook to it, while
  forcing every page load to fetch resources over the live network.
  """

  def __init__(self):
    super(RecorderPageTest, self).__init__()
    # Inner page test to delegate to; stays None when recording a plain
    # story set rather than a benchmark.
    self.page_test = None

  def CustomizeBrowserOptions(self, options):
    delegate = self.page_test
    if delegate:
      delegate.CustomizeBrowserOptions(options)

  def WillStartBrowser(self, browser):
    delegate = self.page_test
    if delegate:
      delegate.WillStartBrowser(browser)

  def DidStartBrowser(self, browser):
    delegate = self.page_test
    if delegate:
      delegate.DidStartBrowser(browser)

  def WillNavigateToPage(self, page, tab):
    """Override to ensure all resources are fetched from network."""
    # Clear the cache first so the recording captures every resource.
    tab.ClearCache(force=False)
    delegate = self.page_test
    if delegate:
      delegate.WillNavigateToPage(page, tab)

  def DidNavigateToPage(self, page, tab):
    # Delegate first, then wait for the page to settle so the archive
    # contains everything the page loads.
    delegate = self.page_test
    if delegate:
      delegate.DidNavigateToPage(page, tab)
    tab.WaitForDocumentReadyStateToBeComplete()
    util.WaitFor(tab.HasReachedQuiescence, 30)

  def CleanUpAfterPage(self, page, tab):
    delegate = self.page_test
    if delegate:
      delegate.CleanUpAfterPage(page, tab)

  def ValidateAndMeasurePage(self, page, tab, results):
    delegate = self.page_test
    if delegate:
      delegate.ValidateAndMeasurePage(page, tab, results)

  def RunNavigateSteps(self, page, tab):
    delegate = self.page_test
    if delegate:
      delegate.RunNavigateSteps(page, tab)
    else:
      super(RecorderPageTest, self).RunNavigateSteps(page, tab)
70
71
def _GetSubclasses(base_dir, cls):
  """Returns all subclasses of |cls| found under |base_dir|.

  Args:
    base_dir: directory searched (recursively) for subclass definitions.
    cls: the base class whose subclasses are discovered (used for both
        benchmark.Benchmark and story.StorySet in this module).

  Returns:
    dict of {underscored_class_name: subclass}
  """
  return discover.DiscoverClasses(base_dir, base_dir, cls,
                                  index_by_class_name=True)
83
84
def _MaybeGetInstanceOfClass(target, base_dir, cls):
  """Resolves |target| to an instance of |cls|, if possible.

  If |target| is already a |cls| instance it is returned as-is; otherwise
  it is treated as a class name and an instance of the matching subclass
  found under |base_dir| is returned, or None when nothing matches.
  """
  if isinstance(target, cls):
    return target
  discovered = _GetSubclasses(base_dir, cls)
  matched_class = discovered.get(target)
  return matched_class() if matched_class is not None else None
90
91
def _PrintAllImpl(all_items, item_name, output_stream):
  """Writes a sorted, aligned name/description listing of |all_items|."""
  output_stream.write('Available %s\' names with descriptions:\n' % item_name)
  listing = [(name, all_items[name].Description())
             for name in sorted(all_items)]
  _PrintPairs(listing, output_stream)
  output_stream.write('\n')
98
99
def _PrintAllBenchmarks(base_dir, output_stream):
  """Lists every benchmark discovered under |base_dir| with a description."""
  # TODO: reuse the logic of finding supported benchmarks in benchmark_runner.py
  # so this only prints out benchmarks that are supported by the recording
  # platform.
  all_benchmarks = _GetSubclasses(base_dir, benchmark.Benchmark)
  _PrintAllImpl(all_benchmarks, 'benchmarks', output_stream)
106
107
def _PrintAllStories(base_dir, output_stream):
  """Lists every story set discovered under |base_dir| with a description."""
  # TODO: actually print all stories once record_wpr supports recording
  # general stories.
  all_story_sets = _GetSubclasses(base_dir, story.StorySet)
  _PrintAllImpl(all_story_sets, 'story sets', output_stream)
113
114
115def _PrintPairs(pairs, output_stream, prefix=''):
116  """Prints a list of string pairs with alignment."""
117  first_column_length = max(len(a) for a, _ in pairs)
118  format_string = '%s%%-%ds  %%s\n' % (prefix, first_column_length)
119  for a, b in pairs:
120    output_stream.write(format_string % (a, b.strip()))
121
122
class WprRecorder(object):
  """Records a WPR archive for a benchmark or a story set.

  |target| may be a benchmark name, a story set name, or an already
  constructed instance of either; |args| are extra command line flags,
  parsed together with the benchmark's own flags when a benchmark is
  being recorded.
  """

  def __init__(self, base_dir, target, args=None):
    self._base_dir = base_dir
    self._record_page_test = RecorderPageTest()
    self._options = self._CreateOptions()

    self._benchmark = _MaybeGetInstanceOfClass(target, base_dir,
                                               benchmark.Benchmark)
    self._parser = self._options.CreateParser(usage='See %prog --help')
    self._AddCommandLineArgs()
    self._ParseArgs(args)
    self._ProcessCommandLineArgs()
    if self._benchmark is not None:
      test = self._benchmark.CreatePageTest(self.options)
      # TimelineBasedMeasurement is not a page test; wrap it so the story
      # runner can drive it like one.
      if isinstance(test, timeline_based_measurement.TimelineBasedMeasurement):
        test = timeline_based_page_test.TimelineBasedPageTest(test)
      # This must be called after the command line args are added.
      self._record_page_test.page_test = test

    self._page_set_base_dir = (
        self._options.page_set_base_dir if self._options.page_set_base_dir
        else self._base_dir)
    self._story_set = self._GetStorySet(target)

  @property
  def options(self):
    # Parsed BrowserFinderOptions used for the recording run.
    return self._options

  def _CreateOptions(self):
    """Returns browser options pre-configured for WPR recording mode."""
    options = browser_options.BrowserFinderOptions()
    options.browser_options.wpr_mode = wpr_modes.WPR_RECORD
    return options

  def CreateResults(self):
    """Creates a results object tagged with the recorded benchmark's
    metadata (or a generic 'record_wpr' tag when recording a story set)."""
    if self._benchmark is not None:
      benchmark_metadata = self._benchmark.GetMetadata()
    else:
      benchmark_metadata = benchmark.BenchmarkMetadata('record_wpr')

    return results_options.CreateResults(benchmark_metadata, self._options)

  def _AddCommandLineArgs(self):
    """Registers recorder, story-runner and benchmark flags on the parser."""
    self._parser.add_option('--page-set-base-dir', action='store',
                            type='string')
    story_runner.AddCommandLineArgs(self._parser)
    if self._benchmark is not None:
      self._benchmark.AddCommandLineArgs(self._parser)
      self._benchmark.SetArgumentDefaults(self._parser)
    self._parser.add_option('--upload', action='store_true')
    self._SetArgumentDefaults()

  def _SetArgumentDefaults(self):
    # Recording produces an archive, not metrics; suppress result output.
    self._parser.set_defaults(**{'output_formats': ['none']})

  def _ParseArgs(self, args=None):
    # The parser was created from self._options, so parsing populates
    # self._options as a side effect.
    args_to_parse = sys.argv[1:] if args is None else args
    self._parser.parse_args(args_to_parse)

  def _ProcessCommandLineArgs(self):
    story_runner.ProcessCommandLineArgs(self._parser, self._options)

    # Recording against live sites is the whole point; the flag combination
    # is contradictory.
    if self._options.use_live_sites:
      self._parser.error("Can't --use-live-sites while recording")

    if self._benchmark is not None:
      self._benchmark.ProcessCommandLineArgs(self._parser, self._options)

  def _GetStorySet(self, target):
    """Returns the story set to record, or exits with a hint when |target|
    matches neither a benchmark nor a story set."""
    if self._benchmark is not None:
      return self._benchmark.CreateStorySet(self._options)
    story_set = _MaybeGetInstanceOfClass(target, self._page_set_base_dir,
                                         story.StorySet)
    if story_set is None:
      sys.stderr.write('Target %s is neither benchmark nor story set.\n'
                       % target)
      if not self._HintMostLikelyBenchmarksStories(target):
        sys.stderr.write(
            'Found no similar benchmark or story. Please use '
            '--list-benchmarks or --list-stories to list candidates.\n')
        self._parser.print_usage()
      sys.exit(1)
    return story_set

  def _HintMostLikelyBenchmarksStories(self, target):
    """Prints fuzzy-matched benchmark/story candidates for a bad |target|.

    Returns:
      True if at least one candidate hint was printed.
    """
    def _Impl(all_items, category_name):
      # items() (rather than the Python-2-only iteritems()) keeps this
      # working on both Python 2 and 3.
      candidates = matching.GetMostLikelyMatchedObject(
          all_items.items(), target, name_func=lambda kv: kv[1].Name())
      if candidates:
        sys.stderr.write('\nDo you mean any of those %s below?\n' %
                         category_name)
        _PrintPairs([(k, v.Description()) for k, v in candidates], sys.stderr)
        return True
      return False

    has_benchmark_hint = _Impl(
        _GetSubclasses(self._base_dir, benchmark.Benchmark), 'benchmarks')
    has_story_hint = _Impl(
        _GetSubclasses(self._base_dir, story.StorySet), 'stories')
    return has_benchmark_hint or has_story_hint

  def Record(self, results):
    """Runs the story set and captures a new temporary WPR recording."""
    assert self._story_set.wpr_archive_info, (
      'Pageset archive_data_file path must be specified.')
    self._story_set.wpr_archive_info.AddNewTemporaryRecording()
    self._record_page_test.CustomizeBrowserOptions(self._options)
    story_runner.Run(self._record_page_test, self._story_set,
        self._options, results)

  def HandleResults(self, results, upload_to_cloud_storage):
    """Commits recordings for pages that succeeded; warns about the rest."""
    if results.failures or results.skipped_values:
      logging.warning('Some pages failed and/or were skipped. The recording '
                      'has not been updated for these pages.')
    results.PrintSummary()
    self._story_set.wpr_archive_info.AddRecordedStories(
        results.pages_that_succeeded,
        upload_to_cloud_storage)
240
241
def Main(environment, **log_config_kwargs):
  """Command line entry point for recording WPR archives.

  Args:
    environment: object providing top_level_dir and client_configs.
    log_config_kwargs: forwarded to logging.basicConfig; 'level' is
        discarded and 'format' defaults to DEFAULT_LOG_FORMAT.

  Returns:
    0 on success (or when only listings/usage were printed), otherwise the
    number of failures capped at 255.
  """
  # the log level is set in browser_options
  log_config_kwargs.pop('level', None)
  log_config_kwargs.setdefault('format', DEFAULT_LOG_FORMAT)
  logging.basicConfig(**log_config_kwargs)

  parser = argparse.ArgumentParser(
      usage='Record a benchmark or a story (page set).')
  parser.add_argument(
      'benchmark',
      help=('benchmark name. This argument is optional. If both benchmark name '
            'and story name are specified, this takes precedence as the '
            'target of the recording.'),
      nargs='?')
  parser.add_argument('--story', help='story (page set) name')
  parser.add_argument('--list-stories', dest='list_stories',
                      action='store_true', help='list all story names.')
  parser.add_argument('--list-benchmarks', dest='list_benchmarks',
                      action='store_true', help='list all benchmark names.')
  parser.add_argument('--upload', action='store_true',
                      help='upload to cloud storage.')
  parsed_args, extra_args = parser.parse_known_args()

  listing_requested = parsed_args.list_benchmarks or parsed_args.list_stories
  if listing_requested:
    if parsed_args.list_benchmarks:
      _PrintAllBenchmarks(environment.top_level_dir, sys.stderr)
    if parsed_args.list_stories:
      _PrintAllStories(environment.top_level_dir, sys.stderr)
    return 0

  # The benchmark, when given, wins over --story as the recording target.
  target = parsed_args.benchmark or parsed_args.story
  if not target:
    sys.stderr.write('Please specify target (benchmark or story). Please refer '
                     'usage below\n\n')
    parser.print_help()
    return 0

  binary_manager.InitDependencyManager(environment.client_configs)

  # TODO(nednguyen): update WprRecorder so that it handles the difference
  # between recording a benchmark vs recording a story better based on
  # the distinction between args.benchmark & args.story
  recorder = WprRecorder(environment.top_level_dir, target, extra_args)
  results = recorder.CreateResults()
  recorder.Record(results)
  recorder.HandleResults(results, parsed_args.upload)
  return min(255, len(results.failures))
290