# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

import logging
import os
from collections import defaultdict

from telemetry.core import util
from telemetry.core.platform import tracing_category_filter
from telemetry.core.platform import tracing_options
from telemetry.page import page_test
from telemetry.timeline import model as model_module
from telemetry.value import string as string_value_module
from telemetry.web_perf import timeline_interaction_record as tir_module
from telemetry.web_perf.metrics import fast_metric
from telemetry.web_perf.metrics import responsiveness_metric
from telemetry.web_perf.metrics import smoothness

# TimelineBasedMeasurement considers all instrumentation as producing a single
# timeline. But, depending on how much instrumentation is enabled, overhead
# increases. The user of the measurement must therefore choose between a few
# levels of instrumentation (see the example invocation below).
NO_OVERHEAD_LEVEL = 'no-overhead'
V8_OVERHEAD_LEVEL = 'v8-overhead'
MINIMAL_OVERHEAD_LEVEL = 'minimal-overhead'
DEBUG_OVERHEAD_LEVEL = 'debug-overhead'

ALL_OVERHEAD_LEVELS = [
  NO_OVERHEAD_LEVEL,
  V8_OVERHEAD_LEVEL,
  MINIMAL_OVERHEAD_LEVEL,
  DEBUG_OVERHEAD_LEVEL
]
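
# Example (illustrative invocation only; the --overhead-level flag is defined
# in TimelineBasedMeasurement.AddCommandLineArgs below, and the benchmark name
# here is hypothetical):
#   ./run_benchmark my_benchmark --overhead-level=minimal-overhead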


class InvalidInteractions(Exception):
  pass


def _GetMetricFromMetricType(metric_type):
  if metric_type == tir_module.IS_FAST:
    return fast_metric.FastMetric()
  if metric_type == tir_module.IS_SMOOTH:
    return smoothness.SmoothnessMetric()
  if metric_type == tir_module.IS_RESPONSIVE:
    return responsiveness_metric.ResponsivenessMetric()
  raise Exception('Unrecognized metric type: %s' % metric_type)


# TODO(nednguyen): Get rid of this results wrapper hack after we add interaction
# record to telemetry value system.
class _ResultsWrapper(object):
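  """Prefixes the names of added values with an interaction record's label."""
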
  def __init__(self, results, label):
    self._results = results
    self._result_prefix = label

  @property
  def current_page(self):
    return self._results.current_page

  def _GetResultName(self, trace_name):
    return '%s-%s' % (self._result_prefix, trace_name)

  def AddValue(self, value):
    value.name = self._GetResultName(value.name)
    self._results.AddValue(value)


class _TimelineBasedMetrics(object):
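  """Computes metrics over the interaction records found in a timeline model.

  Interaction records pulled from the renderer thread are grouped by label;
  for each metric type present on a group, the corresponding metric is run
  over those records and its values are reported under a label-prefixed name.
  """
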
  def __init__(self, model, renderer_thread,
               get_metric_from_metric_type_callback):
    self._model = model
    self._renderer_thread = renderer_thread
    self._get_metric_from_metric_type_callback = \
        get_metric_from_metric_type_callback

  def FindTimelineInteractionRecords(self):
    # TODO(nduca): Add support for page-load interaction record.
    return [tir_module.TimelineInteractionRecord.FromAsyncEvent(event) for
            event in self._renderer_thread.async_slices
            if tir_module.IsTimelineInteractionRecord(event.name)]

  def AddResults(self, results):
    all_interactions = self.FindTimelineInteractionRecords()
    if not all_interactions:
      raise InvalidInteractions('Expected at least one interaction record on '
                                'the page')

    interactions_by_label = defaultdict(list)
    for i in all_interactions:
      interactions_by_label[i.label].append(i)

    for label, interactions in interactions_by_label.iteritems():
      are_repeatable = [i.repeatable for i in interactions]
      if not all(are_repeatable) and len(interactions) > 1:
        raise InvalidInteractions('Duplicate unrepeatable interaction records '
                                  'on the page')
      wrapped_results = _ResultsWrapper(results, label)
      self.UpdateResultsByMetric(interactions, wrapped_results)

  def UpdateResultsByMetric(self, interactions, wrapped_results):
    for metric_type in tir_module.METRICS:
      # For each metric type, either all or none of the interactions should
      # have that metric.
      interactions_with_metric = [i for i in interactions if
                                  i.HasMetric(metric_type)]
      if not interactions_with_metric:
        continue
      if len(interactions_with_metric) != len(interactions):
        raise InvalidInteractions('Interaction records with the same logical '
                                  'name must have the same flags.')
      metric = self._get_metric_from_metric_type_callback(metric_type)
      metric.AddResults(self._model, self._renderer_thread,
                        interactions, wrapped_results)


class TimelineBasedMeasurement(page_test.PageTest):
  """Collects multiple metrics for pages based on their interaction records.

  A timeline measurement shifts the burden of deciding what metrics to collect
  onto the page under test, or the pageset running that page. Instead of the
  measurement having a fixed set of values it collects about the page, the page
  being tested issues (via javascript) an Interaction record into the user
  timing API describing what the page is doing at that time, as well as a
  standardized set of flags describing the semantics of the work being done.
  The TimelineBasedMeasurement object collects a trace that includes both
  these interaction records and a user-chosen amount of performance data using
  Telemetry's various timeline-producing APIs, tracing especially.

  It then passes the recorded timeline to different TimelineBasedMetrics based
  on those flags. This allows a single run through a page to produce load timing
  data, smoothness data, critical jank information and overall cpu usage
  information.

  For information on how to mark up a page to work with
  TimelineBasedMeasurement, refer to the
  perf.metrics.timeline_interaction_record module.
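
  As an illustration only (the exact marker format and flag names are defined
  by that module, and this snippet is not taken from it), a page might wrap a
  scroll in console timing calls such as:

    console.time('Interaction.Scroll/is_smooth');
    // ... perform the scroll ...
    console.timeEnd('Interaction.Scroll/is_smooth');

  so that the resulting interaction record, labeled 'Scroll', is scored by the
  smoothness metric.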

  """
  def __init__(self):
    super(TimelineBasedMeasurement, self).__init__('RunSmoothness')

  @classmethod
  def AddCommandLineArgs(cls, parser):
    parser.add_option(
        '--overhead-level', dest='overhead_level', type='choice',
        choices=ALL_OVERHEAD_LEVELS,
        default=NO_OVERHEAD_LEVEL,
        help='How much overhead to incur during the measurement.')
    parser.add_option(
        '--trace-dir', dest='trace_dir', type='string', default=None,
        help=('Where to save the trace after the run. If this flag '
              'is not set, the trace will not be saved.'))

  def WillNavigateToPage(self, page, tab):
    if not tab.browser.platform.tracing_controller.IsChromeTracingSupported(
        tab.browser):
      raise Exception('Chrome tracing is not supported by this browser.')

    assert self.options.overhead_level in ALL_OVERHEAD_LEVELS
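    # Map the requested overhead level onto a tracing category filter.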
    if self.options.overhead_level == NO_OVERHEAD_LEVEL:
      category_filter = tracing_category_filter.CreateNoOverheadFilter()
    # TODO(ernstm): Remove this overhead level when benchmark relevant v8 events
    # become available in the 'benchmark' category.
    elif self.options.overhead_level == V8_OVERHEAD_LEVEL:
      category_filter = tracing_category_filter.CreateNoOverheadFilter()
      category_filter.AddIncludedCategory('v8')
    elif self.options.overhead_level == MINIMAL_OVERHEAD_LEVEL:
      category_filter = tracing_category_filter.CreateMinimalOverheadFilter()
    else:
      category_filter = tracing_category_filter.CreateDebugOverheadFilter()

    for delay in page.GetSyntheticDelayCategories():
      category_filter.AddSyntheticDelay(delay)
    options = tracing_options.TracingOptions()
    options.enable_chrome_trace = True
    tab.browser.platform.tracing_controller.Start(options, category_filter)

  def ValidateAndMeasurePage(self, page, tab, results):
    """Collects all possible metrics and adds them to results."""
    trace_result = tab.browser.platform.tracing_controller.Stop()
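    # If --trace-dir was given, serialize the trace to disk and report its
    # path as a string value alongside the metric values.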
    trace_dir = self.options.trace_dir
    if trace_dir:
      trace_file_path = util.GetSequentialFileName(
          os.path.join(trace_dir, 'trace')) + '.json'
      try:
        with open(trace_file_path, 'w') as f:
          trace_result.Serialize(f)
        results.AddValue(string_value_module.StringValue(
            page, 'trace_path', 'string', trace_file_path))
      except IOError as e:
        logging.error('Cannot open %s. %s', trace_file_path, e)

    model = model_module.TimelineModel(trace_result)
    renderer_thread = model.GetRendererThreadFromTabId(tab.id)
    meta_metrics = _TimelineBasedMetrics(
        model, renderer_thread, _GetMetricFromMetricType)
    meta_metrics.AddResults(results)

  def CleanUpAfterPage(self, page, tab):
    if tab.browser.platform.tracing_controller.is_tracing_running:
      tab.browser.platform.tracing_controller.Stop()