# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
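
"""Monitors power consumption on Mac OS X using the powermetrics utility.

Typical use (a sketch; the platform-backend wiring is elided):

  monitor = PowerMetricsPowerMonitor(backend)
  if monitor.CanMonitorPower():
    monitor.StartMonitoringPower(browser)
    # ... exercise the browser ...
    results = monitor.StopMonitoringPower()
"""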

import collections
import logging
import os
import plistlib
import shutil
import tempfile
import xml.parsers.expat

from telemetry import decorators
from telemetry.core import util
from telemetry.core.platform import platform_backend
from telemetry.core.platform import power_monitor


class PowerMetricsPowerMonitor(power_monitor.PowerMonitor):
  def __init__(self, backend):
    super(PowerMetricsPowerMonitor, self).__init__()
    self._powermetrics_process = None
    self._backend = backend
    self._output_filename = None
    self._output_directory = None

  @property
  def binary_path(self):
    return '/usr/bin/powermetrics'

  def StartMonitoringPower(self, browser):
    assert not self._powermetrics_process, (
        "Must call StopMonitoringPower().")
    # Empirically, powermetrics creates an empty output file immediately upon
    # starting, so we treat file creation as the signal that measurement has
    # started. To avoid race conditions in tempfile creation, we create a temp
    # directory and have powermetrics write its output there, rather than,
    # say, creating a tempfile, deleting it and reusing its name.
    self._output_directory = tempfile.mkdtemp()
    self._output_filename = os.path.join(self._output_directory,
        'powermetrics.output')
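    # Flags passed to powermetrics (a rough gloss; see `man powermetrics` on
    # OS X for authoritative descriptions):
    #   -f plist               emit samples as plists rather than plain text.
    #   -u <file>              write output to <file> instead of stdout.
    #   -i0                    no periodic sampling interval; empirically a
    #                          single sample is written when the process ends.
    #   --show-usage-summary   include a per-process usage summary.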
    args = ['-f', 'plist',
            '-u', self._output_filename,
            '-i0',
            '--show-usage-summary']
    self._powermetrics_process = self._backend.LaunchApplication(
        self.binary_path, args, elevate_privilege=True)

    # Block until the output file is written to ensure this function call is
    # synchronous with respect to powermetrics starting.
    def _OutputFileExists():
      return os.path.isfile(self._output_filename)
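    # util.WaitFor polls the condition until it holds, raising if the timeout
    # (1 second here) elapses first, so a failed launch surfaces immediately.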
    util.WaitFor(_OutputFileExists, 1)

  @decorators.Cache
  def CanMonitorPower(self):
    mavericks_or_later = (
        self._backend.GetOSVersionName() >= platform_backend.MAVERICKS)
    binary_path = self.binary_path
    return mavericks_or_later and self._backend.CanLaunchApplication(
        binary_path)

  @staticmethod
  def _ParsePlistString(plist_string):
    """Wrapper to parse a plist from a string and catch any errors.

    Sometimes powermetrics exits in the middle of writing its output;
    empirically it seems to always write at least one sample in its
    entirety, so we can safely ignore any errors in its output.

    Returns:
        Parser output on successful parse, None on parse error.
    """
    try:
      return plistlib.readPlistFromString(plist_string)
    except xml.parsers.expat.ExpatError:
      return None

  @staticmethod
  def ParsePowerMetricsOutput(powermetrics_output):
82    """Parse output of powermetrics command line utility.
83
84    Returns:
85        Dictionary in the format returned by StopMonitoringPower() or None
86        if |powermetrics_output| is empty - crbug.com/353250 .
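
    Example of the returned dictionary (an illustrative sketch; the exact
    component keys depend on the host hardware):
        {
          'identifier': 'powermetrics',
          'energy_consumption_mwh': <float>,
          'component_utilization': {
            'whole_package': {'average_frequency_hz': <float>,
                              'idle_percent': <float>},
            'cpu0': {...},
            'gpu': {...}
          }
        }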
87    """
88    if len(powermetrics_output) == 0:
89      logging.warning("powermetrics produced zero length output")
90      return None
91
    # Container to collect samples for running averages.
    # out_path - list containing the key path in the output dictionary.
    # src_path - list containing the key path to get the data from in
    #    powermetrics' output.
    RunningAverage = collections.namedtuple('RunningAverage', [
        'out_path', 'src_path', 'samples'])

    def ConstructMetric(out_path, src_path):
      return RunningAverage(out_path, src_path, [])

    # List of RunningAverage objects specifying metrics we want to aggregate.
    metrics = [
        ConstructMetric(
            ['component_utilization', 'whole_package', 'average_frequency_hz'],
            ['processor', 'freq_hz']),
        ConstructMetric(
            ['component_utilization', 'whole_package', 'idle_percent'],
            ['processor', 'packages', 0, 'c_state_ratio'])]
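
    # For orientation, a powermetrics plist sample contains roughly the
    # following shape (an abridged sketch, not the full schema):
    #   {'elapsed_ns': ...,
    #    'processor': {'freq_hz': ...,
    #                  'package_joules': ...,
    #                  'packages': [{'c_state_ratio': ...,
    #                                'cores': [{'c_state_ratio': ...,
    #                                           'cpus': [{'freq_hz': ...}]}]}]},
    #    'GPU': [{'freq_hz': ..., 'c_state_ratio': ...}]}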

    def DataWithMetricKeyPath(metric, powermetrics_output):
      """Retrieve the sample from powermetrics' output for a given metric.

      Args:
          metric: The RunningAverage object we want to collect a new sample
              for.
          powermetrics_output: Dictionary containing powermetrics output.

      Returns:
          The sample corresponding to |metric|'s keypath.
      """
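      # E.g. src_path ['processor', 'freq_hz'] walks to
      # powermetrics_output['processor']['freq_hz'].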
      # Get actual data corresponding to key path.
      out_data = powermetrics_output
      for k in metric.src_path:
        out_data = out_data[k]

      assert type(out_data) in [int, float], (
          "Was expecting a number: %s (%s)" % (type(out_data), out_data))
      return float(out_data)

    sample_durations = []
    total_energy_consumption_mwh = 0
    # powermetrics outputs multiple plists separated by null terminators;
    # with -i0, empirically only a single sample is produced over the whole
    # monitoring period, hence the assert below.
    raw_plists = powermetrics_output.split('\0')
    raw_plists = [x for x in raw_plists if len(x) > 0]
    assert len(raw_plists) == 1

    # -------- Examine contents of the plist for system specs. --------
    plist = PowerMetricsPowerMonitor._ParsePlistString(raw_plists[0])
    if not plist:
      logging.warning("powermetrics produced invalid output, output length: "
          "%d", len(powermetrics_output))
      return {}

    if 'GPU' in plist:
      metrics.extend([
          ConstructMetric(
              ['component_utilization', 'gpu', 'average_frequency_hz'],
              ['GPU', 0, 'freq_hz']),
          ConstructMetric(
              ['component_utilization', 'gpu', 'idle_percent'],
              ['GPU', 0, 'c_state_ratio'])])

    # There's no way of knowing ahead of time how many CPUs and packages the
    # current system has, so iterate over the cores and CPUs, constructing
    # metrics for each one; see the example after this loop.
    if 'processor' in plist:
      core_dict = plist['processor']['packages'][0]['cores']
      num_cores = len(core_dict)
      cpu_num = 0
      for core_idx in xrange(num_cores):
        num_cpus = len(core_dict[core_idx]['cpus'])
        base_src_path = ['processor', 'packages', 0, 'cores', core_idx]
        for cpu_idx in xrange(num_cpus):
          base_out_path = ['component_utilization', 'cpu%d' % cpu_num]
          # The C state ratio is read per-core and shared by that core's CPUs,
          # while each CPU reports its own frequency.
          metrics.append(ConstructMetric(
              base_out_path + ['average_frequency_hz'],
              base_src_path + ['cpus', cpu_idx, 'freq_hz']))
          metrics.append(ConstructMetric(
              base_out_path + ['idle_percent'],
              base_src_path + ['c_state_ratio']))
          cpu_num += 1
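
    # E.g. on a hypothetical single-package, 2-core/4-CPU system, the loop
    # above yields output paths such as
    # ['component_utilization', 'cpu0', 'average_frequency_hz'] through
    # ['component_utilization', 'cpu3', 'idle_percent'].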

    # -------- Parse Data Out of the Plist --------
    # |plist| already holds the parsed sample from above; no need to re-parse.

    # Duration of this sample in milliseconds.
    sample_duration_ms = int(plist['elapsed_ns']) / 10**6
    sample_durations.append(sample_duration_ms)

    if 'processor' not in plist:
      logging.error("'processor' field not found in plist.")
      return {}
    processor = plist['processor']

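    # package_joules is energy in joules; 1 J = 1 Ws = 1/3600 Wh, so dividing
    # by 3600. yields Wh and multiplying by 10**3 yields mWh.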
    total_energy_consumption_mwh = (
        (float(processor.get('package_joules', 0)) / 3600.) * 10**3)

    for m in metrics:
      m.samples.append(DataWithMetricKeyPath(m, plist))

    # -------- Collect and Process Data --------
    out_dict = {}
    out_dict['identifier'] = 'powermetrics'
    out_dict['energy_consumption_mwh'] = total_energy_consumption_mwh

    def StoreMetricAverage(metric, sample_durations, out):
      """Calculate the duration-weighted average of a metric's samples and
         store it in the output path specified by the metric.

      Args:
          metric: A RunningAverage object containing samples to average.
          sample_durations: A list which parallels the samples list, containing
              the time slice for each sample.
          out: The output dict; the average is stored in the location specified
              by metric.out_path.
      """
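      # E.g. samples [2.0, 4.0] with durations [100, 300] average to
      # (2.0*100 + 4.0*300) / 400 = 3.5.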
      if len(metric.samples) == 0:
        return

      assert len(metric.samples) == len(sample_durations)
      avg = 0
      for i in xrange(len(metric.samples)):
        avg += metric.samples[i] * sample_durations[i]
      avg /= sum(sample_durations)

      # Store data in output, creating empty dictionaries as we go.
      for k in metric.out_path[:-1]:
        if k not in out:
          out[k] = {}
        out = out[k]
      out[metric.out_path[-1]] = avg

    for m in metrics:
      StoreMetricAverage(m, sample_durations, out_dict)
    return out_dict

  def StopMonitoringPower(self):
    assert self._powermetrics_process, (
        "StartMonitoringPower() not called.")
    # Terminating the process prompts powermetrics to take an immediate final
    # sample and flush it to the output file before exiting.
    try:
      self._powermetrics_process.terminate()
      (power_stdout, power_stderr) = self._powermetrics_process.communicate()
      returncode = self._powermetrics_process.returncode
      # -15 is the negated signal number for SIGTERM, sent by terminate().
      assert returncode in [0, -15], (
          """powermetrics error
          return code=%d
          stdout=(%s)
          stderr=(%s)""" % (returncode, power_stdout, power_stderr))

      with open(self._output_filename, 'rb') as output_file:
        powermetrics_output = output_file.read()
      return PowerMetricsPowerMonitor.ParsePowerMetricsOutput(
          powermetrics_output)

    finally:
      shutil.rmtree(self._output_directory)
      self._output_directory = None
      self._output_filename = None
      self._powermetrics_process = None