# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

import collections
import logging
import os
import plistlib
import shutil
import tempfile
import xml.parsers.expat

from telemetry.core import os_version
from telemetry import decorators
from telemetry.internal.platform import power_monitor

import py_utils


# TODO: rename this class (seems like this is used by mac)
class PowerMetricsPowerMonitor(power_monitor.PowerMonitor):
  """Power monitor that samples power usage via the macOS powermetrics(1)
  command line utility."""

  def __init__(self, backend):
    super(PowerMetricsPowerMonitor, self).__init__()
    # Handle to the running powermetrics subprocess, if any.
    self._powermetrics_process = None
    # Platform backend used to launch (possibly privileged) applications.
    self._backend = backend
    # Path of the plist file powermetrics writes its samples to.
    self._output_filename = None
    # Temp directory holding |self._output_filename|.
    self._output_directory = None

  @property
  def binary_path(self):
    """Absolute path of the powermetrics executable."""
    return '/usr/bin/powermetrics'

  def StartMonitoringPower(self, browser):
    """Launch powermetrics and block until it has started measuring.

    Args:
      browser: Unused; present for PowerMonitor interface compatibility.
    """
    self._CheckStart()
    # Empirically powermetrics creates an empty output file immediately upon
    # starting. We detect file creation as a signal that measurement has
    # started. In order to avoid various race conditions in tempfile creation
    # we create a temp directory and have powermetrics create its output
    # there rather than say, creating a tempfile, deleting it and reusing its
    # name.
    self._output_directory = tempfile.mkdtemp()
    self._output_filename = os.path.join(self._output_directory,
                                         'powermetrics.output')
    args = ['-f', 'plist',
            '-u', self._output_filename,
            '-i0',
            '--show-usage-summary']
    self._powermetrics_process = self._backend.LaunchApplication(
        self.binary_path, args, elevate_privilege=True)

    # Block until output file is written to ensure this function call is
    # synchronous in respect to powermetrics starting.
    def _OutputFileExists():
      return os.path.isfile(self._output_filename)
    py_utils.WaitFor(_OutputFileExists, 1)

  @decorators.Cache
  def CanMonitorPower(self):
    """Return True iff the powermetrics binary exists and the OS is new
    enough to ship it (OS X 10.9 Mavericks or later)."""
    mavericks_or_later = (
        self._backend.GetOSVersionName() >= os_version.MAVERICKS)
    binary_path = self.binary_path
    return mavericks_or_later and self._backend.CanLaunchApplication(
        binary_path)

  @staticmethod
  def _ParsePlistString(plist_string):
    """Wrapper to parse a plist from a string and catch any errors.

    Sometimes powermetrics will exit in the middle of writing its output,
    empirically it seems that it always writes at least one sample in its
    entirety so we can safely ignore any errors in its output.

    Returns:
      Parser output on successful parse, None on parse error.
    """
    try:
      return plistlib.readPlistFromString(plist_string)
    except xml.parsers.expat.ExpatError:
      return None

  @staticmethod
  def ParsePowerMetricsOutput(powermetrics_output):
    """Parse output of powermetrics command line utility.

    Returns:
      Dictionary in the format returned by StopMonitoringPower(), or an
      empty dictionary if |powermetrics_output| is empty or unusable
      - crbug.com/353250 .
    """
    if len(powermetrics_output) == 0:
      logging.warning('powermetrics produced zero length output')
      return {}

    # Container to collect samples for running averages.
    # out_path - list containing the key path in the output dictionary.
    # src_path - list containing the key path to get the data from in
    # powermetrics' output.
    def ConstructMetric(out_path, src_path):
      RunningAverage = collections.namedtuple('RunningAverage', [
          'out_path', 'src_path', 'samples'])
      return RunningAverage(out_path, src_path, [])

    # List of RunningAverage objects specifying metrics we want to aggregate.
    metrics = [
        ConstructMetric(
            ['platform_info', 'average_frequency_hz'],
            ['processor', 'freq_hz']),
        ConstructMetric(
            ['platform_info', 'idle_percent'],
            ['processor', 'packages', 0, 'c_state_ratio'])]

    def DataWithMetricKeyPath(metric, powermetrics_output):
      """Retrieve the sample from powermetrics' output for a given metric.

      Args:
        metric: The RunningAverage object we want to collect a new sample for.
        powermetrics_output: Dictionary containing powermetrics output.

      Returns:
        The sample corresponding to |metric|'s keypath."""
      # Get actual data corresponding to key path.
      out_data = powermetrics_output
      for k in metric.src_path:
        out_data = out_data[k]

      assert type(out_data) in [int, float], (
          'Was expecting a number: %s (%s)' % (type(out_data), out_data))
      return float(out_data)

    sample_durations = []
    total_energy_consumption_mwh = 0
    # powermetrics outputs multiple plists separated by null terminators.
    raw_plists = powermetrics_output.split('\0')
    raw_plists = [x for x in raw_plists if len(x) > 0]
    assert len(raw_plists) == 1

    # -------- Examine contents of first plist for systems specs. --------
    plist = PowerMetricsPowerMonitor._ParsePlistString(raw_plists[0])
    if not plist:
      logging.warning('powermetrics produced invalid output, output length: '
                      '%d', len(powermetrics_output))
      return {}

    # Powermetrics doesn't record power usage when running on a VM.
    hw_model = plist.get('hw_model')
    if hw_model and hw_model.startswith('VMware'):
      return {}

    if 'GPU' in plist:
      metrics.extend([
          ConstructMetric(
              ['component_utilization', 'gpu', 'average_frequency_hz'],
              ['GPU', 0, 'freq_hz']),
          ConstructMetric(
              ['component_utilization', 'gpu', 'idle_percent'],
              ['GPU', 0, 'c_state_ratio'])])

    # There's no way of knowing ahead of time how many cpus and packages the
    # current system has. Iterate over cores and cpus - construct metrics for
    # each one.
    if 'processor' in plist:
      core_dict = plist['processor']['packages'][0]['cores']
      num_cores = len(core_dict)
      cpu_num = 0
      for core_idx in range(num_cores):
        num_cpus = len(core_dict[core_idx]['cpus'])
        base_src_path = ['processor', 'packages', 0, 'cores', core_idx]
        for cpu_idx in range(num_cpus):
          base_out_path = ['component_utilization', 'cpu%d' % cpu_num]
          # C State ratio is per-package, component CPUs of that package may
          # have different frequencies.
          metrics.append(ConstructMetric(
              base_out_path + ['average_frequency_hz'],
              base_src_path + ['cpus', cpu_idx, 'freq_hz']))
          metrics.append(ConstructMetric(
              base_out_path + ['idle_percent'],
              base_src_path + ['c_state_ratio']))
          cpu_num += 1

    # -------- Parse Data Out of Plists --------
    plist = PowerMetricsPowerMonitor._ParsePlistString(raw_plists[0])
    if not plist:
      logging.error('Error parsing plist.')
      return {}

    # Duration of this sample.
    sample_duration_ms = int(plist['elapsed_ns']) / 10 ** 6
    sample_durations.append(sample_duration_ms)

    if 'processor' not in plist:
      logging.error("'processor' field not found in plist.")
      return {}
    processor = plist['processor']

    # Joules -> milliwatt-hours: J / 3600 (s/h) * 1000 (mW/W).
    total_energy_consumption_mwh = (
        (float(processor.get('package_joules', 0)) / 3600.) * 10 ** 3)

    for m in metrics:
      try:
        m.samples.append(DataWithMetricKeyPath(m, plist))
      except KeyError:
        # Old CPUs don't have c-states, so if data is missing, just ignore it.
        logging.info('Field missing from powermetrics output: %s', m.src_path)
        continue

    # -------- Collect and Process Data --------
    out_dict = {}
    out_dict['identifier'] = 'powermetrics'
    out_dict['energy_consumption_mwh'] = total_energy_consumption_mwh

    def StoreMetricAverage(metric, sample_durations, out):
      """Calculate average value of samples in a metric and store in output
      path as specified by metric.

      Args:
        metric: A RunningAverage object containing samples to average.
        sample_durations: A list which parallels the samples list containing
          the time slice for each sample.
        out: The output dict, average is stored in the location specified by
          metric.out_path.
      """
      if len(metric.samples) == 0:
        return

      assert len(metric.samples) == len(sample_durations)
      # Duration-weighted average of the collected samples.
      avg = 0
      for sample, duration in zip(metric.samples, sample_durations):
        avg += sample * duration
      avg /= sum(sample_durations)

      # Store data in output, creating empty dictionaries as we go.
      for k in metric.out_path[:-1]:
        if k not in out:
          out[k] = {}
        out = out[k]
      out[metric.out_path[-1]] = avg

    for m in metrics:
      StoreMetricAverage(m, sample_durations, out_dict)

    if 'tasks' not in plist:
      logging.error("'tasks' field not found in plist.")
      return {}

    # The following CPU metrics are already time-normalized, and segmented by
    # process. Sum the metrics across all Chrome processes.
    cputime = 0
    energy_impact = 0
    browser_process_count = 0
    idle_wakeups = 0
    for task in plist['tasks']:
      if 'Chrome' in task['name'] or 'Chromium' in task['name']:
        if 'Helper' not in task['name']:
          browser_process_count += 1
        cputime += float(task['cputime_ms_per_s'])
        energy_impact += float(task.get('energy_impact', 0))
        idle_wakeups += float(task['idle_wakeups_per_s'])
    if browser_process_count == 0:
      logging.warning('No Chrome or Chromium browser process found with '
                      'powermetrics. Chrome CPU metrics will not be emitted.')
      return {}
    elif browser_process_count >= 2:
      logging.warning('powermetrics found more than one Chrome or Chromium '
                      'browser. Chrome CPU metrics will not be emitted.')
      # During Telemetry unit tests, there may be multiple Chrome browsers
      # present. Don't add cpu metrics, but don't return {} either.
    else:  # browser_process_count == 1:
      chrome_dict = {}
      chrome_dict['cputime_ms_per_s'] = cputime
      chrome_dict['energy_impact'] = energy_impact
      chrome_dict['idle_wakeups_per_s'] = idle_wakeups
      # setdefault: if every component metric lacked samples (old CPUs),
      # 'component_utilization' was never created above; a plain index
      # would raise KeyError here.
      out_dict.setdefault('component_utilization', {})['chrome'] = chrome_dict

    return out_dict

  def _KillPowerMetricsProcess(self):
    """Kill a running powermetrics process."""
    try:
      if self._powermetrics_process.poll() is None:
        self._powermetrics_process.terminate()
    except OSError as e:
      logging.warning(
          'Error when trying to terminate powermetric process: %s', repr(e))
    if self._powermetrics_process.poll() is None:
      # terminate() can fail when Powermetrics does not have the SetUID set.
      self._backend.LaunchApplication(
          '/usr/bin/pkill',
          ['-SIGTERM', os.path.basename(self.binary_path)],
          elevate_privilege=True)

  def StopMonitoringPower(self):
    """Stop powermetrics and return the parsed measurement results.

    Returns:
      Dictionary of aggregated metrics (see ParsePowerMetricsOutput), or
      an empty dictionary if collection or parsing failed.
    """
    self._CheckStop()
    try:
      self._KillPowerMetricsProcess()
      (power_stdout, power_stderr) = self._powermetrics_process.communicate()
      returncode = self._powermetrics_process.returncode
      # -15 means terminated by SIGTERM, which is how we stop powermetrics.
      assert returncode in [0, -15], (
          """powermetrics error
      return code=%d
      stdout=(%s)
      stderr=(%s)""" % (returncode, power_stdout, power_stderr))

      with open(self._output_filename, 'rb') as output_file:
        powermetrics_output = output_file.read()
      return PowerMetricsPowerMonitor.ParsePowerMetricsOutput(
          powermetrics_output)
    except Exception as e:  # pylint: disable=broad-except
      logging.warning(
          'Error when trying to collect power monitoring data: %s', repr(e))
      return PowerMetricsPowerMonitor.ParsePowerMetricsOutput('')
    finally:
      # Guard against StartMonitoringPower having failed part-way; rmtree
      # on None would otherwise mask the original error with a TypeError.
      if self._output_directory:
        shutil.rmtree(self._output_directory)
      self._output_directory = None
      self._output_filename = None
      self._powermetrics_process = None