www_server.py revision 0529e5d033099cbfc42635f6f6183833b09dff6e
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""This module implements a simple WSGI server for the memory_inspector Web UI.

The WSGI server essentially handles two kinds of requests:
 - /ajax/foo/bar: The AJAX endpoints which exchange JSON data with the JS.
      Requests routing is achieved using a simple @uri decorator which simply
      performs regex matching on the request path.
 - /static/content: Anything not matching the /ajax/ prefix is treated as a
      static content request (for serving the index.html and JS/CSS resources).

The following HTTP status code are returned by the server:
 - 200 - OK: The request was handled correctly.
 - 404 - Not found: None of the defined handlers did match the /request/path.
 - 410 - Gone: The path was matched but the handler returned an empty response.
      This typically happens when the target device is disconnected.
"""

import cgi
import collections
import datetime
import dateutil.parser
import os
import memory_inspector
import mimetypes
import json
import re
import urlparse
import uuid
import wsgiref.simple_server

from memory_inspector.core import backends
from memory_inspector.core import memory_map
from memory_inspector.classification import mmap_classifier
from memory_inspector.classification import native_heap_classifier
from memory_inspector.data import serialization
from memory_inspector.data import file_storage
from memory_inspector.frontends import background_tasks


_HTTP_OK = '200 - OK'
_HTTP_GONE = '410 - Gone'
_HTTP_NOT_FOUND = '404 - Not Found'
_PERSISTENT_STORAGE_PATH = os.path.join(
    os.path.expanduser('~'), '.config', 'memory_inspector')
_CONTENT_DIR = os.path.abspath(os.path.join(
    os.path.dirname(__file__), 'www_content'))
_APP_PROCESS_RE = r'^[\w.:]+$'  # Regex for matching app processes.
_STATS_HIST_SIZE = 120  # Keep at most 120 samples of stats per process.
_CACHE_LEN = 10  # Max length of |_cached_objs|.

# |_cached_objs| keeps the state of short-lived objects that the client needs to
# access in subsequent AJAX calls.
_cached_objs = collections.OrderedDict()
_persistent_storage = file_storage.Storage(_PERSISTENT_STORAGE_PATH)
_proc_stats_history = {}  # /Android/device/PID -> deque([stats@T=0, stats@T=1])


class UriHandler(object):
  """Base decorator used to automatically route /requests/by/path.

  Each handler is called with the following args:
    args: a tuple of the matching regex groups.
    req_vars: a dictionary of request args (querystring for GET, body for POST).
  Each handler must return a tuple with the following elements:
    http_code: a string with the HTTP status code (e.g., '200 - OK')
    headers: a list of HTTP headers (e.g., [('Content-Type', 'foo/bar')])
    body: the HTTP response body.
  """
  _handlers = []

  def __init__(self, path_regex, verb='GET', output_filter=None):
    self._path_regex = path_regex
    self._verb = verb
    default_output_filter = lambda *x: x  # Just return the same args unchanged.
    self._output_filter = output_filter or default_output_filter

  def __call__(self, handler):
    UriHandler._handlers += [(
        self._verb, self._path_regex, self._output_filter, handler)]

  @staticmethod
  def Handle(method, path, req_vars):
    """Finds a matching handler and calls it (or returns a 404 - Not Found)."""
    for (match_method, path_regex, output_filter, fn) in UriHandler._handlers:
      if method != match_method:
        continue
      m = re.match(path_regex, path)
      if not m:
        continue
      (http_code, headers, body) = fn(m.groups(), req_vars)
      return output_filter(http_code, headers, body)
    return (_HTTP_NOT_FOUND, [], 'No AJAX handlers found')


class AjaxHandler(UriHandler):
  """Decorator for routing AJAX requests.

  This decorator essentially groups the JSON serialization and the cache headers
  which is shared by most of the handlers defined below.
  """
  def __init__(self, path_regex, verb='GET'):
    super(AjaxHandler, self).__init__(
        path_regex, verb, AjaxHandler.AjaxOutputFilter)

  @staticmethod
  def AjaxOutputFilter(http_code, headers, body):
    serialized_content = json.dumps(body, cls=serialization.Encoder)
    extra_headers = [('Cache-Control', 'no-cache'),
                     ('Expires', 'Fri, 19 Sep 1986 05:00:00 GMT')]
    return http_code, headers + extra_headers, serialized_content


@AjaxHandler('/ajax/backends')
def _ListBackends(args, req_vars):  # pylint: disable=W0613
  return _HTTP_OK, [], [backend.name for backend in backends.ListBackends()]


@AjaxHandler('/ajax/devices')
def _ListDevices(args, req_vars):  # pylint: disable=W0613
  resp = []
  for device in backends.ListDevices():
    # The device settings must be loaded at discovery time (i.e. here), not
    # during startup, because it might have been plugged later.
    for k, v in _persistent_storage.LoadSettings(device.id).iteritems():
      device.settings[k] = v

    resp += [{'backend': device.backend.name,
              'id': device.id,
              'name': device.name}]
  return _HTTP_OK, [], resp


@AjaxHandler(r'/ajax/dump/mmap/(\w+)/(\w+)/(\d+)')
def _DumpMmapsForProcess(args, req_vars):  # pylint: disable=W0613
  """Dumps memory maps for a process.

  The response is formatted according to the Google Charts DataTable format.
  """
  process = _GetProcess(args)
  if not process:
    return _HTTP_GONE, [], 'Device not found or process died'
  mmap = process.DumpMemoryMaps()
  table = _ConvertMmapToGTable(mmap)

  # Store the dump in the cache. The client might need it later for profiling.
  cache_id = _CacheObject(mmap)
  return _HTTP_OK, [], {'table': table, 'id': cache_id}


@AjaxHandler(r'/ajax/initialize/(\w+)/(\w+)$', 'POST')
def _InitializeDevice(args, req_vars):  # pylint: disable=W0613
  device = _GetDevice(args)
  if not device:
    return _HTTP_GONE, [], 'Device not found'
  device.Initialize()
  if req_vars['enableNativeTracing']:
    device.EnableNativeTracing(True)
  return _HTTP_OK, [], {
      'isNativeTracingEnabled': device.IsNativeTracingEnabled()}


@AjaxHandler(r'/ajax/profile/create', 'POST')
def _CreateProfile(args, req_vars):  # pylint: disable=W0613
  """Creates (and caches) a profile from a set of dumps.

  The profiling data can be retrieved afterwards using the /profile/{PROFILE_ID}
  endpoints (below).
  """
  classifier = None  # A classifier module (/classification/*_classifier.py).
  dumps = {}  # dump-time -> obj. to classify (e.g., |memory_map.Map|).
  for arg in 'type', 'source', 'ruleset':
    assert(arg in req_vars), 'Expecting %s argument in POST data' % arg

  # Step 1: collect the memory dumps, according to what the client specified in
  # the 'type' and 'source' POST arguments.

  # Case 1a: The client requests to load data from an archive.
  if req_vars['source'] == 'archive':
    archive = _persistent_storage.OpenArchive(req_vars['archive'])
    if not archive:
      return _HTTP_GONE, [], 'Cannot open archive %s' % req_vars['archive']
    first_timestamp = None
    for timestamp_str in req_vars['snapshots']:
      timestamp = dateutil.parser.parse(timestamp_str)
      first_timestamp = first_timestamp or timestamp
      time_delta = int((timestamp - first_timestamp).total_seconds())
      if req_vars['type'] == 'mmap':
        dumps[time_delta] = archive.LoadMemMaps(timestamp)
      elif req_vars['type'] == 'nheap':
        dumps[time_delta] = archive.LoadNativeHeap(timestamp)

  # Case 1b: Use a dump recently cached (only mmap, via _DumpMmapsForProcess).
  elif req_vars['source'] == 'cache':
    assert(req_vars['type'] == 'mmap'), 'Only cached mmap dumps are supported.'
    dumps[0] = _GetCacheObject(req_vars['id'])

  if not dumps:
    return _HTTP_GONE, [], 'No memory dumps could be retrieved'

  # Initialize the classifier (mmap or nheap) and prepare symbols for nheap.
  if req_vars['type'] == 'mmap':
    classifier = mmap_classifier
  elif req_vars['type'] == 'nheap':
    classifier = native_heap_classifier
    if not archive.HasSymbols():
      return _HTTP_GONE, [], 'No symbols in archive %s' % req_vars['archive']
    symbols = archive.LoadSymbols()
    for nheap in dumps.itervalues():
      nheap.SymbolizeUsingSymbolDB(symbols)

  if not classifier:
    return _HTTP_GONE, [], 'Classifier %s not supported.' % req_vars['type']

  # Step 2: Load the rule-set specified by the client in the 'ruleset' POST arg.
  if req_vars['ruleset'] == 'heuristic':
    assert(req_vars['type'] == 'nheap'), (
        'heuristic rules are supported only for nheap')
    rules = native_heap_classifier.InferHeuristicRulesFromHeap(dumps[0])
  else:
    rules_path = os.path.join(
        memory_inspector.ROOT_DIR, 'classification_rules', req_vars['ruleset'])
    if not os.path.isfile(rules_path):
      return _HTTP_GONE, [], 'Cannot find the rule-set %s' % rules_path
    with open(rules_path) as f:
      rules = classifier.LoadRules(f.read())

  # Step 3: Aggregate the dump data using the classifier and generate the
  # profile data (which will be kept cached here in the server).
  # The resulting profile will consist of 1+ snapshots (depending on the number
  # dumps the client has requested to process) and a number of 1+ metrics
  # (depending on the buckets' keys returned by the classifier).

  # Converts the {time: dump_obj} dict into a {time: |AggregatedResult|} dict.
  # using the classifier.
  snapshots = collections.OrderedDict((time, classifier.Classify(dump, rules))
      for time, dump in sorted(dumps.iteritems()))

  # Add the profile to the cache (and eventually discard old items).
  # |profile_id| is the key that the client will use in subsequent requests
  # (to the /ajax/profile/{ID}/ endpoints) to refer to this particular profile.
  profile_id = _CacheObject(snapshots)

  first_snapshot = next(snapshots.itervalues())
  return _HTTP_OK, [], {'id': profile_id,
                        'times': snapshots.keys(),
                        'metrics': first_snapshot.keys,
                        'rootBucket': first_snapshot.total.name + '/'}


@AjaxHandler(r'/ajax/profile/(\w+)/tree/(\d+)/(\d+)')
def _GetProfileTreeDataForSnapshot(args, req_vars):  # pylint: disable=W0613
  """Gets the data for the tree chart for a given time and metric.

  The response is formatted according to the Google Charts DataTable format.
  """
  snapshot_id = args[0]
  metric_index = int(args[1])
  time = int(args[2])
  snapshots = _GetCacheObject(snapshot_id)
  if not snapshots:
    return _HTTP_GONE, [], 'Cannot find the selected profile.'
  if time not in snapshots:
    return _HTTP_GONE, [], 'Cannot find snapshot at T=%d.' % time
  snapshot = snapshots[time]
  if metric_index >= len(snapshot.keys):
    return _HTTP_GONE, [], 'Invalid metric id %d' % metric_index

  resp = {'cols': [{'label': 'bucket', 'type': 'string'},
                   {'label': 'parent', 'type': 'string'}],
          'rows': []}

  def VisitBucketAndAddRows(bucket, parent_id=''):
    """Recursively creates the (node, parent) visiting |ResultTree| in DFS."""
    node_id = parent_id + bucket.name + '/'
    node_label = '<dl><dt>%s</dt><dd>%s</dd></dl>' % (
        bucket.name, _StrMem(bucket.values[metric_index]))
    resp['rows'] += [{'c': [
        {'v': node_id, 'f': node_label},
        {'v': parent_id, 'f': None},
    ]}]
    for child in bucket.children:
      VisitBucketAndAddRows(child, node_id)

  VisitBucketAndAddRows(snapshot.total)
  return _HTTP_OK, [], resp


@AjaxHandler(r'/ajax/profile/(\w+)/time_serie/(\d+)/(.*)$')
def _GetTimeSerieForSnapshot(args, req_vars):  # pylint: disable=W0613
  """Gets the data for the area chart for a given metric and bucket.

  The response is formatted according to the Google Charts DataTable format.
  """
  snapshot_id = args[0]
  metric_index = int(args[1])
  bucket_path = args[2]
  snapshots = _GetCacheObject(snapshot_id)
  if not snapshots:
    return _HTTP_GONE, [], 'Cannot find the selected profile.'
  if metric_index >= len(next(snapshots.itervalues()).keys):
    return _HTTP_GONE, [], 'Invalid metric id %d' % metric_index

  def FindBucketByPath(bucket, path, parent_path=''):  # Essentially a DFS.
    cur_path = parent_path + bucket.name + '/'
    if cur_path == path:
      return bucket
    for child in bucket.children:
      res = FindBucketByPath(child, path, cur_path)
      if res:
        return res
    return None

  # The resulting data table will look like this (assuming len(metrics) == 2):
  # Time  Ashmem    Dalvik     Other
  # 0     (1024,0)  (4096,1024)  (0,0)
  # 30    (512,512) (1024,1024)  (0,512)
  # 60    (0,512)   (1024,0)     (512,0)
  resp = {'cols': [], 'rows': []}
  for time, aggregated_result in snapshots.iteritems():
    bucket = FindBucketByPath(aggregated_result.total, bucket_path)
    if not bucket:
      return _HTTP_GONE, [], 'Bucket %s not found' % bucket_path

    # If the user selected a non-leaf bucket, display the breakdown of its
    # direct children. Otherwise just the leaf bucket.
    children_buckets = bucket.children if bucket.children else [bucket]

    # Create the columns (from the buckets) when processing the first snapshot.
    if not resp['cols']:
      resp['cols'] += [{'label': 'Time', 'type': 'string'}]
      for child_bucket in children_buckets:
        resp['cols'] += [{'label': child_bucket.name, 'type': 'number'}]

    row = [{'v': str(time), 'f': None}]
    for child_bucket in children_buckets:
      row += [{'v': child_bucket.values[metric_index] / 1024, 'f': None}]
    resp['rows'] += [{'c': row}]

  return _HTTP_OK, [], resp


@AjaxHandler(r'/ajax/ps/(\w+)/(\w+)$')  # /ajax/ps/Android/a0b1c2[?all=1]
def _ListProcesses(args, req_vars):  # pylint: disable=W0613
  """Lists processes and their CPU / mem stats.

  The response is formatted according to the Google Charts DataTable format.
  """
  device = _GetDevice(args)
  if not device:
    return _HTTP_GONE, [], 'Device not found'
  resp = {
      'cols': [
          {'label': 'Pid', 'type':'number'},
          {'label': 'Name', 'type':'string'},
          {'label': 'Cpu %', 'type':'number'},
          {'label': 'Mem RSS Kb', 'type':'number'},
          {'label': '# Threads', 'type':'number'},
      ],
      'rows': []}
  for process in device.ListProcesses():
    # Exclude system apps if the request didn't contain the ?all=1 arg.
    if not req_vars.get('all') and not re.match(_APP_PROCESS_RE, process.name):
      continue
    stats = process.GetStats()
    resp['rows'] += [{'c': [
        {'v': process.pid, 'f': None},
        {'v': process.name, 'f': None},
        {'v': stats.cpu_usage, 'f': None},
        {'v': stats.vm_rss, 'f': None},
        {'v': stats.threads, 'f': None},
    ]}]
  return _HTTP_OK, [], resp


@AjaxHandler(r'/ajax/stats/(\w+)/(\w+)$')  # /ajax/stats/Android/a0b1c2
def _GetDeviceStats(args, req_vars):  # pylint: disable=W0613
  """Lists device CPU / mem stats.

  The response is formatted according to the Google Charts DataTable format.
  """
  device = _GetDevice(args)
  if not device:
    return _HTTP_GONE, [], 'Device not found'
  device_stats = device.GetStats()

  cpu_stats = {
      'cols': [
          {'label': 'CPU', 'type':'string'},
          {'label': 'Usr %', 'type':'number'},
          {'label': 'Sys %', 'type':'number'},
          {'label': 'Idle %', 'type':'number'},
      ],
      'rows': []}

  for cpu_idx in xrange(len(device_stats.cpu_times)):
    cpu = device_stats.cpu_times[cpu_idx]
    cpu_stats['rows'] += [{'c': [
        {'v': '# %d' % cpu_idx, 'f': None},
        {'v': cpu['usr'], 'f': None},
        {'v': cpu['sys'], 'f': None},
        {'v': cpu['idle'], 'f': None},
    ]}]

  mem_stats = {
      'cols': [
          {'label': 'Section', 'type':'string'},
          {'label': 'MB', 'type':'number', 'pattern': ''},
      ],
      'rows': []}

  for key, value in device_stats.memory_stats.iteritems():
    mem_stats['rows'] += [{'c': [
        {'v': key, 'f': None},
        {'v': value / 1024, 'f': None}
    ]}]

  return _HTTP_OK, [], {'cpu': cpu_stats, 'mem': mem_stats}


@AjaxHandler(r'/ajax/stats/(\w+)/(\w+)/(\d+)$')  # /ajax/stats/Android/a0b1c2/42
def _GetProcessStats(args, req_vars):  # pylint: disable=W0613
  """Lists CPU / mem stats for a given process (and keeps history).

  The response is formatted according to the Google Charts DataTable format.
  """
  process = _GetProcess(args)
  if not process:
    return _HTTP_GONE, [], 'Device not found'

  proc_uri = '/'.join(args)
  cur_stats = process.GetStats()
  if proc_uri not in _proc_stats_history:
    _proc_stats_history[proc_uri] = collections.deque(maxlen=_STATS_HIST_SIZE)
  history = _proc_stats_history[proc_uri]
  history.append(cur_stats)

  cpu_stats = {
      'cols': [
          {'label': 'T', 'type':'string'},
          {'label': 'CPU %', 'type':'number'},
          {'label': '# Threads', 'type':'number'},
      ],
      'rows': []
  }

  mem_stats = {
      'cols': [
          {'label': 'T', 'type':'string'},
          {'label': 'Mem RSS Kb', 'type':'number'},
          {'label': 'Page faults', 'type':'number'},
      ],
      'rows': []
  }

  for stats in history:
    cpu_stats['rows'] += [{'c': [
        {'v': str(datetime.timedelta(seconds=stats.run_time)), 'f': None},
        {'v': stats.cpu_usage, 'f': None},
        {'v': stats.threads, 'f': None},
    ]}]
    mem_stats['rows'] += [{'c': [
        {'v': str(datetime.timedelta(seconds=stats.run_time)), 'f': None},
        {'v': stats.vm_rss, 'f': None},
        {'v': stats.page_faults, 'f': None},
    ]}]

  return _HTTP_OK, [], {'cpu': cpu_stats, 'mem': mem_stats}


@AjaxHandler(r'/ajax/settings/(\w+)/?(\w+)?$')  # /ajax/settings/Android[/id]
def _GetDeviceOrBackendSettings(args, req_vars):  # pylint: disable=W0613
  backend = backends.GetBackend(args[0])
  if not backend:
    return _HTTP_GONE, [], 'Backend not found'
  if args[1]:
    device = _GetDevice(args)
    if not device:
      return _HTTP_GONE, [], 'Device not found'
    settings = device.settings
  else:
    settings = backend.settings

  assert(isinstance(settings, backends.Settings))
  resp = {}
  for key in settings.expected_keys:
    resp[key] = {'description': settings.expected_keys[key],
                 'value': settings.values[key]}
  return _HTTP_OK, [], resp


@AjaxHandler(r'/ajax/settings/(\w+)/?(\w+)?$', 'POST')
def _SetDeviceOrBackendSettings(args, req_vars):  # pylint: disable=W0613
  backend = backends.GetBackend(args[0])
  if not backend:
    return _HTTP_GONE, [], 'Backend not found'
  if args[1]:
    device = _GetDevice(args)
    if not device:
      return _HTTP_GONE, [], 'Device not found'
    settings = device.settings
    storage_name = device.id
  else:
    settings = backend.settings
    storage_name = backend.name

  for key in req_vars.iterkeys():
    settings[key] = req_vars[key]
  _persistent_storage.StoreSettings(storage_name, settings.values)
  return _HTTP_OK, [], ''


@AjaxHandler(r'/ajax/storage/list')
def _ListStorage(args, req_vars):  # pylint: disable=W0613
  resp = {
      'cols': [
          {'label': 'Archive', 'type':'string'},
          {'label': 'Snapshot', 'type':'string'},
          {'label': 'Mem maps', 'type':'boolean'},
          {'label': 'N. Heap', 'type':'boolean'},
      ],
      'rows': []}
  for archive_name in _persistent_storage.ListArchives():
    archive = _persistent_storage.OpenArchive(archive_name)
    first_timestamp = None
    for timestamp in archive.ListSnapshots():
      first_timestamp = timestamp if not first_timestamp else first_timestamp
      time_delta = '%d s.' % (timestamp - first_timestamp).total_seconds()
      resp['rows'] += [{'c': [
          {'v': archive_name, 'f': None},
          {'v': timestamp.isoformat(), 'f': time_delta},
          {'v': archive.HasMemMaps(timestamp), 'f': None},
          {'v': archive.HasNativeHeap(timestamp), 'f': None},
      ]}]
  return _HTTP_OK, [], resp


@AjaxHandler(r'/ajax/storage/(.+)/(.+)/mmaps')
def _LoadMmapsFromStorage(args, req_vars):  # pylint: disable=W0613
  archive = _persistent_storage.OpenArchive(args[0])
  if not archive:
    # Note: this is a GET endpoint, so |req_vars| (the querystring) has no
    # 'archive' key. The archive name comes from the URL path (args[0]).
    return _HTTP_GONE, [], 'Cannot open archive %s' % args[0]

  timestamp = dateutil.parser.parse(args[1])
  if not archive.HasMemMaps(timestamp):
    return _HTTP_GONE, [], 'No mmaps for snapshot %s' % timestamp
  mmap = archive.LoadMemMaps(timestamp)
  return _HTTP_OK, [], {'table': _ConvertMmapToGTable(mmap)}


@AjaxHandler(r'/ajax/storage/(.+)/(.+)/nheap')
def _LoadNheapFromStorage(args, req_vars):
  """Returns a Google Charts DataTable dictionary for the nheap."""
  archive = _persistent_storage.OpenArchive(args[0])
  if not archive:
    # See note in _LoadMmapsFromStorage: the archive name lives in args[0].
    return _HTTP_GONE, [], 'Cannot open archive %s' % args[0]

  timestamp = dateutil.parser.parse(args[1])
  if not archive.HasNativeHeap(timestamp):
    return _HTTP_GONE, [], 'No native heap dump for snapshot %s' % timestamp

  nheap = archive.LoadNativeHeap(timestamp)
  symbols = archive.LoadSymbols()
  nheap.SymbolizeUsingSymbolDB(symbols)

  resp = {
      'cols': [
          {'label': 'Total size [KB]', 'type':'number'},
          {'label': 'Alloc size [B]', 'type':'number'},
          {'label': 'Count', 'type':'number'},
          {'label': 'Stack Trace', 'type':'string'},
      ],
      'rows': []}
  for alloc in nheap.allocations:
    strace = '<dl>'
    for frame in alloc.stack_trace.frames:
      # Use the fallback libname.so+0xaddr if symbol info is not available.
      symbol_name = frame.symbol.name if frame.symbol else '??'
      source_info = (str(frame.symbol.source_info[0]) if
          frame.symbol and frame.symbol.source_info else frame.raw_address)
      strace += '<dd title="%s">%s</dd><dt>%s</dt>' % (
          cgi.escape(source_info),
          cgi.escape(os.path.basename(source_info)),
          cgi.escape(symbol_name))
    strace += '</dl>'

    resp['rows'] += [{'c': [
        {'v': alloc.total_size, 'f': alloc.total_size / 1024},
        {'v': alloc.size, 'f': None},
        {'v': alloc.count, 'f': None},
        {'v': strace, 'f': None},
    ]}]
  return _HTTP_OK, [], resp


# /ajax/tracer/start/Android/device-id/pid
@AjaxHandler(r'/ajax/tracer/start/(\w+)/(\w+)/(\d+)', 'POST')
def _StartTracer(args, req_vars):
  for arg in 'interval', 'count', 'traceNativeHeap':
    assert(arg in req_vars), 'Expecting %s argument in POST data' % arg
  process = _GetProcess(args)
  if not process:
    return _HTTP_GONE, [], 'Device not found or process died'
  task_id = background_tasks.StartTracer(
      storage_path=_PERSISTENT_STORAGE_PATH,
      process=process,
      interval=int(req_vars['interval']),
      count=int(req_vars['count']),
      trace_native_heap=req_vars['traceNativeHeap'])
  return _HTTP_OK, [], task_id


@AjaxHandler(r'/ajax/tracer/status/(\d+)')  # /ajax/tracer/status/{task_id}
def _GetTracerStatus(args, req_vars):  # pylint: disable=W0613
  task = background_tasks.Get(int(args[0]))
  if not task:
    return _HTTP_GONE, [], 'Task not found'
  return _HTTP_OK, [], task.GetProgress()


@UriHandler(r'^(?!/ajax)/(.*)$')
def _StaticContent(args, req_vars):  # pylint: disable=W0613
  # Give the browser a 1-day TTL cache to minimize the start-up time.
  cache_headers = [('Cache-Control', 'max-age=86400, public')]
  req_path = args[0] if args[0] else 'index.html'
  file_path = os.path.abspath(os.path.join(_CONTENT_DIR, req_path))
  if (os.path.isfile(file_path) and
      os.path.commonprefix([file_path, _CONTENT_DIR]) == _CONTENT_DIR):
    mtype = 'text/plain'
    guessed_mime = mimetypes.guess_type(file_path)
    if guessed_mime and guessed_mime[0]:
      mtype = guessed_mime[0]
    with open(file_path, 'rb') as f:
      body = f.read()
    return _HTTP_OK, cache_headers + [('Content-Type', mtype)], body
  return _HTTP_NOT_FOUND, cache_headers, file_path + ' not found'


def _GetDevice(args):
  """Returns a |backends.Device| instance from a /backend/device URI."""
  assert(len(args) >= 2), 'Malformed request. Expecting /backend/device'
  return backends.GetDevice(backend_name=args[0], device_id=args[1])


def _GetProcess(args):
  """Returns a |backends.Process| instance from a /backend/device/pid URI."""
  assert(len(args) >= 3 and args[2].isdigit()), (
      'Malformed request. Expecting /backend/device/pid')
  device = _GetDevice(args)
  if not device:
    return None
  return device.GetProcess(int(args[2]))


def _ConvertMmapToGTable(mmap):
  """Returns a Google Charts DataTable dictionary for the given mmap."""
  assert(isinstance(mmap, memory_map.Map))
  table = {
      'cols': [
          {'label': 'Start', 'type':'string'},
          {'label': 'End', 'type':'string'},
          {'label': 'Length Kb', 'type':'number'},
          {'label': 'Prot', 'type':'string'},
          {'label': 'Priv. Dirty Kb', 'type':'number'},
          {'label': 'Priv. Clean Kb', 'type':'number'},
          {'label': 'Shared Dirty Kb', 'type':'number'},
          {'label': 'Shared Clean Kb', 'type':'number'},
          {'label': 'File', 'type':'string'},
          {'label': 'Offset', 'type':'number'},
          {'label': 'Resident Pages', 'type':'string'},
      ],
      'rows': []}
  for entry in mmap.entries:
    table['rows'] += [{'c': [
        {'v': '%08x' % entry.start, 'f': None},
        {'v': '%08x' % entry.end, 'f': None},
        {'v': entry.len / 1024, 'f': None},
        {'v': entry.prot_flags, 'f': None},
        {'v': entry.priv_dirty_bytes / 1024, 'f': None},
        {'v': entry.priv_clean_bytes / 1024, 'f': None},
        {'v': entry.shared_dirty_bytes / 1024, 'f': None},
        {'v': entry.shared_clean_bytes / 1024, 'f': None},
        {'v': entry.mapped_file, 'f': None},
        {'v': entry.mapped_offset, 'f': None},
        {'v': '[%s]' % (','.join(map(str, entry.resident_pages))), 'f': None},
    ]}]
  return table


def _CacheObject(obj_to_store):
  """Stores an object in the server-side cache and returns its unique id."""
  if len(_cached_objs) >= _CACHE_LEN:
    _cached_objs.popitem(last=False)
  obj_id = uuid.uuid4().hex
  _cached_objs[obj_id] = obj_to_store
  return str(obj_id)


def _GetCacheObject(obj_id):
  """Retrieves an object in the server-side cache by its id."""
  return _cached_objs.get(obj_id)


def _StrMem(nbytes):
  """Converts a number (of bytes) into a human readable string (kb, mb)."""
  if nbytes < 2**10:
    return '%d B' % nbytes
  if nbytes < 2**20:
    return '%.1f KB' % round(nbytes / 1024.0)
  return '%.1f MB' % (nbytes / 1048576.0)


def _HttpRequestHandler(environ, start_response):
  """Parses a single HTTP request and delegates the handling through UriHandler.

  This essentially wires up wsgiref.simple_server with our @UriHandler(s).
  """
  path = environ['PATH_INFO']
  method = environ['REQUEST_METHOD']
  if method == 'POST':
    req_body_size = int(environ.get('CONTENT_LENGTH', 0))
    req_body = environ['wsgi.input'].read(req_body_size)
    req_vars = json.loads(req_body)
  else:
    req_vars = urlparse.parse_qs(environ['QUERY_STRING'])
  (http_code, headers, body) = UriHandler.Handle(method, path, req_vars)
  start_response(http_code, headers)
  return [body]


def Start(http_port):
  # Load the saved backends' settings (some of them might be needed to bootstrap
  # as, for instance, the adb path for the Android backend).
  for backend in backends.ListBackends():
    for k, v in _persistent_storage.LoadSettings(backend.name).iteritems():
      backend.settings[k] = v

  httpd = wsgiref.simple_server.make_server('', http_port, _HttpRequestHandler)
  try:
    httpd.serve_forever()
  except KeyboardInterrupt:
    pass  # Don't print useless stack traces when the user hits CTRL-C.
  background_tasks.TerminateAll()