1# Copyright 2013 The Chromium Authors. All rights reserved.
2# Use of this source code is governed by a BSD-style license that can be
3# found in the LICENSE file.
4
5import logging
6import sys
7
8from lib.bucket import BUCKET_ID, COMMITTED, ALLOC_COUNT, FREE_COUNT
9from lib.policy import PolicySet
10from lib.subcommand import SubCommand
11
12
# Module-level logger shared by the dmprof tool's subcommands.
LOGGER = logging.getLogger('dmprof')
14
15
class ExpandCommand(SubCommand):
  """Subcommand 'expand': prints sized stacktraces for one policy component."""

  def __init__(self):
    super(ExpandCommand, self).__init__(
        'Usage: %prog expand <dump> <policy> <component> <depth>')

  def do(self, sys_argv):
    """Runs the 'expand' subcommand.

    Args:
        sys_argv: A list of command-line arguments; expects four positional
            arguments after the subcommand: dump path, policy list,
            component name and stacktrace depth.

    Returns:
        0 on success.
    """
    _, args = self._parse_args(sys_argv, 4)
    dump_path = args[1]
    target_policy = args[2]
    component_name = args[3]
    depth = args[4]
    (bucket_set, dump) = SubCommand.load_basic_files(dump_path, False)
    policy_set = PolicySet.load(SubCommand._parse_policy_list(target_policy))

    ExpandCommand._output(dump, policy_set[target_policy], bucket_set,
                          component_name, int(depth), sys.stdout)
    return 0

  @staticmethod
  def _output(dump, policy, bucket_set, component_name, depth, out):
    """Prints all stacktraces in a given component of given depth.

    Args:
        dump: A Dump object.
        policy: A Policy object.
        bucket_set: A BucketSet object.
        component_name: A name of component for filtering.
        depth: An integer representing depth to be printed.
        out: An IO object to output.
    """
    sizes = {}

    ExpandCommand._accumulate(
        dump, policy, bucket_set, component_name, depth, sizes)

    # items() instead of the Python-2-only iteritems(): identical results,
    # and keeps the tool runnable under Python 3 as well.
    sorted_sizes_list = sorted(
        sizes.items(), key=(lambda x: x[1]), reverse=True)
    total = 0
    # TODO(dmikurube): Better formatting.
    for stacktrace, size in sorted_sizes_list:
      out.write('%10d %s\n' % (size, stacktrace))
      total += size
    # Lazy %-style arguments: formatting is skipped if INFO is disabled.
    LOGGER.info('total: %d\n', total)

  @staticmethod
  def _add_size(precedence, bucket, depth, committed, sizes):
    """Accumulates a committed size into |sizes| keyed by a stacktrace string.

    Args:
        precedence: A string prefix (e.g. alloc/free counts) for the key.
        bucket: A Bucket object providing the symbolized stacktrace.
        depth: An integer; at most (depth + 1) stack frames are rendered.
        committed: An integer size to accumulate.
        sizes: A dict mapping stacktrace strings to accumulated sizes.
            Modified in place.
    """
    stacktrace_sequence = precedence
    # Python slicing clamps out-of-range stops, so no explicit min() with
    # len() is needed; zip() also stops at the shorter of the two lists.
    for function, sourcefile in zip(
        bucket.symbolized_stackfunction[:1 + depth],
        bucket.symbolized_stacksourcefile[:1 + depth]):
      stacktrace_sequence += '%s(@%s) ' % (function, sourcefile)
    sizes[stacktrace_sequence] = (
        sizes.get(stacktrace_sequence, 0) + committed)

  @staticmethod
  def _accumulate(dump, policy, bucket_set, component_name, depth, sizes):
    """Collects per-stacktrace committed sizes for |component_name|.

    Args:
        dump: A Dump object.
        policy: A Policy object.
        bucket_set: A BucketSet object.
        component_name: A name of component for filtering.
        depth: An integer representing depth to be printed.
        sizes: A dict mapping stacktrace strings to accumulated sizes.
            Modified in place.
    """
    rule = policy.find_rule(component_name)
    if not rule:
      # Unknown component: nothing to accumulate.
      return
    if rule.allocator_type == 'malloc':
      for line in dump.iter_stacktrace:
        words = line.split()
        bucket = bucket_set.get(int(words[BUCKET_ID]))
        if not bucket or bucket.allocator_type == 'malloc':
          # NOTE(review): bucket may be None here; find_malloc presumably
          # tolerates that, but bucket.typeinfo below would not — confirm
          # a None bucket can never match a named component.
          component_match = policy.find_malloc(bucket)
        elif bucket.allocator_type == 'mmap':
          continue
        else:
          assert False
        if component_match == component_name:
          precedence = ''
          precedence += '(alloc=%d) ' % int(words[ALLOC_COUNT])
          precedence += '(free=%d) ' % int(words[FREE_COUNT])
          if bucket.typeinfo:
            precedence += '(type=%s) ' % bucket.symbolized_typeinfo
            precedence += '(type.name=%s) ' % bucket.typeinfo_name
          ExpandCommand._add_size(precedence, bucket, depth,
                                  int(words[COMMITTED]), sizes)
    elif rule.allocator_type == 'mmap':
      for _, region in dump.iter_map:
        # Only regions recorded by the allocation hooks carry a bucket.
        if region[0] != 'hooked':
          continue
        component_match, bucket = policy.find_mmap(region, bucket_set)
        if component_match == component_name:
          ExpandCommand._add_size('', bucket, depth,
                                  region[1]['committed'], sizes)
105