# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

import json
import logging
import sys

from lib.ordered_dict import OrderedDict
from lib.subcommand import SubCommand
from lib.sorter import MallocUnit, MMapUnit, SorterSet, UnhookedUnit, UnitSet


LOGGER = logging.getLogger('dmprof')


class CatCommand(SubCommand):
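  """Subcommand 'cat': prints a dump series as a single JSON document."""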
  def __init__(self):
    super(CatCommand, self).__init__('Usage: %prog cat <first-dump>')
    self._parser.add_option('--alternative-dirs', dest='alternative_dirs',
                            metavar='/path/on/target@/path/on/host[:...]',
                            help='Read files in /path/on/host/ instead of '
                                 'files in /path/on/target/.')
    self._parser.add_option('--indent', dest='indent', action='store_true',
                            help='Indent the output.')

  def do(self, sys_argv):
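    """Loads the dump series at <first-dump>, sorts it and prints JSON."""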
    options, args = self._parse_args(sys_argv, 1)
    dump_path = args[1]
    # TODO(dmikurube): Support shared memory.
    alternative_dirs_dict = {}
    if options.alternative_dirs:
      for alternative_dir_pair in options.alternative_dirs.split(':'):
        target_path, host_path = alternative_dir_pair.split('@', 1)
        alternative_dirs_dict[target_path] = host_path
    (bucket_set, dumps) = SubCommand.load_basic_files(
        dump_path, True, alternative_dirs=alternative_dirs_dict)

    # Load all sorters.
    sorters = SorterSet()

    json_root = OrderedDict()
    json_root['version'] = 1
    json_root['run_id'] = None
    json_root['roots'] = []
    for sorter in sorters:
      if sorter.root:
        json_root['roots'].append([sorter.world, sorter.name])
    json_root['default_template'] = 'l2'
    json_root['templates'] = sorters.templates.as_dict()

    orders = OrderedDict()
    orders['worlds'] = OrderedDict()
    for world in ['vm', 'malloc']:
      orders['worlds'][world] = OrderedDict()
      orders['worlds'][world]['breakdown'] = OrderedDict()
      for sorter in sorters.iter_world(world):
        order = []
        for rule in sorter.iter_rule():
          if rule.name not in order:
            order.append(rule.name)
        orders['worlds'][world]['breakdown'][sorter.name] = order
    json_root['orders'] = orders

    json_root['snapshots'] = []

    for dump in dumps:
      if json_root['run_id'] and json_root['run_id'] != dump.run_id:
        LOGGER.error('Inconsistent heap profile dumps.')
        json_root['run_id'] = ''
      else:
        json_root['run_id'] = dump.run_id

      LOGGER.info('Sorting dump %s...' % dump.path)
      json_root['snapshots'].append(
          self._fill_snapshot(dump, bucket_set, sorters))

    if options.indent:
      json.dump(json_root, sys.stdout, indent=2)
    else:
      json.dump(json_root, sys.stdout)
    print ''

  @staticmethod
  def _fill_snapshot(dump, bucket_set, sorters):
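    """Returns an OrderedDict for one dump: its time and per-world data."""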
    root = OrderedDict()
    root['time'] = dump.time
    root['worlds'] = OrderedDict()
    root['worlds']['vm'] = CatCommand._fill_world(
        dump, bucket_set, sorters, 'vm')
    root['worlds']['malloc'] = CatCommand._fill_world(
        dump, bucket_set, sorters, 'malloc')
    return root

  @staticmethod
  def _fill_world(dump, bucket_set, sorters, world):
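    """Builds one world ('vm' or 'malloc'): its units and their breakdowns."""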
    root = OrderedDict()

    root['name'] = world
    if world == 'vm':
      root['unit_fields'] = ['size', 'reserved']
    elif world == 'malloc':
      root['unit_fields'] = ['size', 'alloc_count', 'free_count']

    # Make { vm | malloc } units with their sizes.
    root['units'] = OrderedDict()
    unit_set = UnitSet(world)
    if world == 'vm':
      for unit in CatCommand._iterate_vm_unit(dump, None, bucket_set):
        unit_set.append(unit)
      for unit in unit_set:
        root['units'][unit.unit_id] = [unit.committed, unit.reserved]
    elif world == 'malloc':
      for unit in CatCommand._iterate_malloc_unit(dump, bucket_set):
        unit_set.append(unit)
      for unit in unit_set:
        root['units'][unit.unit_id] = [
            unit.size, unit.alloc_count, unit.free_count]

    # Iterate over { vm | malloc } sorters.
    root['breakdown'] = OrderedDict()
    for sorter in sorters.iter_world(world):
      LOGGER.info('  Sorting with %s:%s.' % (sorter.world, sorter.name))
      breakdown = OrderedDict()
      for rule in sorter.iter_rule():
        category = OrderedDict()
        category['name'] = rule.name
        subs = []
        for sub_world, sub_breakdown in rule.iter_subs():
          subs.append([sub_world, sub_breakdown])
        if subs:
          category['subs'] = subs
        if rule.hidden:
          category['hidden'] = True
        category['units'] = []
        breakdown[rule.name] = category
      for unit in unit_set:
        found = sorter.find(unit)
        if found:
          # Note that a bucket which doesn't match any rule is just dropped.
          breakdown[found.name]['units'].append(unit.unit_id)
      root['breakdown'][sorter.name] = breakdown

    return root

  @staticmethod
  def _iterate_vm_unit(dump, pfn_dict, bucket_set):
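    """Yields a VM unit (MMapUnit or UnhookedUnit) per mapped region."""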
    unit_id = 0
    for _, region in dump.iter_map:
      unit_id += 1
      if region[0] == 'unhooked':
        if pfn_dict and dump.pageframe_length:
          for pageframe in region[1]['pageframe']:
            yield UnhookedUnit(unit_id, pageframe.size, pageframe.size,
                               region, pageframe, pfn_dict)
        else:
          yield UnhookedUnit(unit_id,
                             int(region[1]['committed']),
                             int(region[1]['reserved']),
                             region)
      elif region[0] == 'hooked':
        if pfn_dict and dump.pageframe_length:
          for pageframe in region[1]['pageframe']:
            yield MMapUnit(unit_id,
                           pageframe.size,
                           pageframe.size,
                           region, bucket_set, pageframe, pfn_dict)
        else:
          yield MMapUnit(unit_id,
                         int(region[1]['committed']),
                         int(region[1]['reserved']),
                         region,
                         bucket_set)
      else:
        LOGGER.error('Unrecognized mapping status: %s' % region[0])

  @staticmethod
  def _iterate_malloc_unit(dump, bucket_set):
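    """Yields a MallocUnit for each malloc (or not-found) bucket in the dump."""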
    for bucket_id, _, committed, allocs, frees in dump.iter_stacktrace:
      bucket = bucket_set.get(bucket_id)
      if bucket and bucket.allocator_type == 'malloc':
        yield MallocUnit(bucket_id, committed, allocs, frees, bucket)
      elif not bucket:
        # 'Not-found' buckets are all assumed to be malloc buckets.
        yield MallocUnit(bucket_id, committed, allocs, frees, None)
