// Copyright 2015 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/trace_event/malloc_dump_provider.h"

#include <stddef.h>

#include "base/allocator/allocator_extension.h"
#include "base/allocator/allocator_shim.h"
#include "base/allocator/features.h"
#include "base/trace_event/heap_profiler_allocation_context.h"
#include "base/trace_event/heap_profiler_allocation_context_tracker.h"
#include "base/trace_event/heap_profiler_allocation_register.h"
#include "base/trace_event/heap_profiler_heap_dump_writer.h"
#include "base/trace_event/process_memory_dump.h"
#include "base/trace_event/trace_event_argument.h"
#include "build/build_config.h"

#if defined(OS_MACOSX)
#include <malloc/malloc.h>
#else
#include <malloc.h>
#endif

namespace base {
namespace trace_event {

#if BUILDFLAG(USE_EXPERIMENTAL_ALLOCATOR_SHIM)
namespace {

using allocator::AllocatorDispatch;

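// Each hook below follows the same pattern: forward the call to the next
// AllocatorDispatch in the shim chain, so that the underlying allocator still
// services the request, and on success record the allocation in (or remove it
// from) the MallocDumpProvider's AllocationRegister.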
void* HookAlloc(const AllocatorDispatch* self, size_t size) {
  const AllocatorDispatch* const next = self->next;
  void* ptr = next->alloc_function(next, size);
  if (ptr)
    MallocDumpProvider::GetInstance()->InsertAllocation(ptr, size);
  return ptr;
}

void* HookZeroInitAlloc(const AllocatorDispatch* self, size_t n, size_t size) {
  const AllocatorDispatch* const next = self->next;
  void* ptr = next->alloc_zero_initialized_function(next, n, size);
  // A successful zero-initialized (calloc-style) allocation implies that
  // |n * size| did not overflow, so the product below is safe to compute.
  if (ptr)
    MallocDumpProvider::GetInstance()->InsertAllocation(ptr, n * size);
  return ptr;
}

void* HookAllocAligned(const AllocatorDispatch* self,
                       size_t alignment,
                       size_t size) {
  const AllocatorDispatch* const next = self->next;
  void* ptr = next->alloc_aligned_function(next, alignment, size);
  if (ptr)
    MallocDumpProvider::GetInstance()->InsertAllocation(ptr, size);
  return ptr;
}

void* HookRealloc(const AllocatorDispatch* self, void* address, size_t size) {
  const AllocatorDispatch* const next = self->next;
  void* ptr = next->realloc_function(next, address, size);
  MallocDumpProvider::GetInstance()->RemoveAllocation(address);
  if (size > 0)  // realloc(size == 0) means free().
    MallocDumpProvider::GetInstance()->InsertAllocation(ptr, size);
  return ptr;
}

void HookFree(const AllocatorDispatch* self, void* address) {
  if (address)
    MallocDumpProvider::GetInstance()->RemoveAllocation(address);
  const AllocatorDispatch* const next = self->next;
  next->free_function(next, address);
}

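// Dispatch table with the hooks above, spliced into the head of the allocator
// shim chain by OnHeapProfilingEnabled() below. Note that it is never removed:
// disabling heap profiling only resets the AllocationRegister, after which the
// hooks become no-ops.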
AllocatorDispatch g_allocator_hooks = {
    &HookAlloc,         /* alloc_function */
    &HookZeroInitAlloc, /* alloc_zero_initialized_function */
    &HookAllocAligned,  /* alloc_aligned_function */
    &HookRealloc,       /* realloc_function */
    &HookFree,          /* free_function */
    nullptr,            /* next */
};

}  // namespace
#endif  // BUILDFLAG(USE_EXPERIMENTAL_ALLOCATOR_SHIM)

// static
const char MallocDumpProvider::kAllocatedObjects[] = "malloc/allocated_objects";

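// The singleton is leaky on purpose: the allocator hooks installed above may
// call into the provider at any time, including during process shutdown, so
// it must never be destroyed.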
// static
MallocDumpProvider* MallocDumpProvider::GetInstance() {
  return Singleton<MallocDumpProvider,
                   LeakySingletonTraits<MallocDumpProvider>>::get();
}

MallocDumpProvider::MallocDumpProvider()
    : heap_profiler_enabled_(false), tid_dumping_heap_(kInvalidThreadId) {}

MallocDumpProvider::~MallocDumpProvider() {}

// Called at trace dump point time. Creates a snapshot of the memory counters
// for the current process.
bool MallocDumpProvider::OnMemoryDump(const MemoryDumpArgs& args,
                                      ProcessMemoryDump* pmd) {
  size_t total_virtual_size = 0;
  size_t resident_size = 0;
  size_t allocated_objects_size = 0;
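  // The counters above are filled in from whichever allocator is in use:
  // tcmalloc exposes them via GetNumericProperty(), the macOS/iOS default
  // zone via malloc_zone_statistics(), and other platforms via mallinfo().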
#if defined(USE_TCMALLOC)
  bool res =
      allocator::GetNumericProperty("generic.heap_size", &total_virtual_size);
  DCHECK(res);
  res = allocator::GetNumericProperty("generic.total_physical_bytes",
                                      &resident_size);
  DCHECK(res);
  res = allocator::GetNumericProperty("generic.current_allocated_bytes",
                                      &allocated_objects_size);
  DCHECK(res);
#elif defined(OS_MACOSX) || defined(OS_IOS)
  malloc_statistics_t stats;
  memset(&stats, 0, sizeof(stats));
  malloc_zone_statistics(nullptr, &stats);
  total_virtual_size = stats.size_allocated;
  allocated_objects_size = stats.size_in_use;

  // The resident size is approximated by the max size in use, which counts the
  // total size of all regions except the free bytes at the end of each region.
  // In each allocation region the allocations are rounded to a fixed quantum,
  // so the excess region will not be resident.
  // See crrev.com/1531463004 for a detailed explanation.
  resident_size = stats.max_size_in_use;
#else
  struct mallinfo info = mallinfo();
  DCHECK_GE(info.arena + info.hblkhd, info.uordblks);

  // With Android's jemalloc, |arena| is 0 and the size of the outer pages is
  // reported by |hblkhd|. With dlmalloc the total is given by
  // |arena| + |hblkhd|. For more details see http://goo.gl/fMR8lF.
  total_virtual_size = info.arena + info.hblkhd;
  resident_size = info.uordblks;
  allocated_objects_size = info.uordblks;
#endif

  MemoryAllocatorDump* outer_dump = pmd->CreateAllocatorDump("malloc");
  outer_dump->AddScalar("virtual_size", MemoryAllocatorDump::kUnitsBytes,
                        total_virtual_size);
  outer_dump->AddScalar(MemoryAllocatorDump::kNameSize,
                        MemoryAllocatorDump::kUnitsBytes, resident_size);

  // Total allocated space is given by |uordblks|.
  MemoryAllocatorDump* inner_dump = pmd->CreateAllocatorDump(kAllocatedObjects);
  inner_dump->AddScalar(MemoryAllocatorDump::kNameSize,
                        MemoryAllocatorDump::kUnitsBytes,
                        allocated_objects_size);

  // Both sizes are unsigned, so compare before subtracting to avoid underflow.
  if (resident_size > allocated_objects_size) {
    // Explicitly specify why the extra memory is resident. In tcmalloc it
    // accounts for free lists and caches. On Mac and iOS it accounts for
    // fragmentation and metadata.
    MemoryAllocatorDump* other_dump =
        pmd->CreateAllocatorDump("malloc/metadata_fragmentation_caches");
    other_dump->AddScalar(MemoryAllocatorDump::kNameSize,
                          MemoryAllocatorDump::kUnitsBytes,
                          resident_size - allocated_objects_size);
  }

  // Heap profiler dumps.
  if (!heap_profiler_enabled_)
    return true;

  // The dumps of the heap profiler should be created only when heap profiling
  // is enabled (--enable-heap-profiling) AND a DETAILED dump is requested.
  // However, when enabled, the overhead of the heap profiler should always be
  // reported to avoid oscillations of the malloc total in LIGHT dumps.

  tid_dumping_heap_ = PlatformThread::CurrentId();
  // At this point the Insert/RemoveAllocation hooks will ignore this thread.
  // Enclose all the temporary data structures in a scope, so that the heap
  // profiler does not see unbalanced malloc/free calls from these containers.
  {
    TraceEventMemoryOverhead overhead;
    hash_map<AllocationContext, AllocationMetrics> metrics_by_context;
    {
      AutoLock lock(allocation_register_lock_);
      if (allocation_register_) {
        if (args.level_of_detail == MemoryDumpLevelOfDetail::DETAILED) {
          for (const auto& alloc_size : *allocation_register_) {
            AllocationMetrics& metrics = metrics_by_context[alloc_size.context];
            metrics.size += alloc_size.size;
            metrics.count++;
          }
        }
        allocation_register_->EstimateTraceMemoryOverhead(&overhead);
      }
    }  // lock(allocation_register_lock_)
    pmd->DumpHeapUsage(metrics_by_context, overhead, "malloc");
  }
  tid_dumping_heap_ = kInvalidThreadId;

  return true;
}


void MallocDumpProvider::OnHeapProfilingEnabled(bool enabled) {
#if BUILDFLAG(USE_EXPERIMENTAL_ALLOCATOR_SHIM)
  if (enabled) {
    {
      AutoLock lock(allocation_register_lock_);
      allocation_register_.reset(new AllocationRegister());
    }
    allocator::InsertAllocatorDispatch(&g_allocator_hooks);
  } else {
    AutoLock lock(allocation_register_lock_);
    allocation_register_.reset();
    // Insert/RemoveAllocation below will no-op if the register is torn down.
    // Once disabled, heap profiling will not be re-enabled for the lifetime
    // of the process.
  }
#endif
  heap_profiler_enabled_ = enabled;
}

void MallocDumpProvider::InsertAllocation(void* address, size_t size) {
  // CurrentId() can be a slow operation (crbug.com/497226). This apparently
  // redundant condition short-circuits the CurrentId() call when unnecessary.
  if (tid_dumping_heap_ != kInvalidThreadId &&
      tid_dumping_heap_ == PlatformThread::CurrentId())
    return;

  // AllocationContextTracker will return nullptr when called re-entrantly.
  // This happens when GetInstanceForCurrentThread() is called for the first
  // time: the new() inside the tracker re-enters the heap profiler, in which
  // case we just want to bail out early.
  auto* tracker = AllocationContextTracker::GetInstanceForCurrentThread();
  if (!tracker)
    return;
  AllocationContext context = tracker->GetContextSnapshot();

  AutoLock lock(allocation_register_lock_);
  if (!allocation_register_)
    return;

  allocation_register_->Insert(address, size, context);
}

void MallocDumpProvider::RemoveAllocation(void* address) {
  // No re-entrancy is expected here, as none of the calls below should
  // trigger a free() (|allocation_register_| does its own heap management).
  if (tid_dumping_heap_ != kInvalidThreadId &&
      tid_dumping_heap_ == PlatformThread::CurrentId())
    return;
  AutoLock lock(allocation_register_lock_);
  if (!allocation_register_)
    return;
  allocation_register_->Remove(address);
}

}  // namespace trace_event
}  // namespace base