// Copyright 2015 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/trace_event/process_memory_dump.h"

#include <errno.h>

#include <algorithm>
#include <vector>

#include "base/memory/ptr_util.h"
#include "base/process/process_metrics.h"
#include "base/strings/stringprintf.h"
#include "base/trace_event/heap_profiler_heap_dump_writer.h"
#include "base/trace_event/memory_infra_background_whitelist.h"
#include "base/trace_event/process_memory_totals.h"
#include "base/trace_event/trace_event_argument.h"
#include "build/build_config.h"

#if defined(OS_IOS)
#include <mach/vm_page_size.h>
#endif

#if defined(OS_POSIX)
#include <sys/mman.h>
#endif

#if defined(OS_WIN)
#include <Psapi.h>
#endif

namespace base {
namespace trace_event {

namespace {

const char kEdgeTypeOwnership[] = "ownership";

std::string GetSharedGlobalAllocatorDumpName(
    const MemoryAllocatorDumpGuid& guid) {
  return "global/" + guid.ToString();
}

#if defined(COUNT_RESIDENT_BYTES_SUPPORTED)
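// Returns the number of system pages needed to cover |mapped_size| bytes,
// rounding any final partial page up.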
size_t GetSystemPageCount(size_t mapped_size, size_t page_size) {
  return (mapped_size + page_size - 1) / page_size;
}
#endif

}  // namespace

// static
bool ProcessMemoryDump::is_black_hole_non_fatal_for_testing_ = false;

#if defined(COUNT_RESIDENT_BYTES_SUPPORTED)
// static
size_t ProcessMemoryDump::GetSystemPageSize() {
#if defined(OS_IOS)
  // On iOS, getpagesize() returns the user page size, but the kernel page
  // size is needed for sizing the arrays passed to mincore(). Use
  // vm_kernel_page_size as recommended by Apple,
  // https://forums.developer.apple.com/thread/47532/.
  // Refer to http://crbug.com/542671 and Apple rdar://23651782
  return vm_kernel_page_size;
#else
  return base::GetPageSize();
#endif  // defined(OS_IOS)
}

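// Counts how many bytes of the range [start_address, start_address +
// mapped_size) are resident in physical memory. The range is queried chunk
// by chunk, using mincore() on POSIX (including macOS/iOS) and
// QueryWorkingSetEx() on Windows. Returns 0 if any query fails.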
// static
size_t ProcessMemoryDump::CountResidentBytes(void* start_address,
                                             size_t mapped_size) {
  const size_t page_size = GetSystemPageSize();
  const uintptr_t start_pointer = reinterpret_cast<uintptr_t>(start_address);
  DCHECK_EQ(0u, start_pointer % page_size);

  size_t offset = 0;
  size_t total_resident_size = 0;
  bool failure = false;

  // The query functions need an array with one entry per page in the memory
  // segment. To avoid allocating a very large array, the given block of
  // memory is processed in chunks of at most |kMaxChunkSize| bytes.
  const size_t kMaxChunkSize = 8 * 1024 * 1024;
  size_t max_vec_size =
      GetSystemPageCount(std::min(mapped_size, kMaxChunkSize), page_size);
#if defined(OS_MACOSX) || defined(OS_IOS)
  std::unique_ptr<char[]> vec(new char[max_vec_size]);
#elif defined(OS_WIN)
  std::unique_ptr<PSAPI_WORKING_SET_EX_INFORMATION[]> vec(
      new PSAPI_WORKING_SET_EX_INFORMATION[max_vec_size]);
#elif defined(OS_POSIX)
  std::unique_ptr<unsigned char[]> vec(new unsigned char[max_vec_size]);
#endif

  while (offset < mapped_size) {
    uintptr_t chunk_start = (start_pointer + offset);
    const size_t chunk_size = std::min(mapped_size - offset, kMaxChunkSize);
    const size_t page_count = GetSystemPageCount(chunk_size, page_size);
    size_t resident_page_count = 0;

#if defined(OS_MACOSX) || defined(OS_IOS)
    // mincore() on macOS does not fail with EAGAIN.
    failure =
        !!mincore(reinterpret_cast<void*>(chunk_start), chunk_size, vec.get());
    for (size_t i = 0; i < page_count; i++)
      resident_page_count += (vec[i] & MINCORE_INCORE) ? 1 : 0;
#elif defined(OS_WIN)
    for (size_t i = 0; i < page_count; i++) {
      vec[i].VirtualAddress =
          reinterpret_cast<void*>(chunk_start + i * page_size);
    }
    DWORD vec_size = static_cast<DWORD>(
        page_count * sizeof(PSAPI_WORKING_SET_EX_INFORMATION));
    failure = !QueryWorkingSetEx(GetCurrentProcess(), vec.get(), vec_size);

    for (size_t i = 0; i < page_count; i++)
      resident_page_count += vec[i].VirtualAttributes.Valid;
#elif defined(OS_POSIX)
    int error_counter = 0;
    int result = 0;
    // HANDLE_EINTR retries up to 100 times, so follow the same pattern here.
    do {
      result =
          mincore(reinterpret_cast<void*>(chunk_start), chunk_size, vec.get());
    } while (result == -1 && errno == EAGAIN && error_counter++ < 100);
    failure = !!result;

    for (size_t i = 0; i < page_count; i++)
      resident_page_count += vec[i] & 1;
#endif

    if (failure)
      break;

    total_resident_size += resident_page_count * page_size;
    offset += kMaxChunkSize;
  }

  DCHECK(!failure);
  if (failure) {
    total_resident_size = 0;
    LOG(ERROR) << "CountResidentBytes failed. The resident size is invalid";
  }
  return total_resident_size;
}
#endif  // defined(COUNT_RESIDENT_BYTES_SUPPORTED)

ProcessMemoryDump::ProcessMemoryDump(
    scoped_refptr<MemoryDumpSessionState> session_state,
    const MemoryDumpArgs& dump_args)
    : has_process_totals_(false),
      has_process_mmaps_(false),
      session_state_(std::move(session_state)),
      dump_args_(dump_args) {}

ProcessMemoryDump::~ProcessMemoryDump() {}

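// Creates a MemoryAllocatorDump identified by |absolute_name| (and, in the
// second overload, by an explicit |guid|), registers it with this
// ProcessMemoryDump and returns a pointer owned by this object.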
MemoryAllocatorDump* ProcessMemoryDump::CreateAllocatorDump(
    const std::string& absolute_name) {
  return AddAllocatorDumpInternal(
      MakeUnique<MemoryAllocatorDump>(absolute_name, this));
}

MemoryAllocatorDump* ProcessMemoryDump::CreateAllocatorDump(
    const std::string& absolute_name,
    const MemoryAllocatorDumpGuid& guid) {
  return AddAllocatorDumpInternal(
      MakeUnique<MemoryAllocatorDump>(absolute_name, this, guid));
}

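// Takes ownership of |mad| and registers it under its absolute name. In
// background mode, dumps whose names are not whitelisted are redirected to
// the black-hole dump instead of being recorded.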
MemoryAllocatorDump* ProcessMemoryDump::AddAllocatorDumpInternal(
    std::unique_ptr<MemoryAllocatorDump> mad) {
  // In background mode, return the black-hole dump if the dump name is not
  // whitelisted.
  if (dump_args_.level_of_detail == MemoryDumpLevelOfDetail::BACKGROUND &&
      !IsMemoryAllocatorDumpNameWhitelisted(mad->absolute_name())) {
    return GetBlackHoleMad();
  }

  auto insertion_result = allocator_dumps_.insert(
      std::make_pair(mad->absolute_name(), std::move(mad)));
  MemoryAllocatorDump* inserted_mad = insertion_result.first->second.get();
  DCHECK(insertion_result.second) << "Duplicate name: "
                                  << inserted_mad->absolute_name();
  return inserted_mad;
}

MemoryAllocatorDump* ProcessMemoryDump::GetAllocatorDump(
    const std::string& absolute_name) const {
  auto it = allocator_dumps_.find(absolute_name);
  if (it != allocator_dumps_.end())
    return it->second.get();
  if (black_hole_mad_)
    return black_hole_mad_.get();
  return nullptr;
}

MemoryAllocatorDump* ProcessMemoryDump::GetOrCreateAllocatorDump(
    const std::string& absolute_name) {
  MemoryAllocatorDump* mad = GetAllocatorDump(absolute_name);
  return mad ? mad : CreateAllocatorDump(absolute_name);
}

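// Returns the non-weak shared global dump for |guid|, creating it if needed.
// If a weak dump with the same guid already exists, its WEAK flag is cleared
// and it is returned instead.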
MemoryAllocatorDump* ProcessMemoryDump::CreateSharedGlobalAllocatorDump(
    const MemoryAllocatorDumpGuid& guid) {
  // Global dumps are disabled in background mode.
  if (dump_args_.level_of_detail == MemoryDumpLevelOfDetail::BACKGROUND)
    return GetBlackHoleMad();

  // A shared allocator dump can be shared within a process, so a dump with
  // this guid may already exist.
  MemoryAllocatorDump* mad = GetSharedGlobalAllocatorDump(guid);
  if (mad) {
    // The weak flag is cleared because this method should create a non-weak
    // dump.
    mad->clear_flags(MemoryAllocatorDump::Flags::WEAK);
    return mad;
  }
  return CreateAllocatorDump(GetSharedGlobalAllocatorDumpName(guid), guid);
}

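// Like CreateSharedGlobalAllocatorDump(), but a newly created dump is marked
// WEAK and an already existing dump is returned unchanged.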
MemoryAllocatorDump* ProcessMemoryDump::CreateWeakSharedGlobalAllocatorDump(
    const MemoryAllocatorDumpGuid& guid) {
  // Global dumps are disabled in background mode.
  if (dump_args_.level_of_detail == MemoryDumpLevelOfDetail::BACKGROUND)
    return GetBlackHoleMad();

  MemoryAllocatorDump* mad = GetSharedGlobalAllocatorDump(guid);
  if (mad)
    return mad;
  mad = CreateAllocatorDump(GetSharedGlobalAllocatorDumpName(guid), guid);
  mad->set_flags(MemoryAllocatorDump::Flags::WEAK);
  return mad;
}

MemoryAllocatorDump* ProcessMemoryDump::GetSharedGlobalAllocatorDump(
    const MemoryAllocatorDumpGuid& guid) const {
  return GetAllocatorDump(GetSharedGlobalAllocatorDumpName(guid));
}

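// Stores the heap dump exported from |metrics_by_context| under
// |allocator_name| (at most once per allocator) and accounts for the heap
// profiler's own overhead under "tracing/heap_profiler_<allocator_name>".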
void ProcessMemoryDump::DumpHeapUsage(
    const base::hash_map<base::trace_event::AllocationContext,
        base::trace_event::AllocationMetrics>& metrics_by_context,
    base::trace_event::TraceEventMemoryOverhead& overhead,
    const char* allocator_name) {
  if (!metrics_by_context.empty()) {
    DCHECK_EQ(0ul, heap_dumps_.count(allocator_name));
    std::unique_ptr<TracedValue> heap_dump = ExportHeapDump(
        metrics_by_context, *session_state());
    heap_dumps_[allocator_name] = std::move(heap_dump);
  }

  std::string base_name = base::StringPrintf("tracing/heap_profiler_%s",
                                             allocator_name);
  overhead.DumpInto(base_name.c_str(), this);
}

void ProcessMemoryDump::Clear() {
  if (has_process_totals_) {
    process_totals_.Clear();
    has_process_totals_ = false;
  }

  if (has_process_mmaps_) {
    process_mmaps_.Clear();
    has_process_mmaps_ = false;
  }

  allocator_dumps_.clear();
  allocator_dumps_edges_.clear();
  heap_dumps_.clear();
}

void ProcessMemoryDump::TakeAllDumpsFrom(ProcessMemoryDump* other) {
  DCHECK(!other->has_process_totals() && !other->has_process_mmaps());

  // Move the ownership of all MemoryAllocatorDump(s) contained in |other|
  // into this ProcessMemoryDump, checking for duplicates.
  for (auto& it : other->allocator_dumps_)
    AddAllocatorDumpInternal(std::move(it.second));
  other->allocator_dumps_.clear();

  // Move all the edges.
  allocator_dumps_edges_.insert(allocator_dumps_edges_.end(),
                                other->allocator_dumps_edges_.begin(),
                                other->allocator_dumps_edges_.end());
  other->allocator_dumps_edges_.clear();

  for (auto& it : other->heap_dumps_) {
    DCHECK_EQ(0ul, heap_dumps_.count(it.first));
    heap_dumps_.insert(std::make_pair(it.first, std::move(it.second)));
  }
  other->heap_dumps_.clear();
}

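// Serializes this dump into |value|, emitting the "process_totals",
// "process_mmaps", "allocators" and "heaps" dictionaries followed by the
// "allocators_graph" array of ownership edges.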
void ProcessMemoryDump::AsValueInto(TracedValue* value) const {
  if (has_process_totals_) {
    value->BeginDictionary("process_totals");
    process_totals_.AsValueInto(value);
    value->EndDictionary();
  }

  if (has_process_mmaps_) {
    value->BeginDictionary("process_mmaps");
    process_mmaps_.AsValueInto(value);
    value->EndDictionary();
  }

  if (!allocator_dumps_.empty()) {
    value->BeginDictionary("allocators");
    for (const auto& allocator_dump_it : allocator_dumps_)
      allocator_dump_it.second->AsValueInto(value);
    value->EndDictionary();
  }

  if (!heap_dumps_.empty()) {
    value->BeginDictionary("heaps");
    for (const auto& name_and_dump : heap_dumps_)
      value->SetValueWithCopiedName(name_and_dump.first, *name_and_dump.second);
    value->EndDictionary();  // "heaps"
  }

  value->BeginArray("allocators_graph");
  for (const MemoryAllocatorDumpEdge& edge : allocator_dumps_edges_) {
    value->BeginDictionary();
    value->SetString("source", edge.source.ToString());
    value->SetString("target", edge.target.ToString());
    value->SetInteger("importance", edge.importance);
    value->SetString("type", edge.type);
    value->EndDictionary();
  }
  value->EndArray();
}

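// Records an ownership edge from |source| to |target|. |importance| is used
// to arbitrate between multiple dumps that claim ownership of the same
// target.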
void ProcessMemoryDump::AddOwnershipEdge(const MemoryAllocatorDumpGuid& source,
                                         const MemoryAllocatorDumpGuid& target,
                                         int importance) {
  allocator_dumps_edges_.push_back(
      {source, target, importance, kEdgeTypeOwnership});
}

void ProcessMemoryDump::AddOwnershipEdge(
    const MemoryAllocatorDumpGuid& source,
    const MemoryAllocatorDumpGuid& target) {
  AddOwnershipEdge(source, target, 0 /* importance */);
}

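// Marks |source| as suballocated from |target_node_name| by creating a child
// dump named "<target_node_name>/__<source guid>" and adding an ownership
// edge from |source| to that child.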
void ProcessMemoryDump::AddSuballocation(const MemoryAllocatorDumpGuid& source,
                                         const std::string& target_node_name) {
  // Do not create new dumps for suballocations in background mode.
  if (dump_args_.level_of_detail == MemoryDumpLevelOfDetail::BACKGROUND)
    return;

  std::string child_mad_name = target_node_name + "/__" + source.ToString();
  MemoryAllocatorDump* target_child_mad = CreateAllocatorDump(child_mad_name);
  AddOwnershipEdge(source, target_child_mad->guid());
}

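// Lazily creates the singleton "discarded" dump that absorbs writes to
// dumps suppressed in background mode.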
MemoryAllocatorDump* ProcessMemoryDump::GetBlackHoleMad() {
  DCHECK(is_black_hole_non_fatal_for_testing_);
  if (!black_hole_mad_)
    black_hole_mad_.reset(new MemoryAllocatorDump("discarded", this));
  return black_hole_mad_.get();
}

}  // namespace trace_event
}  // namespace base