//=-- lsan_common.cc ------------------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of LeakSanitizer.
// Implementation of common leak checking functionality.
//
//===----------------------------------------------------------------------===//

#include "lsan_common.h"

#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_flag_parser.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_procmaps.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_stacktrace.h"
#include "sanitizer_common/sanitizer_suppressions.h"
#include "sanitizer_common/sanitizer_report_decorator.h"
#include "sanitizer_common/sanitizer_tls_get_addr.h"

#if CAN_SANITIZE_LEAKS
namespace __lsan {

// This mutex is used to prevent races between DoLeakCheck and IgnoreObject,
// and also to protect the global list of root regions.
BlockingMutex global_mutex(LINKER_INITIALIZED);

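// Per-thread leak-check disable depth, driven by the __lsan_disable() /
// __lsan_enable() interface below. While the depth is positive, the allocator
// tags new chunks in this thread kIgnored, excluding them from leak reports.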
THREADLOCAL int disable_counter;
bool DisabledInThisThread() { return disable_counter > 0; }
void DisableInThisThread() { disable_counter++; }
void EnableInThisThread() {
  if (!disable_counter && common_flags()->detect_leaks) {
    Report("Unmatched call to __lsan_enable().\n");
    Die();
  }
  disable_counter--;
}

Flags lsan_flags;

void Flags::SetDefaults() {
#define LSAN_FLAG(Type, Name, DefaultValue, Description) Name = DefaultValue;
#include "lsan_flags.inc"
#undef LSAN_FLAG
}

void RegisterLsanFlags(FlagParser *parser, Flags *f) {
#define LSAN_FLAG(Type, Name, DefaultValue, Description) \
  RegisterFlag(parser, #Name, Description, &f->Name);
#include "lsan_flags.inc"
#undef LSAN_FLAG
}
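
// Every flag lives in lsan_flags.inc as an LSAN_FLAG(Type, Name, DefaultValue,
// Description) entry; expanding that list here assigns each flag its default
// (SetDefaults) and registers it with the parser (RegisterLsanFlags). E.g. the
// use_registers flag consulted in ProcessThreads below is defined this way.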

#define LOG_POINTERS(...)                           \
  do {                                              \
    if (flags()->log_pointers) Report(__VA_ARGS__); \
  } while (0)

#define LOG_THREADS(...)                           \
  do {                                             \
    if (flags()->log_threads) Report(__VA_ARGS__); \
  } while (0)

ALIGNED(64) static char suppression_placeholder[sizeof(SuppressionContext)];
static SuppressionContext *suppression_ctx = nullptr;
static const char kSuppressionLeak[] = "leak";
static const char *kSuppressionTypes[] = { kSuppressionLeak };

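// Suppression files (the "suppressions" flag) consist of lines of the form
// "leak:<pattern>"; a leak is suppressed when <pattern> matches the module,
// function or source file name of a frame in its allocation stack (see
// GetSuppressionForAddr below).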
void InitializeSuppressions() {
  CHECK_EQ(nullptr, suppression_ctx);
  suppression_ctx = new (suppression_placeholder) // NOLINT
      SuppressionContext(kSuppressionTypes, ARRAY_SIZE(kSuppressionTypes));
  suppression_ctx->ParseFromFile(flags()->suppressions);
  if (&__lsan_default_suppressions)
    suppression_ctx->Parse(__lsan_default_suppressions());
}

static SuppressionContext *GetSuppressionContext() {
  CHECK(suppression_ctx);
  return suppression_ctx;
}

struct RootRegion {
  const void *begin;
  uptr size;
};

InternalMmapVector<RootRegion> *root_regions;

void InitializeRootRegions() {
  CHECK(!root_regions);
  ALIGNED(64) static char placeholder[sizeof(InternalMmapVector<RootRegion>)];
  root_regions = new (placeholder) InternalMmapVector<RootRegion>(1);
}

void InitCommonLsan() {
  InitializeRootRegions();
  if (common_flags()->detect_leaks) {
    // Initialization which can fail or print warnings should only be done if
    // LSan is actually enabled.
    InitializeSuppressions();
    InitializePlatformSpecificModules();
  }
}

class Decorator: public __sanitizer::SanitizerCommonDecorator {
 public:
  Decorator() : SanitizerCommonDecorator() { }
  const char *Error() { return Red(); }
  const char *Leak() { return Blue(); }
  const char *End() { return Default(); }
};

static inline bool CanBeAHeapPointer(uptr p) {
  // Since our heap is located in mmap-ed memory, we can assume a sensible lower
  // bound on heap addresses.
  const uptr kMinAddress = 4 * 4096;
  if (p < kMinAddress) return false;
#if defined(__x86_64__)
  // Accept only canonical form user-space addresses.
  return ((p >> 47) == 0);
#elif defined(__mips64)
  return ((p >> 40) == 0);
#elif defined(__aarch64__)
  unsigned runtimeVMA =
      (MostSignificantSetBitIndex(GET_CURRENT_FRAME()) + 1);
  return ((p >> runtimeVMA) == 0);
#else
  return true;
#endif
}

// Scans the memory range, looking for byte patterns that point into allocator
// chunks. Marks those chunks with |tag| and adds them to |frontier|.
// There are two usage modes for this function: finding reachable chunks
// (|tag| = kReachable) and finding indirectly leaked chunks
// (|tag| = kIndirectlyLeaked). In the second case, there's no flood fill,
// so |frontier| = 0.
void ScanRangeForPointers(uptr begin, uptr end,
                          Frontier *frontier,
                          const char *region_type, ChunkTag tag) {
  CHECK(tag == kReachable || tag == kIndirectlyLeaked);
  const uptr alignment = flags()->pointer_alignment();
  LOG_POINTERS("Scanning %s range %p-%p.\n", region_type, begin, end);
  uptr pp = begin;
  if (pp % alignment)
    pp = pp + alignment - pp % alignment;
  for (; pp + sizeof(void *) <= end; pp += alignment) {  // NOLINT
    void *p = *reinterpret_cast<void **>(pp);
    if (!CanBeAHeapPointer(reinterpret_cast<uptr>(p))) continue;
    uptr chunk = PointsIntoChunk(p);
    if (!chunk) continue;
    // Pointers to self don't count. This matters when tag == kIndirectlyLeaked.
    if (chunk == begin) continue;
    LsanMetadata m(chunk);
    if (m.tag() == kReachable || m.tag() == kIgnored) continue;

    // Do this check relatively late so we can log only the interesting cases.
    if (!flags()->use_poisoned && WordIsPoisoned(pp)) {
      LOG_POINTERS(
          "%p is poisoned: ignoring %p pointing into chunk %p-%p of size "
          "%zu.\n",
          pp, p, chunk, chunk + m.requested_size(), m.requested_size());
      continue;
    }

    m.set_tag(tag);
    LOG_POINTERS("%p: found %p pointing into chunk %p-%p of size %zu.\n", pp, p,
                 chunk, chunk + m.requested_size(), m.requested_size());
    if (frontier)
      frontier->push_back(chunk);
  }
}

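// Callback for ForEachExtraStackRange: scans an extra stack range reported by
// the embedding tool (e.g. ASan's fake stack frames when LSan runs on top of
// ASan, hence the "FAKE STACK" region name).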
void ForEachExtraStackRangeCb(uptr begin, uptr end, void* arg) {
  Frontier *frontier = reinterpret_cast<Frontier *>(arg);
  ScanRangeForPointers(begin, end, frontier, "FAKE STACK", kReachable);
}

// Scans thread data (stacks and TLS) for heap pointers.
static void ProcessThreads(SuspendedThreadsList const &suspended_threads,
                           Frontier *frontier) {
  InternalScopedBuffer<uptr> registers(SuspendedThreadsList::RegisterCount());
  uptr registers_begin = reinterpret_cast<uptr>(registers.data());
  uptr registers_end = registers_begin + registers.size();
  for (uptr i = 0; i < suspended_threads.thread_count(); i++) {
    uptr os_id = static_cast<uptr>(suspended_threads.GetThreadID(i));
    LOG_THREADS("Processing thread %d.\n", os_id);
    uptr stack_begin, stack_end, tls_begin, tls_end, cache_begin, cache_end;
    DTLS *dtls;
    bool thread_found = GetThreadRangesLocked(os_id, &stack_begin, &stack_end,
                                              &tls_begin, &tls_end,
                                              &cache_begin, &cache_end, &dtls);
    if (!thread_found) {
      // If a thread can't be found in the thread registry, it's probably in the
      // process of destruction. Log this event and move on.
      LOG_THREADS("Thread %d not found in registry.\n", os_id);
      continue;
    }
    uptr sp;
    bool have_registers =
        (suspended_threads.GetRegistersAndSP(i, registers.data(), &sp) == 0);
    if (!have_registers) {
      Report("Unable to get registers from thread %d.\n", os_id);
      // If unable to get SP, consider the entire stack to be reachable.
      sp = stack_begin;
    }

    if (flags()->use_registers && have_registers)
      ScanRangeForPointers(registers_begin, registers_end, frontier,
                           "REGISTERS", kReachable);

    if (flags()->use_stacks) {
      LOG_THREADS("Stack at %p-%p (SP = %p).\n", stack_begin, stack_end, sp);
      if (sp < stack_begin || sp >= stack_end) {
        // SP is outside the recorded stack range (e.g. the thread is running a
        // signal handler on an alternate stack, or swapcontext was used).
        // Again, consider the entire stack range to be reachable.
        LOG_THREADS("WARNING: stack pointer not in stack range.\n");
        uptr page_size = GetPageSizeCached();
        int skipped = 0;
        while (stack_begin < stack_end &&
               !IsAccessibleMemoryRange(stack_begin, 1)) {
          skipped++;
          stack_begin += page_size;
        }
        LOG_THREADS("Skipped %d guard page(s) to obtain stack %p-%p.\n",
                    skipped, stack_begin, stack_end);
      } else {
        // Shrink the stack range to ignore out-of-scope values.
        stack_begin = sp;
      }
      ScanRangeForPointers(stack_begin, stack_end, frontier, "STACK",
                           kReachable);
      ForEachExtraStackRange(os_id, ForEachExtraStackRangeCb, frontier);
    }

    if (flags()->use_tls) {
      LOG_THREADS("TLS at %p-%p.\n", tls_begin, tls_end);
      if (cache_begin == cache_end) {
        ScanRangeForPointers(tls_begin, tls_end, frontier, "TLS", kReachable);
      } else {
        // Because LSan should not be loaded with dlopen(), we can assume that
        // the allocator cache will be part of the static TLS image.
        CHECK_LE(tls_begin, cache_begin);
        CHECK_GE(tls_end, cache_end);
        if (tls_begin < cache_begin)
          ScanRangeForPointers(tls_begin, cache_begin, frontier, "TLS",
                               kReachable);
        if (tls_end > cache_end)
          ScanRangeForPointers(cache_end, tls_end, frontier, "TLS", kReachable);
      }
      if (dtls) {
        for (uptr j = 0; j < dtls->dtv_size; ++j) {
          uptr dtls_beg = dtls->dtv[j].beg;
          uptr dtls_end = dtls_beg + dtls->dtv[j].size;
          if (dtls_beg < dtls_end) {
            LOG_THREADS("DTLS %zu at %p-%p.\n", j, dtls_beg, dtls_end);
            ScanRangeForPointers(dtls_beg, dtls_end, frontier, "DTLS",
                                 kReachable);
          }
        }
      }
    }
  }
}

static void ProcessRootRegion(Frontier *frontier, uptr root_begin,
                              uptr root_end) {
  MemoryMappingLayout proc_maps(/*cache_enabled*/ true);
  uptr begin, end, prot;
  while (proc_maps.Next(&begin, &end,
                        /*offset*/ nullptr, /*filename*/ nullptr,
                        /*filename_size*/ 0, &prot)) {
    uptr intersection_begin = Max(root_begin, begin);
    uptr intersection_end = Min(end, root_end);
    if (intersection_begin >= intersection_end) continue;
    bool is_readable = prot & MemoryMappingLayout::kProtectionRead;
    LOG_POINTERS("Root region %p-%p intersects with mapped region %p-%p (%s)\n",
                 root_begin, root_end, begin, end,
                 is_readable ? "readable" : "unreadable");
    if (is_readable)
      ScanRangeForPointers(intersection_begin, intersection_end, frontier,
                           "ROOT", kReachable);
  }
}

// Scans root regions for heap pointers.
static void ProcessRootRegions(Frontier *frontier) {
  if (!flags()->use_root_regions) return;
  CHECK(root_regions);
  for (uptr i = 0; i < root_regions->size(); i++) {
    RootRegion region = (*root_regions)[i];
    uptr begin_addr = reinterpret_cast<uptr>(region.begin);
    ProcessRootRegion(frontier, begin_addr, begin_addr + region.size);
  }
}

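// Flood fill: repeatedly pops a chunk off |frontier| and scans it for pointers
// to other chunks; ScanRangeForPointers tags those and pushes them onto the
// frontier in turn, until the worklist is empty.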
static void FloodFillTag(Frontier *frontier, ChunkTag tag) {
  while (frontier->size()) {
    uptr next_chunk = frontier->back();
    frontier->pop_back();
    LsanMetadata m(next_chunk);
    ScanRangeForPointers(next_chunk, next_chunk + m.requested_size(), frontier,
                         "HEAP", tag);
  }
}

// ForEachChunk callback. If the chunk is marked as leaked, marks all chunks
// which are reachable from it as indirectly leaked.
static void MarkIndirectlyLeakedCb(uptr chunk, void *arg) {
  chunk = GetUserBegin(chunk);
  LsanMetadata m(chunk);
  if (m.allocated() && m.tag() != kReachable) {
    ScanRangeForPointers(chunk, chunk + m.requested_size(),
                         /* frontier */ nullptr, "HEAP", kIndirectlyLeaked);
  }
}

// ForEachChunk callback. If the chunk is marked as ignored, adds its address
// to the frontier.
static void CollectIgnoredCb(uptr chunk, void *arg) {
  CHECK(arg);
  chunk = GetUserBegin(chunk);
  LsanMetadata m(chunk);
  if (m.allocated() && m.tag() == kIgnored) {
    LOG_POINTERS("Ignored: chunk %p-%p of size %zu.\n",
                 chunk, chunk + m.requested_size(), m.requested_size());
    reinterpret_cast<Frontier *>(arg)->push_back(chunk);
  }
}

// Sets the appropriate tag on each chunk: chunks transitively reachable from
// the roots (ignored chunks, global regions, thread stacks/registers/TLS and
// user-registered root regions) are tagged kReachable; chunks reachable only
// from leaked chunks are tagged kIndirectlyLeaked; the rest keep their default
// kDirectlyLeaked tag.
static void ClassifyAllChunks(SuspendedThreadsList const &suspended_threads) {
  // Holds the flood fill frontier.
  Frontier frontier(1);

  ForEachChunk(CollectIgnoredCb, &frontier);
  ProcessGlobalRegions(&frontier);
  ProcessThreads(suspended_threads, &frontier);
  ProcessRootRegions(&frontier);
  FloodFillTag(&frontier, kReachable);

  // The check here is relatively expensive, so we do this in a separate flood
  // fill. That way we can skip the check for chunks that are reachable
  // otherwise.
  LOG_POINTERS("Processing platform-specific allocations.\n");
  CHECK_EQ(0, frontier.size());
  ProcessPlatformSpecificAllocations(&frontier);
  FloodFillTag(&frontier, kReachable);

  // Iterate over leaked chunks and mark those that are reachable from other
  // leaked chunks.
  LOG_POINTERS("Scanning leaked chunks.\n");
  ForEachChunk(MarkIndirectlyLeakedCb, nullptr);
}

// ForEachChunk callback. Resets the tags to pre-leak-check state.
static void ResetTagsCb(uptr chunk, void *arg) {
  (void)arg;
  chunk = GetUserBegin(chunk);
  LsanMetadata m(chunk);
  if (m.allocated() && m.tag() != kIgnored)
    m.set_tag(kDirectlyLeaked);
}

static void PrintStackTraceById(u32 stack_trace_id) {
  CHECK(stack_trace_id);
  StackDepotGet(stack_trace_id).Print();
}

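// When the "resolution" flag is positive, allocation stacks are truncated to
// that many frames before deduplication, so leaks whose stacks share a prefix
// of that length are merged into a single report entry.
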
// ForEachChunk callback. Aggregates information about unreachable chunks into
// a LeakReport.
static void CollectLeaksCb(uptr chunk, void *arg) {
  CHECK(arg);
  LeakReport *leak_report = reinterpret_cast<LeakReport *>(arg);
  chunk = GetUserBegin(chunk);
  LsanMetadata m(chunk);
  if (!m.allocated()) return;
  if (m.tag() == kDirectlyLeaked || m.tag() == kIndirectlyLeaked) {
    u32 resolution = flags()->resolution;
    u32 stack_trace_id = 0;
    if (resolution > 0) {
      StackTrace stack = StackDepotGet(m.stack_trace_id());
      stack.size = Min(stack.size, resolution);
      stack_trace_id = StackDepotPut(stack);
    } else {
      stack_trace_id = m.stack_trace_id();
    }
    leak_report->AddLeakedChunk(chunk, stack_trace_id, m.requested_size(),
                                m.tag());
  }
}

static void PrintMatchedSuppressions() {
  InternalMmapVector<Suppression *> matched(1);
  GetSuppressionContext()->GetMatched(&matched);
  if (!matched.size())
    return;
  const char *line = "-----------------------------------------------------";
  Printf("%s\n", line);
  Printf("Suppressions used:\n");
  Printf("  count      bytes template\n");
  for (uptr i = 0; i < matched.size(); i++)
    Printf("%7zu %10zu %s\n", static_cast<uptr>(atomic_load_relaxed(
        &matched[i]->hit_count)), matched[i]->weight, matched[i]->templ);
  Printf("%s\n\n", line);
}

struct CheckForLeaksParam {
  bool success;
  LeakReport leak_report;
};

static void CheckForLeaksCallback(const SuspendedThreadsList &suspended_threads,
                                  void *arg) {
  CheckForLeaksParam *param = reinterpret_cast<CheckForLeaksParam *>(arg);
  CHECK(param);
  CHECK(!param->success);
  ClassifyAllChunks(suspended_threads);
  ForEachChunk(CollectLeaksCb, &param->leak_report);
  // Clean up for subsequent leak checks. This assumes we did not overwrite any
  // kIgnored tags.
  ForEachChunk(ResetTagsCb, nullptr);
  param->success = true;
}

static bool CheckForLeaks() {
  if (&__lsan_is_turned_off && __lsan_is_turned_off())
    return false;
  EnsureMainThreadIDIsCorrect();
  CheckForLeaksParam param;
  param.success = false;
  LockThreadRegistry();
  LockAllocator();
  DoStopTheWorld(CheckForLeaksCallback, &param);
  UnlockAllocator();
  UnlockThreadRegistry();

  if (!param.success) {
    Report("LeakSanitizer has encountered a fatal error.\n");
    Report(
        "HINT: For debugging, try setting environment variable "
        "LSAN_OPTIONS=verbosity=1:log_threads=1\n");
    Die();
  }
  param.leak_report.ApplySuppressions();
  uptr unsuppressed_count = param.leak_report.UnsuppressedLeakCount();
  if (unsuppressed_count > 0) {
    Decorator d;
    Printf("\n"
           "================================================================="
           "\n");
    Printf("%s", d.Error());
    Report("ERROR: LeakSanitizer: detected memory leaks\n");
    Printf("%s", d.End());
    param.leak_report.ReportTopLeaks(flags()->max_leaks);
  }
  if (common_flags()->print_suppressions)
    PrintMatchedSuppressions();
  if (unsuppressed_count > 0) {
    param.leak_report.PrintSummary();
    return true;
  }
  return false;
}

void DoLeakCheck() {
  BlockingMutexLock l(&global_mutex);
  static bool already_done;
  if (already_done) return;
  already_done = true;
  bool have_leaks = CheckForLeaks();
  if (!have_leaks) {
    return;
  }
  if (common_flags()->exitcode) {
    Die();
  }
}

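// Unlike DoLeakCheck, which runs at most once per process, this variant may be
// called any number of times; it reports any leaks found and returns a status
// instead of terminating the process.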
static int DoRecoverableLeakCheck() {
  BlockingMutexLock l(&global_mutex);
  bool have_leaks = CheckForLeaks();
  return have_leaks ? 1 : 0;
}

static Suppression *GetSuppressionForAddr(uptr addr) {
  Suppression *s = nullptr;

  // Suppress by module name.
  SuppressionContext *suppressions = GetSuppressionContext();
  if (const char *module_name =
          Symbolizer::GetOrInit()->GetModuleNameForPc(addr))
    if (suppressions->Match(module_name, kSuppressionLeak, &s))
      return s;

  // Suppress by file or function name.
  SymbolizedStack *frames = Symbolizer::GetOrInit()->SymbolizePC(addr);
  for (SymbolizedStack *cur = frames; cur; cur = cur->next) {
    if (suppressions->Match(cur->info.function, kSuppressionLeak, &s) ||
        suppressions->Match(cur->info.file, kSuppressionLeak, &s)) {
      break;
    }
  }
  frames->ClearAll();
  return s;
}

static Suppression *GetSuppressionForStack(u32 stack_trace_id) {
  StackTrace stack = StackDepotGet(stack_trace_id);
  for (uptr i = 0; i < stack.size; i++) {
    Suppression *s = GetSuppressionForAddr(
        StackTrace::GetPreviousInstructionPc(stack.trace[i]));
    if (s) return s;
  }
  return nullptr;
}

///// LeakReport implementation. /////

// A hard limit on the number of distinct leaks, to avoid quadratic complexity
// in LeakReport::AddLeakedChunk(). We don't expect to ever see this many leaks
// in real-world applications.
// FIXME: Get rid of this limit by changing the implementation of LeakReport to
// use a hash table.
const uptr kMaxLeaksConsidered = 5000;

void LeakReport::AddLeakedChunk(uptr chunk, u32 stack_trace_id,
                                uptr leaked_size, ChunkTag tag) {
  CHECK(tag == kDirectlyLeaked || tag == kIndirectlyLeaked);
  bool is_directly_leaked = (tag == kDirectlyLeaked);
  uptr i;
  for (i = 0; i < leaks_.size(); i++) {
    if (leaks_[i].stack_trace_id == stack_trace_id &&
        leaks_[i].is_directly_leaked == is_directly_leaked) {
      leaks_[i].hit_count++;
      leaks_[i].total_size += leaked_size;
      break;
    }
  }
  if (i == leaks_.size()) {
    if (leaks_.size() == kMaxLeaksConsidered) return;
    Leak leak = { next_id_++, /* hit_count */ 1, leaked_size, stack_trace_id,
                  is_directly_leaked, /* is_suppressed */ false };
    leaks_.push_back(leak);
  }
  if (flags()->report_objects) {
    LeakedObject obj = {leaks_[i].id, chunk, leaked_size};
    leaked_objects_.push_back(obj);
  }
}

static bool LeakComparator(const Leak &leak1, const Leak &leak2) {
  if (leak1.is_directly_leaked == leak2.is_directly_leaked)
    return leak1.total_size > leak2.total_size;
  else
    return leak1.is_directly_leaked;
}

void LeakReport::ReportTopLeaks(uptr num_leaks_to_report) {
  CHECK(leaks_.size() <= kMaxLeaksConsidered);
  Printf("\n");
  if (leaks_.size() == kMaxLeaksConsidered)
    Printf("Too many leaks! Only the first %zu leaks encountered will be "
           "reported.\n",
           kMaxLeaksConsidered);

  uptr unsuppressed_count = UnsuppressedLeakCount();
  if (num_leaks_to_report > 0 && num_leaks_to_report < unsuppressed_count)
    Printf("The %zu top leak(s):\n", num_leaks_to_report);
  InternalSort(&leaks_, leaks_.size(), LeakComparator);
  uptr leaks_reported = 0;
  for (uptr i = 0; i < leaks_.size(); i++) {
    if (leaks_[i].is_suppressed) continue;
    PrintReportForLeak(i);
    leaks_reported++;
    if (leaks_reported == num_leaks_to_report) break;
  }
  if (leaks_reported < unsuppressed_count) {
    uptr remaining = unsuppressed_count - leaks_reported;
    Printf("Omitting %zu more leak(s).\n", remaining);
  }
}

void LeakReport::PrintReportForLeak(uptr index) {
  Decorator d;
  Printf("%s", d.Leak());
  Printf("%s leak of %zu byte(s) in %zu object(s) allocated from:\n",
         leaks_[index].is_directly_leaked ? "Direct" : "Indirect",
         leaks_[index].total_size, leaks_[index].hit_count);
  Printf("%s", d.End());

  PrintStackTraceById(leaks_[index].stack_trace_id);

  if (flags()->report_objects) {
    Printf("Objects leaked above:\n");
    PrintLeakedObjectsForLeak(index);
    Printf("\n");
  }
}

void LeakReport::PrintLeakedObjectsForLeak(uptr index) {
  u32 leak_id = leaks_[index].id;
  for (uptr j = 0; j < leaked_objects_.size(); j++) {
    if (leaked_objects_[j].leak_id == leak_id)
      Printf("%p (%zu bytes)\n", leaked_objects_[j].addr,
             leaked_objects_[j].size);
  }
}

void LeakReport::PrintSummary() {
  CHECK(leaks_.size() <= kMaxLeaksConsidered);
  uptr bytes = 0, allocations = 0;
  for (uptr i = 0; i < leaks_.size(); i++) {
    if (leaks_[i].is_suppressed) continue;
    bytes += leaks_[i].total_size;
    allocations += leaks_[i].hit_count;
  }
  InternalScopedString summary(kMaxSummaryLength);
  summary.append("%zu byte(s) leaked in %zu allocation(s).", bytes,
                 allocations);
  ReportErrorSummary(summary.data());
}

void LeakReport::ApplySuppressions() {
  for (uptr i = 0; i < leaks_.size(); i++) {
    Suppression *s = GetSuppressionForStack(leaks_[i].stack_trace_id);
    if (s) {
      s->weight += leaks_[i].total_size;
      atomic_store_relaxed(&s->hit_count, atomic_load_relaxed(&s->hit_count) +
                                              leaks_[i].hit_count);
      leaks_[i].is_suppressed = true;
    }
  }
}

uptr LeakReport::UnsuppressedLeakCount() {
  uptr result = 0;
  for (uptr i = 0; i < leaks_.size(); i++)
    if (!leaks_[i].is_suppressed) result++;
  return result;
}

} // namespace __lsan
#else // CAN_SANITIZE_LEAKS
namespace __lsan {
void InitCommonLsan() { }
void DoLeakCheck() { }
void DisableInThisThread() { }
void EnableInThisThread() { }
}
#endif // CAN_SANITIZE_LEAKS

using namespace __lsan;  // NOLINT

extern "C" {
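// Example (illustrative): suppress reporting of an intentional leak from user
// code:
//   void *p = malloc(16);
//   __lsan_ignore_object(p);  // tags the chunk kIgnored; it is never reported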
SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_ignore_object(const void *p) {
#if CAN_SANITIZE_LEAKS
  if (!common_flags()->detect_leaks)
    return;
  // Cannot use PointsIntoChunk or LsanMetadata here, since the allocator is not
  // locked.
  BlockingMutexLock l(&global_mutex);
  IgnoreObjectResult res = IgnoreObjectLocked(p);
  if (res == kIgnoreObjectInvalid)
    VReport(1, "__lsan_ignore_object(): no heap object found at %p\n", p);
  if (res == kIgnoreObjectAlreadyIgnored)
    VReport(1, "__lsan_ignore_object(): "
               "heap object at %p is already being ignored\n", p);
  if (res == kIgnoreObjectSuccess)
    VReport(1, "__lsan_ignore_object(): ignoring heap object at %p\n", p);
#endif // CAN_SANITIZE_LEAKS
}

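// Example (illustrative): make LSan treat pointers stored in a custom mapping
// as roots, so the objects they reference are not reported as leaked:
//   __lsan_register_root_region(arena, arena_size);
//   ...
//   __lsan_unregister_root_region(arena, arena_size);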
SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_register_root_region(const void *begin, uptr size) {
#if CAN_SANITIZE_LEAKS
  BlockingMutexLock l(&global_mutex);
  CHECK(root_regions);
  RootRegion region = {begin, size};
  root_regions->push_back(region);
  VReport(1, "Registered root region at %p of size %zu\n", begin, size);
#endif // CAN_SANITIZE_LEAKS
}

SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_unregister_root_region(const void *begin, uptr size) {
#if CAN_SANITIZE_LEAKS
  BlockingMutexLock l(&global_mutex);
  CHECK(root_regions);
  bool removed = false;
  for (uptr i = 0; i < root_regions->size(); i++) {
    RootRegion region = (*root_regions)[i];
    if (region.begin == begin && region.size == size) {
      removed = true;
      uptr last_index = root_regions->size() - 1;
      (*root_regions)[i] = (*root_regions)[last_index];
      root_regions->pop_back();
      VReport(1, "Unregistered root region at %p of size %zu\n", begin, size);
      break;
    }
  }
  if (!removed) {
    Report(
        "__lsan_unregister_root_region(): region at %p of size %zu has not "
        "been registered.\n",
        begin, size);
    Die();
  }
#endif // CAN_SANITIZE_LEAKS
}

SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_disable() {
#if CAN_SANITIZE_LEAKS
  __lsan::DisableInThisThread();
#endif
}

SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_enable() {
#if CAN_SANITIZE_LEAKS
  __lsan::EnableInThisThread();
#endif
}

SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_do_leak_check() {
#if CAN_SANITIZE_LEAKS
  if (common_flags()->detect_leaks)
    __lsan::DoLeakCheck();
#endif // CAN_SANITIZE_LEAKS
}

SANITIZER_INTERFACE_ATTRIBUTE
int __lsan_do_recoverable_leak_check() {
#if CAN_SANITIZE_LEAKS
  if (common_flags()->detect_leaks)
    return __lsan::DoRecoverableLeakCheck();
#endif // CAN_SANITIZE_LEAKS
  return 0;
}

#if !SANITIZER_SUPPORTS_WEAK_HOOKS
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
int __lsan_is_turned_off() {
  return 0;
}
#endif
} // extern "C"