lsan_common.cc revision 7847d77b246635211c3bf465421d49d7af5226c1
//=-- lsan_common.cc ------------------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of LeakSanitizer.
// Implementation of common leak checking functionality.
//
//===----------------------------------------------------------------------===//

#include "lsan_common.h"

#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_report_decorator.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_stacktrace.h"
#include "sanitizer_common/sanitizer_stoptheworld.h"
#include "sanitizer_common/sanitizer_suppressions.h"

#if CAN_SANITIZE_LEAKS
namespace __lsan {

// This mutex is used to prevent races between DoLeakCheck and
// IgnoreObjectLocked.
BlockingMutex global_mutex(LINKER_INITIALIZED);

THREADLOCAL int disable_counter;
bool DisabledInThisThread() { return disable_counter > 0; }

Flags lsan_flags;

static void InitializeFlags() {
  Flags *f = flags();
  // Default values.
  f->report_objects = false;
  f->resolution = 0;
  f->max_leaks = 0;
  f->exitcode = 23;
  f->suppressions = "";
  f->use_registers = true;
  f->use_globals = true;
  f->use_stacks = true;
  f->use_tls = true;
  f->use_unaligned = false;
  f->verbosity = 0;
  f->log_pointers = false;
  f->log_threads = false;

  const char *options = GetEnv("LSAN_OPTIONS");
  if (options) {
    ParseFlag(options, &f->use_registers, "use_registers");
    ParseFlag(options, &f->use_globals, "use_globals");
    ParseFlag(options, &f->use_stacks, "use_stacks");
    ParseFlag(options, &f->use_tls, "use_tls");
    ParseFlag(options, &f->use_unaligned, "use_unaligned");
    ParseFlag(options, &f->report_objects, "report_objects");
    ParseFlag(options, &f->resolution, "resolution");
    CHECK_GE(f->resolution, 0);
    ParseFlag(options, &f->max_leaks, "max_leaks");
    CHECK_GE(f->max_leaks, 0);
    ParseFlag(options, &f->verbosity, "verbosity");
    ParseFlag(options, &f->log_pointers, "log_pointers");
    ParseFlag(options, &f->log_threads, "log_threads");
    ParseFlag(options, &f->exitcode, "exitcode");
    ParseFlag(options, &f->suppressions, "suppressions");
  }
}
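
// LSAN_OPTIONS is parsed as a colon-separated list of name=value pairs. A
// hypothetical invocation that reports individual leaked objects and loads a
// suppressions file (names below are illustrative, not from this file):
//   LSAN_OPTIONS="report_objects=1:max_leaks=10:suppressions=my.supp" ./a.out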

SuppressionContext *suppression_ctx;

void InitializeSuppressions() {
  CHECK(!suppression_ctx);
  ALIGNED(64) static char placeholder_[sizeof(SuppressionContext)];
  suppression_ctx = new (placeholder_) SuppressionContext;
  char *suppressions_from_file;
  uptr buffer_size;
  if (ReadFileToBuffer(flags()->suppressions, &suppressions_from_file,
                       &buffer_size, 1 << 26 /* max_len */))
    suppression_ctx->Parse(suppressions_from_file);
  if (flags()->suppressions[0] && !buffer_size) {
    Printf("LeakSanitizer: failed to read suppressions file '%s'\n",
           flags()->suppressions);
    Die();
  }
  if (&__lsan_default_suppressions)
    suppression_ctx->Parse(__lsan_default_suppressions());
}
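
// A suppressions file holds one rule per line; leak rules use the "leak:"
// prefix, and the pattern is matched against the function, source file, and
// module names of the frames in the allocation stack. A sketch (hypothetical
// names):
//   leak:libsomething.so
//   leak:known_leaky_function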

void InitCommonLsan() {
  InitializeFlags();
  InitializeSuppressions();
  InitializePlatformSpecificModules();
}

class Decorator : private __sanitizer::AnsiColorDecorator {
 public:
  Decorator() : __sanitizer::AnsiColorDecorator(PrintsToTtyCached()) { }
  const char *Error() { return Red(); }
  const char *Leak() { return Blue(); }
  const char *End() { return Default(); }
};

static inline bool CanBeAHeapPointer(uptr p) {
  // Since our heap is located in mmap-ed memory, we can assume a sensible lower
  // bound on heap addresses.
  const uptr kMinAddress = 4 * 4096;
  if (p < kMinAddress) return false;
#ifdef __x86_64__
  // Accept only canonical-form user-space addresses.
  return ((p >> 47) == 0);
#else
  return true;
#endif
}
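
// On x86-64 with 48-bit virtual addresses, canonical user-space pointers have
// bits 47..63 clear, which is exactly what (p >> 47) == 0 tests: a typical
// heap address like 0x7f0000001000 passes, while kernel-half addresses such
// as 0xffff800000000000 do not.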

// Scans the memory range, looking for byte patterns that point into allocator
// chunks. Marks those chunks with |tag| and adds them to |frontier|.
// There are two usage modes for this function: finding reachable or ignored
// chunks (|tag| = kReachable or kIgnored) and finding indirectly leaked chunks
// (|tag| = kIndirectlyLeaked). In the second case, there's no flood fill,
// so |frontier| = 0.
void ScanRangeForPointers(uptr begin, uptr end,
                          Frontier *frontier,
                          const char *region_type, ChunkTag tag) {
  const uptr alignment = flags()->pointer_alignment();
  if (flags()->log_pointers)
    Report("Scanning %s range %p-%p.\n", region_type, begin, end);
  uptr pp = begin;
  if (pp % alignment)
    pp = pp + alignment - pp % alignment;
  for (; pp + sizeof(void *) <= end; pp += alignment) {  // NOLINT
    void *p = *reinterpret_cast<void **>(pp);
    if (!CanBeAHeapPointer(reinterpret_cast<uptr>(p))) continue;
    uptr chunk = PointsIntoChunk(p);
    if (!chunk) continue;
    LsanMetadata m(chunk);
    // Reachable beats ignored beats leaked.
    if (m.tag() == kReachable) continue;
    if (m.tag() == kIgnored && tag != kReachable) continue;
    m.set_tag(tag);
    if (flags()->log_pointers)
      Report("%p: found %p pointing into chunk %p-%p of size %zu.\n", pp, p,
             chunk, chunk + m.requested_size(), m.requested_size());
    if (frontier)
      frontier->push_back(chunk);
  }
}
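
// Illustrative call (hypothetical names; the real callers live in the
// platform-specific code): scanning a writable global data segment as a root
// region would look like
//   ScanRangeForPointers(seg_begin, seg_end, &frontier, "GLOBAL", kReachable);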

// Scans thread data (stacks and TLS) for heap pointers.
static void ProcessThreads(SuspendedThreadsList const &suspended_threads,
                           Frontier *frontier) {
  InternalScopedBuffer<uptr> registers(SuspendedThreadsList::RegisterCount());
  uptr registers_begin = reinterpret_cast<uptr>(registers.data());
  uptr registers_end = registers_begin + registers.size();
  for (uptr i = 0; i < suspended_threads.thread_count(); i++) {
    uptr os_id = static_cast<uptr>(suspended_threads.GetThreadID(i));
    if (flags()->log_threads) Report("Processing thread %zu.\n", os_id);
    uptr stack_begin, stack_end, tls_begin, tls_end, cache_begin, cache_end;
    bool thread_found = GetThreadRangesLocked(os_id, &stack_begin, &stack_end,
                                              &tls_begin, &tls_end,
                                              &cache_begin, &cache_end);
    if (!thread_found) {
      // If a thread can't be found in the thread registry, it's probably in the
      // process of destruction. Log this event and move on.
      if (flags()->log_threads)
        Report("Thread %zu not found in registry.\n", os_id);
      continue;
    }
    uptr sp;
    bool have_registers =
        (suspended_threads.GetRegistersAndSP(i, registers.data(), &sp) == 0);
    if (!have_registers) {
      Report("Unable to get registers from thread %zu.\n", os_id);
      // If unable to get SP, consider the entire stack to be reachable.
      sp = stack_begin;
    }

    if (flags()->use_registers && have_registers)
      ScanRangeForPointers(registers_begin, registers_end, frontier,
                           "REGISTERS", kReachable);

    if (flags()->use_stacks) {
      if (flags()->log_threads)
        Report("Stack at %p-%p, SP = %p.\n", stack_begin, stack_end, sp);
      if (sp < stack_begin || sp >= stack_end) {
        // SP is outside the recorded stack range (e.g. the thread is running a
        // signal handler on an alternate stack). Again, consider the entire
        // stack range to be reachable.
        if (flags()->log_threads)
          Report("WARNING: stack pointer not in stack range.\n");
      } else {
        // Shrink the stack range to ignore out-of-scope values.
        stack_begin = sp;
      }
      ScanRangeForPointers(stack_begin, stack_end, frontier, "STACK",
                           kReachable);
    }

    if (flags()->use_tls) {
      if (flags()->log_threads) Report("TLS at %p-%p.\n", tls_begin, tls_end);
      if (cache_begin == cache_end) {
        ScanRangeForPointers(tls_begin, tls_end, frontier, "TLS", kReachable);
      } else {
        // Because LSan should not be loaded with dlopen(), we can assume that
        // the allocator cache is part of the static TLS image.
        CHECK_LE(tls_begin, cache_begin);
        CHECK_GE(tls_end, cache_end);
        if (tls_begin < cache_begin)
          ScanRangeForPointers(tls_begin, cache_begin, frontier, "TLS",
                               kReachable);
        if (tls_end > cache_end)
          ScanRangeForPointers(cache_end, tls_end, frontier, "TLS", kReachable);
      }
    }
  }
}

static void FloodFillTag(Frontier *frontier, ChunkTag tag) {
  while (frontier->size()) {
    uptr next_chunk = frontier->back();
    frontier->pop_back();
    LsanMetadata m(next_chunk);
    ScanRangeForPointers(next_chunk, next_chunk + m.requested_size(), frontier,
                         "HEAP", tag);
  }
}
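
// The frontier is used as a LIFO stack (back()/pop_back()), so the flood fill
// proceeds depth-first. It terminates because ScanRangeForPointers sets a
// chunk's tag before pushing it and never pushes an already-tagged chunk.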

// ForEachChunk callback. If the chunk is marked as leaked, marks all chunks
// which are reachable from it as indirectly leaked.
static void MarkIndirectlyLeakedCb(uptr chunk, void *arg) {
  chunk = GetUserBegin(chunk);
  LsanMetadata m(chunk);
  if (m.allocated() && m.tag() != kReachable) {
    ScanRangeForPointers(chunk, chunk + m.requested_size(),
                         /* frontier */ 0, "HEAP", kIndirectlyLeaked);
  }
}

// ForEachChunk callback. If chunk is marked as ignored, adds its address to
// frontier.
static void CollectIgnoredCb(uptr chunk, void *arg) {
  CHECK(arg);
  chunk = GetUserBegin(chunk);
  LsanMetadata m(chunk);
  if (m.allocated() && m.tag() == kIgnored)
    reinterpret_cast<Frontier *>(arg)->push_back(chunk);
}

// Sets the appropriate tag on each chunk.
static void ClassifyAllChunks(SuspendedThreadsList const &suspended_threads) {
  // Holds the flood fill frontier.
  Frontier frontier(GetPageSizeCached());

  if (flags()->use_globals)
    ProcessGlobalRegions(&frontier);
  ProcessThreads(suspended_threads, &frontier);
  FloodFillTag(&frontier, kReachable);
  // The check here is relatively expensive, so we do this in a separate flood
  // fill. That way we can skip the check for chunks that are reachable
  // otherwise.
  ProcessPlatformSpecificAllocations(&frontier);
  FloodFillTag(&frontier, kReachable);

  if (flags()->log_pointers)
    Report("Scanning ignored chunks.\n");
  CHECK_EQ(0, frontier.size());
  ForEachChunk(CollectIgnoredCb, &frontier);
  FloodFillTag(&frontier, kIgnored);

  // Iterate over leaked chunks and mark those that are reachable from other
  // leaked chunks.
  if (flags()->log_pointers)
    Report("Scanning leaked chunks.\n");
  ForEachChunk(MarkIndirectlyLeakedCb, 0 /* arg */);
}

static void PrintStackTraceById(u32 stack_trace_id) {
  CHECK(stack_trace_id);
  uptr size = 0;
  const uptr *trace = StackDepotGet(stack_trace_id, &size);
  StackTrace::PrintStack(trace, size, common_flags()->symbolize,
                         common_flags()->strip_path_prefix, 0);
}

// ForEachChunk callback. Aggregates unreachable chunks into a LeakReport.
static void CollectLeaksCb(uptr chunk, void *arg) {
  CHECK(arg);
  LeakReport *leak_report = reinterpret_cast<LeakReport *>(arg);
  chunk = GetUserBegin(chunk);
  LsanMetadata m(chunk);
  if (!m.allocated()) return;
  if (m.tag() == kDirectlyLeaked || m.tag() == kIndirectlyLeaked) {
    uptr resolution = flags()->resolution;
    if (resolution > 0) {
      uptr size = 0;
      const uptr *trace = StackDepotGet(m.stack_trace_id(), &size);
      size = Min(size, resolution);
      leak_report->Add(StackDepotPut(trace, size), m.requested_size(), m.tag());
    } else {
      leak_report->Add(m.stack_trace_id(), m.requested_size(), m.tag());
    }
  }
}
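
// With resolution=N, the allocation stack is truncated to its top N frames and
// re-interned in the stack depot, so leaks whose stacks agree on those N
// frames are merged into one report entry; resolution=0 keeps full stacks.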

// ForEachChunk callback. Prints addresses of unreachable chunks.
static void PrintLeakedCb(uptr chunk, void *arg) {
  chunk = GetUserBegin(chunk);
  LsanMetadata m(chunk);
  if (!m.allocated()) return;
  if (m.tag() == kDirectlyLeaked || m.tag() == kIndirectlyLeaked) {
    Printf("%s leaked %zu byte object at %p.\n",
           m.tag() == kDirectlyLeaked ? "Directly" : "Indirectly",
           m.requested_size(), chunk);
  }
}

static void PrintMatchedSuppressions() {
  InternalMmapVector<Suppression *> matched(1);
  suppression_ctx->GetMatched(&matched);
  if (!matched.size())
    return;
  const char *line = "-----------------------------------------------------";
  Printf("%s\n", line);
  Printf("Suppressions used:\n");
  Printf("  count      bytes template\n");
  for (uptr i = 0; i < matched.size(); i++)
    Printf("%7zu %10zu %s\n", static_cast<uptr>(matched[i]->hit_count),
           matched[i]->weight, matched[i]->templ);
  Printf("%s\n\n", line);
}

static void PrintLeaked() {
  Printf("\n");
  Printf("Reporting individual objects:\n");
  ForEachChunk(PrintLeakedCb, 0 /* arg */);
}

struct DoLeakCheckParam {
  bool success;
  LeakReport leak_report;
};

static void DoLeakCheckCallback(const SuspendedThreadsList &suspended_threads,
                                void *arg) {
  DoLeakCheckParam *param = reinterpret_cast<DoLeakCheckParam *>(arg);
  CHECK(param);
  CHECK(!param->success);
  CHECK(param->leak_report.IsEmpty());
  ClassifyAllChunks(suspended_threads);
  ForEachChunk(CollectLeaksCb, &param->leak_report);
  if (!param->leak_report.IsEmpty() && flags()->report_objects)
    PrintLeaked();
  param->success = true;
}

void DoLeakCheck() {
  EnsureMainThreadIDIsCorrect();
  BlockingMutexLock l(&global_mutex);
  static bool already_done;
  if (already_done) return;
  already_done = true;
  if (&__lsan_is_turned_off && __lsan_is_turned_off())
    return;

  DoLeakCheckParam param;
  param.success = false;
  LockThreadRegistry();
  LockAllocator();
  StopTheWorld(DoLeakCheckCallback, &param);
  UnlockAllocator();
  UnlockThreadRegistry();

  if (!param.success) {
    Report("LeakSanitizer has encountered a fatal error.\n");
    Die();
  }
  uptr have_unsuppressed = param.leak_report.ApplySuppressions();
  if (have_unsuppressed) {
    Decorator d;
    Printf("\n"
           "================================================================="
           "\n");
    Printf("%s", d.Error());
    Report("ERROR: LeakSanitizer: detected memory leaks\n");
    Printf("%s", d.End());
    param.leak_report.PrintLargest(flags()->max_leaks);
  }
  if (have_unsuppressed || (flags()->verbosity >= 1)) {
    PrintMatchedSuppressions();
    param.leak_report.PrintSummary();
  }
  if (have_unsuppressed && flags()->exitcode)
    internal__exit(flags()->exitcode);
}

static Suppression *GetSuppressionForAddr(uptr addr) {
  static const uptr kMaxAddrFrames = 16;
  InternalScopedBuffer<AddressInfo> addr_frames(kMaxAddrFrames);
  for (uptr i = 0; i < kMaxAddrFrames; i++) new (&addr_frames[i]) AddressInfo();
  uptr addr_frames_num =
      getSymbolizer()->SymbolizeCode(addr, addr_frames.data(), kMaxAddrFrames);
  for (uptr i = 0; i < addr_frames_num; i++) {
    Suppression *s;
    if (suppression_ctx->Match(addr_frames[i].function, SuppressionLeak, &s) ||
        suppression_ctx->Match(addr_frames[i].file, SuppressionLeak, &s) ||
        suppression_ctx->Match(addr_frames[i].module, SuppressionLeak, &s))
      return s;
  }
  return 0;
}

static Suppression *GetSuppressionForStack(u32 stack_trace_id) {
  uptr size = 0;
  const uptr *trace = StackDepotGet(stack_trace_id, &size);
  for (uptr i = 0; i < size; i++) {
    Suppression *s =
        GetSuppressionForAddr(StackTrace::GetPreviousInstructionPc(trace[i]));
    if (s) return s;
  }
  return 0;
}
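
// The PCs stored in a stack trace are mostly return addresses, i.e. they point
// just past the call. GetPreviousInstructionPc() steps each PC back so that
// the call site itself is symbolized and matched against the suppressions.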

///// LeakReport implementation. /////

// A hard limit on the number of distinct leaks, to avoid quadratic complexity
// in LeakReport::Add(). We don't expect to ever see this many leaks in
// real-world applications.
// FIXME: Get rid of this limit by changing the implementation of LeakReport to
// use a hash table.
const uptr kMaxLeaksConsidered = 5000;

void LeakReport::Add(u32 stack_trace_id, uptr leaked_size, ChunkTag tag) {
  CHECK(tag == kDirectlyLeaked || tag == kIndirectlyLeaked);
  bool is_directly_leaked = (tag == kDirectlyLeaked);
  for (uptr i = 0; i < leaks_.size(); i++)
    if (leaks_[i].stack_trace_id == stack_trace_id &&
        leaks_[i].is_directly_leaked == is_directly_leaked) {
      leaks_[i].hit_count++;
      leaks_[i].total_size += leaked_size;
      return;
    }
  if (leaks_.size() == kMaxLeaksConsidered) return;
  Leak leak = { /* hit_count */ 1, leaked_size, stack_trace_id,
                is_directly_leaked, /* is_suppressed */ false };
  leaks_.push_back(leak);
}

static bool LeakComparator(const Leak &leak1, const Leak &leak2) {
  if (leak1.is_directly_leaked == leak2.is_directly_leaked)
    return leak1.total_size > leak2.total_size;
  else
    return leak1.is_directly_leaked;
}
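
// Sort order: all direct leaks come before all indirect leaks; within each
// class, leaks with the larger total size come first.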

void LeakReport::PrintLargest(uptr num_leaks_to_print) {
  CHECK(leaks_.size() <= kMaxLeaksConsidered);
  Printf("\n");
  if (leaks_.size() == kMaxLeaksConsidered)
    Printf("Too many leaks! Only the first %zu leaks encountered will be "
           "reported.\n",
           kMaxLeaksConsidered);

  uptr unsuppressed_count = 0;
  for (uptr i = 0; i < leaks_.size(); i++)
    if (!leaks_[i].is_suppressed) unsuppressed_count++;
  if (num_leaks_to_print > 0 && num_leaks_to_print < unsuppressed_count)
    Printf("The %zu largest leak(s):\n", num_leaks_to_print);
  InternalSort(&leaks_, leaks_.size(), LeakComparator);
  uptr leaks_printed = 0;
  Decorator d;
  for (uptr i = 0; i < leaks_.size(); i++) {
    if (leaks_[i].is_suppressed) continue;
    Printf("%s", d.Leak());
    Printf("%s leak of %zu byte(s) in %zu object(s) allocated from:\n",
           leaks_[i].is_directly_leaked ? "Direct" : "Indirect",
           leaks_[i].total_size, leaks_[i].hit_count);
    Printf("%s", d.End());
    PrintStackTraceById(leaks_[i].stack_trace_id);
    Printf("\n");
    leaks_printed++;
    if (leaks_printed == num_leaks_to_print) break;
  }
  if (leaks_printed < unsuppressed_count) {
    uptr remaining = unsuppressed_count - leaks_printed;
    Printf("Omitting %zu more leak(s).\n", remaining);
  }
}

void LeakReport::PrintSummary() {
  CHECK(leaks_.size() <= kMaxLeaksConsidered);
  uptr bytes = 0, allocations = 0;
  for (uptr i = 0; i < leaks_.size(); i++) {
    if (leaks_[i].is_suppressed) continue;
    bytes += leaks_[i].total_size;
    allocations += leaks_[i].hit_count;
  }
  const int kMaxSummaryLength = 128;
  InternalScopedBuffer<char> summary(kMaxSummaryLength);
  internal_snprintf(summary.data(), kMaxSummaryLength,
                    "LeakSanitizer: %zu byte(s) leaked in %zu allocation(s).",
                    bytes, allocations);
  __sanitizer_report_error_summary(summary.data());
}

uptr LeakReport::ApplySuppressions() {
  uptr unsuppressed_count = 0;
  for (uptr i = 0; i < leaks_.size(); i++) {
    Suppression *s = GetSuppressionForStack(leaks_[i].stack_trace_id);
    if (s) {
      s->weight += leaks_[i].total_size;
      s->hit_count += leaks_[i].hit_count;
      leaks_[i].is_suppressed = true;
    } else {
      unsuppressed_count++;
    }
  }
  return unsuppressed_count;
}
}  // namespace __lsan
#endif  // CAN_SANITIZE_LEAKS

using namespace __lsan;  // NOLINT

extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_ignore_object(const void *p) {
#if CAN_SANITIZE_LEAKS
  // Cannot use PointsIntoChunk or LsanMetadata here, since the allocator is not
  // locked.
  BlockingMutexLock l(&global_mutex);
  IgnoreObjectResult res = IgnoreObjectLocked(p);
  if (res == kIgnoreObjectInvalid && flags()->verbosity >= 2)
    Report("__lsan_ignore_object(): no heap object found at %p\n", p);
  if (res == kIgnoreObjectAlreadyIgnored && flags()->verbosity >= 2)
    Report("__lsan_ignore_object(): "
           "heap object at %p is already being ignored\n", p);
  if (res == kIgnoreObjectSuccess && flags()->verbosity >= 3)
    Report("__lsan_ignore_object(): ignoring heap object at %p\n", p);
#endif  // CAN_SANITIZE_LEAKS
}
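
// Illustrative use (hypothetical snippet): exclude an intentionally leaked
// object from reports.
//   void *sentinel = malloc(16);  // Lives for the whole process lifetime.
//   __lsan_ignore_object(sentinel);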

SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_disable() {
#if CAN_SANITIZE_LEAKS
  __lsan::disable_counter++;
#endif
}

SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_enable() {
#if CAN_SANITIZE_LEAKS
  if (!__lsan::disable_counter) {
    Report("Unmatched call to __lsan_enable().\n");
    Die();
  }
  __lsan::disable_counter--;
#endif
}
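
// Illustrative use (hypothetical snippet): the counter nests per thread, and
// allocations made between matched calls are exempt from leak reports.
//   __lsan_disable();
//   void *p = malloc(32);  // Never reported as a leak.
//   __lsan_enable();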

SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_do_leak_check() {
#if CAN_SANITIZE_LEAKS
  if (common_flags()->detect_leaks)
    __lsan::DoLeakCheck();
#endif  // CAN_SANITIZE_LEAKS
}
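
// Illustrative use (hypothetical snippet): trigger the once-only check before
// process exit.
//   int main() {
//     ...
//     __lsan_do_leak_check();
//   }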

#if !SANITIZER_SUPPORTS_WEAK_HOOKS
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
int __lsan_is_turned_off() {
  return 0;
}
#endif
}  // extern "C"