lsan_common.cc revision c6ac98d7fcc81768b2ef7ddc785c27e3fc1bdef6
//=-- lsan_common.cc ------------------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of LeakSanitizer.
// Implementation of common leak checking functionality.
//
//===----------------------------------------------------------------------===//

#include "lsan_common.h"

#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_stacktrace.h"
#include "sanitizer_common/sanitizer_stoptheworld.h"
#include "sanitizer_common/sanitizer_suppressions.h"

#if CAN_SANITIZE_LEAKS
namespace __lsan {

// This mutex is used to prevent races between DoLeakCheck and IgnoreObject.
BlockingMutex global_mutex(LINKER_INITIALIZED);

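// Nesting counter for __lsan_disable() / __lsan_enable(), kept per thread.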
THREADLOCAL int disable_counter;
bool DisabledInThisThread() { return disable_counter > 0; }

Flags lsan_flags;

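// Defaults are set below; they can be overridden through the LSAN_OPTIONS
// environment variable, e.g. LSAN_OPTIONS=report_objects=1.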
static void InitializeFlags() {
  Flags *f = flags();
  // Default values.
  f->report_objects = false;
  f->resolution = 0;
  f->max_leaks = 0;
  f->exitcode = 23;
  f->suppressions = "";
  f->use_registers = true;
  f->use_globals = true;
  f->use_stacks = true;
  f->use_tls = true;
  f->use_unaligned = false;
  f->verbosity = 0;
  f->log_pointers = false;
  f->log_threads = false;

  const char *options = GetEnv("LSAN_OPTIONS");
  if (options) {
    ParseFlag(options, &f->use_registers, "use_registers");
    ParseFlag(options, &f->use_globals, "use_globals");
    ParseFlag(options, &f->use_stacks, "use_stacks");
    ParseFlag(options, &f->use_tls, "use_tls");
    ParseFlag(options, &f->use_unaligned, "use_unaligned");
    ParseFlag(options, &f->report_objects, "report_objects");
    ParseFlag(options, &f->resolution, "resolution");
    CHECK_GE(f->resolution, 0);
    ParseFlag(options, &f->max_leaks, "max_leaks");
    CHECK_GE(f->max_leaks, 0);
    ParseFlag(options, &f->verbosity, "verbosity");
    ParseFlag(options, &f->log_pointers, "log_pointers");
    ParseFlag(options, &f->log_threads, "log_threads");
    ParseFlag(options, &f->exitcode, "exitcode");
    ParseFlag(options, &f->suppressions, "suppressions");
  }
}

SuppressionContext *suppression_ctx;

void InitializeSuppressions() {
  CHECK(!suppression_ctx);
  ALIGNED(64) static char placeholder_[sizeof(SuppressionContext)];
  suppression_ctx = new(placeholder_) SuppressionContext;
  char *suppressions_from_file;
  uptr buffer_size;
  if (ReadFileToBuffer(flags()->suppressions, &suppressions_from_file,
                       &buffer_size, 1 << 26 /* max_len */))
    suppression_ctx->Parse(suppressions_from_file);
  if (flags()->suppressions[0] && !buffer_size) {
    Printf("LeakSanitizer: failed to read suppressions file '%s'\n",
           flags()->suppressions);
    Die();
  }
  if (&__lsan_default_suppressions)
    suppression_ctx->Parse(__lsan_default_suppressions());
}

void InitCommonLsan() {
  InitializeFlags();
  InitializeSuppressions();
  InitializePlatformSpecificModules();
}

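// Fast rejection test for values that cannot possibly be heap pointers;
// the authoritative lookup is PointsIntoChunk().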
static inline bool CanBeAHeapPointer(uptr p) {
  // Since our heap is located in mmap-ed memory, we can assume a sensible lower
  // bound on heap addresses.
  const uptr kMinAddress = 4 * 4096;
  if (p < kMinAddress) return false;
#ifdef __x86_64__
  // Accept only canonical form user-space addresses.
  return ((p >> 47) == 0);
#else
  return true;
#endif
}

// Scans the memory range, looking for byte patterns that point into allocator
// chunks. Marks those chunks with |tag| and adds them to |frontier|.
// There are two usage modes for this function: finding reachable or ignored
// chunks (|tag| = kReachable or kIgnored) and finding indirectly leaked chunks
// (|tag| = kIndirectlyLeaked). In the second case, there's no flood fill,
// so |frontier| = 0.
void ScanRangeForPointers(uptr begin, uptr end,
                          Frontier *frontier,
                          const char *region_type, ChunkTag tag) {
  const uptr alignment = flags()->pointer_alignment();
  if (flags()->log_pointers)
    Report("Scanning %s range %p-%p.\n", region_type, begin, end);
  uptr pp = begin;
  if (pp % alignment)
    pp = pp + alignment - pp % alignment;
  for (; pp + sizeof(void *) <= end; pp += alignment) {  // NOLINT
    void *p = *reinterpret_cast<void**>(pp);
    if (!CanBeAHeapPointer(reinterpret_cast<uptr>(p))) continue;
    uptr chunk = PointsIntoChunk(p);
    if (!chunk) continue;
    LsanMetadata m(chunk);
    // Reachable beats ignored beats leaked.
    if (m.tag() == kReachable) continue;
    if (m.tag() == kIgnored && tag != kReachable) continue;
    m.set_tag(tag);
    if (flags()->log_pointers)
      Report("%p: found %p pointing into chunk %p-%p of size %zu.\n", pp, p,
             chunk, chunk + m.requested_size(), m.requested_size());
    if (frontier)
      frontier->push_back(chunk);
  }
}

// Scans thread data (stacks and TLS) for heap pointers.
static void ProcessThreads(SuspendedThreadsList const &suspended_threads,
                           Frontier *frontier) {
  InternalScopedBuffer<uptr> registers(SuspendedThreadsList::RegisterCount());
  uptr registers_begin = reinterpret_cast<uptr>(registers.data());
  uptr registers_end = registers_begin + registers.size();
  for (uptr i = 0; i < suspended_threads.thread_count(); i++) {
    uptr os_id = static_cast<uptr>(suspended_threads.GetThreadID(i));
    if (flags()->log_threads) Report("Processing thread %d.\n", os_id);
    uptr stack_begin, stack_end, tls_begin, tls_end, cache_begin, cache_end;
    bool thread_found = GetThreadRangesLocked(os_id, &stack_begin, &stack_end,
                                              &tls_begin, &tls_end,
                                              &cache_begin, &cache_end);
    if (!thread_found) {
      // If a thread can't be found in the thread registry, it's probably in the
      // process of destruction. Log this event and move on.
      if (flags()->log_threads)
        Report("Thread %d not found in registry.\n", os_id);
      continue;
    }
    uptr sp;
    bool have_registers =
        (suspended_threads.GetRegistersAndSP(i, registers.data(), &sp) == 0);
    if (!have_registers) {
      Report("Unable to get registers from thread %d.\n", os_id);
      // If unable to get SP, consider the entire stack to be reachable.
      sp = stack_begin;
    }

    if (flags()->use_registers && have_registers)
      ScanRangeForPointers(registers_begin, registers_end, frontier,
                           "REGISTERS", kReachable);

    if (flags()->use_stacks) {
      if (flags()->log_threads)
        Report("Stack at %p-%p, SP = %p.\n", stack_begin, stack_end, sp);
      if (sp < stack_begin || sp >= stack_end) {
        // SP is outside the recorded stack range (e.g. the thread is running a
        // signal handler on alternate stack). Again, consider the entire stack
        // range to be reachable.
        if (flags()->log_threads)
          Report("WARNING: stack pointer not in stack range.\n");
      } else {
        // Shrink the stack range to ignore out-of-scope values.
        stack_begin = sp;
      }
      ScanRangeForPointers(stack_begin, stack_end, frontier, "STACK",
                           kReachable);
    }

    if (flags()->use_tls) {
      if (flags()->log_threads) Report("TLS at %p-%p.\n", tls_begin, tls_end);
      if (cache_begin == cache_end) {
        ScanRangeForPointers(tls_begin, tls_end, frontier, "TLS", kReachable);
      } else {
        // Because LSan should not be loaded with dlopen(), we can assume that
        // the allocator cache will be part of the static TLS image.
        CHECK_LE(tls_begin, cache_begin);
        CHECK_GE(tls_end, cache_end);
        if (tls_begin < cache_begin)
          ScanRangeForPointers(tls_begin, cache_begin, frontier, "TLS",
                               kReachable);
        if (tls_end > cache_end)
          ScanRangeForPointers(cache_end, tls_end, frontier, "TLS", kReachable);
      }
    }
  }
}

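// Drains |frontier|: pops chunks one by one and scans their payloads for
// pointers to further chunks, tagging everything reached with |tag|.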
static void FloodFillTag(Frontier *frontier, ChunkTag tag) {
  while (frontier->size()) {
    uptr next_chunk = frontier->back();
    frontier->pop_back();
    LsanMetadata m(next_chunk);
    ScanRangeForPointers(next_chunk, next_chunk + m.requested_size(), frontier,
                         "HEAP", tag);
  }
}

// ForEachChunk callback. If the chunk is marked as leaked, marks all chunks
// which are reachable from it as indirectly leaked.
static void MarkIndirectlyLeakedCb(uptr chunk, void *arg) {
  chunk = GetUserBegin(chunk);
  LsanMetadata m(chunk);
  if (m.allocated() && m.tag() != kReachable) {
    ScanRangeForPointers(chunk, chunk + m.requested_size(),
                         /* frontier */ 0, "HEAP", kIndirectlyLeaked);
  }
}

// ForEachChunk callback. If chunk is marked as ignored, adds its address to
// frontier.
static void CollectIgnoredCb(uptr chunk, void *arg) {
  CHECK(arg);
  chunk = GetUserBegin(chunk);
  LsanMetadata m(chunk);
  if (m.allocated() && m.tag() == kIgnored)
    reinterpret_cast<Frontier *>(arg)->push_back(chunk);
}

// Sets the appropriate tag on each chunk.
static void ClassifyAllChunks(SuspendedThreadsList const &suspended_threads) {
  // Holds the flood fill frontier.
  Frontier frontier(GetPageSizeCached());

  if (flags()->use_globals)
    ProcessGlobalRegions(&frontier);
  ProcessThreads(suspended_threads, &frontier);
  FloodFillTag(&frontier, kReachable);
  // The check here is relatively expensive, so we do this in a separate flood
  // fill. That way we can skip the check for chunks that are reachable
  // otherwise.
  ProcessPlatformSpecificAllocations(&frontier);
  FloodFillTag(&frontier, kReachable);

  if (flags()->log_pointers)
    Report("Scanning ignored chunks.\n");
  CHECK_EQ(0, frontier.size());
  ForEachChunk(CollectIgnoredCb, &frontier);
  FloodFillTag(&frontier, kIgnored);

  // Iterate over leaked chunks and mark those that are reachable from other
  // leaked chunks.
  if (flags()->log_pointers)
    Report("Scanning leaked chunks.\n");
  ForEachChunk(MarkIndirectlyLeakedCb, 0 /* arg */);
}

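// Fetches the stack trace with the given id from the depot and prints it.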
static void PrintStackTraceById(u32 stack_trace_id) {
  CHECK(stack_trace_id);
  uptr size = 0;
  const uptr *trace = StackDepotGet(stack_trace_id, &size);
  StackTrace::PrintStack(trace, size, common_flags()->symbolize,
                         common_flags()->strip_path_prefix, 0);
}

// ForEachChunk callback. Aggregates unreachable chunks into a LeakReport.
static void CollectLeaksCb(uptr chunk, void *arg) {
  CHECK(arg);
  LeakReport *leak_report = reinterpret_cast<LeakReport *>(arg);
  chunk = GetUserBegin(chunk);
  LsanMetadata m(chunk);
  if (!m.allocated()) return;
  if (m.tag() == kDirectlyLeaked || m.tag() == kIndirectlyLeaked) {
    uptr resolution = flags()->resolution;
    if (resolution > 0) {
      uptr size = 0;
      const uptr *trace = StackDepotGet(m.stack_trace_id(), &size);
      size = Min(size, resolution);
      leak_report->Add(StackDepotPut(trace, size), m.requested_size(), m.tag());
    } else {
      leak_report->Add(m.stack_trace_id(), m.requested_size(), m.tag());
    }
  }
}

// ForEachChunkCallback. Prints addresses of unreachable chunks.
static void PrintLeakedCb(uptr chunk, void *arg) {
  chunk = GetUserBegin(chunk);
  LsanMetadata m(chunk);
  if (!m.allocated()) return;
  if (m.tag() == kDirectlyLeaked || m.tag() == kIndirectlyLeaked) {
    Printf("%s leaked %zu byte object at %p.\n",
           m.tag() == kDirectlyLeaked ? "Directly" : "Indirectly",
           m.requested_size(), chunk);
  }
}

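// Prints a table of the suppressions that matched at least one leak.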
static void PrintMatchedSuppressions() {
  InternalMmapVector<Suppression *> matched(1);
  suppression_ctx->GetMatched(&matched);
  if (!matched.size())
    return;
  const char *line = "-----------------------------------------------------";
  Printf("%s\n", line);
  Printf("Suppressions used:\n");
  Printf("  count      bytes template\n");
  for (uptr i = 0; i < matched.size(); i++)
    Printf("%7zu %10zu %s\n", static_cast<uptr>(matched[i]->hit_count),
           matched[i]->weight, matched[i]->templ);
  Printf("%s\n\n", line);
}

static void PrintLeaked() {
  Printf("\n");
  Printf("Reporting individual objects:\n");
  ForEachChunk(PrintLeakedCb, 0 /* arg */);
}

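// Parameter block passed through StopTheWorld() to DoLeakCheckCallback().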
struct DoLeakCheckParam {
  bool success;
  LeakReport leak_report;
};

static void DoLeakCheckCallback(const SuspendedThreadsList &suspended_threads,
                                void *arg) {
  DoLeakCheckParam *param = reinterpret_cast<DoLeakCheckParam *>(arg);
  CHECK(param);
  CHECK(!param->success);
  CHECK(param->leak_report.IsEmpty());
  ClassifyAllChunks(suspended_threads);
  ForEachChunk(CollectLeaksCb, &param->leak_report);
  if (!param->leak_report.IsEmpty() && flags()->report_objects)
    PrintLeaked();
  param->success = true;
}

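// Top-level entry point: locks the thread registry and the allocator, stops
// the world, classifies all chunks, then reports unsuppressed leaks and, if
// any were found, exits with |exitcode|.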
void DoLeakCheck() {
  EnsureMainThreadIDIsCorrect();
  BlockingMutexLock l(&global_mutex);
  static bool already_done;
  CHECK(!already_done);
  already_done = true;
  if (&__lsan_is_turned_off && __lsan_is_turned_off())
    return;

  DoLeakCheckParam param;
  param.success = false;
  LockThreadRegistry();
  LockAllocator();
  StopTheWorld(DoLeakCheckCallback, &param);
  UnlockAllocator();
  UnlockThreadRegistry();

  if (!param.success) {
    Report("LeakSanitizer has encountered a fatal error.\n");
    Die();
  }
  uptr have_unsuppressed = param.leak_report.ApplySuppressions();
  if (have_unsuppressed) {
    Printf("\n"
           "================================================================="
           "\n");
    Report("ERROR: LeakSanitizer: detected memory leaks\n");
    param.leak_report.PrintLargest(flags()->max_leaks);
  }
  if (have_unsuppressed || (flags()->verbosity >= 1)) {
    PrintMatchedSuppressions();
    param.leak_report.PrintSummary();
  }
  if (have_unsuppressed && flags()->exitcode)
    internal__exit(flags()->exitcode);
}

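// Symbolizes |addr| and returns a leak suppression matching the function,
// file or module name of any resulting frame, or 0 if none matches.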
static Suppression *GetSuppressionForAddr(uptr addr) {
  static const uptr kMaxAddrFrames = 16;
  InternalScopedBuffer<AddressInfo> addr_frames(kMaxAddrFrames);
  for (uptr i = 0; i < kMaxAddrFrames; i++) new (&addr_frames[i]) AddressInfo();
  uptr addr_frames_num = __sanitizer::SymbolizeCode(addr, addr_frames.data(),
                                                    kMaxAddrFrames);
  for (uptr i = 0; i < addr_frames_num; i++) {
    Suppression* s;
    if (suppression_ctx->Match(addr_frames[i].function, SuppressionLeak, &s) ||
        suppression_ctx->Match(addr_frames[i].file, SuppressionLeak, &s) ||
        suppression_ctx->Match(addr_frames[i].module, SuppressionLeak, &s))
      return s;
  }
  return 0;
}

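// Returns a suppression matching any frame of the given stack trace, or 0.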
static Suppression *GetSuppressionForStack(u32 stack_trace_id) {
  uptr size = 0;
  const uptr *trace = StackDepotGet(stack_trace_id, &size);
  for (uptr i = 0; i < size; i++) {
    Suppression *s =
        GetSuppressionForAddr(StackTrace::GetPreviousInstructionPc(trace[i]));
    if (s) return s;
  }
  return 0;
}

///// LeakReport implementation. /////

// A hard limit on the number of distinct leaks, to avoid quadratic complexity
// in LeakReport::Add(). We don't expect to ever see this many leaks in
// real-world applications.
// FIXME: Get rid of this limit by changing the implementation of LeakReport to
// use a hash table.
const uptr kMaxLeaksConsidered = 1000;

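// Adds one leaked allocation to the report, merging it into an existing
// record with the same allocation stack trace and leak kind if there is one.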
void LeakReport::Add(u32 stack_trace_id, uptr leaked_size, ChunkTag tag) {
  CHECK(tag == kDirectlyLeaked || tag == kIndirectlyLeaked);
  bool is_directly_leaked = (tag == kDirectlyLeaked);
  for (uptr i = 0; i < leaks_.size(); i++)
    if (leaks_[i].stack_trace_id == stack_trace_id &&
        leaks_[i].is_directly_leaked == is_directly_leaked) {
      leaks_[i].hit_count++;
      leaks_[i].total_size += leaked_size;
      return;
    }
  if (leaks_.size() == kMaxLeaksConsidered) return;
  Leak leak = { /* hit_count */ 1, leaked_size, stack_trace_id,
                is_directly_leaked, /* is_suppressed */ false };
  leaks_.push_back(leak);
}

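// Comparator for sorting leaks by total size, largest first.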
static bool IsLarger(const Leak &leak1, const Leak &leak2) {
  return leak1.total_size > leak2.total_size;
}

void LeakReport::PrintLargest(uptr num_leaks_to_print) {
  CHECK(leaks_.size() <= kMaxLeaksConsidered);
  Printf("\n");
  if (leaks_.size() == kMaxLeaksConsidered)
    Printf("Too many leaks! Only the first %zu leaks encountered will be "
           "reported.\n",
           kMaxLeaksConsidered);

  uptr unsuppressed_count = 0;
  for (uptr i = 0; i < leaks_.size(); i++)
    if (!leaks_[i].is_suppressed) unsuppressed_count++;
  if (num_leaks_to_print > 0 && num_leaks_to_print < unsuppressed_count)
    Printf("The %zu largest leak(s):\n", num_leaks_to_print);
  InternalSort(&leaks_, leaks_.size(), IsLarger);
  uptr leaks_printed = 0;
  for (uptr i = 0; i < leaks_.size(); i++) {
    if (leaks_[i].is_suppressed) continue;
    Printf("%s leak of %zu byte(s) in %zu object(s) allocated from:\n",
           leaks_[i].is_directly_leaked ? "Direct" : "Indirect",
           leaks_[i].total_size, leaks_[i].hit_count);
    PrintStackTraceById(leaks_[i].stack_trace_id);
    Printf("\n");
    leaks_printed++;
    if (leaks_printed == num_leaks_to_print) break;
  }
  if (leaks_printed < unsuppressed_count) {
    uptr remaining = unsuppressed_count - leaks_printed;
    Printf("Omitting %zu more leak(s).\n", remaining);
  }
}

void LeakReport::PrintSummary() {
  CHECK(leaks_.size() <= kMaxLeaksConsidered);
  uptr bytes = 0, allocations = 0;
  for (uptr i = 0; i < leaks_.size(); i++) {
    if (leaks_[i].is_suppressed) continue;
    bytes += leaks_[i].total_size;
    allocations += leaks_[i].hit_count;
  }
  const int kMaxSummaryLength = 128;
  InternalScopedBuffer<char> summary(kMaxSummaryLength);
  internal_snprintf(summary.data(), kMaxSummaryLength,
                    "LeakSanitizer: %zu byte(s) leaked in %zu allocation(s).",
                    bytes, allocations);
  __sanitizer_report_error_summary(summary.data());
}

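// Matches each leak against the suppression list, marks suppressed leaks, and
// returns the number of leaks left unsuppressed.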
uptr LeakReport::ApplySuppressions() {
  uptr unsuppressed_count = 0;
  for (uptr i = 0; i < leaks_.size(); i++) {
    Suppression *s = GetSuppressionForStack(leaks_[i].stack_trace_id);
    if (s) {
      s->weight += leaks_[i].total_size;
      s->hit_count += leaks_[i].hit_count;
      leaks_[i].is_suppressed = true;
    } else {
      unsuppressed_count++;
    }
  }
  return unsuppressed_count;
}
}  // namespace __lsan
#endif  // CAN_SANITIZE_LEAKS

using namespace __lsan;  // NOLINT

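// Public interface. A typical use (hypothetical user code) is to exclude an
// object that is leaked on purpose from the leak report:
//   void *p = malloc(16);
//   __lsan_ignore_object(p);  // excluded from future leak reports
// Allocation-heavy regions can also be bracketed with __lsan_disable() and
// __lsan_enable().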
extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_ignore_object(const void *p) {
#if CAN_SANITIZE_LEAKS
  // Cannot use PointsIntoChunk or LsanMetadata here, since the allocator is not
  // locked.
  BlockingMutexLock l(&global_mutex);
  IgnoreObjectResult res = IgnoreObjectLocked(p);
  if (res == kIgnoreObjectInvalid && flags()->verbosity >= 2)
    Report("__lsan_ignore_object(): no heap object found at %p\n", p);
  if (res == kIgnoreObjectAlreadyIgnored && flags()->verbosity >= 2)
    Report("__lsan_ignore_object(): "
           "heap object at %p is already being ignored\n", p);
  if (res == kIgnoreObjectSuccess && flags()->verbosity >= 3)
    Report("__lsan_ignore_object(): ignoring heap object at %p\n", p);
#endif  // CAN_SANITIZE_LEAKS
}

SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_disable() {
#if CAN_SANITIZE_LEAKS
  __lsan::disable_counter++;
#endif
}

SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_enable() {
#if CAN_SANITIZE_LEAKS
  if (!__lsan::disable_counter) {
    Report("Unmatched call to __lsan_enable().\n");
    Die();
  }
  __lsan::disable_counter--;
#endif
}

#if !SANITIZER_SUPPORTS_WEAK_HOOKS
SANITIZER_WEAK_ATTRIBUTE SANITIZER_INTERFACE_ATTRIBUTE
int __lsan_is_turned_off() {
  return 0;
}
#endif
}  // extern "C"