lsan_common.cc revision 0bc81775b75f2c8c6c8c0e1af4008771d5b882ab
//=-- lsan_common.cc ------------------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of LeakSanitizer.
// Implementation of common leak checking functionality.
//
//===----------------------------------------------------------------------===//

#include "lsan_common.h"

#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_stacktrace.h"
#include "sanitizer_common/sanitizer_stoptheworld.h"

#if CAN_SANITIZE_LEAKS
namespace __lsan {

Flags lsan_flags;

static void InitializeFlags() {
  Flags *f = flags();
  // Default values.
  f->sources = kSourceAllAligned;
  f->report_blocks = false;
  f->resolution = 0;
  f->max_leaks = 0;
  f->log_pointers = false;
  f->log_threads = false;

  const char *options = GetEnv("LSAN_OPTIONS");
  if (options) {
    bool aligned = true;
    ParseFlag(options, &aligned, "aligned");
    if (!aligned) f->sources |= kSourceUnaligned;
    ParseFlag(options, &f->report_blocks, "report_blocks");
    ParseFlag(options, &f->resolution, "resolution");
    CHECK_GE(f->resolution, 0);
    ParseFlag(options, &f->max_leaks, "max_leaks");
    CHECK_GE(f->max_leaks, 0);
    ParseFlag(options, &f->log_pointers, "log_pointers");
    ParseFlag(options, &f->log_threads, "log_threads");
  }
}

void InitCommonLsan() {
  InitializeFlags();
  InitializePlatformSpecificModules();
}

static inline bool CanBeAHeapPointer(uptr p) {
  // Since our heap is located in mmap-ed memory, we can assume a sensible
  // lower boundary on heap addresses.
  const uptr kMinAddress = 4 * 4096;
  if (p < kMinAddress) return false;
#ifdef __x86_64__
  // Accept only canonical-form user-space addresses.
  return ((p >> 47) == 0);
#else
  return true;
#endif
}

// Scan the memory range, looking for byte patterns that point into allocator
// chunks. Mark those chunks with the given tag and add them to the frontier.
// There are two usage modes for this function: finding non-leaked chunks
// (tag = kReachable) and finding indirectly leaked chunks
// (tag = kIndirectlyLeaked). In the second case, there's no flood fill,
// so frontier = 0.
void ScanRangeForPointers(uptr begin, uptr end, InternalVector<uptr> *frontier,
                          const char *region_type, ChunkTag tag) {
  const uptr alignment = flags()->pointer_alignment();
  if (flags()->log_pointers)
    Report("Scanning %s range %p-%p.\n", region_type, begin, end);
  uptr pp = begin;
  if (pp % alignment)
    pp = pp + alignment - pp % alignment;
  for (; pp + sizeof(uptr) <= end; pp += alignment) {
    void *p = *reinterpret_cast<void**>(pp);
    if (!CanBeAHeapPointer(reinterpret_cast<uptr>(p))) continue;
    // FIXME: PointsIntoChunk is SLOW because GetBlockBegin() in
    // LargeMmapAllocator involves a lock and a linear search.
    void *chunk = PointsIntoChunk(p);
    if (!chunk) continue;
    LsanMetadata m(chunk);
    if (m.tag() == kReachable) continue;
    m.set_tag(tag);
    if (flags()->log_pointers)
      Report("%p: found %p pointing into chunk %p-%p of size %llu.\n", pp, p,
             chunk, reinterpret_cast<uptr>(chunk) + m.requested_size(),
             m.requested_size());
    if (frontier)
      frontier->push_back(reinterpret_cast<uptr>(chunk));
  }
}

// Scan thread data (stacks and TLS) for heap pointers.
static void ProcessThreads(SuspendedThreadsList const &suspended_threads,
                           InternalVector<uptr> *frontier) {
  InternalScopedBuffer<uptr> registers(SuspendedThreadsList::RegisterCount());
  uptr registers_begin = reinterpret_cast<uptr>(registers.data());
  uptr registers_end = registers_begin + registers.size();
  for (uptr i = 0; i < suspended_threads.thread_count(); i++) {
    uptr os_id = static_cast<uptr>(suspended_threads.GetThreadID(i));
    if (flags()->log_threads) Report("Processing thread %d.\n", os_id);
    uptr stack_begin, stack_end, tls_begin, tls_end, cache_begin, cache_end;
    bool thread_found = GetThreadRangesLocked(os_id, &stack_begin, &stack_end,
                                              &tls_begin, &tls_end,
                                              &cache_begin, &cache_end);
    if (!thread_found) {
      // If a thread can't be found in the thread registry, it's probably in
      // the process of destruction. Log this event and move on.
      if (flags()->log_threads)
        Report("Thread %d not found in registry.\n", os_id);
      continue;
    }
    uptr sp;
    bool have_registers =
        (suspended_threads.GetRegistersAndSP(i, registers.data(), &sp) == 0);
    if (!have_registers) {
      Report("Unable to get registers from thread %d.\n", os_id);
      // If unable to get SP, consider the entire stack to be reachable.
      sp = stack_begin;
    }

    if (flags()->use_registers() && have_registers)
      ScanRangeForPointers(registers_begin, registers_end, frontier,
                           "REGISTERS", kReachable);

    if (flags()->use_stacks()) {
      if (flags()->log_threads)
        Report("Stack at %p-%p, SP = %p.\n", stack_begin, stack_end, sp);
      if (sp < stack_begin || sp >= stack_end) {
        // SP is outside the recorded stack range (e.g. the thread is running a
        // signal handler on an alternate stack). Again, consider the entire
        // stack range to be reachable.
        if (flags()->log_threads)
          Report("WARNING: stack pointer not in stack range.\n");
      } else {
        // Shrink the stack range to ignore out-of-scope values.
        stack_begin = sp;
      }
      ScanRangeForPointers(stack_begin, stack_end, frontier, "STACK",
                           kReachable);
    }

    if (flags()->use_tls()) {
      if (flags()->log_threads) Report("TLS at %p-%p.\n", tls_begin, tls_end);
      // Because LSan should not be loaded with dlopen(), we can assume
      // that the allocator cache will be part of the static TLS image.
      CHECK_LE(tls_begin, cache_begin);
      CHECK_GE(tls_end, cache_end);
      if (tls_begin < cache_begin)
        ScanRangeForPointers(tls_begin, cache_begin, frontier, "TLS",
                             kReachable);
      if (tls_end > cache_end)
        ScanRangeForPointers(cache_end, tls_end, frontier, "TLS", kReachable);
    }
  }
}

static void FloodFillReachable(InternalVector<uptr> *frontier) {
  while (frontier->size()) {
    uptr next_chunk = frontier->back();
    frontier->pop_back();
    LsanMetadata m(reinterpret_cast<void *>(next_chunk));
    ScanRangeForPointers(next_chunk, next_chunk + m.requested_size(), frontier,
                         "HEAP", kReachable);
  }
}

// Mark leaked chunks which are reachable from other leaked chunks.
void MarkIndirectlyLeakedCb::operator()(void *p) const {
  p = GetUserBegin(p);
  LsanMetadata m(p);
  if (m.allocated() && m.tag() != kReachable) {
    ScanRangeForPointers(reinterpret_cast<uptr>(p),
                         reinterpret_cast<uptr>(p) + m.requested_size(),
                         /* frontier */ 0, "HEAP", kIndirectlyLeaked);
  }
}

// Set the appropriate tag on each chunk.
static void ClassifyAllChunks(SuspendedThreadsList const &suspended_threads) {
  // Holds the flood fill frontier.
  InternalVector<uptr> frontier(GetPageSizeCached());

  if (flags()->use_globals())
    ProcessGlobalRegions(&frontier);
  ProcessThreads(suspended_threads, &frontier);
  FloodFillReachable(&frontier);
  ProcessPlatformSpecificAllocations(&frontier);
  FloodFillReachable(&frontier);

  // Now all reachable chunks are marked. Iterate over leaked chunks and mark
  // those that are reachable from other leaked chunks.
  if (flags()->log_pointers)
    Report("Now scanning leaked blocks for pointers.\n");
  ForEachChunk(MarkIndirectlyLeakedCb());
}

void ClearTagCb::operator()(void *p) const {
  p = GetUserBegin(p);
  LsanMetadata m(p);
  m.set_tag(kDirectlyLeaked);
}

static void PrintStackTraceById(u32 stack_trace_id) {
  CHECK(stack_trace_id);
  uptr size = 0;
  const uptr *trace = StackDepotGet(stack_trace_id, &size);
  StackTrace::PrintStack(trace, size, common_flags()->symbolize,
                         common_flags()->strip_path_prefix, 0);
}

static void LockAndSuspendThreads(StopTheWorldCallback callback, void *arg) {
  LockThreadRegistry();
  LockAllocator();
  StopTheWorld(callback, arg);
  // Allocator must be unlocked by the callback.
  UnlockThreadRegistry();
}

///// Normal leak checking. /////

void CollectLeaksCb::operator()(void *p) const {
  p = GetUserBegin(p);
  LsanMetadata m(p);
  if (!m.allocated()) return;
  if (m.tag() != kReachable) {
    uptr resolution = flags()->resolution;
    if (resolution > 0) {
      uptr size = 0;
      const uptr *trace = StackDepotGet(m.stack_trace_id(), &size);
      size = Min(size, resolution);
      leak_report_->Add(StackDepotPut(trace, size), m.requested_size(),
                        m.tag());
    } else {
      leak_report_->Add(m.stack_trace_id(), m.requested_size(), m.tag());
    }
  }
}

static void CollectLeaks(LeakReport *leak_report) {
  ForEachChunk(CollectLeaksCb(leak_report));
}

void PrintLeakedCb::operator()(void *p) const {
  p = GetUserBegin(p);
  LsanMetadata m(p);
  if (!m.allocated()) return;
  if (m.tag() != kReachable) {
    CHECK(m.tag() == kDirectlyLeaked || m.tag() == kIndirectlyLeaked);
    Printf("%s leaked %llu byte block at %p\n",
"Directly" : "Indirectly", 263 m.requested_size(), p); 264 } 265} 266 267static void PrintLeaked() { 268 Printf("\nReporting individual blocks:\n"); 269 ForEachChunk(PrintLeakedCb()); 270} 271 272static void DoLeakCheckCallback(const SuspendedThreadsList &suspended_threads, 273 void *arg) { 274 // Allocator must not be locked when we call GetRegionBegin(). 275 UnlockAllocator(); 276 bool *success = reinterpret_cast<bool *>(arg); 277 ClassifyAllChunks(suspended_threads); 278 LeakReport leak_report; 279 CollectLeaks(&leak_report); 280 if (!leak_report.IsEmpty()) { 281 leak_report.PrintLargest(flags()->max_leaks); 282 if (flags()->report_blocks) 283 PrintLeaked(); 284 } 285 ForEachChunk(ClearTagCb()); 286 *success = true; 287} 288 289void DoLeakCheck() { 290 bool success = false; 291 LockAndSuspendThreads(DoLeakCheckCallback, &success); 292 if (!success) 293 Report("Leak check failed!\n"); 294} 295 296///// Reporting of leaked blocks' addresses (for testing). ///// 297 298void ReportLeakedCb::operator()(void *p) const { 299 p = GetUserBegin(p); 300 LsanMetadata m(p); 301 if (m.allocated() && m.tag() != kReachable) 302 leaked_->push_back(p); 303} 304 305struct ReportLeakedParam { 306 InternalVector<void *> *leaked; 307 uptr sources; 308 bool success; 309}; 310 311static void ReportLeakedCallback(const SuspendedThreadsList &suspended_threads, 312 void *arg) { 313 // Allocator must not be locked when we call GetRegionBegin(). 314 UnlockAllocator(); 315 ReportLeakedParam *param = reinterpret_cast<ReportLeakedParam *>(arg); 316 flags()->sources = param->sources; 317 ClassifyAllChunks(suspended_threads); 318 ForEachChunk(ReportLeakedCb(param->leaked)); 319 ForEachChunk(ClearTagCb()); 320 param->success = true; 321} 322 323void ReportLeaked(InternalVector<void *> *leaked, uptr sources) { 324 CHECK_EQ(0, leaked->size()); 325 ReportLeakedParam param; 326 param.leaked = leaked; 327 param.success = false; 328 param.sources = sources; 329 LockAndSuspendThreads(ReportLeakedCallback, ¶m); 330 CHECK(param.success); 331} 332 333///// LeakReport implementation. ///// 334 335// A hard limit on the number of distinct leaks, to avoid quadratic complexity 336// in LeakReport::Add(). We don't expect to ever see this many leaks in 337// real-world applications. 338// FIXME: Get rid of this limit by changing the implementation of LeakReport to 339// use a hash table. 340const uptr kMaxLeaksConsidered = 1000; 341 342void LeakReport::Add(u32 stack_trace_id, uptr leaked_size, ChunkTag tag) { 343 CHECK(tag == kDirectlyLeaked || tag == kIndirectlyLeaked); 344 bool is_directly_leaked = (tag == kDirectlyLeaked); 345 for (uptr i = 0; i < leaks_.size(); i++) 346 if (leaks_[i].stack_trace_id == stack_trace_id && 347 leaks_[i].is_directly_leaked == is_directly_leaked) { 348 leaks_[i].hit_count++; 349 leaks_[i].total_size += leaked_size; 350 return; 351 } 352 if (leaks_.size() == kMaxLeaksConsidered) return; 353 Leak leak = { /* hit_count */ 1, leaked_size, stack_trace_id, 354 is_directly_leaked }; 355 leaks_.push_back(leak); 356} 357 358static bool IsLarger(const Leak &leak1, const Leak &leak2) { 359 return leak1.total_size > leak2.total_size; 360} 361 362void LeakReport::PrintLargest(uptr max_leaks) { 363 CHECK(leaks_.size() <= kMaxLeaksConsidered); 364 Printf("\n"); 365 if (leaks_.size() == kMaxLeaksConsidered) 366 Printf("Too many leaks! 
    Printf("Too many leaks! Only the first %llu leaks encountered will be "
           "reported.\n",
           kMaxLeaksConsidered);
  if (max_leaks > 0 && max_leaks < leaks_.size())
    Printf("The %llu largest leak%s:\n", max_leaks, max_leaks > 1 ? "s" : "");
  InternalSort(&leaks_, leaks_.size(), IsLarger);
  max_leaks = max_leaks > 0 ? Min(max_leaks, leaks_.size()) : leaks_.size();
  for (uptr i = 0; i < max_leaks; i++) {
    Printf("\n%s leak of %llu bytes in %llu objects allocated from:\n",
           leaks_[i].is_directly_leaked ? "Direct" : "Indirect",
           leaks_[i].total_size, leaks_[i].hit_count);
    PrintStackTraceById(leaks_[i].stack_trace_id);
  }
  if (max_leaks < leaks_.size()) {
    uptr remaining = leaks_.size() - max_leaks;
    Printf("\nOmitting %llu more leak%s.\n", remaining,
           remaining > 1 ? "s" : "");
  }
}

}  // namespace __lsan
#endif  // CAN_SANITIZE_LEAKS