lsan_common.cc revision 200afbd8ba4904241c1ebcef4fa79d739ca01f73
//=-- lsan_common.cc ------------------------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of LeakSanitizer.
// Implementation of common leak checking functionality.
//
//===----------------------------------------------------------------------===//

#include "lsan_common.h"

#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_stacktrace.h"
#include "sanitizer_common/sanitizer_stoptheworld.h"

#if CAN_SANITIZE_LEAKS
namespace __lsan {

// This mutex is used to prevent races between DoLeakCheck and
// __lsan_ignore_object().
BlockingMutex global_mutex(LINKER_INITIALIZED);

THREADLOCAL int disable_counter;
bool DisabledInThisThread() { return disable_counter > 0; }

Flags lsan_flags;

static void InitializeFlags() {
  Flags *f = flags();
  // Default values.
  f->report_objects = false;
  f->resolution = 0;
  f->max_leaks = 0;
  f->exitcode = 23;
  f->use_registers = true;
  f->use_globals = true;
  f->use_stacks = true;
  f->use_tls = true;
  f->use_unaligned = false;
  f->verbosity = 0;
  f->log_pointers = false;
  f->log_threads = false;

  const char *options = GetEnv("LSAN_OPTIONS");
  if (options) {
    ParseFlag(options, &f->use_registers, "use_registers");
    ParseFlag(options, &f->use_globals, "use_globals");
    ParseFlag(options, &f->use_stacks, "use_stacks");
    ParseFlag(options, &f->use_tls, "use_tls");
    ParseFlag(options, &f->use_unaligned, "use_unaligned");
    ParseFlag(options, &f->report_objects, "report_objects");
    ParseFlag(options, &f->resolution, "resolution");
    CHECK_GE(f->resolution, 0);
    ParseFlag(options, &f->max_leaks, "max_leaks");
    CHECK_GE(f->max_leaks, 0);
    ParseFlag(options, &f->verbosity, "verbosity");
    ParseFlag(options, &f->log_pointers, "log_pointers");
    ParseFlag(options, &f->log_threads, "log_threads");
    ParseFlag(options, &f->exitcode, "exitcode");
  }
}
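
// Illustrative note (not part of the original source): the flags above are
// read from the LSAN_OPTIONS environment variable as name=value pairs, so a
// typical invocation looks like:
//
//   LSAN_OPTIONS="report_objects=1:max_leaks=10:log_pointers=1" ./a.out
//
// Parsing of the individual name=value entries is done by ParseFlag() from
// sanitizer_common.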

void InitCommonLsan() {
  InitializeFlags();
  InitializePlatformSpecificModules();
}

static inline bool CanBeAHeapPointer(uptr p) {
  // Since our heap is located in mmap-ed memory, we can assume a sensible
  // lower boundary on heap addresses.
  const uptr kMinAddress = 4 * 4096;
  if (p < kMinAddress) return false;
#ifdef __x86_64__
  // Accept only canonical form user-space addresses.
  return ((p >> 47) == 0);
#else
  return true;
#endif
}

// Scan the memory range, looking for byte patterns that point into allocator
// chunks. Mark those chunks with tag and add them to the frontier.
// There are two usage modes for this function: finding reachable or ignored
// chunks (tag = kReachable or kIgnored) and finding indirectly leaked chunks
// (tag = kIndirectlyLeaked). In the second case, there's no flood fill,
// so frontier = 0.
void ScanRangeForPointers(uptr begin, uptr end,
                          Frontier *frontier,
                          const char *region_type, ChunkTag tag) {
  const uptr alignment = flags()->pointer_alignment();
  if (flags()->log_pointers)
    Report("Scanning %s range %p-%p.\n", region_type, begin, end);
  uptr pp = begin;
  if (pp % alignment)
    pp = pp + alignment - pp % alignment;
  for (; pp + sizeof(uptr) <= end; pp += alignment) {
    void *p = *reinterpret_cast<void**>(pp);
    if (!CanBeAHeapPointer(reinterpret_cast<uptr>(p))) continue;
    void *chunk = PointsIntoChunk(p);
    if (!chunk) continue;
    LsanMetadata m(chunk);
    // Reachable beats ignored beats leaked.
    if (m.tag() == kReachable) continue;
    if (m.tag() == kIgnored && tag != kReachable) continue;
    m.set_tag(tag);
    if (flags()->log_pointers)
      Report("%p: found %p pointing into chunk %p-%p of size %llu.\n", pp, p,
             chunk, reinterpret_cast<uptr>(chunk) + m.requested_size(),
             m.requested_size());
    if (frontier)
      frontier->push_back(reinterpret_cast<uptr>(chunk));
  }
}
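
// Worked example for the alignment rounding above (illustrative, not part of
// the original source): with alignment == 8 and begin == 0x7ffe001d,
// pp % alignment == 5, so the loop starts scanning at
// pp + 8 - 5 == 0x7ffe0020, the next 8-aligned address; it then reads one
// sizeof(uptr)-sized word every alignment bytes until pp + sizeof(uptr)
// would run past end.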

// Scan thread data (stacks and TLS) for heap pointers.
static void ProcessThreads(SuspendedThreadsList const &suspended_threads,
                           Frontier *frontier) {
  InternalScopedBuffer<uptr> registers(SuspendedThreadsList::RegisterCount());
  uptr registers_begin = reinterpret_cast<uptr>(registers.data());
  uptr registers_end = registers_begin + registers.size();
  for (uptr i = 0; i < suspended_threads.thread_count(); i++) {
    uptr os_id = static_cast<uptr>(suspended_threads.GetThreadID(i));
    if (flags()->log_threads) Report("Processing thread %d.\n", os_id);
    uptr stack_begin, stack_end, tls_begin, tls_end, cache_begin, cache_end;
    bool thread_found = GetThreadRangesLocked(os_id, &stack_begin, &stack_end,
                                              &tls_begin, &tls_end,
                                              &cache_begin, &cache_end);
    if (!thread_found) {
      // If a thread can't be found in the thread registry, it's probably in
      // the process of destruction. Log this event and move on.
      if (flags()->log_threads)
        Report("Thread %d not found in registry.\n", os_id);
      continue;
    }
    uptr sp;
    bool have_registers =
        (suspended_threads.GetRegistersAndSP(i, registers.data(), &sp) == 0);
    if (!have_registers) {
      Report("Unable to get registers from thread %d.\n", os_id);
      // If unable to get SP, consider the entire stack to be reachable.
      sp = stack_begin;
    }

    if (flags()->use_registers && have_registers)
      ScanRangeForPointers(registers_begin, registers_end, frontier,
                           "REGISTERS", kReachable);

    if (flags()->use_stacks) {
      if (flags()->log_threads)
        Report("Stack at %p-%p, SP = %p.\n", stack_begin, stack_end, sp);
      if (sp < stack_begin || sp >= stack_end) {
        // SP is outside the recorded stack range (e.g. the thread is running
        // a signal handler on alternate stack). Again, consider the entire
        // stack range to be reachable.
        if (flags()->log_threads)
          Report("WARNING: stack_pointer not in stack_range.\n");
      } else {
        // Shrink the stack range to ignore out-of-scope values.
        stack_begin = sp;
      }
      ScanRangeForPointers(stack_begin, stack_end, frontier, "STACK",
                           kReachable);
    }

    if (flags()->use_tls) {
      if (flags()->log_threads) Report("TLS at %p-%p.\n", tls_begin, tls_end);
      if (cache_begin == cache_end) {
        ScanRangeForPointers(tls_begin, tls_end, frontier, "TLS", kReachable);
      } else {
        // Because LSan should not be loaded with dlopen(), we can assume
        // that the allocator cache will be part of the static TLS image.
        CHECK_LE(tls_begin, cache_begin);
        CHECK_GE(tls_end, cache_end);
        if (tls_begin < cache_begin)
          ScanRangeForPointers(tls_begin, cache_begin, frontier, "TLS",
                               kReachable);
        if (tls_end > cache_end)
          ScanRangeForPointers(cache_end, tls_end, frontier, "TLS",
                               kReachable);
      }
    }
  }
}

static void FloodFillTag(Frontier *frontier, ChunkTag tag) {
  while (frontier->size()) {
    uptr next_chunk = frontier->back();
    frontier->pop_back();
    LsanMetadata m(reinterpret_cast<void *>(next_chunk));
    ScanRangeForPointers(next_chunk, next_chunk + m.requested_size(), frontier,
                         "HEAP", tag);
  }
}

// Mark leaked chunks which are reachable from other leaked chunks.
void MarkIndirectlyLeakedCb::operator()(void *p) const {
  p = GetUserBegin(p);
  LsanMetadata m(p);
  if (m.allocated() && m.tag() != kReachable) {
    ScanRangeForPointers(reinterpret_cast<uptr>(p),
                         reinterpret_cast<uptr>(p) + m.requested_size(),
                         /* frontier */ 0, "HEAP", kIndirectlyLeaked);
  }
}

void CollectIgnoredCb::operator()(void *p) const {
  p = GetUserBegin(p);
  LsanMetadata m(p);
  if (m.allocated() && m.tag() == kIgnored)
    frontier_->push_back(reinterpret_cast<uptr>(p));
}

// Set the appropriate tag on each chunk.
static void ClassifyAllChunks(SuspendedThreadsList const &suspended_threads) {
  // Holds the flood fill frontier.
  Frontier frontier(GetPageSizeCached());

  if (flags()->use_globals)
    ProcessGlobalRegions(&frontier);
  ProcessThreads(suspended_threads, &frontier);
  FloodFillTag(&frontier, kReachable);
  // The check here is relatively expensive, so we do this in a separate flood
  // fill. That way we can skip the check for chunks that are reachable
  // otherwise.
  ProcessPlatformSpecificAllocations(&frontier);
  FloodFillTag(&frontier, kReachable);

  if (flags()->log_pointers)
    Report("Scanning ignored chunks.\n");
  CHECK_EQ(0, frontier.size());
  ForEachChunk(CollectIgnoredCb(&frontier));
  FloodFillTag(&frontier, kIgnored);

  // Iterate over leaked chunks and mark those that are reachable from other
  // leaked chunks.
  if (flags()->log_pointers)
    Report("Scanning leaked chunks.\n");
  ForEachChunk(MarkIndirectlyLeakedCb());
}
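
// Illustrative example (not part of the original source) of the resulting
// tags: if an unreachable heap object holds the only pointer to another heap
// object, the first is reported as a direct leak and the second as an
// indirect leak:
//
//   struct Node { Node *next; };
//   Node *n = new Node;  // kDirectlyLeaked once the last reference is lost
//   n->next = new Node;  // kIndirectlyLeaked: reachable only via a leaked chunk
//   n = 0;               // both chunks are now unreachable from roots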

static void PrintStackTraceById(u32 stack_trace_id) {
  CHECK(stack_trace_id);
  uptr size = 0;
  const uptr *trace = StackDepotGet(stack_trace_id, &size);
  StackTrace::PrintStack(trace, size, common_flags()->symbolize,
                         common_flags()->strip_path_prefix, 0);
}

void CollectLeaksCb::operator()(void *p) const {
  p = GetUserBegin(p);
  LsanMetadata m(p);
  if (!m.allocated()) return;
  if (m.tag() == kDirectlyLeaked || m.tag() == kIndirectlyLeaked) {
    uptr resolution = flags()->resolution;
    if (resolution > 0) {
      uptr size = 0;
      const uptr *trace = StackDepotGet(m.stack_trace_id(), &size);
      size = Min(size, resolution);
      leak_report_->Add(StackDepotPut(trace, size), m.requested_size(),
                        m.tag());
    } else {
      leak_report_->Add(m.stack_trace_id(), m.requested_size(), m.tag());
    }
  }
}

static void CollectLeaks(LeakReport *leak_report) {
  ForEachChunk(CollectLeaksCb(leak_report));
}

void PrintLeakedCb::operator()(void *p) const {
  p = GetUserBegin(p);
  LsanMetadata m(p);
  if (!m.allocated()) return;
  if (m.tag() == kDirectlyLeaked || m.tag() == kIndirectlyLeaked) {
    Printf("%s leaked %llu byte object at %p.\n",
           m.tag() == kDirectlyLeaked ? "Directly" : "Indirectly",
           m.requested_size(), p);
  }
}

static void PrintLeaked() {
  Printf("\n");
  Printf("Reporting individual objects:\n");
  ForEachChunk(PrintLeakedCb());
}

struct DoLeakCheckParam {
  bool success;
  LeakReport leak_report;
};

static void DoLeakCheckCallback(const SuspendedThreadsList &suspended_threads,
                                void *arg) {
  DoLeakCheckParam *param = reinterpret_cast<DoLeakCheckParam *>(arg);
  CHECK(param);
  CHECK(!param->success);
  CHECK(param->leak_report.IsEmpty());
  ClassifyAllChunks(suspended_threads);
  CollectLeaks(&param->leak_report);
  if (!param->leak_report.IsEmpty() && flags()->report_objects)
    PrintLeaked();
  param->success = true;
}

void DoLeakCheck() {
  BlockingMutexLock l(&global_mutex);
  static bool already_done;
  CHECK(!already_done);
  already_done = true;

  DoLeakCheckParam param;
  param.success = false;
  LockThreadRegistry();
  LockAllocator();
  StopTheWorld(DoLeakCheckCallback, &param);
  UnlockAllocator();
  UnlockThreadRegistry();

  if (!param.success) {
    Report("LeakSanitizer has encountered a fatal error.\n");
    Die();
  }
  if (!param.leak_report.IsEmpty()) {
    Printf("\n================================================================="
           "\n");
    Report("ERROR: LeakSanitizer: detected memory leaks\n");
    param.leak_report.PrintLargest(flags()->max_leaks);
    param.leak_report.PrintSummary();
    if (flags()->exitcode)
      internal__exit(flags()->exitcode);
  }
}
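
// For orientation, a report produced by DoLeakCheck() looks roughly like the
// following (illustrative output, not from the original source; the exact
// frames depend on the program):
//
//   =================================================================
//   ==12345==ERROR: LeakSanitizer: detected memory leaks
//
//   Direct leak of 8 byte(s) in 1 object(s) allocated from:
//       #0 0x4af01b in operator new(unsigned long)
//       #1 0x4da26a in main
//
//   SUMMARY: LeakSanitizer: 8 byte(s) leaked in 1 allocation(s).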

///// LeakReport implementation. /////

// A hard limit on the number of distinct leaks, to avoid quadratic complexity
// in LeakReport::Add(). We don't expect to ever see this many leaks in
// real-world applications.
// FIXME: Get rid of this limit by changing the implementation of LeakReport to
// use a hash table.
const uptr kMaxLeaksConsidered = 1000;

void LeakReport::Add(u32 stack_trace_id, uptr leaked_size, ChunkTag tag) {
  CHECK(tag == kDirectlyLeaked || tag == kIndirectlyLeaked);
  bool is_directly_leaked = (tag == kDirectlyLeaked);
  for (uptr i = 0; i < leaks_.size(); i++)
    if (leaks_[i].stack_trace_id == stack_trace_id &&
        leaks_[i].is_directly_leaked == is_directly_leaked) {
      leaks_[i].hit_count++;
      leaks_[i].total_size += leaked_size;
      return;
    }
  if (leaks_.size() == kMaxLeaksConsidered) return;
  Leak leak = { /* hit_count */ 1, leaked_size, stack_trace_id,
                is_directly_leaked };
  leaks_.push_back(leak);
}

static bool IsLarger(const Leak &leak1, const Leak &leak2) {
  return leak1.total_size > leak2.total_size;
}

void LeakReport::PrintLargest(uptr max_leaks) {
  CHECK(leaks_.size() <= kMaxLeaksConsidered);
  Printf("\n");
  if (leaks_.size() == kMaxLeaksConsidered)
    Printf("Too many leaks! Only the first %llu leaks encountered will be "
           "reported.\n",
           kMaxLeaksConsidered);
  if (max_leaks > 0 && max_leaks < leaks_.size())
    Printf("The %llu largest leak(s):\n", max_leaks);
  InternalSort(&leaks_, leaks_.size(), IsLarger);
  max_leaks = max_leaks > 0 ? Min(max_leaks, leaks_.size()) : leaks_.size();
  for (uptr i = 0; i < max_leaks; i++) {
    Printf("%s leak of %llu byte(s) in %llu object(s) allocated from:\n",
           leaks_[i].is_directly_leaked ? "Direct" : "Indirect",
           leaks_[i].total_size, leaks_[i].hit_count);
    PrintStackTraceById(leaks_[i].stack_trace_id);
    Printf("\n");
  }
  if (max_leaks < leaks_.size()) {
    uptr remaining = leaks_.size() - max_leaks;
    Printf("Omitting %llu more leak(s).\n", remaining);
  }
}

void LeakReport::PrintSummary() {
  CHECK(leaks_.size() <= kMaxLeaksConsidered);
  uptr bytes = 0, allocations = 0;
  for (uptr i = 0; i < leaks_.size(); i++) {
    bytes += leaks_[i].total_size;
    allocations += leaks_[i].hit_count;
  }
  Printf(
      "SUMMARY: LeakSanitizer: %llu byte(s) leaked in %llu allocation(s).\n\n",
      bytes, allocations);
}

}  // namespace __lsan
#endif  // CAN_SANITIZE_LEAKS

using namespace __lsan;  // NOLINT

extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_ignore_object(const void *p) {
#if CAN_SANITIZE_LEAKS
  // Cannot use PointsIntoChunk or LsanMetadata here, since the allocator is
  // not locked.
  BlockingMutexLock l(&global_mutex);
  IgnoreObjectResult res = IgnoreObjectLocked(p);
  if (res == kIgnoreObjectInvalid && flags()->verbosity >= 1)
    Report("__lsan_ignore_object(): no heap object found at %p\n", p);
  if (res == kIgnoreObjectAlreadyIgnored && flags()->verbosity >= 1)
    Report("__lsan_ignore_object(): "
           "heap object at %p is already being ignored\n", p);
  if (res == kIgnoreObjectSuccess && flags()->verbosity >= 2)
    Report("__lsan_ignore_object(): ignoring heap object at %p\n", p);
#endif  // CAN_SANITIZE_LEAKS
}

SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_disable() {
#if CAN_SANITIZE_LEAKS
  __lsan::disable_counter++;
#endif
}

SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_enable() {
#if CAN_SANITIZE_LEAKS
  if (!__lsan::disable_counter) {
    Report("Unmatched call to __lsan_enable().\n");
    Die();
  }
  __lsan::disable_counter--;
#endif
}
}  // extern "C"
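
// Illustrative usage of the interface functions above (not part of the
// original source), assuming the matching extern "C" declarations from LSan's
// interface header are visible to user code:
//
//   __lsan_disable();             // allocations made here are never reported
//   void *cache = malloc(512);    // e.g. an intentionally immortal cache
//   __lsan_enable();              // must balance the __lsan_disable() call
//   __lsan_ignore_object(cache);  // alternatively, suppress a single object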