lsan_common.cc revision 9fbfd96608070eb71e11fbfe42ec9e84016429ae
//=-- lsan_common.cc ------------------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of LeakSanitizer.
// Implementation of common leak checking functionality.
//
//===----------------------------------------------------------------------===//

#include "lsan_common.h"

#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_stacktrace.h"
#include "sanitizer_common/sanitizer_stoptheworld.h"

#if CAN_SANITIZE_LEAKS
namespace __lsan {

// This mutex is used to prevent races between DoLeakCheck and IgnoreObject.
BlockingMutex global_mutex(LINKER_INITIALIZED);

THREADLOCAL int disable_counter;
bool DisabledInThisThread() { return disable_counter > 0; }

Flags lsan_flags;

static void InitializeFlags() {
  Flags *f = flags();
  // Default values.
  f->report_objects = false;
  f->resolution = 0;
  f->max_leaks = 0;
  f->exitcode = 23;
  f->use_registers = true;
  f->use_globals = true;
  f->use_stacks = true;
  f->use_tls = true;
  f->use_unaligned = false;
  f->verbosity = 0;
  f->log_pointers = false;
  f->log_threads = false;

  const char *options = GetEnv("LSAN_OPTIONS");
  if (options) {
    ParseFlag(options, &f->use_registers, "use_registers");
    ParseFlag(options, &f->use_globals, "use_globals");
    ParseFlag(options, &f->use_stacks, "use_stacks");
    ParseFlag(options, &f->use_tls, "use_tls");
    ParseFlag(options, &f->use_unaligned, "use_unaligned");
    ParseFlag(options, &f->report_objects, "report_objects");
    ParseFlag(options, &f->resolution, "resolution");
    CHECK_GE(f->resolution, 0);
    ParseFlag(options, &f->max_leaks, "max_leaks");
    CHECK_GE(f->max_leaks, 0);
    ParseFlag(options, &f->verbosity, "verbosity");
    ParseFlag(options, &f->log_pointers, "log_pointers");
    ParseFlag(options, &f->log_threads, "log_threads");
    ParseFlag(options, &f->exitcode, "exitcode");
  }
}

void InitCommonLsan() {
  InitializeFlags();
  InitializePlatformSpecificModules();
}

static inline bool CanBeAHeapPointer(uptr p) {
  // Since our heap is located in mmap-ed memory, we can assume a sensible
  // lower boundary on heap addresses.
  const uptr kMinAddress = 4 * 4096;
  if (p < kMinAddress) return false;
#ifdef __x86_64__
  // Accept only canonical-form user-space addresses.
  return ((p >> 47) == 0);
#else
  return true;
#endif
}
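
// A minimal standalone sketch (hypothetical, not part of the build) of the
// x86-64 branch of CanBeAHeapPointer above: canonical user-space addresses
// have bits 63:47 clear, so anything at or above 1 << 47 (in particular the
// kernel half of the address space) is rejected immediately.
#if 0
#include <assert.h>
#include <stdint.h>

static bool CanBeAHeapPointerSketch(uintptr_t p) {
  const uintptr_t kMinAddress = 4 * 4096;  // first few pages are never heap
  if (p < kMinAddress) return false;
  return (p >> 47) == 0;  // user-space canonical half on x86-64
}

static void TestSketch() {
  assert(!CanBeAHeapPointerSketch(0));                      // null
  assert(!CanBeAHeapPointerSketch(0x1000));                 // below kMinAddress
  assert(CanBeAHeapPointerSketch(0x00007f0000000000ULL));   // typical mmap range
  assert(!CanBeAHeapPointerSketch(0xffff800000000000ULL));  // kernel half
}
#endif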

// Scans the memory range, looking for byte patterns that point into allocator
// chunks. Marks those chunks with |tag| and adds them to |frontier|.
// There are two usage modes for this function: finding reachable or ignored
// chunks (|tag| = kReachable or kIgnored) and finding indirectly leaked chunks
// (|tag| = kIndirectlyLeaked). In the second case, there's no flood fill,
// so |frontier| = 0.
void ScanRangeForPointers(uptr begin, uptr end,
                          Frontier *frontier,
                          const char *region_type, ChunkTag tag) {
  const uptr alignment = flags()->pointer_alignment();
  if (flags()->log_pointers)
    Report("Scanning %s range %p-%p.\n", region_type, begin, end);
  uptr pp = begin;
  if (pp % alignment)
    pp = pp + alignment - pp % alignment;
  for (; pp + sizeof(void *) <= end; pp += alignment) {  // NOLINT
    void *p = *reinterpret_cast<void**>(pp);
    if (!CanBeAHeapPointer(reinterpret_cast<uptr>(p))) continue;
    uptr chunk = PointsIntoChunk(p);
    if (!chunk) continue;
    LsanMetadata m(chunk);
    // Reachable beats ignored beats leaked.
    if (m.tag() == kReachable) continue;
    if (m.tag() == kIgnored && tag != kReachable) continue;
    m.set_tag(tag);
    if (flags()->log_pointers)
      Report("%p: found %p pointing into chunk %p-%p of size %zu.\n", pp, p,
             chunk, chunk + m.requested_size(), m.requested_size());
    if (frontier)
      frontier->push_back(chunk);
  }
}

// Scans thread data (stacks and TLS) for heap pointers.
static void ProcessThreads(SuspendedThreadsList const &suspended_threads,
                           Frontier *frontier) {
  InternalScopedBuffer<uptr> registers(SuspendedThreadsList::RegisterCount());
  uptr registers_begin = reinterpret_cast<uptr>(registers.data());
  uptr registers_end = registers_begin + registers.size();
  for (uptr i = 0; i < suspended_threads.thread_count(); i++) {
    uptr os_id = static_cast<uptr>(suspended_threads.GetThreadID(i));
    if (flags()->log_threads) Report("Processing thread %d.\n", os_id);
    uptr stack_begin, stack_end, tls_begin, tls_end, cache_begin, cache_end;
    bool thread_found = GetThreadRangesLocked(os_id, &stack_begin, &stack_end,
                                              &tls_begin, &tls_end,
                                              &cache_begin, &cache_end);
    if (!thread_found) {
      // If a thread can't be found in the thread registry, it's probably in
      // the process of destruction. Log this event and move on.
      if (flags()->log_threads)
        Report("Thread %d not found in registry.\n", os_id);
      continue;
    }
    uptr sp;
    bool have_registers =
        (suspended_threads.GetRegistersAndSP(i, registers.data(), &sp) == 0);
    if (!have_registers) {
      Report("Unable to get registers from thread %d.\n", os_id);
      // If unable to get SP, consider the entire stack to be reachable.
      sp = stack_begin;
    }

    if (flags()->use_registers && have_registers)
      ScanRangeForPointers(registers_begin, registers_end, frontier,
                           "REGISTERS", kReachable);

    if (flags()->use_stacks) {
      if (flags()->log_threads)
        Report("Stack at %p-%p, SP = %p.\n", stack_begin, stack_end, sp);
      if (sp < stack_begin || sp >= stack_end) {
        // SP is outside the recorded stack range (e.g. the thread is running
        // a signal handler on an alternate stack). Again, consider the entire
        // stack range to be reachable.
        if (flags()->log_threads)
          Report("WARNING: stack pointer not in stack range.\n");
      } else {
        // Shrink the stack range to ignore out-of-scope values.
        stack_begin = sp;
      }
      ScanRangeForPointers(stack_begin, stack_end, frontier, "STACK",
                           kReachable);
    }

    if (flags()->use_tls) {
      if (flags()->log_threads) Report("TLS at %p-%p.\n", tls_begin, tls_end);
      if (cache_begin == cache_end) {
        ScanRangeForPointers(tls_begin, tls_end, frontier, "TLS", kReachable);
      } else {
        // Because LSan should not be loaded with dlopen(), we can assume
        // that the allocator cache is part of the static TLS image.
        CHECK_LE(tls_begin, cache_begin);
        CHECK_GE(tls_end, cache_end);
        if (tls_begin < cache_begin)
          ScanRangeForPointers(tls_begin, cache_begin, frontier, "TLS",
                               kReachable);
        if (tls_end > cache_end)
          ScanRangeForPointers(cache_end, tls_end, frontier, "TLS",
                               kReachable);
      }
    }
  }
}
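
// Worked sketch (hypothetical ranges, not part of the build) of the TLS
// carve-out in ProcessThreads above: the allocator cache that lives inside
// the static TLS image is skipped so that chunks referenced only by internal
// free lists are not spuriously marked reachable.
#if 0
#include <stdint.h>
#include <stdio.h>

int main() {
  // Example layout: a 0x2000-byte TLS image with the cache in the middle.
  uintptr_t tls_begin = 0x7000, tls_end = 0x9000;
  uintptr_t cache_begin = 0x7800, cache_end = 0x8000;
  // Mirror ProcessThreads(): scan the two gaps around the cache.
  if (tls_begin < cache_begin)
    printf("scan [%#lx, %#lx)\n", (unsigned long)tls_begin,
           (unsigned long)cache_begin);
  if (tls_end > cache_end)
    printf("scan [%#lx, %#lx)\n", (unsigned long)cache_end,
           (unsigned long)tls_end);
  // Prints: scan [0x7000, 0x7800) then scan [0x8000, 0x9000).
  return 0;
}
#endif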

static void FloodFillTag(Frontier *frontier, ChunkTag tag) {
  while (frontier->size()) {
    uptr next_chunk = frontier->back();
    frontier->pop_back();
    LsanMetadata m(next_chunk);
    ScanRangeForPointers(next_chunk, next_chunk + m.requested_size(), frontier,
                         "HEAP", tag);
  }
}

// ForEachChunk callback. If the chunk is marked as leaked, marks all chunks
// which are reachable from it as indirectly leaked.
static void MarkIndirectlyLeakedCb(uptr chunk, void *arg) {
  chunk = GetUserBegin(chunk);
  LsanMetadata m(chunk);
  if (m.allocated() && m.tag() != kReachable) {
    ScanRangeForPointers(chunk, chunk + m.requested_size(),
                         /* frontier */ 0, "HEAP", kIndirectlyLeaked);
  }
}

// ForEachChunk callback. If chunk is marked as ignored, adds its address to
// frontier.
static void CollectIgnoredCb(uptr chunk, void *arg) {
  CHECK(arg);
  chunk = GetUserBegin(chunk);
  LsanMetadata m(chunk);
  if (m.allocated() && m.tag() == kIgnored)
    reinterpret_cast<Frontier *>(arg)->push_back(chunk);
}

// Sets the appropriate tag on each chunk.
static void ClassifyAllChunks(SuspendedThreadsList const &suspended_threads) {
  // Holds the flood fill frontier.
  Frontier frontier(GetPageSizeCached());

  if (flags()->use_globals)
    ProcessGlobalRegions(&frontier);
  ProcessThreads(suspended_threads, &frontier);
  FloodFillTag(&frontier, kReachable);
  // The check here is relatively expensive, so we do this in a separate flood
  // fill. That way we can skip the check for chunks that are reachable
  // otherwise.
  ProcessPlatformSpecificAllocations(&frontier);
  FloodFillTag(&frontier, kReachable);

  if (flags()->log_pointers)
    Report("Scanning ignored chunks.\n");
  CHECK_EQ(0, frontier.size());
  ForEachChunk(CollectIgnoredCb, &frontier);
  FloodFillTag(&frontier, kIgnored);

  // Iterate over leaked chunks and mark those that are reachable from other
  // leaked chunks.
  if (flags()->log_pointers)
    Report("Scanning leaked chunks.\n");
  ForEachChunk(MarkIndirectlyLeakedCb, 0 /* arg */);
}

static void PrintStackTraceById(u32 stack_trace_id) {
  CHECK(stack_trace_id);
  uptr size = 0;
  const uptr *trace = StackDepotGet(stack_trace_id, &size);
  StackTrace::PrintStack(trace, size, common_flags()->symbolize,
                         common_flags()->strip_path_prefix, 0);
}

// ForEachChunk callback. Aggregates unreachable chunks into a LeakReport.
static void CollectLeaksCb(uptr chunk, void *arg) {
  CHECK(arg);
  LeakReport *leak_report = reinterpret_cast<LeakReport *>(arg);
  chunk = GetUserBegin(chunk);
  LsanMetadata m(chunk);
  if (!m.allocated()) return;
  if (m.tag() == kDirectlyLeaked || m.tag() == kIndirectlyLeaked) {
    uptr resolution = flags()->resolution;
    if (resolution > 0) {
      uptr size = 0;
      const uptr *trace = StackDepotGet(m.stack_trace_id(), &size);
      size = Min(size, resolution);
      leak_report->Add(StackDepotPut(trace, size), m.requested_size(), m.tag());
    } else {
      leak_report->Add(m.stack_trace_id(), m.requested_size(), m.tag());
    }
  }
}
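
// Sketch (hypothetical frames, not part of the build) of the effect of the
// resolution flag handled in CollectLeaksCb above: truncating each allocation
// stack to its top |resolution| frames before re-interning it in the depot
// makes leaks whose truncated prefixes match aggregate into one report entry.
#if 0
#include <stdio.h>
#include <string.h>

// Returns 1 if two traces agree on their top |resolution| frames.
static int SamePrefix(const char **a, const char **b, unsigned resolution) {
  for (unsigned i = 0; i < resolution; i++)
    if (strcmp(a[i], b[i]) != 0) return 0;
  return 1;
}

int main() {
  const char *trace_a[] = { "malloc", "MakeNode", "BuildTree", "main" };
  const char *trace_b[] = { "malloc", "MakeNode", "CloneTree", "main" };
  // resolution = 2: both truncate to [malloc, MakeNode] -> one leak entry.
  // resolution = 3: prefixes differ at frame 2 -> two separate entries.
  printf("merge at 2: %d\n", SamePrefix(trace_a, trace_b, 2));  // 1
  printf("merge at 3: %d\n", SamePrefix(trace_a, trace_b, 3));  // 0
  return 0;
}
#endif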

// ForEachChunk callback. Prints addresses of unreachable chunks.
static void PrintLeakedCb(uptr chunk, void *arg) {
  chunk = GetUserBegin(chunk);
  LsanMetadata m(chunk);
  if (!m.allocated()) return;
  if (m.tag() == kDirectlyLeaked || m.tag() == kIndirectlyLeaked) {
    Printf("%s leaked %zu byte object at %p.\n",
           m.tag() == kDirectlyLeaked ? "Directly" : "Indirectly",
           m.requested_size(), chunk);
  }
}

static void PrintLeaked() {
  Printf("\n");
  Printf("Reporting individual objects:\n");
  ForEachChunk(PrintLeakedCb, 0 /* arg */);
}

struct DoLeakCheckParam {
  bool success;
  LeakReport leak_report;
};

static void DoLeakCheckCallback(const SuspendedThreadsList &suspended_threads,
                                void *arg) {
  DoLeakCheckParam *param = reinterpret_cast<DoLeakCheckParam *>(arg);
  CHECK(param);
  CHECK(!param->success);
  CHECK(param->leak_report.IsEmpty());
  ClassifyAllChunks(suspended_threads);
  ForEachChunk(CollectLeaksCb, &param->leak_report);
  if (!param->leak_report.IsEmpty() && flags()->report_objects)
    PrintLeaked();
  param->success = true;
}

void DoLeakCheck() {
  BlockingMutexLock l(&global_mutex);
  static bool already_done;
  CHECK(!already_done);
  already_done = true;
  if (&__lsan_is_turned_off && __lsan_is_turned_off())
    return;

  DoLeakCheckParam param;
  param.success = false;
  LockThreadRegistry();
  LockAllocator();
  StopTheWorld(DoLeakCheckCallback, &param);
  UnlockAllocator();
  UnlockThreadRegistry();

  if (!param.success) {
    Report("LeakSanitizer has encountered a fatal error.\n");
    Die();
  }
  if (!param.leak_report.IsEmpty()) {
    Printf("\n================================================================="
           "\n");
    Report("ERROR: LeakSanitizer: detected memory leaks\n");
    param.leak_report.PrintLargest(flags()->max_leaks);
    param.leak_report.PrintSummary();
    if (flags()->exitcode)
      internal__exit(flags()->exitcode);
  }
}

///// LeakReport implementation. /////

// A hard limit on the number of distinct leaks, to avoid quadratic complexity
// in LeakReport::Add(). We don't expect to ever see this many leaks in
// real-world applications.
// FIXME: Get rid of this limit by changing the implementation of LeakReport to
// use a hash table.
const uptr kMaxLeaksConsidered = 1000;

void LeakReport::Add(u32 stack_trace_id, uptr leaked_size, ChunkTag tag) {
  CHECK(tag == kDirectlyLeaked || tag == kIndirectlyLeaked);
  bool is_directly_leaked = (tag == kDirectlyLeaked);
  for (uptr i = 0; i < leaks_.size(); i++)
    if (leaks_[i].stack_trace_id == stack_trace_id &&
        leaks_[i].is_directly_leaked == is_directly_leaked) {
      leaks_[i].hit_count++;
      leaks_[i].total_size += leaked_size;
      return;
    }
  if (leaks_.size() == kMaxLeaksConsidered) return;
  Leak leak = { /* hit_count */ 1, leaked_size, stack_trace_id,
                is_directly_leaked };
  leaks_.push_back(leak);
}

static bool IsLarger(const Leak &leak1, const Leak &leak2) {
  return leak1.total_size > leak2.total_size;
}
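
// Worked sketch (hypothetical sizes, not part of the build) of the max_leaks
// clamp in PrintLargest() below: 0 means "print everything", any positive
// value caps the output at the largest N leaks after sorting.
#if 0
#include <algorithm>
#include <cstdio>
#include <vector>

int main() {
  std::vector<unsigned long> total_sizes = { 64, 4096, 16, 512, 128 };
  std::sort(total_sizes.rbegin(), total_sizes.rend());  // largest first
  unsigned long max_leaks = 3;  // as if LSAN_OPTIONS=max_leaks=3
  unsigned long n = max_leaks > 0
                        ? std::min<unsigned long>(max_leaks, total_sizes.size())
                        : total_sizes.size();
  for (unsigned long i = 0; i < n; i++)
    std::printf("leak of %lu byte(s)\n", total_sizes[i]);  // 4096, 512, 128
  if (n < total_sizes.size())
    std::printf("Omitting %lu more leak(s).\n",
                (unsigned long)(total_sizes.size() - n));  // Omitting 2
  return 0;
}
#endif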

void LeakReport::PrintLargest(uptr max_leaks) {
  CHECK(leaks_.size() <= kMaxLeaksConsidered);
  Printf("\n");
  if (leaks_.size() == kMaxLeaksConsidered)
    Printf("Too many leaks! Only the first %zu leaks encountered will be "
           "reported.\n",
           kMaxLeaksConsidered);
  if (max_leaks > 0 && max_leaks < leaks_.size())
    Printf("The %zu largest leak(s):\n", max_leaks);
  InternalSort(&leaks_, leaks_.size(), IsLarger);
  max_leaks = max_leaks > 0 ? Min(max_leaks, leaks_.size()) : leaks_.size();
  for (uptr i = 0; i < max_leaks; i++) {
    Printf("%s leak of %zu byte(s) in %zu object(s) allocated from:\n",
           leaks_[i].is_directly_leaked ? "Direct" : "Indirect",
           leaks_[i].total_size, leaks_[i].hit_count);
    PrintStackTraceById(leaks_[i].stack_trace_id);
    Printf("\n");
  }
  if (max_leaks < leaks_.size()) {
    uptr remaining = leaks_.size() - max_leaks;
    Printf("Omitting %zu more leak(s).\n", remaining);
  }
}

void LeakReport::PrintSummary() {
  CHECK(leaks_.size() <= kMaxLeaksConsidered);
  uptr bytes = 0, allocations = 0;
  for (uptr i = 0; i < leaks_.size(); i++) {
    bytes += leaks_[i].total_size;
    allocations += leaks_[i].hit_count;
  }
  Printf(
      "SUMMARY: LeakSanitizer: %zu byte(s) leaked in %zu allocation(s).\n\n",
      bytes, allocations);
}
}  // namespace __lsan
#endif  // CAN_SANITIZE_LEAKS

using namespace __lsan;  // NOLINT

extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_ignore_object(const void *p) {
#if CAN_SANITIZE_LEAKS
  // Cannot use PointsIntoChunk or LsanMetadata here, since the allocator is
  // not locked.
  BlockingMutexLock l(&global_mutex);
  IgnoreObjectResult res = IgnoreObjectLocked(p);
  if (res == kIgnoreObjectInvalid && flags()->verbosity >= 1)
    Report("__lsan_ignore_object(): no heap object found at %p\n", p);
  if (res == kIgnoreObjectAlreadyIgnored && flags()->verbosity >= 1)
    Report("__lsan_ignore_object(): "
           "heap object at %p is already being ignored\n", p);
  if (res == kIgnoreObjectSuccess && flags()->verbosity >= 2)
    Report("__lsan_ignore_object(): ignoring heap object at %p\n", p);
#endif  // CAN_SANITIZE_LEAKS
}

SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_disable() {
#if CAN_SANITIZE_LEAKS
  __lsan::disable_counter++;
#endif
}

SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_enable() {
#if CAN_SANITIZE_LEAKS
  if (!__lsan::disable_counter) {
    Report("Unmatched call to __lsan_enable().\n");
    Die();
  }
  __lsan::disable_counter--;
#endif
}

#if !SANITIZER_SUPPORTS_WEAK_HOOKS
SANITIZER_WEAK_ATTRIBUTE SANITIZER_INTERFACE_ATTRIBUTE
int __lsan_is_turned_off() {
  return 0;
}
#endif
}  // extern "C"
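
// Usage sketch (hypothetical client code, not part of this file): how a
// program typically drives the interface functions defined above. The
// declarations are assumed to come from compiler-rt's public header
// <sanitizer/lsan_interface.h>.
#if 0
#include <sanitizer/lsan_interface.h>
#include <stdlib.h>

int main() {
  __lsan_disable();               // allocations in this window are not reported
  void *suppressed = malloc(32);  // tagged as ignored, never shows up in output
  __lsan_enable();                // must pair with the __lsan_disable() above

  void *ignored = malloc(64);
  __lsan_ignore_object(ignored);  // suppress this one object only

  void *leaked = malloc(128);     // reported at exit as a direct leak
  (void)suppressed; (void)ignored; (void)leaked;
  return 0;                       // DoLeakCheck() runs at process exit
}
#endif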