lsan_common.cc revision b3b46dad13a2111a51fb1a67f36c8b633410e9b7
//=-- lsan_common.cc ------------------------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of LeakSanitizer.
// Implementation of common leak checking functionality.
//
//===----------------------------------------------------------------------===//

#include "lsan_common.h"

#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_stacktrace.h"
#include "sanitizer_common/sanitizer_stoptheworld.h"

#if CAN_SANITIZE_LEAKS
namespace __lsan {

// This mutex is used to prevent races between DoLeakCheck and
// __lsan_ignore_object().
BlockingMutex global_mutex(LINKER_INITIALIZED);

Flags lsan_flags;

static void InitializeFlags() {
  Flags *f = flags();
  // Default values.
  f->report_objects = false;
  f->resolution = 0;
  f->max_leaks = 0;
  f->exitcode = 23;
  f->use_registers = true;
  f->use_globals = true;
  f->use_stacks = true;
  f->use_tls = true;
  f->use_unaligned = false;
  f->verbosity = 0;
  f->log_pointers = false;
  f->log_threads = false;

  const char *options = GetEnv("LSAN_OPTIONS");
  if (options) {
    ParseFlag(options, &f->use_registers, "use_registers");
    ParseFlag(options, &f->use_globals, "use_globals");
    ParseFlag(options, &f->use_stacks, "use_stacks");
    ParseFlag(options, &f->use_tls, "use_tls");
    ParseFlag(options, &f->use_unaligned, "use_unaligned");
    ParseFlag(options, &f->report_objects, "report_objects");
    ParseFlag(options, &f->resolution, "resolution");
    CHECK_GE(f->resolution, 0);
    ParseFlag(options, &f->max_leaks, "max_leaks");
    CHECK_GE(f->max_leaks, 0);
    ParseFlag(options, &f->verbosity, "verbosity");
    ParseFlag(options, &f->log_pointers, "log_pointers");
    ParseFlag(options, &f->log_threads, "log_threads");
    ParseFlag(options, &f->exitcode, "exitcode");
  }
}
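
// Example (illustrative, flag values arbitrary): the flags above are read
// from the LSAN_OPTIONS environment variable using the usual sanitizer
// name=value syntax; flags that are not mentioned keep their defaults.
//
//   LSAN_OPTIONS="report_objects=1:max_leaks=5:log_pointers=1" ./a.out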

void InitCommonLsan() {
  InitializeFlags();
  InitializePlatformSpecificModules();
}

static inline bool CanBeAHeapPointer(uptr p) {
  // Since our heap is located in mmap-ed memory, we can assume a sensible
  // lower boundary on heap addresses.
  const uptr kMinAddress = 4 * 4096;
  if (p < kMinAddress) return false;
#ifdef __x86_64__
  // Accept only canonical form user-space addresses.
  return ((p >> 47) == 0);
#else
  return true;
#endif
}

// Scan the memory range, looking for byte patterns that point into allocator
// chunks. Mark those chunks with tag and add them to the frontier.
// There are two usage modes for this function: finding reachable or suppressed
// chunks (tag = kReachable or kIgnored) and finding indirectly leaked chunks
// (tag = kIndirectlyLeaked). In the second case, there's no flood fill,
// so frontier = 0.
void ScanRangeForPointers(uptr begin, uptr end, InternalVector<uptr> *frontier,
                          const char *region_type, ChunkTag tag) {
  const uptr alignment = flags()->pointer_alignment();
  if (flags()->log_pointers)
    Report("Scanning %s range %p-%p.\n", region_type, begin, end);
  uptr pp = begin;
  if (pp % alignment)
    pp = pp + alignment - pp % alignment;
  for (; pp + sizeof(uptr) <= end; pp += alignment) {
    void *p = *reinterpret_cast<void**>(pp);
    if (!CanBeAHeapPointer(reinterpret_cast<uptr>(p))) continue;
    void *chunk = PointsIntoChunk(p);
    if (!chunk) continue;
    LsanMetadata m(chunk);
    // Reachable beats suppressed beats leaked.
    if (m.tag() == kReachable) continue;
    if (m.tag() == kIgnored && tag != kReachable) continue;
    m.set_tag(tag);
    if (flags()->log_pointers)
      Report("%p: found %p pointing into chunk %p-%p of size %llu.\n", pp, p,
             chunk, reinterpret_cast<uptr>(chunk) + m.requested_size(),
             m.requested_size());
    if (frontier)
      frontier->push_back(reinterpret_cast<uptr>(chunk));
  }
}
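
// The two usage modes described above correspond to calls made elsewhere in
// this file; roughly (chunk_beg and chunk_end are illustrative names for a
// chunk's bounds):
//
//   // Mode 1: flood fill. Newly tagged chunks are queued on the frontier.
//   ScanRangeForPointers(stack_begin, stack_end, frontier, "STACK",
//                        kReachable);
//   // Mode 2: no flood fill. Tag the pointees of a single chunk and stop.
//   ScanRangeForPointers(chunk_beg, chunk_end, /* frontier */ 0, "HEAP",
//                        kIndirectlyLeaked);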

// Scan thread data (stacks and TLS) for heap pointers.
static void ProcessThreads(SuspendedThreadsList const &suspended_threads,
                           InternalVector<uptr> *frontier) {
  InternalScopedBuffer<uptr> registers(SuspendedThreadsList::RegisterCount());
  uptr registers_begin = reinterpret_cast<uptr>(registers.data());
  uptr registers_end = registers_begin + registers.size();
  for (uptr i = 0; i < suspended_threads.thread_count(); i++) {
    uptr os_id = static_cast<uptr>(suspended_threads.GetThreadID(i));
    if (flags()->log_threads) Report("Processing thread %d.\n", os_id);
    uptr stack_begin, stack_end, tls_begin, tls_end, cache_begin, cache_end;
    bool thread_found = GetThreadRangesLocked(os_id, &stack_begin, &stack_end,
                                              &tls_begin, &tls_end,
                                              &cache_begin, &cache_end);
    if (!thread_found) {
      // If a thread can't be found in the thread registry, it's probably in
      // the process of destruction. Log this event and move on.
      if (flags()->log_threads)
        Report("Thread %d not found in registry.\n", os_id);
      continue;
    }
    uptr sp;
    bool have_registers =
        (suspended_threads.GetRegistersAndSP(i, registers.data(), &sp) == 0);
    if (!have_registers) {
      Report("Unable to get registers from thread %d.\n", os_id);
      // If unable to get SP, consider the entire stack to be reachable.
      sp = stack_begin;
    }

    if (flags()->use_registers && have_registers)
      ScanRangeForPointers(registers_begin, registers_end, frontier,
                           "REGISTERS", kReachable);

    if (flags()->use_stacks) {
      if (flags()->log_threads)
        Report("Stack at %p-%p, SP = %p.\n", stack_begin, stack_end, sp);
      if (sp < stack_begin || sp >= stack_end) {
        // SP is outside the recorded stack range (e.g. the thread is running
        // a signal handler on an alternate stack). Again, consider the entire
        // stack range to be reachable.
        if (flags()->log_threads)
          Report("WARNING: stack pointer not in stack range.\n");
      } else {
        // Shrink the stack range to ignore out-of-scope values.
        stack_begin = sp;
      }
      ScanRangeForPointers(stack_begin, stack_end, frontier, "STACK",
                           kReachable);
    }

    if (flags()->use_tls) {
      if (flags()->log_threads) Report("TLS at %p-%p.\n", tls_begin, tls_end);
      if (cache_begin == cache_end) {
        ScanRangeForPointers(tls_begin, tls_end, frontier, "TLS", kReachable);
      } else {
        // Because LSan should not be loaded with dlopen(), we can assume that
        // the allocator cache will be part of the static TLS image.
        CHECK_LE(tls_begin, cache_begin);
        CHECK_GE(tls_end, cache_end);
        // Scan the TLS parts that surround the allocator cache, skipping the
        // cache itself.
        if (tls_begin < cache_begin)
          ScanRangeForPointers(tls_begin, cache_begin, frontier, "TLS",
                               kReachable);
        if (tls_end > cache_end)
          ScanRangeForPointers(cache_end, tls_end, frontier, "TLS", kReachable);
      }
    }
  }
}

static void FloodFillTag(InternalVector<uptr> *frontier, ChunkTag tag) {
  while (frontier->size()) {
    uptr next_chunk = frontier->back();
    frontier->pop_back();
    LsanMetadata m(reinterpret_cast<void *>(next_chunk));
    ScanRangeForPointers(next_chunk, next_chunk + m.requested_size(), frontier,
                         "HEAP", tag);
  }
}
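
// Illustrative walk-through of the flood fill: suppose chunk A is pointed to
// from a thread stack and holds the only pointer to chunk B. ProcessThreads()
// tags A kReachable and pushes it onto the frontier; FloodFillTag() then pops
// A, and ScanRangeForPointers() finds the pointer to B inside A, tags B
// kReachable and pushes it in turn. The loop repeats until the frontier is
// empty, i.e. until the reachable set is closed under pointer traversal.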

// Mark leaked chunks which are reachable from other leaked chunks.
void MarkIndirectlyLeakedCb::operator()(void *p) const {
  p = GetUserBegin(p);
  LsanMetadata m(p);
  if (m.allocated() && m.tag() != kReachable) {
    ScanRangeForPointers(reinterpret_cast<uptr>(p),
                         reinterpret_cast<uptr>(p) + m.requested_size(),
                         /* frontier */ 0, "HEAP", kIndirectlyLeaked);
  }
}

void CollectSuppressedCb::operator()(void *p) const {
  p = GetUserBegin(p);
  LsanMetadata m(p);
  if (m.allocated() && m.tag() == kIgnored)
    frontier_->push_back(reinterpret_cast<uptr>(p));
}

// Set the appropriate tag on each chunk.
static void ClassifyAllChunks(SuspendedThreadsList const &suspended_threads) {
  // Holds the flood fill frontier.
  InternalVector<uptr> frontier(GetPageSizeCached());

  if (flags()->use_globals)
    ProcessGlobalRegions(&frontier);
  ProcessThreads(suspended_threads, &frontier);
  FloodFillTag(&frontier, kReachable);
  // The check here is relatively expensive, so we do this in a separate flood
  // fill. That way we can skip the check for chunks that are reachable
  // otherwise.
  ProcessPlatformSpecificAllocations(&frontier);
  FloodFillTag(&frontier, kReachable);

  if (flags()->log_pointers)
    Report("Scanning ignored chunks.\n");
  CHECK_EQ(0, frontier.size());
  ForEachChunk(CollectSuppressedCb(&frontier));
  FloodFillTag(&frontier, kIgnored);

  // Iterate over leaked chunks and mark those that are reachable from other
  // leaked chunks.
  if (flags()->log_pointers)
    Report("Scanning leaked chunks.\n");
  ForEachChunk(MarkIndirectlyLeakedCb());
}

static void PrintStackTraceById(u32 stack_trace_id) {
  CHECK(stack_trace_id);
  uptr size = 0;
  const uptr *trace = StackDepotGet(stack_trace_id, &size);
  StackTrace::PrintStack(trace, size, common_flags()->symbolize,
                         common_flags()->strip_path_prefix, 0);
}

void CollectLeaksCb::operator()(void *p) const {
  p = GetUserBegin(p);
  LsanMetadata m(p);
  if (!m.allocated()) return;
  if (m.tag() == kDirectlyLeaked || m.tag() == kIndirectlyLeaked) {
    uptr resolution = flags()->resolution;
    if (resolution > 0) {
      // Truncate the stack trace to |resolution| frames, so that leaks whose
      // truncated traces coincide are merged in the report.
      uptr size = 0;
      const uptr *trace = StackDepotGet(m.stack_trace_id(), &size);
      size = Min(size, resolution);
      leak_report_->Add(StackDepotPut(trace, size), m.requested_size(),
                        m.tag());
    } else {
      leak_report_->Add(m.stack_trace_id(), m.requested_size(), m.tag());
    }
  }
}

static void CollectLeaks(LeakReport *leak_report) {
  ForEachChunk(CollectLeaksCb(leak_report));
}

void PrintLeakedCb::operator()(void *p) const {
  p = GetUserBegin(p);
  LsanMetadata m(p);
  if (!m.allocated()) return;
  if (m.tag() == kDirectlyLeaked || m.tag() == kIndirectlyLeaked) {
    Printf("%s leaked %llu byte object at %p\n",
           m.tag() == kDirectlyLeaked ? "Directly" : "Indirectly",
           m.requested_size(), p);
  }
}

static void PrintLeaked() {
  Printf("Reporting individual objects:\n");
  Printf("============================\n");
  ForEachChunk(PrintLeakedCb());
  Printf("\n");
}

enum LeakCheckResult {
  kFatalError,
  kLeaksFound,
  kNoLeaks
};

static void DoLeakCheckCallback(const SuspendedThreadsList &suspended_threads,
                                void *arg) {
  LeakCheckResult *result = reinterpret_cast<LeakCheckResult *>(arg);
  CHECK_EQ(*result, kFatalError);
  ClassifyAllChunks(suspended_threads);
  LeakReport leak_report;
  CollectLeaks(&leak_report);
  if (leak_report.IsEmpty()) {
    *result = kNoLeaks;
    return;
  }
  Printf("\n");
  Printf("=================================================================\n");
  Report("ERROR: LeakSanitizer: detected memory leaks\n");
  leak_report.PrintLargest(flags()->max_leaks);
  if (flags()->report_objects)
    PrintLeaked();
  leak_report.PrintSummary();
  Printf("\n");
  *result = kLeaksFound;
}

void DoLeakCheck() {
  BlockingMutexLock l(&global_mutex);
  static bool already_done;
  CHECK(!already_done);
  already_done = true;
  LeakCheckResult result = kFatalError;
  LockThreadRegistry();
  LockAllocator();
  StopTheWorld(DoLeakCheckCallback, &result);
  UnlockAllocator();
  UnlockThreadRegistry();
  if (result == kFatalError) {
    Report("LeakSanitizer has encountered a fatal error.\n");
    Die();
  } else if (result == kLeaksFound) {
    if (flags()->exitcode)
      internal__exit(flags()->exitcode);
  }
}
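
// Note on usage: DoLeakCheck() is one-shot (the CHECK above aborts on a
// second call); in standalone LSan it is expected to run once, near process
// exit. A minimal sketch of how the runtime might arrange that (AtexitHook is
// an illustrative name, assuming sanitizer_common's Atexit()):
//
//   static void AtexitHook() {
//     DoLeakCheck();  // Stop the world, classify chunks, report leaks.
//   }
//   // During initialization: Atexit(AtexitHook);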

///// LeakReport implementation. /////

// A hard limit on the number of distinct leaks, to avoid quadratic complexity
// in LeakReport::Add(). We don't expect to ever see this many leaks in
// real-world applications.
// FIXME: Get rid of this limit by changing the implementation of LeakReport to
// use a hash table.
const uptr kMaxLeaksConsidered = 1000;

void LeakReport::Add(u32 stack_trace_id, uptr leaked_size, ChunkTag tag) {
  CHECK(tag == kDirectlyLeaked || tag == kIndirectlyLeaked);
  bool is_directly_leaked = (tag == kDirectlyLeaked);
  // Merge with an existing leak that has the same allocation stack and tag.
  for (uptr i = 0; i < leaks_.size(); i++)
    if (leaks_[i].stack_trace_id == stack_trace_id &&
        leaks_[i].is_directly_leaked == is_directly_leaked) {
      leaks_[i].hit_count++;
      leaks_[i].total_size += leaked_size;
      return;
    }
  if (leaks_.size() == kMaxLeaksConsidered) return;
  Leak leak = { /* hit_count */ 1, leaked_size, stack_trace_id,
                is_directly_leaked };
  leaks_.push_back(leak);
}

static bool IsLarger(const Leak &leak1, const Leak &leak2) {
  return leak1.total_size > leak2.total_size;
}

void LeakReport::PrintLargest(uptr max_leaks) {
  CHECK(leaks_.size() <= kMaxLeaksConsidered);
  Printf("\n");
  if (leaks_.size() == kMaxLeaksConsidered)
    Printf("Too many leaks! Only the first %llu leaks encountered will be "
           "reported.\n",
           kMaxLeaksConsidered);
  if (max_leaks > 0 && max_leaks < leaks_.size())
    Printf("The %llu largest leak(s):\n", max_leaks);
  InternalSort(&leaks_, leaks_.size(), IsLarger);
  max_leaks = max_leaks > 0 ? Min(max_leaks, leaks_.size()) : leaks_.size();
  for (uptr i = 0; i < max_leaks; i++) {
    Printf("%s leak of %llu byte(s) in %llu object(s) allocated from:\n",
           leaks_[i].is_directly_leaked ? "Direct" : "Indirect",
           leaks_[i].total_size, leaks_[i].hit_count);
    PrintStackTraceById(leaks_[i].stack_trace_id);
    Printf("\n");
  }
  if (max_leaks < leaks_.size()) {
    uptr remaining = leaks_.size() - max_leaks;
    Printf("Omitting %llu more leak(s).\n", remaining);
  }
}

void LeakReport::PrintSummary() {
  CHECK(leaks_.size() <= kMaxLeaksConsidered);
  uptr bytes = 0, allocations = 0;
  for (uptr i = 0; i < leaks_.size(); i++) {
    bytes += leaks_[i].total_size;
    allocations += leaks_[i].hit_count;
  }
  Printf("SUMMARY: LeakSanitizer: %llu byte(s) leaked in %llu allocation(s).\n",
         bytes, allocations);
}

}  // namespace __lsan

using namespace __lsan;  // NOLINT

extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_ignore_object(const void *p) {
  // Cannot use PointsIntoChunk or LsanMetadata here, since the allocator is
  // not locked.
  BlockingMutexLock l(&global_mutex);
  IgnoreObjectResult res = IgnoreObjectLocked(p);
  if (res == kIgnoreObjectInvalid && flags()->verbosity >= 1)
    Report("__lsan_ignore_object(): no heap object found at %p\n", p);
  if (res == kIgnoreObjectAlreadyIgnored && flags()->verbosity >= 1)
    Report("__lsan_ignore_object(): "
           "heap object at %p is already being ignored\n", p);
  if (res == kIgnoreObjectSuccess && flags()->verbosity >= 2)
    Report("__lsan_ignore_object(): ignoring heap object at %p\n", p);
}
}  // extern "C"
#endif  // CAN_SANITIZE_LEAKS
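
// Usage sketch (client code, not part of this file), assuming the public
// header <sanitizer/lsan_interface.h> that declares __lsan_ignore_object():
//
//   #include <sanitizer/lsan_interface.h>
//
//   void *p = malloc(16);     // intentionally never freed
//   __lsan_ignore_object(p);  // suppress any leak report for this object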