lsan_common.cc revision c085fe807836b566b4fce7ccdfb307bb025693c5
//=-- lsan_common.cc ------------------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of LeakSanitizer.
// Implementation of common leak checking functionality.
//
//===----------------------------------------------------------------------===//

#include "lsan_common.h"

#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_stacktrace.h"
#include "sanitizer_common/sanitizer_stoptheworld.h"

#if CAN_SANITIZE_LEAKS
namespace __lsan {

// This mutex is used to prevent races between DoLeakCheck and
// __lsan_ignore_object().
BlockingMutex global_mutex(LINKER_INITIALIZED);

Flags lsan_flags;

static void InitializeFlags() {
  Flags *f = flags();
  // Default values.
  f->report_objects = false;
  f->resolution = 0;
  f->max_leaks = 0;
  f->exitcode = 23;
  f->use_registers = true;
  f->use_globals = true;
  f->use_stacks = true;
  f->use_tls = true;
  f->use_unaligned = false;
  f->verbosity = 0;
  f->log_pointers = false;
  f->log_threads = false;

  const char *options = GetEnv("LSAN_OPTIONS");
  if (options) {
    ParseFlag(options, &f->use_registers, "use_registers");
    ParseFlag(options, &f->use_globals, "use_globals");
    ParseFlag(options, &f->use_stacks, "use_stacks");
    ParseFlag(options, &f->use_tls, "use_tls");
    ParseFlag(options, &f->use_unaligned, "use_unaligned");
    ParseFlag(options, &f->report_objects, "report_objects");
    ParseFlag(options, &f->resolution, "resolution");
    CHECK_GE(f->resolution, 0);
    ParseFlag(options, &f->max_leaks, "max_leaks");
    CHECK_GE(f->max_leaks, 0);
    ParseFlag(options, &f->verbosity, "verbosity");
    ParseFlag(options, &f->log_pointers, "log_pointers");
    ParseFlag(options, &f->log_threads, "log_threads");
    ParseFlag(options, &f->exitcode, "exitcode");
  }
}

void InitCommonLsan() {
  InitializeFlags();
  InitializePlatformSpecificModules();
}

static inline bool CanBeAHeapPointer(uptr p) {
  // Since our heap is located in mmap-ed memory, we can assume a sensible
  // lower boundary on heap addresses.
  const uptr kMinAddress = 4 * 4096;
  if (p < kMinAddress) return false;
#ifdef __x86_64__
  // Accept only canonical-form user-space addresses.
  return ((p >> 47) == 0);
#else
  return true;
#endif
}

// Scan the memory range, looking for byte patterns that point into allocator
// chunks. Mark those chunks with tag and add them to the frontier.
// There are two usage modes for this function: finding reachable or ignored
// chunks (tag = kReachable or kIgnored) and finding indirectly leaked chunks
// (tag = kIndirectlyLeaked). In the second case, there's no flood fill,
// so frontier = 0.
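//
// A hypothetical illustration of the first mode (mirroring what
// ClassifyAllChunks() does below; region_begin/region_end are placeholder
// names, not identifiers from this file):
//   Frontier frontier(GetPageSizeCached());
//   ScanRangeForPointers(region_begin, region_end, &frontier, "ROOT",
//                        kReachable);
//   FloodFillTag(&frontier, kReachable);  // Propagate tags through the heap.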
void ScanRangeForPointers(uptr begin, uptr end,
                          Frontier *frontier,
                          const char *region_type, ChunkTag tag) {
  const uptr alignment = flags()->pointer_alignment();
  if (flags()->log_pointers)
    Report("Scanning %s range %p-%p.\n", region_type, begin, end);
  uptr pp = begin;
  if (pp % alignment)
    pp = pp + alignment - pp % alignment;
  for (; pp + sizeof(uptr) <= end; pp += alignment) {
    void *p = *reinterpret_cast<void**>(pp);
    if (!CanBeAHeapPointer(reinterpret_cast<uptr>(p))) continue;
    void *chunk = PointsIntoChunk(p);
    if (!chunk) continue;
    LsanMetadata m(chunk);
    // Reachable beats ignored beats leaked.
    if (m.tag() == kReachable) continue;
    if (m.tag() == kIgnored && tag != kReachable) continue;
    m.set_tag(tag);
    if (flags()->log_pointers)
      Report("%p: found %p pointing into chunk %p-%p of size %llu.\n", pp, p,
             chunk, reinterpret_cast<uptr>(chunk) + m.requested_size(),
             m.requested_size());
    if (frontier)
      frontier->push_back(reinterpret_cast<uptr>(chunk));
  }
}

// Scan thread data (stacks and TLS) for heap pointers.
static void ProcessThreads(SuspendedThreadsList const &suspended_threads,
                           Frontier *frontier) {
  InternalScopedBuffer<uptr> registers(SuspendedThreadsList::RegisterCount());
  uptr registers_begin = reinterpret_cast<uptr>(registers.data());
  uptr registers_end = registers_begin + registers.size();
  for (uptr i = 0; i < suspended_threads.thread_count(); i++) {
    uptr os_id = static_cast<uptr>(suspended_threads.GetThreadID(i));
    if (flags()->log_threads) Report("Processing thread %d.\n", os_id);
    uptr stack_begin, stack_end, tls_begin, tls_end, cache_begin, cache_end;
    bool thread_found = GetThreadRangesLocked(os_id, &stack_begin, &stack_end,
                                              &tls_begin, &tls_end,
                                              &cache_begin, &cache_end);
    if (!thread_found) {
      // If a thread can't be found in the thread registry, it's probably in
      // the process of destruction. Log this event and move on.
      if (flags()->log_threads)
        Report("Thread %d not found in registry.\n", os_id);
      continue;
    }
    uptr sp;
    bool have_registers =
        (suspended_threads.GetRegistersAndSP(i, registers.data(), &sp) == 0);
    if (!have_registers) {
      Report("Unable to get registers from thread %d.\n", os_id);
      // If unable to get SP, consider the entire stack to be reachable.
      sp = stack_begin;
    }

    if (flags()->use_registers && have_registers)
      ScanRangeForPointers(registers_begin, registers_end, frontier,
                           "REGISTERS", kReachable);

    if (flags()->use_stacks) {
      if (flags()->log_threads)
        Report("Stack at %p-%p, SP = %p.\n", stack_begin, stack_end, sp);
      if (sp < stack_begin || sp >= stack_end) {
        // SP is outside the recorded stack range (e.g. the thread is running
        // a signal handler on an alternate stack). Again, consider the entire
        // stack range to be reachable.
        if (flags()->log_threads)
          Report("WARNING: stack_pointer not in stack_range.\n");
      } else {
        // Shrink the stack range to ignore out-of-scope values.
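        // (Memory below SP belongs to frames that have already returned, so
        // scanning it could keep stale pointers falsely "reachable". This
        // assumes a downward-growing stack, which holds on the platforms
        // LSan supports.)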
        stack_begin = sp;
      }
      ScanRangeForPointers(stack_begin, stack_end, frontier, "STACK",
                           kReachable);
    }

    if (flags()->use_tls) {
      if (flags()->log_threads) Report("TLS at %p-%p.\n", tls_begin, tls_end);
      if (cache_begin == cache_end) {
        ScanRangeForPointers(tls_begin, tls_end, frontier, "TLS", kReachable);
      } else {
        // Because LSan should not be loaded with dlopen(), we can assume
        // that the allocator cache will be part of the static TLS image.
        CHECK_LE(tls_begin, cache_begin);
        CHECK_GE(tls_end, cache_end);
        if (tls_begin < cache_begin)
          ScanRangeForPointers(tls_begin, cache_begin, frontier, "TLS",
                               kReachable);
        if (tls_end > cache_end)
          ScanRangeForPointers(cache_end, tls_end, frontier, "TLS", kReachable);
      }
    }
  }
}

static void FloodFillTag(Frontier *frontier, ChunkTag tag) {
  while (frontier->size()) {
    uptr next_chunk = frontier->back();
    frontier->pop_back();
    LsanMetadata m(reinterpret_cast<void *>(next_chunk));
    ScanRangeForPointers(next_chunk, next_chunk + m.requested_size(), frontier,
                         "HEAP", tag);
  }
}

// Mark leaked chunks which are reachable from other leaked chunks.
void MarkIndirectlyLeakedCb::operator()(void *p) const {
  p = GetUserBegin(p);
  LsanMetadata m(p);
  if (m.allocated() && m.tag() != kReachable) {
    ScanRangeForPointers(reinterpret_cast<uptr>(p),
                         reinterpret_cast<uptr>(p) + m.requested_size(),
                         /* frontier */ 0, "HEAP", kIndirectlyLeaked);
  }
}

void CollectSuppressedCb::operator()(void *p) const {
  p = GetUserBegin(p);
  LsanMetadata m(p);
  if (m.allocated() && m.tag() == kIgnored)
    frontier_->push_back(reinterpret_cast<uptr>(p));
}

// Set the appropriate tag on each chunk.
static void ClassifyAllChunks(SuspendedThreadsList const &suspended_threads) {
  // Holds the flood fill frontier.
  Frontier frontier(GetPageSizeCached());

  if (flags()->use_globals)
    ProcessGlobalRegions(&frontier);
  ProcessThreads(suspended_threads, &frontier);
  FloodFillTag(&frontier, kReachable);
  // The check here is relatively expensive, so we do this in a separate flood
  // fill. That way we can skip the check for chunks that are reachable
  // otherwise.
  ProcessPlatformSpecificAllocations(&frontier);
  FloodFillTag(&frontier, kReachable);

  if (flags()->log_pointers)
    Report("Scanning ignored chunks.\n");
  CHECK_EQ(0, frontier.size());
  ForEachChunk(CollectSuppressedCb(&frontier));
  FloodFillTag(&frontier, kIgnored);

  // Iterate over leaked chunks and mark those that are reachable from other
  // leaked chunks.
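  // (A chunk reachable only through leaked chunks is itself lost to the
  // program; tagging it kIndirectlyLeaked lets the report single out the
  // root leaks.)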
  if (flags()->log_pointers)
    Report("Scanning leaked chunks.\n");
  ForEachChunk(MarkIndirectlyLeakedCb());
}

static void PrintStackTraceById(u32 stack_trace_id) {
  CHECK(stack_trace_id);
  uptr size = 0;
  const uptr *trace = StackDepotGet(stack_trace_id, &size);
  StackTrace::PrintStack(trace, size, common_flags()->symbolize,
                         common_flags()->strip_path_prefix, 0);
}

void CollectLeaksCb::operator()(void *p) const {
  p = GetUserBegin(p);
  LsanMetadata m(p);
  if (!m.allocated()) return;
  if (m.tag() == kDirectlyLeaked || m.tag() == kIndirectlyLeaked) {
    uptr resolution = flags()->resolution;
    if (resolution > 0) {
      uptr size = 0;
      const uptr *trace = StackDepotGet(m.stack_trace_id(), &size);
      size = Min(size, resolution);
      leak_report_->Add(StackDepotPut(trace, size), m.requested_size(),
                        m.tag());
    } else {
      leak_report_->Add(m.stack_trace_id(), m.requested_size(), m.tag());
    }
  }
}

static void CollectLeaks(LeakReport *leak_report) {
  ForEachChunk(CollectLeaksCb(leak_report));
}

void PrintLeakedCb::operator()(void *p) const {
  p = GetUserBegin(p);
  LsanMetadata m(p);
  if (!m.allocated()) return;
  if (m.tag() == kDirectlyLeaked || m.tag() == kIndirectlyLeaked) {
    Printf("%s leaked %llu byte object at %p.\n",
           m.tag() == kDirectlyLeaked ? "Directly" : "Indirectly",
           m.requested_size(), p);
  }
}

static void PrintLeaked() {
  Printf("\n");
  Printf("Reporting individual objects:\n");
  ForEachChunk(PrintLeakedCb());
}

struct DoLeakCheckParam {
  bool success;
  LeakReport leak_report;
};

static void DoLeakCheckCallback(const SuspendedThreadsList &suspended_threads,
                                void *arg) {
  DoLeakCheckParam *param = reinterpret_cast<DoLeakCheckParam *>(arg);
  CHECK(param);
  CHECK(!param->success);
  CHECK(param->leak_report.IsEmpty());
  ClassifyAllChunks(suspended_threads);
  CollectLeaks(&param->leak_report);
  if (!param->leak_report.IsEmpty() && flags()->report_objects)
    PrintLeaked();
  param->success = true;
}

void DoLeakCheck() {
  BlockingMutexLock l(&global_mutex);
  static bool already_done;
  CHECK(!already_done);
  already_done = true;

  DoLeakCheckParam param;
  param.success = false;
  LockThreadRegistry();
  LockAllocator();
  StopTheWorld(DoLeakCheckCallback, &param);
  UnlockAllocator();
  UnlockThreadRegistry();

  if (!param.success) {
    Report("LeakSanitizer has encountered a fatal error.\n");
    Die();
  }
  if (!param.leak_report.IsEmpty()) {
    Printf("\n================================================================="
           "\n");
    Report("ERROR: LeakSanitizer: detected memory leaks\n");
    param.leak_report.PrintLargest(flags()->max_leaks);
    param.leak_report.PrintSummary();
    if (flags()->exitcode)
      internal__exit(flags()->exitcode);
  }
}

///// LeakReport implementation. /////

// A hard limit on the number of distinct leaks, to avoid quadratic complexity
// in LeakReport::Add(). We don't expect to ever see this many leaks in
// real-world applications.
// FIXME: Get rid of this limit by changing the implementation of LeakReport to
// use a hash table.
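// (Add() below does a linear scan of leaks_ keyed on (stack_trace_id,
// is_directly_leaked), so each insertion costs at most kMaxLeaksConsidered
// comparisons.)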
const uptr kMaxLeaksConsidered = 1000;

void LeakReport::Add(u32 stack_trace_id, uptr leaked_size, ChunkTag tag) {
  CHECK(tag == kDirectlyLeaked || tag == kIndirectlyLeaked);
  bool is_directly_leaked = (tag == kDirectlyLeaked);
  for (uptr i = 0; i < leaks_.size(); i++)
    if (leaks_[i].stack_trace_id == stack_trace_id &&
        leaks_[i].is_directly_leaked == is_directly_leaked) {
      leaks_[i].hit_count++;
      leaks_[i].total_size += leaked_size;
      return;
    }
  if (leaks_.size() == kMaxLeaksConsidered) return;
  Leak leak = { /* hit_count */ 1, leaked_size, stack_trace_id,
                is_directly_leaked };
  leaks_.push_back(leak);
}

static bool IsLarger(const Leak &leak1, const Leak &leak2) {
  return leak1.total_size > leak2.total_size;
}

void LeakReport::PrintLargest(uptr max_leaks) {
  CHECK(leaks_.size() <= kMaxLeaksConsidered);
  Printf("\n");
  if (leaks_.size() == kMaxLeaksConsidered)
    Printf("Too many leaks! Only the first %llu leaks encountered will be "
           "reported.\n",
           kMaxLeaksConsidered);
  if (max_leaks > 0 && max_leaks < leaks_.size())
    Printf("The %llu largest leak(s):\n", max_leaks);
  InternalSort(&leaks_, leaks_.size(), IsLarger);
  max_leaks = max_leaks > 0 ? Min(max_leaks, leaks_.size()) : leaks_.size();
  for (uptr i = 0; i < max_leaks; i++) {
    Printf("%s leak of %llu byte(s) in %llu object(s) allocated from:\n",
           leaks_[i].is_directly_leaked ? "Direct" : "Indirect",
           leaks_[i].total_size, leaks_[i].hit_count);
    PrintStackTraceById(leaks_[i].stack_trace_id);
    Printf("\n");
  }
  if (max_leaks < leaks_.size()) {
    uptr remaining = leaks_.size() - max_leaks;
    Printf("Omitting %llu more leak(s).\n", remaining);
  }
}

void LeakReport::PrintSummary() {
  CHECK(leaks_.size() <= kMaxLeaksConsidered);
  uptr bytes = 0, allocations = 0;
  for (uptr i = 0; i < leaks_.size(); i++) {
    bytes += leaks_[i].total_size;
    allocations += leaks_[i].hit_count;
  }
  Printf(
      "SUMMARY: LeakSanitizer: %llu byte(s) leaked in %llu allocation(s).\n\n",
      bytes, allocations);
}

}  // namespace __lsan

using namespace __lsan;  // NOLINT

extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_ignore_object(const void *p) {
  // Cannot use PointsIntoChunk or LsanMetadata here, since the allocator is
  // not locked.
  BlockingMutexLock l(&global_mutex);
  IgnoreObjectResult res = IgnoreObjectLocked(p);
  if (res == kIgnoreObjectInvalid && flags()->verbosity >= 1)
    Report("__lsan_ignore_object(): no heap object found at %p\n", p);
  if (res == kIgnoreObjectAlreadyIgnored && flags()->verbosity >= 1)
    Report("__lsan_ignore_object(): "
           "heap object at %p is already being ignored\n", p);
  if (res == kIgnoreObjectSuccess && flags()->verbosity >= 2)
    Report("__lsan_ignore_object(): ignoring heap object at %p\n", p);
}
}  // extern "C"
#endif  // CAN_SANITIZE_LEAKS
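
// Usage sketch for __lsan_ignore_object() (hypothetical client code; the
// cache object and kCacheSize constant are illustrative, not part of this
// file):
//   void *cache = malloc(kCacheSize);  // Intentionally lives until exit.
//   __lsan_ignore_object(cache);       // Exclude it from leak reports.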