lsan_common.cc revision ebe3a3608be122e799e264931f9cecf4cbc84edd
//=-- lsan_common.cc ------------------------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of LeakSanitizer.
// Implementation of common leak checking functionality.
//
//===----------------------------------------------------------------------===//

#include "lsan_common.h"

#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_stacktrace.h"
#include "sanitizer_common/sanitizer_stoptheworld.h"

#if CAN_SANITIZE_LEAKS
namespace __lsan {

Flags lsan_flags;

static void InitializeFlags() {
  Flags *f = flags();
  // Default values.
  f->report_blocks = false;
  f->resolution = 0;
  f->max_leaks = 0;
  f->exitcode = 23;
  f->use_registers = true;
  f->use_globals = true;
  f->use_stacks = true;
  f->use_tls = true;
  f->use_unaligned = false;
  f->log_pointers = false;
  f->log_threads = false;

  const char *options = GetEnv("LSAN_OPTIONS");
  if (options) {
    ParseFlag(options, &f->use_registers, "use_registers");
    ParseFlag(options, &f->use_globals, "use_globals");
    ParseFlag(options, &f->use_stacks, "use_stacks");
    ParseFlag(options, &f->use_tls, "use_tls");
    ParseFlag(options, &f->use_unaligned, "use_unaligned");
    ParseFlag(options, &f->report_blocks, "report_blocks");
    ParseFlag(options, &f->resolution, "resolution");
    CHECK_GE(f->resolution, 0);
    ParseFlag(options, &f->max_leaks, "max_leaks");
    CHECK_GE(f->max_leaks, 0);
    ParseFlag(options, &f->log_pointers, "log_pointers");
    ParseFlag(options, &f->log_threads, "log_threads");
    ParseFlag(options, &f->exitcode, "exitcode");
  }
}
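
// Example usage (illustrative values, not the defaults): the flags above are
// read from the LSAN_OPTIONS environment variable as name=value pairs, e.g.
//   LSAN_OPTIONS="report_blocks=1:max_leaks=5:log_pointers=1" ./a.out
// The flag names match the ParseFlag() calls above; the colon-separated
// layout follows the usual sanitizer-flags convention.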

void InitCommonLsan() {
  InitializeFlags();
  InitializePlatformSpecificModules();
}

static inline bool CanBeAHeapPointer(uptr p) {
  // Since our heap is located in mmap-ed memory, we can assume a sensible
  // lower boundary on heap addresses.
  const uptr kMinAddress = 4 * 4096;
  if (p < kMinAddress) return false;
#ifdef __x86_64__
  // Accept only canonical form user-space addresses.
  return ((p >> 47) == 0);
#else
  return true;
#endif
}

// Scan the memory range, looking for byte patterns that point into allocator
// chunks. Mark those chunks with tag and add them to the frontier.
// There are two usage modes for this function: finding non-leaked chunks
// (tag = kReachable) and finding indirectly leaked chunks
// (tag = kIndirectlyLeaked). In the second case, there's no flood fill,
// so frontier = 0.
void ScanRangeForPointers(uptr begin, uptr end, InternalVector<uptr> *frontier,
                          const char *region_type, ChunkTag tag) {
  const uptr alignment = flags()->pointer_alignment();
  if (flags()->log_pointers)
    Report("Scanning %s range %p-%p.\n", region_type, begin, end);
  uptr pp = begin;
  // Round the start of the range up to the required pointer alignment.
  if (pp % alignment)
    pp = pp + alignment - pp % alignment;
  for (; pp + sizeof(uptr) <= end; pp += alignment) {
    void *p = *reinterpret_cast<void**>(pp);
    if (!CanBeAHeapPointer(reinterpret_cast<uptr>(p))) continue;
    // FIXME: PointsIntoChunk is SLOW because GetBlockBegin() in
    // LargeMmapAllocator involves a lock and a linear search.
    void *chunk = PointsIntoChunk(p);
    if (!chunk) continue;
    LsanMetadata m(chunk);
    if (m.tag() == kReachable) continue;
    m.set_tag(tag);
    if (flags()->log_pointers)
      Report("%p: found %p pointing into chunk %p-%p of size %llu.\n", pp, p,
             chunk, reinterpret_cast<uptr>(chunk) + m.requested_size(),
             m.requested_size());
    if (frontier)
      frontier->push_back(reinterpret_cast<uptr>(chunk));
  }
}

// Scan thread data (stacks and TLS) for heap pointers.
static void ProcessThreads(SuspendedThreadsList const &suspended_threads,
                           InternalVector<uptr> *frontier) {
  InternalScopedBuffer<uptr> registers(SuspendedThreadsList::RegisterCount());
  uptr registers_begin = reinterpret_cast<uptr>(registers.data());
  uptr registers_end = registers_begin + registers.size();
  for (uptr i = 0; i < suspended_threads.thread_count(); i++) {
    uptr os_id = static_cast<uptr>(suspended_threads.GetThreadID(i));
    if (flags()->log_threads) Report("Processing thread %d.\n", os_id);
    uptr stack_begin, stack_end, tls_begin, tls_end, cache_begin, cache_end;
    bool thread_found = GetThreadRangesLocked(os_id, &stack_begin, &stack_end,
                                              &tls_begin, &tls_end,
                                              &cache_begin, &cache_end);
    if (!thread_found) {
      // If a thread can't be found in the thread registry, it's probably in
      // the process of destruction. Log this event and move on.
      if (flags()->log_threads)
        Report("Thread %d not found in registry.\n", os_id);
      continue;
    }
    uptr sp;
    bool have_registers =
        (suspended_threads.GetRegistersAndSP(i, registers.data(), &sp) == 0);
    if (!have_registers) {
      Report("Unable to get registers from thread %d.\n", os_id);
      // If unable to get SP, consider the entire stack to be reachable.
      sp = stack_begin;
    }

    if (flags()->use_registers && have_registers)
      ScanRangeForPointers(registers_begin, registers_end, frontier,
                           "REGISTERS", kReachable);

    if (flags()->use_stacks) {
      if (flags()->log_threads)
        Report("Stack at %p-%p, SP = %p.\n", stack_begin, stack_end, sp);
      if (sp < stack_begin || sp >= stack_end) {
        // SP is outside the recorded stack range (e.g. the thread is running a
        // signal handler on alternate stack). Again, consider the entire stack
        // range to be reachable.
        if (flags()->log_threads)
          Report("WARNING: stack_pointer not in stack_range.\n");
      } else {
        // Shrink the stack range to ignore out-of-scope values.
        stack_begin = sp;
      }
      ScanRangeForPointers(stack_begin, stack_end, frontier, "STACK",
                           kReachable);
    }

    if (flags()->use_tls) {
      if (flags()->log_threads) Report("TLS at %p-%p.\n", tls_begin, tls_end);
      if (cache_begin == cache_end) {
        ScanRangeForPointers(tls_begin, tls_end, frontier, "TLS", kReachable);
      } else {
        // Because LSan should not be loaded with dlopen(), we can assume
        // that allocator cache will be part of static TLS image.
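        // Scan the TLS range but skip [cache_begin, cache_end), so that
        // pointers held only by the thread-local allocator cache do not make
        // heap blocks appear reachable.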
        CHECK_LE(tls_begin, cache_begin);
        CHECK_GE(tls_end, cache_end);
        if (tls_begin < cache_begin)
          ScanRangeForPointers(tls_begin, cache_begin, frontier, "TLS",
                               kReachable);
        if (tls_end > cache_end)
          ScanRangeForPointers(cache_end, tls_end, frontier, "TLS", kReachable);
      }
    }
  }
}

static void FloodFillReachable(InternalVector<uptr> *frontier) {
  while (frontier->size()) {
    uptr next_chunk = frontier->back();
    frontier->pop_back();
    LsanMetadata m(reinterpret_cast<void *>(next_chunk));
    ScanRangeForPointers(next_chunk, next_chunk + m.requested_size(), frontier,
                         "HEAP", kReachable);
  }
}

// Mark leaked chunks which are reachable from other leaked chunks.
void MarkIndirectlyLeakedCb::operator()(void *p) const {
  p = GetUserBegin(p);
  LsanMetadata m(p);
  if (m.allocated() && m.tag() != kReachable) {
    ScanRangeForPointers(reinterpret_cast<uptr>(p),
                         reinterpret_cast<uptr>(p) + m.requested_size(),
                         /* frontier */ 0, "HEAP", kIndirectlyLeaked);
  }
}

// Set the appropriate tag on each chunk.
static void ClassifyAllChunks(SuspendedThreadsList const &suspended_threads) {
  // Holds the flood fill frontier.
  InternalVector<uptr> frontier(GetPageSizeCached());

  if (flags()->use_globals)
    ProcessGlobalRegions(&frontier);
  ProcessThreads(suspended_threads, &frontier);
  FloodFillReachable(&frontier);
  ProcessPlatformSpecificAllocations(&frontier);
  FloodFillReachable(&frontier);

  // Now all reachable chunks are marked. Iterate over leaked chunks and mark
  // those that are reachable from other leaked chunks.
  if (flags()->log_pointers)
    Report("Now scanning leaked blocks for pointers.\n");
  ForEachChunk(MarkIndirectlyLeakedCb());
}

void ClearTagCb::operator()(void *p) const {
  p = GetUserBegin(p);
  LsanMetadata m(p);
  m.set_tag(kDirectlyLeaked);
}

static void PrintStackTraceById(u32 stack_trace_id) {
  CHECK(stack_trace_id);
  uptr size = 0;
  const uptr *trace = StackDepotGet(stack_trace_id, &size);
  StackTrace::PrintStack(trace, size, common_flags()->symbolize,
                         common_flags()->strip_path_prefix, 0);
}

static void LockAndSuspendThreads(StopTheWorldCallback callback, void *arg) {
  LockThreadRegistry();
  LockAllocator();
  StopTheWorld(callback, arg);
  // Allocator must be unlocked by the callback.
  UnlockThreadRegistry();
}

///// Normal leak checking. /////

void CollectLeaksCb::operator()(void *p) const {
  p = GetUserBegin(p);
  LsanMetadata m(p);
  if (!m.allocated()) return;
  if (m.tag() != kReachable) {
    uptr resolution = flags()->resolution;
    if (resolution > 0) {
      // Truncate the stack trace to |resolution| frames, so that leaks with
      // the same truncated trace are coalesced in the report.
      uptr size = 0;
      const uptr *trace = StackDepotGet(m.stack_trace_id(), &size);
      size = Min(size, resolution);
      leak_report_->Add(StackDepotPut(trace, size), m.requested_size(),
                        m.tag());
    } else {
      leak_report_->Add(m.stack_trace_id(), m.requested_size(), m.tag());
    }
  }
}

static void CollectLeaks(LeakReport *leak_report) {
  ForEachChunk(CollectLeaksCb(leak_report));
}

void PrintLeakedCb::operator()(void *p) const {
  p = GetUserBegin(p);
  LsanMetadata m(p);
  if (!m.allocated()) return;
  if (m.tag() != kReachable) {
    CHECK(m.tag() == kDirectlyLeaked || m.tag() == kIndirectlyLeaked);
    Printf("%s leaked %llu byte block at %p\n",
           m.tag() == kDirectlyLeaked ? "Directly" : "Indirectly",
           m.requested_size(), p);
  }
}
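
// Sample of the per-block output produced by PrintLeakedCb above (addresses
// and sizes are illustrative):
//   Directly leaked 17 byte block at 0x7f8e30021200
//   Indirectly leaked 32 byte block at 0x7f8e30021280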
"Directly" : "Indirectly", 275 m.requested_size(), p); 276 } 277} 278 279static void PrintLeaked() { 280 Printf("Reporting individual blocks:\n"); 281 Printf("============================\n"); 282 ForEachChunk(PrintLeakedCb()); 283 Printf("\n"); 284} 285 286enum LeakCheckResult { 287 kFatalError, 288 kLeaksFound, 289 kNoLeaks 290}; 291 292static void DoLeakCheckCallback(const SuspendedThreadsList &suspended_threads, 293 void *arg) { 294 LeakCheckResult *result = reinterpret_cast<LeakCheckResult *>(arg); 295 CHECK_EQ(*result, kFatalError); 296 // Allocator must not be locked when we call GetRegionBegin(). 297 UnlockAllocator(); 298 ClassifyAllChunks(suspended_threads); 299 LeakReport leak_report; 300 CollectLeaks(&leak_report); 301 if (leak_report.IsEmpty()) { 302 *result = kNoLeaks; 303 return; 304 } 305 Printf("\n"); 306 Printf("=================================================================\n"); 307 Report("ERROR: LeakSanitizer: detected memory leaks\n"); 308 leak_report.PrintLargest(flags()->max_leaks); 309 if (flags()->report_blocks) 310 PrintLeaked(); 311 leak_report.PrintSummary(); 312 Printf("\n"); 313 ForEachChunk(ClearTagCb()); 314 *result = kLeaksFound; 315} 316 317void DoLeakCheck() { 318 LeakCheckResult result = kFatalError; 319 LockAndSuspendThreads(DoLeakCheckCallback, &result); 320 if (result == kFatalError) { 321 Report("LeakSanitizer has encountered a fatal error.\n"); 322 Die(); 323 } else if (result == kLeaksFound) { 324 if (flags()->exitcode) 325 internal__exit(flags()->exitcode); 326 } 327} 328 329///// LeakReport implementation. ///// 330 331// A hard limit on the number of distinct leaks, to avoid quadratic complexity 332// in LeakReport::Add(). We don't expect to ever see this many leaks in 333// real-world applications. 334// FIXME: Get rid of this limit by changing the implementation of LeakReport to 335// use a hash table. 336const uptr kMaxLeaksConsidered = 1000; 337 338void LeakReport::Add(u32 stack_trace_id, uptr leaked_size, ChunkTag tag) { 339 CHECK(tag == kDirectlyLeaked || tag == kIndirectlyLeaked); 340 bool is_directly_leaked = (tag == kDirectlyLeaked); 341 for (uptr i = 0; i < leaks_.size(); i++) 342 if (leaks_[i].stack_trace_id == stack_trace_id && 343 leaks_[i].is_directly_leaked == is_directly_leaked) { 344 leaks_[i].hit_count++; 345 leaks_[i].total_size += leaked_size; 346 return; 347 } 348 if (leaks_.size() == kMaxLeaksConsidered) return; 349 Leak leak = { /* hit_count */ 1, leaked_size, stack_trace_id, 350 is_directly_leaked }; 351 leaks_.push_back(leak); 352} 353 354static bool IsLarger(const Leak &leak1, const Leak &leak2) { 355 return leak1.total_size > leak2.total_size; 356} 357 358void LeakReport::PrintLargest(uptr max_leaks) { 359 CHECK(leaks_.size() <= kMaxLeaksConsidered); 360 Printf("\n"); 361 if (leaks_.size() == kMaxLeaksConsidered) 362 Printf("Too many leaks! Only the first %llu leaks encountered will be " 363 "reported.\n", 364 kMaxLeaksConsidered); 365 if (max_leaks > 0 && max_leaks < leaks_.size()) 366 Printf("The %llu largest leak(s):\n", max_leaks); 367 InternalSort(&leaks_, leaks_.size(), IsLarger); 368 max_leaks = max_leaks > 0 ? Min(max_leaks, leaks_.size()) : leaks_.size(); 369 for (uptr i = 0; i < max_leaks; i++) { 370 Printf("%s leak of %llu byte(s) in %llu object(s) allocated from:\n", 371 leaks_[i].is_directly_leaked ? 
"Direct" : "Indirect", 372 leaks_[i].total_size, leaks_[i].hit_count); 373 PrintStackTraceById(leaks_[i].stack_trace_id); 374 Printf("\n"); 375 } 376 if (max_leaks < leaks_.size()) { 377 uptr remaining = leaks_.size() - max_leaks; 378 Printf("Omitting %llu more leak(s).\n", remaining); 379 } 380} 381 382void LeakReport::PrintSummary() { 383 CHECK(leaks_.size() <= kMaxLeaksConsidered); 384 uptr bytes = 0, allocations = 0; 385 for (uptr i = 0; i < leaks_.size(); i++) { 386 bytes += leaks_[i].total_size; 387 allocations += leaks_[i].hit_count; 388 } 389 Printf("SUMMARY: LeakSanitizer: %llu byte(s) leaked in %llu allocation(s).\n", 390 bytes, allocations); 391} 392} // namespace __lsan 393#endif // CAN_SANITIZE_LEAKS 394