lsan_common.cc revision 5e719a705666988781b9735d62cafc808ade60e2
//=-- lsan_common.cc ------------------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of LeakSanitizer.
// Implementation of common leak checking functionality.
//
//===----------------------------------------------------------------------===//

#include "lsan_common.h"

#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_stacktrace.h"
#include "sanitizer_common/sanitizer_stoptheworld.h"

#if CAN_SANITIZE_LEAKS
244f1732b8068970b368a89271158ca29daf25650eztenghuinamespace __lsan { 254f1732b8068970b368a89271158ca29daf25650eztenghui 264f1732b8068970b368a89271158ca29daf25650eztenghuiFlags lsan_flags; 274f1732b8068970b368a89271158ca29daf25650eztenghui 284f1732b8068970b368a89271158ca29daf25650eztenghuistatic void InitializeFlags() { 294f1732b8068970b368a89271158ca29daf25650eztenghui Flags *f = flags(); 304f1732b8068970b368a89271158ca29daf25650eztenghui // Default values. 314f1732b8068970b368a89271158ca29daf25650eztenghui f->report_blocks = false; 324f1732b8068970b368a89271158ca29daf25650eztenghui f->resolution = 0; 334f1732b8068970b368a89271158ca29daf25650eztenghui f->max_leaks = 0; 344f1732b8068970b368a89271158ca29daf25650eztenghui f->exitcode = 23; 354f1732b8068970b368a89271158ca29daf25650eztenghui f->use_registers = true; 364f1732b8068970b368a89271158ca29daf25650eztenghui f->use_globals = true; 374f1732b8068970b368a89271158ca29daf25650eztenghui f->use_stacks = true; 384f1732b8068970b368a89271158ca29daf25650eztenghui f->use_tls = true; 3984333e0475bc911adc16417f4ca327c975cf6c36Andreas Huber f->use_unaligned = false; 404f1732b8068970b368a89271158ca29daf25650eztenghui f->log_pointers = false; 414f1732b8068970b368a89271158ca29daf25650eztenghui f->log_threads = false; 424f1732b8068970b368a89271158ca29daf25650eztenghui 434f1732b8068970b368a89271158ca29daf25650eztenghui const char *options = GetEnv("LSAN_OPTIONS"); 444f1732b8068970b368a89271158ca29daf25650eztenghui if (options) { 454f1732b8068970b368a89271158ca29daf25650eztenghui ParseFlag(options, &f->use_registers, "use_registers"); 464f1732b8068970b368a89271158ca29daf25650eztenghui ParseFlag(options, &f->use_globals, "use_globals"); 474f1732b8068970b368a89271158ca29daf25650eztenghui ParseFlag(options, &f->use_stacks, "use_stacks"); 484f1732b8068970b368a89271158ca29daf25650eztenghui ParseFlag(options, &f->use_tls, "use_tls"); 494f1732b8068970b368a89271158ca29daf25650eztenghui ParseFlag(options, &f->use_unaligned, "use_unaligned"); 
504f1732b8068970b368a89271158ca29daf25650eztenghui ParseFlag(options, &f->report_blocks, "report_blocks"); 514f1732b8068970b368a89271158ca29daf25650eztenghui ParseFlag(options, &f->resolution, "resolution"); 524f1732b8068970b368a89271158ca29daf25650eztenghui CHECK_GE(&f->resolution, 0); 534f1732b8068970b368a89271158ca29daf25650eztenghui ParseFlag(options, &f->max_leaks, "max_leaks"); 544f1732b8068970b368a89271158ca29daf25650eztenghui CHECK_GE(&f->max_leaks, 0); 554f1732b8068970b368a89271158ca29daf25650eztenghui ParseFlag(options, &f->log_pointers, "log_pointers"); 564f1732b8068970b368a89271158ca29daf25650eztenghui ParseFlag(options, &f->log_threads, "log_threads"); 574f1732b8068970b368a89271158ca29daf25650eztenghui ParseFlag(options, &f->exitcode, "exitcode"); 584f1732b8068970b368a89271158ca29daf25650eztenghui } 594f1732b8068970b368a89271158ca29daf25650eztenghui} 604f1732b8068970b368a89271158ca29daf25650eztenghui 614f1732b8068970b368a89271158ca29daf25650eztenghuivoid InitCommonLsan() { 624f1732b8068970b368a89271158ca29daf25650eztenghui InitializeFlags(); 634f1732b8068970b368a89271158ca29daf25650eztenghui InitializePlatformSpecificModules(); 644f1732b8068970b368a89271158ca29daf25650eztenghui} 654f1732b8068970b368a89271158ca29daf25650eztenghui 664f1732b8068970b368a89271158ca29daf25650eztenghuistatic inline bool CanBeAHeapPointer(uptr p) { 674f1732b8068970b368a89271158ca29daf25650eztenghui // Since our heap is located in mmap-ed memory, we can assume a sensible lower 684f1732b8068970b368a89271158ca29daf25650eztenghui // boundary on heap addresses. 694f1732b8068970b368a89271158ca29daf25650eztenghui const uptr kMinAddress = 4 * 4096; 704f1732b8068970b368a89271158ca29daf25650eztenghui if (p < kMinAddress) return false; 714f1732b8068970b368a89271158ca29daf25650eztenghui#ifdef __x86_64__ 724f1732b8068970b368a89271158ca29daf25650eztenghui // Accept only canonical form user-space addresses. 
734f1732b8068970b368a89271158ca29daf25650eztenghui return ((p >> 47) == 0); 744f1732b8068970b368a89271158ca29daf25650eztenghui#else 754f1732b8068970b368a89271158ca29daf25650eztenghui return true; 764f1732b8068970b368a89271158ca29daf25650eztenghui#endif 774f1732b8068970b368a89271158ca29daf25650eztenghui} 7884333e0475bc911adc16417f4ca327c975cf6c36Andreas Huber 794f1732b8068970b368a89271158ca29daf25650eztenghui// Scan the memory range, looking for byte patterns that point into allocator 804f1732b8068970b368a89271158ca29daf25650eztenghui// chunks. Mark those chunks with tag and add them to the frontier. 814f1732b8068970b368a89271158ca29daf25650eztenghui// There are two usage modes for this function: finding reachable or suppressed 824f1732b8068970b368a89271158ca29daf25650eztenghui// chunks (tag = kReachable or kSuppressed) and finding indirectly leaked chunks 834f1732b8068970b368a89271158ca29daf25650eztenghui// (tag = kIndirectlyLeaked). In the second case, there's no flood fill, 844f1732b8068970b368a89271158ca29daf25650eztenghui// so frontier = 0. 
854f1732b8068970b368a89271158ca29daf25650eztenghuivoid ScanRangeForPointers(uptr begin, uptr end, InternalVector<uptr> *frontier, 864f1732b8068970b368a89271158ca29daf25650eztenghui const char *region_type, ChunkTag tag) { 874f1732b8068970b368a89271158ca29daf25650eztenghui const uptr alignment = flags()->pointer_alignment(); 884f1732b8068970b368a89271158ca29daf25650eztenghui if (flags()->log_pointers) 894f1732b8068970b368a89271158ca29daf25650eztenghui Report("Scanning %s range %p-%p.\n", region_type, begin, end); 904f1732b8068970b368a89271158ca29daf25650eztenghui uptr pp = begin; 914f1732b8068970b368a89271158ca29daf25650eztenghui if (pp % alignment) 924f1732b8068970b368a89271158ca29daf25650eztenghui pp = pp + alignment - pp % alignment; 934f1732b8068970b368a89271158ca29daf25650eztenghui for (; pp + sizeof(uptr) <= end; pp += alignment) { 944f1732b8068970b368a89271158ca29daf25650eztenghui void *p = *reinterpret_cast<void**>(pp); 954f1732b8068970b368a89271158ca29daf25650eztenghui if (!CanBeAHeapPointer(reinterpret_cast<uptr>(p))) continue; 964f1732b8068970b368a89271158ca29daf25650eztenghui void *chunk = PointsIntoChunk(p); 974f1732b8068970b368a89271158ca29daf25650eztenghui if (!chunk) continue; 984f1732b8068970b368a89271158ca29daf25650eztenghui LsanMetadata m(chunk); 994f1732b8068970b368a89271158ca29daf25650eztenghui // Reachable beats suppressed beats leaked. 
1004f1732b8068970b368a89271158ca29daf25650eztenghui if (m.tag() == kReachable) continue; 1014f1732b8068970b368a89271158ca29daf25650eztenghui if (m.tag() == kSuppressed && tag != kReachable) continue; 1024f1732b8068970b368a89271158ca29daf25650eztenghui m.set_tag(tag); 1034f1732b8068970b368a89271158ca29daf25650eztenghui if (flags()->log_pointers) 1044f1732b8068970b368a89271158ca29daf25650eztenghui Report("%p: found %p pointing into chunk %p-%p of size %llu.\n", pp, p, 1054f1732b8068970b368a89271158ca29daf25650eztenghui chunk, reinterpret_cast<uptr>(chunk) + m.requested_size(), 1064f1732b8068970b368a89271158ca29daf25650eztenghui m.requested_size()); 1074f1732b8068970b368a89271158ca29daf25650eztenghui if (frontier) 1084f1732b8068970b368a89271158ca29daf25650eztenghui frontier->push_back(reinterpret_cast<uptr>(chunk)); 1094f1732b8068970b368a89271158ca29daf25650eztenghui } 1104f1732b8068970b368a89271158ca29daf25650eztenghui} 1114f1732b8068970b368a89271158ca29daf25650eztenghui 1124f1732b8068970b368a89271158ca29daf25650eztenghui// Scan thread data (stacks and TLS) for heap pointers. 
1134f1732b8068970b368a89271158ca29daf25650eztenghuistatic void ProcessThreads(SuspendedThreadsList const &suspended_threads, 1144f1732b8068970b368a89271158ca29daf25650eztenghui InternalVector<uptr> *frontier) { 1154f1732b8068970b368a89271158ca29daf25650eztenghui InternalScopedBuffer<uptr> registers(SuspendedThreadsList::RegisterCount()); 1164f1732b8068970b368a89271158ca29daf25650eztenghui uptr registers_begin = reinterpret_cast<uptr>(registers.data()); 1174f1732b8068970b368a89271158ca29daf25650eztenghui uptr registers_end = registers_begin + registers.size(); 1184f1732b8068970b368a89271158ca29daf25650eztenghui for (uptr i = 0; i < suspended_threads.thread_count(); i++) { 1194f1732b8068970b368a89271158ca29daf25650eztenghui uptr os_id = static_cast<uptr>(suspended_threads.GetThreadID(i)); 1204f1732b8068970b368a89271158ca29daf25650eztenghui if (flags()->log_threads) Report("Processing thread %d.\n", os_id); 1214f1732b8068970b368a89271158ca29daf25650eztenghui uptr stack_begin, stack_end, tls_begin, tls_end, cache_begin, cache_end; 1224f1732b8068970b368a89271158ca29daf25650eztenghui bool thread_found = GetThreadRangesLocked(os_id, &stack_begin, &stack_end, 1234f1732b8068970b368a89271158ca29daf25650eztenghui &tls_begin, &tls_end, 1244f1732b8068970b368a89271158ca29daf25650eztenghui &cache_begin, &cache_end); 1254f1732b8068970b368a89271158ca29daf25650eztenghui if (!thread_found) { 1264f1732b8068970b368a89271158ca29daf25650eztenghui // If a thread can't be found in the thread registry, it's probably in the 127 // process of destruction. Log this event and move on. 128 if (flags()->log_threads) 129 Report("Thread %d not found in registry.\n", os_id); 130 continue; 131 } 132 uptr sp; 133 bool have_registers = 134 (suspended_threads.GetRegistersAndSP(i, registers.data(), &sp) == 0); 135 if (!have_registers) { 136 Report("Unable to get registers from thread %d.\n"); 137 // If unable to get SP, consider the entire stack to be reachable. 
138 sp = stack_begin; 139 } 140 141 if (flags()->use_registers && have_registers) 142 ScanRangeForPointers(registers_begin, registers_end, frontier, 143 "REGISTERS", kReachable); 144 145 if (flags()->use_stacks) { 146 if (flags()->log_threads) 147 Report("Stack at %p-%p, SP = %p.\n", stack_begin, stack_end, sp); 148 if (sp < stack_begin || sp >= stack_end) { 149 // SP is outside the recorded stack range (e.g. the thread is running a 150 // signal handler on alternate stack). Again, consider the entire stack 151 // range to be reachable. 152 if (flags()->log_threads) 153 Report("WARNING: stack_pointer not in stack_range.\n"); 154 } else { 155 // Shrink the stack range to ignore out-of-scope values. 156 stack_begin = sp; 157 } 158 ScanRangeForPointers(stack_begin, stack_end, frontier, "STACK", 159 kReachable); 160 } 161 162 if (flags()->use_tls) { 163 if (flags()->log_threads) Report("TLS at %p-%p.\n", tls_begin, tls_end); 164 if (cache_begin == cache_end) { 165 ScanRangeForPointers(tls_begin, tls_end, frontier, "TLS", kReachable); 166 } else { 167 // Because LSan should not be loaded with dlopen(), we can assume 168 // that allocator cache will be part of static TLS image. 169 CHECK_LE(tls_begin, cache_begin); 170 CHECK_GE(tls_end, cache_end); 171 if (tls_begin < cache_begin) 172 ScanRangeForPointers(tls_begin, cache_begin, frontier, "TLS", 173 kReachable); 174 if (tls_end > cache_end) 175 ScanRangeForPointers(cache_end, tls_end, frontier, "TLS", kReachable); 176 } 177 } 178 } 179} 180 181static void FloodFillTag(InternalVector<uptr> *frontier, ChunkTag tag) { 182 while (frontier->size()) { 183 uptr next_chunk = frontier->back(); 184 frontier->pop_back(); 185 LsanMetadata m(reinterpret_cast<void *>(next_chunk)); 186 ScanRangeForPointers(next_chunk, next_chunk + m.requested_size(), frontier, 187 "HEAP", tag); 188 } 189} 190 191// Mark leaked chunks which are reachable from other leaked chunks. 
void MarkIndirectlyLeakedCb::operator()(void *p) const {
  p = GetUserBegin(p);
  LsanMetadata m(p);
  if (m.allocated() && m.tag() != kReachable) {
    // Scan the leaked chunk's own contents; anything it points to is at best
    // indirectly leaked. frontier == 0: no flood fill in this mode.
    ScanRangeForPointers(reinterpret_cast<uptr>(p),
                         reinterpret_cast<uptr>(p) + m.requested_size(),
                         /* frontier */ 0, "HEAP", kIndirectlyLeaked);
  }
}

// Collects chunks currently tagged kSuppressed into the frontier, so that
// everything reachable from them can be tagged as well.
void CollectSuppressedCb::operator()(void *p) const {
  p = GetUserBegin(p);
  LsanMetadata m(p);
  if (m.allocated() && m.tag() == kSuppressed)
    frontier_->push_back(reinterpret_cast<uptr>(p));
}

// Set the appropriate tag on each chunk.
static void ClassifyAllChunks(SuspendedThreadsList const &suspended_threads) {
  // Holds the flood fill frontier.
  InternalVector<uptr> frontier(GetPageSizeCached());

  // Seed the frontier with roots (globals, then thread stacks/registers/TLS)
  // and flood-fill everything reachable from them.
  if (flags()->use_globals)
    ProcessGlobalRegions(&frontier);
  ProcessThreads(suspended_threads, &frontier);
  FloodFillTag(&frontier, kReachable);
  // The check here is relatively expensive, so we do this in a separate flood
  // fill. That way we can skip the check for chunks that are reachable
  // otherwise.
  ProcessPlatformSpecificAllocations(&frontier);
  FloodFillTag(&frontier, kReachable);

  if (flags()->log_pointers)
    Report("Scanning suppressed blocks.\n");
  // The previous flood fills must have drained the frontier.
  CHECK_EQ(0, frontier.size());
  ForEachChunk(CollectSuppressedCb(&frontier));
  FloodFillTag(&frontier, kSuppressed);

  // Iterate over leaked chunks and mark those that are reachable from other
  // leaked chunks.
  if (flags()->log_pointers)
    Report("Scanning leaked blocks.\n");
  ForEachChunk(MarkIndirectlyLeakedCb());
}

// Fetches the stack trace stored under |stack_trace_id| from the depot and
// prints it using the common symbolization settings.
static void PrintStackTraceById(u32 stack_trace_id) {
  CHECK(stack_trace_id);
  uptr size = 0;
  const uptr *trace = StackDepotGet(stack_trace_id, &size);
  StackTrace::PrintStack(trace, size, common_flags()->symbolize,
                         common_flags()->strip_path_prefix, 0);
}

// Aggregates each (directly or indirectly) leaked chunk into leak_report_.
// With resolution > 0, the allocation stack is truncated to that many frames
// and re-interned, so leaks sharing a stack prefix merge into one record.
void CollectLeaksCb::operator()(void *p) const {
  p = GetUserBegin(p);
  LsanMetadata m(p);
  if (!m.allocated()) return;
  if (m.tag() == kDirectlyLeaked || m.tag() == kIndirectlyLeaked) {
    uptr resolution = flags()->resolution;
    if (resolution > 0) {
      uptr size = 0;
      const uptr *trace = StackDepotGet(m.stack_trace_id(), &size);
      size = Min(size, resolution);
      // Re-intern the truncated trace to get a depot id for the prefix.
      leak_report_->Add(StackDepotPut(trace, size), m.requested_size(),
                        m.tag());
    } else {
      leak_report_->Add(m.stack_trace_id(), m.requested_size(), m.tag());
    }
  }
}

// Runs CollectLeaksCb over every chunk, filling in |leak_report|.
static void CollectLeaks(LeakReport *leak_report) {
  ForEachChunk(CollectLeaksCb(leak_report));
}

// Prints one line per leaked chunk (used when report_blocks=1).
void PrintLeakedCb::operator()(void *p) const {
  p = GetUserBegin(p);
  LsanMetadata m(p);
  if (!m.allocated()) return;
  if (m.tag() == kDirectlyLeaked || m.tag() == kIndirectlyLeaked) {
    Printf("%s leaked %llu byte block at %p\n",
           m.tag() == kDirectlyLeaked ? "Directly" : "Indirectly",
           m.requested_size(), p);
  }
}

// Prints the per-block leak listing (enabled by the report_blocks flag).
static void PrintLeaked() {
  Printf("Reporting individual blocks:\n");
  Printf("============================\n");
  ForEachChunk(PrintLeakedCb());
  Printf("\n");
}

// Outcome of a leak-check run, passed from DoLeakCheckCallback back to
// DoLeakCheck() through the opaque arg pointer.
enum LeakCheckResult {
  kFatalError,
  kLeaksFound,
  kNoLeaks
};

// Stop-the-world callback: runs while all other threads are suspended.
// *arg must be kFatalError on entry (it stays that way if the callback never
// runs to completion) and is set to kNoLeaks or kLeaksFound on success.
static void DoLeakCheckCallback(const SuspendedThreadsList &suspended_threads,
                                void *arg) {
  LeakCheckResult *result = reinterpret_cast<LeakCheckResult *>(arg);
  CHECK_EQ(*result, kFatalError);
  ClassifyAllChunks(suspended_threads);
  LeakReport leak_report;
  CollectLeaks(&leak_report);
  if (leak_report.IsEmpty()) {
    *result = kNoLeaks;
    return;
  }
  Printf("\n");
  Printf("=================================================================\n");
  Report("ERROR: LeakSanitizer: detected memory leaks\n");
  leak_report.PrintLargest(flags()->max_leaks);
  if (flags()->report_blocks)
    PrintLeaked();
  leak_report.PrintSummary();
  Printf("\n");
  *result = kLeaksFound;
}

// Entry point for leak detection. The CHECK on already_done enforces at most
// one invocation per process.
void DoLeakCheck() {
  static bool already_done;
  LeakCheckResult result = kFatalError;
  // Lock order: thread registry, then allocator; both are held across the
  // stop-the-world region and released in reverse order.
  LockThreadRegistry();
  LockAllocator();
  CHECK(!already_done);
  already_done = true;
  StopTheWorld(DoLeakCheckCallback, &result);
  UnlockAllocator();
  UnlockThreadRegistry();
  if (result == kFatalError) {
    Report("LeakSanitizer has encountered a fatal error.\n");
    Die();
  } else if (result == kLeaksFound) {
    // Exit with the configured code (default 23); exitcode=0 disables this.
    if (flags()->exitcode)
      internal__exit(flags()->exitcode);
  }
}

///// LeakReport implementation. /////

// A hard limit on the number of distinct leaks, to avoid quadratic complexity
// in LeakReport::Add(). We don't expect to ever see this many leaks in
// real-world applications.
// FIXME: Get rid of this limit by changing the implementation of LeakReport to
// use a hash table.
339const uptr kMaxLeaksConsidered = 1000; 340 341void LeakReport::Add(u32 stack_trace_id, uptr leaked_size, ChunkTag tag) { 342 CHECK(tag == kDirectlyLeaked || tag == kIndirectlyLeaked); 343 bool is_directly_leaked = (tag == kDirectlyLeaked); 344 for (uptr i = 0; i < leaks_.size(); i++) 345 if (leaks_[i].stack_trace_id == stack_trace_id && 346 leaks_[i].is_directly_leaked == is_directly_leaked) { 347 leaks_[i].hit_count++; 348 leaks_[i].total_size += leaked_size; 349 return; 350 } 351 if (leaks_.size() == kMaxLeaksConsidered) return; 352 Leak leak = { /* hit_count */ 1, leaked_size, stack_trace_id, 353 is_directly_leaked }; 354 leaks_.push_back(leak); 355} 356 357static bool IsLarger(const Leak &leak1, const Leak &leak2) { 358 return leak1.total_size > leak2.total_size; 359} 360 361void LeakReport::PrintLargest(uptr max_leaks) { 362 CHECK(leaks_.size() <= kMaxLeaksConsidered); 363 Printf("\n"); 364 if (leaks_.size() == kMaxLeaksConsidered) 365 Printf("Too many leaks! Only the first %llu leaks encountered will be " 366 "reported.\n", 367 kMaxLeaksConsidered); 368 if (max_leaks > 0 && max_leaks < leaks_.size()) 369 Printf("The %llu largest leak(s):\n", max_leaks); 370 InternalSort(&leaks_, leaks_.size(), IsLarger); 371 max_leaks = max_leaks > 0 ? Min(max_leaks, leaks_.size()) : leaks_.size(); 372 for (uptr i = 0; i < max_leaks; i++) { 373 Printf("%s leak of %llu byte(s) in %llu object(s) allocated from:\n", 374 leaks_[i].is_directly_leaked ? 
"Direct" : "Indirect", 375 leaks_[i].total_size, leaks_[i].hit_count); 376 PrintStackTraceById(leaks_[i].stack_trace_id); 377 Printf("\n"); 378 } 379 if (max_leaks < leaks_.size()) { 380 uptr remaining = leaks_.size() - max_leaks; 381 Printf("Omitting %llu more leak(s).\n", remaining); 382 } 383} 384 385void LeakReport::PrintSummary() { 386 CHECK(leaks_.size() <= kMaxLeaksConsidered); 387 uptr bytes = 0, allocations = 0; 388 for (uptr i = 0; i < leaks_.size(); i++) { 389 bytes += leaks_[i].total_size; 390 allocations += leaks_[i].hit_count; 391 } 392 Printf("SUMMARY: LeakSanitizer: %llu byte(s) leaked in %llu allocation(s).\n", 393 bytes, allocations); 394} 395 396} // namespace __lsan 397#endif // CAN_SANITIZE_LEAKS 398