lsan_common.cc revision b1907c71402e01deb6fa3909a75dbc674bfd907b
1//=-- lsan_common.cc ------------------------------------------------------===// 2// 3// The LLVM Compiler Infrastructure 4// 5// This file is distributed under the University of Illinois Open Source 6// License. See LICENSE.TXT for details. 7// 8//===----------------------------------------------------------------------===// 9// 10// This file is a part of LeakSanitizer. 11// Implementation of common leak checking functionality. 12// 13//===----------------------------------------------------------------------===// 14 15#include "lsan_common.h" 16 17#include "sanitizer_common/sanitizer_common.h" 18#include "sanitizer_common/sanitizer_flags.h" 19#include "sanitizer_common/sanitizer_placement_new.h" 20#include "sanitizer_common/sanitizer_stackdepot.h" 21#include "sanitizer_common/sanitizer_stacktrace.h" 22#include "sanitizer_common/sanitizer_stoptheworld.h" 23#include "sanitizer_common/sanitizer_suppressions.h" 24#include "sanitizer_common/sanitizer_report_decorator.h" 25 26#if CAN_SANITIZE_LEAKS 27namespace __lsan { 28 29// This mutex is used to prevent races between DoLeakCheck and IgnoreObject. 30BlockingMutex global_mutex(LINKER_INITIALIZED); 31 32THREADLOCAL int disable_counter; 33bool DisabledInThisThread() { return disable_counter > 0; } 34 35Flags lsan_flags; 36 37static void InitializeFlags() { 38 Flags *f = flags(); 39 // Default values. 40 f->report_objects = false; 41 f->resolution = 0; 42 f->max_leaks = 0; 43 f->exitcode = 23; 44 f->suppressions=""; 45 f->use_registers = true; 46 f->use_globals = true; 47 f->use_stacks = true; 48 f->use_tls = true; 49 f->use_unaligned = false; 50 f->verbosity = 0; 51 f->log_pointers = false; 52 f->log_threads = false; 53 54 const char *options = GetEnv("LSAN_OPTIONS"); 55 if (options) { 56 ParseFlag(options, &f->use_registers, "use_registers"); 57 ParseFlag(options, &f->use_globals, "use_globals"); 58 ParseFlag(options, &f->use_stacks, "use_stacks"); 59 ParseFlag(options, &f->use_tls, "use_tls"); 60 ParseFlag(options, &f->use_unaligned, "use_unaligned"); 61 ParseFlag(options, &f->report_objects, "report_objects"); 62 ParseFlag(options, &f->resolution, "resolution"); 63 CHECK_GE(&f->resolution, 0); 64 ParseFlag(options, &f->max_leaks, "max_leaks"); 65 CHECK_GE(&f->max_leaks, 0); 66 ParseFlag(options, &f->verbosity, "verbosity"); 67 ParseFlag(options, &f->log_pointers, "log_pointers"); 68 ParseFlag(options, &f->log_threads, "log_threads"); 69 ParseFlag(options, &f->exitcode, "exitcode"); 70 ParseFlag(options, &f->suppressions, "suppressions"); 71 } 72} 73 74SuppressionContext *suppression_ctx; 75 76void InitializeSuppressions() { 77 CHECK(!suppression_ctx); 78 ALIGNED(64) static char placeholder_[sizeof(SuppressionContext)]; 79 suppression_ctx = new(placeholder_) SuppressionContext; 80 char *suppressions_from_file; 81 uptr buffer_size; 82 if (ReadFileToBuffer(flags()->suppressions, &suppressions_from_file, 83 &buffer_size, 1 << 26 /* max_len */)) 84 suppression_ctx->Parse(suppressions_from_file); 85 if (flags()->suppressions[0] && !buffer_size) { 86 Printf("LeakSanitizer: failed to read suppressions file '%s'\n", 87 flags()->suppressions); 88 Die(); 89 } 90 if (&__lsan_default_suppressions) 91 suppression_ctx->Parse(__lsan_default_suppressions()); 92} 93 94void InitCommonLsan() { 95 InitializeFlags(); 96 if (common_flags()->detect_leaks) { 97 // Initialization which can fail or print warnings should only be done if 98 // LSan is actually enabled. 99 InitializeSuppressions(); 100 InitializePlatformSpecificModules(); 101 } 102} 103 104class Decorator: private __sanitizer::AnsiColorDecorator { 105 public: 106 Decorator() : __sanitizer::AnsiColorDecorator(PrintsToTtyCached()) { } 107 const char *Error() { return Red(); } 108 const char *Leak() { return Blue(); } 109 const char *End() { return Default(); } 110}; 111 112static inline bool CanBeAHeapPointer(uptr p) { 113 // Since our heap is located in mmap-ed memory, we can assume a sensible lower 114 // bound on heap addresses. 115 const uptr kMinAddress = 4 * 4096; 116 if (p < kMinAddress) return false; 117#ifdef __x86_64__ 118 // Accept only canonical form user-space addresses. 119 return ((p >> 47) == 0); 120#else 121 return true; 122#endif 123} 124 125// Scans the memory range, looking for byte patterns that point into allocator 126// chunks. Marks those chunks with |tag| and adds them to |frontier|. 127// There are two usage modes for this function: finding reachable or ignored 128// chunks (|tag| = kReachable or kIgnored) and finding indirectly leaked chunks 129// (|tag| = kIndirectlyLeaked). In the second case, there's no flood fill, 130// so |frontier| = 0. 131void ScanRangeForPointers(uptr begin, uptr end, 132 Frontier *frontier, 133 const char *region_type, ChunkTag tag) { 134 const uptr alignment = flags()->pointer_alignment(); 135 if (flags()->log_pointers) 136 Report("Scanning %s range %p-%p.\n", region_type, begin, end); 137 uptr pp = begin; 138 if (pp % alignment) 139 pp = pp + alignment - pp % alignment; 140 for (; pp + sizeof(void *) <= end; pp += alignment) { // NOLINT 141 void *p = *reinterpret_cast<void **>(pp); 142 if (!CanBeAHeapPointer(reinterpret_cast<uptr>(p))) continue; 143 uptr chunk = PointsIntoChunk(p); 144 if (!chunk) continue; 145 // Pointers to self don't count. This matters when tag == kIndirectlyLeaked. 146 if (chunk == begin) continue; 147 LsanMetadata m(chunk); 148 // Reachable beats ignored beats leaked. 149 if (m.tag() == kReachable) continue; 150 if (m.tag() == kIgnored && tag != kReachable) continue; 151 m.set_tag(tag); 152 if (flags()->log_pointers) 153 Report("%p: found %p pointing into chunk %p-%p of size %zu.\n", pp, p, 154 chunk, chunk + m.requested_size(), m.requested_size()); 155 if (frontier) 156 frontier->push_back(chunk); 157 } 158} 159 160void ForEachExtraStackRangeCb(uptr begin, uptr end, void* arg) { 161 Frontier *frontier = reinterpret_cast<Frontier *>(arg); 162 ScanRangeForPointers(begin, end, frontier, "FAKE STACK", kReachable); 163} 164 165// Scans thread data (stacks and TLS) for heap pointers. 166static void ProcessThreads(SuspendedThreadsList const &suspended_threads, 167 Frontier *frontier) { 168 InternalScopedBuffer<uptr> registers(SuspendedThreadsList::RegisterCount()); 169 uptr registers_begin = reinterpret_cast<uptr>(registers.data()); 170 uptr registers_end = registers_begin + registers.size(); 171 for (uptr i = 0; i < suspended_threads.thread_count(); i++) { 172 uptr os_id = static_cast<uptr>(suspended_threads.GetThreadID(i)); 173 if (flags()->log_threads) Report("Processing thread %d.\n", os_id); 174 uptr stack_begin, stack_end, tls_begin, tls_end, cache_begin, cache_end; 175 bool thread_found = GetThreadRangesLocked(os_id, &stack_begin, &stack_end, 176 &tls_begin, &tls_end, 177 &cache_begin, &cache_end); 178 if (!thread_found) { 179 // If a thread can't be found in the thread registry, it's probably in the 180 // process of destruction. Log this event and move on. 181 if (flags()->log_threads) 182 Report("Thread %d not found in registry.\n", os_id); 183 continue; 184 } 185 uptr sp; 186 bool have_registers = 187 (suspended_threads.GetRegistersAndSP(i, registers.data(), &sp) == 0); 188 if (!have_registers) { 189 Report("Unable to get registers from thread %d.\n"); 190 // If unable to get SP, consider the entire stack to be reachable. 191 sp = stack_begin; 192 } 193 194 if (flags()->use_registers && have_registers) 195 ScanRangeForPointers(registers_begin, registers_end, frontier, 196 "REGISTERS", kReachable); 197 198 if (flags()->use_stacks) { 199 if (flags()->log_threads) 200 Report("Stack at %p-%p, SP = %p.\n", stack_begin, stack_end, sp); 201 if (sp < stack_begin || sp >= stack_end) { 202 // SP is outside the recorded stack range (e.g. the thread is running a 203 // signal handler on alternate stack). Again, consider the entire stack 204 // range to be reachable. 205 if (flags()->log_threads) 206 Report("WARNING: stack pointer not in stack range.\n"); 207 } else { 208 // Shrink the stack range to ignore out-of-scope values. 209 stack_begin = sp; 210 } 211 ScanRangeForPointers(stack_begin, stack_end, frontier, "STACK", 212 kReachable); 213 ForEachExtraStackRange(os_id, ForEachExtraStackRangeCb, frontier); 214 } 215 216 if (flags()->use_tls) { 217 if (flags()->log_threads) Report("TLS at %p-%p.\n", tls_begin, tls_end); 218 if (cache_begin == cache_end) { 219 ScanRangeForPointers(tls_begin, tls_end, frontier, "TLS", kReachable); 220 } else { 221 // Because LSan should not be loaded with dlopen(), we can assume 222 // that allocator cache will be part of static TLS image. 223 CHECK_LE(tls_begin, cache_begin); 224 CHECK_GE(tls_end, cache_end); 225 if (tls_begin < cache_begin) 226 ScanRangeForPointers(tls_begin, cache_begin, frontier, "TLS", 227 kReachable); 228 if (tls_end > cache_end) 229 ScanRangeForPointers(cache_end, tls_end, frontier, "TLS", kReachable); 230 } 231 } 232 } 233} 234 235static void FloodFillTag(Frontier *frontier, ChunkTag tag) { 236 while (frontier->size()) { 237 uptr next_chunk = frontier->back(); 238 frontier->pop_back(); 239 LsanMetadata m(next_chunk); 240 ScanRangeForPointers(next_chunk, next_chunk + m.requested_size(), frontier, 241 "HEAP", tag); 242 } 243} 244 245// ForEachChunk callback. If the chunk is marked as leaked, marks all chunks 246// which are reachable from it as indirectly leaked. 247static void MarkIndirectlyLeakedCb(uptr chunk, void *arg) { 248 chunk = GetUserBegin(chunk); 249 LsanMetadata m(chunk); 250 if (m.allocated() && m.tag() != kReachable) { 251 ScanRangeForPointers(chunk, chunk + m.requested_size(), 252 /* frontier */ 0, "HEAP", kIndirectlyLeaked); 253 } 254} 255 256// ForEachChunk callback. If chunk is marked as ignored, adds its address to 257// frontier. 258static void CollectIgnoredCb(uptr chunk, void *arg) { 259 CHECK(arg); 260 chunk = GetUserBegin(chunk); 261 LsanMetadata m(chunk); 262 if (m.allocated() && m.tag() == kIgnored) 263 reinterpret_cast<Frontier *>(arg)->push_back(chunk); 264} 265 266// Sets the appropriate tag on each chunk. 267static void ClassifyAllChunks(SuspendedThreadsList const &suspended_threads) { 268 // Holds the flood fill frontier. 269 Frontier frontier(GetPageSizeCached()); 270 271 if (flags()->use_globals) 272 ProcessGlobalRegions(&frontier); 273 ProcessThreads(suspended_threads, &frontier); 274 FloodFillTag(&frontier, kReachable); 275 // The check here is relatively expensive, so we do this in a separate flood 276 // fill. That way we can skip the check for chunks that are reachable 277 // otherwise. 278 ProcessPlatformSpecificAllocations(&frontier); 279 FloodFillTag(&frontier, kReachable); 280 281 if (flags()->log_pointers) 282 Report("Scanning ignored chunks.\n"); 283 CHECK_EQ(0, frontier.size()); 284 ForEachChunk(CollectIgnoredCb, &frontier); 285 FloodFillTag(&frontier, kIgnored); 286 287 // Iterate over leaked chunks and mark those that are reachable from other 288 // leaked chunks. 289 if (flags()->log_pointers) 290 Report("Scanning leaked chunks.\n"); 291 ForEachChunk(MarkIndirectlyLeakedCb, 0 /* arg */); 292} 293 294static void PrintStackTraceById(u32 stack_trace_id) { 295 CHECK(stack_trace_id); 296 uptr size = 0; 297 const uptr *trace = StackDepotGet(stack_trace_id, &size); 298 StackTrace::PrintStack(trace, size, common_flags()->symbolize, 0); 299} 300 301// ForEachChunk callback. Aggregates unreachable chunks into a LeakReport. 302static void CollectLeaksCb(uptr chunk, void *arg) { 303 CHECK(arg); 304 LeakReport *leak_report = reinterpret_cast<LeakReport *>(arg); 305 chunk = GetUserBegin(chunk); 306 LsanMetadata m(chunk); 307 if (!m.allocated()) return; 308 if (m.tag() == kDirectlyLeaked || m.tag() == kIndirectlyLeaked) { 309 uptr resolution = flags()->resolution; 310 if (resolution > 0) { 311 uptr size = 0; 312 const uptr *trace = StackDepotGet(m.stack_trace_id(), &size); 313 size = Min(size, resolution); 314 leak_report->Add(StackDepotPut(trace, size), m.requested_size(), m.tag()); 315 } else { 316 leak_report->Add(m.stack_trace_id(), m.requested_size(), m.tag()); 317 } 318 } 319} 320 321// ForEachChunkCallback. Prints addresses of unreachable chunks. 322static void PrintLeakedCb(uptr chunk, void *arg) { 323 chunk = GetUserBegin(chunk); 324 LsanMetadata m(chunk); 325 if (!m.allocated()) return; 326 if (m.tag() == kDirectlyLeaked || m.tag() == kIndirectlyLeaked) { 327 Printf("%s leaked %zu byte object at %p.\n", 328 m.tag() == kDirectlyLeaked ? "Directly" : "Indirectly", 329 m.requested_size(), chunk); 330 } 331} 332 333static void PrintMatchedSuppressions() { 334 InternalMmapVector<Suppression *> matched(1); 335 suppression_ctx->GetMatched(&matched); 336 if (!matched.size()) 337 return; 338 const char *line = "-----------------------------------------------------"; 339 Printf("%s\n", line); 340 Printf("Suppressions used:\n"); 341 Printf(" count bytes template\n"); 342 for (uptr i = 0; i < matched.size(); i++) 343 Printf("%7zu %10zu %s\n", static_cast<uptr>(matched[i]->hit_count), 344 matched[i]->weight, matched[i]->templ); 345 Printf("%s\n\n", line); 346} 347 348static void PrintLeaked() { 349 Printf("\n"); 350 Printf("Reporting individual objects:\n"); 351 ForEachChunk(PrintLeakedCb, 0 /* arg */); 352} 353 354struct DoLeakCheckParam { 355 bool success; 356 LeakReport leak_report; 357}; 358 359static void DoLeakCheckCallback(const SuspendedThreadsList &suspended_threads, 360 void *arg) { 361 DoLeakCheckParam *param = reinterpret_cast<DoLeakCheckParam *>(arg); 362 CHECK(param); 363 CHECK(!param->success); 364 CHECK(param->leak_report.IsEmpty()); 365 ClassifyAllChunks(suspended_threads); 366 ForEachChunk(CollectLeaksCb, ¶m->leak_report); 367 if (!param->leak_report.IsEmpty() && flags()->report_objects) 368 PrintLeaked(); 369 param->success = true; 370} 371 372void DoLeakCheck() { 373 EnsureMainThreadIDIsCorrect(); 374 BlockingMutexLock l(&global_mutex); 375 static bool already_done; 376 if (already_done) return; 377 already_done = true; 378 if (&__lsan_is_turned_off && __lsan_is_turned_off()) 379 return; 380 381 DoLeakCheckParam param; 382 param.success = false; 383 LockThreadRegistry(); 384 LockAllocator(); 385 StopTheWorld(DoLeakCheckCallback, ¶m); 386 UnlockAllocator(); 387 UnlockThreadRegistry(); 388 389 if (!param.success) { 390 Report("LeakSanitizer has encountered a fatal error.\n"); 391 Die(); 392 } 393 uptr have_unsuppressed = param.leak_report.ApplySuppressions(); 394 if (have_unsuppressed) { 395 Decorator d; 396 Printf("\n" 397 "=================================================================" 398 "\n"); 399 Printf("%s", d.Error()); 400 Report("ERROR: LeakSanitizer: detected memory leaks\n"); 401 Printf("%s", d.End()); 402 param.leak_report.PrintLargest(flags()->max_leaks); 403 } 404 if (have_unsuppressed || (flags()->verbosity >= 1)) { 405 PrintMatchedSuppressions(); 406 param.leak_report.PrintSummary(); 407 } 408 if (have_unsuppressed && flags()->exitcode) 409 internal__exit(flags()->exitcode); 410} 411 412static Suppression *GetSuppressionForAddr(uptr addr) { 413 static const uptr kMaxAddrFrames = 16; 414 InternalScopedBuffer<AddressInfo> addr_frames(kMaxAddrFrames); 415 for (uptr i = 0; i < kMaxAddrFrames; i++) new (&addr_frames[i]) AddressInfo(); 416 uptr addr_frames_num = 417 getSymbolizer()->SymbolizeCode(addr, addr_frames.data(), kMaxAddrFrames); 418 for (uptr i = 0; i < addr_frames_num; i++) { 419 Suppression* s; 420 if (suppression_ctx->Match(addr_frames[i].function, SuppressionLeak, &s) || 421 suppression_ctx->Match(addr_frames[i].file, SuppressionLeak, &s) || 422 suppression_ctx->Match(addr_frames[i].module, SuppressionLeak, &s)) 423 return s; 424 } 425 return 0; 426} 427 428static Suppression *GetSuppressionForStack(u32 stack_trace_id) { 429 uptr size = 0; 430 const uptr *trace = StackDepotGet(stack_trace_id, &size); 431 for (uptr i = 0; i < size; i++) { 432 Suppression *s = 433 GetSuppressionForAddr(StackTrace::GetPreviousInstructionPc(trace[i])); 434 if (s) return s; 435 } 436 return 0; 437} 438 439///// LeakReport implementation. ///// 440 441// A hard limit on the number of distinct leaks, to avoid quadratic complexity 442// in LeakReport::Add(). We don't expect to ever see this many leaks in 443// real-world applications. 444// FIXME: Get rid of this limit by changing the implementation of LeakReport to 445// use a hash table. 446const uptr kMaxLeaksConsidered = 5000; 447 448void LeakReport::Add(u32 stack_trace_id, uptr leaked_size, ChunkTag tag) { 449 CHECK(tag == kDirectlyLeaked || tag == kIndirectlyLeaked); 450 bool is_directly_leaked = (tag == kDirectlyLeaked); 451 for (uptr i = 0; i < leaks_.size(); i++) 452 if (leaks_[i].stack_trace_id == stack_trace_id && 453 leaks_[i].is_directly_leaked == is_directly_leaked) { 454 leaks_[i].hit_count++; 455 leaks_[i].total_size += leaked_size; 456 return; 457 } 458 if (leaks_.size() == kMaxLeaksConsidered) return; 459 Leak leak = { /* hit_count */ 1, leaked_size, stack_trace_id, 460 is_directly_leaked, /* is_suppressed */ false }; 461 leaks_.push_back(leak); 462} 463 464static bool LeakComparator(const Leak &leak1, const Leak &leak2) { 465 if (leak1.is_directly_leaked == leak2.is_directly_leaked) 466 return leak1.total_size > leak2.total_size; 467 else 468 return leak1.is_directly_leaked; 469} 470 471void LeakReport::PrintLargest(uptr num_leaks_to_print) { 472 CHECK(leaks_.size() <= kMaxLeaksConsidered); 473 Printf("\n"); 474 if (leaks_.size() == kMaxLeaksConsidered) 475 Printf("Too many leaks! Only the first %zu leaks encountered will be " 476 "reported.\n", 477 kMaxLeaksConsidered); 478 479 uptr unsuppressed_count = 0; 480 for (uptr i = 0; i < leaks_.size(); i++) 481 if (!leaks_[i].is_suppressed) unsuppressed_count++; 482 if (num_leaks_to_print > 0 && num_leaks_to_print < unsuppressed_count) 483 Printf("The %zu largest leak(s):\n", num_leaks_to_print); 484 InternalSort(&leaks_, leaks_.size(), LeakComparator); 485 uptr leaks_printed = 0; 486 Decorator d; 487 for (uptr i = 0; i < leaks_.size(); i++) { 488 if (leaks_[i].is_suppressed) continue; 489 Printf("%s", d.Leak()); 490 Printf("%s leak of %zu byte(s) in %zu object(s) allocated from:\n", 491 leaks_[i].is_directly_leaked ? "Direct" : "Indirect", 492 leaks_[i].total_size, leaks_[i].hit_count); 493 Printf("%s", d.End()); 494 PrintStackTraceById(leaks_[i].stack_trace_id); 495 Printf("\n"); 496 leaks_printed++; 497 if (leaks_printed == num_leaks_to_print) break; 498 } 499 if (leaks_printed < unsuppressed_count) { 500 uptr remaining = unsuppressed_count - leaks_printed; 501 Printf("Omitting %zu more leak(s).\n", remaining); 502 } 503} 504 505void LeakReport::PrintSummary() { 506 CHECK(leaks_.size() <= kMaxLeaksConsidered); 507 uptr bytes = 0, allocations = 0; 508 for (uptr i = 0; i < leaks_.size(); i++) { 509 if (leaks_[i].is_suppressed) continue; 510 bytes += leaks_[i].total_size; 511 allocations += leaks_[i].hit_count; 512 } 513 const int kMaxSummaryLength = 128; 514 InternalScopedBuffer<char> summary(kMaxSummaryLength); 515 internal_snprintf(summary.data(), kMaxSummaryLength, 516 "SUMMARY: LeakSanitizer: %zu byte(s) leaked in %zu " 517 "allocation(s).", 518 bytes, allocations); 519 __sanitizer_report_error_summary(summary.data()); 520} 521 522uptr LeakReport::ApplySuppressions() { 523 uptr unsuppressed_count = 0; 524 for (uptr i = 0; i < leaks_.size(); i++) { 525 Suppression *s = GetSuppressionForStack(leaks_[i].stack_trace_id); 526 if (s) { 527 s->weight += leaks_[i].total_size; 528 s->hit_count += leaks_[i].hit_count; 529 leaks_[i].is_suppressed = true; 530 } else { 531 unsuppressed_count++; 532 } 533 } 534 return unsuppressed_count; 535} 536} // namespace __lsan 537#endif // CAN_SANITIZE_LEAKS 538 539using namespace __lsan; // NOLINT 540 541extern "C" { 542SANITIZER_INTERFACE_ATTRIBUTE 543void __lsan_ignore_object(const void *p) { 544#if CAN_SANITIZE_LEAKS 545 if (!common_flags()->detect_leaks) 546 return; 547 // Cannot use PointsIntoChunk or LsanMetadata here, since the allocator is not 548 // locked. 549 BlockingMutexLock l(&global_mutex); 550 IgnoreObjectResult res = IgnoreObjectLocked(p); 551 if (res == kIgnoreObjectInvalid && flags()->verbosity >= 2) 552 Report("__lsan_ignore_object(): no heap object found at %p", p); 553 if (res == kIgnoreObjectAlreadyIgnored && flags()->verbosity >= 2) 554 Report("__lsan_ignore_object(): " 555 "heap object at %p is already being ignored\n", p); 556 if (res == kIgnoreObjectSuccess && flags()->verbosity >= 3) 557 Report("__lsan_ignore_object(): ignoring heap object at %p\n", p); 558#endif // CAN_SANITIZE_LEAKS 559} 560 561SANITIZER_INTERFACE_ATTRIBUTE 562void __lsan_disable() { 563#if CAN_SANITIZE_LEAKS 564 __lsan::disable_counter++; 565#endif 566} 567 568SANITIZER_INTERFACE_ATTRIBUTE 569void __lsan_enable() { 570#if CAN_SANITIZE_LEAKS 571 if (!__lsan::disable_counter && common_flags()->detect_leaks) { 572 Report("Unmatched call to __lsan_enable().\n"); 573 Die(); 574 } 575 __lsan::disable_counter--; 576#endif 577} 578 579SANITIZER_INTERFACE_ATTRIBUTE 580void __lsan_do_leak_check() { 581#if CAN_SANITIZE_LEAKS 582 if (common_flags()->detect_leaks) 583 __lsan::DoLeakCheck(); 584#endif // CAN_SANITIZE_LEAKS 585} 586 587#if !SANITIZER_SUPPORTS_WEAK_HOOKS 588SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE 589int __lsan_is_turned_off() { 590 return 0; 591} 592#endif 593} // extern "C" 594