tsan_rtl.cc revision 82dbc5195ceedba0e1a9aab92d436614cc4b7ff9
//===-- tsan_rtl.cc -------------------------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
// Main file (entry points) for the TSan run-time.
//===----------------------------------------------------------------------===//

#include "sanitizer_common/sanitizer_atomic.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_libc.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_symbolizer.h"
#include "tsan_defs.h"
#include "tsan_platform.h"
#include "tsan_rtl.h"
#include "tsan_mman.h"
#include "tsan_suppressions.h"

volatile int __tsan_resumed = 0;

extern "C" void __tsan_resume() {
  __tsan_resumed = 1;
}

namespace __tsan {

#ifndef TSAN_GO
THREADLOCAL char cur_thread_placeholder[sizeof(ThreadState)] ALIGNED(64);
#endif
static char ctx_placeholder[sizeof(Context)] ALIGNED(64);

// Can be overridden by a front-end.
bool CPP_WEAK OnFinalize(bool failed) {
  return failed;
}

static Context *ctx;
Context *CTX() {
  return ctx;
}

static char thread_registry_placeholder[sizeof(ThreadRegistry)];

static ThreadContextBase *CreateThreadContext(u32 tid) {
  // Map thread trace when context is created.
  MapThreadTrace(GetThreadTrace(tid), TraceSize() * sizeof(Event));
  MapThreadTrace(GetThreadTraceHeader(tid), sizeof(Trace));
  new(ThreadTrace(tid)) Trace();
  void *mem = internal_alloc(MBlockThreadContex, sizeof(ThreadContext));
  return new(mem) ThreadContext(tid);
}

#ifndef TSAN_GO
static const u32 kThreadQuarantineSize = 16;
#else
static const u32 kThreadQuarantineSize = 64;
#endif

Context::Context()
  : initialized()
  , report_mtx(MutexTypeReport, StatMtxReport)
  , nreported()
  , nmissed_expected()
  , thread_registry(new(thread_registry_placeholder) ThreadRegistry(
      CreateThreadContext, kMaxTid, kThreadQuarantineSize))
  , racy_stacks(MBlockRacyStacks)
  , racy_addresses(MBlockRacyAddresses)
  , fired_suppressions(MBlockRacyAddresses) {
}
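
// Note: Context, the ThreadRegistry and (below) ThreadState live in
// statically allocated placeholder buffers and are constructed with
// placement new, rather than on the heap; presumably so that run-time
// startup does not depend on the internal allocator and so that C++
// dynamic-initialization order issues are avoided.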

// The objects are allocated in TLS, so one may rely on zero-initialization.
ThreadState::ThreadState(Context *ctx, int tid, int unique_id, u64 epoch,
                         uptr stk_addr, uptr stk_size,
                         uptr tls_addr, uptr tls_size)
  : fast_state(tid, epoch)
  // Do not touch these, rely on zero initialization,
  // they may be accessed before the ctor.
  // , fast_ignore_reads()
  // , fast_ignore_writes()
  // , in_rtl()
  , shadow_stack_pos(&shadow_stack[0])
  , tid(tid)
  , unique_id(unique_id)
  , stk_addr(stk_addr)
  , stk_size(stk_size)
  , tls_addr(tls_addr)
  , tls_size(tls_size) {
}

static void MemoryProfileThread(void *arg) {
  ScopedInRtl in_rtl;
  fd_t fd = (fd_t)(uptr)arg;
  Context *ctx = CTX();
  for (int i = 0; ; i++) {
    InternalScopedBuffer<char> buf(4096);
    uptr n_threads;
    uptr n_running_threads;
    ctx->thread_registry->GetNumberOfThreads(&n_threads, &n_running_threads);
    internal_snprintf(buf.data(), buf.size(), "%d: nthr=%d nlive=%d\n",
        i, n_threads, n_running_threads);
    internal_write(fd, buf.data(), internal_strlen(buf.data()));
    WriteMemoryProfile(buf.data(), buf.size());
    internal_write(fd, buf.data(), internal_strlen(buf.data()));
    SleepForSeconds(1);
  }
}

static void InitializeMemoryProfile() {
  if (flags()->profile_memory == 0 || flags()->profile_memory[0] == 0)
    return;
  InternalScopedBuffer<char> filename(4096);
  internal_snprintf(filename.data(), filename.size(), "%s.%d",
      flags()->profile_memory, GetPid());
  fd_t fd = OpenFile(filename.data(), true);
  if (fd == kInvalidFd) {
    Printf("Failed to open memory profile file '%s'\n", &filename[0]);
    Die();
  }
  internal_start_thread(&MemoryProfileThread, (void*)(uptr)fd);
}

void DontNeedShadowFor(uptr addr, uptr size) {
  uptr shadow_beg = MemToShadow(addr);
  uptr shadow_end = MemToShadow(addr + size);
  FlushUnneededShadowMemory(shadow_beg, shadow_end - shadow_beg);
}

static void MemoryFlushThread(void *arg) {
  ScopedInRtl in_rtl;
  for (int i = 0; ; i++) {
    SleepForMillis(flags()->flush_memory_ms);
    FlushShadowMemory();
  }
}

static void InitializeMemoryFlush() {
  if (flags()->flush_memory_ms == 0)
    return;
  if (flags()->flush_memory_ms < 100)
    flags()->flush_memory_ms = 100;
  internal_start_thread(&MemoryFlushThread, 0);
}

void MapShadow(uptr addr, uptr size) {
  MmapFixedNoReserve(MemToShadow(addr), size * kShadowMultiplier);
}
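
// An illustration of the shadow arithmetic above (values are the typical
// 64-bit defaults; tsan_defs.h is authoritative): with kShadowCell == 8,
// kShadowSize == 8 and kShadowCnt == 4, every 8-byte application cell is
// backed by 4 consecutive u64 shadow slots, so kShadowMultiplier == 4 and
// mapping shadow for a 1 MB application range reserves 4 MB:
//   u64 *s0 = (u64*)MemToShadow(addr);      // slots for the cell at 'addr'
//   u64 *s1 = (u64*)MemToShadow(addr + 8);  // == s0 + kShadowCnt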

void MapThreadTrace(uptr addr, uptr size) {
  DPrintf("#0: Mapping trace at %p-%p(0x%zx)\n", addr, addr + size, size);
  CHECK_GE(addr, kTraceMemBegin);
  CHECK_LE(addr + size, kTraceMemBegin + kTraceMemSize);
  if (addr != (uptr)MmapFixedNoReserve(addr, size)) {
    Printf("FATAL: ThreadSanitizer cannot mmap thread trace\n");
    Die();
  }
}

void Initialize(ThreadState *thr) {
  // Thread safe because done before all threads exist.
  static bool is_initialized = false;
  if (is_initialized)
    return;
  is_initialized = true;
  SanitizerToolName = "ThreadSanitizer";
  // Install tool-specific callbacks in sanitizer_common.
  SetCheckFailedCallback(TsanCheckFailed);

  ScopedInRtl in_rtl;
#ifndef TSAN_GO
  InitializeAllocator();
#endif
  InitializeInterceptors();
  const char *env = InitializePlatform();
  InitializeMutex();
  InitializeDynamicAnnotations();
  ctx = new(ctx_placeholder) Context;
#ifndef TSAN_GO
  InitializeShadowMemory();
#endif
  InitializeFlags(&ctx->flags, env);
  // Set up the correct file descriptor for error reports.
  if (internal_strcmp(flags()->log_path, "stdout") == 0)
    __sanitizer_set_report_fd(kStdoutFd);
  else if (internal_strcmp(flags()->log_path, "stderr") == 0)
    __sanitizer_set_report_fd(kStderrFd);
  else
    __sanitizer_set_report_path(flags()->log_path);
  InitializeSuppressions();
#ifndef TSAN_GO
  // Initialize external symbolizer before internal threads are started.
  const char *external_symbolizer = flags()->external_symbolizer_path;
  if (external_symbolizer != 0 && external_symbolizer[0] != '\0') {
    if (!InitializeExternalSymbolizer(external_symbolizer)) {
      Printf("Failed to start external symbolizer: '%s'\n",
             external_symbolizer);
      Die();
    }
  }
#endif
  InitializeMemoryProfile();
  InitializeMemoryFlush();

  if (ctx->flags.verbosity)
    Printf("***** Running under ThreadSanitizer v2 (pid %d) *****\n",
           GetPid());

  // Initialize thread 0.
  int tid = ThreadCreate(thr, 0, 0, true);
  CHECK_EQ(tid, 0);
  ThreadStart(thr, tid, GetPid());
  CHECK_EQ(thr->in_rtl, 1);
  ctx->initialized = true;

  if (flags()->stop_on_start) {
    Printf("ThreadSanitizer is suspended at startup (pid %d)."
           " Call __tsan_resume().\n",
           GetPid());
    while (__tsan_resumed == 0) {}
  }
}

int Finalize(ThreadState *thr) {
  ScopedInRtl in_rtl;
  Context *ctx = __tsan::ctx;
  bool failed = false;

  if (flags()->atexit_sleep_ms > 0 && ThreadCount(thr) > 1)
    SleepForMillis(flags()->atexit_sleep_ms);

  // Wait for pending reports.
  ctx->report_mtx.Lock();
  ctx->report_mtx.Unlock();

#ifndef TSAN_GO
  if (ctx->flags.verbosity)
    AllocatorPrintStats();
#endif

  ThreadFinalize(thr);

  if (ctx->nreported) {
    failed = true;
#ifndef TSAN_GO
    Printf("ThreadSanitizer: reported %d warnings\n", ctx->nreported);
#else
    Printf("Found %d data race(s)\n", ctx->nreported);
#endif
  }

  if (ctx->nmissed_expected) {
    failed = true;
    Printf("ThreadSanitizer: missed %d expected races\n",
           ctx->nmissed_expected);
  }

  failed = OnFinalize(failed);

  StatAggregate(ctx->stat, thr->stat);
  StatOutput(ctx->stat);
  return failed ? flags()->exitcode : 0;
}
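
// CurrentStackId (below) snapshots the current shadow call stack into the
// stack depot and returns a compact u32 id for it.  A minimal usage sketch
// (illustrative; typical callers are bookkeeping paths that want to
// remember where something happened):
//   u32 id = CurrentStackId(thr, pc);  // non-zero pc is included on top
//   u32 id2 = CurrentStackId(thr, 0);  // just the stack as it is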

#ifndef TSAN_GO
u32 CurrentStackId(ThreadState *thr, uptr pc) {
  if (thr->shadow_stack_pos == 0)  // May happen during bootstrap.
    return 0;
  if (pc) {
    thr->shadow_stack_pos[0] = pc;
    thr->shadow_stack_pos++;
  }
  u32 id = StackDepotPut(thr->shadow_stack,
                         thr->shadow_stack_pos - thr->shadow_stack);
  if (pc)
    thr->shadow_stack_pos--;
  return id;
}
#endif

void TraceSwitch(ThreadState *thr) {
  thr->nomalloc++;
  ScopedInRtl in_rtl;
  Trace *thr_trace = ThreadTrace(thr->tid);
  Lock l(&thr_trace->mtx);
  unsigned trace = (thr->fast_state.epoch() / kTracePartSize) % TraceParts();
  TraceHeader *hdr = &thr_trace->headers[trace];
  hdr->epoch0 = thr->fast_state.epoch();
  hdr->stack0.ObtainCurrent(thr, 0);
  hdr->mset0 = thr->mset;
  thr->nomalloc--;
}

Trace *ThreadTrace(int tid) {
  return (Trace*)GetThreadTraceHeader(tid);
}

uptr TraceTopPC(ThreadState *thr) {
  Event *events = (Event*)GetThreadTrace(thr->tid);
  uptr pc = events[thr->fast_state.GetTracePos()];
  return pc;
}

uptr TraceSize() {
  return (uptr)(1ull << (kTracePartSizeBits + flags()->history_size + 1));
}

uptr TraceParts() {
  return TraceSize() / kTracePartSize;
}

#ifndef TSAN_GO
extern "C" void __tsan_trace_switch() {
  TraceSwitch(cur_thread());
}

extern "C" void __tsan_report_race() {
  ReportRace(cur_thread());
}
#endif

ALWAYS_INLINE
static Shadow LoadShadow(u64 *p) {
  u64 raw = atomic_load((atomic_uint64_t*)p, memory_order_relaxed);
  return Shadow(raw);
}

ALWAYS_INLINE
static void StoreShadow(u64 *sp, u64 s) {
  atomic_store((atomic_uint64_t*)sp, s, memory_order_relaxed);
}

ALWAYS_INLINE
static void StoreIfNotYetStored(u64 *sp, u64 *s) {
  StoreShadow(sp, *s);
  *s = 0;
}

static inline void HandleRace(ThreadState *thr, u64 *shadow_mem,
                              Shadow cur, Shadow old) {
  thr->racy_state[0] = cur.raw();
  thr->racy_state[1] = old.raw();
  thr->racy_shadow_addr = shadow_mem;
#ifndef TSAN_GO
  HACKY_CALL(__tsan_report_race);
#else
  ReportRace(thr);
#endif
}

static inline bool OldIsInSameSynchEpoch(Shadow old, ThreadState *thr) {
  return old.epoch() >= thr->fast_synch_epoch;
}

static inline bool HappensBefore(Shadow old, ThreadState *thr) {
  return thr->clock.get(old.TidWithIgnore()) >= old.epoch();
}

ALWAYS_INLINE
void MemoryAccessImpl(ThreadState *thr, uptr addr,
    int kAccessSizeLog, bool kAccessIsWrite, bool kIsAtomic,
    u64 *shadow_mem, Shadow cur) {
  StatInc(thr, StatMop);
  StatInc(thr, kAccessIsWrite ? StatMopWrite : StatMopRead);
  StatInc(thr, (StatType)(StatMop1 + kAccessSizeLog));

  // This potentially can live in an MMX/SSE scratch register.
  // The required intrinsics are:
  // __m128i _mm_move_epi64(__m128i*);
  // _mm_storel_epi64(u64*, __m128i);
  u64 store_word = cur.raw();

  // Scan all the shadow values and dispatch to 4 categories:
  // same, replace, candidate and race (see comments below).
  // We consider only 3 cases regarding access sizes:
  // equal, intersect and not intersect.  Initially 'larger' and 'smaller'
  // were considered as well, which allowed replacing some 'candidates'
  // with 'same' or 'replace', but it's just not worth it
  // (performance- and complexity-wise).
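
  // A rough sketch of the logic that each textual inclusion of
  // tsan_update_shadow_word_inl.h performs for slot 'idx' (illustrative
  // only; the header is the authoritative version):
  //   old = LoadShadow(&shadow_mem[idx]);
  //   if (old is zero)                  "replace": store cur if not yet stored;
  //   else if (same range, same thread)                      "same": done;
  //   else if (happens-before / same synch epoch,
  //            or both reads, or both atomic)    "candidate": keep scanning;
  //   else if (the two ranges intersect)                     goto RACE;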

  Shadow old(0);
  if (kShadowCnt == 1) {
    int idx = 0;
#include "tsan_update_shadow_word_inl.h"
  } else if (kShadowCnt == 2) {
    int idx = 0;
#include "tsan_update_shadow_word_inl.h"
    idx = 1;
#include "tsan_update_shadow_word_inl.h"
  } else if (kShadowCnt == 4) {
    int idx = 0;
#include "tsan_update_shadow_word_inl.h"
    idx = 1;
#include "tsan_update_shadow_word_inl.h"
    idx = 2;
#include "tsan_update_shadow_word_inl.h"
    idx = 3;
#include "tsan_update_shadow_word_inl.h"
  } else if (kShadowCnt == 8) {
    int idx = 0;
#include "tsan_update_shadow_word_inl.h"
    idx = 1;
#include "tsan_update_shadow_word_inl.h"
    idx = 2;
#include "tsan_update_shadow_word_inl.h"
    idx = 3;
#include "tsan_update_shadow_word_inl.h"
    idx = 4;
#include "tsan_update_shadow_word_inl.h"
    idx = 5;
#include "tsan_update_shadow_word_inl.h"
    idx = 6;
#include "tsan_update_shadow_word_inl.h"
    idx = 7;
#include "tsan_update_shadow_word_inl.h"
  } else {
    CHECK(false);
  }

  // we did not find any races and had already stored
  // the current access info, so we are done
  if (LIKELY(store_word == 0))
    return;
  // choose a random candidate slot and replace it
  StoreShadow(shadow_mem + (cur.epoch() % kShadowCnt), store_word);
  StatInc(thr, StatShadowReplace);
  return;
 RACE:
  HandleRace(thr, shadow_mem, cur, old);
  return;
}

ALWAYS_INLINE
void MemoryAccess(ThreadState *thr, uptr pc, uptr addr,
    int kAccessSizeLog, bool kAccessIsWrite, bool kIsAtomic) {
  u64 *shadow_mem = (u64*)MemToShadow(addr);
  DPrintf2("#%d: MemoryAccess: @%p %p size=%d"
      " is_write=%d shadow_mem=%p {%zx, %zx, %zx, %zx}\n",
      (int)thr->fast_state.tid(), (void*)pc, (void*)addr,
      (int)(1 << kAccessSizeLog), kAccessIsWrite, shadow_mem,
      (uptr)shadow_mem[0], (uptr)shadow_mem[1],
      (uptr)shadow_mem[2], (uptr)shadow_mem[3]);
#if TSAN_DEBUG
  if (!IsAppMem(addr)) {
    Printf("Access to non app mem %zx\n", addr);
    DCHECK(IsAppMem(addr));
  }
  if (!IsShadowMem((uptr)shadow_mem)) {
    Printf("Bad shadow addr %p (%zx)\n", shadow_mem, addr);
    DCHECK(IsShadowMem((uptr)shadow_mem));
  }
#endif

  if (*shadow_mem == kShadowRodata) {
    // Access to .rodata section, no races here.
    // Measurements show that it can be 10-20% of all memory accesses.
    StatInc(thr, StatMop);
    StatInc(thr, kAccessIsWrite ? StatMopWrite : StatMopRead);
    StatInc(thr, (StatType)(StatMop1 + kAccessSizeLog));
    StatInc(thr, StatMopRodata);
    return;
  }

  FastState fast_state = thr->fast_state;
  if (fast_state.GetIgnoreBit())
    return;
  fast_state.IncrementEpoch();
  thr->fast_state = fast_state;
  Shadow cur(fast_state);
  cur.SetAddr0AndSizeLog(addr & 7, kAccessSizeLog);
  cur.SetWrite(kAccessIsWrite);
  cur.SetAtomic(kIsAtomic);

  // We must not store to the trace if we do not store to the shadow.
  // That is, this call must be moved somewhere below.
  TraceAddEvent(thr, fast_state, EventTypeMop, pc);

  MemoryAccessImpl(thr, addr, kAccessSizeLog, kAccessIsWrite, kIsAtomic,
      shadow_mem, cur);
}
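
// A note on the shadow layout MemoryRangeSet (below) relies on: each
// kShadowCell-byte application cell is backed by kShadowCnt consecutive u64
// shadow slots; the function writes 'val' into the first slot of every cell
// and zeroes the remaining slots.  Worked example, assuming the typical
// defaults kShadowCell == 8 and kShadowCnt == 4: a 64-byte range covers
// 8 cells, i.e. 8 * 4 = 32 shadow words, of which slots 0, 4, 8, ...
// receive 'val' and the rest become 0.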

static void MemoryRangeSet(ThreadState *thr, uptr pc, uptr addr, uptr size,
                           u64 val) {
  (void)thr;
  (void)pc;
  if (size == 0)
    return;
  // FIXME: fix me.
  uptr offset = addr % kShadowCell;
  if (offset) {
    offset = kShadowCell - offset;
    if (size <= offset)
      return;
    addr += offset;
    size -= offset;
  }
  DCHECK_EQ(addr % 8, 0);
  // If a user passes some insane arguments (memset(0)),
  // let it just crash as usual.
  if (!IsAppMem(addr) || !IsAppMem(addr + size - 1))
    return;
  // Don't want to touch lots of shadow memory.
  // If a program maps 10MB stack, there is no need to reset the whole range.
  size = (size + (kShadowCell - 1)) & ~(kShadowCell - 1);
  if (size < 64*1024) {
    u64 *p = (u64*)MemToShadow(addr);
    CHECK(IsShadowMem((uptr)p));
    CHECK(IsShadowMem((uptr)(p + size * kShadowCnt / kShadowCell - 1)));
    // FIXME: may overwrite a part outside the region
    for (uptr i = 0; i < size / kShadowCell * kShadowCnt;) {
      p[i++] = val;
      for (uptr j = 1; j < kShadowCnt; j++)
        p[i++] = 0;
    }
  } else {
    // The region is big, reset only beginning and end.
    const uptr kPageSize = 4096;
    u64 *begin = (u64*)MemToShadow(addr);
    u64 *end = begin + size / kShadowCell * kShadowCnt;
    u64 *p = begin;
    // Set at least first kPageSize/2 to page boundary.
    while ((p < begin + kPageSize / kShadowSize / 2) || ((uptr)p % kPageSize)) {
      *p++ = val;
      for (uptr j = 1; j < kShadowCnt; j++)
        *p++ = 0;
    }
    // Reset middle part.
    u64 *p1 = p;
    p = RoundDown(end, kPageSize);
    UnmapOrDie((void*)p1, (uptr)p - (uptr)p1);
    MmapFixedNoReserve((uptr)p1, (uptr)p - (uptr)p1);
    // Set the ending.
    while (p < end) {
      *p++ = val;
      for (uptr j = 1; j < kShadowCnt; j++)
        *p++ = 0;
    }
  }
}

void MemoryResetRange(ThreadState *thr, uptr pc, uptr addr, uptr size) {
  MemoryRangeSet(thr, pc, addr, size, 0);
}

void MemoryRangeFreed(ThreadState *thr, uptr pc, uptr addr, uptr size) {
  // Processing more than 1k (4k of shadow) is expensive,
  // can cause excessive memory consumption (the user does not necessarily
  // touch the whole range) and is most likely unnecessary.
  if (size > 1024)
    size = 1024;
  CHECK_EQ(thr->is_freeing, false);
  thr->is_freeing = true;
  MemoryAccessRange(thr, pc, addr, size, true);
  thr->is_freeing = false;
  Shadow s(thr->fast_state);
  s.ClearIgnoreBit();
  s.MarkAsFreed();
  s.SetWrite(true);
  s.SetAddr0AndSizeLog(0, 3);
  MemoryRangeSet(thr, pc, addr, size, s.raw());
}

void MemoryRangeImitateWrite(ThreadState *thr, uptr pc, uptr addr, uptr size) {
  Shadow s(thr->fast_state);
  s.ClearIgnoreBit();
  s.SetWrite(true);
  s.SetAddr0AndSizeLog(0, 3);
  MemoryRangeSet(thr, pc, addr, size, s.raw());
}
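
// FuncEntry/FuncExit (below) maintain the per-thread shadow call stack from
// which race reports are symbolized.  A rough sketch of the instrumentation
// the compiler emits around a function body (illustrative only):
//   void foo() {
//     __tsan_func_entry(__builtin_return_address(0));
//     ...instrumented body: __tsan_read4/__tsan_write8/etc. on accesses...
//     __tsan_func_exit();
//   }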

ALWAYS_INLINE
void FuncEntry(ThreadState *thr, uptr pc) {
  DCHECK_EQ(thr->in_rtl, 0);
  StatInc(thr, StatFuncEnter);
  DPrintf2("#%d: FuncEntry %p\n", (int)thr->fast_state.tid(), (void*)pc);
  thr->fast_state.IncrementEpoch();
  TraceAddEvent(thr, thr->fast_state, EventTypeFuncEnter, pc);

  // Shadow stack maintenance can be replaced with
  // stack unwinding during trace switch (which is presumably faster).
  DCHECK_GE(thr->shadow_stack_pos, &thr->shadow_stack[0]);
#ifndef TSAN_GO
  DCHECK_LT(thr->shadow_stack_pos, &thr->shadow_stack[kShadowStackSize]);
#else
  if (thr->shadow_stack_pos == thr->shadow_stack_end) {
    // In Go mode the shadow stack is heap-allocated; double it when full.
    const int sz = thr->shadow_stack_end - thr->shadow_stack;
    const int newsz = 2 * sz;
    uptr *newstack = (uptr*)internal_alloc(MBlockShadowStack,
        newsz * sizeof(uptr));
    internal_memcpy(newstack, thr->shadow_stack, sz * sizeof(uptr));
    internal_free(thr->shadow_stack);
    thr->shadow_stack = newstack;
    thr->shadow_stack_pos = newstack + sz;
    thr->shadow_stack_end = newstack + newsz;
  }
#endif
  thr->shadow_stack_pos[0] = pc;
  thr->shadow_stack_pos++;
}

ALWAYS_INLINE
void FuncExit(ThreadState *thr) {
  DCHECK_EQ(thr->in_rtl, 0);
  StatInc(thr, StatFuncExit);
  DPrintf2("#%d: FuncExit\n", (int)thr->fast_state.tid());
  thr->fast_state.IncrementEpoch();
  TraceAddEvent(thr, thr->fast_state, EventTypeFuncExit, 0);

  DCHECK_GT(thr->shadow_stack_pos, &thr->shadow_stack[0]);
#ifndef TSAN_GO
  DCHECK_LT(thr->shadow_stack_pos, &thr->shadow_stack[kShadowStackSize]);
#endif
  thr->shadow_stack_pos--;
}

void IgnoreCtl(ThreadState *thr, bool write, bool begin) {
  DPrintf("#%d: IgnoreCtl(%d, %d)\n", thr->tid, write, begin);
  thr->ignore_reads_and_writes += begin ? 1 : -1;
  CHECK_GE(thr->ignore_reads_and_writes, 0);
  if (thr->ignore_reads_and_writes)
    thr->fast_state.SetIgnoreBit();
  else
    thr->fast_state.ClearIgnoreBit();
}

bool MD5Hash::operator==(const MD5Hash &other) const {
  return hash[0] == other.hash[0] && hash[1] == other.hash[1];
}

#if TSAN_DEBUG
void build_consistency_debug() {}
#else
void build_consistency_release() {}
#endif

#if TSAN_COLLECT_STATS
void build_consistency_stats() {}
#else
void build_consistency_nostats() {}
#endif

#if TSAN_SHADOW_COUNT == 1
void build_consistency_shadow1() {}
#elif TSAN_SHADOW_COUNT == 2
void build_consistency_shadow2() {}
#elif TSAN_SHADOW_COUNT == 4
void build_consistency_shadow4() {}
#else
void build_consistency_shadow8() {}
#endif

}  // namespace __tsan

#ifndef TSAN_GO
// Must be included in this file to make sure everything is inlined.
#include "tsan_interface_inl.h"
#endif