tsan_rtl.cc revision 92b54796149a8b5995fa49c43f43b709b83c5644
//===-- tsan_rtl.cc -------------------------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
// Main file (entry points) for the TSan run-time.
//===----------------------------------------------------------------------===//

#include "sanitizer_common/sanitizer_atomic.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_libc.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_symbolizer.h"
#include "tsan_defs.h"
#include "tsan_platform.h"
#include "tsan_rtl.h"
#include "tsan_mman.h"
#include "tsan_suppressions.h"
#include "tsan_symbolize.h"

volatile int __tsan_resumed = 0;

extern "C" void __tsan_resume() {
  __tsan_resumed = 1;
}

namespace __tsan {

#ifndef TSAN_GO
THREADLOCAL char cur_thread_placeholder[sizeof(ThreadState)] ALIGNED(64);
#endif
static char ctx_placeholder[sizeof(Context)] ALIGNED(64);

// Can be overridden by a front-end.
bool CPP_WEAK OnFinalize(bool failed) {
  return failed;
}

static Context *ctx;
Context *CTX() {
  return ctx;
}

static char thread_registry_placeholder[sizeof(ThreadRegistry)];

static ThreadContextBase *CreateThreadContext(u32 tid) {
  // Map thread trace when context is created.
  MapThreadTrace(GetThreadTrace(tid), TraceSize() * sizeof(Event));
  MapThreadTrace(GetThreadTraceHeader(tid), sizeof(Trace));
  new(ThreadTrace(tid)) Trace();
  void *mem = internal_alloc(MBlockThreadContex, sizeof(ThreadContext));
  return new(mem) ThreadContext(tid);
}

#ifndef TSAN_GO
static const u32 kThreadQuarantineSize = 16;
#else
static const u32 kThreadQuarantineSize = 64;
#endif

Context::Context()
  : initialized()
  , report_mtx(MutexTypeReport, StatMtxReport)
  , nreported()
  , nmissed_expected()
  , thread_registry(new(thread_registry_placeholder) ThreadRegistry(
      CreateThreadContext, kMaxTid, kThreadQuarantineSize))
  , racy_stacks(MBlockRacyStacks)
  , racy_addresses(MBlockRacyAddresses)
  , fired_suppressions(8) {
}

// The objects are allocated in TLS, so one may rely on zero-initialization.
ThreadState::ThreadState(Context *ctx, int tid, int unique_id, u64 epoch,
                         uptr stk_addr, uptr stk_size,
                         uptr tls_addr, uptr tls_size)
  : fast_state(tid, epoch)
  // Do not touch these, rely on zero initialization,
  // they may be accessed before the ctor.
  // , ignore_reads_and_writes()
  // , in_rtl()
  , shadow_stack_pos(&shadow_stack[0])
#ifndef TSAN_GO
  , jmp_bufs(MBlockJmpBuf)
#endif
  , tid(tid)
  , unique_id(unique_id)
  , stk_addr(stk_addr)
  , stk_size(stk_size)
  , tls_addr(tls_addr)
  , tls_size(tls_size) {
}

static void MemoryProfiler(Context *ctx, fd_t fd, int i) {
  uptr n_threads;
  uptr n_running_threads;
  ctx->thread_registry->GetNumberOfThreads(&n_threads, &n_running_threads);
  InternalScopedBuffer<char> buf(4096);
  internal_snprintf(buf.data(), buf.size(), "%d: nthr=%d nlive=%d\n",
      i, n_threads, n_running_threads);
  internal_write(fd, buf.data(), internal_strlen(buf.data()));
  WriteMemoryProfile(buf.data(), buf.size());
  internal_write(fd, buf.data(), internal_strlen(buf.data()));
}

static void BackgroundThread(void *arg) {
  ScopedInRtl in_rtl;
  Context *ctx = CTX();
  const u64 kMs2Ns = 1000 * 1000;

  fd_t mprof_fd = kInvalidFd;
  if (flags()->profile_memory && flags()->profile_memory[0]) {
    InternalScopedBuffer<char> filename(4096);
    internal_snprintf(filename.data(), filename.size(), "%s.%d",
        flags()->profile_memory, (int)internal_getpid());
    uptr openrv = OpenFile(filename.data(), true);
    if (internal_iserror(openrv)) {
      Printf("ThreadSanitizer: failed to open memory profile file '%s'\n",
          &filename[0]);
    } else {
      mprof_fd = openrv;
    }
  }

  u64 last_flush = NanoTime();
  uptr last_rss = 0;
  for (int i = 0; ; i++) {
    SleepForSeconds(1);
    u64 now = NanoTime();

    // Flush memory if requested.
    if (flags()->flush_memory_ms > 0) {
      if (last_flush + flags()->flush_memory_ms * kMs2Ns < now) {
        if (flags()->verbosity > 0)
          Printf("ThreadSanitizer: periodic memory flush\n");
        FlushShadowMemory();
        last_flush = NanoTime();
      }
    }
    if (flags()->memory_limit_mb > 0) {
      uptr rss = GetRSS();
      uptr limit = uptr(flags()->memory_limit_mb) << 20;
      if (flags()->verbosity > 0) {
        Printf("ThreadSanitizer: memory flush check"
               " RSS=%llu LAST=%llu LIMIT=%llu\n",
               (u64)rss>>20, (u64)last_rss>>20, (u64)limit>>20);
      }
      if (2 * rss > limit + last_rss) {
        if (flags()->verbosity > 0)
          Printf("ThreadSanitizer: flushing memory due to RSS\n");
        FlushShadowMemory();
        rss = GetRSS();
        if (flags()->verbosity > 0)
          Printf("ThreadSanitizer: memory flushed RSS=%llu\n", (u64)rss>>20);
      }
      last_rss = rss;
    }

    // Write memory profile if requested.
    if (mprof_fd != kInvalidFd)
      MemoryProfiler(ctx, mprof_fd, i);

#ifndef TSAN_GO
    // Flush symbolizer cache if requested.
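    // Note: the flush below is performed under both report mutexes, so it
    // cannot race with report generation, which symbolizes stacks while
    // holding the same locks.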
    if (flags()->flush_symbolizer_ms > 0) {
      u64 last = atomic_load(&ctx->last_symbolize_time_ns,
                             memory_order_relaxed);
      if (last != 0 && last + flags()->flush_symbolizer_ms * kMs2Ns < now) {
        Lock l(&ctx->report_mtx);
        SpinMutexLock l2(&CommonSanitizerReportMutex);
        SymbolizeFlush();
        atomic_store(&ctx->last_symbolize_time_ns, 0, memory_order_relaxed);
      }
    }
#endif
  }
}

void DontNeedShadowFor(uptr addr, uptr size) {
  uptr shadow_beg = MemToShadow(addr);
  uptr shadow_end = MemToShadow(addr + size);
  FlushUnneededShadowMemory(shadow_beg, shadow_end - shadow_beg);
}

void MapShadow(uptr addr, uptr size) {
  MmapFixedNoReserve(MemToShadow(addr), size * kShadowMultiplier);
}

void MapThreadTrace(uptr addr, uptr size) {
  DPrintf("#0: Mapping trace at %p-%p(0x%zx)\n", addr, addr + size, size);
  CHECK_GE(addr, kTraceMemBegin);
  CHECK_LE(addr + size, kTraceMemBegin + kTraceMemSize);
  if (addr != (uptr)MmapFixedNoReserve(addr, size)) {
    Printf("FATAL: ThreadSanitizer can not mmap thread trace\n");
    Die();
  }
}

void Initialize(ThreadState *thr) {
  // Thread safe because done before all threads exist.
  static bool is_initialized = false;
  if (is_initialized)
    return;
  is_initialized = true;
  SanitizerToolName = "ThreadSanitizer";
  // Install tool-specific callbacks in sanitizer_common.
  SetCheckFailedCallback(TsanCheckFailed);

  ScopedInRtl in_rtl;
#ifndef TSAN_GO
  InitializeAllocator();
#endif
  InitializeInterceptors();
  const char *env = InitializePlatform();
  InitializeMutex();
  InitializeDynamicAnnotations();
  ctx = new(ctx_placeholder) Context;
#ifndef TSAN_GO
  InitializeShadowMemory();
#endif
  InitializeFlags(&ctx->flags, env);
  // Setup correct file descriptor for error reports.
  if (internal_strcmp(flags()->log_path, "stdout") == 0)
    __sanitizer_set_report_fd(kStdoutFd);
  else if (internal_strcmp(flags()->log_path, "stderr") == 0)
    __sanitizer_set_report_fd(kStderrFd);
  else
    __sanitizer_set_report_path(flags()->log_path);
  InitializeSuppressions();
#ifndef TSAN_GO
  InitializeLibIgnore();
  // Initialize external symbolizer before internal threads are started.
  const char *external_symbolizer = flags()->external_symbolizer_path;
  if (external_symbolizer != 0 && external_symbolizer[0] != '\0') {
    if (!getSymbolizer()->InitializeExternal(external_symbolizer)) {
      Printf("Failed to start external symbolizer: '%s'\n",
             external_symbolizer);
      Die();
    }
  }
#endif
  internal_start_thread(&BackgroundThread, 0);

  if (ctx->flags.verbosity)
    Printf("***** Running under ThreadSanitizer v2 (pid %d) *****\n",
           (int)internal_getpid());

  // Initialize thread 0.
  int tid = ThreadCreate(thr, 0, 0, true);
  CHECK_EQ(tid, 0);
  ThreadStart(thr, tid, internal_getpid());
  CHECK_EQ(thr->in_rtl, 1);
  ctx->initialized = true;

  if (flags()->stop_on_start) {
    Printf("ThreadSanitizer is suspended at startup (pid %d)."
           " Call __tsan_resume().\n",
           (int)internal_getpid());
    while (__tsan_resumed == 0) {}
  }
}

int Finalize(ThreadState *thr) {
  ScopedInRtl in_rtl;
  Context *ctx = __tsan::ctx;
  bool failed = false;

  if (flags()->atexit_sleep_ms > 0 && ThreadCount(thr) > 1)
    SleepForMillis(flags()->atexit_sleep_ms);

  // Wait for pending reports.
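  // Acquiring and immediately releasing both mutexes blocks finalization
  // until any report that is currently being produced (and holds the locks)
  // has completed.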
  ctx->report_mtx.Lock();
  CommonSanitizerReportMutex.Lock();
  CommonSanitizerReportMutex.Unlock();
  ctx->report_mtx.Unlock();

#ifndef TSAN_GO
  if (ctx->flags.verbosity)
    AllocatorPrintStats();
#endif

  ThreadFinalize(thr);

  if (ctx->nreported) {
    failed = true;
#ifndef TSAN_GO
    Printf("ThreadSanitizer: reported %d warnings\n", ctx->nreported);
#else
    Printf("Found %d data race(s)\n", ctx->nreported);
#endif
  }

  if (ctx->nmissed_expected) {
    failed = true;
    Printf("ThreadSanitizer: missed %d expected races\n",
           ctx->nmissed_expected);
  }

  if (flags()->print_suppressions)
    PrintMatchedSuppressions();
#ifndef TSAN_GO
  if (flags()->print_benign)
    PrintMatchedBenignRaces();
#endif

  failed = OnFinalize(failed);

  StatAggregate(ctx->stat, thr->stat);
  StatOutput(ctx->stat);
  return failed ? flags()->exitcode : 0;
}

#ifndef TSAN_GO
u32 CurrentStackId(ThreadState *thr, uptr pc) {
  if (thr->shadow_stack_pos == 0)  // May happen during bootstrap.
    return 0;
  if (pc) {
    thr->shadow_stack_pos[0] = pc;
    thr->shadow_stack_pos++;
  }
  u32 id = StackDepotPut(thr->shadow_stack,
                         thr->shadow_stack_pos - thr->shadow_stack);
  if (pc)
    thr->shadow_stack_pos--;
  return id;
}
#endif

void TraceSwitch(ThreadState *thr) {
  thr->nomalloc++;
  ScopedInRtl in_rtl;
  Trace *thr_trace = ThreadTrace(thr->tid);
  Lock l(&thr_trace->mtx);
  unsigned trace = (thr->fast_state.epoch() / kTracePartSize) % TraceParts();
  TraceHeader *hdr = &thr_trace->headers[trace];
  hdr->epoch0 = thr->fast_state.epoch();
  hdr->stack0.ObtainCurrent(thr, 0);
  hdr->mset0 = thr->mset;
  thr->nomalloc--;
}

Trace *ThreadTrace(int tid) {
  return (Trace*)GetThreadTraceHeader(tid);
}

uptr TraceTopPC(ThreadState *thr) {
  Event *events = (Event*)GetThreadTrace(thr->tid);
  uptr pc = events[thr->fast_state.GetTracePos()];
  return pc;
}

uptr TraceSize() {
  return (uptr)(1ull << (kTracePartSizeBits + flags()->history_size + 1));
}

uptr TraceParts() {
  return TraceSize() / kTracePartSize;
}

#ifndef TSAN_GO
extern "C" void __tsan_trace_switch() {
  TraceSwitch(cur_thread());
}

extern "C" void __tsan_report_race() {
  ReportRace(cur_thread());
}
#endif

ALWAYS_INLINE
Shadow LoadShadow(u64 *p) {
  u64 raw = atomic_load((atomic_uint64_t*)p, memory_order_relaxed);
  return Shadow(raw);
}

ALWAYS_INLINE
void StoreShadow(u64 *sp, u64 s) {
  atomic_store((atomic_uint64_t*)sp, s, memory_order_relaxed);
}

ALWAYS_INLINE
void StoreIfNotYetStored(u64 *sp, u64 *s) {
  StoreShadow(sp, *s);
  *s = 0;
}

static inline void HandleRace(ThreadState *thr, u64 *shadow_mem,
                              Shadow cur, Shadow old) {
  thr->racy_state[0] = cur.raw();
  thr->racy_state[1] = old.raw();
  thr->racy_shadow_addr = shadow_mem;
#ifndef TSAN_GO
  HACKY_CALL(__tsan_report_race);
#else
  ReportRace(thr);
#endif
}

static inline bool OldIsInSameSynchEpoch(Shadow old, ThreadState *thr) {
  return old.epoch() >= thr->fast_synch_epoch;
}

static inline bool HappensBefore(Shadow old, ThreadState *thr) {
  return thr->clock.get(old.TidWithIgnore()) >= old.epoch();
}

ALWAYS_INLINE USED
void MemoryAccessImpl(ThreadState *thr, uptr addr,
    int kAccessSizeLog, bool kAccessIsWrite, bool kIsAtomic,
    u64 *shadow_mem, Shadow cur) {
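  // The current access is compared against each of the kShadowCnt shadow
  // slots of this cell. The per-slot logic (tsan_update_shadow_word_inl.h)
  // stores the access into an empty or superseded slot, skips slots that do
  // not conflict, and jumps to RACE on a conflicting access.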
  StatInc(thr, StatMop);
  StatInc(thr, kAccessIsWrite ? StatMopWrite : StatMopRead);
  StatInc(thr, (StatType)(StatMop1 + kAccessSizeLog));

  // This potentially can live in an MMX/SSE scratch register.
  // The required intrinsics are:
  // __m128i _mm_move_epi64(__m128i*);
  // _mm_storel_epi64(u64*, __m128i);
  u64 store_word = cur.raw();

  // Scan all the shadow values and dispatch to 4 categories:
  // same, replace, candidate and race (see comments below).
  // We consider only 3 cases regarding access sizes:
  // equal, intersect and not intersect. Initially I considered
  // larger and smaller as well, which allowed replacing some
  // 'candidates' with 'same' or 'replace', but I think
  // it's just not worth it (performance- and complexity-wise).

  Shadow old(0);
  if (kShadowCnt == 1) {
    int idx = 0;
#include "tsan_update_shadow_word_inl.h"
  } else if (kShadowCnt == 2) {
    int idx = 0;
#include "tsan_update_shadow_word_inl.h"
    idx = 1;
#include "tsan_update_shadow_word_inl.h"
  } else if (kShadowCnt == 4) {
    int idx = 0;
#include "tsan_update_shadow_word_inl.h"
    idx = 1;
#include "tsan_update_shadow_word_inl.h"
    idx = 2;
#include "tsan_update_shadow_word_inl.h"
    idx = 3;
#include "tsan_update_shadow_word_inl.h"
  } else if (kShadowCnt == 8) {
    int idx = 0;
#include "tsan_update_shadow_word_inl.h"
    idx = 1;
#include "tsan_update_shadow_word_inl.h"
    idx = 2;
#include "tsan_update_shadow_word_inl.h"
    idx = 3;
#include "tsan_update_shadow_word_inl.h"
    idx = 4;
#include "tsan_update_shadow_word_inl.h"
    idx = 5;
#include "tsan_update_shadow_word_inl.h"
    idx = 6;
#include "tsan_update_shadow_word_inl.h"
    idx = 7;
#include "tsan_update_shadow_word_inl.h"
  } else {
    CHECK(false);
  }

  // We did not find any races and have already stored
  // the current access info, so we are done.
  if (LIKELY(store_word == 0))
    return;
  // Choose a random candidate slot and replace it.
  StoreShadow(shadow_mem + (cur.epoch() % kShadowCnt), store_word);
  StatInc(thr, StatShadowReplace);
  return;
 RACE:
  HandleRace(thr, shadow_mem, cur, old);
  return;
}

void UnalignedMemoryAccess(ThreadState *thr, uptr pc, uptr addr,
    int size, bool kAccessIsWrite, bool kIsAtomic) {
  while (size) {
    int size1 = 1;
    int kAccessSizeLog = kSizeLog1;
    if (size >= 8 && (addr & ~7) == ((addr + 8) & ~7)) {
      size1 = 8;
      kAccessSizeLog = kSizeLog8;
    } else if (size >= 4 && (addr & ~7) == ((addr + 4) & ~7)) {
      size1 = 4;
      kAccessSizeLog = kSizeLog4;
    } else if (size >= 2 && (addr & ~7) == ((addr + 2) & ~7)) {
      size1 = 2;
      kAccessSizeLog = kSizeLog2;
    }
    MemoryAccess(thr, pc, addr, kAccessSizeLog, kAccessIsWrite, kIsAtomic);
    addr += size1;
    size -= size1;
  }
}

ALWAYS_INLINE USED
void MemoryAccess(ThreadState *thr, uptr pc, uptr addr,
    int kAccessSizeLog, bool kAccessIsWrite, bool kIsAtomic) {
  u64 *shadow_mem = (u64*)MemToShadow(addr);
  DPrintf2("#%d: MemoryAccess: @%p %p size=%d"
      " is_write=%d shadow_mem=%p {%zx, %zx, %zx, %zx}\n",
      (int)thr->fast_state.tid(), (void*)pc, (void*)addr,
      (int)(1 << kAccessSizeLog), kAccessIsWrite, shadow_mem,
      (uptr)shadow_mem[0], (uptr)shadow_mem[1],
      (uptr)shadow_mem[2], (uptr)shadow_mem[3]);
#if TSAN_DEBUG
  if (!IsAppMem(addr)) {
    Printf("Access to non app mem %zx\n", addr);
    DCHECK(IsAppMem(addr));
  }
  if (!IsShadowMem((uptr)shadow_mem)) {
    Printf("Bad shadow addr %p (%zx)\n", shadow_mem, addr);
    DCHECK(IsShadowMem((uptr)shadow_mem));
  }
#endif

  if (*shadow_mem == kShadowRodata) {
    // Access to .rodata section, no races here.
    // Measurements show that it can be 10-20% of all memory accesses.
    StatInc(thr, StatMop);
    StatInc(thr, kAccessIsWrite ? StatMopWrite : StatMopRead);
    StatInc(thr, (StatType)(StatMop1 + kAccessSizeLog));
    StatInc(thr, StatMopRodata);
    return;
  }

  FastState fast_state = thr->fast_state;
  if (fast_state.GetIgnoreBit())
    return;
  fast_state.IncrementEpoch();
  thr->fast_state = fast_state;
  Shadow cur(fast_state);
  cur.SetAddr0AndSizeLog(addr & 7, kAccessSizeLog);
  cur.SetWrite(kAccessIsWrite);
  cur.SetAtomic(kIsAtomic);

  // We must not store to the trace if we do not store to the shadow.
  // That is, this call must be moved somewhere below.
  TraceAddEvent(thr, fast_state, EventTypeMop, pc);

  MemoryAccessImpl(thr, addr, kAccessSizeLog, kAccessIsWrite, kIsAtomic,
      shadow_mem, cur);
}

static void MemoryRangeSet(ThreadState *thr, uptr pc, uptr addr, uptr size,
                           u64 val) {
  (void)thr;
  (void)pc;
  if (size == 0)
    return;
  // FIXME: fix me.
  uptr offset = addr % kShadowCell;
  if (offset) {
    offset = kShadowCell - offset;
    if (size <= offset)
      return;
    addr += offset;
    size -= offset;
  }
  DCHECK_EQ(addr % 8, 0);
  // If a user passes some insane arguments (memset(0)),
  // let it just crash as usual.
  if (!IsAppMem(addr) || !IsAppMem(addr + size - 1))
    return;
  // Don't want to touch lots of shadow memory.
  // If a program maps 10MB stack, there is no need to reset the whole range.
  size = (size + (kShadowCell - 1)) & ~(kShadowCell - 1);
  // UnmapOrDie/MmapFixedNoReserve does not work on Windows,
  // so we do it only for C/C++.
  if (kGoMode || size < 64*1024) {
    u64 *p = (u64*)MemToShadow(addr);
    CHECK(IsShadowMem((uptr)p));
    CHECK(IsShadowMem((uptr)(p + size * kShadowCnt / kShadowCell - 1)));
    // FIXME: may overwrite a part outside the region
    for (uptr i = 0; i < size / kShadowCell * kShadowCnt;) {
      p[i++] = val;
      for (uptr j = 1; j < kShadowCnt; j++)
        p[i++] = 0;
    }
  } else {
    // The region is big, reset only beginning and end.
    const uptr kPageSize = 4096;
    u64 *begin = (u64*)MemToShadow(addr);
    u64 *end = begin + size / kShadowCell * kShadowCnt;
    u64 *p = begin;
    // Set at least first kPageSize/2 to page boundary.
    while ((p < begin + kPageSize / kShadowSize / 2) || ((uptr)p % kPageSize)) {
      *p++ = val;
      for (uptr j = 1; j < kShadowCnt; j++)
        *p++ = 0;
    }
    // Reset middle part.
    u64 *p1 = p;
    p = RoundDown(end, kPageSize);
    UnmapOrDie((void*)p1, (uptr)p - (uptr)p1);
    MmapFixedNoReserve((uptr)p1, (uptr)p - (uptr)p1);
    // Set the ending.
    while (p < end) {
      *p++ = val;
      for (uptr j = 1; j < kShadowCnt; j++)
        *p++ = 0;
    }
  }
}

void MemoryResetRange(ThreadState *thr, uptr pc, uptr addr, uptr size) {
  MemoryRangeSet(thr, pc, addr, size, 0);
}

void MemoryRangeFreed(ThreadState *thr, uptr pc, uptr addr, uptr size) {
  // Processing more than 1k (4k of shadow) is expensive,
  // can cause excessive memory consumption (the user does not necessarily
  // touch the whole range) and is most likely unnecessary.
  if (size > 1024)
    size = 1024;
  CHECK_EQ(thr->is_freeing, false);
  thr->is_freeing = true;
  MemoryAccessRange(thr, pc, addr, size, true);
  thr->is_freeing = false;
  thr->fast_state.IncrementEpoch();
  TraceAddEvent(thr, thr->fast_state, EventTypeMop, pc);
  Shadow s(thr->fast_state);
  s.ClearIgnoreBit();
  s.MarkAsFreed();
  s.SetWrite(true);
  s.SetAddr0AndSizeLog(0, 3);
  MemoryRangeSet(thr, pc, addr, size, s.raw());
}

void MemoryRangeImitateWrite(ThreadState *thr, uptr pc, uptr addr, uptr size) {
  thr->fast_state.IncrementEpoch();
  TraceAddEvent(thr, thr->fast_state, EventTypeMop, pc);
  Shadow s(thr->fast_state);
  s.ClearIgnoreBit();
  s.SetWrite(true);
  s.SetAddr0AndSizeLog(0, 3);
  MemoryRangeSet(thr, pc, addr, size, s.raw());
}

ALWAYS_INLINE USED
void FuncEntry(ThreadState *thr, uptr pc) {
  DCHECK_EQ(thr->in_rtl, 0);
  StatInc(thr, StatFuncEnter);
  DPrintf2("#%d: FuncEntry %p\n", (int)thr->fast_state.tid(), (void*)pc);
  thr->fast_state.IncrementEpoch();
  TraceAddEvent(thr, thr->fast_state, EventTypeFuncEnter, pc);

  // Shadow stack maintenance can be replaced with
  // stack unwinding during trace switch (which presumably must be faster).
  DCHECK_GE(thr->shadow_stack_pos, &thr->shadow_stack[0]);
#ifndef TSAN_GO
  DCHECK_LT(thr->shadow_stack_pos, &thr->shadow_stack[kShadowStackSize]);
#else
  if (thr->shadow_stack_pos == thr->shadow_stack_end) {
    const int sz = thr->shadow_stack_end - thr->shadow_stack;
    const int newsz = 2 * sz;
    uptr *newstack = (uptr*)internal_alloc(MBlockShadowStack,
        newsz * sizeof(uptr));
    internal_memcpy(newstack, thr->shadow_stack, sz * sizeof(uptr));
    internal_free(thr->shadow_stack);
    thr->shadow_stack = newstack;
    thr->shadow_stack_pos = newstack + sz;
    thr->shadow_stack_end = newstack + newsz;
  }
#endif
  thr->shadow_stack_pos[0] = pc;
  thr->shadow_stack_pos++;
}

ALWAYS_INLINE USED
void FuncExit(ThreadState *thr) {
  DCHECK_EQ(thr->in_rtl, 0);
  StatInc(thr, StatFuncExit);
  DPrintf2("#%d: FuncExit\n", (int)thr->fast_state.tid());
  thr->fast_state.IncrementEpoch();
  TraceAddEvent(thr, thr->fast_state, EventTypeFuncExit, 0);

  DCHECK_GT(thr->shadow_stack_pos, &thr->shadow_stack[0]);
#ifndef TSAN_GO
  DCHECK_LT(thr->shadow_stack_pos, &thr->shadow_stack[kShadowStackSize]);
#endif
  thr->shadow_stack_pos--;
}

void ThreadIgnoreBegin(ThreadState *thr) {
  DPrintf("#%d: ThreadIgnoreBegin\n", thr->tid);
  thr->ignore_reads_and_writes++;
  CHECK_GE(thr->ignore_reads_and_writes, 0);
  thr->fast_state.SetIgnoreBit();
}

void ThreadIgnoreEnd(ThreadState *thr) {
  DPrintf("#%d: ThreadIgnoreEnd\n", thr->tid);
  thr->ignore_reads_and_writes--;
  CHECK_GE(thr->ignore_reads_and_writes, 0);
  if (thr->ignore_reads_and_writes == 0)
    thr->fast_state.ClearIgnoreBit();
}

bool MD5Hash::operator==(const MD5Hash &other) const {
  return hash[0] == other.hash[0] && hash[1] == other.hash[1];
}

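// The empty build_consistency_* functions below encode the build
// configuration in symbol names; presumably they are referenced from code
// compiled with the same TSAN_* settings, so that mixing incompatible builds
// of the runtime and its users fails at link time.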
#if TSAN_DEBUG
void build_consistency_debug() {}
#else
void build_consistency_release() {}
#endif

#if TSAN_COLLECT_STATS
void build_consistency_stats() {}
#else
void build_consistency_nostats() {}
#endif

#if TSAN_SHADOW_COUNT == 1
void build_consistency_shadow1() {}
#elif TSAN_SHADOW_COUNT == 2
void build_consistency_shadow2() {}
#elif TSAN_SHADOW_COUNT == 4
void build_consistency_shadow4() {}
#else
void build_consistency_shadow8() {}
#endif

}  // namespace __tsan

#ifndef TSAN_GO
// Must be included in this file to make sure everything is inlined.
#include "tsan_interface_inl.h"
#endif