// tsan_rtl.h revision e1ddbf9a458e81125a03fea721997565124294ae
//===-- tsan_rtl.h ----------------------------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
// Main internal TSan header file.
//
// Ground rules:
//   - C++ run-time should not be used (static CTORs, RTTI, exceptions, static
//     function-scope locals).
//   - All functions/classes/etc reside in namespace __tsan, except for those
//     declared in tsan_interface.h.
//   - Platform-specific files should be used instead of ifdefs (*).
//   - No system headers included in header files (*).
//   - Platform-specific headers included only into platform-specific files (*).
//
//  (*) Except when inlining is critical for performance.
//===----------------------------------------------------------------------===//

#ifndef TSAN_RTL_H
#define TSAN_RTL_H

#include "sanitizer_common/sanitizer_allocator.h"
#include "sanitizer_common/sanitizer_allocator_internal.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_libignore.h"
#include "sanitizer_common/sanitizer_suppressions.h"
#include "sanitizer_common/sanitizer_thread_registry.h"
#include "tsan_clock.h"
#include "tsan_defs.h"
#include "tsan_flags.h"
#include "tsan_sync.h"
#include "tsan_trace.h"
#include "tsan_vector.h"
#include "tsan_report.h"
#include "tsan_platform.h"
#include "tsan_mutexset.h"

#if SANITIZER_WORDSIZE != 64
# error "ThreadSanitizer is supported only on 64-bit platforms"
#endif

namespace __tsan {

// Descriptor of user's memory block.
struct MBlock {
  /*
  u64 mtx : 1;  // must be first
  u64 lst : 44;
  u64 stk : 31;  // on word boundary
  u64 tid : kTidBits;
  u64 siz : 128 - 1 - 31 - 44 - kTidBits;  // 39
  */
  u64 raw[2];

  void Init(uptr siz, u32 tid, u32 stk) {
    raw[0] = raw[1] = 0;
    raw[1] |= (u64)siz << ((1 + 44 + 31 + kTidBits) % 64);
    raw[1] |= (u64)tid << ((1 + 44 + 31) % 64);
    raw[0] |= (u64)stk << (1 + 44);
    raw[1] |= (u64)stk >> (64 - 44 - 1);
    DCHECK_EQ(Size(), siz);
    DCHECK_EQ(Tid(), tid);
    DCHECK_EQ(StackId(), stk);
  }

  u32 Tid() const {
    return GetLsb(raw[1] >> ((1 + 44 + 31) % 64), kTidBits);
  }

  uptr Size() const {
    return raw[1] >> ((1 + 31 + 44 + kTidBits) % 64);
  }

  u32 StackId() const {
    return (raw[0] >> (1 + 44)) | GetLsb(raw[1] << (64 - 44 - 1), 31);
  }

  SyncVar *ListHead() const {
    return (SyncVar*)(GetLsb(raw[0] >> 1, 44) << 3);
  }

  void ListPush(SyncVar *v) {
    SyncVar *lst = ListHead();
    v->next = lst;
    u64 x = (u64)v ^ (u64)lst;
    x = (x >> 3) << 1;
    raw[0] ^= x;
    DCHECK_EQ(ListHead(), v);
  }

  SyncVar *ListPop() {
    SyncVar *lst = ListHead();
    SyncVar *nxt = lst->next;
    lst->next = 0;
    u64 x = (u64)lst ^ (u64)nxt;
    x = (x >> 3) << 1;
    raw[0] ^= x;
    DCHECK_EQ(ListHead(), nxt);
    return lst;
  }

  void ListReset() {
    SyncVar *lst = ListHead();
    u64 x = (u64)lst;
    x = (x >> 3) << 1;
    raw[0] ^= x;
    DCHECK_EQ(ListHead(), 0);
  }

  void Lock();
  void Unlock();
  typedef GenericScopedLock<MBlock> ScopedLock;
};
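
// Editor's note on the list operations above: they update the 44-bit "lst"
// field in place with a single XOR instead of a mask-and-set sequence.
// SyncVar objects are assumed to be at least 8-byte aligned (ListHead()
// reconstructs the pointer with "<< 3"), so a pointer shifted right by 3 fits
// into the field, which starts at bit 1; XOR-ing (old ^ new) into raw[0]
// flips exactly the bits that differ between the old and the new list head.
// A minimal usage sketch (illustration only, not part of the runtime; "s" is
// assumed to point to a live, 8-byte-aligned SyncVar):
//
//   MBlock b;
//   b.Init(/*siz=*/16, /*tid=*/1, /*stk=*/42);
//   b.ListPush(s);                 // now b.ListHead() == s
//   SyncVar *again = b.ListPop();  // again == s, list is empty again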

#ifndef TSAN_GO
#if defined(TSAN_COMPAT_SHADOW) && TSAN_COMPAT_SHADOW
const uptr kAllocatorSpace = 0x7d0000000000ULL;
#else
const uptr kAllocatorSpace = 0x7d0000000000ULL;
#endif
const uptr kAllocatorSize = 0x10000000000ULL;  // 1T.

struct MapUnmapCallback;
typedef SizeClassAllocator64<kAllocatorSpace, kAllocatorSize, sizeof(MBlock),
    DefaultSizeClassMap, MapUnmapCallback> PrimaryAllocator;
typedef SizeClassAllocatorLocalCache<PrimaryAllocator> AllocatorCache;
typedef LargeMmapAllocator<MapUnmapCallback> SecondaryAllocator;
typedef CombinedAllocator<PrimaryAllocator, AllocatorCache,
    SecondaryAllocator> Allocator;
Allocator *allocator();
#endif

void TsanCheckFailed(const char *file, int line, const char *cond,
                     u64 v1, u64 v2);

const u64 kShadowRodata = (u64)-1;  // .rodata shadow marker
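
// Editor's note on the allocator setup above: the primary allocator serves
// chunks from the fixed 1 TiB region starting at kAllocatorSpace, and the
// sizeof(MBlock) template argument reserves that much per-chunk metadata,
// which is presumably where the MBlock descriptor of each user allocation
// lives. Requests too large for the size-class allocator fall through to the
// mmap-based secondary allocator. A hedged usage sketch (mirrors what the
// malloc interceptors are expected to do; the cache and sizes are
// illustrative):
//
//   AllocatorCache *cache = &thr->alloc_cache;  // per-thread cache
//   void *p = allocator()->Allocate(cache, /*size=*/64, /*alignment=*/8);
//   allocator()->Deallocate(cache, p);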

// FastState (from most significant bit):
//   ignore          : 1
//   tid             : kTidBits
//   epoch           : kClkBits
//   unused          : -
//   history_size    : 3
class FastState {
 public:
  FastState(u64 tid, u64 epoch) {
    x_ = tid << kTidShift;
    x_ |= epoch << kClkShift;
    DCHECK_EQ(tid, this->tid());
    DCHECK_EQ(epoch, this->epoch());
    DCHECK_EQ(GetIgnoreBit(), false);
  }

  explicit FastState(u64 x)
      : x_(x) {
  }

  u64 raw() const {
    return x_;
  }

  u64 tid() const {
    u64 res = (x_ & ~kIgnoreBit) >> kTidShift;
    return res;
  }

  u64 TidWithIgnore() const {
    u64 res = x_ >> kTidShift;
    return res;
  }

  u64 epoch() const {
    u64 res = (x_ << (kTidBits + 1)) >> (64 - kClkBits);
    return res;
  }

  void IncrementEpoch() {
    u64 old_epoch = epoch();
    x_ += 1 << kClkShift;
    DCHECK_EQ(old_epoch + 1, epoch());
    (void)old_epoch;
  }

  void SetIgnoreBit() { x_ |= kIgnoreBit; }
  void ClearIgnoreBit() { x_ &= ~kIgnoreBit; }
  bool GetIgnoreBit() const { return (s64)x_ < 0; }

  void SetHistorySize(int hs) {
    CHECK_GE(hs, 0);
    CHECK_LE(hs, 7);
    x_ = (x_ & ~7) | hs;
  }

  int GetHistorySize() const {
    return (int)(x_ & 7);
  }

  void ClearHistorySize() {
    x_ &= ~7;
  }

  u64 GetTracePos() const {
    const int hs = GetHistorySize();
    // When hs == 0, the trace consists of 2 parts.
    const u64 mask = (1ull << (kTracePartSizeBits + hs + 1)) - 1;
    return epoch() & mask;
  }

 private:
  friend class Shadow;
  static const int kTidShift = 64 - kTidBits - 1;
  static const int kClkShift = kTidShift - kClkBits;
  static const u64 kIgnoreBit = 1ull << 63;
  static const u64 kFreedBit = 1ull << 63;
  u64 x_;
};
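
// Editor's note on GetTracePos() above: a thread's event trace is a ring of
// 2^(history_size + 1) parts, each holding kTracePartSize events (so, as the
// comment in the code says, history_size == 0 gives 2 parts). The epoch is
// incremented for every traced event, so epoch modulo the total trace
// capacity is the slot of the next event. A worked example (the numbers are
// illustrative; kTracePartSizeBits comes from tsan_trace.h):
//
//   history_size = 0, kTracePartSizeBits = 13:
//     mask  = (1ull << (13 + 0 + 1)) - 1 = 16383
//     epoch = 100000  =>  trace position = 100000 & 16383 = 1696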

// Shadow (from most significant bit):
//   freed           : 1
//   tid             : kTidBits
//   epoch           : kClkBits
//   is_atomic       : 1
//   is_read         : 1
//   size_log        : 2
//   addr0           : 3
class Shadow : public FastState {
 public:
  explicit Shadow(u64 x)
      : FastState(x) {
  }

  explicit Shadow(const FastState &s)
      : FastState(s.x_) {
    ClearHistorySize();
  }

  void SetAddr0AndSizeLog(u64 addr0, unsigned kAccessSizeLog) {
    DCHECK_EQ(x_ & 31, 0);
    DCHECK_LE(addr0, 7);
    DCHECK_LE(kAccessSizeLog, 3);
    x_ |= (kAccessSizeLog << 3) | addr0;
    DCHECK_EQ(kAccessSizeLog, size_log());
    DCHECK_EQ(addr0, this->addr0());
  }

  void SetWrite(unsigned kAccessIsWrite) {
    DCHECK_EQ(x_ & kReadBit, 0);
    if (!kAccessIsWrite)
      x_ |= kReadBit;
    DCHECK_EQ(kAccessIsWrite, IsWrite());
  }

  void SetAtomic(bool kIsAtomic) {
    DCHECK(!IsAtomic());
    if (kIsAtomic)
      x_ |= kAtomicBit;
    DCHECK_EQ(IsAtomic(), kIsAtomic);
  }

  bool IsAtomic() const {
    return x_ & kAtomicBit;
  }

  bool IsZero() const {
    return x_ == 0;
  }

  static inline bool TidsAreEqual(const Shadow s1, const Shadow s2) {
    u64 shifted_xor = (s1.x_ ^ s2.x_) >> kTidShift;
    DCHECK_EQ(shifted_xor == 0, s1.TidWithIgnore() == s2.TidWithIgnore());
    return shifted_xor == 0;
  }

  static inline bool Addr0AndSizeAreEqual(const Shadow s1, const Shadow s2) {
    u64 masked_xor = (s1.x_ ^ s2.x_) & 31;
    return masked_xor == 0;
  }

  static inline bool TwoRangesIntersect(Shadow s1, Shadow s2,
      unsigned kS2AccessSize) {
    bool res = false;
    u64 diff = s1.addr0() - s2.addr0();
    if ((s64)diff < 0) {  // s1.addr0 < s2.addr0  // NOLINT
      // if (s1.addr0() + size1 > s2.addr0()) return true;
      if (s1.size() > -diff) res = true;
    } else {
      // if (s2.addr0() + kS2AccessSize > s1.addr0()) return true;
      if (kS2AccessSize > diff) res = true;
    }
    DCHECK_EQ(res, TwoRangesIntersectSLOW(s1, s2));
    DCHECK_EQ(res, TwoRangesIntersectSLOW(s2, s1));
    return res;
  }

  // The idea behind the offset is as follows.
  // Consider that we have 8 bools contained within a single 8-byte block
  // (mapped to a single shadow "cell"). Now consider that we write to the
  // bools from a single thread (which we consider the common case).
  // Without offsetting, each access would have to scan 4 shadow values on
  // average to find the corresponding shadow value for the bool.
  // With offsetting we start scanning the shadow at the offset, so that
  // each access hits the necessary shadow straight away (at least in the
  // expected optimistic case).
  // This logic works seamlessly for any layout of user data. For example,
  // if the user data is {int, short, char, char}, then accesses to the int
  // start scanning at offset 0, the short at 4, the 1st char at 6, and the
  // 2nd char at 7. Hopefully, accesses from a single thread won't need to
  // scan all 8 shadow values.
  unsigned ComputeSearchOffset() {
    return x_ & 7;
  }
  u64 addr0() const { return x_ & 7; }
  u64 size() const { return 1ull << size_log(); }
  bool IsWrite() const { return !IsRead(); }
  bool IsRead() const { return x_ & kReadBit; }

  // The idea behind the freed bit is as follows.
  // When the memory is freed (or otherwise made inaccessible), we write
  // shadow values with the tid/epoch of the free and with the freed bit set.
  // During memory access processing the freed bit is treated as the msb of
  // the tid, so any access races with a shadow value that has the freed bit
  // set (it is as if it were a write from a thread with which we have never
  // synchronized before). This allows us to detect accesses to freed memory
  // without additional overhead in memory access processing, and at the same
  // time to restore the tid/epoch of the free.
  void MarkAsFreed() {
    x_ |= kFreedBit;
  }

  bool IsFreed() const {
    return x_ & kFreedBit;
  }

  bool GetFreedAndReset() {
    bool res = x_ & kFreedBit;
    x_ &= ~kFreedBit;
    return res;
  }

  bool IsBothReadsOrAtomic(bool kIsWrite, bool kIsAtomic) const {
    // Analyzes the 5th bit (is_read) and the 6th bit (is_atomic).
    bool v = x_ & u64(((kIsWrite ^ 1) << kReadShift)
        | (kIsAtomic << kAtomicShift));
    DCHECK_EQ(v, (!IsWrite() && !kIsWrite) || (IsAtomic() && kIsAtomic));
    return v;
  }

  bool IsRWNotWeaker(bool kIsWrite, bool kIsAtomic) const {
    bool v = ((x_ >> kReadShift) & 3)
        <= u64((kIsWrite ^ 1) | (kIsAtomic << 1));
    DCHECK_EQ(v, (IsAtomic() < kIsAtomic) ||
        (IsAtomic() == kIsAtomic && !IsWrite() <= !kIsWrite));
    return v;
  }

  bool IsRWWeakerOrEqual(bool kIsWrite, bool kIsAtomic) const {
    bool v = ((x_ >> kReadShift) & 3)
        >= u64((kIsWrite ^ 1) | (kIsAtomic << 1));
    DCHECK_EQ(v, (IsAtomic() > kIsAtomic) ||
        (IsAtomic() == kIsAtomic && !IsWrite() >= !kIsWrite));
    return v;
  }

 private:
  static const u64 kReadShift = 5;
  static const u64 kReadBit = 1ull << kReadShift;
  static const u64 kAtomicShift = 6;
  static const u64 kAtomicBit = 1ull << kAtomicShift;

  u64 size_log() const { return (x_ >> 3) & 3; }

  static bool TwoRangesIntersectSLOW(const Shadow s1, const Shadow s2) {
    if (s1.addr0() == s2.addr0()) return true;
    if (s1.addr0() < s2.addr0() && s1.addr0() + s1.size() > s2.addr0())
      return true;
    if (s2.addr0() < s1.addr0() && s2.addr0() + s2.size() > s1.addr0())
      return true;
    return false;
  }
};
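
// Two editor's remarks on the tricks above (not from the original sources).
// First, TwoRangesIntersect() exploits unsigned wraparound: diff =
// s1.addr0() - s2.addr0() is computed modulo 2^64, so casting it to s64 and
// testing the sign tells which range starts first, and -diff is then the
// positive distance; since addr0 <= 7 and size <= 8, no other overflow is
// possible. Second, the IsRW*() predicates compare the two-bit value
// (is_atomic << 1) | is_read as a plain integer: a numerically larger value
// means a "weaker" access (atomic and/or read), which is why a single
// unsigned comparison implements the ordering that the DCHECKs spell out.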

struct SignalContext;

struct JmpBuf {
  uptr sp;
  uptr mangled_sp;
  uptr *shadow_stack_pos;
};

// This struct is stored in TLS.
struct ThreadState {
  FastState fast_state;
  // The synch epoch represents the thread's epoch before the last
  // synchronization action. It allows us to reduce the number of shadow state
  // updates. For example, if fast_synch_epoch=100 and the last write to addr X
  // was at epoch=150, and we are processing a write to X from the same thread
  // at epoch=200, we do nothing, because both writes happen in the same
  // "synch epoch". That is, if another memory access does not race with the
  // former write, it does not race with the latter as well.
  // QUESTION: can we squeeze this into ThreadState::Fast?
  // E.g. ThreadState::Fast is 44 bits; 32 are taken by synch_epoch and 12 by
  // the epoch between synchs.
  // This way we can save one load from TLS.
  u64 fast_synch_epoch;
  // This is a slow path flag. On the fast path, fast_state.GetIgnoreBit()
  // is read. We do not distinguish between ignoring reads and writes
  // for better performance.
  int ignore_reads_and_writes;
  int ignore_sync;
  uptr *shadow_stack_pos;
  u64 *racy_shadow_addr;
  u64 racy_state[2];
#ifndef TSAN_GO
  // C/C++ uses an embedded shadow stack of fixed size.
  uptr shadow_stack[kShadowStackSize];
#else
  // Go uses a satellite shadow stack with dynamic size.
  uptr *shadow_stack;
  uptr *shadow_stack_end;
#endif
  MutexSet mset;
  ThreadClock clock;
#ifndef TSAN_GO
  AllocatorCache alloc_cache;
  InternalAllocatorCache internal_alloc_cache;
  Vector<JmpBuf> jmp_bufs;
#endif
  u64 stat[StatCnt];
  const int tid;
  const int unique_id;
  int in_rtl;
  bool in_symbolizer;
  bool in_ignored_lib;
  bool is_alive;
  bool is_freeing;
  bool is_vptr_access;
  const uptr stk_addr;
  const uptr stk_size;
  const uptr tls_addr;
  const uptr tls_size;

  DeadlockDetector deadlock_detector;

  bool in_signal_handler;
  SignalContext *signal_ctx;

#ifndef TSAN_GO
  u32 last_sleep_stack_id;
  ThreadClock last_sleep_clock;
#endif

  // Set in regions of runtime that must be signal-safe and fork-safe.
  // If set, malloc must not be called.
  int nomalloc;

  explicit ThreadState(Context *ctx, int tid, int unique_id, u64 epoch,
                       uptr stk_addr, uptr stk_size,
                       uptr tls_addr, uptr tls_size);
};

Context *CTX();

#ifndef TSAN_GO
extern THREADLOCAL char cur_thread_placeholder[];
INLINE ThreadState *cur_thread() {
  return reinterpret_cast<ThreadState *>(&cur_thread_placeholder);
}
#endif
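
// Editor's note on cur_thread() above: ThreadState has non-trivial members
// (MutexSet, ThreadClock, ...), and the ground rules of this file forbid C++
// static constructors, so the TLS slot is declared as a raw character array
// and reinterpreted as a ThreadState. The object is presumably constructed
// into that placeholder with placement new when the thread starts (in
// tsan_rtl_thread.cc, as far as the editor can tell); cur_thread() itself
// never constructs anything.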

class ThreadContext : public ThreadContextBase {
 public:
  explicit ThreadContext(int tid);
  ~ThreadContext();
  ThreadState *thr;
#ifdef TSAN_GO
  StackTrace creation_stack;
#else
  u32 creation_stack_id;
#endif
  SyncClock sync;
  // Epoch at which the thread started.
  // If we see an event from the thread stamped by an older epoch,
  // the event is from a dead thread that shared the tid with this thread.
  u64 epoch0;
  u64 epoch1;

  // Override superclass callbacks.
  void OnDead();
  void OnJoined(void *arg);
  void OnFinished();
  void OnStarted(void *arg);
  void OnCreated(void *arg);
  void OnReset();
};

struct RacyStacks {
  MD5Hash hash[2];
  bool operator==(const RacyStacks &other) const {
    if (hash[0] == other.hash[0] && hash[1] == other.hash[1])
      return true;
    if (hash[0] == other.hash[1] && hash[1] == other.hash[0])
      return true;
    return false;
  }
};

struct RacyAddress {
  uptr addr_min;
  uptr addr_max;
};

struct FiredSuppression {
  ReportType type;
  uptr pc;
  Suppression *supp;
};

struct Context {
  Context();

  bool initialized;

  SyncTab synctab;

  Mutex report_mtx;
  int nreported;
  int nmissed_expected;
  atomic_uint64_t last_symbolize_time_ns;

  ThreadRegistry *thread_registry;

  Vector<RacyStacks> racy_stacks;
  Vector<RacyAddress> racy_addresses;
  // The number of fired suppressions may be quite large.
  InternalMmapVector<FiredSuppression> fired_suppressions;

  Flags flags;

  u64 stat[StatCnt];
  u64 int_alloc_cnt[MBlockTypeCount];
  u64 int_alloc_siz[MBlockTypeCount];
};

class ScopedInRtl {
 public:
  ScopedInRtl();
  ~ScopedInRtl();
 private:
  ThreadState *thr_;
  int in_rtl_;
  int errno_;
};

class ScopedReport {
 public:
  explicit ScopedReport(ReportType typ);
  ~ScopedReport();

  void AddStack(const StackTrace *stack);
  void AddMemoryAccess(uptr addr, Shadow s, const StackTrace *stack,
                       const MutexSet *mset);
  void AddThread(const ThreadContext *tctx);
  void AddMutex(const SyncVar *s);
  void AddLocation(uptr addr, uptr size);
  void AddSleep(u32 stack_id);
  void SetCount(int count);

  const ReportDesc *GetReport() const;

 private:
  Context *ctx_;
  ReportDesc *rep_;

  void AddMutex(u64 id);

  ScopedReport(const ScopedReport&);
  void operator = (const ScopedReport&);
};

void RestoreStack(int tid, const u64 epoch, StackTrace *stk, MutexSet *mset);

void StatAggregate(u64 *dst, u64 *src);
void StatOutput(u64 *stat);
void ALWAYS_INLINE StatInc(ThreadState *thr, StatType typ, u64 n = 1) {
  if (kCollectStats)
    thr->stat[typ] += n;
}
void ALWAYS_INLINE StatSet(ThreadState *thr, StatType typ, u64 n) {
  if (kCollectStats)
    thr->stat[typ] = n;
}

void MapShadow(uptr addr, uptr size);
void MapThreadTrace(uptr addr, uptr size);
void DontNeedShadowFor(uptr addr, uptr size);
void InitializeShadowMemory();
void InitializeInterceptors();
void InitializeLibIgnore();
void InitializeDynamicAnnotations();

void ReportRace(ThreadState *thr);
bool OutputReport(Context *ctx,
                  const ScopedReport &srep,
                  const ReportStack *suppress_stack1 = 0,
                  const ReportStack *suppress_stack2 = 0,
                  const ReportLocation *suppress_loc = 0);
bool IsFiredSuppression(Context *ctx,
                        const ScopedReport &srep,
                        const StackTrace &trace);
bool IsExpectedReport(uptr addr, uptr size);
void PrintMatchedBenignRaces();
bool FrameIsInternal(const ReportStack *frame);
ReportStack *SkipTsanInternalFrames(ReportStack *ent);

#if defined(TSAN_DEBUG_OUTPUT) && TSAN_DEBUG_OUTPUT >= 1
# define DPrintf Printf
#else
# define DPrintf(...)
#endif

#if defined(TSAN_DEBUG_OUTPUT) && TSAN_DEBUG_OUTPUT >= 2
# define DPrintf2 Printf
#else
# define DPrintf2(...)
#endif
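
// A hedged sketch of how ScopedReport is meant to be used (modeled on what
// the race-reporting path in tsan_rtl_report.cc does, heavily simplified;
// locking, stack restoration and error handling are omitted):
//
//   ScopedReport rep(ReportTypeRace);
//   rep.AddMemoryAccess(addr, s, &stack, &mset);
//   rep.AddThread(tctx);
//   if (OutputReport(CTX(), rep))
//     ...  // the report was emitted, i.e. not suppressed
//
// The destructor releases the report storage, so the ReportDesc returned by
// GetReport() must not outlive the ScopedReport.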

u32 CurrentStackId(ThreadState *thr, uptr pc);
void PrintCurrentStack(ThreadState *thr, uptr pc);
void PrintCurrentStackSlow();  // uses libunwind

void Initialize(ThreadState *thr);
int Finalize(ThreadState *thr);

SyncVar* GetJavaSync(ThreadState *thr, uptr pc, uptr addr,
                     bool write_lock, bool create);
SyncVar* GetAndRemoveJavaSync(ThreadState *thr, uptr pc, uptr addr);

void MemoryAccess(ThreadState *thr, uptr pc, uptr addr,
    int kAccessSizeLog, bool kAccessIsWrite, bool kIsAtomic);
void MemoryAccessImpl(ThreadState *thr, uptr addr,
    int kAccessSizeLog, bool kAccessIsWrite, bool kIsAtomic,
    u64 *shadow_mem, Shadow cur);
void MemoryAccessRange(ThreadState *thr, uptr pc, uptr addr,
    uptr size, bool is_write);
void MemoryAccessRangeStep(ThreadState *thr, uptr pc, uptr addr,
    uptr size, uptr step, bool is_write);
void UnalignedMemoryAccess(ThreadState *thr, uptr pc, uptr addr,
    int size, bool kAccessIsWrite, bool kIsAtomic);

const int kSizeLog1 = 0;
const int kSizeLog2 = 1;
const int kSizeLog4 = 2;
const int kSizeLog8 = 3;

void ALWAYS_INLINE MemoryRead(ThreadState *thr, uptr pc,
                              uptr addr, int kAccessSizeLog) {
  MemoryAccess(thr, pc, addr, kAccessSizeLog, false, false);
}

void ALWAYS_INLINE MemoryWrite(ThreadState *thr, uptr pc,
                               uptr addr, int kAccessSizeLog) {
  MemoryAccess(thr, pc, addr, kAccessSizeLog, true, false);
}

void ALWAYS_INLINE MemoryReadAtomic(ThreadState *thr, uptr pc,
                                    uptr addr, int kAccessSizeLog) {
  MemoryAccess(thr, pc, addr, kAccessSizeLog, false, true);
}

void ALWAYS_INLINE MemoryWriteAtomic(ThreadState *thr, uptr pc,
                                     uptr addr, int kAccessSizeLog) {
  MemoryAccess(thr, pc, addr, kAccessSizeLog, true, true);
}
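
// The four helpers above are thin wrappers that encode the access kind in
// MemoryAccess()'s last three arguments (size log, is-write, is-atomic).
// For example, to record a plain 4-byte load and an atomic 8-byte store from
// runtime code (a sketch; thr and pc stand for the current thread state and
// the caller's PC):
//
//   MemoryRead(thr, pc, (uptr)&x, kSizeLog4);
//   MemoryWriteAtomic(thr, pc, (uptr)&y, kSizeLog8);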

void MemoryResetRange(ThreadState *thr, uptr pc, uptr addr, uptr size);
void MemoryRangeFreed(ThreadState *thr, uptr pc, uptr addr, uptr size);
void MemoryRangeImitateWrite(ThreadState *thr, uptr pc, uptr addr, uptr size);

void ThreadIgnoreBegin(ThreadState *thr);
void ThreadIgnoreEnd(ThreadState *thr);
void ThreadIgnoreSyncBegin(ThreadState *thr);
void ThreadIgnoreSyncEnd(ThreadState *thr);

void FuncEntry(ThreadState *thr, uptr pc);
void FuncExit(ThreadState *thr);

int ThreadCreate(ThreadState *thr, uptr pc, uptr uid, bool detached);
void ThreadStart(ThreadState *thr, int tid, uptr os_id);
void ThreadFinish(ThreadState *thr);
int ThreadTid(ThreadState *thr, uptr pc, uptr uid);
void ThreadJoin(ThreadState *thr, uptr pc, int tid);
void ThreadDetach(ThreadState *thr, uptr pc, int tid);
void ThreadFinalize(ThreadState *thr);
void ThreadSetName(ThreadState *thr, const char *name);
int ThreadCount(ThreadState *thr);
void ProcessPendingSignals(ThreadState *thr);

void MutexCreate(ThreadState *thr, uptr pc, uptr addr,
                 bool rw, bool recursive, bool linker_init);
void MutexDestroy(ThreadState *thr, uptr pc, uptr addr);
void MutexLock(ThreadState *thr, uptr pc, uptr addr, int rec = 1);
int MutexUnlock(ThreadState *thr, uptr pc, uptr addr, bool all = false);
void MutexReadLock(ThreadState *thr, uptr pc, uptr addr);
void MutexReadUnlock(ThreadState *thr, uptr pc, uptr addr);
void MutexReadOrWriteUnlock(ThreadState *thr, uptr pc, uptr addr);

void Acquire(ThreadState *thr, uptr pc, uptr addr);
void AcquireGlobal(ThreadState *thr, uptr pc);
void Release(ThreadState *thr, uptr pc, uptr addr);
void ReleaseStore(ThreadState *thr, uptr pc, uptr addr);
void AfterSleep(ThreadState *thr, uptr pc);
void AcquireImpl(ThreadState *thr, uptr pc, SyncClock *c);
void ReleaseImpl(ThreadState *thr, uptr pc, SyncClock *c);
void ReleaseStoreImpl(ThreadState *thr, uptr pc, SyncClock *c);
void AcquireReleaseImpl(ThreadState *thr, uptr pc, SyncClock *c);

// The hacky call uses a custom calling convention and an assembly thunk.
// It is considerably faster than a normal call for the caller
// if it is not executed (it is intended for slow paths from hot functions).
// The trick is that the call preserves all registers and the compiler
// does not treat it as a call.
// If it does not work for you, use a normal call.
#if TSAN_DEBUG == 0
// The caller may not create the stack frame for itself at all,
// so we create a reserve stack frame for it (1024 bytes must be enough).
#define HACKY_CALL(f) \
  __asm__ __volatile__("sub $1024, %%rsp;" \
                       "/*.cfi_adjust_cfa_offset 1024;*/" \
                       ".hidden " #f "_thunk;" \
                       "call " #f "_thunk;" \
                       "add $1024, %%rsp;" \
                       "/*.cfi_adjust_cfa_offset -1024;*/" \
                       ::: "memory", "cc");
#else
#define HACKY_CALL(f) f()
#endif

void TraceSwitch(ThreadState *thr);
uptr TraceTopPC(ThreadState *thr);
uptr TraceSize();
uptr TraceParts();
Trace *ThreadTrace(int tid);

extern "C" void __tsan_trace_switch();
void ALWAYS_INLINE TraceAddEvent(ThreadState *thr, FastState fs,
                                 EventType typ, u64 addr) {
  DCHECK_GE((int)typ, 0);
  DCHECK_LE((int)typ, 7);
  DCHECK_EQ(GetLsb(addr, 61), addr);
  StatInc(thr, StatEvents);
  u64 pos = fs.GetTracePos();
  if (UNLIKELY((pos % kTracePartSize) == 0)) {
#ifndef TSAN_GO
    HACKY_CALL(__tsan_trace_switch);
#else
    TraceSwitch(thr);
#endif
  }
  Event *trace = (Event*)GetThreadTrace(fs.tid());
  Event *evp = &trace[pos];
  Event ev = (u64)addr | ((u64)typ << 61);
  *evp = ev;
}

}  // namespace __tsan

#endif  // TSAN_RTL_H