tsan_rtl.h revision 39968339a07d790aadcf27534f92a0de8c0c90fb
//===-- tsan_rtl.h ----------------------------------------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
// Main internal TSan header file.
//
// Ground rules:
//   - C++ run-time should not be used (static CTORs, RTTI, exceptions, static
//     function-scope locals)
//   - All functions/classes/etc reside in namespace __tsan, except for those
//     declared in tsan_interface.h.
//   - Platform-specific files should be used instead of ifdefs (*).
//   - No system headers included in header files (*).
//   - Platform-specific headers are included only into platform-specific
//     files (*).
//
//  (*) Except when inlining is critical for performance.
//===----------------------------------------------------------------------===//

#ifndef TSAN_RTL_H
#define TSAN_RTL_H

#include "sanitizer_common/sanitizer_allocator.h"
#include "sanitizer_common/sanitizer_allocator_internal.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_thread_registry.h"
#include "tsan_clock.h"
#include "tsan_defs.h"
#include "tsan_flags.h"
#include "tsan_sync.h"
#include "tsan_trace.h"
#include "tsan_vector.h"
#include "tsan_report.h"
#include "tsan_platform.h"
#include "tsan_mutexset.h"

#if SANITIZER_WORDSIZE != 64
# error "ThreadSanitizer is supported only on 64-bit platforms"
#endif

namespace __tsan {

// Descriptor of user's memory block.
struct MBlock {
  /*
  u64 mtx : 1;  // must be first
  u64 lst : 44;
  u64 stk : 31;  // on word boundary
  u64 tid : kTidBits;
  u64 siz : 128 - 1 - 31 - 44 - kTidBits;  // 39
  */
  u64 raw[2];

  void Init(uptr siz, u32 tid, u32 stk) {
    raw[0] = raw[1] = 0;
    raw[1] |= (u64)siz << ((1 + 44 + 31 + kTidBits) % 64);
    raw[1] |= (u64)tid << ((1 + 44 + 31) % 64);
    raw[0] |= (u64)stk << (1 + 44);
    raw[1] |= (u64)stk >> (64 - 44 - 1);
    DCHECK_EQ(Size(), siz);
    DCHECK_EQ(Tid(), tid);
    DCHECK_EQ(StackId(), stk);
  }

  u32 Tid() const {
    return GetLsb(raw[1] >> ((1 + 44 + 31) % 64), kTidBits);
  }

  uptr Size() const {
    return raw[1] >> ((1 + 31 + 44 + kTidBits) % 64);
  }

  u32 StackId() const {
    return (raw[0] >> (1 + 44)) | GetLsb(raw[1] << (64 - 44 - 1), 31);
  }

  SyncVar *ListHead() const {
    return (SyncVar*)(GetLsb(raw[0] >> 1, 44) << 3);
  }

  void ListPush(SyncVar *v) {
    SyncVar *lst = ListHead();
    v->next = lst;
    u64 x = (u64)v ^ (u64)lst;
    x = (x >> 3) << 1;
    raw[0] ^= x;
    DCHECK_EQ(ListHead(), v);
  }

  SyncVar *ListPop() {
    SyncVar *lst = ListHead();
    SyncVar *nxt = lst->next;
    lst->next = 0;
    u64 x = (u64)lst ^ (u64)nxt;
    x = (x >> 3) << 1;
    raw[0] ^= x;
    DCHECK_EQ(ListHead(), nxt);
    return lst;
  }

  void ListReset() {
    SyncVar *lst = ListHead();
    u64 x = (u64)lst;
    x = (x >> 3) << 1;
    raw[0] ^= x;
    DCHECK_EQ(ListHead(), 0);
  }

  void Lock();
  void Unlock();
  typedef GenericScopedLock<MBlock> ScopedLock;
};

#ifndef TSAN_GO
#if defined(TSAN_COMPAT_SHADOW) && TSAN_COMPAT_SHADOW
const uptr kAllocatorSpace = 0x7d0000000000ULL;
#else
const uptr kAllocatorSpace = 0x7d0000000000ULL;
#endif
const uptr kAllocatorSize = 0x10000000000ULL;  // 1T.
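
// TSan's internal heap: a fixed-address 64-bit size-class allocator combined
// with a large-mmap secondary allocator. Each chunk carries sizeof(MBlock)
// bytes of per-chunk metadata for the block descriptor above.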

struct MapUnmapCallback;
typedef SizeClassAllocator64<kAllocatorSpace, kAllocatorSize, sizeof(MBlock),
    DefaultSizeClassMap, MapUnmapCallback> PrimaryAllocator;
typedef SizeClassAllocatorLocalCache<PrimaryAllocator> AllocatorCache;
typedef LargeMmapAllocator<MapUnmapCallback> SecondaryAllocator;
typedef CombinedAllocator<PrimaryAllocator, AllocatorCache,
    SecondaryAllocator> Allocator;
Allocator *allocator();
#endif

void TsanCheckFailed(const char *file, int line, const char *cond,
                     u64 v1, u64 v2);

const u64 kShadowRodata = (u64)-1;  // .rodata shadow marker

// FastState (from most significant bit):
//   ignore          : 1
//   tid             : kTidBits
//   epoch           : kClkBits
//   unused          : -
//   history_size    : 3
class FastState {
 public:
  FastState(u64 tid, u64 epoch) {
    x_ = tid << kTidShift;
    x_ |= epoch << kClkShift;
    DCHECK_EQ(tid, this->tid());
    DCHECK_EQ(epoch, this->epoch());
    DCHECK_EQ(GetIgnoreBit(), false);
  }

  explicit FastState(u64 x)
      : x_(x) {
  }

  u64 raw() const {
    return x_;
  }

  u64 tid() const {
    u64 res = (x_ & ~kIgnoreBit) >> kTidShift;
    return res;
  }

  u64 TidWithIgnore() const {
    u64 res = x_ >> kTidShift;
    return res;
  }

  u64 epoch() const {
    u64 res = (x_ << (kTidBits + 1)) >> (64 - kClkBits);
    return res;
  }

  void IncrementEpoch() {
    u64 old_epoch = epoch();
    x_ += 1 << kClkShift;
    DCHECK_EQ(old_epoch + 1, epoch());
    (void)old_epoch;
  }

  void SetIgnoreBit() { x_ |= kIgnoreBit; }
  void ClearIgnoreBit() { x_ &= ~kIgnoreBit; }
  bool GetIgnoreBit() const { return (s64)x_ < 0; }

  void SetHistorySize(int hs) {
    CHECK_GE(hs, 0);
    CHECK_LE(hs, 7);
    x_ = (x_ & ~7) | hs;
  }

  int GetHistorySize() const {
    return (int)(x_ & 7);
  }

  void ClearHistorySize() {
    x_ &= ~7;
  }

  u64 GetTracePos() const {
    const int hs = GetHistorySize();
    // When hs == 0, the trace consists of 2 parts.
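    // Thus the mask below keeps the low kTracePartSizeBits + hs + 1 bits of
    // the epoch: the trace holds 2^(hs+1) parts, and the returned position
    // wraps around at the total trace size.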
    const u64 mask = (1ull << (kTracePartSizeBits + hs + 1)) - 1;
    return epoch() & mask;
  }

 private:
  friend class Shadow;
  static const int kTidShift = 64 - kTidBits - 1;
  static const int kClkShift = kTidShift - kClkBits;
  static const u64 kIgnoreBit = 1ull << 63;
  static const u64 kFreedBit = 1ull << 63;
  u64 x_;
};

// Shadow (from most significant bit):
//   freed           : 1
//   tid             : kTidBits
//   epoch           : kClkBits
//   is_atomic       : 1
//   is_read         : 1
//   size_log        : 2
//   addr0           : 3
class Shadow : public FastState {
 public:
  explicit Shadow(u64 x)
      : FastState(x) {
  }

  explicit Shadow(const FastState &s)
      : FastState(s.x_) {
    ClearHistorySize();
  }

  void SetAddr0AndSizeLog(u64 addr0, unsigned kAccessSizeLog) {
    DCHECK_EQ(x_ & 31, 0);
    DCHECK_LE(addr0, 7);
    DCHECK_LE(kAccessSizeLog, 3);
    x_ |= (kAccessSizeLog << 3) | addr0;
    DCHECK_EQ(kAccessSizeLog, size_log());
    DCHECK_EQ(addr0, this->addr0());
  }

  void SetWrite(unsigned kAccessIsWrite) {
    DCHECK_EQ(x_ & kReadBit, 0);
    if (!kAccessIsWrite)
      x_ |= kReadBit;
    DCHECK_EQ(kAccessIsWrite, IsWrite());
  }

  void SetAtomic(bool kIsAtomic) {
    DCHECK(!IsAtomic());
    if (kIsAtomic)
      x_ |= kAtomicBit;
    DCHECK_EQ(IsAtomic(), kIsAtomic);
  }

  bool IsAtomic() const {
    return x_ & kAtomicBit;
  }

  bool IsZero() const {
    return x_ == 0;
  }

  static inline bool TidsAreEqual(const Shadow s1, const Shadow s2) {
    u64 shifted_xor = (s1.x_ ^ s2.x_) >> kTidShift;
    DCHECK_EQ(shifted_xor == 0, s1.TidWithIgnore() == s2.TidWithIgnore());
    return shifted_xor == 0;
  }

  static inline bool Addr0AndSizeAreEqual(const Shadow s1, const Shadow s2) {
    u64 masked_xor = (s1.x_ ^ s2.x_) & 31;
    return masked_xor == 0;
  }

  static inline bool TwoRangesIntersect(Shadow s1, Shadow s2,
      unsigned kS2AccessSize) {
    bool res = false;
    u64 diff = s1.addr0() - s2.addr0();
    if ((s64)diff < 0) {  // s1.addr0 < s2.addr0  // NOLINT
      // if (s1.addr0() + size1 > s2.addr0()) return true;
      if (s1.size() > -diff) res = true;
    } else {
      // if (s2.addr0() + kS2AccessSize > s1.addr0()) return true;
      if (kS2AccessSize > diff) res = true;
    }
    DCHECK_EQ(res, TwoRangesIntersectSLOW(s1, s2));
    DCHECK_EQ(res, TwoRangesIntersectSLOW(s2, s1));
    return res;
  }

  // The idea behind the offset is as follows.
  // Consider that we have 8 bools contained within a single 8-byte block
  // (mapped to a single shadow "cell"). Now consider that we write to the
  // bools from a single thread (which we consider the common case).
  // Without offsetting, each access would have to scan 4 shadow values on
  // average to find the corresponding shadow value for the bool.
  // With offsetting, we start scanning the shadow at the offset, so that
  // each access hits the necessary shadow value straight away (at least in
  // the expected optimistic case).
  // This logic works seamlessly for any layout of user data. For example,
  // if user data is {int, short, char, char}, then accesses to the int are
  // offset to 0, the short to 4, the 1st char to 6 and the 2nd char to 7.
  // Hopefully, accesses from a single thread won't need to scan all 8
  // shadow values.
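  // Note that the search offset is simply addr0, the low 3 bits of the access
  // address within its 8-byte cell (ComputeSearchOffset() and addr0() extract
  // the same bits).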
  unsigned ComputeSearchOffset() {
    return x_ & 7;
  }
  u64 addr0() const { return x_ & 7; }
  u64 size() const { return 1ull << size_log(); }
  bool IsWrite() const { return !IsRead(); }
  bool IsRead() const { return x_ & kReadBit; }

  // The idea behind the freed bit is as follows.
  // When the memory is freed (or otherwise becomes inaccessible) we write to
  // the shadow values with the tid/epoch related to the free and the freed
  // bit set. During memory access processing the freed bit is treated as the
  // msb of the tid. So any access races with a shadow value that has the
  // freed bit set (it is as if it were a write from a thread with which we
  // have never synchronized). This allows us to detect accesses to freed
  // memory without additional overhead in memory access processing, and at
  // the same time restore the tid/epoch of the free.
  void MarkAsFreed() {
    x_ |= kFreedBit;
  }

  bool IsFreed() const {
    return x_ & kFreedBit;
  }

  bool GetFreedAndReset() {
    bool res = x_ & kFreedBit;
    x_ &= ~kFreedBit;
    return res;
  }

  bool IsBothReadsOrAtomic(bool kIsWrite, bool kIsAtomic) const {
    // analyzes the 5th bit (is_read) and the 6th bit (is_atomic)
    bool v = x_ & u64(((kIsWrite ^ 1) << kReadShift)
        | (kIsAtomic << kAtomicShift));
    DCHECK_EQ(v, (!IsWrite() && !kIsWrite) || (IsAtomic() && kIsAtomic));
    return v;
  }

  bool IsRWNotWeaker(bool kIsWrite, bool kIsAtomic) const {
    bool v = ((x_ >> kReadShift) & 3)
        <= u64((kIsWrite ^ 1) | (kIsAtomic << 1));
    DCHECK_EQ(v, (IsAtomic() < kIsAtomic) ||
        (IsAtomic() == kIsAtomic && !IsWrite() <= !kIsWrite));
    return v;
  }

  bool IsRWWeakerOrEqual(bool kIsWrite, bool kIsAtomic) const {
    bool v = ((x_ >> kReadShift) & 3)
        >= u64((kIsWrite ^ 1) | (kIsAtomic << 1));
    DCHECK_EQ(v, (IsAtomic() > kIsAtomic) ||
        (IsAtomic() == kIsAtomic && !IsWrite() >= !kIsWrite));
    return v;
  }

 private:
  static const u64 kReadShift = 5;
  static const u64 kReadBit = 1ull << kReadShift;
  static const u64 kAtomicShift = 6;
  static const u64 kAtomicBit = 1ull << kAtomicShift;

  u64 size_log() const { return (x_ >> 3) & 3; }

  static bool TwoRangesIntersectSLOW(const Shadow s1, const Shadow s2) {
    if (s1.addr0() == s2.addr0()) return true;
    if (s1.addr0() < s2.addr0() && s1.addr0() + s1.size() > s2.addr0())
      return true;
    if (s2.addr0() < s1.addr0() && s2.addr0() + s2.size() > s1.addr0())
      return true;
    return false;
  }
};

struct SignalContext;

struct JmpBuf {
  uptr sp;
  uptr mangled_sp;
  uptr *shadow_stack_pos;
};

// This struct is stored in TLS.
struct ThreadState {
  FastState fast_state;
  // Synch epoch represents the thread's epoch before the last synchronization
  // action. It allows reducing the number of shadow state updates.
  // For example, fast_synch_epoch=100, last write to addr X was at epoch=150,
  // if we are processing a write to X from the same thread at epoch=200,
  // we do nothing, because both writes happen in the same 'synch epoch'.
  // That is, if another memory access does not race with the former write,
  // it does not race with the latter as well.
  // QUESTION: can we squeeze this into ThreadState::Fast?
  // E.g. ThreadState::Fast is 44 bits, 32 are taken by synch_epoch and 12 are
  // taken by the epoch between synchs.
  // This way we can save one load from TLS.
  u64 fast_synch_epoch;
  // This is a slow path flag. On fast path, fast_state.GetIgnoreBit() is read.
  // We do not distinguish between ignoring reads and writes
  // for better performance.
  int ignore_reads_and_writes;
  uptr *shadow_stack_pos;
  u64 *racy_shadow_addr;
  u64 racy_state[2];
#ifndef TSAN_GO
  // C/C++ uses an embedded shadow stack of fixed size.
  uptr shadow_stack[kShadowStackSize];
#else
  // Go uses a satellite shadow stack with dynamic size.
  uptr *shadow_stack;
  uptr *shadow_stack_end;
#endif
  MutexSet mset;
  ThreadClock clock;
#ifndef TSAN_GO
  AllocatorCache alloc_cache;
  InternalAllocatorCache internal_alloc_cache;
  Vector<JmpBuf> jmp_bufs;
#endif
  u64 stat[StatCnt];
  const int tid;
  const int unique_id;
  int in_rtl;
  bool in_symbolizer;
  bool is_alive;
  bool is_freeing;
  bool is_vptr_access;
  const uptr stk_addr;
  const uptr stk_size;
  const uptr tls_addr;
  const uptr tls_size;

  DeadlockDetector deadlock_detector;

  bool in_signal_handler;
  SignalContext *signal_ctx;

#ifndef TSAN_GO
  u32 last_sleep_stack_id;
  ThreadClock last_sleep_clock;
#endif

  // Set in regions of runtime that must be signal-safe and fork-safe.
  // If set, malloc must not be called.
  int nomalloc;

  explicit ThreadState(Context *ctx, int tid, int unique_id, u64 epoch,
                       uptr stk_addr, uptr stk_size,
                       uptr tls_addr, uptr tls_size);
};

Context *CTX();

#ifndef TSAN_GO
extern THREADLOCAL char cur_thread_placeholder[];
INLINE ThreadState *cur_thread() {
  return reinterpret_cast<ThreadState *>(&cur_thread_placeholder);
}
#endif

class ThreadContext : public ThreadContextBase {
 public:
  explicit ThreadContext(int tid);
  ~ThreadContext();
  ThreadState *thr;
#ifdef TSAN_GO
  StackTrace creation_stack;
#else
  u32 creation_stack_id;
#endif
  SyncClock sync;
  // Epoch at which the thread had started.
  // If we see an event from the thread stamped by an older epoch,
  // the event is from a dead thread that shared tid with this thread.
  u64 epoch0;
  u64 epoch1;

  // Override superclass callbacks.
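  // These are invoked by the generic ThreadRegistry (sanitizer_common) as the
  // thread goes through its lifecycle; see sanitizer_thread_registry.h.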
  void OnDead();
  void OnJoined(void *arg);
  void OnFinished();
  void OnStarted(void *arg);
  void OnCreated(void *arg);
  void OnReset();
};

struct RacyStacks {
  MD5Hash hash[2];
  bool operator==(const RacyStacks &other) const {
    if (hash[0] == other.hash[0] && hash[1] == other.hash[1])
      return true;
    if (hash[0] == other.hash[1] && hash[1] == other.hash[0])
      return true;
    return false;
  }
};

struct RacyAddress {
  uptr addr_min;
  uptr addr_max;
};

struct FiredSuppression {
  ReportType type;
  uptr pc;
  Suppression *supp;
};

struct Context {
  Context();

  bool initialized;

  SyncTab synctab;

  Mutex report_mtx;
  int nreported;
  int nmissed_expected;
  atomic_uint64_t last_symbolize_time_ns;

  ThreadRegistry *thread_registry;

  Vector<RacyStacks> racy_stacks;
  Vector<RacyAddress> racy_addresses;
  Vector<FiredSuppression> fired_suppressions;

  Flags flags;

  u64 stat[StatCnt];
  u64 int_alloc_cnt[MBlockTypeCount];
  u64 int_alloc_siz[MBlockTypeCount];
};

class ScopedInRtl {
 public:
  ScopedInRtl();
  ~ScopedInRtl();
 private:
  ThreadState *thr_;
  int in_rtl_;
  int errno_;
};

class ScopedReport {
 public:
  explicit ScopedReport(ReportType typ);
  ~ScopedReport();

  void AddStack(const StackTrace *stack);
  void AddMemoryAccess(uptr addr, Shadow s, const StackTrace *stack,
                       const MutexSet *mset);
  void AddThread(const ThreadContext *tctx);
  void AddMutex(const SyncVar *s);
  void AddLocation(uptr addr, uptr size);
  void AddSleep(u32 stack_id);
  void SetCount(int count);

  const ReportDesc *GetReport() const;

 private:
  Context *ctx_;
  ReportDesc *rep_;

  void AddMutex(u64 id);

  ScopedReport(const ScopedReport&);
  void operator = (const ScopedReport&);
};

void RestoreStack(int tid, const u64 epoch, StackTrace *stk, MutexSet *mset);

void StatAggregate(u64 *dst, u64 *src);
void StatOutput(u64 *stat);
void ALWAYS_INLINE StatInc(ThreadState *thr, StatType typ, u64 n = 1) {
  if (kCollectStats)
    thr->stat[typ] += n;
}
void ALWAYS_INLINE StatSet(ThreadState *thr, StatType typ, u64 n) {
  if (kCollectStats)
    thr->stat[typ] = n;
}

void MapShadow(uptr addr, uptr size);
void MapThreadTrace(uptr addr, uptr size);
void DontNeedShadowFor(uptr addr, uptr size);
void InitializeShadowMemory();
void InitializeInterceptors();
void InitializeDynamicAnnotations();

void ReportRace(ThreadState *thr);
bool OutputReport(Context *ctx,
                  const ScopedReport &srep,
                  const ReportStack *suppress_stack1 = 0,
                  const ReportStack *suppress_stack2 = 0,
                  const ReportLocation *suppress_loc = 0);
bool IsFiredSuppression(Context *ctx,
                        const ScopedReport &srep,
                        const StackTrace &trace);
bool IsExpectedReport(uptr addr, uptr size);
void PrintMatchedBenignRaces();
bool FrameIsInternal(const ReportStack *frame);
ReportStack *SkipTsanInternalFrames(ReportStack *ent);

#if defined(TSAN_DEBUG_OUTPUT) && TSAN_DEBUG_OUTPUT >= 1
# define DPrintf Printf
#else
# define DPrintf(...)
#endif

#if defined(TSAN_DEBUG_OUTPUT) && TSAN_DEBUG_OUTPUT >= 2
# define DPrintf2 Printf
#else
# define DPrintf2(...)
#endif

u32 CurrentStackId(ThreadState *thr, uptr pc);
void PrintCurrentStack(ThreadState *thr, uptr pc);
void PrintCurrentStackSlow();  // uses libunwind

void Initialize(ThreadState *thr);
int Finalize(ThreadState *thr);

SyncVar* GetJavaSync(ThreadState *thr, uptr pc, uptr addr,
                     bool write_lock, bool create);
SyncVar* GetAndRemoveJavaSync(ThreadState *thr, uptr pc, uptr addr);

void MemoryAccess(ThreadState *thr, uptr pc, uptr addr,
    int kAccessSizeLog, bool kAccessIsWrite, bool kIsAtomic);
void MemoryAccessImpl(ThreadState *thr, uptr addr,
    int kAccessSizeLog, bool kAccessIsWrite, bool kIsAtomic,
    u64 *shadow_mem, Shadow cur);
void MemoryAccessRange(ThreadState *thr, uptr pc, uptr addr,
    uptr size, bool is_write);
void MemoryAccessRangeStep(ThreadState *thr, uptr pc, uptr addr,
    uptr size, uptr step, bool is_write);
void UnalignedMemoryAccess(ThreadState *thr, uptr pc, uptr addr,
    int size, bool kAccessIsWrite, bool kIsAtomic);

const int kSizeLog1 = 0;
const int kSizeLog2 = 1;
const int kSizeLog4 = 2;
const int kSizeLog8 = 3;

void ALWAYS_INLINE MemoryRead(ThreadState *thr, uptr pc,
                              uptr addr, int kAccessSizeLog) {
  MemoryAccess(thr, pc, addr, kAccessSizeLog, false, false);
}

void ALWAYS_INLINE MemoryWrite(ThreadState *thr, uptr pc,
                               uptr addr, int kAccessSizeLog) {
  MemoryAccess(thr, pc, addr, kAccessSizeLog, true, false);
}

void ALWAYS_INLINE MemoryReadAtomic(ThreadState *thr, uptr pc,
                                    uptr addr, int kAccessSizeLog) {
  MemoryAccess(thr, pc, addr, kAccessSizeLog, false, true);
}

void ALWAYS_INLINE MemoryWriteAtomic(ThreadState *thr, uptr pc,
                                     uptr addr, int kAccessSizeLog) {
  MemoryAccess(thr, pc, addr, kAccessSizeLog, true, true);
}

void MemoryResetRange(ThreadState *thr, uptr pc, uptr addr, uptr size);
void MemoryRangeFreed(ThreadState *thr, uptr pc, uptr addr, uptr size);
void MemoryRangeImitateWrite(ThreadState *thr, uptr pc, uptr addr, uptr size);
void IgnoreCtl(ThreadState *thr, bool write, bool begin);

void FuncEntry(ThreadState *thr, uptr pc);
void FuncExit(ThreadState *thr);

int ThreadCreate(ThreadState *thr, uptr pc, uptr uid, bool detached);
void ThreadStart(ThreadState *thr, int tid, uptr os_id);
void ThreadFinish(ThreadState *thr);
int ThreadTid(ThreadState *thr, uptr pc, uptr uid);
void ThreadJoin(ThreadState *thr, uptr pc, int tid);
void ThreadDetach(ThreadState *thr, uptr pc, int tid);
void ThreadFinalize(ThreadState *thr);
void ThreadSetName(ThreadState *thr, const char *name);
int ThreadCount(ThreadState *thr);
void ProcessPendingSignals(ThreadState *thr);

void MutexCreate(ThreadState *thr, uptr pc, uptr addr,
                 bool rw, bool recursive, bool linker_init);
void MutexDestroy(ThreadState *thr, uptr pc, uptr addr);
void MutexLock(ThreadState *thr, uptr pc, uptr addr, int rec = 1);
int MutexUnlock(ThreadState *thr, uptr pc, uptr addr, bool all = false);
void MutexReadLock(ThreadState *thr, uptr pc, uptr addr);
void MutexReadUnlock(ThreadState *thr, uptr pc, uptr addr);
void MutexReadOrWriteUnlock(ThreadState *thr, uptr pc, uptr addr);

void Acquire(ThreadState *thr, uptr pc, uptr addr);
void AcquireGlobal(ThreadState *thr, uptr pc);
void Release(ThreadState *thr, uptr pc, uptr addr);
void ReleaseStore(ThreadState *thr, uptr pc, uptr addr);
void AfterSleep(ThreadState *thr, uptr pc);

// The hacky call uses a custom calling convention and an assembly thunk.
// It is considerably faster than a normal call for the caller
// if it is not executed (it is intended for slow paths from hot functions).
// The trick is that the call preserves all registers and the compiler
// does not treat it as a call.
// If it does not work for you, use a normal call.
#if TSAN_DEBUG == 0
// The caller may not create the stack frame for itself at all,
// so we create a reserve stack frame for it (1024 bytes must be enough).
#define HACKY_CALL(f) \
  __asm__ __volatile__("sub $1024, %%rsp;" \
                       "/*.cfi_adjust_cfa_offset 1024;*/" \
                       ".hidden " #f "_thunk;" \
                       "call " #f "_thunk;" \
                       "add $1024, %%rsp;" \
                       "/*.cfi_adjust_cfa_offset -1024;*/" \
                       ::: "memory", "cc");
#else
#define HACKY_CALL(f) f()
#endif

void TraceSwitch(ThreadState *thr);
uptr TraceTopPC(ThreadState *thr);
uptr TraceSize();
uptr TraceParts();
Trace *ThreadTrace(int tid);

extern "C" void __tsan_trace_switch();
void ALWAYS_INLINE TraceAddEvent(ThreadState *thr, FastState fs,
                                 EventType typ, u64 addr) {
  DCHECK_GE((int)typ, 0);
  DCHECK_LE((int)typ, 7);
  DCHECK_EQ(GetLsb(addr, 61), addr);
  StatInc(thr, StatEvents);
  u64 pos = fs.GetTracePos();
  if (UNLIKELY((pos % kTracePartSize) == 0)) {
#ifndef TSAN_GO
    HACKY_CALL(__tsan_trace_switch);
#else
    TraceSwitch(thr);
#endif
  }
  Event *trace = (Event*)GetThreadTrace(fs.tid());
  Event *evp = &trace[pos];
  Event ev = (u64)addr | ((u64)typ << 61);
  *evp = ev;
}

}  // namespace __tsan

#endif  // TSAN_RTL_H