tsan_rtl.h revision 2d1fdb26e458c4ddc04155c1d421bced3ba90cd0
//===-- tsan_rtl.h ----------------------------------------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
// Main internal TSan header file.
//
// Ground rules:
//   - C++ run-time should not be used (static CTORs, RTTI, exceptions, static
//     function-scope locals)
//   - All functions/classes/etc reside in namespace __tsan, except for those
//     declared in tsan_interface.h.
//   - Platform-specific files should be used instead of ifdefs (*).
//   - No system headers included in header files (*).
//   - Platform-specific headers included only into platform-specific files (*).
//
//  (*) Except when inlining is critical for performance.
//===----------------------------------------------------------------------===//

#ifndef TSAN_RTL_H
#define TSAN_RTL_H

#include "sanitizer_common/sanitizer_allocator.h"
#include "sanitizer_common/sanitizer_allocator_internal.h"
#include "sanitizer_common/sanitizer_asm.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_deadlock_detector_interface.h"
#include "sanitizer_common/sanitizer_libignore.h"
#include "sanitizer_common/sanitizer_suppressions.h"
#include "sanitizer_common/sanitizer_thread_registry.h"
#include "tsan_clock.h"
#include "tsan_defs.h"
#include "tsan_flags.h"
#include "tsan_sync.h"
#include "tsan_trace.h"
#include "tsan_vector.h"
#include "tsan_report.h"
#include "tsan_platform.h"
#include "tsan_mutexset.h"
#include "tsan_ignoreset.h"

#if SANITIZER_WORDSIZE != 64
# error "ThreadSanitizer is supported only on 64-bit platforms"
#endif

namespace __tsan {

// Descriptor of user's memory block.
struct MBlock {
  /*
  u64 mtx : 1;  // must be first
  u64 lst : 44;
  u64 stk : 31;  // on word boundary
  u64 tid : kTidBits;
  u64 siz : 128 - 1 - 31 - 44 - kTidBits;  // 39
  */
  u64 raw[2];

  void Init(uptr siz, u32 tid, u32 stk) {
    raw[0] = raw[1] = 0;
    raw[1] |= (u64)siz << ((1 + 44 + 31 + kTidBits) % 64);
    raw[1] |= (u64)tid << ((1 + 44 + 31) % 64);
    raw[0] |= (u64)stk << (1 + 44);
    raw[1] |= (u64)stk >> (64 - 44 - 1);
    DCHECK_EQ(Size(), siz);
    DCHECK_EQ(Tid(), tid);
    DCHECK_EQ(StackId(), stk);
  }

  u32 Tid() const {
    return GetLsb(raw[1] >> ((1 + 44 + 31) % 64), kTidBits);
  }

  uptr Size() const {
    return raw[1] >> ((1 + 31 + 44 + kTidBits) % 64);
  }

  u32 StackId() const {
    return (raw[0] >> (1 + 44)) | GetLsb(raw[1] << (64 - 44 - 1), 31);
  }

  SyncVar *ListHead() const {
    return (SyncVar*)(GetLsb(raw[0] >> 1, 44) << 3);
  }

  void ListPush(SyncVar *v) {
    SyncVar *lst = ListHead();
    v->next = lst;
    u64 x = (u64)v ^ (u64)lst;
    x = (x >> 3) << 1;
    raw[0] ^= x;
    DCHECK_EQ(ListHead(), v);
  }

  SyncVar *ListPop() {
    SyncVar *lst = ListHead();
    SyncVar *nxt = lst->next;
    lst->next = 0;
    u64 x = (u64)lst ^ (u64)nxt;
    x = (x >> 3) << 1;
    raw[0] ^= x;
    DCHECK_EQ(ListHead(), nxt);
    return lst;
  }

  void ListReset() {
    SyncVar *lst = ListHead();
    u64 x = (u64)lst;
    x = (x >> 3) << 1;
    raw[0] ^= x;
    DCHECK_EQ(ListHead(), 0);
  }

  void Lock();
  void Unlock();
  typedef GenericScopedLock<MBlock> ScopedLock;
};
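// Example (an illustrative sketch, not part of the runtime): the two raw
// words pack mtx/lst/stk/tid/siz without C++ bit-fields so that the sync
// list head can be updated with a single XOR. SyncVar objects are 8-byte
// aligned, so a pointer's low 3 bits are zero and its bits 3..46 fit into
// the 44-bit lst field starting at bit 1 of raw[0]; XORing the old and new
// head flips only the lst bits and leaves the mtx bit (bit 0) intact:
//
//   MBlock b;
//   b.Init(/*siz=*/16, /*tid=*/1, /*stk=*/42);
//   SyncVar *v = ...;          // some 8-byte-aligned sync object
//   b.ListPush(v);             // raw[0] ^= (((u64)v ^ 0) >> 3) << 1
//   CHECK_EQ(b.ListHead(), v);
//   CHECK_EQ(b.ListPop(), v);  // restores the previous (empty) head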
#ifndef TSAN_GO
#if defined(TSAN_COMPAT_SHADOW) && TSAN_COMPAT_SHADOW
const uptr kAllocatorSpace = 0x7d0000000000ULL;
#else
const uptr kAllocatorSpace = 0x7d0000000000ULL;
#endif
const uptr kAllocatorSize  =  0x10000000000ULL;  // 1T.

struct MapUnmapCallback;
typedef SizeClassAllocator64<kAllocatorSpace, kAllocatorSize, sizeof(MBlock),
    DefaultSizeClassMap, MapUnmapCallback> PrimaryAllocator;
typedef SizeClassAllocatorLocalCache<PrimaryAllocator> AllocatorCache;
typedef LargeMmapAllocator<MapUnmapCallback> SecondaryAllocator;
typedef CombinedAllocator<PrimaryAllocator, AllocatorCache,
    SecondaryAllocator> Allocator;
Allocator *allocator();
#endif

void TsanCheckFailed(const char *file, int line, const char *cond,
                     u64 v1, u64 v2);

const u64 kShadowRodata = (u64)-1;  // .rodata shadow marker

// FastState (from most significant bit):
//   ignore          : 1
//   tid             : kTidBits
//   epoch           : kClkBits
//   unused          : -
//   history_size    : 3
class FastState {
 public:
  FastState(u64 tid, u64 epoch) {
    x_ = tid << kTidShift;
    x_ |= epoch << kClkShift;
    DCHECK_EQ(tid, this->tid());
    DCHECK_EQ(epoch, this->epoch());
    DCHECK_EQ(GetIgnoreBit(), false);
  }

  explicit FastState(u64 x)
      : x_(x) {
  }

  u64 raw() const {
    return x_;
  }

  u64 tid() const {
    u64 res = (x_ & ~kIgnoreBit) >> kTidShift;
    return res;
  }

  u64 TidWithIgnore() const {
    u64 res = x_ >> kTidShift;
    return res;
  }

  u64 epoch() const {
    u64 res = (x_ << (kTidBits + 1)) >> (64 - kClkBits);
    return res;
  }

  void IncrementEpoch() {
    u64 old_epoch = epoch();
    x_ += 1 << kClkShift;
    DCHECK_EQ(old_epoch + 1, epoch());
    (void)old_epoch;
  }

  void SetIgnoreBit() { x_ |= kIgnoreBit; }
  void ClearIgnoreBit() { x_ &= ~kIgnoreBit; }
  bool GetIgnoreBit() const { return (s64)x_ < 0; }

  void SetHistorySize(int hs) {
    CHECK_GE(hs, 0);
    CHECK_LE(hs, 7);
    x_ = (x_ & ~7) | hs;
  }

  int GetHistorySize() const {
    return (int)(x_ & 7);
  }

  void ClearHistorySize() {
    x_ &= ~7;
  }

  u64 GetTracePos() const {
    const int hs = GetHistorySize();
    // When hs == 0, the trace consists of 2 parts.
    const u64 mask = (1ull << (kTracePartSizeBits + hs + 1)) - 1;
    return epoch() & mask;
  }

 private:
  friend class Shadow;
  static const int kTidShift = 64 - kTidBits - 1;
  static const int kClkShift = kTidShift - kClkBits;
  static const u64 kIgnoreBit = 1ull << 63;
  static const u64 kFreedBit = 1ull << 63;
  u64 x_;
};
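// Example (illustrative only, assuming the kTidBits == 13 and kClkBits == 42
// definitions from tsan_defs.h at this revision): the resulting layout is
//   bit 63      : ignore
//   bits 50..62 : tid          (kTidShift == 50)
//   bits 8..49  : epoch        (kClkShift == 8)
//   bits 3..7   : unused
//   bits 0..2   : history_size
// so incrementing the epoch is a single add of 1 << kClkShift:
//
//   FastState fs(/*tid=*/5, /*epoch=*/100);
//   fs.IncrementEpoch();  // x_ += 1 << 8
//   // now fs.epoch() == 101 and fs.tid() == 5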
// Shadow (from most significant bit):
//   freed           : 1
//   tid             : kTidBits
//   epoch           : kClkBits
//   is_atomic       : 1
//   is_read         : 1
//   size_log        : 2
//   addr0           : 3
class Shadow : public FastState {
 public:
  explicit Shadow(u64 x)
      : FastState(x) {
  }

  explicit Shadow(const FastState &s)
      : FastState(s.x_) {
    ClearHistorySize();
  }

  void SetAddr0AndSizeLog(u64 addr0, unsigned kAccessSizeLog) {
    DCHECK_EQ(x_ & 31, 0);
    DCHECK_LE(addr0, 7);
    DCHECK_LE(kAccessSizeLog, 3);
    x_ |= (kAccessSizeLog << 3) | addr0;
    DCHECK_EQ(kAccessSizeLog, size_log());
    DCHECK_EQ(addr0, this->addr0());
  }

  void SetWrite(unsigned kAccessIsWrite) {
    DCHECK_EQ(x_ & kReadBit, 0);
    if (!kAccessIsWrite)
      x_ |= kReadBit;
    DCHECK_EQ(kAccessIsWrite, IsWrite());
  }

  void SetAtomic(bool kIsAtomic) {
    DCHECK(!IsAtomic());
    if (kIsAtomic)
      x_ |= kAtomicBit;
    DCHECK_EQ(IsAtomic(), kIsAtomic);
  }

  bool IsAtomic() const {
    return x_ & kAtomicBit;
  }

  bool IsZero() const {
    return x_ == 0;
  }

  static inline bool TidsAreEqual(const Shadow s1, const Shadow s2) {
    u64 shifted_xor = (s1.x_ ^ s2.x_) >> kTidShift;
    DCHECK_EQ(shifted_xor == 0, s1.TidWithIgnore() == s2.TidWithIgnore());
    return shifted_xor == 0;
  }

  static inline bool Addr0AndSizeAreEqual(const Shadow s1, const Shadow s2) {
    u64 masked_xor = (s1.x_ ^ s2.x_) & 31;
    return masked_xor == 0;
  }

  static inline bool TwoRangesIntersect(Shadow s1, Shadow s2,
      unsigned kS2AccessSize) {
    bool res = false;
    u64 diff = s1.addr0() - s2.addr0();
    if ((s64)diff < 0) {  // s1.addr0 < s2.addr0  // NOLINT
      // if (s1.addr0() + size1) > s2.addr0()) return true;
      if (s1.size() > -diff)  res = true;
    } else {
      // if (s2.addr0() + kS2AccessSize > s1.addr0()) return true;
      if (kS2AccessSize > diff) res = true;
    }
    DCHECK_EQ(res, TwoRangesIntersectSLOW(s1, s2));
    DCHECK_EQ(res, TwoRangesIntersectSLOW(s2, s1));
    return res;
  }

  // The idea behind the offset is as follows.
  // Consider that we have 8 bool's contained within a single 8-byte block
  // (mapped to a single shadow "cell"). Now consider that we write to the
  // bools from a single thread (which we consider the common case).
  // Without offsetting, each access would have to scan 4 shadow values on
  // average to find the shadow value corresponding to the bool.
  // With offsetting we start scanning the shadow at the offset, so that each
  // access hits the necessary shadow value straight off (at least in the
  // expected optimistic case).
  // This logic works seamlessly for any layout of user data. For example,
  // if user data is {int, short, char, char}, then accesses to the int are
  // offset to 0, the short to 4, the 1st char to 6 and the 2nd char to 7.
  // Hopefully, accesses from a single thread won't need to scan all 8
  // shadow values.
  unsigned ComputeSearchOffset() {
    return x_ & 7;
  }
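  // Example (a hedged sketch of the idea described above): a 1-byte write
  // to byte 5 of an 8-byte block produces addr0 == 5, so
  // ComputeSearchOffset() yields 5 and the scan of the cell's shadow slots
  // can begin there, typically matching immediately for single-threaded
  // access patterns:
  //
  //   Shadow cur(fs);  // fs is this thread's FastState
  //   cur.SetAddr0AndSizeLog(/*addr0=*/5, kSizeLog1);
  //   unsigned off = cur.ComputeSearchOffset();  // == 5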
  u64 addr0() const { return x_ & 7; }
  u64 size() const { return 1ull << size_log(); }
  bool IsWrite() const { return !IsRead(); }
  bool IsRead() const { return x_ & kReadBit; }

  // The idea behind the freed bit is as follows.
  // When the memory is freed (or otherwise becomes inaccessible) we write to
  // the shadow values with the tid/epoch related to the free and with the
  // freed bit set. During memory access processing the freed bit is treated
  // as the msb of the tid. So any access races with a shadow value that has
  // the freed bit set (it is as if it were a write from a thread with which
  // we have never synchronized). This allows us to detect accesses to freed
  // memory without additional overhead in memory access processing, and at
  // the same time to restore the tid/epoch of the free.
  void MarkAsFreed() {
    x_ |= kFreedBit;
  }

  bool IsFreed() const {
    return x_ & kFreedBit;
  }

  bool GetFreedAndReset() {
    bool res = x_ & kFreedBit;
    x_ &= ~kFreedBit;
    return res;
  }

  bool IsBothReadsOrAtomic(bool kIsWrite, bool kIsAtomic) const {
    // analyzes 5-th bit (is_read) and 6-th bit (is_atomic)
    bool v = x_ & u64(((kIsWrite ^ 1) << kReadShift)
        | (kIsAtomic << kAtomicShift));
    DCHECK_EQ(v, (!IsWrite() && !kIsWrite) || (IsAtomic() && kIsAtomic));
    return v;
  }

  bool IsRWNotWeaker(bool kIsWrite, bool kIsAtomic) const {
    bool v = ((x_ >> kReadShift) & 3)
        <= u64((kIsWrite ^ 1) | (kIsAtomic << 1));
    DCHECK_EQ(v, (IsAtomic() < kIsAtomic) ||
        (IsAtomic() == kIsAtomic && !IsWrite() <= !kIsWrite));
    return v;
  }

  bool IsRWWeakerOrEqual(bool kIsWrite, bool kIsAtomic) const {
    bool v = ((x_ >> kReadShift) & 3)
        >= u64((kIsWrite ^ 1) | (kIsAtomic << 1));
    DCHECK_EQ(v, (IsAtomic() > kIsAtomic) ||
        (IsAtomic() == kIsAtomic && !IsWrite() >= !kIsWrite));
    return v;
  }

 private:
  static const u64 kReadShift   = 5;
  static const u64 kReadBit     = 1ull << kReadShift;
  static const u64 kAtomicShift = 6;
  static const u64 kAtomicBit   = 1ull << kAtomicShift;

  u64 size_log() const { return (x_ >> 3) & 3; }

  static bool TwoRangesIntersectSLOW(const Shadow s1, const Shadow s2) {
    if (s1.addr0() == s2.addr0()) return true;
    if (s1.addr0() < s2.addr0() && s1.addr0() + s1.size() > s2.addr0())
      return true;
    if (s2.addr0() < s1.addr0() && s2.addr0() + s2.size() > s1.addr0())
      return true;
    return false;
  }
};
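// Example (a hedged sketch of how the Shadow predicates above combine; the
// real fast path lives in MemoryAccessImpl in tsan_rtl.cc): two accesses to
// the same shadow cell constitute a potential race only if they come from
// different threads, their byte ranges overlap, and they are not both
// reads/atomics. The epoch-based happens-before check is omitted from this
// sketch:
//
//   bool PotentialRace(Shadow cur, Shadow old,
//                      unsigned kAccessSize, bool kIsWrite, bool kIsAtomic) {
//     return !Shadow::TidsAreEqual(cur, old) &&
//            Shadow::TwoRangesIntersect(old, cur, kAccessSize) &&
//            !old.IsBothReadsOrAtomic(kIsWrite, kIsAtomic);
//   }
//
// Note that a freed shadow value races automatically: the freed bit acts as
// the msb of the tid, so TidsAreEqual() never holds against a live thread.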
struct SignalContext;

struct JmpBuf {
  uptr sp;
  uptr mangled_sp;
  uptr *shadow_stack_pos;
};

// This struct is stored in TLS.
struct ThreadState {
  FastState fast_state;
  // Synch epoch represents the thread's epoch before the last synchronization
  // action. It allows us to reduce the number of shadow state updates.
  // For example, if fast_synch_epoch=100, the last write to addr X was at
  // epoch=150, and we are processing a write to X from the same thread at
  // epoch=200, we do nothing, because both writes happen in the same
  // 'synch epoch'. That is, if another memory access does not race with the
  // former write, it does not race with the latter as well.
  // QUESTION: can we squeeze this into ThreadState::Fast?
  // E.g. ThreadState::Fast is 44 bits, 32 are taken by synch_epoch and 12 are
  // taken by the epoch between synchs.
  // This way we can save one load from tls.
  u64 fast_synch_epoch;
  // This is a slow path flag. On fast path, fast_state.GetIgnoreBit() is read.
  // We do not distinguish between ignoring reads and writes
  // for better performance.
  int ignore_reads_and_writes;
  int ignore_sync;
  // Go does not support ignores.
#ifndef TSAN_GO
  IgnoreSet mop_ignore_set;
  IgnoreSet sync_ignore_set;
#endif
  // C/C++ uses a fixed-size shadow stack embedded into Trace.
  // Go uses a malloc-allocated shadow stack with dynamic size.
  uptr *shadow_stack;
  uptr *shadow_stack_end;
  uptr *shadow_stack_pos;
  u64 *racy_shadow_addr;
  u64 racy_state[2];
  MutexSet mset;
  ThreadClock clock;
#ifndef TSAN_GO
  AllocatorCache alloc_cache;
  InternalAllocatorCache internal_alloc_cache;
  Vector<JmpBuf> jmp_bufs;
  int ignore_interceptors;
#endif
  u64 stat[StatCnt];
  const int tid;
  const int unique_id;
  bool in_symbolizer;
  bool in_ignored_lib;
  bool is_alive;
  bool is_freeing;
  bool is_vptr_access;
  const uptr stk_addr;
  const uptr stk_size;
  const uptr tls_addr;
  const uptr tls_size;
  ThreadContext *tctx;

  InternalDeadlockDetector internal_deadlock_detector;
  DDPhysicalThread *dd_pt;
  DDLogicalThread *dd_lt;

  bool in_signal_handler;
  SignalContext *signal_ctx;

#ifndef TSAN_GO
  u32 last_sleep_stack_id;
  ThreadClock last_sleep_clock;
#endif

  // Set in regions of runtime that must be signal-safe and fork-safe.
  // If set, malloc must not be called.
  int nomalloc;

  explicit ThreadState(Context *ctx, int tid, int unique_id, u64 epoch,
                       unsigned reuse_count,
                       uptr stk_addr, uptr stk_size,
                       uptr tls_addr, uptr tls_size);
};

#ifndef TSAN_GO
__attribute__((tls_model("initial-exec")))
extern THREADLOCAL char cur_thread_placeholder[];
INLINE ThreadState *cur_thread() {
  return reinterpret_cast<ThreadState *>(&cur_thread_placeholder);
}
#endif
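// Example (a hedged sketch; the actual construction happens in the runtime
// .cc files, not here): per the ground rules above no static constructors
// may run, so ThreadState is created with placement new over the raw
// thread-local buffer when a thread starts:
//
//   ThreadState *thr = cur_thread();
//   new(thr) ThreadState(ctx, tid, unique_id, epoch, reuse_count,
//                        stk_addr, stk_size, tls_addr, tls_size);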
class ThreadContext : public ThreadContextBase {
 public:
  explicit ThreadContext(int tid);
  ~ThreadContext();
  ThreadState *thr;
  u32 creation_stack_id;
  SyncClock sync;
  // Epoch at which the thread started.
  // If we see an event from the thread stamped by an older epoch,
  // the event is from a dead thread that shared tid with this thread.
  u64 epoch0;
  u64 epoch1;

  // Override superclass callbacks.
  void OnDead();
  void OnJoined(void *arg);
  void OnFinished();
  void OnStarted(void *arg);
  void OnCreated(void *arg);
  void OnReset();
};

struct RacyStacks {
  MD5Hash hash[2];
  bool operator==(const RacyStacks &other) const {
    if (hash[0] == other.hash[0] && hash[1] == other.hash[1])
      return true;
    if (hash[0] == other.hash[1] && hash[1] == other.hash[0])
      return true;
    return false;
  }
};

struct RacyAddress {
  uptr addr_min;
  uptr addr_max;
};

struct FiredSuppression {
  ReportType type;
  uptr pc;
  Suppression *supp;
};

struct Context {
  Context();

  bool initialized;
  bool after_multithreaded_fork;

  SyncTab synctab;

  Mutex report_mtx;
  int nreported;
  int nmissed_expected;
  atomic_uint64_t last_symbolize_time_ns;

  void *background_thread;
  atomic_uint32_t stop_background_thread;

  ThreadRegistry *thread_registry;

  Vector<RacyStacks> racy_stacks;
  Vector<RacyAddress> racy_addresses;
  // The number of fired suppressions may be large,
  // so they are stored in an mmap-backed vector.
  InternalMmapVector<FiredSuppression> fired_suppressions;
  DDetector *dd;

  Flags flags;

  u64 stat[StatCnt];
  u64 int_alloc_cnt[MBlockTypeCount];
  u64 int_alloc_siz[MBlockTypeCount];
};

extern Context *ctx;  // The one and only global runtime context.

struct ScopedIgnoreInterceptors {
  ScopedIgnoreInterceptors() {
#ifndef TSAN_GO
    cur_thread()->ignore_interceptors++;
#endif
  }

  ~ScopedIgnoreInterceptors() {
#ifndef TSAN_GO
    cur_thread()->ignore_interceptors--;
#endif
  }
};

class ScopedReport {
 public:
  explicit ScopedReport(ReportType typ);
  ~ScopedReport();

  void AddStack(const StackTrace *stack);
  void AddMemoryAccess(uptr addr, Shadow s, const StackTrace *stack,
                       const MutexSet *mset);
  void AddThread(const ThreadContext *tctx);
  void AddThread(int unique_tid);
  void AddUniqueTid(int unique_tid);
  void AddMutex(const SyncVar *s);
  u64 AddMutex(u64 id);
  void AddLocation(uptr addr, uptr size);
  void AddSleep(u32 stack_id);
  void SetCount(int count);

  const ReportDesc *GetReport() const;

 private:
  ReportDesc *rep_;
  // Symbolizer makes lots of intercepted calls. If we try to process them,
  // at best it will cause deadlocks on internal mutexes.
  ScopedIgnoreInterceptors ignore_interceptors_;

  void AddDeadMutex(u64 id);

  ScopedReport(const ScopedReport&);
  void operator = (const ScopedReport&);
};
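// Example (a hedged sketch of typical usage inside the runtime; see
// tsan_rtl_report.cc for the real code paths): a report is assembled under
// ctx->report_mtx and handed to OutputReport(), declared below:
//
//   ScopedReport rep(ReportTypeRace);
//   rep.AddMemoryAccess(addr, s, &trace, &mset);
//   rep.AddThread(tctx);
//   OutputReport(ctx, rep, rep.GetReport()->mops[0]->stack);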
void RestoreStack(int tid, const u64 epoch, StackTrace *stk, MutexSet *mset);

void StatAggregate(u64 *dst, u64 *src);
void StatOutput(u64 *stat);
void ALWAYS_INLINE StatInc(ThreadState *thr, StatType typ, u64 n = 1) {
  if (kCollectStats)
    thr->stat[typ] += n;
}
void ALWAYS_INLINE StatSet(ThreadState *thr, StatType typ, u64 n) {
  if (kCollectStats)
    thr->stat[typ] = n;
}

void MapShadow(uptr addr, uptr size);
void MapThreadTrace(uptr addr, uptr size);
void DontNeedShadowFor(uptr addr, uptr size);
void InitializeShadowMemory();
void InitializeInterceptors();
void InitializeLibIgnore();
void InitializeDynamicAnnotations();

void ForkBefore(ThreadState *thr, uptr pc);
void ForkParentAfter(ThreadState *thr, uptr pc);
void ForkChildAfter(ThreadState *thr, uptr pc);

void ReportRace(ThreadState *thr);
bool OutputReport(Context *ctx,
                  const ScopedReport &srep,
                  const ReportStack *suppress_stack1,
                  const ReportStack *suppress_stack2 = 0,
                  const ReportLocation *suppress_loc = 0);
bool IsFiredSuppression(Context *ctx,
                        const ScopedReport &srep,
                        const StackTrace &trace);
bool IsExpectedReport(uptr addr, uptr size);
void PrintMatchedBenignRaces();
bool FrameIsInternal(const ReportStack *frame);
ReportStack *SkipTsanInternalFrames(ReportStack *ent);

#if defined(TSAN_DEBUG_OUTPUT) && TSAN_DEBUG_OUTPUT >= 1
# define DPrintf Printf
#else
# define DPrintf(...)
#endif

#if defined(TSAN_DEBUG_OUTPUT) && TSAN_DEBUG_OUTPUT >= 2
# define DPrintf2 Printf
#else
# define DPrintf2(...)
#endif

u32 CurrentStackId(ThreadState *thr, uptr pc);
ReportStack *SymbolizeStackId(u32 stack_id);
void PrintCurrentStack(ThreadState *thr, uptr pc);
void PrintCurrentStackSlow();  // uses libunwind

void Initialize(ThreadState *thr);
int Finalize(ThreadState *thr);

SyncVar* GetJavaSync(ThreadState *thr, uptr pc, uptr addr,
                     bool write_lock, bool create);
SyncVar* GetAndRemoveJavaSync(ThreadState *thr, uptr pc, uptr addr);

void MemoryAccess(ThreadState *thr, uptr pc, uptr addr,
    int kAccessSizeLog, bool kAccessIsWrite, bool kIsAtomic);
void MemoryAccessImpl(ThreadState *thr, uptr addr,
    int kAccessSizeLog, bool kAccessIsWrite, bool kIsAtomic,
    u64 *shadow_mem, Shadow cur);
void MemoryAccessRange(ThreadState *thr, uptr pc, uptr addr,
    uptr size, bool is_write);
void MemoryAccessRangeStep(ThreadState *thr, uptr pc, uptr addr,
    uptr size, uptr step, bool is_write);
void UnalignedMemoryAccess(ThreadState *thr, uptr pc, uptr addr,
    int size, bool kAccessIsWrite, bool kIsAtomic);

const int kSizeLog1 = 0;
const int kSizeLog2 = 1;
const int kSizeLog4 = 2;
const int kSizeLog8 = 3;

void ALWAYS_INLINE MemoryRead(ThreadState *thr, uptr pc,
                              uptr addr, int kAccessSizeLog) {
  MemoryAccess(thr, pc, addr, kAccessSizeLog, false, false);
}

void ALWAYS_INLINE MemoryWrite(ThreadState *thr, uptr pc,
                               uptr addr, int kAccessSizeLog) {
  MemoryAccess(thr, pc, addr, kAccessSizeLog, true, false);
}

void ALWAYS_INLINE MemoryReadAtomic(ThreadState *thr, uptr pc,
                                    uptr addr, int kAccessSizeLog) {
  MemoryAccess(thr, pc, addr, kAccessSizeLog, false, true);
}

void ALWAYS_INLINE MemoryWriteAtomic(ThreadState *thr, uptr pc,
                                     uptr addr, int kAccessSizeLog) {
  MemoryAccess(thr, pc, addr, kAccessSizeLog, true, true);
}

void MemoryResetRange(ThreadState *thr, uptr pc, uptr addr, uptr size);
void MemoryRangeFreed(ThreadState *thr, uptr pc, uptr addr, uptr size);
void MemoryRangeImitateWrite(ThreadState *thr, uptr pc, uptr addr, uptr size);

void ThreadIgnoreBegin(ThreadState *thr, uptr pc);
void ThreadIgnoreEnd(ThreadState *thr, uptr pc);
void ThreadIgnoreSyncBegin(ThreadState *thr, uptr pc);
void ThreadIgnoreSyncEnd(ThreadState *thr, uptr pc);

void FuncEntry(ThreadState *thr, uptr pc);
void FuncExit(ThreadState *thr);

int ThreadCreate(ThreadState *thr, uptr pc, uptr uid, bool detached);
void ThreadStart(ThreadState *thr, int tid, uptr os_id);
void ThreadFinish(ThreadState *thr);
int ThreadTid(ThreadState *thr, uptr pc, uptr uid);
void ThreadJoin(ThreadState *thr, uptr pc, int tid);
void ThreadDetach(ThreadState *thr, uptr pc, int tid);
void ThreadFinalize(ThreadState *thr);
void ThreadSetName(ThreadState *thr, const char *name);
int ThreadCount(ThreadState *thr);
void ProcessPendingSignals(ThreadState *thr);

void MutexCreate(ThreadState *thr, uptr pc, uptr addr,
                 bool rw, bool recursive, bool linker_init);
void MutexDestroy(ThreadState *thr, uptr pc, uptr addr);
void MutexLock(ThreadState *thr, uptr pc, uptr addr, int rec = 1,
               bool try_lock = false);
int  MutexUnlock(ThreadState *thr, uptr pc, uptr addr, bool all = false);
void MutexReadLock(ThreadState *thr, uptr pc, uptr addr, bool try_lock = false);
void MutexReadUnlock(ThreadState *thr, uptr pc, uptr addr);
void MutexReadOrWriteUnlock(ThreadState *thr, uptr pc, uptr addr);
void MutexRepair(ThreadState *thr, uptr pc, uptr addr);  // call on EOWNERDEAD
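// Example (a hedged, simplified sketch of how an interceptor reports a lock
// acquisition; the real interceptors in tsan_interceptors.cc also handle
// recursion, errors and ignores):
//
//   int res = REAL(pthread_mutex_lock)(m);
//   if (res == 0)
//     MutexLock(cur_thread(), pc, (uptr)m);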
void Acquire(ThreadState *thr, uptr pc, uptr addr);
void AcquireGlobal(ThreadState *thr, uptr pc);
void Release(ThreadState *thr, uptr pc, uptr addr);
void ReleaseStore(ThreadState *thr, uptr pc, uptr addr);
void AfterSleep(ThreadState *thr, uptr pc);
void AcquireImpl(ThreadState *thr, uptr pc, SyncClock *c);
void ReleaseImpl(ThreadState *thr, uptr pc, SyncClock *c);
void ReleaseStoreImpl(ThreadState *thr, uptr pc, SyncClock *c);
void AcquireReleaseImpl(ThreadState *thr, uptr pc, SyncClock *c);

// The hacky call uses a custom calling convention and an assembly thunk.
// It is considerably faster than a normal call for the caller
// if it is not executed (it is intended for slow paths from hot functions).
// The trick is that the call preserves all registers and the compiler
// does not treat it as a call.
// If it does not work for you, use a normal call.
#if TSAN_DEBUG == 0
// The caller may not create the stack frame for itself at all,
// so we create a reserve stack frame for it (1024 bytes must be enough).
#define HACKY_CALL(f) \
  __asm__ __volatile__("sub $1024, %%rsp;" \
                       CFI_INL_ADJUST_CFA_OFFSET(1024) \
                       ".hidden " #f "_thunk;" \
                       "call " #f "_thunk;" \
                       "add $1024, %%rsp;" \
                       CFI_INL_ADJUST_CFA_OFFSET(-1024) \
                       ::: "memory", "cc");
#else
#define HACKY_CALL(f) f()
#endif

void TraceSwitch(ThreadState *thr);
uptr TraceTopPC(ThreadState *thr);
uptr TraceSize();
uptr TraceParts();
Trace *ThreadTrace(int tid);

extern "C" void __tsan_trace_switch();
void ALWAYS_INLINE TraceAddEvent(ThreadState *thr, FastState fs,
                                 EventType typ, u64 addr) {
  if (!kCollectHistory)
    return;
  DCHECK_GE((int)typ, 0);
  DCHECK_LE((int)typ, 7);
  DCHECK_EQ(GetLsb(addr, 61), addr);
  StatInc(thr, StatEvents);
  u64 pos = fs.GetTracePos();
  if (UNLIKELY((pos % kTracePartSize) == 0)) {
#ifndef TSAN_GO
    HACKY_CALL(__tsan_trace_switch);
#else
    TraceSwitch(thr);
#endif
  }
  Event *trace = (Event*)GetThreadTrace(fs.tid());
  Event *evp = &trace[pos];
  Event ev = (u64)addr | ((u64)typ << 61);
  *evp = ev;
}

}  // namespace __tsan

#endif  // TSAN_RTL_H