tsan_rtl.h revision 9d150bdb433ddd092073dabd87ba15aa176603a1
//===-- tsan_rtl.h ----------------------------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
// Main internal TSan header file.
//
// Ground rules:
//   - C++ run-time should not be used (static CTORs, RTTI, exceptions, static
//     function-scope locals)
//   - All functions/classes/etc reside in namespace __tsan, except for those
//     declared in tsan_interface.h.
//   - Platform-specific files should be used instead of ifdefs (*).
//   - No system headers included in header files (*).
//   - Platform-specific headers included only into platform-specific files (*).
//
//   (*) Except when inlining is critical for performance.
//===----------------------------------------------------------------------===//

#ifndef TSAN_RTL_H
#define TSAN_RTL_H

#include "sanitizer_common/sanitizer_common.h"
#include "tsan_clock.h"
#include "tsan_defs.h"
#include "tsan_flags.h"
#include "tsan_sync.h"
#include "tsan_trace.h"
#include "tsan_vector.h"
#include "tsan_report.h"

namespace __tsan {

void TsanPrintf(const char *format, ...);

// FastState (from most significant bit):
//   unused     : 1
//   tid        : kTidBits
//   epoch      : kClkBits
//   unused     : -
//   ignore_bit : 1
class FastState {
 public:
  FastState(u64 tid, u64 epoch) {
    x_ = tid << kTidShift;
    x_ |= epoch << kClkShift;
    DCHECK(tid == this->tid());
    DCHECK(epoch == this->epoch());
  }

  explicit FastState(u64 x)
      : x_(x) {
  }

  u64 tid() const {
    u64 res = x_ >> kTidShift;
    return res;
  }

  u64 epoch() const {
    u64 res = (x_ << (kTidBits + 1)) >> (64 - kClkBits);
    return res;
  }

  void IncrementEpoch() {
    u64 old_epoch = epoch();
    x_ += 1 << kClkShift;
    DCHECK_EQ(old_epoch + 1, epoch());
    (void)old_epoch;
  }

  void SetIgnoreBit() { x_ |= kIgnoreBit; }
  void ClearIgnoreBit() { x_ &= ~kIgnoreBit; }
  bool GetIgnoreBit() const { return x_ & kIgnoreBit; }

 private:
  friend class Shadow;
  static const int kTidShift = 64 - kTidBits - 1;
  static const int kClkShift = kTidShift - kClkBits;
  static const u64 kIgnoreBit = 1ull;
  static const u64 kFreedBit = 1ull << 63;
  u64 x_;
};
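
// Illustrative sketch of FastState behavior (not part of the interface; the
// tid/epoch values below are arbitrary, and the field widths kTidBits/kClkBits
// come from tsan_defs.h):
//   FastState s(/*tid=*/5, /*epoch=*/10);
//   s.tid();             // == 5
//   s.epoch();           // == 10
//   s.IncrementEpoch();  // epoch() == 11, tid() unchanged (absent overflow)
//   s.SetIgnoreBit();    // GetIgnoreBit() == true; tid()/epoch() unaffected,
//                        // since the ignore bit lives in the unused low bits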

// Shadow (from most significant bit):
//   freed    : 1
//   tid      : kTidBits
//   epoch    : kClkBits
//   is_write : 1
//   size_log : 2
//   addr0    : 3
class Shadow : public FastState {
 public:
  explicit Shadow(u64 x) : FastState(x) { }

  explicit Shadow(const FastState &s) : FastState(s.x_) { }

  void SetAddr0AndSizeLog(u64 addr0, unsigned kAccessSizeLog) {
    DCHECK_EQ(x_ & 31, 0);
    DCHECK_LE(addr0, 7);
    DCHECK_LE(kAccessSizeLog, 3);
    x_ |= (kAccessSizeLog << 3) | addr0;
    DCHECK_EQ(kAccessSizeLog, size_log());
    DCHECK_EQ(addr0, this->addr0());
  }

  void SetWrite(unsigned kAccessIsWrite) {
    DCHECK_EQ(x_ & 32, 0);
    if (kAccessIsWrite)
      x_ |= 32;
    DCHECK_EQ(kAccessIsWrite, is_write());
  }

  bool IsZero() const { return x_ == 0; }
  u64 raw() const { return x_; }

  static inline bool TidsAreEqual(const Shadow s1, const Shadow s2) {
    u64 shifted_xor = (s1.x_ ^ s2.x_) >> kTidShift;
    DCHECK_EQ(shifted_xor == 0, s1.tid() == s2.tid());
    return shifted_xor == 0;
  }

  static inline bool Addr0AndSizeAreEqual(const Shadow s1, const Shadow s2) {
    u64 masked_xor = (s1.x_ ^ s2.x_) & 31;
    return masked_xor == 0;
  }

  static inline bool TwoRangesIntersect(Shadow s1, Shadow s2,
      unsigned kS2AccessSize) {
    bool res = false;
    u64 diff = s1.addr0() - s2.addr0();
    if ((s64)diff < 0) {  // s1.addr0 < s2.addr0  // NOLINT
      // if (s1.addr0() + s1.size() > s2.addr0()) return true;
      if (s1.size() > -diff)  res = true;
    } else {
      // if (s2.addr0() + kS2AccessSize > s1.addr0()) return true;
      if (kS2AccessSize > diff) res = true;
    }
    DCHECK_EQ(res, TwoRangesIntersectSLOW(s1, s2));
    DCHECK_EQ(res, TwoRangesIntersectSLOW(s2, s1));
    return res;
  }

  // The idea behind the offset is as follows.
  // Consider that we have 8 bools contained within a single 8-byte block
  // (mapped to a single shadow "cell"). Now consider that we write to the
  // bools from a single thread (which we consider the common case).
  // Without offsetting, each access would have to scan 4 shadow values on
  // average to find the shadow value corresponding to the bool.
  // With offsetting, we start scanning the shadow at the offset, so that
  // each access hits the necessary shadow value straight away (at least in
  // the expected optimistic case).
  // This logic works seamlessly for any layout of user data. For example,
  // if user data is {int, short, char, char}, then accesses to the int are
  // offset to 0, to the short - to 4, to the 1st char - to 6, to the 2nd
  // char - to 7. Hopefully, accesses from a single thread won't need to scan
  // all 8 shadow values.
  unsigned ComputeSearchOffset() {
    return x_ & 7;
  }
  u64 addr0() const { return x_ & 7; }
  u64 size() const { return 1ull << size_log(); }
  bool is_write() const { return x_ & 32; }

  // The idea behind the freed bit is as follows.
  // When the memory is freed (or otherwise becomes inaccessible) we write
  // shadow values with the tid/epoch of the free and with the freed bit set.
  // During memory access processing the freed bit is treated as the msb of
  // the tid. So any access races with a shadow value that has the freed bit
  // set (it is as if it were a write by a thread with which we have never
  // synchronized before). This allows us to detect accesses to freed memory
  // w/o additional overhead in memory access processing, and at the same time
  // to restore the tid/epoch of the free.
  void MarkAsFreed() {
    x_ |= kFreedBit;
  }

  bool GetFreedAndReset() {
    bool res = x_ & kFreedBit;
    x_ &= ~kFreedBit;
    return res;
  }

 private:
  u64 size_log() const { return (x_ >> 3) & 3; }

  static bool TwoRangesIntersectSLOW(const Shadow s1, const Shadow s2) {
    if (s1.addr0() == s2.addr0()) return true;
    if (s1.addr0() < s2.addr0() && s1.addr0() + s1.size() > s2.addr0())
      return true;
    if (s2.addr0() < s1.addr0() && s2.addr0() + s2.size() > s1.addr0())
      return true;
    return false;
  }
};
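
// Worked example for Shadow::TwoRangesIntersect (illustrative values only):
//   s1: addr0() == 0, size() == 2  -> covers bytes [0, 2) of the cell
//   s2: addr0() == 1, size() == 2  -> covers bytes [1, 3)
// Here diff == (u64)-1, so (s64)diff < 0 and -diff == 1; s1.size() == 2 > 1,
// hence the ranges intersect.
// Conversely, for s1: addr0() == 4, size() == 1 and s2: addr0() == 0 with
// kS2AccessSize == 4, diff == 4 and 4 > 4 is false, so [4, 5) and [0, 4)
// do not intersect.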

// Freed memory.
// As if an 8-byte write by thread 0xff..f at epoch 0xff..f; races with
// everything.
const u64 kShadowFreed = 0xfffffffffffffff8ull;

struct SignalContext;

// This struct is stored in TLS.
struct ThreadState {
  FastState fast_state;
  // Synch epoch represents the thread's epoch before the last synchronization
  // action. It allows us to reduce the number of shadow state updates.
  // For example, if fast_synch_epoch == 100 and the last write to addr X was
  // at epoch == 150, and we are processing a write to X from the same thread
  // at epoch == 200, we do nothing, because both writes happen in the same
  // 'synch epoch'. That is, if another memory access does not race with the
  // former write, it does not race with the latter as well.
  // QUESTION: can we squeeze this into ThreadState::Fast?
  // E.g. ThreadState::Fast is 44 bits, 32 are taken by synch_epoch and 12 by
  // the epoch between synchs.
  // This way we can save one load from TLS.
  u64 fast_synch_epoch;
  // This is a slow path flag. On the fast path, fast_state.GetIgnoreBit() is
  // read instead. We do not distinguish between ignoring reads and writes
  // for better performance.
  int ignore_reads_and_writes;
  uptr *shadow_stack_pos;
  u64 *racy_shadow_addr;
  u64 racy_state[2];
  Trace trace;
#ifndef TSAN_GO
  // C/C++ uses an embedded shadow stack of fixed size.
  uptr shadow_stack[kShadowStackSize];
#else
  // Go uses a satellite shadow stack with dynamic size.
  uptr *shadow_stack;
  uptr *shadow_stack_end;
#endif
  ThreadClock clock;
  u64 stat[StatCnt];
  const int tid;
  int in_rtl;
  bool is_alive;
  const uptr stk_addr;
  const uptr stk_size;
  const uptr tls_addr;
  const uptr tls_size;

  DeadlockDetector deadlock_detector;

  bool in_signal_handler;
  SignalContext *signal_ctx;

  // Set in regions of the runtime that must be signal-safe and fork-safe.
  // If set, malloc must not be called.
  int nomalloc;

  explicit ThreadState(Context *ctx, int tid, u64 epoch,
                       uptr stk_addr, uptr stk_size,
                       uptr tls_addr, uptr tls_size);
};

Context *CTX();

#ifndef TSAN_GO
extern THREADLOCAL char cur_thread_placeholder[];
INLINE ThreadState *cur_thread() {
  return reinterpret_cast<ThreadState *>(&cur_thread_placeholder);
}
#endif

enum ThreadStatus {
  ThreadStatusInvalid,   // Non-existent thread, data is invalid.
  ThreadStatusCreated,   // Created but not yet running.
  ThreadStatusRunning,   // The thread is currently running.
  ThreadStatusFinished,  // Joinable thread is finished but not yet joined.
  ThreadStatusDead,      // Joined, but some info (trace) is still alive.
};

// Information about a thread that is held for some time after its
// termination.
struct ThreadDeadInfo {
  Trace trace;
};

struct ThreadContext {
  const int tid;
  int unique_id;  // Non-rolling thread id.
  uptr user_id;   // Some opaque user thread id (e.g. pthread_t).
  ThreadState *thr;
  ThreadStatus status;
  bool detached;
  int reuse_count;
  SyncClock sync;
  // Epoch at which the thread started.
  // If we see an event from the thread stamped with an older epoch,
  // the event is from a dead thread that shared its tid with this thread.
  u64 epoch0;
  u64 epoch1;
  StackTrace creation_stack;
  ThreadDeadInfo *dead_info;
  ThreadContext *dead_next;  // In dead thread list.

  explicit ThreadContext(int tid);
};
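
// Sketch of how epoch0 is meant to be used (illustrative only; the real
// checks live in the runtime .cc files, and event_epoch is a hypothetical
// name for an epoch extracted from a trace event):
//   if (event_epoch < tctx->epoch0) {
//     // Stale event: produced by a dead thread that previously owned this
//     // tid, not by the current thread.
//   }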

struct RacyStacks {
  MD5Hash hash[2];
  bool operator==(const RacyStacks &other) const {
    if (hash[0] == other.hash[0] && hash[1] == other.hash[1])
      return true;
    if (hash[0] == other.hash[1] && hash[1] == other.hash[0])
      return true;
    return false;
  }
};

struct RacyAddress {
  uptr addr_min;
  uptr addr_max;
};

struct Context {
  Context();

  bool initialized;

  SyncTab synctab;

  Mutex report_mtx;
  int nreported;
  int nmissed_expected;

  Mutex thread_mtx;
  unsigned thread_seq;
  unsigned unique_thread_seq;
  int alive_threads;
  int max_alive_threads;
  ThreadContext *threads[kMaxTid];
  int dead_list_size;
  ThreadContext *dead_list_head;
  ThreadContext *dead_list_tail;

  Vector<RacyStacks> racy_stacks;
  Vector<RacyAddress> racy_addresses;

  Flags flags;

  u64 stat[StatCnt];
  u64 int_alloc_cnt[MBlockTypeCount];
  u64 int_alloc_siz[MBlockTypeCount];
};

class ScopedInRtl {
 public:
  ScopedInRtl();
  ~ScopedInRtl();
 private:
  ThreadState *thr_;
  int in_rtl_;
  int errno_;
};

class ScopedReport {
 public:
  explicit ScopedReport(ReportType typ);
  ~ScopedReport();

  void AddStack(const StackTrace *stack);
  void AddMemoryAccess(uptr addr, Shadow s, const StackTrace *stack);
  void AddThread(const ThreadContext *tctx);
  void AddMutex(const SyncVar *s);
  void AddLocation(uptr addr, uptr size);

  const ReportDesc *GetReport() const;

 private:
  Context *ctx_;
  ReportDesc *rep_;

  ScopedReport(const ScopedReport&);
  void operator = (const ScopedReport&);
};

void StatAggregate(u64 *dst, u64 *src);
void StatOutput(u64 *stat);
void ALWAYS_INLINE INLINE StatInc(ThreadState *thr, StatType typ, u64 n = 1) {
  if (kCollectStats)
    thr->stat[typ] += n;
}

void InitializeShadowMemory();
void InitializeInterceptors();
void InitializeDynamicAnnotations();

void ReportRace(ThreadState *thr);
bool OutputReport(const ScopedReport &srep,
                  const ReportStack *suppress_stack = 0);
bool IsExpectedReport(uptr addr, uptr size);
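
// Sketch of the intended report construction sequence (illustrative only;
// the actual call sites are in the runtime .cc files and may differ in
// detail; assumes the ReportTypeRace enumerator from tsan_report.h):
//   ScopedReport rep(ReportTypeRace);
//   rep.AddMemoryAccess(addr, s, &stack);
//   rep.AddThread(tctx);
//   rep.AddLocation(addr, size);
//   OutputReport(rep);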

#if defined(TSAN_DEBUG_OUTPUT) && TSAN_DEBUG_OUTPUT >= 1
# define DPrintf TsanPrintf
#else
# define DPrintf(...)
#endif

#if defined(TSAN_DEBUG_OUTPUT) && TSAN_DEBUG_OUTPUT >= 2
# define DPrintf2 TsanPrintf
#else
# define DPrintf2(...)
#endif

void Initialize(ThreadState *thr);
int Finalize(ThreadState *thr);

void MemoryAccess(ThreadState *thr, uptr pc, uptr addr,
                  int kAccessSizeLog, bool kAccessIsWrite);
void MemoryAccessImpl(ThreadState *thr, uptr addr,
                      int kAccessSizeLog, bool kAccessIsWrite,
                      FastState fast_state, u64 *shadow_mem, Shadow cur);
void MemoryRead1Byte(ThreadState *thr, uptr pc, uptr addr);
void MemoryWrite1Byte(ThreadState *thr, uptr pc, uptr addr);
void MemoryRead8Byte(ThreadState *thr, uptr pc, uptr addr);
void MemoryWrite8Byte(ThreadState *thr, uptr pc, uptr addr);
void MemoryAccessRange(ThreadState *thr, uptr pc, uptr addr,
                       uptr size, bool is_write);
void MemoryResetRange(ThreadState *thr, uptr pc, uptr addr, uptr size);
void MemoryRangeFreed(ThreadState *thr, uptr pc, uptr addr, uptr size);
void IgnoreCtl(ThreadState *thr, bool write, bool begin);

void FuncEntry(ThreadState *thr, uptr pc);
void FuncExit(ThreadState *thr);

int ThreadCreate(ThreadState *thr, uptr pc, uptr uid, bool detached);
void ThreadStart(ThreadState *thr, int tid);
void ThreadFinish(ThreadState *thr);
int ThreadTid(ThreadState *thr, uptr pc, uptr uid);
void ThreadJoin(ThreadState *thr, uptr pc, int tid);
void ThreadDetach(ThreadState *thr, uptr pc, int tid);
void ThreadFinalize(ThreadState *thr);
void ThreadFinalizerGoroutine(ThreadState *thr);

void MutexCreate(ThreadState *thr, uptr pc, uptr addr, bool rw, bool recursive);
void MutexDestroy(ThreadState *thr, uptr pc, uptr addr);
void MutexLock(ThreadState *thr, uptr pc, uptr addr);
void MutexUnlock(ThreadState *thr, uptr pc, uptr addr);
void MutexReadLock(ThreadState *thr, uptr pc, uptr addr);
void MutexReadUnlock(ThreadState *thr, uptr pc, uptr addr);
void MutexReadOrWriteUnlock(ThreadState *thr, uptr pc, uptr addr);

void Acquire(ThreadState *thr, uptr pc, uptr addr);
void Release(ThreadState *thr, uptr pc, uptr addr);
void ReleaseStore(ThreadState *thr, uptr pc, uptr addr);

// The hacky call uses a custom calling convention and an assembly thunk.
// It is considerably faster than a normal call for the caller
// if it is not executed (it is intended for slow paths from hot functions).
// The trick is that the call preserves all registers and the compiler
// does not treat it as a call.
// If it does not work for you, use a normal call.
#if TSAN_DEBUG == 0
// The caller may not create a stack frame for itself at all,
// so we create a reserve stack frame for it (1024 bytes must be enough).
#define HACKY_CALL(f) \
  __asm__ __volatile__("sub $0x400, %%rsp;" \
                       "call " #f "_thunk;" \
                       "add $0x400, %%rsp;" ::: "memory");
#else
#define HACKY_CALL(f) f()
#endif

void TraceSwitch(ThreadState *thr);

extern "C" void __tsan_trace_switch();
// Appends an event to the thread trace; when the epoch crosses a trace part
// boundary, switches to the next trace part first.
void ALWAYS_INLINE INLINE TraceAddEvent(ThreadState *thr, u64 epoch,
                                        EventType typ, uptr addr) {
  StatInc(thr, StatEvents);
  if (UNLIKELY((epoch % kTracePartSize) == 0)) {
#ifndef TSAN_GO
    HACKY_CALL(__tsan_trace_switch);
#else
    TraceSwitch(thr);
#endif
  }
  Event *evp = &thr->trace.events[epoch % kTraceSize];
  Event ev = (u64)addr | ((u64)typ << 61);
  *evp = ev;
}

}  // namespace __tsan

#endif  // TSAN_RTL_H