tsan_rtl.h revision e96366613372e616fc6f2209b81b776f931a2c58
//===-- tsan_rtl.h ----------------------------------------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
// Main internal TSan header file.
//
// Ground rules:
//   - C++ run-time should not be used (static CTORs, RTTI, exceptions, static
//     function-scope locals)
//   - All functions/classes/etc reside in namespace __tsan, except for those
//     declared in tsan_interface.h.
//   - Platform-specific files should be used instead of ifdefs (*).
//   - No system headers included in header files (*).
//   - Platform-specific headers are included only in platform-specific
//     files (*).
//
//  (*) Except when inlining is critical for performance.
//===----------------------------------------------------------------------===//

#ifndef TSAN_RTL_H
#define TSAN_RTL_H

#include "sanitizer_common/sanitizer_common.h"
#include "tsan_clock.h"
#include "tsan_defs.h"
#include "tsan_flags.h"
#include "tsan_sync.h"
#include "tsan_trace.h"
#include "tsan_vector.h"
#include "tsan_report.h"

namespace __tsan {

void TsanPrintf(const char *format, ...);

// FastState (from most significant bit):
//   unused          : 1
//   tid             : kTidBits
//   epoch           : kClkBits
//   unused          : -
//   ignore_bit      : 1
class FastState {
 public:
  FastState(u64 tid, u64 epoch) {
    x_ = tid << kTidShift;
    x_ |= epoch << kClkShift;
    DCHECK(tid == this->tid());
    DCHECK(epoch == this->epoch());
  }

  explicit FastState(u64 x)
      : x_(x) {
  }

  u64 tid() const {
    u64 res = x_ >> kTidShift;
    return res;
  }

  u64 epoch() const {
    u64 res = (x_ << (kTidBits + 1)) >> (64 - kClkBits);
    return res;
  }

  void IncrementEpoch() {
    u64 old_epoch = epoch();
    // Use a 64-bit constant for the shift: plain 1 is an int and would
    // overflow if kClkShift ever exceeded 31.
    x_ += 1ull << kClkShift;
    DCHECK_EQ(old_epoch + 1, epoch());
    (void)old_epoch;
  }

  void SetIgnoreBit() { x_ |= kIgnoreBit; }
  void ClearIgnoreBit() { x_ &= ~kIgnoreBit; }
  bool GetIgnoreBit() const { return x_ & kIgnoreBit; }

 private:
  friend class Shadow;
  static const int kTidShift = 64 - kTidBits - 1;
  static const int kClkShift = kTidShift - kClkBits;
  static const u64 kIgnoreBit = 1ull;
  static const u64 kFreedBit = 1ull << 63;
  u64 x_;
};
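
// Illustrative sketch (hypothetical values, not part of the interface):
// how the fields pack and unpack, assuming the tid and epoch fit into
// kTidBits and kClkBits respectively:
//   FastState s(/*tid=*/3, /*epoch=*/7);
//   // Now s.tid() == 3 and s.epoch() == 7; after s.IncrementEpoch()
//   // s.epoch() == 8 while s.tid() is unchanged.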

// Shadow (from most significant bit):
//   freed           : 1
//   tid             : kTidBits
//   epoch           : kClkBits
//   is_write        : 1
//   size_log        : 2
//   addr0           : 3
class Shadow : public FastState {
 public:
  explicit Shadow(u64 x) : FastState(x) { }

  explicit Shadow(const FastState &s) : FastState(s.x_) { }

  void SetAddr0AndSizeLog(u64 addr0, unsigned kAccessSizeLog) {
    DCHECK_EQ(x_ & 31, 0);
    DCHECK_LE(addr0, 7);
    DCHECK_LE(kAccessSizeLog, 3);
    x_ |= (kAccessSizeLog << 3) | addr0;
    DCHECK_EQ(kAccessSizeLog, size_log());
    DCHECK_EQ(addr0, this->addr0());
  }

  void SetWrite(unsigned kAccessIsWrite) {
    DCHECK_EQ(x_ & 32, 0);
    if (kAccessIsWrite)
      x_ |= 32;
    DCHECK_EQ(kAccessIsWrite, is_write());
  }

  bool IsZero() const { return x_ == 0; }
  u64 raw() const { return x_; }

  static inline bool TidsAreEqual(const Shadow s1, const Shadow s2) {
    u64 shifted_xor = (s1.x_ ^ s2.x_) >> kTidShift;
    DCHECK_EQ(shifted_xor == 0, s1.tid() == s2.tid());
    return shifted_xor == 0;
  }

  static inline bool Addr0AndSizeAreEqual(const Shadow s1, const Shadow s2) {
    u64 masked_xor = (s1.x_ ^ s2.x_) & 31;
    return masked_xor == 0;
  }

  static inline bool TwoRangesIntersect(Shadow s1, Shadow s2,
      unsigned kS2AccessSize) {
    bool res = false;
    u64 diff = s1.addr0() - s2.addr0();
    if ((s64)diff < 0) {  // s1.addr0 < s2.addr0  // NOLINT
      // if (s1.addr0() + s1.size() > s2.addr0()) return true;
      if (s1.size() > -diff) res = true;
    } else {
      // if (s2.addr0() + kS2AccessSize > s1.addr0()) return true;
      if (kS2AccessSize > diff) res = true;
    }
    DCHECK_EQ(res, TwoRangesIntersectSLOW(s1, s2));
    DCHECK_EQ(res, TwoRangesIntersectSLOW(s2, s1));
    return res;
  }
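
  // Worked example for the fast path above (hypothetical values):
  // s1.addr0() == 2 with s1.size() == 4 and s2.addr0() == 4 give
  // diff == (u64)-2, so we take the first branch, and 4 > 2 means the
  // ranges [2, 6) and [4, 4 + kS2AccessSize) do intersect.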

  // The idea behind the offset is as follows.
  // Consider that we have 8 bools contained within a single 8-byte block
  // (mapped to a single shadow "cell"). Now consider that we write to the
  // bools from a single thread (which we consider the common case).
  // Without offsetting, each access would have to scan 4 shadow values on
  // average to find the shadow value corresponding to the bool.
  // With offsetting we start scanning the shadow at the offset, so that
  // each access hits the necessary shadow value straight away (at least in
  // the expected optimistic case).
  // This logic works seamlessly for any layout of user data. For example,
  // if user data is {int, short, char, char}, then accesses to the int are
  // offset to 0, the short to 4, the 1st char to 6 and the 2nd char to 7.
  // Hopefully, accesses from a single thread won't need to scan all 8
  // shadow values.
  unsigned ComputeSearchOffset() {
    return x_ & 7;
  }
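
  // For instance, in the 8-bool example above, the bool at byte offset 5
  // yields ComputeSearchOffset() == 5, so the scan starts at a different
  // slot for each of the 8 bools instead of all of them starting from
  // slot 0.
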
  u64 addr0() const { return x_ & 7; }
  u64 size() const { return 1ull << size_log(); }
  bool is_write() const { return x_ & 32; }

  // The idea behind the freed bit is as follows.
  // When the memory is freed (or otherwise becomes inaccessible) we write
  // shadow values with the tid/epoch of the free and the freed bit set.
  // During memory access processing the freed bit is treated as the msb of
  // the tid, so any access races with a shadow value that has the freed bit
  // set (it is as if it were a write from a thread with which we have never
  // synchronized). This allows us to detect accesses to freed memory without
  // additional overhead in memory access processing, and at the same time
  // to restore the tid/epoch of the free.
  void MarkAsFreed() {
    x_ |= kFreedBit;
  }

  bool GetFreedAndReset() {
    bool res = x_ & kFreedBit;
    x_ &= ~kFreedBit;
    return res;
  }

 private:
  u64 size_log() const { return (x_ >> 3) & 3; }

  static bool TwoRangesIntersectSLOW(const Shadow s1, const Shadow s2) {
    if (s1.addr0() == s2.addr0()) return true;
    if (s1.addr0() < s2.addr0() && s1.addr0() + s1.size() > s2.addr0())
      return true;
    if (s2.addr0() < s1.addr0() && s2.addr0() + s2.size() > s1.addr0())
      return true;
    return false;
  }
};
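
// Illustrative sketch of the low-bit encoding (hypothetical values): for a
// 2-byte write at offset 4 within its 8-byte cell,
//   s.SetAddr0AndSizeLog(/*addr0=*/4, /*kAccessSizeLog=*/1);  // 1<<1 == 2 bytes
//   s.SetWrite(true);
// sets the low 6 bits of the raw state to 0b101100, i.e.
// is_write|size_log|addr0 == 1|01|100.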

// Freed memory.
// As if an 8-byte write by thread 0xff..f at epoch 0xff..f; races with
// everything.
const u64 kShadowFreed = 0xfffffffffffffff8ull;
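// Decoding the constant: the low byte 0xf8 == 0b11111000 encodes addr0 == 0,
// size_log == 3 (an 8-byte access) and is_write == 1; the tid, epoch and
// freed bits are all ones, so the value conflicts with any real access.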

struct SignalContext;

// This struct is stored in TLS.
struct ThreadState {
  FastState fast_state;
  // Synch epoch represents the thread's epoch before the last synchronization
  // action. It allows us to reduce the number of shadow state updates.
  // For example, if fast_synch_epoch=100 and the last write to addr X was at
  // epoch=150, and we are processing a write to X from the same thread at
  // epoch=200, we do nothing, because both writes happen in the same 'synch
  // epoch'. That is, if another memory access does not race with the former
  // write, it does not race with the latter as well.
  // QUESTION: can we squeeze this into ThreadState::Fast?
  // E.g. ThreadState::Fast is 44 bits, 32 are taken by synch_epoch and 12 are
  // taken by epoch between synchs.
  // This way we can save one load from tls.
  u64 fast_synch_epoch;
  // This is a slow path flag. On the fast path, fast_state.GetIgnoreBit() is
  // read instead. We do not distinguish between ignoring reads and writes
  // for better performance.
  int ignore_reads_and_writes;
  uptr *shadow_stack_pos;
  u64 *racy_shadow_addr;
  u64 racy_state[2];
  Trace trace;
  uptr shadow_stack[kShadowStackSize];
  ThreadClock clock;
  u64 stat[StatCnt];
  const int tid;
  int in_rtl;
  const uptr stk_addr;
  const uptr stk_size;
  const uptr tls_addr;
  const uptr tls_size;

  DeadlockDetector deadlock_detector;

  bool in_signal_handler;
  SignalContext *signal_ctx;

  // Set in regions of runtime that must be signal-safe and fork-safe.
  // If set, malloc must not be called.
  int nomalloc;

  explicit ThreadState(Context *ctx, int tid, u64 epoch,
                       uptr stk_addr, uptr stk_size,
                       uptr tls_addr, uptr tls_size);
};

Context *CTX();
extern THREADLOCAL char cur_thread_placeholder[];

INLINE ThreadState *cur_thread() {
  return reinterpret_cast<ThreadState *>(&cur_thread_placeholder);
}

enum ThreadStatus {
  ThreadStatusInvalid,   // Non-existent thread, data is invalid.
  ThreadStatusCreated,   // Created but not yet running.
  ThreadStatusRunning,   // The thread is currently running.
  ThreadStatusFinished,  // Joinable thread is finished but not yet joined.
  ThreadStatusDead,      // Joined, but some info (trace) is still alive.
};

// Information about a thread that is held for some time after its
// termination.
struct ThreadDeadInfo {
  Trace trace;
};

struct ThreadContext {
  const int tid;
  int unique_id;  // Non-rolling thread id.
  uptr user_id;  // Some opaque user thread id (e.g. pthread_t).
  ThreadState *thr;
  ThreadStatus status;
  bool detached;
  int reuse_count;
  SyncClock sync;
  // Epoch at which the thread started.
  // If we see an event from the thread stamped with an older epoch,
  // the event is from a dead thread that shared the tid with this thread.
  u64 epoch0;
  u64 epoch1;
  StackTrace creation_stack;
  ThreadDeadInfo *dead_info;
  ThreadContext *dead_next;  // In dead thread list.

  explicit ThreadContext(int tid);
};

struct RacyStacks {
  MD5Hash hash[2];
  bool operator==(const RacyStacks &other) const {
    if (hash[0] == other.hash[0] && hash[1] == other.hash[1])
      return true;
    if (hash[0] == other.hash[1] && hash[1] == other.hash[0])
      return true;
    return false;
  }
};

struct RacyAddress {
  uptr addr_min;
  uptr addr_max;
};

struct Context {
  Context();

  bool initialized;

  SyncTab synctab;

  Mutex report_mtx;
  int nreported;
  int nmissed_expected;

  Mutex thread_mtx;
  unsigned thread_seq;
  unsigned unique_thread_seq;
  int alive_threads;
  int max_alive_threads;
  ThreadContext *threads[kMaxTid];
  int dead_list_size;
  ThreadContext *dead_list_head;
  ThreadContext *dead_list_tail;

  Vector<RacyStacks> racy_stacks;
  Vector<RacyAddress> racy_addresses;

  Flags flags;

  u64 stat[StatCnt];
  u64 int_alloc_cnt[MBlockTypeCount];
  u64 int_alloc_siz[MBlockTypeCount];
};

class ScopedInRtl {
 public:
  ScopedInRtl();
  ~ScopedInRtl();
 private:
  ThreadState *thr_;
  int in_rtl_;
  int errno_;
};

class ScopedReport {
 public:
  explicit ScopedReport(ReportType typ);
  ~ScopedReport();

  void AddStack(const StackTrace *stack);
  void AddMemoryAccess(uptr addr, Shadow s, const StackTrace *stack);
  void AddThread(const ThreadContext *tctx);
  void AddMutex(const SyncVar *s);
  void AddLocation(uptr addr, uptr size);

  const ReportDesc *GetReport() const;

 private:
  Context *ctx_;
  ReportDesc *rep_;

  ScopedReport(const ScopedReport&);
  void operator = (const ScopedReport&);
};

void StatAggregate(u64 *dst, u64 *src);
void StatOutput(u64 *stat);
void ALWAYS_INLINE INLINE StatInc(ThreadState *thr, StatType typ, u64 n = 1) {
  if (kCollectStats)
    thr->stat[typ] += n;
}

void InitializeShadowMemory();
void InitializeInterceptors();
void InitializeDynamicAnnotations();

void ReportRace(ThreadState *thr);
bool OutputReport(const ScopedReport &srep,
                  const ReportStack *suppress_stack = 0);
bool IsExpectedReport(uptr addr, uptr size);

#if defined(TSAN_DEBUG_OUTPUT) && TSAN_DEBUG_OUTPUT >= 1
# define DPrintf TsanPrintf
#else
# define DPrintf(...)
#endif

#if defined(TSAN_DEBUG_OUTPUT) && TSAN_DEBUG_OUTPUT >= 2
# define DPrintf2 TsanPrintf
#else
# define DPrintf2(...)
#endif

void Initialize(ThreadState *thr);
int Finalize(ThreadState *thr);

void MemoryAccess(ThreadState *thr, uptr pc, uptr addr,
    int kAccessSizeLog, bool kAccessIsWrite);
void MemoryAccessImpl(ThreadState *thr, uptr addr,
    int kAccessSizeLog, bool kAccessIsWrite, FastState fast_state,
    u64 *shadow_mem, Shadow cur);
void MemoryRead1Byte(ThreadState *thr, uptr pc, uptr addr);
void MemoryWrite1Byte(ThreadState *thr, uptr pc, uptr addr);
void MemoryRead8Byte(ThreadState *thr, uptr pc, uptr addr);
void MemoryWrite8Byte(ThreadState *thr, uptr pc, uptr addr);
void MemoryAccessRange(ThreadState *thr, uptr pc, uptr addr,
                       uptr size, bool is_write);
void MemoryResetRange(ThreadState *thr, uptr pc, uptr addr, uptr size);
void MemoryRangeFreed(ThreadState *thr, uptr pc, uptr addr, uptr size);
void IgnoreCtl(ThreadState *thr, bool write, bool begin);

void FuncEntry(ThreadState *thr, uptr pc);
void FuncExit(ThreadState *thr);

int ThreadCreate(ThreadState *thr, uptr pc, uptr uid, bool detached);
void ThreadStart(ThreadState *thr, int tid);
void ThreadFinish(ThreadState *thr);
int ThreadTid(ThreadState *thr, uptr pc, uptr uid);
void ThreadJoin(ThreadState *thr, uptr pc, int tid);
void ThreadDetach(ThreadState *thr, uptr pc, int tid);
void ThreadFinalize(ThreadState *thr);

void MutexCreate(ThreadState *thr, uptr pc, uptr addr, bool rw, bool recursive);
void MutexDestroy(ThreadState *thr, uptr pc, uptr addr);
void MutexLock(ThreadState *thr, uptr pc, uptr addr);
void MutexUnlock(ThreadState *thr, uptr pc, uptr addr);
void MutexReadLock(ThreadState *thr, uptr pc, uptr addr);
void MutexReadUnlock(ThreadState *thr, uptr pc, uptr addr);
void MutexReadOrWriteUnlock(ThreadState *thr, uptr pc, uptr addr);

void Acquire(ThreadState *thr, uptr pc, uptr addr);
void Release(ThreadState *thr, uptr pc, uptr addr);

// The hacky call uses a custom calling convention and an assembly thunk.
// It is considerably faster than a normal call for the caller
// if it is not executed (it is intended for slow paths from hot functions).
// The trick is that the call preserves all registers and the compiler
// does not treat it as a call.
// If it does not work for you, use a normal call.
#if TSAN_DEBUG == 0
// The caller may not create a stack frame for itself at all,
// so we create a reserve stack frame for it (1024 bytes must be enough).
#define HACKY_CALL(f) \
  __asm__ __volatile__("sub $0x400, %%rsp;" \
                       "call " #f "_thunk;" \
                       "add $0x400, %%rsp;" ::: "memory");
#else
#define HACKY_CALL(f) f()
#endif

extern "C" void __tsan_trace_switch();
void ALWAYS_INLINE INLINE TraceAddEvent(ThreadState *thr, u64 epoch,
                                        EventType typ, uptr addr) {
  StatInc(thr, StatEvents);
  if (UNLIKELY((epoch % kTracePartSize) == 0))
    HACKY_CALL(__tsan_trace_switch);
  Event *evp = &thr->trace.events[epoch % kTraceSize];
  Event ev = (u64)addr | ((u64)typ << 61);
  *evp = ev;
}
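
// Sketch of the event packing used above (illustrative values; the exact
// decoding lives in the trace consumer): the event type occupies the top
// 3 bits and the address/pc the low 61 bits, e.g.
//   Event ev = 0x0000123456789abcull | ((u64)typ << 61);
// which is why the address must fit into 61 bits and EventType into 3.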

}  // namespace __tsan

#endif  // TSAN_RTL_H