//===-- tsan_rtl.h ----------------------------------------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
// Main internal TSan header file.
//
// Ground rules:
//   - C++ run-time should not be used (static CTORs, RTTI, exceptions, static
//     function-scope locals)
//   - All functions/classes/etc reside in namespace __tsan, except for those
//     declared in tsan_interface.h.
//   - Platform-specific files should be used instead of ifdefs (*).
//   - No system headers included in header files (*).
//   - Platform-specific headers are included only in platform-specific files (*).
//
//  (*) Except when inlining is critical for performance.
//===----------------------------------------------------------------------===//

#ifndef TSAN_RTL_H
#define TSAN_RTL_H

#include "sanitizer_common/sanitizer_allocator.h"
#include "sanitizer_common/sanitizer_allocator_internal.h"
#include "sanitizer_common/sanitizer_asm.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_deadlock_detector_interface.h"
#include "sanitizer_common/sanitizer_libignore.h"
#include "sanitizer_common/sanitizer_suppressions.h"
#include "sanitizer_common/sanitizer_thread_registry.h"
#include "tsan_clock.h"
#include "tsan_defs.h"
#include "tsan_flags.h"
#include "tsan_sync.h"
#include "tsan_trace.h"
#include "tsan_vector.h"
#include "tsan_report.h"
#include "tsan_platform.h"
#include "tsan_mutexset.h"
#include "tsan_ignoreset.h"
#include "tsan_stack_trace.h"

#if SANITIZER_WORDSIZE != 64
# error "ThreadSanitizer is supported only on 64-bit platforms"
#endif

namespace __tsan {

#ifndef SANITIZER_GO
struct MapUnmapCallback;
#ifdef __mips64
static const uptr kAllocatorSpace = 0;
static const uptr kAllocatorSize = SANITIZER_MMAP_RANGE_SIZE;
static const uptr kAllocatorRegionSizeLog = 20;
static const uptr kAllocatorNumRegions =
    kAllocatorSize >> kAllocatorRegionSizeLog;
typedef TwoLevelByteMap<(kAllocatorNumRegions >> 12), 1 << 12,
    MapUnmapCallback> ByteMap;
typedef SizeClassAllocator32<kAllocatorSpace, kAllocatorSize, 0,
    CompactSizeClassMap, kAllocatorRegionSizeLog, ByteMap,
    MapUnmapCallback> PrimaryAllocator;
#else
typedef SizeClassAllocator64<kHeapMemBeg, kHeapMemEnd - kHeapMemBeg, 0,
    DefaultSizeClassMap, MapUnmapCallback> PrimaryAllocator;
#endif
typedef SizeClassAllocatorLocalCache<PrimaryAllocator> AllocatorCache;
typedef LargeMmapAllocator<MapUnmapCallback> SecondaryAllocator;
typedef CombinedAllocator<PrimaryAllocator, AllocatorCache,
    SecondaryAllocator> Allocator;
Allocator *allocator();
#endif

void TsanCheckFailed(const char *file, int line, const char *cond,
                     u64 v1, u64 v2);

const u64 kShadowRodata = (u64)-1;  // .rodata shadow marker

// FastState (from most significant bit):
//   ignore          : 1
//   tid             : kTidBits
//   unused          : -
//   history_size    : 3
//   epoch           : kClkBits
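// For example, assuming the default kTidBits = 13 and kClkBits = 42 (from
// tsan_defs.h), this packs as: bit 63 = ignore, bits 50-62 = tid,
// bits 45-49 = unused, bits 42-44 = history_size, bits 0-41 = epoch.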
class FastState {
 public:
  FastState(u64 tid, u64 epoch) {
    x_ = tid << kTidShift;
    x_ |= epoch;
    DCHECK_EQ(tid, this->tid());
    DCHECK_EQ(epoch, this->epoch());
    DCHECK_EQ(GetIgnoreBit(), false);
  }

  explicit FastState(u64 x)
      : x_(x) {
  }

  u64 raw() const {
    return x_;
  }

  u64 tid() const {
    u64 res = (x_ & ~kIgnoreBit) >> kTidShift;
    return res;
  }

  u64 TidWithIgnore() const {
    u64 res = x_ >> kTidShift;
    return res;
  }

  u64 epoch() const {
    u64 res = x_ & ((1ull << kClkBits) - 1);
    return res;
  }

  void IncrementEpoch() {
    u64 old_epoch = epoch();
    x_ += 1;
    DCHECK_EQ(old_epoch + 1, epoch());
    (void)old_epoch;
  }

  void SetIgnoreBit() { x_ |= kIgnoreBit; }
  void ClearIgnoreBit() { x_ &= ~kIgnoreBit; }
  bool GetIgnoreBit() const { return (s64)x_ < 0; }

  void SetHistorySize(int hs) {
    CHECK_GE(hs, 0);
    CHECK_LE(hs, 7);
    x_ = (x_ & ~(kHistoryMask << kHistoryShift)) | (u64(hs) << kHistoryShift);
  }

  ALWAYS_INLINE
  int GetHistorySize() const {
    return (int)((x_ >> kHistoryShift) & kHistoryMask);
  }

  void ClearHistorySize() {
    SetHistorySize(0);
  }

  ALWAYS_INLINE
  u64 GetTracePos() const {
    const int hs = GetHistorySize();
    // When hs == 0, the trace consists of 2 parts.
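    // For example, assuming the default kTracePartSizeBits = 13 (from
    // tsan_trace.h): hs == 0 keeps the low 14 bits of the epoch
    // (2 parts of 8K events each), hs == 7 keeps 21 bits (256 parts).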
    const u64 mask = (1ull << (kTracePartSizeBits + hs + 1)) - 1;
    return epoch() & mask;
  }

 private:
  friend class Shadow;
  static const int kTidShift = 64 - kTidBits - 1;
  static const u64 kIgnoreBit = 1ull << 63;
  static const u64 kFreedBit = 1ull << 63;
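  // Note: kIgnoreBit and kFreedBit alias bit 63; the bit means "ignore" in a
  // thread's FastState and "freed" in a memory Shadow value (see the
  // freed-bit comment in Shadow below).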
  static const u64 kHistoryShift = kClkBits;
  static const u64 kHistoryMask = 7;
  u64 x_;
};

// Shadow (from most significant bit):
//   freed           : 1
//   tid             : kTidBits
//   is_atomic       : 1
//   is_read         : 1
//   size_log        : 2
//   addr0           : 3
//   epoch           : kClkBits
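// With the default kClkBits = 42, the shifts below place addr0 at bits
// 42-44, size_log at bits 45-46, is_read at bit 47 and is_atomic at bit 48.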
class Shadow : public FastState {
 public:
  explicit Shadow(u64 x)
      : FastState(x) {
  }

  explicit Shadow(const FastState &s)
      : FastState(s.x_) {
    ClearHistorySize();
  }

  void SetAddr0AndSizeLog(u64 addr0, unsigned kAccessSizeLog) {
    DCHECK_EQ((x_ >> kClkBits) & 31, 0);
    DCHECK_LE(addr0, 7);
    DCHECK_LE(kAccessSizeLog, 3);
    x_ |= ((kAccessSizeLog << 3) | addr0) << kClkBits;
    DCHECK_EQ(kAccessSizeLog, size_log());
    DCHECK_EQ(addr0, this->addr0());
  }

  void SetWrite(unsigned kAccessIsWrite) {
    DCHECK_EQ(x_ & kReadBit, 0);
    if (!kAccessIsWrite)
      x_ |= kReadBit;
    DCHECK_EQ(kAccessIsWrite, IsWrite());
  }

  void SetAtomic(bool kIsAtomic) {
    DCHECK(!IsAtomic());
    if (kIsAtomic)
      x_ |= kAtomicBit;
    DCHECK_EQ(IsAtomic(), kIsAtomic);
  }

  bool IsAtomic() const {
    return x_ & kAtomicBit;
  }

  bool IsZero() const {
    return x_ == 0;
  }

  static inline bool TidsAreEqual(const Shadow s1, const Shadow s2) {
    u64 shifted_xor = (s1.x_ ^ s2.x_) >> kTidShift;
    DCHECK_EQ(shifted_xor == 0, s1.TidWithIgnore() == s2.TidWithIgnore());
    return shifted_xor == 0;
  }

  static ALWAYS_INLINE
  bool Addr0AndSizeAreEqual(const Shadow s1, const Shadow s2) {
    u64 masked_xor = ((s1.x_ ^ s2.x_) >> kClkBits) & 31;
    return masked_xor == 0;
  }

  static ALWAYS_INLINE bool TwoRangesIntersect(Shadow s1, Shadow s2,
      unsigned kS2AccessSize) {
    bool res = false;
    u64 diff = s1.addr0() - s2.addr0();
    if ((s64)diff < 0) {  // s1.addr0 < s2.addr0  // NOLINT
      // if (s1.addr0() + s1.size() > s2.addr0()) return true;
      if (s1.size() > -diff)
        res = true;
    } else {
      // if (s2.addr0() + kS2AccessSize > s1.addr0()) return true;
      if (kS2AccessSize > diff)
        res = true;
    }
    DCHECK_EQ(res, TwoRangesIntersectSlow(s1, s2));
    DCHECK_EQ(res, TwoRangesIntersectSlow(s2, s1));
    return res;
  }
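  // Example for TwoRangesIntersect: with s1 = {addr0 = 2, size = 4} and
  // s2 = {addr0 = 4}, diff wraps to -2 and s1.size() (4) > 2, so the fast
  // path correctly reports an intersection of [2,6) and [4,...).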

  u64 ALWAYS_INLINE addr0() const { return (x_ >> kClkBits) & 7; }
  u64 ALWAYS_INLINE size() const { return 1ull << size_log(); }
  bool ALWAYS_INLINE IsWrite() const { return !IsRead(); }
  bool ALWAYS_INLINE IsRead() const { return x_ & kReadBit; }

  // The idea behind the freed bit is as follows.
  // When the memory is freed (or otherwise becomes inaccessible) we write
  // shadow values with the tid/epoch of the free and with the freed bit set.
  // During memory access processing the freed bit is treated as the msb of
  // the tid, so any access races with a shadow value that has the freed bit
  // set (it looks like a write from a thread we have never synchronized
  // with). This allows us to detect accesses to freed memory without extra
  // overhead in memory access processing, and at the same time to restore
  // the tid/epoch of the free.
  void MarkAsFreed() {
    x_ |= kFreedBit;
  }

  bool IsFreed() const {
    return x_ & kFreedBit;
  }

  bool GetFreedAndReset() {
    bool res = x_ & kFreedBit;
    x_ &= ~kFreedBit;
    return res;
  }

  bool ALWAYS_INLINE IsBothReadsOrAtomic(bool kIsWrite, bool kIsAtomic) const {
    bool v = x_ & ((u64(kIsWrite ^ 1) << kReadShift)
        | (u64(kIsAtomic) << kAtomicShift));
    DCHECK_EQ(v, (!IsWrite() && !kIsWrite) || (IsAtomic() && kIsAtomic));
    return v;
  }

  bool ALWAYS_INLINE IsRWNotWeaker(bool kIsWrite, bool kIsAtomic) const {
    bool v = ((x_ >> kReadShift) & 3)
        <= u64((kIsWrite ^ 1) | (kIsAtomic << 1));
    DCHECK_EQ(v, (IsAtomic() < kIsAtomic) ||
        (IsAtomic() == kIsAtomic && !IsWrite() <= !kIsWrite));
    return v;
  }

  bool ALWAYS_INLINE IsRWWeakerOrEqual(bool kIsWrite, bool kIsAtomic) const {
    bool v = ((x_ >> kReadShift) & 3)
        >= u64((kIsWrite ^ 1) | (kIsAtomic << 1));
    DCHECK_EQ(v, (IsAtomic() > kIsAtomic) ||
        (IsAtomic() == kIsAtomic && !IsWrite() >= !kIsWrite));
    return v;
  }

 private:
  static const u64 kReadShift   = 5 + kClkBits;
  static const u64 kReadBit     = 1ull << kReadShift;
  static const u64 kAtomicShift = 6 + kClkBits;
  static const u64 kAtomicBit   = 1ull << kAtomicShift;

  u64 size_log() const { return (x_ >> (3 + kClkBits)) & 3; }

  static bool TwoRangesIntersectSlow(const Shadow s1, const Shadow s2) {
    if (s1.addr0() == s2.addr0()) return true;
    if (s1.addr0() < s2.addr0() && s1.addr0() + s1.size() > s2.addr0())
      return true;
    if (s2.addr0() < s1.addr0() && s2.addr0() + s2.size() > s1.addr0())
      return true;
    return false;
  }
};
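
// Illustrative sketch (not the literal runtime code): a 4-byte non-atomic
// write starting at offset 2 of an 8-byte shadow cell would be encoded as:
//   Shadow cur(thr->fast_state);
//   cur.SetAddr0AndSizeLog(2, kSizeLog4);
//   cur.SetWrite(true);
//   cur.SetAtomic(false);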

struct ThreadSignalContext;

struct JmpBuf {
  uptr sp;
  uptr mangled_sp;
  int int_signal_send;
  bool in_blocking_func;
  uptr in_signal_handler;
  uptr *shadow_stack_pos;
};

// This struct is stored in TLS.
struct ThreadState {
  FastState fast_state;
  // Synch epoch represents the thread's epoch before the last synchronization
  // action. It allows us to reduce the number of shadow state updates.
  // For example, if fast_synch_epoch=100 and the last write to addr X was at
  // epoch=150, and we are processing a write to X from the same thread at
  // epoch=200, we do nothing, because both writes happen in the same
  // 'synch epoch'. That is, if another memory access does not race with the
  // former write, it does not race with the latter as well.
  // QUESTION: can we squeeze this into ThreadState::Fast?
  // E.g. ThreadState::Fast is 44 bits, 32 are taken by synch_epoch and 12 are
  // taken by epoch between synchs.
  // This way we can save one load from tls.
  u64 fast_synch_epoch;
  // This is a slow path flag. On the fast path, fast_state.GetIgnoreBit()
  // is read instead. We do not distinguish between ignoring reads and writes
  // for better performance.
  int ignore_reads_and_writes;
  int ignore_sync;
  // Go does not support ignores.
#ifndef SANITIZER_GO
  IgnoreSet mop_ignore_set;
  IgnoreSet sync_ignore_set;
#endif
  // C/C++ uses a fixed-size shadow stack embedded into the Trace.
  // Go uses a malloc-allocated shadow stack with dynamic size.
  uptr *shadow_stack;
  uptr *shadow_stack_end;
  uptr *shadow_stack_pos;
  u64 *racy_shadow_addr;
  u64 racy_state[2];
  MutexSet mset;
  ThreadClock clock;
#ifndef SANITIZER_GO
  AllocatorCache alloc_cache;
  InternalAllocatorCache internal_alloc_cache;
  Vector<JmpBuf> jmp_bufs;
  int ignore_interceptors;
#endif
#if TSAN_COLLECT_STATS
  u64 stat[StatCnt];
#endif
  const int tid;
  const int unique_id;
  bool in_symbolizer;
  bool in_ignored_lib;
  bool is_inited;
  bool is_dead;
  bool is_freeing;
  bool is_vptr_access;
  const uptr stk_addr;
  const uptr stk_size;
  const uptr tls_addr;
  const uptr tls_size;
  ThreadContext *tctx;

#if SANITIZER_DEBUG && !SANITIZER_GO
  InternalDeadlockDetector internal_deadlock_detector;
#endif
  DDPhysicalThread *dd_pt;
  DDLogicalThread *dd_lt;

  atomic_uintptr_t in_signal_handler;
  ThreadSignalContext *signal_ctx;

  DenseSlabAllocCache block_cache;
  DenseSlabAllocCache sync_cache;
  DenseSlabAllocCache clock_cache;

#ifndef SANITIZER_GO
  u32 last_sleep_stack_id;
  ThreadClock last_sleep_clock;
#endif

  // Set in regions of runtime that must be signal-safe and fork-safe.
  // If set, malloc must not be called.
  int nomalloc;

  explicit ThreadState(Context *ctx, int tid, int unique_id, u64 epoch,
                       unsigned reuse_count,
                       uptr stk_addr, uptr stk_size,
                       uptr tls_addr, uptr tls_size);
};

#ifndef SANITIZER_GO
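// cur_thread() returns the ThreadState of the current thread. It is backed
// by a raw thread-local byte array rather than a ThreadState object so that
// no C++ static constructor has to run (see the ground rules above).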
__attribute__((tls_model("initial-exec")))
extern THREADLOCAL char cur_thread_placeholder[];
INLINE ThreadState *cur_thread() {
  return reinterpret_cast<ThreadState *>(&cur_thread_placeholder);
}
#endif

class ThreadContext : public ThreadContextBase {
 public:
  explicit ThreadContext(int tid);
  ~ThreadContext();
  ThreadState *thr;
  u32 creation_stack_id;
  SyncClock sync;
  // Epoch at which the thread started.
  // If we see an event from the thread stamped with an older epoch,
  // the event is from a dead thread that shared its tid with this thread.
  u64 epoch0;
  u64 epoch1;

  // Override superclass callbacks.
  void OnDead() override;
  void OnJoined(void *arg) override;
  void OnFinished() override;
  void OnStarted(void *arg) override;
  void OnCreated(void *arg) override;
  void OnReset() override;
  void OnDetached(void *arg) override;
};

struct RacyStacks {
  MD5Hash hash[2];
  bool operator==(const RacyStacks &other) const {
    if (hash[0] == other.hash[0] && hash[1] == other.hash[1])
      return true;
    if (hash[0] == other.hash[1] && hash[1] == other.hash[0])
      return true;
    return false;
  }
};

struct RacyAddress {
  uptr addr_min;
  uptr addr_max;
};

struct FiredSuppression {
  ReportType type;
  uptr pc;
  Suppression *supp;
};

struct Context {
  Context();

  bool initialized;
  bool after_multithreaded_fork;

  MetaMap metamap;

  Mutex report_mtx;
  int nreported;
  int nmissed_expected;
  atomic_uint64_t last_symbolize_time_ns;

  void *background_thread;
  atomic_uint32_t stop_background_thread;

  ThreadRegistry *thread_registry;

  Vector<RacyStacks> racy_stacks;
  Vector<RacyAddress> racy_addresses;
  // The number of fired suppressions can grow large, so store them in a
  // mmap-backed vector.
  InternalMmapVector<FiredSuppression> fired_suppressions;
  DDetector *dd;

  ClockAlloc clock_alloc;

  Flags flags;

  u64 stat[StatCnt];
  u64 int_alloc_cnt[MBlockTypeCount];
  u64 int_alloc_siz[MBlockTypeCount];
};

extern Context *ctx;  // The one and only global runtime context.

struct ScopedIgnoreInterceptors {
  ScopedIgnoreInterceptors() {
#ifndef SANITIZER_GO
    cur_thread()->ignore_interceptors++;
#endif
  }

  ~ScopedIgnoreInterceptors() {
#ifndef SANITIZER_GO
    cur_thread()->ignore_interceptors--;
#endif
  }
};
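
// Usage sketch: declare an instance on the stack to suppress interceptor
// processing for the rest of the scope, e.g.:
//   void Foo() { ScopedIgnoreInterceptors ignore; /* call libc freely */ }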

class ScopedReport {
 public:
  explicit ScopedReport(ReportType typ);
  ~ScopedReport();

  void AddMemoryAccess(uptr addr, Shadow s, StackTrace stack,
                       const MutexSet *mset);
  void AddStack(StackTrace stack, bool suppressable = false);
  void AddThread(const ThreadContext *tctx, bool suppressable = false);
  void AddThread(int unique_tid, bool suppressable = false);
  void AddUniqueTid(int unique_tid);
  void AddMutex(const SyncVar *s);
  u64 AddMutex(u64 id);
  void AddLocation(uptr addr, uptr size);
  void AddSleep(u32 stack_id);
  void SetCount(int count);

  const ReportDesc *GetReport() const;

 private:
  ReportDesc *rep_;
  // The symbolizer makes lots of intercepted calls. If we tried to process
  // them, at best it would cause deadlocks on internal mutexes.
  ScopedIgnoreInterceptors ignore_interceptors_;

  void AddDeadMutex(u64 id);

  ScopedReport(const ScopedReport&);
  void operator = (const ScopedReport&);
};

void RestoreStack(int tid, const u64 epoch, VarSizeStackTrace *stk,
                  MutexSet *mset);

template<typename StackTraceTy>
void ObtainCurrentStack(ThreadState *thr, uptr toppc, StackTraceTy *stack) {
  uptr size = thr->shadow_stack_pos - thr->shadow_stack;
  uptr start = 0;
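  // If the shadow stack is deeper than kStackTraceMax, drop the oldest
  // frames so that the newest ones (plus the optional top pc) are kept.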
  if (size + !!toppc > kStackTraceMax) {
    start = size + !!toppc - kStackTraceMax;
    size = kStackTraceMax - !!toppc;
  }
  stack->Init(&thr->shadow_stack[start], size, toppc);
}

#if TSAN_COLLECT_STATS
void StatAggregate(u64 *dst, u64 *src);
void StatOutput(u64 *stat);
#endif

void ALWAYS_INLINE StatInc(ThreadState *thr, StatType typ, u64 n = 1) {
#if TSAN_COLLECT_STATS
  thr->stat[typ] += n;
#endif
}
void ALWAYS_INLINE StatSet(ThreadState *thr, StatType typ, u64 n) {
#if TSAN_COLLECT_STATS
  thr->stat[typ] = n;
#endif
}

void MapShadow(uptr addr, uptr size);
void MapThreadTrace(uptr addr, uptr size);
void DontNeedShadowFor(uptr addr, uptr size);
void InitializeShadowMemory();
void InitializeInterceptors();
void InitializeLibIgnore();
void InitializeDynamicAnnotations();

void ForkBefore(ThreadState *thr, uptr pc);
void ForkParentAfter(ThreadState *thr, uptr pc);
void ForkChildAfter(ThreadState *thr, uptr pc);

void ReportRace(ThreadState *thr);
bool OutputReport(ThreadState *thr, const ScopedReport &srep);
bool IsFiredSuppression(Context *ctx, const ScopedReport &srep,
                        StackTrace trace);
bool IsExpectedReport(uptr addr, uptr size);
void PrintMatchedBenignRaces();

#if defined(TSAN_DEBUG_OUTPUT) && TSAN_DEBUG_OUTPUT >= 1
# define DPrintf Printf
#else
# define DPrintf(...)
#endif

#if defined(TSAN_DEBUG_OUTPUT) && TSAN_DEBUG_OUTPUT >= 2
# define DPrintf2 Printf
#else
# define DPrintf2(...)
#endif

u32 CurrentStackId(ThreadState *thr, uptr pc);
ReportStack *SymbolizeStackId(u32 stack_id);
void PrintCurrentStack(ThreadState *thr, uptr pc);
void PrintCurrentStackSlow(uptr pc);  // uses libunwind

void Initialize(ThreadState *thr);
int Finalize(ThreadState *thr);

void OnUserAlloc(ThreadState *thr, uptr pc, uptr p, uptr sz, bool write);
void OnUserFree(ThreadState *thr, uptr pc, uptr p, bool write);

void MemoryAccess(ThreadState *thr, uptr pc, uptr addr,
    int kAccessSizeLog, bool kAccessIsWrite, bool kIsAtomic);
void MemoryAccessImpl(ThreadState *thr, uptr addr,
    int kAccessSizeLog, bool kAccessIsWrite, bool kIsAtomic,
    u64 *shadow_mem, Shadow cur);
void MemoryAccessRange(ThreadState *thr, uptr pc, uptr addr,
    uptr size, bool is_write);
void MemoryAccessRangeStep(ThreadState *thr, uptr pc, uptr addr,
    uptr size, uptr step, bool is_write);
void UnalignedMemoryAccess(ThreadState *thr, uptr pc, uptr addr,
    int size, bool kAccessIsWrite, bool kIsAtomic);

const int kSizeLog1 = 0;
const int kSizeLog2 = 1;
const int kSizeLog4 = 2;
const int kSizeLog8 = 3;
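
// For example, a 4-byte plain read is processed as
//   MemoryRead(thr, pc, addr, kSizeLog4);
// using the wrappers below.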

void ALWAYS_INLINE MemoryRead(ThreadState *thr, uptr pc,
                                     uptr addr, int kAccessSizeLog) {
  MemoryAccess(thr, pc, addr, kAccessSizeLog, false, false);
}

void ALWAYS_INLINE MemoryWrite(ThreadState *thr, uptr pc,
                                      uptr addr, int kAccessSizeLog) {
  MemoryAccess(thr, pc, addr, kAccessSizeLog, true, false);
}

void ALWAYS_INLINE MemoryReadAtomic(ThreadState *thr, uptr pc,
                                           uptr addr, int kAccessSizeLog) {
  MemoryAccess(thr, pc, addr, kAccessSizeLog, false, true);
}

void ALWAYS_INLINE MemoryWriteAtomic(ThreadState *thr, uptr pc,
                                            uptr addr, int kAccessSizeLog) {
  MemoryAccess(thr, pc, addr, kAccessSizeLog, true, true);
}

void MemoryResetRange(ThreadState *thr, uptr pc, uptr addr, uptr size);
void MemoryRangeFreed(ThreadState *thr, uptr pc, uptr addr, uptr size);
void MemoryRangeImitateWrite(ThreadState *thr, uptr pc, uptr addr, uptr size);

void ThreadIgnoreBegin(ThreadState *thr, uptr pc);
void ThreadIgnoreEnd(ThreadState *thr, uptr pc);
void ThreadIgnoreSyncBegin(ThreadState *thr, uptr pc);
void ThreadIgnoreSyncEnd(ThreadState *thr, uptr pc);

void FuncEntry(ThreadState *thr, uptr pc);
void FuncExit(ThreadState *thr);

int ThreadCreate(ThreadState *thr, uptr pc, uptr uid, bool detached);
void ThreadStart(ThreadState *thr, int tid, uptr os_id);
void ThreadFinish(ThreadState *thr);
int ThreadTid(ThreadState *thr, uptr pc, uptr uid);
void ThreadJoin(ThreadState *thr, uptr pc, int tid);
void ThreadDetach(ThreadState *thr, uptr pc, int tid);
void ThreadFinalize(ThreadState *thr);
void ThreadSetName(ThreadState *thr, const char *name);
int ThreadCount(ThreadState *thr);
void ProcessPendingSignals(ThreadState *thr);

void MutexCreate(ThreadState *thr, uptr pc, uptr addr,
                 bool rw, bool recursive, bool linker_init);
void MutexDestroy(ThreadState *thr, uptr pc, uptr addr);
void MutexLock(ThreadState *thr, uptr pc, uptr addr, int rec = 1,
               bool try_lock = false);
int  MutexUnlock(ThreadState *thr, uptr pc, uptr addr, bool all = false);
void MutexReadLock(ThreadState *thr, uptr pc, uptr addr, bool try_lock = false);
void MutexReadUnlock(ThreadState *thr, uptr pc, uptr addr);
void MutexReadOrWriteUnlock(ThreadState *thr, uptr pc, uptr addr);
void MutexRepair(ThreadState *thr, uptr pc, uptr addr);  // call on EOWNERDEAD

void Acquire(ThreadState *thr, uptr pc, uptr addr);
// AcquireGlobal synchronizes the current thread with all other threads.
// In terms of the happens-before relation, it draws a HB edge from every
// thread (at the point where it happens to execute right now) to the current
// thread. We use it to handle Go finalizers: the finalizer goroutine executes
// AcquireGlobal right before executing the finalizers. This provides a
// coarse, but simple, approximation of the actually required synchronization.
void AcquireGlobal(ThreadState *thr, uptr pc);
void Release(ThreadState *thr, uptr pc, uptr addr);
void ReleaseStore(ThreadState *thr, uptr pc, uptr addr);
void AfterSleep(ThreadState *thr, uptr pc);
void AcquireImpl(ThreadState *thr, uptr pc, SyncClock *c);
void ReleaseImpl(ThreadState *thr, uptr pc, SyncClock *c);
void ReleaseStoreImpl(ThreadState *thr, uptr pc, SyncClock *c);
void AcquireReleaseImpl(ThreadState *thr, uptr pc, SyncClock *c);

// The hacky call uses a custom calling convention and an assembly thunk.
// It is considerably faster than a normal call for the caller
// if it is not executed (it is intended for slow paths from hot functions).
// The trick is that the call preserves all registers and the compiler
// does not treat it as a call.
// If it does not work for you, use a normal call.
#if !SANITIZER_DEBUG && defined(__x86_64__)
// The caller may not create a stack frame for itself at all,
// so we create a reserve stack frame for it (1024 bytes must be enough).
#define HACKY_CALL(f) \
  __asm__ __volatile__("sub $1024, %%rsp;" \
                       CFI_INL_ADJUST_CFA_OFFSET(1024) \
                       ".hidden " #f "_thunk;" \
                       "call " #f "_thunk;" \
                       "add $1024, %%rsp;" \
                       CFI_INL_ADJUST_CFA_OFFSET(-1024) \
                       ::: "memory", "cc");
#else
#define HACKY_CALL(f) f()
#endif

void TraceSwitch(ThreadState *thr);
uptr TraceTopPC(ThreadState *thr);
uptr TraceSize();
uptr TraceParts();
Trace *ThreadTrace(int tid);

extern "C" void __tsan_trace_switch();
void ALWAYS_INLINE TraceAddEvent(ThreadState *thr, FastState fs,
                                        EventType typ, u64 addr) {
  if (!kCollectHistory)
    return;
  DCHECK_GE((int)typ, 0);
  DCHECK_LE((int)typ, 7);
  DCHECK_EQ(GetLsb(addr, 61), addr);
  StatInc(thr, StatEvents);
  u64 pos = fs.GetTracePos();
  if (UNLIKELY((pos % kTracePartSize) == 0)) {
#ifndef SANITIZER_GO
    HACKY_CALL(__tsan_trace_switch);
#else
    TraceSwitch(thr);
#endif
  }
  Event *trace = (Event*)GetThreadTrace(fs.tid());
  Event *evp = &trace[pos];
  Event ev = (u64)addr | ((u64)typ << 61);
  *evp = ev;
}

#ifndef SANITIZER_GO
uptr ALWAYS_INLINE HeapEnd() {
#if SANITIZER_CAN_USE_ALLOCATOR64
  return kHeapMemEnd + PrimaryAllocator::AdditionalSize();
#else
  return kHeapMemEnd;
#endif
}
#endif

}  // namespace __tsan

#endif  // TSAN_RTL_H