//===-- tsan_interface_atomic.cc ------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//

// ThreadSanitizer atomic operations are based on the C++11/C11 standards.
// For background see the C++11 standard.  A slightly older, publicly
// available draft of the standard (not entirely up-to-date, but close enough
// for casual browsing) is available here:
// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2011/n3242.pdf
// The following page contains more background information:
// http://www.hpl.hp.com/personal/Hans_Boehm/c++mm/

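// Illustrative only (not part of this file's interface definition): compiler
// instrumentation rewrites C++11 atomic operations into calls to the
// __tsan_atomic* entry points defined below, roughly as follows:
//   std::atomic<int> x;
//   x.load(std::memory_order_acquire);
//       // -> __tsan_atomic32_load(&x, mo_acquire)
//   x.fetch_add(1, std::memory_order_acq_rel);
//       // -> __tsan_atomic32_fetch_add(&x, 1, mo_acq_rel)
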
#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_stacktrace.h"
#include "sanitizer_common/sanitizer_mutex.h"
#include "tsan_flags.h"
#include "tsan_rtl.h"

using namespace __tsan;  // NOLINT

#define SCOPED_ATOMIC(func, ...) \
    const uptr callpc = (uptr)__builtin_return_address(0); \
    uptr pc = __sanitizer::StackTrace::GetCurrentPc(); \
    mo = flags()->force_seq_cst_atomics ? (morder)mo_seq_cst : mo; \
    ThreadState *const thr = cur_thread(); \
    if (thr->ignore_interceptors) \
      return NoTsanAtomic##func(__VA_ARGS__); \
    AtomicStatInc(thr, sizeof(*a), mo, StatAtomic##func); \
    ScopedAtomic sa(thr, callpc, a, mo, __func__); \
    return Atomic##func(thr, pc, __VA_ARGS__); \
/**/

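// For reference only, a sketch of what SCOPED_ATOMIC(Load, a, mo) expands to
// inside an interface function (derived directly from the macro above):
//   const uptr callpc = (uptr)__builtin_return_address(0);
//   uptr pc = __sanitizer::StackTrace::GetCurrentPc();
//   mo = flags()->force_seq_cst_atomics ? (morder)mo_seq_cst : mo;
//   ThreadState *const thr = cur_thread();
//   if (thr->ignore_interceptors)
//     return NoTsanAtomicLoad(a, mo);
//   AtomicStatInc(thr, sizeof(*a), mo, StatAtomicLoad);
//   ScopedAtomic sa(thr, callpc, a, mo, __func__);
//   return AtomicLoad(thr, pc, a, mo);
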
// These should match declarations from public tsan_interface_atomic.h header.
typedef unsigned char      a8;
typedef unsigned short     a16;  // NOLINT
typedef unsigned int       a32;
typedef unsigned long long a64;  // NOLINT
#if defined(__SIZEOF_INT128__) \
    || (__clang_major__ * 100 + __clang_minor__ >= 302)
__extension__ typedef __int128 a128;
# define __TSAN_HAS_INT128 1
#else
# define __TSAN_HAS_INT128 0
#endif

// Protects emulation of 128-bit atomic operations.
static StaticSpinMutex mutex128;
// Part of ABI, do not change.
// http://llvm.org/viewvc/llvm-project/libcxx/trunk/include/atomic?view=markup
typedef enum {
  mo_relaxed,
  mo_consume,
  mo_acquire,
  mo_release,
  mo_acq_rel,
  mo_seq_cst
} morder;

class ScopedAtomic {
 public:
  ScopedAtomic(ThreadState *thr, uptr pc, const volatile void *a,
               morder mo, const char *func)
      : thr_(thr) {
    FuncEntry(thr_, pc);
    DPrintf("#%d: %s(%p, %d)\n", thr_->tid, func, a, mo);
  }
  ~ScopedAtomic() {
    ProcessPendingSignals(thr_);
    FuncExit(thr_);
  }
 private:
  ThreadState *thr_;
};

static void AtomicStatInc(ThreadState *thr, uptr size, morder mo, StatType t) {
  StatInc(thr, StatAtomic);
  StatInc(thr, t);
  StatInc(thr, size == 1 ? StatAtomic1
             : size == 2 ? StatAtomic2
             : size == 4 ? StatAtomic4
             : size == 8 ? StatAtomic8
             :             StatAtomic16);
  StatInc(thr, mo == mo_relaxed ? StatAtomicRelaxed
             : mo == mo_consume ? StatAtomicConsume
             : mo == mo_acquire ? StatAtomicAcquire
             : mo == mo_release ? StatAtomicRelease
             : mo == mo_acq_rel ? StatAtomicAcq_Rel
             :                    StatAtomicSeq_Cst);
}

static bool IsLoadOrder(morder mo) {
  return mo == mo_relaxed || mo == mo_consume
      || mo == mo_acquire || mo == mo_seq_cst;
}

static bool IsStoreOrder(morder mo) {
  return mo == mo_relaxed || mo == mo_release || mo == mo_seq_cst;
}

static bool IsReleaseOrder(morder mo) {
  return mo == mo_release || mo == mo_acq_rel || mo == mo_seq_cst;
}

static bool IsAcquireOrder(morder mo) {
  return mo == mo_consume || mo == mo_acquire
      || mo == mo_acq_rel || mo == mo_seq_cst;
}

static bool IsAcqRelOrder(morder mo) {
  return mo == mo_acq_rel || mo == mo_seq_cst;
}

template<typename T> T func_xchg(volatile T *v, T op) {
  T res = __sync_lock_test_and_set(v, op);
  // __sync_lock_test_and_set does not contain a full barrier.
  __sync_synchronize();
  return res;
}

template<typename T> T func_add(volatile T *v, T op) {
  return __sync_fetch_and_add(v, op);
}

template<typename T> T func_sub(volatile T *v, T op) {
  return __sync_fetch_and_sub(v, op);
}

template<typename T> T func_and(volatile T *v, T op) {
  return __sync_fetch_and_and(v, op);
}

template<typename T> T func_or(volatile T *v, T op) {
  return __sync_fetch_and_or(v, op);
}

template<typename T> T func_xor(volatile T *v, T op) {
  return __sync_fetch_and_xor(v, op);
}

template<typename T> T func_nand(volatile T *v, T op) {
  // clang does not support __sync_fetch_and_nand.
  T cmp = *v;
  for (;;) {
    T newv = ~(cmp & op);
    T cur = __sync_val_compare_and_swap(v, cmp, newv);
    if (cmp == cur)
      return cmp;
    cmp = cur;
  }
}

template<typename T> T func_cas(volatile T *v, T cmp, T xch) {
  return __sync_val_compare_and_swap(v, cmp, xch);
}

// clang does not support 128-bit atomic ops.
// Atomic ops are executed under a tsan internal mutex;
// here we assume that the atomic variables are not accessed
// from non-instrumented code.
#ifndef __GCC_HAVE_SYNC_COMPARE_AND_SWAP_16
a128 func_xchg(volatile a128 *v, a128 op) {
  SpinMutexLock lock(&mutex128);
  a128 cmp = *v;
  *v = op;
  return cmp;
}

a128 func_add(volatile a128 *v, a128 op) {
  SpinMutexLock lock(&mutex128);
  a128 cmp = *v;
  *v = cmp + op;
  return cmp;
}

a128 func_sub(volatile a128 *v, a128 op) {
  SpinMutexLock lock(&mutex128);
  a128 cmp = *v;
  *v = cmp - op;
  return cmp;
}

a128 func_and(volatile a128 *v, a128 op) {
  SpinMutexLock lock(&mutex128);
  a128 cmp = *v;
  *v = cmp & op;
  return cmp;
}

a128 func_or(volatile a128 *v, a128 op) {
  SpinMutexLock lock(&mutex128);
  a128 cmp = *v;
  *v = cmp | op;
  return cmp;
}

a128 func_xor(volatile a128 *v, a128 op) {
  SpinMutexLock lock(&mutex128);
  a128 cmp = *v;
  *v = cmp ^ op;
  return cmp;
}

a128 func_nand(volatile a128 *v, a128 op) {
  SpinMutexLock lock(&mutex128);
  a128 cmp = *v;
  *v = ~(cmp & op);
  return cmp;
}

a128 func_cas(volatile a128 *v, a128 cmp, a128 xch) {
  SpinMutexLock lock(&mutex128);
  a128 cur = *v;
  if (cur == cmp)
    *v = xch;
  return cur;
}
#endif

template<typename T>
static int SizeLog() {
  if (sizeof(T) <= 1)
    return kSizeLog1;
  else if (sizeof(T) <= 2)
    return kSizeLog2;
  else if (sizeof(T) <= 4)
    return kSizeLog4;
  else
    return kSizeLog8;
  // For 16-byte atomics we also use 8-byte memory access,
  // which leads to false negatives only in very obscure cases.
}

static atomic_uint8_t *to_atomic(const volatile a8 *a) {
  return (atomic_uint8_t*)a;
}

static atomic_uint16_t *to_atomic(const volatile a16 *a) {
  return (atomic_uint16_t*)a;
}

static atomic_uint32_t *to_atomic(const volatile a32 *a) {
  return (atomic_uint32_t*)a;
}

static atomic_uint64_t *to_atomic(const volatile a64 *a) {
  return (atomic_uint64_t*)a;
}

static memory_order to_mo(morder mo) {
  switch (mo) {
  case mo_relaxed: return memory_order_relaxed;
  case mo_consume: return memory_order_consume;
  case mo_acquire: return memory_order_acquire;
  case mo_release: return memory_order_release;
  case mo_acq_rel: return memory_order_acq_rel;
  case mo_seq_cst: return memory_order_seq_cst;
  }
  CHECK(0);
  return memory_order_seq_cst;
}

template<typename T>
static T NoTsanAtomicLoad(const volatile T *a, morder mo) {
  return atomic_load(to_atomic(a), to_mo(mo));
}

#if __TSAN_HAS_INT128
static a128 NoTsanAtomicLoad(const volatile a128 *a, morder mo) {
  SpinMutexLock lock(&mutex128);
  return *a;
}
#endif

template<typename T>
static T AtomicLoad(ThreadState *thr, uptr pc, const volatile T *a,
    morder mo) {
  CHECK(IsLoadOrder(mo));
  // This fast path is critical for performance.
  // Assume the access is atomic.
  if (!IsAcquireOrder(mo)) {
    MemoryReadAtomic(thr, pc, (uptr)a, SizeLog<T>());
    return NoTsanAtomicLoad(a, mo);
  }
  SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, (uptr)a, false);
  AcquireImpl(thr, pc, &s->clock);
  T v = NoTsanAtomicLoad(a, mo);
  s->mtx.ReadUnlock();
  MemoryReadAtomic(thr, pc, (uptr)a, SizeLog<T>());
  return v;
}

template<typename T>
static void NoTsanAtomicStore(volatile T *a, T v, morder mo) {
  atomic_store(to_atomic(a), v, to_mo(mo));
}

#if __TSAN_HAS_INT128
static void NoTsanAtomicStore(volatile a128 *a, a128 v, morder mo) {
  SpinMutexLock lock(&mutex128);
  *a = v;
}
#endif

template<typename T>
static void AtomicStore(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  CHECK(IsStoreOrder(mo));
  MemoryWriteAtomic(thr, pc, (uptr)a, SizeLog<T>());
  // This fast path is critical for performance.
  // Assume the access is atomic.
  // Strictly speaking, even a relaxed store cuts off the release sequence,
  // so we must reset the clock.
  if (!IsReleaseOrder(mo)) {
    NoTsanAtomicStore(a, v, mo);
    return;
  }
  __sync_synchronize();
  SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, (uptr)a, true);
  thr->fast_state.IncrementEpoch();
  // Can't increment epoch w/o writing to the trace as well.
  TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
  ReleaseImpl(thr, pc, &s->clock);
  NoTsanAtomicStore(a, v, mo);
  s->mtx.Unlock();
}

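// Common implementation of the read-modify-write operations below: records
// the atomic write, performs the synchronization implied by mo on the
// location's SyncVar, and then applies the underlying operation F to the
// value.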
template<typename T, T (*F)(volatile T *v, T op)>
static T AtomicRMW(ThreadState *thr, uptr pc, volatile T *a, T v, morder mo) {
  MemoryWriteAtomic(thr, pc, (uptr)a, SizeLog<T>());
  SyncVar *s = 0;
  if (mo != mo_relaxed) {
    s = ctx->metamap.GetOrCreateAndLock(thr, pc, (uptr)a, true);
    thr->fast_state.IncrementEpoch();
    // Can't increment epoch w/o writing to the trace as well.
    TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
    if (IsAcqRelOrder(mo))
      AcquireReleaseImpl(thr, pc, &s->clock);
    else if (IsReleaseOrder(mo))
      ReleaseImpl(thr, pc, &s->clock);
    else if (IsAcquireOrder(mo))
      AcquireImpl(thr, pc, &s->clock);
  }
  v = F(a, v);
  if (s)
    s->mtx.Unlock();
  return v;
}

template<typename T>
static T NoTsanAtomicExchange(volatile T *a, T v, morder mo) {
  return func_xchg(a, v);
}

template<typename T>
static T NoTsanAtomicFetchAdd(volatile T *a, T v, morder mo) {
  return func_add(a, v);
}

template<typename T>
static T NoTsanAtomicFetchSub(volatile T *a, T v, morder mo) {
  return func_sub(a, v);
}

template<typename T>
static T NoTsanAtomicFetchAnd(volatile T *a, T v, morder mo) {
  return func_and(a, v);
}

template<typename T>
static T NoTsanAtomicFetchOr(volatile T *a, T v, morder mo) {
  return func_or(a, v);
}

template<typename T>
static T NoTsanAtomicFetchXor(volatile T *a, T v, morder mo) {
  return func_xor(a, v);
}

template<typename T>
static T NoTsanAtomicFetchNand(volatile T *a, T v, morder mo) {
  return func_nand(a, v);
}

template<typename T>
static T AtomicExchange(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_xchg>(thr, pc, a, v, mo);
}

template<typename T>
static T AtomicFetchAdd(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_add>(thr, pc, a, v, mo);
}

template<typename T>
static T AtomicFetchSub(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_sub>(thr, pc, a, v, mo);
}

template<typename T>
static T AtomicFetchAnd(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_and>(thr, pc, a, v, mo);
}

template<typename T>
static T AtomicFetchOr(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_or>(thr, pc, a, v, mo);
}

template<typename T>
static T AtomicFetchXor(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_xor>(thr, pc, a, v, mo);
}

template<typename T>
static T AtomicFetchNand(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_nand>(thr, pc, a, v, mo);
}

template<typename T>
static bool NoTsanAtomicCAS(volatile T *a, T *c, T v, morder mo, morder fmo) {
  return atomic_compare_exchange_strong(to_atomic(a), c, v, to_mo(mo));
}

#if __TSAN_HAS_INT128
static bool NoTsanAtomicCAS(volatile a128 *a, a128 *c, a128 v,
    morder mo, morder fmo) {
  a128 old = *c;
  a128 cur = func_cas(a, old, v);
  if (cur == old)
    return true;
  *c = cur;
  return false;
}
#endif

template<typename T>
static bool NoTsanAtomicCAS(volatile T *a, T c, T v, morder mo, morder fmo) {
  return NoTsanAtomicCAS(a, &c, v, mo, fmo);
}

template<typename T>
static bool AtomicCAS(ThreadState *thr, uptr pc,
    volatile T *a, T *c, T v, morder mo, morder fmo) {
  (void)fmo;  // Unused because llvm does not pass it yet.
  MemoryWriteAtomic(thr, pc, (uptr)a, SizeLog<T>());
  SyncVar *s = 0;
  bool write_lock = mo != mo_acquire && mo != mo_consume;
  if (mo != mo_relaxed) {
    s = ctx->metamap.GetOrCreateAndLock(thr, pc, (uptr)a, write_lock);
    thr->fast_state.IncrementEpoch();
    // Can't increment epoch w/o writing to the trace as well.
    TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
    if (IsAcqRelOrder(mo))
      AcquireReleaseImpl(thr, pc, &s->clock);
    else if (IsReleaseOrder(mo))
      ReleaseImpl(thr, pc, &s->clock);
    else if (IsAcquireOrder(mo))
      AcquireImpl(thr, pc, &s->clock);
  }
  T cc = *c;
  T pr = func_cas(a, cc, v);
  if (s) {
    if (write_lock)
      s->mtx.Unlock();
    else
      s->mtx.ReadUnlock();
  }
  if (pr == cc)
    return true;
  *c = pr;
  return false;
}

template<typename T>
static T AtomicCAS(ThreadState *thr, uptr pc,
    volatile T *a, T c, T v, morder mo, morder fmo) {
  AtomicCAS(thr, pc, a, &c, v, mo, fmo);
  return c;
}

static void NoTsanAtomicFence(morder mo) {
  __sync_synchronize();
}

static void AtomicFence(ThreadState *thr, uptr pc, morder mo) {
  // FIXME(dvyukov): not implemented.
  __sync_synchronize();
}

extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_load(const volatile a8 *a, morder mo) {
  SCOPED_ATOMIC(Load, a, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_load(const volatile a16 *a, morder mo) {
  SCOPED_ATOMIC(Load, a, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_load(const volatile a32 *a, morder mo) {
  SCOPED_ATOMIC(Load, a, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_load(const volatile a64 *a, morder mo) {
  SCOPED_ATOMIC(Load, a, mo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_load(const volatile a128 *a, morder mo) {
  SCOPED_ATOMIC(Load, a, mo);
}
#endif

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic8_store(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(Store, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic16_store(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(Store, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic32_store(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(Store, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic64_store(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(Store, a, v, mo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic128_store(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(Store, a, v, mo);
}
#endif

SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_exchange(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(Exchange, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_exchange(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(Exchange, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_exchange(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(Exchange, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_exchange(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(Exchange, a, v, mo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_exchange(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(Exchange, a, v, mo);
}
#endif

SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_fetch_add(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(FetchAdd, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_fetch_add(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(FetchAdd, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_fetch_add(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(FetchAdd, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_fetch_add(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(FetchAdd, a, v, mo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_fetch_add(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(FetchAdd, a, v, mo);
}
#endif

SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_fetch_sub(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(FetchSub, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_fetch_sub(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(FetchSub, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_fetch_sub(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(FetchSub, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_fetch_sub(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(FetchSub, a, v, mo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_fetch_sub(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(FetchSub, a, v, mo);
}
#endif

SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_fetch_and(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(FetchAnd, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_fetch_and(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(FetchAnd, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_fetch_and(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(FetchAnd, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_fetch_and(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(FetchAnd, a, v, mo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_fetch_and(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(FetchAnd, a, v, mo);
}
#endif

SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_fetch_or(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(FetchOr, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_fetch_or(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(FetchOr, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_fetch_or(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(FetchOr, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_fetch_or(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(FetchOr, a, v, mo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_fetch_or(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(FetchOr, a, v, mo);
}
#endif

SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_fetch_xor(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(FetchXor, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_fetch_xor(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(FetchXor, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_fetch_xor(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(FetchXor, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_fetch_xor(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(FetchXor, a, v, mo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_fetch_xor(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(FetchXor, a, v, mo);
}
#endif

SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_fetch_nand(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(FetchNand, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_fetch_nand(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(FetchNand, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_fetch_nand(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(FetchNand, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_fetch_nand(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(FetchNand, a, v, mo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_fetch_nand(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(FetchNand, a, v, mo);
}
#endif

SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic8_compare_exchange_strong(volatile a8 *a, a8 *c, a8 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic16_compare_exchange_strong(volatile a16 *a, a16 *c, a16 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic32_compare_exchange_strong(volatile a32 *a, a32 *c, a32 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic64_compare_exchange_strong(volatile a64 *a, a64 *c, a64 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic128_compare_exchange_strong(volatile a128 *a, a128 *c, a128 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}
#endif

SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic8_compare_exchange_weak(volatile a8 *a, a8 *c, a8 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic16_compare_exchange_weak(volatile a16 *a, a16 *c, a16 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic32_compare_exchange_weak(volatile a32 *a, a32 *c, a32 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic64_compare_exchange_weak(volatile a64 *a, a64 *c, a64 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic128_compare_exchange_weak(volatile a128 *a, a128 *c, a128 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}
#endif

SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_compare_exchange_val(volatile a8 *a, a8 c, a8 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_compare_exchange_val(volatile a16 *a, a16 c, a16 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_compare_exchange_val(volatile a32 *a, a32 c, a32 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_compare_exchange_val(volatile a64 *a, a64 c, a64 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_compare_exchange_val(volatile a128 *a, a128 c, a128 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}
#endif

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic_thread_fence(morder mo) {
  char* a = 0;
  SCOPED_ATOMIC(Fence, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic_signal_fence(morder mo) {
}
}  // extern "C"