tsan_interface_atomic.cc revision e7718bcc1372d25fc21100e403cf41b166d42f9b
//===-- tsan_interface_atomic.cc ------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//

// ThreadSanitizer atomic operations are based on the C++11/C11 standards.
// For background, see the C++11 standard.  A slightly older, publicly
// available draft of the standard (not entirely up-to-date, but close enough
// for casual browsing) is available here:
// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2011/n3242.pdf
// The following page contains more background information:
// http://www.hpl.hp.com/personal/Hans_Boehm/c++mm/

#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_stacktrace.h"
#include "tsan_interface_atomic.h"
#include "tsan_flags.h"
#include "tsan_rtl.h"

using namespace __tsan;  // NOLINT

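// SCOPED_ATOMIC wraps every interface function below: it captures the caller
// PC, normalizes the memory order (optionally forcing seq_cst via the
// force_seq_cst_atomics flag), updates the atomic statistics, enters the
// runtime through the ScopedAtomic RAII helper, and then dispatches to the
// corresponding Atomic* implementation.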
#define SCOPED_ATOMIC(func, ...) \
    const uptr callpc = (uptr)__builtin_return_address(0); \
    uptr pc = __sanitizer::StackTrace::GetCurrentPc(); \
    mo = ConvertOrder(mo); \
    mo = flags()->force_seq_cst_atomics ? (morder)mo_seq_cst : mo; \
    ThreadState *const thr = cur_thread(); \
    AtomicStatInc(thr, sizeof(*a), mo, StatAtomic##func); \
    ScopedAtomic sa(thr, callpc, a, mo, __FUNCTION__); \
    return Atomic##func(thr, pc, __VA_ARGS__); \
/**/

// Some shortcuts.
typedef __tsan_memory_order morder;
typedef __tsan_atomic8 a8;
typedef __tsan_atomic16 a16;
typedef __tsan_atomic32 a32;
typedef __tsan_atomic64 a64;
typedef __tsan_atomic128 a128;
const morder mo_relaxed = __tsan_memory_order_relaxed;
const morder mo_consume = __tsan_memory_order_consume;
const morder mo_acquire = __tsan_memory_order_acquire;
const morder mo_release = __tsan_memory_order_release;
const morder mo_acq_rel = __tsan_memory_order_acq_rel;
const morder mo_seq_cst = __tsan_memory_order_seq_cst;

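// RAII helper: brackets each atomic operation with FuncEntry/FuncExit for the
// thread's shadow call stack and with the in_rtl counter that marks
// runtime-internal execution.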
class ScopedAtomic {
 public:
  ScopedAtomic(ThreadState *thr, uptr pc, const volatile void *a,
               morder mo, const char *func)
      : thr_(thr) {
    CHECK_EQ(thr_->in_rtl, 0);
    ProcessPendingSignals(thr);
    FuncEntry(thr_, pc);
    DPrintf("#%d: %s(%p, %d)\n", thr_->tid, func, a, mo);
    thr_->in_rtl++;
  }
  ~ScopedAtomic() {
    thr_->in_rtl--;
    CHECK_EQ(thr_->in_rtl, 0);
    FuncExit(thr_);
  }
 private:
  ThreadState *thr_;
};

static void AtomicStatInc(ThreadState *thr, uptr size, morder mo, StatType t) {
  StatInc(thr, StatAtomic);
  StatInc(thr, t);
  StatInc(thr, size == 1 ? StatAtomic1
             : size == 2 ? StatAtomic2
             : size == 4 ? StatAtomic4
             : size == 8 ? StatAtomic8
             :             StatAtomic16);
  StatInc(thr, mo == mo_relaxed ? StatAtomicRelaxed
             : mo == mo_consume ? StatAtomicConsume
             : mo == mo_acquire ? StatAtomicAcquire
             : mo == mo_release ? StatAtomicRelease
             : mo == mo_acq_rel ? StatAtomicAcq_Rel
             :                    StatAtomicSeq_Cst);
}

static bool IsLoadOrder(morder mo) {
  return mo == mo_relaxed || mo == mo_consume
      || mo == mo_acquire || mo == mo_seq_cst;
}

static bool IsStoreOrder(morder mo) {
  return mo == mo_relaxed || mo == mo_release || mo == mo_seq_cst;
}

static bool IsReleaseOrder(morder mo) {
  return mo == mo_release || mo == mo_acq_rel || mo == mo_seq_cst;
}

static bool IsAcquireOrder(morder mo) {
  return mo == mo_consume || mo == mo_acquire
      || mo == mo_acq_rel || mo == mo_seq_cst;
}

static bool IsAcqRelOrder(morder mo) {
  return mo == mo_acq_rel || mo == mo_seq_cst;
}

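// Normalizes the memory order constant. Values above 100500 presumably come
// from older compiler instrumentation that encoded the orders as a bitmask
// offset by 100500; they are mapped back onto the plain enumeration used
// throughout this file.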
static morder ConvertOrder(morder mo) {
  if (mo > (morder)100500) {
    mo = morder(mo - 100500);
    if (mo == morder(1 << 0))
      mo = mo_relaxed;
    else if (mo == morder(1 << 1))
      mo = mo_consume;
    else if (mo == morder(1 << 2))
      mo = mo_acquire;
    else if (mo == morder(1 << 3))
      mo = mo_release;
    else if (mo == morder(1 << 4))
      mo = mo_acq_rel;
    else if (mo == morder(1 << 5))
      mo = mo_seq_cst;
  }
  CHECK_GE(mo, mo_relaxed);
  CHECK_LE(mo, mo_seq_cst);
  return mo;
}

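// The func_* helpers below perform the actual hardware atomic operation via
// the __sync builtins; the runtime only models the happens-before ordering
// around them.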
template<typename T> T func_xchg(volatile T *v, T op) {
  T res = __sync_lock_test_and_set(v, op);
  // __sync_lock_test_and_set does not contain a full barrier.
  __sync_synchronize();
  return res;
}

template<typename T> T func_add(volatile T *v, T op) {
  return __sync_fetch_and_add(v, op);
}

template<typename T> T func_sub(volatile T *v, T op) {
  return __sync_fetch_and_sub(v, op);
}

template<typename T> T func_and(volatile T *v, T op) {
  return __sync_fetch_and_and(v, op);
}

template<typename T> T func_or(volatile T *v, T op) {
  return __sync_fetch_and_or(v, op);
}

template<typename T> T func_xor(volatile T *v, T op) {
  return __sync_fetch_and_xor(v, op);
}

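// func_nand is emulated with a compare-and-swap loop: recompute ~(cmp & op)
// until the CAS observes the value the computation was based on.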
template<typename T> T func_nand(volatile T *v, T op) {
  // clang does not support __sync_fetch_and_nand.
  T cmp = *v;
  for (;;) {
    T newv = ~(cmp & op);
    T cur = __sync_val_compare_and_swap(v, cmp, newv);
    if (cmp == cur)
      return cmp;
    cmp = cur;
  }
}

template<typename T> T func_cas(volatile T *v, T cmp, T xch) {
  return __sync_val_compare_and_swap(v, cmp, xch);
}

// clang does not support 128-bit atomic ops.
// Atomic ops are executed under the tsan internal mutex,
// so here we assume that the atomic variables are not concurrently
// accessed from non-instrumented code.
#ifndef __GCC_HAVE_SYNC_COMPARE_AND_SWAP_16
a128 func_xchg(volatile a128 *v, a128 op) {
  a128 cmp = *v;
  *v = op;
  return cmp;
}

a128 func_add(volatile a128 *v, a128 op) {
  a128 cmp = *v;
  *v = cmp + op;
  return cmp;
}

a128 func_sub(volatile a128 *v, a128 op) {
  a128 cmp = *v;
  *v = cmp - op;
  return cmp;
}

a128 func_and(volatile a128 *v, a128 op) {
  a128 cmp = *v;
  *v = cmp & op;
  return cmp;
}

a128 func_or(volatile a128 *v, a128 op) {
  a128 cmp = *v;
  *v = cmp | op;
  return cmp;
}

a128 func_xor(volatile a128 *v, a128 op) {
  a128 cmp = *v;
  *v = cmp ^ op;
  return cmp;
}

a128 func_nand(volatile a128 *v, a128 op) {
  a128 cmp = *v;
  *v = ~(cmp & op);
  return cmp;
}

a128 func_cas(volatile a128 *v, a128 cmp, a128 xch) {
  a128 cur = *v;
  if (cur == cmp)
    *v = xch;
  return cur;
}
#endif

template<typename T>
static int SizeLog() {
  if (sizeof(T) <= 1)
    return kSizeLog1;
  else if (sizeof(T) <= 2)
    return kSizeLog2;
  else if (sizeof(T) <= 4)
    return kSizeLog4;
  else
    return kSizeLog8;
  // For 16-byte atomics we also use 8-byte memory access;
  // this leads to false negatives only in very obscure cases.
}

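// Loads without acquire semantics take a fast path: the access is recorded
// for race detection and the value is read directly (sizeof(a) is the pointer
// size, so only word-sized and smaller accesses qualify). Acquire loads
// additionally acquire the clock stored in the SyncVar for this address.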
template<typename T>
static T AtomicLoad(ThreadState *thr, uptr pc, const volatile T *a,
    morder mo) {
  CHECK(IsLoadOrder(mo));
  // This fast path is critical for performance.
  // Assume the access is atomic.
  if (!IsAcquireOrder(mo) && sizeof(T) <= sizeof(a)) {
    MemoryReadAtomic(thr, pc, (uptr)a, SizeLog<T>());
    return *a;
  }
  SyncVar *s = CTX()->synctab.GetOrCreateAndLock(thr, pc, (uptr)a, false);
  thr->clock.set(thr->tid, thr->fast_state.epoch());
  thr->clock.acquire(&s->clock);
  T v = *a;
  s->mtx.ReadUnlock();
  __sync_synchronize();
  MemoryReadAtomic(thr, pc, (uptr)a, SizeLog<T>());
  return v;
}

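// Release and seq_cst stores go through the slow path: the thread's clock is
// published into the SyncVar via ReleaseStore before the value is written.
// The fast path skips this, trading strict conformance (even a relaxed store
// cuts off the release sequence) for performance.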
template<typename T>
static void AtomicStore(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  CHECK(IsStoreOrder(mo));
  MemoryWriteAtomic(thr, pc, (uptr)a, SizeLog<T>());
  // This fast path is critical for performance.
  // Assume the access is atomic.
  // Strictly speaking, even a relaxed store cuts off the release sequence,
  // so we must reset the clock.
  if (!IsReleaseOrder(mo) && sizeof(T) <= sizeof(a)) {
    *a = v;
    return;
  }
  __sync_synchronize();
  SyncVar *s = CTX()->synctab.GetOrCreateAndLock(thr, pc, (uptr)a, true);
  thr->clock.set(thr->tid, thr->fast_state.epoch());
  thr->clock.ReleaseStore(&s->clock);
  *a = v;
  s->mtx.Unlock();
  // Trailing memory barrier to provide sequential consistency
  // for Dekker-like store-load synchronization.
  __sync_synchronize();
}

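// Common implementation for all read-modify-write operations; F is the
// underlying __sync-based primitive. Relaxed RMWs skip the SyncVar lookup
// entirely, otherwise the clock is acquired and/or released according to the
// memory order.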
template<typename T, T (*F)(volatile T *v, T op)>
static T AtomicRMW(ThreadState *thr, uptr pc, volatile T *a, T v, morder mo) {
  MemoryWriteAtomic(thr, pc, (uptr)a, SizeLog<T>());
  SyncVar *s = 0;
  if (mo != mo_relaxed) {
    s = CTX()->synctab.GetOrCreateAndLock(thr, pc, (uptr)a, true);
    thr->clock.set(thr->tid, thr->fast_state.epoch());
    if (IsAcqRelOrder(mo))
      thr->clock.acq_rel(&s->clock);
    else if (IsReleaseOrder(mo))
      thr->clock.release(&s->clock);
    else if (IsAcquireOrder(mo))
      thr->clock.acquire(&s->clock);
  }
  v = F(a, v);
  if (s)
    s->mtx.Unlock();
  return v;
}

template<typename T>
static T AtomicExchange(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_xchg>(thr, pc, a, v, mo);
}

template<typename T>
static T AtomicFetchAdd(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_add>(thr, pc, a, v, mo);
}

template<typename T>
static T AtomicFetchSub(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_sub>(thr, pc, a, v, mo);
}

template<typename T>
static T AtomicFetchAnd(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_and>(thr, pc, a, v, mo);
}

template<typename T>
static T AtomicFetchOr(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_or>(thr, pc, a, v, mo);
}

template<typename T>
static T AtomicFetchXor(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_xor>(thr, pc, a, v, mo);
}

template<typename T>
static T AtomicFetchNand(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_nand>(thr, pc, a, v, mo);
}

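// Strong compare-and-swap. The failure order fmo is currently ignored (the
// success order is applied to both outcomes); on failure the observed value
// is written back through *c, matching compare_exchange semantics.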
template<typename T>
static bool AtomicCAS(ThreadState *thr, uptr pc,
    volatile T *a, T *c, T v, morder mo, morder fmo) {
  (void)fmo;  // Unused because LLVM does not pass it yet.
  MemoryWriteAtomic(thr, pc, (uptr)a, SizeLog<T>());
  SyncVar *s = 0;
  if (mo != mo_relaxed) {
    s = CTX()->synctab.GetOrCreateAndLock(thr, pc, (uptr)a, true);
    thr->clock.set(thr->tid, thr->fast_state.epoch());
    if (IsAcqRelOrder(mo))
      thr->clock.acq_rel(&s->clock);
    else if (IsReleaseOrder(mo))
      thr->clock.release(&s->clock);
    else if (IsAcquireOrder(mo))
      thr->clock.acquire(&s->clock);
  }
  T cc = *c;
  T pr = func_cas(a, cc, v);
  if (s)
    s->mtx.Unlock();
  if (pr == cc)
    return true;
  *c = pr;
  return false;
}

template<typename T>
static T AtomicCAS(ThreadState *thr, uptr pc,
    volatile T *a, T c, T v, morder mo, morder fmo) {
  AtomicCAS(thr, pc, a, &c, v, mo, fmo);
  return c;
}

static void AtomicFence(ThreadState *thr, uptr pc, morder mo) {
  // FIXME(dvyukov): not implemented.
  __sync_synchronize();
}

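// The entry points below are what compiler instrumentation calls in place of
// the original atomic operations. For illustration only (the exact lowering
// is up to the compiler), a C++11 access such as
//
//   std::atomic<int> flag;
//   int v = flag.load(std::memory_order_acquire);
//
// is expected to reach the runtime roughly as
//
//   a32 v = __tsan_atomic32_load((const volatile a32 *)&flag,
//                                __tsan_memory_order_acquire);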
a8 __tsan_atomic8_load(const volatile a8 *a, morder mo) {
  SCOPED_ATOMIC(Load, a, mo);
}

a16 __tsan_atomic16_load(const volatile a16 *a, morder mo) {
  SCOPED_ATOMIC(Load, a, mo);
}

a32 __tsan_atomic32_load(const volatile a32 *a, morder mo) {
  SCOPED_ATOMIC(Load, a, mo);
}

a64 __tsan_atomic64_load(const volatile a64 *a, morder mo) {
  SCOPED_ATOMIC(Load, a, mo);
}

#if __TSAN_HAS_INT128
a128 __tsan_atomic128_load(const volatile a128 *a, morder mo) {
  SCOPED_ATOMIC(Load, a, mo);
}
#endif

void __tsan_atomic8_store(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(Store, a, v, mo);
}

void __tsan_atomic16_store(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(Store, a, v, mo);
}

void __tsan_atomic32_store(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(Store, a, v, mo);
}

void __tsan_atomic64_store(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(Store, a, v, mo);
}

#if __TSAN_HAS_INT128
void __tsan_atomic128_store(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(Store, a, v, mo);
}
#endif

a8 __tsan_atomic8_exchange(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(Exchange, a, v, mo);
}

a16 __tsan_atomic16_exchange(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(Exchange, a, v, mo);
}

a32 __tsan_atomic32_exchange(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(Exchange, a, v, mo);
}

a64 __tsan_atomic64_exchange(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(Exchange, a, v, mo);
}

#if __TSAN_HAS_INT128
a128 __tsan_atomic128_exchange(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(Exchange, a, v, mo);
}
#endif

a8 __tsan_atomic8_fetch_add(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(FetchAdd, a, v, mo);
}

a16 __tsan_atomic16_fetch_add(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(FetchAdd, a, v, mo);
}

a32 __tsan_atomic32_fetch_add(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(FetchAdd, a, v, mo);
}

a64 __tsan_atomic64_fetch_add(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(FetchAdd, a, v, mo);
}

#if __TSAN_HAS_INT128
a128 __tsan_atomic128_fetch_add(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(FetchAdd, a, v, mo);
}
#endif

a8 __tsan_atomic8_fetch_sub(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(FetchSub, a, v, mo);
}

a16 __tsan_atomic16_fetch_sub(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(FetchSub, a, v, mo);
}

a32 __tsan_atomic32_fetch_sub(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(FetchSub, a, v, mo);
}

a64 __tsan_atomic64_fetch_sub(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(FetchSub, a, v, mo);
}

#if __TSAN_HAS_INT128
a128 __tsan_atomic128_fetch_sub(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(FetchSub, a, v, mo);
}
#endif

a8 __tsan_atomic8_fetch_and(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(FetchAnd, a, v, mo);
}

a16 __tsan_atomic16_fetch_and(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(FetchAnd, a, v, mo);
}

a32 __tsan_atomic32_fetch_and(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(FetchAnd, a, v, mo);
}

a64 __tsan_atomic64_fetch_and(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(FetchAnd, a, v, mo);
}

#if __TSAN_HAS_INT128
a128 __tsan_atomic128_fetch_and(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(FetchAnd, a, v, mo);
}
#endif

a8 __tsan_atomic8_fetch_or(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(FetchOr, a, v, mo);
}

a16 __tsan_atomic16_fetch_or(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(FetchOr, a, v, mo);
}

a32 __tsan_atomic32_fetch_or(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(FetchOr, a, v, mo);
}

a64 __tsan_atomic64_fetch_or(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(FetchOr, a, v, mo);
}

#if __TSAN_HAS_INT128
a128 __tsan_atomic128_fetch_or(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(FetchOr, a, v, mo);
}
#endif

a8 __tsan_atomic8_fetch_xor(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(FetchXor, a, v, mo);
}

a16 __tsan_atomic16_fetch_xor(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(FetchXor, a, v, mo);
}

a32 __tsan_atomic32_fetch_xor(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(FetchXor, a, v, mo);
}

a64 __tsan_atomic64_fetch_xor(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(FetchXor, a, v, mo);
}

#if __TSAN_HAS_INT128
a128 __tsan_atomic128_fetch_xor(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(FetchXor, a, v, mo);
}
#endif

a8 __tsan_atomic8_fetch_nand(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(FetchNand, a, v, mo);
}

a16 __tsan_atomic16_fetch_nand(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(FetchNand, a, v, mo);
}

a32 __tsan_atomic32_fetch_nand(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(FetchNand, a, v, mo);
}

a64 __tsan_atomic64_fetch_nand(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(FetchNand, a, v, mo);
}

#if __TSAN_HAS_INT128
a128 __tsan_atomic128_fetch_nand(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(FetchNand, a, v, mo);
}
#endif

int __tsan_atomic8_compare_exchange_strong(volatile a8 *a, a8 *c, a8 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

int __tsan_atomic16_compare_exchange_strong(volatile a16 *a, a16 *c, a16 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

int __tsan_atomic32_compare_exchange_strong(volatile a32 *a, a32 *c, a32 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

int __tsan_atomic64_compare_exchange_strong(volatile a64 *a, a64 *c, a64 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

#if __TSAN_HAS_INT128
int __tsan_atomic128_compare_exchange_strong(volatile a128 *a, a128 *c, a128 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}
#endif

int __tsan_atomic8_compare_exchange_weak(volatile a8 *a, a8 *c, a8 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

int __tsan_atomic16_compare_exchange_weak(volatile a16 *a, a16 *c, a16 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

int __tsan_atomic32_compare_exchange_weak(volatile a32 *a, a32 *c, a32 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

int __tsan_atomic64_compare_exchange_weak(volatile a64 *a, a64 *c, a64 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

#if __TSAN_HAS_INT128
int __tsan_atomic128_compare_exchange_weak(volatile a128 *a, a128 *c, a128 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}
#endif

a8 __tsan_atomic8_compare_exchange_val(volatile a8 *a, a8 c, a8 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

a16 __tsan_atomic16_compare_exchange_val(volatile a16 *a, a16 c, a16 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

a32 __tsan_atomic32_compare_exchange_val(volatile a32 *a, a32 c, a32 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

a64 __tsan_atomic64_compare_exchange_val(volatile a64 *a, a64 c, a64 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

#if __TSAN_HAS_INT128
a128 __tsan_atomic128_compare_exchange_val(volatile a128 *a, a128 c, a128 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}
#endif

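// The dummy 'a' below exists only because SCOPED_ATOMIC references an atomic
// address for statistics and debug output; a fence has no associated address.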
void __tsan_atomic_thread_fence(morder mo) {
  char* a = 0;
  SCOPED_ATOMIC(Fence, mo);
}

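// Nothing to do for a signal fence: it only orders operations with respect to
// a signal handler running in the same thread, so no inter-thread
// synchronization state needs to be updated here.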
void __tsan_atomic_signal_fence(morder mo) {
}