tsan_interface_atomic.cc revision 334553ec45d8982df45a6f5e656e068142ecde3f
//===-- tsan_interface_atomic.cc ------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//

// ThreadSanitizer atomic operations are based on the C++11/C1x standards.
// For background see the C++11 standard.  A slightly older, publicly
// available draft of the standard (not entirely up-to-date, but close enough
// for casual browsing) is available here:
// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2011/n3242.pdf
// The following page contains more background information:
// http://www.hpl.hp.com/personal/Hans_Boehm/c++mm/

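// Illustrative sketch (not part of the runtime): when ThreadSanitizer
// instruments user code, C++11 atomic operations are lowered to calls into
// the entry points defined in this file. Roughly, user code such as
//
//   std::atomic<int> flag;
//   flag.store(1, std::memory_order_release);
//   int v = flag.load(std::memory_order_acquire);
//
// is executed via calls equivalent to
//
//   __tsan_atomic32_store(reinterpret_cast<volatile a32*>(&flag), 1,
//                         __tsan_memory_order_release);
//   a32 v = __tsan_atomic32_load(reinterpret_cast<volatile a32*>(&flag),
//                                __tsan_memory_order_acquire);
//
// The variable names above are hypothetical; the exact lowering is performed
// by the compiler instrumentation pass.
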
#include "sanitizer_common/sanitizer_placement_new.h"
#include "tsan_interface_atomic.h"
#include "tsan_flags.h"
#include "tsan_rtl.h"

using namespace __tsan;  // NOLINT

class ScopedAtomic {
 public:
  ScopedAtomic(ThreadState *thr, uptr pc, const char *func)
      : thr_(thr) {
    CHECK_EQ(thr_->in_rtl, 1);  // 1 due to our own ScopedInRtl member.
    DPrintf("#%d: %s\n", thr_->tid, func);
  }
  ~ScopedAtomic() {
    CHECK_EQ(thr_->in_rtl, 1);
  }
 private:
  ThreadState *thr_;
  ScopedInRtl in_rtl_;
};

// Some shortcuts.
typedef __tsan_memory_order morder;
typedef __tsan_atomic8 a8;
typedef __tsan_atomic16 a16;
typedef __tsan_atomic32 a32;
typedef __tsan_atomic64 a64;
typedef __tsan_atomic128 a128;
const morder mo_relaxed = __tsan_memory_order_relaxed;
const morder mo_consume = __tsan_memory_order_consume;
const morder mo_acquire = __tsan_memory_order_acquire;
const morder mo_release = __tsan_memory_order_release;
const morder mo_acq_rel = __tsan_memory_order_acq_rel;
const morder mo_seq_cst = __tsan_memory_order_seq_cst;

static void AtomicStatInc(ThreadState *thr, uptr size, morder mo, StatType t) {
  StatInc(thr, StatAtomic);
  StatInc(thr, t);
  StatInc(thr, size == 1 ? StatAtomic1
             : size == 2 ? StatAtomic2
             : size == 4 ? StatAtomic4
             : size == 8 ? StatAtomic8
             :             StatAtomic16);
  StatInc(thr, mo == mo_relaxed ? StatAtomicRelaxed
             : mo == mo_consume ? StatAtomicConsume
             : mo == mo_acquire ? StatAtomicAcquire
             : mo == mo_release ? StatAtomicRelease
             : mo == mo_acq_rel ? StatAtomicAcq_Rel
             :                    StatAtomicSeq_Cst);
}

static bool IsLoadOrder(morder mo) {
  return mo == mo_relaxed || mo == mo_consume
      || mo == mo_acquire || mo == mo_seq_cst;
}

static bool IsStoreOrder(morder mo) {
  return mo == mo_relaxed || mo == mo_release || mo == mo_seq_cst;
}

static bool IsReleaseOrder(morder mo) {
  return mo == mo_release || mo == mo_acq_rel || mo == mo_seq_cst;
}

static bool IsAcquireOrder(morder mo) {
  return mo == mo_consume || mo == mo_acquire
      || mo == mo_acq_rel || mo == mo_seq_cst;
}

static bool IsAcqRelOrder(morder mo) {
  return mo == mo_acq_rel || mo == mo_seq_cst;
}

static morder ConvertOrder(morder mo) {
  // Values above 100500 use an alternative encoding of the memory order,
  // (1 << order) + 100500; map them back onto the plain enum constants.
  if (mo > (morder)100500) {
    mo = morder(mo - 100500);
    if (mo == morder(1 << 0))
      mo = mo_relaxed;
    else if (mo == morder(1 << 1))
      mo = mo_consume;
    else if (mo == morder(1 << 2))
      mo = mo_acquire;
    else if (mo == morder(1 << 3))
      mo = mo_release;
    else if (mo == morder(1 << 4))
      mo = mo_acq_rel;
    else if (mo == morder(1 << 5))
      mo = mo_seq_cst;
  }
  CHECK_GE(mo, mo_relaxed);
  CHECK_LE(mo, mo_seq_cst);
  return mo;
}

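// For example, ConvertOrder(morder(100500 + (1 << 3))) yields mo_release,
// while values that are already plain enum constants pass through unchanged
// (subject only to the range check).
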
template<typename T> T func_xchg(volatile T *v, T op) {
  T res = __sync_lock_test_and_set(v, op);
  // __sync_lock_test_and_set does not contain a full barrier.
  __sync_synchronize();
  return res;
}

template<typename T> T func_add(volatile T *v, T op) {
  return __sync_fetch_and_add(v, op);
}

template<typename T> T func_sub(volatile T *v, T op) {
  return __sync_fetch_and_sub(v, op);
}

template<typename T> T func_and(volatile T *v, T op) {
  return __sync_fetch_and_and(v, op);
}

template<typename T> T func_or(volatile T *v, T op) {
  return __sync_fetch_and_or(v, op);
}

template<typename T> T func_xor(volatile T *v, T op) {
  return __sync_fetch_and_xor(v, op);
}

template<typename T> T func_nand(volatile T *v, T op) {
  // clang does not support __sync_fetch_and_nand.
  T cmp = *v;
  for (;;) {
    T newv = ~(cmp & op);
    T cur = __sync_val_compare_and_swap(v, cmp, newv);
    if (cmp == cur)
      return cmp;
    cmp = cur;
  }
}

template<typename T> T func_cas(volatile T *v, T cmp, T xch) {
  return __sync_val_compare_and_swap(v, cmp, xch);
}

// clang does not support 128-bit atomic ops.
// Atomic ops are executed under tsan's internal mutex,
// so here we assume that the atomic variables are not
// concurrently accessed from non-instrumented code.
#ifndef __GCC_HAVE_SYNC_COMPARE_AND_SWAP_16
a128 func_xchg(volatile a128 *v, a128 op) {
  a128 cmp = *v;
  *v = op;
  return cmp;
}

a128 func_add(volatile a128 *v, a128 op) {
  a128 cmp = *v;
  *v = cmp + op;
  return cmp;
}

a128 func_sub(volatile a128 *v, a128 op) {
  a128 cmp = *v;
  *v = cmp - op;
  return cmp;
}

a128 func_and(volatile a128 *v, a128 op) {
  a128 cmp = *v;
  *v = cmp & op;
  return cmp;
}

a128 func_or(volatile a128 *v, a128 op) {
  a128 cmp = *v;
  *v = cmp | op;
  return cmp;
}

a128 func_xor(volatile a128 *v, a128 op) {
  a128 cmp = *v;
  *v = cmp ^ op;
  return cmp;
}

a128 func_nand(volatile a128 *v, a128 op) {
  a128 cmp = *v;
  *v = ~(cmp & op);
  return cmp;
}

a128 func_cas(volatile a128 *v, a128 cmp, a128 xch) {
  a128 cur = *v;
  if (cur == cmp)
    *v = xch;
  return cur;
}
#endif

#define SCOPED_ATOMIC(func, ...) \
    mo = ConvertOrder(mo); \
    mo = flags()->force_seq_cst_atomics ? (morder)mo_seq_cst : mo; \
    ThreadState *const thr = cur_thread(); \
    ProcessPendingSignals(thr); \
    const uptr pc = (uptr)__builtin_return_address(0); \
    AtomicStatInc(thr, sizeof(*a), mo, StatAtomic##func); \
    ScopedAtomic sa(thr, pc, __FUNCTION__); \
    return Atomic##func(thr, pc, __VA_ARGS__); \
/**/

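// Illustrative sketch (not part of the runtime): inside an interface function
// such as __tsan_atomic32_load, SCOPED_ATOMIC(Load, a, mo) expands roughly to
//
//   mo = ConvertOrder(mo);
//   mo = flags()->force_seq_cst_atomics ? (morder)mo_seq_cst : mo;
//   ThreadState *const thr = cur_thread();
//   ProcessPendingSignals(thr);
//   const uptr pc = (uptr)__builtin_return_address(0);
//   AtomicStatInc(thr, sizeof(*a), mo, StatAtomicLoad);
//   ScopedAtomic sa(thr, pc, __FUNCTION__);
//   return AtomicLoad(thr, pc, a, mo);
//
// i.e. it normalizes the memory order, updates statistics, enters the runtime
// via ScopedAtomic, and dispatches to the templated implementation below.
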
template<typename T>
static int SizeLog() {
  if (sizeof(T) <= 1)
    return kSizeLog1;
  else if (sizeof(T) <= 2)
    return kSizeLog2;
  else if (sizeof(T) <= 4)
    return kSizeLog4;
  else
    return kSizeLog8;
  // For 16-byte atomics we also use 8-byte memory access,
  // which leads to false negatives only in very obscure cases.
}

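// Illustrative sketch of the caveat above (hypothetical example): for an a128
// variable only an 8-byte access at the base address is recorded, so a plain
// racy write that touches only the upper half of the object may go unreported:
//
//   a128 x;  // 16-byte atomic
//   // Thread 1:
//   __tsan_atomic128_load(&x, __tsan_memory_order_relaxed);
//   // Thread 2 (racy, non-atomic):
//   reinterpret_cast<char *>(&x)[12] = 1;  // falls outside the tracked
//                                          // 8-byte shadow access
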
template<typename T>
static T AtomicLoad(ThreadState *thr, uptr pc, const volatile T *a,
    morder mo) {
  CHECK(IsLoadOrder(mo));
  // This fast-path is critical for performance.
  // Assume the access is atomic.
  if (!IsAcquireOrder(mo) && sizeof(T) <= sizeof(a)) {
    MemoryReadAtomic(thr, pc, (uptr)a, SizeLog<T>());
    return *a;
  }
  SyncVar *s = CTX()->synctab.GetOrCreateAndLock(thr, pc, (uptr)a, false);
  thr->clock.set(thr->tid, thr->fast_state.epoch());
  thr->clock.acquire(&s->clock);
  T v = *a;
  s->mtx.ReadUnlock();
  __sync_synchronize();
  MemoryReadAtomic(thr, pc, (uptr)a, SizeLog<T>());
  return v;
}

template<typename T>
static void AtomicStore(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  CHECK(IsStoreOrder(mo));
  MemoryWriteAtomic(thr, pc, (uptr)a, SizeLog<T>());
  // This fast-path is critical for performance.
  // Assume the access is atomic.
  // Strictly speaking, even a relaxed store cuts off the release sequence,
  // so we must reset the clock.
  if (!IsReleaseOrder(mo) && sizeof(T) <= sizeof(a)) {
    *a = v;
    return;
  }
  __sync_synchronize();
  SyncVar *s = CTX()->synctab.GetOrCreateAndLock(thr, pc, (uptr)a, true);
  thr->clock.set(thr->tid, thr->fast_state.epoch());
  thr->clock.ReleaseStore(&s->clock);
  *a = v;
  s->mtx.Unlock();
  // Trailing memory barrier to provide sequential consistency
  // for Dekker-like store-load synchronization.
  __sync_synchronize();
}

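// Illustrative sketch (hypothetical user code): a release store publishes the
// storing thread's vector clock into the SyncVar, and a subsequent acquire
// load of the same location joins that clock into the loading thread's clock.
// This is the happens-before edge that keeps the plain accesses to `data`
// below from being reported as a race:
//
//   int data;                 // plain, non-atomic
//   __tsan_atomic32 ready;    // atomic flag
//
//   // Thread 1:
//   data = 42;
//   __tsan_atomic32_store(&ready, 1, __tsan_memory_order_release);
//
//   // Thread 2:
//   while (__tsan_atomic32_load(&ready, __tsan_memory_order_acquire) == 0) {}
//   int v = data;             // synchronized via the acquire/release pair
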
template<typename T, T (*F)(volatile T *v, T op)>
static T AtomicRMW(ThreadState *thr, uptr pc, volatile T *a, T v, morder mo) {
  MemoryWriteAtomic(thr, pc, (uptr)a, SizeLog<T>());
  SyncVar *s = CTX()->synctab.GetOrCreateAndLock(thr, pc, (uptr)a, true);
  thr->clock.set(thr->tid, thr->fast_state.epoch());
  if (IsAcqRelOrder(mo))
    thr->clock.acq_rel(&s->clock);
  else if (IsReleaseOrder(mo))
    thr->clock.release(&s->clock);
  else if (IsAcquireOrder(mo))
    thr->clock.acquire(&s->clock);
  v = F(a, v);
  s->mtx.Unlock();
  return v;
}

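// Illustrative sketch (hypothetical user code): depending on the memory order,
// a read-modify-write both releases the current thread's clock into the
// SyncVar and acquires the clocks of previous writers. A typical use is
// reference counting, where the thread dropping the last reference must
// observe all prior writes made by the other owners:
//
//   if (__tsan_atomic32_fetch_sub(&refcount, 1,
//                                 __tsan_memory_order_acq_rel) == 1) {
//     // Last owner: writes of all other owners happen-before this point.
//     delete object;
//   }
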
template<typename T>
static T AtomicExchange(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_xchg>(thr, pc, a, v, mo);
}

template<typename T>
static T AtomicFetchAdd(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_add>(thr, pc, a, v, mo);
}

template<typename T>
static T AtomicFetchSub(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_sub>(thr, pc, a, v, mo);
}

template<typename T>
static T AtomicFetchAnd(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_and>(thr, pc, a, v, mo);
}

template<typename T>
static T AtomicFetchOr(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_or>(thr, pc, a, v, mo);
}

template<typename T>
static T AtomicFetchXor(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_xor>(thr, pc, a, v, mo);
}

template<typename T>
static T AtomicFetchNand(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_nand>(thr, pc, a, v, mo);
}

template<typename T>
static bool AtomicCAS(ThreadState *thr, uptr pc,
    volatile T *a, T *c, T v, morder mo, morder fmo) {
  (void)fmo;  // Unused because llvm does not pass it yet.
  MemoryWriteAtomic(thr, pc, (uptr)a, SizeLog<T>());
  SyncVar *s = CTX()->synctab.GetOrCreateAndLock(thr, pc, (uptr)a, true);
  thr->clock.set(thr->tid, thr->fast_state.epoch());
  if (IsAcqRelOrder(mo))
    thr->clock.acq_rel(&s->clock);
  else if (IsReleaseOrder(mo))
    thr->clock.release(&s->clock);
  else if (IsAcquireOrder(mo))
    thr->clock.acquire(&s->clock);
  T cc = *c;
  T pr = func_cas(a, cc, v);
  s->mtx.Unlock();
  if (pr == cc)
    return true;
  *c = pr;
  return false;
}

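// Illustrative sketch (hypothetical user code): as in C11, on failure the
// expected value is updated with the value actually observed, so a typical
// retry loop re-reads it for free:
//
//   a32 expected = __tsan_atomic32_load(&counter, __tsan_memory_order_relaxed);
//   while (!__tsan_atomic32_compare_exchange_weak(
//              &counter, &expected, expected + 1,
//              __tsan_memory_order_acq_rel, __tsan_memory_order_relaxed)) {
//     // `expected` now holds the current value of `counter`; just retry.
//   }
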
template<typename T>
static T AtomicCAS(ThreadState *thr, uptr pc,
    volatile T *a, T c, T v, morder mo, morder fmo) {
  AtomicCAS(thr, pc, a, &c, v, mo, fmo);
  return c;
}

static void AtomicFence(ThreadState *thr, uptr pc, morder mo) {
  // FIXME(dvyukov): not implemented.
  __sync_synchronize();
}

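// Note (reasoning from the FIXME above): since fences are not modeled in the
// happens-before machinery and only a hardware barrier is issued,
// synchronization established purely via atomic_thread_fence may not be
// recognized by the race detector and can lead to false positives.
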
a8 __tsan_atomic8_load(const volatile a8 *a, morder mo) {
  SCOPED_ATOMIC(Load, a, mo);
}

a16 __tsan_atomic16_load(const volatile a16 *a, morder mo) {
  SCOPED_ATOMIC(Load, a, mo);
}

a32 __tsan_atomic32_load(const volatile a32 *a, morder mo) {
  SCOPED_ATOMIC(Load, a, mo);
}

a64 __tsan_atomic64_load(const volatile a64 *a, morder mo) {
  SCOPED_ATOMIC(Load, a, mo);
}

#if __TSAN_HAS_INT128
a128 __tsan_atomic128_load(const volatile a128 *a, morder mo) {
  SCOPED_ATOMIC(Load, a, mo);
}
#endif

void __tsan_atomic8_store(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(Store, a, v, mo);
}

void __tsan_atomic16_store(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(Store, a, v, mo);
}

void __tsan_atomic32_store(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(Store, a, v, mo);
}

void __tsan_atomic64_store(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(Store, a, v, mo);
}

#if __TSAN_HAS_INT128
void __tsan_atomic128_store(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(Store, a, v, mo);
}
#endif

a8 __tsan_atomic8_exchange(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(Exchange, a, v, mo);
}

a16 __tsan_atomic16_exchange(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(Exchange, a, v, mo);
}

a32 __tsan_atomic32_exchange(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(Exchange, a, v, mo);
}

a64 __tsan_atomic64_exchange(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(Exchange, a, v, mo);
}

#if __TSAN_HAS_INT128
a128 __tsan_atomic128_exchange(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(Exchange, a, v, mo);
}
#endif

a8 __tsan_atomic8_fetch_add(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(FetchAdd, a, v, mo);
}

a16 __tsan_atomic16_fetch_add(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(FetchAdd, a, v, mo);
}

a32 __tsan_atomic32_fetch_add(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(FetchAdd, a, v, mo);
}

a64 __tsan_atomic64_fetch_add(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(FetchAdd, a, v, mo);
}

#if __TSAN_HAS_INT128
a128 __tsan_atomic128_fetch_add(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(FetchAdd, a, v, mo);
}
#endif

a8 __tsan_atomic8_fetch_sub(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(FetchSub, a, v, mo);
}

a16 __tsan_atomic16_fetch_sub(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(FetchSub, a, v, mo);
}

a32 __tsan_atomic32_fetch_sub(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(FetchSub, a, v, mo);
}

a64 __tsan_atomic64_fetch_sub(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(FetchSub, a, v, mo);
}

#if __TSAN_HAS_INT128
a128 __tsan_atomic128_fetch_sub(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(FetchSub, a, v, mo);
}
#endif

a8 __tsan_atomic8_fetch_and(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(FetchAnd, a, v, mo);
}

a16 __tsan_atomic16_fetch_and(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(FetchAnd, a, v, mo);
}

a32 __tsan_atomic32_fetch_and(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(FetchAnd, a, v, mo);
}

a64 __tsan_atomic64_fetch_and(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(FetchAnd, a, v, mo);
}

#if __TSAN_HAS_INT128
a128 __tsan_atomic128_fetch_and(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(FetchAnd, a, v, mo);
}
#endif

a8 __tsan_atomic8_fetch_or(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(FetchOr, a, v, mo);
}

a16 __tsan_atomic16_fetch_or(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(FetchOr, a, v, mo);
}

a32 __tsan_atomic32_fetch_or(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(FetchOr, a, v, mo);
}

a64 __tsan_atomic64_fetch_or(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(FetchOr, a, v, mo);
}

#if __TSAN_HAS_INT128
a128 __tsan_atomic128_fetch_or(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(FetchOr, a, v, mo);
}
#endif

a8 __tsan_atomic8_fetch_xor(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(FetchXor, a, v, mo);
}

a16 __tsan_atomic16_fetch_xor(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(FetchXor, a, v, mo);
}

a32 __tsan_atomic32_fetch_xor(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(FetchXor, a, v, mo);
}

a64 __tsan_atomic64_fetch_xor(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(FetchXor, a, v, mo);
}

#if __TSAN_HAS_INT128
a128 __tsan_atomic128_fetch_xor(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(FetchXor, a, v, mo);
}
#endif

a8 __tsan_atomic8_fetch_nand(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(FetchNand, a, v, mo);
}

a16 __tsan_atomic16_fetch_nand(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(FetchNand, a, v, mo);
}

a32 __tsan_atomic32_fetch_nand(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(FetchNand, a, v, mo);
}

a64 __tsan_atomic64_fetch_nand(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(FetchNand, a, v, mo);
}

#if __TSAN_HAS_INT128
a128 __tsan_atomic128_fetch_nand(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(FetchNand, a, v, mo);
}
#endif

int __tsan_atomic8_compare_exchange_strong(volatile a8 *a, a8 *c, a8 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

int __tsan_atomic16_compare_exchange_strong(volatile a16 *a, a16 *c, a16 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

int __tsan_atomic32_compare_exchange_strong(volatile a32 *a, a32 *c, a32 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

int __tsan_atomic64_compare_exchange_strong(volatile a64 *a, a64 *c, a64 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

#if __TSAN_HAS_INT128
int __tsan_atomic128_compare_exchange_strong(volatile a128 *a, a128 *c, a128 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}
#endif

int __tsan_atomic8_compare_exchange_weak(volatile a8 *a, a8 *c, a8 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

int __tsan_atomic16_compare_exchange_weak(volatile a16 *a, a16 *c, a16 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

int __tsan_atomic32_compare_exchange_weak(volatile a32 *a, a32 *c, a32 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

int __tsan_atomic64_compare_exchange_weak(volatile a64 *a, a64 *c, a64 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

#if __TSAN_HAS_INT128
int __tsan_atomic128_compare_exchange_weak(volatile a128 *a, a128 *c, a128 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}
#endif

a8 __tsan_atomic8_compare_exchange_val(volatile a8 *a, a8 c, a8 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

a16 __tsan_atomic16_compare_exchange_val(volatile a16 *a, a16 c, a16 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

a32 __tsan_atomic32_compare_exchange_val(volatile a32 *a, a32 c, a32 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

a64 __tsan_atomic64_compare_exchange_val(volatile a64 *a, a64 c, a64 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

#if __TSAN_HAS_INT128
a128 __tsan_atomic128_compare_exchange_val(volatile a128 *a, a128 c, a128 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}
#endif

void __tsan_atomic_thread_fence(morder mo) {
  char *a;  // Only used for sizeof(*a) in SCOPED_ATOMIC's stats accounting.
  SCOPED_ATOMIC(Fence, mo);
}

void __tsan_atomic_signal_fence(morder mo) {
  // Signal fences only order operations within a single thread
  // (with respect to a signal handler running in it), so no
  // inter-thread synchronization needs to be modeled here.
}