tsan_interface_atomic.cc revision ddbe2be9dc8b8eb7d40017c077e8392daf708510
//===-- tsan_interface_atomic.cc ------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//

// ThreadSanitizer atomic operations are based on the C++11/C11 standards.
// For background see the C++11 standard.  A slightly older, publicly
// available draft of the standard (not entirely up-to-date, but close enough
// for casual browsing) is available here:
// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2011/n3242.pdf
// The following page contains more background information:
// http://www.hpl.hp.com/personal/Hans_Boehm/c++mm/
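//
// For illustration (a sketch, assuming the usual clang instrumentation of
// C++11 atomics): a source-level operation such as
//   std::atomic<int> x;
//   int v = x.load(std::memory_order_acquire);
// is compiled into a call to the corresponding entry point defined below,
// here roughly __tsan_atomic32_load(&x, __tsan_memory_order_acquire), which
// performs the actual memory access and updates the race detector state.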

#include "sanitizer_common/sanitizer_placement_new.h"
#include "tsan_interface_atomic.h"
#include "tsan_flags.h"
#include "tsan_rtl.h"

using namespace __tsan;  // NOLINT

class ScopedAtomic {
 public:
  ScopedAtomic(ThreadState *thr, uptr pc, const char *func)
      : thr_(thr) {
    CHECK_EQ(thr_->in_rtl, 1);  // 1 due to our own ScopedInRtl member.
    DPrintf("#%d: %s\n", thr_->tid, func);
  }
  ~ScopedAtomic() {
    CHECK_EQ(thr_->in_rtl, 1);
  }
 private:
  ThreadState *thr_;
  ScopedInRtl in_rtl_;
};

// Some shortcuts.
typedef __tsan_memory_order morder;
typedef __tsan_atomic8 a8;
typedef __tsan_atomic16 a16;
typedef __tsan_atomic32 a32;
typedef __tsan_atomic64 a64;
typedef __tsan_atomic128 a128;
const morder mo_relaxed = __tsan_memory_order_relaxed;
const morder mo_consume = __tsan_memory_order_consume;
const morder mo_acquire = __tsan_memory_order_acquire;
const morder mo_release = __tsan_memory_order_release;
const morder mo_acq_rel = __tsan_memory_order_acq_rel;
const morder mo_seq_cst = __tsan_memory_order_seq_cst;

static void AtomicStatInc(ThreadState *thr, uptr size, morder mo, StatType t) {
  StatInc(thr, StatAtomic);
  StatInc(thr, t);
  StatInc(thr, size == 1 ? StatAtomic1
             : size == 2 ? StatAtomic2
             : size == 4 ? StatAtomic4
             : size == 8 ? StatAtomic8
             :             StatAtomic16);
  StatInc(thr, mo == mo_relaxed ? StatAtomicRelaxed
             : mo == mo_consume ? StatAtomicConsume
             : mo == mo_acquire ? StatAtomicAcquire
             : mo == mo_release ? StatAtomicRelease
             : mo == mo_acq_rel ? StatAtomicAcq_Rel
             :                    StatAtomicSeq_Cst);
}

static bool IsLoadOrder(morder mo) {
  return mo == mo_relaxed || mo == mo_consume
      || mo == mo_acquire || mo == mo_seq_cst;
}

static bool IsStoreOrder(morder mo) {
  return mo == mo_relaxed || mo == mo_release || mo == mo_seq_cst;
}

static bool IsReleaseOrder(morder mo) {
  return mo == mo_release || mo == mo_acq_rel || mo == mo_seq_cst;
}

static bool IsAcquireOrder(morder mo) {
  return mo == mo_consume || mo == mo_acquire
      || mo == mo_acq_rel || mo == mo_seq_cst;
}

static bool IsAcqRelOrder(morder mo) {
  return mo == mo_acq_rel || mo == mo_seq_cst;
}

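// ConvertOrder() maps memory order constants from a legacy encoding
// (bit-flag values offset by 100500, apparently emitted by older versions
// of the instrumentation) onto the mo_relaxed..mo_seq_cst values defined
// above.  Values that are already in that range pass through unchanged.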
static morder ConvertOrder(morder mo) {
  if (mo > (morder)100500) {
    mo = morder(mo - 100500);
    if (mo == morder(1 << 0))
      mo = mo_relaxed;
    else if (mo == morder(1 << 1))
      mo = mo_consume;
    else if (mo == morder(1 << 2))
      mo = mo_acquire;
    else if (mo == morder(1 << 3))
      mo = mo_release;
    else if (mo == morder(1 << 4))
      mo = mo_acq_rel;
    else if (mo == morder(1 << 5))
      mo = mo_seq_cst;
  }
  CHECK_GE(mo, mo_relaxed);
  CHECK_LE(mo, mo_seq_cst);
  return mo;
}

template<typename T> T func_xchg(volatile T *v, T op) {
  T res = __sync_lock_test_and_set(v, op);
  // __sync_lock_test_and_set does not provide a full memory barrier.
  __sync_synchronize();
  return res;
}

template<typename T> T func_add(volatile T *v, T op) {
  return __sync_fetch_and_add(v, op);
}

template<typename T> T func_sub(volatile T *v, T op) {
  return __sync_fetch_and_sub(v, op);
}

template<typename T> T func_and(volatile T *v, T op) {
  return __sync_fetch_and_and(v, op);
}

template<typename T> T func_or(volatile T *v, T op) {
  return __sync_fetch_and_or(v, op);
}

template<typename T> T func_xor(volatile T *v, T op) {
  return __sync_fetch_and_xor(v, op);
}

template<typename T> T func_nand(volatile T *v, T op) {
  // clang does not support __sync_fetch_and_nand.
  T cmp = *v;
  for (;;) {
    T newv = ~(cmp & op);
    T cur = __sync_val_compare_and_swap(v, cmp, newv);
    if (cmp == cur)
      return cmp;
    cmp = cur;
  }
}

template<typename T> T func_cas(volatile T *v, T cmp, T xch) {
  return __sync_val_compare_and_swap(v, cmp, xch);
}

// clang does not support 128-bit atomic ops.
// Atomic ops are executed under the tsan internal mutex,
// so here we assume that the atomic variables are not accessed
// from non-instrumented code.
#ifndef __GCC_HAVE_SYNC_COMPARE_AND_SWAP_16
a128 func_xchg(volatile a128 *v, a128 op) {
  a128 cmp = *v;
  *v = op;
  return cmp;
}

a128 func_add(volatile a128 *v, a128 op) {
  a128 cmp = *v;
  *v = cmp + op;
  return cmp;
}

a128 func_sub(volatile a128 *v, a128 op) {
  a128 cmp = *v;
  *v = cmp - op;
  return cmp;
}

a128 func_and(volatile a128 *v, a128 op) {
  a128 cmp = *v;
  *v = cmp & op;
  return cmp;
}

a128 func_or(volatile a128 *v, a128 op) {
  a128 cmp = *v;
  *v = cmp | op;
  return cmp;
}

a128 func_xor(volatile a128 *v, a128 op) {
  a128 cmp = *v;
  *v = cmp ^ op;
  return cmp;
}

a128 func_nand(volatile a128 *v, a128 op) {
  a128 cmp = *v;
  *v = ~(cmp & op);
  return cmp;
}

a128 func_cas(volatile a128 *v, a128 cmp, a128 xch) {
  a128 cur = *v;
  if (cur == cmp)
    *v = xch;
  return cur;
}
#endif

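// SCOPED_ATOMIC() is the common prologue shared by all interface functions
// below.  As a rough sketch, the body of __tsan_atomic32_load(a, mo) expands
// to:
//   mo = ConvertOrder(mo);
//   mo = flags()->force_seq_cst_atomics ? (morder)mo_seq_cst : mo;
//   ThreadState *const thr = cur_thread();
//   ProcessPendingSignals(thr);
//   const uptr pc = (uptr)__builtin_return_address(0);
//   AtomicStatInc(thr, sizeof(*a), mo, StatAtomicLoad);
//   ScopedAtomic sa(thr, pc, __FUNCTION__);
//   return AtomicLoad(thr, pc, a, mo);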
#define SCOPED_ATOMIC(func, ...) \
    mo = ConvertOrder(mo); \
    mo = flags()->force_seq_cst_atomics ? (morder)mo_seq_cst : mo; \
    ThreadState *const thr = cur_thread(); \
    ProcessPendingSignals(thr); \
    const uptr pc = (uptr)__builtin_return_address(0); \
    AtomicStatInc(thr, sizeof(*a), mo, StatAtomic##func); \
    ScopedAtomic sa(thr, pc, __FUNCTION__); \
    return Atomic##func(thr, pc, __VA_ARGS__); \
/**/

template<typename T>
static int SizeLog() {
  if (sizeof(T) <= 1)
    return kSizeLog1;
  else if (sizeof(T) <= 2)
    return kSizeLog2;
  else if (sizeof(T) <= 4)
    return kSizeLog4;
  else
    return kSizeLog8;
  // For 16-byte atomics we also use 8-byte memory access,
  // this leads to false negatives only in very obscure cases.
}

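// AtomicLoad() has two paths: loads that do not need acquire semantics and
// are at most pointer-sized are served by a plain read on the fast path (the
// access is assumed to be atomic at the hardware level), while acquire and
// seq_cst loads lock the SyncVar for the address and acquire its vector
// clock before reading the value.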
template<typename T>
static T AtomicLoad(ThreadState *thr, uptr pc, const volatile T *a,
    morder mo) {
  CHECK(IsLoadOrder(mo));
  // This fast-path is critical for performance.
  // Assume the access is atomic.
  if (!IsAcquireOrder(mo) && sizeof(T) <= sizeof(a)) {
    if (flags()->report_atomic_races)
      MemoryReadAtomic(thr, pc, (uptr)a, SizeLog<T>());
    return *a;
  }
  SyncVar *s = CTX()->synctab.GetOrCreateAndLock(thr, pc, (uptr)a, false);
  thr->clock.set(thr->tid, thr->fast_state.epoch());
  thr->clock.acquire(&s->clock);
  T v = *a;
  s->mtx.ReadUnlock();
  __sync_synchronize();
  if (flags()->report_atomic_races)
    MemoryReadAtomic(thr, pc, (uptr)a, SizeLog<T>());
  return v;
}

template<typename T>
static void AtomicStore(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  CHECK(IsStoreOrder(mo));
  if (flags()->report_atomic_races)
    MemoryWriteAtomic(thr, pc, (uptr)a, SizeLog<T>());
  // This fast-path is critical for performance.
  // Assume the access is atomic.
  // Strictly speaking, even a relaxed store cuts off the release sequence,
  // so we must reset the clock.
  if (!IsReleaseOrder(mo) && sizeof(T) <= sizeof(a)) {
    *a = v;
    return;
  }
  __sync_synchronize();
  SyncVar *s = CTX()->synctab.GetOrCreateAndLock(thr, pc, (uptr)a, true);
  thr->clock.set(thr->tid, thr->fast_state.epoch());
  thr->clock.ReleaseStore(&s->clock);
  *a = v;
  s->mtx.Unlock();
  // Trailing memory barrier to provide sequential consistency
  // for Dekker-like store-load synchronization.
  __sync_synchronize();
}

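// AtomicRMW() centralizes the vector clock handling for all read-modify-write
// operations; the template parameter F is the primitive that performs the
// actual operation (func_xchg, func_add, etc.).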
template<typename T, T (*F)(volatile T *v, T op)>
static T AtomicRMW(ThreadState *thr, uptr pc, volatile T *a, T v, morder mo) {
  if (flags()->report_atomic_races)
    MemoryWriteAtomic(thr, pc, (uptr)a, SizeLog<T>());
  SyncVar *s = CTX()->synctab.GetOrCreateAndLock(thr, pc, (uptr)a, true);
  thr->clock.set(thr->tid, thr->fast_state.epoch());
  if (IsAcqRelOrder(mo))
    thr->clock.acq_rel(&s->clock);
  else if (IsReleaseOrder(mo))
    thr->clock.release(&s->clock);
  else if (IsAcquireOrder(mo))
    thr->clock.acquire(&s->clock);
  v = F(a, v);
  s->mtx.Unlock();
  return v;
}

template<typename T>
static T AtomicExchange(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_xchg>(thr, pc, a, v, mo);
}

template<typename T>
static T AtomicFetchAdd(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_add>(thr, pc, a, v, mo);
}

template<typename T>
static T AtomicFetchSub(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_sub>(thr, pc, a, v, mo);
}

template<typename T>
static T AtomicFetchAnd(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_and>(thr, pc, a, v, mo);
}

template<typename T>
static T AtomicFetchOr(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_or>(thr, pc, a, v, mo);
}

template<typename T>
static T AtomicFetchXor(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_xor>(thr, pc, a, v, mo);
}

template<typename T>
static T AtomicFetchNand(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_nand>(thr, pc, a, v, mo);
}

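// AtomicCAS(): on failure the previous value is written back through *c,
// matching compare_exchange semantics; the failure order fmo is currently
// ignored (llvm does not pass it yet).  The value-returning overload below
// backs the __tsan_atomicN_compare_exchange_val entry points.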
template<typename T>
static bool AtomicCAS(ThreadState *thr, uptr pc,
    volatile T *a, T *c, T v, morder mo, morder fmo) {
  (void)fmo;  // Unused because llvm does not pass it yet.
  if (flags()->report_atomic_races)
    MemoryWriteAtomic(thr, pc, (uptr)a, SizeLog<T>());
  SyncVar *s = CTX()->synctab.GetOrCreateAndLock(thr, pc, (uptr)a, true);
  thr->clock.set(thr->tid, thr->fast_state.epoch());
  if (IsAcqRelOrder(mo))
    thr->clock.acq_rel(&s->clock);
  else if (IsReleaseOrder(mo))
    thr->clock.release(&s->clock);
  else if (IsAcquireOrder(mo))
    thr->clock.acquire(&s->clock);
  T cc = *c;
  T pr = func_cas(a, cc, v);
  s->mtx.Unlock();
  if (pr == cc)
    return true;
  *c = pr;
  return false;
}

template<typename T>
static T AtomicCAS(ThreadState *thr, uptr pc,
    volatile T *a, T c, T v, morder mo, morder fmo) {
  AtomicCAS(thr, pc, a, &c, v, mo, fmo);
  return c;
}

static void AtomicFence(ThreadState *thr, uptr pc, morder mo) {
  // FIXME(dvyukov): not implemented.
  __sync_synchronize();
}

a8 __tsan_atomic8_load(const volatile a8 *a, morder mo) {
  SCOPED_ATOMIC(Load, a, mo);
}

a16 __tsan_atomic16_load(const volatile a16 *a, morder mo) {
  SCOPED_ATOMIC(Load, a, mo);
}

a32 __tsan_atomic32_load(const volatile a32 *a, morder mo) {
  SCOPED_ATOMIC(Load, a, mo);
}

a64 __tsan_atomic64_load(const volatile a64 *a, morder mo) {
  SCOPED_ATOMIC(Load, a, mo);
}

#if __TSAN_HAS_INT128
a128 __tsan_atomic128_load(const volatile a128 *a, morder mo) {
  SCOPED_ATOMIC(Load, a, mo);
}
#endif

void __tsan_atomic8_store(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(Store, a, v, mo);
}

void __tsan_atomic16_store(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(Store, a, v, mo);
}

void __tsan_atomic32_store(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(Store, a, v, mo);
}

void __tsan_atomic64_store(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(Store, a, v, mo);
}

#if __TSAN_HAS_INT128
void __tsan_atomic128_store(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(Store, a, v, mo);
}
#endif

a8 __tsan_atomic8_exchange(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(Exchange, a, v, mo);
}

a16 __tsan_atomic16_exchange(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(Exchange, a, v, mo);
}

a32 __tsan_atomic32_exchange(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(Exchange, a, v, mo);
}

a64 __tsan_atomic64_exchange(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(Exchange, a, v, mo);
}

#if __TSAN_HAS_INT128
a128 __tsan_atomic128_exchange(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(Exchange, a, v, mo);
}
#endif

a8 __tsan_atomic8_fetch_add(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(FetchAdd, a, v, mo);
}

a16 __tsan_atomic16_fetch_add(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(FetchAdd, a, v, mo);
}

a32 __tsan_atomic32_fetch_add(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(FetchAdd, a, v, mo);
}

a64 __tsan_atomic64_fetch_add(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(FetchAdd, a, v, mo);
}

#if __TSAN_HAS_INT128
a128 __tsan_atomic128_fetch_add(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(FetchAdd, a, v, mo);
}
#endif

a8 __tsan_atomic8_fetch_sub(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(FetchSub, a, v, mo);
}

a16 __tsan_atomic16_fetch_sub(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(FetchSub, a, v, mo);
}

a32 __tsan_atomic32_fetch_sub(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(FetchSub, a, v, mo);
}

a64 __tsan_atomic64_fetch_sub(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(FetchSub, a, v, mo);
}

#if __TSAN_HAS_INT128
a128 __tsan_atomic128_fetch_sub(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(FetchSub, a, v, mo);
}
#endif

a8 __tsan_atomic8_fetch_and(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(FetchAnd, a, v, mo);
}

a16 __tsan_atomic16_fetch_and(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(FetchAnd, a, v, mo);
}

a32 __tsan_atomic32_fetch_and(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(FetchAnd, a, v, mo);
}

a64 __tsan_atomic64_fetch_and(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(FetchAnd, a, v, mo);
}

#if __TSAN_HAS_INT128
a128 __tsan_atomic128_fetch_and(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(FetchAnd, a, v, mo);
}
#endif

a8 __tsan_atomic8_fetch_or(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(FetchOr, a, v, mo);
}

a16 __tsan_atomic16_fetch_or(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(FetchOr, a, v, mo);
}

a32 __tsan_atomic32_fetch_or(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(FetchOr, a, v, mo);
}

a64 __tsan_atomic64_fetch_or(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(FetchOr, a, v, mo);
}

#if __TSAN_HAS_INT128
a128 __tsan_atomic128_fetch_or(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(FetchOr, a, v, mo);
}
#endif

a8 __tsan_atomic8_fetch_xor(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(FetchXor, a, v, mo);
}

a16 __tsan_atomic16_fetch_xor(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(FetchXor, a, v, mo);
}

a32 __tsan_atomic32_fetch_xor(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(FetchXor, a, v, mo);
}

a64 __tsan_atomic64_fetch_xor(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(FetchXor, a, v, mo);
}

#if __TSAN_HAS_INT128
a128 __tsan_atomic128_fetch_xor(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(FetchXor, a, v, mo);
}
#endif

a8 __tsan_atomic8_fetch_nand(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(FetchNand, a, v, mo);
}

a16 __tsan_atomic16_fetch_nand(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(FetchNand, a, v, mo);
}

a32 __tsan_atomic32_fetch_nand(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(FetchNand, a, v, mo);
}

a64 __tsan_atomic64_fetch_nand(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(FetchNand, a, v, mo);
}

#if __TSAN_HAS_INT128
a128 __tsan_atomic128_fetch_nand(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(FetchNand, a, v, mo);
}
#endif

int __tsan_atomic8_compare_exchange_strong(volatile a8 *a, a8 *c, a8 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

int __tsan_atomic16_compare_exchange_strong(volatile a16 *a, a16 *c, a16 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

int __tsan_atomic32_compare_exchange_strong(volatile a32 *a, a32 *c, a32 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

int __tsan_atomic64_compare_exchange_strong(volatile a64 *a, a64 *c, a64 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

#if __TSAN_HAS_INT128
int __tsan_atomic128_compare_exchange_strong(volatile a128 *a, a128 *c, a128 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}
#endif

int __tsan_atomic8_compare_exchange_weak(volatile a8 *a, a8 *c, a8 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

int __tsan_atomic16_compare_exchange_weak(volatile a16 *a, a16 *c, a16 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

int __tsan_atomic32_compare_exchange_weak(volatile a32 *a, a32 *c, a32 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

int __tsan_atomic64_compare_exchange_weak(volatile a64 *a, a64 *c, a64 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

#if __TSAN_HAS_INT128
int __tsan_atomic128_compare_exchange_weak(volatile a128 *a, a128 *c, a128 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}
#endif

a8 __tsan_atomic8_compare_exchange_val(volatile a8 *a, a8 c, a8 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

a16 __tsan_atomic16_compare_exchange_val(volatile a16 *a, a16 c, a16 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

a32 __tsan_atomic32_compare_exchange_val(volatile a32 *a, a32 c, a32 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

a64 __tsan_atomic64_compare_exchange_val(volatile a64 *a, a64 c, a64 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

#if __TSAN_HAS_INT128
a128 __tsan_atomic128_compare_exchange_val(volatile a128 *a, a128 c, a128 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}
#endif

void __tsan_atomic_thread_fence(morder mo) {
  char* a;  // Dummy address, only used by SCOPED_ATOMIC for sizeof(*a) stats.
  SCOPED_ATOMIC(Fence, mo);
}

void __tsan_atomic_signal_fence(morder mo) {
}
