tsan_interface_atomic.cc revision 02b45d2aec154cb392cd6eb85e8dd16ed255351f
//===-- tsan_interface_atomic.cc ------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//

#include "sanitizer_common/sanitizer_placement_new.h"
#include "tsan_interface_atomic.h"
#include "tsan_flags.h"
#include "tsan_rtl.h"

using namespace __tsan;  // NOLINT

class ScopedAtomic {
 public:
  ScopedAtomic(ThreadState *thr, uptr pc, const char *func)
      : thr_(thr) {
    CHECK_EQ(thr_->in_rtl, 1);  // 1 due to our own ScopedInRtl member.
    DPrintf("#%d: %s\n", thr_->tid, func);
  }
  ~ScopedAtomic() {
    CHECK_EQ(thr_->in_rtl, 1);
  }
 private:
  ThreadState *thr_;
  ScopedInRtl in_rtl_;
};

// Some shortcuts.
typedef __tsan_memory_order morder;
typedef __tsan_atomic8 a8;
typedef __tsan_atomic16 a16;
typedef __tsan_atomic32 a32;
typedef __tsan_atomic64 a64;
const morder mo_relaxed = __tsan_memory_order_relaxed;
const morder mo_consume = __tsan_memory_order_consume;
const morder mo_acquire = __tsan_memory_order_acquire;
const morder mo_release = __tsan_memory_order_release;
const morder mo_acq_rel = __tsan_memory_order_acq_rel;
const morder mo_seq_cst = __tsan_memory_order_seq_cst;

static void AtomicStatInc(ThreadState *thr, uptr size, morder mo, StatType t) {
  StatInc(thr, StatAtomic);
  StatInc(thr, t);
  StatInc(thr, size == 1 ? StatAtomic1
             : size == 2 ? StatAtomic2
             : size == 4 ? StatAtomic4
             :             StatAtomic8);
  StatInc(thr, mo == mo_relaxed ? StatAtomicRelaxed
             : mo == mo_consume ? StatAtomicConsume
             : mo == mo_acquire ? StatAtomicAcquire
             : mo == mo_release ? StatAtomicRelease
             : mo == mo_acq_rel ? StatAtomicAcq_Rel
             :                    StatAtomicSeq_Cst);
}

static bool IsLoadOrder(morder mo) {
  return mo == mo_relaxed || mo == mo_consume
      || mo == mo_acquire || mo == mo_seq_cst;
}

static bool IsStoreOrder(morder mo) {
  return mo == mo_relaxed || mo == mo_release || mo == mo_seq_cst;
}

static bool IsReleaseOrder(morder mo) {
  return mo == mo_release || mo == mo_acq_rel || mo == mo_seq_cst;
}

static bool IsAcquireOrder(morder mo) {
  return mo == mo_consume || mo == mo_acquire
      || mo == mo_acq_rel || mo == mo_seq_cst;
}

// The instrumentation in some compiler versions encodes the memory order as
// a bitmask (1 << order) offset by the magic constant 100500. Translate that
// legacy encoding into the plain enum values; anything else passes through
// unchanged and is range-checked below.
static morder ConvertOrder(morder mo) {
  if (mo > (morder)100500) {
    mo = morder(mo - 100500);
    if (mo == morder(1 << 0))
      mo = mo_relaxed;
    else if (mo == morder(1 << 1))
      mo = mo_consume;
    else if (mo == morder(1 << 2))
      mo = mo_acquire;
    else if (mo == morder(1 << 3))
      mo = mo_release;
    else if (mo == morder(1 << 4))
      mo = mo_acq_rel;
    else if (mo == morder(1 << 5))
      mo = mo_seq_cst;
  }
  CHECK_GE(mo, mo_relaxed);
  CHECK_LE(mo, mo_seq_cst);
  return mo;
}

#define SCOPED_ATOMIC(func, ...) \
    mo = ConvertOrder(mo); \
    mo = flags()->force_seq_cst_atomics ? (morder)mo_seq_cst : mo; \
    ThreadState *const thr = cur_thread(); \
    ProcessPendingSignals(thr); \
    const uptr pc = (uptr)__builtin_return_address(0); \
    AtomicStatInc(thr, sizeof(*a), mo, StatAtomic##func); \
    ScopedAtomic sa(thr, pc, __FUNCTION__); \
    return Atomic##func(thr, pc, __VA_ARGS__); \
/**/
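
// For illustration only: a call such as __tsan_atomic32_load(a, mo) expands
// through SCOPED_ATOMIC roughly into
//
//   mo = ConvertOrder(mo);
//   mo = flags()->force_seq_cst_atomics ? (morder)mo_seq_cst : mo;
//   ThreadState *const thr = cur_thread();
//   ProcessPendingSignals(thr);
//   const uptr pc = (uptr)__builtin_return_address(0);
//   AtomicStatInc(thr, sizeof(*a), mo, StatAtomicLoad);
//   ScopedAtomic sa(thr, pc, "__tsan_atomic32_load");
//   return AtomicLoad(thr, pc, a, mo);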

template<typename T>
static T AtomicLoad(ThreadState *thr, uptr pc, const volatile T *a,
    morder mo) {
  CHECK(IsLoadOrder(mo));
  T v = *a;
  if (IsAcquireOrder(mo))
    Acquire(thr, pc, (uptr)a);
  return v;
}

template<typename T>
static void AtomicStore(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  CHECK(IsStoreOrder(mo));
  if (IsReleaseOrder(mo))
    ReleaseStore(thr, pc, (uptr)a);
  *a = v;
}

template<typename T>
static T AtomicExchange(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  if (IsReleaseOrder(mo))
    Release(thr, pc, (uptr)a);
  v = __sync_lock_test_and_set(a, v);
  if (IsAcquireOrder(mo))
    Acquire(thr, pc, (uptr)a);
  return v;
}

template<typename T>
static T AtomicFetchAdd(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  if (IsReleaseOrder(mo))
    Release(thr, pc, (uptr)a);
  v = __sync_fetch_and_add(a, v);
  if (IsAcquireOrder(mo))
    Acquire(thr, pc, (uptr)a);
  return v;
}

template<typename T>
static T AtomicFetchSub(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  if (IsReleaseOrder(mo))
    Release(thr, pc, (uptr)a);
  v = __sync_fetch_and_sub(a, v);
  if (IsAcquireOrder(mo))
    Acquire(thr, pc, (uptr)a);
  return v;
}

template<typename T>
static T AtomicFetchAnd(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  if (IsReleaseOrder(mo))
    Release(thr, pc, (uptr)a);
  v = __sync_fetch_and_and(a, v);
  if (IsAcquireOrder(mo))
    Acquire(thr, pc, (uptr)a);
  return v;
}

template<typename T>
static T AtomicFetchOr(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  if (IsReleaseOrder(mo))
    Release(thr, pc, (uptr)a);
  v = __sync_fetch_and_or(a, v);
  if (IsAcquireOrder(mo))
    Acquire(thr, pc, (uptr)a);
  return v;
}

template<typename T>
static T AtomicFetchXor(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  if (IsReleaseOrder(mo))
    Release(thr, pc, (uptr)a);
  v = __sync_fetch_and_xor(a, v);
  if (IsAcquireOrder(mo))
    Acquire(thr, pc, (uptr)a);
  return v;
}

template<typename T>
static T AtomicFetchNand(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  if (IsReleaseOrder(mo))
    Release(thr, pc, (uptr)a);
  // Emulate fetch_nand with a CAS loop (the semantics of the corresponding
  // __sync builtin differ across compiler versions): keep trying to replace
  // the current value with ~(cur & v) until the compare-and-swap succeeds.
  T cmp = *a;
  for (;;) {
    T xch = ~(cmp & v);  // nand is ~(a & v), not (~a & v)
    T cur = __sync_val_compare_and_swap(a, cmp, xch);
    if (cmp == cur)
      break;
    cmp = cur;
  }
  if (IsAcquireOrder(mo))
    Acquire(thr, pc, (uptr)a);
  // Like the other fetch_* operations, return the value observed before the
  // update (it ends up in cmp when the loop exits).
  return cmp;
}

template<typename T>
static bool AtomicCAS(ThreadState *thr, uptr pc,
    volatile T *a, T *c, T v, morder mo, morder fmo) {
  (void)fmo;
  if (IsReleaseOrder(mo))
    Release(thr, pc, (uptr)a);
  T cc = *c;
  T pr = __sync_val_compare_and_swap(a, cc, v);
  if (IsAcquireOrder(mo))
    Acquire(thr, pc, (uptr)a);
  if (pr == cc)
    return true;
  *c = pr;
  return false;
}
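
// Illustrative sketch, not compiled into the runtime: this boolean flavor
// follows the C++11 compare_exchange contract: on failure the value actually
// observed is written back into *c, so the caller can retry (note that the
// failure ordering fmo is currently ignored). A hypothetical caller:
//
//   a32 x = 0;
//   a32 expected = 0;
//   if (__tsan_atomic32_compare_exchange_strong(&x, &expected, 1,
//       mo_acq_rel, mo_acquire)) {
//     // x was 0 and now holds 1.
//   } else {
//     // expected now holds the value that was found in x.
//   }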

template<typename T>
static T AtomicCAS(ThreadState *thr, uptr pc,
    volatile T *a, T c, T v, morder mo, morder fmo) {
  AtomicCAS(thr, pc, a, &c, v, mo, fmo);
  return c;
}

static void AtomicFence(ThreadState *thr, uptr pc, morder mo) {
  // Stand-alone fences are not modeled by the detector yet; just issue a
  // real hardware barrier regardless of mo.
  __sync_synchronize();
}

a8 __tsan_atomic8_load(const volatile a8 *a, morder mo) {
  SCOPED_ATOMIC(Load, a, mo);
}

a16 __tsan_atomic16_load(const volatile a16 *a, morder mo) {
  SCOPED_ATOMIC(Load, a, mo);
}

a32 __tsan_atomic32_load(const volatile a32 *a, morder mo) {
  SCOPED_ATOMIC(Load, a, mo);
}

a64 __tsan_atomic64_load(const volatile a64 *a, morder mo) {
  SCOPED_ATOMIC(Load, a, mo);
}

void __tsan_atomic8_store(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(Store, a, v, mo);
}

void __tsan_atomic16_store(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(Store, a, v, mo);
}

void __tsan_atomic32_store(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(Store, a, v, mo);
}

void __tsan_atomic64_store(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(Store, a, v, mo);
}

a8 __tsan_atomic8_exchange(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(Exchange, a, v, mo);
}

a16 __tsan_atomic16_exchange(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(Exchange, a, v, mo);
}

a32 __tsan_atomic32_exchange(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(Exchange, a, v, mo);
}

a64 __tsan_atomic64_exchange(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(Exchange, a, v, mo);
}

a8 __tsan_atomic8_fetch_add(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(FetchAdd, a, v, mo);
}

a16 __tsan_atomic16_fetch_add(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(FetchAdd, a, v, mo);
}

a32 __tsan_atomic32_fetch_add(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(FetchAdd, a, v, mo);
}

a64 __tsan_atomic64_fetch_add(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(FetchAdd, a, v, mo);
}

a8 __tsan_atomic8_fetch_sub(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(FetchSub, a, v, mo);
}

a16 __tsan_atomic16_fetch_sub(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(FetchSub, a, v, mo);
}

a32 __tsan_atomic32_fetch_sub(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(FetchSub, a, v, mo);
}

a64 __tsan_atomic64_fetch_sub(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(FetchSub, a, v, mo);
}

a8 __tsan_atomic8_fetch_and(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(FetchAnd, a, v, mo);
}

a16 __tsan_atomic16_fetch_and(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(FetchAnd, a, v, mo);
}

a32 __tsan_atomic32_fetch_and(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(FetchAnd, a, v, mo);
}

a64 __tsan_atomic64_fetch_and(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(FetchAnd, a, v, mo);
}

a8 __tsan_atomic8_fetch_or(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(FetchOr, a, v, mo);
}

a16 __tsan_atomic16_fetch_or(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(FetchOr, a, v, mo);
}

a32 __tsan_atomic32_fetch_or(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(FetchOr, a, v, mo);
}

a64 __tsan_atomic64_fetch_or(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(FetchOr, a, v, mo);
}

a8 __tsan_atomic8_fetch_xor(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(FetchXor, a, v, mo);
}

a16 __tsan_atomic16_fetch_xor(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(FetchXor, a, v, mo);
}

a32 __tsan_atomic32_fetch_xor(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(FetchXor, a, v, mo);
}

a64 __tsan_atomic64_fetch_xor(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(FetchXor, a, v, mo);
}

a8 __tsan_atomic8_fetch_nand(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(FetchNand, a, v, mo);
}

a16 __tsan_atomic16_fetch_nand(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(FetchNand, a, v, mo);
}

a32 __tsan_atomic32_fetch_nand(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(FetchNand, a, v, mo);
}

a64 __tsan_atomic64_fetch_nand(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(FetchNand, a, v, mo);
}

int __tsan_atomic8_compare_exchange_strong(volatile a8 *a, a8 *c, a8 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

int __tsan_atomic16_compare_exchange_strong(volatile a16 *a, a16 *c, a16 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

int __tsan_atomic32_compare_exchange_strong(volatile a32 *a, a32 *c, a32 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

int __tsan_atomic64_compare_exchange_strong(volatile a64 *a, a64 *c, a64 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

int __tsan_atomic8_compare_exchange_weak(volatile a8 *a, a8 *c, a8 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

int __tsan_atomic16_compare_exchange_weak(volatile a16 *a, a16 *c, a16 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

int __tsan_atomic32_compare_exchange_weak(volatile a32 *a, a32 *c, a32 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

int __tsan_atomic64_compare_exchange_weak(volatile a64 *a, a64 *c, a64 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

a8 __tsan_atomic8_compare_exchange_val(volatile a8 *a, a8 c, a8 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

a16 __tsan_atomic16_compare_exchange_val(volatile a16 *a, a16 c, a16 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

a32 __tsan_atomic32_compare_exchange_val(volatile a32 *a, a32 c, a32 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

a64 __tsan_atomic64_compare_exchange_val(volatile a64 *a, a64 c, a64 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

void __tsan_atomic_thread_fence(morder mo) {
  char *a;  // Dummy pointer: SCOPED_ATOMIC evaluates only sizeof(*a) from it.
  SCOPED_ATOMIC(Fence, mo);
}

void __tsan_atomic_signal_fence(morder mo) {
  // A signal fence only constrains compiler reordering within a single
  // thread, so there is nothing for the race detector to model here.
}
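
// Illustrative sketch, not compiled into the runtime: an instrumenting
// compiler is expected to lower C++11 atomics onto the entry points above,
// roughly like this:
//
//   std::atomic<int> x;
//   x.store(1, std::memory_order_release);
//       // -> __tsan_atomic32_store(..., __tsan_memory_order_release)
//   int v = x.load(std::memory_order_acquire);
//       // -> v = __tsan_atomic32_load(..., __tsan_memory_order_acquire)
//   std::atomic_thread_fence(std::memory_order_seq_cst);
//       // -> __tsan_atomic_thread_fence(__tsan_memory_order_seq_cst)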