//===-- tsan_interface_atomic.cc ------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//

#include "sanitizer_common/sanitizer_placement_new.h"
#include "tsan_interface_atomic.h"
#include "tsan_flags.h"
#include "tsan_rtl.h"

using namespace __tsan;  // NOLINT

class ScopedAtomic {
 public:
  ScopedAtomic(ThreadState *thr, uptr pc, const char *func)
      : thr_(thr) {
    CHECK_EQ(thr_->in_rtl, 1);  // 1 due to our own ScopedInRtl member.
    DPrintf("#%d: %s\n", thr_->tid, func);
  }
  ~ScopedAtomic() {
    CHECK_EQ(thr_->in_rtl, 1);
  }
 private:
  ThreadState *thr_;
  ScopedInRtl in_rtl_;
};

// Some shortcuts.
typedef __tsan_memory_order morder;
typedef __tsan_atomic8 a8;
typedef __tsan_atomic16 a16;
typedef __tsan_atomic32 a32;
typedef __tsan_atomic64 a64;
const int mo_relaxed = __tsan_memory_order_relaxed;
const int mo_consume = __tsan_memory_order_consume;
const int mo_acquire = __tsan_memory_order_acquire;
const int mo_release = __tsan_memory_order_release;
const int mo_acq_rel = __tsan_memory_order_acq_rel;
const int mo_seq_cst = __tsan_memory_order_seq_cst;
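
// Note: the __tsan_memory_order constants are distinct bit flags (see
// tsan_interface_atomic.h) rather than consecutive enumerator values, which
// is what makes the "mo & (mo_x | mo_y | ...)" membership tests below valid.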

static void AtomicStatInc(ThreadState *thr, uptr size, morder mo, StatType t) {
  StatInc(thr, StatAtomic);
  StatInc(thr, t);
  StatInc(thr, size == 1 ? StatAtomic1
             : size == 2 ? StatAtomic2
             : size == 4 ? StatAtomic4
             :             StatAtomic8);
  StatInc(thr, mo == mo_relaxed ? StatAtomicRelaxed
             : mo == mo_consume ? StatAtomicConsume
             : mo == mo_acquire ? StatAtomicAcquire
             : mo == mo_release ? StatAtomicRelease
             : mo == mo_acq_rel ? StatAtomicAcq_Rel
             :                    StatAtomicSeq_Cst);
}
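
// Stats are gathered only in builds with stats collection enabled; otherwise
// StatInc() is expected to compile down to a no-op, so this accounting does
// not affect the fast path.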

#define SCOPED_ATOMIC(func, ...) \
    mo = flags()->force_seq_cst_atomics ? (morder)mo_seq_cst : mo; \
    ThreadState *const thr = cur_thread(); \
    const uptr pc = (uptr)__builtin_return_address(0); \
    AtomicStatInc(thr, sizeof(*a), mo, StatAtomic##func); \
    ScopedAtomic sa(thr, pc, __FUNCTION__); \
    return Atomic##func(thr, pc, __VA_ARGS__); \
/**/
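
// For illustration, __tsan_atomic32_load below expands to roughly the
// following (a sketch of the macro expansion, not literal preprocessor
// output):
//
//   a32 __tsan_atomic32_load(const volatile a32 *a, morder mo) {
//     mo = flags()->force_seq_cst_atomics ? (morder)mo_seq_cst : mo;
//     ThreadState *const thr = cur_thread();
//     const uptr pc = (uptr)__builtin_return_address(0);
//     AtomicStatInc(thr, sizeof(*a), mo, StatAtomicLoad);
//     ScopedAtomic sa(thr, pc, __FUNCTION__);
//     return AtomicLoad(thr, pc, a, mo);
//   }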

template<typename T>
static T AtomicLoad(ThreadState *thr, uptr pc, const volatile T *a,
    morder mo) {
  CHECK(mo & (mo_relaxed | mo_consume | mo_acquire | mo_seq_cst));
  T v = *a;
  if (mo & (mo_consume | mo_acquire | mo_seq_cst))
    Acquire(thr, pc, (uptr)a);
  return v;
}

template<typename T>
static void AtomicStore(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  CHECK(mo & (mo_relaxed | mo_release | mo_seq_cst));
  if (mo & (mo_release | mo_seq_cst))
    Release(thr, pc, (uptr)a);
  *a = v;
}
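
// In AtomicLoad/AtomicStore above, Acquire()/Release() record happens-before
// edges for the race detector (via the sync clocks associated with the
// address); they are annotations, not hardware fences. The access itself is
// a plain volatile load/store, relied upon to be atomic for the aligned,
// word-sized operand types on the supported platforms.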

template<typename T>
static T AtomicExchange(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  if (mo & (mo_release | mo_acq_rel | mo_seq_cst))
    Release(thr, pc, (uptr)a);
  v = __sync_lock_test_and_set(a, v);
  if (mo & (mo_consume | mo_acquire | mo_acq_rel | mo_seq_cst))
    Acquire(thr, pc, (uptr)a);
  return v;
}
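
// __sync_lock_test_and_set is GCC's atomic exchange builtin (documented as
// an acquire barrier only); actual hardware ordering comes from the locked
// instruction on x86-64, while the Release()/Acquire() calls record the
// happens-before edges for the detector. The fetch-and-op helpers below
// follow the same pattern around their respective __sync builtins.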

template<typename T>
static T AtomicFetchAdd(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  if (mo & (mo_release | mo_acq_rel | mo_seq_cst))
    Release(thr, pc, (uptr)a);
  v = __sync_fetch_and_add(a, v);
  if (mo & (mo_consume | mo_acquire | mo_acq_rel | mo_seq_cst))
    Acquire(thr, pc, (uptr)a);
  return v;
}

template<typename T>
static T AtomicFetchAnd(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  if (mo & (mo_release | mo_acq_rel | mo_seq_cst))
    Release(thr, pc, (uptr)a);
  v = __sync_fetch_and_and(a, v);
  if (mo & (mo_consume | mo_acquire | mo_acq_rel | mo_seq_cst))
    Acquire(thr, pc, (uptr)a);
  return v;
}

template<typename T>
static T AtomicFetchOr(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  if (mo & (mo_release | mo_acq_rel | mo_seq_cst))
    Release(thr, pc, (uptr)a);
  v = __sync_fetch_and_or(a, v);
  if (mo & (mo_consume | mo_acquire | mo_acq_rel | mo_seq_cst))
    Acquire(thr, pc, (uptr)a);
  return v;
}

template<typename T>
static T AtomicFetchXor(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  if (mo & (mo_release | mo_acq_rel | mo_seq_cst))
    Release(thr, pc, (uptr)a);
  v = __sync_fetch_and_xor(a, v);
  if (mo & (mo_consume | mo_acquire | mo_acq_rel | mo_seq_cst))
    Acquire(thr, pc, (uptr)a);
  return v;
}

template<typename T>
static bool AtomicCAS(ThreadState *thr, uptr pc,
    volatile T *a, T *c, T v, morder mo) {
  if (mo & (mo_release | mo_acq_rel | mo_seq_cst))
    Release(thr, pc, (uptr)a);
  T cc = *c;
  T pr = __sync_val_compare_and_swap(a, cc, v);
  if (mo & (mo_consume | mo_acquire | mo_acq_rel | mo_seq_cst))
    Acquire(thr, pc, (uptr)a);
  if (pr == cc)
    return true;
  *c = pr;
  return false;
}
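
// AtomicCAS implements strong compare-exchange semantics: on failure, the
// value actually observed is written back to *c, mirroring C11
// atomic_compare_exchange_strong. A hypothetical caller:
//
//   a32 expected = 0;
//   if (!__tsan_atomic32_compare_exchange_strong(&x, &expected, 1, mo))
//     /* 'expected' now holds the value that was observed in x */;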

static void AtomicFence(ThreadState *thr, uptr pc, morder mo) {
  __sync_synchronize();
}
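
// Note that AtomicFence ignores 'mo' and issues a full hardware barrier
// unconditionally; no happens-before edges are recorded for fences in this
// version of the runtime.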

a8 __tsan_atomic8_load(const volatile a8 *a, morder mo) {
  SCOPED_ATOMIC(Load, a, mo);
}

a16 __tsan_atomic16_load(const volatile a16 *a, morder mo) {
  SCOPED_ATOMIC(Load, a, mo);
}

a32 __tsan_atomic32_load(const volatile a32 *a, morder mo) {
  SCOPED_ATOMIC(Load, a, mo);
}

a64 __tsan_atomic64_load(const volatile a64 *a, morder mo) {
  SCOPED_ATOMIC(Load, a, mo);
}

void __tsan_atomic8_store(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(Store, a, v, mo);
}

void __tsan_atomic16_store(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(Store, a, v, mo);
}

void __tsan_atomic32_store(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(Store, a, v, mo);
}

void __tsan_atomic64_store(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(Store, a, v, mo);
}
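
// Caller-side usage of the entry points above (a hypothetical sketch; in
// practice the compiler instrumentation emits these calls for the program's
// atomic operations):
//
//   a32 flag;
//   __tsan_atomic32_store(&flag, 1, __tsan_memory_order_release);
//   a32 v = __tsan_atomic32_load(&flag, __tsan_memory_order_acquire);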

a8 __tsan_atomic8_exchange(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(Exchange, a, v, mo);
}

a16 __tsan_atomic16_exchange(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(Exchange, a, v, mo);
}

a32 __tsan_atomic32_exchange(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(Exchange, a, v, mo);
}

a64 __tsan_atomic64_exchange(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(Exchange, a, v, mo);
}

a8 __tsan_atomic8_fetch_add(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(FetchAdd, a, v, mo);
}

a16 __tsan_atomic16_fetch_add(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(FetchAdd, a, v, mo);
}

a32 __tsan_atomic32_fetch_add(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(FetchAdd, a, v, mo);
}

a64 __tsan_atomic64_fetch_add(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(FetchAdd, a, v, mo);
}

a8 __tsan_atomic8_fetch_and(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(FetchAnd, a, v, mo);
}

a16 __tsan_atomic16_fetch_and(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(FetchAnd, a, v, mo);
}

a32 __tsan_atomic32_fetch_and(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(FetchAnd, a, v, mo);
}

a64 __tsan_atomic64_fetch_and(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(FetchAnd, a, v, mo);
}

a8 __tsan_atomic8_fetch_or(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(FetchOr, a, v, mo);
}

a16 __tsan_atomic16_fetch_or(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(FetchOr, a, v, mo);
}

a32 __tsan_atomic32_fetch_or(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(FetchOr, a, v, mo);
}

a64 __tsan_atomic64_fetch_or(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(FetchOr, a, v, mo);
}

a8 __tsan_atomic8_fetch_xor(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(FetchXor, a, v, mo);
}

a16 __tsan_atomic16_fetch_xor(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(FetchXor, a, v, mo);
}

a32 __tsan_atomic32_fetch_xor(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(FetchXor, a, v, mo);
}

a64 __tsan_atomic64_fetch_xor(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(FetchXor, a, v, mo);
}

int __tsan_atomic8_compare_exchange_strong(volatile a8 *a, a8 *c, a8 v,
    morder mo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo);
}

int __tsan_atomic16_compare_exchange_strong(volatile a16 *a, a16 *c, a16 v,
    morder mo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo);
}

int __tsan_atomic32_compare_exchange_strong(volatile a32 *a, a32 *c, a32 v,
    morder mo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo);
}

int __tsan_atomic64_compare_exchange_strong(volatile a64 *a, a64 *c, a64 v,
    morder mo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo);
}

int __tsan_atomic8_compare_exchange_weak(volatile a8 *a, a8 *c, a8 v,
    morder mo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo);
}

int __tsan_atomic16_compare_exchange_weak(volatile a16 *a, a16 *c, a16 v,
    morder mo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo);
}

int __tsan_atomic32_compare_exchange_weak(volatile a32 *a, a32 *c, a32 v,
    morder mo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo);
}

int __tsan_atomic64_compare_exchange_weak(volatile a64 *a, a64 *c, a64 v,
    morder mo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo);
}
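
// The _weak variants above share AtomicCAS with the _strong ones, so a
// "weak" CAS here never fails spuriously; that is permitted, since a weak
// compare-exchange is always allowed to behave like a strong one.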

void __tsan_atomic_thread_fence(morder mo) {
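  // Dummy operand: SCOPED_ATOMIC evaluates sizeof(*a) for the stats, and a
  // fence has no atomic operand, so 'a' exists only to keep the expansion
  // compiling (the fence is then accounted as a 1-byte atomic op).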
  char* a;
  SCOPED_ATOMIC(Fence, mo);
}