sanitizer_atomic_clang.h revision 6fa061978b138b39d29d98a6d28c67684a23eef0
//===-- sanitizer_atomic_clang.h --------------------------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer/AddressSanitizer runtime.
// Not intended for direct inclusion. Include sanitizer_atomic.h.
//
//===----------------------------------------------------------------------===//

#ifndef SANITIZER_ATOMIC_CLANG_H
#define SANITIZER_ATOMIC_CLANG_H

#ifndef __has_builtin
# define __has_builtin(x) 0
#endif

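// Maps our memory_order values to the compiler's __ATOMIC_* constants, so
// they can be passed to the __atomic_* builtins used below.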
#define ATOMIC_ORDER(mo) \
  ((mo) == memory_order_relaxed ? __ATOMIC_RELAXED : \
  (mo) == memory_order_consume ? __ATOMIC_CONSUME : \
  (mo) == memory_order_acquire ? __ATOMIC_ACQUIRE : \
  (mo) == memory_order_release ? __ATOMIC_RELEASE : \
  (mo) == memory_order_acq_rel ? __ATOMIC_ACQ_REL : \
  __ATOMIC_SEQ_CST)

namespace __sanitizer {

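// Compiler-only barrier: prevents the compiler from moving memory accesses
// across it, but emits no hardware fence instruction.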
INLINE void atomic_signal_fence(memory_order) {
  __asm__ __volatile__("" ::: "memory");
}

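// Full barrier in both the compiler and the hardware; __sync_synchronize()
// issues a full memory fence.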
INLINE void atomic_thread_fence(memory_order) {
  __sync_synchronize();
}

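// Busy-wait hint: on x86 the "pause" instruction tells the processor that
// this is a spin-wait loop; on other architectures this reduces to a pair of
// compiler barriers.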
INLINE void proc_yield(int cnt) {
  __asm__ __volatile__("" ::: "memory");
#if defined(__i386__) || defined(__x86_64__)
  for (int i = 0; i < cnt; i++)
    __asm__ __volatile__("pause");
#endif
  __asm__ __volatile__("" ::: "memory");
}

template<typename T>
INLINE typename T::Type atomic_load(
    const volatile T *a, memory_order mo) {
  DCHECK(mo & (memory_order_relaxed | memory_order_consume
      | memory_order_acquire | memory_order_seq_cst));
  DCHECK(!((uptr)a % sizeof(*a)));
  typename T::Type v;
// Use builtin atomic operations if available.
// But not on x86_64 because they lead to vastly inefficient code generation
// (http://llvm.org/bugs/show_bug.cgi?id=17281).
// And not on x86_32 because they are not implemented
// (http://llvm.org/bugs/show_bug.cgi?id=15034).
// Have to use them on ARM/PPC/etc, because our implementation lacks necessary
// memory fences.
#if __has_builtin(__atomic_load_n) && !defined(__x86_64__) && !defined(__i386__)
  v = __atomic_load_n(&a->val_dont_use, ATOMIC_ORDER(mo));
#else
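  // Fallback used on x86 (see the comment above): there, ordinary loads
  // already have acquire semantics, so compiler-only fences around a plain
  // load are presumably sufficient for the orders allowed here.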
  if (mo == memory_order_relaxed) {
    v = a->val_dont_use;
  } else {
    atomic_signal_fence(memory_order_seq_cst);
    v = a->val_dont_use;
    atomic_signal_fence(memory_order_seq_cst);
  }
#endif
  return v;
}

template<typename T>
INLINE void atomic_store(volatile T *a, typename T::Type v, memory_order mo) {
  DCHECK(mo & (memory_order_relaxed | memory_order_release
      | memory_order_seq_cst));
  DCHECK(!((uptr)a % sizeof(*a)));
// See the comment in atomic_load.
#if __has_builtin(__atomic_store_n) && !defined(__x86_64__) \
    && !defined(__i386__)
  __atomic_store_n(&a->val_dont_use, v, ATOMIC_ORDER(mo));
#else
  if (mo == memory_order_relaxed) {
    a->val_dont_use = v;
  } else {
    atomic_signal_fence(memory_order_seq_cst);
    a->val_dont_use = v;
    atomic_signal_fence(memory_order_seq_cst);
  }
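  // For seq_cst a full hardware fence is still needed after the store, since
  // a plain x86 store may be reordered with later loads, which compiler-only
  // fences cannot prevent.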
  if (mo == memory_order_seq_cst)
    atomic_thread_fence(memory_order_seq_cst);
#endif
}

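// The __sync_* read-modify-write builtins below act as full barriers, so the
// memory_order argument is accepted only for interface symmetry.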
template<typename T>
INLINE typename T::Type atomic_fetch_add(volatile T *a,
    typename T::Type v, memory_order mo) {
  (void)mo;
  DCHECK(!((uptr)a % sizeof(*a)));
  return __sync_fetch_and_add(&a->val_dont_use, v);
}

template<typename T>
INLINE typename T::Type atomic_fetch_sub(volatile T *a,
    typename T::Type v, memory_order mo) {
  (void)mo;
  DCHECK(!((uptr)a % sizeof(*a)));
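  // Subtraction is expressed as fetch-and-add of the negated value, which
  // yields the same result modulo the width of the type.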
  return __sync_fetch_and_add(&a->val_dont_use, -v);
}

template<typename T>
INLINE typename T::Type atomic_exchange(volatile T *a,
    typename T::Type v, memory_order mo) {
  DCHECK(!((uptr)a % sizeof(*a)));
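  // __sync_lock_test_and_set is only an acquire barrier, so a full barrier is
  // issued before it for release/acq_rel/seq_cst, and once more afterwards
  // for seq_cst.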
  if (mo & (memory_order_release | memory_order_acq_rel | memory_order_seq_cst))
    __sync_synchronize();
  v = __sync_lock_test_and_set(&a->val_dont_use, v);
  if (mo == memory_order_seq_cst)
    __sync_synchronize();
  return v;
}

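// __sync_val_compare_and_swap acts as a full barrier, so mo is not consulted.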
template<typename T>
INLINE bool atomic_compare_exchange_strong(volatile T *a,
                                           typename T::Type *cmp,
                                           typename T::Type xchg,
                                           memory_order mo) {
  typedef typename T::Type Type;
  Type cmpv = *cmp;
  Type prev = __sync_val_compare_and_swap(&a->val_dont_use, cmpv, xchg);
  if (prev == cmpv)
    return true;
  *cmp = prev;
  return false;
}

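// The weak variant simply forwards to the strong one: a strong CAS never
// fails spuriously, so it satisfies the weak contract as well.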
template<typename T>
INLINE bool atomic_compare_exchange_weak(volatile T *a,
                                         typename T::Type *cmp,
                                         typename T::Type xchg,
                                         memory_order mo) {
  return atomic_compare_exchange_strong(a, cmp, xchg, mo);
}

}  // namespace __sanitizer

#undef ATOMIC_ORDER

#endif  // SANITIZER_ATOMIC_CLANG_H