// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

// This file is an internal atomic implementation for compiler-based
// ThreadSanitizer. Use base/atomicops.h instead.
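//
// Illustrative sketch only (not part of this header): callers are expected to
// include base/atomicops.h, which pulls in this implementation when building
// under ThreadSanitizer. A typical acquire/release pairing looks like:
//
//   base::subtle::Atomic32 ready = 0;
//   base::subtle::Release_Store(&ready, 1);        // publish
//   if (base::subtle::Acquire_Load(&ready) == 1) {
//     // Writes made before the Release_Store are visible here.
//   }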

#ifndef BASE_ATOMICOPS_INTERNALS_TSAN_H_
#define BASE_ATOMICOPS_INTERNALS_TSAN_H_

#include <sanitizer/tsan_interface_atomic.h>

namespace base {
namespace subtle {

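// Return-value conventions, as in base/atomicops.h: the CompareAndSwap and
// AtomicExchange helpers return the value previously stored at |ptr|
// (compare_exchange_strong writes the observed value back into |cmp|), and
// the AtomicIncrement helpers return the new value after the increment.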
inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
                                         Atomic32 old_value,
                                         Atomic32 new_value) {
  Atomic32 cmp = old_value;
  __tsan_atomic32_compare_exchange_strong(ptr, &cmp, new_value,
      __tsan_memory_order_relaxed, __tsan_memory_order_relaxed);
  return cmp;
}

inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
                                         Atomic32 new_value) {
  return __tsan_atomic32_exchange(ptr, new_value,
      __tsan_memory_order_relaxed);
}

inline Atomic32 Acquire_AtomicExchange(volatile Atomic32* ptr,
                                       Atomic32 new_value) {
  return __tsan_atomic32_exchange(ptr, new_value,
      __tsan_memory_order_acquire);
}

inline Atomic32 Release_AtomicExchange(volatile Atomic32* ptr,
                                       Atomic32 new_value) {
  return __tsan_atomic32_exchange(ptr, new_value,
      __tsan_memory_order_release);
}

inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
                                          Atomic32 increment) {
  return increment + __tsan_atomic32_fetch_add(ptr, increment,
      __tsan_memory_order_relaxed);
}

inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
                                        Atomic32 increment) {
  return increment + __tsan_atomic32_fetch_add(ptr, increment,
      __tsan_memory_order_acq_rel);
}

inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
                                       Atomic32 old_value,
                                       Atomic32 new_value) {
  Atomic32 cmp = old_value;
  __tsan_atomic32_compare_exchange_strong(ptr, &cmp, new_value,
      __tsan_memory_order_acquire, __tsan_memory_order_acquire);
  return cmp;
}

inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
                                       Atomic32 old_value,
                                       Atomic32 new_value) {
  Atomic32 cmp = old_value;
  __tsan_atomic32_compare_exchange_strong(ptr, &cmp, new_value,
      __tsan_memory_order_release, __tsan_memory_order_relaxed);
  return cmp;
}

inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
  __tsan_atomic32_store(ptr, value, __tsan_memory_order_relaxed);
}

inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
  __tsan_atomic32_store(ptr, value, __tsan_memory_order_relaxed);
  __tsan_atomic_thread_fence(__tsan_memory_order_seq_cst);
}

inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
  __tsan_atomic32_store(ptr, value, __tsan_memory_order_release);
}

inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
  return __tsan_atomic32_load(ptr, __tsan_memory_order_relaxed);
}

inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
  return __tsan_atomic32_load(ptr, __tsan_memory_order_acquire);
}

inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
  __tsan_atomic_thread_fence(__tsan_memory_order_seq_cst);
  return __tsan_atomic32_load(ptr, __tsan_memory_order_relaxed);
}

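// 64-bit versions of the operations above.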
inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
                                         Atomic64 old_value,
                                         Atomic64 new_value) {
  Atomic64 cmp = old_value;
  __tsan_atomic64_compare_exchange_strong(ptr, &cmp, new_value,
      __tsan_memory_order_relaxed, __tsan_memory_order_relaxed);
  return cmp;
}

inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
                                         Atomic64 new_value) {
  return __tsan_atomic64_exchange(ptr, new_value, __tsan_memory_order_relaxed);
}

inline Atomic64 Acquire_AtomicExchange(volatile Atomic64* ptr,
                                       Atomic64 new_value) {
  return __tsan_atomic64_exchange(ptr, new_value, __tsan_memory_order_acquire);
}

inline Atomic64 Release_AtomicExchange(volatile Atomic64* ptr,
                                       Atomic64 new_value) {
  return __tsan_atomic64_exchange(ptr, new_value, __tsan_memory_order_release);
}

inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
                                          Atomic64 increment) {
  return increment + __tsan_atomic64_fetch_add(ptr, increment,
      __tsan_memory_order_relaxed);
}

inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
                                        Atomic64 increment) {
  return increment + __tsan_atomic64_fetch_add(ptr, increment,
      __tsan_memory_order_acq_rel);
}

inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
  __tsan_atomic64_store(ptr, value, __tsan_memory_order_relaxed);
}

inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
  __tsan_atomic64_store(ptr, value, __tsan_memory_order_relaxed);
  __tsan_atomic_thread_fence(__tsan_memory_order_seq_cst);
}

inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
  __tsan_atomic64_store(ptr, value, __tsan_memory_order_release);
}

inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
  return __tsan_atomic64_load(ptr, __tsan_memory_order_relaxed);
}

inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
  return __tsan_atomic64_load(ptr, __tsan_memory_order_acquire);
}

inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
  __tsan_atomic_thread_fence(__tsan_memory_order_seq_cst);
  return __tsan_atomic64_load(ptr, __tsan_memory_order_relaxed);
}

inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
                                       Atomic64 old_value,
                                       Atomic64 new_value) {
  Atomic64 cmp = old_value;
  __tsan_atomic64_compare_exchange_strong(ptr, &cmp, new_value,
      __tsan_memory_order_acquire, __tsan_memory_order_acquire);
  return cmp;
}

inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
                                       Atomic64 old_value,
                                       Atomic64 new_value) {
  Atomic64 cmp = old_value;
  __tsan_atomic64_compare_exchange_strong(ptr, &cmp, new_value,
      __tsan_memory_order_release, __tsan_memory_order_relaxed);
  return cmp;
}

inline void MemoryBarrier() {
  __tsan_atomic_thread_fence(__tsan_memory_order_seq_cst);
}

}  // namespace base::subtle
}  // namespace base

#endif  // BASE_ATOMICOPS_INTERNALS_TSAN_H_