// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.


// This file is an internal atomic implementation for compiler-based
// ThreadSanitizer. Use base/atomicops.h instead.
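//
// For illustration only -- a sketch of typical client usage, assuming the
// usual atomicops entry point selects this backend when building under
// ThreadSanitizer (the include path and variable below are illustrative,
// not prescribed by this header):
//
//   #include "atomicops.h"
//
//   v8::internal::Atomic32 counter = 0;
//   // Atomically add 1 with full-barrier semantics; returns the new value.
//   v8::internal::Atomic32 n =
//       v8::internal::Barrier_AtomicIncrement(&counter, 1);
//   // Publish the result with release semantics.
//   v8::internal::Release_Store(&counter, n);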

#ifndef V8_ATOMICOPS_INTERNALS_TSAN_H_
#define V8_ATOMICOPS_INTERNALS_TSAN_H_

namespace v8 {
namespace internal {

#ifndef TSAN_INTERFACE_ATOMIC_H
#define TSAN_INTERFACE_ATOMIC_H

// This struct is not part of the public API of this module; clients may not
// use it.  (However, it is still exported because clients implicitly use it
// at link time by inlining these functions.)
// Features of the host x86 CPU.  Values may not be correct before main() is
// run, but are set conservatively.
struct AtomicOps_x86CPUFeatureStruct {
  bool has_amd_lock_mb_bug;  // Processor has AMD memory-barrier bug; do lfence
                             // after acquire compare-and-swap.
  bool has_sse2;             // Processor has SSE2.
};
extern struct AtomicOps_x86CPUFeatureStruct
    AtomicOps_Internalx86CPUFeatures;

#define ATOMICOPS_COMPILER_BARRIER() __asm__ __volatile__("" : : : "memory")

#ifdef __cplusplus
extern "C" {
#endif

typedef char  __tsan_atomic8;
typedef short __tsan_atomic16;  // NOLINT
typedef int   __tsan_atomic32;
typedef long  __tsan_atomic64;  // NOLINT

#if defined(__SIZEOF_INT128__) \
    || (__clang_major__ * 100 + __clang_minor__ >= 302)
typedef __int128 __tsan_atomic128;
#define __TSAN_HAS_INT128 1
#else
typedef char     __tsan_atomic128;
#define __TSAN_HAS_INT128 0
#endif

typedef enum {
  __tsan_memory_order_relaxed,
  __tsan_memory_order_consume,
  __tsan_memory_order_acquire,
  __tsan_memory_order_release,
  __tsan_memory_order_acq_rel,
  __tsan_memory_order_seq_cst,
} __tsan_memory_order;

__tsan_atomic8 __tsan_atomic8_load(const volatile __tsan_atomic8 *a,
    __tsan_memory_order mo);
__tsan_atomic16 __tsan_atomic16_load(const volatile __tsan_atomic16 *a,
    __tsan_memory_order mo);
__tsan_atomic32 __tsan_atomic32_load(const volatile __tsan_atomic32 *a,
    __tsan_memory_order mo);
__tsan_atomic64 __tsan_atomic64_load(const volatile __tsan_atomic64 *a,
    __tsan_memory_order mo);
__tsan_atomic128 __tsan_atomic128_load(const volatile __tsan_atomic128 *a,
    __tsan_memory_order mo);

void __tsan_atomic8_store(volatile __tsan_atomic8 *a, __tsan_atomic8 v,
    __tsan_memory_order mo);
void __tsan_atomic16_store(volatile __tsan_atomic16 *a, __tsan_atomic16 v,
    __tsan_memory_order mo);
void __tsan_atomic32_store(volatile __tsan_atomic32 *a, __tsan_atomic32 v,
    __tsan_memory_order mo);
void __tsan_atomic64_store(volatile __tsan_atomic64 *a, __tsan_atomic64 v,
    __tsan_memory_order mo);
void __tsan_atomic128_store(volatile __tsan_atomic128 *a, __tsan_atomic128 v,
    __tsan_memory_order mo);

__tsan_atomic8 __tsan_atomic8_exchange(volatile __tsan_atomic8 *a,
    __tsan_atomic8 v, __tsan_memory_order mo);
__tsan_atomic16 __tsan_atomic16_exchange(volatile __tsan_atomic16 *a,
    __tsan_atomic16 v, __tsan_memory_order mo);
__tsan_atomic32 __tsan_atomic32_exchange(volatile __tsan_atomic32 *a,
    __tsan_atomic32 v, __tsan_memory_order mo);
__tsan_atomic64 __tsan_atomic64_exchange(volatile __tsan_atomic64 *a,
    __tsan_atomic64 v, __tsan_memory_order mo);
__tsan_atomic128 __tsan_atomic128_exchange(volatile __tsan_atomic128 *a,
    __tsan_atomic128 v, __tsan_memory_order mo);

__tsan_atomic8 __tsan_atomic8_fetch_add(volatile __tsan_atomic8 *a,
    __tsan_atomic8 v, __tsan_memory_order mo);
__tsan_atomic16 __tsan_atomic16_fetch_add(volatile __tsan_atomic16 *a,
    __tsan_atomic16 v, __tsan_memory_order mo);
__tsan_atomic32 __tsan_atomic32_fetch_add(volatile __tsan_atomic32 *a,
    __tsan_atomic32 v, __tsan_memory_order mo);
__tsan_atomic64 __tsan_atomic64_fetch_add(volatile __tsan_atomic64 *a,
    __tsan_atomic64 v, __tsan_memory_order mo);
__tsan_atomic128 __tsan_atomic128_fetch_add(volatile __tsan_atomic128 *a,
    __tsan_atomic128 v, __tsan_memory_order mo);

__tsan_atomic8 __tsan_atomic8_fetch_and(volatile __tsan_atomic8 *a,
    __tsan_atomic8 v, __tsan_memory_order mo);
__tsan_atomic16 __tsan_atomic16_fetch_and(volatile __tsan_atomic16 *a,
    __tsan_atomic16 v, __tsan_memory_order mo);
__tsan_atomic32 __tsan_atomic32_fetch_and(volatile __tsan_atomic32 *a,
    __tsan_atomic32 v, __tsan_memory_order mo);
__tsan_atomic64 __tsan_atomic64_fetch_and(volatile __tsan_atomic64 *a,
    __tsan_atomic64 v, __tsan_memory_order mo);
__tsan_atomic128 __tsan_atomic128_fetch_and(volatile __tsan_atomic128 *a,
    __tsan_atomic128 v, __tsan_memory_order mo);

__tsan_atomic8 __tsan_atomic8_fetch_or(volatile __tsan_atomic8 *a,
    __tsan_atomic8 v, __tsan_memory_order mo);
__tsan_atomic16 __tsan_atomic16_fetch_or(volatile __tsan_atomic16 *a,
    __tsan_atomic16 v, __tsan_memory_order mo);
__tsan_atomic32 __tsan_atomic32_fetch_or(volatile __tsan_atomic32 *a,
    __tsan_atomic32 v, __tsan_memory_order mo);
__tsan_atomic64 __tsan_atomic64_fetch_or(volatile __tsan_atomic64 *a,
    __tsan_atomic64 v, __tsan_memory_order mo);
__tsan_atomic128 __tsan_atomic128_fetch_or(volatile __tsan_atomic128 *a,
    __tsan_atomic128 v, __tsan_memory_order mo);

__tsan_atomic8 __tsan_atomic8_fetch_xor(volatile __tsan_atomic8 *a,
    __tsan_atomic8 v, __tsan_memory_order mo);
__tsan_atomic16 __tsan_atomic16_fetch_xor(volatile __tsan_atomic16 *a,
    __tsan_atomic16 v, __tsan_memory_order mo);
__tsan_atomic32 __tsan_atomic32_fetch_xor(volatile __tsan_atomic32 *a,
    __tsan_atomic32 v, __tsan_memory_order mo);
__tsan_atomic64 __tsan_atomic64_fetch_xor(volatile __tsan_atomic64 *a,
    __tsan_atomic64 v, __tsan_memory_order mo);
__tsan_atomic128 __tsan_atomic128_fetch_xor(volatile __tsan_atomic128 *a,
    __tsan_atomic128 v, __tsan_memory_order mo);

__tsan_atomic8 __tsan_atomic8_fetch_nand(volatile __tsan_atomic8 *a,
    __tsan_atomic8 v, __tsan_memory_order mo);
__tsan_atomic16 __tsan_atomic16_fetch_nand(volatile __tsan_atomic16 *a,
    __tsan_atomic16 v, __tsan_memory_order mo);
__tsan_atomic32 __tsan_atomic32_fetch_nand(volatile __tsan_atomic32 *a,
    __tsan_atomic32 v, __tsan_memory_order mo);
__tsan_atomic64 __tsan_atomic64_fetch_nand(volatile __tsan_atomic64 *a,
    __tsan_atomic64 v, __tsan_memory_order mo);
__tsan_atomic128 __tsan_atomic128_fetch_nand(volatile __tsan_atomic128 *a,
    __tsan_atomic128 v, __tsan_memory_order mo);

int __tsan_atomic8_compare_exchange_weak(volatile __tsan_atomic8 *a,
    __tsan_atomic8 *c, __tsan_atomic8 v, __tsan_memory_order mo,
    __tsan_memory_order fail_mo);
int __tsan_atomic16_compare_exchange_weak(volatile __tsan_atomic16 *a,
    __tsan_atomic16 *c, __tsan_atomic16 v, __tsan_memory_order mo,
    __tsan_memory_order fail_mo);
int __tsan_atomic32_compare_exchange_weak(volatile __tsan_atomic32 *a,
    __tsan_atomic32 *c, __tsan_atomic32 v, __tsan_memory_order mo,
    __tsan_memory_order fail_mo);
int __tsan_atomic64_compare_exchange_weak(volatile __tsan_atomic64 *a,
    __tsan_atomic64 *c, __tsan_atomic64 v, __tsan_memory_order mo,
    __tsan_memory_order fail_mo);
int __tsan_atomic128_compare_exchange_weak(volatile __tsan_atomic128 *a,
    __tsan_atomic128 *c, __tsan_atomic128 v, __tsan_memory_order mo,
    __tsan_memory_order fail_mo);

int __tsan_atomic8_compare_exchange_strong(volatile __tsan_atomic8 *a,
    __tsan_atomic8 *c, __tsan_atomic8 v, __tsan_memory_order mo,
    __tsan_memory_order fail_mo);
int __tsan_atomic16_compare_exchange_strong(volatile __tsan_atomic16 *a,
    __tsan_atomic16 *c, __tsan_atomic16 v, __tsan_memory_order mo,
    __tsan_memory_order fail_mo);
int __tsan_atomic32_compare_exchange_strong(volatile __tsan_atomic32 *a,
    __tsan_atomic32 *c, __tsan_atomic32 v, __tsan_memory_order mo,
    __tsan_memory_order fail_mo);
int __tsan_atomic64_compare_exchange_strong(volatile __tsan_atomic64 *a,
    __tsan_atomic64 *c, __tsan_atomic64 v, __tsan_memory_order mo,
    __tsan_memory_order fail_mo);
int __tsan_atomic128_compare_exchange_strong(volatile __tsan_atomic128 *a,
    __tsan_atomic128 *c, __tsan_atomic128 v, __tsan_memory_order mo,
    __tsan_memory_order fail_mo);

__tsan_atomic8 __tsan_atomic8_compare_exchange_val(
    volatile __tsan_atomic8 *a, __tsan_atomic8 c, __tsan_atomic8 v,
    __tsan_memory_order mo, __tsan_memory_order fail_mo);
__tsan_atomic16 __tsan_atomic16_compare_exchange_val(
    volatile __tsan_atomic16 *a, __tsan_atomic16 c, __tsan_atomic16 v,
    __tsan_memory_order mo, __tsan_memory_order fail_mo);
__tsan_atomic32 __tsan_atomic32_compare_exchange_val(
    volatile __tsan_atomic32 *a, __tsan_atomic32 c, __tsan_atomic32 v,
    __tsan_memory_order mo, __tsan_memory_order fail_mo);
__tsan_atomic64 __tsan_atomic64_compare_exchange_val(
    volatile __tsan_atomic64 *a, __tsan_atomic64 c, __tsan_atomic64 v,
    __tsan_memory_order mo, __tsan_memory_order fail_mo);
__tsan_atomic128 __tsan_atomic128_compare_exchange_val(
    volatile __tsan_atomic128 *a, __tsan_atomic128 c, __tsan_atomic128 v,
    __tsan_memory_order mo, __tsan_memory_order fail_mo);

void __tsan_atomic_thread_fence(__tsan_memory_order mo);
void __tsan_atomic_signal_fence(__tsan_memory_order mo);

#ifdef __cplusplus
}  // extern "C"
#endif

#endif  // #ifndef TSAN_INTERFACE_ATOMIC_H

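// The functions below implement V8's atomicops interface for Atomic32 and
// Atomic64 in terms of the __tsan_* intrinsics declared above, so that
// ThreadSanitizer instruments every atomic access.  Each CompareAndSwap
// returns the value observed at *ptr, which equals old_value exactly when
// the swap succeeded.
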
inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32 *ptr,
                                         Atomic32 old_value,
                                         Atomic32 new_value) {
  Atomic32 cmp = old_value;
  __tsan_atomic32_compare_exchange_strong(ptr, &cmp, new_value,
      __tsan_memory_order_relaxed, __tsan_memory_order_relaxed);
  return cmp;
}
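// For illustration only (comment, not compiled): a retry loop built on the
// CompareAndSwap above, assuming a hypothetical Atomic32 named 'value':
//
//   Atomic32 old;
//   do {
//     old = NoBarrier_Load(&value);
//   } while (NoBarrier_CompareAndSwap(&value, old, old + 1) != old);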

inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32 *ptr,
                                         Atomic32 new_value) {
  return __tsan_atomic32_exchange(ptr, new_value,
      __tsan_memory_order_relaxed);
}

inline Atomic32 Acquire_AtomicExchange(volatile Atomic32 *ptr,
                                       Atomic32 new_value) {
  return __tsan_atomic32_exchange(ptr, new_value,
      __tsan_memory_order_acquire);
}

inline Atomic32 Release_AtomicExchange(volatile Atomic32 *ptr,
                                       Atomic32 new_value) {
  return __tsan_atomic32_exchange(ptr, new_value,
      __tsan_memory_order_release);
}

inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32 *ptr,
                                          Atomic32 increment) {
  return increment + __tsan_atomic32_fetch_add(ptr, increment,
      __tsan_memory_order_relaxed);
}

inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32 *ptr,
                                        Atomic32 increment) {
  return increment + __tsan_atomic32_fetch_add(ptr, increment,
      __tsan_memory_order_acq_rel);
}

inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32 *ptr,
                                       Atomic32 old_value,
                                       Atomic32 new_value) {
  Atomic32 cmp = old_value;
  __tsan_atomic32_compare_exchange_strong(ptr, &cmp, new_value,
      __tsan_memory_order_acquire, __tsan_memory_order_acquire);
  return cmp;
}

inline Atomic32 Release_CompareAndSwap(volatile Atomic32 *ptr,
                                       Atomic32 old_value,
                                       Atomic32 new_value) {
  Atomic32 cmp = old_value;
  __tsan_atomic32_compare_exchange_strong(ptr, &cmp, new_value,
      __tsan_memory_order_release, __tsan_memory_order_relaxed);
  return cmp;
}

inline void NoBarrier_Store(volatile Atomic32 *ptr, Atomic32 value) {
  __tsan_atomic32_store(ptr, value, __tsan_memory_order_relaxed);
}

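// Acquire_Store and Release_Load below keep the legacy semantics of the
// other atomicops backends: a relaxed access paired with a full thread
// fence, mirroring their plain-access-plus-MemoryBarrier() pattern.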
inline void Acquire_Store(volatile Atomic32 *ptr, Atomic32 value) {
  __tsan_atomic32_store(ptr, value, __tsan_memory_order_relaxed);
  __tsan_atomic_thread_fence(__tsan_memory_order_seq_cst);
}

inline void Release_Store(volatile Atomic32 *ptr, Atomic32 value) {
  __tsan_atomic32_store(ptr, value, __tsan_memory_order_release);
}

inline Atomic32 NoBarrier_Load(volatile const Atomic32 *ptr) {
  return __tsan_atomic32_load(ptr, __tsan_memory_order_relaxed);
}

inline Atomic32 Acquire_Load(volatile const Atomic32 *ptr) {
  return __tsan_atomic32_load(ptr, __tsan_memory_order_acquire);
}

inline Atomic32 Release_Load(volatile const Atomic32 *ptr) {
  __tsan_atomic_thread_fence(__tsan_memory_order_seq_cst);
  return __tsan_atomic32_load(ptr, __tsan_memory_order_relaxed);
}

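// 64-bit versions of the operations above, expressed in terms of the
// __tsan_atomic64_* intrinsics.
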
inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64 *ptr,
                                         Atomic64 old_value,
                                         Atomic64 new_value) {
  Atomic64 cmp = old_value;
  __tsan_atomic64_compare_exchange_strong(ptr, &cmp, new_value,
      __tsan_memory_order_relaxed, __tsan_memory_order_relaxed);
  return cmp;
}

inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64 *ptr,
                                         Atomic64 new_value) {
  return __tsan_atomic64_exchange(ptr, new_value, __tsan_memory_order_relaxed);
}

inline Atomic64 Acquire_AtomicExchange(volatile Atomic64 *ptr,
                                       Atomic64 new_value) {
  return __tsan_atomic64_exchange(ptr, new_value, __tsan_memory_order_acquire);
}

inline Atomic64 Release_AtomicExchange(volatile Atomic64 *ptr,
                                       Atomic64 new_value) {
  return __tsan_atomic64_exchange(ptr, new_value, __tsan_memory_order_release);
}

inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64 *ptr,
                                          Atomic64 increment) {
  return increment + __tsan_atomic64_fetch_add(ptr, increment,
      __tsan_memory_order_relaxed);
}

inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64 *ptr,
                                        Atomic64 increment) {
  return increment + __tsan_atomic64_fetch_add(ptr, increment,
      __tsan_memory_order_acq_rel);
}

inline void NoBarrier_Store(volatile Atomic64 *ptr, Atomic64 value) {
  __tsan_atomic64_store(ptr, value, __tsan_memory_order_relaxed);
}

inline void Acquire_Store(volatile Atomic64 *ptr, Atomic64 value) {
  __tsan_atomic64_store(ptr, value, __tsan_memory_order_relaxed);
  __tsan_atomic_thread_fence(__tsan_memory_order_seq_cst);
}

inline void Release_Store(volatile Atomic64 *ptr, Atomic64 value) {
  __tsan_atomic64_store(ptr, value, __tsan_memory_order_release);
}

inline Atomic64 NoBarrier_Load(volatile const Atomic64 *ptr) {
  return __tsan_atomic64_load(ptr, __tsan_memory_order_relaxed);
}

inline Atomic64 Acquire_Load(volatile const Atomic64 *ptr) {
  return __tsan_atomic64_load(ptr, __tsan_memory_order_acquire);
}

inline Atomic64 Release_Load(volatile const Atomic64 *ptr) {
  __tsan_atomic_thread_fence(__tsan_memory_order_seq_cst);
  return __tsan_atomic64_load(ptr, __tsan_memory_order_relaxed);
}

inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64 *ptr,
                                       Atomic64 old_value,
                                       Atomic64 new_value) {
  Atomic64 cmp = old_value;
  __tsan_atomic64_compare_exchange_strong(ptr, &cmp, new_value,
      __tsan_memory_order_acquire, __tsan_memory_order_acquire);
  return cmp;
}

inline Atomic64 Release_CompareAndSwap(volatile Atomic64 *ptr,
                                       Atomic64 old_value,
                                       Atomic64 new_value) {
  Atomic64 cmp = old_value;
  __tsan_atomic64_compare_exchange_strong(ptr, &cmp, new_value,
      __tsan_memory_order_release, __tsan_memory_order_relaxed);
  return cmp;
}

inline void MemoryBarrier() {
  __tsan_atomic_thread_fence(__tsan_memory_order_seq_cst);
}

}  // namespace internal
}  // namespace v8

#undef ATOMICOPS_COMPILER_BARRIER

#endif  // V8_ATOMICOPS_INTERNALS_TSAN_H_