sanitizer_atomic_msvc.h revision b975c8abce50e513d9e168b24692fa9310e32aed
//===-- sanitizer_atomic_msvc.h ---------------------------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer/AddressSanitizer runtime.
// Not intended for direct inclusion. Include sanitizer_atomic.h.
//
//===----------------------------------------------------------------------===//

#ifndef SANITIZER_ATOMIC_MSVC_H
#define SANITIZER_ATOMIC_MSVC_H

extern "C" void _ReadWriteBarrier();
#pragma intrinsic(_ReadWriteBarrier)
extern "C" void _mm_mfence();
#pragma intrinsic(_mm_mfence)
extern "C" void _mm_pause();
#pragma intrinsic(_mm_pause)
extern "C" long _InterlockedExchangeAdd(  // NOLINT
    long volatile * Addend, long Value);  // NOLINT
#pragma intrinsic(_InterlockedExchangeAdd)

#ifdef _WIN64
extern "C" void *_InterlockedCompareExchangePointer(
    void *volatile *Destination,
    void *Exchange, void *Comparand);
#pragma intrinsic(_InterlockedCompareExchangePointer)
#else
// There's no _InterlockedCompareExchangePointer intrinsic on x86,
// so call _InterlockedCompareExchange instead.
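// The casts below do not truncate: pointers are 32 bits wide on x86.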
extern "C"
long __cdecl _InterlockedCompareExchange(  // NOLINT
    long volatile *Destination,            // NOLINT
    long Exchange, long Comparand);        // NOLINT
#pragma intrinsic(_InterlockedCompareExchange)

inline static void *_InterlockedCompareExchangePointer(
    void *volatile *Destination,
    void *Exchange, void *Comparand) {
  return reinterpret_cast<void*>(
      _InterlockedCompareExchange(
          reinterpret_cast<long volatile*>(Destination),  // NOLINT
          reinterpret_cast<long>(Exchange),               // NOLINT
          reinterpret_cast<long>(Comparand)));            // NOLINT
}
#endif

namespace __sanitizer {

INLINE void atomic_signal_fence(memory_order) {
  _ReadWriteBarrier();
}

INLINE void atomic_thread_fence(memory_order) {
  _mm_mfence();
}

INLINE void proc_yield(int cnt) {
  for (int i = 0; i < cnt; i++)
    _mm_pause();
}

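// On x86/x86_64 an aligned plain load already has acquire semantics, so the
// non-relaxed paths below only need compiler barriers (_ReadWriteBarrier) to
// keep the compiler itself from moving memory accesses across the load.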
template<typename T>
INLINE typename T::Type atomic_load(
    const volatile T *a, memory_order mo) {
  DCHECK(mo & (memory_order_relaxed | memory_order_consume
      | memory_order_acquire | memory_order_seq_cst));
  DCHECK(!((uptr)a % sizeof(*a)));
  typename T::Type v;
  // FIXME(dvyukov): 64-bit load is not atomic on 32-bit platforms.
  if (mo == memory_order_relaxed) {
    v = a->val_dont_use;
  } else {
    atomic_signal_fence(memory_order_seq_cst);
    v = a->val_dont_use;
    atomic_signal_fence(memory_order_seq_cst);
  }
  return v;
}

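// Plain aligned stores have release semantics on x86/x86_64, so non-relaxed
// stores again only need compiler barriers; a seq_cst store is additionally
// followed by a full fence (mfence) so it cannot be reordered with later
// loads.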
template<typename T>
INLINE void atomic_store(volatile T *a, typename T::Type v, memory_order mo) {
  DCHECK(mo & (memory_order_relaxed | memory_order_release
      | memory_order_seq_cst));
  DCHECK(!((uptr)a % sizeof(*a)));
  // FIXME(dvyukov): 64-bit store is not atomic on 32-bit platforms.
  if (mo == memory_order_relaxed) {
    a->val_dont_use = v;
  } else {
    atomic_signal_fence(memory_order_seq_cst);
    a->val_dont_use = v;
    atomic_signal_fence(memory_order_seq_cst);
  }
  if (mo == memory_order_seq_cst)
    atomic_thread_fence(memory_order_seq_cst);
}

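// _InterlockedExchangeAdd (lock xadd) is a full memory barrier, which is at
// least as strong as any requested memory order, so mo can be ignored.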
INLINE u32 atomic_fetch_add(volatile atomic_uint32_t *a,
    u32 v, memory_order mo) {
  (void)mo;
  DCHECK(!((uptr)a % sizeof(*a)));
  return (u32)_InterlockedExchangeAdd(
      (volatile long*)&a->val_dont_use, (long)v);  // NOLINT
}

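// xchg with a memory operand carries an implicit lock prefix, so the
// exchanges below are full barriers regardless of mo. Note that MSVC accepts
// __asm blocks only when targeting x86; a 64-bit build would presumably need
// the _InterlockedExchange8/16 intrinsics instead (an assumption, not part of
// this revision).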
INLINE u8 atomic_exchange(volatile atomic_uint8_t *a,
    u8 v, memory_order mo) {
  (void)mo;
  DCHECK(!((uptr)a % sizeof(*a)));
  __asm {
    mov eax, a
    mov cl, v
    xchg [eax], cl  // NOLINT
    mov v, cl
  }
  return v;
}

INLINE u16 atomic_exchange(volatile atomic_uint16_t *a,
    u16 v, memory_order mo) {
  (void)mo;
  DCHECK(!((uptr)a % sizeof(*a)));
  __asm {
    mov eax, a
    mov cx, v
    xchg [eax], cx  // NOLINT
    mov v, cx
  }
  return v;
}

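// lock cmpxchg compares al with the byte at [ecx]; on a match it writes dl
// there, otherwise it loads the current byte into al. Either way al holds the
// previous value afterwards, which is what gets reported back through *cmp on
// failure.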
INLINE bool atomic_compare_exchange_strong(volatile atomic_uint8_t *a,
                                           u8 *cmp,
                                           u8 xchg,
                                           memory_order mo) {
  (void)mo;
  DCHECK(!((uptr)a % sizeof(*a)));
  u8 cmpv = *cmp;
  u8 prev;
  __asm {
    mov al, cmpv
    mov ecx, a
    mov dl, xchg
    lock cmpxchg [ecx], dl
    mov prev, al
  }
  if (prev == cmpv)
    return true;
  *cmp = prev;
  return false;
}

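// The pointer-sized variant defers to _InterlockedCompareExchangePointer (or
// its x86 emulation above), which is itself a full barrier, so mo is unused.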
INLINE bool atomic_compare_exchange_strong(volatile atomic_uintptr_t *a,
                                           uptr *cmp,
                                           uptr xchg,
                                           memory_order mo) {
  uptr cmpv = *cmp;
  uptr prev = (uptr)_InterlockedCompareExchangePointer(
      (void*volatile*)&a->val_dont_use, (void*)xchg, (void*)cmpv);
  if (prev == cmpv)
    return true;
  *cmp = prev;
  return false;
}

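// cmpxchg cannot fail spuriously on x86, so the weak variant can simply
// forward to the strong one.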
template<typename T>
INLINE bool atomic_compare_exchange_weak(volatile T *a,
                                         typename T::Type *cmp,
                                         typename T::Type xchg,
                                         memory_order mo) {
  return atomic_compare_exchange_strong(a, cmp, xchg, mo);
}

}  // namespace __sanitizer
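
// Illustrative usage sketch (not part of this header; assumes it is reached
// through sanitizer_atomic.h, which defines the atomic types and memory
// orders used above):
//
//   namespace S = __sanitizer;
//   S::atomic_uint32_t flag;
//   S::atomic_store(&flag, 1u, S::memory_order_release);
//   S::u32 seen = S::atomic_load(&flag, S::memory_order_acquire);
//   S::atomic_fetch_add(&flag, 1u, S::memory_order_relaxed);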

#endif  // SANITIZER_ATOMIC_MSVC_H