tsan_interface_atomic.cc revision fc575e52636c803262fc24bacf63a95f731b1314
//===-- tsan_interface_atomic.cc ------------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//

#include "sanitizer_common/sanitizer_placement_new.h"
#include "tsan_interface_atomic.h"
#include "tsan_flags.h"
#include "tsan_rtl.h"

using namespace __tsan;  // NOLINT

class ScopedAtomic {
 public:
  ScopedAtomic(ThreadState *thr, uptr pc, const char *func)
      : thr_(thr) {
    CHECK_EQ(thr_->in_rtl, 1);  // 1 due to our own ScopedInRtl member.
    DPrintf("#%d: %s\n", thr_->tid, func);
  }
  ~ScopedAtomic() {
    CHECK_EQ(thr_->in_rtl, 1);
  }
 private:
  ThreadState *thr_;
  ScopedInRtl in_rtl_;
};

// Some shortcuts.
typedef __tsan_memory_order morder;
typedef __tsan_atomic8 a8;
typedef __tsan_atomic16 a16;
typedef __tsan_atomic32 a32;
typedef __tsan_atomic64 a64;
const morder mo_relaxed = __tsan_memory_order_relaxed;
const morder mo_consume = __tsan_memory_order_consume;
const morder mo_acquire = __tsan_memory_order_acquire;
const morder mo_release = __tsan_memory_order_release;
const morder mo_acq_rel = __tsan_memory_order_acq_rel;
const morder mo_seq_cst = __tsan_memory_order_seq_cst;

static void AtomicStatInc(ThreadState *thr, uptr size, morder mo, StatType t) {
  StatInc(thr, StatAtomic);
  StatInc(thr, t);
  StatInc(thr, size == 1 ? StatAtomic1
             : size == 2 ? StatAtomic2
             : size == 4 ? StatAtomic4
             :             StatAtomic8);
  StatInc(thr, mo == mo_relaxed ? StatAtomicRelaxed
             : mo == mo_consume ? StatAtomicConsume
             : mo == mo_acquire ? StatAtomicAcquire
             : mo == mo_release ? StatAtomicRelease
             : mo == mo_acq_rel ? StatAtomicAcq_Rel
             :                    StatAtomicSeq_Cst);
}

static bool IsLoadOrder(morder mo) {
  return mo == mo_relaxed || mo == mo_consume
      || mo == mo_acquire || mo == mo_seq_cst;
}

static bool IsStoreOrder(morder mo) {
  return mo == mo_relaxed || mo == mo_release || mo == mo_seq_cst;
}

static bool IsReleaseOrder(morder mo) {
  return mo == mo_release || mo == mo_acq_rel || mo == mo_seq_cst;
}

static bool IsAcquireOrder(morder mo) {
  return mo == mo_consume || mo == mo_acquire
      || mo == mo_acq_rel || mo == mo_seq_cst;
}

// Values above 100500 appear to come from an older revision of this interface
// that encoded memory orders as bit flags (1 << 0 for relaxed through 1 << 5
// for seq_cst), offset by 100500; map them back to the current enum values.
static morder ConvertOrder(morder mo) {
  if (mo > (morder)100500) {
    mo = morder(mo - 100500);
    if (mo == morder(1 << 0))
      mo = mo_relaxed;
    else if (mo == morder(1 << 1))
      mo = mo_consume;
    else if (mo == morder(1 << 2))
      mo = mo_acquire;
    else if (mo == morder(1 << 3))
      mo = mo_release;
    else if (mo == morder(1 << 4))
      mo = mo_acq_rel;
    else if (mo == morder(1 << 5))
      mo = mo_seq_cst;
  }
  CHECK_GE(mo, mo_relaxed);
  CHECK_LE(mo, mo_seq_cst);
  return mo;
}

#define SCOPED_ATOMIC(func, ...) \
    mo = ConvertOrder(mo); \
    mo = flags()->force_seq_cst_atomics ? (morder)mo_seq_cst : mo; \
    ThreadState *const thr = cur_thread(); \
    ProcessPendingSignals(thr); \
    const uptr pc = (uptr)__builtin_return_address(0); \
    AtomicStatInc(thr, sizeof(*a), mo, StatAtomic##func); \
    ScopedAtomic sa(thr, pc, __FUNCTION__); \
    return Atomic##func(thr, pc, __VA_ARGS__); \
/**/
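
// For reference, a sketch of what SCOPED_ATOMIC expands to inside an entry
// point such as __tsan_atomic32_load (reconstructed by hand from the macro
// above, not actual preprocessor output):
//
//   a32 __tsan_atomic32_load(const volatile a32 *a, morder mo) {
//     mo = ConvertOrder(mo);
//     mo = flags()->force_seq_cst_atomics ? (morder)mo_seq_cst : mo;
//     ThreadState *const thr = cur_thread();
//     ProcessPendingSignals(thr);
//     const uptr pc = (uptr)__builtin_return_address(0);
//     AtomicStatInc(thr, sizeof(*a), mo, StatAtomicLoad);
//     ScopedAtomic sa(thr, pc, __FUNCTION__);
//     return AtomicLoad(thr, pc, a, mo);
//   }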

template<typename T>
static T AtomicLoad(ThreadState *thr, uptr pc, const volatile T *a,
    morder mo) {
  CHECK(IsLoadOrder(mo));
  T v = *a;
  if (IsAcquireOrder(mo))
    Acquire(thr, pc, (uptr)a);
  return v;
}

template<typename T>
static void AtomicStore(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  CHECK(IsStoreOrder(mo));
  if (IsReleaseOrder(mo))
    ReleaseStore(thr, pc, (uptr)a);
  *a = v;
}

template<typename T>
static T AtomicExchange(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  if (IsReleaseOrder(mo))
    Release(thr, pc, (uptr)a);
  v = __sync_lock_test_and_set(a, v);
  if (IsAcquireOrder(mo))
    Acquire(thr, pc, (uptr)a);
  return v;
}

template<typename T>
static T AtomicFetchAdd(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  if (IsReleaseOrder(mo))
    Release(thr, pc, (uptr)a);
  v = __sync_fetch_and_add(a, v);
  if (IsAcquireOrder(mo))
    Acquire(thr, pc, (uptr)a);
  return v;
}

template<typename T>
static T AtomicFetchSub(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  if (IsReleaseOrder(mo))
    Release(thr, pc, (uptr)a);
  v = __sync_fetch_and_sub(a, v);
  if (IsAcquireOrder(mo))
    Acquire(thr, pc, (uptr)a);
  return v;
}

template<typename T>
static T AtomicFetchAnd(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  if (IsReleaseOrder(mo))
    Release(thr, pc, (uptr)a);
  v = __sync_fetch_and_and(a, v);
  if (IsAcquireOrder(mo))
    Acquire(thr, pc, (uptr)a);
  return v;
}

template<typename T>
static T AtomicFetchOr(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  if (IsReleaseOrder(mo))
    Release(thr, pc, (uptr)a);
  v = __sync_fetch_and_or(a, v);
  if (IsAcquireOrder(mo))
    Acquire(thr, pc, (uptr)a);
  return v;
}

template<typename T>
static T AtomicFetchXor(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  if (IsReleaseOrder(mo))
    Release(thr, pc, (uptr)a);
  v = __sync_fetch_and_xor(a, v);
  if (IsAcquireOrder(mo))
    Acquire(thr, pc, (uptr)a);
  return v;
}

template<typename T>
static bool AtomicCAS(ThreadState *thr, uptr pc,
    volatile T *a, T *c, T v, morder mo, morder fmo) {
  (void)fmo;  // The failure memory order is currently unused.
  if (IsReleaseOrder(mo))
    Release(thr, pc, (uptr)a);
  T cc = *c;
  T pr = __sync_val_compare_and_swap(a, cc, v);
  if (IsAcquireOrder(mo))
    Acquire(thr, pc, (uptr)a);
  if (pr == cc)
    return true;
  *c = pr;
  return false;
}

template<typename T>
static T AtomicCAS(ThreadState *thr, uptr pc,
    volatile T *a, T c, T v, morder mo, morder fmo) {
  AtomicCAS(thr, pc, a, &c, v, mo, fmo);
  return c;
}

static void AtomicFence(ThreadState *thr, uptr pc, morder mo) {
  __sync_synchronize();
}
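
// Illustrative pairing of the Release()/Acquire() annotations above
// (hypothetical user code, not part of this file): a release store of a flag
// in one thread and an acquire load of the same flag in another establish the
// happens-before edge that the race detector records, so the plain accesses
// to `data` are not reported as a race:
//
//   // Thread 1:
//   data = 42;
//   __tsan_atomic32_store(&flag, 1, __tsan_memory_order_release);
//
//   // Thread 2:
//   while (__tsan_atomic32_load(&flag, __tsan_memory_order_acquire) == 0) {}
//   use(data);  // happens-after the store of 42; no race reported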

a8 __tsan_atomic8_load(const volatile a8 *a, morder mo) {
  SCOPED_ATOMIC(Load, a, mo);
}

a16 __tsan_atomic16_load(const volatile a16 *a, morder mo) {
  SCOPED_ATOMIC(Load, a, mo);
}

a32 __tsan_atomic32_load(const volatile a32 *a, morder mo) {
  SCOPED_ATOMIC(Load, a, mo);
}

a64 __tsan_atomic64_load(const volatile a64 *a, morder mo) {
  SCOPED_ATOMIC(Load, a, mo);
}

void __tsan_atomic8_store(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(Store, a, v, mo);
}

void __tsan_atomic16_store(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(Store, a, v, mo);
}

void __tsan_atomic32_store(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(Store, a, v, mo);
}

void __tsan_atomic64_store(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(Store, a, v, mo);
}

a8 __tsan_atomic8_exchange(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(Exchange, a, v, mo);
}

a16 __tsan_atomic16_exchange(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(Exchange, a, v, mo);
}

a32 __tsan_atomic32_exchange(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(Exchange, a, v, mo);
}

a64 __tsan_atomic64_exchange(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(Exchange, a, v, mo);
}

a8 __tsan_atomic8_fetch_add(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(FetchAdd, a, v, mo);
}

a16 __tsan_atomic16_fetch_add(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(FetchAdd, a, v, mo);
}

a32 __tsan_atomic32_fetch_add(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(FetchAdd, a, v, mo);
}

a64 __tsan_atomic64_fetch_add(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(FetchAdd, a, v, mo);
}

a8 __tsan_atomic8_fetch_sub(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(FetchSub, a, v, mo);
}

a16 __tsan_atomic16_fetch_sub(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(FetchSub, a, v, mo);
}

a32 __tsan_atomic32_fetch_sub(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(FetchSub, a, v, mo);
}

a64 __tsan_atomic64_fetch_sub(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(FetchSub, a, v, mo);
}

a8 __tsan_atomic8_fetch_and(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(FetchAnd, a, v, mo);
}

a16 __tsan_atomic16_fetch_and(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(FetchAnd, a, v, mo);
}

a32 __tsan_atomic32_fetch_and(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(FetchAnd, a, v, mo);
}

a64 __tsan_atomic64_fetch_and(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(FetchAnd, a, v, mo);
}

a8 __tsan_atomic8_fetch_or(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(FetchOr, a, v, mo);
}

a16 __tsan_atomic16_fetch_or(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(FetchOr, a, v, mo);
}

a32 __tsan_atomic32_fetch_or(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(FetchOr, a, v, mo);
}

a64 __tsan_atomic64_fetch_or(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(FetchOr, a, v, mo);
}

a8 __tsan_atomic8_fetch_xor(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(FetchXor, a, v, mo);
}

a16 __tsan_atomic16_fetch_xor(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(FetchXor, a, v, mo);
}

a32 __tsan_atomic32_fetch_xor(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(FetchXor, a, v, mo);
}

a64 __tsan_atomic64_fetch_xor(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(FetchXor, a, v, mo);
}
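
// The compare-exchange entry points below come in three flavors, all of
// which funnel into the AtomicCAS templates above:
//  - *_compare_exchange_strong: expected value passed by pointer, updated
//    with the observed value on failure; returns success as an int;
//  - *_compare_exchange_weak: same signature; this revision maps it to the
//    same strong CAS, so there are no spurious failures;
//  - *_compare_exchange_val: expected value passed by copy; returns the
//    value actually observed at the address.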

int __tsan_atomic8_compare_exchange_strong(volatile a8 *a, a8 *c, a8 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

int __tsan_atomic16_compare_exchange_strong(volatile a16 *a, a16 *c, a16 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

int __tsan_atomic32_compare_exchange_strong(volatile a32 *a, a32 *c, a32 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

int __tsan_atomic64_compare_exchange_strong(volatile a64 *a, a64 *c, a64 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

int __tsan_atomic8_compare_exchange_weak(volatile a8 *a, a8 *c, a8 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

int __tsan_atomic16_compare_exchange_weak(volatile a16 *a, a16 *c, a16 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

int __tsan_atomic32_compare_exchange_weak(volatile a32 *a, a32 *c, a32 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

int __tsan_atomic64_compare_exchange_weak(volatile a64 *a, a64 *c, a64 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

a8 __tsan_atomic8_compare_exchange_val(volatile a8 *a, a8 c, a8 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

a16 __tsan_atomic16_compare_exchange_val(volatile a16 *a, a16 c, a16 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

a32 __tsan_atomic32_compare_exchange_val(volatile a32 *a, a32 c, a32 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

a64 __tsan_atomic64_compare_exchange_val(volatile a64 *a, a64 c, a64 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

void __tsan_atomic_thread_fence(morder mo) {
  char* a;  // Dummy; SCOPED_ATOMIC only uses it for sizeof(*a) in the stats.
  SCOPED_ATOMIC(Fence, mo);
}

void __tsan_atomic_signal_fence(morder mo) {
}
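
// Usage note (illustrative; the exact lowering is up to the instrumentation
// pass): compilers with ThreadSanitizer instrumentation rewrite atomic
// operations into calls to the entry points above, so a C++11 sequence like
//
//   std::atomic<int> x;
//   x.store(1, std::memory_order_release);
//   int v = x.load(std::memory_order_acquire);
//
// reaches this file roughly as
//
//   __tsan_atomic32_store((volatile a32 *)&x, 1, __tsan_memory_order_release);
//   a32 v = __tsan_atomic32_load((const volatile a32 *)&x,
//       __tsan_memory_order_acquire);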