Lines matching refs: mo

33     mo = flags()->force_seq_cst_atomics ? (morder)mo_seq_cst : mo; \
37 AtomicStatInc(thr, sizeof(*a), mo, StatAtomic##func); \
38 ScopedAtomic sa(thr, callpc, a, mo, __func__); \
72 morder mo, const char *func)
75 DPrintf("#%d: %s(%p, %d)\n", thr_->tid, func, a, mo);
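The fragments above come from the SCOPED_ATOMIC dispatch macro and the ScopedAtomic helper of what appears to be ThreadSanitizer's atomic interface (compiler-rt's tsan_interface_atomic.cc); the number at the start of each listed line is that file's own line number. They show the requested order being upgraded to mo_seq_cst when the force_seq_cst_atomics flag is set, a per-operation statistic being bumped, and the call being logged. A minimal standalone sketch of the same order-upgrade pattern, using std::memory_order instead of the runtime's morder enum (force_seq_cst here is a hypothetical stand-in for the flag):

    #include <atomic>

    // Hypothetical stand-in for flags()->force_seq_cst_atomics.
    static bool force_seq_cst = false;

    // Pick the order an operation will actually run with: the caller's order,
    // unless the flag forces everything to sequential consistency.
    static std::memory_order effective_order(std::memory_order mo) {
      return force_seq_cst ? std::memory_order_seq_cst : mo;
    }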
85 static void AtomicStatInc(ThreadState *thr, uptr size, morder mo, StatType t) {
93 StatInc(thr, mo == mo_relaxed ? StatAtomicRelaxed
94 : mo == mo_consume ? StatAtomicConsume
95 : mo == mo_acquire ? StatAtomicAcquire
96 : mo == mo_release ? StatAtomicRelease
97 : mo == mo_acq_rel ? StatAtomicAcq_Rel
101 static bool IsLoadOrder(morder mo) {
102 return mo == mo_relaxed || mo == mo_consume
103 || mo == mo_acquire || mo == mo_seq_cst;
106 static bool IsStoreOrder(morder mo) {
107 return mo == mo_relaxed || mo == mo_release || mo == mo_seq_cst;
110 static bool IsReleaseOrder(morder mo) {
111 return mo == mo_release || mo == mo_acq_rel || mo == mo_seq_cst;
114 static bool IsAcquireOrder(morder mo) {
115 return mo == mo_consume || mo == mo_acquire
116 || mo == mo_acq_rel || mo == mo_seq_cst;
119 static bool IsAcqRelOrder(morder mo) {
120 return mo == mo_acq_rel || mo == mo_seq_cst;
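The IsLoadOrder/IsStoreOrder/IsReleaseOrder/IsAcquireOrder/IsAcqRelOrder predicates above partition the six C11 memory orders by which kind of synchronization an operation must perform. A standalone sketch of the same classification over std::memory_order (illustrative only; the runtime uses its own morder enum):

    #include <atomic>

    static bool is_load_order(std::memory_order mo) {
      return mo == std::memory_order_relaxed || mo == std::memory_order_consume ||
             mo == std::memory_order_acquire || mo == std::memory_order_seq_cst;
    }

    static bool is_store_order(std::memory_order mo) {
      return mo == std::memory_order_relaxed || mo == std::memory_order_release ||
             mo == std::memory_order_seq_cst;
    }

    static bool is_acquire_order(std::memory_order mo) {
      return mo == std::memory_order_consume || mo == std::memory_order_acquire ||
             mo == std::memory_order_acq_rel || mo == std::memory_order_seq_cst;
    }

    static bool is_release_order(std::memory_order mo) {
      return mo == std::memory_order_release || mo == std::memory_order_acq_rel ||
             mo == std::memory_order_seq_cst;
    }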
259 static memory_order to_mo(morder mo) {
260 switch (mo) {
273 static T NoTsanAtomicLoad(const volatile T *a, morder mo) {
274 return atomic_load(to_atomic(a), to_mo(mo));
278 static a128 NoTsanAtomicLoad(const volatile a128 *a, morder mo) {
286 morder mo) {
287 CHECK(IsLoadOrder(mo));
290 if (!IsAcquireOrder(mo)) {
292 return NoTsanAtomicLoad(a, mo);
296 T v = NoTsanAtomicLoad(a, mo);
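Per the AtomicLoad fragments above, the instrumented load first checks that the order is valid for a load, forwards non-acquire loads directly, and for acquire orders also imports the releasing thread's happens-before state. A standalone sketch of that control flow, using the predicates sketched earlier (acquire_clock is a hypothetical stand-in for the runtime's synchronization-clock acquisition):

    #include <atomic>
    #include <cassert>

    // Hypothetical stand-in for "acquire the sync clock associated with *a".
    template <typename T> static void acquire_clock(const std::atomic<T> *) {}

    template <typename T>
    static T instrumented_load(const std::atomic<T> *a, std::memory_order mo) {
      assert(is_load_order(mo));
      if (!is_acquire_order(mo))
        return a->load(mo);   // relaxed: no happens-before bookkeeping
      T v = a->load(mo);
      acquire_clock(a);       // acquire/consume/seq_cst: import the writer's clock
      return v;
    }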
303 static void NoTsanAtomicStore(volatile T *a, T v, morder mo) {
304 atomic_store(to_atomic(a), v, to_mo(mo));
308 static void NoTsanAtomicStore(volatile a128 *a, a128 v, morder mo) {
316 morder mo) {
317 CHECK(IsStoreOrder(mo));
323 if (!IsReleaseOrder(mo)) {
324 NoTsanAtomicStore(a, v, mo);
333 NoTsanAtomicStore(a, v, mo);
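The AtomicStore fragments mirror the load path: the order is checked with IsStoreOrder, a non-release store is forwarded directly, and a release store first publishes the current thread's clock so a later acquire load can pick it up. A corresponding sketch, continuing the one above (release_clock is a hypothetical stand-in for that publication step):

    template <typename T> static void release_clock(std::atomic<T> *) {}

    template <typename T>
    static void instrumented_store(std::atomic<T> *a, T v, std::memory_order mo) {
      assert(is_store_order(mo));
      if (!is_release_order(mo)) {
        a->store(v, mo);      // relaxed: plain store, no clock publication
        return;
      }
      release_clock(a);       // release/seq_cst: publish this thread's clock
      a->store(v, mo);
    }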
338 static T AtomicRMW(ThreadState *thr, uptr pc, volatile T *a, T v, morder mo) {
341 if (mo != mo_relaxed) {
346 if (IsAcqRelOrder(mo))
348 else if (IsReleaseOrder(mo))
350 else if (IsAcquireOrder(mo))
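AtomicRMW classifies the requested order once: acq_rel and seq_cst get both directions of synchronization, a plain release order only releases, acquire and consume only acquire, and relaxed skips the bookkeeping entirely. A sketch of that classification around a fetch_add, reusing the helpers sketched above:

    template <typename T>
    static T instrumented_fetch_add(std::atomic<T> *a, T v, std::memory_order mo) {
      if (mo != std::memory_order_relaxed) {
        if (mo == std::memory_order_acq_rel || mo == std::memory_order_seq_cst) {
          acquire_clock(a);   // both directions of synchronization
          release_clock(a);
        } else if (is_release_order(mo)) {
          release_clock(a);   // release only
        } else if (is_acquire_order(mo)) {
          acquire_clock(a);   // acquire/consume only
        }
      }
      return a->fetch_add(v, mo);
    }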
360 static T NoTsanAtomicExchange(volatile T *a, T v, morder mo) {
365 static T NoTsanAtomicFetchAdd(volatile T *a, T v, morder mo) {
370 static T NoTsanAtomicFetchSub(volatile T *a, T v, morder mo) {
375 static T NoTsanAtomicFetchAnd(volatile T *a, T v, morder mo) {
380 static T NoTsanAtomicFetchOr(volatile T *a, T v, morder mo) {
385 static T NoTsanAtomicFetchXor(volatile T *a, T v, morder mo) {
390 static T NoTsanAtomicFetchNand(volatile T *a, T v, morder mo) {
396 morder mo) {
397 return AtomicRMW<T, func_xchg>(thr, pc, a, v, mo);
402 morder mo) {
403 return AtomicRMW<T, func_add>(thr, pc, a, v, mo);
408 morder mo) {
409 return AtomicRMW<T, func_sub>(thr, pc, a, v, mo);
414 morder mo) {
415 return AtomicRMW<T, func_and>(thr, pc, a, v, mo);
420 morder mo) {
421 return AtomicRMW<T, func_or>(thr, pc, a, v, mo);
426 morder mo) {
427 return AtomicRMW<T, func_xor>(thr, pc, a, v, mo);
432 morder mo) {
433 return AtomicRMW<T, func_nand>(thr, pc, a, v, mo);
437 static bool NoTsanAtomicCAS(volatile T *a, T *c, T v, morder mo, morder fmo) {
438 return atomic_compare_exchange_strong(to_atomic(a), c, v, to_mo(mo));
443 morder mo, morder fmo) {
454 static bool NoTsanAtomicCAS(volatile T *a, T c, T v, morder mo, morder fmo) {
455 return NoTsanAtomicCAS(a, &c, v, mo, fmo);
460 volatile T *a, T *c, T v, morder mo, morder fmo) {
464 bool write_lock = mo != mo_acquire && mo != mo_consume;
465 if (mo != mo_relaxed) {
470 if (IsAcqRelOrder(mo))
472 else if (IsReleaseOrder(mo))
474 else if (IsAcquireOrder(mo))
493 volatile T *a, T c, T v, morder mo, morder fmo) {
494 AtomicCAS(thr, pc, a, &c, v, mo, fmo);
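The compare-exchange entry points take both a success order mo and a failure order fmo; in the fragments shown, the uninstrumented fallback forwards only the success order to atomic_compare_exchange_strong, and the value-returning variant forwards to the pointer variant with a local copy of the expected value. A standalone sketch of the pointer and value forms over std::atomic:

    template <typename T>
    static bool instrumented_cas(std::atomic<T> *a, T *expected, T desired,
                                 std::memory_order mo, std::memory_order fmo) {
      // Success order applies if the exchange happens, failure order otherwise.
      return a->compare_exchange_strong(*expected, desired, mo, fmo);
    }

    template <typename T>
    static T instrumented_cas_val(std::atomic<T> *a, T expected, T desired,
                                  std::memory_order mo, std::memory_order fmo) {
      instrumented_cas(a, &expected, desired, mo, fmo);
      return expected;   // the value observed in *a, matched or not
    }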
498 static void NoTsanAtomicFence(morder mo) {
502 static void AtomicFence(ThreadState *thr, uptr pc, morder mo) {
509 a8 __tsan_atomic8_load(const volatile a8 *a, morder mo) {
510 SCOPED_ATOMIC(Load, a, mo);
514 a16 __tsan_atomic16_load(const volatile a16 *a, morder mo) {
515 SCOPED_ATOMIC(Load, a, mo);
519 a32 __tsan_atomic32_load(const volatile a32 *a, morder mo) {
520 SCOPED_ATOMIC(Load, a, mo);
524 a64 __tsan_atomic64_load(const volatile a64 *a, morder mo) {
525 SCOPED_ATOMIC(Load, a, mo);
530 a128 __tsan_atomic128_load(const volatile a128 *a, morder mo) {
531 SCOPED_ATOMIC(Load, a, mo);
536 void __tsan_atomic8_store(volatile a8 *a, a8 v, morder mo) {
537 SCOPED_ATOMIC(Store, a, v, mo);
541 void __tsan_atomic16_store(volatile a16 *a, a16 v, morder mo) {
542 SCOPED_ATOMIC(Store, a, v, mo);
546 void __tsan_atomic32_store(volatile a32 *a, a32 v, morder mo) {
547 SCOPED_ATOMIC(Store, a, v, mo);
551 void __tsan_atomic64_store(volatile a64 *a, a64 v, morder mo) {
552 SCOPED_ATOMIC(Store, a, v, mo);
557 void __tsan_atomic128_store(volatile a128 *a, a128 v, morder mo) {
558 SCOPED_ATOMIC(Store, a, v, mo);
563 a8 __tsan_atomic8_exchange(volatile a8 *a, a8 v, morder mo) {
564 SCOPED_ATOMIC(Exchange, a, v, mo);
568 a16 __tsan_atomic16_exchange(volatile a16 *a, a16 v, morder mo) {
569 SCOPED_ATOMIC(Exchange, a, v, mo);
573 a32 __tsan_atomic32_exchange(volatile a32 *a, a32 v, morder mo) {
574 SCOPED_ATOMIC(Exchange, a, v, mo);
578 a64 __tsan_atomic64_exchange(volatile a64 *a, a64 v, morder mo) {
579 SCOPED_ATOMIC(Exchange, a, v, mo);
584 a128 __tsan_atomic128_exchange(volatile a128 *a, a128 v, morder mo) {
585 SCOPED_ATOMIC(Exchange, a, v, mo);
590 a8 __tsan_atomic8_fetch_add(volatile a8 *a, a8 v, morder mo) {
591 SCOPED_ATOMIC(FetchAdd, a, v, mo);
595 a16 __tsan_atomic16_fetch_add(volatile a16 *a, a16 v, morder mo) {
596 SCOPED_ATOMIC(FetchAdd, a, v, mo);
600 a32 __tsan_atomic32_fetch_add(volatile a32 *a, a32 v, morder mo) {
601 SCOPED_ATOMIC(FetchAdd, a, v, mo);
605 a64 __tsan_atomic64_fetch_add(volatile a64 *a, a64 v, morder mo) {
606 SCOPED_ATOMIC(FetchAdd, a, v, mo);
611 a128 __tsan_atomic128_fetch_add(volatile a128 *a, a128 v, morder mo) {
612 SCOPED_ATOMIC(FetchAdd, a, v, mo);
617 a8 __tsan_atomic8_fetch_sub(volatile a8 *a, a8 v, morder mo) {
618 SCOPED_ATOMIC(FetchSub, a, v, mo);
622 a16 __tsan_atomic16_fetch_sub(volatile a16 *a, a16 v, morder mo) {
623 SCOPED_ATOMIC(FetchSub, a, v, mo);
627 a32 __tsan_atomic32_fetch_sub(volatile a32 *a, a32 v, morder mo) {
628 SCOPED_ATOMIC(FetchSub, a, v, mo);
632 a64 __tsan_atomic64_fetch_sub(volatile a64 *a, a64 v, morder mo) {
633 SCOPED_ATOMIC(FetchSub, a, v, mo);
638 a128 __tsan_atomic128_fetch_sub(volatile a128 *a, a128 v, morder mo) {
639 SCOPED_ATOMIC(FetchSub, a, v, mo);
644 a8 __tsan_atomic8_fetch_and(volatile a8 *a, a8 v, morder mo) {
645 SCOPED_ATOMIC(FetchAnd, a, v, mo);
649 a16 __tsan_atomic16_fetch_and(volatile a16 *a, a16 v, morder mo) {
650 SCOPED_ATOMIC(FetchAnd, a, v, mo);
654 a32 __tsan_atomic32_fetch_and(volatile a32 *a, a32 v, morder mo) {
655 SCOPED_ATOMIC(FetchAnd, a, v, mo);
659 a64 __tsan_atomic64_fetch_and(volatile a64 *a, a64 v, morder mo) {
660 SCOPED_ATOMIC(FetchAnd, a, v, mo);
665 a128 __tsan_atomic128_fetch_and(volatile a128 *a, a128 v, morder mo) {
666 SCOPED_ATOMIC(FetchAnd, a, v, mo);
671 a8 __tsan_atomic8_fetch_or(volatile a8 *a, a8 v, morder mo) {
672 SCOPED_ATOMIC(FetchOr, a, v, mo);
676 a16 __tsan_atomic16_fetch_or(volatile a16 *a, a16 v, morder mo) {
677 SCOPED_ATOMIC(FetchOr, a, v, mo);
681 a32 __tsan_atomic32_fetch_or(volatile a32 *a, a32 v, morder mo) {
682 SCOPED_ATOMIC(FetchOr, a, v, mo);
686 a64 __tsan_atomic64_fetch_or(volatile a64 *a, a64 v, morder mo) {
687 SCOPED_ATOMIC(FetchOr, a, v, mo);
692 a128 __tsan_atomic128_fetch_or(volatile a128 *a, a128 v, morder mo) {
693 SCOPED_ATOMIC(FetchOr, a, v, mo);
698 a8 __tsan_atomic8_fetch_xor(volatile a8 *a, a8 v, morder mo) {
699 SCOPED_ATOMIC(FetchXor, a, v, mo);
703 a16 __tsan_atomic16_fetch_xor(volatile a16 *a, a16 v, morder mo) {
704 SCOPED_ATOMIC(FetchXor, a, v, mo);
708 a32 __tsan_atomic32_fetch_xor(volatile a32 *a, a32 v, morder mo) {
709 SCOPED_ATOMIC(FetchXor, a, v, mo);
713 a64 __tsan_atomic64_fetch_xor(volatile a64 *a, a64 v, morder mo) {
714 SCOPED_ATOMIC(FetchXor, a, v, mo);
719 a128 __tsan_atomic128_fetch_xor(volatile a128 *a, a128 v, morder mo) {
720 SCOPED_ATOMIC(FetchXor, a, v, mo);
725 a8 __tsan_atomic8_fetch_nand(volatile a8 *a, a8 v, morder mo) {
726 SCOPED_ATOMIC(FetchNand, a, v, mo);
730 a16 __tsan_atomic16_fetch_nand(volatile a16 *a, a16 v, morder mo) {
731 SCOPED_ATOMIC(FetchNand, a, v, mo);
735 a32 __tsan_atomic32_fetch_nand(volatile a32 *a, a32 v, morder mo) {
736 SCOPED_ATOMIC(FetchNand, a, v, mo);
740 a64 __tsan_atomic64_fetch_nand(volatile a64 *a, a64 v, morder mo) {
741 SCOPED_ATOMIC(FetchNand, a, v, mo);
746 a128 __tsan_atomic128_fetch_nand(volatile a128 *a, a128 v, morder mo) {
747 SCOPED_ATOMIC(FetchNand, a, v, mo);
753 morder mo, morder fmo) {
754 SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
759 morder mo, morder fmo) {
760 SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
765 morder mo, morder fmo) {
766 SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
771 morder mo, morder fmo) {
772 SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
778 morder mo, morder fmo) {
779 SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
785 morder mo, morder fmo) {
786 SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
791 morder mo, morder fmo) {
792 SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
797 morder mo, morder fmo) {
798 SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
803 morder mo, morder fmo) {
804 SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
810 morder mo, morder fmo) {
811 SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
817 morder mo, morder fmo) {
818 SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
823 morder mo, morder fmo) {
824 SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
829 morder mo, morder fmo) {
830 SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
835 morder mo, morder fmo) {
836 SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
842 morder mo, morder fmo) {
843 SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
848 void __tsan_atomic_thread_fence(morder mo) {
850 SCOPED_ATOMIC(Fence, mo);
854 void __tsan_atomic_signal_fence(morder mo) {
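Taken together, the __tsan_atomicN_* functions and the two fences form the interface an instrumented program calls instead of performing atomics directly. A rough, illustrative mapping from C++11 atomics to these entry points (not actual compiler output; addr_of_x is a placeholder for the address argument):

    #include <atomic>

    std::atomic<int> x;   // 32-bit atomic, so the a32 entry points apply

    void example() {
      int v = x.load(std::memory_order_acquire);
      // ~ __tsan_atomic32_load(addr_of_x, mo_acquire)

      x.store(v + 1, std::memory_order_release);
      // ~ __tsan_atomic32_store(addr_of_x, v + 1, mo_release)

      x.fetch_add(1, std::memory_order_acq_rel);
      // ~ __tsan_atomic32_fetch_add(addr_of_x, 1, mo_acq_rel)

      std::atomic_thread_fence(std::memory_order_seq_cst);
      // ~ __tsan_atomic_thread_fence(mo_seq_cst)
    }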