Searched refs:mo (Results 1 - 25 of 117) sorted by relevance


/external/libcxx/test/std/containers/associative/multiset/multiset.cons/
copy.pass.cpp
41 std::multiset<int, C, A> mo(ar, ar+sizeof(ar)/sizeof(ar[0]), C(5), A(7));
42 std::multiset<int, C, A> m = mo;
57 assert(mo.get_allocator() == A(7));
58 assert(mo.key_comp() == C(5));
59 assert(mo.size() == 9);
60 assert(distance(mo.begin(), mo.end()) == 9);
61 assert(*next(mo.begin(), 0) == 1);
62 assert(*next(mo.begin(), 1) == 1);
63 assert(*next(mo
[all...]
copy_assign.pass.cpp
40 std::multiset<int, C, A> mo(ar, ar+sizeof(ar)/sizeof(ar[0]), C(5), A(2));
42 m = mo;
57 assert(mo.get_allocator() == A(2));
58 assert(mo.key_comp() == C(5));
59 assert(mo.size() == 9);
60 assert(distance(mo.begin(), mo.end()) == 9);
61 assert(*next(mo.begin(), 0) == 1);
62 assert(*next(mo.begin(), 1) == 1);
63 assert(*next(mo
[all...]
copy_alloc.pass.cpp
39 std::multiset<int, C, A> mo(ar, ar+sizeof(ar)/sizeof(ar[0]), C(5), A(7));
40 std::multiset<int, C, A> m(mo, A(3));
55 assert(mo.get_allocator() == A(7));
56 assert(mo.key_comp() == C(5));
57 assert(mo.size() == 9);
58 assert(distance(mo.begin(), mo.end()) == 9);
59 assert(*next(mo.begin(), 0) == 1);
60 assert(*next(mo.begin(), 1) == 1);
61 assert(*next(mo
[all...]
move.pass.cpp
31 std::multiset<int, C, A> mo(C(5), A(7));
32 std::multiset<int, C, A> m = std::move(mo);
38 assert(mo.get_allocator() == A(7));
39 assert(mo.key_comp() == C(5));
40 assert(mo.size() == 0);
41 assert(distance(mo.begin(), mo.end()) == 0);
59 std::multiset<int, C, A> mo(ar, ar+sizeof(ar)/sizeof(ar[0]), C(5), A(7));
60 std::multiset<int, C, A> m = std::move(mo);
75 assert(mo
[all...]
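
The four hits above all follow the same libcxx pattern: build a multiset from a raw array with a stateful comparator C(5) and allocator A(7) (or A(2)), then copy/assign/move it and assert that both containers end up with the expected elements, comparator, and allocator. A minimal, self-contained sketch of the copy case, using plain std::less/std::allocator instead of the test suite's stateful C/A helpers and assuming the usual 1,1,1,2,2,2,3,3,3 input these tests use:

#include <cassert>
#include <iterator>
#include <set>

int main() {
    const int ar[] = {1, 1, 1, 2, 2, 2, 3, 3, 3};
    std::multiset<int> mo(ar, ar + sizeof(ar) / sizeof(ar[0]));
    std::multiset<int> m = mo;  // copy construction

    // The copy holds the same nine elements in sorted order...
    assert(m.size() == 9);
    assert(std::distance(m.begin(), m.end()) == 9);
    assert(*std::next(m.begin(), 0) == 1);
    assert(*std::next(m.begin(), 3) == 2);
    assert(*std::next(m.begin(), 6) == 3);

    // ...and the source container is left untouched.
    assert(mo.size() == 9);
    assert(*std::next(mo.begin(), 0) == 1);
    return 0;
}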
/external/compiler-rt/include/sanitizer/
tsan_interface_atomic.h
45 __tsan_memory_order mo);
47 __tsan_memory_order mo);
49 __tsan_memory_order mo);
51 __tsan_memory_order mo);
54 __tsan_memory_order mo);
58 __tsan_memory_order mo);
60 __tsan_memory_order mo);
62 __tsan_memory_order mo);
64 __tsan_memory_order mo);
67 __tsan_memory_order mo);
[all...]
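
These declarations are the public entry points that ThreadSanitizer-instrumented builds route atomic operations through; each one takes the memory order as an explicit __tsan_memory_order argument. A hedged sketch of ordinary C++ code such a build would intercept (the arrow comments name the entry points I would expect the calls to map to; the exact lowering is the compiler's and is not shown in these results):

#include <atomic>

std::atomic<int> ready{0};
int payload = 0;

void producer() {
    payload = 42;
    ready.store(1, std::memory_order_release);            // ~ __tsan_atomic32_store(..., release)
}

int consumer() {
    while (ready.load(std::memory_order_acquire) == 0) {  // ~ __tsan_atomic32_load(..., acquire)
    }
    return payload;  // made visible by the release/acquire pair
}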
/external/libcxx/test/std/containers/associative/set/set.cons/
move.pass.cpp
31 std::set<int, C, A> mo(C(5), A(7));
32 std::set<int, C, A> m = std::move(mo);
38 assert(mo.get_allocator() == A(7));
39 assert(mo.key_comp() == C(5));
40 assert(mo.size() == 0);
41 assert(distance(mo.begin(), mo.end()) == 0);
59 std::set<int, C, A> mo(ar, ar+sizeof(ar)/sizeof(ar[0]), C(5), A(7));
60 std::set<int, C, A> m = std::move(mo);
69 assert(mo
[all...]
copy.pass.cpp
41 std::set<int, C, A> mo(ar, ar+sizeof(ar)/sizeof(ar[0]), C(5), A(7));
42 std::set<int, C, A> m = mo;
51 assert(mo.get_allocator() == A(7));
52 assert(mo.key_comp() == C(5));
53 assert(mo.size() == 3);
54 assert(distance(mo.begin(), mo.end()) == 3);
55 assert(*mo.begin() == 1);
56 assert(*next(mo.begin()) == 2);
57 assert(*next(mo
[all...]
copy_assign.pass.cpp
40 std::set<int, C, A> mo(ar, ar+sizeof(ar)/sizeof(ar[0]), C(5), A(2));
42 m = mo;
51 assert(mo.get_allocator() == A(2));
52 assert(mo.key_comp() == C(5));
53 assert(mo.size() == 3);
54 assert(distance(mo.begin(), mo.end()) == 3);
55 assert(*mo.begin() == 1);
56 assert(*next(mo.begin()) == 2);
57 assert(*next(mo
[all...]
copy_alloc.pass.cpp
39 std::set<int, C, A> mo(ar, ar+sizeof(ar)/sizeof(ar[0]), C(5), A(7));
40 std::set<int, C, A> m(mo, A(3));
49 assert(mo.get_allocator() == A(7));
50 assert(mo.key_comp() == C(5));
51 assert(mo.size() == 3);
52 assert(distance(mo.begin(), mo.end()) == 3);
53 assert(*mo.begin() == 1);
54 assert(*next(mo.begin()) == 2);
55 assert(*next(mo
[all...]
/external/libcxx/test/std/containers/associative/map/map.cons/
copy.pass.cpp
42 std::map<int, double, C, A> mo(ar, ar+sizeof(ar)/sizeof(ar[0]), C(5), A(7));
43 std::map<int, double, C, A> m = mo;
52 assert(mo.get_allocator() == A(7));
53 assert(mo.key_comp() == C(5));
54 assert(mo.size() == 3);
55 assert(distance(mo.begin(), mo.end()) == 3);
56 assert(*mo.begin() == V(1, 1));
57 assert(*next(mo.begin()) == V(2, 1));
58 assert(*next(mo
[all...]
copy_alloc.pass.cpp
41 std::map<int, double, C, A> mo(ar, ar+sizeof(ar)/sizeof(ar[0]), C(5), A(7));
42 std::map<int, double, C, A> m(mo, A(3));
51 assert(mo.get_allocator() == A(7));
52 assert(mo.key_comp() == C(5));
53 assert(mo.size() == 3);
54 assert(distance(mo.begin(), mo.end()) == 3);
55 assert(*mo.begin() == V(1, 1));
56 assert(*next(mo.begin()) == V(2, 1));
57 assert(*next(mo
[all...]
move.pass.cpp
31 std::map<int, double, C, A> mo(C(5), A(7));
32 std::map<int, double, C, A> m = std::move(mo);
38 assert(mo.get_allocator() == A(7));
39 assert(mo.key_comp() == C(5));
40 assert(mo.size() == 0);
41 assert(distance(mo.begin(), mo.end()) == 0);
58 std::map<int, double, C, A> mo(ar, ar+sizeof(ar)/sizeof(ar[0]), C(5), A(7));
59 std::map<int, double, C, A> m = std::move(mo);
68 assert(mo
[all...]
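
The move.pass.cpp hits for map (and for set/multiset above) all assert the same thing: after std::move the target owns the elements, while the moved-from container reports size 0 and still answers get_allocator()/key_comp() with the original A(7)/C(5). A minimal sketch with a plain std::map, dropping the stateful comparator/allocator checks; note that the emptiness of a moved-from container is what libcxx's own test asserts, not a portable guarantee of the standard:

#include <cassert>
#include <iterator>
#include <map>
#include <utility>

int main() {
    std::map<int, double> mo{{1, 1.0}, {2, 1.0}, {3, 1.0}};
    std::map<int, double> m = std::move(mo);  // move construction

    assert(m.size() == 3);                             // contents transferred
    assert(mo.size() == 0);                            // what the libcxx test checks
    assert(std::distance(mo.begin(), mo.end()) == 0);
    return 0;
}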
/external/autotest/client/profilers/powertop/src/po/
Makefile
2 OBJ= $(SRC:.po=.mo)
7 %.mo: %.po
11 rm -f *.mo *~
14 # the po/mo file list (we fool make by pretending the need for .inst files).
17 %.inst: %.mo
19 -cp -f $< $(DESTDIR)$(LOCALESDIR)/$*/LC_MESSAGES/powertop.mo
/external/compiler-rt/lib/tsan/rtl/
tsan_interface_atomic.cc
36 static bool IsLoadOrder(morder mo) { argument
37 return mo == mo_relaxed || mo == mo_consume
38 || mo == mo_acquire || mo == mo_seq_cst;
41 static bool IsStoreOrder(morder mo) { argument
42 return mo == mo_relaxed || mo == mo_release || mo == mo_seq_cst;
45 static bool IsReleaseOrder(morder mo) { argument
49 IsAcquireOrder(morder mo) argument
54 IsAcqRelOrder(morder mo) argument
197 to_mo(morder mo) argument
211 NoTsanAtomicLoad(const volatile T *a, morder mo) argument
216 NoTsanAtomicLoad(const volatile a128 *a, morder mo) argument
223 AtomicLoad(ThreadState *thr, uptr pc, const volatile T *a, morder mo) argument
241 NoTsanAtomicStore(volatile T *a, T v, morder mo) argument
246 NoTsanAtomicStore(volatile a128 *a, a128 v, morder mo) argument
253 AtomicStore(ThreadState *thr, uptr pc, volatile T *a, T v, morder mo) argument
276 AtomicRMW(ThreadState *thr, uptr pc, volatile T *a, T v, morder mo) argument
298 NoTsanAtomicExchange(volatile T *a, T v, morder mo) argument
303 NoTsanAtomicFetchAdd(volatile T *a, T v, morder mo) argument
308 NoTsanAtomicFetchSub(volatile T *a, T v, morder mo) argument
313 NoTsanAtomicFetchAnd(volatile T *a, T v, morder mo) argument
318 NoTsanAtomicFetchOr(volatile T *a, T v, morder mo) argument
323 NoTsanAtomicFetchXor(volatile T *a, T v, morder mo) argument
328 NoTsanAtomicFetchNand(volatile T *a, T v, morder mo) argument
333 AtomicExchange(ThreadState *thr, uptr pc, volatile T *a, T v, morder mo) argument
339 AtomicFetchAdd(ThreadState *thr, uptr pc, volatile T *a, T v, morder mo) argument
345 AtomicFetchSub(ThreadState *thr, uptr pc, volatile T *a, T v, morder mo) argument
351 AtomicFetchAnd(ThreadState *thr, uptr pc, volatile T *a, T v, morder mo) argument
357 AtomicFetchOr(ThreadState *thr, uptr pc, volatile T *a, T v, morder mo) argument
363 AtomicFetchXor(ThreadState *thr, uptr pc, volatile T *a, T v, morder mo) argument
369 AtomicFetchNand(ThreadState *thr, uptr pc, volatile T *a, T v, morder mo) argument
375 NoTsanAtomicCAS(volatile T *a, T *c, T v, morder mo, morder fmo) argument
380 NoTsanAtomicCAS(volatile a128 *a, a128 *c, a128 v, morder mo, morder fmo) argument
392 NoTsanAtomicCAS(volatile T *a, T c, T v, morder mo, morder fmo) argument
398 AtomicCAS(ThreadState *thr, uptr pc, volatile T *a, T *c, T v, morder mo, morder fmo) argument
431 AtomicCAS(ThreadState *thr, uptr pc, volatile T *a, T c, T v, morder mo, morder fmo) argument
438 NoTsanAtomicFence(morder mo) argument
442 AtomicFence(ThreadState *thr, uptr pc, morder mo) argument
467 ScopedAtomic(ThreadState *thr, uptr pc, const volatile void *a, morder mo, const char *func) argument
481 AtomicStatInc(ThreadState *thr, uptr size, morder mo, StatType t) argument
499 __tsan_atomic8_load(const volatile a8 *a, morder mo) argument
504 __tsan_atomic16_load(const volatile a16 *a, morder mo) argument
509 __tsan_atomic32_load(const volatile a32 *a, morder mo) argument
514 __tsan_atomic64_load(const volatile a64 *a, morder mo) argument
520 __tsan_atomic128_load(const volatile a128 *a, morder mo) argument
526 __tsan_atomic8_store(volatile a8 *a, a8 v, morder mo) argument
531 __tsan_atomic16_store(volatile a16 *a, a16 v, morder mo) argument
536 __tsan_atomic32_store(volatile a32 *a, a32 v, morder mo) argument
541 __tsan_atomic64_store(volatile a64 *a, a64 v, morder mo) argument
547 __tsan_atomic128_store(volatile a128 *a, a128 v, morder mo) argument
553 __tsan_atomic8_exchange(volatile a8 *a, a8 v, morder mo) argument
558 __tsan_atomic16_exchange(volatile a16 *a, a16 v, morder mo) argument
563 __tsan_atomic32_exchange(volatile a32 *a, a32 v, morder mo) argument
568 __tsan_atomic64_exchange(volatile a64 *a, a64 v, morder mo) argument
574 __tsan_atomic128_exchange(volatile a128 *a, a128 v, morder mo) argument
580 __tsan_atomic8_fetch_add(volatile a8 *a, a8 v, morder mo) argument
585 __tsan_atomic16_fetch_add(volatile a16 *a, a16 v, morder mo) argument
590 __tsan_atomic32_fetch_add(volatile a32 *a, a32 v, morder mo) argument
595 __tsan_atomic64_fetch_add(volatile a64 *a, a64 v, morder mo) argument
601 __tsan_atomic128_fetch_add(volatile a128 *a, a128 v, morder mo) argument
607 __tsan_atomic8_fetch_sub(volatile a8 *a, a8 v, morder mo) argument
612 __tsan_atomic16_fetch_sub(volatile a16 *a, a16 v, morder mo) argument
617 __tsan_atomic32_fetch_sub(volatile a32 *a, a32 v, morder mo) argument
622 __tsan_atomic64_fetch_sub(volatile a64 *a, a64 v, morder mo) argument
628 __tsan_atomic128_fetch_sub(volatile a128 *a, a128 v, morder mo) argument
634 __tsan_atomic8_fetch_and(volatile a8 *a, a8 v, morder mo) argument
639 __tsan_atomic16_fetch_and(volatile a16 *a, a16 v, morder mo) argument
644 __tsan_atomic32_fetch_and(volatile a32 *a, a32 v, morder mo) argument
649 __tsan_atomic64_fetch_and(volatile a64 *a, a64 v, morder mo) argument
655 __tsan_atomic128_fetch_and(volatile a128 *a, a128 v, morder mo) argument
661 __tsan_atomic8_fetch_or(volatile a8 *a, a8 v, morder mo) argument
666 __tsan_atomic16_fetch_or(volatile a16 *a, a16 v, morder mo) argument
671 __tsan_atomic32_fetch_or(volatile a32 *a, a32 v, morder mo) argument
676 __tsan_atomic64_fetch_or(volatile a64 *a, a64 v, morder mo) argument
682 __tsan_atomic128_fetch_or(volatile a128 *a, a128 v, morder mo) argument
688 __tsan_atomic8_fetch_xor(volatile a8 *a, a8 v, morder mo) argument
693 __tsan_atomic16_fetch_xor(volatile a16 *a, a16 v, morder mo) argument
698 __tsan_atomic32_fetch_xor(volatile a32 *a, a32 v, morder mo) argument
703 __tsan_atomic64_fetch_xor(volatile a64 *a, a64 v, morder mo) argument
709 __tsan_atomic128_fetch_xor(volatile a128 *a, a128 v, morder mo) argument
715 __tsan_atomic8_fetch_nand(volatile a8 *a, a8 v, morder mo) argument
720 __tsan_atomic16_fetch_nand(volatile a16 *a, a16 v, morder mo) argument
725 __tsan_atomic32_fetch_nand(volatile a32 *a, a32 v, morder mo) argument
730 __tsan_atomic64_fetch_nand(volatile a64 *a, a64 v, morder mo) argument
736 __tsan_atomic128_fetch_nand(volatile a128 *a, a128 v, morder mo) argument
742 __tsan_atomic8_compare_exchange_strong(volatile a8 *a, a8 *c, a8 v, morder mo, morder fmo) argument
748 __tsan_atomic16_compare_exchange_strong(volatile a16 *a, a16 *c, a16 v, morder mo, morder fmo) argument
754 __tsan_atomic32_compare_exchange_strong(volatile a32 *a, a32 *c, a32 v, morder mo, morder fmo) argument
760 __tsan_atomic64_compare_exchange_strong(volatile a64 *a, a64 *c, a64 v, morder mo, morder fmo) argument
767 __tsan_atomic128_compare_exchange_strong(volatile a128 *a, a128 *c, a128 v, morder mo, morder fmo) argument
774 __tsan_atomic8_compare_exchange_weak(volatile a8 *a, a8 *c, a8 v, morder mo, morder fmo) argument
780 __tsan_atomic16_compare_exchange_weak(volatile a16 *a, a16 *c, a16 v, morder mo, morder fmo) argument
786 __tsan_atomic32_compare_exchange_weak(volatile a32 *a, a32 *c, a32 v, morder mo, morder fmo) argument
792 __tsan_atomic64_compare_exchange_weak(volatile a64 *a, a64 *c, a64 v, morder mo, morder fmo) argument
799 __tsan_atomic128_compare_exchange_weak(volatile a128 *a, a128 *c, a128 v, morder mo, morder fmo) argument
806 __tsan_atomic8_compare_exchange_val(volatile a8 *a, a8 c, a8 v, morder mo, morder fmo) argument
812 __tsan_atomic16_compare_exchange_val(volatile a16 *a, a16 c, a16 v, morder mo, morder fmo) argument
818 __tsan_atomic32_compare_exchange_val(volatile a32 *a, a32 c, a32 v, morder mo, morder fmo) argument
824 __tsan_atomic64_compare_exchange_val(volatile a64 *a, a64 c, a64 v, morder mo, morder fmo) argument
831 __tsan_atomic128_compare_exchange_val(volatile a128 *a, a128 c, a128 v, morder mo, morder fmo) argument
838 __tsan_atomic_thread_fence(morder mo) argument
844 __tsan_atomic_signal_fence(morder mo) argument
[all...]
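
Lines 36-42 of tsan_interface_atomic.cc are the memory-order validity predicates the rest of the file leans on. Restated over std::memory_order for readability (tsan's own morder enum has the matching members mo_relaxed, mo_consume, mo_acquire, mo_release, mo_acq_rel, mo_seq_cst):

#include <atomic>

static bool IsLoadOrder(std::memory_order mo) {
    return mo == std::memory_order_relaxed || mo == std::memory_order_consume ||
           mo == std::memory_order_acquire || mo == std::memory_order_seq_cst;
}

static bool IsStoreOrder(std::memory_order mo) {
    return mo == std::memory_order_relaxed || mo == std::memory_order_release ||
           mo == std::memory_order_seq_cst;
}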
/external/libcxx/test/std/containers/associative/multimap/multimap.cons/
copy.pass.cpp
42 std::multimap<int, double, C, A> mo(ar, ar+sizeof(ar)/sizeof(ar[0]), C(5), A(7));
43 std::multimap<int, double, C, A> m = mo;
44 assert(m == mo);
48 assert(mo.get_allocator() == A(7));
49 assert(mo.key_comp() == C(5));
68 std::multimap<int, double, C, A> mo(ar, ar+sizeof(ar)/sizeof(ar[0]), C(5), A(7));
69 std::multimap<int, double, C, A> m = mo;
70 assert(m == mo);
74 assert(mo.get_allocator() == A(7));
75 assert(mo
[all...]
copy_alloc.pass.cpp
41 std::multimap<int, double, C, A> mo(ar, ar+sizeof(ar)/sizeof(ar[0]), C(5), A(7));
42 std::multimap<int, double, C, A> m(mo, A(3));
43 assert(m == mo);
47 assert(mo.get_allocator() == A(7));
48 assert(mo.key_comp() == C(5));
67 std::multimap<int, double, C, A> mo(ar, ar+sizeof(ar)/sizeof(ar[0]), C(5), A());
68 std::multimap<int, double, C, A> m(mo, A());
69 assert(m == mo);
73 assert(mo.get_allocator() == A());
74 assert(mo
[all...]
move.pass.cpp
31 std::multimap<int, double, C, A> mo(C(5), A(7));
32 std::multimap<int, double, C, A> m = std::move(mo);
38 assert(mo.get_allocator() == A(7));
39 assert(mo.key_comp() == C(5));
40 assert(mo.size() == 0);
41 assert(distance(mo.begin(), mo.end()) == 0);
58 std::multimap<int, double, C, A> mo(ar, ar+sizeof(ar)/sizeof(ar[0]), C(5), A(7));
59 std::multimap<int, double, C, A> m = std::move(mo);
74 assert(mo
[all...]
/external/compiler-rt/lib/sanitizer_common/
sanitizer_atomic_clang.h
48 typename T::Type v, memory_order mo) {
49 (void)mo;
56 typename T::Type v, memory_order mo) {
57 (void)mo;
64 typename T::Type v, memory_order mo) {
66 if (mo & (memory_order_release | memory_order_acq_rel | memory_order_seq_cst))
69 if (mo == memory_order_seq_cst)
78 memory_order mo) {
92 memory_order mo) {
93 return atomic_compare_exchange_strong(a, cmp, xchg, mo);
47 atomic_fetch_add(volatile T *a, typename T::Type v, memory_order mo) argument
55 atomic_fetch_sub(volatile T *a, typename T::Type v, memory_order mo) argument
63 atomic_exchange(volatile T *a, typename T::Type v, memory_order mo) argument
75 atomic_compare_exchange_strong(volatile T *a, typename T::Type *cmp, typename T::Type xchg, memory_order mo) argument
89 atomic_compare_exchange_weak(volatile T *a, typename T::Type *cmp, typename T::Type xchg, memory_order mo) argument
[all...]
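
The last two hits in sanitizer_atomic_clang.h show that sanitizer_common implements the weak compare-exchange by forwarding straight to the strong one, which is always permissible because a strong CAS never fails spuriously. A small sketch of the same idea over std::atomic (the wrapper name is mine, not part of the sanitizer API):

#include <atomic>

template <typename T>
bool compare_exchange_weak_via_strong(std::atomic<T>& a, T& cmp, T xchg,
                                      std::memory_order mo) {
    // The strong form satisfies the weak contract; callers that retry in a
    // loop simply observe no spurious failures.
    return a.compare_exchange_strong(cmp, xchg, mo);
}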
sanitizer_atomic_clang_x86.h
29 const volatile T *a, memory_order mo) {
30 DCHECK(mo & (memory_order_relaxed | memory_order_consume
37 if (mo == memory_order_relaxed) {
39 } else if (mo == memory_order_consume) {
45 } else if (mo == memory_order_acquire) {
75 INLINE void atomic_store(volatile T *a, typename T::Type v, memory_order mo) { argument
76 DCHECK(mo & (memory_order_relaxed | memory_order_release
82 if (mo == memory_order_relaxed) {
84 } else if (mo == memory_order_release) {
109 if (mo
28 atomic_load( const volatile T *a, memory_order mo) argument
[all...]
sanitizer_atomic_msvc.h
82 const volatile T *a, memory_order mo) {
83 DCHECK(mo & (memory_order_relaxed | memory_order_consume
88 if (mo == memory_order_relaxed) {
99 INLINE void atomic_store(volatile T *a, typename T::Type v, memory_order mo) { argument
100 DCHECK(mo & (memory_order_relaxed | memory_order_release
104 if (mo == memory_order_relaxed) {
111 if (mo == memory_order_seq_cst)
116 u32 v, memory_order mo) {
117 (void)mo;
124 uptr v, memory_order mo) {
81 atomic_load( const volatile T *a, memory_order mo) argument
115 atomic_fetch_add(volatile atomic_uint32_t *a, u32 v, memory_order mo) argument
123 atomic_fetch_add(volatile atomic_uintptr_t *a, uptr v, memory_order mo) argument
136 atomic_fetch_sub(volatile atomic_uint32_t *a, u32 v, memory_order mo) argument
144 atomic_fetch_sub(volatile atomic_uintptr_t *a, uptr v, memory_order mo) argument
157 atomic_exchange(volatile atomic_uint8_t *a, u8 v, memory_order mo) argument
164 atomic_exchange(volatile atomic_uint16_t *a, u16 v, memory_order mo) argument
171 atomic_exchange(volatile atomic_uint32_t *a, u32 v, memory_order mo) argument
178 atomic_compare_exchange_strong(volatile atomic_uint8_t *a, u8 *cmp, u8 xchgv, memory_order mo) argument
204 atomic_compare_exchange_strong(volatile atomic_uintptr_t *a, uptr *cmp, uptr xchg, memory_order mo) argument
217 atomic_compare_exchange_strong(volatile atomic_uint16_t *a, u16 *cmp, u16 xchg, memory_order mo) argument
230 atomic_compare_exchange_strong(volatile atomic_uint32_t *a, u32 *cmp, u32 xchg, memory_order mo) argument
243 atomic_compare_exchange_strong(volatile atomic_uint64_t *a, u64 *cmp, u64 xchg, memory_order mo) argument
257 atomic_compare_exchange_weak(volatile T *a, typename T::Type *cmp, typename T::Type xchg, memory_order mo) argument
[all...]
sanitizer_atomic_clang_other.h
26 const volatile T *a, memory_order mo) {
27 DCHECK(mo & (memory_order_relaxed | memory_order_consume
34 if (mo == memory_order_relaxed) {
36 } else if (mo == memory_order_consume) {
42 } else if (mo == memory_order_acquire) {
63 INLINE void atomic_store(volatile T *a, typename T::Type v, memory_order mo) { argument
64 DCHECK(mo & (memory_order_relaxed | memory_order_release
70 if (mo == memory_order_relaxed) {
72 } else if (mo == memory_order_release) {
25 atomic_load( const volatile T *a, memory_order mo) argument
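
The three atomic_load hits (clang_x86, msvc, clang_other) share one shape: DCHECK that the requested order is a valid load order, then branch on relaxed/consume/acquire/seq_cst to decide how much fencing to pay for. A condensed sketch of that shape, using std::atomic in place of the per-platform builtins the real headers dispatch to:

#include <atomic>
#include <cassert>

template <typename T>
T atomic_load_sketch(const std::atomic<T>& a, std::memory_order mo) {
    // Mirror of the DCHECK at the top of each atomic_load above.
    assert(mo == std::memory_order_relaxed || mo == std::memory_order_consume ||
           mo == std::memory_order_acquire || mo == std::memory_order_seq_cst);
    // The real headers branch on mo to pick plain vs. fenced loads;
    // std::atomic::load performs the equivalent dispatch for us.
    return a.load(mo);
}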
/external/python/cpython2/Lib/lib2to3/pgen2/
conv.py
71 mo = re.match(r"^#define\s+(\w+)\s+(\d+)$", line)
72 if not mo and line.strip():
76 symbol, number = mo.groups()
132 mo = re.match(r"static arc arcs_(\d+)_(\d+)\[(\d+)\] = {$",
134 assert mo, (lineno, line)
135 n, m, k = map(int, mo.groups())
139 mo = re.match(r"\s+{(\d+), (\d+)},$", line)
140 assert mo, (lineno, line)
141 i, j = map(int, mo.groups())
147 mo
[all...]
/external/python/cpython3/Lib/lib2to3/pgen2/
conv.py
71 mo = re.match(r"^#define\s+(\w+)\s+(\d+)$", line)
72 if not mo and line.strip():
76 symbol, number = mo.groups()
132 mo = re.match(r"static arc arcs_(\d+)_(\d+)\[(\d+)\] = {$",
134 assert mo, (lineno, line)
135 n, m, k = list(map(int, mo.groups()))
139 mo = re.match(r"\s+{(\d+), (\d+)},$", line)
140 assert mo, (lineno, line)
141 i, j = list(map(int, mo.groups()))
147 mo
[all...]
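
Both conv.py copies parse the C tables that pgen emits, binding each re.match result to mo and then unpacking mo.groups(). For consistency with the other examples here, the #define match from line 71 restated in C++ (the names define_re and the sample line are mine; the regex itself is the one shown above):

#include <iostream>
#include <regex>
#include <string>

int main() {
    const std::regex define_re(R"(^#define\s+(\w+)\s+(\d+)$)");
    std::smatch mo;  // plays the role of the Python match object "mo"
    const std::string line = "#define NAME 256";
    if (std::regex_match(line, mo, define_re)) {
        const std::string symbol = mo[1];
        const int number = std::stoi(mo[2]);
        std::cout << symbol << " = " << number << "\n";  // prints: NAME = 256
    }
    return 0;
}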
/external/skia/include/private/
SkAtomics.h
56 T load(sk_memory_order mo = default_memory_order) const {
57 return sk_atomic_load(&fVal, mo);
60 void store(const T& val, sk_memory_order mo = default_memory_order) {
61 sk_atomic_store(&fVal, val, mo);
75 T fetch_add(const T& val, sk_memory_order mo = default_memory_order) {
76 return sk_atomic_fetch_add(&fVal, val, mo);
79 T fetch_sub(const T& val, sk_memory_order mo = default_memory_order) {
80 return sk_atomic_fetch_sub(&fVal, val, mo);
95 T sk_atomic_load(const T* ptr, sk_memory_order mo) { argument
96 SkASSERT(mo
105 sk_atomic_store(T* ptr, T val, sk_memory_order mo) argument
114 sk_atomic_fetch_add(T* ptr, T val, sk_memory_order mo) argument
121 sk_atomic_fetch_sub(T* ptr, T val, sk_memory_order mo) argument
144 sk_atomic_exchange(T* ptr, T val, sk_memory_order mo) argument
[all...]
/external/skqp/include/private/
SkAtomics.h
56 T load(sk_memory_order mo = default_memory_order) const {
57 return sk_atomic_load(&fVal, mo);
60 void store(const T& val, sk_memory_order mo = default_memory_order) {
61 sk_atomic_store(&fVal, val, mo);
75 T fetch_add(const T& val, sk_memory_order mo = default_memory_order) {
76 return sk_atomic_fetch_add(&fVal, val, mo);
79 T fetch_sub(const T& val, sk_memory_order mo = default_memory_order) {
80 return sk_atomic_fetch_sub(&fVal, val, mo);
95 T sk_atomic_load(const T* ptr, sk_memory_order mo) { argument
96 SkASSERT(mo
105 sk_atomic_store(T* ptr, T val, sk_memory_order mo) argument
114 sk_atomic_fetch_add(T* ptr, T val, sk_memory_order mo) argument
121 sk_atomic_fetch_sub(T* ptr, T val, sk_memory_order mo) argument
144 sk_atomic_exchange(T* ptr, T val, sk_memory_order mo) argument
[all...]
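
Both Skia copies of SkAtomics.h expose the same small wrapper: a stored value plus load/store/fetch_add/fetch_sub members that each take an sk_memory_order defaulting to default_memory_order. A sketch of that interface over std::atomic, with seq_cst assumed as the default (the class name and that default are mine; the real header forwards to the sk_atomic_* helpers shown above):

#include <atomic>

template <typename T>
class AtomicSketch {
public:
    explicit AtomicSketch(T v = T()) : fVal(v) {}

    T load(std::memory_order mo = std::memory_order_seq_cst) const {
        return fVal.load(mo);
    }
    void store(const T& val, std::memory_order mo = std::memory_order_seq_cst) {
        fVal.store(val, mo);
    }
    T fetch_add(const T& val, std::memory_order mo = std::memory_order_seq_cst) {
        return fVal.fetch_add(val, mo);  // integral T only, as with std::atomic
    }
    T fetch_sub(const T& val, std::memory_order mo = std::memory_order_seq_cst) {
        return fVal.fetch_sub(val, mo);
    }

private:
    std::atomic<T> fVal;
};

// Usage: AtomicSketch<int> counter; counter.fetch_add(1, std::memory_order_relaxed);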
