/*
 * Copyright (C) 2007 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ANDROID_CUTILS_ATOMIC_H
#define ANDROID_CUTILS_ATOMIC_H

#include <stdint.h>
#include <sys/types.h>
#include <stdatomic.h>

#ifndef ANDROID_ATOMIC_INLINE
#define ANDROID_ATOMIC_INLINE static inline
#endif

/*
 * A handful of basic atomic operations.
 * THESE ARE HERE FOR LEGACY REASONS ONLY.  AVOID.
 *
 * PREFERRED ALTERNATIVES:
 * - Use C++/C/pthread locks/mutexes whenever there is not a
 *   convincing reason to do otherwise.  Note that very clever and
 *   complicated, but correct, lock-free code is often slower than
 *   using locks, especially where nontrivial data structures
 *   are involved.
 * - C11 stdatomic.h.
 * - Where supported, C++11 std::atomic<T>.
 *
 * PLEASE STOP READING HERE UNLESS YOU ARE TRYING TO UNDERSTAND
 * OR UPDATE OLD CODE.
 *
 * The "acquire" and "release" terms can be defined intuitively in terms
 * of the placement of memory barriers in a simple lock implementation:
 *   - wait until compare-and-swap(lock-is-free --> lock-is-held) succeeds
 *   - barrier
 *   - [do work]
 *   - barrier
 *   - store(lock-is-free)
 * In very crude terms, the initial (acquire) barrier prevents any of the
 * "work" from happening before the lock is held, and the later (release)
 * barrier ensures that all of the work happens before the lock is released.
 * (Think of cached writes, cache read-ahead, and instruction reordering
 * around the CAS and store instructions.)
 *
 * The barriers must apply to both the compiler and the CPU.  Note it is
 * legal for instructions that occur before an "acquire" barrier to be
 * moved down below it, and for instructions that occur after a "release"
 * barrier to be moved up above it.
 *
 * The ARM-driven implementation we use here is short on subtlety,
 * and actually requests a full barrier from the compiler and the CPU.
 * The only difference between acquire and release is in whether they
 * are issued before or after the atomic operation with which they
 * are associated.  To ease the transition to C/C++ atomic intrinsics,
 * you should not rely on this, and instead assume that only the minimal
 * acquire/release protection is provided.
 *
 * NOTE: all int32_t* values are expected to be aligned on 32-bit boundaries.
 * If they are not, atomicity is not guaranteed.
 */

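/*
 * Illustrative sketch only, not part of this API: the simple lock described
 * above, expressed with the legacy primitives declared below.  The names
 * my_spin_lock and my_spin_unlock are hypothetical.
 *
 *     void my_spin_lock(volatile int32_t* lock)
 *     {
 *         // CAS(lock-is-free --> lock-is-held) with acquire ordering;
 *         // android_atomic_acquire_cas returns 0 on success.
 *         while (android_atomic_acquire_cas(0, 1, lock) != 0) {
 *         }
 *     }
 *
 *     void my_spin_unlock(volatile int32_t* lock)
 *     {
 *         // Release store: work done under the lock becomes visible
 *         // before the lock is observed as free again.
 *         android_atomic_release_store(0, lock);
 *     }
 */
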
/*
 * Basic arithmetic and bitwise operations.  These all provide a
 * barrier with "release" ordering, and return the previous value.
 *
 * These have the same characteristics (e.g. what happens on overflow)
 * as the equivalent non-atomic C operations.
 */
ANDROID_ATOMIC_INLINE
int32_t android_atomic_inc(volatile int32_t* addr)
{
    volatile atomic_int_least32_t* a = (volatile atomic_int_least32_t*)addr;
        /* int32_t, if it exists, is the same as int_least32_t. */
    return atomic_fetch_add_explicit(a, 1, memory_order_release);
}

ANDROID_ATOMIC_INLINE
int32_t android_atomic_dec(volatile int32_t* addr)
{
    volatile atomic_int_least32_t* a = (volatile atomic_int_least32_t*)addr;
    return atomic_fetch_sub_explicit(a, 1, memory_order_release);
}

ANDROID_ATOMIC_INLINE
int32_t android_atomic_add(int32_t value, volatile int32_t* addr)
{
    volatile atomic_int_least32_t* a = (volatile atomic_int_least32_t*)addr;
    return atomic_fetch_add_explicit(a, value, memory_order_release);
}

ANDROID_ATOMIC_INLINE
int32_t android_atomic_and(int32_t value, volatile int32_t* addr)
{
    volatile atomic_int_least32_t* a = (volatile atomic_int_least32_t*)addr;
    return atomic_fetch_and_explicit(a, value, memory_order_release);
}

ANDROID_ATOMIC_INLINE
int32_t android_atomic_or(int32_t value, volatile int32_t* addr)
{
    volatile atomic_int_least32_t* a = (volatile atomic_int_least32_t*)addr;
    return atomic_fetch_or_explicit(a, value, memory_order_release);
}

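/*
 * Usage sketch, illustrative only (obj, refs, and destroy_obj are
 * hypothetical): because these operations return the value held before
 * the update, the caller that sees 1 from android_atomic_dec released
 * the last reference.
 *
 *     if (android_atomic_dec(&obj->refs) == 1) {
 *         // Last reference dropped; issue an acquire-strength fence
 *         // before tearing the object down so that other threads'
 *         // preceding writes to it are visible here.
 *         android_memory_barrier();
 *         destroy_obj(obj);
 *     }
 */
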
/*
 * Perform an atomic load with "acquire" or "release" ordering.
 *
 * Note that the notion of a "release" ordering for a load does not
 * really fit into the C11 or C++11 memory model.  The extra ordering
 * is normally observable only by code using memory_order_relaxed
 * atomics, or data races.  In the rare cases in which such ordering
 * is called for, use memory_order_relaxed atomics and a leading
 * atomic_thread_fence (typically with memory_order_acquire,
 * not memory_order_release!) instead.  If you do not understand
 * this comment, you are in the vast majority, and should not be
 * using release loads or replacing them with anything other than
 * locks or default sequentially consistent atomics.
 */
ANDROID_ATOMIC_INLINE
int32_t android_atomic_acquire_load(volatile const int32_t* addr)
{
    volatile atomic_int_least32_t* a = (volatile atomic_int_least32_t*)addr;
    return atomic_load_explicit(a, memory_order_acquire);
}

ANDROID_ATOMIC_INLINE
int32_t android_atomic_release_load(volatile const int32_t* addr)
{
    volatile atomic_int_least32_t* a = (volatile atomic_int_least32_t*)addr;
    atomic_thread_fence(memory_order_seq_cst);
    /* Any reasonable clients of this interface would probably prefer   */
    /* something weaker.  But some remaining clients seem to be         */
    /* abusing this API in strange ways, e.g. by using it as a fence.   */
    /* Thus we are conservative until we can get rid of remaining       */
    /* clients (and this function).                                     */
    return atomic_load_explicit(a, memory_order_relaxed);
}

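/*
 * Sketch of the C11 idiom suggested above, illustrative only ("flag" is a
 * hypothetical atomic_int_least32_t): the rare extra ordering of a
 * "release load" is expressed as a leading fence followed by a relaxed
 * load, rather than as a load with a release argument.
 *
 *     atomic_thread_fence(memory_order_acquire);
 *     int32_t v = atomic_load_explicit(&flag, memory_order_relaxed);
 */
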
/*
 * Perform an atomic store with "acquire" or "release" ordering.
 *
 * Note that the notion of an "acquire" ordering for a store does not
 * really fit into the C11 or C++11 memory model.  The extra ordering
 * is normally observable only by code using memory_order_relaxed
 * atomics, or data races.  In the rare cases in which such ordering
 * is called for, use memory_order_relaxed atomics and a trailing
 * atomic_thread_fence (typically with memory_order_release,
 * not memory_order_acquire!) instead.
 */
ANDROID_ATOMIC_INLINE
void android_atomic_acquire_store(int32_t value, volatile int32_t* addr)
{
    volatile atomic_int_least32_t* a = (volatile atomic_int_least32_t*)addr;
    atomic_store_explicit(a, value, memory_order_relaxed);
    atomic_thread_fence(memory_order_seq_cst);
    /* Again overly conservative to accommodate weird clients.   */
}

ANDROID_ATOMIC_INLINE
void android_atomic_release_store(int32_t value, volatile int32_t* addr)
{
    volatile atomic_int_least32_t* a = (volatile atomic_int_least32_t*)addr;
    atomic_store_explicit(a, value, memory_order_release);
}

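/*
 * Sketch of the C11 idiom suggested above, illustrative only ("flag" is a
 * hypothetical atomic_int_least32_t): the rare extra ordering of an
 * "acquire store" is expressed as a relaxed store followed by a trailing
 * fence, rather than as a store with an acquire argument.
 *
 *     atomic_store_explicit(&flag, value, memory_order_relaxed);
 *     atomic_thread_fence(memory_order_release);
 */
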
/*
 * Compare-and-set operation with "acquire" or "release" ordering.
 *
 * This returns zero if the new value was successfully stored, which will
 * only happen when *addr == oldvalue.
 *
 * (The return value is inverted from implementations on other platforms,
 * but matches the ARM ldrex/strex result.)
 *
 * Implementations that use the release CAS in a loop may be less efficient
 * than possible, because we re-issue the memory barrier on each iteration.
 */
ANDROID_ATOMIC_INLINE
int android_atomic_acquire_cas(int32_t oldvalue, int32_t newvalue,
                               volatile int32_t* addr)
{
    volatile atomic_int_least32_t* a = (volatile atomic_int_least32_t*)addr;
    return (int)(!atomic_compare_exchange_strong_explicit(
                                          a, &oldvalue, newvalue,
                                          memory_order_acquire,
                                          memory_order_acquire));
}

ANDROID_ATOMIC_INLINE
int android_atomic_release_cas(int32_t oldvalue, int32_t newvalue,
                               volatile int32_t* addr)
{
    volatile atomic_int_least32_t* a = (volatile atomic_int_least32_t*)addr;
    return (int)(!atomic_compare_exchange_strong_explicit(
                                          a, &oldvalue, newvalue,
                                          memory_order_release,
                                          memory_order_relaxed));
}

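/*
 * Usage sketch, illustrative only ("counter" and "candidate" are
 * hypothetical int32_t values): a typical retry loop built on the
 * zero-on-success convention above, here computing an atomic maximum.
 *
 *     int32_t observed;
 *     do {
 *         observed = android_atomic_acquire_load(&counter);
 *         if (observed >= candidate)
 *             break;  // Already at least as large; nothing to store.
 *     } while (android_atomic_release_cas(observed, candidate, &counter) != 0);
 */
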
/*
 * Fence primitives.
 */
ANDROID_ATOMIC_INLINE
void android_compiler_barrier(void)
{
    __asm__ __volatile__ ("" : : : "memory");
    /* Could probably also be:                          */
    /* atomic_signal_fence(memory_order_seq_cst);       */
}

ANDROID_ATOMIC_INLINE
void android_memory_barrier(void)
{
    atomic_thread_fence(memory_order_seq_cst);
}

/*
 * Aliases for code using an older version of this header.  These are now
 * deprecated and should not be used.  The definitions will be removed
 * in a future release.
 */
#define android_atomic_write android_atomic_release_store
#define android_atomic_cmpxchg android_atomic_release_cas

#endif // ANDROID_CUTILS_ATOMIC_H