/* atomic.c — revision c6af9114fc63accef839c2a413e18ab058f0beff */
1/*
2 * Copyright (C) 2007 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 *      http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
#include <cutils/atomic.h>

#include <stdint.h>

#ifdef HAVE_WIN32_THREADS
#include <windows.h>
#else
#include <sched.h>
#endif
23
24/*****************************************************************************/
25#if defined(HAVE_MACOSX_IPC)
26
27#include <libkern/OSAtomic.h>
28
/* Atomically store |value| into |*addr| with a full memory barrier,
 * implemented as a compare-and-swap retry loop. */
void android_atomic_write(int32_t value, volatile int32_t* addr) {
    int32_t prev;
    do {
        prev = *addr;
    } while (!OSAtomicCompareAndSwap32Barrier(prev, value, (int32_t*)addr));
}
35
/* Atomically add 1 to |*addr| (full barrier) and return the OLD value.
 * OSAtomicIncrement32Barrier returns the new value, so subtract 1. */
int32_t android_atomic_inc(volatile int32_t* addr) {
    int32_t incremented = OSAtomicIncrement32Barrier((int32_t*)addr);
    return incremented - 1;
}
39
/* Atomically subtract 1 from |*addr| (full barrier) and return the OLD
 * value. OSAtomicDecrement32Barrier returns the new value, so add 1 back. */
int32_t android_atomic_dec(volatile int32_t* addr) {
    int32_t decremented = OSAtomicDecrement32Barrier((int32_t*)addr);
    return decremented + 1;
}
43
/* Atomically add |value| to |*addr| (full barrier) and return the OLD
 * value. OSAtomicAdd32Barrier returns the new value, so undo the add. */
int32_t android_atomic_add(int32_t value, volatile int32_t* addr) {
    int32_t updated = OSAtomicAdd32Barrier(value, (int32_t*)addr);
    return updated - value;
}
47
/* Atomically AND |value| into |*addr| (full barrier); returns the old value. */
int32_t android_atomic_and(int32_t value, volatile int32_t* addr) {
    for (;;) {
        int32_t prev = *addr;
        if (OSAtomicCompareAndSwap32Barrier(prev, prev & value, (int32_t*)addr))
            return prev;
    }
}
55
/* Atomically OR |value| into |*addr| (full barrier); returns the old value. */
int32_t android_atomic_or(int32_t value, volatile int32_t* addr) {
    for (;;) {
        int32_t prev = *addr;
        if (OSAtomicCompareAndSwap32Barrier(prev, prev | value, (int32_t*)addr))
            return prev;
    }
}
63
/* Atomically exchange |*addr| with |value|; returns the old value.
 * Built on android_atomic_cmpxchg, which returns 0 on success. */
int32_t android_atomic_swap(int32_t value, volatile int32_t* addr) {
    for (;;) {
        int32_t prev = *addr;
        if (android_atomic_cmpxchg(prev, value, addr) == 0)
            return prev;
    }
}
71
/* Compare-and-swap with full barrier. Returns 0 on success, nonzero on
 * failure — the inverse of OSAtomic's boolean convention. */
int android_atomic_cmpxchg(int32_t oldvalue, int32_t newvalue, volatile int32_t* addr) {
    return !OSAtomicCompareAndSwap32Barrier(oldvalue, newvalue, (int32_t*)addr);
}
75
76#if defined(__ppc__)        \
77    || defined(__PPC__)     \
78    || defined(__powerpc__) \
79    || defined(__powerpc)   \
80    || defined(__POWERPC__) \
81    || defined(_M_PPC)      \
82    || defined(__PPC)
83#define NEED_QUASIATOMICS 1
84#else
85
/* 64-bit compare-and-swap with full barrier. Returns 0 on success,
 * nonzero on failure (inverse of OSAtomic's boolean result). */
int android_quasiatomic_cmpxchg_64(int64_t oldvalue, int64_t newvalue,
        volatile int64_t* addr) {
    return !OSAtomicCompareAndSwap64Barrier(oldvalue, newvalue, (int64_t*)addr);
}
91
/* Atomically exchange the 64-bit |*addr| with |value|; returns the old
 * value. Retries until the CAS (0 == success) lands. */
int64_t android_quasiatomic_swap_64(int64_t value, volatile int64_t* addr) {
    for (;;) {
        int64_t prev = *addr;
        if (android_quasiatomic_cmpxchg_64(prev, value, addr) == 0)
            return prev;
    }
}
99
/* Coherent 64-bit read: atomically adding zero returns the current value
 * without modifying it, with full barrier semantics. */
int64_t android_quasiatomic_read_64(volatile int64_t* addr) {
    int64_t value = OSAtomicAdd64Barrier(0, addr);
    return value;
}
103
104#endif
105
106
107/*****************************************************************************/
108#elif defined(__i386__) || defined(__x86_64__)
109
/* Atomically store |value| into |*addr| via a CAS retry loop
 * (android_atomic_cmpxchg returns 0 on success). */
void android_atomic_write(int32_t value, volatile int32_t* addr) {
    for (;;) {
        int32_t prev = *addr;
        if (android_atomic_cmpxchg(prev, value, addr) == 0)
            break;
    }
}
116
/* Atomically add 1 to |*addr|; returns the value before the increment. */
int32_t android_atomic_inc(volatile int32_t* addr) {
    for (;;) {
        int32_t prev = *addr;
        if (android_atomic_cmpxchg(prev, prev + 1, addr) == 0)
            return prev;
    }
}
124
/* Atomically subtract 1 from |*addr|; returns the value before the decrement. */
int32_t android_atomic_dec(volatile int32_t* addr) {
    for (;;) {
        int32_t prev = *addr;
        if (android_atomic_cmpxchg(prev, prev - 1, addr) == 0)
            return prev;
    }
}
132
/* Atomically add |value| to |*addr|; returns the value before the add. */
int32_t android_atomic_add(int32_t value, volatile int32_t* addr) {
    for (;;) {
        int32_t prev = *addr;
        if (android_atomic_cmpxchg(prev, prev + value, addr) == 0)
            return prev;
    }
}
140
/* Atomically AND |value| into |*addr|; returns the old value. */
int32_t android_atomic_and(int32_t value, volatile int32_t* addr) {
    for (;;) {
        int32_t prev = *addr;
        if (android_atomic_cmpxchg(prev, prev & value, addr) == 0)
            return prev;
    }
}
148
/* Atomically OR |value| into |*addr|; returns the old value. */
int32_t android_atomic_or(int32_t value, volatile int32_t* addr) {
    for (;;) {
        int32_t prev = *addr;
        if (android_atomic_cmpxchg(prev, prev | value, addr) == 0)
            return prev;
    }
}
156
/* Atomically exchange |*addr| with |value|; returns the old value. */
int32_t android_atomic_swap(int32_t value, volatile int32_t* addr) {
    for (;;) {
        int32_t prev = *addr;
        if (android_atomic_cmpxchg(prev, value, addr) == 0)
            return prev;
    }
}
164
/*
 * Compare-and-swap: if *addr == oldvalue, store newvalue.
 * Returns 0 on success, nonzero on failure.
 *
 * Fixes vs. the previous version:
 *  - The old asm addressed memory through the hard-coded 32-bit register
 *    "(%%edx)", which truncates the pointer on __x86_64__ (this branch is
 *    compiled for x86_64 too). A proper "+m" memory operand lets the
 *    compiler emit the correct addressing mode on both x86 and x86_64.
 *  - Added a "memory" clobber so the compiler does not cache *addr (or
 *    other shared data) across the atomic operation.
 */
int android_atomic_cmpxchg(int32_t oldvalue, int32_t newvalue, volatile int32_t* addr) {
    int32_t prev;
    __asm__ __volatile__ ("lock; cmpxchgl %2, %1"
                          : "=a" (prev), "+m" (*addr)
                          : "q" (newvalue), "0" (oldvalue)
                          : "memory");
    /* cmpxchg leaves the old memory value in EAX; success iff it matched. */
    return prev != oldvalue;
}
177
178#define NEED_QUASIATOMICS 1
179
180/*****************************************************************************/
181#elif __arm__
182// Most of the implementation is in atomic-android-arm.s.
183
184// on the device, we implement the 64-bit atomic operations through
185// mutex locking. normally, this is bad because we must initialize
186// a pthread_mutex_t before being able to use it, and this means
187// having to do an initialization check on each function call, and
188// that's where really ugly things begin...
189//
190// BUT, as a special twist, we take advantage of the fact that in our
191// pthread library, a mutex is simply a volatile word whose value is always
192// initialized to 0. In other words, simply declaring a static mutex
193// object initializes it !
194//
195// another twist is that we use a small array of mutexes to dispatch
196// the contention locks from different memory addresses
197//
198
199#include <pthread.h>
200
#define  SWAP_LOCK_COUNT  32U
/* Zero-initialized static storage; the comment above explains why this is
 * a valid initialized mutex under this platform's pthread library. */
static pthread_mutex_t  _swap_locks[SWAP_LOCK_COUNT];

/* Hash the target address into one of the striped locks. The >> 3 drops
 * the low bits (8-byte-aligned int64_t targets share them). Fixed: the
 * old (unsigned)(void*) cast truncated the pointer on 64-bit targets;
 * uintptr_t is the correct pointer-to-integer round-trip type. */
#define  SWAP_LOCK(addr)   \
   &_swap_locks[((uintptr_t)(addr) >> 3U) % SWAP_LOCK_COUNT]
206
207
/* Exchange the 64-bit |*addr| with |value| under the address-striped
 * mutex; returns the previous contents. */
int64_t android_quasiatomic_swap_64(int64_t value, volatile int64_t* addr) {
    pthread_mutex_t* lock = SWAP_LOCK(addr);
    int64_t previous;

    pthread_mutex_lock(lock);
    previous = *addr;
    *addr = value;
    pthread_mutex_unlock(lock);

    return previous;
}
220
/* 64-bit compare-and-swap under the address-striped mutex.
 * Returns 0 on success (value stored), nonzero on mismatch. */
int android_quasiatomic_cmpxchg_64(int64_t oldvalue, int64_t newvalue,
        volatile int64_t* addr) {
    pthread_mutex_t* lock = SWAP_LOCK(addr);
    int failed = 1;

    pthread_mutex_lock(lock);
    if (*addr == oldvalue) {
        *addr = newvalue;
        failed = 0;
    }
    pthread_mutex_unlock(lock);

    return failed;
}
237
/* Coherent 64-bit read: take the striped mutex so the two 32-bit halves
 * cannot be observed mid-update. */
int64_t android_quasiatomic_read_64(volatile int64_t* addr) {
    pthread_mutex_t* lock = SWAP_LOCK(addr);
    int64_t value;

    pthread_mutex_lock(lock);
    value = *addr;
    pthread_mutex_unlock(lock);

    return value;
}
247
248/*****************************************************************************/
249#elif __sh__
250// implementation for SuperH is in atomic-android-sh.c.
251
252#else
253
254#error "Unsupported atomic operations for this platform"
255
256#endif
257
258
259
260#if NEED_QUASIATOMICS
261
262/* Note that a spinlock is *not* a good idea in general
263 * since they can introduce subtle issues. For example,
264 * a real-time thread trying to acquire a spinlock already
 * acquired by another thread will never yield, making the
266 * CPU loop endlessly!
267 *
268 * However, this code is only used on the Linux simulator
269 * so it's probably ok for us.
270 *
271 * The alternative is to use a pthread mutex, but
272 * these must be initialized before being used, and
273 * then you have the problem of lazily initializing
274 * a mutex without any other synchronization primitive.
275 */
276
277/* global spinlock for all 64-bit quasiatomic operations */
278static int32_t quasiatomic_spinlock = 0;
279
280int android_quasiatomic_cmpxchg_64(int64_t oldvalue, int64_t newvalue,
281        volatile int64_t* addr) {
282    int result;
283
284    while (android_atomic_cmpxchg(0, 1, &quasiatomic_spinlock)) {
285#ifdef HAVE_WIN32_THREADS
286        Sleep(0);
287#else
288        sched_yield();
289#endif
290    }
291
292    if (*addr == oldvalue) {
293        *addr = newvalue;
294        result = 0;
295    } else {
296        result = 1;
297    }
298
299    android_atomic_swap(0, &quasiatomic_spinlock);
300
301    return result;
302}
303
304int64_t android_quasiatomic_read_64(volatile int64_t* addr) {
305    int64_t result;
306
307    while (android_atomic_cmpxchg(0, 1, &quasiatomic_spinlock)) {
308#ifdef HAVE_WIN32_THREADS
309        Sleep(0);
310#else
311        sched_yield();
312#endif
313    }
314
315    result = *addr;
316    android_atomic_swap(0, &quasiatomic_spinlock);
317
318    return result;
319}
320
321int64_t android_quasiatomic_swap_64(int64_t value, volatile int64_t* addr) {
322    int64_t result;
323
324    while (android_atomic_cmpxchg(0, 1, &quasiatomic_spinlock)) {
325#ifdef HAVE_WIN32_THREADS
326        Sleep(0);
327#else
328        sched_yield();
329#endif
330    }
331
332    result = *addr;
333    *addr = value;
334    android_atomic_swap(0, &quasiatomic_spinlock);
335
336    return result;
337}
338
339#endif
340