/*
 * Copyright (C) 2010 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "Dalvik.h"

#include <cutils/atomic.h>

#if defined(__arm__)
#include <machine/cpu-features.h>
#endif

/*****************************************************************************/

#if defined(HAVE_MACOSX_IPC)
#define NEED_MAC_QUASI_ATOMICS 1

#elif defined(__i386__) || defined(__x86_64__)
#define NEED_PTHREADS_QUASI_ATOMICS 1

#elif defined(__mips__)
#define NEED_PTHREADS_QUASI_ATOMICS 1

#elif defined(__arm__)

// TODO: Clang cannot process our inline assembly at the moment.
#if defined(__ARM_HAVE_LDREXD) && !defined(__clang__)
#define NEED_ARM_LDREXD_QUASI_ATOMICS 1
#else
#define NEED_PTHREADS_QUASI_ATOMICS 1
#endif

#else
#error "Unsupported atomic operations for this platform"
#endif

/*****************************************************************************/

#if NEED_ARM_LDREXD_QUASI_ATOMICS

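/*
 * Exchange the 64-bit value at 'addr' with 'newvalue' using an
 * LDREXD/STREXD exclusive-access loop: the store fails (status != 0)
 * if another observer touched the location between the load and the
 * store, in which case we retry.  Returns the previous value.
 */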
static inline int64_t dvmQuasiAtomicSwap64Body(int64_t newvalue,
                                               volatile int64_t* addr)
{
    int64_t prev;
    int status;
    do {
        __asm__ __volatile__ ("@ dvmQuasiAtomicSwap64\n"
            "ldrexd     %0, %H0, [%3]\n"
            "strexd     %1, %4, %H4, [%3]"
            : "=&r" (prev), "=&r" (status), "+m"(*addr)
            : "r" (addr), "r" (newvalue)
            : "cc");
    } while (__builtin_expect(status != 0, 0));
    return prev;
}

int64_t dvmQuasiAtomicSwap64(int64_t newvalue, volatile int64_t* addr)
{
    return dvmQuasiAtomicSwap64Body(newvalue, addr);
}

int64_t dvmQuasiAtomicSwap64Sync(int64_t newvalue, volatile int64_t* addr)
{
    int64_t prev;
    ANDROID_MEMBAR_STORE();
    prev = dvmQuasiAtomicSwap64Body(newvalue, addr);
    ANDROID_MEMBAR_FULL();
    return prev;
}

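/*
 * Compare-and-swap: if the 64-bit value at 'addr' equals 'oldvalue',
 * atomically replace it with 'newvalue'.  Returns 0 if the swap was
 * made, nonzero otherwise.
 */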
int dvmQuasiAtomicCas64(int64_t oldvalue, int64_t newvalue,
    volatile int64_t* addr)
{
    int64_t prev;
    int status;
    do {
        __asm__ __volatile__ ("@ dvmQuasiAtomicCas64\n"
            "ldrexd     %0, %H0, [%3]\n"
            "mov        %1, #0\n"
            "teq        %0, %4\n"
            "teqeq      %H0, %H4\n"
            "strexdeq   %1, %5, %H5, [%3]"
            : "=&r" (prev), "=&r" (status), "+m"(*addr)
            : "r" (addr), "Ir" (oldvalue), "r" (newvalue)
            : "cc");
    } while (__builtin_expect(status != 0, 0));
    return prev != oldvalue;
}

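/*
 * On cores that implement LDREXD, a single exclusive load is a
 * single-copy-atomic 64-bit read, so no retry loop or store is needed
 * just to observe the value.
 */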
int64_t dvmQuasiAtomicRead64(volatile const int64_t* addr)
{
    int64_t value;
    __asm__ __volatile__ ("@ dvmQuasiAtomicRead64\n"
        "ldrexd     %0, %H0, [%1]"
        : "=&r" (value)
        : "r" (addr));
    return value;
}
#endif

/*****************************************************************************/

#if NEED_MAC_QUASI_ATOMICS

#include <libkern/OSAtomic.h>

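/*
 * OSAtomicCompareAndSwap64Barrier() returns true when the swap is
 * performed, so comparing against 0 inverts that into the 0-on-success
 * convention used by dvmQuasiAtomicCas64.
 */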
int dvmQuasiAtomicCas64(int64_t oldvalue, int64_t newvalue,
    volatile int64_t* addr)
{
    return OSAtomicCompareAndSwap64Barrier(oldvalue, newvalue,
            (int64_t*)addr) == 0;
}


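/*
 * Swap by retrying a compare-and-swap until no other thread has
 * modified '*addr' between the read and the CAS.  Returns the value
 * that was replaced.
 */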
static inline int64_t dvmQuasiAtomicSwap64Body(int64_t value,
                                               volatile int64_t* addr)
{
    int64_t oldValue;
    do {
        oldValue = *addr;
    } while (dvmQuasiAtomicCas64(oldValue, value, addr));
    return oldValue;
}

int64_t dvmQuasiAtomicSwap64(int64_t value, volatile int64_t* addr)
{
    return dvmQuasiAtomicSwap64Body(value, addr);
}

int64_t dvmQuasiAtomicSwap64Sync(int64_t value, volatile int64_t* addr)
{
    int64_t oldValue;
    ANDROID_MEMBAR_STORE();
    oldValue = dvmQuasiAtomicSwap64Body(value, addr);
    /* TUNING: barriers can be avoided on some architectures */
    ANDROID_MEMBAR_FULL();
    return oldValue;
}

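/*
 * Adding zero with a barrier gives an atomic 64-bit read of '*addr'
 * without modifying it.
 */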
int64_t dvmQuasiAtomicRead64(volatile const int64_t* addr)
{
    /* Cast away const: OSAtomicAdd64Barrier() takes a non-const pointer. */
    return OSAtomicAdd64Barrier(0, (volatile int64_t*)addr);
}
#endif

/*****************************************************************************/

#if NEED_PTHREADS_QUASI_ATOMICS

// In the absence of a better implementation, we implement the 64-bit atomic
// operations through mutex locking.

// To reduce contention, we use a small array of mutexes and pick one
// based on the memory address being operated on.

#include <pthread.h>

static const size_t kSwapLockCount = 32;
static pthread_mutex_t* gSwapLocks[kSwapLockCount];

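/*
 * Allocate and initialize the lock array.  This must run before any of
 * the 64-bit operations below are used; until then the gSwapLocks
 * entries are NULL.
 */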
void dvmQuasiAtomicsStartup() {
    for (size_t i = 0; i < kSwapLockCount; ++i) {
        pthread_mutex_t* m = new pthread_mutex_t;
        dvmInitMutex(m);
        gSwapLocks[i] = m;
    }
}

void dvmQuasiAtomicsShutdown() {
    for (size_t i = 0; i < kSwapLockCount; ++i) {
        pthread_mutex_t* m = gSwapLocks[i];
        gSwapLocks[i] = NULL;
        if (m != NULL) {
            dvmDestroyMutex(m);
        }
        delete m;
    }
}

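/*
 * Hash the address to one of the locks.  Shifting right by three drops
 * the low bits, which are constant for aligned 64-bit values, so nearby
 * addresses spread across different locks.
 */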
static inline pthread_mutex_t* GetSwapLock(const volatile int64_t* addr) {
    return gSwapLocks[((uintptr_t)(addr) >> 3U) % kSwapLockCount];
}

int64_t dvmQuasiAtomicSwap64(int64_t value, volatile int64_t* addr)
{
    int64_t oldValue;
    pthread_mutex_t* lock = GetSwapLock(addr);

    pthread_mutex_lock(lock);

    oldValue = *addr;
    *addr    = value;

    pthread_mutex_unlock(lock);
    return oldValue;
}

/* Same as dvmQuasiAtomicSwap64 - mutex handles barrier */
int64_t dvmQuasiAtomicSwap64Sync(int64_t value, volatile int64_t* addr)
{
    return dvmQuasiAtomicSwap64(value, addr);
}

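/*
 * Mutex-based compare-and-swap.  Returns 0 if the swap was made,
 * nonzero otherwise, matching the other implementations above.
 */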
int dvmQuasiAtomicCas64(int64_t oldvalue, int64_t newvalue,
    volatile int64_t* addr)
{
    int result;
    pthread_mutex_t* lock = GetSwapLock(addr);

    pthread_mutex_lock(lock);

    if (*addr == oldvalue) {
        *addr  = newvalue;
        result = 0;
    } else {
        result = 1;
    }
    pthread_mutex_unlock(lock);
    return result;
}

int64_t dvmQuasiAtomicRead64(volatile const int64_t* addr)
{
    int64_t result;
    pthread_mutex_t* lock = GetSwapLock(addr);

    pthread_mutex_lock(lock);
    result = *addr;
    pthread_mutex_unlock(lock);
    return result;
}

#else

// The other implementations don't need any special setup.
void dvmQuasiAtomicsStartup() {}
void dvmQuasiAtomicsShutdown() {}

#endif /*NEED_PTHREADS_QUASI_ATOMICS*/