// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

// This file is an internal atomic implementation, use base/atomicops.h instead.

// TODO(rmcilroy): Investigate whether we can use __sync__ intrinsics instead of
//                 the hand-coded assembly without introducing perf regressions.
// TODO(rmcilroy): Investigate whether we can use acquire / release versions of
//                 exclusive load / store assembly instructions and do away with
//                 the barriers. (A disabled sketch of this appears next to
//                 Acquire_CompareAndSwap below.)
#ifndef BASE_ATOMICOPS_INTERNALS_ARM64_GCC_H_
#define BASE_ATOMICOPS_INTERNALS_ARM64_GCC_H_

#if defined(OS_QNX)
#include <sys/cpuinline.h>
#endif

namespace base {
namespace subtle {

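// "dmb ish" is a full data memory barrier over the inner shareable domain
// (all cores that share memory coherently): every load and store before the
// barrier is ordered before every load and store after it.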
inline void MemoryBarrier() {
  __asm__ __volatile__ ("dmb ish" ::: "memory");  // NOLINT
}

// The NoBarrier versions of the operations include "memory" in the clobber
// list. This is not required when the NoBarrier versions are used directly.
// However, it is required for correctness when they are used to build the
// Acquire and Release versions below: it stops the compiler from moving memory
// accesses from outside the call into the window between the operation and the
// memory barrier, where the barrier could no longer order them with respect to
// the operation. The clobber does not change the generated code, so it has
// little or no impact on the NoBarrier operations themselves.

inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
                                         Atomic32 old_value,
                                         Atomic32 new_value) {
  Atomic32 prev;
  int32_t temp;

  __asm__ __volatile__ (  // NOLINT
    "0:                                    \n\t"
    "ldxr %w[prev], %[ptr]                 \n\t"  // Load the previous value.
    "cmp %w[prev], %w[old_value]           \n\t"
    "bne 1f                                \n\t"
    "stxr %w[temp], %w[new_value], %[ptr]  \n\t"  // Try to store the new value.
    "cbnz %w[temp], 0b                     \n\t"  // Retry if it did not work.
    "1:                                    \n\t"
    : [prev]"=&r" (prev),
      [temp]"=&r" (temp),
      [ptr]"+Q" (*ptr)
    : [old_value]"IJr" (old_value),
      [new_value]"r" (new_value)
    : "cc", "memory"
  );  // NOLINT

  return prev;
}

inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
                                         Atomic32 new_value) {
  Atomic32 result;
  int32_t temp;

  __asm__ __volatile__ (  // NOLINT
    "0:                                    \n\t"
    "ldxr %w[result], %[ptr]               \n\t"  // Load the previous value.
    "stxr %w[temp], %w[new_value], %[ptr]  \n\t"  // Try to store the new value.
    "cbnz %w[temp], 0b                     \n\t"  // Retry if it did not work.
    : [result]"=&r" (result),
      [temp]"=&r" (temp),
      [ptr]"+Q" (*ptr)
    : [new_value]"r" (new_value)
    : "memory"
  );  // NOLINT

  return result;
}

inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
                                          Atomic32 increment) {
  Atomic32 result;
  int32_t temp;

  __asm__ __volatile__ (  // NOLINT
    "0:                                       \n\t"
    "ldxr %w[result], %[ptr]                  \n\t"  // Load the previous value.
    "add %w[result], %w[result], %w[increment]\n\t"
    "stxr %w[temp], %w[result], %[ptr]        \n\t"  // Try to store the result.
    "cbnz %w[temp], 0b                        \n\t"  // Retry on failure.
    : [result]"=&r" (result),
      [temp]"=&r" (temp),
      [ptr]"+Q" (*ptr)
    : [increment]"IJr" (increment)
    : "memory"
  );  // NOLINT

  return result;
}

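// Barrier_AtomicIncrement places a full barrier on each side of the increment,
// so the operation is ordered with both earlier and later memory accesses.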
inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
                                        Atomic32 increment) {
  MemoryBarrier();
  Atomic32 result = NoBarrier_AtomicIncrement(ptr, increment);
  MemoryBarrier();

  return result;
}

inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
                                       Atomic32 old_value,
                                       Atomic32 new_value) {
  Atomic32 prev = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
  MemoryBarrier();

  return prev;
}

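// A minimal, unbenchmarked sketch of the second TODO at the top of this file:
// an acquire version of the exclusive load (ldaxr) folds the acquire barrier
// into the load itself, so Acquire_CompareAndSwap would not need the trailing
// MemoryBarrier(). Disabled because it has not been measured against the
// barrier-based implementation above.
#if 0
inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
                                       Atomic32 old_value,
                                       Atomic32 new_value) {
  Atomic32 prev;
  int32_t temp;

  __asm__ __volatile__ (  // NOLINT
    "0:                                     \n\t"
    "ldaxr %w[prev], %[ptr]                 \n\t"  // Load-acquire the value.
    "cmp %w[prev], %w[old_value]            \n\t"
    "bne 1f                                 \n\t"
    "stxr %w[temp], %w[new_value], %[ptr]   \n\t"  // Try to store the new value.
    "cbnz %w[temp], 0b                      \n\t"  // Retry if it did not work.
    "1:                                     \n\t"
    : [prev]"=&r" (prev),
      [temp]"=&r" (temp),
      [ptr]"+Q" (*ptr)
    : [old_value]"IJr" (old_value),
      [new_value]"r" (new_value)
    : "cc", "memory"
  );  // NOLINT

  return prev;
}
#endif
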
inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
                                       Atomic32 old_value,
                                       Atomic32 new_value) {
  MemoryBarrier();
  Atomic32 prev = NoBarrier_CompareAndSwap(ptr, old_value, new_value);

  return prev;
}

inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
  *ptr = value;
}

inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
  *ptr = value;
  MemoryBarrier();
}

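// "stlr" is a store-release: all earlier loads and stores are observable
// before the store itself, so no separate barrier is required.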
inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
  __asm__ __volatile__ (  // NOLINT
    "stlr %w[value], %[ptr]  \n\t"
    : [ptr]"=Q" (*ptr)
    : [value]"r" (value)
    : "memory"
  );  // NOLINT
}

inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
  return *ptr;
}

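// "ldar" is a load-acquire: no later load or store can be observed before the
// load itself, so no separate barrier is required.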
inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
  Atomic32 value;

  __asm__ __volatile__ (  // NOLINT
    "ldar %w[value], %[ptr]  \n\t"
    : [value]"=r" (value)
    : [ptr]"Q" (*ptr)
    : "memory"
  );  // NOLINT

  return value;
}

inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
  MemoryBarrier();
  return *ptr;
}

// 64-bit versions of the operations.
// See the 32-bit versions for comments.

inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
                                         Atomic64 old_value,
                                         Atomic64 new_value) {
  Atomic64 prev;
  int32_t temp;

  __asm__ __volatile__ (  // NOLINT
    "0:                                    \n\t"
    "ldxr %[prev], %[ptr]                  \n\t"
    "cmp %[prev], %[old_value]             \n\t"
    "bne 1f                                \n\t"
    "stxr %w[temp], %[new_value], %[ptr]   \n\t"
    "cbnz %w[temp], 0b                     \n\t"
    "1:                                    \n\t"
    : [prev]"=&r" (prev),
      [temp]"=&r" (temp),
      [ptr]"+Q" (*ptr)
    : [old_value]"IJr" (old_value),
      [new_value]"r" (new_value)
    : "cc", "memory"
  );  // NOLINT

  return prev;
}

inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
                                         Atomic64 new_value) {
  Atomic64 result;
  int32_t temp;

  __asm__ __volatile__ (  // NOLINT
    "0:                                    \n\t"
    "ldxr %[result], %[ptr]                \n\t"
    "stxr %w[temp], %[new_value], %[ptr]   \n\t"
    "cbnz %w[temp], 0b                     \n\t"
    : [result]"=&r" (result),
      [temp]"=&r" (temp),
      [ptr]"+Q" (*ptr)
    : [new_value]"r" (new_value)
    : "memory"
  );  // NOLINT

  return result;
}

inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
                                          Atomic64 increment) {
  Atomic64 result;
  int32_t temp;

  __asm__ __volatile__ (  // NOLINT
    "0:                                     \n\t"
    "ldxr %[result], %[ptr]                 \n\t"
    "add %[result], %[result], %[increment] \n\t"
    "stxr %w[temp], %[result], %[ptr]       \n\t"
    "cbnz %w[temp], 0b                      \n\t"
    : [result]"=&r" (result),
      [temp]"=&r" (temp),
      [ptr]"+Q" (*ptr)
    : [increment]"IJr" (increment)
    : "memory"
  );  // NOLINT

  return result;
}

inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
                                        Atomic64 increment) {
  MemoryBarrier();
  Atomic64 result = NoBarrier_AtomicIncrement(ptr, increment);
  MemoryBarrier();

  return result;
}

inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
                                       Atomic64 old_value,
                                       Atomic64 new_value) {
  Atomic64 prev = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
  MemoryBarrier();

  return prev;
}

inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
                                       Atomic64 old_value,
                                       Atomic64 new_value) {
  MemoryBarrier();
  Atomic64 prev = NoBarrier_CompareAndSwap(ptr, old_value, new_value);

  return prev;
}

inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
  *ptr = value;
}

inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
  *ptr = value;
  MemoryBarrier();
}

inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
  __asm__ __volatile__ (  // NOLINT
    "stlr %x[value], %[ptr]  \n\t"
    : [ptr]"=Q" (*ptr)
    : [value]"r" (value)
    : "memory"
  );  // NOLINT
}

inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
  return *ptr;
}

inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
  Atomic64 value;

  __asm__ __volatile__ (  // NOLINT
    "ldar %x[value], %[ptr]  \n\t"
    : [value]"=r" (value)
    : [ptr]"Q" (*ptr)
    : "memory"
  );  // NOLINT

  return value;
}

inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
  MemoryBarrier();
  return *ptr;
}

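// Purely illustrative (and disabled): a toy spinlock built from the operations
// above, showing how the Acquire / Release pairs are intended to be combined.
// The ExampleSpinLock* names are hypothetical; real code should go through the
// public wrappers in base/atomicops.h rather than this internal header.
#if 0
inline void ExampleSpinLockAcquire(volatile Atomic32* lock) {
  // Spin until the lock word changes from 0 (free) to 1 (held). The acquire
  // CAS keeps accesses in the critical section from moving above this point.
  while (Acquire_CompareAndSwap(lock, 0, 1) != 0) {
  }
}

inline void ExampleSpinLockRelease(volatile Atomic32* lock) {
  // The release store makes all writes in the critical section visible before
  // the lock is observed as free again.
  Release_Store(lock, 0);
}
#endif
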
}  // namespace subtle
}  // namespace base

#endif  // BASE_ATOMICOPS_INTERNALS_ARM64_GCC_H_