/* Copyright (c) 2006, Google Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *
 *     * Redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above
 * copyright notice, this list of conditions and the following disclaimer
 * in the documentation and/or other materials provided with the
 * distribution.
 *     * Neither the name of Google Inc. nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * ---
 * Author: Sanjay Ghemawat
 */

// For atomic operations on statistics counters, see atomic_stats_counter.h.
// For atomic operations on sequence numbers, see atomic_sequence_num.h.
// For atomic operations on reference counts, see atomic_refcount.h.

// Some fast atomic operations -- typically with machine-dependent
// implementations.  This file may need editing as Google code is
// ported to different architectures.

// The routines exported by this module are subtle.  If you use them, even if
// you get the code right, it will depend on careful reasoning about atomicity
// and memory ordering; it will be less readable, and harder to maintain.  If
// you plan to use these routines, you should have a good reason, such as solid
// evidence that performance would otherwise suffer, or there being no
// alternative.  You should assume only properties explicitly guaranteed by the
// specifications in this file.  You are almost certainly _not_ writing code
// just for the x86; if you assume x86 semantics, x86 hardware bugs and
// implementations on other architectures will cause your code to break.  If
// you do not know what you are doing, avoid these routines, and use a Mutex.
//
// It is incorrect to make direct assignments to/from an atomic variable.
// You should use one of the Load or Store routines.  The NoBarrier
// versions are provided when no barriers are needed:
//   NoBarrier_Store()
//   NoBarrier_Load()
// Although there is currently no compiler enforcement, you are encouraged
// to use these.  Moreover, if you choose to use the base::subtle::Atomic64
// type, you MUST use one of the Load or Store routines to get correct
// behavior on 32-bit platforms.
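//
// For example, a sketch of correct use ("g_flag" is an illustrative name,
// not part of this interface):
//
//   AtomicWord g_flag;                       // shared between threads
//   NoBarrier_Store(&g_flag, 1);             // OK
//   AtomicWord v = NoBarrier_Load(&g_flag);  // OK
//   g_flag = 1;                              // WRONG: direct assignment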
//
// The intent is eventually to put all of these routines in namespace
// base::subtle

#ifndef THREAD_ATOMICOPS_H_
#define THREAD_ATOMICOPS_H_

#include <config.h>
#ifdef HAVE_STDINT_H
#include <stdint.h>
#endif

// ------------------------------------------------------------------------
// Include the platform specific implementations of the types
// and operations listed below.  Implementations are to provide Atomic32
// and Atomic64 operations. If there is a mismatch between intptr_t and
// the Atomic32 or Atomic64 types for a platform, the platform-specific header
// should define the macro AtomicWordCastType in a clause similar to the
// following:
// #if ...pointers are 64 bits...
// # define AtomicWordCastType base::subtle::Atomic64
// #else
// # define AtomicWordCastType Atomic32
// #endif
// TODO(csilvers): figure out ARCH_PIII/ARCH_K8 (perhaps via ./configure?)
// ------------------------------------------------------------------------

#include "base/arm_instruction_set_select.h"

// TODO(csilvers): match piii, not just __i386.  Also, match k8
#if defined(__MACH__) && defined(__APPLE__)
#include "base/atomicops-internals-macosx.h"
#elif defined(__GNUC__) && defined(ARMV6)
#include "base/atomicops-internals-arm-v6plus.h"
#elif defined(ARMV3)
#include "base/atomicops-internals-arm-generic.h"
#elif defined(_WIN32)
#include "base/atomicops-internals-windows.h"
#elif defined(__GNUC__) && (defined(__i386) || defined(__x86_64__))
#include "base/atomicops-internals-x86.h"
#elif defined(__linux__) && defined(__PPC__)
#include "base/atomicops-internals-linuxppc.h"
#else
// Assume x86 for now.  If you need to support a new architecture and
// don't know how to implement atomic ops, you can probably get away
// with using pthreads, since atomicops is only used by spinlock.h/cc
//#error You need to implement atomic operations for this architecture
#include "base/atomicops-internals-x86.h"
#endif

// Signed type that can hold a pointer and supports the atomic ops below, as
// well as atomic loads and stores.  Instances must be naturally-aligned.
typedef intptr_t AtomicWord;

#ifdef AtomicWordCastType
// ------------------------------------------------------------------------
// This section is needed only when explicit type casting is required to
// cast AtomicWord to one of the basic atomic types (Atomic64 or Atomic32).
// It also serves to document the AtomicWord interface.
// ------------------------------------------------------------------------

namespace base {
namespace subtle {

// Atomically execute:
//      result = *ptr;
//      if (*ptr == old_value)
//        *ptr = new_value;
//      return result;
//
// I.e., replace "*ptr" with "new_value" if "*ptr" used to be "old_value".
// Always returns the old value of "*ptr".
//
// This routine implies no memory barriers.
inline AtomicWord NoBarrier_CompareAndSwap(volatile AtomicWord* ptr,
                                           AtomicWord old_value,
                                           AtomicWord new_value) {
  return NoBarrier_CompareAndSwap(
      reinterpret_cast<volatile AtomicWordCastType*>(ptr),
      old_value, new_value);
}
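
// For example, a compare-and-swap loop can apply an arbitrary atomic update.
// A sketch that atomically doubles a counter ("g_counter" is an illustrative
// name; for plain additions, prefer NoBarrier_AtomicIncrement below):
//
//   AtomicWord old_val, new_val;
//   do {
//     old_val = NoBarrier_Load(&g_counter);
//     new_val = old_val * 2;
//   } while (NoBarrier_CompareAndSwap(&g_counter, old_val, new_val)
//            != old_val);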

// Atomically store new_value into *ptr, returning the previous value held in
// *ptr.  This routine implies no memory barriers.
inline AtomicWord NoBarrier_AtomicExchange(volatile AtomicWord* ptr,
                                           AtomicWord new_value) {
  return NoBarrier_AtomicExchange(
      reinterpret_cast<volatile AtomicWordCastType*>(ptr), new_value);
}

// Atomically increment *ptr by "increment".  Returns the new value of
// *ptr with the increment applied.  This routine implies no memory
// barriers.
inline AtomicWord NoBarrier_AtomicIncrement(volatile AtomicWord* ptr,
                                            AtomicWord increment) {
  return NoBarrier_AtomicIncrement(
      reinterpret_cast<volatile AtomicWordCastType*>(ptr), increment);
}

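// Like NoBarrier_AtomicIncrement, but with "Barrier" (both acquire and
// release) semantics; see the memory-ordering discussion below.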
inline AtomicWord Barrier_AtomicIncrement(volatile AtomicWord* ptr,
                                          AtomicWord increment) {
  return Barrier_AtomicIncrement(
      reinterpret_cast<volatile AtomicWordCastType*>(ptr), increment);
}

// ------------------------------------------------------------------------
// The following lower-level operations are typically useful only to people
// implementing higher-level synchronization operations like spinlocks,
// mutexes, and condition-variables.  They combine CompareAndSwap(), a load, or
// a store with appropriate memory-ordering instructions.  "Acquire" operations
// ensure that no later memory access can be reordered ahead of the operation.
// "Release" operations ensure that no previous memory access can be reordered
// after the operation.  "Barrier" operations have both "Acquire" and "Release"
// semantics.  A MemoryBarrier() has "Barrier" semantics, but does no memory
// access.
// ------------------------------------------------------------------------
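// For example, a minimal spin-lock sketch built on these operations ("g_lock"
// is an illustrative name; real code should use SpinLock from spinlock.h):
//
//   AtomicWord g_lock = 0;  // 0 == free, 1 == held
//   void Lock() {
//     while (Acquire_CompareAndSwap(&g_lock, 0, 1) != 0) { /* spin */ }
//   }
//   void Unlock() { Release_Store(&g_lock, 0); }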
inline AtomicWord Acquire_CompareAndSwap(volatile AtomicWord* ptr,
                                         AtomicWord old_value,
                                         AtomicWord new_value) {
  return base::subtle::Acquire_CompareAndSwap(
      reinterpret_cast<volatile AtomicWordCastType*>(ptr),
      old_value, new_value);
}

inline AtomicWord Release_CompareAndSwap(volatile AtomicWord* ptr,
                                         AtomicWord old_value,
                                         AtomicWord new_value) {
  return base::subtle::Release_CompareAndSwap(
      reinterpret_cast<volatile AtomicWordCastType*>(ptr),
      old_value, new_value);
}

inline void NoBarrier_Store(volatile AtomicWord *ptr, AtomicWord value) {
  NoBarrier_Store(
      reinterpret_cast<volatile AtomicWordCastType*>(ptr), value);
}

inline void Acquire_Store(volatile AtomicWord* ptr, AtomicWord value) {
  return base::subtle::Acquire_Store(
      reinterpret_cast<volatile AtomicWordCastType*>(ptr), value);
}

inline void Release_Store(volatile AtomicWord* ptr, AtomicWord value) {
  return base::subtle::Release_Store(
      reinterpret_cast<volatile AtomicWordCastType*>(ptr), value);
}

inline AtomicWord NoBarrier_Load(volatile const AtomicWord *ptr) {
  return NoBarrier_Load(
      reinterpret_cast<volatile const AtomicWordCastType*>(ptr));
}

inline AtomicWord Acquire_Load(volatile const AtomicWord* ptr) {
  return base::subtle::Acquire_Load(
      reinterpret_cast<volatile const AtomicWordCastType*>(ptr));
}

inline AtomicWord Release_Load(volatile const AtomicWord* ptr) {
  return base::subtle::Release_Load(
      reinterpret_cast<volatile const AtomicWordCastType*>(ptr));
}
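
// A common use of these orderings is safe publication: a producer writes a
// payload and then does a Release_Store() of a flag; a consumer that observes
// the flag set via Acquire_Load() is guaranteed to also observe the payload
// writes.  Sketch ("g_payload" and "g_ready" are illustrative names):
//
//   // Producer thread:
//   g_payload = ComputeValue();
//   Release_Store(&g_ready, 1);   // publish
//
//   // Consumer thread:
//   if (Acquire_Load(&g_ready)) {
//     Use(g_payload);             // guaranteed to see the producer's write
//   }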

}  // namespace base::subtle
}  // namespace base
#endif  // AtomicWordCastType

// ------------------------------------------------------------------------
// Commented-out type definitions and method declarations that document the
// interface provided by this module.
// ------------------------------------------------------------------------

#if 0

// Signed 32-bit type that supports the atomic ops below, as well as atomic
// loads and stores.  Instances must be naturally aligned.  This type differs
// from AtomicWord in 64-bit binaries where AtomicWord is 64-bits.
typedef int32_t Atomic32;

// Corresponding operations on Atomic32
namespace base {
namespace subtle {

// Signed 64-bit type that supports the atomic ops below, as well as atomic
// loads and stores.  Instances must be naturally aligned.  This type differs
// from AtomicWord in 32-bit binaries where AtomicWord is 32-bits.
typedef int64_t Atomic64;

Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
                                  Atomic32 old_value,
                                  Atomic32 new_value);
Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr, Atomic32 new_value);
Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr, Atomic32 increment);
Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
                                 Atomic32 increment);
Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
                                Atomic32 old_value,
                                Atomic32 new_value);
Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
                                Atomic32 old_value,
                                Atomic32 new_value);
void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value);
void Acquire_Store(volatile Atomic32* ptr, Atomic32 value);
void Release_Store(volatile Atomic32* ptr, Atomic32 value);
Atomic32 NoBarrier_Load(volatile const Atomic32* ptr);
Atomic32 Acquire_Load(volatile const Atomic32* ptr);
Atomic32 Release_Load(volatile const Atomic32* ptr);

// Corresponding operations on Atomic64
Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
                                  Atomic64 old_value,
                                  Atomic64 new_value);
Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr, Atomic64 new_value);
Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr, Atomic64 increment);
Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr, Atomic64 increment);

Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
                                Atomic64 old_value,
                                Atomic64 new_value);
Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
                                Atomic64 old_value,
                                Atomic64 new_value);
void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value);
void Acquire_Store(volatile Atomic64* ptr, Atomic64 value);
void Release_Store(volatile Atomic64* ptr, Atomic64 value);
Atomic64 NoBarrier_Load(volatile const Atomic64* ptr);
Atomic64 Acquire_Load(volatile const Atomic64* ptr);
Atomic64 Release_Load(volatile const Atomic64* ptr);
}  // namespace base::subtle
}  // namespace base

void MemoryBarrier();

#endif  // 0


// ------------------------------------------------------------------------
// The following are to be deprecated when all uses have been changed to
// use the base::subtle namespace.
// ------------------------------------------------------------------------

#ifdef AtomicWordCastType
// AtomicWord versions to be deprecated
inline AtomicWord Acquire_CompareAndSwap(volatile AtomicWord* ptr,
                                         AtomicWord old_value,
                                         AtomicWord new_value) {
  return base::subtle::Acquire_CompareAndSwap(ptr, old_value, new_value);
}

inline AtomicWord Release_CompareAndSwap(volatile AtomicWord* ptr,
                                         AtomicWord old_value,
                                         AtomicWord new_value) {
  return base::subtle::Release_CompareAndSwap(ptr, old_value, new_value);
}

inline void Acquire_Store(volatile AtomicWord* ptr, AtomicWord value) {
  return base::subtle::Acquire_Store(ptr, value);
}

inline void Release_Store(volatile AtomicWord* ptr, AtomicWord value) {
  return base::subtle::Release_Store(ptr, value);
}

inline AtomicWord Acquire_Load(volatile const AtomicWord* ptr) {
  return base::subtle::Acquire_Load(ptr);
}

inline AtomicWord Release_Load(volatile const AtomicWord* ptr) {
  return base::subtle::Release_Load(ptr);
}
#endif  // AtomicWordCastType

// 32-bit Acquire/Release operations to be deprecated.

inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
                                       Atomic32 old_value,
                                       Atomic32 new_value) {
  return base::subtle::Acquire_CompareAndSwap(ptr, old_value, new_value);
}
inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
                                       Atomic32 old_value,
                                       Atomic32 new_value) {
  return base::subtle::Release_CompareAndSwap(ptr, old_value, new_value);
}
inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
  base::subtle::Acquire_Store(ptr, value);
}
inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
  return base::subtle::Release_Store(ptr, value);
}
inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
  return base::subtle::Acquire_Load(ptr);
}
inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
  return base::subtle::Release_Load(ptr);
}

#ifdef BASE_HAS_ATOMIC64

// 64-bit Acquire/Release operations to be deprecated.

inline base::subtle::Atomic64 Acquire_CompareAndSwap(
    volatile base::subtle::Atomic64* ptr,
    base::subtle::Atomic64 old_value, base::subtle::Atomic64 new_value) {
  return base::subtle::Acquire_CompareAndSwap(ptr, old_value, new_value);
}
inline base::subtle::Atomic64 Release_CompareAndSwap(
    volatile base::subtle::Atomic64* ptr,
    base::subtle::Atomic64 old_value, base::subtle::Atomic64 new_value) {
  return base::subtle::Release_CompareAndSwap(ptr, old_value, new_value);
}
inline void Acquire_Store(
    volatile base::subtle::Atomic64* ptr, base::subtle::Atomic64 value) {
  base::subtle::Acquire_Store(ptr, value);
}
inline void Release_Store(
    volatile base::subtle::Atomic64* ptr, base::subtle::Atomic64 value) {
  return base::subtle::Release_Store(ptr, value);
}
inline base::subtle::Atomic64 Acquire_Load(
    volatile const base::subtle::Atomic64* ptr) {
  return base::subtle::Acquire_Load(ptr);
}
inline base::subtle::Atomic64 Release_Load(
    volatile const base::subtle::Atomic64* ptr) {
  return base::subtle::Release_Load(ptr);
}

#endif  // BASE_HAS_ATOMIC64

#endif  // THREAD_ATOMICOPS_H_