// Copyright (c) 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

// This file is an internal atomic implementation; use atomicops.h instead.
//
// This implementation uses C++11 atomics' member functions. The code base is
// currently written assuming atomicity revolves around accesses instead of
// C++11's memory locations. The burden is on the programmer to ensure that all
// memory locations accessed atomically are never accessed non-atomically (tsan
// should help with this).
//
// TODO(jfb) Modify the atomicops.h API and user code to declare atomic
//           locations as truly atomic. See the static_assert below.
//
// Of note in this implementation:
//  * All NoBarrier variants are implemented as relaxed.
//  * All Barrier variants are implemented as sequentially-consistent.
//  * Compare exchange's failure ordering is always the same as the success one
//    (except for release, which fails as relaxed): using a weaker ordering is
//    only valid under certain uses of compare exchange.
//  * Acquire store doesn't exist in the C++11 memory model; it is instead
//    implemented as a relaxed store followed by a sequentially consistent
//    fence.
//  * Release load doesn't exist in the C++11 memory model; it is instead
//    implemented as a sequentially consistent fence followed by a relaxed
//    load.
//  * Atomic increment is expected to return the post-incremented value,
//    whereas C++11 fetch_add returns the previous value. The implementation
//    therefore needs to add the increment back (which the compiler should be
//    able to detect and optimize); see the usage sketch below.
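//
// Usage sketch (comment only, not part of the API; Atomic32 and the functions
// below are declared in atomicops.h):
//
//   Atomic32 value = 0;
//   Atomic32 prev = NoBarrier_CompareAndSwap(&value, 0, 1);  // prev == 0
//   Atomic32 now = NoBarrier_AtomicIncrement(&value, 1);     // now == 2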

#ifndef BASE_ATOMICOPS_INTERNALS_PORTABLE_H_
#define BASE_ATOMICOPS_INTERNALS_PORTABLE_H_

#include <atomic>

#include "build/build_config.h"

namespace base {
namespace subtle {

// This implementation is transitional and maintains the original API for
// atomicops.h. This requires casting memory locations to the atomic types, and
// assumes that the API and the C++11 implementation are layout-compatible,
// which isn't true for all implementations or hardware platforms. The static
// assertion should detect this issue; were it to fire, this header shouldn't
// be used.
//
// TODO(jfb) If this header manages to stay committed then the API should be
//           modified, and all call sites updated.
typedef volatile std::atomic<Atomic32>* AtomicLocation32;
static_assert(sizeof(*(AtomicLocation32) nullptr) == sizeof(Atomic32),
              "incompatible 32-bit atomic layout");

inline void MemoryBarrier() {
#if defined(__GLIBCXX__)
  // Work around libstdc++ bug 51038 where atomic_thread_fence was declared but
  // not defined, leading to the linker complaining about undefined references.
  __atomic_thread_fence(std::memory_order_seq_cst);
#else
  std::atomic_thread_fence(std::memory_order_seq_cst);
#endif
}

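// compare_exchange_strong writes the value observed at |ptr| back into
// |old_value| when the exchange fails, so returning |old_value| yields the
// previous contents of |ptr| whether or not the swap happened.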
inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
                                         Atomic32 old_value,
                                         Atomic32 new_value) {
  ((AtomicLocation32)ptr)
      ->compare_exchange_strong(old_value,
                                new_value,
                                std::memory_order_relaxed,
                                std::memory_order_relaxed);
  return old_value;
}

inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
                                         Atomic32 new_value) {
  return ((AtomicLocation32)ptr)
      ->exchange(new_value, std::memory_order_relaxed);
}

inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
                                          Atomic32 increment) {
  return increment +
         ((AtomicLocation32)ptr)
             ->fetch_add(increment, std::memory_order_relaxed);
}

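// fetch_add with no explicit ordering argument defaults to
// std::memory_order_seq_cst, which provides the barrier; the increment is
// added back to turn the fetched (previous) value into the new value.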
inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
                                        Atomic32 increment) {
  return increment + ((AtomicLocation32)ptr)->fetch_add(increment);
}

inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
                                       Atomic32 old_value,
                                       Atomic32 new_value) {
  ((AtomicLocation32)ptr)
      ->compare_exchange_strong(old_value,
                                new_value,
                                std::memory_order_acquire,
                                std::memory_order_acquire);
  return old_value;
}

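// The failure ordering is relaxed rather than release because a failed
// compare exchange only performs a load, and release ordering does not apply
// to loads (see the header comment above).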
inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
                                       Atomic32 old_value,
                                       Atomic32 new_value) {
  ((AtomicLocation32)ptr)
      ->compare_exchange_strong(old_value,
                                new_value,
                                std::memory_order_release,
                                std::memory_order_relaxed);
  return old_value;
}

inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
  ((AtomicLocation32)ptr)->store(value, std::memory_order_relaxed);
}

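// The C++11 memory model has no acquire store, so it is emulated here with a
// relaxed store followed by a sequentially consistent fence.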
inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
  ((AtomicLocation32)ptr)->store(value, std::memory_order_relaxed);
  MemoryBarrier();
}

inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
  ((AtomicLocation32)ptr)->store(value, std::memory_order_release);
}

inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
  return ((AtomicLocation32)ptr)->load(std::memory_order_relaxed);
}

inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
  return ((AtomicLocation32)ptr)->load(std::memory_order_acquire);
}

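// Likewise there is no release load in the C++11 memory model; it is emulated
// with a sequentially consistent fence followed by a relaxed load.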
inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
  MemoryBarrier();
  return ((AtomicLocation32)ptr)->load(std::memory_order_relaxed);
}

#if defined(ARCH_CPU_64_BITS)

typedef volatile std::atomic<Atomic64>* AtomicLocation64;
static_assert(sizeof(*(AtomicLocation64) nullptr) == sizeof(Atomic64),
              "incompatible 64-bit atomic layout");

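// The 64-bit operations below mirror the 32-bit ones above and use the same
// memory orderings; see the comments on the 32-bit variants.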
inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
                                         Atomic64 old_value,
                                         Atomic64 new_value) {
  ((AtomicLocation64)ptr)
      ->compare_exchange_strong(old_value,
                                new_value,
                                std::memory_order_relaxed,
                                std::memory_order_relaxed);
  return old_value;
}

inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
                                         Atomic64 new_value) {
  return ((AtomicLocation64)ptr)
      ->exchange(new_value, std::memory_order_relaxed);
}

inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
                                          Atomic64 increment) {
  return increment +
         ((AtomicLocation64)ptr)
             ->fetch_add(increment, std::memory_order_relaxed);
}

inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
                                        Atomic64 increment) {
  return increment + ((AtomicLocation64)ptr)->fetch_add(increment);
}

inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
                                       Atomic64 old_value,
                                       Atomic64 new_value) {
  ((AtomicLocation64)ptr)
      ->compare_exchange_strong(old_value,
                                new_value,
                                std::memory_order_acquire,
                                std::memory_order_acquire);
  return old_value;
}

inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
                                       Atomic64 old_value,
                                       Atomic64 new_value) {
  ((AtomicLocation64)ptr)
      ->compare_exchange_strong(old_value,
                                new_value,
                                std::memory_order_release,
                                std::memory_order_relaxed);
  return old_value;
}

inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
  ((AtomicLocation64)ptr)->store(value, std::memory_order_relaxed);
}

inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
  ((AtomicLocation64)ptr)->store(value, std::memory_order_relaxed);
  MemoryBarrier();
}

inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
  ((AtomicLocation64)ptr)->store(value, std::memory_order_release);
}

inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
  return ((AtomicLocation64)ptr)->load(std::memory_order_relaxed);
}

inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
  return ((AtomicLocation64)ptr)->load(std::memory_order_acquire);
}

inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
  MemoryBarrier();
  return ((AtomicLocation64)ptr)->load(std::memory_order_relaxed);
}

#endif  // defined(ARCH_CPU_64_BITS)
}  // namespace subtle
}  // namespace base

#endif  // BASE_ATOMICOPS_INTERNALS_PORTABLE_H_