// Protocol Buffers - Google's data interchange format
// Copyright 2012 Google Inc.  All rights reserved.
// http://code.google.com/p/protobuf/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
//     * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

// This file is an internal atomic implementation; use atomicops.h instead.

#ifndef GOOGLE_PROTOBUF_ATOMICOPS_INTERNALS_MACOSX_H_
#define GOOGLE_PROTOBUF_ATOMICOPS_INTERNALS_MACOSX_H_

#include <libkern/OSAtomic.h>

namespace google {
namespace protobuf {
namespace internal {

inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
                                         Atomic32 old_value,
                                         Atomic32 new_value) {
  Atomic32 prev_value;
  do {
    if (OSAtomicCompareAndSwap32(old_value, new_value,
                                 const_cast<Atomic32*>(ptr))) {
      return old_value;
    }
    // The swap failed.  Re-read the current value: if it no longer equals
    // old_value, return it so the caller can see why the CAS failed; if it
    // equals old_value again, another thread raced in between, so retry.
    prev_value = *ptr;
  } while (prev_value == old_value);
  return prev_value;
}
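
// Illustrative sketch only: the contract above (return old_value on
// success, the observed value on failure) is what makes compare-and-swap
// retry loops work.  AtomicStoreMax is a hypothetical helper, not part of
// protobuf; it keeps the larger of the stored value and |candidate|.
inline void AtomicStoreMax(volatile Atomic32* ptr, Atomic32 candidate) {
  Atomic32 observed = *ptr;
  while (observed < candidate) {
    Atomic32 prev = NoBarrier_CompareAndSwap(ptr, observed, candidate);
    if (prev == observed) return;  // Our CAS installed candidate.
    observed = prev;  // Lost a race; retry against the value we saw.
  }
}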

inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
                                         Atomic32 new_value) {
  Atomic32 old_value;
  // Loop until the compare-and-swap installs new_value over the value we
  // just read, i.e. until no other thread wrote to *ptr in between.
  do {
    old_value = *ptr;
  } while (!OSAtomicCompareAndSwap32(old_value, new_value,
                                     const_cast<Atomic32*>(ptr)));
  return old_value;
}

inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
                                          Atomic32 increment) {
  return OSAtomicAdd32(increment, const_cast<Atomic32*>(ptr));
}

inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
                                        Atomic32 increment) {
  return OSAtomicAdd32Barrier(increment, const_cast<Atomic32*>(ptr));
}

inline void MemoryBarrier() {
  OSMemoryBarrier();
}

inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
                                       Atomic32 old_value,
                                       Atomic32 new_value) {
  Atomic32 prev_value;
  do {
    if (OSAtomicCompareAndSwap32Barrier(old_value, new_value,
                                        const_cast<Atomic32*>(ptr))) {
      return old_value;
    }
    prev_value = *ptr;
  } while (prev_value == old_value);
  return prev_value;
}

inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
                                       Atomic32 old_value,
                                       Atomic32 new_value) {
  // The lib kern interface does not distinguish between
  // Acquire and Release memory barriers; they are equivalent.
  return Acquire_CompareAndSwap(ptr, old_value, new_value);
}

inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
  *ptr = value;
}

inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
  *ptr = value;
  MemoryBarrier();
}

inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
  MemoryBarrier();
  *ptr = value;
}

inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
  return *ptr;
}

inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
  Atomic32 value = *ptr;
  MemoryBarrier();
  return value;
}

inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
  MemoryBarrier();
  return *ptr;
}
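
// Illustrative sketch only (hypothetical, not part of this header): how
// the 32-bit primitives above pair up to form a minimal spinlock.  The
// acquire barrier keeps critical-section reads from floating above the
// lock, and the release barrier keeps critical-section writes from
// sinking below the unlock.  Production code should use a real lock.
inline void ExampleSpinLock(volatile Atomic32* lock_word) {
  // 0 == unlocked, 1 == locked.  Spin until our CAS swaps 0 -> 1.
  while (Acquire_CompareAndSwap(lock_word, 0, 1) != 0) {
    // Busy-wait; a real lock would back off or yield here.
  }
}

inline void ExampleSpinUnlock(volatile Atomic32* lock_word) {
  // Release_Store issues its barrier before the store, so writes made
  // while holding the lock are visible before the lock reads as free.
  Release_Store(lock_word, 0);
}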

#ifdef __LP64__

// 64-bit implementation on 64-bit platform

inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
                                         Atomic64 old_value,
                                         Atomic64 new_value) {
  Atomic64 prev_value;
  do {
    if (OSAtomicCompareAndSwap64(old_value, new_value,
                                 reinterpret_cast<volatile int64_t*>(ptr))) {
      return old_value;
    }
    prev_value = *ptr;
  } while (prev_value == old_value);
  return prev_value;
}

inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
                                         Atomic64 new_value) {
  Atomic64 old_value;
  do {
    old_value = *ptr;
  } while (!OSAtomicCompareAndSwap64(old_value, new_value,
                                     reinterpret_cast<volatile int64_t*>(ptr)));
  return old_value;
}

inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
                                          Atomic64 increment) {
  return OSAtomicAdd64(increment, reinterpret_cast<volatile int64_t*>(ptr));
}

inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
                                        Atomic64 increment) {
  return OSAtomicAdd64Barrier(increment,
                              reinterpret_cast<volatile int64_t*>(ptr));
}

inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
                                       Atomic64 old_value,
                                       Atomic64 new_value) {
  Atomic64 prev_value;
  do {
    if (OSAtomicCompareAndSwap64Barrier(
        old_value, new_value, reinterpret_cast<volatile int64_t*>(ptr))) {
      return old_value;
    }
    prev_value = *ptr;
  } while (prev_value == old_value);
  return prev_value;
}

inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
                                       Atomic64 old_value,
                                       Atomic64 new_value) {
  // The lib kern interface does not distinguish between
  // Acquire and Release memory barriers; they are equivalent.
  return Acquire_CompareAndSwap(ptr, old_value, new_value);
}

inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
  *ptr = value;
}

inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
  *ptr = value;
  MemoryBarrier();
}

inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
  MemoryBarrier();
  *ptr = value;
}

inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
  return *ptr;
}

inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
  Atomic64 value = *ptr;
  MemoryBarrier();
  return value;
}

inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
  MemoryBarrier();
  return *ptr;
}
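
// Illustrative sketch only: Barrier_AtomicIncrement returns the value
// after the addition, which makes it a natural building block for a
// process-wide monotonic ID generator.  ExampleNextId is hypothetical
// and is not defined or used anywhere else in protobuf.
inline Atomic64 ExampleNextId(volatile Atomic64* counter) {
  // The barrier form keeps the increment ordered with respect to
  // surrounding memory operations; every caller gets a distinct value.
  return Barrier_AtomicIncrement(counter, 1);
}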

#endif  // defined(__LP64__)

}  // namespace internal
}  // namespace protobuf
}  // namespace google

#endif  // GOOGLE_PROTOBUF_ATOMICOPS_INTERNALS_MACOSX_H_