// Copyright 2010 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

// This file is an internal atomic implementation, use atomicops.h instead.

#ifndef V8_BASE_ATOMICOPS_INTERNALS_X86_GCC_H_
#define V8_BASE_ATOMICOPS_INTERNALS_X86_GCC_H_

namespace v8 {
namespace base {

// This struct is not part of the public API of this module; clients may not
// use it.
// Features of this x86. Values may not be correct before main() is run,
// but are set conservatively.
struct AtomicOps_x86CPUFeatureStruct {
  bool has_amd_lock_mb_bug;  // Processor has AMD memory-barrier bug; do lfence
                             // after acquire compare-and-swap.
#if !defined(__SSE2__)
  bool has_sse2;             // Processor has SSE2.
#endif
};
extern struct AtomicOps_x86CPUFeatureStruct AtomicOps_Internalx86CPUFeatures;

#define ATOMICOPS_COMPILER_BARRIER() __asm__ __volatile__("" : : : "memory")

// 32-bit low-level operations on any platform.

inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
                                         Atomic32 old_value,
                                         Atomic32 new_value) {
  Atomic32 prev;
  __asm__ __volatile__("lock; cmpxchgl %1,%2"
                       : "=a" (prev)
                       : "q" (new_value), "m" (*ptr), "0" (old_value)
                       : "memory");
  return prev;
}

inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
                                         Atomic32 new_value) {
  __asm__ __volatile__("xchgl %1,%0"  // The lock prefix is implicit for xchg.
                       : "=r" (new_value)
                       : "m" (*ptr), "0" (new_value)
                       : "memory");
  return new_value;  // Now it's the previous value.
}

inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
                                          Atomic32 increment) {
  Atomic32 temp = increment;
  __asm__ __volatile__("lock; xaddl %0,%1"
                       : "+r" (temp), "+m" (*ptr)
                       : : "memory");
  // temp now holds the old value of *ptr
  return temp + increment;
}

inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
                                        Atomic32 increment) {
  Atomic32 temp = increment;
  __asm__ __volatile__("lock; xaddl %0,%1"
                       : "+r" (temp), "+m" (*ptr)
                       : : "memory");
  // temp now holds the old value of *ptr
  if (AtomicOps_Internalx86CPUFeatures.has_amd_lock_mb_bug) {
    __asm__ __volatile__("lfence" : : : "memory");
  }
  return temp + increment;
}

inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
                                       Atomic32 old_value,
                                       Atomic32 new_value) {
  Atomic32 x = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
  if (AtomicOps_Internalx86CPUFeatures.has_amd_lock_mb_bug) {
    __asm__ __volatile__("lfence" : : : "memory");
  }
  return x;
}

inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
                                       Atomic32 old_value,
                                       Atomic32 new_value) {
  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
}

inline void NoBarrier_Store(volatile Atomic8* ptr, Atomic8 value) {
  *ptr = value;
}

inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
  *ptr = value;
}
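
// Illustrative sketch, not part of this module's API: the compare-and-swap
// primitives above are typically used in a retry loop. The helper name
// AtomicOrViaCas is hypothetical and exists only for this comment.
//
//   inline Atomic32 AtomicOrViaCas(volatile Atomic32* ptr, Atomic32 bits) {
//     Atomic32 old_value, new_value;
//     do {
//       old_value = NoBarrier_Load(ptr);  // defined later in this file
//       new_value = old_value | bits;
//       // cmpxchg succeeds only if *ptr still equals old_value; otherwise
//       // it returns the value another thread wrote, and the loop retries.
//     } while (NoBarrier_CompareAndSwap(ptr, old_value, new_value) !=
//              old_value);
//     return new_value;
//   }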

#if defined(__x86_64__) || defined(__SSE2__)

// 64-bit implementations of memory barrier can be simpler, because
// "mfence" is guaranteed to exist.
inline void MemoryBarrier() {
  __asm__ __volatile__("mfence" : : : "memory");
}

inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
  *ptr = value;
  MemoryBarrier();
}

#else

inline void MemoryBarrier() {
  if (AtomicOps_Internalx86CPUFeatures.has_sse2) {
    __asm__ __volatile__("mfence" : : : "memory");
  } else {  // mfence is faster but not present on PIII
    Atomic32 x = 0;
    NoBarrier_AtomicExchange(&x, 0);  // acts as a barrier on PIII
  }
}

inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
  if (AtomicOps_Internalx86CPUFeatures.has_sse2) {
    *ptr = value;
    __asm__ __volatile__("mfence" : : : "memory");
  } else {
    NoBarrier_AtomicExchange(ptr, value);  // acts as a barrier on PIII
  }
}
#endif

inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
  ATOMICOPS_COMPILER_BARRIER();
  *ptr = value;  // An x86 store acts as a release barrier.
  // See comments in Atomic64 version of Release_Store(), below.
}

inline Atomic8 NoBarrier_Load(volatile const Atomic8* ptr) {
  return *ptr;
}

inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
  return *ptr;
}

inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
  Atomic32 value = *ptr;  // An x86 load acts as an acquire barrier.
  // See comments in Atomic64 version of Release_Store(), below.
  ATOMICOPS_COMPILER_BARRIER();
  return value;
}

inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
  MemoryBarrier();
  return *ptr;
}
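
// Illustrative sketch, not part of this module's API: the intended pairing of
// Release_Store() and Acquire_Load() for publishing data between threads.
// The names g_payload and g_ready are hypothetical.
//
//   int g_payload = 0;
//   Atomic32 g_ready = 0;
//
//   // Producer thread:
//   g_payload = 42;
//   Release_Store(&g_ready, 1);  // all prior writes become visible first
//
//   // Consumer thread:
//   if (Acquire_Load(&g_ready) == 1) {
//     int v = g_payload;  // guaranteed to observe 42
//   }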

#if defined(__x86_64__) && defined(V8_HOST_ARCH_64_BIT)

// 64-bit low-level operations on 64-bit platform.

inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
                                         Atomic64 old_value,
                                         Atomic64 new_value) {
  Atomic64 prev;
  __asm__ __volatile__("lock; cmpxchgq %1,%2"
                       : "=a" (prev)
                       : "q" (new_value), "m" (*ptr), "0" (old_value)
                       : "memory");
  return prev;
}

inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
                                         Atomic64 new_value) {
  __asm__ __volatile__("xchgq %1,%0"  // The lock prefix is implicit for xchg.
                       : "=r" (new_value)
                       : "m" (*ptr), "0" (new_value)
                       : "memory");
  return new_value;  // Now it's the previous value.
}

inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
                                          Atomic64 increment) {
  Atomic64 temp = increment;
  __asm__ __volatile__("lock; xaddq %0,%1"
                       : "+r" (temp), "+m" (*ptr)
                       : : "memory");
  // temp now contains the previous value of *ptr
  return temp + increment;
}

inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
                                        Atomic64 increment) {
  Atomic64 temp = increment;
  __asm__ __volatile__("lock; xaddq %0,%1"
                       : "+r" (temp), "+m" (*ptr)
                       : : "memory");
  // temp now contains the previous value of *ptr
  if (AtomicOps_Internalx86CPUFeatures.has_amd_lock_mb_bug) {
    __asm__ __volatile__("lfence" : : : "memory");
  }
  return temp + increment;
}

inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
  *ptr = value;
}

inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
  *ptr = value;
  MemoryBarrier();
}

inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
  ATOMICOPS_COMPILER_BARRIER();

  *ptr = value;  // An x86 store acts as a release barrier
                 // for current AMD/Intel chips as of Jan 2008.
                 // See also Acquire_Load(), below.

  // When new chips come out, check:
  //   IA-32 Intel Architecture Software Developer's Manual, Volume 3:
  //   System Programming Guide, Chapter 7: Multiple-processor management,
  //   Section 7.2, Memory Ordering.
  // Last seen at:
  //   http://developer.intel.com/design/pentium4/manuals/index_new.htm
  //
  // x86 stores/loads fail to act as barriers for a few instructions (clflush
  // maskmovdqu maskmovq movntdq movnti movntpd movntps movntq) but these are
  // not generated by the compiler, and are rare. Users of these instructions
  // need to know about cache behaviour in any case since all of these involve
  // either flushing cache lines or non-temporal cache hints.
}

inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
  return *ptr;
}

inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
  Atomic64 value = *ptr;  // An x86 load acts as an acquire barrier,
                          // for current AMD/Intel chips as of Jan 2008.
                          // See also Release_Store(), above.
  ATOMICOPS_COMPILER_BARRIER();
  return value;
}

inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
  MemoryBarrier();
  return *ptr;
}

inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
                                       Atomic64 old_value,
                                       Atomic64 new_value) {
  Atomic64 x = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
  if (AtomicOps_Internalx86CPUFeatures.has_amd_lock_mb_bug) {
    __asm__ __volatile__("lfence" : : : "memory");
  }
  return x;
}

inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
                                       Atomic64 old_value,
                                       Atomic64 new_value) {
  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
}

#endif  // defined(__x86_64__) && defined(V8_HOST_ARCH_64_BIT)

}  // namespace base
}  // namespace v8

#undef ATOMICOPS_COMPILER_BARRIER

#endif  // V8_BASE_ATOMICOPS_INTERNALS_X86_GCC_H_
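
// Illustrative sketch, not part of this module's API: a minimal spinlock
// built from Acquire_CompareAndSwap() and Release_Store(), showing why the
// acquire variant (with its lfence workaround on affected AMD parts) is the
// right choice for lock acquisition. The type name ExampleSpinLock is
// hypothetical.
//
//   struct ExampleSpinLock {
//     v8::base::Atomic32 state;  // 0 == unlocked, 1 == locked.
//     void Lock() {
//       while (v8::base::Acquire_CompareAndSwap(&state, 0, 1) != 0) {
//         // Spin until the lock is observed free and then acquired.
//       }
//     }
//     void Unlock() { v8::base::Release_Store(&state, 0); }
//   };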