// Copyright 2010 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

// This file is an internal atomic implementation, use atomicops.h instead.

#ifndef V8_BASE_ATOMICOPS_INTERNALS_X86_MSVC_H_
#define V8_BASE_ATOMICOPS_INTERNALS_X86_MSVC_H_

#include "src/base/macros.h"
#include "src/base/win32-headers.h"

#if defined(V8_HOST_ARCH_64_BIT)
// windows.h #defines this (only on x64). This causes problems because the
// public API also uses MemoryBarrier as the public name for this fence. So, on
// x64, #undef it and call its documented implementation
// (http://msdn.microsoft.com/en-us/library/windows/desktop/ms684208.aspx)
// directly.
#undef MemoryBarrier
#endif

namespace v8 {
namespace base {

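// Note: per the MSDN documentation, the Interlocked* functions used below each
// act as a full memory barrier on x86/x64. The NoBarrier_* names therefore
// describe the minimum ordering callers may rely on, not necessarily what the
// hardware provides.

// Atomically compares *ptr with old_value and, if they match, stores
// new_value. Returns the value *ptr held before the operation, mirroring
// InterlockedCompareExchange.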
inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
                                         Atomic32 old_value,
                                         Atomic32 new_value) {
  LONG result = InterlockedCompareExchange(
      reinterpret_cast<volatile LONG*>(ptr),
      static_cast<LONG>(new_value),
      static_cast<LONG>(old_value));
  return static_cast<Atomic32>(result);
}

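// Atomically stores new_value into *ptr and returns the value previously
// stored there.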
inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
                                         Atomic32 new_value) {
  LONG result = InterlockedExchange(
      reinterpret_cast<volatile LONG*>(ptr),
      static_cast<LONG>(new_value));
  return static_cast<Atomic32>(result);
}

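// InterlockedExchangeAdd returns the value *ptr held before the addition, so
// the increment is added back to produce the new value that
// Barrier_AtomicIncrement is specified to return.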
inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
                                        Atomic32 increment) {
  return InterlockedExchangeAdd(
      reinterpret_cast<volatile LONG*>(ptr),
      static_cast<LONG>(increment)) + increment;
}

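// No weaker atomic increment is available on this platform, so the fully
// fenced version is reused.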
inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
                                          Atomic32 increment) {
  return Barrier_AtomicIncrement(ptr, increment);
}

#if !(defined(_MSC_VER) && _MSC_VER >= 1400)
#error "We require at least Visual Studio 2005 for MemoryBarrier"
#endif
inline void MemoryBarrier() {
#if defined(V8_HOST_ARCH_64_BIT)
  // See #undef and note at the top of this file.
  __faststorefence();
#else
  // We use MemoryBarrier from WinNT.h
  ::MemoryBarrier();
#endif
}

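// On x86/x64, InterlockedCompareExchange already acts as a full barrier (see
// the note near the top of this namespace), so the plain compare-and-swap
// satisfies both acquire and release ordering.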
inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
                                       Atomic32 old_value,
                                       Atomic32 new_value) {
  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
}

inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
                                       Atomic32 old_value,
                                       Atomic32 new_value) {
  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
}

inline void NoBarrier_Store(volatile Atomic8* ptr, Atomic8 value) {
  *ptr = value;
}

inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
  *ptr = value;
}

inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
  // The exchange acts as a barrier in this implementation.
  NoBarrier_AtomicExchange(ptr, value);
}

inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
  *ptr = value;  // works w/o barrier for current Intel chips as of June 2005
  // See comments in the Atomic64 version of Release_Store() below.
}

inline Atomic8 NoBarrier_Load(volatile const Atomic8* ptr) {
  return *ptr;
}

inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
  return *ptr;
}

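// On x86, ordinary aligned loads already have acquire semantics at the
// hardware level; reading through the volatile pointer keeps the compiler from
// eliding or reordering the load itself.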
inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
  Atomic32 value = *ptr;
  return value;
}

inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
  MemoryBarrier();
  return *ptr;
}

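// Illustrative use only (a sketch; real callers go through atomicops.h, and
// the |data| / |ready| names are hypothetical): a writer publishes data with
// Release_Store and a reader observes it with Acquire_Load.
//
//   // Writer:
//   data = 42;                 // ordinary store
//   Release_Store(&ready, 1);  // prior writes become visible first
//
//   // Reader:
//   if (Acquire_Load(&ready) != 0) {
//     int observed = data;     // cannot be reordered above the flag load
//   }
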
#if defined(_WIN64)

// 64-bit low-level operations on 64-bit platform.

STATIC_ASSERT(sizeof(Atomic64) == sizeof(PVOID));

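// On _WIN64 a PVOID is 64 bits wide (checked by the STATIC_ASSERT above), so
// the pointer flavors of the Interlocked functions are reused for Atomic64.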
inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
                                         Atomic64 old_value,
                                         Atomic64 new_value) {
  PVOID result = InterlockedCompareExchangePointer(
      reinterpret_cast<volatile PVOID*>(ptr),
      reinterpret_cast<PVOID>(new_value), reinterpret_cast<PVOID>(old_value));
  return reinterpret_cast<Atomic64>(result);
}

inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
                                         Atomic64 new_value) {
  PVOID result = InterlockedExchangePointer(
      reinterpret_cast<volatile PVOID*>(ptr),
      reinterpret_cast<PVOID>(new_value));
  return reinterpret_cast<Atomic64>(result);
}

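// As in the 32-bit Barrier_AtomicIncrement above, InterlockedExchangeAdd64
// returns the pre-increment value, so the increment is added back to yield
// the new value.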
inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
                                        Atomic64 increment) {
  return InterlockedExchangeAdd64(
      reinterpret_cast<volatile LONGLONG*>(ptr),
      static_cast<LONGLONG>(increment)) + increment;
}

inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
                                          Atomic64 increment) {
  return Barrier_AtomicIncrement(ptr, increment);
}

inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
  *ptr = value;
}

inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
  // The exchange acts as a barrier in this implementation.
  NoBarrier_AtomicExchange(ptr, value);
}

inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
  *ptr = value;  // works w/o barrier for current Intel chips as of June 2005

  // When new chips come out, check:
  //  IA-32 Intel Architecture Software Developer's Manual, Volume 3:
  //  System Programming Guide, Chapter 7: Multiple-processor management,
  //  Section 7.2, Memory Ordering.
  // Last seen at:
  //   http://developer.intel.com/design/pentium4/manuals/index_new.htm
}

inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
  return *ptr;
}

inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
  Atomic64 value = *ptr;
  return value;
}

inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
  MemoryBarrier();
  return *ptr;
}

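// As with the 32-bit versions, the underlying compare-and-swap already
// provides a full barrier, so it serves for both orderings.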
inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
                                       Atomic64 old_value,
                                       Atomic64 new_value) {
  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
}

inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
                                       Atomic64 old_value,
                                       Atomic64 new_value) {
  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
}

#endif  // defined(_WIN64)

} }  // namespace v8::base

#endif  // V8_BASE_ATOMICOPS_INTERNALS_X86_MSVC_H_