//===-- sanitizer_atomic_clang_x86.h ----------------------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer/AddressSanitizer runtime.
// Not intended for direct inclusion. Include sanitizer_atomic.h.
//
//===----------------------------------------------------------------------===//

#ifndef SANITIZER_ATOMIC_CLANG_X86_H
#define SANITIZER_ATOMIC_CLANG_X86_H

namespace __sanitizer {

// Emits cnt PAUSE hints; intended for use inside spin-wait loops.
INLINE void proc_yield(int cnt) {
  __asm__ __volatile__("" ::: "memory");
  for (int i = 0; i < cnt; i++)
    __asm__ __volatile__("pause");
  __asm__ __volatile__("" ::: "memory");
}
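
// Usage sketch (illustrative only, not part of this header): callers
// typically pair proc_yield with an acquire load when spinning on a flag.
// The helper below is hypothetical; atomic_uint32_t and memory_order are
// defined in sanitizer_atomic.h.
//
//   void wait_for_flag(const volatile atomic_uint32_t *flag) {
//     while (atomic_load(flag, memory_order_acquire) == 0)
//       proc_yield(10);  // emit PAUSE hints while waiting
//   }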

template<typename T>
INLINE typename T::Type atomic_load(
    const volatile T *a, memory_order mo) {
  DCHECK(mo & (memory_order_relaxed | memory_order_consume
      | memory_order_acquire | memory_order_seq_cst));
  DCHECK(!((uptr)a % sizeof(*a)));
  typename T::Type v;

  if (sizeof(*a) < 8 || sizeof(void*) == 8) {
    // Assume that aligned loads are atomic.
    if (mo == memory_order_relaxed) {
      v = a->val_dont_use;
    } else if (mo == memory_order_consume) {
      // Assume that processor respects data dependencies
      // (and that compiler won't break them).
      __asm__ __volatile__("" ::: "memory");
      v = a->val_dont_use;
      __asm__ __volatile__("" ::: "memory");
    } else if (mo == memory_order_acquire) {
      __asm__ __volatile__("" ::: "memory");
      v = a->val_dont_use;
      // On x86 loads are implicitly acquire.
      __asm__ __volatile__("" ::: "memory");
    } else {  // seq_cst
      // On x86 a plain MOV load is enough for seq_cst, because seq_cst
      // stores are implemented with a full barrier (see atomic_store below).
      __asm__ __volatile__("" ::: "memory");
      v = a->val_dont_use;
      __asm__ __volatile__("" ::: "memory");
    }
  } else {
    // 64-bit load on 32-bit platform.
    __asm__ __volatile__(
        "movq %1, %%mm0;"  // Use mmx reg for 64-bit atomic moves
        "movq %%mm0, %0;"  // (ptr could be read-only)
        "emms;"            // Empty mmx state/Reset FP regs
        : "=m" (v)
        : "m" (a->val_dont_use)
        : // mark the FP stack and mmx registers as clobbered
          "st", "st(1)", "st(2)", "st(3)", "st(4)", "st(5)", "st(6)", "st(7)",
#ifdef __MMX__
          "mm0", "mm1", "mm2", "mm3", "mm4", "mm5", "mm6", "mm7",
#endif  // #ifdef __MMX__
          "memory");
  }
  return v;
}

template<typename T>
INLINE void atomic_store(volatile T *a, typename T::Type v, memory_order mo) {
  DCHECK(mo & (memory_order_relaxed | memory_order_release
      | memory_order_seq_cst));
  DCHECK(!((uptr)a % sizeof(*a)));

  if (sizeof(*a) < 8 || sizeof(void*) == 8) {
    // Assume that aligned stores are atomic.
    if (mo == memory_order_relaxed) {
      a->val_dont_use = v;
    } else if (mo == memory_order_release) {
      // On x86 stores are implicitly release.
      __asm__ __volatile__("" ::: "memory");
      a->val_dont_use = v;
      __asm__ __volatile__("" ::: "memory");
    } else {  // seq_cst
      // On x86 stores are implicitly release;
      // a full barrier after the store upgrades it to seq_cst.
      __asm__ __volatile__("" ::: "memory");
      a->val_dont_use = v;
      __sync_synchronize();
    }
  } else {
    // 64-bit store on 32-bit platform.
    __asm__ __volatile__(
        "movq %1, %%mm0;"  // Use mmx reg for 64-bit atomic moves
        "movq %%mm0, %0;"
        "emms;"            // Empty mmx state/Reset FP regs
        : "=m" (a->val_dont_use)
        : "m" (v)
        : // mark the FP stack and mmx registers as clobbered
          "st", "st(1)", "st(2)", "st(3)", "st(4)", "st(5)", "st(6)", "st(7)",
#ifdef __MMX__
          "mm0", "mm1", "mm2", "mm3", "mm4", "mm5", "mm6", "mm7",
#endif  // #ifdef __MMX__
          "memory");
    if (mo == memory_order_seq_cst)
      __sync_synchronize();
  }
}
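
// Usage sketch (illustrative only, not part of this header): a release store
// paired with the acquire load above publishes data between threads. The
// names below are hypothetical; atomic_uint32_t comes from sanitizer_atomic.h.
//
//   int payload;
//   atomic_uint32_t ready;  // zero-initialized
//
//   void publisher() {
//     payload = 42;
//     atomic_store(&ready, 1, memory_order_release);
//   }
//
//   void consumer() {
//     while (atomic_load(&ready, memory_order_acquire) == 0)
//       proc_yield(10);
//     // payload is now guaranteed to be visible as 42.
//   }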

}  // namespace __sanitizer

#endif  // #ifndef SANITIZER_ATOMIC_CLANG_X86_H