/*
 * Copyright (C) 2008 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_ATOMIC_H_
#define ART_RUNTIME_ATOMIC_H_

#include <stdint.h>
#include <atomic>
#include <limits>
#include <vector>

#include "base/logging.h"
#include "base/macros.h"

namespace art {

class Mutex;

// QuasiAtomic encapsulates two separate facilities that we are
// trying to move away from: "quasiatomic" 64 bit operations
// and custom memory fences. For the time being, they remain
// exposed. Clients should be converted to use either class Atomic
// below whenever possible, and should eventually use C++11 atomics.
// The two facilities that do not have a good C++11 analog are
// ThreadFenceForConstructor and Atomic::*JavaData.
//
// NOTE: Two "quasiatomic" operations on the exact same memory address
// are guaranteed to operate atomically with respect to each other,
// but no guarantees are made about quasiatomic operations mixed with
// non-quasiatomic operations on the same address, nor about
// quasiatomic operations that are performed on partially-overlapping
// memory.
class QuasiAtomic {
  // On 32-bit MIPS there is no native 64-bit atomic support, so 64-bit
  // operations are serialized through a striped set of mutexes instead.
#if defined(__mips__) && !defined(__LP64__)
  static constexpr bool kNeedSwapMutexes = true;
#elif defined(__mips__) && defined(__LP64__)
  // TODO - mips64 still need this for Cas64 ???
  static constexpr bool kNeedSwapMutexes = true;
#else
  static constexpr bool kNeedSwapMutexes = false;
#endif

 public:
  // Initializes the swap-mutex table (only needed when kNeedSwapMutexes).
  static void Startup();

  // Tears down whatever Startup() created.
  static void Shutdown();

  // Reads the 64-bit value at "addr" without tearing.
  static int64_t Read64(volatile const int64_t* addr) {
    if (!kNeedSwapMutexes) {
      int64_t value;
#if defined(__LP64__)
      // A 64-bit load is naturally single-copy atomic on LP64 targets.
      value = *addr;
#else
#if defined(__arm__)
#if defined(__ARM_FEATURE_LPAE)
      // With LPAE support (such as Cortex-A15) then ldrd is defined not to tear.
      __asm__ __volatile__("@ QuasiAtomic::Read64\n"
                           "ldrd %0, %H0, %1"
                           : "=r" (value)
                           : "m" (*addr));
#else
      // Exclusive loads are defined not to tear, clearing the exclusive state isn't necessary.
      __asm__ __volatile__("@ QuasiAtomic::Read64\n"
                           "ldrexd %0, %H0, %1"
                           : "=r" (value)
                           : "Q" (*addr));
#endif
#elif defined(__i386__)
      // On x86-32 an 8-byte SSE load/store does not tear.
      __asm__ __volatile__(
          "movq %1, %0\n"
          : "=x" (value)
          : "m" (*addr));
#else
      LOG(FATAL) << "Unsupported architecture";
#endif
#endif  // defined(__LP64__)
      return value;
    } else {
      return SwapMutexRead64(addr);
    }
  }

  // Writes to the 64-bit value at "addr" without tearing.
  static void Write64(volatile int64_t* addr, int64_t value) {
    if (!kNeedSwapMutexes) {
#if defined(__LP64__)
      // A 64-bit store is naturally single-copy atomic on LP64 targets.
      *addr = value;
#else
#if defined(__arm__)
#if defined(__ARM_FEATURE_LPAE)
      // If we know that ARM architecture has LPAE (such as Cortex-A15) strd is defined not to tear.
      __asm__ __volatile__("@ QuasiAtomic::Write64\n"
                           "strd %1, %H1, %0"
                           : "=m"(*addr)
                           : "r" (value));
#else
      // The write is done as a swap so that the cache-line is in the exclusive state for the store.
      int64_t prev;
      int status;
      do {
        __asm__ __volatile__("@ QuasiAtomic::Write64\n"
                             "ldrexd %0, %H0, %2\n"
                             "strexd %1, %3, %H3, %2"
                             : "=&r" (prev), "=&r" (status), "+Q"(*addr)
                             : "r" (value)
                             : "cc");
      } while (UNLIKELY(status != 0));  // strexd sets status != 0 if the exclusive monitor was lost.
#endif
#elif defined(__i386__)
      __asm__ __volatile__(
          "movq %1, %0"
          : "=m" (*addr)
          : "x" (value));
#else
      LOG(FATAL) << "Unsupported architecture";
#endif
#endif  // defined(__LP64__)
    } else {
      SwapMutexWrite64(addr, value);
    }
  }

  // Atomically compare the value at "addr" to "old_value", if equal replace it with "new_value"
  // and return true. Otherwise, don't swap, and return false.
  // This is fully ordered, i.e. it has C++11 memory_order_seq_cst
  // semantics (assuming all other accesses use a mutex if this one does).
  // This has "strong" semantics; if it fails then it is guaranteed that
  // at some point during the execution of Cas64, *addr was not equal to
  // old_value.
  static bool Cas64(int64_t old_value, int64_t new_value, volatile int64_t* addr) {
    if (!kNeedSwapMutexes) {
      return __sync_bool_compare_and_swap(addr, old_value, new_value);
    } else {
      return SwapMutexCas64(old_value, new_value, addr);
    }
  }

  // Does the architecture provide reasonable atomic long operations or do we fall back on mutexes?
  static bool LongAtomicsUseMutexes() {
    return kNeedSwapMutexes;
  }

  static void ThreadFenceAcquire() {
    std::atomic_thread_fence(std::memory_order_acquire);
  }

  static void ThreadFenceRelease() {
    std::atomic_thread_fence(std::memory_order_release);
  }

  // Fence used after object construction; on aarch64 a store-store barrier
  // ("dmb ishst") suffices, elsewhere fall back to a release fence.
  static void ThreadFenceForConstructor() {
#if defined(__aarch64__)
    __asm__ __volatile__("dmb ishst" : : : "memory");
#else
    std::atomic_thread_fence(std::memory_order_release);
#endif
  }

  static void ThreadFenceSequentiallyConsistent() {
    std::atomic_thread_fence(std::memory_order_seq_cst);
  }

 private:
  // Maps an address to one of the striped mutexes below.
  static Mutex* GetSwapMutex(const volatile int64_t* addr);
  static int64_t SwapMutexRead64(volatile const int64_t* addr);
  static void SwapMutexWrite64(volatile int64_t* addr, int64_t val);
  static bool SwapMutexCas64(int64_t old_value, int64_t new_value, volatile int64_t* addr);

  // We stripe across a bunch of different mutexes to reduce contention.
  static constexpr size_t kSwapMutexCount = 32;
  static std::vector<Mutex*>* gSwapMutexes;

  DISALLOW_COPY_AND_ASSIGN(QuasiAtomic);
};

// Thin wrapper over std::atomic<T> exposing ART's named memory-order
// operations. PACKED(sizeof(T)) keeps the wrapper the same size/alignment
// as T so it can overlay Java heap data.
template<typename T>
class PACKED(sizeof(T)) Atomic : public std::atomic<T> {
 public:
  Atomic<T>() : std::atomic<T>(0) { }

  explicit Atomic<T>(T value) : std::atomic<T>(value) { }

  // Load from memory without ordering or synchronization constraints.
1983035961cb41865b80b927546be0c708b6389cec6Hans Boehm T LoadRelaxed() const { 1993035961cb41865b80b927546be0c708b6389cec6Hans Boehm return this->load(std::memory_order_relaxed); 2003035961cb41865b80b927546be0c708b6389cec6Hans Boehm } 2013035961cb41865b80b927546be0c708b6389cec6Hans Boehm 2023035961cb41865b80b927546be0c708b6389cec6Hans Boehm // Word tearing allowed, but may race. 2033035961cb41865b80b927546be0c708b6389cec6Hans Boehm // TODO: Optimize? 2043035961cb41865b80b927546be0c708b6389cec6Hans Boehm // There has been some discussion of eventually disallowing word 2053035961cb41865b80b927546be0c708b6389cec6Hans Boehm // tearing for Java data loads. 2063035961cb41865b80b927546be0c708b6389cec6Hans Boehm T LoadJavaData() const { 2073035961cb41865b80b927546be0c708b6389cec6Hans Boehm return this->load(std::memory_order_relaxed); 2083035961cb41865b80b927546be0c708b6389cec6Hans Boehm } 2093035961cb41865b80b927546be0c708b6389cec6Hans Boehm 2103035961cb41865b80b927546be0c708b6389cec6Hans Boehm // Load from memory with a total ordering. 2113035961cb41865b80b927546be0c708b6389cec6Hans Boehm // Corresponds exactly to a Java volatile load. 2123035961cb41865b80b927546be0c708b6389cec6Hans Boehm T LoadSequentiallyConsistent() const { 2133035961cb41865b80b927546be0c708b6389cec6Hans Boehm return this->load(std::memory_order_seq_cst); 2143035961cb41865b80b927546be0c708b6389cec6Hans Boehm } 2153035961cb41865b80b927546be0c708b6389cec6Hans Boehm 2163035961cb41865b80b927546be0c708b6389cec6Hans Boehm // Store to memory without ordering or synchronization constraints. 2173035961cb41865b80b927546be0c708b6389cec6Hans Boehm void StoreRelaxed(T desired) { 2183035961cb41865b80b927546be0c708b6389cec6Hans Boehm this->store(desired, std::memory_order_relaxed); 2193035961cb41865b80b927546be0c708b6389cec6Hans Boehm } 2203035961cb41865b80b927546be0c708b6389cec6Hans Boehm 2213035961cb41865b80b927546be0c708b6389cec6Hans Boehm // Word tearing allowed, but may race. 
2223035961cb41865b80b927546be0c708b6389cec6Hans Boehm void StoreJavaData(T desired) { 2233035961cb41865b80b927546be0c708b6389cec6Hans Boehm this->store(desired, std::memory_order_relaxed); 2243035961cb41865b80b927546be0c708b6389cec6Hans Boehm } 2253035961cb41865b80b927546be0c708b6389cec6Hans Boehm 2263035961cb41865b80b927546be0c708b6389cec6Hans Boehm // Store to memory with release ordering. 2273035961cb41865b80b927546be0c708b6389cec6Hans Boehm void StoreRelease(T desired) { 2283035961cb41865b80b927546be0c708b6389cec6Hans Boehm this->store(desired, std::memory_order_release); 2293035961cb41865b80b927546be0c708b6389cec6Hans Boehm } 2303035961cb41865b80b927546be0c708b6389cec6Hans Boehm 2313035961cb41865b80b927546be0c708b6389cec6Hans Boehm // Store to memory with a total ordering. 2323035961cb41865b80b927546be0c708b6389cec6Hans Boehm void StoreSequentiallyConsistent(T desired) { 2333035961cb41865b80b927546be0c708b6389cec6Hans Boehm this->store(desired, std::memory_order_seq_cst); 2343035961cb41865b80b927546be0c708b6389cec6Hans Boehm } 2353035961cb41865b80b927546be0c708b6389cec6Hans Boehm 2363035961cb41865b80b927546be0c708b6389cec6Hans Boehm // Atomically replace the value with desired value if it matches the expected value. 2373035961cb41865b80b927546be0c708b6389cec6Hans Boehm // Participates in total ordering of atomic operations. 2383035961cb41865b80b927546be0c708b6389cec6Hans Boehm bool CompareExchangeStrongSequentiallyConsistent(T expected_value, T desired_value) { 2393035961cb41865b80b927546be0c708b6389cec6Hans Boehm return this->compare_exchange_strong(expected_value, desired_value, std::memory_order_seq_cst); 2403035961cb41865b80b927546be0c708b6389cec6Hans Boehm } 2413035961cb41865b80b927546be0c708b6389cec6Hans Boehm 2423035961cb41865b80b927546be0c708b6389cec6Hans Boehm // The same, except it may fail spuriously. 
2433035961cb41865b80b927546be0c708b6389cec6Hans Boehm bool CompareExchangeWeakSequentiallyConsistent(T expected_value, T desired_value) { 2443035961cb41865b80b927546be0c708b6389cec6Hans Boehm return this->compare_exchange_weak(expected_value, desired_value, std::memory_order_seq_cst); 2453035961cb41865b80b927546be0c708b6389cec6Hans Boehm } 2463035961cb41865b80b927546be0c708b6389cec6Hans Boehm 2473035961cb41865b80b927546be0c708b6389cec6Hans Boehm // Atomically replace the value with desired value if it matches the expected value. Doesn't 2483035961cb41865b80b927546be0c708b6389cec6Hans Boehm // imply ordering or synchronization constraints. 2493035961cb41865b80b927546be0c708b6389cec6Hans Boehm bool CompareExchangeStrongRelaxed(T expected_value, T desired_value) { 2503035961cb41865b80b927546be0c708b6389cec6Hans Boehm return this->compare_exchange_strong(expected_value, desired_value, std::memory_order_relaxed); 2513035961cb41865b80b927546be0c708b6389cec6Hans Boehm } 2523035961cb41865b80b927546be0c708b6389cec6Hans Boehm 2533035961cb41865b80b927546be0c708b6389cec6Hans Boehm // The same, except it may fail spuriously. 2543035961cb41865b80b927546be0c708b6389cec6Hans Boehm bool CompareExchangeWeakRelaxed(T expected_value, T desired_value) { 2553035961cb41865b80b927546be0c708b6389cec6Hans Boehm return this->compare_exchange_weak(expected_value, desired_value, std::memory_order_relaxed); 2563035961cb41865b80b927546be0c708b6389cec6Hans Boehm } 2573035961cb41865b80b927546be0c708b6389cec6Hans Boehm 2583035961cb41865b80b927546be0c708b6389cec6Hans Boehm // Atomically replace the value with desired value if it matches the expected value. Prior writes 2593035961cb41865b80b927546be0c708b6389cec6Hans Boehm // made to other memory locations by the thread that did the release become visible in this 2603035961cb41865b80b927546be0c708b6389cec6Hans Boehm // thread. 
2613035961cb41865b80b927546be0c708b6389cec6Hans Boehm bool CompareExchangeWeakAcquire(T expected_value, T desired_value) { 2623035961cb41865b80b927546be0c708b6389cec6Hans Boehm return this->compare_exchange_weak(expected_value, desired_value, std::memory_order_acquire); 2633035961cb41865b80b927546be0c708b6389cec6Hans Boehm } 2643035961cb41865b80b927546be0c708b6389cec6Hans Boehm 2653035961cb41865b80b927546be0c708b6389cec6Hans Boehm // Atomically replace the value with desired value if it matches the expected value. prior writes 2663035961cb41865b80b927546be0c708b6389cec6Hans Boehm // to other memory locations become visible to the threads that do a consume or an acquire on the 2673035961cb41865b80b927546be0c708b6389cec6Hans Boehm // same location. 2683035961cb41865b80b927546be0c708b6389cec6Hans Boehm bool CompareExchangeWeakRelease(T expected_value, T desired_value) { 2693035961cb41865b80b927546be0c708b6389cec6Hans Boehm return this->compare_exchange_weak(expected_value, desired_value, std::memory_order_release); 2703035961cb41865b80b927546be0c708b6389cec6Hans Boehm } 2713035961cb41865b80b927546be0c708b6389cec6Hans Boehm 2723035961cb41865b80b927546be0c708b6389cec6Hans Boehm T FetchAndAddSequentiallyConsistent(const T value) { 2733035961cb41865b80b927546be0c708b6389cec6Hans Boehm return this->fetch_add(value, std::memory_order_seq_cst); // Return old_value. 2743035961cb41865b80b927546be0c708b6389cec6Hans Boehm } 2753035961cb41865b80b927546be0c708b6389cec6Hans Boehm 2763035961cb41865b80b927546be0c708b6389cec6Hans Boehm T FetchAndSubSequentiallyConsistent(const T value) { 2773035961cb41865b80b927546be0c708b6389cec6Hans Boehm return this->fetch_sub(value, std::memory_order_seq_cst); // Return old value. 
2783035961cb41865b80b927546be0c708b6389cec6Hans Boehm } 2793035961cb41865b80b927546be0c708b6389cec6Hans Boehm 2808c1b5f71a8005743756206120624121d7678381fIan Rogers T FetchAndOrSequentiallyConsistent(const T value) { 2818c1b5f71a8005743756206120624121d7678381fIan Rogers return this->fetch_or(value, std::memory_order_seq_cst); // Return old_value. 2828c1b5f71a8005743756206120624121d7678381fIan Rogers } 2838c1b5f71a8005743756206120624121d7678381fIan Rogers 2848c1b5f71a8005743756206120624121d7678381fIan Rogers T FetchAndAndSequentiallyConsistent(const T value) { 2858c1b5f71a8005743756206120624121d7678381fIan Rogers return this->fetch_and(value, std::memory_order_seq_cst); // Return old_value. 2868c1b5f71a8005743756206120624121d7678381fIan Rogers } 2878c1b5f71a8005743756206120624121d7678381fIan Rogers 2883035961cb41865b80b927546be0c708b6389cec6Hans Boehm volatile T* Address() { 2893035961cb41865b80b927546be0c708b6389cec6Hans Boehm return reinterpret_cast<T*>(this); 2903035961cb41865b80b927546be0c708b6389cec6Hans Boehm } 2913035961cb41865b80b927546be0c708b6389cec6Hans Boehm 2923035961cb41865b80b927546be0c708b6389cec6Hans Boehm static T MaxValue() { 2933035961cb41865b80b927546be0c708b6389cec6Hans Boehm return std::numeric_limits<T>::max(); 2943035961cb41865b80b927546be0c708b6389cec6Hans Boehm } 2953035961cb41865b80b927546be0c708b6389cec6Hans Boehm}; 2963035961cb41865b80b927546be0c708b6389cec6Hans Boehm 2973035961cb41865b80b927546be0c708b6389cec6Hans Boehmtypedef Atomic<int32_t> AtomicInteger; 2983035961cb41865b80b927546be0c708b6389cec6Hans Boehm 299575e78c41ece0dec969d31f46be563d4eb7ae43bAndreas Gampestatic_assert(sizeof(AtomicInteger) == sizeof(int32_t), "Weird AtomicInteger size"); 300575e78c41ece0dec969d31f46be563d4eb7ae43bAndreas Gampestatic_assert(alignof(AtomicInteger) == alignof(int32_t), 301575e78c41ece0dec969d31f46be563d4eb7ae43bAndreas Gampe "AtomicInteger alignment differs from that of underlyingtype"); 302575e78c41ece0dec969d31f46be563d4eb7ae43bAndreas 
static_assert(sizeof(Atomic<int64_t>) == sizeof(int64_t), "Weird Atomic<int64> size");

// Assert the alignment of 64-bit integers is 64-bit. This isn't true on certain 32-bit
// architectures (e.g. x86-32) but we know that 64-bit integers here are arranged to be 8-byte
// aligned.
#if defined(__LP64__)
  static_assert(alignof(Atomic<int64_t>) == alignof(int64_t),
                "Atomic<int64> alignment differs from that of underlying type");
#endif

}  // namespace art

#endif  // ART_RUNTIME_ATOMIC_H_