/*
 * Copyright (C) 2008 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_ATOMIC_H_
#define ART_RUNTIME_ATOMIC_H_

#include <stdint.h>
#include <atomic>
#include <limits>
#include <vector>

#include "arch/instruction_set.h"
#include "base/logging.h"
#include "base/macros.h"

namespace art {

class Mutex;

// QuasiAtomic encapsulates two separate facilities that we are
// trying to move away from: "quasiatomic" 64 bit operations
// and custom memory fences. For the time being, they remain
// exposed. Clients should be converted to use either class Atomic
// below whenever possible, and should eventually use C++11 atomics.
383035961cb41865b80b927546be0c708b6389cec6Hans Boehm// The two facilities that do not have a good C++11 analog are 393035961cb41865b80b927546be0c708b6389cec6Hans Boehm// ThreadFenceForConstructor and Atomic::*JavaData. 403035961cb41865b80b927546be0c708b6389cec6Hans Boehm// 417c6169de901fd0a39c8e0c078874dc25207f5b59Elliott Hughes// NOTE: Two "quasiatomic" operations on the exact same memory address 427c6169de901fd0a39c8e0c078874dc25207f5b59Elliott Hughes// are guaranteed to operate atomically with respect to each other, 437c6169de901fd0a39c8e0c078874dc25207f5b59Elliott Hughes// but no guarantees are made about quasiatomic operations mixed with 447c6169de901fd0a39c8e0c078874dc25207f5b59Elliott Hughes// non-quasiatomic operations on the same address, nor about 457c6169de901fd0a39c8e0c078874dc25207f5b59Elliott Hughes// quasiatomic operations that are performed on partially-overlapping 467c6169de901fd0a39c8e0c078874dc25207f5b59Elliott Hughes// memory. 477c6169de901fd0a39c8e0c078874dc25207f5b59Elliott Hughesclass QuasiAtomic { 480866f4ed6338faa4a193b7e819fc7cd72bd7b0aeAndreas Gampe static constexpr bool NeedSwapMutexes(InstructionSet isa) { 490866f4ed6338faa4a193b7e819fc7cd72bd7b0aeAndreas Gampe // TODO - mips64 still need this for Cas64 ??? 500866f4ed6338faa4a193b7e819fc7cd72bd7b0aeAndreas Gampe return (isa == kMips) || (isa == kMips64); 510866f4ed6338faa4a193b7e819fc7cd72bd7b0aeAndreas Gampe } 52b122a4bbed34ab22b4c1541ee25e5cf22f12a926Ian Rogers 537c6169de901fd0a39c8e0c078874dc25207f5b59Elliott Hughes public: 547c6169de901fd0a39c8e0c078874dc25207f5b59Elliott Hughes static void Startup(); 557c6169de901fd0a39c8e0c078874dc25207f5b59Elliott Hughes 567c6169de901fd0a39c8e0c078874dc25207f5b59Elliott Hughes static void Shutdown(); 577c6169de901fd0a39c8e0c078874dc25207f5b59Elliott Hughes 589adbff5b85fcae2b3e2443344415f6c17ea3ba0aIan Rogers // Reads the 64-bit value at "addr" without tearing. 
59b122a4bbed34ab22b4c1541ee25e5cf22f12a926Ian Rogers static int64_t Read64(volatile const int64_t* addr) { 600866f4ed6338faa4a193b7e819fc7cd72bd7b0aeAndreas Gampe if (!NeedSwapMutexes(kRuntimeISA)) { 61a984454098971739a1469d62cba02cda3600268bIan Rogers int64_t value; 62a984454098971739a1469d62cba02cda3600268bIan Rogers#if defined(__LP64__) 63a984454098971739a1469d62cba02cda3600268bIan Rogers value = *addr; 64a984454098971739a1469d62cba02cda3600268bIan Rogers#else 65a984454098971739a1469d62cba02cda3600268bIan Rogers#if defined(__arm__) 66a984454098971739a1469d62cba02cda3600268bIan Rogers#if defined(__ARM_FEATURE_LPAE) 67a984454098971739a1469d62cba02cda3600268bIan Rogers // With LPAE support (such as Cortex-A15) then ldrd is defined not to tear. 68a984454098971739a1469d62cba02cda3600268bIan Rogers __asm__ __volatile__("@ QuasiAtomic::Read64\n" 69a984454098971739a1469d62cba02cda3600268bIan Rogers "ldrd %0, %H0, %1" 70a984454098971739a1469d62cba02cda3600268bIan Rogers : "=r" (value) 71a984454098971739a1469d62cba02cda3600268bIan Rogers : "m" (*addr)); 72a984454098971739a1469d62cba02cda3600268bIan Rogers#else 73a984454098971739a1469d62cba02cda3600268bIan Rogers // Exclusive loads are defined not to tear, clearing the exclusive state isn't necessary. 
74a984454098971739a1469d62cba02cda3600268bIan Rogers __asm__ __volatile__("@ QuasiAtomic::Read64\n" 75a984454098971739a1469d62cba02cda3600268bIan Rogers "ldrexd %0, %H0, %1" 76a984454098971739a1469d62cba02cda3600268bIan Rogers : "=r" (value) 77a984454098971739a1469d62cba02cda3600268bIan Rogers : "Q" (*addr)); 78a984454098971739a1469d62cba02cda3600268bIan Rogers#endif 79a984454098971739a1469d62cba02cda3600268bIan Rogers#elif defined(__i386__) 80a984454098971739a1469d62cba02cda3600268bIan Rogers __asm__ __volatile__( 81a984454098971739a1469d62cba02cda3600268bIan Rogers "movq %1, %0\n" 82a984454098971739a1469d62cba02cda3600268bIan Rogers : "=x" (value) 83a984454098971739a1469d62cba02cda3600268bIan Rogers : "m" (*addr)); 84a984454098971739a1469d62cba02cda3600268bIan Rogers#else 85a984454098971739a1469d62cba02cda3600268bIan Rogers LOG(FATAL) << "Unsupported architecture"; 86a984454098971739a1469d62cba02cda3600268bIan Rogers#endif 87a984454098971739a1469d62cba02cda3600268bIan Rogers#endif // defined(__LP64__) 88a984454098971739a1469d62cba02cda3600268bIan Rogers return value; 89b122a4bbed34ab22b4c1541ee25e5cf22f12a926Ian Rogers } else { 90b122a4bbed34ab22b4c1541ee25e5cf22f12a926Ian Rogers return SwapMutexRead64(addr); 91b122a4bbed34ab22b4c1541ee25e5cf22f12a926Ian Rogers } 92b122a4bbed34ab22b4c1541ee25e5cf22f12a926Ian Rogers } 937c6169de901fd0a39c8e0c078874dc25207f5b59Elliott Hughes 949adbff5b85fcae2b3e2443344415f6c17ea3ba0aIan Rogers // Writes to the 64-bit value at "addr" without tearing. 
95a984454098971739a1469d62cba02cda3600268bIan Rogers static void Write64(volatile int64_t* addr, int64_t value) { 960866f4ed6338faa4a193b7e819fc7cd72bd7b0aeAndreas Gampe if (!NeedSwapMutexes(kRuntimeISA)) { 97a984454098971739a1469d62cba02cda3600268bIan Rogers#if defined(__LP64__) 98a984454098971739a1469d62cba02cda3600268bIan Rogers *addr = value; 99a984454098971739a1469d62cba02cda3600268bIan Rogers#else 100a984454098971739a1469d62cba02cda3600268bIan Rogers#if defined(__arm__) 101a984454098971739a1469d62cba02cda3600268bIan Rogers#if defined(__ARM_FEATURE_LPAE) 102a984454098971739a1469d62cba02cda3600268bIan Rogers // If we know that ARM architecture has LPAE (such as Cortex-A15) strd is defined not to tear. 103a984454098971739a1469d62cba02cda3600268bIan Rogers __asm__ __volatile__("@ QuasiAtomic::Write64\n" 104a984454098971739a1469d62cba02cda3600268bIan Rogers "strd %1, %H1, %0" 105a984454098971739a1469d62cba02cda3600268bIan Rogers : "=m"(*addr) 106a984454098971739a1469d62cba02cda3600268bIan Rogers : "r" (value)); 107a984454098971739a1469d62cba02cda3600268bIan Rogers#else 108a984454098971739a1469d62cba02cda3600268bIan Rogers // The write is done as a swap so that the cache-line is in the exclusive state for the store. 
109a984454098971739a1469d62cba02cda3600268bIan Rogers int64_t prev; 110a984454098971739a1469d62cba02cda3600268bIan Rogers int status; 111a984454098971739a1469d62cba02cda3600268bIan Rogers do { 112a984454098971739a1469d62cba02cda3600268bIan Rogers __asm__ __volatile__("@ QuasiAtomic::Write64\n" 113a984454098971739a1469d62cba02cda3600268bIan Rogers "ldrexd %0, %H0, %2\n" 114a984454098971739a1469d62cba02cda3600268bIan Rogers "strexd %1, %3, %H3, %2" 115a984454098971739a1469d62cba02cda3600268bIan Rogers : "=&r" (prev), "=&r" (status), "+Q"(*addr) 116a984454098971739a1469d62cba02cda3600268bIan Rogers : "r" (value) 117a984454098971739a1469d62cba02cda3600268bIan Rogers : "cc"); 118a984454098971739a1469d62cba02cda3600268bIan Rogers } while (UNLIKELY(status != 0)); 119a984454098971739a1469d62cba02cda3600268bIan Rogers#endif 120a984454098971739a1469d62cba02cda3600268bIan Rogers#elif defined(__i386__) 121a984454098971739a1469d62cba02cda3600268bIan Rogers __asm__ __volatile__( 122a984454098971739a1469d62cba02cda3600268bIan Rogers "movq %1, %0" 123a984454098971739a1469d62cba02cda3600268bIan Rogers : "=m" (*addr) 124a984454098971739a1469d62cba02cda3600268bIan Rogers : "x" (value)); 125a984454098971739a1469d62cba02cda3600268bIan Rogers#else 126a984454098971739a1469d62cba02cda3600268bIan Rogers LOG(FATAL) << "Unsupported architecture"; 127a984454098971739a1469d62cba02cda3600268bIan Rogers#endif 128a984454098971739a1469d62cba02cda3600268bIan Rogers#endif // defined(__LP64__) 129b122a4bbed34ab22b4c1541ee25e5cf22f12a926Ian Rogers } else { 130a984454098971739a1469d62cba02cda3600268bIan Rogers SwapMutexWrite64(addr, value); 131b122a4bbed34ab22b4c1541ee25e5cf22f12a926Ian Rogers } 132b122a4bbed34ab22b4c1541ee25e5cf22f12a926Ian Rogers } 1337c6169de901fd0a39c8e0c078874dc25207f5b59Elliott Hughes 1349adbff5b85fcae2b3e2443344415f6c17ea3ba0aIan Rogers // Atomically compare the value at "addr" to "old_value", if equal replace it with "new_value" 1359adbff5b85fcae2b3e2443344415f6c17ea3ba0aIan 
Rogers // and return true. Otherwise, don't swap, and return false. 1363035961cb41865b80b927546be0c708b6389cec6Hans Boehm // This is fully ordered, i.e. it has C++11 memory_order_seq_cst 1373035961cb41865b80b927546be0c708b6389cec6Hans Boehm // semantics (assuming all other accesses use a mutex if this one does). 1383035961cb41865b80b927546be0c708b6389cec6Hans Boehm // This has "strong" semantics; if it fails then it is guaranteed that 1393035961cb41865b80b927546be0c708b6389cec6Hans Boehm // at some point during the execution of Cas64, *addr was not equal to 1403035961cb41865b80b927546be0c708b6389cec6Hans Boehm // old_value. 141b122a4bbed34ab22b4c1541ee25e5cf22f12a926Ian Rogers static bool Cas64(int64_t old_value, int64_t new_value, volatile int64_t* addr) { 1420866f4ed6338faa4a193b7e819fc7cd72bd7b0aeAndreas Gampe if (!NeedSwapMutexes(kRuntimeISA)) { 143b122a4bbed34ab22b4c1541ee25e5cf22f12a926Ian Rogers return __sync_bool_compare_and_swap(addr, old_value, new_value); 144b122a4bbed34ab22b4c1541ee25e5cf22f12a926Ian Rogers } else { 145b122a4bbed34ab22b4c1541ee25e5cf22f12a926Ian Rogers return SwapMutexCas64(old_value, new_value, addr); 146b122a4bbed34ab22b4c1541ee25e5cf22f12a926Ian Rogers } 147b122a4bbed34ab22b4c1541ee25e5cf22f12a926Ian Rogers } 1487c6169de901fd0a39c8e0c078874dc25207f5b59Elliott Hughes 1499adbff5b85fcae2b3e2443344415f6c17ea3ba0aIan Rogers // Does the architecture provide reasonable atomic long operations or do we fall back on mutexes? 
1500866f4ed6338faa4a193b7e819fc7cd72bd7b0aeAndreas Gampe static bool LongAtomicsUseMutexes(InstructionSet isa) { 1510866f4ed6338faa4a193b7e819fc7cd72bd7b0aeAndreas Gampe return NeedSwapMutexes(isa); 152b122a4bbed34ab22b4c1541ee25e5cf22f12a926Ian Rogers } 153b122a4bbed34ab22b4c1541ee25e5cf22f12a926Ian Rogers 154a1ec065a4c5504d0619bde95e4da93c0564eafdbHans Boehm static void ThreadFenceAcquire() { 1553035961cb41865b80b927546be0c708b6389cec6Hans Boehm std::atomic_thread_fence(std::memory_order_acquire); 1563035961cb41865b80b927546be0c708b6389cec6Hans Boehm } 1573035961cb41865b80b927546be0c708b6389cec6Hans Boehm 158a1ec065a4c5504d0619bde95e4da93c0564eafdbHans Boehm static void ThreadFenceRelease() { 1593035961cb41865b80b927546be0c708b6389cec6Hans Boehm std::atomic_thread_fence(std::memory_order_release); 1603035961cb41865b80b927546be0c708b6389cec6Hans Boehm } 1613035961cb41865b80b927546be0c708b6389cec6Hans Boehm 1623035961cb41865b80b927546be0c708b6389cec6Hans Boehm static void ThreadFenceForConstructor() { 1633035961cb41865b80b927546be0c708b6389cec6Hans Boehm #if defined(__aarch64__) 1643035961cb41865b80b927546be0c708b6389cec6Hans Boehm __asm__ __volatile__("dmb ishst" : : : "memory"); 1653035961cb41865b80b927546be0c708b6389cec6Hans Boehm #else 1663035961cb41865b80b927546be0c708b6389cec6Hans Boehm std::atomic_thread_fence(std::memory_order_release); 1673035961cb41865b80b927546be0c708b6389cec6Hans Boehm #endif 1683035961cb41865b80b927546be0c708b6389cec6Hans Boehm } 1693035961cb41865b80b927546be0c708b6389cec6Hans Boehm 1703035961cb41865b80b927546be0c708b6389cec6Hans Boehm static void ThreadFenceSequentiallyConsistent() { 1713035961cb41865b80b927546be0c708b6389cec6Hans Boehm std::atomic_thread_fence(std::memory_order_seq_cst); 1723035961cb41865b80b927546be0c708b6389cec6Hans Boehm } 1733035961cb41865b80b927546be0c708b6389cec6Hans Boehm 1747c6169de901fd0a39c8e0c078874dc25207f5b59Elliott Hughes private: 175b122a4bbed34ab22b4c1541ee25e5cf22f12a926Ian Rogers static Mutex* 
GetSwapMutex(const volatile int64_t* addr); 176b122a4bbed34ab22b4c1541ee25e5cf22f12a926Ian Rogers static int64_t SwapMutexRead64(volatile const int64_t* addr); 177b122a4bbed34ab22b4c1541ee25e5cf22f12a926Ian Rogers static void SwapMutexWrite64(volatile int64_t* addr, int64_t val); 178b122a4bbed34ab22b4c1541ee25e5cf22f12a926Ian Rogers static bool SwapMutexCas64(int64_t old_value, int64_t new_value, volatile int64_t* addr); 179b122a4bbed34ab22b4c1541ee25e5cf22f12a926Ian Rogers 180b122a4bbed34ab22b4c1541ee25e5cf22f12a926Ian Rogers // We stripe across a bunch of different mutexes to reduce contention. 181b122a4bbed34ab22b4c1541ee25e5cf22f12a926Ian Rogers static constexpr size_t kSwapMutexCount = 32; 182b122a4bbed34ab22b4c1541ee25e5cf22f12a926Ian Rogers static std::vector<Mutex*>* gSwapMutexes; 183b122a4bbed34ab22b4c1541ee25e5cf22f12a926Ian Rogers 1847c6169de901fd0a39c8e0c078874dc25207f5b59Elliott Hughes DISALLOW_COPY_AND_ASSIGN(QuasiAtomic); 1857c6169de901fd0a39c8e0c078874dc25207f5b59Elliott Hughes}; 1865ea047b386c5dac78eda62305d14dedf7b5611a8Elliott Hughes 1873035961cb41865b80b927546be0c708b6389cec6Hans Boehmtemplate<typename T> 188aab0f86e3b079598d41c3a00bfa765a7589c5110Dan Albertclass PACKED(sizeof(T)) Atomic : public std::atomic<T> { 1893035961cb41865b80b927546be0c708b6389cec6Hans Boehm public: 1906a3f8d93ddf09b5f6667820089e488958cba8361Dan Albert Atomic<T>() : std::atomic<T>(0) { } 1913035961cb41865b80b927546be0c708b6389cec6Hans Boehm 1923035961cb41865b80b927546be0c708b6389cec6Hans Boehm explicit Atomic<T>(T value) : std::atomic<T>(value) { } 1933035961cb41865b80b927546be0c708b6389cec6Hans Boehm 1943035961cb41865b80b927546be0c708b6389cec6Hans Boehm // Load from memory without ordering or synchronization constraints. 
1953035961cb41865b80b927546be0c708b6389cec6Hans Boehm T LoadRelaxed() const { 1963035961cb41865b80b927546be0c708b6389cec6Hans Boehm return this->load(std::memory_order_relaxed); 1973035961cb41865b80b927546be0c708b6389cec6Hans Boehm } 1983035961cb41865b80b927546be0c708b6389cec6Hans Boehm 1994d77b6a511659f26fdc711e23825ffa6e7feed7aCalin Juravle // Load from memory with acquire ordering. 2004d77b6a511659f26fdc711e23825ffa6e7feed7aCalin Juravle T LoadAcquire() const { 2014d77b6a511659f26fdc711e23825ffa6e7feed7aCalin Juravle return this->load(std::memory_order_acquire); 2024d77b6a511659f26fdc711e23825ffa6e7feed7aCalin Juravle } 2034d77b6a511659f26fdc711e23825ffa6e7feed7aCalin Juravle 2043035961cb41865b80b927546be0c708b6389cec6Hans Boehm // Word tearing allowed, but may race. 2053035961cb41865b80b927546be0c708b6389cec6Hans Boehm // TODO: Optimize? 2063035961cb41865b80b927546be0c708b6389cec6Hans Boehm // There has been some discussion of eventually disallowing word 2073035961cb41865b80b927546be0c708b6389cec6Hans Boehm // tearing for Java data loads. 2083035961cb41865b80b927546be0c708b6389cec6Hans Boehm T LoadJavaData() const { 2093035961cb41865b80b927546be0c708b6389cec6Hans Boehm return this->load(std::memory_order_relaxed); 2103035961cb41865b80b927546be0c708b6389cec6Hans Boehm } 2113035961cb41865b80b927546be0c708b6389cec6Hans Boehm 2123035961cb41865b80b927546be0c708b6389cec6Hans Boehm // Load from memory with a total ordering. 2133035961cb41865b80b927546be0c708b6389cec6Hans Boehm // Corresponds exactly to a Java volatile load. 2143035961cb41865b80b927546be0c708b6389cec6Hans Boehm T LoadSequentiallyConsistent() const { 2153035961cb41865b80b927546be0c708b6389cec6Hans Boehm return this->load(std::memory_order_seq_cst); 2163035961cb41865b80b927546be0c708b6389cec6Hans Boehm } 2173035961cb41865b80b927546be0c708b6389cec6Hans Boehm 2183035961cb41865b80b927546be0c708b6389cec6Hans Boehm // Store to memory without ordering or synchronization constraints. 
2193035961cb41865b80b927546be0c708b6389cec6Hans Boehm void StoreRelaxed(T desired) { 2203035961cb41865b80b927546be0c708b6389cec6Hans Boehm this->store(desired, std::memory_order_relaxed); 2213035961cb41865b80b927546be0c708b6389cec6Hans Boehm } 2223035961cb41865b80b927546be0c708b6389cec6Hans Boehm 2233035961cb41865b80b927546be0c708b6389cec6Hans Boehm // Word tearing allowed, but may race. 2243035961cb41865b80b927546be0c708b6389cec6Hans Boehm void StoreJavaData(T desired) { 2253035961cb41865b80b927546be0c708b6389cec6Hans Boehm this->store(desired, std::memory_order_relaxed); 2263035961cb41865b80b927546be0c708b6389cec6Hans Boehm } 2273035961cb41865b80b927546be0c708b6389cec6Hans Boehm 2283035961cb41865b80b927546be0c708b6389cec6Hans Boehm // Store to memory with release ordering. 2293035961cb41865b80b927546be0c708b6389cec6Hans Boehm void StoreRelease(T desired) { 2303035961cb41865b80b927546be0c708b6389cec6Hans Boehm this->store(desired, std::memory_order_release); 2313035961cb41865b80b927546be0c708b6389cec6Hans Boehm } 2323035961cb41865b80b927546be0c708b6389cec6Hans Boehm 2333035961cb41865b80b927546be0c708b6389cec6Hans Boehm // Store to memory with a total ordering. 2343035961cb41865b80b927546be0c708b6389cec6Hans Boehm void StoreSequentiallyConsistent(T desired) { 2353035961cb41865b80b927546be0c708b6389cec6Hans Boehm this->store(desired, std::memory_order_seq_cst); 2363035961cb41865b80b927546be0c708b6389cec6Hans Boehm } 2373035961cb41865b80b927546be0c708b6389cec6Hans Boehm 238caaa2b05cf581d5c5fc4253723ddd3299b3c3e25Richard Uhler // Atomically replace the value with desired value. 
239caaa2b05cf581d5c5fc4253723ddd3299b3c3e25Richard Uhler T ExchangeRelaxed(T desired_value) { 240caaa2b05cf581d5c5fc4253723ddd3299b3c3e25Richard Uhler return this->exchange(desired_value, std::memory_order_relaxed); 241caaa2b05cf581d5c5fc4253723ddd3299b3c3e25Richard Uhler } 242caaa2b05cf581d5c5fc4253723ddd3299b3c3e25Richard Uhler 2433035961cb41865b80b927546be0c708b6389cec6Hans Boehm // Atomically replace the value with desired value if it matches the expected value. 2443035961cb41865b80b927546be0c708b6389cec6Hans Boehm // Participates in total ordering of atomic operations. 2453035961cb41865b80b927546be0c708b6389cec6Hans Boehm bool CompareExchangeStrongSequentiallyConsistent(T expected_value, T desired_value) { 2463035961cb41865b80b927546be0c708b6389cec6Hans Boehm return this->compare_exchange_strong(expected_value, desired_value, std::memory_order_seq_cst); 2473035961cb41865b80b927546be0c708b6389cec6Hans Boehm } 2483035961cb41865b80b927546be0c708b6389cec6Hans Boehm 2493035961cb41865b80b927546be0c708b6389cec6Hans Boehm // The same, except it may fail spuriously. 2503035961cb41865b80b927546be0c708b6389cec6Hans Boehm bool CompareExchangeWeakSequentiallyConsistent(T expected_value, T desired_value) { 2513035961cb41865b80b927546be0c708b6389cec6Hans Boehm return this->compare_exchange_weak(expected_value, desired_value, std::memory_order_seq_cst); 2523035961cb41865b80b927546be0c708b6389cec6Hans Boehm } 2533035961cb41865b80b927546be0c708b6389cec6Hans Boehm 2543035961cb41865b80b927546be0c708b6389cec6Hans Boehm // Atomically replace the value with desired value if it matches the expected value. Doesn't 2553035961cb41865b80b927546be0c708b6389cec6Hans Boehm // imply ordering or synchronization constraints. 
2563035961cb41865b80b927546be0c708b6389cec6Hans Boehm bool CompareExchangeStrongRelaxed(T expected_value, T desired_value) { 2573035961cb41865b80b927546be0c708b6389cec6Hans Boehm return this->compare_exchange_strong(expected_value, desired_value, std::memory_order_relaxed); 2583035961cb41865b80b927546be0c708b6389cec6Hans Boehm } 2593035961cb41865b80b927546be0c708b6389cec6Hans Boehm 260a1f20c3f8d0dabb9723acccf3ba760acf3ebe62dMathieu Chartier // Atomically replace the value with desired value if it matches the expected value. Prior writes 261a1f20c3f8d0dabb9723acccf3ba760acf3ebe62dMathieu Chartier // to other memory locations become visible to the threads that do a consume or an acquire on the 262a1f20c3f8d0dabb9723acccf3ba760acf3ebe62dMathieu Chartier // same location. 263a1f20c3f8d0dabb9723acccf3ba760acf3ebe62dMathieu Chartier bool CompareExchangeStrongRelease(T expected_value, T desired_value) { 264a1f20c3f8d0dabb9723acccf3ba760acf3ebe62dMathieu Chartier return this->compare_exchange_strong(expected_value, desired_value, std::memory_order_release); 265a1f20c3f8d0dabb9723acccf3ba760acf3ebe62dMathieu Chartier } 266a1f20c3f8d0dabb9723acccf3ba760acf3ebe62dMathieu Chartier 2673035961cb41865b80b927546be0c708b6389cec6Hans Boehm // The same, except it may fail spuriously. 2683035961cb41865b80b927546be0c708b6389cec6Hans Boehm bool CompareExchangeWeakRelaxed(T expected_value, T desired_value) { 2693035961cb41865b80b927546be0c708b6389cec6Hans Boehm return this->compare_exchange_weak(expected_value, desired_value, std::memory_order_relaxed); 2703035961cb41865b80b927546be0c708b6389cec6Hans Boehm } 2713035961cb41865b80b927546be0c708b6389cec6Hans Boehm 2723035961cb41865b80b927546be0c708b6389cec6Hans Boehm // Atomically replace the value with desired value if it matches the expected value. 
Prior writes 2733035961cb41865b80b927546be0c708b6389cec6Hans Boehm // made to other memory locations by the thread that did the release become visible in this 2743035961cb41865b80b927546be0c708b6389cec6Hans Boehm // thread. 2753035961cb41865b80b927546be0c708b6389cec6Hans Boehm bool CompareExchangeWeakAcquire(T expected_value, T desired_value) { 2763035961cb41865b80b927546be0c708b6389cec6Hans Boehm return this->compare_exchange_weak(expected_value, desired_value, std::memory_order_acquire); 2773035961cb41865b80b927546be0c708b6389cec6Hans Boehm } 2783035961cb41865b80b927546be0c708b6389cec6Hans Boehm 2793035961cb41865b80b927546be0c708b6389cec6Hans Boehm // Atomically replace the value with desired value if it matches the expected value. prior writes 2803035961cb41865b80b927546be0c708b6389cec6Hans Boehm // to other memory locations become visible to the threads that do a consume or an acquire on the 2813035961cb41865b80b927546be0c708b6389cec6Hans Boehm // same location. 2823035961cb41865b80b927546be0c708b6389cec6Hans Boehm bool CompareExchangeWeakRelease(T expected_value, T desired_value) { 2833035961cb41865b80b927546be0c708b6389cec6Hans Boehm return this->compare_exchange_weak(expected_value, desired_value, std::memory_order_release); 2843035961cb41865b80b927546be0c708b6389cec6Hans Boehm } 2853035961cb41865b80b927546be0c708b6389cec6Hans Boehm 2863035961cb41865b80b927546be0c708b6389cec6Hans Boehm T FetchAndAddSequentiallyConsistent(const T value) { 2873035961cb41865b80b927546be0c708b6389cec6Hans Boehm return this->fetch_add(value, std::memory_order_seq_cst); // Return old_value. 2883035961cb41865b80b927546be0c708b6389cec6Hans Boehm } 2893035961cb41865b80b927546be0c708b6389cec6Hans Boehm 290b0171b9573c446724c10c86d41887d0133590b6cHans Boehm T FetchAndAddRelaxed(const T value) { 291b0171b9573c446724c10c86d41887d0133590b6cHans Boehm return this->fetch_add(value, std::memory_order_relaxed); // Return old_value. 
292b0171b9573c446724c10c86d41887d0133590b6cHans Boehm } 293b0171b9573c446724c10c86d41887d0133590b6cHans Boehm 2943035961cb41865b80b927546be0c708b6389cec6Hans Boehm T FetchAndSubSequentiallyConsistent(const T value) { 2953035961cb41865b80b927546be0c708b6389cec6Hans Boehm return this->fetch_sub(value, std::memory_order_seq_cst); // Return old value. 2963035961cb41865b80b927546be0c708b6389cec6Hans Boehm } 2973035961cb41865b80b927546be0c708b6389cec6Hans Boehm 298caaa2b05cf581d5c5fc4253723ddd3299b3c3e25Richard Uhler T FetchAndSubRelaxed(const T value) { 299caaa2b05cf581d5c5fc4253723ddd3299b3c3e25Richard Uhler return this->fetch_sub(value, std::memory_order_relaxed); // Return old value. 300caaa2b05cf581d5c5fc4253723ddd3299b3c3e25Richard Uhler } 301caaa2b05cf581d5c5fc4253723ddd3299b3c3e25Richard Uhler 3028c1b5f71a8005743756206120624121d7678381fIan Rogers T FetchAndOrSequentiallyConsistent(const T value) { 3038c1b5f71a8005743756206120624121d7678381fIan Rogers return this->fetch_or(value, std::memory_order_seq_cst); // Return old_value. 3048c1b5f71a8005743756206120624121d7678381fIan Rogers } 3058c1b5f71a8005743756206120624121d7678381fIan Rogers 3068c1b5f71a8005743756206120624121d7678381fIan Rogers T FetchAndAndSequentiallyConsistent(const T value) { 3078c1b5f71a8005743756206120624121d7678381fIan Rogers return this->fetch_and(value, std::memory_order_seq_cst); // Return old_value. 
3088c1b5f71a8005743756206120624121d7678381fIan Rogers } 3098c1b5f71a8005743756206120624121d7678381fIan Rogers 3103035961cb41865b80b927546be0c708b6389cec6Hans Boehm volatile T* Address() { 3113035961cb41865b80b927546be0c708b6389cec6Hans Boehm return reinterpret_cast<T*>(this); 3123035961cb41865b80b927546be0c708b6389cec6Hans Boehm } 3133035961cb41865b80b927546be0c708b6389cec6Hans Boehm 3143035961cb41865b80b927546be0c708b6389cec6Hans Boehm static T MaxValue() { 3153035961cb41865b80b927546be0c708b6389cec6Hans Boehm return std::numeric_limits<T>::max(); 3163035961cb41865b80b927546be0c708b6389cec6Hans Boehm } 3173035961cb41865b80b927546be0c708b6389cec6Hans Boehm}; 3183035961cb41865b80b927546be0c708b6389cec6Hans Boehm 3193035961cb41865b80b927546be0c708b6389cec6Hans Boehmtypedef Atomic<int32_t> AtomicInteger; 3203035961cb41865b80b927546be0c708b6389cec6Hans Boehm 321575e78c41ece0dec969d31f46be563d4eb7ae43bAndreas Gampestatic_assert(sizeof(AtomicInteger) == sizeof(int32_t), "Weird AtomicInteger size"); 322575e78c41ece0dec969d31f46be563d4eb7ae43bAndreas Gampestatic_assert(alignof(AtomicInteger) == alignof(int32_t), 323575e78c41ece0dec969d31f46be563d4eb7ae43bAndreas Gampe "AtomicInteger alignment differs from that of underlyingtype"); 324575e78c41ece0dec969d31f46be563d4eb7ae43bAndreas Gampestatic_assert(sizeof(Atomic<int64_t>) == sizeof(int64_t), "Weird Atomic<int64> size"); 325aab0f86e3b079598d41c3a00bfa765a7589c5110Dan Albert 326aab0f86e3b079598d41c3a00bfa765a7589c5110Dan Albert// Assert the alignment of 64-bit integers is 64-bit. This isn't true on certain 32-bit 327aab0f86e3b079598d41c3a00bfa765a7589c5110Dan Albert// architectures (e.g. x86-32) but we know that 64-bit integers here are arranged to be 8-byte 328aab0f86e3b079598d41c3a00bfa765a7589c5110Dan Albert// aligned. 
3292f4a2edda128bbee5c6ba6ba7e3cbca9260368c2Hans Boehm#if defined(__LP64__) 330575e78c41ece0dec969d31f46be563d4eb7ae43bAndreas Gampe static_assert(alignof(Atomic<int64_t>) == alignof(int64_t), 331575e78c41ece0dec969d31f46be563d4eb7ae43bAndreas Gampe "Atomic<int64> alignment differs from that of underlying type"); 3322f4a2edda128bbee5c6ba6ba7e3cbca9260368c2Hans Boehm#endif 3333e5cf305db800b2989ad57b7cde8fb3cc9fa1b9eIan Rogers 3345ea047b386c5dac78eda62305d14dedf7b5611a8Elliott Hughes} // namespace art 3355ea047b386c5dac78eda62305d14dedf7b5611a8Elliott Hughes 336fc0e3219edc9a5bf81b166e82fd5db2796eb6a0dBrian Carlstrom#endif // ART_RUNTIME_ATOMIC_H_ 337