memory.cpp revision c6e466911f803e85548887c3acb50d6fa5c4b071
//===------------------------ memory.cpp ----------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is dual licensed under the MIT and the University of Illinois Open
// Source Licenses. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//

#define _LIBCPP_BUILDING_MEMORY
#include "memory"
#ifndef _LIBCPP_HAS_NO_THREADS
#include "mutex"
#include "thread"
#endif
#include "support/atomic_support.h"

_LIBCPP_BEGIN_NAMESPACE_STD

namespace
{

// NOTE: Relaxed and acq/rel atomics (for increment and decrement respectively)
// should be sufficient for thread safety.
// See https://llvm.org/bugs/show_bug.cgi?id=22803

// Atomically add 1 to `t` and return the updated value.
// Relaxed ordering is enough here: taking an additional reference does not
// need to synchronize with anything (see the PR referenced above).
template <class T>
inline T
increment(T& t) _NOEXCEPT
{
    return __libcpp_atomic_add(&t, 1, _AO_Relaxed);
}

// Atomically subtract 1 from `t` and return the updated value.
// Acq/rel ordering makes all writes performed before a release of a
// reference visible to the thread that ends up destroying the object.
template <class T>
inline T
decrement(T& t) _NOEXCEPT
{
    return __libcpp_atomic_add(&t, -1, _AO_Acq_Rel);
}

}  // namespace

const allocator_arg_t allocator_arg = allocator_arg_t();

bad_weak_ptr::~bad_weak_ptr() _NOEXCEPT {}

const char*
bad_weak_ptr::what() const _NOEXCEPT
{
    return "bad_weak_ptr";
}

// Key-function anchor for the vtable; no cleanup is needed here.
__shared_count::~__shared_count()
{
}

void
__shared_count::__add_shared() _NOEXCEPT
{
    increment(__shared_owners_);
}

// Drop one shared owner.  The counters in this file are biased by one
// (a stored value of 0 appears to mean "one owner" — the -1 tests below
// rely on this; the bias itself is established where the count is
// initialized, outside this file).  Returns true iff this call released
// the last shared owner and __on_zero_shared() was invoked.
bool
__shared_count::__release_shared() _NOEXCEPT
{
    if (decrement(__shared_owners_) == -1)
    {
        __on_zero_shared();
        return true;
    }
    return false;
}

__shared_weak_count::~__shared_weak_count()
{
}

void
__shared_weak_count::__add_shared() _NOEXCEPT
{
    __shared_count::__add_shared();
}

void
__shared_weak_count::__add_weak() _NOEXCEPT
{
    increment(__shared_weak_owners_);
}

// When the last shared owner goes away, the shared count implicitly held
// one weak reference; release it so the control block itself can die.
void
__shared_weak_count::__release_shared() _NOEXCEPT
{
    if (__shared_count::__release_shared())
        __release_weak();
}

void
__shared_weak_count::__release_weak() _NOEXCEPT
{
    if (decrement(__shared_weak_owners_) == -1)
        __on_zero_shared_weak();
}

// weak_ptr::lock() support: atomically take a shared reference, but only
// if the object is still alive.  A shared count of -1 (biased) means the
// object has already been destroyed, so return null.  The CAS loop
// refreshes `object_owners` on failure (the compare-exchange writes the
// observed value back into it) and retries until it either succeeds or
// observes expiry.
__shared_weak_count*
__shared_weak_count::lock() _NOEXCEPT
{
    long object_owners = __libcpp_atomic_load(&__shared_owners_);
    while (object_owners != -1)
    {
        if (__libcpp_atomic_compare_exchange(&__shared_owners_,
                                             &object_owners,
                                             object_owners+1))
            return this;
    }
    return 0;
}

#if !defined(_LIBCPP_NO_RTTI) || !defined(_LIBCPP_BUILD_STATIC)

// Base implementation used by get_deleter(): this control block knows of
// no deleter, so report none.  Derived control blocks that carry a
// deleter override this (not visible in this file).
const void*
__shared_weak_count::__get_deleter(const type_info&) const _NOEXCEPT
{
    return 0;
}

#endif  // _LIBCPP_NO_RTTI

#if __has_feature(cxx_atomic) && !defined(_LIBCPP_HAS_NO_THREADS)

// Backing storage for the atomic_... (shared_ptr) mutex pool: 16
// statically initialized pthread mutexes, so no dynamic initialization
// (and no initialization-order issues) is needed.
static const std::size_t __sp_mut_count = 16;
static pthread_mutex_t mut_back_imp[__sp_mut_count] =
{
    PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
    PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
    PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
    PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER
};

// NOTE(review): this cast assumes std::mutex is layout-compatible with
// pthread_mutex_t (i.e. mutex is a thin wrapper whose first/only member
// is the native handle) — nothing in this file proves that; confirm
// against the <mutex> implementation for this libc++ build.
static mutex* mut_back = reinterpret_cast<std::mutex*>(mut_back_imp);

// __lx stores a type-erased pointer to the underlying std::mutex.
_LIBCPP_CONSTEXPR __sp_mut::__sp_mut(void* p) _NOEXCEPT
   : __lx(p)
{
}

// Spin briefly (up to 16 try_lock attempts, yielding between attempts)
// before falling back to a blocking lock().  The critical sections
// guarded by these mutexes are tiny, so a short spin usually wins.
void
__sp_mut::lock() _NOEXCEPT
{
    mutex& m = *static_cast<mutex*>(__lx);
    unsigned count = 0;
    while (!m.try_lock())
    {
        if (++count > 16)
        {
            m.lock();
            break;
        }
        this_thread::yield();
    }
}

void
__sp_mut::unlock() _NOEXCEPT
{
    static_cast<mutex*>(__lx)->unlock();
}

// Map an address to one of the 16 pooled mutexes by hashing the pointer.
// The `& (__sp_mut_count-1)` mask requires __sp_mut_count to be a power
// of two.  The pool is a function-local static so it is initialized on
// first use (thread-safely, per C++11 static-init rules).
__sp_mut&
__get_sp_mut(const void* p)
{
    static __sp_mut muts[__sp_mut_count]
    {
        &mut_back[ 0], &mut_back[ 1], &mut_back[ 2], &mut_back[ 3],
        &mut_back[ 4], &mut_back[ 5], &mut_back[ 6], &mut_back[ 7],
        &mut_back[ 8], &mut_back[ 9], &mut_back[10], &mut_back[11],
        &mut_back[12], &mut_back[13], &mut_back[14], &mut_back[15]
    };
    return muts[hash<const void*>()(p) & (__sp_mut_count-1)];
}

#endif // __has_feature(cxx_atomic) && !_LIBCPP_HAS_NO_THREADS

// [util.dynamic.safety] garbage-collection support: this implementation
// does not implement strict pointer safety, so the declare/undeclare
// calls are deliberate no-ops and get_pointer_safety() reports relaxed.
void
declare_reachable(void*)
{
}

void
declare_no_pointers(char*, size_t)
{
}

void
undeclare_no_pointers(char*, size_t)
{
}

pointer_safety
get_pointer_safety() _NOEXCEPT
{
    return pointer_safety::relaxed;
}

void*
__undeclare_reachable(void* p)
{
    return p;
}

// std::align: if a suitably aligned sub-buffer of `size` bytes fits in
// [ptr, ptr + space), advance `ptr` to the first `alignment`-aligned
// address, shrink `space` by the adjustment, and return the new pointer;
// otherwise leave ptr/space untouched and return nullptr.  `alignment`
// is required to be a power of two (standard precondition; the
// `& -alignment` rounding relies on it and is not checked here).
void*
align(size_t alignment, size_t size, void*& ptr, size_t& space)
{
    void* r = nullptr;
    if (size <= space)
    {
        char* p1 = static_cast<char*>(ptr);
        // Round p1 up to the next multiple of alignment.
        char* p2 = reinterpret_cast<char*>(reinterpret_cast<size_t>(p1 + (alignment - 1)) & -alignment);
        size_t d = static_cast<size_t>(p2 - p1);
        // `d <= space - size` (not `d + size <= space`) avoids overflow.
        if (d <= space - size)
        {
            r = p2;
            ptr = r;
            space -= d;
        }
    }
    return r;
}

_LIBCPP_END_NAMESPACE_STD