bump_pointer_space.cc revision 31f441464c0c8f840aba37e236ad133f30308d70
/*
 * Copyright (C) 2013 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "bump_pointer_space.h"
#include "bump_pointer_space-inl.h"
#include "mirror/object-inl.h"
#include "mirror/class-inl.h"
#include "thread_list.h"

namespace art {
namespace gc {
namespace space {

BumpPointerSpace* BumpPointerSpace::Create(const std::string& name, size_t capacity,
                                           byte* requested_begin) {
  capacity = RoundUp(capacity, kPageSize);
  std::string error_msg;
  UniquePtr<MemMap> mem_map(MemMap::MapAnonymous(name.c_str(), requested_begin, capacity,
                                                 PROT_READ | PROT_WRITE, true, &error_msg));
  if (mem_map.get() == nullptr) {
    LOG(ERROR) << "Failed to allocate pages for alloc space (" << name << ") of size "
        << PrettySize(capacity) << " with message " << error_msg;
    return nullptr;
  }
  return new BumpPointerSpace(name, mem_map.release());
}

BumpPointerSpace* BumpPointerSpace::CreateFromMemMap(const std::string& name, MemMap* mem_map) {
  return new BumpPointerSpace(name, mem_map);
}

BumpPointerSpace::BumpPointerSpace(const std::string& name, byte* begin, byte* limit)
    : ContinuousMemMapAllocSpace(name, nullptr, begin, begin, limit,
                                 kGcRetentionPolicyAlwaysCollect),
      growth_end_(limit),
      objects_allocated_(0), bytes_allocated_(0),
      block_lock_("Block lock"),
      main_block_size_(0),
      num_blocks_(0) {
}

BumpPointerSpace::BumpPointerSpace(const std::string& name, MemMap* mem_map)
    : ContinuousMemMapAllocSpace(name, mem_map, mem_map->Begin(), mem_map->Begin(), mem_map->End(),
                                 kGcRetentionPolicyAlwaysCollect),
      growth_end_(mem_map->End()),
      objects_allocated_(0), bytes_allocated_(0),
      block_lock_("Block lock"),
      main_block_size_(0),
      num_blocks_(0) {
}
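// A minimal usage sketch (illustrative commentary, not part of this revision): creating a space
// and letting the kernel pick the mapping address by passing a null requested_begin.  The
// "16 * MB" capacity is an arbitrary example value (MB is assumed to be the size constant from
// globals.h); Create() rounds whatever capacity is passed up to a whole number of pages.
//
//   BumpPointerSpace* space = BumpPointerSpace::Create("bump pointer space", 16 * MB, nullptr);
//   CHECK(space != nullptr) << "Failed to create bump pointer space";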
void BumpPointerSpace::Clear() {
  // Release the pages back to the operating system.
  CHECK_NE(madvise(Begin(), Limit() - Begin(), MADV_DONTNEED), -1) << "madvise failed";
  // Reset the end of the space back to the beginning; we move the end forward as we allocate
  // objects.
  SetEnd(Begin());
  objects_allocated_ = 0;
  bytes_allocated_ = 0;
  growth_end_ = Limit();
  {
    MutexLock mu(Thread::Current(), block_lock_);
    num_blocks_ = 0;
    main_block_size_ = 0;
  }
}

void BumpPointerSpace::Dump(std::ostream& os) const {
  os << GetName() << " "
      << reinterpret_cast<void*>(Begin()) << "-" << reinterpret_cast<void*>(End()) << " - "
      << reinterpret_cast<void*>(Limit());
}

mirror::Object* BumpPointerSpace::GetNextObject(mirror::Object* obj) {
  const uintptr_t position = reinterpret_cast<uintptr_t>(obj) + obj->SizeOf();
  return reinterpret_cast<mirror::Object*>(RoundUp(position, kAlignment));
}
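// Worked example for the rounding above (added commentary; the concrete numbers are illustrative
// and kAlignment, declared in bump_pointer_space.h, is assumed here to be 8 bytes): an object
// starting at 0x1000 whose SizeOf() returns 13 ends at 0x100d, so GetNextObject() returns
// RoundUp(0x100d, 8) == 0x1010.  Objects are therefore packed back-to-back at kAlignment-byte
// boundaries, which is what makes the linear walks further down possible.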
void BumpPointerSpace::RevokeThreadLocalBuffers(Thread* thread) {
  MutexLock mu(Thread::Current(), block_lock_);
  RevokeThreadLocalBuffersLocked(thread);
}

void BumpPointerSpace::RevokeAllThreadLocalBuffers() {
  Thread* self = Thread::Current();
  MutexLock mu(self, *Locks::runtime_shutdown_lock_);
  MutexLock mu2(self, *Locks::thread_list_lock_);
  // TODO: Avoid copying the thread list?
  std::list<Thread*> thread_list = Runtime::Current()->GetThreadList()->GetList();
  for (Thread* thread : thread_list) {
    RevokeThreadLocalBuffers(thread);
  }
}

void BumpPointerSpace::AssertThreadLocalBuffersAreRevoked(Thread* thread) {
  if (kIsDebugBuild) {
    MutexLock mu(Thread::Current(), block_lock_);
    DCHECK(!thread->HasTlab());
  }
}

void BumpPointerSpace::AssertAllThreadLocalBuffersAreRevoked() {
  if (kIsDebugBuild) {
    Thread* self = Thread::Current();
    MutexLock mu(self, *Locks::runtime_shutdown_lock_);
    MutexLock mu2(self, *Locks::thread_list_lock_);
    // TODO: Avoid copying the thread list?
    std::list<Thread*> thread_list = Runtime::Current()->GetThreadList()->GetList();
    for (Thread* thread : thread_list) {
      AssertThreadLocalBuffersAreRevoked(thread);
    }
  }
}

void BumpPointerSpace::UpdateMainBlock() {
  DCHECK_EQ(num_blocks_, 0U);
  main_block_size_ = Size();
}

// Returns the start of the storage.
byte* BumpPointerSpace::AllocBlock(size_t bytes) {
  bytes = RoundUp(bytes, kAlignment);
  if (!num_blocks_) {
    UpdateMainBlock();
  }
  byte* storage = reinterpret_cast<byte*>(
      AllocNonvirtualWithoutAccounting(bytes + sizeof(BlockHeader)));
  if (LIKELY(storage != nullptr)) {
    BlockHeader* header = reinterpret_cast<BlockHeader*>(storage);
    header->size_ = bytes;  // Write out the block header.
    storage += sizeof(BlockHeader);
    ++num_blocks_;
  }
  return storage;
}
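// Illustrative layout sketch (added commentary, not in the original source): once the main block
// has been sized via UpdateMainBlock(), each AllocBlock() call appends a BlockHeader immediately
// followed by its rounded-up storage, so the space looks roughly like this (sizes are examples
// only):
//
//   Begin()                                Begin() + main_block_size_                      End()
//   | main block: directly allocated objects | BlockHeader | block 1 storage | BlockHeader | ...
//
// Walk() below relies on this layout: it visits the main block object by object, then uses each
// BlockHeader::size_ to hop from one block to the next.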
void BumpPointerSpace::Walk(ObjectCallback* callback, void* arg) {
  byte* pos = Begin();
  byte* end = End();
  byte* main_end = pos;
  {
    MutexLock mu(Thread::Current(), block_lock_);
    // If we have 0 blocks then we need to update the main block size since we have bump pointer
    // style allocation into an unbounded region (actually bounded by Capacity()).
    if (num_blocks_ == 0) {
      UpdateMainBlock();
    }
    main_end = Begin() + main_block_size_;
    if (num_blocks_ == 0) {
      // We don't have any other blocks, which means someone else may be allocating into the main
      // block. In this case, we don't want to try to visit the other blocks after the main block
      // since these could actually be part of the main block.
      end = main_end;
    }
  }
  // Walk all of the objects in the main block first.
  while (pos < main_end) {
    mirror::Object* obj = reinterpret_cast<mirror::Object*>(pos);
    if (obj->GetClass() == nullptr) {
      // There is a race condition where a thread has just allocated an object but not yet set the
      // class. We can't know the size of this object, so we don't visit it and exit the function
      // since there are guaranteed to be no other blocks.
      return;
    } else {
      callback(obj, arg);
      pos = reinterpret_cast<byte*>(GetNextObject(obj));
    }
  }
  // Walk the other blocks (currently only TLABs).
  while (pos < end) {
    BlockHeader* header = reinterpret_cast<BlockHeader*>(pos);
    size_t block_size = header->size_;
    pos += sizeof(BlockHeader);  // Skip the header so that we know where the objects start.
    mirror::Object* obj = reinterpret_cast<mirror::Object*>(pos);
    const mirror::Object* end = reinterpret_cast<const mirror::Object*>(pos + block_size);
    CHECK_LE(reinterpret_cast<const byte*>(end), End());
    // We don't know how many objects are allocated in the current block. When we hit a null class
    // assume it's the end. TODO: Have a thread update the header when it flushes the block?
    while (obj < end && obj->GetClass() != nullptr) {
      callback(obj, arg);
      obj = GetNextObject(obj);
    }
    pos += block_size;
  }
}

accounting::SpaceBitmap::SweepCallback* BumpPointerSpace::GetSweepCallback() {
  LOG(FATAL) << "Unimplemented";
  return nullptr;
}

uint64_t BumpPointerSpace::GetBytesAllocated() {
  // Start out with the pre-determined amount (blocks which are not being allocated into).
  uint64_t total = static_cast<uint64_t>(bytes_allocated_.Load());
  Thread* self = Thread::Current();
  MutexLock mu(self, *Locks::runtime_shutdown_lock_);
  MutexLock mu2(self, *Locks::thread_list_lock_);
  std::list<Thread*> thread_list = Runtime::Current()->GetThreadList()->GetList();
  MutexLock mu3(Thread::Current(), block_lock_);
  // If we don't have any blocks, we don't have any thread-local buffers. This check is required
  // since there can be multiple bump pointer spaces in existence at the same time.
  if (num_blocks_ > 0) {
    for (Thread* thread : thread_list) {
      total += thread->GetThreadLocalBytesAllocated();
    }
  }
  return total;
}

uint64_t BumpPointerSpace::GetObjectsAllocated() {
  // Start out with the pre-determined amount (blocks which are not being allocated into).
  uint64_t total = static_cast<uint64_t>(objects_allocated_.Load());
  Thread* self = Thread::Current();
  MutexLock mu(self, *Locks::runtime_shutdown_lock_);
  MutexLock mu2(self, *Locks::thread_list_lock_);
  std::list<Thread*> thread_list = Runtime::Current()->GetThreadList()->GetList();
  MutexLock mu3(Thread::Current(), block_lock_);
  // If we don't have any blocks, we don't have any thread-local buffers. This check is required
  // since there can be multiple bump pointer spaces in existence at the same time.
  if (num_blocks_ > 0) {
    for (Thread* thread : thread_list) {
      total += thread->GetThreadLocalObjectsAllocated();
    }
  }
  return total;
}

void BumpPointerSpace::RevokeThreadLocalBuffersLocked(Thread* thread) {
  objects_allocated_.FetchAndAdd(thread->GetThreadLocalObjectsAllocated());
  bytes_allocated_.FetchAndAdd(thread->GetThreadLocalBytesAllocated());
  thread->SetTlab(nullptr, nullptr);
}

bool BumpPointerSpace::AllocNewTlab(Thread* self, size_t bytes) {
  MutexLock mu(Thread::Current(), block_lock_);
  RevokeThreadLocalBuffersLocked(self);
  byte* start = AllocBlock(bytes);
  if (start == nullptr) {
    return false;
  }
  self->SetTlab(start, start + bytes);
  return true;
}

}  // namespace space
}  // namespace gc
}  // namespace art
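// Illustrative sketch of the expected TLAB flow (added commentary; the real call sites live in
// the heap/allocator code and may differ): a thread bumps its thread-local buffer until it runs
// out, then asks the space for a fresh block.  Thread::HasTlab() and Thread::AllocTlab() are
// assumed from thread.h, and kTlabSize is a hypothetical example size, not a real constant.
//
//   if (UNLIKELY(!self->HasTlab())) {
//     if (!bump_pointer_space->AllocNewTlab(self, kTlabSize)) {
//       return nullptr;  // Space exhausted; the caller may grow the space or trigger a GC.
//     }
//   }
//   mirror::Object* obj = self->AllocTlab(alloc_size);  // Bumps the thread-local cursor.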