large_object_space.cc revision 38c488bcd41ba632a646d7a1d790ec71a2fcf6fa
/*
 * Copyright (C) 2012 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "large_object_space.h"

#include <memory>

#include "gc/accounting/space_bitmap-inl.h"
#include "base/logging.h"
#include "base/mutex-inl.h"
#include "base/stl_util.h"
#include "image.h"
#include "os.h"
#include "space-inl.h"
#include "thread-inl.h"
#include "utils.h"

namespace art {
namespace gc {
namespace space {

class ValgrindLargeObjectMapSpace FINAL : public LargeObjectMapSpace {
 public:
  explicit ValgrindLargeObjectMapSpace(const std::string& name) : LargeObjectMapSpace(name) {
  }

  virtual mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated,
                                size_t* usable_size) OVERRIDE {
    mirror::Object* obj =
        LargeObjectMapSpace::Alloc(self, num_bytes + kValgrindRedZoneBytes * 2, bytes_allocated,
                                   usable_size);
    if (UNLIKELY(obj == nullptr)) {
      // The underlying allocation failed; there is no object to pad with redzones.
      return nullptr;
    }
    mirror::Object* object_without_rdz = reinterpret_cast<mirror::Object*>(
        reinterpret_cast<uintptr_t>(obj) + kValgrindRedZoneBytes);
    VALGRIND_MAKE_MEM_NOACCESS(reinterpret_cast<void*>(obj), kValgrindRedZoneBytes);
    VALGRIND_MAKE_MEM_NOACCESS(reinterpret_cast<byte*>(object_without_rdz) + num_bytes,
                               kValgrindRedZoneBytes);
    if (usable_size != nullptr) {
      *usable_size = num_bytes;  // Since we have redzones, shrink the usable size.
    }
    return object_without_rdz;
  }

  virtual size_t AllocationSize(mirror::Object* obj, size_t* usable_size) OVERRIDE {
    mirror::Object* object_with_rdz = reinterpret_cast<mirror::Object*>(
        reinterpret_cast<uintptr_t>(obj) - kValgrindRedZoneBytes);
    return LargeObjectMapSpace::AllocationSize(object_with_rdz, usable_size);
  }

  virtual size_t Free(Thread* self, mirror::Object* obj) OVERRIDE {
    mirror::Object* object_with_rdz = reinterpret_cast<mirror::Object*>(
        reinterpret_cast<uintptr_t>(obj) - kValgrindRedZoneBytes);
    VALGRIND_MAKE_MEM_UNDEFINED(object_with_rdz, AllocationSize(obj, nullptr));
    return LargeObjectMapSpace::Free(self, object_with_rdz);
  }

  bool Contains(const mirror::Object* obj) const OVERRIDE {
    mirror::Object* object_with_rdz = reinterpret_cast<mirror::Object*>(
        reinterpret_cast<uintptr_t>(obj) - kValgrindRedZoneBytes);
    return LargeObjectMapSpace::Contains(object_with_rdz);
  }

 private:
  static constexpr size_t kValgrindRedZoneBytes = kPageSize;
};
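
// A sketch of the redzone layout the Valgrind wrapper above produces (the offsets
// follow directly from kValgrindRedZoneBytes == kPageSize):
//
//   | redzone, 1 page | object payload, num_bytes | redzone, 1 page |
//   ^ obj (mapping)   ^ object_without_rdz (returned to the caller)
//
// Both redzones are marked NOACCESS, so Valgrind flags any read or write that
// under- or overruns the payload; the other entry points translate between the
// external pointer and the real mapping by adding or subtracting one redzone.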

void LargeObjectSpace::SwapBitmaps() {
  live_bitmap_.swap(mark_bitmap_);
  // Swap names to get more descriptive diagnostics.
  std::string temp_name = live_bitmap_->GetName();
  live_bitmap_->SetName(mark_bitmap_->GetName());
  mark_bitmap_->SetName(temp_name);
}

LargeObjectSpace::LargeObjectSpace(const std::string& name, byte* begin, byte* end)
    : DiscontinuousSpace(name, kGcRetentionPolicyAlwaysCollect),
      num_bytes_allocated_(0), num_objects_allocated_(0), total_bytes_allocated_(0),
      total_objects_allocated_(0), begin_(begin), end_(end) {
}

void LargeObjectSpace::CopyLiveToMarked() {
  mark_bitmap_->CopyFrom(live_bitmap_.get());
}

LargeObjectMapSpace::LargeObjectMapSpace(const std::string& name)
    : LargeObjectSpace(name, nullptr, nullptr),
      lock_("large object map space lock", kAllocSpaceLock) {}

LargeObjectMapSpace* LargeObjectMapSpace::Create(const std::string& name) {
  if (Runtime::Current()->RunningOnValgrind()) {
    return new ValgrindLargeObjectMapSpace(name);
  } else {
    return new LargeObjectMapSpace(name);
  }
}

mirror::Object* LargeObjectMapSpace::Alloc(Thread* self, size_t num_bytes,
                                           size_t* bytes_allocated, size_t* usable_size) {
  std::string error_msg;
  MemMap* mem_map = MemMap::MapAnonymous("large object space allocation", nullptr, num_bytes,
                                         PROT_READ | PROT_WRITE, true, &error_msg);
  if (UNLIKELY(mem_map == nullptr)) {
    LOG(WARNING) << "Large object allocation failed: " << error_msg;
    return nullptr;
  }
  MutexLock mu(self, lock_);
  mirror::Object* obj = reinterpret_cast<mirror::Object*>(mem_map->Begin());
  large_objects_.push_back(obj);
  mem_maps_.Put(obj, mem_map);
  size_t allocation_size = mem_map->Size();
  DCHECK(bytes_allocated != nullptr);
  // Widen the recorded [begin_, end_) range so it covers the new mapping.
  begin_ = std::min(begin_, reinterpret_cast<byte*>(obj));
  byte* obj_end = reinterpret_cast<byte*>(obj) + allocation_size;
  if (end_ == nullptr || obj_end > end_) {
    end_ = obj_end;
  }
  *bytes_allocated = allocation_size;
  if (usable_size != nullptr) {
    *usable_size = allocation_size;
  }
  num_bytes_allocated_ += allocation_size;
  total_bytes_allocated_ += allocation_size;
  ++num_objects_allocated_;
  ++total_objects_allocated_;
  return obj;
}
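
// Note on the bookkeeping above: each large object owns a dedicated anonymous
// mapping, so mem_maps_ (object -> MemMap) drives the liveness checks, size
// queries, and freeing, while begin_/end_ only track the lowest and highest
// addresses seen so that Sweep() can bound its bitmap walk.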

size_t LargeObjectMapSpace::Free(Thread* self, mirror::Object* ptr) {
  MutexLock mu(self, lock_);
  MemMaps::iterator found = mem_maps_.find(ptr);
  if (UNLIKELY(found == mem_maps_.end())) {
    Runtime::Current()->GetHeap()->DumpSpaces(LOG(ERROR));
    LOG(FATAL) << "Attempted to free large object " << ptr << " which was not live";
  }
  DCHECK_GE(num_bytes_allocated_, found->second->Size());
  size_t allocation_size = found->second->Size();
  num_bytes_allocated_ -= allocation_size;
  --num_objects_allocated_;
  delete found->second;
  mem_maps_.erase(found);
  return allocation_size;
}

size_t LargeObjectMapSpace::AllocationSize(mirror::Object* obj, size_t* usable_size) {
  MutexLock mu(Thread::Current(), lock_);
  auto found = mem_maps_.find(obj);
  CHECK(found != mem_maps_.end()) << "Attempted to get size of a large object which is not live";
  size_t allocation_size = found->second->Size();
  if (usable_size != nullptr) {
    *usable_size = allocation_size;
  }
  return allocation_size;
}

size_t LargeObjectSpace::FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs) {
  size_t total = 0;
  for (size_t i = 0; i < num_ptrs; ++i) {
    if (kDebugSpaces) {
      CHECK(Contains(ptrs[i]));
    }
    total += Free(self, ptrs[i]);
  }
  return total;
}

void LargeObjectMapSpace::Walk(DlMallocSpace::WalkCallback callback, void* arg) {
  MutexLock mu(Thread::Current(), lock_);
  for (auto it = mem_maps_.begin(); it != mem_maps_.end(); ++it) {
    MemMap* mem_map = it->second;
    callback(mem_map->Begin(), mem_map->End(), mem_map->Size(), arg);
    callback(nullptr, nullptr, 0, arg);
  }
}

bool LargeObjectMapSpace::Contains(const mirror::Object* obj) const {
  Thread* self = Thread::Current();
  if (lock_.IsExclusiveHeld(self)) {
    // We hold lock_ so do the check.
    return mem_maps_.find(const_cast<mirror::Object*>(obj)) != mem_maps_.end();
  } else {
    MutexLock mu(self, lock_);
    return mem_maps_.find(const_cast<mirror::Object*>(obj)) != mem_maps_.end();
  }
}

FreeListSpace* FreeListSpace::Create(const std::string& name, byte* requested_begin, size_t size) {
  CHECK_EQ(size % kAlignment, 0U);
  std::string error_msg;
  MemMap* mem_map = MemMap::MapAnonymous(name.c_str(), requested_begin, size,
                                         PROT_READ | PROT_WRITE, true, &error_msg);
  CHECK(mem_map != nullptr) << "Failed to allocate large object space mem map: " << error_msg;
  return new FreeListSpace(name, mem_map, mem_map->Begin(), mem_map->End());
}

FreeListSpace::FreeListSpace(const std::string& name, MemMap* mem_map, byte* begin, byte* end)
    : LargeObjectSpace(name, begin, end),
      mem_map_(mem_map),
      lock_("free list space lock", kAllocSpaceLock) {
  free_end_ = end - begin;
}

FreeListSpace::~FreeListSpace() {}

void FreeListSpace::Walk(DlMallocSpace::WalkCallback callback, void* arg) {
  MutexLock mu(Thread::Current(), lock_);
  uintptr_t free_end_start = reinterpret_cast<uintptr_t>(end_) - free_end_;
  AllocationHeader* cur_header = reinterpret_cast<AllocationHeader*>(Begin());
  while (reinterpret_cast<uintptr_t>(cur_header) < free_end_start) {
    cur_header = cur_header->GetNextNonFree();
    size_t alloc_size = cur_header->AllocationSize();
    byte* byte_start = reinterpret_cast<byte*>(cur_header->GetObjectAddress());
    byte* byte_end = byte_start + alloc_size - sizeof(AllocationHeader);
    callback(byte_start, byte_end, alloc_size, arg);
    callback(nullptr, nullptr, 0, arg);
    cur_header = reinterpret_cast<AllocationHeader*>(byte_end);
  }
}

void FreeListSpace::RemoveFreePrev(AllocationHeader* header) {
  CHECK(!header->IsFree());
  CHECK_GT(header->GetPrevFree(), size_t(0));
  FreeBlocks::iterator found = free_blocks_.lower_bound(header);
  CHECK(found != free_blocks_.end());
  CHECK_EQ(*found, header);
  free_blocks_.erase(found);
}

FreeListSpace::AllocationHeader* FreeListSpace::GetAllocationHeader(const mirror::Object* obj) {
  DCHECK(Contains(obj));
  return reinterpret_cast<AllocationHeader*>(reinterpret_cast<uintptr_t>(obj) -
                                             sizeof(AllocationHeader));
}

FreeListSpace::AllocationHeader* FreeListSpace::AllocationHeader::GetNextNonFree() {
  // We know that there has to be at least one object after us or else we would have
  // coalesced with the free end region. May be worth investigating a better way to do this
  // as it may be expensive for large allocations.
  for (uintptr_t pos = reinterpret_cast<uintptr_t>(this);; pos += kAlignment) {
    AllocationHeader* cur = reinterpret_cast<AllocationHeader*>(pos);
    if (!cur->IsFree()) return cur;
  }
}
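
// For reference, the chunk layout that Free() and Alloc() below maintain: every
// allocated chunk is an AllocationHeader immediately followed by the object payload,
// and each header records in prev_free_ how many free bytes sit directly before it:
//
//   | free run | header A | payload A | header B | payload B | ... | end free region |
//
// A header whose prev_free_ is non-zero (header A above) is kept in free_blocks_,
// ordered by free-run size, so Alloc can best-fit search the runs. The trailing free
// region of free_end_ bytes is not in free_blocks_; Alloc carves from it only when
// no tracked free run is large enough.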

size_t FreeListSpace::Free(Thread* self, mirror::Object* obj) {
  MutexLock mu(self, lock_);
  DCHECK(Contains(obj));
  AllocationHeader* header = GetAllocationHeader(obj);
  CHECK(IsAligned<kAlignment>(header));
  size_t allocation_size = header->AllocationSize();
  DCHECK_GT(allocation_size, size_t(0));
  DCHECK(IsAligned<kAlignment>(allocation_size));
  // Look at the next chunk.
  AllocationHeader* next_header = header->GetNextAllocationHeader();
  // Calculate the start of the end free block.
  uintptr_t free_end_start = reinterpret_cast<uintptr_t>(end_) - free_end_;
  size_t header_prev_free = header->GetPrevFree();
  size_t new_free_size = allocation_size;
  if (header_prev_free) {
    new_free_size += header_prev_free;
    RemoveFreePrev(header);
  }
  if (reinterpret_cast<uintptr_t>(next_header) >= free_end_start) {
    // Easy case, the next chunk is the end free region.
    CHECK_EQ(reinterpret_cast<uintptr_t>(next_header), free_end_start);
    free_end_ += new_free_size;
  } else {
    AllocationHeader* new_free_header;
    DCHECK(IsAligned<kAlignment>(next_header));
    if (next_header->IsFree()) {
      // The following chunk is free as well; scan forward to the next allocated
      // header and merge its preceding free run into ours.
      AllocationHeader* next_next_header = next_header->GetNextNonFree();
      DCHECK(IsAligned<kAlignment>(next_next_header));
      DCHECK(IsAligned<kAlignment>(next_next_header->AllocationSize()));
      RemoveFreePrev(next_next_header);
      new_free_header = next_next_header;
      new_free_size += next_next_header->GetPrevFree();
    } else {
      new_free_header = next_header;
    }
    new_free_header->prev_free_ = new_free_size;
    free_blocks_.insert(new_free_header);
  }
  --num_objects_allocated_;
  DCHECK_LE(allocation_size, num_bytes_allocated_);
  num_bytes_allocated_ -= allocation_size;
  madvise(header, allocation_size, MADV_DONTNEED);
  if (kIsDebugBuild) {
    // Can't disallow reads since we use them to find next chunks during coalescing.
    mprotect(header, allocation_size, PROT_READ);
  }
  return allocation_size;
}

bool FreeListSpace::Contains(const mirror::Object* obj) const {
  return mem_map_->HasAddress(obj);
}

size_t FreeListSpace::AllocationSize(mirror::Object* obj, size_t* usable_size) {
  AllocationHeader* header = GetAllocationHeader(obj);
  DCHECK(Contains(obj));
  DCHECK(!header->IsFree());
  size_t alloc_size = header->AllocationSize();
  if (usable_size != nullptr) {
    *usable_size = alloc_size - sizeof(AllocationHeader);
  }
  return alloc_size;
}
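
// The coalescing cases in Free() above, in picture form (A = allocated, X = the
// chunk being freed):
//   | A | X | A ... |            ->  X's bytes become prev_free_ of the next header.
//   | A | X | free | A ... |     ->  the free run after X is merged in and the next
//                                    allocated header is re-filed with the total.
//   | A | X | end free region |  ->  free_end_ grows; no header needs tracking.
// In every case X's own prev_free_, if any, is folded into the new free run first.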

mirror::Object* FreeListSpace::Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated,
                                     size_t* usable_size) {
  MutexLock mu(self, lock_);
  size_t allocation_size = RoundUp(num_bytes + sizeof(AllocationHeader), kAlignment);
  AllocationHeader temp;
  temp.SetPrevFree(allocation_size);
  temp.SetAllocationSize(0);
  AllocationHeader* new_header;
  // Find the smallest free run at least allocation_size bytes in size (best fit).
  FreeBlocks::iterator found = free_blocks_.lower_bound(&temp);
  if (found != free_blocks_.end()) {
    AllocationHeader* header = *found;
    free_blocks_.erase(found);

    // Fit our object in the previous free header space.
    new_header = header->GetPrevFreeAllocationHeader();

    // Remove the newly allocated block from the header and update the prev_free_.
    header->prev_free_ -= allocation_size;
    if (header->prev_free_ > 0) {
      // If there is remaining space, insert back into the free set.
      free_blocks_.insert(header);
    }
  } else {
    // Try to steal some memory from the free space at the end of the space.
    if (LIKELY(free_end_ >= allocation_size)) {
      // Fit our object at the start of the end free block.
      new_header = reinterpret_cast<AllocationHeader*>(end_ - free_end_);
      free_end_ -= allocation_size;
    } else {
      return nullptr;
    }
  }

  DCHECK(bytes_allocated != nullptr);
  *bytes_allocated = allocation_size;
  if (usable_size != nullptr) {
    *usable_size = allocation_size - sizeof(AllocationHeader);
  }
  // Need to do these inside of the lock.
  ++num_objects_allocated_;
  ++total_objects_allocated_;
  num_bytes_allocated_ += allocation_size;
  total_bytes_allocated_ += allocation_size;

  // We always put our object at the start of the free block; there cannot be another
  // free block before it.
  if (kIsDebugBuild) {
    mprotect(new_header, allocation_size, PROT_READ | PROT_WRITE);
  }
  new_header->SetPrevFree(0);
  new_header->SetAllocationSize(allocation_size);
  return new_header->GetObjectAddress();
}

void FreeListSpace::Dump(std::ostream& os) const {
  MutexLock mu(Thread::Current(), const_cast<Mutex&>(lock_));
  os << GetName() << " -"
     << " begin: " << reinterpret_cast<void*>(Begin())
     << " end: " << reinterpret_cast<void*>(End()) << "\n";
  uintptr_t free_end_start = reinterpret_cast<uintptr_t>(end_) - free_end_;
  AllocationHeader* cur_header = reinterpret_cast<AllocationHeader*>(Begin());
  while (reinterpret_cast<uintptr_t>(cur_header) < free_end_start) {
    byte* free_start = reinterpret_cast<byte*>(cur_header);
    cur_header = cur_header->GetNextNonFree();
    byte* free_end = reinterpret_cast<byte*>(cur_header);
    if (free_start != free_end) {
      os << "Free block at address: " << reinterpret_cast<const void*>(free_start)
         << " of length " << free_end - free_start << " bytes\n";
    }
    size_t alloc_size = cur_header->AllocationSize();
    byte* byte_start = reinterpret_cast<byte*>(cur_header->GetObjectAddress());
    byte* byte_end = byte_start + alloc_size - sizeof(AllocationHeader);
    os << "Large object at address: " << reinterpret_cast<const void*>(byte_start)
       << " of length " << byte_end - byte_start << " bytes\n";
    cur_header = reinterpret_cast<AllocationHeader*>(byte_end);
  }
  if (free_end_) {
    os << "Free block at address: " << reinterpret_cast<const void*>(free_end_start)
       << " of length " << free_end_ << " bytes\n";
  }
}
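
// A worked allocation example (illustrative only, assuming kAlignment equals a
// 4 KiB page size): a request for 100 bytes needs
//   RoundUp(100 + sizeof(AllocationHeader), kAlignment) = 4096 bytes.
// If free_blocks_ holds a header H with an 8192-byte free run before it, the
// lower_bound on the dummy header selects H, the object is placed in the first
// 4096 bytes of the run, and H is re-inserted with prev_free_ == 4096.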

void LargeObjectSpace::SweepCallback(size_t num_ptrs, mirror::Object** ptrs, void* arg) {
  SweepCallbackContext* context = static_cast<SweepCallbackContext*>(arg);
  space::LargeObjectSpace* space = context->space->AsLargeObjectSpace();
  Thread* self = context->self;
  Locks::heap_bitmap_lock_->AssertExclusiveHeld(self);
  // If the bitmaps aren't swapped we need to clear the bits since the GC isn't going to re-swap
  // the bitmaps as an optimization.
  if (!context->swap_bitmaps) {
    accounting::LargeObjectBitmap* bitmap = space->GetLiveBitmap();
    for (size_t i = 0; i < num_ptrs; ++i) {
      bitmap->Clear(ptrs[i]);
    }
  }
  context->freed.objects += num_ptrs;
  context->freed.bytes += space->FreeList(self, num_ptrs, ptrs);
}

collector::ObjectBytePair LargeObjectSpace::Sweep(bool swap_bitmaps) {
  if (Begin() >= End()) {
    return collector::ObjectBytePair(0, 0);
  }
  accounting::LargeObjectBitmap* live_bitmap = GetLiveBitmap();
  accounting::LargeObjectBitmap* mark_bitmap = GetMarkBitmap();
  if (swap_bitmaps) {
    std::swap(live_bitmap, mark_bitmap);
  }
  AllocSpace::SweepCallbackContext scc(swap_bitmaps, this);
  accounting::LargeObjectBitmap::SweepWalk(*live_bitmap, *mark_bitmap,
                                           reinterpret_cast<uintptr_t>(Begin()),
                                           reinterpret_cast<uintptr_t>(End()), SweepCallback, &scc);
  return scc.freed;
}

void LargeObjectSpace::LogFragmentationAllocFailure(std::ostream& /*os*/,
                                                    size_t /*failed_alloc_bytes*/) {
  UNIMPLEMENTED(FATAL);
}

}  // namespace space
}  // namespace gc
}  // namespace art