semi_space.cc revision 893263b7d5bc2ca43a91ecb8071867f5134fc60a
/*
 * Copyright (C) 2013 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "semi_space.h"

#include <functional>
#include <numeric>
#include <climits>
#include <vector>

#include "base/logging.h"
#include "base/macros.h"
#include "base/mutex-inl.h"
#include "base/timing_logger.h"
#include "gc/accounting/heap_bitmap.h"
#include "gc/accounting/mod_union_table.h"
#include "gc/accounting/space_bitmap-inl.h"
#include "gc/heap.h"
#include "gc/space/bump_pointer_space.h"
#include "gc/space/bump_pointer_space-inl.h"
#include "gc/space/image_space.h"
#include "gc/space/large_object_space.h"
#include "gc/space/space-inl.h"
#include "indirect_reference_table.h"
#include "intern_table.h"
#include "jni_internal.h"
#include "mark_sweep-inl.h"
#include "monitor.h"
#include "mirror/art_field.h"
#include "mirror/art_field-inl.h"
#include "mirror/class-inl.h"
#include "mirror/class_loader.h"
#include "mirror/dex_cache.h"
#include "mirror/object-inl.h"
#include "mirror/object_array.h"
#include "mirror/object_array-inl.h"
#include "runtime.h"
#include "semi_space-inl.h"
#include "thread-inl.h"
#include "thread_list.h"
#include "verifier/method_verifier.h"

using ::art::mirror::Class;
using ::art::mirror::Object;

namespace art {
namespace gc {
namespace collector {

static constexpr bool kProtectFromSpace = true;
static constexpr bool kClearFromSpace = true;
static constexpr bool kStoreStackTraces = false;

// TODO: Unduplicate logic.
void SemiSpace::ImmuneSpace(space::ContinuousSpace* space) {
  // Bind live to mark bitmap if necessary.
  if (space->GetLiveBitmap() != space->GetMarkBitmap()) {
    CHECK(space->IsContinuousMemMapAllocSpace());
    space->AsContinuousMemMapAllocSpace()->BindLiveToMarkBitmap();
  }
  // Add the space to the immune region.
  if (immune_begin_ == nullptr) {
    DCHECK(immune_end_ == nullptr);
    immune_begin_ = reinterpret_cast<Object*>(space->Begin());
    immune_end_ = reinterpret_cast<Object*>(space->End());
  } else {
    const space::ContinuousSpace* prev_space = nullptr;
    // Find out if the previous space is immune.
    for (space::ContinuousSpace* cur_space : GetHeap()->GetContinuousSpaces()) {
      if (cur_space == space) {
        break;
      }
      prev_space = cur_space;
    }
    // If the previous space was immune, then extend the immune region. Relies on continuous spaces
    // being sorted by Heap::AddContinuousSpace.
    if (prev_space != nullptr && IsImmuneSpace(prev_space)) {
      immune_begin_ = std::min(reinterpret_cast<Object*>(space->Begin()), immune_begin_);
      // Use Limit() instead of End() because otherwise if the
      // generational mode is enabled, the alloc space might expand
      // due to promotion and the sense of immunity may change in the
      // middle of a GC.
      immune_end_ = std::max(reinterpret_cast<Object*>(space->Limit()), immune_end_);
    }
  }
}

void SemiSpace::BindBitmaps() {
  timings_.StartSplit("BindBitmaps");
  WriterMutexLock mu(self_, *Locks::heap_bitmap_lock_);
  // Mark all of the spaces we never collect as immune.
  for (const auto& space : GetHeap()->GetContinuousSpaces()) {
    if (space->GetLiveBitmap() != nullptr) {
      if (space == to_space_) {
        CHECK(to_space_->IsContinuousMemMapAllocSpace());
        to_space_->AsContinuousMemMapAllocSpace()->BindLiveToMarkBitmap();
      } else if (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyNeverCollect
                 || space->GetGcRetentionPolicy() == space::kGcRetentionPolicyFullCollect
                 // Add the main free list space and the non-moving
                 // space to the immune space during a bump pointer
                 // space only collection.
                 || (generational_ && !whole_heap_collection_ &&
                     (space == GetHeap()->GetNonMovingSpace() ||
                      space == GetHeap()->GetPrimaryFreeListSpace()))) {
        ImmuneSpace(space);
      }
    }
  }
  if (generational_ && !whole_heap_collection_) {
    // We won't collect the large object space during a bump pointer space only collection.
    is_large_object_space_immune_ = true;
  }
  timings_.EndSplit();
}

SemiSpace::SemiSpace(Heap* heap, bool generational, const std::string& name_prefix)
    : GarbageCollector(heap,
                       name_prefix + (name_prefix.empty() ? "" : " ") + "marksweep + semispace"),
      mark_stack_(nullptr),
      immune_begin_(nullptr),
      immune_end_(nullptr),
      is_large_object_space_immune_(false),
      to_space_(nullptr),
      to_space_live_bitmap_(nullptr),
      from_space_(nullptr),
      self_(nullptr),
      generational_(generational),
      last_gc_to_space_end_(nullptr),
      bytes_promoted_(0),
      whole_heap_collection_(true),
      whole_heap_collection_interval_counter_(0),
      saved_bytes_(0) {
}

void SemiSpace::InitializePhase() {
  timings_.Reset();
  TimingLogger::ScopedSplit split("InitializePhase", &timings_);
  mark_stack_ = heap_->mark_stack_.get();
  DCHECK(mark_stack_ != nullptr);
  immune_begin_ = nullptr;
  immune_end_ = nullptr;
  is_large_object_space_immune_ = false;
  saved_bytes_ = 0;
  self_ = Thread::Current();
  // Do any pre GC verification.
  timings_.NewSplit("PreGcVerification");
  heap_->PreGcVerification(this);
  // Set the initial bitmap.
  to_space_live_bitmap_ = to_space_->GetLiveBitmap();
}

void SemiSpace::ProcessReferences(Thread* self) {
  TimingLogger::ScopedSplit split("ProcessReferences", &timings_);
  WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
  GetHeap()->ProcessReferences(timings_, clear_soft_references_, &MarkedForwardingAddressCallback,
                               &MarkObjectCallback, &ProcessMarkStackCallback, this);
}

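// Marks everything reachable, copying from-space objects as they are marked. Requires the
// mutator lock to be exclusively held: swaps the semi-spaces, processes dirty cards, marks the
// roots and immune-space references, then recursively marks and copies the remaining objects.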
void SemiSpace::MarkingPhase() {
  if (kStoreStackTraces) {
    Locks::mutator_lock_->AssertExclusiveHeld(self_);
    // Store the stack traces into the runtime fault string in case we get a heap corruption
    // related crash later.
    ThreadState old_state = self_->SetStateUnsafe(kRunnable);
    std::ostringstream oss;
    Runtime* runtime = Runtime::Current();
    runtime->GetThreadList()->DumpForSigQuit(oss);
    runtime->GetThreadList()->DumpNativeStacks(oss);
    runtime->SetFaultMessage(oss.str());
    CHECK_EQ(self_->SetStateUnsafe(old_state), kRunnable);
  }

  if (generational_) {
    if (gc_cause_ == kGcCauseExplicit || gc_cause_ == kGcCauseForNativeAlloc ||
        clear_soft_references_) {
      // If an explicit, native allocation-triggered, or last attempt
      // collection, collect the whole heap (and reset the interval
      // counter to be consistent).
      whole_heap_collection_ = true;
      whole_heap_collection_interval_counter_ = 0;
    }
    if (whole_heap_collection_) {
      VLOG(heap) << "Whole heap collection";
    } else {
      VLOG(heap) << "Bump pointer space only collection";
    }
  }
  Locks::mutator_lock_->AssertExclusiveHeld(self_);

  TimingLogger::ScopedSplit split("MarkingPhase", &timings_);
  // Need to do this with mutators paused so that somebody doesn't accidentally allocate into the
  // wrong space.
  heap_->SwapSemiSpaces();
  if (generational_) {
    // If last_gc_to_space_end_ is out of the bounds of the from-space
    // (the to-space from last GC), then point it to the beginning of
    // the from-space. For example, the very first GC or the
    // pre-zygote compaction.
    if (!from_space_->HasAddress(reinterpret_cast<mirror::Object*>(last_gc_to_space_end_))) {
      last_gc_to_space_end_ = from_space_->Begin();
    }
    // Reset this before the marking starts below.
    bytes_promoted_ = 0;
  }
  // Assume the cleared space is already empty.
  BindBitmaps();
  // Process dirty cards and add dirty cards to mod-union tables.
  heap_->ProcessCards(timings_);
  // Clear the whole card table since we cannot get any additional dirty cards during the
  // paused GC. This saves memory but only works for pause-the-world collectors.
  timings_.NewSplit("ClearCardTable");
  heap_->GetCardTable()->ClearCardTable();
  // Need to do this before the checkpoint since we don't want any threads to add references to
  // the live stack during the recursive mark.
  timings_.NewSplit("SwapStacks");
  if (kUseThreadLocalAllocationStack) {
    heap_->RevokeAllThreadLocalAllocationStacks(self_);
  }
  heap_->SwapStacks(self_);
  WriterMutexLock mu(self_, *Locks::heap_bitmap_lock_);
  MarkRoots();
  // Mark roots of immune spaces.
  UpdateAndMarkModUnion();
  // Recursively mark remaining objects.
  MarkReachableObjects();
}

bool SemiSpace::IsImmuneSpace(const space::ContinuousSpace* space) const {
  return
      immune_begin_ <= reinterpret_cast<Object*>(space->Begin()) &&
      immune_end_ >= reinterpret_cast<Object*>(space->End());
}

void SemiSpace::UpdateAndMarkModUnion() {
  for (auto& space : heap_->GetContinuousSpaces()) {
    // If the space is immune then we need to mark the references to other spaces.
    if (IsImmuneSpace(space)) {
      accounting::ModUnionTable* table = heap_->FindModUnionTableFromSpace(space);
      if (table != nullptr) {
        // TODO: Improve naming.
        TimingLogger::ScopedSplit split(
            space->IsZygoteSpace() ? "UpdateAndMarkZygoteModUnionTable" :
                                     "UpdateAndMarkImageModUnionTable",
            &timings_);
        table->UpdateAndMarkReferences(MarkObjectCallback, this);
      } else {
        // During a bump pointer space only collection, the non-moving
        // space is added to the immune space. But the non-moving
        // space doesn't have a mod union table. Instead, its live
        // bitmap will be scanned later in MarkReachableObjects().
        DCHECK(generational_ && !whole_heap_collection_ &&
               (space == heap_->GetNonMovingSpace() || space == heap_->GetPrimaryFreeListSpace()));
      }
    }
  }
}

class SemiSpaceScanObjectVisitor {
 public:
  explicit SemiSpaceScanObjectVisitor(SemiSpace* ss) : semi_space_(ss) {}
  void operator()(Object* obj) const NO_THREAD_SAFETY_ANALYSIS {
    // TODO: fix NO_THREAD_SAFETY_ANALYSIS. ScanObject() requires an
    // exclusive lock on the mutator lock, but
    // SpaceBitmap::VisitMarkedRange() only requires the shared lock.
    DCHECK(obj != nullptr);
    semi_space_->ScanObject(obj);
  }
 private:
  SemiSpace* const semi_space_;
};

void SemiSpace::MarkReachableObjects() {
  timings_.StartSplit("MarkStackAsLive");
  accounting::ObjectStack* live_stack = heap_->GetLiveStack();
  heap_->MarkAllocStackAsLive(live_stack);
  live_stack->Reset();
  timings_.EndSplit();

  for (auto& space : heap_->GetContinuousSpaces()) {
    // If the space is immune and has no mod union table (the
    // non-moving space when the bump pointer space only collection is
    // enabled), then we need to scan its live bitmap as roots
    // (including the objects on the live stack which have just been
    // marked in the live bitmap above in MarkAllocStackAsLive()).
    if (IsImmuneSpace(space) && heap_->FindModUnionTableFromSpace(space) == nullptr) {
      DCHECK(generational_ && !whole_heap_collection_ &&
             (space == GetHeap()->GetNonMovingSpace() || space == GetHeap()->GetPrimaryFreeListSpace()));
      accounting::SpaceBitmap* live_bitmap = space->GetLiveBitmap();
      SemiSpaceScanObjectVisitor visitor(this);
      live_bitmap->VisitMarkedRange(reinterpret_cast<uintptr_t>(space->Begin()),
                                    reinterpret_cast<uintptr_t>(space->End()),
                                    visitor);
    }
  }

  if (is_large_object_space_immune_) {
    DCHECK(generational_ && !whole_heap_collection_);
    // Delay copying the live set to the marked set until here from
    // BindBitmaps() as the large objects on the allocation stack may
    // be newly added to the live set above in MarkAllocStackAsLive().
    GetHeap()->GetLargeObjectsSpace()->CopyLiveToMarked();

    // When the large object space is immune, we need to scan the
    // large objects as roots since they contain references to their
    // classes (primitive array classes) that could move, even though
    // they don't contain any other references.
    space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
    accounting::ObjectSet* large_live_objects = large_object_space->GetLiveObjects();
    SemiSpaceScanObjectVisitor visitor(this);
    for (const Object* obj : large_live_objects->GetObjects()) {
      visitor(const_cast<Object*>(obj));
    }
  }

  // Recursively process the mark stack.
  ProcessMarkStack();
}

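// Reclaims memory after marking: processes references, sweeps system weaks and unmarked objects
// in the non-moved spaces, records the freed counts, and finally clears and protects the
// from-space.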
void SemiSpace::ReclaimPhase() {
  TimingLogger::ScopedSplit split("ReclaimPhase", &timings_);
  ProcessReferences(self_);
  {
    ReaderMutexLock mu(self_, *Locks::heap_bitmap_lock_);
    SweepSystemWeaks();
  }
  // Record freed memory.
  uint64_t from_bytes = from_space_->GetBytesAllocated();
  uint64_t to_bytes = to_space_->GetBytesAllocated();
  uint64_t from_objects = from_space_->GetObjectsAllocated();
  uint64_t to_objects = to_space_->GetObjectsAllocated();
  CHECK_LE(to_objects, from_objects);
  int64_t freed_bytes = from_bytes - to_bytes;
  int64_t freed_objects = from_objects - to_objects;
  freed_bytes_.FetchAndAdd(freed_bytes);
  freed_objects_.FetchAndAdd(freed_objects);
  // Note: Freed bytes can be negative if we copy from a compacted space to a free-list backed
  // space.
  heap_->RecordFree(freed_objects, freed_bytes);
  timings_.StartSplit("PreSweepingGcVerification");
  heap_->PreSweepingGcVerification(this);
  timings_.EndSplit();

  {
    WriterMutexLock mu(self_, *Locks::heap_bitmap_lock_);
    // Reclaim unmarked objects.
    Sweep(false);
    // Swap the live and mark bitmaps for each space we modified. This is an
    // optimization that enables us to not clear live bits inside of the sweep. Only swaps unbound
    // bitmaps.
    timings_.StartSplit("SwapBitmaps");
    SwapBitmaps();
    timings_.EndSplit();
    // Unbind the live and mark bitmaps.
    TimingLogger::ScopedSplit split("UnBindBitmaps", &timings_);
    GetHeap()->UnBindBitmaps();
  }
  if (kClearFromSpace) {
    // Release the memory used by the from space.
    from_space_->Clear();
  }
  from_space_->Reset();
  // Protect the from space.
  VLOG(heap) << "Protecting space " << *from_space_;
  if (kProtectFromSpace) {
    from_space_->GetMemMap()->Protect(PROT_NONE);
  } else {
    from_space_->GetMemMap()->Protect(PROT_READ);
  }
  if (saved_bytes_ > 0) {
    VLOG(heap) << "Avoided dirtying " << PrettySize(saved_bytes_);
  }

  if (generational_) {
    // Record the end (top) of the to space so we can distinguish
    // between objects that were allocated since the last GC and the
    // older objects.
    last_gc_to_space_end_ = to_space_->End();
  }
}

void SemiSpace::ResizeMarkStack(size_t new_size) {
  std::vector<Object*> temp(mark_stack_->Begin(), mark_stack_->End());
  CHECK_LE(mark_stack_->Size(), new_size);
  mark_stack_->Resize(new_size);
  for (const auto& obj : temp) {
    mark_stack_->PushBack(obj);
  }
}

inline void SemiSpace::MarkStackPush(Object* obj) {
  if (UNLIKELY(mark_stack_->Size() >= mark_stack_->Capacity())) {
    ResizeMarkStack(mark_stack_->Capacity() * 2);
  }
  // The object must be pushed on to the mark stack.
  mark_stack_->PushBack(obj);
}

// Rare case, probably not worth inlining since it will increase instruction cache miss rate.
bool SemiSpace::MarkLargeObject(const Object* obj) {
  // TODO: support >1 discontinuous space.
  space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
  DCHECK(large_object_space->Contains(obj));
  accounting::ObjectSet* large_objects = large_object_space->GetMarkObjects();
  if (UNLIKELY(!large_objects->Test(obj))) {
    large_objects->Set(obj);
    return true;
  }
  return false;
}

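// Copies size bytes from src to dest while trying to avoid dirtying pages: for copies larger
// than a page, interior source pages that are entirely zero are skipped since the destination
// is expected to already be zero-filled. Returns the number of bytes whose writes were avoided.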
static inline size_t CopyAvoidingDirtyingPages(void* dest, const void* src, size_t size) {
  if (LIKELY(size <= static_cast<size_t>(kPageSize))) {
    // We will dirty the current page and somewhere in the middle of the next page. This means
    // that the next object copied will also dirty that page.
    // TODO: Worth considering the last object copied? We may end up dirtying one unnecessary
    // page per GC.
    memcpy(dest, src, size);
    return 0;
  }
  size_t saved_bytes = 0;
  byte* byte_dest = reinterpret_cast<byte*>(dest);
  if (kIsDebugBuild) {
    for (size_t i = 0; i < size; ++i) {
      CHECK_EQ(byte_dest[i], 0U);
    }
  }
  // Process the start of the page. The page must already be dirty, don't bother with checking.
  const byte* byte_src = reinterpret_cast<const byte*>(src);
  const byte* limit = byte_src + size;
  size_t page_remain = AlignUp(byte_dest, kPageSize) - byte_dest;
  // Copy the bytes until the start of the next page.
  memcpy(dest, src, page_remain);
  byte_src += page_remain;
  byte_dest += page_remain;
  CHECK_ALIGNED(reinterpret_cast<uintptr_t>(byte_dest), kPageSize);
  CHECK_ALIGNED(reinterpret_cast<uintptr_t>(byte_dest), sizeof(uintptr_t));
  CHECK_ALIGNED(reinterpret_cast<uintptr_t>(byte_src), sizeof(uintptr_t));
  while (byte_src + kPageSize < limit) {
    bool all_zero = true;
    uintptr_t* word_dest = reinterpret_cast<uintptr_t*>(byte_dest);
    const uintptr_t* word_src = reinterpret_cast<const uintptr_t*>(byte_src);
    for (size_t i = 0; i < kPageSize / sizeof(*word_src); ++i) {
      // Assumes the destination of the copy is all zeros.
      if (word_src[i] != 0) {
        all_zero = false;
        word_dest[i] = word_src[i];
      }
    }
    if (all_zero) {
      // Avoided copying into the page since it was all zeros.
      saved_bytes += kPageSize;
    }
    byte_src += kPageSize;
    byte_dest += kPageSize;
  }
  // Handle the part of the page at the end.
  memcpy(byte_dest, byte_src, limit - byte_src);
  return saved_bytes;
}

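// Copies a from-space object that has not been forwarded yet and returns its new address. In
// generational mode, objects allocated before the last GC are (pseudo-)promoted into the main
// free list space, falling back to the to-space if that allocation fails; younger objects are
// copied into the to-space. The caller installs the forwarding address and pushes the copy onto
// the mark stack.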
mirror::Object* SemiSpace::MarkNonForwardedObject(mirror::Object* obj) {
  size_t object_size = obj->SizeOf();
  size_t bytes_allocated;
  mirror::Object* forward_address = nullptr;
  if (generational_ && reinterpret_cast<byte*>(obj) < last_gc_to_space_end_) {
    // If it's allocated before the last GC (older), move
    // (pseudo-promote) it to the main free list space (as a sort
    // of old generation).
    size_t bytes_promoted;
    space::MallocSpace* promo_dest_space = GetHeap()->GetPrimaryFreeListSpace();
    forward_address = promo_dest_space->Alloc(self_, object_size, &bytes_promoted, nullptr);
    if (forward_address == nullptr) {
      // If out of space, fall back to the to-space.
      forward_address = to_space_->Alloc(self_, object_size, &bytes_allocated, nullptr);
    } else {
      GetHeap()->num_bytes_allocated_.FetchAndAdd(bytes_promoted);
      bytes_promoted_ += bytes_promoted;
      // Handle the bitmaps marking.
      accounting::SpaceBitmap* live_bitmap = promo_dest_space->GetLiveBitmap();
      DCHECK(live_bitmap != nullptr);
      accounting::SpaceBitmap* mark_bitmap = promo_dest_space->GetMarkBitmap();
      DCHECK(mark_bitmap != nullptr);
      DCHECK(!live_bitmap->Test(forward_address));
      if (!whole_heap_collection_) {
        // If collecting the bump pointer spaces only, live_bitmap == mark_bitmap.
        DCHECK_EQ(live_bitmap, mark_bitmap);

        // During a bump pointer space only collection, delay the live
        // bitmap marking of the promoted object until it's popped off
        // the mark stack (ProcessMarkStack()). The rationale: we may
        // be in the middle of scanning the objects in the promo
        // destination space for
        // non-moving-space-to-bump-pointer-space references by
        // iterating over the marked bits of the live bitmap
        // (MarkReachableObjects()). If we don't delay it (and instead
        // mark the promoted object here), the above promo destination
        // space scan could encounter the just-promoted object and
        // forward the references in the promoted object's fields even
        // though it is pushed onto the mark stack. If this happens,
        // the promoted object would be in an inconsistent state, that
        // is, it's on the mark stack (gray) but its fields are
        // already forwarded (black), which would cause a
        // DCHECK(!to_space_->HasAddress(obj)) failure below.
      } else {
        // Mark forward_address on the live bit map.
        live_bitmap->Set(forward_address);
        // Mark forward_address on the mark bit map.
        DCHECK(!mark_bitmap->Test(forward_address));
        mark_bitmap->Set(forward_address);
      }
    }
    DCHECK(forward_address != nullptr);
  } else {
    // If it's allocated after the last GC (younger), copy it to the to-space.
    forward_address = to_space_->Alloc(self_, object_size, &bytes_allocated, nullptr);
  }
  // Copy over the object and add it to the mark stack since we still need to update its
  // references.
  saved_bytes_ +=
      CopyAvoidingDirtyingPages(reinterpret_cast<void*>(forward_address), obj, object_size);
  if (kUseBrooksPointer) {
    obj->AssertSelfBrooksPointer();
    DCHECK_EQ(forward_address->GetBrooksPointer(), obj);
    forward_address->SetBrooksPointer(forward_address);
    forward_address->AssertSelfBrooksPointer();
  }
  if (to_space_live_bitmap_ != nullptr) {
    to_space_live_bitmap_->Set(forward_address);
  }
  DCHECK(to_space_->HasAddress(forward_address) ||
         (generational_ && GetHeap()->GetPrimaryFreeListSpace()->HasAddress(forward_address)));
  return forward_address;
}

// Used to mark and copy objects. Any newly-marked objects that are in the from space get moved to
// the to-space and have their forward address updated. Objects which have been newly marked are
// pushed on the mark stack.
Object* SemiSpace::MarkObject(Object* obj) {
  if (kUseBrooksPointer) {
    // Verify all the objects have the correct forward pointer installed.
    if (obj != nullptr) {
      obj->AssertSelfBrooksPointer();
    }
  }
  Object* forward_address = obj;
  if (obj != nullptr && !IsImmune(obj)) {
    if (from_space_->HasAddress(obj)) {
      forward_address = GetForwardingAddressInFromSpace(obj);
      // If the object has already been moved, return the new forward address.
      if (forward_address == nullptr) {
        forward_address = MarkNonForwardedObject(obj);
        DCHECK(forward_address != nullptr);
        // Make sure to only update the forwarding address AFTER you copy the object so that the
        // monitor word doesn't get stomped over.
        obj->SetLockWord(LockWord::FromForwardingAddress(
            reinterpret_cast<size_t>(forward_address)));
        // Push the object onto the mark stack for later processing.
        MarkStackPush(forward_address);
      }
      // TODO: Do we need this if in the else statement?
    } else {
      accounting::SpaceBitmap* object_bitmap = heap_->GetMarkBitmap()->GetContinuousSpaceBitmap(obj);
      if (LIKELY(object_bitmap != nullptr)) {
        if (generational_) {
          // During a bump pointer space only collection, we should not
          // reach here as we don't/won't mark the objects in the
          // non-moving space (except for the promoted objects). Note
          // the non-moving space is added to the immune space.
          DCHECK(whole_heap_collection_);
        }
        // This object was not previously marked.
        if (!object_bitmap->Test(obj)) {
          object_bitmap->Set(obj);
          MarkStackPush(obj);
        }
      } else {
        CHECK(!to_space_->HasAddress(obj)) << "Marking object in to_space_";
        if (MarkLargeObject(obj)) {
          MarkStackPush(obj);
        }
      }
    }
  }
  return forward_address;
}

void SemiSpace::ProcessMarkStackCallback(void* arg) {
  DCHECK(arg != nullptr);
  reinterpret_cast<SemiSpace*>(arg)->ProcessMarkStack();
}

mirror::Object* SemiSpace::MarkObjectCallback(mirror::Object* root, void* arg) {
  DCHECK(root != nullptr);
  DCHECK(arg != nullptr);
  return reinterpret_cast<SemiSpace*>(arg)->MarkObject(root);
}

void SemiSpace::MarkRootCallback(Object** root, void* arg, uint32_t /*thread_id*/,
                                 RootType /*root_type*/) {
  DCHECK(root != nullptr);
  DCHECK(arg != nullptr);
  *root = reinterpret_cast<SemiSpace*>(arg)->MarkObject(*root);
}

// Marks all objects in the root set.
void SemiSpace::MarkRoots() {
  timings_.StartSplit("MarkRoots");
  // TODO: Visit image roots as well?
  Runtime::Current()->VisitRoots(MarkRootCallback, this);
  timings_.EndSplit();
}

mirror::Object* SemiSpace::MarkedForwardingAddressCallback(mirror::Object* object, void* arg) {
  return reinterpret_cast<SemiSpace*>(arg)->GetMarkedForwardAddress(object);
}

void SemiSpace::SweepSystemWeaks() {
  timings_.StartSplit("SweepSystemWeaks");
  Runtime::Current()->SweepSystemWeaks(MarkedForwardingAddressCallback, this);
  timings_.EndSplit();
}

bool SemiSpace::ShouldSweepSpace(space::ContinuousSpace* space) const {
  return space != from_space_ && space != to_space_ && !IsImmuneSpace(space);
}

void SemiSpace::Sweep(bool swap_bitmaps) {
  DCHECK(mark_stack_->IsEmpty());
  TimingLogger::ScopedSplit("Sweep", &timings_);
  for (const auto& space : GetHeap()->GetContinuousSpaces()) {
    if (space->IsContinuousMemMapAllocSpace()) {
      space::ContinuousMemMapAllocSpace* alloc_space = space->AsContinuousMemMapAllocSpace();
      if (!ShouldSweepSpace(alloc_space)) {
        continue;
      }
      TimingLogger::ScopedSplit split(
          alloc_space->IsZygoteSpace() ? "SweepZygoteSpace" : "SweepAllocSpace", &timings_);
      size_t freed_objects = 0;
      size_t freed_bytes = 0;
      alloc_space->Sweep(swap_bitmaps, &freed_objects, &freed_bytes);
      heap_->RecordFree(freed_objects, freed_bytes);
      freed_objects_.FetchAndAdd(freed_objects);
      freed_bytes_.FetchAndAdd(freed_bytes);
    }
  }
  if (!is_large_object_space_immune_) {
    SweepLargeObjects(swap_bitmaps);
  }
}

void SemiSpace::SweepLargeObjects(bool swap_bitmaps) {
  DCHECK(!is_large_object_space_immune_);
  TimingLogger::ScopedSplit("SweepLargeObjects", &timings_);
  size_t freed_objects = 0;
  size_t freed_bytes = 0;
  GetHeap()->GetLargeObjectsSpace()->Sweep(swap_bitmaps, &freed_objects, &freed_bytes);
  freed_large_objects_.FetchAndAdd(freed_objects);
  freed_large_object_bytes_.FetchAndAdd(freed_bytes);
  GetHeap()->RecordFree(freed_objects, freed_bytes);
}

// Process the "referent" field in a java.lang.ref.Reference. If the referent has not yet been
// marked, put it on the appropriate list in the heap for later processing.
void SemiSpace::DelayReferenceReferent(mirror::Class* klass, Object* obj) {
  heap_->DelayReferenceReferent(klass, obj, MarkedForwardingAddressCallback, this);
}

// Visit all of the references of an object and update.
void SemiSpace::ScanObject(Object* obj) {
  DCHECK(obj != NULL);
  DCHECK(!from_space_->HasAddress(obj)) << "Scanning object " << obj << " in from space";
  MarkSweep::VisitObjectReferences(obj, [this](Object* obj, Object* ref, const MemberOffset& offset,
      bool /* is_static */) ALWAYS_INLINE_LAMBDA NO_THREAD_SAFETY_ANALYSIS {
    mirror::Object* new_address = MarkObject(ref);
    if (new_address != ref) {
      DCHECK(new_address != nullptr);
      // Don't need to mark the card since we are updating the object address and not changing
      // the actual object it points to. Using SetFieldObjectWithoutWriteBarrier is better in this
      // case since it does not dirty cards or use additional memory.
      // Since we do not change the actual object, we can safely use non-transactional mode. Also
      // disable check as we could run inside a transaction.
      obj->SetFieldObjectWithoutWriteBarrier<false, false, kVerifyNone>(offset, new_address, false);
    }
  }, kMovingClasses);
  mirror::Class* klass = obj->GetClass<kVerifyNone>();
  if (UNLIKELY(klass->IsReferenceClass<kVerifyNone>())) {
    DelayReferenceReferent(klass, obj);
  }
}

// Scan anything that's on the mark stack.
void SemiSpace::ProcessMarkStack() {
  space::MallocSpace* promo_dest_space = NULL;
  accounting::SpaceBitmap* live_bitmap = NULL;
  if (generational_ && !whole_heap_collection_) {
    // During a bump pointer space only collection (when promotion is
    // enabled), we delay the live-bitmap marking of promoted objects
    // from MarkObject() until this function.
    promo_dest_space = GetHeap()->GetPrimaryFreeListSpace();
    live_bitmap = promo_dest_space->GetLiveBitmap();
    DCHECK(live_bitmap != nullptr);
    accounting::SpaceBitmap* mark_bitmap = promo_dest_space->GetMarkBitmap();
    DCHECK(mark_bitmap != nullptr);
    DCHECK_EQ(live_bitmap, mark_bitmap);
  }
  timings_.StartSplit("ProcessMarkStack");
  while (!mark_stack_->IsEmpty()) {
    Object* obj = mark_stack_->PopBack();
    if (generational_ && !whole_heap_collection_ && promo_dest_space->HasAddress(obj)) {
      // obj has just been promoted. Mark the live bitmap for it,
      // which is delayed from MarkObject().
      DCHECK(!live_bitmap->Test(obj));
      live_bitmap->Set(obj);
    }
    ScanObject(obj);
  }
  timings_.EndSplit();
}

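// Returns the marked address of obj: the object itself if it is immune, already in the to-space,
// or set in the heap's mark bitmap; its forwarding address (possibly null) if it is in the
// from-space; nullptr otherwise.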
inline Object* SemiSpace::GetMarkedForwardAddress(mirror::Object* obj) const
    SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
  // All immune objects are assumed marked.
  if (IsImmune(obj)) {
    return obj;
  }
  if (from_space_->HasAddress(obj)) {
    mirror::Object* forwarding_address = GetForwardingAddressInFromSpace(const_cast<Object*>(obj));
    return forwarding_address;  // Returns either the forwarding address or nullptr.
  } else if (to_space_->HasAddress(obj)) {
    // Should be unlikely.
    // Already forwarded, must be marked.
    return obj;
  }
  return heap_->GetMarkBitmap()->Test(obj) ? obj : nullptr;
}

void SemiSpace::SetToSpace(space::ContinuousMemMapAllocSpace* to_space) {
  DCHECK(to_space != nullptr);
  to_space_ = to_space;
}

void SemiSpace::SetFromSpace(space::ContinuousMemMapAllocSpace* from_space) {
  DCHECK(from_space != nullptr);
  from_space_ = from_space;
}

void SemiSpace::FinishPhase() {
  TimingLogger::ScopedSplit split("FinishPhase", &timings_);
  Heap* heap = GetHeap();
  timings_.NewSplit("PostGcVerification");
  heap->PostGcVerification(this);

  // Null the "to" and "from" spaces since compacting from one to the other isn't valid until
  // further action is done by the heap.
  to_space_ = nullptr;
  from_space_ = nullptr;

  // Update the cumulative statistics.
  total_freed_objects_ += GetFreedObjects() + GetFreedLargeObjects();
  total_freed_bytes_ += GetFreedBytes() + GetFreedLargeObjectBytes();

  // Ensure that the mark stack is empty.
  CHECK(mark_stack_->IsEmpty());

  // Update the cumulative loggers.
  cumulative_timings_.Start();
  cumulative_timings_.AddLogger(timings_);
  cumulative_timings_.End();

  // Clear all of the spaces' mark bitmaps.
  for (const auto& space : GetHeap()->GetContinuousSpaces()) {
    accounting::SpaceBitmap* bitmap = space->GetMarkBitmap();
    if (bitmap != nullptr &&
        space->GetGcRetentionPolicy() != space::kGcRetentionPolicyNeverCollect) {
      bitmap->Clear();
    }
  }
  mark_stack_->Reset();

  // Reset the marked large objects.
  space::LargeObjectSpace* large_objects = GetHeap()->GetLargeObjectsSpace();
  large_objects->GetMarkObjects()->Clear();

  if (generational_) {
    // Decide whether to do a whole heap collection or a bump pointer
    // space only collection at the next collection by updating
    // whole_heap_collection. Enable whole_heap_collection once every
    // kDefaultWholeHeapCollectionInterval collections.
    if (!whole_heap_collection_) {
      --whole_heap_collection_interval_counter_;
      DCHECK_GE(whole_heap_collection_interval_counter_, 0);
      if (whole_heap_collection_interval_counter_ == 0) {
        whole_heap_collection_ = true;
      }
    } else {
      DCHECK_EQ(whole_heap_collection_interval_counter_, 0);
      whole_heap_collection_interval_counter_ = kDefaultWholeHeapCollectionInterval;
      whole_heap_collection_ = false;
    }
  }
}

}  // namespace collector
}  // namespace gc
}  // namespace art