semi_space.cc revision f85c2fb317399ab540854cd7551ac47690366543
/*
 * Copyright (C) 2013 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "semi_space-inl.h"

#include <functional>
#include <numeric>
#include <climits>
#include <vector>

#include "base/logging.h"
#include "base/macros.h"
#include "base/mutex-inl.h"
#include "base/timing_logger.h"
#include "gc/accounting/heap_bitmap-inl.h"
#include "gc/accounting/mod_union_table.h"
#include "gc/accounting/remembered_set.h"
#include "gc/accounting/space_bitmap-inl.h"
#include "gc/heap.h"
#include "gc/reference_processor.h"
#include "gc/space/bump_pointer_space.h"
#include "gc/space/bump_pointer_space-inl.h"
#include "gc/space/image_space.h"
#include "gc/space/large_object_space.h"
#include "gc/space/space-inl.h"
#include "indirect_reference_table.h"
#include "intern_table.h"
#include "jni_internal.h"
#include "mark_sweep-inl.h"
#include "monitor.h"
#include "mirror/reference-inl.h"
#include "mirror/object-inl.h"
#include "runtime.h"
#include "thread-inl.h"
#include "thread_list.h"

using ::art::mirror::Object;

namespace art {
namespace gc {
namespace collector {

static constexpr bool kProtectFromSpace = true;
static constexpr bool kStoreStackTraces = false;
static constexpr size_t kBytesPromotedThreshold = 4 * MB;
static constexpr size_t kLargeObjectBytesAllocatedThreshold = 16 * MB;

void SemiSpace::BindBitmaps() {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  WriterMutexLock mu(self_, *Locks::heap_bitmap_lock_);
  // Mark all of the spaces we never collect as immune.
  for (const auto& space : GetHeap()->GetContinuousSpaces()) {
    if (space->GetLiveBitmap() != nullptr) {
      if (space == to_space_) {
        CHECK(to_space_->IsContinuousMemMapAllocSpace());
        to_space_->AsContinuousMemMapAllocSpace()->BindLiveToMarkBitmap();
      } else if (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyNeverCollect
                 || space->GetGcRetentionPolicy() == space::kGcRetentionPolicyFullCollect
                 // Add the main free list space and the non-moving
                 // space to the immune space if this is a bump pointer
                 // space only collection.
                 || (generational_ && !whole_heap_collection_ &&
                     (space == GetHeap()->GetNonMovingSpace() ||
                      space == GetHeap()->GetPrimaryFreeListSpace()))) {
        CHECK(immune_region_.AddContinuousSpace(space)) << "Failed to add space " << *space;
      }
    }
  }
  if (generational_ && !whole_heap_collection_) {
    // We won't collect the large object space if this is a bump pointer space only collection.
    is_large_object_space_immune_ = true;
  }
}

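// A note on the generational flag (see MarkNonForwardedObject() and FinishPhase() below): when
// generational_ is true, objects that survived the previous GC (those below
// last_gc_to_space_end_) are pseudo-promoted into the main free list space, and most cycles
// collect only the bump pointer space, falling back to a whole heap collection once the
// promotion or large object allocation thresholds above are exceeded.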
"" : " ") + "marksweep + semispace"), 91 to_space_(nullptr), 92 from_space_(nullptr), 93 generational_(generational), 94 last_gc_to_space_end_(nullptr), 95 bytes_promoted_(0), 96 bytes_promoted_since_last_whole_heap_collection_(0), 97 large_object_bytes_allocated_at_last_whole_heap_collection_(0), 98 whole_heap_collection_(true), 99 collector_name_(name_), 100 swap_semi_spaces_(true) { 101} 102 103void SemiSpace::RunPhases() { 104 Thread* self = Thread::Current(); 105 InitializePhase(); 106 // Semi-space collector is special since it is sometimes called with the mutators suspended 107 // during the zygote creation and collector transitions. If we already exclusively hold the 108 // mutator lock, then we can't lock it again since it will cause a deadlock. 109 if (Locks::mutator_lock_->IsExclusiveHeld(self)) { 110 GetHeap()->PreGcVerificationPaused(this); 111 GetHeap()->PrePauseRosAllocVerification(this); 112 MarkingPhase(); 113 ReclaimPhase(); 114 GetHeap()->PostGcVerificationPaused(this); 115 } else { 116 Locks::mutator_lock_->AssertNotHeld(self); 117 { 118 ScopedPause pause(this); 119 GetHeap()->PreGcVerificationPaused(this); 120 GetHeap()->PrePauseRosAllocVerification(this); 121 MarkingPhase(); 122 } 123 { 124 ReaderMutexLock mu(self, *Locks::mutator_lock_); 125 ReclaimPhase(); 126 } 127 GetHeap()->PostGcVerification(this); 128 } 129 FinishPhase(); 130} 131 132void SemiSpace::InitializePhase() { 133 TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings()); 134 mark_stack_ = heap_->GetMarkStack(); 135 DCHECK(mark_stack_ != nullptr); 136 immune_region_.Reset(); 137 is_large_object_space_immune_ = false; 138 saved_bytes_ = 0; 139 bytes_moved_ = 0; 140 objects_moved_ = 0; 141 self_ = Thread::Current(); 142 CHECK(from_space_->CanMoveObjects()) << "Attempting to move from " << *from_space_; 143 // Set the initial bitmap. 144 to_space_live_bitmap_ = to_space_->GetLiveBitmap(); 145 { 146 // TODO: I don't think we should need heap bitmap lock to Get the mark bitmap. 147 ReaderMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_); 148 mark_bitmap_ = heap_->GetMarkBitmap(); 149 } 150} 151 152void SemiSpace::ProcessReferences(Thread* self) { 153 WriterMutexLock mu(self, *Locks::heap_bitmap_lock_); 154 GetHeap()->GetReferenceProcessor()->ProcessReferences( 155 false, GetTimings(), GetCurrentIteration()->GetClearSoftReferences(), 156 &HeapReferenceMarkedCallback, &MarkObjectCallback, &ProcessMarkStackCallback, this); 157} 158 159void SemiSpace::MarkingPhase() { 160 TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings()); 161 CHECK(Locks::mutator_lock_->IsExclusiveHeld(self_)); 162 if (kStoreStackTraces) { 163 Locks::mutator_lock_->AssertExclusiveHeld(self_); 164 // Store the stack traces into the runtime fault string in case we Get a heap corruption 165 // related crash later. 166 ThreadState old_state = self_->SetStateUnsafe(kRunnable); 167 std::ostringstream oss; 168 Runtime* runtime = Runtime::Current(); 169 runtime->GetThreadList()->DumpForSigQuit(oss); 170 runtime->GetThreadList()->DumpNativeStacks(oss); 171 runtime->SetFaultMessage(oss.str()); 172 CHECK_EQ(self_->SetStateUnsafe(old_state), kRunnable); 173 } 174 // Revoke the thread local buffers since the GC may allocate into a RosAllocSpace and this helps 175 // to prevent fragmentation. 
void SemiSpace::MarkingPhase() {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  CHECK(Locks::mutator_lock_->IsExclusiveHeld(self_));
  if (kStoreStackTraces) {
    Locks::mutator_lock_->AssertExclusiveHeld(self_);
    // Store the stack traces into the runtime fault string in case we get a heap corruption
    // related crash later.
    ThreadState old_state = self_->SetStateUnsafe(kRunnable);
    std::ostringstream oss;
    Runtime* runtime = Runtime::Current();
    runtime->GetThreadList()->DumpForSigQuit(oss);
    runtime->GetThreadList()->DumpNativeStacks(oss);
    runtime->SetFaultMessage(oss.str());
    CHECK_EQ(self_->SetStateUnsafe(old_state), kRunnable);
  }
  // Revoke the thread local buffers since the GC may allocate into a RosAllocSpace and this helps
  // to prevent fragmentation.
  RevokeAllThreadLocalBuffers();
  if (generational_) {
    if (GetCurrentIteration()->GetGcCause() == kGcCauseExplicit ||
        GetCurrentIteration()->GetGcCause() == kGcCauseForNativeAlloc ||
        GetCurrentIteration()->GetClearSoftReferences()) {
      // If this is an explicit, native allocation-triggered, or last attempt
      // collection, collect the whole heap.
      whole_heap_collection_ = true;
    }
    if (whole_heap_collection_) {
      VLOG(heap) << "Whole heap collection";
      name_ = collector_name_ + " whole";
    } else {
      VLOG(heap) << "Bump pointer space only collection";
      name_ = collector_name_ + " bps";
    }
  }

  if (!generational_ || whole_heap_collection_) {
    // If non-generational, always clear soft references.
    // If generational, clear soft references if this is a whole heap collection.
    GetCurrentIteration()->SetClearSoftReferences(true);
  }
  Locks::mutator_lock_->AssertExclusiveHeld(self_);
  if (generational_) {
    // If last_gc_to_space_end_ is out of the bounds of the from-space
    // (the to-space from the last GC), then point it to the beginning of
    // the from-space. For example, on the very first GC or the
    // pre-zygote compaction.
    if (!from_space_->HasAddress(reinterpret_cast<mirror::Object*>(last_gc_to_space_end_))) {
      last_gc_to_space_end_ = from_space_->Begin();
    }
    // Reset this before the marking starts below.
    bytes_promoted_ = 0;
  }
  // Assume the cleared space is already empty.
  BindBitmaps();
  // Process dirty cards and add dirty cards to mod-union tables.
  heap_->ProcessCards(GetTimings(), kUseRememberedSet && generational_);
  // Clear the whole card table since we cannot get any additional dirty cards during the
  // paused GC. This saves memory but only works for pause-the-world collectors.
  t.NewTiming("ClearCardTable");
  heap_->GetCardTable()->ClearCardTable();
  // Need to do this before the checkpoint since we don't want any threads to add references to
  // the live stack during the recursive mark.
  t.NewTiming("SwapStacks");
  if (kUseThreadLocalAllocationStack) {
    TimingLogger::ScopedTiming t("RevokeAllThreadLocalAllocationStacks", GetTimings());
    heap_->RevokeAllThreadLocalAllocationStacks(self_);
  }
  heap_->SwapStacks(self_);
  {
    WriterMutexLock mu(self_, *Locks::heap_bitmap_lock_);
    MarkRoots();
    // Mark roots of immune spaces.
    UpdateAndMarkModUnion();
    // Recursively mark remaining objects.
    MarkReachableObjects();
  }
  ProcessReferences(self_);
  {
    ReaderMutexLock mu(self_, *Locks::heap_bitmap_lock_);
    SweepSystemWeaks();
  }
  // Revoke buffers before measuring how many objects were moved since the TLABs need to be revoked
  // before they are properly counted.
  RevokeAllThreadLocalBuffers();
  // Record freed memory.
  const int64_t from_bytes = from_space_->GetBytesAllocated();
  const int64_t to_bytes = bytes_moved_;
  const uint64_t from_objects = from_space_->GetObjectsAllocated();
  const uint64_t to_objects = objects_moved_;
  CHECK_LE(to_objects, from_objects);
  // Note: Freed bytes can be negative if we copy from a compacted space to a free-list backed
  // space.
  RecordFree(ObjectBytePair(from_objects - to_objects, from_bytes - to_bytes));
  // Clear and protect the from space.
  from_space_->Clear();
  VLOG(heap) << "Protecting from_space_: " << *from_space_;
  from_space_->GetMemMap()->Protect(kProtectFromSpace ? PROT_NONE : PROT_READ);
  heap_->PreSweepingGcVerification(this);
  if (swap_semi_spaces_) {
    heap_->SwapSemiSpaces();
  }
}

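// For each immune space, update and mark its mod-union table so that references from the immune
// space into the collected spaces are forwarded. Immune spaces without a mod-union table (the
// non-moving and main free list spaces during a bump pointer space only collection) are handled
// later in MarkReachableObjects() via their remembered set or live bitmap.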
void SemiSpace::UpdateAndMarkModUnion() {
  for (auto& space : heap_->GetContinuousSpaces()) {
    // If the space is immune then we need to mark the references to other spaces.
    if (immune_region_.ContainsSpace(space)) {
      accounting::ModUnionTable* table = heap_->FindModUnionTableFromSpace(space);
      if (table != nullptr) {
        // TODO: Improve naming.
        TimingLogger::ScopedTiming t(
            space->IsZygoteSpace() ? "UpdateAndMarkZygoteModUnionTable" :
                                     "UpdateAndMarkImageModUnionTable",
            GetTimings());
        table->UpdateAndMarkReferences(MarkHeapReferenceCallback, this);
      } else if (heap_->FindRememberedSetFromSpace(space) != nullptr) {
        DCHECK(kUseRememberedSet);
        // If this is a bump pointer space only collection, the
        // non-moving space is added to the immune space. The
        // non-moving space doesn't have a mod union table, but has a
        // remembered set. Its dirty cards will be scanned later in
        // MarkReachableObjects().
        DCHECK(generational_ && !whole_heap_collection_ &&
               (space == heap_->GetNonMovingSpace() || space == heap_->GetPrimaryFreeListSpace()))
            << "Space " << space->GetName() << " "
            << "generational_=" << generational_ << " "
            << "whole_heap_collection_=" << whole_heap_collection_ << " ";
      } else {
        DCHECK(!kUseRememberedSet);
        // If this is a bump pointer space only collection, the
        // non-moving space is added to the immune space. But the
        // non-moving space doesn't have a mod union table. Instead,
        // its live bitmap will be scanned later in
        // MarkReachableObjects().
        DCHECK(generational_ && !whole_heap_collection_ &&
               (space == heap_->GetNonMovingSpace() || space == heap_->GetPrimaryFreeListSpace()))
            << "Space " << space->GetName() << " "
            << "generational_=" << generational_ << " "
            << "whole_heap_collection_=" << whole_heap_collection_ << " ";
      }
    }
  }
}

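// Dispatches each object visited during a bitmap or remembered-set scan to
// SemiSpace::ScanObject(), which forwards all of the object's references.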
class SemiSpaceScanObjectVisitor {
 public:
  explicit SemiSpaceScanObjectVisitor(SemiSpace* ss) : semi_space_(ss) {}
  void operator()(Object* obj) const EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_,
                                                              Locks::heap_bitmap_lock_) {
    DCHECK(obj != nullptr);
    semi_space_->ScanObject(obj);
  }
 private:
  SemiSpace* const semi_space_;
};

// Used to verify that there are no references to the from-space.
class SemiSpaceVerifyNoFromSpaceReferencesVisitor {
 public:
  explicit SemiSpaceVerifyNoFromSpaceReferencesVisitor(space::ContinuousMemMapAllocSpace* from_space) :
      from_space_(from_space) {}

  void operator()(Object* obj, MemberOffset offset, bool /* is_static */) const
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) ALWAYS_INLINE {
    mirror::Object* ref = obj->GetFieldObject<mirror::Object>(offset);
    if (from_space_->HasAddress(ref)) {
      Runtime::Current()->GetHeap()->DumpObject(LOG(INFO), obj);
      LOG(FATAL) << ref << " found in from space";
    }
  }
 private:
  space::ContinuousMemMapAllocSpace* from_space_;
};

void SemiSpace::VerifyNoFromSpaceReferences(Object* obj) {
  DCHECK(!from_space_->HasAddress(obj)) << "Scanning object " << obj << " in from space";
  SemiSpaceVerifyNoFromSpaceReferencesVisitor visitor(from_space_);
  obj->VisitReferences<kMovingClasses>(visitor, VoidFunctor());
}

class SemiSpaceVerifyNoFromSpaceReferencesObjectVisitor {
 public:
  explicit SemiSpaceVerifyNoFromSpaceReferencesObjectVisitor(SemiSpace* ss) : semi_space_(ss) {}
  void operator()(Object* obj) const
      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_) {
    DCHECK(obj != nullptr);
    semi_space_->VerifyNoFromSpaceReferences(obj);
  }
 private:
  SemiSpace* const semi_space_;
};

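// Marks everything reachable from the already-marked roots: first the objects on the live
// (allocation) stack, then the dirty cards or live bitmaps of immune spaces that lack a
// mod-union table, then the immune large object space if applicable, and finally drains the
// mark stack.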
void SemiSpace::MarkReachableObjects() {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  {
    TimingLogger::ScopedTiming t2("MarkStackAsLive", GetTimings());
    accounting::ObjectStack* live_stack = heap_->GetLiveStack();
    heap_->MarkAllocStackAsLive(live_stack);
    live_stack->Reset();
  }
  t.NewTiming("UpdateAndMarkRememberedSets");
  for (auto& space : heap_->GetContinuousSpaces()) {
    // If the space is immune and has no mod union table (the
    // non-moving space when the bump pointer space only collection is
    // enabled), then we need to scan its live bitmap or dirty cards as roots
    // (including the objects on the live stack which have just been marked
    // in the live bitmap above in MarkAllocStackAsLive().)
    if (immune_region_.ContainsSpace(space) &&
        heap_->FindModUnionTableFromSpace(space) == nullptr) {
      DCHECK(generational_ && !whole_heap_collection_ &&
             (space == GetHeap()->GetNonMovingSpace() || space == GetHeap()->GetPrimaryFreeListSpace()));
      accounting::RememberedSet* rem_set = heap_->FindRememberedSetFromSpace(space);
      if (kUseRememberedSet) {
        DCHECK(rem_set != nullptr);
        rem_set->UpdateAndMarkReferences(MarkHeapReferenceCallback, DelayReferenceReferentCallback,
                                         from_space_, this);
        if (kIsDebugBuild) {
          // Verify that there are no from-space references that
          // remain in the space, that is, the remembered set (and the
          // card table) didn't miss any from-space references in the
          // space.
          accounting::ContinuousSpaceBitmap* live_bitmap = space->GetLiveBitmap();
          SemiSpaceVerifyNoFromSpaceReferencesObjectVisitor visitor(this);
          live_bitmap->VisitMarkedRange(reinterpret_cast<uintptr_t>(space->Begin()),
                                        reinterpret_cast<uintptr_t>(space->End()),
                                        visitor);
        }
      } else {
        DCHECK(rem_set == nullptr);
        accounting::ContinuousSpaceBitmap* live_bitmap = space->GetLiveBitmap();
        SemiSpaceScanObjectVisitor visitor(this);
        live_bitmap->VisitMarkedRange(reinterpret_cast<uintptr_t>(space->Begin()),
                                      reinterpret_cast<uintptr_t>(space->End()),
                                      visitor);
      }
    }
  }

  if (is_large_object_space_immune_) {
    TimingLogger::ScopedTiming t("VisitLargeObjects", GetTimings());
    DCHECK(generational_ && !whole_heap_collection_);
    // Delay copying the live set to the marked set until here from
    // BindBitmaps() as the large objects on the allocation stack may
    // be newly added to the live set above in MarkAllocStackAsLive().
    GetHeap()->GetLargeObjectsSpace()->CopyLiveToMarked();

    // When the large object space is immune, we need to scan the
    // large object space as roots as they contain references to their
    // classes (primitive array classes) that could move though they
    // don't contain any other references.
    space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
    accounting::LargeObjectBitmap* large_live_bitmap = large_object_space->GetLiveBitmap();
    SemiSpaceScanObjectVisitor visitor(this);
    large_live_bitmap->VisitMarkedRange(reinterpret_cast<uintptr_t>(large_object_space->Begin()),
                                        reinterpret_cast<uintptr_t>(large_object_space->End()),
                                        visitor);
  }
  // Recursively process the mark stack.
  ProcessMarkStack();
}

void SemiSpace::ReclaimPhase() {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  WriterMutexLock mu(self_, *Locks::heap_bitmap_lock_);
  // Reclaim unmarked objects.
  Sweep(false);
  // Swap the live and mark bitmaps for each space that we modified. This is an
  // optimization that enables us to not clear live bits inside of the sweep. Only swaps unbound
  // bitmaps.
  SwapBitmaps();
  // Unbind the live and mark bitmaps.
  GetHeap()->UnBindBitmaps();
  if (saved_bytes_ > 0) {
    VLOG(heap) << "Avoided dirtying " << PrettySize(saved_bytes_);
  }
  if (generational_) {
    // Record the end (top) of the to space so we can distinguish
    // between objects that were allocated since the last GC and the
    // older objects.
    last_gc_to_space_end_ = to_space_->End();
  }
}

void SemiSpace::ResizeMarkStack(size_t new_size) {
  std::vector<Object*> temp(mark_stack_->Begin(), mark_stack_->End());
  CHECK_LE(mark_stack_->Size(), new_size);
  mark_stack_->Resize(new_size);
  for (const auto& obj : temp) {
    mark_stack_->PushBack(obj);
  }
}

inline void SemiSpace::MarkStackPush(Object* obj) {
  if (UNLIKELY(mark_stack_->Size() >= mark_stack_->Capacity())) {
    ResizeMarkStack(mark_stack_->Capacity() * 2);
  }
  // The object must be pushed on to the mark stack.
  mark_stack_->PushBack(obj);
}

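// Copies size bytes from src to dest while trying to avoid dirtying pages. For copies larger
// than a page, page-sized chunks whose source words are all zero are skipped entirely, since the
// destination is known to be zero-initialized (checked in debug builds). Returns the number of
// bytes saved from being dirtied.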
static inline size_t CopyAvoidingDirtyingPages(void* dest, const void* src, size_t size) {
  if (LIKELY(size <= static_cast<size_t>(kPageSize))) {
    // We will dirty the current page and somewhere in the middle of the next page. This means
    // that the next object copied will also dirty that page.
    // TODO: Worth considering the last object copied? We may end up dirtying one page which is
    // not necessary per GC.
    memcpy(dest, src, size);
    return 0;
  }
  size_t saved_bytes = 0;
  byte* byte_dest = reinterpret_cast<byte*>(dest);
  if (kIsDebugBuild) {
    for (size_t i = 0; i < size; ++i) {
      CHECK_EQ(byte_dest[i], 0U);
    }
  }
  // Process the start of the page. The page must already be dirty, don't bother with checking.
  const byte* byte_src = reinterpret_cast<const byte*>(src);
  const byte* limit = byte_src + size;
  size_t page_remain = AlignUp(byte_dest, kPageSize) - byte_dest;
  // Copy the bytes until the start of the next page.
  memcpy(dest, src, page_remain);
  byte_src += page_remain;
  byte_dest += page_remain;
  DCHECK_ALIGNED(reinterpret_cast<uintptr_t>(byte_dest), kPageSize);
  DCHECK_ALIGNED(reinterpret_cast<uintptr_t>(byte_dest), sizeof(uintptr_t));
  DCHECK_ALIGNED(reinterpret_cast<uintptr_t>(byte_src), sizeof(uintptr_t));
  while (byte_src + kPageSize < limit) {
    bool all_zero = true;
    uintptr_t* word_dest = reinterpret_cast<uintptr_t*>(byte_dest);
    const uintptr_t* word_src = reinterpret_cast<const uintptr_t*>(byte_src);
    for (size_t i = 0; i < kPageSize / sizeof(*word_src); ++i) {
      // Assumes the destination of the copy is all zeros.
      if (word_src[i] != 0) {
        all_zero = false;
        word_dest[i] = word_src[i];
      }
    }
    if (all_zero) {
      // Avoided copying into the page since it was all zeros.
      saved_bytes += kPageSize;
    }
    byte_src += kPageSize;
    byte_dest += kPageSize;
  }
  // Handle the part of the page at the end.
  memcpy(byte_dest, byte_src, limit - byte_src);
  return saved_bytes;
}

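// Copies a live from-space object that has not been forwarded yet. In generational mode, objects
// allocated before the last GC (below last_gc_to_space_end_) are pseudo-promoted into the main
// free list space; everything else, including failed promotions, is copied into the to-space.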
mirror::Object* SemiSpace::MarkNonForwardedObject(mirror::Object* obj) {
  size_t object_size = obj->SizeOf();
  size_t bytes_allocated;
  mirror::Object* forward_address = nullptr;
  if (generational_ && reinterpret_cast<byte*>(obj) < last_gc_to_space_end_) {
    // If it's allocated before the last GC (older), move
    // (pseudo-promote) it to the main free list space (as sort
    // of an old generation.)
    space::MallocSpace* promo_dest_space = GetHeap()->GetPrimaryFreeListSpace();
    forward_address = promo_dest_space->AllocThreadUnsafe(self_, object_size, &bytes_allocated,
                                                          nullptr);
    if (UNLIKELY(forward_address == nullptr)) {
      // If out of space, fall back to the to-space.
      forward_address = to_space_->AllocThreadUnsafe(self_, object_size, &bytes_allocated, nullptr);
    } else {
      bytes_promoted_ += bytes_allocated;
      // Dirty the card at the destination as it may contain
      // references (including the class pointer) to the bump pointer
      // space.
      GetHeap()->WriteBarrierEveryFieldOf(forward_address);
      // Handle the bitmaps marking.
      accounting::ContinuousSpaceBitmap* live_bitmap = promo_dest_space->GetLiveBitmap();
      DCHECK(live_bitmap != nullptr);
      accounting::ContinuousSpaceBitmap* mark_bitmap = promo_dest_space->GetMarkBitmap();
      DCHECK(mark_bitmap != nullptr);
      DCHECK(!live_bitmap->Test(forward_address));
      if (!whole_heap_collection_) {
        // If collecting the bump pointer spaces only, live_bitmap == mark_bitmap.
        DCHECK_EQ(live_bitmap, mark_bitmap);

        // If a bump pointer space only collection, delay the live
        // bitmap marking of the promoted object until it's popped off
        // the mark stack (ProcessMarkStack()). The rationale: we may
        // be in the middle of scanning the objects in the promo
        // destination space for
        // non-moving-space-to-bump-pointer-space references by
        // iterating over the marked bits of the live bitmap
        // (MarkReachableObjects()). If we don't delay it (and instead
        // mark the promoted object here), the above promo destination
        // space scan could encounter the just-promoted object and
        // forward the references in the promoted object's fields even
        // though it is pushed onto the mark stack. If this happens,
        // the promoted object would be in an inconsistent state, that
        // is, it's on the mark stack (gray) but its fields are
        // already forwarded (black), which would cause a
        // DCHECK(!to_space_->HasAddress(obj)) failure below.
      } else {
        // Mark forward_address on the live bit map.
        live_bitmap->Set(forward_address);
        // Mark forward_address on the mark bit map.
        DCHECK(!mark_bitmap->Test(forward_address));
        mark_bitmap->Set(forward_address);
      }
    }
    DCHECK(forward_address != nullptr);
  } else {
    // If it's allocated after the last GC (younger), copy it to the to-space.
    forward_address = to_space_->AllocThreadUnsafe(self_, object_size, &bytes_allocated, nullptr);
  }
  CHECK(forward_address != nullptr) << "Out of memory in the to-space.";
  ++objects_moved_;
  bytes_moved_ += bytes_allocated;
  // Copy over the object and add it to the mark stack since we still need to update its
  // references.
  saved_bytes_ +=
      CopyAvoidingDirtyingPages(reinterpret_cast<void*>(forward_address), obj, object_size);
  if (kUseBakerOrBrooksReadBarrier) {
    obj->AssertReadBarrierPointer();
    if (kUseBrooksReadBarrier) {
      DCHECK_EQ(forward_address->GetReadBarrierPointer(), obj);
      forward_address->SetReadBarrierPointer(forward_address);
    }
    forward_address->AssertReadBarrierPointer();
  }
  if (to_space_live_bitmap_ != nullptr) {
    to_space_live_bitmap_->Set(forward_address);
  }
  DCHECK(to_space_->HasAddress(forward_address) ||
         (generational_ && GetHeap()->GetPrimaryFreeListSpace()->HasAddress(forward_address)));
  return forward_address;
}

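// C-style callbacks passed to the runtime and the accounting code. Each one recovers the
// SemiSpace instance from the opaque arg pointer and forwards to the corresponding member
// function.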
void SemiSpace::ProcessMarkStackCallback(void* arg) {
  reinterpret_cast<SemiSpace*>(arg)->ProcessMarkStack();
}

mirror::Object* SemiSpace::MarkObjectCallback(mirror::Object* root, void* arg) {
  auto ref = StackReference<mirror::Object>::FromMirrorPtr(root);
  reinterpret_cast<SemiSpace*>(arg)->MarkObject(&ref);
  return ref.AsMirrorPtr();
}

void SemiSpace::MarkHeapReferenceCallback(mirror::HeapReference<mirror::Object>* obj_ptr,
                                          void* arg) {
  reinterpret_cast<SemiSpace*>(arg)->MarkObject(obj_ptr);
}

void SemiSpace::DelayReferenceReferentCallback(mirror::Class* klass, mirror::Reference* ref,
                                               void* arg) {
  reinterpret_cast<SemiSpace*>(arg)->DelayReferenceReferent(klass, ref);
}

void SemiSpace::MarkRootCallback(Object** root, void* arg, uint32_t /*thread_id*/,
                                 RootType /*root_type*/) {
  auto ref = StackReference<mirror::Object>::FromMirrorPtr(*root);
  reinterpret_cast<SemiSpace*>(arg)->MarkObject(&ref);
  if (*root != ref.AsMirrorPtr()) {
    *root = ref.AsMirrorPtr();
  }
}

// Marks all objects in the root set.
void SemiSpace::MarkRoots() {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  Runtime::Current()->VisitRoots(MarkRootCallback, this);
}

bool SemiSpace::HeapReferenceMarkedCallback(mirror::HeapReference<mirror::Object>* object,
                                            void* arg) {
  mirror::Object* obj = object->AsMirrorPtr();
  mirror::Object* new_obj =
      reinterpret_cast<SemiSpace*>(arg)->GetMarkedForwardAddress(obj);
  if (new_obj == nullptr) {
    return false;
  }
  if (new_obj != obj) {
    // Write barrier is not necessary since it still points to the same object, just at a different
    // address.
    object->Assign(new_obj);
  }
  return true;
}

mirror::Object* SemiSpace::MarkedForwardingAddressCallback(mirror::Object* object, void* arg) {
  return reinterpret_cast<SemiSpace*>(arg)->GetMarkedForwardAddress(object);
}

void SemiSpace::SweepSystemWeaks() {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  Runtime::Current()->SweepSystemWeaks(MarkedForwardingAddressCallback, this);
}

bool SemiSpace::ShouldSweepSpace(space::ContinuousSpace* space) const {
  return space != from_space_ && space != to_space_ && !immune_region_.ContainsSpace(space);
}

void SemiSpace::Sweep(bool swap_bitmaps) {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  DCHECK(mark_stack_->IsEmpty());
  for (const auto& space : GetHeap()->GetContinuousSpaces()) {
    if (space->IsContinuousMemMapAllocSpace()) {
      space::ContinuousMemMapAllocSpace* alloc_space = space->AsContinuousMemMapAllocSpace();
      if (!ShouldSweepSpace(alloc_space)) {
        continue;
      }
      TimingLogger::ScopedTiming split(
          alloc_space->IsZygoteSpace() ? "SweepZygoteSpace" : "SweepAllocSpace", GetTimings());
      RecordFree(alloc_space->Sweep(swap_bitmaps));
    }
  }
  if (!is_large_object_space_immune_) {
    SweepLargeObjects(swap_bitmaps);
  }
}

void SemiSpace::SweepLargeObjects(bool swap_bitmaps) {
  DCHECK(!is_large_object_space_immune_);
  TimingLogger::ScopedTiming split("SweepLargeObjects", GetTimings());
  RecordFreeLOS(heap_->GetLargeObjectsSpace()->Sweep(swap_bitmaps));
}

// Process the "referent" field in a java.lang.ref.Reference. If the referent has not yet been
// marked, put it on the appropriate list in the heap for later processing.
void SemiSpace::DelayReferenceReferent(mirror::Class* klass, mirror::Reference* reference) {
  heap_->GetReferenceProcessor()->DelayReferenceReferent(klass, reference,
                                                         &HeapReferenceMarkedCallback, this);
}

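// Visitor used by ScanObject(): marks (forwards) each reference field of the visited object in
// place, and hands java.lang.ref.Reference instances to DelayReferenceReferent() for delayed
// processing.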
class SemiSpaceMarkObjectVisitor {
 public:
  explicit SemiSpaceMarkObjectVisitor(SemiSpace* collector) : collector_(collector) {
  }

  void operator()(Object* obj, MemberOffset offset, bool /* is_static */) const ALWAYS_INLINE
      EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
    // Object was already verified when we scanned it.
    collector_->MarkObject(obj->GetFieldObjectReferenceAddr<kVerifyNone>(offset));
  }

  void operator()(mirror::Class* klass, mirror::Reference* ref) const
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
    collector_->DelayReferenceReferent(klass, ref);
  }

 private:
  SemiSpace* const collector_;
};

// Visit all of the references of an object and update.
void SemiSpace::ScanObject(Object* obj) {
  DCHECK(!from_space_->HasAddress(obj)) << "Scanning object " << obj << " in from space";
  SemiSpaceMarkObjectVisitor visitor(this);
  obj->VisitReferences<kMovingClasses>(visitor, visitor);
}

// Scan anything that's on the mark stack.
void SemiSpace::ProcessMarkStack() {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  space::MallocSpace* promo_dest_space = nullptr;
  accounting::ContinuousSpaceBitmap* live_bitmap = nullptr;
  if (generational_ && !whole_heap_collection_) {
    // If a bump pointer space only collection (and promotion is
    // enabled), we delay the live-bitmap marking of promoted objects
    // from MarkObject() until this function.
    promo_dest_space = GetHeap()->GetPrimaryFreeListSpace();
    live_bitmap = promo_dest_space->GetLiveBitmap();
    DCHECK(live_bitmap != nullptr);
    accounting::ContinuousSpaceBitmap* mark_bitmap = promo_dest_space->GetMarkBitmap();
    DCHECK(mark_bitmap != nullptr);
    DCHECK_EQ(live_bitmap, mark_bitmap);
  }
  while (!mark_stack_->IsEmpty()) {
    Object* obj = mark_stack_->PopBack();
    if (generational_ && !whole_heap_collection_ && promo_dest_space->HasAddress(obj)) {
      // obj has just been promoted. Mark the live bitmap for it,
      // which is delayed from MarkObject().
      DCHECK(!live_bitmap->Test(obj));
      live_bitmap->Set(obj);
    }
    ScanObject(obj);
  }
}

inline Object* SemiSpace::GetMarkedForwardAddress(mirror::Object* obj) const
    SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
  // All immune objects are assumed marked.
  if (immune_region_.ContainsObject(obj)) {
    return obj;
  }
  if (from_space_->HasAddress(obj)) {
    // Returns either the forwarding address or nullptr.
    return GetForwardingAddressInFromSpace(obj);
  } else if (to_space_->HasAddress(obj)) {
    // Should be unlikely.
    // Already forwarded, must be marked.
    return obj;
  }
  return mark_bitmap_->Test(obj) ? obj : nullptr;
}

void SemiSpace::SetToSpace(space::ContinuousMemMapAllocSpace* to_space) {
  DCHECK(to_space != nullptr);
  to_space_ = to_space;
}

void SemiSpace::SetFromSpace(space::ContinuousMemMapAllocSpace* from_space) {
  DCHECK(from_space != nullptr);
  from_space_ = from_space;
}

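// Tears the collector down after a cycle and, in generational mode, decides whether the next
// collection should be whole-heap: it becomes whole-heap once the bytes promoted since the last
// whole heap collection reach kBytesPromotedThreshold, or the large object space has grown by
// kLargeObjectBytesAllocatedThreshold since then.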
void SemiSpace::FinishPhase() {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  // Null the "to" and "from" spaces since compacting from one to the other isn't valid until
  // further action is done by the heap.
  to_space_ = nullptr;
  from_space_ = nullptr;
  CHECK(mark_stack_->IsEmpty());
  mark_stack_->Reset();
  if (generational_) {
    // Decide whether to do a whole heap collection or a bump pointer
    // space only collection at the next collection by updating
    // whole_heap_collection_.
    if (!whole_heap_collection_) {
      // Enable whole_heap_collection_ if the bytes promoted since the
      // last whole heap collection or the large object bytes
      // allocated exceed a threshold.
      bytes_promoted_since_last_whole_heap_collection_ += bytes_promoted_;
      bool bytes_promoted_threshold_exceeded =
          bytes_promoted_since_last_whole_heap_collection_ >= kBytesPromotedThreshold;
      uint64_t current_los_bytes_allocated = GetHeap()->GetLargeObjectsSpace()->GetBytesAllocated();
      uint64_t last_los_bytes_allocated =
          large_object_bytes_allocated_at_last_whole_heap_collection_;
      bool large_object_bytes_threshold_exceeded =
          current_los_bytes_allocated >=
          last_los_bytes_allocated + kLargeObjectBytesAllocatedThreshold;
      if (bytes_promoted_threshold_exceeded || large_object_bytes_threshold_exceeded) {
        whole_heap_collection_ = true;
      }
    } else {
      // Reset the counters.
      bytes_promoted_since_last_whole_heap_collection_ = bytes_promoted_;
      large_object_bytes_allocated_at_last_whole_heap_collection_ =
          GetHeap()->GetLargeObjectsSpace()->GetBytesAllocated();
      whole_heap_collection_ = false;
    }
  }
  // Clear all of the spaces' mark bitmaps.
  WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
  heap_->ClearMarkedObjects();
}

void SemiSpace::RevokeAllThreadLocalBuffers() {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  GetHeap()->RevokeAllThreadLocalBuffers();
}

}  // namespace collector
}  // namespace gc
}  // namespace art