semi_space.cc revision be0562fb14e6754ee932b8d9c97e2a6df3a91119
/*
 * Copyright (C) 2013 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "semi_space-inl.h"

#include <functional>
#include <numeric>
#include <climits>
#include <vector>

#include "base/logging.h"
#include "base/macros.h"
#include "base/mutex-inl.h"
#include "base/timing_logger.h"
#include "gc/accounting/heap_bitmap-inl.h"
#include "gc/accounting/mod_union_table.h"
#include "gc/accounting/remembered_set.h"
#include "gc/accounting/space_bitmap-inl.h"
#include "gc/heap.h"
#include "gc/reference_processor.h"
#include "gc/space/bump_pointer_space.h"
#include "gc/space/bump_pointer_space-inl.h"
#include "gc/space/image_space.h"
#include "gc/space/large_object_space.h"
#include "gc/space/space-inl.h"
#include "indirect_reference_table.h"
#include "intern_table.h"
#include "jni_internal.h"
#include "mark_sweep-inl.h"
#include "monitor.h"
#include "mirror/reference-inl.h"
#include "mirror/object-inl.h"
#include "runtime.h"
#include "thread-inl.h"
#include "thread_list.h"

using ::art::mirror::Object;

namespace art {
namespace gc {
namespace collector {

static constexpr bool kProtectFromSpace = true;
static constexpr bool kStoreStackTraces = false;
static constexpr size_t kBytesPromotedThreshold = 4 * MB;
static constexpr size_t kLargeObjectBytesAllocatedThreshold = 16 * MB;

void SemiSpace::BindBitmaps() {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  WriterMutexLock mu(self_, *Locks::heap_bitmap_lock_);
  // Mark all of the spaces we never collect as immune.
  for (const auto& space : GetHeap()->GetContinuousSpaces()) {
    if (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyNeverCollect ||
        space->GetGcRetentionPolicy() == space::kGcRetentionPolicyFullCollect) {
      CHECK(immune_region_.AddContinuousSpace(space)) << "Failed to add space " << *space;
    } else if (space->GetLiveBitmap() != nullptr) {
      if (space == to_space_ || collect_from_space_only_) {
        if (collect_from_space_only_) {
          // Bind the main free list space and the non-moving space to the immune space if a bump
          // pointer space only collection.
          CHECK(space == to_space_ || space == GetHeap()->GetPrimaryFreeListSpace() ||
                space == GetHeap()->GetNonMovingSpace());
        }
        CHECK(space->IsContinuousMemMapAllocSpace());
        space->AsContinuousMemMapAllocSpace()->BindLiveToMarkBitmap();
      }
    }
  }
  if (collect_from_space_only_) {
    // We won't collect the large object space if a bump pointer space only collection.
    is_large_object_space_immune_ = true;
  }
}

SemiSpace::SemiSpace(Heap* heap, bool generational, const std::string& name_prefix)
    : GarbageCollector(heap,
                       name_prefix + (name_prefix.empty() ? "" : " ") + "marksweep + semispace"),
"" : " ") + "marksweep + semispace"), 91 to_space_(nullptr), 92 from_space_(nullptr), 93 generational_(generational), 94 last_gc_to_space_end_(nullptr), 95 bytes_promoted_(0), 96 bytes_promoted_since_last_whole_heap_collection_(0), 97 large_object_bytes_allocated_at_last_whole_heap_collection_(0), 98 collect_from_space_only_(generational), 99 collector_name_(name_), 100 swap_semi_spaces_(true) { 101} 102 103void SemiSpace::RunPhases() { 104 Thread* self = Thread::Current(); 105 InitializePhase(); 106 // Semi-space collector is special since it is sometimes called with the mutators suspended 107 // during the zygote creation and collector transitions. If we already exclusively hold the 108 // mutator lock, then we can't lock it again since it will cause a deadlock. 109 if (Locks::mutator_lock_->IsExclusiveHeld(self)) { 110 GetHeap()->PreGcVerificationPaused(this); 111 GetHeap()->PrePauseRosAllocVerification(this); 112 MarkingPhase(); 113 ReclaimPhase(); 114 GetHeap()->PostGcVerificationPaused(this); 115 } else { 116 Locks::mutator_lock_->AssertNotHeld(self); 117 { 118 ScopedPause pause(this); 119 GetHeap()->PreGcVerificationPaused(this); 120 GetHeap()->PrePauseRosAllocVerification(this); 121 MarkingPhase(); 122 } 123 { 124 ReaderMutexLock mu(self, *Locks::mutator_lock_); 125 ReclaimPhase(); 126 } 127 GetHeap()->PostGcVerification(this); 128 } 129 FinishPhase(); 130} 131 132void SemiSpace::InitializePhase() { 133 TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings()); 134 mark_stack_ = heap_->GetMarkStack(); 135 DCHECK(mark_stack_ != nullptr); 136 immune_region_.Reset(); 137 is_large_object_space_immune_ = false; 138 saved_bytes_ = 0; 139 bytes_moved_ = 0; 140 objects_moved_ = 0; 141 self_ = Thread::Current(); 142 CHECK(from_space_->CanMoveObjects()) << "Attempting to move from " << *from_space_; 143 // Set the initial bitmap. 144 to_space_live_bitmap_ = to_space_->GetLiveBitmap(); 145 { 146 // TODO: I don't think we should need heap bitmap lock to Get the mark bitmap. 147 ReaderMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_); 148 mark_bitmap_ = heap_->GetMarkBitmap(); 149 } 150 if (generational_) { 151 promo_dest_space_ = GetHeap()->GetPrimaryFreeListSpace(); 152 } 153 fallback_space_ = GetHeap()->GetNonMovingSpace(); 154} 155 156void SemiSpace::ProcessReferences(Thread* self) { 157 WriterMutexLock mu(self, *Locks::heap_bitmap_lock_); 158 GetHeap()->GetReferenceProcessor()->ProcessReferences( 159 false, GetTimings(), GetCurrentIteration()->GetClearSoftReferences(), 160 &HeapReferenceMarkedCallback, &MarkObjectCallback, &ProcessMarkStackCallback, this); 161} 162 163void SemiSpace::MarkingPhase() { 164 TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings()); 165 CHECK(Locks::mutator_lock_->IsExclusiveHeld(self_)); 166 if (kStoreStackTraces) { 167 Locks::mutator_lock_->AssertExclusiveHeld(self_); 168 // Store the stack traces into the runtime fault string in case we Get a heap corruption 169 // related crash later. 170 ThreadState old_state = self_->SetStateUnsafe(kRunnable); 171 std::ostringstream oss; 172 Runtime* runtime = Runtime::Current(); 173 runtime->GetThreadList()->DumpForSigQuit(oss); 174 runtime->GetThreadList()->DumpNativeStacks(oss); 175 runtime->SetFaultMessage(oss.str()); 176 CHECK_EQ(self_->SetStateUnsafe(old_state), kRunnable); 177 } 178 // Revoke the thread local buffers since the GC may allocate into a RosAllocSpace and this helps 179 // to prevent fragmentation. 
  RevokeAllThreadLocalBuffers();
  if (generational_) {
    if (GetCurrentIteration()->GetGcCause() == kGcCauseExplicit ||
        GetCurrentIteration()->GetGcCause() == kGcCauseForNativeAlloc ||
        GetCurrentIteration()->GetClearSoftReferences()) {
      // If an explicit, native allocation-triggered, or last attempt
      // collection, collect the whole heap.
      collect_from_space_only_ = false;
    }
    if (!collect_from_space_only_) {
      VLOG(heap) << "Whole heap collection";
      name_ = collector_name_ + " whole";
    } else {
      VLOG(heap) << "Bump pointer space only collection";
      name_ = collector_name_ + " bps";
    }
  }

  if (!collect_from_space_only_) {
    // If non-generational, always clear soft references.
    // If generational, clear soft references if a whole heap collection.
    GetCurrentIteration()->SetClearSoftReferences(true);
  }
  Locks::mutator_lock_->AssertExclusiveHeld(self_);
  if (generational_) {
    // If last_gc_to_space_end_ is out of the bounds of the from-space
    // (the to-space from last GC), then point it to the beginning of
    // the from-space. For example, the very first GC or the
    // pre-zygote compaction.
    if (!from_space_->HasAddress(reinterpret_cast<mirror::Object*>(last_gc_to_space_end_))) {
      last_gc_to_space_end_ = from_space_->Begin();
    }
    // Reset this before the marking starts below.
    bytes_promoted_ = 0;
  }
  // Assume the cleared space is already empty.
  BindBitmaps();
  // Process dirty cards and add dirty cards to mod-union tables.
  heap_->ProcessCards(GetTimings(), kUseRememberedSet && generational_);
  // Clear the whole card table since we cannot get any additional dirty cards during the
  // paused GC. This saves memory but only works for pause-the-world collectors.
  t.NewTiming("ClearCardTable");
  heap_->GetCardTable()->ClearCardTable();
  // Need to do this before the checkpoint since we don't want any threads to add references to
  // the live stack during the recursive mark.
  t.NewTiming("SwapStacks");
  if (kUseThreadLocalAllocationStack) {
    TimingLogger::ScopedTiming t("RevokeAllThreadLocalAllocationStacks", GetTimings());
    heap_->RevokeAllThreadLocalAllocationStacks(self_);
  }
  heap_->SwapStacks(self_);
  {
    WriterMutexLock mu(self_, *Locks::heap_bitmap_lock_);
    MarkRoots();
    // Recursively mark remaining objects.
    MarkReachableObjects();
  }
  ProcessReferences(self_);
  {
    ReaderMutexLock mu(self_, *Locks::heap_bitmap_lock_);
    SweepSystemWeaks();
  }
  // Revoke buffers before measuring how many objects were moved since the TLABs need to be revoked
  // before they are properly counted.
  RevokeAllThreadLocalBuffers();
  // Record freed memory.
  const int64_t from_bytes = from_space_->GetBytesAllocated();
  const int64_t to_bytes = bytes_moved_;
  const uint64_t from_objects = from_space_->GetObjectsAllocated();
  const uint64_t to_objects = objects_moved_;
  CHECK_LE(to_objects, from_objects);
  // Note: Freed bytes can be negative if we copy from a compacted space to a free-list backed
  // space.
  RecordFree(ObjectBytePair(from_objects - to_objects, from_bytes - to_bytes));
  // Clear and protect the from space.
  from_space_->Clear();
  VLOG(heap) << "Protecting from_space_: " << *from_space_;
  from_space_->GetMemMap()->Protect(kProtectFromSpace ? PROT_NONE : PROT_READ);
  heap_->PreSweepingGcVerification(this);
  if (swap_semi_spaces_) {
    heap_->SwapSemiSpaces();
  }
}

class SemiSpaceScanObjectVisitor {
 public:
  explicit SemiSpaceScanObjectVisitor(SemiSpace* ss) : semi_space_(ss) {}
  void operator()(Object* obj) const EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_,
                                                              Locks::heap_bitmap_lock_) {
    DCHECK(obj != nullptr);
    semi_space_->ScanObject(obj);
  }
 private:
  SemiSpace* const semi_space_;
};

// Used to verify that there are no references to the from-space.
class SemiSpaceVerifyNoFromSpaceReferencesVisitor {
 public:
  explicit SemiSpaceVerifyNoFromSpaceReferencesVisitor(space::ContinuousMemMapAllocSpace* from_space) :
      from_space_(from_space) {}

  void operator()(Object* obj, MemberOffset offset, bool /* is_static */) const
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) ALWAYS_INLINE {
    mirror::Object* ref = obj->GetFieldObject<mirror::Object>(offset);
    if (from_space_->HasAddress(ref)) {
      Runtime::Current()->GetHeap()->DumpObject(LOG(INFO), obj);
      LOG(FATAL) << ref << " found in from space";
    }
  }
 private:
  space::ContinuousMemMapAllocSpace* from_space_;
};

void SemiSpace::VerifyNoFromSpaceReferences(Object* obj) {
  DCHECK(!from_space_->HasAddress(obj)) << "Scanning object " << obj << " in from space";
  SemiSpaceVerifyNoFromSpaceReferencesVisitor visitor(from_space_);
  obj->VisitReferences<kMovingClasses>(visitor, VoidFunctor());
}

class SemiSpaceVerifyNoFromSpaceReferencesObjectVisitor {
 public:
  explicit SemiSpaceVerifyNoFromSpaceReferencesObjectVisitor(SemiSpace* ss) : semi_space_(ss) {}
  void operator()(Object* obj) const
      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_) {
    DCHECK(obj != nullptr);
    semi_space_->VerifyNoFromSpaceReferences(obj);
  }
 private:
  SemiSpace* const semi_space_;
};

void SemiSpace::MarkReachableObjects() {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  {
    TimingLogger::ScopedTiming t2("MarkStackAsLive", GetTimings());
    accounting::ObjectStack* live_stack = heap_->GetLiveStack();
    heap_->MarkAllocStackAsLive(live_stack);
    live_stack->Reset();
  }
  for (auto& space : heap_->GetContinuousSpaces()) {
    // If the space is immune then we need to mark the references to other spaces.
    accounting::ModUnionTable* table = heap_->FindModUnionTableFromSpace(space);
    if (table != nullptr) {
      // TODO: Improve naming.
      TimingLogger::ScopedTiming t2(
          space->IsZygoteSpace() ? "UpdateAndMarkZygoteModUnionTable" :
                                   "UpdateAndMarkImageModUnionTable",
          GetTimings());
      table->UpdateAndMarkReferences(MarkHeapReferenceCallback, this);
      DCHECK(GetHeap()->FindRememberedSetFromSpace(space) == nullptr);
    } else if (collect_from_space_only_ && space->GetLiveBitmap() != nullptr) {
      // If the space has no mod union table (the non-moving space and main spaces when the bump
      // pointer space only collection is enabled,) then we need to scan its live bitmap or dirty
      // cards as roots (including the objects on the live stack which have just been marked in
      // the live bitmap above in MarkAllocStackAsLive().)
      DCHECK(space == heap_->GetNonMovingSpace() || space == heap_->GetPrimaryFreeListSpace())
          << "Space " << space->GetName() << " "
          << "generational_=" << generational_ << " "
          << "collect_from_space_only_=" << collect_from_space_only_;
      accounting::RememberedSet* rem_set = GetHeap()->FindRememberedSetFromSpace(space);
      CHECK_EQ(rem_set != nullptr, kUseRememberedSet);
      if (rem_set != nullptr) {
        TimingLogger::ScopedTiming t2("UpdateAndMarkRememberedSet", GetTimings());
        rem_set->UpdateAndMarkReferences(MarkHeapReferenceCallback, DelayReferenceReferentCallback,
                                         from_space_, this);
        if (kIsDebugBuild) {
          // Verify that there are no from-space references that
          // remain in the space, that is, the remembered set (and the
          // card table) didn't miss any from-space references in the
          // space.
          accounting::ContinuousSpaceBitmap* live_bitmap = space->GetLiveBitmap();
          SemiSpaceVerifyNoFromSpaceReferencesObjectVisitor visitor(this);
          live_bitmap->VisitMarkedRange(reinterpret_cast<uintptr_t>(space->Begin()),
                                        reinterpret_cast<uintptr_t>(space->End()),
                                        visitor);
        }
      } else {
        TimingLogger::ScopedTiming t2("VisitLiveBits", GetTimings());
        accounting::ContinuousSpaceBitmap* live_bitmap = space->GetLiveBitmap();
        SemiSpaceScanObjectVisitor visitor(this);
        live_bitmap->VisitMarkedRange(reinterpret_cast<uintptr_t>(space->Begin()),
                                      reinterpret_cast<uintptr_t>(space->End()),
                                      visitor);
      }
    }
  }

  CHECK_EQ(is_large_object_space_immune_, collect_from_space_only_);
  if (is_large_object_space_immune_) {
    TimingLogger::ScopedTiming t("VisitLargeObjects", GetTimings());
    DCHECK(collect_from_space_only_);
    // Delay copying the live set to the marked set until here from
    // BindBitmaps() as the large objects on the allocation stack may
    // be newly added to the live set above in MarkAllocStackAsLive().
    GetHeap()->GetLargeObjectsSpace()->CopyLiveToMarked();

    // When the large object space is immune, we need to scan the
    // large object space as roots as they contain references to their
    // classes (primitive array classes) that could move though they
    // don't contain any other references.
    space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
    accounting::LargeObjectBitmap* large_live_bitmap = large_object_space->GetLiveBitmap();
    SemiSpaceScanObjectVisitor visitor(this);
    large_live_bitmap->VisitMarkedRange(reinterpret_cast<uintptr_t>(large_object_space->Begin()),
                                        reinterpret_cast<uintptr_t>(large_object_space->End()),
                                        visitor);
  }
  // Recursively process the mark stack.
  ProcessMarkStack();
}

void SemiSpace::ReclaimPhase() {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  WriterMutexLock mu(self_, *Locks::heap_bitmap_lock_);
  // Reclaim unmarked objects.
  Sweep(false);
  // Swap the live and mark bitmaps for each space that we modified. This is an
  // optimization that enables us to not clear live bits inside of the sweep. Only swaps unbound
  // bitmaps.
  SwapBitmaps();
  // Unbind the live and mark bitmaps.
  GetHeap()->UnBindBitmaps();
  if (saved_bytes_ > 0) {
    VLOG(heap) << "Avoided dirtying " << PrettySize(saved_bytes_);
  }
  if (generational_) {
    // Record the end (top) of the to space so we can distinguish
    // between objects that were allocated since the last GC and the
    // older objects.
    last_gc_to_space_end_ = to_space_->End();
  }
}

void SemiSpace::ResizeMarkStack(size_t new_size) {
  std::vector<Object*> temp(mark_stack_->Begin(), mark_stack_->End());
  CHECK_LE(mark_stack_->Size(), new_size);
  mark_stack_->Resize(new_size);
  for (const auto& obj : temp) {
    mark_stack_->PushBack(obj);
  }
}

inline void SemiSpace::MarkStackPush(Object* obj) {
  if (UNLIKELY(mark_stack_->Size() >= mark_stack_->Capacity())) {
    ResizeMarkStack(mark_stack_->Capacity() * 2);
  }
  // The object must be pushed on to the mark stack.
  mark_stack_->PushBack(obj);
}

static inline size_t CopyAvoidingDirtyingPages(void* dest, const void* src, size_t size) {
  if (LIKELY(size <= static_cast<size_t>(kPageSize))) {
    // We will dirty the current page and somewhere in the middle of the next page. This means
    // that the next object copied will also dirty that page.
    // TODO: Worth considering the last object copied? We may end up dirtying one page which is
    // not necessary per GC.
    memcpy(dest, src, size);
    return 0;
  }
  size_t saved_bytes = 0;
  byte* byte_dest = reinterpret_cast<byte*>(dest);
  if (kIsDebugBuild) {
    for (size_t i = 0; i < size; ++i) {
      CHECK_EQ(byte_dest[i], 0U);
    }
  }
  // Process the start of the page. The page must already be dirty, don't bother with checking.
  const byte* byte_src = reinterpret_cast<const byte*>(src);
  const byte* limit = byte_src + size;
  size_t page_remain = AlignUp(byte_dest, kPageSize) - byte_dest;
  // Copy the bytes until the start of the next page.
  memcpy(dest, src, page_remain);
  byte_src += page_remain;
  byte_dest += page_remain;
  DCHECK_ALIGNED(reinterpret_cast<uintptr_t>(byte_dest), kPageSize);
  DCHECK_ALIGNED(reinterpret_cast<uintptr_t>(byte_dest), sizeof(uintptr_t));
  DCHECK_ALIGNED(reinterpret_cast<uintptr_t>(byte_src), sizeof(uintptr_t));
  while (byte_src + kPageSize < limit) {
    bool all_zero = true;
    uintptr_t* word_dest = reinterpret_cast<uintptr_t*>(byte_dest);
    const uintptr_t* word_src = reinterpret_cast<const uintptr_t*>(byte_src);
    for (size_t i = 0; i < kPageSize / sizeof(*word_src); ++i) {
      // Assumes the destination of the copy is all zeros.
      if (word_src[i] != 0) {
        all_zero = false;
        word_dest[i] = word_src[i];
      }
    }
    if (all_zero) {
      // Avoided copying into the page since it was all zeros.
      saved_bytes += kPageSize;
    }
    byte_src += kPageSize;
    byte_dest += kPageSize;
  }
  // Handle the part of the page at the end.
  memcpy(byte_dest, byte_src, limit - byte_src);
  return saved_bytes;
}

mirror::Object* SemiSpace::MarkNonForwardedObject(mirror::Object* obj) {
  const size_t object_size = obj->SizeOf();
  size_t bytes_allocated;
  mirror::Object* forward_address = nullptr;
  if (generational_ && reinterpret_cast<byte*>(obj) < last_gc_to_space_end_) {
    // If it's allocated before the last GC (older), move
    // (pseudo-promote) it to the main free list space (as sort
    // of an old generation.)
    forward_address = promo_dest_space_->AllocThreadUnsafe(self_, object_size, &bytes_allocated,
                                                           nullptr);
    if (UNLIKELY(forward_address == nullptr)) {
      // If out of space, fall back to the to-space.
      forward_address = to_space_->AllocThreadUnsafe(self_, object_size, &bytes_allocated, nullptr);
      // No logic for marking the bitmap, so it must be null.
      DCHECK(to_space_->GetLiveBitmap() == nullptr);
    } else {
      bytes_promoted_ += bytes_allocated;
      // Dirty the card at the destination as it may contain
      // references (including the class pointer) to the bump pointer
      // space.
      GetHeap()->WriteBarrierEveryFieldOf(forward_address);
      // Handle the bitmaps marking.
      accounting::ContinuousSpaceBitmap* live_bitmap = promo_dest_space_->GetLiveBitmap();
      DCHECK(live_bitmap != nullptr);
      accounting::ContinuousSpaceBitmap* mark_bitmap = promo_dest_space_->GetMarkBitmap();
      DCHECK(mark_bitmap != nullptr);
      DCHECK(!live_bitmap->Test(forward_address));
      if (collect_from_space_only_) {
        // If collecting the bump pointer spaces only, live_bitmap == mark_bitmap.
        DCHECK_EQ(live_bitmap, mark_bitmap);

        // If a bump pointer space only collection, delay the live
        // bitmap marking of the promoted object until it's popped off
        // the mark stack (ProcessMarkStack()). The rationale: we may
        // be in the middle of scanning the objects in the promo
        // destination space for
        // non-moving-space-to-bump-pointer-space references by
        // iterating over the marked bits of the live bitmap
        // (MarkReachableObjects()). If we don't delay it (and instead
        // mark the promoted object here), the above promo destination
        // space scan could encounter the just-promoted object and
        // forward the references in the promoted object's fields even
        // though it is pushed onto the mark stack. If this happens,
        // the promoted object would be in an inconsistent state, that
        // is, it's on the mark stack (gray) but its fields are
        // already forwarded (black), which would cause a
        // DCHECK(!to_space_->HasAddress(obj)) failure below.
      } else {
        // Mark forward_address on the live bit map.
        live_bitmap->Set(forward_address);
        // Mark forward_address on the mark bit map.
        DCHECK(!mark_bitmap->Test(forward_address));
        mark_bitmap->Set(forward_address);
      }
    }
  } else {
    // If it's allocated after the last GC (younger), copy it to the to-space.
    forward_address = to_space_->AllocThreadUnsafe(self_, object_size, &bytes_allocated, nullptr);
    if (forward_address != nullptr && to_space_live_bitmap_ != nullptr) {
      to_space_live_bitmap_->Set(forward_address);
    }
  }
  // If it's still null, attempt to use the fallback space.
  if (UNLIKELY(forward_address == nullptr)) {
    forward_address = fallback_space_->AllocThreadUnsafe(self_, object_size, &bytes_allocated,
                                                         nullptr);
    CHECK(forward_address != nullptr) << "Out of memory in the to-space and fallback space.";
    accounting::ContinuousSpaceBitmap* bitmap = fallback_space_->GetLiveBitmap();
    if (bitmap != nullptr) {
      bitmap->Set(forward_address);
    }
  }
  ++objects_moved_;
  bytes_moved_ += bytes_allocated;
  // Copy over the object and add it to the mark stack since we still need to update its
  // references.
  saved_bytes_ +=
      CopyAvoidingDirtyingPages(reinterpret_cast<void*>(forward_address), obj, object_size);
  if (kUseBakerOrBrooksReadBarrier) {
    obj->AssertReadBarrierPointer();
    if (kUseBrooksReadBarrier) {
      DCHECK_EQ(forward_address->GetReadBarrierPointer(), obj);
      forward_address->SetReadBarrierPointer(forward_address);
    }
    forward_address->AssertReadBarrierPointer();
  }
  DCHECK(to_space_->HasAddress(forward_address) ||
         fallback_space_->HasAddress(forward_address) ||
         (generational_ && promo_dest_space_->HasAddress(forward_address)))
      << forward_address << "\n" << GetHeap()->DumpSpaces();
  return forward_address;
}

void SemiSpace::ProcessMarkStackCallback(void* arg) {
  reinterpret_cast<SemiSpace*>(arg)->ProcessMarkStack();
}

mirror::Object* SemiSpace::MarkObjectCallback(mirror::Object* root, void* arg) {
  auto ref = StackReference<mirror::Object>::FromMirrorPtr(root);
  reinterpret_cast<SemiSpace*>(arg)->MarkObject(&ref);
  return ref.AsMirrorPtr();
}

void SemiSpace::MarkHeapReferenceCallback(mirror::HeapReference<mirror::Object>* obj_ptr,
                                          void* arg) {
  reinterpret_cast<SemiSpace*>(arg)->MarkObject(obj_ptr);
}

void SemiSpace::DelayReferenceReferentCallback(mirror::Class* klass, mirror::Reference* ref,
                                               void* arg) {
  reinterpret_cast<SemiSpace*>(arg)->DelayReferenceReferent(klass, ref);
}

void SemiSpace::MarkRootCallback(Object** root, void* arg, uint32_t /*thread_id*/,
                                 RootType /*root_type*/) {
  auto ref = StackReference<mirror::Object>::FromMirrorPtr(*root);
  reinterpret_cast<SemiSpace*>(arg)->MarkObject(&ref);
  if (*root != ref.AsMirrorPtr()) {
    *root = ref.AsMirrorPtr();
  }
}

// Marks all objects in the root set.
void SemiSpace::MarkRoots() {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  Runtime::Current()->VisitRoots(MarkRootCallback, this);
}

bool SemiSpace::HeapReferenceMarkedCallback(mirror::HeapReference<mirror::Object>* object,
                                            void* arg) {
  mirror::Object* obj = object->AsMirrorPtr();
  mirror::Object* new_obj =
      reinterpret_cast<SemiSpace*>(arg)->GetMarkedForwardAddress(obj);
  if (new_obj == nullptr) {
    return false;
  }
  if (new_obj != obj) {
    // Write barrier is not necessary since it still points to the same object, just at a different
    // address.
    object->Assign(new_obj);
  }
  return true;
}

mirror::Object* SemiSpace::MarkedForwardingAddressCallback(mirror::Object* object, void* arg) {
  return reinterpret_cast<SemiSpace*>(arg)->GetMarkedForwardAddress(object);
}

void SemiSpace::SweepSystemWeaks() {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  Runtime::Current()->SweepSystemWeaks(MarkedForwardingAddressCallback, this);
}

bool SemiSpace::ShouldSweepSpace(space::ContinuousSpace* space) const {
  return space != from_space_ && space != to_space_;
}

void SemiSpace::Sweep(bool swap_bitmaps) {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  DCHECK(mark_stack_->IsEmpty());
  for (const auto& space : GetHeap()->GetContinuousSpaces()) {
    if (space->IsContinuousMemMapAllocSpace()) {
      space::ContinuousMemMapAllocSpace* alloc_space = space->AsContinuousMemMapAllocSpace();
      if (!ShouldSweepSpace(alloc_space)) {
        continue;
      }
      TimingLogger::ScopedTiming split(
          alloc_space->IsZygoteSpace() ? "SweepZygoteSpace" : "SweepAllocSpace", GetTimings());
"SweepZygoteSpace" : "SweepAllocSpace", GetTimings()); 649 RecordFree(alloc_space->Sweep(swap_bitmaps)); 650 } 651 } 652 if (!is_large_object_space_immune_) { 653 SweepLargeObjects(swap_bitmaps); 654 } 655} 656 657void SemiSpace::SweepLargeObjects(bool swap_bitmaps) { 658 DCHECK(!is_large_object_space_immune_); 659 TimingLogger::ScopedTiming split("SweepLargeObjects", GetTimings()); 660 RecordFreeLOS(heap_->GetLargeObjectsSpace()->Sweep(swap_bitmaps)); 661} 662 663// Process the "referent" field in a java.lang.ref.Reference. If the referent has not yet been 664// marked, put it on the appropriate list in the heap for later processing. 665void SemiSpace::DelayReferenceReferent(mirror::Class* klass, mirror::Reference* reference) { 666 heap_->GetReferenceProcessor()->DelayReferenceReferent(klass, reference, 667 &HeapReferenceMarkedCallback, this); 668} 669 670class SemiSpaceMarkObjectVisitor { 671 public: 672 explicit SemiSpaceMarkObjectVisitor(SemiSpace* collector) : collector_(collector) { 673 } 674 675 void operator()(Object* obj, MemberOffset offset, bool /* is_static */) const ALWAYS_INLINE 676 EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) { 677 // Object was already verified when we scanned it. 678 collector_->MarkObject(obj->GetFieldObjectReferenceAddr<kVerifyNone>(offset)); 679 } 680 681 void operator()(mirror::Class* klass, mirror::Reference* ref) const 682 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) 683 EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) { 684 collector_->DelayReferenceReferent(klass, ref); 685 } 686 687 private: 688 SemiSpace* const collector_; 689}; 690 691// Visit all of the references of an object and update. 692void SemiSpace::ScanObject(Object* obj) { 693 DCHECK(!from_space_->HasAddress(obj)) << "Scanning object " << obj << " in from space"; 694 SemiSpaceMarkObjectVisitor visitor(this); 695 obj->VisitReferences<kMovingClasses>(visitor, visitor); 696} 697 698// Scan anything that's on the mark stack. 699void SemiSpace::ProcessMarkStack() { 700 TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings()); 701 accounting::ContinuousSpaceBitmap* live_bitmap = nullptr; 702 if (collect_from_space_only_) { 703 // If a bump pointer space only collection (and the promotion is 704 // enabled,) we delay the live-bitmap marking of promoted objects 705 // from MarkObject() until this function. 706 live_bitmap = promo_dest_space_->GetLiveBitmap(); 707 DCHECK(live_bitmap != nullptr); 708 accounting::ContinuousSpaceBitmap* mark_bitmap = promo_dest_space_->GetMarkBitmap(); 709 DCHECK(mark_bitmap != nullptr); 710 DCHECK_EQ(live_bitmap, mark_bitmap); 711 } 712 while (!mark_stack_->IsEmpty()) { 713 Object* obj = mark_stack_->PopBack(); 714 if (collect_from_space_only_ && promo_dest_space_->HasAddress(obj)) { 715 // obj has just been promoted. Mark the live bitmap for it, 716 // which is delayed from MarkObject(). 717 DCHECK(!live_bitmap->Test(obj)); 718 live_bitmap->Set(obj); 719 } 720 ScanObject(obj); 721 } 722} 723 724inline Object* SemiSpace::GetMarkedForwardAddress(mirror::Object* obj) const 725 SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) { 726 // All immune objects are assumed marked. 727 if (from_space_->HasAddress(obj)) { 728 // Returns either the forwarding address or nullptr. 729 return GetForwardingAddressInFromSpace(obj); 730 } else if (collect_from_space_only_ || immune_region_.ContainsObject(obj) || 731 to_space_->HasAddress(obj)) { 732 return obj; // Already forwarded, must be marked. 733 } 734 return mark_bitmap_->Test(obj) ? 
}

void SemiSpace::SetToSpace(space::ContinuousMemMapAllocSpace* to_space) {
  DCHECK(to_space != nullptr);
  to_space_ = to_space;
}

void SemiSpace::SetFromSpace(space::ContinuousMemMapAllocSpace* from_space) {
  DCHECK(from_space != nullptr);
  from_space_ = from_space;
}

void SemiSpace::FinishPhase() {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  // Null the "to" and "from" spaces since compacting from one to the other isn't valid until
  // further action is done by the heap.
  to_space_ = nullptr;
  from_space_ = nullptr;
  CHECK(mark_stack_->IsEmpty());
  mark_stack_->Reset();
  if (generational_) {
    // Decide whether to do a whole heap collection or a bump pointer
    // space only collection at the next collection by updating
    // collect_from_space_only_.
    if (collect_from_space_only_) {
      // Disable collect_from_space_only_ if the bytes promoted since the
      // last whole heap collection or the large object bytes
      // allocated exceed a threshold.
      bytes_promoted_since_last_whole_heap_collection_ += bytes_promoted_;
      bool bytes_promoted_threshold_exceeded =
          bytes_promoted_since_last_whole_heap_collection_ >= kBytesPromotedThreshold;
      uint64_t current_los_bytes_allocated = GetHeap()->GetLargeObjectsSpace()->GetBytesAllocated();
      uint64_t last_los_bytes_allocated =
          large_object_bytes_allocated_at_last_whole_heap_collection_;
      bool large_object_bytes_threshold_exceeded =
          current_los_bytes_allocated >=
          last_los_bytes_allocated + kLargeObjectBytesAllocatedThreshold;
      if (bytes_promoted_threshold_exceeded || large_object_bytes_threshold_exceeded) {
        collect_from_space_only_ = false;
      }
    } else {
      // Reset the counters.
      bytes_promoted_since_last_whole_heap_collection_ = bytes_promoted_;
      large_object_bytes_allocated_at_last_whole_heap_collection_ =
          GetHeap()->GetLargeObjectsSpace()->GetBytesAllocated();
      collect_from_space_only_ = true;
    }
  }
  // Clear all of the spaces' mark bitmaps.
  WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
  heap_->ClearMarkedObjects();
}

void SemiSpace::RevokeAllThreadLocalBuffers() {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  GetHeap()->RevokeAllThreadLocalBuffers();
}

}  // namespace collector
}  // namespace gc
}  // namespace art
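
// Usage note (illustrative sketch, not taken from this file): the heap drives this collector by
// pointing it at the two semi-spaces and then running it, roughly:
//
//   semi_space_collector->SetFromSpace(source_space);     // space being evacuated
//   semi_space_collector->SetToSpace(destination_space);  // compaction destination
//   semi_space_collector->Run(gc_cause, clear_soft_references);
//
// Only SetFromSpace()/SetToSpace() and the phase methods are defined in this file; Run() and its
// arguments are assumptions about the GarbageCollector base class and the calling heap code.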