semi_space.cc revision 13bf2e6a6c14bccf5377998b7568100ffd417f8e
/*
 * Copyright (C) 2013 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "semi_space-inl.h"

#include <functional>
#include <numeric>
#include <climits>
#include <vector>

#include "base/logging.h"
#include "base/macros.h"
#include "base/mutex-inl.h"
#include "base/timing_logger.h"
#include "gc/accounting/heap_bitmap-inl.h"
#include "gc/accounting/mod_union_table.h"
#include "gc/accounting/remembered_set.h"
#include "gc/accounting/space_bitmap-inl.h"
#include "gc/heap.h"
#include "gc/reference_processor.h"
#include "gc/space/bump_pointer_space.h"
#include "gc/space/bump_pointer_space-inl.h"
#include "gc/space/image_space.h"
#include "gc/space/large_object_space.h"
#include "gc/space/space-inl.h"
#include "indirect_reference_table.h"
#include "intern_table.h"
#include "jni_internal.h"
#include "mark_sweep-inl.h"
#include "monitor.h"
#include "mirror/art_field.h"
#include "mirror/art_field-inl.h"
#include "mirror/class-inl.h"
#include "mirror/class_loader.h"
#include "mirror/dex_cache.h"
#include "mirror/reference-inl.h"
#include "mirror/object-inl.h"
#include "mirror/object_array.h"
#include "mirror/object_array-inl.h"
#include "runtime.h"
#include "stack.h"
#include "thread-inl.h"
#include "thread_list.h"
#include "verifier/method_verifier.h"

using ::art::mirror::Class;
using ::art::mirror::Object;

namespace art {
namespace gc {
namespace collector {

static constexpr bool kProtectFromSpace = true;
static constexpr bool kStoreStackTraces = false;
static constexpr size_t kBytesPromotedThreshold = 4 * MB;
static constexpr size_t kLargeObjectBytesAllocatedThreshold = 16 * MB;

void SemiSpace::BindBitmaps() {
  timings_.StartSplit("BindBitmaps");
  WriterMutexLock mu(self_, *Locks::heap_bitmap_lock_);
  // Mark all of the spaces we never collect as immune.
  for (const auto& space : GetHeap()->GetContinuousSpaces()) {
    if (space->GetLiveBitmap() != nullptr) {
      if (space == to_space_) {
        CHECK(to_space_->IsContinuousMemMapAllocSpace());
        to_space_->AsContinuousMemMapAllocSpace()->BindLiveToMarkBitmap();
      } else if (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyNeverCollect
                 || space->GetGcRetentionPolicy() == space::kGcRetentionPolicyFullCollect
                 // Add the main free list space and the non-moving space to the
                 // immune space if this is a bump pointer space only collection.
                 || (generational_ && !whole_heap_collection_ &&
                     (space == GetHeap()->GetNonMovingSpace() ||
                      space == GetHeap()->GetPrimaryFreeListSpace()))) {
        CHECK(immune_region_.AddContinuousSpace(space)) << "Failed to add space " << *space;
      }
    }
  }
  if (generational_ && !whole_heap_collection_) {
    // We won't collect the large object space if this is a bump pointer space only collection.
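    // The large objects are instead treated as roots: MarkReachableObjects() copies their live
    // set to the mark set and scans the live large objects so that their class pointers get
    // forwarded, and Sweep() skips the large object space while this flag is set.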
    is_large_object_space_immune_ = true;
  }
  timings_.EndSplit();
}

SemiSpace::SemiSpace(Heap* heap, bool generational, const std::string& name_prefix)
    : GarbageCollector(heap,
                       name_prefix + (name_prefix.empty() ? "" : " ") + "marksweep + semispace"),
      to_space_(nullptr),
      from_space_(nullptr),
      generational_(generational),
      last_gc_to_space_end_(nullptr),
      bytes_promoted_(0),
      bytes_promoted_since_last_whole_heap_collection_(0),
      large_object_bytes_allocated_at_last_whole_heap_collection_(0),
      whole_heap_collection_(true),
      collector_name_(name_),
      swap_semi_spaces_(true) {
}

void SemiSpace::RunPhases() {
  Thread* self = Thread::Current();
  InitializePhase();
  // The semi-space collector is special since it is sometimes called with the mutators suspended
  // during zygote creation and collector transitions. If we already exclusively hold the
  // mutator lock, then we can't lock it again since that would cause a deadlock.
  if (Locks::mutator_lock_->IsExclusiveHeld(self)) {
    GetHeap()->PreGcVerificationPaused(this);
    GetHeap()->PrePauseRosAllocVerification(this);
    MarkingPhase();
    ReclaimPhase();
    GetHeap()->PostGcVerificationPaused(this);
  } else {
    Locks::mutator_lock_->AssertNotHeld(self);
    {
      ScopedPause pause(this);
      GetHeap()->PreGcVerificationPaused(this);
      GetHeap()->PrePauseRosAllocVerification(this);
      MarkingPhase();
    }
    {
      ReaderMutexLock mu(self, *Locks::mutator_lock_);
      ReclaimPhase();
    }
    GetHeap()->PostGcVerification(this);
  }
  FinishPhase();
}

void SemiSpace::InitializePhase() {
  TimingLogger::ScopedSplit split("InitializePhase", &timings_);
  mark_stack_ = heap_->GetMarkStack();
  DCHECK(mark_stack_ != nullptr);
  immune_region_.Reset();
  is_large_object_space_immune_ = false;
  saved_bytes_ = 0;
  bytes_moved_ = 0;
  objects_moved_ = 0;
  self_ = Thread::Current();
  CHECK(from_space_->CanMoveObjects()) << "Attempting to move from " << *from_space_;
  // Set the initial bitmap.
  to_space_live_bitmap_ = to_space_->GetLiveBitmap();
  {
    // TODO: I don't think we should need the heap bitmap lock to get the mark bitmap.
    ReaderMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
    mark_bitmap_ = heap_->GetMarkBitmap();
  }
}

void SemiSpace::ProcessReferences(Thread* self) {
  TimingLogger::ScopedSplit split("ProcessReferences", &timings_);
  WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
  GetHeap()->GetReferenceProcessor()->ProcessReferences(
      false, &timings_, clear_soft_references_, &MarkedForwardingAddressCallback,
      &MarkObjectCallback, &ProcessMarkStackCallback, this);
}

void SemiSpace::MarkingPhase() {
  CHECK(Locks::mutator_lock_->IsExclusiveHeld(self_));
  if (kStoreStackTraces) {
    Locks::mutator_lock_->AssertExclusiveHeld(self_);
    // Store the stack traces into the runtime fault string in case we get a heap corruption
    // related crash later.
    ThreadState old_state = self_->SetStateUnsafe(kRunnable);
    std::ostringstream oss;
    Runtime* runtime = Runtime::Current();
    runtime->GetThreadList()->DumpForSigQuit(oss);
    runtime->GetThreadList()->DumpNativeStacks(oss);
    runtime->SetFaultMessage(oss.str());
    CHECK_EQ(self_->SetStateUnsafe(old_state), kRunnable);
  }
  // Revoke the thread local buffers since the GC may allocate into a RosAllocSpace and this helps
  // to prevent fragmentation.
  RevokeAllThreadLocalBuffers();
  if (generational_) {
    if (gc_cause_ == kGcCauseExplicit || gc_cause_ == kGcCauseForNativeAlloc ||
        clear_soft_references_) {
      // If an explicit, native allocation-triggered, or last attempt
      // collection, collect the whole heap.
      whole_heap_collection_ = true;
    }
    if (whole_heap_collection_) {
      VLOG(heap) << "Whole heap collection";
      name_ = collector_name_ + " whole";
    } else {
      VLOG(heap) << "Bump pointer space only collection";
      name_ = collector_name_ + " bps";
    }
  }

  if (!clear_soft_references_) {
    if (!generational_) {
      // If non-generational, always clear soft references.
      clear_soft_references_ = true;
    } else {
      // If generational, clear soft references if this is a whole heap collection.
      if (whole_heap_collection_) {
        clear_soft_references_ = true;
      }
    }
  }

  Locks::mutator_lock_->AssertExclusiveHeld(self_);

  TimingLogger::ScopedSplit split("MarkingPhase", &timings_);
  if (generational_) {
    // If last_gc_to_space_end_ is out of the bounds of the from-space
    // (the to-space from the last GC), then point it to the beginning of
    // the from-space. For example, the very first GC or the
    // pre-zygote compaction.
    if (!from_space_->HasAddress(reinterpret_cast<mirror::Object*>(last_gc_to_space_end_))) {
      last_gc_to_space_end_ = from_space_->Begin();
    }
    // Reset this before the marking starts below.
    bytes_promoted_ = 0;
  }
  // Assume the cleared space is already empty.
  BindBitmaps();
  // Process dirty cards and add dirty cards to mod-union tables.
  heap_->ProcessCards(timings_, kUseRememberedSet && generational_);
  // Clear the whole card table since we cannot get any additional dirty cards during the
  // paused GC. This saves memory but only works for pause-the-world collectors.
  timings_.NewSplit("ClearCardTable");
  heap_->GetCardTable()->ClearCardTable();
  // Need to do this before the checkpoint since we don't want any threads to add references to
  // the live stack during the recursive mark.
  timings_.NewSplit("SwapStacks");
  if (kUseThreadLocalAllocationStack) {
    heap_->RevokeAllThreadLocalAllocationStacks(self_);
  }
  heap_->SwapStacks(self_);
  {
    WriterMutexLock mu(self_, *Locks::heap_bitmap_lock_);
    MarkRoots();
    // Mark roots of immune spaces.
    UpdateAndMarkModUnion();
    // Recursively mark remaining objects.
    MarkReachableObjects();
  }
  ProcessReferences(self_);
  {
    ReaderMutexLock mu(self_, *Locks::heap_bitmap_lock_);
    SweepSystemWeaks();
  }
  timings_.NewSplit("RecordFree");
  // Revoke buffers before measuring how many objects were moved since the TLABs need to be
  // revoked before they are properly counted.
  RevokeAllThreadLocalBuffers();
  // Record freed memory.
  const int64_t from_bytes = from_space_->GetBytesAllocated();
  const int64_t to_bytes = bytes_moved_;
  const uint64_t from_objects = from_space_->GetObjectsAllocated();
  const uint64_t to_objects = objects_moved_;
  CHECK_LE(to_objects, from_objects);
  // Note: Freed bytes can be negative if we copy from a compacted space to a free-list backed
  // space.
  RecordFree(from_objects - to_objects, from_bytes - to_bytes);
  // Clear and protect the from space.
  from_space_->Clear();
  VLOG(heap) << "Protecting from_space_: " << *from_space_;
  from_space_->GetMemMap()->Protect(kProtectFromSpace ? PROT_NONE : PROT_READ);
  if (swap_semi_spaces_) {
    heap_->SwapSemiSpaces();
  }
  timings_.StartSplit("PreSweepingGcVerification");
  heap_->PreSweepingGcVerification(this);
  timings_.EndSplit();
}

void SemiSpace::UpdateAndMarkModUnion() {
  for (auto& space : heap_->GetContinuousSpaces()) {
    // If the space is immune then we need to mark the references to other spaces.
    if (immune_region_.ContainsSpace(space)) {
      accounting::ModUnionTable* table = heap_->FindModUnionTableFromSpace(space);
      if (table != nullptr) {
        // TODO: Improve naming.
        TimingLogger::ScopedSplit split(
            space->IsZygoteSpace() ? "UpdateAndMarkZygoteModUnionTable" :
                                     "UpdateAndMarkImageModUnionTable",
            &timings_);
        table->UpdateAndMarkReferences(MarkHeapReferenceCallback, this);
      } else if (heap_->FindRememberedSetFromSpace(space) != nullptr) {
        DCHECK(kUseRememberedSet);
        // If this is a bump pointer space only collection, the non-moving
        // space is added to the immune space. The non-moving space
        // doesn't have a mod union table, but has a remembered
        // set. Its dirty cards will be scanned later in
        // MarkReachableObjects().
        DCHECK(generational_ && !whole_heap_collection_ &&
               (space == heap_->GetNonMovingSpace() || space == heap_->GetPrimaryFreeListSpace()))
            << "Space " << space->GetName() << " "
            << "generational_=" << generational_ << " "
            << "whole_heap_collection_=" << whole_heap_collection_ << " ";
      } else {
        DCHECK(!kUseRememberedSet);
        // If this is a bump pointer space only collection, the non-moving
        // space is added to the immune space. But the non-moving
        // space doesn't have a mod union table. Instead, its live
        // bitmap will be scanned later in MarkReachableObjects().
        DCHECK(generational_ && !whole_heap_collection_ &&
               (space == heap_->GetNonMovingSpace() || space == heap_->GetPrimaryFreeListSpace()))
            << "Space " << space->GetName() << " "
            << "generational_=" << generational_ << " "
            << "whole_heap_collection_=" << whole_heap_collection_ << " ";
      }
    }
  }
}

class SemiSpaceScanObjectVisitor {
 public:
  explicit SemiSpaceScanObjectVisitor(SemiSpace* ss) : semi_space_(ss) {}
  void operator()(Object* obj) const EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_,
                                                              Locks::heap_bitmap_lock_) {
    DCHECK(obj != nullptr);
    semi_space_->ScanObject(obj);
  }
 private:
  SemiSpace* const semi_space_;
};

// Used to verify that there are no references to the from-space.
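// Only used from MarkReachableObjects() in debug builds (kIsDebugBuild), to check that the
// remembered set and card table did not miss any from-space reference in an immune space.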
class SemiSpaceVerifyNoFromSpaceReferencesVisitor {
 public:
  explicit SemiSpaceVerifyNoFromSpaceReferencesVisitor(space::ContinuousMemMapAllocSpace* from_space)
      : from_space_(from_space) {}

  void operator()(Object* obj, MemberOffset offset, bool /* is_static */) const
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) ALWAYS_INLINE {
    mirror::Object* ref = obj->GetFieldObject<mirror::Object>(offset);
    if (from_space_->HasAddress(ref)) {
      Runtime::Current()->GetHeap()->DumpObject(LOG(INFO), obj);
      LOG(FATAL) << ref << " found in from space";
    }
  }
 private:
  space::ContinuousMemMapAllocSpace* from_space_;
};

void SemiSpace::VerifyNoFromSpaceReferences(Object* obj) {
  DCHECK(!from_space_->HasAddress(obj)) << "Scanning object " << obj << " in from space";
  SemiSpaceVerifyNoFromSpaceReferencesVisitor visitor(from_space_);
  obj->VisitReferences<kMovingClasses>(visitor, VoidFunctor());
}

class SemiSpaceVerifyNoFromSpaceReferencesObjectVisitor {
 public:
  explicit SemiSpaceVerifyNoFromSpaceReferencesObjectVisitor(SemiSpace* ss) : semi_space_(ss) {}
  void operator()(Object* obj) const
      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_) {
    DCHECK(obj != nullptr);
    semi_space_->VerifyNoFromSpaceReferences(obj);
  }
 private:
  SemiSpace* const semi_space_;
};

void SemiSpace::MarkReachableObjects() {
  timings_.StartSplit("MarkStackAsLive");
  accounting::ObjectStack* live_stack = heap_->GetLiveStack();
  heap_->MarkAllocStackAsLive(live_stack);
  live_stack->Reset();

  timings_.NewSplit("UpdateAndMarkRememberedSets");
  for (auto& space : heap_->GetContinuousSpaces()) {
    // If the space is immune and has no mod union table (the non-moving
    // space when the bump pointer space only collection is enabled),
    // then we need to scan its live bitmap or dirty cards as roots
    // (including the objects on the live stack which have just been
    // marked in the live bitmap above in MarkAllocStackAsLive()).
    if (immune_region_.ContainsSpace(space) &&
        heap_->FindModUnionTableFromSpace(space) == nullptr) {
      DCHECK(generational_ && !whole_heap_collection_ &&
             (space == GetHeap()->GetNonMovingSpace() ||
              space == GetHeap()->GetPrimaryFreeListSpace()));
      accounting::RememberedSet* rem_set = heap_->FindRememberedSetFromSpace(space);
      if (kUseRememberedSet) {
        DCHECK(rem_set != nullptr);
        rem_set->UpdateAndMarkReferences(MarkHeapReferenceCallback, DelayReferenceReferentCallback,
                                         from_space_, this);
        if (kIsDebugBuild) {
          // Verify that there are no from-space references that
          // remain in the space, that is, the remembered set (and the
          // card table) didn't miss any from-space references in the
          // space.
          accounting::ContinuousSpaceBitmap* live_bitmap = space->GetLiveBitmap();
          SemiSpaceVerifyNoFromSpaceReferencesObjectVisitor visitor(this);
          live_bitmap->VisitMarkedRange(reinterpret_cast<uintptr_t>(space->Begin()),
                                        reinterpret_cast<uintptr_t>(space->End()),
                                        visitor);
        }
      } else {
        DCHECK(rem_set == nullptr);
        accounting::ContinuousSpaceBitmap* live_bitmap = space->GetLiveBitmap();
        SemiSpaceScanObjectVisitor visitor(this);
        live_bitmap->VisitMarkedRange(reinterpret_cast<uintptr_t>(space->Begin()),
                                      reinterpret_cast<uintptr_t>(space->End()),
                                      visitor);
      }
    }
  }

  if (is_large_object_space_immune_) {
    timings_.NewSplit("VisitLargeObjects");
    DCHECK(generational_ && !whole_heap_collection_);
    // Delay copying the live set to the marked set until here from
    // BindBitmaps() as the large objects on the allocation stack may
    // be newly added to the live set above in MarkAllocStackAsLive().
    GetHeap()->GetLargeObjectsSpace()->CopyLiveToMarked();

    // When the large object space is immune, we need to scan the
    // large object space as roots as they contain references to their
    // classes (primitive array classes) that could move even though they
    // don't contain any other references.
    space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
    accounting::LargeObjectBitmap* large_live_bitmap = large_object_space->GetLiveBitmap();
    SemiSpaceScanObjectVisitor visitor(this);
    large_live_bitmap->VisitMarkedRange(reinterpret_cast<uintptr_t>(large_object_space->Begin()),
                                        reinterpret_cast<uintptr_t>(large_object_space->End()),
                                        visitor);
  }
  timings_.EndSplit();
  // Recursively process the mark stack.
  ProcessMarkStack();
}

void SemiSpace::ReclaimPhase() {
  TimingLogger::ScopedSplit split("ReclaimPhase", &timings_);
  {
    WriterMutexLock mu(self_, *Locks::heap_bitmap_lock_);
    // Reclaim unmarked objects.
    Sweep(false);
    // Swap the live and mark bitmaps for each space that we modified. This is an
    // optimization that enables us to not clear live bits inside of the sweep. Only swaps
    // unbound bitmaps.
    timings_.StartSplit("SwapBitmaps");
    SwapBitmaps();
    timings_.EndSplit();
    // Unbind the live and mark bitmaps.
    TimingLogger::ScopedSplit split("UnBindBitmaps", &timings_);
    GetHeap()->UnBindBitmaps();
  }
  if (saved_bytes_ > 0) {
    VLOG(heap) << "Avoided dirtying " << PrettySize(saved_bytes_);
  }

  if (generational_) {
    // Record the end (top) of the to space so we can distinguish
    // between objects that were allocated since the last GC and the
    // older objects.
    last_gc_to_space_end_ = to_space_->End();
  }
}

void SemiSpace::ResizeMarkStack(size_t new_size) {
  std::vector<Object*> temp(mark_stack_->Begin(), mark_stack_->End());
  CHECK_LE(mark_stack_->Size(), new_size);
  mark_stack_->Resize(new_size);
  for (const auto& obj : temp) {
    mark_stack_->PushBack(obj);
  }
}

inline void SemiSpace::MarkStackPush(Object* obj) {
  if (UNLIKELY(mark_stack_->Size() >= mark_stack_->Capacity())) {
    ResizeMarkStack(mark_stack_->Capacity() * 2);
  }
  // The object must be pushed on to the mark stack.
  mark_stack_->PushBack(obj);
}

static inline size_t CopyAvoidingDirtyingPages(void* dest, const void* src, size_t size) {
  if (LIKELY(size <= static_cast<size_t>(kPageSize))) {
    // We will dirty the current page and somewhere in the middle of the next page. This means
    // that the next object copied will also dirty that page.
    // TODO: Worth considering the last object copied? We may end up dirtying one page which is
    // not necessary per GC.
    memcpy(dest, src, size);
    return 0;
  }
  size_t saved_bytes = 0;
  byte* byte_dest = reinterpret_cast<byte*>(dest);
  if (kIsDebugBuild) {
    for (size_t i = 0; i < size; ++i) {
      CHECK_EQ(byte_dest[i], 0U);
    }
  }
  // Process the start of the page. The page must already be dirty, don't bother with checking.
  const byte* byte_src = reinterpret_cast<const byte*>(src);
  const byte* limit = byte_src + size;
  size_t page_remain = AlignUp(byte_dest, kPageSize) - byte_dest;
  // Copy the bytes until the start of the next page.
  memcpy(dest, src, page_remain);
  byte_src += page_remain;
  byte_dest += page_remain;
  DCHECK_ALIGNED(reinterpret_cast<uintptr_t>(byte_dest), kPageSize);
  DCHECK_ALIGNED(reinterpret_cast<uintptr_t>(byte_dest), sizeof(uintptr_t));
  DCHECK_ALIGNED(reinterpret_cast<uintptr_t>(byte_src), sizeof(uintptr_t));
  while (byte_src + kPageSize < limit) {
    bool all_zero = true;
    uintptr_t* word_dest = reinterpret_cast<uintptr_t*>(byte_dest);
    const uintptr_t* word_src = reinterpret_cast<const uintptr_t*>(byte_src);
    for (size_t i = 0; i < kPageSize / sizeof(*word_src); ++i) {
      // Assumes the destination of the copy is all zeros.
      if (word_src[i] != 0) {
        all_zero = false;
        word_dest[i] = word_src[i];
      }
    }
    if (all_zero) {
      // Avoided copying into the page since it was all zeros.
      saved_bytes += kPageSize;
    }
    byte_src += kPageSize;
    byte_dest += kPageSize;
  }
  // Handle the part of the page at the end.
  memcpy(byte_dest, byte_src, limit - byte_src);
  return saved_bytes;
}

mirror::Object* SemiSpace::MarkNonForwardedObject(mirror::Object* obj) {
  size_t object_size = obj->SizeOf();
  size_t bytes_allocated;
  mirror::Object* forward_address = nullptr;
  if (generational_ && reinterpret_cast<byte*>(obj) < last_gc_to_space_end_) {
    // If it was allocated before the last GC (older), move
    // (pseudo-promote) it to the main free list space (as a sort
    // of old generation).
    space::MallocSpace* promo_dest_space = GetHeap()->GetPrimaryFreeListSpace();
    forward_address = promo_dest_space->AllocThreadUnsafe(self_, object_size, &bytes_allocated,
                                                          nullptr);
    if (UNLIKELY(forward_address == nullptr)) {
      // If out of space, fall back to the to-space.
      forward_address = to_space_->AllocThreadUnsafe(self_, object_size, &bytes_allocated, nullptr);
    } else {
      bytes_promoted_ += bytes_allocated;
      // Dirty the card at the destination as it may contain
      // references (including the class pointer) to the bump pointer
      // space.
      GetHeap()->WriteBarrierEveryFieldOf(forward_address);
      // Handle the bitmaps marking.
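      // The promoted object now lives in the promo destination space, so it is tracked in that
      // space's live/mark bitmaps rather than in the to-space live bitmap.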
      accounting::ContinuousSpaceBitmap* live_bitmap = promo_dest_space->GetLiveBitmap();
      DCHECK(live_bitmap != nullptr);
      accounting::ContinuousSpaceBitmap* mark_bitmap = promo_dest_space->GetMarkBitmap();
      DCHECK(mark_bitmap != nullptr);
      DCHECK(!live_bitmap->Test(forward_address));
      if (!whole_heap_collection_) {
        // If collecting the bump pointer spaces only, live_bitmap == mark_bitmap.
        DCHECK_EQ(live_bitmap, mark_bitmap);

        // If this is a bump pointer space only collection, delay the live
        // bitmap marking of the promoted object until it's popped off
        // the mark stack (ProcessMarkStack()). The rationale: we may
        // be in the middle of scanning the objects in the promo
        // destination space for
        // non-moving-space-to-bump-pointer-space references by
        // iterating over the marked bits of the live bitmap
        // (MarkReachableObjects()). If we don't delay it (and instead
        // mark the promoted object here), the above promo destination
        // space scan could encounter the just-promoted object and
        // forward the references in the promoted object's fields even
        // though it is pushed onto the mark stack. If this happens,
        // the promoted object would be in an inconsistent state, that
        // is, it's on the mark stack (gray) but its fields are
        // already forwarded (black), which would cause a
        // DCHECK(!to_space_->HasAddress(obj)) failure below.
      } else {
        // Mark forward_address on the live bit map.
        live_bitmap->Set(forward_address);
        // Mark forward_address on the mark bit map.
        DCHECK(!mark_bitmap->Test(forward_address));
        mark_bitmap->Set(forward_address);
      }
    }
    DCHECK(forward_address != nullptr);
  } else {
    // If it was allocated after the last GC (younger), copy it to the to-space.
    forward_address = to_space_->AllocThreadUnsafe(self_, object_size, &bytes_allocated, nullptr);
  }
  CHECK(forward_address != nullptr) << "Out of memory in the to-space.";
  ++objects_moved_;
  bytes_moved_ += bytes_allocated;
  // Copy over the object and add it to the mark stack since we still need to update its
  // references.
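  // CopyAvoidingDirtyingPages() skips writing destination pages whose source bytes are all zero
  // (the destination is assumed pre-zeroed); the bytes it reports as skipped accumulate in
  // saved_bytes_ and are logged in ReclaimPhase().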
  saved_bytes_ +=
      CopyAvoidingDirtyingPages(reinterpret_cast<void*>(forward_address), obj, object_size);
  if (kUseBakerOrBrooksReadBarrier) {
    obj->AssertReadBarrierPointer();
    if (kUseBrooksReadBarrier) {
      DCHECK_EQ(forward_address->GetReadBarrierPointer(), obj);
      forward_address->SetReadBarrierPointer(forward_address);
    }
    forward_address->AssertReadBarrierPointer();
  }
  if (to_space_live_bitmap_ != nullptr) {
    to_space_live_bitmap_->Set(forward_address);
  }
  DCHECK(to_space_->HasAddress(forward_address) ||
         (generational_ && GetHeap()->GetPrimaryFreeListSpace()->HasAddress(forward_address)));
  return forward_address;
}

void SemiSpace::ProcessMarkStackCallback(void* arg) {
  reinterpret_cast<SemiSpace*>(arg)->ProcessMarkStack();
}

mirror::Object* SemiSpace::MarkObjectCallback(mirror::Object* root, void* arg) {
  auto ref = StackReference<mirror::Object>::FromMirrorPtr(root);
  reinterpret_cast<SemiSpace*>(arg)->MarkObject(&ref);
  return ref.AsMirrorPtr();
}

void SemiSpace::MarkHeapReferenceCallback(mirror::HeapReference<mirror::Object>* obj_ptr,
                                          void* arg) {
  reinterpret_cast<SemiSpace*>(arg)->MarkObject(obj_ptr);
}

void SemiSpace::DelayReferenceReferentCallback(mirror::Class* klass, mirror::Reference* ref,
                                               void* arg) {
  reinterpret_cast<SemiSpace*>(arg)->DelayReferenceReferent(klass, ref);
}

void SemiSpace::MarkRootCallback(Object** root, void* arg, uint32_t /*thread_id*/,
                                 RootType /*root_type*/) {
  auto ref = StackReference<mirror::Object>::FromMirrorPtr(*root);
  reinterpret_cast<SemiSpace*>(arg)->MarkObject(&ref);
  if (*root != ref.AsMirrorPtr()) {
    *root = ref.AsMirrorPtr();
  }
}

// Marks all objects in the root set.
void SemiSpace::MarkRoots() {
  timings_.NewSplit("MarkRoots");
  // TODO: Visit up image roots as well?
  Runtime::Current()->VisitRoots(MarkRootCallback, this);
}

mirror::Object* SemiSpace::MarkedForwardingAddressCallback(mirror::Object* object, void* arg) {
  return reinterpret_cast<SemiSpace*>(arg)->GetMarkedForwardAddress(object);
}

void SemiSpace::SweepSystemWeaks() {
  timings_.StartSplit("SweepSystemWeaks");
  Runtime::Current()->SweepSystemWeaks(MarkedForwardingAddressCallback, this);
  timings_.EndSplit();
}

bool SemiSpace::ShouldSweepSpace(space::ContinuousSpace* space) const {
  return space != from_space_ && space != to_space_ && !immune_region_.ContainsSpace(space);
}

void SemiSpace::Sweep(bool swap_bitmaps) {
  DCHECK(mark_stack_->IsEmpty());
  TimingLogger::ScopedSplit split("Sweep", &timings_);
  for (const auto& space : GetHeap()->GetContinuousSpaces()) {
    if (space->IsContinuousMemMapAllocSpace()) {
      space::ContinuousMemMapAllocSpace* alloc_space = space->AsContinuousMemMapAllocSpace();
      if (!ShouldSweepSpace(alloc_space)) {
        continue;
      }
      TimingLogger::ScopedSplit split(
          alloc_space->IsZygoteSpace() ? "SweepZygoteSpace" : "SweepAllocSpace", &timings_);
      size_t freed_objects = 0;
      size_t freed_bytes = 0;
      alloc_space->Sweep(swap_bitmaps, &freed_objects, &freed_bytes);
      RecordFree(freed_objects, freed_bytes);
    }
  }
  if (!is_large_object_space_immune_) {
    SweepLargeObjects(swap_bitmaps);
  }
}

void SemiSpace::SweepLargeObjects(bool swap_bitmaps) {
  DCHECK(!is_large_object_space_immune_);
  TimingLogger::ScopedSplit split("SweepLargeObjects", &timings_);
  size_t freed_objects = 0;
  size_t freed_bytes = 0;
  heap_->GetLargeObjectsSpace()->Sweep(swap_bitmaps, &freed_objects, &freed_bytes);
  RecordFreeLargeObjects(freed_objects, freed_bytes);
}

// Process the "referent" field in a java.lang.ref.Reference. If the referent has not yet been
// marked, put it on the appropriate list in the heap for later processing.
void SemiSpace::DelayReferenceReferent(mirror::Class* klass, mirror::Reference* reference) {
  heap_->GetReferenceProcessor()->DelayReferenceReferent(klass, reference,
                                                         MarkedForwardingAddressCallback, this);
}

class SemiSpaceMarkObjectVisitor {
 public:
  explicit SemiSpaceMarkObjectVisitor(SemiSpace* collector) : collector_(collector) {
  }

  void operator()(Object* obj, MemberOffset offset, bool /* is_static */) const ALWAYS_INLINE
      EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
    // Object was already verified when we scanned it.
    collector_->MarkObject(obj->GetFieldObjectReferenceAddr<kVerifyNone>(offset));
  }

  void operator()(mirror::Class* klass, mirror::Reference* ref) const
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
    collector_->DelayReferenceReferent(klass, ref);
  }

 private:
  SemiSpace* const collector_;
};

// Visit all of the references of an object and update.
void SemiSpace::ScanObject(Object* obj) {
  DCHECK(!from_space_->HasAddress(obj)) << "Scanning object " << obj << " in from space";
  SemiSpaceMarkObjectVisitor visitor(this);
  obj->VisitReferences<kMovingClasses>(visitor, visitor);
}

// Scan anything that's on the mark stack.
void SemiSpace::ProcessMarkStack() {
  space::MallocSpace* promo_dest_space = nullptr;
  accounting::ContinuousSpaceBitmap* live_bitmap = nullptr;
  if (generational_ && !whole_heap_collection_) {
    // If this is a bump pointer space only collection (and promotion is
    // enabled), we delay the live-bitmap marking of promoted objects
    // from MarkObject() until this function.
    promo_dest_space = GetHeap()->GetPrimaryFreeListSpace();
    live_bitmap = promo_dest_space->GetLiveBitmap();
    DCHECK(live_bitmap != nullptr);
    accounting::ContinuousSpaceBitmap* mark_bitmap = promo_dest_space->GetMarkBitmap();
    DCHECK(mark_bitmap != nullptr);
    DCHECK_EQ(live_bitmap, mark_bitmap);
  }
  timings_.StartSplit("ProcessMarkStack");
  while (!mark_stack_->IsEmpty()) {
    Object* obj = mark_stack_->PopBack();
    if (generational_ && !whole_heap_collection_ && promo_dest_space->HasAddress(obj)) {
      // obj has just been promoted. Mark the live bitmap for it,
      // which is delayed from MarkObject().
      DCHECK(!live_bitmap->Test(obj));
      live_bitmap->Set(obj);
    }
    ScanObject(obj);
  }
  timings_.EndSplit();
}

inline Object* SemiSpace::GetMarkedForwardAddress(mirror::Object* obj) const
    SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
  // All immune objects are assumed marked.
  if (immune_region_.ContainsObject(obj)) {
    return obj;
  }
  if (from_space_->HasAddress(obj)) {
    // Returns either the forwarding address or nullptr.
    return GetForwardingAddressInFromSpace(obj);
  } else if (to_space_->HasAddress(obj)) {
    // Should be unlikely.
    // Already forwarded, must be marked.
    return obj;
  }
  return heap_->GetMarkBitmap()->Test(obj) ? obj : nullptr;
}

void SemiSpace::SetToSpace(space::ContinuousMemMapAllocSpace* to_space) {
  DCHECK(to_space != nullptr);
  to_space_ = to_space;
}

void SemiSpace::SetFromSpace(space::ContinuousMemMapAllocSpace* from_space) {
  DCHECK(from_space != nullptr);
  from_space_ = from_space;
}

void SemiSpace::FinishPhase() {
  TimingLogger::ScopedSplit split("FinishPhase", &timings_);
  // Null the "to" and "from" spaces since compacting from one to the other isn't valid until
  // further action is done by the heap.
  to_space_ = nullptr;
  from_space_ = nullptr;
  CHECK(mark_stack_->IsEmpty());
  mark_stack_->Reset();
  if (generational_) {
    // Decide whether to do a whole heap collection or a bump pointer
    // space only collection at the next collection by updating
    // whole_heap_collection_.
    if (!whole_heap_collection_) {
      // Enable whole_heap_collection_ if the bytes promoted since the
      // last whole heap collection or the large object bytes
      // allocated exceed a threshold.
      bytes_promoted_since_last_whole_heap_collection_ += bytes_promoted_;
      bool bytes_promoted_threshold_exceeded =
          bytes_promoted_since_last_whole_heap_collection_ >= kBytesPromotedThreshold;
      uint64_t current_los_bytes_allocated = GetHeap()->GetLargeObjectsSpace()->GetBytesAllocated();
      uint64_t last_los_bytes_allocated =
          large_object_bytes_allocated_at_last_whole_heap_collection_;
      bool large_object_bytes_threshold_exceeded =
          current_los_bytes_allocated >=
          last_los_bytes_allocated + kLargeObjectBytesAllocatedThreshold;
      if (bytes_promoted_threshold_exceeded || large_object_bytes_threshold_exceeded) {
        whole_heap_collection_ = true;
      }
    } else {
      // Reset the counters.
      bytes_promoted_since_last_whole_heap_collection_ = bytes_promoted_;
      large_object_bytes_allocated_at_last_whole_heap_collection_ =
          GetHeap()->GetLargeObjectsSpace()->GetBytesAllocated();
      whole_heap_collection_ = false;
    }
  }
  // Clear all of the spaces' mark bitmaps.
  WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
  heap_->ClearMarkedObjects();
}

void SemiSpace::RevokeAllThreadLocalBuffers() {
  timings_.StartSplit("(Paused)RevokeAllThreadLocalBuffers");
  GetHeap()->RevokeAllThreadLocalBuffers();
  timings_.EndSplit();
}

}  // namespace collector
}  // namespace gc
}  // namespace art