semi_space.cc revision 62ab87bb3ff4830def25a1716f6785256c7eebca
/*
 * Copyright (C) 2013 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "semi_space-inl.h"

#include <functional>
#include <numeric>
#include <climits>
#include <vector>

#include "base/logging.h"
#include "base/macros.h"
#include "base/mutex-inl.h"
#include "base/timing_logger.h"
#include "gc/accounting/heap_bitmap-inl.h"
#include "gc/accounting/mod_union_table.h"
#include "gc/accounting/remembered_set.h"
#include "gc/accounting/space_bitmap-inl.h"
#include "gc/heap.h"
#include "gc/space/bump_pointer_space.h"
#include "gc/space/bump_pointer_space-inl.h"
#include "gc/space/image_space.h"
#include "gc/space/large_object_space.h"
#include "gc/space/space-inl.h"
#include "indirect_reference_table.h"
#include "intern_table.h"
#include "jni_internal.h"
#include "mark_sweep-inl.h"
#include "monitor.h"
#include "mirror/art_field.h"
#include "mirror/art_field-inl.h"
#include "mirror/class-inl.h"
#include "mirror/class_loader.h"
#include "mirror/dex_cache.h"
#include "mirror/reference-inl.h"
#include "mirror/object-inl.h"
#include "mirror/object_array.h"
#include "mirror/object_array-inl.h"
#include "runtime.h"
#include "stack.h"
#include "thread-inl.h"
#include "thread_list.h"
#include "verifier/method_verifier.h"

using ::art::mirror::Class;
using ::art::mirror::Object;

namespace art {
namespace gc {
namespace collector {

static constexpr bool kProtectFromSpace = true;
static constexpr bool kStoreStackTraces = false;
static constexpr bool kUseBytesPromoted = true;
static constexpr size_t kBytesPromotedThreshold = 4 * MB;

void SemiSpace::BindBitmaps() {
  timings_.StartSplit("BindBitmaps");
  WriterMutexLock mu(self_, *Locks::heap_bitmap_lock_);
  // Mark all of the spaces we never collect as immune.
  for (const auto& space : GetHeap()->GetContinuousSpaces()) {
    if (space->GetLiveBitmap() != nullptr) {
      if (space == to_space_) {
        CHECK(to_space_->IsContinuousMemMapAllocSpace());
        to_space_->AsContinuousMemMapAllocSpace()->BindLiveToMarkBitmap();
      } else if (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyNeverCollect
                 || space->GetGcRetentionPolicy() == space::kGcRetentionPolicyFullCollect
                 // Add the main free list space and the non-moving
                 // space to the immune space if a bump pointer space
                 // only collection.
                 || (generational_ && !whole_heap_collection_ &&
                     (space == GetHeap()->GetNonMovingSpace() ||
                      space == GetHeap()->GetPrimaryFreeListSpace()))) {
        CHECK(immune_region_.AddContinuousSpace(space)) << "Failed to add space " << *space;
      }
    }
  }
  if (generational_ && !whole_heap_collection_) {
    // We won't collect the large object space if a bump pointer space only collection.
    is_large_object_space_immune_ = true;
  }
  timings_.EndSplit();
}

SemiSpace::SemiSpace(Heap* heap, bool generational, const std::string& name_prefix)
    : GarbageCollector(heap,
                       name_prefix + (name_prefix.empty() ? "" : " ") + "marksweep + semispace"),
      to_space_(nullptr),
      from_space_(nullptr),
      generational_(generational),
      last_gc_to_space_end_(nullptr),
      bytes_promoted_(0),
      bytes_promoted_since_last_whole_heap_collection_(0),
      whole_heap_collection_(true),
      whole_heap_collection_interval_counter_(0),
      collector_name_(name_) {
}

void SemiSpace::InitializePhase() {
  TimingLogger::ScopedSplit split("InitializePhase", &timings_);
  mark_stack_ = heap_->mark_stack_.get();
  DCHECK(mark_stack_ != nullptr);
  immune_region_.Reset();
  is_large_object_space_immune_ = false;
  saved_bytes_ = 0;
  bytes_moved_ = 0;
  objects_moved_ = 0;
  self_ = Thread::Current();
  // Do any pre GC verification.
  timings_.NewSplit("PreGcVerification");
  heap_->PreGcVerification(this);
  CHECK(from_space_->CanMoveObjects()) << "Attempting to move from " << *from_space_;
  // Set the initial bitmap.
  to_space_live_bitmap_ = to_space_->GetLiveBitmap();
  {
    // TODO: I don't think we should need heap bitmap lock to get the mark bitmap.
    ReaderMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
    mark_bitmap_ = heap_->GetMarkBitmap();
  }
}

void SemiSpace::ProcessReferences(Thread* self) {
  TimingLogger::ScopedSplit split("ProcessReferences", &timings_);
  WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
  GetHeap()->ProcessReferences(timings_, clear_soft_references_, &MarkedForwardingAddressCallback,
                               &MarkObjectCallback, &ProcessMarkStackCallback, this);
}

void SemiSpace::MarkingPhase() {
  if (kStoreStackTraces) {
    Locks::mutator_lock_->AssertExclusiveHeld(self_);
    // Store the stack traces into the runtime fault string in case we get a heap corruption
    // related crash later.
    ThreadState old_state = self_->SetStateUnsafe(kRunnable);
    std::ostringstream oss;
    Runtime* runtime = Runtime::Current();
    runtime->GetThreadList()->DumpForSigQuit(oss);
    runtime->GetThreadList()->DumpNativeStacks(oss);
    runtime->SetFaultMessage(oss.str());
    CHECK_EQ(self_->SetStateUnsafe(old_state), kRunnable);
  }

  if (generational_) {
    if (gc_cause_ == kGcCauseExplicit || gc_cause_ == kGcCauseForNativeAlloc ||
        clear_soft_references_) {
      // If an explicit, native allocation-triggered, or last attempt
      // collection, collect the whole heap (and reset the interval
      // counter to be consistent.)
      whole_heap_collection_ = true;
      if (!kUseBytesPromoted) {
        whole_heap_collection_interval_counter_ = 0;
      }
    }
    if (whole_heap_collection_) {
      VLOG(heap) << "Whole heap collection";
      name_ = collector_name_ + " whole";
    } else {
      VLOG(heap) << "Bump pointer space only collection";
      name_ = collector_name_ + " bps";
    }
  }

  if (!clear_soft_references_) {
    if (!generational_) {
      // If non-generational, always clear soft references.
      clear_soft_references_ = true;
    } else {
      // If generational, clear soft references if a whole heap collection.
      if (whole_heap_collection_) {
        clear_soft_references_ = true;
      }
    }
  }

  Locks::mutator_lock_->AssertExclusiveHeld(self_);

  TimingLogger::ScopedSplit split("MarkingPhase", &timings_);
  if (generational_) {
    // If last_gc_to_space_end_ is out of the bounds of the from-space
    // (the to-space from last GC), then point it to the beginning of
    // the from-space. For example, the very first GC or the
    // pre-zygote compaction.
    if (!from_space_->HasAddress(reinterpret_cast<mirror::Object*>(last_gc_to_space_end_))) {
      last_gc_to_space_end_ = from_space_->Begin();
    }
    // Reset this before the marking starts below.
    bytes_promoted_ = 0;
  }
  // Assume the cleared space is already empty.
  BindBitmaps();
  // Process dirty cards and add dirty cards to mod-union tables.
  heap_->ProcessCards(timings_, kUseRememberedSet && generational_);
  // Clear the whole card table since we cannot get any additional dirty cards during the
  // paused GC. This saves memory but only works for pause-the-world collectors.
  timings_.NewSplit("ClearCardTable");
  heap_->GetCardTable()->ClearCardTable();
  // Need to do this before the checkpoint since we don't want any threads to add references to
  // the live stack during the recursive mark.
  timings_.NewSplit("SwapStacks");
  if (kUseThreadLocalAllocationStack) {
    heap_->RevokeAllThreadLocalAllocationStacks(self_);
  }
  heap_->SwapStacks(self_);
  WriterMutexLock mu(self_, *Locks::heap_bitmap_lock_);
  MarkRoots();
  // Mark roots of immune spaces.
  UpdateAndMarkModUnion();
  // Recursively mark remaining objects.
  MarkReachableObjects();
}

void SemiSpace::UpdateAndMarkModUnion() {
  for (auto& space : heap_->GetContinuousSpaces()) {
    // If the space is immune then we need to mark the references to other spaces.
    if (immune_region_.ContainsSpace(space)) {
      accounting::ModUnionTable* table = heap_->FindModUnionTableFromSpace(space);
      if (table != nullptr) {
        // TODO: Improve naming.
        TimingLogger::ScopedSplit split(
            space->IsZygoteSpace() ? "UpdateAndMarkZygoteModUnionTable" :
                                     "UpdateAndMarkImageModUnionTable",
            &timings_);
        table->UpdateAndMarkReferences(MarkHeapReferenceCallback, this);
      } else if (heap_->FindRememberedSetFromSpace(space) != nullptr) {
        DCHECK(kUseRememberedSet);
        // If a bump pointer space only collection, the non-moving
        // space is added to the immune space. The non-moving space
        // doesn't have a mod union table, but has a remembered
        // set. Its dirty cards will be scanned later in
        // MarkReachableObjects().
        DCHECK(generational_ && !whole_heap_collection_ &&
               (space == heap_->GetNonMovingSpace() || space == heap_->GetPrimaryFreeListSpace()))
            << "Space " << space->GetName() << " "
            << "generational_=" << generational_ << " "
            << "whole_heap_collection_=" << whole_heap_collection_ << " ";
      } else {
        DCHECK(!kUseRememberedSet);
        // If a bump pointer space only collection, the non-moving
        // space is added to the immune space. But the non-moving
        // space doesn't have a mod union table. Instead, its live
        // bitmap will be scanned later in MarkReachableObjects().
        DCHECK(generational_ && !whole_heap_collection_ &&
               (space == heap_->GetNonMovingSpace() || space == heap_->GetPrimaryFreeListSpace()))
            << "Space " << space->GetName() << " "
            << "generational_=" << generational_ << " "
            << "whole_heap_collection_=" << whole_heap_collection_ << " ";
      }
    }
  }
}

class SemiSpaceScanObjectVisitor {
 public:
  explicit SemiSpaceScanObjectVisitor(SemiSpace* ss) : semi_space_(ss) {}
  void operator()(Object* obj) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
    // TODO: fix NO_THREAD_SAFETY_ANALYSIS. ScanObject() requires an
    // exclusive lock on the mutator lock, but
    // SpaceBitmap::VisitMarkedRange() only requires the shared lock.
    DCHECK(obj != nullptr);
    semi_space_->ScanObject(obj);
  }
 private:
  SemiSpace* const semi_space_;
};

// Used to verify that there are no references to the from-space.
class SemiSpaceVerifyNoFromSpaceReferencesVisitor {
 public:
  explicit SemiSpaceVerifyNoFromSpaceReferencesVisitor(space::ContinuousMemMapAllocSpace* from_space) :
      from_space_(from_space) {}

  void operator()(Object* obj, MemberOffset offset, bool /* is_static */) const
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) ALWAYS_INLINE {
    mirror::Object* ref = obj->GetFieldObject<mirror::Object>(offset, false);
    if (from_space_->HasAddress(ref)) {
      Runtime::Current()->GetHeap()->DumpObject(LOG(INFO), obj);
      LOG(FATAL) << ref << " found in from space";
    }
  }
 private:
  space::ContinuousMemMapAllocSpace* from_space_;
};

void SemiSpace::VerifyNoFromSpaceReferences(Object* obj) {
  DCHECK(!from_space_->HasAddress(obj)) << "Scanning object " << obj << " in from space";
  SemiSpaceVerifyNoFromSpaceReferencesVisitor visitor(from_space_);
  obj->VisitReferences<kMovingClasses>(visitor);
}

class SemiSpaceVerifyNoFromSpaceReferencesObjectVisitor {
 public:
  explicit SemiSpaceVerifyNoFromSpaceReferencesObjectVisitor(SemiSpace* ss) : semi_space_(ss) {}
  void operator()(Object* obj) const
      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_) {
    DCHECK(obj != nullptr);
    semi_space_->VerifyNoFromSpaceReferences(obj);
  }
 private:
  SemiSpace* const semi_space_;
};

void SemiSpace::MarkReachableObjects() {
  timings_.StartSplit("MarkStackAsLive");
  accounting::ObjectStack* live_stack = heap_->GetLiveStack();
  heap_->MarkAllocStackAsLive(live_stack);
  live_stack->Reset();

  timings_.NewSplit("UpdateAndMarkRememberedSets");
  for (auto& space : heap_->GetContinuousSpaces()) {
    // If the space is immune and has no mod union table (the
    // non-moving space when the bump pointer space only collection is
    // enabled,) then we need to scan its live bitmap or dirty cards as roots
    // (including the objects on the live stack which have just been marked
    // in the live bitmap above in MarkAllocStackAsLive().)
    if (immune_region_.ContainsSpace(space) &&
        heap_->FindModUnionTableFromSpace(space) == nullptr) {
      DCHECK(generational_ && !whole_heap_collection_ &&
             (space == GetHeap()->GetNonMovingSpace() || space == GetHeap()->GetPrimaryFreeListSpace()));
      accounting::RememberedSet* rem_set = heap_->FindRememberedSetFromSpace(space);
      if (kUseRememberedSet) {
        DCHECK(rem_set != nullptr);
        rem_set->UpdateAndMarkReferences(MarkHeapReferenceCallback, from_space_, this);
        if (kIsDebugBuild) {
          // Verify that there are no from-space references that
          // remain in the space, that is, the remembered set (and the
          // card table) didn't miss any from-space references in the
          // space.
          accounting::ContinuousSpaceBitmap* live_bitmap = space->GetLiveBitmap();
          SemiSpaceVerifyNoFromSpaceReferencesObjectVisitor visitor(this);
          live_bitmap->VisitMarkedRange(reinterpret_cast<uintptr_t>(space->Begin()),
                                        reinterpret_cast<uintptr_t>(space->End()),
                                        visitor);
        }
      } else {
        DCHECK(rem_set == nullptr);
        accounting::ContinuousSpaceBitmap* live_bitmap = space->GetLiveBitmap();
        SemiSpaceScanObjectVisitor visitor(this);
        live_bitmap->VisitMarkedRange(reinterpret_cast<uintptr_t>(space->Begin()),
                                      reinterpret_cast<uintptr_t>(space->End()),
                                      visitor);
      }
    }
  }

  if (is_large_object_space_immune_) {
    timings_.NewSplit("VisitLargeObjects");
    DCHECK(generational_ && !whole_heap_collection_);
    // Delay copying the live set to the marked set until here from
    // BindBitmaps() as the large objects on the allocation stack may
    // be newly added to the live set above in MarkAllocStackAsLive().
    GetHeap()->GetLargeObjectsSpace()->CopyLiveToMarked();

    // When the large object space is immune, we need to scan the
    // large object space as roots as they contain references to their
    // classes (primitive array classes) that could move though they
    // don't contain any other references.
    space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
    accounting::LargeObjectBitmap* large_live_bitmap = large_object_space->GetLiveBitmap();
    SemiSpaceScanObjectVisitor visitor(this);
    large_live_bitmap->VisitMarkedRange(reinterpret_cast<uintptr_t>(large_object_space->Begin()),
                                        reinterpret_cast<uintptr_t>(large_object_space->End()),
                                        visitor);
  }
  timings_.EndSplit();
  // Recursively process the mark stack.
  ProcessMarkStack();
}

void SemiSpace::ReclaimPhase() {
  TimingLogger::ScopedSplit split("ReclaimPhase", &timings_);
  ProcessReferences(self_);
  {
    ReaderMutexLock mu(self_, *Locks::heap_bitmap_lock_);
    SweepSystemWeaks();
  }
  // Record freed memory.
  uint64_t from_bytes = from_space_->GetBytesAllocated();
  uint64_t to_bytes = bytes_moved_;
  uint64_t from_objects = from_space_->GetObjectsAllocated();
  uint64_t to_objects = objects_moved_;
  CHECK_LE(to_objects, from_objects);
  int64_t freed_bytes = from_bytes - to_bytes;
  int64_t freed_objects = from_objects - to_objects;
  freed_bytes_.FetchAndAdd(freed_bytes);
  freed_objects_.FetchAndAdd(freed_objects);
  // Note: Freed bytes can be negative if we copy from a compacted space to a free-list backed
  // space.
  heap_->RecordFree(freed_objects, freed_bytes);

  timings_.StartSplit("PreSweepingGcVerification");
  heap_->PreSweepingGcVerification(this);
  timings_.EndSplit();
  {
    WriterMutexLock mu(self_, *Locks::heap_bitmap_lock_);
    // Reclaim unmarked objects.
    Sweep(false);
    // Swap the live and mark bitmaps for each space that we modified. This is an
    // optimization that enables us to not clear live bits inside of the sweep. Only swaps unbound
    // bitmaps.
    timings_.StartSplit("SwapBitmaps");
    SwapBitmaps();
    timings_.EndSplit();
    // Unbind the live and mark bitmaps.
    TimingLogger::ScopedSplit split("UnBindBitmaps", &timings_);
    GetHeap()->UnBindBitmaps();
  }
  // TODO: Do this before doing verification since the from space may have objects which weren't
  // moved and point to dead objects.
  from_space_->Clear();
  // Protect the from space.
  VLOG(heap) << "Protecting space " << *from_space_;
  if (kProtectFromSpace) {
    from_space_->GetMemMap()->Protect(PROT_NONE);
  } else {
    from_space_->GetMemMap()->Protect(PROT_READ);
  }
  if (saved_bytes_ > 0) {
    VLOG(heap) << "Avoided dirtying " << PrettySize(saved_bytes_);
  }

  if (generational_) {
    // Record the end (top) of the to space so we can distinguish
    // between objects that were allocated since the last GC and the
    // older objects.
    last_gc_to_space_end_ = to_space_->End();
  }
}

void SemiSpace::ResizeMarkStack(size_t new_size) {
  std::vector<Object*> temp(mark_stack_->Begin(), mark_stack_->End());
  CHECK_LE(mark_stack_->Size(), new_size);
  mark_stack_->Resize(new_size);
  for (const auto& obj : temp) {
    mark_stack_->PushBack(obj);
  }
}

inline void SemiSpace::MarkStackPush(Object* obj) {
  if (UNLIKELY(mark_stack_->Size() >= mark_stack_->Capacity())) {
    ResizeMarkStack(mark_stack_->Capacity() * 2);
  }
  // The object must be pushed on to the mark stack.
  mark_stack_->PushBack(obj);
}

static inline size_t CopyAvoidingDirtyingPages(void* dest, const void* src, size_t size) {
  if (LIKELY(size <= static_cast<size_t>(kPageSize))) {
    // We will dirty the current page and somewhere in the middle of the next page. This means
    // that the next object copied will also dirty that page.
    // TODO: Worth considering the last object copied? We may end up dirtying one page which is
    // not necessary per GC.
    memcpy(dest, src, size);
    return 0;
  }
  size_t saved_bytes = 0;
  byte* byte_dest = reinterpret_cast<byte*>(dest);
  if (kIsDebugBuild) {
    for (size_t i = 0; i < size; ++i) {
      CHECK_EQ(byte_dest[i], 0U);
    }
  }
  // Process the start of the page. The page must already be dirty, don't bother with checking.
  const byte* byte_src = reinterpret_cast<const byte*>(src);
  const byte* limit = byte_src + size;
  size_t page_remain = AlignUp(byte_dest, kPageSize) - byte_dest;
  // Copy the bytes until the start of the next page.
  memcpy(dest, src, page_remain);
  byte_src += page_remain;
  byte_dest += page_remain;
  DCHECK_ALIGNED(reinterpret_cast<uintptr_t>(byte_dest), kPageSize);
  DCHECK_ALIGNED(reinterpret_cast<uintptr_t>(byte_dest), sizeof(uintptr_t));
  DCHECK_ALIGNED(reinterpret_cast<uintptr_t>(byte_src), sizeof(uintptr_t));
  while (byte_src + kPageSize < limit) {
    bool all_zero = true;
    uintptr_t* word_dest = reinterpret_cast<uintptr_t*>(byte_dest);
    const uintptr_t* word_src = reinterpret_cast<const uintptr_t*>(byte_src);
    for (size_t i = 0; i < kPageSize / sizeof(*word_src); ++i) {
      // Assumes the destination of the copy is all zeros.
      if (word_src[i] != 0) {
        all_zero = false;
        word_dest[i] = word_src[i];
      }
    }
    if (all_zero) {
      // Avoided copying into the page since it was all zeros.
      saved_bytes += kPageSize;
    }
    byte_src += kPageSize;
    byte_dest += kPageSize;
  }
  // Handle the part of the page at the end.
  memcpy(byte_dest, byte_src, limit - byte_src);
  return saved_bytes;
}

mirror::Object* SemiSpace::MarkNonForwardedObject(mirror::Object* obj) {
  size_t object_size = obj->SizeOf();
  size_t bytes_allocated;
  mirror::Object* forward_address = nullptr;
  if (generational_ && reinterpret_cast<byte*>(obj) < last_gc_to_space_end_) {
    // If it's allocated before the last GC (older), move
    // (pseudo-promote) it to the main free list space (as sort
    // of an old generation.)
    space::MallocSpace* promo_dest_space = GetHeap()->GetPrimaryFreeListSpace();
    forward_address = promo_dest_space->Alloc(self_, object_size, &bytes_allocated, nullptr);
    if (UNLIKELY(forward_address == nullptr)) {
      // If out of space, fall back to the to-space.
      forward_address = to_space_->Alloc(self_, object_size, &bytes_allocated, nullptr);
    } else {
      bytes_promoted_ += bytes_allocated;
      // Dirty the card at the destination as it may contain
      // references (including the class pointer) to the bump pointer
      // space.
      GetHeap()->WriteBarrierEveryFieldOf(forward_address);
      // Handle the bitmaps marking.
      accounting::ContinuousSpaceBitmap* live_bitmap = promo_dest_space->GetLiveBitmap();
      DCHECK(live_bitmap != nullptr);
      accounting::ContinuousSpaceBitmap* mark_bitmap = promo_dest_space->GetMarkBitmap();
      DCHECK(mark_bitmap != nullptr);
      DCHECK(!live_bitmap->Test(forward_address));
      if (!whole_heap_collection_) {
        // If collecting the bump pointer spaces only, live_bitmap == mark_bitmap.
        DCHECK_EQ(live_bitmap, mark_bitmap);

        // If a bump pointer space only collection, delay the live
        // bitmap marking of the promoted object until it's popped off
        // the mark stack (ProcessMarkStack()). The rationale: we may
        // be in the middle of scanning the objects in the promo
        // destination space for
        // non-moving-space-to-bump-pointer-space references by
        // iterating over the marked bits of the live bitmap
        // (MarkReachableObjects()). If we don't delay it (and instead
        // mark the promoted object here), the above promo destination
        // space scan could encounter the just-promoted object and
        // forward the references in the promoted object's fields even
        // though it is pushed onto the mark stack. If this happens,
        // the promoted object would be in an inconsistent state, that
        // is, it's on the mark stack (gray) but its fields are
        // already forwarded (black), which would cause a
        // DCHECK(!to_space_->HasAddress(obj)) failure below.
      } else {
        // Mark forward_address on the live bit map.
        live_bitmap->Set(forward_address);
        // Mark forward_address on the mark bit map.
        DCHECK(!mark_bitmap->Test(forward_address));
        mark_bitmap->Set(forward_address);
      }
    }
    DCHECK(forward_address != nullptr);
  } else {
    // If it's allocated after the last GC (younger), copy it to the to-space.
    forward_address = to_space_->Alloc(self_, object_size, &bytes_allocated, nullptr);
  }
  ++objects_moved_;
  bytes_moved_ += bytes_allocated;
  // Copy over the object and add it to the mark stack since we still need to update its
  // references.
  saved_bytes_ +=
      CopyAvoidingDirtyingPages(reinterpret_cast<void*>(forward_address), obj, object_size);
  if (kUseBakerOrBrooksReadBarrier) {
    obj->AssertReadBarrierPointer();
    if (kUseBrooksReadBarrier) {
      DCHECK_EQ(forward_address->GetReadBarrierPointer(), obj);
      forward_address->SetReadBarrierPointer(forward_address);
    }
    forward_address->AssertReadBarrierPointer();
  }
  if (to_space_live_bitmap_ != nullptr) {
    to_space_live_bitmap_->Set(forward_address);
  }
  DCHECK(to_space_->HasAddress(forward_address) ||
         (generational_ && GetHeap()->GetPrimaryFreeListSpace()->HasAddress(forward_address)));
  return forward_address;
}

void SemiSpace::ProcessMarkStackCallback(void* arg) {
  reinterpret_cast<SemiSpace*>(arg)->ProcessMarkStack();
}

mirror::Object* SemiSpace::MarkObjectCallback(mirror::Object* root, void* arg) {
  auto ref = StackReference<mirror::Object>::FromMirrorPtr(root);
  reinterpret_cast<SemiSpace*>(arg)->MarkObject(&ref);
  return ref.AsMirrorPtr();
}

void SemiSpace::MarkHeapReferenceCallback(mirror::HeapReference<mirror::Object>* obj_ptr,
                                          void* arg) {
  reinterpret_cast<SemiSpace*>(arg)->MarkObject(obj_ptr);
}

void SemiSpace::MarkRootCallback(Object** root, void* arg, uint32_t /*thread_id*/,
                                 RootType /*root_type*/) {
  auto ref = StackReference<mirror::Object>::FromMirrorPtr(*root);
  reinterpret_cast<SemiSpace*>(arg)->MarkObject(&ref);
  if (*root != ref.AsMirrorPtr()) {
    *root = ref.AsMirrorPtr();
  }
}

// Marks all objects in the root set.
void SemiSpace::MarkRoots() {
  timings_.NewSplit("MarkRoots");
  // TODO: Visit up image roots as well?
  Runtime::Current()->VisitRoots(MarkRootCallback, this);
}

mirror::Object* SemiSpace::MarkedForwardingAddressCallback(mirror::Object* object, void* arg) {
  return reinterpret_cast<SemiSpace*>(arg)->GetMarkedForwardAddress(object);
}

void SemiSpace::SweepSystemWeaks() {
  timings_.StartSplit("SweepSystemWeaks");
  Runtime::Current()->SweepSystemWeaks(MarkedForwardingAddressCallback, this);
  timings_.EndSplit();
}

bool SemiSpace::ShouldSweepSpace(space::ContinuousSpace* space) const {
  return space != from_space_ && space != to_space_ && !immune_region_.ContainsSpace(space);
}

void SemiSpace::Sweep(bool swap_bitmaps) {
  DCHECK(mark_stack_->IsEmpty());
  TimingLogger::ScopedSplit split("Sweep", &timings_);
  for (const auto& space : GetHeap()->GetContinuousSpaces()) {
    if (space->IsContinuousMemMapAllocSpace()) {
      space::ContinuousMemMapAllocSpace* alloc_space = space->AsContinuousMemMapAllocSpace();
      if (!ShouldSweepSpace(alloc_space)) {
        continue;
      }
      TimingLogger::ScopedSplit split(
          alloc_space->IsZygoteSpace() ? "SweepZygoteSpace" : "SweepAllocSpace", &timings_);
      size_t freed_objects = 0;
      size_t freed_bytes = 0;
      alloc_space->Sweep(swap_bitmaps, &freed_objects, &freed_bytes);
      heap_->RecordFree(freed_objects, freed_bytes);
      freed_objects_.FetchAndAdd(freed_objects);
      freed_bytes_.FetchAndAdd(freed_bytes);
    }
  }
  if (!is_large_object_space_immune_) {
    SweepLargeObjects(swap_bitmaps);
  }
}

void SemiSpace::SweepLargeObjects(bool swap_bitmaps) {
  DCHECK(!is_large_object_space_immune_);
  TimingLogger::ScopedSplit split("SweepLargeObjects", &timings_);
  size_t freed_objects = 0;
  size_t freed_bytes = 0;
  heap_->GetLargeObjectsSpace()->Sweep(swap_bitmaps, &freed_objects, &freed_bytes);
  freed_large_objects_.FetchAndAdd(freed_objects);
  freed_large_object_bytes_.FetchAndAdd(freed_bytes);
  heap_->RecordFree(freed_objects, freed_bytes);
}

// Process the "referent" field in a java.lang.ref.Reference. If the referent has not yet been
// marked, put it on the appropriate list in the heap for later processing.
void SemiSpace::DelayReferenceReferent(mirror::Class* klass, mirror::Reference* reference) {
  heap_->DelayReferenceReferent(klass, reference, MarkedForwardingAddressCallback, this);
}

class SemiSpaceMarkObjectVisitor {
 public:
  explicit SemiSpaceMarkObjectVisitor(SemiSpace* collector) : collector_(collector) {
  }

  void operator()(Object* obj, MemberOffset offset, bool /* is_static */) const ALWAYS_INLINE
      EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
    // Object was already verified when we scanned it.
    collector_->MarkObject(obj->GetFieldObjectReferenceAddr<kVerifyNone>(offset));
  }

  void operator()(mirror::Class* klass, mirror::Reference* ref) const
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
    collector_->DelayReferenceReferent(klass, ref);
  }

 private:
  SemiSpace* const collector_;
};

// Visit all of the references of an object and update.
void SemiSpace::ScanObject(Object* obj) {
  DCHECK(!from_space_->HasAddress(obj)) << "Scanning object " << obj << " in from space";
  SemiSpaceMarkObjectVisitor visitor(this);
  obj->VisitReferences<kMovingClasses>(visitor, visitor);
}

// Scan anything that's on the mark stack.
void SemiSpace::ProcessMarkStack() {
  space::MallocSpace* promo_dest_space = nullptr;
  accounting::ContinuousSpaceBitmap* live_bitmap = nullptr;
  if (generational_ && !whole_heap_collection_) {
    // If a bump pointer space only collection (and the promotion is
    // enabled,) we delay the live-bitmap marking of promoted objects
    // from MarkObject() until this function.
    promo_dest_space = GetHeap()->GetPrimaryFreeListSpace();
    live_bitmap = promo_dest_space->GetLiveBitmap();
    DCHECK(live_bitmap != nullptr);
    accounting::ContinuousSpaceBitmap* mark_bitmap = promo_dest_space->GetMarkBitmap();
    DCHECK(mark_bitmap != nullptr);
    DCHECK_EQ(live_bitmap, mark_bitmap);
  }
  timings_.StartSplit("ProcessMarkStack");
  while (!mark_stack_->IsEmpty()) {
    Object* obj = mark_stack_->PopBack();
    if (generational_ && !whole_heap_collection_ && promo_dest_space->HasAddress(obj)) {
      // obj has just been promoted. Mark the live bitmap for it,
      // which is delayed from MarkObject().
      DCHECK(!live_bitmap->Test(obj));
      live_bitmap->Set(obj);
    }
    ScanObject(obj);
  }
  timings_.EndSplit();
}

inline Object* SemiSpace::GetMarkedForwardAddress(mirror::Object* obj) const
    SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
  // All immune objects are assumed marked.
  if (immune_region_.ContainsObject(obj)) {
    return obj;
  }
  if (from_space_->HasAddress(obj)) {
    // Returns either the forwarding address or nullptr.
    return GetForwardingAddressInFromSpace(obj);
  } else if (to_space_->HasAddress(obj)) {
    // Should be unlikely.
    // Already forwarded, must be marked.
    return obj;
  }
  return heap_->GetMarkBitmap()->Test(obj) ? obj : nullptr;
}

void SemiSpace::SetToSpace(space::ContinuousMemMapAllocSpace* to_space) {
  DCHECK(to_space != nullptr);
  to_space_ = to_space;
}

void SemiSpace::SetFromSpace(space::ContinuousMemMapAllocSpace* from_space) {
  DCHECK(from_space != nullptr);
  from_space_ = from_space;
}

void SemiSpace::FinishPhase() {
  TimingLogger::ScopedSplit split("FinishPhase", &timings_);
  Heap* heap = GetHeap();
  timings_.NewSplit("PostGcVerification");
  heap->PostGcVerification(this);
  // Null the "to" and "from" spaces since compacting from one to the other isn't valid until
  // further action is done by the heap.
  to_space_ = nullptr;
  from_space_ = nullptr;
  CHECK(mark_stack_->IsEmpty());
  mark_stack_->Reset();
  if (generational_) {
    // Decide whether to do a whole heap collection or a bump pointer
    // only space collection at the next collection by updating
    // whole_heap_collection.
    if (!whole_heap_collection_) {
      if (!kUseBytesPromoted) {
        // Enable whole_heap_collection once every
        // kDefaultWholeHeapCollectionInterval collections.
        --whole_heap_collection_interval_counter_;
        DCHECK_GE(whole_heap_collection_interval_counter_, 0);
        if (whole_heap_collection_interval_counter_ == 0) {
          whole_heap_collection_ = true;
        }
      } else {
        // Enable whole_heap_collection if the bytes promoted since
        // the last whole heap collection exceeds a threshold.
        bytes_promoted_since_last_whole_heap_collection_ += bytes_promoted_;
        if (bytes_promoted_since_last_whole_heap_collection_ >= kBytesPromotedThreshold) {
          whole_heap_collection_ = true;
        }
      }
    } else {
      if (!kUseBytesPromoted) {
        DCHECK_EQ(whole_heap_collection_interval_counter_, 0);
        whole_heap_collection_interval_counter_ = kDefaultWholeHeapCollectionInterval;
        whole_heap_collection_ = false;
      } else {
        // Reset it.
        bytes_promoted_since_last_whole_heap_collection_ = bytes_promoted_;
        whole_heap_collection_ = false;
      }
    }
  }
  // Clear all of the spaces' mark bitmaps.
  WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
  heap_->ClearMarkedObjects();
}

void SemiSpace::RevokeAllThreadLocalBuffers() {
  timings_.StartSplit("(Paused)RevokeAllThreadLocalBuffers");
  GetHeap()->RevokeAllThreadLocalBuffers();
  timings_.EndSplit();
}

}  // namespace collector
}  // namespace gc
}  // namespace art