semi_space.cc revision 4db7449c0065971ec3a64ca04aeb64cfd2e802f0
/*
 * Copyright (C) 2013 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "semi_space-inl.h"

#include <functional>
#include <numeric>
#include <climits>
#include <vector>

#include "base/logging.h"
#include "base/macros.h"
#include "base/mutex-inl.h"
#include "base/timing_logger.h"
#include "gc/accounting/heap_bitmap-inl.h"
#include "gc/accounting/mod_union_table.h"
#include "gc/accounting/remembered_set.h"
#include "gc/accounting/space_bitmap-inl.h"
#include "gc/heap.h"
#include "gc/space/bump_pointer_space.h"
#include "gc/space/bump_pointer_space-inl.h"
#include "gc/space/image_space.h"
#include "gc/space/large_object_space.h"
#include "gc/space/space-inl.h"
#include "indirect_reference_table.h"
#include "intern_table.h"
#include "jni_internal.h"
#include "mark_sweep-inl.h"
#include "monitor.h"
#include "mirror/art_field.h"
#include "mirror/art_field-inl.h"
#include "mirror/class-inl.h"
#include "mirror/class_loader.h"
#include "mirror/dex_cache.h"
#include "mirror/reference-inl.h"
#include "mirror/object-inl.h"
#include "mirror/object_array.h"
#include "mirror/object_array-inl.h"
#include "runtime.h"
#include "stack.h"
#include "thread-inl.h"
#include "thread_list.h"
#include "verifier/method_verifier.h"

using ::art::mirror::Class;
using ::art::mirror::Object;

namespace art {
namespace gc {
namespace collector {

static constexpr bool kProtectFromSpace = true;
static constexpr bool kStoreStackTraces = false;
static constexpr bool kUseBytesPromoted = true;
static constexpr size_t kBytesPromotedThreshold = 4 * MB;

void SemiSpace::BindBitmaps() {
  timings_.StartSplit("BindBitmaps");
  WriterMutexLock mu(self_, *Locks::heap_bitmap_lock_);
  // Mark all of the spaces we never collect as immune.
  for (const auto& space : GetHeap()->GetContinuousSpaces()) {
    if (space->GetLiveBitmap() != nullptr) {
      if (space == to_space_) {
        CHECK(to_space_->IsContinuousMemMapAllocSpace());
        to_space_->AsContinuousMemMapAllocSpace()->BindLiveToMarkBitmap();
      } else if (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyNeverCollect
          || space->GetGcRetentionPolicy() == space::kGcRetentionPolicyFullCollect
          // Add the main free list space and the non-moving
          // space to the immune space if a bump pointer space
          // only collection.
          || (generational_ && !whole_heap_collection_ &&
              (space == GetHeap()->GetNonMovingSpace() ||
               space == GetHeap()->GetPrimaryFreeListSpace()))) {
        CHECK(immune_region_.AddContinuousSpace(space)) << "Failed to add space " << *space;
      }
    }
  }
  if (generational_ && !whole_heap_collection_) {
    // We won't collect the large object space if a bump pointer space only collection.
    is_large_object_space_immune_ = true;
  }
  timings_.EndSplit();
}

SemiSpace::SemiSpace(Heap* heap, bool generational, const std::string& name_prefix)
    : GarbageCollector(heap,
                       name_prefix + (name_prefix.empty() ? "" : " ") + "marksweep + semispace"),
      to_space_(nullptr),
      from_space_(nullptr),
      generational_(generational),
      last_gc_to_space_end_(nullptr),
      bytes_promoted_(0),
      bytes_promoted_since_last_whole_heap_collection_(0),
      whole_heap_collection_(true),
      whole_heap_collection_interval_counter_(0),
      collector_name_(name_) {
}

void SemiSpace::InitializePhase() {
  timings_.Reset();
  TimingLogger::ScopedSplit split("InitializePhase", &timings_);
  mark_stack_ = heap_->mark_stack_.get();
  DCHECK(mark_stack_ != nullptr);
  immune_region_.Reset();
  is_large_object_space_immune_ = false;
  saved_bytes_ = 0;
  bytes_moved_ = 0;
  objects_moved_ = 0;
  self_ = Thread::Current();
  // Do any pre GC verification.
  timings_.NewSplit("PreGcVerification");
  heap_->PreGcVerification(this);
  CHECK(from_space_->CanMoveObjects()) << "Attempting to move from " << *from_space_;
  // Set the initial bitmap.
  to_space_live_bitmap_ = to_space_->GetLiveBitmap();
  {
    // TODO: I don't think we should need heap bitmap lock to get the mark bitmap.
    ReaderMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
    mark_bitmap_ = heap_->GetMarkBitmap();
  }
}

void SemiSpace::ProcessReferences(Thread* self) {
  TimingLogger::ScopedSplit split("ProcessReferences", &timings_);
  WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
  GetHeap()->ProcessReferences(timings_, clear_soft_references_, &MarkedForwardingAddressCallback,
                               &MarkObjectCallback, &ProcessMarkStackCallback, this);
}

void SemiSpace::MarkingPhase() {
  if (kStoreStackTraces) {
    Locks::mutator_lock_->AssertExclusiveHeld(self_);
    // Store the stack traces into the runtime fault string in case we get a heap corruption
    // related crash later.
    ThreadState old_state = self_->SetStateUnsafe(kRunnable);
    std::ostringstream oss;
    Runtime* runtime = Runtime::Current();
    runtime->GetThreadList()->DumpForSigQuit(oss);
    runtime->GetThreadList()->DumpNativeStacks(oss);
    runtime->SetFaultMessage(oss.str());
    CHECK_EQ(self_->SetStateUnsafe(old_state), kRunnable);
  }

  if (generational_) {
    if (gc_cause_ == kGcCauseExplicit || gc_cause_ == kGcCauseForNativeAlloc ||
        clear_soft_references_) {
      // If an explicit, native allocation-triggered, or last attempt
      // collection, collect the whole heap (and reset the interval
      // counter to be consistent.)
      whole_heap_collection_ = true;
      if (!kUseBytesPromoted) {
        whole_heap_collection_interval_counter_ = 0;
      }
    }
    if (whole_heap_collection_) {
      VLOG(heap) << "Whole heap collection";
      name_ = collector_name_ + " whole";
    } else {
      VLOG(heap) << "Bump pointer space only collection";
      name_ = collector_name_ + " bps";
    }
  }

  if (!clear_soft_references_) {
    if (!generational_) {
      // If non-generational, always clear soft references.
      clear_soft_references_ = true;
    } else {
      // If generational, clear soft references if a whole heap collection.
      if (whole_heap_collection_) {
        clear_soft_references_ = true;
      }
    }
  }

  Locks::mutator_lock_->AssertExclusiveHeld(self_);

  TimingLogger::ScopedSplit split("MarkingPhase", &timings_);
  if (generational_) {
    // If last_gc_to_space_end_ is out of the bounds of the from-space
    // (the to-space from last GC), then point it to the beginning of
    // the from-space. For example, the very first GC or the
    // pre-zygote compaction.
    if (!from_space_->HasAddress(reinterpret_cast<mirror::Object*>(last_gc_to_space_end_))) {
      last_gc_to_space_end_ = from_space_->Begin();
    }
    // Reset this before the marking starts below.
    bytes_promoted_ = 0;
  }
  // Assume the cleared space is already empty.
  BindBitmaps();
  // Process dirty cards and add dirty cards to mod-union tables.
  heap_->ProcessCards(timings_, kUseRememberedSet && generational_);
  // Clear the whole card table since we cannot get any additional dirty cards during the
  // paused GC. This saves memory but only works for pause-the-world collectors.
  timings_.NewSplit("ClearCardTable");
  heap_->GetCardTable()->ClearCardTable();
  // Need to do this before the checkpoint since we don't want any threads to add references to
  // the live stack during the recursive mark.
  timings_.NewSplit("SwapStacks");
  if (kUseThreadLocalAllocationStack) {
    heap_->RevokeAllThreadLocalAllocationStacks(self_);
  }
  heap_->SwapStacks(self_);
  WriterMutexLock mu(self_, *Locks::heap_bitmap_lock_);
  MarkRoots();
  // Mark roots of immune spaces.
  UpdateAndMarkModUnion();
  // Recursively mark remaining objects.
  MarkReachableObjects();
}

void SemiSpace::UpdateAndMarkModUnion() {
  for (auto& space : heap_->GetContinuousSpaces()) {
    // If the space is immune then we need to mark the references to other spaces.
    if (immune_region_.ContainsSpace(space)) {
      accounting::ModUnionTable* table = heap_->FindModUnionTableFromSpace(space);
      if (table != nullptr) {
        // TODO: Improve naming.
        TimingLogger::ScopedSplit split(
            space->IsZygoteSpace() ? "UpdateAndMarkZygoteModUnionTable" :
                                     "UpdateAndMarkImageModUnionTable",
            &timings_);
        table->UpdateAndMarkReferences(MarkHeapReferenceCallback, this);
      } else if (heap_->FindRememberedSetFromSpace(space) != nullptr) {
        DCHECK(kUseRememberedSet);
        // If a bump pointer space only collection, the non-moving
        // space is added to the immune space. The non-moving space
        // doesn't have a mod union table, but has a remembered
        // set. Its dirty cards will be scanned later in
        // MarkReachableObjects().
        DCHECK(generational_ && !whole_heap_collection_ &&
               (space == heap_->GetNonMovingSpace() || space == heap_->GetPrimaryFreeListSpace()))
            << "Space " << space->GetName() << " "
            << "generational_=" << generational_ << " "
            << "whole_heap_collection_=" << whole_heap_collection_ << " ";
      } else {
        DCHECK(!kUseRememberedSet);
        // If a bump pointer space only collection, the non-moving
        // space is added to the immune space. But the non-moving
        // space doesn't have a mod union table. Instead, its live
        // bitmap will be scanned later in MarkReachableObjects().
        DCHECK(generational_ && !whole_heap_collection_ &&
               (space == heap_->GetNonMovingSpace() || space == heap_->GetPrimaryFreeListSpace()))
            << "Space " << space->GetName() << " "
            << "generational_=" << generational_ << " "
            << "whole_heap_collection_=" << whole_heap_collection_ << " ";
      }
    }
  }
}

class SemiSpaceScanObjectVisitor {
 public:
  explicit SemiSpaceScanObjectVisitor(SemiSpace* ss) : semi_space_(ss) {}
  void operator()(Object* obj) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
    // TODO: fix NO_THREAD_SAFETY_ANALYSIS. ScanObject() requires an
    // exclusive lock on the mutator lock, but
    // SpaceBitmap::VisitMarkedRange() only requires the shared lock.
    DCHECK(obj != nullptr);
    semi_space_->ScanObject(obj);
  }
 private:
  SemiSpace* const semi_space_;
};

// Used to verify that there are no references to the from-space.
class SemiSpaceVerifyNoFromSpaceReferencesVisitor {
 public:
  explicit SemiSpaceVerifyNoFromSpaceReferencesVisitor(space::ContinuousMemMapAllocSpace* from_space) :
      from_space_(from_space) {}

  void operator()(Object* obj, MemberOffset offset, bool /* is_static */) const
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) ALWAYS_INLINE {
    mirror::Object* ref = obj->GetFieldObject<mirror::Object>(offset, false);
    if (from_space_->HasAddress(ref)) {
      Runtime::Current()->GetHeap()->DumpObject(LOG(INFO), obj);
      LOG(FATAL) << ref << " found in from space";
    }
  }
 private:
  space::ContinuousMemMapAllocSpace* from_space_;
};

void SemiSpace::VerifyNoFromSpaceReferences(Object* obj) {
  DCHECK(!from_space_->HasAddress(obj)) << "Scanning object " << obj << " in from space";
  SemiSpaceVerifyNoFromSpaceReferencesVisitor visitor(from_space_);
  obj->VisitReferences<kMovingClasses>(visitor);
}

class SemiSpaceVerifyNoFromSpaceReferencesObjectVisitor {
 public:
  explicit SemiSpaceVerifyNoFromSpaceReferencesObjectVisitor(SemiSpace* ss) : semi_space_(ss) {}
  void operator()(Object* obj) const
      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_) {
    DCHECK(obj != nullptr);
    semi_space_->VerifyNoFromSpaceReferences(obj);
  }
 private:
  SemiSpace* const semi_space_;
};

void SemiSpace::MarkReachableObjects() {
  timings_.StartSplit("MarkStackAsLive");
  accounting::ObjectStack* live_stack = heap_->GetLiveStack();
  heap_->MarkAllocStackAsLive(live_stack);
  live_stack->Reset();

  timings_.NewSplit("UpdateAndMarkRememberedSets");
  for (auto& space : heap_->GetContinuousSpaces()) {
    // If the space is immune and has no mod union table (the
    // non-moving space when the bump pointer space only collection is
    // enabled,) then we need to scan its live bitmap or dirty cards as roots
    // (including the objects on the live stack which have just been marked
    // in the live bitmap above in MarkAllocStackAsLive().)
    if (immune_region_.ContainsSpace(space) &&
        heap_->FindModUnionTableFromSpace(space) == nullptr) {
      DCHECK(generational_ && !whole_heap_collection_ &&
             (space == GetHeap()->GetNonMovingSpace() || space == GetHeap()->GetPrimaryFreeListSpace()));
      accounting::RememberedSet* rem_set = heap_->FindRememberedSetFromSpace(space);
      if (kUseRememberedSet) {
        DCHECK(rem_set != nullptr);
        rem_set->UpdateAndMarkReferences(MarkHeapReferenceCallback, DelayReferenceReferentCallback,
                                         from_space_, this);
        if (kIsDebugBuild) {
          // Verify that there are no from-space references that
          // remain in the space, that is, the remembered set (and the
          // card table) didn't miss any from-space references in the
          // space.
          accounting::ContinuousSpaceBitmap* live_bitmap = space->GetLiveBitmap();
          SemiSpaceVerifyNoFromSpaceReferencesObjectVisitor visitor(this);
          live_bitmap->VisitMarkedRange(reinterpret_cast<uintptr_t>(space->Begin()),
                                        reinterpret_cast<uintptr_t>(space->End()),
                                        visitor);
        }
      } else {
        DCHECK(rem_set == nullptr);
        accounting::ContinuousSpaceBitmap* live_bitmap = space->GetLiveBitmap();
        SemiSpaceScanObjectVisitor visitor(this);
        live_bitmap->VisitMarkedRange(reinterpret_cast<uintptr_t>(space->Begin()),
                                      reinterpret_cast<uintptr_t>(space->End()),
                                      visitor);
      }
    }
  }

  if (is_large_object_space_immune_) {
    timings_.NewSplit("VisitLargeObjects");
    DCHECK(generational_ && !whole_heap_collection_);
    // Delay copying the live set to the marked set until here from
    // BindBitmaps() as the large objects on the allocation stack may
    // be newly added to the live set above in MarkAllocStackAsLive().
    GetHeap()->GetLargeObjectsSpace()->CopyLiveToMarked();

    // When the large object space is immune, we need to scan the
    // large object space as roots as they contain references to their
    // classes (primitive array classes) that could move though they
    // don't contain any other references.
    space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
    accounting::LargeObjectBitmap* large_live_bitmap = large_object_space->GetLiveBitmap();
    SemiSpaceScanObjectVisitor visitor(this);
    large_live_bitmap->VisitMarkedRange(reinterpret_cast<uintptr_t>(large_object_space->Begin()),
                                        reinterpret_cast<uintptr_t>(large_object_space->End()),
                                        visitor);
  }
  timings_.EndSplit();
  // Recursively process the mark stack.
  ProcessMarkStack();
}

void SemiSpace::ReclaimPhase() {
  TimingLogger::ScopedSplit split("ReclaimPhase", &timings_);
  ProcessReferences(self_);
  {
    ReaderMutexLock mu(self_, *Locks::heap_bitmap_lock_);
    SweepSystemWeaks();
  }
  // Record freed memory.
  uint64_t from_bytes = from_space_->GetBytesAllocated();
  uint64_t to_bytes = bytes_moved_;
  uint64_t from_objects = from_space_->GetObjectsAllocated();
  uint64_t to_objects = objects_moved_;
  CHECK_LE(to_objects, from_objects);
  int64_t freed_bytes = from_bytes - to_bytes;
  int64_t freed_objects = from_objects - to_objects;
  freed_bytes_.FetchAndAdd(freed_bytes);
  freed_objects_.FetchAndAdd(freed_objects);
  // Note: Freed bytes can be negative if we copy from a compacted space to a free-list backed
  // space.
  heap_->RecordFree(freed_objects, freed_bytes);

  timings_.StartSplit("PreSweepingGcVerification");
  heap_->PreSweepingGcVerification(this);
  timings_.EndSplit();
  {
    WriterMutexLock mu(self_, *Locks::heap_bitmap_lock_);
    // Reclaim unmarked objects.
    Sweep(false);
    // Swap the live and mark bitmaps for each space which we modified. This is an
    // optimization that enables us to not clear live bits inside of the sweep. Only swaps unbound
    // bitmaps.
    timings_.StartSplit("SwapBitmaps");
    SwapBitmaps();
    timings_.EndSplit();
    // Unbind the live and mark bitmaps.
    TimingLogger::ScopedSplit split("UnBindBitmaps", &timings_);
    GetHeap()->UnBindBitmaps();
  }
  // TODO: Do this before doing verification since the from space may have objects which weren't
  // moved and point to dead objects.
  from_space_->Clear();
  // Protect the from space.
  VLOG(heap) << "Protecting space " << *from_space_;
  if (kProtectFromSpace) {
    from_space_->GetMemMap()->Protect(PROT_NONE);
  } else {
    from_space_->GetMemMap()->Protect(PROT_READ);
  }
  if (saved_bytes_ > 0) {
    VLOG(heap) << "Avoided dirtying " << PrettySize(saved_bytes_);
  }

  if (generational_) {
    // Record the end (top) of the to space so we can distinguish
    // between objects that were allocated since the last GC and the
    // older objects.
    last_gc_to_space_end_ = to_space_->End();
  }
}

void SemiSpace::ResizeMarkStack(size_t new_size) {
  std::vector<Object*> temp(mark_stack_->Begin(), mark_stack_->End());
  CHECK_LE(mark_stack_->Size(), new_size);
  mark_stack_->Resize(new_size);
  for (const auto& obj : temp) {
    mark_stack_->PushBack(obj);
  }
}

inline void SemiSpace::MarkStackPush(Object* obj) {
  if (UNLIKELY(mark_stack_->Size() >= mark_stack_->Capacity())) {
    ResizeMarkStack(mark_stack_->Capacity() * 2);
  }
  // The object must be pushed on to the mark stack.
  mark_stack_->PushBack(obj);
}

static inline size_t CopyAvoidingDirtyingPages(void* dest, const void* src, size_t size) {
  if (LIKELY(size <= static_cast<size_t>(kPageSize))) {
    // We will dirty the current page and somewhere in the middle of the next page. This means
    // that the next object copied will also dirty that page.
    // TODO: Worth considering the last object copied? We may end up dirtying one page which is
    // not necessary per GC.
    memcpy(dest, src, size);
    return 0;
  }
  size_t saved_bytes = 0;
  byte* byte_dest = reinterpret_cast<byte*>(dest);
  if (kIsDebugBuild) {
    for (size_t i = 0; i < size; ++i) {
      CHECK_EQ(byte_dest[i], 0U);
    }
  }
  // Process the start of the page. The page must already be dirty, don't bother with checking.
  const byte* byte_src = reinterpret_cast<const byte*>(src);
  const byte* limit = byte_src + size;
  size_t page_remain = AlignUp(byte_dest, kPageSize) - byte_dest;
  // Copy the bytes until the start of the next page.
  memcpy(dest, src, page_remain);
  byte_src += page_remain;
  byte_dest += page_remain;
  DCHECK_ALIGNED(reinterpret_cast<uintptr_t>(byte_dest), kPageSize);
  DCHECK_ALIGNED(reinterpret_cast<uintptr_t>(byte_dest), sizeof(uintptr_t));
  DCHECK_ALIGNED(reinterpret_cast<uintptr_t>(byte_src), sizeof(uintptr_t));
  while (byte_src + kPageSize < limit) {
    bool all_zero = true;
    uintptr_t* word_dest = reinterpret_cast<uintptr_t*>(byte_dest);
    const uintptr_t* word_src = reinterpret_cast<const uintptr_t*>(byte_src);
    for (size_t i = 0; i < kPageSize / sizeof(*word_src); ++i) {
      // Assumes the destination of the copy is all zeros.
      if (word_src[i] != 0) {
        all_zero = false;
        word_dest[i] = word_src[i];
      }
    }
    if (all_zero) {
      // Avoided copying into the page since it was all zeros.
      saved_bytes += kPageSize;
    }
    byte_src += kPageSize;
    byte_dest += kPageSize;
  }
  // Handle the part of the page at the end.
  memcpy(byte_dest, byte_src, limit - byte_src);
  return saved_bytes;
}

mirror::Object* SemiSpace::MarkNonForwardedObject(mirror::Object* obj) {
  size_t object_size = obj->SizeOf();
  size_t bytes_allocated;
  mirror::Object* forward_address = nullptr;
  if (generational_ && reinterpret_cast<byte*>(obj) < last_gc_to_space_end_) {
    // If it's allocated before the last GC (older), move
    // (pseudo-promote) it to the main free list space (as sort
    // of an old generation.)
    space::MallocSpace* promo_dest_space = GetHeap()->GetPrimaryFreeListSpace();
    forward_address = promo_dest_space->Alloc(self_, object_size, &bytes_allocated, nullptr);
    if (UNLIKELY(forward_address == nullptr)) {
      // If out of space, fall back to the to-space.
      forward_address = to_space_->Alloc(self_, object_size, &bytes_allocated, nullptr);
    } else {
      bytes_promoted_ += bytes_allocated;
      // Dirty the card at the destination as it may contain
      // references (including the class pointer) to the bump pointer
      // space.
      GetHeap()->WriteBarrierEveryFieldOf(forward_address);
      // Handle the bitmaps marking.
      accounting::ContinuousSpaceBitmap* live_bitmap = promo_dest_space->GetLiveBitmap();
      DCHECK(live_bitmap != nullptr);
      accounting::ContinuousSpaceBitmap* mark_bitmap = promo_dest_space->GetMarkBitmap();
      DCHECK(mark_bitmap != nullptr);
      DCHECK(!live_bitmap->Test(forward_address));
      if (!whole_heap_collection_) {
        // If collecting the bump pointer spaces only, live_bitmap == mark_bitmap.
        DCHECK_EQ(live_bitmap, mark_bitmap);

        // If a bump pointer space only collection, delay the live
        // bitmap marking of the promoted object until it's popped off
        // the mark stack (ProcessMarkStack()). The rationale: we may
        // be in the middle of scanning the objects in the promo
        // destination space for
        // non-moving-space-to-bump-pointer-space references by
        // iterating over the marked bits of the live bitmap
        // (MarkReachableObjects()). If we don't delay it (and instead
        // mark the promoted object here), the above promo destination
        // space scan could encounter the just-promoted object and
        // forward the references in the promoted object's fields even
        // though it is pushed onto the mark stack.
        // If this happens,
        // the promoted object would be in an inconsistent state, that
        // is, it's on the mark stack (gray) but its fields are
        // already forwarded (black), which would cause a
        // DCHECK(!to_space_->HasAddress(obj)) failure below.
      } else {
        // Mark forward_address on the live bit map.
        live_bitmap->Set(forward_address);
        // Mark forward_address on the mark bit map.
        DCHECK(!mark_bitmap->Test(forward_address));
        mark_bitmap->Set(forward_address);
      }
    }
    DCHECK(forward_address != nullptr);
  } else {
    // If it's allocated after the last GC (younger), copy it to the to-space.
    forward_address = to_space_->Alloc(self_, object_size, &bytes_allocated, nullptr);
  }
  ++objects_moved_;
  bytes_moved_ += bytes_allocated;
  // Copy over the object and add it to the mark stack since we still need to update its
  // references.
  saved_bytes_ +=
      CopyAvoidingDirtyingPages(reinterpret_cast<void*>(forward_address), obj, object_size);
  if (kUseBakerOrBrooksReadBarrier) {
    obj->AssertReadBarrierPointer();
    if (kUseBrooksReadBarrier) {
      DCHECK_EQ(forward_address->GetReadBarrierPointer(), obj);
      forward_address->SetReadBarrierPointer(forward_address);
    }
    forward_address->AssertReadBarrierPointer();
  }
  if (to_space_live_bitmap_ != nullptr) {
    to_space_live_bitmap_->Set(forward_address);
  }
  DCHECK(to_space_->HasAddress(forward_address) ||
         (generational_ && GetHeap()->GetPrimaryFreeListSpace()->HasAddress(forward_address)));
  return forward_address;
}

void SemiSpace::ProcessMarkStackCallback(void* arg) {
  reinterpret_cast<SemiSpace*>(arg)->ProcessMarkStack();
}

mirror::Object* SemiSpace::MarkObjectCallback(mirror::Object* root, void* arg) {
  auto ref = StackReference<mirror::Object>::FromMirrorPtr(root);
  reinterpret_cast<SemiSpace*>(arg)->MarkObject(&ref);
  return ref.AsMirrorPtr();
}

void SemiSpace::MarkHeapReferenceCallback(mirror::HeapReference<mirror::Object>* obj_ptr,
                                          void* arg) {
  reinterpret_cast<SemiSpace*>(arg)->MarkObject(obj_ptr);
}

void SemiSpace::DelayReferenceReferentCallback(mirror::Class* klass, mirror::Reference* ref,
                                               void* arg) {
  reinterpret_cast<SemiSpace*>(arg)->DelayReferenceReferent(klass, ref);
}

void SemiSpace::MarkRootCallback(Object** root, void* arg, uint32_t /*thread_id*/,
                                 RootType /*root_type*/) {
  auto ref = StackReference<mirror::Object>::FromMirrorPtr(*root);
  reinterpret_cast<SemiSpace*>(arg)->MarkObject(&ref);
  if (*root != ref.AsMirrorPtr()) {
    *root = ref.AsMirrorPtr();
  }
}

// Marks all objects in the root set.
void SemiSpace::MarkRoots() {
  timings_.NewSplit("MarkRoots");
  // TODO: Visit up image roots as well?
  Runtime::Current()->VisitRoots(MarkRootCallback, this);
}

mirror::Object* SemiSpace::MarkedForwardingAddressCallback(mirror::Object* object, void* arg) {
  return reinterpret_cast<SemiSpace*>(arg)->GetMarkedForwardAddress(object);
}

void SemiSpace::SweepSystemWeaks() {
  timings_.StartSplit("SweepSystemWeaks");
  Runtime::Current()->SweepSystemWeaks(MarkedForwardingAddressCallback, this);
  timings_.EndSplit();
}

bool SemiSpace::ShouldSweepSpace(space::ContinuousSpace* space) const {
  return space != from_space_ && space != to_space_ && !immune_region_.ContainsSpace(space);
}

void SemiSpace::Sweep(bool swap_bitmaps) {
  DCHECK(mark_stack_->IsEmpty());
  TimingLogger::ScopedSplit split("Sweep", &timings_);
  for (const auto& space : GetHeap()->GetContinuousSpaces()) {
    if (space->IsContinuousMemMapAllocSpace()) {
      space::ContinuousMemMapAllocSpace* alloc_space = space->AsContinuousMemMapAllocSpace();
      if (!ShouldSweepSpace(alloc_space)) {
        continue;
      }
      TimingLogger::ScopedSplit split(
          alloc_space->IsZygoteSpace() ? "SweepZygoteSpace" : "SweepAllocSpace", &timings_);
      size_t freed_objects = 0;
      size_t freed_bytes = 0;
      alloc_space->Sweep(swap_bitmaps, &freed_objects, &freed_bytes);
      heap_->RecordFree(freed_objects, freed_bytes);
      freed_objects_.FetchAndAdd(freed_objects);
      freed_bytes_.FetchAndAdd(freed_bytes);
    }
  }
  if (!is_large_object_space_immune_) {
    SweepLargeObjects(swap_bitmaps);
  }
}

void SemiSpace::SweepLargeObjects(bool swap_bitmaps) {
  DCHECK(!is_large_object_space_immune_);
  TimingLogger::ScopedSplit split("SweepLargeObjects", &timings_);
  size_t freed_objects = 0;
  size_t freed_bytes = 0;
  heap_->GetLargeObjectsSpace()->Sweep(swap_bitmaps, &freed_objects, &freed_bytes);
  freed_large_objects_.FetchAndAdd(freed_objects);
  freed_large_object_bytes_.FetchAndAdd(freed_bytes);
  heap_->RecordFree(freed_objects, freed_bytes);
}

// Process the "referent" field in a java.lang.ref.Reference. If the referent has not yet been
// marked, put it on the appropriate list in the heap for later processing.
void SemiSpace::DelayReferenceReferent(mirror::Class* klass, mirror::Reference* reference) {
  heap_->DelayReferenceReferent(klass, reference, MarkedForwardingAddressCallback, this);
}

class SemiSpaceMarkObjectVisitor {
 public:
  explicit SemiSpaceMarkObjectVisitor(SemiSpace* collector) : collector_(collector) {
  }

  void operator()(Object* obj, MemberOffset offset, bool /* is_static */) const ALWAYS_INLINE
      EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
    // Object was already verified when we scanned it.
    collector_->MarkObject(obj->GetFieldObjectReferenceAddr<kVerifyNone>(offset));
  }

  void operator()(mirror::Class* klass, mirror::Reference* ref) const
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
    collector_->DelayReferenceReferent(klass, ref);
  }

 private:
  SemiSpace* const collector_;
};

// Visit all of the references of an object and update.
void SemiSpace::ScanObject(Object* obj) {
  DCHECK(!from_space_->HasAddress(obj)) << "Scanning object " << obj << " in from space";
  SemiSpaceMarkObjectVisitor visitor(this);
  obj->VisitReferences<kMovingClasses>(visitor, visitor);
}

// Scan anything that's on the mark stack.
void SemiSpace::ProcessMarkStack() {
  space::MallocSpace* promo_dest_space = nullptr;
  accounting::ContinuousSpaceBitmap* live_bitmap = nullptr;
  if (generational_ && !whole_heap_collection_) {
    // If a bump pointer space only collection (and the promotion is
    // enabled,) we delay the live-bitmap marking of promoted objects
    // from MarkObject() until this function.
    promo_dest_space = GetHeap()->GetPrimaryFreeListSpace();
    live_bitmap = promo_dest_space->GetLiveBitmap();
    DCHECK(live_bitmap != nullptr);
    accounting::ContinuousSpaceBitmap* mark_bitmap = promo_dest_space->GetMarkBitmap();
    DCHECK(mark_bitmap != nullptr);
    DCHECK_EQ(live_bitmap, mark_bitmap);
  }
  timings_.StartSplit("ProcessMarkStack");
  while (!mark_stack_->IsEmpty()) {
    Object* obj = mark_stack_->PopBack();
    if (generational_ && !whole_heap_collection_ && promo_dest_space->HasAddress(obj)) {
      // obj has just been promoted. Mark the live bitmap for it,
      // which is delayed from MarkObject().
      DCHECK(!live_bitmap->Test(obj));
      live_bitmap->Set(obj);
    }
    ScanObject(obj);
  }
  timings_.EndSplit();
}

inline Object* SemiSpace::GetMarkedForwardAddress(mirror::Object* obj) const
    SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
  // All immune objects are assumed marked.
  if (immune_region_.ContainsObject(obj)) {
    return obj;
  }
  if (from_space_->HasAddress(obj)) {
    // Returns either the forwarding address or nullptr.
    return GetForwardingAddressInFromSpace(obj);
  } else if (to_space_->HasAddress(obj)) {
    // Should be unlikely.
    // Already forwarded, must be marked.
    return obj;
  }
  return heap_->GetMarkBitmap()->Test(obj) ? obj : nullptr;
}

void SemiSpace::SetToSpace(space::ContinuousMemMapAllocSpace* to_space) {
  DCHECK(to_space != nullptr);
  to_space_ = to_space;
}

void SemiSpace::SetFromSpace(space::ContinuousMemMapAllocSpace* from_space) {
  DCHECK(from_space != nullptr);
  from_space_ = from_space;
}

void SemiSpace::FinishPhase() {
  TimingLogger::ScopedSplit split("FinishPhase", &timings_);
  Heap* heap = GetHeap();
  timings_.NewSplit("PostGcVerification");
  heap->PostGcVerification(this);
  // Null the "to" and "from" spaces since compacting from one to the other isn't valid until
  // further action is done by the heap.
  to_space_ = nullptr;
  from_space_ = nullptr;
  CHECK(mark_stack_->IsEmpty());
  mark_stack_->Reset();
  if (generational_) {
    // Decide whether to do a whole heap collection or a bump pointer
    // space only collection at the next collection by updating
    // whole_heap_collection.
    if (!whole_heap_collection_) {
      if (!kUseBytesPromoted) {
        // Enable whole_heap_collection once every
        // kDefaultWholeHeapCollectionInterval collections.
        --whole_heap_collection_interval_counter_;
        DCHECK_GE(whole_heap_collection_interval_counter_, 0);
        if (whole_heap_collection_interval_counter_ == 0) {
          whole_heap_collection_ = true;
        }
      } else {
        // Enable whole_heap_collection if the bytes promoted since
        // the last whole heap collection exceed a threshold.
        bytes_promoted_since_last_whole_heap_collection_ += bytes_promoted_;
        if (bytes_promoted_since_last_whole_heap_collection_ >= kBytesPromotedThreshold) {
          whole_heap_collection_ = true;
        }
      }
    } else {
      if (!kUseBytesPromoted) {
        DCHECK_EQ(whole_heap_collection_interval_counter_, 0);
        whole_heap_collection_interval_counter_ = kDefaultWholeHeapCollectionInterval;
        whole_heap_collection_ = false;
      } else {
        // Reset it.
        bytes_promoted_since_last_whole_heap_collection_ = bytes_promoted_;
        whole_heap_collection_ = false;
      }
    }
  }
  // Clear all of the spaces' mark bitmaps.
  WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
  heap_->ClearMarkedObjects();
}

void SemiSpace::RevokeAllThreadLocalBuffers() {
  timings_.StartSplit("(Paused)RevokeAllThreadLocalBuffers");
  GetHeap()->RevokeAllThreadLocalBuffers();
  timings_.EndSplit();
}

}  // namespace collector
}  // namespace gc
}  // namespace art