semi_space.cc revision df386c551405ce9668e827584f744c6f098761fa
/*
 * Copyright (C) 2013 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "semi_space-inl.h"

#include <functional>
#include <numeric>
#include <climits>
#include <vector>

#include "base/logging.h"
#include "base/macros.h"
#include "base/mutex-inl.h"
#include "base/timing_logger.h"
#include "gc/accounting/heap_bitmap-inl.h"
#include "gc/accounting/mod_union_table.h"
#include "gc/accounting/remembered_set.h"
#include "gc/accounting/space_bitmap-inl.h"
#include "gc/heap.h"
#include "gc/space/bump_pointer_space.h"
#include "gc/space/bump_pointer_space-inl.h"
#include "gc/space/image_space.h"
#include "gc/space/large_object_space.h"
#include "gc/space/space-inl.h"
#include "indirect_reference_table.h"
#include "intern_table.h"
#include "jni_internal.h"
#include "mark_sweep-inl.h"
#include "monitor.h"
#include "mirror/art_field.h"
#include "mirror/art_field-inl.h"
#include "mirror/class-inl.h"
#include "mirror/class_loader.h"
#include "mirror/dex_cache.h"
#include "mirror/reference-inl.h"
#include "mirror/object-inl.h"
#include "mirror/object_array.h"
#include "mirror/object_array-inl.h"
#include "runtime.h"
#include "stack.h"
#include "thread-inl.h"
#include "thread_list.h"
#include "verifier/method_verifier.h"

using ::art::mirror::Class;
using ::art::mirror::Object;

namespace art {
namespace gc {
namespace collector {

static constexpr bool kProtectFromSpace = true;
static constexpr bool kClearFromSpace = true;
static constexpr bool kStoreStackTraces = false;
static constexpr bool kUseBytesPromoted = true;
static constexpr size_t kBytesPromotedThreshold = 4 * MB;

void SemiSpace::BindBitmaps() {
  timings_.StartSplit("BindBitmaps");
  WriterMutexLock mu(self_, *Locks::heap_bitmap_lock_);
  // Mark all of the spaces we never collect as immune.
  for (const auto& space : GetHeap()->GetContinuousSpaces()) {
    if (space->GetLiveBitmap() != nullptr) {
      if (space == to_space_) {
        CHECK(to_space_->IsContinuousMemMapAllocSpace());
        to_space_->AsContinuousMemMapAllocSpace()->BindLiveToMarkBitmap();
      } else if (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyNeverCollect
                 || space->GetGcRetentionPolicy() == space::kGcRetentionPolicyFullCollect
                 // Add the main free list space and the non-moving
                 // space to the immune space during a bump pointer
                 // space only collection.
                 || (generational_ && !whole_heap_collection_ &&
                     (space == GetHeap()->GetNonMovingSpace() ||
                      space == GetHeap()->GetPrimaryFreeListSpace()))) {
        CHECK(immune_region_.AddContinuousSpace(space)) << "Failed to add space " << *space;
      }
    }
  }
  if (generational_ && !whole_heap_collection_) {
    // We won't collect the large object space during a bump pointer space only collection.
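    // The large objects are instead scanned as roots in MarkReachableObjects(),
    // after their live set has been copied to the mark set there.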
    is_large_object_space_immune_ = true;
  }
  timings_.EndSplit();
}

SemiSpace::SemiSpace(Heap* heap, bool generational, const std::string& name_prefix)
    : GarbageCollector(heap,
                       name_prefix + (name_prefix.empty() ? "" : " ") + "marksweep + semispace"),
      to_space_(nullptr),
      from_space_(nullptr),
      generational_(generational),
      last_gc_to_space_end_(nullptr),
      bytes_promoted_(0),
      bytes_promoted_since_last_whole_heap_collection_(0),
      whole_heap_collection_(true),
      whole_heap_collection_interval_counter_(0),
      collector_name_(name_) {
}

void SemiSpace::InitializePhase() {
  timings_.Reset();
  TimingLogger::ScopedSplit split("InitializePhase", &timings_);
  mark_stack_ = heap_->mark_stack_.get();
  DCHECK(mark_stack_ != nullptr);
  immune_region_.Reset();
  is_large_object_space_immune_ = false;
  saved_bytes_ = 0;
  self_ = Thread::Current();
  // Do any pre GC verification.
  timings_.NewSplit("PreGcVerification");
  heap_->PreGcVerification(this);
  // Set the initial bitmap.
  to_space_live_bitmap_ = to_space_->GetLiveBitmap();
}

void SemiSpace::ProcessReferences(Thread* self) {
  TimingLogger::ScopedSplit split("ProcessReferences", &timings_);
  WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
  GetHeap()->ProcessReferences(timings_, clear_soft_references_, &MarkedForwardingAddressCallback,
                               &MarkObjectCallback, &ProcessMarkStackCallback, this);
}

void SemiSpace::MarkingPhase() {
  if (kStoreStackTraces) {
    Locks::mutator_lock_->AssertExclusiveHeld(self_);
    // Store the stack traces into the runtime fault string in case we get a heap corruption
    // related crash later.
    ThreadState old_state = self_->SetStateUnsafe(kRunnable);
    std::ostringstream oss;
    Runtime* runtime = Runtime::Current();
    runtime->GetThreadList()->DumpForSigQuit(oss);
    runtime->GetThreadList()->DumpNativeStacks(oss);
    runtime->SetFaultMessage(oss.str());
    CHECK_EQ(self_->SetStateUnsafe(old_state), kRunnable);
  }

  if (generational_) {
    if (gc_cause_ == kGcCauseExplicit || gc_cause_ == kGcCauseForNativeAlloc ||
        clear_soft_references_) {
      // If an explicit, native allocation-triggered, or last attempt
      // collection, collect the whole heap (and reset the interval
      // counter to be consistent.)
      whole_heap_collection_ = true;
      if (!kUseBytesPromoted) {
        whole_heap_collection_interval_counter_ = 0;
      }
    }
    if (whole_heap_collection_) {
      VLOG(heap) << "Whole heap collection";
      name_ = collector_name_ + " whole";
    } else {
      VLOG(heap) << "Bump pointer space only collection";
      name_ = collector_name_ + " bps";
    }
  }

  if (!clear_soft_references_) {
    if (!generational_) {
      // If non-generational, always clear soft references.
      clear_soft_references_ = true;
    } else {
      // If generational, clear soft references during a whole heap collection.
      if (whole_heap_collection_) {
        clear_soft_references_ = true;
      }
    }
  }

  Locks::mutator_lock_->AssertExclusiveHeld(self_);

  TimingLogger::ScopedSplit split("MarkingPhase", &timings_);
  // Need to do this with mutators paused so that somebody doesn't accidentally allocate into the
  // wrong space.
  heap_->SwapSemiSpaces();
  if (generational_) {
    // If last_gc_to_space_end_ is out of the bounds of the from-space
    // (the to-space from last GC), then point it to the beginning of
    // the from-space. For example, the very first GC or the
    // pre-zygote compaction.
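    // Objects at addresses below last_gc_to_space_end_ survived the previous
    // GC; MarkNonForwardedObject() treats them as old and pseudo-promotes
    // them to the main free list space instead of copying them to the
    // to-space.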
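// In UpdateAndMarkModUnion() above, immune spaces with a mod union table
// (the image and zygote spaces) have their references updated eagerly;
// immune spaces with only a remembered set or a live bitmap (the non-moving
// space and the main free list space during a bump pointer space only
// collection) are instead scanned later in MarkReachableObjects().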
    if (!from_space_->HasAddress(reinterpret_cast<mirror::Object*>(last_gc_to_space_end_))) {
      last_gc_to_space_end_ = from_space_->Begin();
    }
    // Reset this before the marking starts below.
    bytes_promoted_ = 0;
  }
  // Assume the cleared space is already empty.
  BindBitmaps();
  // Process dirty cards and add dirty cards to mod-union tables.
  heap_->ProcessCards(timings_, kUseRememberedSet && generational_);
  // Clear the whole card table since we cannot get any additional dirty cards during the
  // paused GC. This saves memory but only works for stop-the-world collectors.
  timings_.NewSplit("ClearCardTable");
  heap_->GetCardTable()->ClearCardTable();
  // Need to do this before the checkpoint since we don't want any threads to add references to
  // the live stack during the recursive mark.
  timings_.NewSplit("SwapStacks");
  if (kUseThreadLocalAllocationStack) {
    heap_->RevokeAllThreadLocalAllocationStacks(self_);
  }
  heap_->SwapStacks(self_);
  WriterMutexLock mu(self_, *Locks::heap_bitmap_lock_);
  MarkRoots();
  // Mark roots of immune spaces.
  UpdateAndMarkModUnion();
  // Recursively mark remaining objects.
  MarkReachableObjects();
}

void SemiSpace::UpdateAndMarkModUnion() {
  for (auto& space : heap_->GetContinuousSpaces()) {
    // If the space is immune then we need to mark the references to other spaces.
    if (immune_region_.ContainsSpace(space)) {
      accounting::ModUnionTable* table = heap_->FindModUnionTableFromSpace(space);
      if (table != nullptr) {
        // TODO: Improve naming.
        TimingLogger::ScopedSplit split(
            space->IsZygoteSpace() ? "UpdateAndMarkZygoteModUnionTable" :
                                     "UpdateAndMarkImageModUnionTable",
            &timings_);
        table->UpdateAndMarkReferences(MarkHeapReferenceCallback, this);
      } else if (heap_->FindRememberedSetFromSpace(space) != nullptr) {
        DCHECK(kUseRememberedSet);
        // During a bump pointer space only collection, the non-moving
        // space is added to the immune space. The non-moving space
        // doesn't have a mod union table, but has a remembered
        // set. Its dirty cards will be scanned later in
        // MarkReachableObjects().
        DCHECK(generational_ && !whole_heap_collection_ &&
               (space == heap_->GetNonMovingSpace() || space == heap_->GetPrimaryFreeListSpace()))
            << "Space " << space->GetName() << " "
            << "generational_=" << generational_ << " "
            << "whole_heap_collection_=" << whole_heap_collection_ << " ";
      } else {
        DCHECK(!kUseRememberedSet);
        // During a bump pointer space only collection, the non-moving
        // space is added to the immune space. But the non-moving
        // space doesn't have a mod union table. Instead, its live
        // bitmap will be scanned later in MarkReachableObjects().
        DCHECK(generational_ && !whole_heap_collection_ &&
               (space == heap_->GetNonMovingSpace() || space == heap_->GetPrimaryFreeListSpace()))
            << "Space " << space->GetName() << " "
            << "generational_=" << generational_ << " "
            << "whole_heap_collection_=" << whole_heap_collection_ << " ";
      }
    }
  }
}

class SemiSpaceScanObjectVisitor {
 public:
  explicit SemiSpaceScanObjectVisitor(SemiSpace* ss) : semi_space_(ss) {}
  void operator()(Object* obj) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
    // TODO: fix NO_THREAD_SAFETY_ANALYSIS. ScanObject() requires an
    // exclusive lock on the mutator lock, but
    // SpaceBitmap::VisitMarkedRange() only requires the shared lock.
    DCHECK(obj != nullptr);
    semi_space_->ScanObject(obj);
  }
 private:
  SemiSpace* const semi_space_;
};

// Used to verify that there are no references to the from-space.
class SemiSpaceVerifyNoFromSpaceReferencesVisitor {
 public:
  explicit SemiSpaceVerifyNoFromSpaceReferencesVisitor(space::ContinuousMemMapAllocSpace* from_space) :
      from_space_(from_space) {}

  void operator()(Object* obj, MemberOffset offset, bool /* is_static */) const
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) ALWAYS_INLINE {
    mirror::Object* ref = obj->GetFieldObject<mirror::Object>(offset, false);
    if (from_space_->HasAddress(ref)) {
      Runtime::Current()->GetHeap()->DumpObject(LOG(INFO), obj);
      LOG(FATAL) << ref << " found in from space";
    }
  }
 private:
  space::ContinuousMemMapAllocSpace* from_space_;
};

void SemiSpace::VerifyNoFromSpaceReferences(Object* obj) {
  DCHECK(!from_space_->HasAddress(obj)) << "Scanning object " << obj << " in from space";
  SemiSpaceVerifyNoFromSpaceReferencesVisitor visitor(from_space_);
  obj->VisitReferences<kMovingClasses>(visitor);
}

class SemiSpaceVerifyNoFromSpaceReferencesObjectVisitor {
 public:
  explicit SemiSpaceVerifyNoFromSpaceReferencesObjectVisitor(SemiSpace* ss) : semi_space_(ss) {}
  void operator()(Object* obj) const
      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_) {
    DCHECK(obj != nullptr);
    semi_space_->VerifyNoFromSpaceReferences(obj);
  }
 private:
  SemiSpace* const semi_space_;
};

void SemiSpace::MarkReachableObjects() {
  timings_.StartSplit("MarkStackAsLive");
  accounting::ObjectStack* live_stack = heap_->GetLiveStack();
  heap_->MarkAllocStackAsLive(live_stack);
  live_stack->Reset();
  timings_.EndSplit();

  for (auto& space : heap_->GetContinuousSpaces()) {
    // If the space is immune and has no mod union table (the
    // non-moving space when the bump pointer space only collection is
    // enabled,) then we need to scan its live bitmap or dirty cards as roots
    // (including the objects on the live stack which have just been marked
    // in the live bitmap above in MarkAllocStackAsLive().)
    if (immune_region_.ContainsSpace(space) &&
        heap_->FindModUnionTableFromSpace(space) == nullptr) {
      DCHECK(generational_ && !whole_heap_collection_ &&
             (space == GetHeap()->GetNonMovingSpace() || space == GetHeap()->GetPrimaryFreeListSpace()));
      accounting::RememberedSet* rem_set = heap_->FindRememberedSetFromSpace(space);
      if (kUseRememberedSet) {
        DCHECK(rem_set != nullptr);
        rem_set->UpdateAndMarkReferences(MarkHeapReferenceCallback, from_space_, this);
        if (kIsDebugBuild) {
          // Verify that there are no from-space references that
          // remain in the space, that is, the remembered set (and the
          // card table) didn't miss any from-space references in the
          // space.
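          // A reference caught here is a stale pointer into memory that is
          // about to be cleared and protected in ReclaimPhase(), meaning the
          // write that stored it was missed by the card table.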
          accounting::SpaceBitmap* live_bitmap = space->GetLiveBitmap();
          SemiSpaceVerifyNoFromSpaceReferencesObjectVisitor visitor(this);
          live_bitmap->VisitMarkedRange(reinterpret_cast<uintptr_t>(space->Begin()),
                                        reinterpret_cast<uintptr_t>(space->End()),
                                        visitor);
        }
      } else {
        DCHECK(rem_set == nullptr);
        accounting::SpaceBitmap* live_bitmap = space->GetLiveBitmap();
        SemiSpaceScanObjectVisitor visitor(this);
        live_bitmap->VisitMarkedRange(reinterpret_cast<uintptr_t>(space->Begin()),
                                      reinterpret_cast<uintptr_t>(space->End()),
                                      visitor);
      }
    }
  }

  if (is_large_object_space_immune_) {
    DCHECK(generational_ && !whole_heap_collection_);
    // Delay copying the live set to the marked set until here from
    // BindBitmaps() as the large objects on the allocation stack may
    // be newly added to the live set above in MarkAllocStackAsLive().
    GetHeap()->GetLargeObjectsSpace()->CopyLiveToMarked();

    // When the large object space is immune, we need to scan the
    // large object space as roots as they contain references to their
    // classes (primitive array classes) that could move even though they
    // don't contain any other references.
    space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
    accounting::ObjectSet* large_live_objects = large_object_space->GetLiveObjects();
    SemiSpaceScanObjectVisitor visitor(this);
    for (const Object* obj : large_live_objects->GetObjects()) {
      visitor(const_cast<Object*>(obj));
    }
  }

  // Recursively process the mark stack.
  ProcessMarkStack();
}

void SemiSpace::ReclaimPhase() {
  TimingLogger::ScopedSplit split("ReclaimPhase", &timings_);
  ProcessReferences(self_);
  {
    ReaderMutexLock mu(self_, *Locks::heap_bitmap_lock_);
    SweepSystemWeaks();
  }
  // Record freed memory.
  uint64_t from_bytes = from_space_->GetBytesAllocated();
  uint64_t to_bytes = to_space_->GetBytesAllocated();
  uint64_t from_objects = from_space_->GetObjectsAllocated();
  uint64_t to_objects = to_space_->GetObjectsAllocated();
  CHECK_LE(to_objects, from_objects);
  int64_t freed_bytes = from_bytes - to_bytes;
  int64_t freed_objects = from_objects - to_objects;
  freed_bytes_.FetchAndAdd(freed_bytes);
  freed_objects_.FetchAndAdd(freed_objects);
  // Note: Freed bytes can be negative if we copy from a compacted space to a free-list backed
  // space.
  heap_->RecordFree(freed_objects, freed_bytes);
  timings_.StartSplit("PreSweepingGcVerification");
  heap_->PreSweepingGcVerification(this);
  timings_.EndSplit();

  {
    WriterMutexLock mu(self_, *Locks::heap_bitmap_lock_);
    // Reclaim unmarked objects.
    Sweep(false);
    // Swap the live and mark bitmaps for each space which we modified. This is an
    // optimization that enables us to not clear live bits inside of the sweep. Only swaps unbound
    // bitmaps.
    timings_.StartSplit("SwapBitmaps");
    SwapBitmaps();
    timings_.EndSplit();
    // Unbind the live and mark bitmaps.
    TimingLogger::ScopedSplit split("UnBindBitmaps", &timings_);
    GetHeap()->UnBindBitmaps();
  }
  if (kClearFromSpace) {
    // Release the memory used by the from space.
    from_space_->Clear();
  }
  from_space_->Reset();
  // Protect the from space.
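  // With kProtectFromSpace, any stray access to the protected range faults
  // immediately instead of silently reading reclaimed memory, which helps
  // catch missed from-space references.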
  VLOG(heap) << "Protecting space " << *from_space_;
  if (kProtectFromSpace) {
    from_space_->GetMemMap()->Protect(PROT_NONE);
  } else {
    from_space_->GetMemMap()->Protect(PROT_READ);
  }
  if (saved_bytes_ > 0) {
    VLOG(heap) << "Avoided dirtying " << PrettySize(saved_bytes_);
  }

  if (generational_) {
    // Record the end (top) of the to space so we can distinguish
    // between objects that were allocated since the last GC and the
    // older objects.
    last_gc_to_space_end_ = to_space_->End();
  }
}

void SemiSpace::ResizeMarkStack(size_t new_size) {
  std::vector<Object*> temp(mark_stack_->Begin(), mark_stack_->End());
  CHECK_LE(mark_stack_->Size(), new_size);
  mark_stack_->Resize(new_size);
  for (const auto& obj : temp) {
    mark_stack_->PushBack(obj);
  }
}

inline void SemiSpace::MarkStackPush(Object* obj) {
  if (UNLIKELY(mark_stack_->Size() >= mark_stack_->Capacity())) {
    ResizeMarkStack(mark_stack_->Capacity() * 2);
  }
  // The object must be pushed on to the mark stack.
  mark_stack_->PushBack(obj);
}

// Rare case, probably not worth inlining since it will increase instruction cache miss rate.
bool SemiSpace::MarkLargeObject(const Object* obj) {
  // TODO: support >1 discontinuous space.
  space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
  DCHECK(large_object_space->Contains(obj));
  accounting::ObjectSet* large_objects = large_object_space->GetMarkObjects();
  if (UNLIKELY(!large_objects->Test(obj))) {
    large_objects->Set(obj);
    return true;
  }
  return false;
}

static inline size_t CopyAvoidingDirtyingPages(void* dest, const void* src, size_t size) {
  if (LIKELY(size <= static_cast<size_t>(kPageSize))) {
    // We will dirty the current page and somewhere in the middle of the next page. This means
    // that the next object copied will also dirty that page.
    // TODO: Worth considering the last object copied? We may end up dirtying one page which is
    // not necessary per GC.
    memcpy(dest, src, size);
    return 0;
  }
  size_t saved_bytes = 0;
  byte* byte_dest = reinterpret_cast<byte*>(dest);
  if (kIsDebugBuild) {
    for (size_t i = 0; i < size; ++i) {
      CHECK_EQ(byte_dest[i], 0U);
    }
  }
  // Process the start of the page. The page must already be dirty, don't bother with checking.
  const byte* byte_src = reinterpret_cast<const byte*>(src);
  const byte* limit = byte_src + size;
  size_t page_remain = AlignUp(byte_dest, kPageSize) - byte_dest;
  // Copy the bytes until the start of the next page.
  memcpy(dest, src, page_remain);
  byte_src += page_remain;
  byte_dest += page_remain;
  DCHECK_ALIGNED(reinterpret_cast<uintptr_t>(byte_dest), kPageSize);
  DCHECK_ALIGNED(reinterpret_cast<uintptr_t>(byte_dest), sizeof(uintptr_t));
  DCHECK_ALIGNED(reinterpret_cast<uintptr_t>(byte_src), sizeof(uintptr_t));
  while (byte_src + kPageSize < limit) {
    bool all_zero = true;
    uintptr_t* word_dest = reinterpret_cast<uintptr_t*>(byte_dest);
    const uintptr_t* word_src = reinterpret_cast<const uintptr_t*>(byte_src);
    for (size_t i = 0; i < kPageSize / sizeof(*word_src); ++i) {
      // Assumes the destination of the copy is all zeros.
      if (word_src[i] != 0) {
        all_zero = false;
        word_dest[i] = word_src[i];
      }
    }
    if (all_zero) {
      // Avoided copying into the page since it was all zeros.
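      // Leaving the destination page untouched keeps it clean: an untouched
      // page of an anonymous mapping stays backed by the shared zero page,
      // so no private dirty page is committed for it.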
      saved_bytes += kPageSize;
    }
    byte_src += kPageSize;
    byte_dest += kPageSize;
  }
  // Handle the part of the page at the end.
  memcpy(byte_dest, byte_src, limit - byte_src);
  return saved_bytes;
}

mirror::Object* SemiSpace::MarkNonForwardedObject(mirror::Object* obj) {
  size_t object_size = obj->SizeOf();
  size_t bytes_allocated;
  mirror::Object* forward_address = nullptr;
  if (generational_ && reinterpret_cast<byte*>(obj) < last_gc_to_space_end_) {
    // If it's allocated before the last GC (older), move
    // (pseudo-promote) it to the main free list space (as sort
    // of an old generation.)
    size_t bytes_promoted;
    space::MallocSpace* promo_dest_space = GetHeap()->GetPrimaryFreeListSpace();
    forward_address = promo_dest_space->Alloc(self_, object_size, &bytes_promoted, nullptr);
    if (forward_address == nullptr) {
      // If out of space, fall back to the to-space.
      forward_address = to_space_->Alloc(self_, object_size, &bytes_allocated, nullptr);
    } else {
      GetHeap()->num_bytes_allocated_.FetchAndAdd(bytes_promoted);
      bytes_promoted_ += bytes_promoted;
      // Dirty the card at the destination as it may contain
      // references (including the class pointer) to the bump pointer
      // space.
      GetHeap()->WriteBarrierEveryFieldOf(forward_address);
      // Handle the bitmaps marking.
      accounting::SpaceBitmap* live_bitmap = promo_dest_space->GetLiveBitmap();
      DCHECK(live_bitmap != nullptr);
      accounting::SpaceBitmap* mark_bitmap = promo_dest_space->GetMarkBitmap();
      DCHECK(mark_bitmap != nullptr);
      DCHECK(!live_bitmap->Test(forward_address));
      if (!whole_heap_collection_) {
        // If collecting the bump pointer spaces only, live_bitmap == mark_bitmap.
        DCHECK_EQ(live_bitmap, mark_bitmap);

        // During a bump pointer space only collection, delay the live
        // bitmap marking of the promoted object until it's popped off
        // the mark stack (ProcessMarkStack()). The rationale: we may
        // be in the middle of scanning the objects in the promo
        // destination space for
        // non-moving-space-to-bump-pointer-space references by
        // iterating over the marked bits of the live bitmap
        // (MarkReachableObjects()). If we don't delay it (and instead
        // mark the promoted object here), the above promo destination
        // space scan could encounter the just-promoted object and
        // forward the references in the promoted object's fields even
        // though it is pushed onto the mark stack. If this happens,
        // the promoted object would be in an inconsistent state, that
        // is, it's on the mark stack (gray) but its fields are
        // already forwarded (black), which would cause a
        // DCHECK(!to_space_->HasAddress(obj)) failure below.
      } else {
        // Mark forward_address on the live bit map.
        live_bitmap->Set(forward_address);
        // Mark forward_address on the mark bit map.
        DCHECK(!mark_bitmap->Test(forward_address));
        mark_bitmap->Set(forward_address);
      }
    }
    DCHECK(forward_address != nullptr);
  } else {
    // If it's allocated after the last GC (younger), copy it to the to-space.
    forward_address = to_space_->Alloc(self_, object_size, &bytes_allocated, nullptr);
  }
  // Copy over the object and add it to the mark stack since we still need to update its
  // references.
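  // CopyAvoidingDirtyingPages() returns how many bytes it skipped writing
  // because the destination was already zero; the total is reported by
  // ReclaimPhase() via VLOG ("Avoided dirtying ...").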
  saved_bytes_ +=
      CopyAvoidingDirtyingPages(reinterpret_cast<void*>(forward_address), obj, object_size);
  if (kUseBakerOrBrooksReadBarrier) {
    obj->AssertReadBarrierPointer();
    if (kUseBrooksReadBarrier) {
      DCHECK_EQ(forward_address->GetReadBarrierPointer(), obj);
      forward_address->SetReadBarrierPointer(forward_address);
    }
    forward_address->AssertReadBarrierPointer();
  }
  if (to_space_live_bitmap_ != nullptr) {
    to_space_live_bitmap_->Set(forward_address);
  }
  DCHECK(to_space_->HasAddress(forward_address) ||
         (generational_ && GetHeap()->GetPrimaryFreeListSpace()->HasAddress(forward_address)));
  return forward_address;
}

void SemiSpace::ProcessMarkStackCallback(void* arg) {
  reinterpret_cast<SemiSpace*>(arg)->ProcessMarkStack();
}

mirror::Object* SemiSpace::MarkObjectCallback(mirror::Object* root, void* arg) {
  auto ref = StackReference<mirror::Object>::FromMirrorPtr(root);
  reinterpret_cast<SemiSpace*>(arg)->MarkObject(&ref);
  return ref.AsMirrorPtr();
}

void SemiSpace::MarkHeapReferenceCallback(mirror::HeapReference<mirror::Object>* obj_ptr,
                                          void* arg) {
  reinterpret_cast<SemiSpace*>(arg)->MarkObject(obj_ptr);
}

void SemiSpace::MarkRootCallback(Object** root, void* arg, uint32_t /*thread_id*/,
                                 RootType /*root_type*/) {
  auto ref = StackReference<mirror::Object>::FromMirrorPtr(*root);
  reinterpret_cast<SemiSpace*>(arg)->MarkObject(&ref);
  if (*root != ref.AsMirrorPtr()) {
    *root = ref.AsMirrorPtr();
  }
}

// Marks all objects in the root set.
void SemiSpace::MarkRoots() {
  timings_.StartSplit("MarkRoots");
  // TODO: Visit image roots as well?
  Runtime::Current()->VisitRoots(MarkRootCallback, this);
  timings_.EndSplit();
}

mirror::Object* SemiSpace::MarkedForwardingAddressCallback(mirror::Object* object, void* arg) {
  return reinterpret_cast<SemiSpace*>(arg)->GetMarkedForwardAddress(object);
}

void SemiSpace::SweepSystemWeaks() {
  timings_.StartSplit("SweepSystemWeaks");
  Runtime::Current()->SweepSystemWeaks(MarkedForwardingAddressCallback, this);
  timings_.EndSplit();
}

bool SemiSpace::ShouldSweepSpace(space::ContinuousSpace* space) const {
  return space != from_space_ && space != to_space_ && !immune_region_.ContainsSpace(space);
}

void SemiSpace::Sweep(bool swap_bitmaps) {
  DCHECK(mark_stack_->IsEmpty());
  TimingLogger::ScopedSplit("Sweep", &timings_);
  for (const auto& space : GetHeap()->GetContinuousSpaces()) {
    if (space->IsContinuousMemMapAllocSpace()) {
      space::ContinuousMemMapAllocSpace* alloc_space = space->AsContinuousMemMapAllocSpace();
      if (!ShouldSweepSpace(alloc_space)) {
        continue;
      }
      TimingLogger::ScopedSplit split(
          alloc_space->IsZygoteSpace() ? "SweepZygoteSpace" : "SweepAllocSpace", &timings_);
"SweepZygoteSpace" : "SweepAllocSpace", &timings_); 658 size_t freed_objects = 0; 659 size_t freed_bytes = 0; 660 alloc_space->Sweep(swap_bitmaps, &freed_objects, &freed_bytes); 661 heap_->RecordFree(freed_objects, freed_bytes); 662 freed_objects_.FetchAndAdd(freed_objects); 663 freed_bytes_.FetchAndAdd(freed_bytes); 664 } 665 } 666 if (!is_large_object_space_immune_) { 667 SweepLargeObjects(swap_bitmaps); 668 } 669} 670 671void SemiSpace::SweepLargeObjects(bool swap_bitmaps) { 672 DCHECK(!is_large_object_space_immune_); 673 TimingLogger::ScopedSplit("SweepLargeObjects", &timings_); 674 size_t freed_objects = 0; 675 size_t freed_bytes = 0; 676 GetHeap()->GetLargeObjectsSpace()->Sweep(swap_bitmaps, &freed_objects, &freed_bytes); 677 freed_large_objects_.FetchAndAdd(freed_objects); 678 freed_large_object_bytes_.FetchAndAdd(freed_bytes); 679 GetHeap()->RecordFree(freed_objects, freed_bytes); 680} 681 682// Process the "referent" field in a java.lang.ref.Reference. If the referent has not yet been 683// marked, put it on the appropriate list in the heap for later processing. 684void SemiSpace::DelayReferenceReferent(mirror::Class* klass, mirror::Reference* reference) { 685 heap_->DelayReferenceReferent(klass, reference, MarkedForwardingAddressCallback, this); 686} 687 688class SemiSpaceMarkObjectVisitor { 689 public: 690 explicit SemiSpaceMarkObjectVisitor(SemiSpace* collector) : collector_(collector) { 691 } 692 693 void operator()(Object* obj, MemberOffset offset, bool /* is_static */) const ALWAYS_INLINE 694 EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) { 695 // Object was already verified when we scanned it. 696 collector_->MarkObject(obj->GetFieldObjectReferenceAddr<kVerifyNone>(offset)); 697 } 698 699 void operator()(mirror::Class* klass, mirror::Reference* ref) const 700 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) 701 EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) { 702 collector_->DelayReferenceReferent(klass, ref); 703 } 704 705 private: 706 SemiSpace* const collector_; 707}; 708 709// Visit all of the references of an object and update. 710void SemiSpace::ScanObject(Object* obj) { 711 DCHECK(!from_space_->HasAddress(obj)) << "Scanning object " << obj << " in from space"; 712 SemiSpaceMarkObjectVisitor visitor(this); 713 obj->VisitReferences<kMovingClasses>(visitor, visitor); 714} 715 716// Scan anything that's on the mark stack. 717void SemiSpace::ProcessMarkStack() { 718 space::MallocSpace* promo_dest_space = NULL; 719 accounting::SpaceBitmap* live_bitmap = NULL; 720 if (generational_ && !whole_heap_collection_) { 721 // If a bump pointer space only collection (and the promotion is 722 // enabled,) we delay the live-bitmap marking of promoted objects 723 // from MarkObject() until this function. 724 promo_dest_space = GetHeap()->GetPrimaryFreeListSpace(); 725 live_bitmap = promo_dest_space->GetLiveBitmap(); 726 DCHECK(live_bitmap != nullptr); 727 accounting::SpaceBitmap* mark_bitmap = promo_dest_space->GetMarkBitmap(); 728 DCHECK(mark_bitmap != nullptr); 729 DCHECK_EQ(live_bitmap, mark_bitmap); 730 } 731 timings_.StartSplit("ProcessMarkStack"); 732 while (!mark_stack_->IsEmpty()) { 733 Object* obj = mark_stack_->PopBack(); 734 if (generational_ && !whole_heap_collection_ && promo_dest_space->HasAddress(obj)) { 735 // obj has just been promoted. Mark the live bitmap for it, 736 // which is delayed from MarkObject(). 
      DCHECK(!live_bitmap->Test(obj));
      live_bitmap->Set(obj);
    }
    ScanObject(obj);
  }
  timings_.EndSplit();
}

inline Object* SemiSpace::GetMarkedForwardAddress(mirror::Object* obj) const
    SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
  // All immune objects are assumed marked.
  if (immune_region_.ContainsObject(obj)) {
    return obj;
  }
  if (from_space_->HasAddress(obj)) {
    // Returns either the forwarding address or nullptr.
    return GetForwardingAddressInFromSpace(obj);
  } else if (to_space_->HasAddress(obj)) {
    // Should be unlikely.
    // Already forwarded, must be marked.
    return obj;
  }
  return heap_->GetMarkBitmap()->Test(obj) ? obj : nullptr;
}

void SemiSpace::SetToSpace(space::ContinuousMemMapAllocSpace* to_space) {
  DCHECK(to_space != nullptr);
  to_space_ = to_space;
}

void SemiSpace::SetFromSpace(space::ContinuousMemMapAllocSpace* from_space) {
  DCHECK(from_space != nullptr);
  from_space_ = from_space;
}

void SemiSpace::FinishPhase() {
  TimingLogger::ScopedSplit split("FinishPhase", &timings_);
  Heap* heap = GetHeap();
  timings_.NewSplit("PostGcVerification");
  heap->PostGcVerification(this);
  // Null the "to" and "from" spaces since compacting from one to the other isn't valid until
  // further action is done by the heap.
  to_space_ = nullptr;
  from_space_ = nullptr;
  CHECK(mark_stack_->IsEmpty());
  mark_stack_->Reset();
  if (generational_) {
    // Decide whether to do a whole heap collection or a bump pointer
    // space only collection at the next collection by updating
    // whole_heap_collection.
    if (!whole_heap_collection_) {
      if (!kUseBytesPromoted) {
        // Enable whole_heap_collection once every
        // kDefaultWholeHeapCollectionInterval collections.
        --whole_heap_collection_interval_counter_;
        DCHECK_GE(whole_heap_collection_interval_counter_, 0);
        if (whole_heap_collection_interval_counter_ == 0) {
          whole_heap_collection_ = true;
        }
      } else {
        // Enable whole_heap_collection if the bytes promoted since
        // the last whole heap collection exceeds a threshold.
        bytes_promoted_since_last_whole_heap_collection_ += bytes_promoted_;
        if (bytes_promoted_since_last_whole_heap_collection_ >= kBytesPromotedThreshold) {
          whole_heap_collection_ = true;
        }
      }
    } else {
      if (!kUseBytesPromoted) {
        DCHECK_EQ(whole_heap_collection_interval_counter_, 0);
        whole_heap_collection_interval_counter_ = kDefaultWholeHeapCollectionInterval;
        whole_heap_collection_ = false;
      } else {
        // Reset it.
        bytes_promoted_since_last_whole_heap_collection_ = bytes_promoted_;
        whole_heap_collection_ = false;
      }
    }
  }
  // Clear all of the spaces' mark bitmaps.
  WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
  heap_->ClearMarkedObjects();
}

void SemiSpace::RevokeAllThreadLocalBuffers() {
  timings_.StartSplit("(Paused)RevokeAllThreadLocalBuffers");
  GetHeap()->RevokeAllThreadLocalBuffers();
  timings_.EndSplit();
}

}  // namespace collector
}  // namespace gc
}  // namespace art