semi_space.cc revision d1e05bf325fa4f57e8d79155a328313da800cf27
/*
 * Copyright (C) 2013 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "semi_space.h"

#include <functional>
#include <numeric>
#include <climits>
#include <vector>

#include "base/logging.h"
#include "base/macros.h"
#include "base/mutex-inl.h"
#include "base/timing_logger.h"
#include "gc/accounting/heap_bitmap.h"
#include "gc/accounting/mod_union_table.h"
#include "gc/accounting/space_bitmap-inl.h"
#include "gc/heap.h"
#include "gc/space/bump_pointer_space.h"
#include "gc/space/bump_pointer_space-inl.h"
#include "gc/space/image_space.h"
#include "gc/space/large_object_space.h"
#include "gc/space/space-inl.h"
#include "indirect_reference_table.h"
#include "intern_table.h"
#include "jni_internal.h"
#include "mark_sweep-inl.h"
#include "monitor.h"
#include "mirror/art_field.h"
#include "mirror/art_field-inl.h"
#include "mirror/class-inl.h"
#include "mirror/class_loader.h"
#include "mirror/dex_cache.h"
#include "mirror/object-inl.h"
#include "mirror/object_array.h"
#include "mirror/object_array-inl.h"
#include "runtime.h"
#include "semi_space-inl.h"
#include "thread-inl.h"
#include "thread_list.h"
#include "verifier/method_verifier.h"

using ::art::mirror::Class;
using ::art::mirror::Object;

namespace art {
namespace gc {
namespace collector {

static constexpr bool kProtectFromSpace = true;
static constexpr bool kResetFromSpace = true;

// TODO: Unduplicate logic.
void SemiSpace::ImmuneSpace(space::ContinuousSpace* space) {
  // Bind live to mark bitmap if necessary.
  if (space->GetLiveBitmap() != space->GetMarkBitmap()) {
    CHECK(space->IsContinuousMemMapAllocSpace());
    space->AsContinuousMemMapAllocSpace()->BindLiveToMarkBitmap();
  }
  // Add the space to the immune region.
  if (immune_begin_ == nullptr) {
    DCHECK(immune_end_ == nullptr);
    immune_begin_ = reinterpret_cast<Object*>(space->Begin());
    immune_end_ = reinterpret_cast<Object*>(space->End());
  } else {
    const space::ContinuousSpace* prev_space = nullptr;
    // Find out if the previous space is immune.
    for (space::ContinuousSpace* cur_space : GetHeap()->GetContinuousSpaces()) {
      if (cur_space == space) {
        break;
      }
      prev_space = cur_space;
    }
    // If the previous space was immune, then extend the immune region. Relies on continuous
    // spaces being sorted by Heap::AddContinuousSpace.
    if (prev_space != nullptr && IsImmuneSpace(prev_space)) {
      immune_begin_ = std::min(reinterpret_cast<Object*>(space->Begin()), immune_begin_);
      // Use Limit() instead of End() because otherwise if the generational mode is enabled,
      // the alloc space might expand due to promotion and the sense of immunity may change in
      // the middle of a GC.
      immune_end_ = std::max(reinterpret_cast<Object*>(space->Limit()), immune_end_);
    }
  }
}
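// Because the immune region is kept as a single contiguous [immune_begin_, immune_end_)
// range, testing whether an object is immune needs only two pointer comparisons. A minimal
// sketch of the membership test (the actual IsImmune() is defined outside this file):
//
//   bool IsImmune(const mirror::Object* obj) const {
//     return obj >= immune_begin_ && obj < immune_end_;
//   }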
void SemiSpace::BindBitmaps() {
  timings_.StartSplit("BindBitmaps");
  WriterMutexLock mu(self_, *Locks::heap_bitmap_lock_);
  // Mark all of the spaces we never collect as immune.
  for (const auto& space : GetHeap()->GetContinuousSpaces()) {
    if (space->GetLiveBitmap() != nullptr) {
      if (space == to_space_) {
        CHECK(to_space_->IsContinuousMemMapAllocSpace());
        to_space_->AsContinuousMemMapAllocSpace()->BindLiveToMarkBitmap();
      } else if (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyNeverCollect
                 || space->GetGcRetentionPolicy() == space::kGcRetentionPolicyFullCollect
                 // Add the main free list space and the non-moving space to the immune space
                 // if a bump pointer space only collection.
                 || (generational_ && !whole_heap_collection_ &&
                     (space == GetHeap()->GetNonMovingSpace() ||
                      space == GetHeap()->GetPrimaryFreeListSpace()))) {
        ImmuneSpace(space);
      }
    }
  }
  if (generational_ && !whole_heap_collection_) {
    // We won't collect the large object space if a bump pointer space only collection.
    is_large_object_space_immune_ = true;
  }
  timings_.EndSplit();
}

SemiSpace::SemiSpace(Heap* heap, bool generational, const std::string& name_prefix)
    : GarbageCollector(heap,
                       name_prefix + (name_prefix.empty() ? "" : " ") + "marksweep + semispace"),
      mark_stack_(nullptr),
      immune_begin_(nullptr),
      immune_end_(nullptr),
      is_large_object_space_immune_(false),
      to_space_(nullptr),
      from_space_(nullptr),
      self_(nullptr),
      generational_(generational),
      last_gc_to_space_end_(nullptr),
      bytes_promoted_(0),
      whole_heap_collection_(true),
      whole_heap_collection_interval_counter_(0) {
}

void SemiSpace::InitializePhase() {
  timings_.Reset();
  TimingLogger::ScopedSplit split("InitializePhase", &timings_);
  mark_stack_ = heap_->mark_stack_.get();
  DCHECK(mark_stack_ != nullptr);
  immune_begin_ = nullptr;
  immune_end_ = nullptr;
  is_large_object_space_immune_ = false;
  self_ = Thread::Current();
  // Do any pre GC verification.
  timings_.NewSplit("PreGcVerification");
  heap_->PreGcVerification(this);
  // Set the initial bitmap.
  to_space_live_bitmap_ = to_space_->GetLiveBitmap();
}

void SemiSpace::ProcessReferences(Thread* self) {
  TimingLogger::ScopedSplit split("ProcessReferences", &timings_);
  WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
  GetHeap()->ProcessReferences(timings_, clear_soft_references_, &MarkedForwardingAddressCallback,
                               &RecursiveMarkObjectCallback, this);
}
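// The phases below are invoked by the GarbageCollector base class. A rough sketch of the
// assumed ordering, inferred from the calls made in this file rather than copied from Run():
//
//   InitializePhase();  // Reset timings, grab the heap's mark stack.
//   MarkingPhase();     // With mutators suspended: swap semi-spaces, mark roots, trace.
//   ReclaimPhase();     // Process references, sweep, clear and protect the from-space.
//   FinishPhase();      // Post-GC verification, statistics, bitmap/mark-stack cleanup.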
void SemiSpace::MarkingPhase() {
  if (generational_) {
    if (gc_cause_ == kGcCauseExplicit || gc_cause_ == kGcCauseForNativeAlloc ||
        clear_soft_references_) {
      // If this is an explicit, native allocation-triggered, or last-attempt collection,
      // collect the whole heap (and reset the interval counter to be consistent).
      whole_heap_collection_ = true;
      whole_heap_collection_interval_counter_ = 0;
    }
    if (whole_heap_collection_) {
      VLOG(heap) << "Whole heap collection";
    } else {
      VLOG(heap) << "Bump pointer space only collection";
    }
  }
  Locks::mutator_lock_->AssertExclusiveHeld(self_);
  TimingLogger::ScopedSplit split("MarkingPhase", &timings_);
  // Need to do this with mutators paused so that somebody doesn't accidentally allocate into
  // the wrong space.
  heap_->SwapSemiSpaces();
  if (generational_) {
    // If last_gc_to_space_end_ is out of the bounds of the from-space (the to-space from the
    // last GC), then point it to the beginning of the from-space. For example, the very first
    // GC or the pre-zygote compaction.
    if (!from_space_->HasAddress(reinterpret_cast<mirror::Object*>(last_gc_to_space_end_))) {
      last_gc_to_space_end_ = from_space_->Begin();
    }
    // Reset this before the marking starts below.
    bytes_promoted_ = 0;
  }
  // Assume the cleared space is already empty.
  BindBitmaps();
  // Process dirty cards and add dirty cards to mod-union tables.
  heap_->ProcessCards(timings_);
  // Clear the whole card table since we cannot get any additional dirty cards during the
  // paused GC. This saves memory but only works for pause-the-world collectors.
  timings_.NewSplit("ClearCardTable");
  heap_->GetCardTable()->ClearCardTable();
  // Need to do this before the checkpoint since we don't want any threads to add references
  // to the live stack during the recursive mark.
  timings_.NewSplit("SwapStacks");
  heap_->SwapStacks();
  WriterMutexLock mu(self_, *Locks::heap_bitmap_lock_);
  MarkRoots();
  // Mark roots of immune spaces.
  UpdateAndMarkModUnion();
  // Recursively mark remaining objects.
  MarkReachableObjects();
}

bool SemiSpace::IsImmuneSpace(const space::ContinuousSpace* space) const {
  return immune_begin_ <= reinterpret_cast<Object*>(space->Begin()) &&
         immune_end_ >= reinterpret_cast<Object*>(space->End());
}

void SemiSpace::UpdateAndMarkModUnion() {
  for (auto& space : heap_->GetContinuousSpaces()) {
    // If the space is immune then we need to mark the references to other spaces.
    if (IsImmuneSpace(space)) {
      accounting::ModUnionTable* table = heap_->FindModUnionTableFromSpace(space);
      if (table != nullptr) {
        // TODO: Improve naming.
        TimingLogger::ScopedSplit split(
            space->IsZygoteSpace() ? "UpdateAndMarkZygoteModUnionTable" :
                                     "UpdateAndMarkImageModUnionTable",
            &timings_);
        table->UpdateAndMarkReferences(MarkRootCallback, this);
      } else {
        // If a bump pointer space only collection, the non-moving space is added to the
        // immune space. But the non-moving space doesn't have a mod union table. Instead, its
        // live bitmap will be scanned later in MarkReachableObjects().
        DCHECK(generational_ && !whole_heap_collection_ &&
               (space == heap_->GetNonMovingSpace() || space == heap_->GetPrimaryFreeListSpace()));
      }
    }
  }
}
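// For context: a mod-union table summarizes which reference fields in an immune space were
// written since the last GC (harvested from dirty cards by heap_->ProcessCards() in
// MarkingPhase() above), so UpdateAndMarkReferences() only revisits those fields instead of
// re-scanning the whole space. That summary is also what makes it safe for MarkingPhase() to
// clear the entire card table.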
class SemiSpaceScanObjectVisitor {
 public:
  explicit SemiSpaceScanObjectVisitor(SemiSpace* ss) : semi_space_(ss) {}
  void operator()(Object* obj) const NO_THREAD_SAFETY_ANALYSIS {
    // TODO: fix NO_THREAD_SAFETY_ANALYSIS. ScanObject() requires an exclusive lock on the
    // mutator lock, but SpaceBitmap::VisitMarkedRange() only requires the shared lock.
    DCHECK(obj != nullptr);
    semi_space_->ScanObject(obj);
  }

 private:
  SemiSpace* semi_space_;
};

void SemiSpace::MarkReachableObjects() {
  timings_.StartSplit("MarkStackAsLive");
  accounting::ObjectStack* live_stack = heap_->GetLiveStack();
  heap_->MarkAllocStackAsLive(live_stack);
  live_stack->Reset();
  timings_.EndSplit();

  for (auto& space : heap_->GetContinuousSpaces()) {
    // If the space is immune and has no mod union table (the non-moving space when the bump
    // pointer space only collection is enabled), then we need to scan its live bitmap as
    // roots (including the objects on the live stack which have just been marked in the live
    // bitmap above in MarkAllocStackAsLive()).
    if (IsImmuneSpace(space) && heap_->FindModUnionTableFromSpace(space) == nullptr) {
      DCHECK(generational_ && !whole_heap_collection_ &&
             (space == GetHeap()->GetNonMovingSpace() ||
              space == GetHeap()->GetPrimaryFreeListSpace()));
      accounting::SpaceBitmap* live_bitmap = space->GetLiveBitmap();
      SemiSpaceScanObjectVisitor visitor(this);
      live_bitmap->VisitMarkedRange(reinterpret_cast<uintptr_t>(space->Begin()),
                                    reinterpret_cast<uintptr_t>(space->End()),
                                    visitor);
    }
  }

  if (is_large_object_space_immune_) {
    DCHECK(generational_ && !whole_heap_collection_);
    // Delay copying the live set to the marked set until here from BindBitmaps() as the large
    // objects on the allocation stack may be newly added to the live set above in
    // MarkAllocStackAsLive().
    GetHeap()->GetLargeObjectsSpace()->CopyLiveToMarked();

    // When the large object space is immune, we need to scan the large objects as roots as
    // they contain references to their classes (primitive array classes) that could move,
    // though they don't contain any other references.
    space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
    accounting::ObjectSet* large_live_objects = large_object_space->GetLiveObjects();
    SemiSpaceScanObjectVisitor visitor(this);
    for (const Object* obj : large_live_objects->GetObjects()) {
      visitor(const_cast<Object*>(obj));
    }
  }

  // Recursively process the mark stack.
  ProcessMarkStack(true);
}
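// Note that tracing is iterative, not recursive: MarkObject() pushes each newly marked
// object onto the mark stack, and ProcessMarkStack() (defined below) pops and scans entries
// until the stack drains, which bounds native stack depth regardless of object graph shape.
// The loop shape, roughly:
//
//   while (!mark_stack_->IsEmpty()) {
//     ScanObject(mark_stack_->PopBack());  // ScanObject() may push more entries.
//   }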
void SemiSpace::ReclaimPhase() {
  TimingLogger::ScopedSplit split("ReclaimPhase", &timings_);
  ProcessReferences(self_);
  {
    ReaderMutexLock mu(self_, *Locks::heap_bitmap_lock_);
    SweepSystemWeaks();
  }
  // Record freed memory.
  uint64_t from_bytes = from_space_->GetBytesAllocated();
  uint64_t to_bytes = to_space_->GetBytesAllocated();
  uint64_t from_objects = from_space_->GetObjectsAllocated();
  uint64_t to_objects = to_space_->GetObjectsAllocated();
  CHECK_LE(to_objects, from_objects);
  int64_t freed_bytes = from_bytes - to_bytes;
  int64_t freed_objects = from_objects - to_objects;
  freed_bytes_.FetchAndAdd(freed_bytes);
  freed_objects_.FetchAndAdd(freed_objects);
  // Note: Freed bytes can be negative if we copy from a compacted space to a free-list
  // backed space.
  heap_->RecordFree(freed_objects, freed_bytes);
  timings_.StartSplit("PreSweepingGcVerification");
  heap_->PreSweepingGcVerification(this);
  timings_.EndSplit();

  {
    WriterMutexLock mu(self_, *Locks::heap_bitmap_lock_);
    // Reclaim unmarked objects.
    Sweep(false);
    // Swap the live and mark bitmaps for each space which we modified. This is an
    // optimization that enables us to not clear live bits inside of the sweep. Only swaps
    // unbound bitmaps.
    timings_.StartSplit("SwapBitmaps");
    SwapBitmaps();
    timings_.EndSplit();
    // Unbind the live and mark bitmaps.
    TimingLogger::ScopedSplit split("UnBindBitmaps", &timings_);
    GetHeap()->UnBindBitmaps();
  }
  // Release the memory used by the from space.
  if (kResetFromSpace) {
    // Clearing from space.
    from_space_->Clear();
  }
  // Protect the from space.
  VLOG(heap)
      << "mprotect region " << reinterpret_cast<void*>(from_space_->Begin()) << " - "
      << reinterpret_cast<void*>(from_space_->Limit());
  if (kProtectFromSpace) {
    mprotect(from_space_->Begin(), from_space_->Capacity(), PROT_NONE);
  } else {
    mprotect(from_space_->Begin(), from_space_->Capacity(), PROT_READ);
  }

  if (generational_) {
    // Record the end (top) of the to space so we can distinguish between objects that were
    // allocated since the last GC and the older objects.
    last_gc_to_space_end_ = to_space_->End();
  }
}

void SemiSpace::ResizeMarkStack(size_t new_size) {
  std::vector<Object*> temp(mark_stack_->Begin(), mark_stack_->End());
  CHECK_LE(mark_stack_->Size(), new_size);
  mark_stack_->Resize(new_size);
  for (const auto& obj : temp) {
    mark_stack_->PushBack(obj);
  }
}

inline void SemiSpace::MarkStackPush(Object* obj) {
  if (UNLIKELY(mark_stack_->Size() >= mark_stack_->Capacity())) {
    ResizeMarkStack(mark_stack_->Capacity() * 2);
  }
  // The object must be pushed on to the mark stack.
  mark_stack_->PushBack(obj);
}

// Rare case, probably not worth inlining since it will increase instruction cache miss rate.
bool SemiSpace::MarkLargeObject(const Object* obj) {
  // TODO: support >1 discontinuous space.
  space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
  DCHECK(large_object_space->Contains(obj));
  accounting::ObjectSet* large_objects = large_object_space->GetMarkObjects();
  if (UNLIKELY(!large_objects->Test(obj))) {
    large_objects->Set(obj);
    return true;
  }
  return false;
}
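// The generational promotion decision below relies on a single watermark:
// last_gc_to_space_end_ is where the to-space ended after the previous GC, so any from-space
// object whose address is below it must have survived at least one collection. Roughly,
// assuming the usual low-to-high bump-pointer layout:
//
//   from-space: [ Begin() ........ last_gc_to_space_end_ ........ End() )
//                 |-- "old": pseudo-promote --|-- "young": copy to to-space --|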
mirror::Object* SemiSpace::MarkNonForwardedObject(mirror::Object* obj) {
  size_t object_size = obj->SizeOf();
  size_t bytes_allocated;
  mirror::Object* forward_address = nullptr;
  if (generational_ && reinterpret_cast<byte*>(obj) < last_gc_to_space_end_) {
    // If it was allocated before the last GC (older), move (pseudo-promote) it to the main
    // free list space (as a sort of old generation).
    size_t bytes_promoted;
    space::MallocSpace* promo_dest_space = GetHeap()->GetPrimaryFreeListSpace();
    forward_address = promo_dest_space->Alloc(self_, object_size, &bytes_promoted);
    if (forward_address == nullptr) {
      // If out of space, fall back to the to-space.
      forward_address = to_space_->Alloc(self_, object_size, &bytes_allocated);
    } else {
      GetHeap()->num_bytes_allocated_.FetchAndAdd(bytes_promoted);
      bytes_promoted_ += bytes_promoted;
      // Handle the bitmaps marking.
      accounting::SpaceBitmap* live_bitmap = promo_dest_space->GetLiveBitmap();
      DCHECK(live_bitmap != nullptr);
      accounting::SpaceBitmap* mark_bitmap = promo_dest_space->GetMarkBitmap();
      DCHECK(mark_bitmap != nullptr);
      DCHECK(!live_bitmap->Test(forward_address));
      if (!whole_heap_collection_) {
        // If collecting the bump pointer spaces only, live_bitmap == mark_bitmap.
        DCHECK_EQ(live_bitmap, mark_bitmap);

        // If a bump pointer space only collection, delay the live bitmap marking of the
        // promoted object until it's popped off the mark stack (ProcessMarkStack()). The
        // rationale: we may be in the middle of scanning the objects in the promo destination
        // space for non-moving-space-to-bump-pointer-space references by iterating over the
        // marked bits of the live bitmap (MarkReachableObjects()). If we don't delay it (and
        // instead mark the promoted object here), the above promo destination space scan
        // could encounter the just-promoted object and forward the references in the promoted
        // object's fields even though it is pushed onto the mark stack. If this happens, the
        // promoted object would be in an inconsistent state, that is, it's on the mark stack
        // (gray) but its fields are already forwarded (black), which would cause a
        // DCHECK(!to_space_->HasAddress(obj)) failure below.
      } else {
        // Mark forward_address on the live bit map.
        live_bitmap->Set(forward_address);
        // Mark forward_address on the mark bit map.
        DCHECK(!mark_bitmap->Test(forward_address));
        mark_bitmap->Set(forward_address);
      }
    }
    DCHECK(forward_address != nullptr);
  } else {
    // If it was allocated after the last GC (younger), copy it to the to-space.
    forward_address = to_space_->Alloc(self_, object_size, &bytes_allocated);
  }
  // Copy over the object and add it to the mark stack since we still need to update its
  // references.
  memcpy(reinterpret_cast<void*>(forward_address), obj, object_size);
  if (to_space_live_bitmap_ != nullptr) {
    to_space_live_bitmap_->Set(forward_address);
  }
  DCHECK(to_space_->HasAddress(forward_address) ||
         (generational_ && GetHeap()->GetPrimaryFreeListSpace()->HasAddress(forward_address)));
  return forward_address;
}
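// The forwarding address of a moved object is stashed in its lock word, which is why
// MarkObject() below copies the object *before* overwriting that word. A hedged sketch of
// the encode/decode pair around GetForwardingAddressInFromSpace() (the decoder lives outside
// this file, in semi_space-inl.h; its exact shape is assumed here):
//
//   // Encode: performed in MarkObject() after the memcpy.
//   obj->SetLockWord(LockWord::FromForwardingAddress(reinterpret_cast<size_t>(new_addr)));
//   // Decode: yields nullptr while the lock word does not yet hold a forwarding address.
//   Object* fwd = GetForwardingAddressInFromSpace(obj);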
// Used to mark and copy objects. Any newly-marked objects which are in the from space get
// moved to the to-space and have their forward address updated. Objects which have been
// newly marked are pushed on the mark stack.
Object* SemiSpace::MarkObject(Object* obj) {
  Object* forward_address = obj;
  if (obj != nullptr && !IsImmune(obj)) {
    if (from_space_->HasAddress(obj)) {
      forward_address = GetForwardingAddressInFromSpace(obj);
      // If the object has not yet been moved, mark it and copy it; otherwise return the
      // existing forwarding address.
      if (forward_address == nullptr) {
        forward_address = MarkNonForwardedObject(obj);
        DCHECK(forward_address != nullptr);
        // Make sure to only update the forwarding address AFTER you copy the object so that
        // the monitor word doesn't get stomped over.
        obj->SetLockWord(LockWord::FromForwardingAddress(
            reinterpret_cast<size_t>(forward_address)));
        // Push the object onto the mark stack for later processing.
        MarkStackPush(forward_address);
      }
      // TODO: Do we need this if in the else statement?
    } else {
      accounting::SpaceBitmap* object_bitmap =
          heap_->GetMarkBitmap()->GetContinuousSpaceBitmap(obj);
      if (LIKELY(object_bitmap != nullptr)) {
        if (generational_) {
          // If a bump pointer space only collection, we should not reach here as we don't/
          // won't mark the objects in the non-moving space (except for the promoted objects).
          // Note the non-moving space is added to the immune space.
          DCHECK(whole_heap_collection_);
        }
        // This object was not previously marked.
        if (!object_bitmap->Test(obj)) {
          object_bitmap->Set(obj);
          MarkStackPush(obj);
        }
      } else {
        CHECK(!to_space_->HasAddress(obj)) << "Marking object in to_space_";
        if (MarkLargeObject(obj)) {
          MarkStackPush(obj);
        }
      }
    }
  }
  return forward_address;
}

Object* SemiSpace::RecursiveMarkObjectCallback(Object* root, void* arg) {
  DCHECK(root != nullptr);
  DCHECK(arg != nullptr);
  SemiSpace* semi_space = reinterpret_cast<SemiSpace*>(arg);
  mirror::Object* ret = semi_space->MarkObject(root);
  semi_space->ProcessMarkStack(true);
  return ret;
}

Object* SemiSpace::MarkRootCallback(Object* root, void* arg) {
  DCHECK(root != nullptr);
  DCHECK(arg != nullptr);
  return reinterpret_cast<SemiSpace*>(arg)->MarkObject(root);
}

// Marks all objects in the root set.
void SemiSpace::MarkRoots() {
  timings_.StartSplit("MarkRoots");
  // TODO: Visit image roots as well?
  Runtime::Current()->VisitRoots(MarkRootCallback, this, false, true);
  timings_.EndSplit();
}

mirror::Object* SemiSpace::MarkedForwardingAddressCallback(Object* object, void* arg) {
  return reinterpret_cast<SemiSpace*>(arg)->GetMarkedForwardAddress(object);
}

void SemiSpace::SweepSystemWeaks() {
  timings_.StartSplit("SweepSystemWeaks");
  Runtime::Current()->SweepSystemWeaks(MarkedForwardingAddressCallback, this);
  timings_.EndSplit();
}

bool SemiSpace::ShouldSweepSpace(space::ContinuousSpace* space) const {
  return space != from_space_ && space != to_space_ && !IsImmuneSpace(space);
}

void SemiSpace::Sweep(bool swap_bitmaps) {
  DCHECK(mark_stack_->IsEmpty());
  TimingLogger::ScopedSplit split("Sweep", &timings_);
  for (const auto& space : GetHeap()->GetContinuousSpaces()) {
    if (space->IsContinuousMemMapAllocSpace()) {
      space::ContinuousMemMapAllocSpace* alloc_space = space->AsContinuousMemMapAllocSpace();
      if (!ShouldSweepSpace(alloc_space)) {
        continue;
      }
      TimingLogger::ScopedSplit split(
          alloc_space->IsZygoteSpace() ? "SweepZygoteSpace" : "SweepAllocSpace", &timings_);
      size_t freed_objects = 0;
      size_t freed_bytes = 0;
      alloc_space->Sweep(swap_bitmaps, &freed_objects, &freed_bytes);
      heap_->RecordFree(freed_objects, freed_bytes);
      freed_objects_.FetchAndAdd(freed_objects);
      freed_bytes_.FetchAndAdd(freed_bytes);
    }
  }
  if (!is_large_object_space_immune_) {
    SweepLargeObjects(swap_bitmaps);
  }
}

void SemiSpace::SweepLargeObjects(bool swap_bitmaps) {
  DCHECK(!is_large_object_space_immune_);
  TimingLogger::ScopedSplit split("SweepLargeObjects", &timings_);
  size_t freed_objects = 0;
  size_t freed_bytes = 0;
  GetHeap()->GetLargeObjectsSpace()->Sweep(swap_bitmaps, &freed_objects, &freed_bytes);
  freed_large_objects_.FetchAndAdd(freed_objects);
  freed_large_object_bytes_.FetchAndAdd(freed_bytes);
  GetHeap()->RecordFree(freed_objects, freed_bytes);
}

// Process the "referent" field in a java.lang.ref.Reference. If the referent has not yet
// been marked, put it on the appropriate list in the heap for later processing.
void SemiSpace::DelayReferenceReferent(mirror::Class* klass, Object* obj) {
  heap_->DelayReferenceReferent(klass, obj, MarkedForwardingAddressCallback, this);
}
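// System weaks (interned strings, monitors, JNI weak globals, and similar) are swept via
// MarkedForwardingAddressCallback above: for each weak root the runtime stores back the
// returned pointer, which is the possibly-forwarded object when it is marked, or nullptr
// when it is dead, in which case the weak entry is cleared. GetMarkedForwardAddress() below
// implements exactly that tri-state contract.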
// Visit all of the references of an object and update them.
void SemiSpace::ScanObject(Object* obj) {
  DCHECK(obj != nullptr);
  DCHECK(!from_space_->HasAddress(obj)) << "Scanning object " << obj << " in from space";
  MarkSweep::VisitObjectReferences(obj, [this](Object* obj, Object* ref,
      const MemberOffset& offset, bool /* is_static */)
      ALWAYS_INLINE_LAMBDA NO_THREAD_SAFETY_ANALYSIS {
    mirror::Object* new_address = MarkObject(ref);
    if (new_address != ref) {
      DCHECK(new_address != nullptr);
      // No need to mark the card since we are only updating the object address and not
      // changing the actual objects it points to. Using SetFieldPtr is better in this case
      // since it does not dirty cards and use additional memory.
      obj->SetFieldPtr(offset, new_address, false);
    }
  }, kMovingClasses);
  mirror::Class* klass = obj->GetClass();
  if (UNLIKELY(klass->IsReferenceClass())) {
    DelayReferenceReferent(klass, obj);
  }
}

// Scan anything that's on the mark stack.
void SemiSpace::ProcessMarkStack(bool paused) {
  space::MallocSpace* promo_dest_space = nullptr;
  accounting::SpaceBitmap* live_bitmap = nullptr;
  if (generational_ && !whole_heap_collection_) {
    // If a bump pointer space only collection (and promotion is enabled), we delay the
    // live-bitmap marking of promoted objects from MarkObject() until this function.
    promo_dest_space = GetHeap()->GetPrimaryFreeListSpace();
    live_bitmap = promo_dest_space->GetLiveBitmap();
    DCHECK(live_bitmap != nullptr);
    accounting::SpaceBitmap* mark_bitmap = promo_dest_space->GetMarkBitmap();
    DCHECK(mark_bitmap != nullptr);
    DCHECK_EQ(live_bitmap, mark_bitmap);
  }
  timings_.StartSplit(paused ? "(paused)ProcessMarkStack" : "ProcessMarkStack");
  while (!mark_stack_->IsEmpty()) {
    Object* obj = mark_stack_->PopBack();
    if (generational_ && !whole_heap_collection_ && promo_dest_space->HasAddress(obj)) {
      // obj has just been promoted. Mark the live bitmap for it, which is delayed from
      // MarkObject().
      DCHECK(!live_bitmap->Test(obj));
      live_bitmap->Set(obj);
    }
    ScanObject(obj);
  }
  timings_.EndSplit();
}

inline Object* SemiSpace::GetMarkedForwardAddress(mirror::Object* obj) const
    SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
  // All immune objects are assumed marked.
  if (IsImmune(obj)) {
    return obj;
  }
  if (from_space_->HasAddress(obj)) {
    mirror::Object* forwarding_address = GetForwardingAddressInFromSpace(const_cast<Object*>(obj));
    return forwarding_address;  // Returns either the forwarding address or nullptr.
  } else if (to_space_->HasAddress(obj)) {
    // Should be unlikely.
    // Already forwarded, must be marked.
    return obj;
  }
  return heap_->GetMarkBitmap()->Test(obj) ? obj : nullptr;
}

void SemiSpace::SetToSpace(space::ContinuousMemMapAllocSpace* to_space) {
  DCHECK(to_space != nullptr);
  to_space_ = to_space;
}

void SemiSpace::SetFromSpace(space::ContinuousMemMapAllocSpace* from_space) {
  DCHECK(from_space != nullptr);
  from_space_ = from_space;
}
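// A hypothetical driver sequence, assuming the Heap owns a bump pointer space and an empty
// temp space (the field names here are illustrative, not verbatim Heap code):
//
//   semi_space_collector_->SetFromSpace(bump_pointer_space_);  // Space being evacuated.
//   semi_space_collector_->SetToSpace(temp_space_);            // Empty destination space.
//   // ... after which the GarbageCollector machinery drives the phases defined above.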
void SemiSpace::FinishPhase() {
  TimingLogger::ScopedSplit split("FinishPhase", &timings_);
  Heap* heap = GetHeap();
  timings_.NewSplit("PostGcVerification");
  heap->PostGcVerification(this);

  // Null the "to" and "from" spaces since compacting from one to the other isn't valid until
  // further action is done by the heap.
  to_space_ = nullptr;
  from_space_ = nullptr;

  // Update the cumulative statistics.
  total_freed_objects_ += GetFreedObjects() + GetFreedLargeObjects();
  total_freed_bytes_ += GetFreedBytes() + GetFreedLargeObjectBytes();

  // Ensure that the mark stack is empty.
  CHECK(mark_stack_->IsEmpty());

  // Update the cumulative loggers.
  cumulative_timings_.Start();
  cumulative_timings_.AddLogger(timings_);
  cumulative_timings_.End();

  // Clear all of the spaces' mark bitmaps.
  for (const auto& space : GetHeap()->GetContinuousSpaces()) {
    accounting::SpaceBitmap* bitmap = space->GetMarkBitmap();
    if (bitmap != nullptr &&
        space->GetGcRetentionPolicy() != space::kGcRetentionPolicyNeverCollect) {
      bitmap->Clear();
    }
  }
  mark_stack_->Reset();

  // Reset the marked large objects.
  space::LargeObjectSpace* large_objects = GetHeap()->GetLargeObjectsSpace();
  large_objects->GetMarkObjects()->Clear();

  if (generational_) {
    // Decide whether to do a whole heap collection or a bump pointer space only collection
    // at the next collection by updating whole_heap_collection_. Enable
    // whole_heap_collection_ once every kDefaultWholeHeapCollectionInterval collections.
    if (!whole_heap_collection_) {
      --whole_heap_collection_interval_counter_;
      DCHECK_GE(whole_heap_collection_interval_counter_, 0);
      if (whole_heap_collection_interval_counter_ == 0) {
        whole_heap_collection_ = true;
      }
    } else {
      DCHECK_EQ(whole_heap_collection_interval_counter_, 0);
      whole_heap_collection_interval_counter_ = kDefaultWholeHeapCollectionInterval;
      whole_heap_collection_ = false;
    }
  }
}

}  // namespace collector
}  // namespace gc
}  // namespace art