semi_space.cc revision ba5870d1cb6c6320bfd2eb818772352df71a8269
/*
 * Copyright (C) 2013 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "semi_space.h"

#include <sys/mman.h>  // For mprotect.

#include <algorithm>  // For std::min and std::max.
#include <functional>
#include <numeric>
#include <climits>
#include <vector>

#include "base/logging.h"
#include "base/macros.h"
#include "base/mutex-inl.h"
#include "base/timing_logger.h"
#include "gc/accounting/heap_bitmap.h"
#include "gc/accounting/mod_union_table.h"
#include "gc/accounting/space_bitmap-inl.h"
#include "gc/heap.h"
#include "gc/space/bump_pointer_space.h"
#include "gc/space/bump_pointer_space-inl.h"
#include "gc/space/image_space.h"
#include "gc/space/large_object_space.h"
#include "gc/space/space-inl.h"
#include "indirect_reference_table.h"
#include "intern_table.h"
#include "jni_internal.h"
#include "mark_sweep-inl.h"
#include "monitor.h"
#include "mirror/art_field.h"
#include "mirror/art_field-inl.h"
#include "mirror/class-inl.h"
#include "mirror/class_loader.h"
#include "mirror/dex_cache.h"
#include "mirror/object-inl.h"
#include "mirror/object_array.h"
#include "mirror/object_array-inl.h"
#include "runtime.h"
#include "semi_space-inl.h"
#include "thread-inl.h"
#include "thread_list.h"
#include "verifier/method_verifier.h"

using ::art::mirror::Class;
using ::art::mirror::Object;

namespace art {
namespace gc {
namespace collector {

static constexpr bool kProtectFromSpace = true;
static constexpr bool kResetFromSpace = true;

// TODO: Unduplicate logic.
void SemiSpace::ImmuneSpace(space::ContinuousSpace* space) {
  // Bind live to mark bitmap if necessary.
  if (space->GetLiveBitmap() != space->GetMarkBitmap()) {
    BindLiveToMarkBitmap(space);
  }
  // Add the space to the immune region.
  if (immune_begin_ == nullptr) {
    DCHECK(immune_end_ == nullptr);
    immune_begin_ = reinterpret_cast<Object*>(space->Begin());
    immune_end_ = reinterpret_cast<Object*>(space->End());
  } else {
    const space::ContinuousSpace* prev_space = nullptr;
    // Find out if the previous space is immune.
    for (space::ContinuousSpace* cur_space : GetHeap()->GetContinuousSpaces()) {
      if (cur_space == space) {
        break;
      }
      prev_space = cur_space;
    }
    // If the previous space was immune, then extend the immune region. Relies on continuous
    // spaces being sorted by Heap::AddContinuousSpace.
    if (prev_space != nullptr && IsImmuneSpace(prev_space)) {
      immune_begin_ = std::min(reinterpret_cast<Object*>(space->Begin()), immune_begin_);
      // Use Limit() instead of End() because otherwise if the generational mode is enabled,
      // the alloc space might expand due to promotion and the sense of immunity may change in
      // the middle of a GC.
      immune_end_ = std::max(reinterpret_cast<Object*>(space->Limit()), immune_end_);
    }
  }
}

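// Decide, under the heap bitmap lock, which spaces participate in this collection: the
// to-space gets its live bitmap bound to its mark bitmap; never-collect and full-collect
// spaces become immune; and, in a generational bump pointer space only collection, the main
// free list space, the non-moving space, and the large object space are treated as immune
// as well.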
void SemiSpace::BindBitmaps() {
  timings_.StartSplit("BindBitmaps");
  WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
  // Mark all of the spaces we never collect as immune.
  for (const auto& space : GetHeap()->GetContinuousSpaces()) {
    if (space->GetLiveBitmap() != nullptr) {
      if (space == to_space_) {
        BindLiveToMarkBitmap(to_space_);
      } else if (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyNeverCollect
                 || space->GetGcRetentionPolicy() == space::kGcRetentionPolicyFullCollect
                 // Add the main free list space and the non-moving space to the immune
                 // space if this is a bump pointer space only collection.
                 || (generational_ && !whole_heap_collection_ &&
                     (space == GetHeap()->GetNonMovingSpace() ||
                      space == GetHeap()->GetPrimaryFreeListSpace()))) {
        ImmuneSpace(space);
      }
    }
  }
  if (generational_ && !whole_heap_collection_) {
    // We won't collect the large object space if this is a bump pointer space only collection.
    is_large_object_space_immune_ = true;
  }
  timings_.EndSplit();
}

SemiSpace::SemiSpace(Heap* heap, bool generational, const std::string& name_prefix)
    : GarbageCollector(heap,
                       name_prefix + (name_prefix.empty() ? "" : " ") + "marksweep + semispace"),
      mark_stack_(nullptr),
      immune_begin_(nullptr),
      immune_end_(nullptr),
      is_large_object_space_immune_(false),
      to_space_(nullptr),
      from_space_(nullptr),
      self_(nullptr),
      generational_(generational),
      last_gc_to_space_end_(nullptr),
      bytes_promoted_(0),
      whole_heap_collection_(true),
      whole_heap_collection_interval_counter_(0) {
}

void SemiSpace::InitializePhase() {
  timings_.Reset();
  TimingLogger::ScopedSplit split("InitializePhase", &timings_);
  mark_stack_ = heap_->mark_stack_.get();
  DCHECK(mark_stack_ != nullptr);
  immune_begin_ = nullptr;
  immune_end_ = nullptr;
  is_large_object_space_immune_ = false;
  self_ = Thread::Current();
  // Do any pre GC verification.
  timings_.NewSplit("PreGcVerification");
  heap_->PreGcVerification(this);
  // Set the initial bitmap.
  to_space_live_bitmap_ = to_space_->GetLiveBitmap();
}

void SemiSpace::ProcessReferences(Thread* self) {
  TimingLogger::ScopedSplit split("ProcessReferences", &timings_);
  WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
  GetHeap()->ProcessReferences(timings_, clear_soft_references_, &MarkedForwardingAddressCallback,
                               &RecursiveMarkObjectCallback, this);
}

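// The marking phase runs with mutators suspended: swap the semispaces, bind/immune the
// bitmaps, flush dirty cards into the mod-union tables, clear the card table, swap the
// allocation and live stacks, then mark the roots and recursively trace from them.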
void SemiSpace::MarkingPhase() {
  if (generational_) {
    if (gc_cause_ == kGcCauseExplicit || gc_cause_ == kGcCauseForNativeAlloc ||
        clear_soft_references_) {
      // If this is an explicit, native allocation-triggered, or last attempt collection,
      // collect the whole heap (and reset the interval counter to be consistent.)
      whole_heap_collection_ = true;
      whole_heap_collection_interval_counter_ = 0;
    }
    if (whole_heap_collection_) {
      VLOG(heap) << "Whole heap collection";
    } else {
      VLOG(heap) << "Bump pointer space only collection";
    }
  }
  Thread* self = Thread::Current();
  Locks::mutator_lock_->AssertExclusiveHeld(self);
  TimingLogger::ScopedSplit split("MarkingPhase", &timings_);
  // Need to do this with mutators paused so that somebody doesn't accidentally allocate into
  // the wrong space.
  heap_->SwapSemiSpaces();
  if (generational_) {
    // If last_gc_to_space_end_ is out of the bounds of the from-space (the to-space from the
    // last GC), then point it to the beginning of the from-space. For example, the very first
    // GC or the pre-zygote compaction.
    if (!from_space_->HasAddress(reinterpret_cast<mirror::Object*>(last_gc_to_space_end_))) {
      last_gc_to_space_end_ = from_space_->Begin();
    }
    // Reset this before the marking starts below.
    bytes_promoted_ = 0;
  }
  // Assume the cleared space is already empty.
  BindBitmaps();
  // Process dirty cards and add dirty cards to mod-union tables.
  heap_->ProcessCards(timings_);
  // Clear the whole card table since we cannot get any additional dirty cards during the
  // paused GC. This saves memory but only works for pause-the-world collectors.
  timings_.NewSplit("ClearCardTable");
  heap_->GetCardTable()->ClearCardTable();
  // Need to do this before the checkpoint since we don't want any threads to add references to
  // the live stack during the recursive mark.
  timings_.NewSplit("SwapStacks");
  heap_->SwapStacks();
  WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
  MarkRoots();
  // Mark roots of immune spaces.
  UpdateAndMarkModUnion();
  // Recursively mark remaining objects.
  MarkReachableObjects();
}

bool SemiSpace::IsImmuneSpace(const space::ContinuousSpace* space) const {
  return
      immune_begin_ <= reinterpret_cast<Object*>(space->Begin()) &&
      immune_end_ >= reinterpret_cast<Object*>(space->End());
}

void SemiSpace::UpdateAndMarkModUnion() {
  for (auto& space : heap_->GetContinuousSpaces()) {
    // If the space is immune then we need to mark the references to other spaces.
    if (IsImmuneSpace(space)) {
      accounting::ModUnionTable* table = heap_->FindModUnionTableFromSpace(space);
      if (table != nullptr) {
        // TODO: Improve naming.
        TimingLogger::ScopedSplit split(
            space->IsZygoteSpace() ? "UpdateAndMarkZygoteModUnionTable" :
                                     "UpdateAndMarkImageModUnionTable",
            &timings_);
        table->UpdateAndMarkReferences(MarkRootCallback, this);
      } else {
        // In a bump pointer space only collection, the non-moving space is added to the immune
        // space. But the non-moving space doesn't have a mod union table. Instead, its live
        // bitmap will be scanned later in MarkReachableObjects().
        DCHECK(generational_ && !whole_heap_collection_ &&
               (space == heap_->GetNonMovingSpace() || space == heap_->GetPrimaryFreeListSpace()));
      }
    }
  }
}

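// Visitor that forwards each object it is handed to SemiSpace::ScanObject(). It is used below
// to treat the live objects of immune spaces without mod-union tables, and immune large
// objects, as roots.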
class SemiSpaceScanObjectVisitor {
 public:
  explicit SemiSpaceScanObjectVisitor(SemiSpace* ss) : semi_space_(ss) {}
  void operator()(Object* obj) const NO_THREAD_SAFETY_ANALYSIS {
    // TODO: fix NO_THREAD_SAFETY_ANALYSIS. ScanObject() requires an exclusive lock on the
    // mutator lock, but SpaceBitmap::VisitMarkedRange() only requires the shared lock.
    DCHECK(obj != nullptr);
    semi_space_->ScanObject(obj);
  }
 private:
  SemiSpace* semi_space_;
};

void SemiSpace::MarkReachableObjects() {
  timings_.StartSplit("MarkStackAsLive");
  accounting::ObjectStack* live_stack = heap_->GetLiveStack();
  heap_->MarkAllocStackAsLive(live_stack);
  live_stack->Reset();
  timings_.EndSplit();

  for (auto& space : heap_->GetContinuousSpaces()) {
    // If the space is immune and has no mod union table (the non-moving space when the bump
    // pointer space only collection is enabled,) then we need to scan its live bitmap as roots
    // (including the objects on the live stack which have just been marked in the live bitmap
    // above in MarkAllocStackAsLive().)
    if (IsImmuneSpace(space) && heap_->FindModUnionTableFromSpace(space) == nullptr) {
      DCHECK(generational_ && !whole_heap_collection_ &&
             (space == GetHeap()->GetNonMovingSpace() ||
              space == GetHeap()->GetPrimaryFreeListSpace()));
      accounting::SpaceBitmap* live_bitmap = space->GetLiveBitmap();
      SemiSpaceScanObjectVisitor visitor(this);
      live_bitmap->VisitMarkedRange(reinterpret_cast<uintptr_t>(space->Begin()),
                                    reinterpret_cast<uintptr_t>(space->End()),
                                    visitor);
    }
  }

  if (is_large_object_space_immune_) {
    DCHECK(generational_ && !whole_heap_collection_);
    // Delay copying the live set to the marked set until here from BindBitmaps() as the large
    // objects on the allocation stack may be newly added to the live set above in
    // MarkAllocStackAsLive().
    GetHeap()->GetLargeObjectsSpace()->CopyLiveToMarked();

    // When the large object space is immune, we need to scan the large objects as roots as
    // they contain references to their classes (primitive array classes) that could move even
    // though they don't contain any other references.
    space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
    accounting::ObjectSet* large_live_objects = large_object_space->GetLiveObjects();
    SemiSpaceScanObjectVisitor visitor(this);
    for (const Object* obj : large_live_objects->GetObjects()) {
      visitor(const_cast<Object*>(obj));
    }
  }

  // Recursively process the mark stack.
  ProcessMarkStack(true);
}

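// The reclaim phase: process references, sweep system weaks, account for the bytes and
// objects freed by the copy, sweep the unmarked objects of the non-moving spaces, and then
// clear and protect the from-space.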
void SemiSpace::ReclaimPhase() {
  TimingLogger::ScopedSplit split("ReclaimPhase", &timings_);
  Thread* self = Thread::Current();
  ProcessReferences(self);
  {
    ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
    SweepSystemWeaks();
  }
  // Record freed memory.
  uint64_t from_bytes = from_space_->GetBytesAllocated();
  uint64_t to_bytes = to_space_->GetBytesAllocated();
  uint64_t from_objects = from_space_->GetObjectsAllocated();
  uint64_t to_objects = to_space_->GetObjectsAllocated();
  CHECK_LE(to_objects, from_objects);
  int64_t freed_bytes = from_bytes - to_bytes;
  int64_t freed_objects = from_objects - to_objects;
  freed_bytes_.FetchAndAdd(freed_bytes);
  freed_objects_.FetchAndAdd(freed_objects);
  // Note: Freed bytes can be negative if we copy from a compacted space to a free-list backed
  // space.
  heap_->RecordFree(freed_objects, freed_bytes);
  timings_.StartSplit("PreSweepingGcVerification");
  heap_->PreSweepingGcVerification(this);
  timings_.EndSplit();

  {
    WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
    // Reclaim unmarked objects.
    Sweep(false);
    // Swap the live and mark bitmaps for each space which we modified. This is an optimization
    // that enables us to not clear live bits inside of the sweep. Only swaps unbound bitmaps.
    timings_.StartSplit("SwapBitmaps");
    SwapBitmaps();
    timings_.EndSplit();
    // Unbind the live and mark bitmaps.
    UnBindBitmaps();
  }
  // Release the memory used by the from space.
  if (kResetFromSpace) {
    // Clearing from space.
    from_space_->Clear();
  }
  // Protect the from space.
  VLOG(heap)
      << "mprotect region " << reinterpret_cast<void*>(from_space_->Begin()) << " - "
      << reinterpret_cast<void*>(from_space_->Limit());
  if (kProtectFromSpace) {
    mprotect(from_space_->Begin(), from_space_->Capacity(), PROT_NONE);
  } else {
    mprotect(from_space_->Begin(), from_space_->Capacity(), PROT_READ);
  }

  if (generational_) {
    // Record the end (top) of the to space so we can distinguish between objects that were
    // allocated since the last GC and the older objects.
    last_gc_to_space_end_ = to_space_->End();
  }
}

void SemiSpace::ResizeMarkStack(size_t new_size) {
  std::vector<Object*> temp(mark_stack_->Begin(), mark_stack_->End());
  CHECK_LE(mark_stack_->Size(), new_size);
  mark_stack_->Resize(new_size);
  for (const auto& obj : temp) {
    mark_stack_->PushBack(obj);
  }
}

inline void SemiSpace::MarkStackPush(Object* obj) {
  if (UNLIKELY(mark_stack_->Size() >= mark_stack_->Capacity())) {
    ResizeMarkStack(mark_stack_->Capacity() * 2);
  }
  // The object must be pushed on to the mark stack.
  mark_stack_->PushBack(obj);
}

// Rare case, probably not worth inlining since it will increase instruction cache miss rate.
bool SemiSpace::MarkLargeObject(const Object* obj) {
  // TODO: support >1 discontinuous space.
  space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
  DCHECK(large_object_space->Contains(obj));
  accounting::ObjectSet* large_objects = large_object_space->GetMarkObjects();
  if (UNLIKELY(!large_objects->Test(obj))) {
    large_objects->Set(obj);
    return true;
  }
  return false;
}

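// Copy a from-space object that has no forwarding address yet and return its new address. In
// generational mode, objects that survived the previous GC (those below last_gc_to_space_end_)
// are pseudo-promoted into the main free list space; younger objects, and promotions that fail
// for lack of space, are copied into the to-space.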
mirror::Object* SemiSpace::MarkNonForwardedObject(mirror::Object* obj) {
  size_t object_size = obj->SizeOf();
  size_t bytes_allocated;
  mirror::Object* forward_address = nullptr;
  if (generational_ && reinterpret_cast<byte*>(obj) < last_gc_to_space_end_) {
    // If it's allocated before the last GC (older), move (pseudo-promote) it to the main free
    // list space (as sort of an old generation.)
    size_t bytes_promoted;
    space::MallocSpace* promo_dest_space = GetHeap()->GetPrimaryFreeListSpace();
    forward_address = promo_dest_space->Alloc(self_, object_size, &bytes_promoted);
    if (forward_address == nullptr) {
      // If out of space, fall back to the to-space.
      forward_address = to_space_->Alloc(self_, object_size, &bytes_allocated);
    } else {
      GetHeap()->num_bytes_allocated_.FetchAndAdd(bytes_promoted);
      bytes_promoted_ += bytes_promoted;
      // Handle the bitmaps marking.
      accounting::SpaceBitmap* live_bitmap = promo_dest_space->GetLiveBitmap();
      DCHECK(live_bitmap != nullptr);
      accounting::SpaceBitmap* mark_bitmap = promo_dest_space->GetMarkBitmap();
      DCHECK(mark_bitmap != nullptr);
      DCHECK(!live_bitmap->Test(forward_address));
      if (!whole_heap_collection_) {
        // If collecting the bump pointer spaces only, live_bitmap == mark_bitmap.
        DCHECK_EQ(live_bitmap, mark_bitmap);

        // If a bump pointer space only collection, delay the live bitmap marking of the
        // promoted object until it's popped off the mark stack (ProcessMarkStack()). The
        // rationale: we may be in the middle of scanning the objects in the promo destination
        // space for non-moving-space-to-bump-pointer-space references by iterating over the
        // marked bits of the live bitmap (MarkReachableObjects()). If we don't delay it (and
        // instead mark the promoted object here), the above promo destination space scan could
        // encounter the just-promoted object and forward the references in the promoted
        // object's fields even though it is pushed onto the mark stack. If this happens, the
        // promoted object would be in an inconsistent state, that is, it's on the mark stack
        // (gray) but its fields are already forwarded (black), which would cause a
        // DCHECK(!to_space_->HasAddress(obj)) failure below.
      } else {
        // Mark forward_address on the live bit map.
        live_bitmap->Set(forward_address);
        // Mark forward_address on the mark bit map.
        DCHECK(!mark_bitmap->Test(forward_address));
        mark_bitmap->Set(forward_address);
      }
    }
    DCHECK(forward_address != nullptr);
  } else {
    // If it's allocated after the last GC (younger), copy it to the to-space.
    forward_address = to_space_->Alloc(self_, object_size, &bytes_allocated);
  }
  // Copy over the object and add it to the mark stack since we still need to update its
  // references.
  memcpy(reinterpret_cast<void*>(forward_address), obj, object_size);
  if (to_space_live_bitmap_ != nullptr) {
    to_space_live_bitmap_->Set(forward_address);
  }
  DCHECK(to_space_->HasAddress(forward_address) ||
         (generational_ && GetHeap()->GetPrimaryFreeListSpace()->HasAddress(forward_address)));
  return forward_address;
}

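// The forwarding address of a copied object is stashed in its lock word. The decoding side
// lives in GetForwardingAddressInFromSpace() (semi_space-inl.h) and conceptually looks like
// the following (an illustrative sketch, not the verbatim inline implementation):
//
//   LockWord lock_word = obj->GetLockWord();
//   if (lock_word.GetState() != LockWord::kForwardingAddress) {
//     return nullptr;  // Not yet copied.
//   }
//   return reinterpret_cast<mirror::Object*>(lock_word.ForwardingAddress());
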
// Used to mark and copy objects. Any newly-marked objects which are in the from space get
// moved to the to-space and have their forward address updated. Objects which have been newly
// marked are pushed on the mark stack.
Object* SemiSpace::MarkObject(Object* obj) {
  Object* forward_address = obj;
  if (obj != nullptr && !IsImmune(obj)) {
    if (from_space_->HasAddress(obj)) {
      forward_address = GetForwardingAddressInFromSpace(obj);
      // If the object has already been moved, return the new forward address.
      if (forward_address == nullptr) {
        forward_address = MarkNonForwardedObject(obj);
        DCHECK(forward_address != nullptr);
        // Make sure to only update the forwarding address AFTER you copy the object so that
        // the monitor word doesn't get stomped over.
        obj->SetLockWord(LockWord::FromForwardingAddress(
            reinterpret_cast<size_t>(forward_address)));
        // Push the object onto the mark stack for later processing.
        MarkStackPush(forward_address);
      }
      // TODO: Do we need this if in the else statement?
    } else {
      accounting::SpaceBitmap* object_bitmap =
          heap_->GetMarkBitmap()->GetContinuousSpaceBitmap(obj);
      if (LIKELY(object_bitmap != nullptr)) {
        if (generational_) {
          // In a bump pointer space only collection, we should not reach here as we don't/
          // won't mark the objects in the non-moving space (except for the promoted objects.)
          // Note the non-moving space is added to the immune space.
          DCHECK(whole_heap_collection_);
        }
        // This object was not previously marked.
        if (!object_bitmap->Test(obj)) {
          object_bitmap->Set(obj);
          MarkStackPush(obj);
        }
      } else {
        DCHECK(!to_space_->HasAddress(obj)) << "Marking object in to_space_";
        if (MarkLargeObject(obj)) {
          MarkStackPush(obj);
        }
      }
    }
  }
  return forward_address;
}

Object* SemiSpace::RecursiveMarkObjectCallback(Object* root, void* arg) {
  DCHECK(root != nullptr);
  DCHECK(arg != nullptr);
  SemiSpace* semi_space = reinterpret_cast<SemiSpace*>(arg);
  mirror::Object* ret = semi_space->MarkObject(root);
  semi_space->ProcessMarkStack(true);
  return ret;
}

Object* SemiSpace::MarkRootCallback(Object* root, void* arg) {
  DCHECK(root != nullptr);
  DCHECK(arg != nullptr);
  return reinterpret_cast<SemiSpace*>(arg)->MarkObject(root);
}

// Marks all objects in the root set.
void SemiSpace::MarkRoots() {
  timings_.StartSplit("MarkRoots");
  // TODO: Visit up image roots as well?
  Runtime::Current()->VisitRoots(MarkRootCallback, this, false, true);
  timings_.EndSplit();
}

void SemiSpace::BindLiveToMarkBitmap(space::ContinuousSpace* space) {
  CHECK(space->IsMallocSpace());
  space::MallocSpace* alloc_space = space->AsMallocSpace();
  accounting::SpaceBitmap* live_bitmap = space->GetLiveBitmap();
  accounting::SpaceBitmap* mark_bitmap = alloc_space->BindLiveToMarkBitmap();
  GetHeap()->GetMarkBitmap()->ReplaceBitmap(mark_bitmap, live_bitmap);
}

mirror::Object* SemiSpace::MarkedForwardingAddressCallback(Object* object, void* arg) {
  return reinterpret_cast<SemiSpace*>(arg)->GetMarkedForwardAddress(object);
}

void SemiSpace::SweepSystemWeaks() {
  timings_.StartSplit("SweepSystemWeaks");
  Runtime::Current()->SweepSystemWeaks(MarkedForwardingAddressCallback, this);
  timings_.EndSplit();
}

bool SemiSpace::ShouldSweepSpace(space::MallocSpace* space) const {
  return space != from_space_ && space != to_space_ && !IsImmuneSpace(space);
}

void SemiSpace::Sweep(bool swap_bitmaps) {
  DCHECK(mark_stack_->IsEmpty());
  TimingLogger::ScopedSplit split("Sweep", &timings_);
  for (const auto& space : GetHeap()->GetContinuousSpaces()) {
    if (space->IsMallocSpace()) {
      space::MallocSpace* malloc_space = space->AsMallocSpace();
      if (!ShouldSweepSpace(malloc_space)) {
        continue;
      }
      TimingLogger::ScopedSplit split2(
          malloc_space->IsZygoteSpace() ? "SweepZygoteSpace" : "SweepAllocSpace", &timings_);
      size_t freed_objects = 0;
      size_t freed_bytes = 0;
      malloc_space->Sweep(swap_bitmaps, &freed_objects, &freed_bytes);
      heap_->RecordFree(freed_objects, freed_bytes);
      freed_objects_.FetchAndAdd(freed_objects);
      freed_bytes_.FetchAndAdd(freed_bytes);
    }
  }
  if (!is_large_object_space_immune_) {
    SweepLargeObjects(swap_bitmaps);
  }
}

void SemiSpace::SweepLargeObjects(bool swap_bitmaps) {
  DCHECK(!is_large_object_space_immune_);
  TimingLogger::ScopedSplit split("SweepLargeObjects", &timings_);
  size_t freed_objects = 0;
  size_t freed_bytes = 0;
  GetHeap()->GetLargeObjectsSpace()->Sweep(swap_bitmaps, &freed_objects, &freed_bytes);
  freed_large_objects_.FetchAndAdd(freed_objects);
  freed_large_object_bytes_.FetchAndAdd(freed_bytes);
  GetHeap()->RecordFree(freed_objects, freed_bytes);
}

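// java.lang.ref.Reference processing: referents are not marked eagerly during the trace.
// DelayReferenceReferent() queues each Reference whose referent is not yet marked, and the
// heap later resolves the queues through MarkedForwardingAddressCallback and
// RecursiveMarkObjectCallback (see ProcessReferences()).
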
// Process the "referent" field in a java.lang.ref.Reference. If the referent has not yet been
// marked, put it on the appropriate list in the heap for later processing.
void SemiSpace::DelayReferenceReferent(mirror::Class* klass, Object* obj) {
  heap_->DelayReferenceReferent(klass, obj, MarkedForwardingAddressCallback, this);
}

// Visit all of the references of an object and update.
void SemiSpace::ScanObject(Object* obj) {
  DCHECK(obj != nullptr);
  DCHECK(!from_space_->HasAddress(obj)) << "Scanning object " << obj << " in from space";
  MarkSweep::VisitObjectReferences(obj, [this](Object* obj, Object* ref,
                                               const MemberOffset& offset,
                                               bool /* is_static */)
      ALWAYS_INLINE_LAMBDA NO_THREAD_SAFETY_ANALYSIS {
    mirror::Object* new_address = MarkObject(ref);
    if (new_address != ref) {
      DCHECK(new_address != nullptr);
      // No need to mark the card since we are updating the object address and not changing the
      // actual objects it's pointing to. Using SetFieldPtr is better in this case since it does
      // not dirty cards or use additional memory.
      obj->SetFieldPtr(offset, new_address, false);
    }
  }, kMovingClasses);
  mirror::Class* klass = obj->GetClass();
  if (UNLIKELY(klass->IsReferenceClass())) {
    DelayReferenceReferent(klass, obj);
  }
}

// Scan anything that's on the mark stack.
void SemiSpace::ProcessMarkStack(bool paused) {
  space::MallocSpace* promo_dest_space = nullptr;
  accounting::SpaceBitmap* live_bitmap = nullptr;
  if (generational_ && !whole_heap_collection_) {
    // In a bump pointer space only collection (and when the promotion is enabled,) we delay
    // the live-bitmap marking of promoted objects from MarkObject() until this function.
    promo_dest_space = GetHeap()->GetPrimaryFreeListSpace();
    live_bitmap = promo_dest_space->GetLiveBitmap();
    DCHECK(live_bitmap != nullptr);
    accounting::SpaceBitmap* mark_bitmap = promo_dest_space->GetMarkBitmap();
    DCHECK(mark_bitmap != nullptr);
    DCHECK_EQ(live_bitmap, mark_bitmap);
  }
  timings_.StartSplit(paused ? "(paused)ProcessMarkStack" : "ProcessMarkStack");
  while (!mark_stack_->IsEmpty()) {
    Object* obj = mark_stack_->PopBack();
    if (generational_ && !whole_heap_collection_ && promo_dest_space->HasAddress(obj)) {
      // obj has just been promoted. Mark the live bitmap for it, which is delayed from
      // MarkObject().
      DCHECK(!live_bitmap->Test(obj));
      live_bitmap->Set(obj);
    }
    ScanObject(obj);
  }
  timings_.EndSplit();
}

inline Object* SemiSpace::GetMarkedForwardAddress(mirror::Object* obj) const
    SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
  // All immune objects are assumed marked.
  if (IsImmune(obj)) {
    return obj;
  }
  if (from_space_->HasAddress(obj)) {
    mirror::Object* forwarding_address = GetForwardingAddressInFromSpace(const_cast<Object*>(obj));
    return forwarding_address;  // Returns either the forwarding address or nullptr.
  } else if (to_space_->HasAddress(obj)) {
    // Should be unlikely. Already forwarded, must be marked.
    return obj;
  }
  return heap_->GetMarkBitmap()->Test(obj) ? obj : nullptr;
}

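// Undo the live-to-mark bitmap aliasing set up in BindLiveToMarkBitmap(), re-registering each
// malloc space's original mark bitmap with the heap.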
void SemiSpace::UnBindBitmaps() {
  TimingLogger::ScopedSplit split("UnBindBitmaps", &timings_);
  for (const auto& space : GetHeap()->GetContinuousSpaces()) {
    if (space->IsMallocSpace()) {
      space::MallocSpace* alloc_space = space->AsMallocSpace();
      if (alloc_space->HasBoundBitmaps()) {
        alloc_space->UnBindBitmaps();
        heap_->GetMarkBitmap()->ReplaceBitmap(alloc_space->GetLiveBitmap(),
                                              alloc_space->GetMarkBitmap());
      }
    }
  }
}

void SemiSpace::SetToSpace(space::ContinuousMemMapAllocSpace* to_space) {
  DCHECK(to_space != nullptr);
  to_space_ = to_space;
}

void SemiSpace::SetFromSpace(space::ContinuousMemMapAllocSpace* from_space) {
  DCHECK(from_space != nullptr);
  from_space_ = from_space;
}

void SemiSpace::FinishPhase() {
  TimingLogger::ScopedSplit split("FinishPhase", &timings_);
  // Can't enqueue references if we hold the mutator lock.
  Heap* heap = GetHeap();
  timings_.NewSplit("PostGcVerification");
  heap->PostGcVerification(this);

  // Null the "to" and "from" spaces since compacting from one to the other isn't valid until
  // further action is done by the heap.
  to_space_ = nullptr;
  from_space_ = nullptr;

  // Update the cumulative statistics.
  total_freed_objects_ += GetFreedObjects() + GetFreedLargeObjects();
  total_freed_bytes_ += GetFreedBytes() + GetFreedLargeObjectBytes();

  // Ensure that the mark stack is empty.
  CHECK(mark_stack_->IsEmpty());

  // Update the cumulative loggers.
  cumulative_timings_.Start();
  cumulative_timings_.AddLogger(timings_);
  cumulative_timings_.End();

  // Clear all of the spaces' mark bitmaps.
  for (const auto& space : GetHeap()->GetContinuousSpaces()) {
    accounting::SpaceBitmap* bitmap = space->GetMarkBitmap();
    if (bitmap != nullptr &&
        space->GetGcRetentionPolicy() != space::kGcRetentionPolicyNeverCollect) {
      bitmap->Clear();
    }
  }
  mark_stack_->Reset();

  // Reset the marked large objects.
  space::LargeObjectSpace* large_objects = GetHeap()->GetLargeObjectsSpace();
  large_objects->GetMarkObjects()->Clear();

  if (generational_) {
    // Decide whether to do a whole heap collection or a bump pointer space only collection at
    // the next collection by updating whole_heap_collection. Enable whole_heap_collection once
    // every kDefaultWholeHeapCollectionInterval collections.
    if (!whole_heap_collection_) {
      --whole_heap_collection_interval_counter_;
      DCHECK_GE(whole_heap_collection_interval_counter_, 0);
      if (whole_heap_collection_interval_counter_ == 0) {
        whole_heap_collection_ = true;
      }
    } else {
      DCHECK_EQ(whole_heap_collection_interval_counter_, 0);
      whole_heap_collection_interval_counter_ = kDefaultWholeHeapCollectionInterval;
      whole_heap_collection_ = false;
    }
  }
}

}  // namespace collector
}  // namespace gc
}  // namespace art