semi_space.cc revision b122a4bbed34ab22b4c1541ee25e5cf22f12a926
/*
 * Copyright (C) 2013 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "semi_space.h"

#include <functional>
#include <numeric>
#include <climits>
#include <vector>

#include "base/logging.h"
#include "base/macros.h"
#include "base/mutex-inl.h"
#include "base/timing_logger.h"
#include "gc/accounting/heap_bitmap.h"
#include "gc/accounting/mod_union_table.h"
#include "gc/accounting/space_bitmap-inl.h"
#include "gc/heap.h"
#include "gc/space/bump_pointer_space.h"
#include "gc/space/bump_pointer_space-inl.h"
#include "gc/space/image_space.h"
#include "gc/space/large_object_space.h"
#include "gc/space/space-inl.h"
#include "indirect_reference_table.h"
#include "intern_table.h"
#include "jni_internal.h"
#include "mark_sweep-inl.h"
#include "monitor.h"
#include "mirror/art_field.h"
#include "mirror/art_field-inl.h"
#include "mirror/class-inl.h"
#include "mirror/class_loader.h"
#include "mirror/dex_cache.h"
#include "mirror/object-inl.h"
#include "mirror/object_array.h"
#include "mirror/object_array-inl.h"
#include "runtime.h"
#include "semi_space-inl.h"
#include "thread-inl.h"
#include "thread_list.h"
#include "verifier/method_verifier.h"

using ::art::mirror::Class;
using ::art::mirror::Object;

namespace art {
namespace gc {
namespace collector {

static constexpr bool kProtectFromSpace = true;
static constexpr bool kResetFromSpace = true;
// TODO: move this to a new file as a new garbage collector?
static constexpr bool kEnableSimplePromo = false;

// TODO: Unduplicate logic.
void SemiSpace::ImmuneSpace(space::ContinuousSpace* space) {
  // Bind live to mark bitmap if necessary.
  if (space->GetLiveBitmap() != space->GetMarkBitmap()) {
    BindLiveToMarkBitmap(space);
  }
  // Add the space to the immune region.
  if (immune_begin_ == nullptr) {
    DCHECK(immune_end_ == nullptr);
    immune_begin_ = reinterpret_cast<Object*>(space->Begin());
    immune_end_ = reinterpret_cast<Object*>(space->End());
  } else {
    const space::ContinuousSpace* prev_space = nullptr;
    // Find out if the previous space is immune.
    for (space::ContinuousSpace* cur_space : GetHeap()->GetContinuousSpaces()) {
      if (cur_space == space) {
        break;
      }
      prev_space = cur_space;
    }
    // If previous space was immune, then extend the immune region. Relies on continuous spaces
    // being sorted by Heap::AddContinuousSpace.
    if (prev_space != nullptr && IsImmuneSpace(prev_space)) {
      immune_begin_ = std::min(reinterpret_cast<Object*>(space->Begin()), immune_begin_);
      immune_end_ = std::max(reinterpret_cast<Object*>(space->End()), immune_end_);
    }
  }
}

void SemiSpace::BindBitmaps() {
  timings_.StartSplit("BindBitmaps");
  WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
  // Mark all of the spaces we never collect as immune.
  for (const auto& space : GetHeap()->GetContinuousSpaces()) {
    if (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyNeverCollect
        || space->GetGcRetentionPolicy() == space::kGcRetentionPolicyFullCollect) {
      ImmuneSpace(space);
    }
  }
  timings_.EndSplit();
}

SemiSpace::SemiSpace(Heap* heap, const std::string& name_prefix)
    : GarbageCollector(heap,
                       name_prefix + (name_prefix.empty() ? "" : " ") + "marksweep + semispace"),
      mark_stack_(nullptr),
      immune_begin_(nullptr),
      immune_end_(nullptr),
      to_space_(nullptr),
      from_space_(nullptr),
      soft_reference_list_(nullptr),
      weak_reference_list_(nullptr),
      finalizer_reference_list_(nullptr),
      phantom_reference_list_(nullptr),
      cleared_reference_list_(nullptr),
      self_(nullptr),
      last_gc_to_space_end_(nullptr),
      bytes_promoted_(0) {
}

void SemiSpace::InitializePhase() {
  timings_.Reset();
  TimingLogger::ScopedSplit split("InitializePhase", &timings_);
  mark_stack_ = heap_->mark_stack_.get();
  DCHECK(mark_stack_ != nullptr);
  immune_begin_ = nullptr;
  immune_end_ = nullptr;
  soft_reference_list_ = nullptr;
  weak_reference_list_ = nullptr;
  finalizer_reference_list_ = nullptr;
  phantom_reference_list_ = nullptr;
  cleared_reference_list_ = nullptr;
  self_ = Thread::Current();
  // Do any pre GC verification.
  timings_.NewSplit("PreGcVerification");
  heap_->PreGcVerification(this);
}

void SemiSpace::ProcessReferences(Thread* self) {
  TimingLogger::ScopedSplit split("ProcessReferences", &timings_);
  WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
  GetHeap()->ProcessReferences(timings_, clear_soft_references_, &MarkedForwardingAddressCallback,
                               &RecursiveMarkObjectCallback, this);
}

void SemiSpace::MarkingPhase() {
  Thread* self = Thread::Current();
  Locks::mutator_lock_->AssertExclusiveHeld(self);
  TimingLogger::ScopedSplit split("MarkingPhase", &timings_);
  // Need to do this with mutators paused so that somebody doesn't accidentally allocate into the
  // wrong space.
  heap_->SwapSemiSpaces();
  if (kEnableSimplePromo) {
    // If last_gc_to_space_end_ is out of the bounds of the from-space
    // (the to-space from the last GC), then point it to the beginning of
    // the from-space. For example, the very first GC or the
    // pre-zygote compaction.
    if (!from_space_->HasAddress(reinterpret_cast<mirror::Object*>(last_gc_to_space_end_))) {
      last_gc_to_space_end_ = from_space_->Begin();
    }
    // Reset this before the marking starts below.
    bytes_promoted_ = 0;
  }
  // Assume the cleared space is already empty.
  BindBitmaps();
  // Process dirty cards and add dirty cards to mod-union tables.
  heap_->ProcessCards(timings_);
  // Clear the whole card table since we cannot get any additional dirty cards during the
  // paused GC. This saves memory but only works for pause-the-world collectors.
  timings_.NewSplit("ClearCardTable");
  heap_->GetCardTable()->ClearCardTable();
  // Need to do this before the checkpoint since we don't want any threads to add references to
  // the live stack during the recursive mark.
  timings_.NewSplit("SwapStacks");
  heap_->SwapStacks();
  WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
  MarkRoots();
  // Mark roots of immune spaces.
  UpdateAndMarkModUnion();
  // Recursively mark remaining objects.
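  // This also drains the mark stack: from-space objects are copied out by MarkObject as they are
  // first visited, and their references are updated by ScanObject.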
  MarkReachableObjects();
}

bool SemiSpace::IsImmuneSpace(const space::ContinuousSpace* space) const {
  return
      immune_begin_ <= reinterpret_cast<Object*>(space->Begin()) &&
      immune_end_ >= reinterpret_cast<Object*>(space->End());
}

void SemiSpace::UpdateAndMarkModUnion() {
  for (auto& space : heap_->GetContinuousSpaces()) {
    // If the space is immune then we need to mark the references to other spaces.
    if (IsImmuneSpace(space)) {
      accounting::ModUnionTable* table = heap_->FindModUnionTableFromSpace(space);
      CHECK(table != nullptr);
      // TODO: Improve naming.
      TimingLogger::ScopedSplit split(
          space->IsZygoteSpace() ? "UpdateAndMarkZygoteModUnionTable" :
                                   "UpdateAndMarkImageModUnionTable",
          &timings_);
      table->UpdateAndMarkReferences(MarkRootCallback, this);
    }
  }
}

void SemiSpace::MarkReachableObjects() {
  timings_.StartSplit("MarkStackAsLive");
  accounting::ObjectStack* live_stack = heap_->GetLiveStack();
  heap_->MarkAllocStackAsLive(live_stack);
  live_stack->Reset();
  timings_.EndSplit();
  // Recursively process the mark stack.
  ProcessMarkStack(true);
}

void SemiSpace::ReclaimPhase() {
  TimingLogger::ScopedSplit split("ReclaimPhase", &timings_);
  Thread* self = Thread::Current();
  ProcessReferences(self);
  {
    ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
    SweepSystemWeaks();
  }
  // Record freed memory.
  int from_bytes = from_space_->GetBytesAllocated();
  int to_bytes = to_space_->GetBytesAllocated();
  int from_objects = from_space_->GetObjectsAllocated();
  int to_objects = to_space_->GetObjectsAllocated();
  int freed_bytes = from_bytes - to_bytes;
  int freed_objects = from_objects - to_objects;
  CHECK_GE(freed_bytes, 0);
  freed_bytes_.FetchAndAdd(freed_bytes);
  freed_objects_.FetchAndAdd(freed_objects);
  heap_->RecordFree(static_cast<size_t>(freed_objects), static_cast<size_t>(freed_bytes));

  timings_.StartSplit("PreSweepingGcVerification");
  heap_->PreSweepingGcVerification(this);
  timings_.EndSplit();

  {
    WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
    // Reclaim unmarked objects.
    Sweep(false);
    // Swap the live and mark bitmaps for each space which we modified. This is an optimization
    // that enables us to not clear live bits inside of the sweep. Only swaps unbound bitmaps.
    timings_.StartSplit("SwapBitmaps");
    SwapBitmaps();
    timings_.EndSplit();
    // Unbind the live and mark bitmaps.
    UnBindBitmaps();
  }
  // Release the memory used by the from space.
  if (kResetFromSpace) {
    // Clearing from space.
    from_space_->Clear();
  }
  // Protect the from space.
  VLOG(heap)
      << "mprotect region " << reinterpret_cast<void*>(from_space_->Begin()) << " - "
      << reinterpret_cast<void*>(from_space_->Limit());
  if (kProtectFromSpace) {
    mprotect(from_space_->Begin(), from_space_->Capacity(), PROT_NONE);
  } else {
    mprotect(from_space_->Begin(), from_space_->Capacity(), PROT_READ);
  }

  if (kEnableSimplePromo) {
    // Record the end (top) of the to space so we can distinguish
    // between objects that were allocated since the last GC and the
    // older objects.
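    // In the next collection this space becomes the from-space; MarkObject pseudo-promotes
    // objects below this boundary into the non-moving space instead of copying them to the
    // to-space.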
    last_gc_to_space_end_ = to_space_->End();
  }
}

void SemiSpace::ResizeMarkStack(size_t new_size) {
  std::vector<Object*> temp(mark_stack_->Begin(), mark_stack_->End());
  CHECK_LE(mark_stack_->Size(), new_size);
  mark_stack_->Resize(new_size);
  for (const auto& obj : temp) {
    mark_stack_->PushBack(obj);
  }
}

inline void SemiSpace::MarkStackPush(Object* obj) {
  if (UNLIKELY(mark_stack_->Size() >= mark_stack_->Capacity())) {
    ResizeMarkStack(mark_stack_->Capacity() * 2);
  }
  // The object must be pushed on to the mark stack.
  mark_stack_->PushBack(obj);
}

// Rare case, probably not worth inlining since it will increase instruction cache miss rate.
bool SemiSpace::MarkLargeObject(const Object* obj) {
  // TODO: support >1 discontinuous space.
  space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
  accounting::SpaceSetMap* large_objects = large_object_space->GetMarkObjects();
  if (UNLIKELY(!large_objects->Test(obj))) {
    large_objects->Set(obj);
    return true;
  }
  return false;
}

// Used to mark and copy objects. Any newly-marked objects which are in the from space get moved
// to the to-space and have their forwarding address updated. Objects which have been newly marked
// are pushed on the mark stack.
Object* SemiSpace::MarkObject(Object* obj) {
  Object* ret = obj;
  if (obj != nullptr && !IsImmune(obj)) {
    if (from_space_->HasAddress(obj)) {
      mirror::Object* forward_address = GetForwardingAddressInFromSpace(obj);
      // If the object has already been moved, return the new forwarding address.
      if (forward_address == nullptr) {
        // Otherwise, we need to move the object and add it to the mark stack for processing.
        size_t object_size = obj->SizeOf();
        size_t bytes_allocated = 0;
        if (kEnableSimplePromo && reinterpret_cast<byte*>(obj) < last_gc_to_space_end_) {
          // If it was allocated before the last GC (older), move (pseudo-promote) it to
          // the non-moving space (as sort of an old generation.)
          size_t bytes_promoted;
          space::MallocSpace* non_moving_space = GetHeap()->GetNonMovingSpace();
          forward_address = non_moving_space->Alloc(self_, object_size, &bytes_promoted);
          if (forward_address == nullptr) {
            // If out of space, fall back to the to-space.
            forward_address = to_space_->Alloc(self_, object_size, &bytes_allocated);
          } else {
            GetHeap()->num_bytes_allocated_.FetchAndAdd(bytes_promoted);
            bytes_promoted_ += bytes_promoted;
            // Mark forward_address on the live bit map.
            accounting::SpaceBitmap* live_bitmap = non_moving_space->GetLiveBitmap();
            DCHECK(live_bitmap != nullptr);
            DCHECK(!live_bitmap->Test(forward_address));
            live_bitmap->Set(forward_address);
            // Mark forward_address on the mark bit map.
            accounting::SpaceBitmap* mark_bitmap = non_moving_space->GetMarkBitmap();
            DCHECK(mark_bitmap != nullptr);
            DCHECK(!mark_bitmap->Test(forward_address));
            mark_bitmap->Set(forward_address);
          }
          DCHECK(forward_address != nullptr);
        } else {
          // If it was allocated after the last GC (younger), copy it to the to-space.
          forward_address = to_space_->Alloc(self_, object_size, &bytes_allocated);
        }
        // Copy over the object and add it to the mark stack since we still need to update its
        // references.
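        // The copy below includes the original lock word; only the from-space object's lock word
        // is replaced with the forwarding address afterwards.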
        memcpy(reinterpret_cast<void*>(forward_address), obj, object_size);
        // Make sure to only update the forwarding address AFTER you copy the object so that the
        // monitor word doesn't get stomped over.
        obj->SetLockWord(LockWord::FromForwardingAddress(reinterpret_cast<size_t>(forward_address)));
        MarkStackPush(forward_address);
      } else {
        DCHECK(to_space_->HasAddress(forward_address) ||
               (kEnableSimplePromo && GetHeap()->GetNonMovingSpace()->HasAddress(forward_address)));
      }
      ret = forward_address;
      // TODO: Do we need this if in the else statement?
    } else {
      accounting::SpaceBitmap* object_bitmap = heap_->GetMarkBitmap()->GetContinuousSpaceBitmap(obj);
      if (LIKELY(object_bitmap != nullptr)) {
        // This object was not previously marked.
        if (!object_bitmap->Test(obj)) {
          object_bitmap->Set(obj);
          MarkStackPush(obj);
        }
      } else {
        DCHECK(!to_space_->HasAddress(obj)) << "Marking object in to_space_";
        if (MarkLargeObject(obj)) {
          MarkStackPush(obj);
        }
      }
    }
  }
  return ret;
}

Object* SemiSpace::RecursiveMarkObjectCallback(Object* root, void* arg) {
  DCHECK(root != nullptr);
  DCHECK(arg != nullptr);
  SemiSpace* semi_space = reinterpret_cast<SemiSpace*>(arg);
  mirror::Object* ret = semi_space->MarkObject(root);
  semi_space->ProcessMarkStack(true);
  return ret;
}

Object* SemiSpace::MarkRootCallback(Object* root, void* arg) {
  DCHECK(root != nullptr);
  DCHECK(arg != nullptr);
  return reinterpret_cast<SemiSpace*>(arg)->MarkObject(root);
}

// Marks all objects in the root set.
void SemiSpace::MarkRoots() {
  timings_.StartSplit("MarkRoots");
  // TODO: Visit up image roots as well?
  Runtime::Current()->VisitRoots(MarkRootCallback, this, false, true);
  timings_.EndSplit();
}

void SemiSpace::BindLiveToMarkBitmap(space::ContinuousSpace* space) {
  CHECK(space->IsMallocSpace());
  space::MallocSpace* alloc_space = space->AsMallocSpace();
  accounting::SpaceBitmap* live_bitmap = space->GetLiveBitmap();
  accounting::SpaceBitmap* mark_bitmap = alloc_space->BindLiveToMarkBitmap();
  GetHeap()->GetMarkBitmap()->ReplaceBitmap(mark_bitmap, live_bitmap);
}

mirror::Object* SemiSpace::GetForwardingAddress(mirror::Object* obj) {
  if (from_space_->HasAddress(obj)) {
    LOG(FATAL) << "Shouldn't happen!";
    return GetForwardingAddressInFromSpace(obj);
  }
  return obj;
}

mirror::Object* SemiSpace::MarkedForwardingAddressCallback(Object* object, void* arg) {
  return reinterpret_cast<SemiSpace*>(arg)->GetMarkedForwardAddress(object);
}

void SemiSpace::SweepSystemWeaks() {
  timings_.StartSplit("SweepSystemWeaks");
  Runtime::Current()->SweepSystemWeaks(MarkedForwardingAddressCallback, this);
  timings_.EndSplit();
}

struct SweepCallbackContext {
  SemiSpace* mark_sweep;
  space::AllocSpace* space;
  Thread* self;
};

void SemiSpace::SweepCallback(size_t num_ptrs, Object** ptrs, void* arg) {
  SweepCallbackContext* context = static_cast<SweepCallbackContext*>(arg);
  SemiSpace* gc = context->mark_sweep;
  Heap* heap = gc->GetHeap();
  space::AllocSpace* space = context->space;
  Thread* self = context->self;
  Locks::heap_bitmap_lock_->AssertExclusiveHeld(self);
  size_t freed_bytes = space->FreeList(self, num_ptrs, ptrs);
  heap->RecordFree(num_ptrs, freed_bytes);
  gc->freed_objects_.FetchAndAdd(num_ptrs);
  gc->freed_bytes_.FetchAndAdd(freed_bytes);
}

void SemiSpace::ZygoteSweepCallback(size_t num_ptrs, Object** ptrs, void* arg) {
  SweepCallbackContext* context = static_cast<SweepCallbackContext*>(arg);
  Locks::heap_bitmap_lock_->AssertExclusiveHeld(context->self);
  Heap* heap = context->mark_sweep->GetHeap();
  // We don't free any actual memory to avoid dirtying the shared zygote pages.
  for (size_t i = 0; i < num_ptrs; ++i) {
    Object* obj = static_cast<Object*>(ptrs[i]);
    heap->GetLiveBitmap()->Clear(obj);
    heap->GetCardTable()->MarkCard(obj);
  }
}

void SemiSpace::Sweep(bool swap_bitmaps) {
  DCHECK(mark_stack_->IsEmpty());
  TimingLogger::ScopedSplit("Sweep", &timings_);

  const bool partial = (GetGcType() == kGcTypePartial);
  SweepCallbackContext scc;
  scc.mark_sweep = this;
  scc.self = Thread::Current();
  for (const auto& space : GetHeap()->GetContinuousSpaces()) {
    if (!space->IsMallocSpace()) {
      continue;
    }
    // We always sweep always-collect spaces.
    bool sweep_space = (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyAlwaysCollect);
    if (!partial && !sweep_space) {
      // We sweep full-collect spaces when the GC isn't a partial GC (i.e. it is a full GC).
      sweep_space = (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyFullCollect);
    }
    if (sweep_space && space->IsMallocSpace()) {
      uintptr_t begin = reinterpret_cast<uintptr_t>(space->Begin());
      uintptr_t end = reinterpret_cast<uintptr_t>(space->End());
      scc.space = space->AsMallocSpace();
      accounting::SpaceBitmap* live_bitmap = space->GetLiveBitmap();
      accounting::SpaceBitmap* mark_bitmap = space->GetMarkBitmap();
      if (swap_bitmaps) {
        std::swap(live_bitmap, mark_bitmap);
      }
      if (!space->IsZygoteSpace()) {
        TimingLogger::ScopedSplit split("SweepAllocSpace", &timings_);
        // Bitmaps are pre-swapped for optimization which enables sweeping with the heap unlocked.
        accounting::SpaceBitmap::SweepWalk(*live_bitmap, *mark_bitmap, begin, end,
                                           &SweepCallback, reinterpret_cast<void*>(&scc));
      } else {
        TimingLogger::ScopedSplit split("SweepZygote", &timings_);
        // Zygote sweep takes care of dirtying cards and clearing live bits, does not free actual
        // memory.
        accounting::SpaceBitmap::SweepWalk(*live_bitmap, *mark_bitmap, begin, end,
                                           &ZygoteSweepCallback, reinterpret_cast<void*>(&scc));
      }
    }
  }

  SweepLargeObjects(swap_bitmaps);
}

void SemiSpace::SweepLargeObjects(bool swap_bitmaps) {
  TimingLogger::ScopedSplit("SweepLargeObjects", &timings_);
  // Sweep large objects.
  space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
  accounting::SpaceSetMap* large_live_objects = large_object_space->GetLiveObjects();
  accounting::SpaceSetMap* large_mark_objects = large_object_space->GetMarkObjects();
  if (swap_bitmaps) {
    std::swap(large_live_objects, large_mark_objects);
  }
  // O(n*log(n)) but hopefully there are not too many large objects.
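  // Walk the live set; any large object absent from the mark set is unreachable and gets freed.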
  size_t freed_objects = 0;
  size_t freed_bytes = 0;
  Thread* self = Thread::Current();
  for (const Object* obj : large_live_objects->GetObjects()) {
    if (!large_mark_objects->Test(obj)) {
      freed_bytes += large_object_space->Free(self, const_cast<Object*>(obj));
      ++freed_objects;
    }
  }
  freed_large_objects_.FetchAndAdd(freed_objects);
  freed_large_object_bytes_.FetchAndAdd(freed_bytes);
  GetHeap()->RecordFree(freed_objects, freed_bytes);
}

// Process the "referent" field in a java.lang.ref.Reference. If the referent has not yet been
// marked, put it on the appropriate list in the heap for later processing.
void SemiSpace::DelayReferenceReferent(mirror::Class* klass, Object* obj) {
  heap_->DelayReferenceReferent(klass, obj, MarkedForwardingAddressCallback, this);
}

// Visit all of the references of an object and update.
void SemiSpace::ScanObject(Object* obj) {
  DCHECK(obj != NULL);
  DCHECK(!from_space_->HasAddress(obj)) << "Scanning object " << obj << " in from space";
  MarkSweep::VisitObjectReferences(obj, [this](Object* obj, Object* ref, const MemberOffset& offset,
      bool /* is_static */) ALWAYS_INLINE_LAMBDA NO_THREAD_SAFETY_ANALYSIS {
    mirror::Object* new_address = MarkObject(ref);
    if (new_address != ref) {
      DCHECK(new_address != nullptr);
      // Don't need to mark the card since we are updating the object address and not changing the
      // actual objects it is pointing to. Using SetFieldPtr is better in this case since it does
      // not dirty cards or use additional memory.
      obj->SetFieldPtr(offset, new_address, false);
    }
  }, kMovingClasses);
  mirror::Class* klass = obj->GetClass();
  if (UNLIKELY(klass->IsReferenceClass())) {
    DelayReferenceReferent(klass, obj);
  }
}

// Scan anything that's on the mark stack.
void SemiSpace::ProcessMarkStack(bool paused) {
  timings_.StartSplit(paused ? "(paused)ProcessMarkStack" : "ProcessMarkStack");
  while (!mark_stack_->IsEmpty()) {
    ScanObject(mark_stack_->PopBack());
  }
  timings_.EndSplit();
}

inline Object* SemiSpace::GetMarkedForwardAddress(mirror::Object* obj) const
    SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
  // All immune objects are assumed marked.
  if (IsImmune(obj)) {
    return obj;
  }
  if (from_space_->HasAddress(obj)) {
    mirror::Object* forwarding_address = GetForwardingAddressInFromSpace(const_cast<Object*>(obj));
    // If the object is forwarded then it MUST be marked.
    DCHECK(forwarding_address == nullptr || to_space_->HasAddress(forwarding_address) ||
           (kEnableSimplePromo && GetHeap()->GetNonMovingSpace()->HasAddress(forwarding_address)));
    if (forwarding_address != nullptr) {
      return forwarding_address;
    }
    // Must not be marked, return null.
    return nullptr;
  } else if (to_space_->HasAddress(obj)) {
    // Already forwarded, must be marked.
    return obj;
  }
  return heap_->GetMarkBitmap()->Test(obj) ?
      obj : nullptr;
}

void SemiSpace::UnBindBitmaps() {
  TimingLogger::ScopedSplit split("UnBindBitmaps", &timings_);
  for (const auto& space : GetHeap()->GetContinuousSpaces()) {
    if (space->IsMallocSpace()) {
      space::MallocSpace* alloc_space = space->AsMallocSpace();
      if (alloc_space->HasBoundBitmaps()) {
        alloc_space->UnBindBitmaps();
        heap_->GetMarkBitmap()->ReplaceBitmap(alloc_space->GetLiveBitmap(),
                                              alloc_space->GetMarkBitmap());
      }
    }
  }
}

void SemiSpace::SetToSpace(space::ContinuousMemMapAllocSpace* to_space) {
  DCHECK(to_space != nullptr);
  to_space_ = to_space;
}

void SemiSpace::SetFromSpace(space::ContinuousMemMapAllocSpace* from_space) {
  DCHECK(from_space != nullptr);
  from_space_ = from_space;
}

void SemiSpace::FinishPhase() {
  TimingLogger::ScopedSplit split("FinishPhase", &timings_);
  // Can't enqueue references if we hold the mutator lock.
  Heap* heap = GetHeap();
  timings_.NewSplit("PostGcVerification");
  heap->PostGcVerification(this);

  // Null the "to" and "from" spaces since compacting from one to the other isn't valid until
  // further action is done by the heap.
  to_space_ = nullptr;
  from_space_ = nullptr;

  // Update the cumulative statistics.
  total_freed_objects_ += GetFreedObjects() + GetFreedLargeObjects();
  total_freed_bytes_ += GetFreedBytes() + GetFreedLargeObjectBytes();

  // Ensure that the mark stack is empty.
  CHECK(mark_stack_->IsEmpty());

  // Update the cumulative loggers.
  cumulative_timings_.Start();
  cumulative_timings_.AddLogger(timings_);
  cumulative_timings_.End();

  // Clear all of the spaces' mark bitmaps.
  for (const auto& space : GetHeap()->GetContinuousSpaces()) {
    accounting::SpaceBitmap* bitmap = space->GetMarkBitmap();
    if (bitmap != nullptr &&
        space->GetGcRetentionPolicy() != space::kGcRetentionPolicyNeverCollect) {
      bitmap->Clear();
    }
  }
  mark_stack_->Reset();

  // Reset the marked large objects.
  space::LargeObjectSpace* large_objects = GetHeap()->GetLargeObjectsSpace();
  large_objects->GetMarkObjects()->Clear();
}

}  // namespace collector
}  // namespace gc
}  // namespace art