mark_sweep.cc revision 2775ee4f82dff260663ca16adddc0b15327aaa42
1/* 2 * Copyright (C) 2011 The Android Open Source Project 3 * 4 * Licensed under the Apache License, Version 2.0 (the "License"); 5 * you may not use this file except in compliance with the License. 6 * You may obtain a copy of the License at 7 * 8 * http://www.apache.org/licenses/LICENSE-2.0 9 * 10 * Unless required by applicable law or agreed to in writing, software 11 * distributed under the License is distributed on an "AS IS" BASIS, 12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 * See the License for the specific language governing permissions and 14 * limitations under the License. 15 */ 16 17#include "mark_sweep.h" 18 19#include <functional> 20#include <numeric> 21#include <climits> 22#include <vector> 23 24#include "base/bounded_fifo.h" 25#include "base/logging.h" 26#include "base/macros.h" 27#include "base/mutex-inl.h" 28#include "base/timing_logger.h" 29#include "gc/accounting/card_table-inl.h" 30#include "gc/accounting/heap_bitmap.h" 31#include "gc/accounting/space_bitmap-inl.h" 32#include "gc/heap.h" 33#include "gc/space/image_space.h" 34#include "gc/space/large_object_space.h" 35#include "gc/space/space-inl.h" 36#include "indirect_reference_table.h" 37#include "intern_table.h" 38#include "jni_internal.h" 39#include "monitor.h" 40#include "mark_sweep-inl.h" 41#include "mirror/art_field.h" 42#include "mirror/art_field-inl.h" 43#include "mirror/class-inl.h" 44#include "mirror/class_loader.h" 45#include "mirror/dex_cache.h" 46#include "mirror/object-inl.h" 47#include "mirror/object_array.h" 48#include "mirror/object_array-inl.h" 49#include "runtime.h" 50#include "thread-inl.h" 51#include "thread_list.h" 52#include "verifier/method_verifier.h" 53 54using ::art::mirror::ArtField; 55using ::art::mirror::Class; 56using ::art::mirror::Object; 57using ::art::mirror::ObjectArray; 58 59namespace art { 60namespace gc { 61namespace collector { 62 63// Performance options. 64constexpr bool kUseRecursiveMark = false; 65constexpr bool kUseMarkStackPrefetch = true; 66constexpr size_t kSweepArrayChunkFreeSize = 1024; 67 68// Parallelism options. 69constexpr bool kParallelCardScan = true; 70constexpr bool kParallelRecursiveMark = true; 71// Don't attempt to parallelize mark stack processing unless the mark stack is at least n 72// elements. This is temporary until we reduce the overhead caused by allocating tasks, etc.. Not 73// having this can add overhead in ProcessReferences since we may end up doing many calls of 74// ProcessMarkStack with very small mark stacks. 75constexpr size_t kMinimumParallelMarkStackSize = 128; 76constexpr bool kParallelProcessMarkStack = true; 77 78// Profiling and information flags. 79constexpr bool kCountClassesMarked = false; 80constexpr bool kProfileLargeObjects = false; 81constexpr bool kMeasureOverhead = false; 82constexpr bool kCountTasks = false; 83constexpr bool kCountJavaLangRefs = false; 84 85// Turn off kCheckLocks when profiling the GC since it slows the GC down by up to 40%. 86constexpr bool kCheckLocks = kDebugLocking; 87 88void MarkSweep::ImmuneSpace(space::ContinuousSpace* space) { 89 // Bind live to mark bitmap if necessary. 90 if (space->GetLiveBitmap() != space->GetMarkBitmap()) { 91 BindLiveToMarkBitmap(space); 92 } 93 94 // Add the space to the immune region. 
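// Note: the immune region is kept as a single contiguous [immune_begin_, immune_end_) range, so a space only extends the range when it is the first immune space or when the space immediately before it in the heap's sorted space list is already inside the range; otherwise the range is left unchanged.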
95 if (immune_begin_ == NULL) { 96 DCHECK(immune_end_ == NULL); 97 SetImmuneRange(reinterpret_cast<Object*>(space->Begin()), 98 reinterpret_cast<Object*>(space->End())); 99 } else { 100 const space::ContinuousSpace* prev_space = nullptr; 101 // Find out if the previous space is immune. 102 for (space::ContinuousSpace* cur_space : GetHeap()->GetContinuousSpaces()) { 103 if (cur_space == space) { 104 break; 105 } 106 prev_space = cur_space; 107 } 108 // If previous space was immune, then extend the immune region. Relies on continuous spaces 109 // being sorted by Heap::AddContinuousSpace. 110 if (prev_space != NULL && 111 immune_begin_ <= reinterpret_cast<Object*>(prev_space->Begin()) && 112 immune_end_ >= reinterpret_cast<Object*>(prev_space->End())) { 113 immune_begin_ = std::min(reinterpret_cast<Object*>(space->Begin()), immune_begin_); 114 immune_end_ = std::max(reinterpret_cast<Object*>(space->End()), immune_end_); 115 } 116 } 117} 118 119void MarkSweep::BindBitmaps() { 120 timings_.StartSplit("BindBitmaps"); 121 WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_); 122 // Mark all of the spaces we never collect as immune. 123 for (const auto& space : GetHeap()->GetContinuousSpaces()) { 124 if (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyNeverCollect) { 125 ImmuneSpace(space); 126 } 127 } 128 timings_.EndSplit(); 129} 130 131MarkSweep::MarkSweep(Heap* heap, bool is_concurrent, const std::string& name_prefix) 132 : GarbageCollector(heap, 133 name_prefix + (name_prefix.empty() ? "" : " ") + 134 (is_concurrent ? "concurrent mark sweep": "mark sweep")), 135 current_mark_bitmap_(NULL), 136 java_lang_Class_(NULL), 137 mark_stack_(NULL), 138 immune_begin_(NULL), 139 immune_end_(NULL), 140 soft_reference_list_(NULL), 141 weak_reference_list_(NULL), 142 finalizer_reference_list_(NULL), 143 phantom_reference_list_(NULL), 144 cleared_reference_list_(NULL), 145 gc_barrier_(new Barrier(0)), 146 large_object_lock_("mark sweep large object lock", kMarkSweepLargeObjectLock), 147 mark_stack_expand_lock_("mark sweep mark stack expand lock"), 148 is_concurrent_(is_concurrent), 149 clear_soft_references_(false) { 150} 151 152void MarkSweep::InitializePhase() { 153 timings_.Reset(); 154 base::TimingLogger::ScopedSplit split("InitializePhase", &timings_); 155 mark_stack_ = heap_->mark_stack_.get(); 156 DCHECK(mark_stack_ != nullptr); 157 SetImmuneRange(nullptr, nullptr); 158 soft_reference_list_ = nullptr; 159 weak_reference_list_ = nullptr; 160 finalizer_reference_list_ = nullptr; 161 phantom_reference_list_ = nullptr; 162 cleared_reference_list_ = nullptr; 163 freed_bytes_ = 0; 164 freed_large_object_bytes_ = 0; 165 freed_objects_ = 0; 166 freed_large_objects_ = 0; 167 class_count_ = 0; 168 array_count_ = 0; 169 other_count_ = 0; 170 large_object_test_ = 0; 171 large_object_mark_ = 0; 172 classes_marked_ = 0; 173 overhead_time_ = 0; 174 work_chunks_created_ = 0; 175 work_chunks_deleted_ = 0; 176 reference_count_ = 0; 177 java_lang_Class_ = Class::GetJavaLangClass(); 178 CHECK(java_lang_Class_ != nullptr); 179 180 FindDefaultMarkBitmap(); 181 182 // Do any pre GC verification. 
183 timings_.NewSplit("PreGcVerification"); 184 heap_->PreGcVerification(this); 185} 186 187void MarkSweep::ProcessReferences(Thread* self) { 188 WriterMutexLock mu(self, *Locks::heap_bitmap_lock_); 189 ProcessReferences(&soft_reference_list_, clear_soft_references_, &weak_reference_list_, 190 &finalizer_reference_list_, &phantom_reference_list_); 191} 192 193bool MarkSweep::HandleDirtyObjectsPhase() { 194 base::TimingLogger::ScopedSplit split("HandleDirtyObjectsPhase", &timings_); 195 Thread* self = Thread::Current(); 196 Locks::mutator_lock_->AssertExclusiveHeld(self); 197 198 { 199 WriterMutexLock mu(self, *Locks::heap_bitmap_lock_); 200 201 // Re-mark root set. 202 ReMarkRoots(); 203 204 // Scan dirty objects; this is only required if we are not doing concurrent GC. 205 RecursiveMarkDirtyObjects(true, accounting::CardTable::kCardDirty); 206 } 207 208 ProcessReferences(self); 209 210 // Only need to do this if we have the card mark verification on, and only during concurrent GC. 211 if (GetHeap()->verify_missing_card_marks_) { 212 WriterMutexLock mu(self, *Locks::heap_bitmap_lock_); 213 // This second sweep makes sure that we don't have any objects in the live stack which point to 214 // freed objects. These cause problems since their references may be previously freed objects. 215 SweepArray(GetHeap()->allocation_stack_.get(), false); 216 } 217 return true; 218} 219 220bool MarkSweep::IsConcurrent() const { 221 return is_concurrent_; 222} 223 224void MarkSweep::MarkingPhase() { 225 base::TimingLogger::ScopedSplit split("MarkingPhase", &timings_); 226 Heap* heap = GetHeap(); 227 Thread* self = Thread::Current(); 228 229 BindBitmaps(); 230 FindDefaultMarkBitmap(); 231 232 // Process dirty cards and add dirty cards to mod union tables. 233 heap->ProcessCards(timings_); 234 235 // Need to do this before the checkpoint since we don't want any threads to add references to 236 // the live stack during the recursive mark. 237 timings_.NewSplit("SwapStacks"); 238 heap->SwapStacks(); 239 240 WriterMutexLock mu(self, *Locks::heap_bitmap_lock_); 241 if (Locks::mutator_lock_->IsExclusiveHeld(self)) { 242 // If we exclusively hold the mutator lock, all threads must be suspended. 243 MarkRoots(); 244 } else { 245 MarkThreadRoots(self); 246 MarkNonThreadRoots(); 247 } 248 MarkConcurrentRoots(); 249 250 heap->UpdateAndMarkModUnion(this, timings_, GetGcType()); 251 MarkReachableObjects(); 252} 253 254void MarkSweep::MarkThreadRoots(Thread* self) { 255 MarkRootsCheckpoint(self); 256} 257 258void MarkSweep::MarkReachableObjects() { 259 // Mark everything allocated since the last GC as live so that we can sweep concurrently, 260 // knowing that new allocations won't be marked as live. 261 timings_.StartSplit("MarkStackAsLive"); 262 accounting::ObjectStack* live_stack = heap_->GetLiveStack(); 263 heap_->MarkAllocStack(heap_->alloc_space_->GetLiveBitmap(), 264 heap_->large_object_space_->GetLiveObjects(), live_stack); 265 live_stack->Reset(); 266 timings_.EndSplit(); 267 // Recursively mark all the non-image bits set in the mark bitmap.
268 RecursiveMark(); 269} 270 271void MarkSweep::ReclaimPhase() { 272 base::TimingLogger::ScopedSplit split("ReclaimPhase", &timings_); 273 Thread* self = Thread::Current(); 274 275 if (!IsConcurrent()) { 276 base::TimingLogger::ScopedSplit split("ProcessReferences", &timings_); 277 ProcessReferences(self); 278 } else { 279 base::TimingLogger::ScopedSplit split("UnMarkAllocStack", &timings_); 280 accounting::ObjectStack* allocation_stack = GetHeap()->allocation_stack_.get(); 281 WriterMutexLock mu(self, *Locks::heap_bitmap_lock_); 282 // The allocation stack contains things allocated since the start of the GC. These may have been 283 // marked during this GC, meaning they won't be eligible for reclaiming in the next sticky GC. 284 // Remove these objects from the mark bitmaps so that they will be eligible for sticky 285 // collection. 286 // There is a race here which is safely handled. Another thread such as the hprof could 287 // have flushed the alloc stack after we resumed the threads. This is safe however, since 288 // resetting the allocation stack zeros it out with madvise. This means that we will either 289 // read NULLs or attempt to unmark a newly allocated object which will not be marked in the 290 // first place. 291 mirror::Object** end = allocation_stack->End(); 292 for (mirror::Object** it = allocation_stack->Begin(); it != end; ++it) { 293 const Object* obj = *it; 294 if (obj != NULL) { 295 UnMarkObjectNonNull(obj); 296 } 297 } 298 } 299 300 // Before freeing anything, let's verify the heap. 301 if (kIsDebugBuild) { 302 ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_); 303 VerifyImageRoots(); 304 } 305 timings_.StartSplit("PreSweepingGcVerification"); 306 heap_->PreSweepingGcVerification(this); 307 timings_.EndSplit(); 308 309 { 310 WriterMutexLock mu(self, *Locks::heap_bitmap_lock_); 311 312 // Reclaim unmarked objects. 313 Sweep(false); 314 315 // Swap the live and mark bitmaps for each space which we modified. This is an 316 // optimization that enables us to not clear live bits inside of the sweep. Only swaps unbound 317 // bitmaps. 318 timings_.StartSplit("SwapBitmaps"); 319 SwapBitmaps(); 320 timings_.EndSplit(); 321 322 // Unbind the live and mark bitmaps. 323 UnBindBitmaps(); 324 } 325} 326 327void MarkSweep::SetImmuneRange(Object* begin, Object* end) { 328 immune_begin_ = begin; 329 immune_end_ = end; 330} 331 332void MarkSweep::FindDefaultMarkBitmap() { 333 base::TimingLogger::ScopedSplit split("FindDefaultMarkBitmap", &timings_); 334 for (const auto& space : GetHeap()->GetContinuousSpaces()) { 335 if (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyAlwaysCollect) { 336 current_mark_bitmap_ = space->GetMarkBitmap(); 337 CHECK(current_mark_bitmap_ != NULL); 338 return; 339 } 340 } 341 GetHeap()->DumpSpaces(); 342 LOG(FATAL) << "Could not find a default mark bitmap"; 343} 344 345void MarkSweep::ExpandMarkStack() { 346 // Rare case, no need to have Thread::Current be a parameter. 347 MutexLock mu(Thread::Current(), mark_stack_expand_lock_); 348 if (UNLIKELY(mark_stack_->Size() < mark_stack_->Capacity())) { 349 // Someone else acquired the lock and expanded the mark stack before us.
350 return; 351 } 352 std::vector<Object*> temp(mark_stack_->Begin(), mark_stack_->End()); 353 mark_stack_->Resize(mark_stack_->Capacity() * 2); 354 for (const auto& obj : temp) { 355 mark_stack_->PushBack(obj); 356 } 357} 358 359inline void MarkSweep::MarkObjectNonNullParallel(const Object* obj) { 360 DCHECK(obj != NULL); 361 if (MarkObjectParallel(obj)) { 362 while (UNLIKELY(!mark_stack_->AtomicPushBack(const_cast<Object*>(obj)))) { 363 // Only reason a push can fail is that the mark stack is full. 364 ExpandMarkStack(); 365 } 366 } 367} 368 369inline void MarkSweep::UnMarkObjectNonNull(const Object* obj) { 370 DCHECK(!IsImmune(obj)); 371 // Try to take advantage of locality of references within a space, failing this find the space 372 // the hard way. 373 accounting::SpaceBitmap* object_bitmap = current_mark_bitmap_; 374 if (UNLIKELY(!object_bitmap->HasAddress(obj))) { 375 accounting::SpaceBitmap* new_bitmap = heap_->GetMarkBitmap()->GetContinuousSpaceBitmap(obj); 376 if (LIKELY(new_bitmap != NULL)) { 377 object_bitmap = new_bitmap; 378 } else { 379 MarkLargeObject(obj, false); 380 return; 381 } 382 } 383 384 DCHECK(object_bitmap->HasAddress(obj)); 385 object_bitmap->Clear(obj); 386} 387 388inline void MarkSweep::MarkObjectNonNull(const Object* obj) { 389 DCHECK(obj != NULL); 390 391 if (IsImmune(obj)) { 392 DCHECK(IsMarked(obj)); 393 return; 394 } 395 396 // Try to take advantage of locality of references within a space, failing this find the space 397 // the hard way. 398 accounting::SpaceBitmap* object_bitmap = current_mark_bitmap_; 399 if (UNLIKELY(!object_bitmap->HasAddress(obj))) { 400 accounting::SpaceBitmap* new_bitmap = heap_->GetMarkBitmap()->GetContinuousSpaceBitmap(obj); 401 if (LIKELY(new_bitmap != NULL)) { 402 object_bitmap = new_bitmap; 403 } else { 404 MarkLargeObject(obj, true); 405 return; 406 } 407 } 408 409 // This object was not previously marked. 410 if (!object_bitmap->Test(obj)) { 411 object_bitmap->Set(obj); 412 // Do we need to expand the mark stack? 413 if (UNLIKELY(mark_stack_->Size() >= mark_stack_->Capacity())) { 414 ExpandMarkStack(); 415 } 416 // The object must be pushed on to the mark stack. 417 mark_stack_->PushBack(const_cast<Object*>(obj)); 418 } 419} 420 421// Rare case, probably not worth inlining since it will increase instruction cache miss rate. 422bool MarkSweep::MarkLargeObject(const Object* obj, bool set) { 423 // TODO: support >1 discontinuous space. 424 space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace(); 425 accounting::SpaceSetMap* large_objects = large_object_space->GetMarkObjects(); 426 if (kProfileLargeObjects) { 427 ++large_object_test_; 428 } 429 if (UNLIKELY(!large_objects->Test(obj))) { 430 if (!large_object_space->Contains(obj)) { 431 LOG(ERROR) << "Tried to mark " << obj << " not contained by any spaces"; 432 LOG(ERROR) << "Attempting see if it's a bad root"; 433 VerifyRoots(); 434 LOG(FATAL) << "Can't mark bad root"; 435 } 436 if (kProfileLargeObjects) { 437 ++large_object_mark_; 438 } 439 if (set) { 440 large_objects->Set(obj); 441 } else { 442 large_objects->Clear(obj); 443 } 444 return true; 445 } 446 return false; 447} 448 449inline bool MarkSweep::MarkObjectParallel(const Object* obj) { 450 DCHECK(obj != NULL); 451 452 if (IsImmune(obj)) { 453 DCHECK(IsMarked(obj)); 454 return false; 455 } 456 457 // Try to take advantage of locality of references within a space, failing this find the space 458 // the hard way. 
459 accounting::SpaceBitmap* object_bitmap = current_mark_bitmap_; 460 if (UNLIKELY(!object_bitmap->HasAddress(obj))) { 461 accounting::SpaceBitmap* new_bitmap = heap_->GetMarkBitmap()->GetContinuousSpaceBitmap(obj); 462 if (new_bitmap != NULL) { 463 object_bitmap = new_bitmap; 464 } else { 465 // TODO: Remove the Thread::Current here? 466 // TODO: Convert this to some kind of atomic marking? 467 MutexLock mu(Thread::Current(), large_object_lock_); 468 return MarkLargeObject(obj, true); 469 } 470 } 471 472 // Return true if the object was not previously marked. 473 return !object_bitmap->AtomicTestAndSet(obj); 474} 475 476// Used to mark objects when recursing. Recursion is done by moving 477// the finger across the bitmaps in address order and marking child 478// objects. Any newly-marked objects whose addresses are lower than 479// the finger won't be visited by the bitmap scan, so those objects 480// need to be added to the mark stack. 481inline void MarkSweep::MarkObject(const Object* obj) { 482 if (obj != NULL) { 483 MarkObjectNonNull(obj); 484 } 485} 486 487void MarkSweep::MarkRoot(const Object* obj) { 488 if (obj != NULL) { 489 MarkObjectNonNull(obj); 490 } 491} 492 493void MarkSweep::MarkRootParallelCallback(const Object* root, void* arg) { 494 DCHECK(root != NULL); 495 DCHECK(arg != NULL); 496 MarkSweep* mark_sweep = reinterpret_cast<MarkSweep*>(arg); 497 mark_sweep->MarkObjectNonNullParallel(root); 498} 499 500void MarkSweep::MarkObjectCallback(const Object* root, void* arg) { 501 DCHECK(root != NULL); 502 DCHECK(arg != NULL); 503 MarkSweep* mark_sweep = reinterpret_cast<MarkSweep*>(arg); 504 mark_sweep->MarkObjectNonNull(root); 505} 506 507void MarkSweep::ReMarkObjectVisitor(const Object* root, void* arg) { 508 DCHECK(root != NULL); 509 DCHECK(arg != NULL); 510 MarkSweep* mark_sweep = reinterpret_cast<MarkSweep*>(arg); 511 mark_sweep->MarkObjectNonNull(root); 512} 513 514void MarkSweep::VerifyRootCallback(const Object* root, void* arg, size_t vreg, 515 const StackVisitor* visitor) { 516 reinterpret_cast<MarkSweep*>(arg)->VerifyRoot(root, vreg, visitor); 517} 518 519void MarkSweep::VerifyRoot(const Object* root, size_t vreg, const StackVisitor* visitor) { 520 // See if the root is on any space bitmap. 521 if (GetHeap()->GetLiveBitmap()->GetContinuousSpaceBitmap(root) == NULL) { 522 space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace(); 523 if (!large_object_space->Contains(root)) { 524 LOG(ERROR) << "Found invalid root: " << root; 525 if (visitor != NULL) { 526 LOG(ERROR) << visitor->DescribeLocation() << " in VReg: " << vreg; 527 } 528 } 529 } 530} 531 532void MarkSweep::VerifyRoots() { 533 Runtime::Current()->GetThreadList()->VerifyRoots(VerifyRootCallback, this); 534} 535 536// Marks all objects in the root set. 537void MarkSweep::MarkRoots() { 538 timings_.StartSplit("MarkRoots"); 539 Runtime::Current()->VisitNonConcurrentRoots(MarkObjectCallback, this); 540 timings_.EndSplit(); 541} 542 543void MarkSweep::MarkNonThreadRoots() { 544 timings_.StartSplit("MarkNonThreadRoots"); 545 Runtime::Current()->VisitNonThreadRoots(MarkObjectCallback, this); 546 timings_.EndSplit(); 547} 548 549void MarkSweep::MarkConcurrentRoots() { 550 timings_.StartSplit("MarkConcurrentRoots"); 551 // Visit all runtime roots and clear dirty flags. 
552 Runtime::Current()->VisitConcurrentRoots(MarkObjectCallback, this, false, true); 553 timings_.EndSplit(); 554} 555 556void MarkSweep::CheckObject(const Object* obj) { 557 DCHECK(obj != NULL); 558 VisitObjectReferences(obj, [this](const Object* obj, const Object* ref, MemberOffset offset, 559 bool is_static) NO_THREAD_SAFETY_ANALYSIS { 560 Locks::heap_bitmap_lock_->AssertSharedHeld(Thread::Current()); 561 CheckReference(obj, ref, offset, is_static); 562 }); 563} 564 565void MarkSweep::VerifyImageRootVisitor(Object* root, void* arg) { 566 DCHECK(root != NULL); 567 DCHECK(arg != NULL); 568 MarkSweep* mark_sweep = reinterpret_cast<MarkSweep*>(arg); 569 DCHECK(mark_sweep->heap_->GetMarkBitmap()->Test(root)); 570 mark_sweep->CheckObject(root); 571} 572 573void MarkSweep::BindLiveToMarkBitmap(space::ContinuousSpace* space) { 574 CHECK(space->IsDlMallocSpace()); 575 space::DlMallocSpace* alloc_space = space->AsDlMallocSpace(); 576 accounting::SpaceBitmap* live_bitmap = space->GetLiveBitmap(); 577 accounting::SpaceBitmap* mark_bitmap = alloc_space->mark_bitmap_.release(); 578 GetHeap()->GetMarkBitmap()->ReplaceBitmap(mark_bitmap, live_bitmap); 579 alloc_space->temp_bitmap_.reset(mark_bitmap); 580 alloc_space->mark_bitmap_.reset(live_bitmap); 581} 582 583class ScanObjectVisitor { 584 public: 585 explicit ScanObjectVisitor(MarkSweep* const mark_sweep) ALWAYS_INLINE 586 : mark_sweep_(mark_sweep) {} 587 588 // TODO: Fixme when anotatalysis works with visitors. 589 void operator()(const Object* obj) const ALWAYS_INLINE NO_THREAD_SAFETY_ANALYSIS { 590 if (kCheckLocks) { 591 Locks::mutator_lock_->AssertSharedHeld(Thread::Current()); 592 Locks::heap_bitmap_lock_->AssertExclusiveHeld(Thread::Current()); 593 } 594 mark_sweep_->ScanObject(obj); 595 } 596 597 private: 598 MarkSweep* const mark_sweep_; 599}; 600 601template <bool kUseFinger = false> 602class MarkStackTask : public Task { 603 public: 604 MarkStackTask(ThreadPool* thread_pool, MarkSweep* mark_sweep, size_t mark_stack_size, 605 const Object** mark_stack) 606 : mark_sweep_(mark_sweep), 607 thread_pool_(thread_pool), 608 mark_stack_pos_(mark_stack_size) { 609 // We may have to copy part of an existing mark stack when another mark stack overflows. 610 if (mark_stack_size != 0) { 611 DCHECK(mark_stack != NULL); 612 // TODO: Check performance? 613 std::copy(mark_stack, mark_stack + mark_stack_size, mark_stack_); 614 } 615 if (kCountTasks) { 616 ++mark_sweep_->work_chunks_created_; 617 } 618 } 619 620 static const size_t kMaxSize = 1 * KB; 621 622 protected: 623 class ScanObjectParallelVisitor { 624 public: 625 explicit ScanObjectParallelVisitor(MarkStackTask<kUseFinger>* chunk_task) ALWAYS_INLINE 626 : chunk_task_(chunk_task) {} 627 628 void operator()(const Object* obj) const { 629 MarkSweep* mark_sweep = chunk_task_->mark_sweep_; 630 mark_sweep->ScanObjectVisit(obj, 631 [mark_sweep, this](const Object* /* obj */, const Object* ref, 632 const MemberOffset& /* offset */, bool /* is_static */) ALWAYS_INLINE { 633 if (ref != nullptr && mark_sweep->MarkObjectParallel(ref)) { 634 if (kUseFinger) { 635 android_memory_barrier(); 636 if (reinterpret_cast<uintptr_t>(ref) >= 637 static_cast<uintptr_t>(mark_sweep->atomic_finger_)) { 638 return; 639 } 640 } 641 chunk_task_->MarkStackPush(ref); 642 } 643 }); 644 } 645 646 private: 647 MarkStackTask<kUseFinger>* const chunk_task_; 648 }; 649 650 virtual ~MarkStackTask() { 651 // Make sure that we have cleared our mark stack. 
652 DCHECK_EQ(mark_stack_pos_, 0U); 653 if (kCountTasks) { 654 ++mark_sweep_->work_chunks_deleted_; 655 } 656 } 657 658 MarkSweep* const mark_sweep_; 659 ThreadPool* const thread_pool_; 660 // Thread local mark stack for this task. 661 const Object* mark_stack_[kMaxSize]; 662 // Mark stack position. 663 size_t mark_stack_pos_; 664 665 void MarkStackPush(const Object* obj) ALWAYS_INLINE { 666 if (UNLIKELY(mark_stack_pos_ == kMaxSize)) { 667 // Mark stack overflow, give 1/2 the stack to the thread pool as a new work task. 668 mark_stack_pos_ /= 2; 669 auto* task = new MarkStackTask(thread_pool_, mark_sweep_, kMaxSize - mark_stack_pos_, 670 mark_stack_ + mark_stack_pos_); 671 thread_pool_->AddTask(Thread::Current(), task); 672 } 673 DCHECK(obj != nullptr); 674 DCHECK(mark_stack_pos_ < kMaxSize); 675 mark_stack_[mark_stack_pos_++] = obj; 676 } 677 678 virtual void Finalize() { 679 delete this; 680 } 681 682 // Scans all of the objects 683 virtual void Run(Thread* self) { 684 ScanObjectParallelVisitor visitor(this); 685 // TODO: Tune this. 686 static const size_t kFifoSize = 4; 687 BoundedFifoPowerOfTwo<const Object*, kFifoSize> prefetch_fifo; 688 for (;;) { 689 const Object* obj = NULL; 690 if (kUseMarkStackPrefetch) { 691 while (mark_stack_pos_ != 0 && prefetch_fifo.size() < kFifoSize) { 692 const Object* obj = mark_stack_[--mark_stack_pos_]; 693 DCHECK(obj != NULL); 694 __builtin_prefetch(obj); 695 prefetch_fifo.push_back(obj); 696 } 697 if (UNLIKELY(prefetch_fifo.empty())) { 698 break; 699 } 700 obj = prefetch_fifo.front(); 701 prefetch_fifo.pop_front(); 702 } else { 703 if (UNLIKELY(mark_stack_pos_ == 0)) { 704 break; 705 } 706 obj = mark_stack_[--mark_stack_pos_]; 707 } 708 DCHECK(obj != NULL); 709 visitor(obj); 710 } 711 } 712}; 713 714class CardScanTask : public MarkStackTask<false> { 715 public: 716 CardScanTask(ThreadPool* thread_pool, MarkSweep* mark_sweep, accounting::SpaceBitmap* bitmap, 717 byte* begin, byte* end, byte minimum_age, size_t mark_stack_size, 718 const Object** mark_stack_obj) 719 : MarkStackTask<false>(thread_pool, mark_sweep, mark_stack_size, mark_stack_obj), 720 bitmap_(bitmap), 721 begin_(begin), 722 end_(end), 723 minimum_age_(minimum_age) { 724 } 725 726 protected: 727 accounting::SpaceBitmap* const bitmap_; 728 byte* const begin_; 729 byte* const end_; 730 const byte minimum_age_; 731 732 virtual void Finalize() { 733 delete this; 734 } 735 736 virtual void Run(Thread* self) NO_THREAD_SAFETY_ANALYSIS { 737 ScanObjectParallelVisitor visitor(this); 738 accounting::CardTable* card_table = mark_sweep_->GetHeap()->GetCardTable(); 739 card_table->Scan(bitmap_, begin_, end_, visitor, minimum_age_); 740 // Finish by emptying our local mark stack. 741 MarkStackTask::Run(self); 742 } 743}; 744 745size_t MarkSweep::GetThreadCount(bool paused) const { 746 if (heap_->GetThreadPool() == nullptr || !heap_->CareAboutPauseTimes()) { 747 return 0; 748 } 749 if (paused) { 750 return heap_->GetParallelGCThreadCount() + 1; 751 } else { 752 return heap_->GetConcGCThreadCount() + 1; 753 } 754} 755 756void MarkSweep::ScanGrayObjects(bool paused, byte minimum_age) { 757 accounting::CardTable* card_table = GetHeap()->GetCardTable(); 758 ThreadPool* thread_pool = GetHeap()->GetThreadPool(); 759 size_t thread_count = GetThreadCount(paused); 760 // The parallel version with only one thread is faster for card scanning, TODO: fix. 
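// In the parallel path below, each continuous space's address range is split into card-aligned chunks of roughly (space size / thread_count), and each CardScanTask is also seeded with a slice popped from the back of the global mark stack so that workers start with some marking work of their own.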
761 if (kParallelCardScan && thread_count > 0) { 762 Thread* self = Thread::Current(); 763 // Can't have a different split for each space since multiple spaces can have their cards being 764 // scanned at the same time. 765 timings_.StartSplit(paused ? "(Paused)ScanGrayObjects" : "ScanGrayObjects"); 766 // Try to take some of the mark stack since we can pass this off to the worker tasks. 767 const Object** mark_stack_begin = const_cast<const Object**>(mark_stack_->Begin()); 768 const Object** mark_stack_end = const_cast<const Object**>(mark_stack_->End()); 769 const size_t mark_stack_size = mark_stack_end - mark_stack_begin; 770 // Estimated number of work tasks we will create. 771 const size_t mark_stack_tasks = GetHeap()->GetContinuousSpaces().size() * thread_count; 772 DCHECK_NE(mark_stack_tasks, 0U); 773 const size_t mark_stack_delta = std::min(CardScanTask::kMaxSize / 2, 774 mark_stack_size / mark_stack_tasks + 1); 775 for (const auto& space : GetHeap()->GetContinuousSpaces()) { 776 byte* card_begin = space->Begin(); 777 byte* card_end = space->End(); 778 // Calculate how many bytes of heap we will scan, 779 const size_t address_range = card_end - card_begin; 780 // Calculate how much address range each task gets. 781 const size_t card_delta = RoundUp(address_range / thread_count + 1, 782 accounting::CardTable::kCardSize); 783 // Create the worker tasks for this space. 784 while (card_begin != card_end) { 785 // Add a range of cards. 786 size_t addr_remaining = card_end - card_begin; 787 size_t card_increment = std::min(card_delta, addr_remaining); 788 // Take from the back of the mark stack. 789 size_t mark_stack_remaining = mark_stack_end - mark_stack_begin; 790 size_t mark_stack_increment = std::min(mark_stack_delta, mark_stack_remaining); 791 mark_stack_end -= mark_stack_increment; 792 mark_stack_->PopBackCount(static_cast<int32_t>(mark_stack_increment)); 793 DCHECK_EQ(mark_stack_end, mark_stack_->End()); 794 // Add the new task to the thread pool. 795 auto* task = new CardScanTask(thread_pool, this, space->GetMarkBitmap(), card_begin, 796 card_begin + card_increment, minimum_age, 797 mark_stack_increment, mark_stack_end); 798 thread_pool->AddTask(self, task); 799 card_begin += card_increment; 800 } 801 } 802 thread_pool->SetMaxActiveWorkers(thread_count - 1); 803 thread_pool->StartWorkers(self); 804 thread_pool->Wait(self, true, true); 805 thread_pool->StopWorkers(self); 806 timings_.EndSplit(); 807 } else { 808 for (const auto& space : GetHeap()->GetContinuousSpaces()) { 809 // Image spaces are handled properly since live == marked for them. 810 switch (space->GetGcRetentionPolicy()) { 811 case space::kGcRetentionPolicyNeverCollect: 812 timings_.StartSplit(paused ? "(Paused)ScanGrayImageSpaceObjects" : 813 "ScanGrayImageSpaceObjects"); 814 break; 815 case space::kGcRetentionPolicyFullCollect: 816 timings_.StartSplit(paused ? "(Paused)ScanGrayZygoteSpaceObjects" : 817 "ScanGrayZygoteSpaceObjects"); 818 break; 819 case space::kGcRetentionPolicyAlwaysCollect: 820 timings_.StartSplit(paused ? 
"(Paused)ScanGrayAllocSpaceObjects" : 821 "ScanGrayAllocSpaceObjects"); 822 break; 823 } 824 ScanObjectVisitor visitor(this); 825 card_table->Scan(space->GetMarkBitmap(), space->Begin(), space->End(), visitor, minimum_age); 826 timings_.EndSplit(); 827 } 828 } 829} 830 831void MarkSweep::VerifyImageRoots() { 832 // Verify roots ensures that all the references inside the image space point 833 // objects which are either in the image space or marked objects in the alloc 834 // space 835 timings_.StartSplit("VerifyImageRoots"); 836 for (const auto& space : GetHeap()->GetContinuousSpaces()) { 837 if (space->IsImageSpace()) { 838 space::ImageSpace* image_space = space->AsImageSpace(); 839 uintptr_t begin = reinterpret_cast<uintptr_t>(image_space->Begin()); 840 uintptr_t end = reinterpret_cast<uintptr_t>(image_space->End()); 841 accounting::SpaceBitmap* live_bitmap = image_space->GetLiveBitmap(); 842 DCHECK(live_bitmap != NULL); 843 live_bitmap->VisitMarkedRange(begin, end, [this](const Object* obj) { 844 if (kCheckLocks) { 845 Locks::heap_bitmap_lock_->AssertSharedHeld(Thread::Current()); 846 } 847 DCHECK(obj != NULL); 848 CheckObject(obj); 849 }); 850 } 851 } 852 timings_.EndSplit(); 853} 854 855class RecursiveMarkTask : public MarkStackTask<false> { 856 public: 857 RecursiveMarkTask(ThreadPool* thread_pool, MarkSweep* mark_sweep, 858 accounting::SpaceBitmap* bitmap, uintptr_t begin, uintptr_t end) 859 : MarkStackTask<false>(thread_pool, mark_sweep, 0, NULL), 860 bitmap_(bitmap), 861 begin_(begin), 862 end_(end) { 863 } 864 865 protected: 866 accounting::SpaceBitmap* const bitmap_; 867 const uintptr_t begin_; 868 const uintptr_t end_; 869 870 virtual void Finalize() { 871 delete this; 872 } 873 874 // Scans all of the objects 875 virtual void Run(Thread* self) NO_THREAD_SAFETY_ANALYSIS { 876 ScanObjectParallelVisitor visitor(this); 877 bitmap_->VisitMarkedRange(begin_, end_, visitor); 878 // Finish by emptying our local mark stack. 879 MarkStackTask::Run(self); 880 } 881}; 882 883// Populates the mark stack based on the set of marked objects and 884// recursively marks until the mark stack is emptied. 885void MarkSweep::RecursiveMark() { 886 base::TimingLogger::ScopedSplit split("RecursiveMark", &timings_); 887 // RecursiveMark will build the lists of known instances of the Reference classes. 888 // See DelayReferenceReferent for details. 889 CHECK(soft_reference_list_ == NULL); 890 CHECK(weak_reference_list_ == NULL); 891 CHECK(finalizer_reference_list_ == NULL); 892 CHECK(phantom_reference_list_ == NULL); 893 CHECK(cleared_reference_list_ == NULL); 894 895 if (kUseRecursiveMark) { 896 const bool partial = GetGcType() == kGcTypePartial; 897 ScanObjectVisitor scan_visitor(this); 898 auto* self = Thread::Current(); 899 ThreadPool* thread_pool = heap_->GetThreadPool(); 900 size_t thread_count = GetThreadCount(false); 901 const bool parallel = kParallelRecursiveMark && thread_count > 1; 902 mark_stack_->Reset(); 903 for (const auto& space : GetHeap()->GetContinuousSpaces()) { 904 if ((space->GetGcRetentionPolicy() == space::kGcRetentionPolicyAlwaysCollect) || 905 (!partial && space->GetGcRetentionPolicy() == space::kGcRetentionPolicyFullCollect)) { 906 current_mark_bitmap_ = space->GetMarkBitmap(); 907 if (current_mark_bitmap_ == NULL) { 908 GetHeap()->DumpSpaces(); 909 LOG(FATAL) << "invalid bitmap"; 910 } 911 if (parallel) { 912 // We will use the mark stack the future. 913 // CHECK(mark_stack_->IsEmpty()); 914 // This function does not handle heap end increasing, so we must use the space end. 
915 uintptr_t begin = reinterpret_cast<uintptr_t>(space->Begin()); 916 uintptr_t end = reinterpret_cast<uintptr_t>(space->End()); 917 atomic_finger_ = static_cast<int32_t>(0xFFFFFFFF); 918 919 // Create a few worker tasks. 920 const size_t n = thread_count * 2; 921 while (begin != end) { 922 uintptr_t start = begin; 923 uintptr_t delta = (end - begin) / n; 924 delta = RoundUp(delta, KB); 925 if (delta < 16 * KB) delta = end - begin; 926 begin += delta; 927 auto* task = new RecursiveMarkTask(thread_pool, this, current_mark_bitmap_, start, 928 begin); 929 thread_pool->AddTask(self, task); 930 } 931 thread_pool->SetMaxActiveWorkers(thread_count - 1); 932 thread_pool->StartWorkers(self); 933 thread_pool->Wait(self, true, true); 934 thread_pool->StopWorkers(self); 935 } else { 936 // This function does not handle heap end increasing, so we must use the space end. 937 uintptr_t begin = reinterpret_cast<uintptr_t>(space->Begin()); 938 uintptr_t end = reinterpret_cast<uintptr_t>(space->End()); 939 current_mark_bitmap_->VisitMarkedRange(begin, end, scan_visitor); 940 } 941 } 942 } 943 } 944 ProcessMarkStack(false); 945} 946 947bool MarkSweep::IsMarkedCallback(const Object* object, void* arg) { 948 return 949 reinterpret_cast<MarkSweep*>(arg)->IsMarked(object) || 950 !reinterpret_cast<MarkSweep*>(arg)->GetHeap()->GetLiveBitmap()->Test(object); 951} 952 953void MarkSweep::RecursiveMarkDirtyObjects(bool paused, byte minimum_age) { 954 ScanGrayObjects(paused, minimum_age); 955 ProcessMarkStack(paused); 956} 957 958void MarkSweep::ReMarkRoots() { 959 timings_.StartSplit("ReMarkRoots"); 960 Runtime::Current()->VisitRoots(ReMarkObjectVisitor, this, true, true); 961 timings_.EndSplit(); 962} 963 964void MarkSweep::SweepJniWeakGlobals(IsMarkedTester is_marked, void* arg) { 965 JavaVMExt* vm = Runtime::Current()->GetJavaVM(); 966 MutexLock mu(Thread::Current(), vm->weak_globals_lock); 967 for (const Object** entry : vm->weak_globals) { 968 if (!is_marked(*entry, arg)) { 969 *entry = kClearedJniWeakGlobal; 970 } 971 } 972} 973 974struct ArrayMarkedCheck { 975 accounting::ObjectStack* live_stack; 976 MarkSweep* mark_sweep; 977}; 978 979// Either marked or not live. 980bool MarkSweep::IsMarkedArrayCallback(const Object* object, void* arg) { 981 ArrayMarkedCheck* array_check = reinterpret_cast<ArrayMarkedCheck*>(arg); 982 if (array_check->mark_sweep->IsMarked(object)) { 983 return true; 984 } 985 accounting::ObjectStack* live_stack = array_check->live_stack; 986 return std::find(live_stack->Begin(), live_stack->End(), object) == live_stack->End(); 987} 988 989void MarkSweep::SweepSystemWeaksArray(accounting::ObjectStack* allocations) { 990 Runtime* runtime = Runtime::Current(); 991 // The callbacks check 992 // !is_marked where is_marked is the callback but we want 993 // !IsMarked && IsLive 994 // So compute !(!IsMarked && IsLive) which is equal to (IsMarked || !IsLive). 995 // Or for swapped (IsLive || !IsMarked). 
996 997 timings_.StartSplit("SweepSystemWeaksArray"); 998 ArrayMarkedCheck visitor; 999 visitor.live_stack = allocations; 1000 visitor.mark_sweep = this; 1001 runtime->GetInternTable()->SweepInternTableWeaks(IsMarkedArrayCallback, &visitor); 1002 runtime->GetMonitorList()->SweepMonitorList(IsMarkedArrayCallback, &visitor); 1003 SweepJniWeakGlobals(IsMarkedArrayCallback, &visitor); 1004 timings_.EndSplit(); 1005} 1006 1007void MarkSweep::SweepSystemWeaks() { 1008 Runtime* runtime = Runtime::Current(); 1009 // The callbacks check 1010 // !is_marked where is_marked is the callback but we want 1011 // !IsMarked && IsLive 1012 // So compute !(!IsMarked && IsLive) which is equal to (IsMarked || !IsLive). 1013 // Or for swapped (IsLive || !IsMarked). 1014 timings_.StartSplit("SweepSystemWeaks"); 1015 runtime->GetInternTable()->SweepInternTableWeaks(IsMarkedCallback, this); 1016 runtime->GetMonitorList()->SweepMonitorList(IsMarkedCallback, this); 1017 SweepJniWeakGlobals(IsMarkedCallback, this); 1018 timings_.EndSplit(); 1019} 1020 1021bool MarkSweep::VerifyIsLiveCallback(const Object* obj, void* arg) { 1022 reinterpret_cast<MarkSweep*>(arg)->VerifyIsLive(obj); 1023 // We don't actually want to sweep the object, so lets return "marked" 1024 return true; 1025} 1026 1027void MarkSweep::VerifyIsLive(const Object* obj) { 1028 Heap* heap = GetHeap(); 1029 if (!heap->GetLiveBitmap()->Test(obj)) { 1030 space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace(); 1031 if (!large_object_space->GetLiveObjects()->Test(obj)) { 1032 if (std::find(heap->allocation_stack_->Begin(), heap->allocation_stack_->End(), obj) == 1033 heap->allocation_stack_->End()) { 1034 // Object not found! 1035 heap->DumpSpaces(); 1036 LOG(FATAL) << "Found dead object " << obj; 1037 } 1038 } 1039 } 1040} 1041 1042void MarkSweep::VerifySystemWeaks() { 1043 Runtime* runtime = Runtime::Current(); 1044 // Verify system weaks, uses a special IsMarked callback which always returns true. 1045 runtime->GetInternTable()->SweepInternTableWeaks(VerifyIsLiveCallback, this); 1046 runtime->GetMonitorList()->SweepMonitorList(VerifyIsLiveCallback, this); 1047 1048 JavaVMExt* vm = runtime->GetJavaVM(); 1049 MutexLock mu(Thread::Current(), vm->weak_globals_lock); 1050 for (const Object** entry : vm->weak_globals) { 1051 VerifyIsLive(*entry); 1052 } 1053} 1054 1055struct SweepCallbackContext { 1056 MarkSweep* mark_sweep; 1057 space::AllocSpace* space; 1058 Thread* self; 1059}; 1060 1061class CheckpointMarkThreadRoots : public Closure { 1062 public: 1063 explicit CheckpointMarkThreadRoots(MarkSweep* mark_sweep) : mark_sweep_(mark_sweep) {} 1064 1065 virtual void Run(Thread* thread) NO_THREAD_SAFETY_ANALYSIS { 1066 // Note: self is not necessarily equal to thread since thread may be suspended. 
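// Checkpoint protocol: this closure runs once per thread, marks that thread's roots through MarkRootParallelCallback and then passes the GC barrier; MarkRootsCheckpoint below blocks on that barrier until every thread requested to run the checkpoint has checked in.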
1067 Thread* self = Thread::Current(); 1068 CHECK(thread == self || thread->IsSuspended() || thread->GetState() == kWaitingPerformingGc) 1069 << thread->GetState() << " thread " << thread << " self " << self; 1070 thread->VisitRoots(MarkSweep::MarkRootParallelCallback, mark_sweep_); 1071 mark_sweep_->GetBarrier().Pass(self); 1072 } 1073 1074 private: 1075 MarkSweep* mark_sweep_; 1076}; 1077 1078void MarkSweep::MarkRootsCheckpoint(Thread* self) { 1079 CheckpointMarkThreadRoots check_point(this); 1080 timings_.StartSplit("MarkRootsCheckpoint"); 1081 ThreadList* thread_list = Runtime::Current()->GetThreadList(); 1082 // Request the check point is run on all threads returning a count of the threads that must 1083 // run through the barrier including self. 1084 size_t barrier_count = thread_list->RunCheckpoint(&check_point); 1085 // Release locks then wait for all mutator threads to pass the barrier. 1086 // TODO: optimize to not release locks when there are no threads to wait for. 1087 Locks::heap_bitmap_lock_->ExclusiveUnlock(self); 1088 Locks::mutator_lock_->SharedUnlock(self); 1089 ThreadState old_state = self->SetState(kWaitingForCheckPointsToRun); 1090 CHECK_EQ(old_state, kWaitingPerformingGc); 1091 gc_barrier_->Increment(self, barrier_count); 1092 self->SetState(kWaitingPerformingGc); 1093 Locks::mutator_lock_->SharedLock(self); 1094 Locks::heap_bitmap_lock_->ExclusiveLock(self); 1095 timings_.EndSplit(); 1096} 1097 1098void MarkSweep::SweepCallback(size_t num_ptrs, Object** ptrs, void* arg) { 1099 SweepCallbackContext* context = static_cast<SweepCallbackContext*>(arg); 1100 MarkSweep* mark_sweep = context->mark_sweep; 1101 Heap* heap = mark_sweep->GetHeap(); 1102 space::AllocSpace* space = context->space; 1103 Thread* self = context->self; 1104 Locks::heap_bitmap_lock_->AssertExclusiveHeld(self); 1105 // Use a bulk free, that merges consecutive objects before freeing or free per object? 1106 // Documentation suggests better free performance with merging, but this may be at the expensive 1107 // of allocation. 1108 size_t freed_objects = num_ptrs; 1109 // AllocSpace::FreeList clears the value in ptrs, so perform after clearing the live bit 1110 size_t freed_bytes = space->FreeList(self, num_ptrs, ptrs); 1111 heap->RecordFree(freed_objects, freed_bytes); 1112 mark_sweep->freed_objects_.fetch_add(freed_objects); 1113 mark_sweep->freed_bytes_.fetch_add(freed_bytes); 1114} 1115 1116void MarkSweep::ZygoteSweepCallback(size_t num_ptrs, Object** ptrs, void* arg) { 1117 SweepCallbackContext* context = static_cast<SweepCallbackContext*>(arg); 1118 Locks::heap_bitmap_lock_->AssertExclusiveHeld(context->self); 1119 Heap* heap = context->mark_sweep->GetHeap(); 1120 // We don't free any actual memory to avoid dirtying the shared zygote pages. 1121 for (size_t i = 0; i < num_ptrs; ++i) { 1122 Object* obj = static_cast<Object*>(ptrs[i]); 1123 heap->GetLiveBitmap()->Clear(obj); 1124 heap->GetCardTable()->MarkCard(obj); 1125 } 1126} 1127 1128void MarkSweep::SweepArray(accounting::ObjectStack* allocations, bool swap_bitmaps) { 1129 space::DlMallocSpace* space = heap_->GetAllocSpace(); 1130 1131 // If we don't swap bitmaps then newly allocated Weaks go into the live bitmap but not mark 1132 // bitmap, resulting in occasional frees of Weaks which are still in use. 1133 SweepSystemWeaksArray(allocations); 1134 1135 timings_.StartSplit("SweepArray"); 1136 // Newly allocated objects MUST be in the alloc space and those are the only objects which we are 1137 // going to free. 
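// SweepArray walks the allocation stack rather than the bitmaps: unmarked alloc space objects are batched and released in chunks of kSweepArrayChunkFreeSize through FreeList(), while unmarked large objects are freed one at a time.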
1138 accounting::SpaceBitmap* live_bitmap = space->GetLiveBitmap(); 1139 accounting::SpaceBitmap* mark_bitmap = space->GetMarkBitmap(); 1140 space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace(); 1141 accounting::SpaceSetMap* large_live_objects = large_object_space->GetLiveObjects(); 1142 accounting::SpaceSetMap* large_mark_objects = large_object_space->GetMarkObjects(); 1143 if (swap_bitmaps) { 1144 std::swap(live_bitmap, mark_bitmap); 1145 std::swap(large_live_objects, large_mark_objects); 1146 } 1147 1148 size_t freed_bytes = 0; 1149 size_t freed_large_object_bytes = 0; 1150 size_t freed_objects = 0; 1151 size_t freed_large_objects = 0; 1152 size_t count = allocations->Size(); 1153 Object** objects = const_cast<Object**>(allocations->Begin()); 1154 Object** out = objects; 1155 Object** objects_to_chunk_free = out; 1156 1157 // Empty the allocation stack. 1158 Thread* self = Thread::Current(); 1159 for (size_t i = 0; i < count; ++i) { 1160 Object* obj = objects[i]; 1161 // There should only be objects in the AllocSpace/LargeObjectSpace in the allocation stack. 1162 if (LIKELY(mark_bitmap->HasAddress(obj))) { 1163 if (!mark_bitmap->Test(obj)) { 1164 // Don't bother un-marking since we clear the mark bitmap anyways. 1165 *(out++) = obj; 1166 // Free objects in chunks. 1167 DCHECK_GE(out, objects_to_chunk_free); 1168 DCHECK_LE(static_cast<size_t>(out - objects_to_chunk_free), kSweepArrayChunkFreeSize); 1169 if (static_cast<size_t>(out - objects_to_chunk_free) == kSweepArrayChunkFreeSize) { 1170 timings_.StartSplit("FreeList"); 1171 size_t chunk_freed_objects = out - objects_to_chunk_free; 1172 freed_objects += chunk_freed_objects; 1173 freed_bytes += space->FreeList(self, chunk_freed_objects, objects_to_chunk_free); 1174 objects_to_chunk_free = out; 1175 timings_.EndSplit(); 1176 } 1177 } 1178 } else if (!large_mark_objects->Test(obj)) { 1179 ++freed_large_objects; 1180 freed_large_object_bytes += large_object_space->Free(self, obj); 1181 } 1182 } 1183 // Free the remaining objects in chunks. 1184 DCHECK_GE(out, objects_to_chunk_free); 1185 DCHECK_LE(static_cast<size_t>(out - objects_to_chunk_free), kSweepArrayChunkFreeSize); 1186 if (out - objects_to_chunk_free > 0) { 1187 timings_.StartSplit("FreeList"); 1188 size_t chunk_freed_objects = out - objects_to_chunk_free; 1189 freed_objects += chunk_freed_objects; 1190 freed_bytes += space->FreeList(self, chunk_freed_objects, objects_to_chunk_free); 1191 timings_.EndSplit(); 1192 } 1193 CHECK_EQ(count, allocations->Size()); 1194 timings_.EndSplit(); 1195 1196 timings_.StartSplit("RecordFree"); 1197 VLOG(heap) << "Freed " << freed_objects << "/" << count 1198 << " objects with size " << PrettySize(freed_bytes); 1199 heap_->RecordFree(freed_objects + freed_large_objects, freed_bytes + freed_large_object_bytes); 1200 freed_objects_.fetch_add(freed_objects); 1201 freed_large_objects_.fetch_add(freed_large_objects); 1202 freed_bytes_.fetch_add(freed_bytes); 1203 freed_large_object_bytes_.fetch_add(freed_large_object_bytes); 1204 timings_.EndSplit(); 1205 1206 timings_.StartSplit("ResetStack"); 1207 allocations->Reset(); 1208 timings_.EndSplit(); 1209} 1210 1211void MarkSweep::Sweep(bool swap_bitmaps) { 1212 DCHECK(mark_stack_->IsEmpty()); 1213 base::TimingLogger::ScopedSplit("Sweep", &timings_); 1214 1215 // If we don't swap bitmaps then newly allocated Weaks go into the live bitmap but not mark 1216 // bitmap, resulting in occasional frees of Weaks which are still in use. 
1217 SweepSystemWeaks(); 1218 1219 const bool partial = (GetGcType() == kGcTypePartial); 1220 SweepCallbackContext scc; 1221 scc.mark_sweep = this; 1222 scc.self = Thread::Current(); 1223 for (const auto& space : GetHeap()->GetContinuousSpaces()) { 1224 // We always sweep always collect spaces. 1225 bool sweep_space = (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyAlwaysCollect); 1226 if (!partial && !sweep_space) { 1227 // We sweep full collect spaces when the GC isn't a partial GC (ie its full). 1228 sweep_space = (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyFullCollect); 1229 } 1230 if (sweep_space) { 1231 uintptr_t begin = reinterpret_cast<uintptr_t>(space->Begin()); 1232 uintptr_t end = reinterpret_cast<uintptr_t>(space->End()); 1233 scc.space = space->AsDlMallocSpace(); 1234 accounting::SpaceBitmap* live_bitmap = space->GetLiveBitmap(); 1235 accounting::SpaceBitmap* mark_bitmap = space->GetMarkBitmap(); 1236 if (swap_bitmaps) { 1237 std::swap(live_bitmap, mark_bitmap); 1238 } 1239 if (!space->IsZygoteSpace()) { 1240 base::TimingLogger::ScopedSplit split("SweepAllocSpace", &timings_); 1241 // Bitmaps are pre-swapped for optimization which enables sweeping with the heap unlocked. 1242 accounting::SpaceBitmap::SweepWalk(*live_bitmap, *mark_bitmap, begin, end, 1243 &SweepCallback, reinterpret_cast<void*>(&scc)); 1244 } else { 1245 base::TimingLogger::ScopedSplit split("SweepZygote", &timings_); 1246 // Zygote sweep takes care of dirtying cards and clearing live bits, does not free actual 1247 // memory. 1248 accounting::SpaceBitmap::SweepWalk(*live_bitmap, *mark_bitmap, begin, end, 1249 &ZygoteSweepCallback, reinterpret_cast<void*>(&scc)); 1250 } 1251 } 1252 } 1253 1254 SweepLargeObjects(swap_bitmaps); 1255} 1256 1257void MarkSweep::SweepLargeObjects(bool swap_bitmaps) { 1258 base::TimingLogger::ScopedSplit("SweepLargeObjects", &timings_); 1259 // Sweep large objects 1260 space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace(); 1261 accounting::SpaceSetMap* large_live_objects = large_object_space->GetLiveObjects(); 1262 accounting::SpaceSetMap* large_mark_objects = large_object_space->GetMarkObjects(); 1263 if (swap_bitmaps) { 1264 std::swap(large_live_objects, large_mark_objects); 1265 } 1266 // O(n*log(n)) but hopefully there are not too many large objects. 1267 size_t freed_objects = 0; 1268 size_t freed_bytes = 0; 1269 Thread* self = Thread::Current(); 1270 for (const Object* obj : large_live_objects->GetObjects()) { 1271 if (!large_mark_objects->Test(obj)) { 1272 freed_bytes += large_object_space->Free(self, const_cast<Object*>(obj)); 1273 ++freed_objects; 1274 } 1275 } 1276 freed_large_objects_.fetch_add(freed_objects); 1277 freed_large_object_bytes_.fetch_add(freed_bytes); 1278 GetHeap()->RecordFree(freed_objects, freed_bytes); 1279} 1280 1281void MarkSweep::CheckReference(const Object* obj, const Object* ref, MemberOffset offset, bool is_static) { 1282 for (const auto& space : GetHeap()->GetContinuousSpaces()) { 1283 if (space->IsDlMallocSpace() && space->Contains(ref)) { 1284 DCHECK(IsMarked(obj)); 1285 1286 bool is_marked = IsMarked(ref); 1287 if (!is_marked) { 1288 LOG(INFO) << *space; 1289 LOG(WARNING) << (is_static ? 
"Static ref'" : "Instance ref'") << PrettyTypeOf(ref) 1290 << "' (" << reinterpret_cast<const void*>(ref) << ") in '" << PrettyTypeOf(obj) 1291 << "' (" << reinterpret_cast<const void*>(obj) << ") at offset " 1292 << reinterpret_cast<void*>(offset.Int32Value()) << " wasn't marked"; 1293 1294 const Class* klass = is_static ? obj->AsClass() : obj->GetClass(); 1295 DCHECK(klass != NULL); 1296 const ObjectArray<ArtField>* fields = is_static ? klass->GetSFields() : klass->GetIFields(); 1297 DCHECK(fields != NULL); 1298 bool found = false; 1299 for (int32_t i = 0; i < fields->GetLength(); ++i) { 1300 const ArtField* cur = fields->Get(i); 1301 if (cur->GetOffset().Int32Value() == offset.Int32Value()) { 1302 LOG(WARNING) << "Field referencing the alloc space was " << PrettyField(cur); 1303 found = true; 1304 break; 1305 } 1306 } 1307 if (!found) { 1308 LOG(WARNING) << "Could not find field in object alloc space with offset " << offset.Int32Value(); 1309 } 1310 1311 bool obj_marked = heap_->GetCardTable()->IsDirty(obj); 1312 if (!obj_marked) { 1313 LOG(WARNING) << "Object '" << PrettyTypeOf(obj) << "' " 1314 << "(" << reinterpret_cast<const void*>(obj) << ") contains references to " 1315 << "the alloc space, but wasn't card marked"; 1316 } 1317 } 1318 } 1319 break; 1320 } 1321} 1322 1323// Process the "referent" field in a java.lang.ref.Reference. If the 1324// referent has not yet been marked, put it on the appropriate list in 1325// the heap for later processing. 1326void MarkSweep::DelayReferenceReferent(mirror::Class* klass, Object* obj) { 1327 DCHECK(klass != nullptr); 1328 DCHECK(klass->IsReferenceClass()); 1329 DCHECK(obj != NULL); 1330 Object* referent = heap_->GetReferenceReferent(obj); 1331 if (referent != NULL && !IsMarked(referent)) { 1332 if (kCountJavaLangRefs) { 1333 ++reference_count_; 1334 } 1335 Thread* self = Thread::Current(); 1336 // TODO: Remove these locks, and use atomic stacks for storing references? 1337 if (klass->IsSoftReferenceClass()) { 1338 MutexLock mu(self, *heap_->GetSoftRefQueueLock()); 1339 heap_->EnqueuePendingReference(obj, &soft_reference_list_); 1340 } else if (klass->IsWeakReferenceClass()) { 1341 MutexLock mu(self, *heap_->GetWeakRefQueueLock()); 1342 heap_->EnqueuePendingReference(obj, &weak_reference_list_); 1343 } else if (klass->IsFinalizerReferenceClass()) { 1344 MutexLock mu(self, *heap_->GetFinalizerRefQueueLock()); 1345 heap_->EnqueuePendingReference(obj, &finalizer_reference_list_); 1346 } else if (klass->IsPhantomReferenceClass()) { 1347 MutexLock mu(self, *heap_->GetPhantomRefQueueLock()); 1348 heap_->EnqueuePendingReference(obj, &phantom_reference_list_); 1349 } else { 1350 LOG(FATAL) << "Invalid reference type " << PrettyClass(klass) 1351 << " " << std::hex << klass->GetAccessFlags(); 1352 } 1353 } 1354} 1355 1356void MarkSweep::ScanRoot(const Object* obj) { 1357 ScanObject(obj); 1358} 1359 1360class MarkObjectVisitor { 1361 public: 1362 explicit MarkObjectVisitor(MarkSweep* const mark_sweep) ALWAYS_INLINE : mark_sweep_(mark_sweep) {} 1363 1364 // TODO: Fixme when anotatalysis works with visitors. 
1365 void operator()(const Object* /* obj */, const Object* ref, const MemberOffset& /* offset */, 1366 bool /* is_static */) const ALWAYS_INLINE 1367 NO_THREAD_SAFETY_ANALYSIS { 1368 if (kCheckLocks) { 1369 Locks::mutator_lock_->AssertSharedHeld(Thread::Current()); 1370 Locks::heap_bitmap_lock_->AssertExclusiveHeld(Thread::Current()); 1371 } 1372 mark_sweep_->MarkObject(ref); 1373 } 1374 1375 private: 1376 MarkSweep* const mark_sweep_; 1377}; 1378 1379// Scans an object reference. Determines the type of the reference 1380// and dispatches to a specialized scanning routine. 1381void MarkSweep::ScanObject(const Object* obj) { 1382 MarkObjectVisitor visitor(this); 1383 ScanObjectVisit(obj, visitor); 1384} 1385 1386void MarkSweep::ProcessMarkStackParallel(size_t thread_count) { 1387 Thread* self = Thread::Current(); 1388 ThreadPool* thread_pool = GetHeap()->GetThreadPool(); 1389 const size_t chunk_size = std::min(mark_stack_->Size() / thread_count + 1, 1390 static_cast<size_t>(MarkStackTask<false>::kMaxSize)); 1391 CHECK_GT(chunk_size, 0U); 1392 // Split the current mark stack up into work tasks. 1393 for (mirror::Object **it = mark_stack_->Begin(), **end = mark_stack_->End(); it < end; ) { 1394 const size_t delta = std::min(static_cast<size_t>(end - it), chunk_size); 1395 thread_pool->AddTask(self, new MarkStackTask<false>(thread_pool, this, delta, 1396 const_cast<const mirror::Object**>(it))); 1397 it += delta; 1398 } 1399 thread_pool->SetMaxActiveWorkers(thread_count - 1); 1400 thread_pool->StartWorkers(self); 1401 thread_pool->Wait(self, true, true); 1402 thread_pool->StopWorkers(self); 1403 mark_stack_->Reset(); 1404 CHECK_EQ(work_chunks_created_, work_chunks_deleted_) << " some of the work chunks were leaked"; 1405} 1406 1407// Scan anything that's on the mark stack. 1408void MarkSweep::ProcessMarkStack(bool paused) { 1409 timings_.StartSplit("ProcessMarkStack"); 1410 size_t thread_count = GetThreadCount(paused); 1411 if (kParallelProcessMarkStack && thread_count > 1 && 1412 mark_stack_->Size() >= kMinimumParallelMarkStackSize) { 1413 ProcessMarkStackParallel(thread_count); 1414 } else { 1415 // TODO: Tune this. 1416 static const size_t kFifoSize = 4; 1417 BoundedFifoPowerOfTwo<const Object*, kFifoSize> prefetch_fifo; 1418 for (;;) { 1419 const Object* obj = NULL; 1420 if (kUseMarkStackPrefetch) { 1421 while (!mark_stack_->IsEmpty() && prefetch_fifo.size() < kFifoSize) { 1422 const Object* obj = mark_stack_->PopBack(); 1423 DCHECK(obj != NULL); 1424 __builtin_prefetch(obj); 1425 prefetch_fifo.push_back(obj); 1426 } 1427 if (prefetch_fifo.empty()) { 1428 break; 1429 } 1430 obj = prefetch_fifo.front(); 1431 prefetch_fifo.pop_front(); 1432 } else { 1433 if (mark_stack_->IsEmpty()) { 1434 break; 1435 } 1436 obj = mark_stack_->PopBack(); 1437 } 1438 DCHECK(obj != NULL); 1439 ScanObject(obj); 1440 } 1441 } 1442 timings_.EndSplit(); 1443} 1444 1445// Walks the reference list marking any references subject to the 1446// reference clearing policy. References with a black referent are 1447// removed from the list. References with white referents biased 1448// toward saving are blackened and also removed from the list. 
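// "Biased toward saving" is implemented below with a simple alternating counter: roughly every other white soft referent encountered is marked (preserved) rather than queued for clearing.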
1449void MarkSweep::PreserveSomeSoftReferences(Object** list) { 1450 DCHECK(list != NULL); 1451 Object* clear = NULL; 1452 size_t counter = 0; 1453 1454 DCHECK(mark_stack_->IsEmpty()); 1455 1456 timings_.StartSplit("PreserveSomeSoftReferences"); 1457 while (*list != NULL) { 1458 Object* ref = heap_->DequeuePendingReference(list); 1459 Object* referent = heap_->GetReferenceReferent(ref); 1460 if (referent == NULL) { 1461 // Referent was cleared by the user during marking. 1462 continue; 1463 } 1464 bool is_marked = IsMarked(referent); 1465 if (!is_marked && ((++counter) & 1)) { 1466 // Referent is white and biased toward saving, mark it. 1467 MarkObject(referent); 1468 is_marked = true; 1469 } 1470 if (!is_marked) { 1471 // Referent is white, queue it for clearing. 1472 heap_->EnqueuePendingReference(ref, &clear); 1473 } 1474 } 1475 *list = clear; 1476 timings_.EndSplit(); 1477 1478 // Restart the mark with the newly black references added to the root set. 1479 ProcessMarkStack(true); 1480} 1481 1482inline bool MarkSweep::IsMarked(const Object* object) const 1483 SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) { 1484 if (IsImmune(object)) { 1485 return true; 1486 } 1487 DCHECK(current_mark_bitmap_ != NULL); 1488 if (current_mark_bitmap_->HasAddress(object)) { 1489 return current_mark_bitmap_->Test(object); 1490 } 1491 return heap_->GetMarkBitmap()->Test(object); 1492} 1493 1494 1495// Unlink the reference list clearing reference objects with white 1496// referents. Cleared references registered to a reference queue are 1497// scheduled for appending by the heap worker thread. 1498void MarkSweep::ClearWhiteReferences(Object** list) { 1499 DCHECK(list != NULL); 1500 while (*list != NULL) { 1501 Object* ref = heap_->DequeuePendingReference(list); 1502 Object* referent = heap_->GetReferenceReferent(ref); 1503 if (referent != NULL && !IsMarked(referent)) { 1504 // Referent is white, clear it. 1505 heap_->ClearReferenceReferent(ref); 1506 if (heap_->IsEnqueuable(ref)) { 1507 heap_->EnqueueReference(ref, &cleared_reference_list_); 1508 } 1509 } 1510 } 1511 DCHECK(*list == NULL); 1512} 1513 1514// Enqueues finalizer references with white referents. White 1515// referents are blackened, moved to the zombie field, and the 1516// referent field is cleared. 1517void MarkSweep::EnqueueFinalizerReferences(Object** list) { 1518 DCHECK(list != NULL); 1519 timings_.StartSplit("EnqueueFinalizerReferences"); 1520 MemberOffset zombie_offset = heap_->GetFinalizerReferenceZombieOffset(); 1521 bool has_enqueued = false; 1522 while (*list != NULL) { 1523 Object* ref = heap_->DequeuePendingReference(list); 1524 Object* referent = heap_->GetReferenceReferent(ref); 1525 if (referent != NULL && !IsMarked(referent)) { 1526 MarkObject(referent); 1527 // If the referent is non-null the reference must be enqueuable. 1528 DCHECK(heap_->IsEnqueuable(ref)); 1529 ref->SetFieldObject(zombie_offset, referent, false); 1530 heap_->ClearReferenceReferent(ref); 1531 heap_->EnqueueReference(ref, &cleared_reference_list_); 1532 has_enqueued = true; 1533 } 1534 } 1535 timings_.EndSplit(); 1536 if (has_enqueued) { 1537 ProcessMarkStack(true); 1538 } 1539 DCHECK(*list == NULL); 1540} 1541 1542// Process reference class instances and schedule finalizations.
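// The overall order is: optionally preserve some soft referents, clear the remaining white soft and weak referents, enqueue finalizer references (blackening their referents), then clear any soft, weak and phantom referents that are still white.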
1543void MarkSweep::ProcessReferences(Object** soft_references, bool clear_soft, 1544 Object** weak_references, 1545 Object** finalizer_references, 1546 Object** phantom_references) { 1547 DCHECK(soft_references != NULL); 1548 DCHECK(weak_references != NULL); 1549 DCHECK(finalizer_references != NULL); 1550 DCHECK(phantom_references != NULL); 1551 1552 // Unless we are in the zygote or required to clear soft references 1553 // with white references, preserve some white referents. 1554 if (!clear_soft && !Runtime::Current()->IsZygote()) { 1555 PreserveSomeSoftReferences(soft_references); 1556 } 1557 1558 timings_.StartSplit("ProcessReferences"); 1559 // Clear all remaining soft and weak references with white 1560 // referents. 1561 ClearWhiteReferences(soft_references); 1562 ClearWhiteReferences(weak_references); 1563 timings_.EndSplit(); 1564 1565 // Preserve all white objects with finalize methods and schedule 1566 // them for finalization. 1567 EnqueueFinalizerReferences(finalizer_references); 1568 1569 timings_.StartSplit("ProcessReferences"); 1570 // Clear all f-reachable soft and weak references with white 1571 // referents. 1572 ClearWhiteReferences(soft_references); 1573 ClearWhiteReferences(weak_references); 1574 1575 // Clear all phantom references with white referents. 1576 ClearWhiteReferences(phantom_references); 1577 1578 // At this point all reference lists should be empty. 1579 DCHECK(*soft_references == NULL); 1580 DCHECK(*weak_references == NULL); 1581 DCHECK(*finalizer_references == NULL); 1582 DCHECK(*phantom_references == NULL); 1583 timings_.EndSplit(); 1584} 1585 1586void MarkSweep::UnBindBitmaps() { 1587 base::TimingLogger::ScopedSplit split("UnBindBitmaps", &timings_); 1588 for (const auto& space : GetHeap()->GetContinuousSpaces()) { 1589 if (space->IsDlMallocSpace()) { 1590 space::DlMallocSpace* alloc_space = space->AsDlMallocSpace(); 1591 if (alloc_space->temp_bitmap_.get() != NULL) { 1592 // At this point, the temp_bitmap holds our old mark bitmap. 1593 accounting::SpaceBitmap* new_bitmap = alloc_space->temp_bitmap_.release(); 1594 GetHeap()->GetMarkBitmap()->ReplaceBitmap(alloc_space->mark_bitmap_.get(), new_bitmap); 1595 CHECK_EQ(alloc_space->mark_bitmap_.release(), alloc_space->live_bitmap_.get()); 1596 alloc_space->mark_bitmap_.reset(new_bitmap); 1597 DCHECK(alloc_space->temp_bitmap_.get() == NULL); 1598 } 1599 } 1600 } 1601} 1602 1603void MarkSweep::FinishPhase() { 1604 base::TimingLogger::ScopedSplit split("FinishPhase", &timings_); 1605 // Can't enqueue references if we hold the mutator lock. 1606 Object* cleared_references = GetClearedReferences(); 1607 Heap* heap = GetHeap(); 1608 timings_.NewSplit("EnqueueClearedReferences"); 1609 heap->EnqueueClearedReferences(&cleared_references); 1610 1611 timings_.NewSplit("PostGcVerification"); 1612 heap->PostGcVerification(this); 1613 1614 timings_.NewSplit("GrowForUtilization"); 1615 heap->GrowForUtilization(GetGcType(), GetDurationNs()); 1616 1617 timings_.NewSplit("RequestHeapTrim"); 1618 heap->RequestHeapTrim(); 1619 1620 // Update the cumulative statistics 1621 total_time_ns_ += GetDurationNs(); 1622 total_paused_time_ns_ += std::accumulate(GetPauseTimes().begin(), GetPauseTimes().end(), 0, 1623 std::plus<uint64_t>()); 1624 total_freed_objects_ += GetFreedObjects() + GetFreedLargeObjects(); 1625 total_freed_bytes_ += GetFreedBytes() + GetFreedLargeObjectBytes(); 1626 1627 // Ensure that the mark stack is empty. 
1628 CHECK(mark_stack_->IsEmpty()); 1629 1630 if (kCountScannedTypes) { 1631 VLOG(gc) << "MarkSweep scanned classes=" << class_count_ << " arrays=" << array_count_ 1632 << " other=" << other_count_; 1633 } 1634 1635 if (kCountTasks) { 1636 VLOG(gc) << "Total number of work chunks allocated: " << work_chunks_created_; 1637 } 1638 1639 if (kMeasureOverhead) { 1640 VLOG(gc) << "Overhead time " << PrettyDuration(overhead_time_); 1641 } 1642 1643 if (kProfileLargeObjects) { 1644 VLOG(gc) << "Large objects tested " << large_object_test_ << " marked " << large_object_mark_; 1645 } 1646 1647 if (kCountClassesMarked) { 1648 VLOG(gc) << "Classes marked " << classes_marked_; 1649 } 1650 1651 if (kCountJavaLangRefs) { 1652 VLOG(gc) << "References scanned " << reference_count_; 1653 } 1654 1655 // Update the cumulative loggers. 1656 cumulative_timings_.Start(); 1657 cumulative_timings_.AddLogger(timings_); 1658 cumulative_timings_.End(); 1659 1660 // Clear all of the spaces' mark bitmaps. 1661 for (const auto& space : GetHeap()->GetContinuousSpaces()) { 1662 if (space->GetGcRetentionPolicy() != space::kGcRetentionPolicyNeverCollect) { 1663 space->GetMarkBitmap()->Clear(); 1664 } 1665 } 1666 mark_stack_->Reset(); 1667 1668 // Reset the marked large objects. 1669 space::LargeObjectSpace* large_objects = GetHeap()->GetLargeObjectsSpace(); 1670 large_objects->GetMarkObjects()->Clear(); 1671} 1672 1673} // namespace collector 1674} // namespace gc 1675} // namespace art 1676