mark_sweep.cc revision 958291c7afe723d846a39539fd00410c102485f3
1/* 2 * Copyright (C) 2011 The Android Open Source Project 3 * 4 * Licensed under the Apache License, Version 2.0 (the "License"); 5 * you may not use this file except in compliance with the License. 6 * You may obtain a copy of the License at 7 * 8 * http://www.apache.org/licenses/LICENSE-2.0 9 * 10 * Unless required by applicable law or agreed to in writing, software 11 * distributed under the License is distributed on an "AS IS" BASIS, 12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 * See the License for the specific language governing permissions and 14 * limitations under the License. 15 */ 16 17#include "mark_sweep.h" 18 19#include <functional> 20#include <numeric> 21#include <climits> 22#include <vector> 23 24#include "base/bounded_fifo.h" 25#include "base/logging.h" 26#include "base/macros.h" 27#include "base/mutex-inl.h" 28#include "base/timing_logger.h" 29#include "gc/accounting/card_table-inl.h" 30#include "gc/accounting/heap_bitmap.h" 31#include "gc/accounting/space_bitmap-inl.h" 32#include "gc/heap.h" 33#include "gc/space/image_space.h" 34#include "gc/space/large_object_space.h" 35#include "gc/space/space-inl.h" 36#include "indirect_reference_table.h" 37#include "intern_table.h" 38#include "jni_internal.h" 39#include "monitor.h" 40#include "mark_sweep-inl.h" 41#include "mirror/art_field.h" 42#include "mirror/art_field-inl.h" 43#include "mirror/class-inl.h" 44#include "mirror/class_loader.h" 45#include "mirror/dex_cache.h" 46#include "mirror/object-inl.h" 47#include "mirror/object_array.h" 48#include "mirror/object_array-inl.h" 49#include "runtime.h" 50#include "thread-inl.h" 51#include "thread_list.h" 52#include "verifier/method_verifier.h" 53 54using ::art::mirror::ArtField; 55using ::art::mirror::Class; 56using ::art::mirror::Object; 57using ::art::mirror::ObjectArray; 58 59namespace art { 60namespace gc { 61namespace collector { 62 63// Performance options. 64constexpr bool kUseRecursiveMark = false; 65constexpr bool kUseMarkStackPrefetch = true; 66constexpr size_t kSweepArrayChunkFreeSize = 1024; 67 68// Parallelism options. 69constexpr bool kParallelCardScan = true; 70constexpr bool kParallelRecursiveMark = true; 71// Don't attempt to parallelize mark stack processing unless the mark stack is at least n 72// elements. This is temporary until we reduce the overhead caused by allocating tasks, etc.. Not 73// having this can add overhead in ProcessReferences since we may end up doing many calls of 74// ProcessMarkStack with very small mark stacks. 75constexpr size_t kMinimumParallelMarkStackSize = 128; 76constexpr bool kParallelProcessMarkStack = true; 77 78// Profiling and information flags. 79constexpr bool kCountClassesMarked = false; 80constexpr bool kProfileLargeObjects = false; 81constexpr bool kMeasureOverhead = false; 82constexpr bool kCountTasks = false; 83constexpr bool kCountJavaLangRefs = false; 84 85// Turn off kCheckLocks when profiling the GC since it slows the GC down by up to 40%. 86constexpr bool kCheckLocks = kDebugLocking; 87 88void MarkSweep::ImmuneSpace(space::ContinuousSpace* space) { 89 // Bind live to mark bitmap if necessary. 90 if (space->GetLiveBitmap() != space->GetMarkBitmap()) { 91 BindLiveToMarkBitmap(space); 92 } 93 94 // Add the space to the immune region. 
95 if (immune_begin_ == NULL) { 96 DCHECK(immune_end_ == NULL); 97 SetImmuneRange(reinterpret_cast<Object*>(space->Begin()), 98 reinterpret_cast<Object*>(space->End())); 99 } else { 100 const space::ContinuousSpace* prev_space = nullptr; 101 // Find out if the previous space is immune. 102 for (space::ContinuousSpace* cur_space : GetHeap()->GetContinuousSpaces()) { 103 if (cur_space == space) { 104 break; 105 } 106 prev_space = cur_space; 107 } 108 // If previous space was immune, then extend the immune region. Relies on continuous spaces 109 // being sorted by Heap::AddContinuousSpace. 110 if (prev_space != NULL && 111 immune_begin_ <= reinterpret_cast<Object*>(prev_space->Begin()) && 112 immune_end_ >= reinterpret_cast<Object*>(prev_space->End())) { 113 immune_begin_ = std::min(reinterpret_cast<Object*>(space->Begin()), immune_begin_); 114 immune_end_ = std::max(reinterpret_cast<Object*>(space->End()), immune_end_); 115 } 116 } 117} 118 119void MarkSweep::BindBitmaps() { 120 timings_.StartSplit("BindBitmaps"); 121 WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_); 122 // Mark all of the spaces we never collect as immune. 123 for (const auto& space : GetHeap()->GetContinuousSpaces()) { 124 if (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyNeverCollect) { 125 ImmuneSpace(space); 126 } 127 } 128 timings_.EndSplit(); 129} 130 131MarkSweep::MarkSweep(Heap* heap, bool is_concurrent, const std::string& name_prefix) 132 : GarbageCollector(heap, 133 name_prefix + (name_prefix.empty() ? "" : " ") + 134 (is_concurrent ? "concurrent mark sweep": "mark sweep")), 135 current_mark_bitmap_(NULL), 136 java_lang_Class_(NULL), 137 mark_stack_(NULL), 138 immune_begin_(NULL), 139 immune_end_(NULL), 140 soft_reference_list_(NULL), 141 weak_reference_list_(NULL), 142 finalizer_reference_list_(NULL), 143 phantom_reference_list_(NULL), 144 cleared_reference_list_(NULL), 145 gc_barrier_(new Barrier(0)), 146 large_object_lock_("mark sweep large object lock", kMarkSweepLargeObjectLock), 147 mark_stack_lock_("mark sweep mark stack lock", kMarkSweepMarkStackLock), 148 is_concurrent_(is_concurrent), 149 clear_soft_references_(false) { 150} 151 152void MarkSweep::InitializePhase() { 153 timings_.Reset(); 154 base::TimingLogger::ScopedSplit split("InitializePhase", &timings_); 155 mark_stack_ = heap_->mark_stack_.get(); 156 DCHECK(mark_stack_ != nullptr); 157 SetImmuneRange(nullptr, nullptr); 158 soft_reference_list_ = nullptr; 159 weak_reference_list_ = nullptr; 160 finalizer_reference_list_ = nullptr; 161 phantom_reference_list_ = nullptr; 162 cleared_reference_list_ = nullptr; 163 freed_bytes_ = 0; 164 freed_large_object_bytes_ = 0; 165 freed_objects_ = 0; 166 freed_large_objects_ = 0; 167 class_count_ = 0; 168 array_count_ = 0; 169 other_count_ = 0; 170 large_object_test_ = 0; 171 large_object_mark_ = 0; 172 classes_marked_ = 0; 173 overhead_time_ = 0; 174 work_chunks_created_ = 0; 175 work_chunks_deleted_ = 0; 176 reference_count_ = 0; 177 java_lang_Class_ = Class::GetJavaLangClass(); 178 CHECK(java_lang_Class_ != nullptr); 179 180 FindDefaultMarkBitmap(); 181 182 // Do any pre GC verification. 
183 timings_.NewSplit("PreGcVerification"); 184 heap_->PreGcVerification(this); 185} 186 187void MarkSweep::ProcessReferences(Thread* self) { 188 WriterMutexLock mu(self, *Locks::heap_bitmap_lock_); 189 ProcessReferences(&soft_reference_list_, clear_soft_references_, &weak_reference_list_, 190 &finalizer_reference_list_, &phantom_reference_list_); 191} 192 193bool MarkSweep::HandleDirtyObjectsPhase() { 194 base::TimingLogger::ScopedSplit split("HandleDirtyObjectsPhase", &timings_); 195 Thread* self = Thread::Current(); 196 Locks::mutator_lock_->AssertExclusiveHeld(self); 197 198 { 199 WriterMutexLock mu(self, *Locks::heap_bitmap_lock_); 200 201 // Re-mark root set. 202 ReMarkRoots(); 203 204 // Scan dirty objects; this is only required if we are not doing concurrent GC. 205 RecursiveMarkDirtyObjects(true, accounting::CardTable::kCardDirty); 206 } 207 208 ProcessReferences(self); 209 210 // Only need to do this if we have the card mark verification on, and only during concurrent GC. 211 if (GetHeap()->verify_missing_card_marks_) { 212 WriterMutexLock mu(self, *Locks::heap_bitmap_lock_); 213 // This second sweep makes sure that we don't have any objects in the live stack which point to 214 // freed objects. These cause problems since their references may be previously freed objects. 215 SweepArray(GetHeap()->allocation_stack_.get(), false); 216 } 217 return true; 218} 219 220bool MarkSweep::IsConcurrent() const { 221 return is_concurrent_; 222} 223 224void MarkSweep::MarkingPhase() { 225 base::TimingLogger::ScopedSplit split("MarkingPhase", &timings_); 226 Heap* heap = GetHeap(); 227 Thread* self = Thread::Current(); 228 229 BindBitmaps(); 230 FindDefaultMarkBitmap(); 231 232 // Process dirty cards and add dirty cards to mod union tables. 233 heap->ProcessCards(timings_); 234 235 // Need to do this before the checkpoint since we don't want any threads to add references to 236 // the live stack during the recursive mark. 237 timings_.NewSplit("SwapStacks"); 238 heap->SwapStacks(); 239 240 WriterMutexLock mu(self, *Locks::heap_bitmap_lock_); 241 if (Locks::mutator_lock_->IsExclusiveHeld(self)) { 242 // If we exclusively hold the mutator lock, all threads must be suspended. 243 MarkRoots(); 244 } else { 245 MarkThreadRoots(self); 246 MarkNonThreadRoots(); 247 } 248 MarkConcurrentRoots(); 249 250 heap->UpdateAndMarkModUnion(this, timings_, GetGcType()); 251 MarkReachableObjects(); 252} 253 254void MarkSweep::MarkThreadRoots(Thread* self) { 255 MarkRootsCheckpoint(self); 256} 257 258void MarkSweep::MarkReachableObjects() { 259 // Mark everything allocated since the last GC as live so that we can sweep concurrently, 260 // knowing that new allocations won't be marked as live. 261 timings_.StartSplit("MarkStackAsLive"); 262 accounting::ObjectStack* live_stack = heap_->GetLiveStack(); 263 heap_->MarkAllocStack(heap_->alloc_space_->GetLiveBitmap(), 264 heap_->large_object_space_->GetLiveObjects(), live_stack); 265 live_stack->Reset(); 266 timings_.EndSplit(); 267 // Recursively mark all the non-image bits set in the mark bitmap.
268 RecursiveMark(); 269} 270 271void MarkSweep::ReclaimPhase() { 272 base::TimingLogger::ScopedSplit split("ReclaimPhase", &timings_); 273 Thread* self = Thread::Current(); 274 275 if (!IsConcurrent()) { 276 base::TimingLogger::ScopedSplit split("ProcessReferences", &timings_); 277 ProcessReferences(self); 278 } else { 279 base::TimingLogger::ScopedSplit split("UnMarkAllocStack", &timings_); 280 accounting::ObjectStack* allocation_stack = GetHeap()->allocation_stack_.get(); 281 WriterMutexLock mu(self, *Locks::heap_bitmap_lock_); 282 // The allocation stack contains things allocated since the start of the GC. These may have been 283 // marked during this GC, meaning they won't be eligible for reclaiming in the next sticky GC. 284 // Remove these objects from the mark bitmaps so that they will be eligible for sticky 285 // collection. 286 // There is a race here which is safely handled. Another thread such as the hprof could 287 // have flushed the alloc stack after we resumed the threads. This is safe however, since 288 // resetting the allocation stack zeros it out with madvise. This means that we will either 289 // read NULLs or attempt to unmark a newly allocated object which will not be marked in the 290 // first place. 291 mirror::Object** end = allocation_stack->End(); 292 for (mirror::Object** it = allocation_stack->Begin(); it != end; ++it) { 293 const Object* obj = *it; 294 if (obj != NULL) { 295 UnMarkObjectNonNull(obj); 296 } 297 } 298 } 299 300 // Before freeing anything, let's verify the heap. 301 if (kIsDebugBuild) { 302 ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_); 303 VerifyImageRoots(); 304 } 305 timings_.StartSplit("PreSweepingGcVerification"); 306 heap_->PreSweepingGcVerification(this); 307 timings_.EndSplit(); 308 309 { 310 WriterMutexLock mu(self, *Locks::heap_bitmap_lock_); 311 312 // Reclaim unmarked objects. 313 Sweep(false); 314 315 // Swap the live and mark bitmaps for each space which we modified. This is an 316 // optimization that enables us to not clear live bits inside of the sweep. Only swaps unbound 317 // bitmaps. 318 timings_.StartSplit("SwapBitmaps"); 319 SwapBitmaps(); 320 timings_.EndSplit(); 321 322 // Unbind the live and mark bitmaps. 323 UnBindBitmaps(); 324 } 325} 326 327void MarkSweep::SetImmuneRange(Object* begin, Object* end) { 328 immune_begin_ = begin; 329 immune_end_ = end; 330} 331 332void MarkSweep::FindDefaultMarkBitmap() { 333 base::TimingLogger::ScopedSplit split("FindDefaultMarkBitmap", &timings_); 334 for (const auto& space : GetHeap()->GetContinuousSpaces()) { 335 if (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyAlwaysCollect) { 336 current_mark_bitmap_ = space->GetMarkBitmap(); 337 CHECK(current_mark_bitmap_ != NULL); 338 return; 339 } 340 } 341 GetHeap()->DumpSpaces(); 342 LOG(FATAL) << "Could not find a default mark bitmap"; 343} 344 345void MarkSweep::ExpandMarkStack() { 346 ResizeMarkStack(mark_stack_->Capacity() * 2); 347} 348 349void MarkSweep::ResizeMarkStack(size_t new_size) { 350 // Rare case, no need to have Thread::Current be a parameter. 351 if (UNLIKELY(mark_stack_->Size() < mark_stack_->Capacity())) { 352 // Someone else acquired the lock and expanded the mark stack before us.
353 return; 354 } 355 std::vector<Object*> temp(mark_stack_->Begin(), mark_stack_->End()); 356 CHECK_LE(mark_stack_->Size(), new_size); 357 mark_stack_->Resize(new_size); 358 for (const auto& obj : temp) { 359 mark_stack_->PushBack(obj); 360 } 361} 362 363inline void MarkSweep::MarkObjectNonNullParallel(const Object* obj) { 364 DCHECK(obj != NULL); 365 if (MarkObjectParallel(obj)) { 366 MutexLock mu(Thread::Current(), mark_stack_lock_); 367 if (UNLIKELY(mark_stack_->Size() >= mark_stack_->Capacity())) { 368 ExpandMarkStack(); 369 } 370 // The object must be pushed on to the mark stack. 371 mark_stack_->PushBack(const_cast<Object*>(obj)); 372 } 373} 374 375inline void MarkSweep::UnMarkObjectNonNull(const Object* obj) { 376 DCHECK(!IsImmune(obj)); 377 // Try to take advantage of locality of references within a space, failing this find the space 378 // the hard way. 379 accounting::SpaceBitmap* object_bitmap = current_mark_bitmap_; 380 if (UNLIKELY(!object_bitmap->HasAddress(obj))) { 381 accounting::SpaceBitmap* new_bitmap = heap_->GetMarkBitmap()->GetContinuousSpaceBitmap(obj); 382 if (LIKELY(new_bitmap != NULL)) { 383 object_bitmap = new_bitmap; 384 } else { 385 MarkLargeObject(obj, false); 386 return; 387 } 388 } 389 390 DCHECK(object_bitmap->HasAddress(obj)); 391 object_bitmap->Clear(obj); 392} 393 394inline void MarkSweep::MarkObjectNonNull(const Object* obj) { 395 DCHECK(obj != NULL); 396 397 if (IsImmune(obj)) { 398 DCHECK(IsMarked(obj)); 399 return; 400 } 401 402 // Try to take advantage of locality of references within a space, failing this find the space 403 // the hard way. 404 accounting::SpaceBitmap* object_bitmap = current_mark_bitmap_; 405 if (UNLIKELY(!object_bitmap->HasAddress(obj))) { 406 accounting::SpaceBitmap* new_bitmap = heap_->GetMarkBitmap()->GetContinuousSpaceBitmap(obj); 407 if (LIKELY(new_bitmap != NULL)) { 408 object_bitmap = new_bitmap; 409 } else { 410 MarkLargeObject(obj, true); 411 return; 412 } 413 } 414 415 // This object was not previously marked. 416 if (!object_bitmap->Test(obj)) { 417 object_bitmap->Set(obj); 418 // Lock is not needed but is here anyways to please annotalysis. 419 MutexLock mu(Thread::Current(), mark_stack_lock_); 420 if (UNLIKELY(mark_stack_->Size() >= mark_stack_->Capacity())) { 421 ExpandMarkStack(); 422 } 423 // The object must be pushed on to the mark stack. 424 mark_stack_->PushBack(const_cast<Object*>(obj)); 425 } 426} 427 428// Rare case, probably not worth inlining since it will increase instruction cache miss rate. 429bool MarkSweep::MarkLargeObject(const Object* obj, bool set) { 430 // TODO: support >1 discontinuous space. 
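 // Large objects live in the discontinuous large object space and are tracked in a SpaceSetMap rather than a SpaceBitmap; 'set' selects whether this call marks or unmarks the object.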
431 space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace(); 432 accounting::SpaceSetMap* large_objects = large_object_space->GetMarkObjects(); 433 if (kProfileLargeObjects) { 434 ++large_object_test_; 435 } 436 if (UNLIKELY(!large_objects->Test(obj))) { 437 if (!large_object_space->Contains(obj)) { 438 LOG(ERROR) << "Tried to mark " << obj << " not contained by any spaces"; 439 LOG(ERROR) << "Attempting to see if it's a bad root"; 440 VerifyRoots(); 441 LOG(FATAL) << "Can't mark bad root"; 442 } 443 if (kProfileLargeObjects) { 444 ++large_object_mark_; 445 } 446 if (set) { 447 large_objects->Set(obj); 448 } else { 449 large_objects->Clear(obj); 450 } 451 return true; 452 } 453 return false; 454} 455 456inline bool MarkSweep::MarkObjectParallel(const Object* obj) { 457 DCHECK(obj != NULL); 458 459 if (IsImmune(obj)) { 460 DCHECK(IsMarked(obj)); 461 return false; 462 } 463 464 // Try to take advantage of locality of references within a space, failing this find the space 465 // the hard way. 466 accounting::SpaceBitmap* object_bitmap = current_mark_bitmap_; 467 if (UNLIKELY(!object_bitmap->HasAddress(obj))) { 468 accounting::SpaceBitmap* new_bitmap = heap_->GetMarkBitmap()->GetContinuousSpaceBitmap(obj); 469 if (new_bitmap != NULL) { 470 object_bitmap = new_bitmap; 471 } else { 472 // TODO: Remove the Thread::Current here? 473 // TODO: Convert this to some kind of atomic marking? 474 MutexLock mu(Thread::Current(), large_object_lock_); 475 return MarkLargeObject(obj, true); 476 } 477 } 478 479 // Return true if the object was not previously marked. 480 return !object_bitmap->AtomicTestAndSet(obj); 481} 482 483// Used to mark objects when recursing. Recursion is done by moving 484// the finger across the bitmaps in address order and marking child 485// objects. Any newly-marked objects whose addresses are lower than 486// the finger won't be visited by the bitmap scan, so those objects 487// need to be added to the mark stack. 488inline void MarkSweep::MarkObject(const Object* obj) { 489 if (obj != NULL) { 490 MarkObjectNonNull(obj); 491 } 492} 493 494void MarkSweep::MarkRoot(const Object* obj) { 495 if (obj != NULL) { 496 MarkObjectNonNull(obj); 497 } 498} 499 500void MarkSweep::MarkRootParallelCallback(const Object* root, void* arg) { 501 DCHECK(root != NULL); 502 DCHECK(arg != NULL); 503 reinterpret_cast<MarkSweep*>(arg)->MarkObjectNonNullParallel(root); 504} 505 506void MarkSweep::MarkObjectCallback(const Object* root, void* arg) { 507 DCHECK(root != NULL); 508 DCHECK(arg != NULL); 509 MarkSweep* mark_sweep = reinterpret_cast<MarkSweep*>(arg); 510 mark_sweep->MarkObjectNonNull(root); 511} 512 513void MarkSweep::ReMarkObjectVisitor(const Object* root, void* arg) { 514 DCHECK(root != NULL); 515 DCHECK(arg != NULL); 516 MarkSweep* mark_sweep = reinterpret_cast<MarkSweep*>(arg); 517 mark_sweep->MarkObjectNonNull(root); 518} 519 520void MarkSweep::VerifyRootCallback(const Object* root, void* arg, size_t vreg, 521 const StackVisitor* visitor) { 522 reinterpret_cast<MarkSweep*>(arg)->VerifyRoot(root, vreg, visitor); 523} 524 525void MarkSweep::VerifyRoot(const Object* root, size_t vreg, const StackVisitor* visitor) { 526 // See if the root is on any space bitmap.
527 if (GetHeap()->GetLiveBitmap()->GetContinuousSpaceBitmap(root) == NULL) { 528 space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace(); 529 if (!large_object_space->Contains(root)) { 530 LOG(ERROR) << "Found invalid root: " << root; 531 if (visitor != NULL) { 532 LOG(ERROR) << visitor->DescribeLocation() << " in VReg: " << vreg; 533 } 534 } 535 } 536} 537 538void MarkSweep::VerifyRoots() { 539 Runtime::Current()->GetThreadList()->VerifyRoots(VerifyRootCallback, this); 540} 541 542// Marks all objects in the root set. 543void MarkSweep::MarkRoots() { 544 timings_.StartSplit("MarkRoots"); 545 Runtime::Current()->VisitNonConcurrentRoots(MarkObjectCallback, this); 546 timings_.EndSplit(); 547} 548 549void MarkSweep::MarkNonThreadRoots() { 550 timings_.StartSplit("MarkNonThreadRoots"); 551 Runtime::Current()->VisitNonThreadRoots(MarkObjectCallback, this); 552 timings_.EndSplit(); 553} 554 555void MarkSweep::MarkConcurrentRoots() { 556 timings_.StartSplit("MarkConcurrentRoots"); 557 // Visit all runtime roots and clear dirty flags. 558 Runtime::Current()->VisitConcurrentRoots(MarkObjectCallback, this, false, true); 559 timings_.EndSplit(); 560} 561 562void MarkSweep::CheckObject(const Object* obj) { 563 DCHECK(obj != NULL); 564 VisitObjectReferences(obj, [this](const Object* obj, const Object* ref, MemberOffset offset, 565 bool is_static) NO_THREAD_SAFETY_ANALYSIS { 566 Locks::heap_bitmap_lock_->AssertSharedHeld(Thread::Current()); 567 CheckReference(obj, ref, offset, is_static); 568 }); 569} 570 571void MarkSweep::VerifyImageRootVisitor(Object* root, void* arg) { 572 DCHECK(root != NULL); 573 DCHECK(arg != NULL); 574 MarkSweep* mark_sweep = reinterpret_cast<MarkSweep*>(arg); 575 DCHECK(mark_sweep->heap_->GetMarkBitmap()->Test(root)); 576 mark_sweep->CheckObject(root); 577} 578 579void MarkSweep::BindLiveToMarkBitmap(space::ContinuousSpace* space) { 580 CHECK(space->IsDlMallocSpace()); 581 space::DlMallocSpace* alloc_space = space->AsDlMallocSpace(); 582 accounting::SpaceBitmap* live_bitmap = space->GetLiveBitmap(); 583 accounting::SpaceBitmap* mark_bitmap = alloc_space->mark_bitmap_.release(); 584 GetHeap()->GetMarkBitmap()->ReplaceBitmap(mark_bitmap, live_bitmap); 585 alloc_space->temp_bitmap_.reset(mark_bitmap); 586 alloc_space->mark_bitmap_.reset(live_bitmap); 587} 588 589class ScanObjectVisitor { 590 public: 591 explicit ScanObjectVisitor(MarkSweep* const mark_sweep) ALWAYS_INLINE 592 : mark_sweep_(mark_sweep) {} 593 594 // TODO: Fixme when anotatalysis works with visitors. 595 void operator()(const Object* obj) const ALWAYS_INLINE NO_THREAD_SAFETY_ANALYSIS { 596 if (kCheckLocks) { 597 Locks::mutator_lock_->AssertSharedHeld(Thread::Current()); 598 Locks::heap_bitmap_lock_->AssertExclusiveHeld(Thread::Current()); 599 } 600 mark_sweep_->ScanObject(obj); 601 } 602 603 private: 604 MarkSweep* const mark_sweep_; 605}; 606 607template <bool kUseFinger = false> 608class MarkStackTask : public Task { 609 public: 610 MarkStackTask(ThreadPool* thread_pool, MarkSweep* mark_sweep, size_t mark_stack_size, 611 const Object** mark_stack) 612 : mark_sweep_(mark_sweep), 613 thread_pool_(thread_pool), 614 mark_stack_pos_(mark_stack_size) { 615 // We may have to copy part of an existing mark stack when another mark stack overflows. 616 if (mark_stack_size != 0) { 617 DCHECK(mark_stack != NULL); 618 // TODO: Check performance? 
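 // Copy the caller-supplied entries into this task's fixed-size local mark stack.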
619 std::copy(mark_stack, mark_stack + mark_stack_size, mark_stack_); 620 } 621 if (kCountTasks) { 622 ++mark_sweep_->work_chunks_created_; 623 } 624 } 625 626 static const size_t kMaxSize = 1 * KB; 627 628 protected: 629 class ScanObjectParallelVisitor { 630 public: 631 explicit ScanObjectParallelVisitor(MarkStackTask<kUseFinger>* chunk_task) ALWAYS_INLINE 632 : chunk_task_(chunk_task) {} 633 634 void operator()(const Object* obj) const { 635 MarkSweep* mark_sweep = chunk_task_->mark_sweep_; 636 mark_sweep->ScanObjectVisit(obj, 637 [mark_sweep, this](const Object* /* obj */, const Object* ref, 638 const MemberOffset& /* offset */, bool /* is_static */) ALWAYS_INLINE { 639 if (ref != nullptr && mark_sweep->MarkObjectParallel(ref)) { 640 if (kUseFinger) { 641 android_memory_barrier(); 642 if (reinterpret_cast<uintptr_t>(ref) >= 643 static_cast<uintptr_t>(mark_sweep->atomic_finger_)) { 644 return; 645 } 646 } 647 chunk_task_->MarkStackPush(ref); 648 } 649 }); 650 } 651 652 private: 653 MarkStackTask<kUseFinger>* const chunk_task_; 654 }; 655 656 virtual ~MarkStackTask() { 657 // Make sure that we have cleared our mark stack. 658 DCHECK_EQ(mark_stack_pos_, 0U); 659 if (kCountTasks) { 660 ++mark_sweep_->work_chunks_deleted_; 661 } 662 } 663 664 MarkSweep* const mark_sweep_; 665 ThreadPool* const thread_pool_; 666 // Thread local mark stack for this task. 667 const Object* mark_stack_[kMaxSize]; 668 // Mark stack position. 669 size_t mark_stack_pos_; 670 671 void MarkStackPush(const Object* obj) ALWAYS_INLINE { 672 if (UNLIKELY(mark_stack_pos_ == kMaxSize)) { 673 // Mark stack overflow, give 1/2 the stack to the thread pool as a new work task. 674 mark_stack_pos_ /= 2; 675 auto* task = new MarkStackTask(thread_pool_, mark_sweep_, kMaxSize - mark_stack_pos_, 676 mark_stack_ + mark_stack_pos_); 677 thread_pool_->AddTask(Thread::Current(), task); 678 } 679 DCHECK(obj != nullptr); 680 DCHECK(mark_stack_pos_ < kMaxSize); 681 mark_stack_[mark_stack_pos_++] = obj; 682 } 683 684 virtual void Finalize() { 685 delete this; 686 } 687 688 // Scans all of the objects 689 virtual void Run(Thread* self) { 690 ScanObjectParallelVisitor visitor(this); 691 // TODO: Tune this. 
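 // Pop a few objects into a small FIFO and prefetch them, so their memory is likely in cache by the time they are scanned.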
692 static const size_t kFifoSize = 4; 693 BoundedFifoPowerOfTwo<const Object*, kFifoSize> prefetch_fifo; 694 for (;;) { 695 const Object* obj = NULL; 696 if (kUseMarkStackPrefetch) { 697 while (mark_stack_pos_ != 0 && prefetch_fifo.size() < kFifoSize) { 698 const Object* obj = mark_stack_[--mark_stack_pos_]; 699 DCHECK(obj != NULL); 700 __builtin_prefetch(obj); 701 prefetch_fifo.push_back(obj); 702 } 703 if (UNLIKELY(prefetch_fifo.empty())) { 704 break; 705 } 706 obj = prefetch_fifo.front(); 707 prefetch_fifo.pop_front(); 708 } else { 709 if (UNLIKELY(mark_stack_pos_ == 0)) { 710 break; 711 } 712 obj = mark_stack_[--mark_stack_pos_]; 713 } 714 DCHECK(obj != NULL); 715 visitor(obj); 716 } 717 } 718}; 719 720class CardScanTask : public MarkStackTask<false> { 721 public: 722 CardScanTask(ThreadPool* thread_pool, MarkSweep* mark_sweep, accounting::SpaceBitmap* bitmap, 723 byte* begin, byte* end, byte minimum_age, size_t mark_stack_size, 724 const Object** mark_stack_obj) 725 : MarkStackTask<false>(thread_pool, mark_sweep, mark_stack_size, mark_stack_obj), 726 bitmap_(bitmap), 727 begin_(begin), 728 end_(end), 729 minimum_age_(minimum_age) { 730 } 731 732 protected: 733 accounting::SpaceBitmap* const bitmap_; 734 byte* const begin_; 735 byte* const end_; 736 const byte minimum_age_; 737 738 virtual void Finalize() { 739 delete this; 740 } 741 742 virtual void Run(Thread* self) NO_THREAD_SAFETY_ANALYSIS { 743 ScanObjectParallelVisitor visitor(this); 744 accounting::CardTable* card_table = mark_sweep_->GetHeap()->GetCardTable(); 745 card_table->Scan(bitmap_, begin_, end_, visitor, minimum_age_); 746 // Finish by emptying our local mark stack. 747 MarkStackTask::Run(self); 748 } 749}; 750 751size_t MarkSweep::GetThreadCount(bool paused) const { 752 if (heap_->GetThreadPool() == nullptr || !heap_->CareAboutPauseTimes()) { 753 return 0; 754 } 755 if (paused) { 756 return heap_->GetParallelGCThreadCount() + 1; 757 } else { 758 return heap_->GetConcGCThreadCount() + 1; 759 } 760} 761 762void MarkSweep::ScanGrayObjects(bool paused, byte minimum_age) { 763 accounting::CardTable* card_table = GetHeap()->GetCardTable(); 764 ThreadPool* thread_pool = GetHeap()->GetThreadPool(); 765 size_t thread_count = GetThreadCount(paused); 766 // The parallel version with only one thread is faster for card scanning, TODO: fix. 767 if (kParallelCardScan && thread_count > 0) { 768 Thread* self = Thread::Current(); 769 // Can't have a different split for each space since multiple spaces can have their cards being 770 // scanned at the same time. 771 timings_.StartSplit(paused ? "(Paused)ScanGrayObjects" : "ScanGrayObjects"); 772 // Try to take some of the mark stack since we can pass this off to the worker tasks. 773 const Object** mark_stack_begin = const_cast<const Object**>(mark_stack_->Begin()); 774 const Object** mark_stack_end = const_cast<const Object**>(mark_stack_->End()); 775 const size_t mark_stack_size = mark_stack_end - mark_stack_begin; 776 // Estimated number of work tasks we will create. 
777 const size_t mark_stack_tasks = GetHeap()->GetContinuousSpaces().size() * thread_count; 778 DCHECK_NE(mark_stack_tasks, 0U); 779 const size_t mark_stack_delta = std::min(CardScanTask::kMaxSize / 2, 780 mark_stack_size / mark_stack_tasks + 1); 781 for (const auto& space : GetHeap()->GetContinuousSpaces()) { 782 byte* card_begin = space->Begin(); 783 byte* card_end = space->End(); 784 // Calculate how many bytes of heap we will scan. 785 const size_t address_range = card_end - card_begin; 786 // Calculate how much address range each task gets. 787 const size_t card_delta = RoundUp(address_range / thread_count + 1, 788 accounting::CardTable::kCardSize); 789 // Create the worker tasks for this space. 790 while (card_begin != card_end) { 791 // Add a range of cards. 792 size_t addr_remaining = card_end - card_begin; 793 size_t card_increment = std::min(card_delta, addr_remaining); 794 // Take from the back of the mark stack. 795 size_t mark_stack_remaining = mark_stack_end - mark_stack_begin; 796 size_t mark_stack_increment = std::min(mark_stack_delta, mark_stack_remaining); 797 mark_stack_end -= mark_stack_increment; 798 mark_stack_->PopBackCount(static_cast<int32_t>(mark_stack_increment)); 799 DCHECK_EQ(mark_stack_end, mark_stack_->End()); 800 // Add the new task to the thread pool. 801 auto* task = new CardScanTask(thread_pool, this, space->GetMarkBitmap(), card_begin, 802 card_begin + card_increment, minimum_age, 803 mark_stack_increment, mark_stack_end); 804 thread_pool->AddTask(self, task); 805 card_begin += card_increment; 806 } 807 } 808 thread_pool->SetMaxActiveWorkers(thread_count - 1); 809 thread_pool->StartWorkers(self); 810 thread_pool->Wait(self, true, true); 811 thread_pool->StopWorkers(self); 812 timings_.EndSplit(); 813 } else { 814 for (const auto& space : GetHeap()->GetContinuousSpaces()) { 815 // Image spaces are handled properly since live == marked for them. 816 switch (space->GetGcRetentionPolicy()) { 817 case space::kGcRetentionPolicyNeverCollect: 818 timings_.StartSplit(paused ? "(Paused)ScanGrayImageSpaceObjects" : 819 "ScanGrayImageSpaceObjects"); 820 break; 821 case space::kGcRetentionPolicyFullCollect: 822 timings_.StartSplit(paused ? "(Paused)ScanGrayZygoteSpaceObjects" : 823 "ScanGrayZygoteSpaceObjects"); 824 break; 825 case space::kGcRetentionPolicyAlwaysCollect: 826 timings_.StartSplit(paused ?
"(Paused)ScanGrayAllocSpaceObjects" : 827 "ScanGrayAllocSpaceObjects"); 828 break; 829 } 830 ScanObjectVisitor visitor(this); 831 card_table->Scan(space->GetMarkBitmap(), space->Begin(), space->End(), visitor, minimum_age); 832 timings_.EndSplit(); 833 } 834 } 835} 836 837void MarkSweep::VerifyImageRoots() { 838 // VerifyImageRoots ensures that all the references inside the image space point 839 // to objects which are either in the image space or marked objects in the alloc 840 // space. 841 timings_.StartSplit("VerifyImageRoots"); 842 for (const auto& space : GetHeap()->GetContinuousSpaces()) { 843 if (space->IsImageSpace()) { 844 space::ImageSpace* image_space = space->AsImageSpace(); 845 uintptr_t begin = reinterpret_cast<uintptr_t>(image_space->Begin()); 846 uintptr_t end = reinterpret_cast<uintptr_t>(image_space->End()); 847 accounting::SpaceBitmap* live_bitmap = image_space->GetLiveBitmap(); 848 DCHECK(live_bitmap != NULL); 849 live_bitmap->VisitMarkedRange(begin, end, [this](const Object* obj) { 850 if (kCheckLocks) { 851 Locks::heap_bitmap_lock_->AssertSharedHeld(Thread::Current()); 852 } 853 DCHECK(obj != NULL); 854 CheckObject(obj); 855 }); 856 } 857 } 858 timings_.EndSplit(); 859} 860 861class RecursiveMarkTask : public MarkStackTask<false> { 862 public: 863 RecursiveMarkTask(ThreadPool* thread_pool, MarkSweep* mark_sweep, 864 accounting::SpaceBitmap* bitmap, uintptr_t begin, uintptr_t end) 865 : MarkStackTask<false>(thread_pool, mark_sweep, 0, NULL), 866 bitmap_(bitmap), 867 begin_(begin), 868 end_(end) { 869 } 870 871 protected: 872 accounting::SpaceBitmap* const bitmap_; 873 const uintptr_t begin_; 874 const uintptr_t end_; 875 876 virtual void Finalize() { 877 delete this; 878 } 879 880 // Scans all of the objects 881 virtual void Run(Thread* self) NO_THREAD_SAFETY_ANALYSIS { 882 ScanObjectParallelVisitor visitor(this); 883 bitmap_->VisitMarkedRange(begin_, end_, visitor); 884 // Finish by emptying our local mark stack. 885 MarkStackTask::Run(self); 886 } 887}; 888 889// Populates the mark stack based on the set of marked objects and 890// recursively marks until the mark stack is emptied. 891void MarkSweep::RecursiveMark() { 892 base::TimingLogger::ScopedSplit split("RecursiveMark", &timings_); 893 // RecursiveMark will build the lists of known instances of the Reference classes. 894 // See DelayReferenceReferent for details. 895 CHECK(soft_reference_list_ == NULL); 896 CHECK(weak_reference_list_ == NULL); 897 CHECK(finalizer_reference_list_ == NULL); 898 CHECK(phantom_reference_list_ == NULL); 899 CHECK(cleared_reference_list_ == NULL); 900 901 if (kUseRecursiveMark) { 902 const bool partial = GetGcType() == kGcTypePartial; 903 ScanObjectVisitor scan_visitor(this); 904 auto* self = Thread::Current(); 905 ThreadPool* thread_pool = heap_->GetThreadPool(); 906 size_t thread_count = GetThreadCount(false); 907 const bool parallel = kParallelRecursiveMark && thread_count > 1; 908 mark_stack_->Reset(); 909 for (const auto& space : GetHeap()->GetContinuousSpaces()) { 910 if ((space->GetGcRetentionPolicy() == space::kGcRetentionPolicyAlwaysCollect) || 911 (!partial && space->GetGcRetentionPolicy() == space::kGcRetentionPolicyFullCollect)) { 912 current_mark_bitmap_ = space->GetMarkBitmap(); 913 if (current_mark_bitmap_ == NULL) { 914 GetHeap()->DumpSpaces(); 915 LOG(FATAL) << "invalid bitmap"; 916 } 917 if (parallel) { 918 // We will use the mark stack in the future. 919 // CHECK(mark_stack_->IsEmpty()); 920 // This function does not handle heap end increasing, so we must use the space end.
921 uintptr_t begin = reinterpret_cast<uintptr_t>(space->Begin()); 922 uintptr_t end = reinterpret_cast<uintptr_t>(space->End()); 923 atomic_finger_ = static_cast<int32_t>(0xFFFFFFFF); 924 925 // Create a few worker tasks. 926 const size_t n = thread_count * 2; 927 while (begin != end) { 928 uintptr_t start = begin; 929 uintptr_t delta = (end - begin) / n; 930 delta = RoundUp(delta, KB); 931 if (delta < 16 * KB) delta = end - begin; 932 begin += delta; 933 auto* task = new RecursiveMarkTask(thread_pool, this, current_mark_bitmap_, start, 934 begin); 935 thread_pool->AddTask(self, task); 936 } 937 thread_pool->SetMaxActiveWorkers(thread_count - 1); 938 thread_pool->StartWorkers(self); 939 thread_pool->Wait(self, true, true); 940 thread_pool->StopWorkers(self); 941 } else { 942 // This function does not handle heap end increasing, so we must use the space end. 943 uintptr_t begin = reinterpret_cast<uintptr_t>(space->Begin()); 944 uintptr_t end = reinterpret_cast<uintptr_t>(space->End()); 945 current_mark_bitmap_->VisitMarkedRange(begin, end, scan_visitor); 946 } 947 } 948 } 949 } 950 ProcessMarkStack(false); 951} 952 953bool MarkSweep::IsMarkedCallback(const Object* object, void* arg) { 954 return 955 reinterpret_cast<MarkSweep*>(arg)->IsMarked(object) || 956 !reinterpret_cast<MarkSweep*>(arg)->GetHeap()->GetLiveBitmap()->Test(object); 957} 958 959void MarkSweep::RecursiveMarkDirtyObjects(bool paused, byte minimum_age) { 960 ScanGrayObjects(paused, minimum_age); 961 ProcessMarkStack(paused); 962} 963 964void MarkSweep::ReMarkRoots() { 965 timings_.StartSplit("ReMarkRoots"); 966 Runtime::Current()->VisitRoots(ReMarkObjectVisitor, this, true, true); 967 timings_.EndSplit(); 968} 969 970void MarkSweep::SweepJniWeakGlobals(IsMarkedTester is_marked, void* arg) { 971 JavaVMExt* vm = Runtime::Current()->GetJavaVM(); 972 WriterMutexLock mu(Thread::Current(), vm->weak_globals_lock); 973 for (const Object** entry : vm->weak_globals) { 974 if (!is_marked(*entry, arg)) { 975 *entry = kClearedJniWeakGlobal; 976 } 977 } 978} 979 980struct ArrayMarkedCheck { 981 accounting::ObjectStack* live_stack; 982 MarkSweep* mark_sweep; 983}; 984 985// Either marked or not live. 986bool MarkSweep::IsMarkedArrayCallback(const Object* object, void* arg) { 987 ArrayMarkedCheck* array_check = reinterpret_cast<ArrayMarkedCheck*>(arg); 988 if (array_check->mark_sweep->IsMarked(object)) { 989 return true; 990 } 991 accounting::ObjectStack* live_stack = array_check->live_stack; 992 return std::find(live_stack->Begin(), live_stack->End(), object) == live_stack->End(); 993} 994 995void MarkSweep::SweepSystemWeaksArray(accounting::ObjectStack* allocations) { 996 Runtime* runtime = Runtime::Current(); 997 // The callbacks check 998 // !is_marked where is_marked is the callback but we want 999 // !IsMarked && IsLive 1000 // So compute !(!IsMarked && IsLive) which is equal to (IsMarked || !IsLive). 1001 // Or for swapped (IsLive || !IsMarked). 
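 // The system weaks swept here are the intern table, the monitor list, and the JNI weak globals.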
1002 1003 timings_.StartSplit("SweepSystemWeaksArray"); 1004 ArrayMarkedCheck visitor; 1005 visitor.live_stack = allocations; 1006 visitor.mark_sweep = this; 1007 runtime->GetInternTable()->SweepInternTableWeaks(IsMarkedArrayCallback, &visitor); 1008 runtime->GetMonitorList()->SweepMonitorList(IsMarkedArrayCallback, &visitor); 1009 SweepJniWeakGlobals(IsMarkedArrayCallback, &visitor); 1010 timings_.EndSplit(); 1011} 1012 1013void MarkSweep::SweepSystemWeaks() { 1014 Runtime* runtime = Runtime::Current(); 1015 // The callbacks check 1016 // !is_marked where is_marked is the callback but we want 1017 // !IsMarked && IsLive 1018 // So compute !(!IsMarked && IsLive) which is equal to (IsMarked || !IsLive). 1019 // Or for swapped (IsLive || !IsMarked). 1020 timings_.StartSplit("SweepSystemWeaks"); 1021 runtime->GetInternTable()->SweepInternTableWeaks(IsMarkedCallback, this); 1022 runtime->GetMonitorList()->SweepMonitorList(IsMarkedCallback, this); 1023 SweepJniWeakGlobals(IsMarkedCallback, this); 1024 timings_.EndSplit(); 1025} 1026 1027bool MarkSweep::VerifyIsLiveCallback(const Object* obj, void* arg) { 1028 reinterpret_cast<MarkSweep*>(arg)->VerifyIsLive(obj); 1029 // We don't actually want to sweep the object, so lets return "marked" 1030 return true; 1031} 1032 1033void MarkSweep::VerifyIsLive(const Object* obj) { 1034 Heap* heap = GetHeap(); 1035 if (!heap->GetLiveBitmap()->Test(obj)) { 1036 space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace(); 1037 if (!large_object_space->GetLiveObjects()->Test(obj)) { 1038 if (std::find(heap->allocation_stack_->Begin(), heap->allocation_stack_->End(), obj) == 1039 heap->allocation_stack_->End()) { 1040 // Object not found! 1041 heap->DumpSpaces(); 1042 LOG(FATAL) << "Found dead object " << obj; 1043 } 1044 } 1045 } 1046} 1047 1048void MarkSweep::VerifySystemWeaks() { 1049 Runtime* runtime = Runtime::Current(); 1050 // Verify system weaks, uses a special IsMarked callback which always returns true. 1051 runtime->GetInternTable()->SweepInternTableWeaks(VerifyIsLiveCallback, this); 1052 runtime->GetMonitorList()->SweepMonitorList(VerifyIsLiveCallback, this); 1053 1054 JavaVMExt* vm = runtime->GetJavaVM(); 1055 ReaderMutexLock mu(Thread::Current(), vm->weak_globals_lock); 1056 for (const Object** entry : vm->weak_globals) { 1057 VerifyIsLive(*entry); 1058 } 1059} 1060 1061struct SweepCallbackContext { 1062 MarkSweep* mark_sweep; 1063 space::AllocSpace* space; 1064 Thread* self; 1065}; 1066 1067class CheckpointMarkThreadRoots : public Closure { 1068 public: 1069 explicit CheckpointMarkThreadRoots(MarkSweep* mark_sweep) : mark_sweep_(mark_sweep) {} 1070 1071 virtual void Run(Thread* thread) NO_THREAD_SAFETY_ANALYSIS { 1072 // Note: self is not necessarily equal to thread since thread may be suspended. 
1073 Thread* self = Thread::Current(); 1074 CHECK(thread == self || thread->IsSuspended() || thread->GetState() == kWaitingPerformingGc) 1075 << thread->GetState() << " thread " << thread << " self " << self; 1076 thread->VisitRoots(MarkSweep::MarkRootParallelCallback, mark_sweep_); 1077 mark_sweep_->GetBarrier().Pass(self); 1078 } 1079 1080 private: 1081 MarkSweep* mark_sweep_; 1082}; 1083 1084void MarkSweep::MarkRootsCheckpoint(Thread* self) { 1085 CheckpointMarkThreadRoots check_point(this); 1086 timings_.StartSplit("MarkRootsCheckpoint"); 1087 ThreadList* thread_list = Runtime::Current()->GetThreadList(); 1088 // Request that the checkpoint be run on all threads, returning a count of the threads that must 1089 // run through the barrier, including self. 1090 size_t barrier_count = thread_list->RunCheckpoint(&check_point); 1091 // Release locks then wait for all mutator threads to pass the barrier. 1092 // TODO: optimize to not release locks when there are no threads to wait for. 1093 Locks::heap_bitmap_lock_->ExclusiveUnlock(self); 1094 Locks::mutator_lock_->SharedUnlock(self); 1095 ThreadState old_state = self->SetState(kWaitingForCheckPointsToRun); 1096 CHECK_EQ(old_state, kWaitingPerformingGc); 1097 gc_barrier_->Increment(self, barrier_count); 1098 self->SetState(kWaitingPerformingGc); 1099 Locks::mutator_lock_->SharedLock(self); 1100 Locks::heap_bitmap_lock_->ExclusiveLock(self); 1101 timings_.EndSplit(); 1102} 1103 1104void MarkSweep::SweepCallback(size_t num_ptrs, Object** ptrs, void* arg) { 1105 SweepCallbackContext* context = static_cast<SweepCallbackContext*>(arg); 1106 MarkSweep* mark_sweep = context->mark_sweep; 1107 Heap* heap = mark_sweep->GetHeap(); 1108 space::AllocSpace* space = context->space; 1109 Thread* self = context->self; 1110 Locks::heap_bitmap_lock_->AssertExclusiveHeld(self); 1111 // Use a bulk free that merges consecutive objects before freeing, or free per object? 1112 // Documentation suggests better free performance with merging, but this may be at the expense 1113 // of allocation. 1114 size_t freed_objects = num_ptrs; 1115 // AllocSpace::FreeList clears the value in ptrs, so perform after clearing the live bit. 1116 size_t freed_bytes = space->FreeList(self, num_ptrs, ptrs); 1117 heap->RecordFree(freed_objects, freed_bytes); 1118 mark_sweep->freed_objects_.fetch_add(freed_objects); 1119 mark_sweep->freed_bytes_.fetch_add(freed_bytes); 1120} 1121 1122void MarkSweep::ZygoteSweepCallback(size_t num_ptrs, Object** ptrs, void* arg) { 1123 SweepCallbackContext* context = static_cast<SweepCallbackContext*>(arg); 1124 Locks::heap_bitmap_lock_->AssertExclusiveHeld(context->self); 1125 Heap* heap = context->mark_sweep->GetHeap(); 1126 // We don't free any actual memory to avoid dirtying the shared zygote pages. 1127 for (size_t i = 0; i < num_ptrs; ++i) { 1128 Object* obj = static_cast<Object*>(ptrs[i]); 1129 heap->GetLiveBitmap()->Clear(obj); 1130 heap->GetCardTable()->MarkCard(obj); 1131 } 1132} 1133 1134void MarkSweep::SweepArray(accounting::ObjectStack* allocations, bool swap_bitmaps) { 1135 space::DlMallocSpace* space = heap_->GetAllocSpace(); 1136 1137 // If we don't swap bitmaps then newly allocated Weaks go into the live bitmap but not mark 1138 // bitmap, resulting in occasional frees of Weaks which are still in use. 1139 SweepSystemWeaksArray(allocations); 1140 1141 timings_.StartSplit("SweepArray"); 1142 // Newly allocated objects MUST be in the alloc space and those are the only objects which we are 1143 // going to free.
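 // Unmarked alloc space objects are batched and freed in chunks of kSweepArrayChunkFreeSize; unmarked large objects are freed individually.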
1144 accounting::SpaceBitmap* live_bitmap = space->GetLiveBitmap(); 1145 accounting::SpaceBitmap* mark_bitmap = space->GetMarkBitmap(); 1146 space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace(); 1147 accounting::SpaceSetMap* large_live_objects = large_object_space->GetLiveObjects(); 1148 accounting::SpaceSetMap* large_mark_objects = large_object_space->GetMarkObjects(); 1149 if (swap_bitmaps) { 1150 std::swap(live_bitmap, mark_bitmap); 1151 std::swap(large_live_objects, large_mark_objects); 1152 } 1153 1154 size_t freed_bytes = 0; 1155 size_t freed_large_object_bytes = 0; 1156 size_t freed_objects = 0; 1157 size_t freed_large_objects = 0; 1158 size_t count = allocations->Size(); 1159 Object** objects = const_cast<Object**>(allocations->Begin()); 1160 Object** out = objects; 1161 Object** objects_to_chunk_free = out; 1162 1163 // Empty the allocation stack. 1164 Thread* self = Thread::Current(); 1165 for (size_t i = 0; i < count; ++i) { 1166 Object* obj = objects[i]; 1167 // There should only be objects in the AllocSpace/LargeObjectSpace in the allocation stack. 1168 if (LIKELY(mark_bitmap->HasAddress(obj))) { 1169 if (!mark_bitmap->Test(obj)) { 1170 // Don't bother un-marking since we clear the mark bitmap anyways. 1171 *(out++) = obj; 1172 // Free objects in chunks. 1173 DCHECK_GE(out, objects_to_chunk_free); 1174 DCHECK_LE(static_cast<size_t>(out - objects_to_chunk_free), kSweepArrayChunkFreeSize); 1175 if (static_cast<size_t>(out - objects_to_chunk_free) == kSweepArrayChunkFreeSize) { 1176 timings_.StartSplit("FreeList"); 1177 size_t chunk_freed_objects = out - objects_to_chunk_free; 1178 freed_objects += chunk_freed_objects; 1179 freed_bytes += space->FreeList(self, chunk_freed_objects, objects_to_chunk_free); 1180 objects_to_chunk_free = out; 1181 timings_.EndSplit(); 1182 } 1183 } 1184 } else if (!large_mark_objects->Test(obj)) { 1185 ++freed_large_objects; 1186 freed_large_object_bytes += large_object_space->Free(self, obj); 1187 } 1188 } 1189 // Free the remaining objects in chunks. 1190 DCHECK_GE(out, objects_to_chunk_free); 1191 DCHECK_LE(static_cast<size_t>(out - objects_to_chunk_free), kSweepArrayChunkFreeSize); 1192 if (out - objects_to_chunk_free > 0) { 1193 timings_.StartSplit("FreeList"); 1194 size_t chunk_freed_objects = out - objects_to_chunk_free; 1195 freed_objects += chunk_freed_objects; 1196 freed_bytes += space->FreeList(self, chunk_freed_objects, objects_to_chunk_free); 1197 timings_.EndSplit(); 1198 } 1199 CHECK_EQ(count, allocations->Size()); 1200 timings_.EndSplit(); 1201 1202 timings_.StartSplit("RecordFree"); 1203 VLOG(heap) << "Freed " << freed_objects << "/" << count 1204 << " objects with size " << PrettySize(freed_bytes); 1205 heap_->RecordFree(freed_objects + freed_large_objects, freed_bytes + freed_large_object_bytes); 1206 freed_objects_.fetch_add(freed_objects); 1207 freed_large_objects_.fetch_add(freed_large_objects); 1208 freed_bytes_.fetch_add(freed_bytes); 1209 freed_large_object_bytes_.fetch_add(freed_large_object_bytes); 1210 timings_.EndSplit(); 1211 1212 timings_.StartSplit("ResetStack"); 1213 allocations->Reset(); 1214 timings_.EndSplit(); 1215} 1216 1217void MarkSweep::Sweep(bool swap_bitmaps) { 1218 DCHECK(mark_stack_->IsEmpty()); 1219 base::TimingLogger::ScopedSplit split("Sweep", &timings_); 1220 1221 // If we don't swap bitmaps then newly allocated Weaks go into the live bitmap but not mark 1222 // bitmap, resulting in occasional frees of Weaks which are still in use.
1223 SweepSystemWeaks(); 1224 1225 const bool partial = (GetGcType() == kGcTypePartial); 1226 SweepCallbackContext scc; 1227 scc.mark_sweep = this; 1228 scc.self = Thread::Current(); 1229 for (const auto& space : GetHeap()->GetContinuousSpaces()) { 1230 // We always sweep always-collect spaces. 1231 bool sweep_space = (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyAlwaysCollect); 1232 if (!partial && !sweep_space) { 1233 // We sweep full-collect spaces when the GC isn't a partial GC (i.e. it's a full GC). 1234 sweep_space = (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyFullCollect); 1235 } 1236 if (sweep_space) { 1237 uintptr_t begin = reinterpret_cast<uintptr_t>(space->Begin()); 1238 uintptr_t end = reinterpret_cast<uintptr_t>(space->End()); 1239 scc.space = space->AsDlMallocSpace(); 1240 accounting::SpaceBitmap* live_bitmap = space->GetLiveBitmap(); 1241 accounting::SpaceBitmap* mark_bitmap = space->GetMarkBitmap(); 1242 if (swap_bitmaps) { 1243 std::swap(live_bitmap, mark_bitmap); 1244 } 1245 if (!space->IsZygoteSpace()) { 1246 base::TimingLogger::ScopedSplit split("SweepAllocSpace", &timings_); 1247 // Bitmaps are pre-swapped for optimization which enables sweeping with the heap unlocked. 1248 accounting::SpaceBitmap::SweepWalk(*live_bitmap, *mark_bitmap, begin, end, 1249 &SweepCallback, reinterpret_cast<void*>(&scc)); 1250 } else { 1251 base::TimingLogger::ScopedSplit split("SweepZygote", &timings_); 1252 // Zygote sweep takes care of dirtying cards and clearing live bits; it does not free actual 1253 // memory. 1254 accounting::SpaceBitmap::SweepWalk(*live_bitmap, *mark_bitmap, begin, end, 1255 &ZygoteSweepCallback, reinterpret_cast<void*>(&scc)); 1256 } 1257 } 1258 } 1259 1260 SweepLargeObjects(swap_bitmaps); 1261} 1262 1263void MarkSweep::SweepLargeObjects(bool swap_bitmaps) { 1264 base::TimingLogger::ScopedSplit split("SweepLargeObjects", &timings_); 1265 // Sweep large objects. 1266 space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace(); 1267 accounting::SpaceSetMap* large_live_objects = large_object_space->GetLiveObjects(); 1268 accounting::SpaceSetMap* large_mark_objects = large_object_space->GetMarkObjects(); 1269 if (swap_bitmaps) { 1270 std::swap(large_live_objects, large_mark_objects); 1271 } 1272 // O(n*log(n)) but hopefully there are not too many large objects. 1273 size_t freed_objects = 0; 1274 size_t freed_bytes = 0; 1275 Thread* self = Thread::Current(); 1276 for (const Object* obj : large_live_objects->GetObjects()) { 1277 if (!large_mark_objects->Test(obj)) { 1278 freed_bytes += large_object_space->Free(self, const_cast<Object*>(obj)); 1279 ++freed_objects; 1280 } 1281 } 1282 freed_large_objects_.fetch_add(freed_objects); 1283 freed_large_object_bytes_.fetch_add(freed_bytes); 1284 GetHeap()->RecordFree(freed_objects, freed_bytes); 1285} 1286 1287void MarkSweep::CheckReference(const Object* obj, const Object* ref, MemberOffset offset, bool is_static) { 1288 for (const auto& space : GetHeap()->GetContinuousSpaces()) { 1289 if (space->IsDlMallocSpace() && space->Contains(ref)) { 1290 DCHECK(IsMarked(obj)); 1291 1292 bool is_marked = IsMarked(ref); 1293 if (!is_marked) { 1294 LOG(INFO) << *space; 1295 LOG(WARNING) << (is_static ?
"Static ref'" : "Instance ref'") << PrettyTypeOf(ref) 1296 << "' (" << reinterpret_cast<const void*>(ref) << ") in '" << PrettyTypeOf(obj) 1297 << "' (" << reinterpret_cast<const void*>(obj) << ") at offset " 1298 << reinterpret_cast<void*>(offset.Int32Value()) << " wasn't marked"; 1299 1300 const Class* klass = is_static ? obj->AsClass() : obj->GetClass(); 1301 DCHECK(klass != NULL); 1302 const ObjectArray<ArtField>* fields = is_static ? klass->GetSFields() : klass->GetIFields(); 1303 DCHECK(fields != NULL); 1304 bool found = false; 1305 for (int32_t i = 0; i < fields->GetLength(); ++i) { 1306 const ArtField* cur = fields->Get(i); 1307 if (cur->GetOffset().Int32Value() == offset.Int32Value()) { 1308 LOG(WARNING) << "Field referencing the alloc space was " << PrettyField(cur); 1309 found = true; 1310 break; 1311 } 1312 } 1313 if (!found) { 1314 LOG(WARNING) << "Could not find field in object alloc space with offset " << offset.Int32Value(); 1315 } 1316 1317 bool obj_marked = heap_->GetCardTable()->IsDirty(obj); 1318 if (!obj_marked) { 1319 LOG(WARNING) << "Object '" << PrettyTypeOf(obj) << "' " 1320 << "(" << reinterpret_cast<const void*>(obj) << ") contains references to " 1321 << "the alloc space, but wasn't card marked"; 1322 } 1323 } 1324 } 1325 break; 1326 } 1327} 1328 1329// Process the "referent" field in a java.lang.ref.Reference. If the 1330// referent has not yet been marked, put it on the appropriate list in 1331// the heap for later processing. 1332void MarkSweep::DelayReferenceReferent(mirror::Class* klass, Object* obj) { 1333 DCHECK(klass != nullptr); 1334 DCHECK(klass->IsReferenceClass()); 1335 DCHECK(obj != NULL); 1336 Object* referent = heap_->GetReferenceReferent(obj); 1337 if (referent != NULL && !IsMarked(referent)) { 1338 if (kCountJavaLangRefs) { 1339 ++reference_count_; 1340 } 1341 Thread* self = Thread::Current(); 1342 // TODO: Remove these locks, and use atomic stacks for storing references? 1343 if (klass->IsSoftReferenceClass()) { 1344 MutexLock mu(self, *heap_->GetSoftRefQueueLock()); 1345 heap_->EnqueuePendingReference(obj, &soft_reference_list_); 1346 } else if (klass->IsWeakReferenceClass()) { 1347 MutexLock mu(self, *heap_->GetWeakRefQueueLock()); 1348 heap_->EnqueuePendingReference(obj, &weak_reference_list_); 1349 } else if (klass->IsFinalizerReferenceClass()) { 1350 MutexLock mu(self, *heap_->GetFinalizerRefQueueLock()); 1351 heap_->EnqueuePendingReference(obj, &finalizer_reference_list_); 1352 } else if (klass->IsPhantomReferenceClass()) { 1353 MutexLock mu(self, *heap_->GetPhantomRefQueueLock()); 1354 heap_->EnqueuePendingReference(obj, &phantom_reference_list_); 1355 } else { 1356 LOG(FATAL) << "Invalid reference type " << PrettyClass(klass) 1357 << " " << std::hex << klass->GetAccessFlags(); 1358 } 1359 } 1360} 1361 1362void MarkSweep::ScanRoot(const Object* obj) { 1363 ScanObject(obj); 1364} 1365 1366class MarkObjectVisitor { 1367 public: 1368 explicit MarkObjectVisitor(MarkSweep* const mark_sweep) ALWAYS_INLINE : mark_sweep_(mark_sweep) {} 1369 1370 // TODO: Fixme when anotatalysis works with visitors. 
1371 void operator()(const Object* /* obj */, const Object* ref, const MemberOffset& /* offset */, 1372 bool /* is_static */) const ALWAYS_INLINE 1373 NO_THREAD_SAFETY_ANALYSIS { 1374 if (kCheckLocks) { 1375 Locks::mutator_lock_->AssertSharedHeld(Thread::Current()); 1376 Locks::heap_bitmap_lock_->AssertExclusiveHeld(Thread::Current()); 1377 } 1378 mark_sweep_->MarkObject(ref); 1379 } 1380 1381 private: 1382 MarkSweep* const mark_sweep_; 1383}; 1384 1385// Scans an object reference. Determines the type of the reference 1386// and dispatches to a specialized scanning routine. 1387void MarkSweep::ScanObject(const Object* obj) { 1388 MarkObjectVisitor visitor(this); 1389 ScanObjectVisit(obj, visitor); 1390} 1391 1392void MarkSweep::ProcessMarkStackParallel(size_t thread_count) { 1393 Thread* self = Thread::Current(); 1394 ThreadPool* thread_pool = GetHeap()->GetThreadPool(); 1395 const size_t chunk_size = std::min(mark_stack_->Size() / thread_count + 1, 1396 static_cast<size_t>(MarkStackTask<false>::kMaxSize)); 1397 CHECK_GT(chunk_size, 0U); 1398 // Split the current mark stack up into work tasks. 1399 for (mirror::Object **it = mark_stack_->Begin(), **end = mark_stack_->End(); it < end; ) { 1400 const size_t delta = std::min(static_cast<size_t>(end - it), chunk_size); 1401 thread_pool->AddTask(self, new MarkStackTask<false>(thread_pool, this, delta, 1402 const_cast<const mirror::Object**>(it))); 1403 it += delta; 1404 } 1405 thread_pool->SetMaxActiveWorkers(thread_count - 1); 1406 thread_pool->StartWorkers(self); 1407 thread_pool->Wait(self, true, true); 1408 thread_pool->StopWorkers(self); 1409 mark_stack_->Reset(); 1410 CHECK_EQ(work_chunks_created_, work_chunks_deleted_) << " some of the work chunks were leaked"; 1411} 1412 1413// Scan anything that's on the mark stack. 1414void MarkSweep::ProcessMarkStack(bool paused) { 1415 timings_.StartSplit("ProcessMarkStack"); 1416 size_t thread_count = GetThreadCount(paused); 1417 if (kParallelProcessMarkStack && thread_count > 1 && 1418 mark_stack_->Size() >= kMinimumParallelMarkStackSize) { 1419 ProcessMarkStackParallel(thread_count); 1420 } else { 1421 // TODO: Tune this. 1422 static const size_t kFifoSize = 4; 1423 BoundedFifoPowerOfTwo<const Object*, kFifoSize> prefetch_fifo; 1424 for (;;) { 1425 const Object* obj = NULL; 1426 if (kUseMarkStackPrefetch) { 1427 while (!mark_stack_->IsEmpty() && prefetch_fifo.size() < kFifoSize) { 1428 const Object* obj = mark_stack_->PopBack(); 1429 DCHECK(obj != NULL); 1430 __builtin_prefetch(obj); 1431 prefetch_fifo.push_back(obj); 1432 } 1433 if (prefetch_fifo.empty()) { 1434 break; 1435 } 1436 obj = prefetch_fifo.front(); 1437 prefetch_fifo.pop_front(); 1438 } else { 1439 if (mark_stack_->IsEmpty()) { 1440 break; 1441 } 1442 obj = mark_stack_->PopBack(); 1443 } 1444 DCHECK(obj != NULL); 1445 ScanObject(obj); 1446 } 1447 } 1448 timings_.EndSplit(); 1449} 1450 1451// Walks the reference list marking any references subject to the 1452// reference clearing policy. References with a black referent are 1453// removed from the list. References with white referents biased 1454// toward saving are blackened and also removed from the list. 
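// Note that only every other white referent encountered is preserved (see the (++counter) & 1
// test below); the rest are queued for clearing.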
1455void MarkSweep::PreserveSomeSoftReferences(Object** list) { 1456 DCHECK(list != NULL); 1457 Object* clear = NULL; 1458 size_t counter = 0; 1459 1460 DCHECK(mark_stack_->IsEmpty()); 1461 1462 timings_.StartSplit("PreserveSomeSoftReferences"); 1463 while (*list != NULL) { 1464 Object* ref = heap_->DequeuePendingReference(list); 1465 Object* referent = heap_->GetReferenceReferent(ref); 1466 if (referent == NULL) { 1467 // Referent was cleared by the user during marking. 1468 continue; 1469 } 1470 bool is_marked = IsMarked(referent); 1471 if (!is_marked && ((++counter) & 1)) { 1472 // Referent is white and biased toward saving, mark it. 1473 MarkObject(referent); 1474 is_marked = true; 1475 } 1476 if (!is_marked) { 1477 // Referent is white, queue it for clearing. 1478 heap_->EnqueuePendingReference(ref, &clear); 1479 } 1480 } 1481 *list = clear; 1482 timings_.EndSplit(); 1483 1484 // Restart the mark with the newly black references added to the root set. 1485 ProcessMarkStack(true); 1486} 1487 1488inline bool MarkSweep::IsMarked(const Object* object) const 1489 SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) { 1490 if (IsImmune(object)) { 1491 return true; 1492 } 1493 DCHECK(current_mark_bitmap_ != NULL); 1494 if (current_mark_bitmap_->HasAddress(object)) { 1495 return current_mark_bitmap_->Test(object); 1496 } 1497 return heap_->GetMarkBitmap()->Test(object); 1498} 1499 1500 1501// Unlink the reference list, clearing reference objects with white 1502// referents. Cleared references registered to a reference queue are 1503// scheduled for appending by the heap worker thread. 1504void MarkSweep::ClearWhiteReferences(Object** list) { 1505 DCHECK(list != NULL); 1506 while (*list != NULL) { 1507 Object* ref = heap_->DequeuePendingReference(list); 1508 Object* referent = heap_->GetReferenceReferent(ref); 1509 if (referent != NULL && !IsMarked(referent)) { 1510 // Referent is white, clear it. 1511 heap_->ClearReferenceReferent(ref); 1512 if (heap_->IsEnqueuable(ref)) { 1513 heap_->EnqueueReference(ref, &cleared_reference_list_); 1514 } 1515 } 1516 } 1517 DCHECK(*list == NULL); 1518} 1519 1520// Enqueues finalizer references with white referents. White 1521// referents are blackened, moved to the zombie field, and the 1522// referent field is cleared. 1523void MarkSweep::EnqueueFinalizerReferences(Object** list) { 1524 DCHECK(list != NULL); 1525 timings_.StartSplit("EnqueueFinalizerReferences"); 1526 MemberOffset zombie_offset = heap_->GetFinalizerReferenceZombieOffset(); 1527 bool has_enqueued = false; 1528 while (*list != NULL) { 1529 Object* ref = heap_->DequeuePendingReference(list); 1530 Object* referent = heap_->GetReferenceReferent(ref); 1531 if (referent != NULL && !IsMarked(referent)) { 1532 MarkObject(referent); 1533 // If the referent is non-null the reference must be enqueuable. 1534 DCHECK(heap_->IsEnqueuable(ref)); 1535 ref->SetFieldObject(zombie_offset, referent, false); 1536 heap_->ClearReferenceReferent(ref); 1537 heap_->EnqueueReference(ref, &cleared_reference_list_); 1538 has_enqueued = true; 1539 } 1540 } 1541 timings_.EndSplit(); 1542 if (has_enqueued) { 1543 ProcessMarkStack(true); 1544 } 1545 DCHECK(*list == NULL); 1546} 1547 1548// Process reference class instances and schedule finalizations.
1549void MarkSweep::ProcessReferences(Object** soft_references, bool clear_soft, 1550 Object** weak_references, 1551 Object** finalizer_references, 1552 Object** phantom_references) { 1553 DCHECK(soft_references != NULL); 1554 DCHECK(weak_references != NULL); 1555 DCHECK(finalizer_references != NULL); 1556 DCHECK(phantom_references != NULL); 1557 1558 // Unless we are in the zygote or required to clear soft references 1559 // with white references, preserve some white referents. 1560 if (!clear_soft && !Runtime::Current()->IsZygote()) { 1561 PreserveSomeSoftReferences(soft_references); 1562 } 1563 1564 timings_.StartSplit("ProcessReferences"); 1565 // Clear all remaining soft and weak references with white 1566 // referents. 1567 ClearWhiteReferences(soft_references); 1568 ClearWhiteReferences(weak_references); 1569 timings_.EndSplit(); 1570 1571 // Preserve all white objects with finalize methods and schedule 1572 // them for finalization. 1573 EnqueueFinalizerReferences(finalizer_references); 1574 1575 timings_.StartSplit("ProcessReferences"); 1576 // Clear all f-reachable soft and weak references with white 1577 // referents. 1578 ClearWhiteReferences(soft_references); 1579 ClearWhiteReferences(weak_references); 1580 1581 // Clear all phantom references with white referents. 1582 ClearWhiteReferences(phantom_references); 1583 1584 // At this point all reference lists should be empty. 1585 DCHECK(*soft_references == NULL); 1586 DCHECK(*weak_references == NULL); 1587 DCHECK(*finalizer_references == NULL); 1588 DCHECK(*phantom_references == NULL); 1589 timings_.EndSplit(); 1590} 1591 1592void MarkSweep::UnBindBitmaps() { 1593 base::TimingLogger::ScopedSplit split("UnBindBitmaps", &timings_); 1594 for (const auto& space : GetHeap()->GetContinuousSpaces()) { 1595 if (space->IsDlMallocSpace()) { 1596 space::DlMallocSpace* alloc_space = space->AsDlMallocSpace(); 1597 if (alloc_space->temp_bitmap_.get() != NULL) { 1598 // At this point, the temp_bitmap holds our old mark bitmap. 1599 accounting::SpaceBitmap* new_bitmap = alloc_space->temp_bitmap_.release(); 1600 GetHeap()->GetMarkBitmap()->ReplaceBitmap(alloc_space->mark_bitmap_.get(), new_bitmap); 1601 CHECK_EQ(alloc_space->mark_bitmap_.release(), alloc_space->live_bitmap_.get()); 1602 alloc_space->mark_bitmap_.reset(new_bitmap); 1603 DCHECK(alloc_space->temp_bitmap_.get() == NULL); 1604 } 1605 } 1606 } 1607} 1608 1609void MarkSweep::FinishPhase() { 1610 base::TimingLogger::ScopedSplit split("FinishPhase", &timings_); 1611 // Can't enqueue references if we hold the mutator lock. 1612 Object* cleared_references = GetClearedReferences(); 1613 Heap* heap = GetHeap(); 1614 timings_.NewSplit("EnqueueClearedReferences"); 1615 heap->EnqueueClearedReferences(&cleared_references); 1616 1617 timings_.NewSplit("PostGcVerification"); 1618 heap->PostGcVerification(this); 1619 1620 timings_.NewSplit("GrowForUtilization"); 1621 heap->GrowForUtilization(GetGcType(), GetDurationNs()); 1622 1623 timings_.NewSplit("RequestHeapTrim"); 1624 heap->RequestHeapTrim(); 1625 1626 // Update the cumulative statistics 1627 total_time_ns_ += GetDurationNs(); 1628 total_paused_time_ns_ += std::accumulate(GetPauseTimes().begin(), GetPauseTimes().end(), 0, 1629 std::plus<uint64_t>()); 1630 total_freed_objects_ += GetFreedObjects() + GetFreedLargeObjects(); 1631 total_freed_bytes_ += GetFreedBytes() + GetFreedLargeObjectBytes(); 1632 1633 // Ensure that the mark stack is empty. 
1634 CHECK(mark_stack_->IsEmpty()); 1635 1636 if (kCountScannedTypes) { 1637 VLOG(gc) << "MarkSweep scanned classes=" << class_count_ << " arrays=" << array_count_ 1638 << " other=" << other_count_; 1639 } 1640 1641 if (kCountTasks) { 1642 VLOG(gc) << "Total number of work chunks allocated: " << work_chunks_created_; 1643 } 1644 1645 if (kMeasureOverhead) { 1646 VLOG(gc) << "Overhead time " << PrettyDuration(overhead_time_); 1647 } 1648 1649 if (kProfileLargeObjects) { 1650 VLOG(gc) << "Large objects tested " << large_object_test_ << " marked " << large_object_mark_; 1651 } 1652 1653 if (kCountClassesMarked) { 1654 VLOG(gc) << "Classes marked " << classes_marked_; 1655 } 1656 1657 if (kCountJavaLangRefs) { 1658 VLOG(gc) << "References scanned " << reference_count_; 1659 } 1660 1661 // Update the cumulative loggers. 1662 cumulative_timings_.Start(); 1663 cumulative_timings_.AddLogger(timings_); 1664 cumulative_timings_.End(); 1665 1666 // Clear all of the spaces' mark bitmaps. 1667 for (const auto& space : GetHeap()->GetContinuousSpaces()) { 1668 if (space->GetGcRetentionPolicy() != space::kGcRetentionPolicyNeverCollect) { 1669 space->GetMarkBitmap()->Clear(); 1670 } 1671 } 1672 mark_stack_->Reset(); 1673 1674 // Reset the marked large objects. 1675 space::LargeObjectSpace* large_objects = GetHeap()->GetLargeObjectsSpace(); 1676 large_objects->GetMarkObjects()->Clear(); 1677} 1678 1679} // namespace collector 1680} // namespace gc 1681} // namespace art 1682