mark_sweep.cc revision 720ef7680573c1afd12f99f02eee3045daee5168
/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "mark_sweep.h"

#include <functional>
#include <numeric>
#include <climits>
#include <vector>

#include "base/bounded_fifo.h"
#include "base/logging.h"
#include "base/macros.h"
#include "base/mutex-inl.h"
#include "base/timing_logger.h"
#include "gc/accounting/card_table-inl.h"
#include "gc/accounting/heap_bitmap.h"
#include "gc/accounting/space_bitmap-inl.h"
#include "gc/heap.h"
#include "gc/space/image_space.h"
#include "gc/space/large_object_space.h"
#include "gc/space/space-inl.h"
#include "indirect_reference_table.h"
#include "intern_table.h"
#include "jni_internal.h"
#include "monitor.h"
#include "mark_sweep-inl.h"
#include "mirror/art_field.h"
#include "mirror/art_field-inl.h"
#include "mirror/class-inl.h"
#include "mirror/class_loader.h"
#include "mirror/dex_cache.h"
#include "mirror/object-inl.h"
#include "mirror/object_array.h"
#include "mirror/object_array-inl.h"
#include "runtime.h"
#include "thread-inl.h"
#include "thread_list.h"
#include "verifier/method_verifier.h"

using ::art::mirror::ArtField;
using ::art::mirror::Class;
using ::art::mirror::Object;
using ::art::mirror::ObjectArray;

namespace art {
namespace gc {
namespace collector {

// Performance options.
constexpr bool kUseRecursiveMark = false;
constexpr bool kUseMarkStackPrefetch = true;
constexpr size_t kSweepArrayChunkFreeSize = 1024;

// Parallelism options.
constexpr bool kParallelCardScan = true;
constexpr bool kParallelRecursiveMark = true;
// Don't attempt to parallelize mark stack processing unless the mark stack is at least n
// elements. This is temporary until we reduce the overhead caused by allocating tasks, etc. Not
// having this can add overhead in ProcessReferences since we may end up doing many calls of
// ProcessMarkStack with very small mark stacks.
constexpr size_t kMinimumParallelMarkStackSize = 128;
constexpr bool kParallelProcessMarkStack = true;

// Profiling and information flags.
constexpr bool kCountClassesMarked = false;
constexpr bool kProfileLargeObjects = false;
constexpr bool kMeasureOverhead = false;
constexpr bool kCountTasks = false;
constexpr bool kCountJavaLangRefs = false;

// Turn off kCheckLocks when profiling the GC since it slows the GC down by up to 40%.
constexpr bool kCheckLocks = kDebugLocking;

void MarkSweep::ImmuneSpace(space::ContinuousSpace* space) {
  // Bind live to mark bitmap if necessary.
  if (space->GetLiveBitmap() != space->GetMarkBitmap()) {
    BindLiveToMarkBitmap(space);
  }

  // Add the space to the immune region.
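  // Explanatory note (added): the immune region is maintained as a single contiguous
  // [immune_begin_, immune_end_) range, so either this is the first immune space or the range is
  // extended only when the space immediately preceding it (in the sorted continuous-space list)
  // is already covered.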
  if (immune_begin_ == NULL) {
    DCHECK(immune_end_ == NULL);
    SetImmuneRange(reinterpret_cast<Object*>(space->Begin()),
                   reinterpret_cast<Object*>(space->End()));
  } else {
    const space::ContinuousSpace* prev_space = nullptr;
    // Find out if the previous space is immune.
    for (space::ContinuousSpace* cur_space : GetHeap()->GetContinuousSpaces()) {
      if (cur_space == space) {
        break;
      }
      prev_space = cur_space;
    }
    // If previous space was immune, then extend the immune region. Relies on continuous spaces
    // being sorted by Heap::AddContinuousSpace.
    if (prev_space != NULL &&
        immune_begin_ <= reinterpret_cast<Object*>(prev_space->Begin()) &&
        immune_end_ >= reinterpret_cast<Object*>(prev_space->End())) {
      immune_begin_ = std::min(reinterpret_cast<Object*>(space->Begin()), immune_begin_);
      immune_end_ = std::max(reinterpret_cast<Object*>(space->End()), immune_end_);
    }
  }
}

void MarkSweep::BindBitmaps() {
  timings_.StartSplit("BindBitmaps");
  WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
  // Mark all of the spaces we never collect as immune.
  for (const auto& space : GetHeap()->GetContinuousSpaces()) {
    if (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyNeverCollect) {
      ImmuneSpace(space);
    }
  }
  timings_.EndSplit();
}

MarkSweep::MarkSweep(Heap* heap, bool is_concurrent, const std::string& name_prefix)
    : GarbageCollector(heap,
                       name_prefix + (name_prefix.empty() ? "" : " ") +
                       (is_concurrent ? "concurrent mark sweep" : "mark sweep")),
      current_mark_bitmap_(NULL),
      java_lang_Class_(NULL),
      mark_stack_(NULL),
      immune_begin_(NULL),
      immune_end_(NULL),
      soft_reference_list_(NULL),
      weak_reference_list_(NULL),
      finalizer_reference_list_(NULL),
      phantom_reference_list_(NULL),
      cleared_reference_list_(NULL),
      gc_barrier_(new Barrier(0)),
      large_object_lock_("mark sweep large object lock", kMarkSweepLargeObjectLock),
      mark_stack_expand_lock_("mark sweep mark stack expand lock"),
      is_concurrent_(is_concurrent),
      clear_soft_references_(false) {
}

void MarkSweep::InitializePhase() {
  timings_.Reset();
  base::TimingLogger::ScopedSplit split("InitializePhase", &timings_);
  mark_stack_ = GetHeap()->mark_stack_.get();
  DCHECK(mark_stack_ != NULL);
  SetImmuneRange(NULL, NULL);
  soft_reference_list_ = NULL;
  weak_reference_list_ = NULL;
  finalizer_reference_list_ = NULL;
  phantom_reference_list_ = NULL;
  cleared_reference_list_ = NULL;
  freed_bytes_ = 0;
  freed_objects_ = 0;
  class_count_ = 0;
  array_count_ = 0;
  other_count_ = 0;
  large_object_test_ = 0;
  large_object_mark_ = 0;
  classes_marked_ = 0;
  overhead_time_ = 0;
  work_chunks_created_ = 0;
  work_chunks_deleted_ = 0;
  reference_count_ = 0;
  java_lang_Class_ = Class::GetJavaLangClass();
  CHECK(java_lang_Class_ != NULL);

  FindDefaultMarkBitmap();

  // Do any pre GC verification.
  timings_.NewSplit("PreGcVerification");
  heap_->PreGcVerification(this);
}

void MarkSweep::ProcessReferences(Thread* self) {
  WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
  ProcessReferences(&soft_reference_list_, clear_soft_references_, &weak_reference_list_,
                    &finalizer_reference_list_, &phantom_reference_list_);
}

bool MarkSweep::HandleDirtyObjectsPhase() {
  base::TimingLogger::ScopedSplit split("HandleDirtyObjectsPhase", &timings_);
  Thread* self = Thread::Current();
  Locks::mutator_lock_->AssertExclusiveHeld(self);

  {
    WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);

    // Re-mark root set.
    ReMarkRoots();

    // Scan dirty objects; this is only required if we are not doing concurrent GC.
    RecursiveMarkDirtyObjects(true, accounting::CardTable::kCardDirty);
  }

  ProcessReferences(self);

  // Only need to do this if we have the card mark verification on, and only during concurrent GC.
  if (GetHeap()->verify_missing_card_marks_) {
    WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
    // This second sweep makes sure that we don't have any objects in the live stack which point to
    // freed objects. These cause problems since their references may be previously freed objects.
    SweepArray(GetHeap()->allocation_stack_.get(), false);
  }
  return true;
}

bool MarkSweep::IsConcurrent() const {
  return is_concurrent_;
}

void MarkSweep::MarkingPhase() {
  base::TimingLogger::ScopedSplit split("MarkingPhase", &timings_);
  Heap* heap = GetHeap();
  Thread* self = Thread::Current();

  BindBitmaps();
  FindDefaultMarkBitmap();

  // Process dirty cards and add dirty cards to mod union tables.
  heap->ProcessCards(timings_);

  // Need to do this before the checkpoint since we don't want any threads to add references to
  // the live stack during the recursive mark.
  timings_.NewSplit("SwapStacks");
  heap->SwapStacks();

  WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
  if (Locks::mutator_lock_->IsExclusiveHeld(self)) {
    // If we exclusively hold the mutator lock, all threads must be suspended.
    MarkRoots();
  } else {
    MarkThreadRoots(self);
    MarkNonThreadRoots();
  }
  MarkConcurrentRoots();

  heap->UpdateAndMarkModUnion(this, timings_, GetGcType());
  MarkReachableObjects();
}

void MarkSweep::MarkThreadRoots(Thread* self) {
  MarkRootsCheckpoint(self);
}

void MarkSweep::MarkReachableObjects() {
  // Mark everything allocated since the last GC as live so that we can sweep concurrently,
  // knowing that new allocations won't be marked as live.
  timings_.StartSplit("MarkStackAsLive");
  accounting::ObjectStack* live_stack = heap_->GetLiveStack();
  heap_->MarkAllocStack(heap_->alloc_space_->GetLiveBitmap(),
                        heap_->large_object_space_->GetLiveObjects(), live_stack);
  live_stack->Reset();
  timings_.EndSplit();
  // Recursively mark all the non-image bits set in the mark bitmap.
  RecursiveMark();
}

void MarkSweep::ReclaimPhase() {
  base::TimingLogger::ScopedSplit split("ReclaimPhase", &timings_);
  Thread* self = Thread::Current();

  if (!IsConcurrent()) {
    base::TimingLogger::ScopedSplit split("ProcessReferences", &timings_);
    ProcessReferences(self);
  } else {
    base::TimingLogger::ScopedSplit split("UnMarkAllocStack", &timings_);
    accounting::ObjectStack* allocation_stack = GetHeap()->allocation_stack_.get();
    WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
    // The allocation stack contains things allocated since the start of the GC. These may have been
    // marked during this GC meaning they won't be eligible for reclaiming in the next sticky GC.
    // Remove these objects from the mark bitmaps so that they will be eligible for sticky
    // collection.
    // There is a race here which is safely handled. Another thread such as the hprof could
    // have flushed the alloc stack after we resumed the threads. This is safe however, since
    // resetting the allocation stack zeros it out with madvise. This means that we will either
    // read NULLs or attempt to unmark a newly allocated object which will not be marked in the
    // first place.
    mirror::Object** end = allocation_stack->End();
    for (mirror::Object** it = allocation_stack->Begin(); it != end; ++it) {
      const Object* obj = *it;
      if (obj != NULL) {
        UnMarkObjectNonNull(obj);
      }
    }
  }

  // Before freeing anything, let's verify the heap.
  if (kIsDebugBuild) {
    ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
    VerifyImageRoots();
  }
  timings_.StartSplit("PreSweepingGcVerification");
  heap_->PreSweepingGcVerification(this);
  timings_.EndSplit();

  {
    WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);

    // Reclaim unmarked objects.
    Sweep(false);

    // Swap the live and mark bitmaps for each space which we modified. This is an
    // optimization that enables us to not clear live bits inside of the sweep. Only swaps unbound
    // bitmaps.
    timings_.StartSplit("SwapBitmaps");
    SwapBitmaps();
    timings_.EndSplit();

    // Unbind the live and mark bitmaps.
    UnBindBitmaps();
  }
}

void MarkSweep::SetImmuneRange(Object* begin, Object* end) {
  immune_begin_ = begin;
  immune_end_ = end;
}

void MarkSweep::FindDefaultMarkBitmap() {
  base::TimingLogger::ScopedSplit split("FindDefaultMarkBitmap", &timings_);
  for (const auto& space : GetHeap()->GetContinuousSpaces()) {
    if (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyAlwaysCollect) {
      current_mark_bitmap_ = space->GetMarkBitmap();
      CHECK(current_mark_bitmap_ != NULL);
      return;
    }
  }
  GetHeap()->DumpSpaces();
  LOG(FATAL) << "Could not find a default mark bitmap";
}

void MarkSweep::ExpandMarkStack() {
  // Rare case, no need to have Thread::Current be a parameter.
  MutexLock mu(Thread::Current(), mark_stack_expand_lock_);
  if (UNLIKELY(mark_stack_->Size() < mark_stack_->Capacity())) {
    // Someone else acquired the lock and expanded the mark stack before us.
    return;
  }
  std::vector<Object*> temp(mark_stack_->Begin(), mark_stack_->End());
  mark_stack_->Resize(mark_stack_->Capacity() * 2);
  for (const auto& obj : temp) {
    mark_stack_->PushBack(obj);
  }
}

inline void MarkSweep::MarkObjectNonNullParallel(const Object* obj) {
  DCHECK(obj != NULL);
  if (MarkObjectParallel(obj)) {
    while (UNLIKELY(!mark_stack_->AtomicPushBack(const_cast<Object*>(obj)))) {
      // Only reason a push can fail is that the mark stack is full.
      ExpandMarkStack();
    }
  }
}

inline void MarkSweep::UnMarkObjectNonNull(const Object* obj) {
  DCHECK(!IsImmune(obj));
  // Try to take advantage of locality of references within a space; failing this, find the space
  // the hard way.
  accounting::SpaceBitmap* object_bitmap = current_mark_bitmap_;
  if (UNLIKELY(!object_bitmap->HasAddress(obj))) {
    accounting::SpaceBitmap* new_bitmap = heap_->GetMarkBitmap()->GetContinuousSpaceBitmap(obj);
    if (LIKELY(new_bitmap != NULL)) {
      object_bitmap = new_bitmap;
    } else {
      MarkLargeObject(obj, false);
      return;
    }
  }

  DCHECK(object_bitmap->HasAddress(obj));
  object_bitmap->Clear(obj);
}

inline void MarkSweep::MarkObjectNonNull(const Object* obj) {
  DCHECK(obj != NULL);

  if (IsImmune(obj)) {
    DCHECK(IsMarked(obj));
    return;
  }

  // Try to take advantage of locality of references within a space; failing this, find the space
  // the hard way.
  accounting::SpaceBitmap* object_bitmap = current_mark_bitmap_;
  if (UNLIKELY(!object_bitmap->HasAddress(obj))) {
    accounting::SpaceBitmap* new_bitmap = heap_->GetMarkBitmap()->GetContinuousSpaceBitmap(obj);
    if (LIKELY(new_bitmap != NULL)) {
      object_bitmap = new_bitmap;
    } else {
      MarkLargeObject(obj, true);
      return;
    }
  }

  // This object was not previously marked.
  if (!object_bitmap->Test(obj)) {
    object_bitmap->Set(obj);
    // Do we need to expand the mark stack?
    if (UNLIKELY(mark_stack_->Size() >= mark_stack_->Capacity())) {
      ExpandMarkStack();
    }
    // The object must be pushed onto the mark stack.
    mark_stack_->PushBack(const_cast<Object*>(obj));
  }
}

// Rare case, probably not worth inlining since it will increase instruction cache miss rate.
bool MarkSweep::MarkLargeObject(const Object* obj, bool set) {
  // TODO: support >1 discontinuous space.
  space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
  accounting::SpaceSetMap* large_objects = large_object_space->GetMarkObjects();
  if (kProfileLargeObjects) {
    ++large_object_test_;
  }
  if (UNLIKELY(!large_objects->Test(obj))) {
    if (!large_object_space->Contains(obj)) {
      LOG(ERROR) << "Tried to mark " << obj << " not contained by any spaces";
      LOG(ERROR) << "Attempting to see if it's a bad root";
      VerifyRoots();
      LOG(FATAL) << "Can't mark bad root";
    }
    if (kProfileLargeObjects) {
      ++large_object_mark_;
    }
    if (set) {
      large_objects->Set(obj);
    } else {
      large_objects->Clear(obj);
    }
    return true;
  }
  return false;
}

inline bool MarkSweep::MarkObjectParallel(const Object* obj) {
  DCHECK(obj != NULL);

  if (IsImmune(obj)) {
    DCHECK(IsMarked(obj));
    return false;
  }

  // Try to take advantage of locality of references within a space; failing this, find the space
  // the hard way.
  accounting::SpaceBitmap* object_bitmap = current_mark_bitmap_;
  if (UNLIKELY(!object_bitmap->HasAddress(obj))) {
    accounting::SpaceBitmap* new_bitmap = heap_->GetMarkBitmap()->GetContinuousSpaceBitmap(obj);
    if (new_bitmap != NULL) {
      object_bitmap = new_bitmap;
    } else {
      // TODO: Remove the Thread::Current here?
      // TODO: Convert this to some kind of atomic marking?
      MutexLock mu(Thread::Current(), large_object_lock_);
      return MarkLargeObject(obj, true);
    }
  }

  // Return true if the object was not previously marked.
  return !object_bitmap->AtomicTestAndSet(obj);
}

// Used to mark objects when recursing. Recursion is done by moving
// the finger across the bitmaps in address order and marking child
// objects. Any newly-marked objects whose addresses are lower than
// the finger won't be visited by the bitmap scan, so those objects
// need to be added to the mark stack.
inline void MarkSweep::MarkObject(const Object* obj) {
  if (obj != NULL) {
    MarkObjectNonNull(obj);
  }
}

void MarkSweep::MarkRoot(const Object* obj) {
  if (obj != NULL) {
    MarkObjectNonNull(obj);
  }
}

void MarkSweep::MarkRootParallelCallback(const Object* root, void* arg) {
  DCHECK(root != NULL);
  DCHECK(arg != NULL);
  MarkSweep* mark_sweep = reinterpret_cast<MarkSweep*>(arg);
  mark_sweep->MarkObjectNonNullParallel(root);
}

void MarkSweep::MarkObjectCallback(const Object* root, void* arg) {
  DCHECK(root != NULL);
  DCHECK(arg != NULL);
  MarkSweep* mark_sweep = reinterpret_cast<MarkSweep*>(arg);
  mark_sweep->MarkObjectNonNull(root);
}

void MarkSweep::ReMarkObjectVisitor(const Object* root, void* arg) {
  DCHECK(root != NULL);
  DCHECK(arg != NULL);
  MarkSweep* mark_sweep = reinterpret_cast<MarkSweep*>(arg);
  mark_sweep->MarkObjectNonNull(root);
}

void MarkSweep::VerifyRootCallback(const Object* root, void* arg, size_t vreg,
                                   const StackVisitor* visitor) {
  reinterpret_cast<MarkSweep*>(arg)->VerifyRoot(root, vreg, visitor);
}

void MarkSweep::VerifyRoot(const Object* root, size_t vreg, const StackVisitor* visitor) {
  // See if the root is on any space bitmap.
  if (GetHeap()->GetLiveBitmap()->GetContinuousSpaceBitmap(root) == NULL) {
    space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
    if (!large_object_space->Contains(root)) {
      LOG(ERROR) << "Found invalid root: " << root;
      if (visitor != NULL) {
        LOG(ERROR) << visitor->DescribeLocation() << " in VReg: " << vreg;
      }
    }
  }
}

void MarkSweep::VerifyRoots() {
  Runtime::Current()->GetThreadList()->VerifyRoots(VerifyRootCallback, this);
}

// Marks all objects in the root set.
void MarkSweep::MarkRoots() {
  timings_.StartSplit("MarkRoots");
  Runtime::Current()->VisitNonConcurrentRoots(MarkObjectCallback, this);
  timings_.EndSplit();
}

void MarkSweep::MarkNonThreadRoots() {
  timings_.StartSplit("MarkNonThreadRoots");
  Runtime::Current()->VisitNonThreadRoots(MarkObjectCallback, this);
  timings_.EndSplit();
}

void MarkSweep::MarkConcurrentRoots() {
  timings_.StartSplit("MarkConcurrentRoots");
  // Visit all runtime roots and clear dirty flags.
  Runtime::Current()->VisitConcurrentRoots(MarkObjectCallback, this, false, true);
  timings_.EndSplit();
}

void MarkSweep::CheckObject(const Object* obj) {
  DCHECK(obj != NULL);
  VisitObjectReferences(obj, [this](const Object* obj, const Object* ref, MemberOffset offset,
      bool is_static) NO_THREAD_SAFETY_ANALYSIS {
    Locks::heap_bitmap_lock_->AssertSharedHeld(Thread::Current());
    CheckReference(obj, ref, offset, is_static);
  });
}

void MarkSweep::VerifyImageRootVisitor(Object* root, void* arg) {
  DCHECK(root != NULL);
  DCHECK(arg != NULL);
  MarkSweep* mark_sweep = reinterpret_cast<MarkSweep*>(arg);
  DCHECK(mark_sweep->heap_->GetMarkBitmap()->Test(root));
  mark_sweep->CheckObject(root);
}

void MarkSweep::BindLiveToMarkBitmap(space::ContinuousSpace* space) {
  CHECK(space->IsDlMallocSpace());
  space::DlMallocSpace* alloc_space = space->AsDlMallocSpace();
  accounting::SpaceBitmap* live_bitmap = space->GetLiveBitmap();
  accounting::SpaceBitmap* mark_bitmap = alloc_space->mark_bitmap_.release();
  GetHeap()->GetMarkBitmap()->ReplaceBitmap(mark_bitmap, live_bitmap);
  alloc_space->temp_bitmap_.reset(mark_bitmap);
  alloc_space->mark_bitmap_.reset(live_bitmap);
}

class ScanObjectVisitor {
 public:
  explicit ScanObjectVisitor(MarkSweep* const mark_sweep) ALWAYS_INLINE
      : mark_sweep_(mark_sweep) {}

  // TODO: Fix this when annotalysis works with visitors.
  void operator()(const Object* obj) const ALWAYS_INLINE NO_THREAD_SAFETY_ANALYSIS {
    if (kCheckLocks) {
      Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
      Locks::heap_bitmap_lock_->AssertExclusiveHeld(Thread::Current());
    }
    mark_sweep_->ScanObject(obj);
  }

 private:
  MarkSweep* const mark_sweep_;
};

template <bool kUseFinger = false>
class MarkStackTask : public Task {
 public:
  MarkStackTask(ThreadPool* thread_pool, MarkSweep* mark_sweep, size_t mark_stack_size,
                const Object** mark_stack)
      : mark_sweep_(mark_sweep),
        thread_pool_(thread_pool),
        mark_stack_pos_(mark_stack_size) {
    // We may have to copy part of an existing mark stack when another mark stack overflows.
    if (mark_stack_size != 0) {
      DCHECK(mark_stack != NULL);
      // TODO: Check performance?
      std::copy(mark_stack, mark_stack + mark_stack_size, mark_stack_);
    }
    if (kCountTasks) {
      ++mark_sweep_->work_chunks_created_;
    }
  }

  static const size_t kMaxSize = 1 * KB;

 protected:
  class ScanObjectParallelVisitor {
   public:
    explicit ScanObjectParallelVisitor(MarkStackTask<kUseFinger>* chunk_task) ALWAYS_INLINE
        : chunk_task_(chunk_task) {}

    void operator()(const Object* obj) const {
      MarkSweep* mark_sweep = chunk_task_->mark_sweep_;
      mark_sweep->ScanObjectVisit(obj,
          [mark_sweep, this](const Object* /* obj */, const Object* ref,
              const MemberOffset& /* offset */, bool /* is_static */) ALWAYS_INLINE {
        if (ref != nullptr && mark_sweep->MarkObjectParallel(ref)) {
          if (kUseFinger) {
            android_memory_barrier();
            if (reinterpret_cast<uintptr_t>(ref) >=
                static_cast<uintptr_t>(mark_sweep->atomic_finger_)) {
              return;
            }
          }
          chunk_task_->MarkStackPush(ref);
        }
      });
    }

   private:
    MarkStackTask<kUseFinger>* const chunk_task_;
  };

  virtual ~MarkStackTask() {
    // Make sure that we have cleared our mark stack.
    DCHECK_EQ(mark_stack_pos_, 0U);
    if (kCountTasks) {
      ++mark_sweep_->work_chunks_deleted_;
    }
  }

  MarkSweep* const mark_sweep_;
  ThreadPool* const thread_pool_;
  // Thread local mark stack for this task.
  const Object* mark_stack_[kMaxSize];
  // Mark stack position.
  size_t mark_stack_pos_;

  void MarkStackPush(const Object* obj) ALWAYS_INLINE {
    if (UNLIKELY(mark_stack_pos_ == kMaxSize)) {
      // Mark stack overflow, give 1/2 the stack to the thread pool as a new work task.
      mark_stack_pos_ /= 2;
      auto* task = new MarkStackTask(thread_pool_, mark_sweep_, kMaxSize - mark_stack_pos_,
                                     mark_stack_ + mark_stack_pos_);
      thread_pool_->AddTask(Thread::Current(), task);
    }
    DCHECK(obj != nullptr);
    DCHECK(mark_stack_pos_ < kMaxSize);
    mark_stack_[mark_stack_pos_++] = obj;
  }

  virtual void Finalize() {
    delete this;
  }

  // Scans all of the objects
  virtual void Run(Thread* self) {
    ScanObjectParallelVisitor visitor(this);
    // TODO: Tune this.
    static const size_t kFifoSize = 4;
    BoundedFifoPowerOfTwo<const Object*, kFifoSize> prefetch_fifo;
    for (;;) {
      const Object* obj = NULL;
      if (kUseMarkStackPrefetch) {
        while (mark_stack_pos_ != 0 && prefetch_fifo.size() < kFifoSize) {
          const Object* obj = mark_stack_[--mark_stack_pos_];
          DCHECK(obj != NULL);
          __builtin_prefetch(obj);
          prefetch_fifo.push_back(obj);
        }
        if (UNLIKELY(prefetch_fifo.empty())) {
          break;
        }
        obj = prefetch_fifo.front();
        prefetch_fifo.pop_front();
      } else {
        if (UNLIKELY(mark_stack_pos_ == 0)) {
          break;
        }
        obj = mark_stack_[--mark_stack_pos_];
      }
      DCHECK(obj != NULL);
      visitor(obj);
    }
  }
};

class CardScanTask : public MarkStackTask<false> {
 public:
  CardScanTask(ThreadPool* thread_pool, MarkSweep* mark_sweep, accounting::SpaceBitmap* bitmap,
               byte* begin, byte* end, byte minimum_age, size_t mark_stack_size,
               const Object** mark_stack_obj)
      : MarkStackTask<false>(thread_pool, mark_sweep, mark_stack_size, mark_stack_obj),
        bitmap_(bitmap),
        begin_(begin),
        end_(end),
        minimum_age_(minimum_age) {
  }

 protected:
  accounting::SpaceBitmap* const bitmap_;
  byte* const begin_;
  byte* const end_;
  const byte minimum_age_;

  virtual void Finalize() {
    delete this;
  }

  virtual void Run(Thread* self) NO_THREAD_SAFETY_ANALYSIS {
    ScanObjectParallelVisitor visitor(this);
    accounting::CardTable* card_table = mark_sweep_->GetHeap()->GetCardTable();
    card_table->Scan(bitmap_, begin_, end_, visitor, minimum_age_);
    // Finish by emptying our local mark stack.
    MarkStackTask::Run(self);
  }
};

void MarkSweep::ScanGrayObjects(bool paused, byte minimum_age) {
  accounting::CardTable* card_table = GetHeap()->GetCardTable();
  ThreadPool* thread_pool = GetHeap()->GetThreadPool();
  const bool parallel = kParallelCardScan && thread_pool != nullptr;
  if (parallel) {
    Thread* self = Thread::Current();
    // Can't have a different split for each space since multiple spaces can have their cards being
    // scanned at the same time.
    timings_.StartSplit(paused ? "(Paused)ScanGrayObjects" : "ScanGrayObjects");
    // Try to take some of the mark stack since we can pass this off to the worker tasks.
    const Object** mark_stack_begin = const_cast<const Object**>(mark_stack_->Begin());
    const Object** mark_stack_end = const_cast<const Object**>(mark_stack_->End());
    const size_t mark_stack_size = mark_stack_end - mark_stack_begin;
    const size_t thread_count = thread_pool->GetThreadCount() + 1;
    // Estimated number of work tasks we will create.
    const size_t mark_stack_tasks = GetHeap()->GetContinuousSpaces().size() * thread_count;
    DCHECK_NE(mark_stack_tasks, 0U);
    const size_t mark_stack_delta = std::min(CardScanTask::kMaxSize / 2,
                                             mark_stack_size / mark_stack_tasks + 1);
    for (const auto& space : GetHeap()->GetContinuousSpaces()) {
      byte* card_begin = space->Begin();
      byte* card_end = space->End();
      // Calculate how many bytes of heap we will scan.
      const size_t address_range = card_end - card_begin;
      // Calculate how much address range each task gets.
      const size_t card_delta = RoundUp(address_range / thread_count + 1,
                                        accounting::CardTable::kCardSize);
      // Create the worker tasks for this space.
      while (card_begin != card_end) {
        // Add a range of cards.
        size_t addr_remaining = card_end - card_begin;
        size_t card_increment = std::min(card_delta, addr_remaining);
        // Take from the back of the mark stack.
        size_t mark_stack_remaining = mark_stack_end - mark_stack_begin;
        size_t mark_stack_increment = std::min(mark_stack_delta, mark_stack_remaining);
        mark_stack_end -= mark_stack_increment;
        mark_stack_->PopBackCount(static_cast<int32_t>(mark_stack_increment));
        DCHECK_EQ(mark_stack_end, mark_stack_->End());
        // Add the new task to the thread pool.
        auto* task = new CardScanTask(thread_pool, this, space->GetMarkBitmap(), card_begin,
                                      card_begin + card_increment, minimum_age,
                                      mark_stack_increment, mark_stack_end);
        thread_pool->AddTask(self, task);
        card_begin += card_increment;
      }
    }
    thread_pool->StartWorkers(self);
    thread_pool->Wait(self, paused, true);  // Only do work in the main thread if we are paused.
    thread_pool->StopWorkers(self);
    timings_.EndSplit();
  } else {
    for (const auto& space : GetHeap()->GetContinuousSpaces()) {
      // Image spaces are handled properly since live == marked for them.
      switch (space->GetGcRetentionPolicy()) {
        case space::kGcRetentionPolicyNeverCollect:
          timings_.StartSplit(paused ? "(Paused)ScanGrayImageSpaceObjects" :
              "ScanGrayImageSpaceObjects");
          break;
        case space::kGcRetentionPolicyFullCollect:
          timings_.StartSplit(paused ? "(Paused)ScanGrayZygoteSpaceObjects" :
              "ScanGrayZygoteSpaceObjects");
          break;
        case space::kGcRetentionPolicyAlwaysCollect:
          timings_.StartSplit(paused ?
"(Paused)ScanGrayAllocSpaceObjects" : 807 "ScanGrayAllocSpaceObjects"); 808 break; 809 } 810 ScanObjectVisitor visitor(this); 811 card_table->Scan(space->GetMarkBitmap(), space->Begin(), space->End(), visitor, minimum_age); 812 timings_.EndSplit(); 813 } 814 } 815} 816 817void MarkSweep::VerifyImageRoots() { 818 // Verify roots ensures that all the references inside the image space point 819 // objects which are either in the image space or marked objects in the alloc 820 // space 821 timings_.StartSplit("VerifyImageRoots"); 822 for (const auto& space : GetHeap()->GetContinuousSpaces()) { 823 if (space->IsImageSpace()) { 824 space::ImageSpace* image_space = space->AsImageSpace(); 825 uintptr_t begin = reinterpret_cast<uintptr_t>(image_space->Begin()); 826 uintptr_t end = reinterpret_cast<uintptr_t>(image_space->End()); 827 accounting::SpaceBitmap* live_bitmap = image_space->GetLiveBitmap(); 828 DCHECK(live_bitmap != NULL); 829 live_bitmap->VisitMarkedRange(begin, end, [this](const Object* obj) { 830 if (kCheckLocks) { 831 Locks::heap_bitmap_lock_->AssertSharedHeld(Thread::Current()); 832 } 833 DCHECK(obj != NULL); 834 CheckObject(obj); 835 }); 836 } 837 } 838 timings_.EndSplit(); 839} 840 841class RecursiveMarkTask : public MarkStackTask<false> { 842 public: 843 RecursiveMarkTask(ThreadPool* thread_pool, MarkSweep* mark_sweep, 844 accounting::SpaceBitmap* bitmap, uintptr_t begin, uintptr_t end) 845 : MarkStackTask<false>(thread_pool, mark_sweep, 0, NULL), 846 bitmap_(bitmap), 847 begin_(begin), 848 end_(end) { 849 } 850 851 protected: 852 accounting::SpaceBitmap* const bitmap_; 853 const uintptr_t begin_; 854 const uintptr_t end_; 855 856 virtual void Finalize() { 857 delete this; 858 } 859 860 // Scans all of the objects 861 virtual void Run(Thread* self) NO_THREAD_SAFETY_ANALYSIS { 862 ScanObjectParallelVisitor visitor(this); 863 bitmap_->VisitMarkedRange(begin_, end_, visitor); 864 // Finish by emptying our local mark stack. 865 MarkStackTask::Run(self); 866 } 867}; 868 869// Populates the mark stack based on the set of marked objects and 870// recursively marks until the mark stack is emptied. 871void MarkSweep::RecursiveMark() { 872 base::TimingLogger::ScopedSplit split("RecursiveMark", &timings_); 873 // RecursiveMark will build the lists of known instances of the Reference classes. 874 // See DelayReferenceReferent for details. 875 CHECK(soft_reference_list_ == NULL); 876 CHECK(weak_reference_list_ == NULL); 877 CHECK(finalizer_reference_list_ == NULL); 878 CHECK(phantom_reference_list_ == NULL); 879 CHECK(cleared_reference_list_ == NULL); 880 881 if (kUseRecursiveMark) { 882 const bool partial = GetGcType() == kGcTypePartial; 883 ScanObjectVisitor scan_visitor(this); 884 auto* self = Thread::Current(); 885 ThreadPool* thread_pool = heap_->GetThreadPool(); 886 const bool parallel = kParallelRecursiveMark && thread_pool != NULL; 887 mark_stack_->Reset(); 888 for (const auto& space : GetHeap()->GetContinuousSpaces()) { 889 if ((space->GetGcRetentionPolicy() == space::kGcRetentionPolicyAlwaysCollect) || 890 (!partial && space->GetGcRetentionPolicy() == space::kGcRetentionPolicyFullCollect)) { 891 current_mark_bitmap_ = space->GetMarkBitmap(); 892 if (current_mark_bitmap_ == NULL) { 893 GetHeap()->DumpSpaces(); 894 LOG(FATAL) << "invalid bitmap"; 895 } 896 if (parallel) { 897 // We will use the mark stack the future. 898 // CHECK(mark_stack_->IsEmpty()); 899 // This function does not handle heap end increasing, so we must use the space end. 
          uintptr_t begin = reinterpret_cast<uintptr_t>(space->Begin());
          uintptr_t end = reinterpret_cast<uintptr_t>(space->End());
          atomic_finger_ = static_cast<int32_t>(0xFFFFFFFF);

          // Create a few worker tasks.
          size_t n = (thread_pool->GetThreadCount() + 1) * 2;
          while (begin != end) {
            uintptr_t start = begin;
            uintptr_t delta = (end - begin) / n;
            delta = RoundUp(delta, KB);
            if (delta < 16 * KB) delta = end - begin;
            begin += delta;
            auto* task = new RecursiveMarkTask(thread_pool, this, current_mark_bitmap_, start,
                                               begin);
            thread_pool->AddTask(self, task);
          }
          thread_pool->StartWorkers(self);
          thread_pool->Wait(self, false, true);
          thread_pool->StopWorkers(self);
        } else {
          // This function does not handle heap end increasing, so we must use the space end.
          uintptr_t begin = reinterpret_cast<uintptr_t>(space->Begin());
          uintptr_t end = reinterpret_cast<uintptr_t>(space->End());
          current_mark_bitmap_->VisitMarkedRange(begin, end, scan_visitor);
        }
      }
    }
  }
  ProcessMarkStack(false);
}

bool MarkSweep::IsMarkedCallback(const Object* object, void* arg) {
  return
      reinterpret_cast<MarkSweep*>(arg)->IsMarked(object) ||
      !reinterpret_cast<MarkSweep*>(arg)->GetHeap()->GetLiveBitmap()->Test(object);
}

void MarkSweep::RecursiveMarkDirtyObjects(bool paused, byte minimum_age) {
  ScanGrayObjects(paused, minimum_age);
  ProcessMarkStack(paused);
}

void MarkSweep::ReMarkRoots() {
  timings_.StartSplit("ReMarkRoots");
  Runtime::Current()->VisitRoots(ReMarkObjectVisitor, this, true, true);
  timings_.EndSplit();
}

void MarkSweep::SweepJniWeakGlobals(IsMarkedTester is_marked, void* arg) {
  JavaVMExt* vm = Runtime::Current()->GetJavaVM();
  MutexLock mu(Thread::Current(), vm->weak_globals_lock);
  for (const Object** entry : vm->weak_globals) {
    if (!is_marked(*entry, arg)) {
      *entry = kClearedJniWeakGlobal;
    }
  }
}

struct ArrayMarkedCheck {
  accounting::ObjectStack* live_stack;
  MarkSweep* mark_sweep;
};

// Either marked or not live.
bool MarkSweep::IsMarkedArrayCallback(const Object* object, void* arg) {
  ArrayMarkedCheck* array_check = reinterpret_cast<ArrayMarkedCheck*>(arg);
  if (array_check->mark_sweep->IsMarked(object)) {
    return true;
  }
  accounting::ObjectStack* live_stack = array_check->live_stack;
  return std::find(live_stack->Begin(), live_stack->End(), object) == live_stack->End();
}

void MarkSweep::SweepSystemWeaksArray(accounting::ObjectStack* allocations) {
  Runtime* runtime = Runtime::Current();
  // The callbacks check
  // !is_marked where is_marked is the callback but we want
  // !IsMarked && IsLive
  // So compute !(!IsMarked && IsLive) which is equal to (IsMarked || !IsLive).
  // Or for swapped (IsLive || !IsMarked).

  timings_.StartSplit("SweepSystemWeaksArray");
  ArrayMarkedCheck visitor;
  visitor.live_stack = allocations;
  visitor.mark_sweep = this;
  runtime->GetInternTable()->SweepInternTableWeaks(IsMarkedArrayCallback, &visitor);
  runtime->GetMonitorList()->SweepMonitorList(IsMarkedArrayCallback, &visitor);
  SweepJniWeakGlobals(IsMarkedArrayCallback, &visitor);
  timings_.EndSplit();
}

void MarkSweep::SweepSystemWeaks() {
  Runtime* runtime = Runtime::Current();
  // The callbacks check
  // !is_marked where is_marked is the callback but we want
  // !IsMarked && IsLive
  // So compute !(!IsMarked && IsLive) which is equal to (IsMarked || !IsLive).
  // Or for swapped (IsLive || !IsMarked).
  timings_.StartSplit("SweepSystemWeaks");
  runtime->GetInternTable()->SweepInternTableWeaks(IsMarkedCallback, this);
  runtime->GetMonitorList()->SweepMonitorList(IsMarkedCallback, this);
  SweepJniWeakGlobals(IsMarkedCallback, this);
  timings_.EndSplit();
}

bool MarkSweep::VerifyIsLiveCallback(const Object* obj, void* arg) {
  reinterpret_cast<MarkSweep*>(arg)->VerifyIsLive(obj);
  // We don't actually want to sweep the object, so let's return "marked".
  return true;
}

void MarkSweep::VerifyIsLive(const Object* obj) {
  Heap* heap = GetHeap();
  if (!heap->GetLiveBitmap()->Test(obj)) {
    space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
    if (!large_object_space->GetLiveObjects()->Test(obj)) {
      if (std::find(heap->allocation_stack_->Begin(), heap->allocation_stack_->End(), obj) ==
          heap->allocation_stack_->End()) {
        // Object not found!
        heap->DumpSpaces();
        LOG(FATAL) << "Found dead object " << obj;
      }
    }
  }
}

void MarkSweep::VerifySystemWeaks() {
  Runtime* runtime = Runtime::Current();
  // Verify system weaks, uses a special IsMarked callback which always returns true.
  runtime->GetInternTable()->SweepInternTableWeaks(VerifyIsLiveCallback, this);
  runtime->GetMonitorList()->SweepMonitorList(VerifyIsLiveCallback, this);

  JavaVMExt* vm = runtime->GetJavaVM();
  MutexLock mu(Thread::Current(), vm->weak_globals_lock);
  for (const Object** entry : vm->weak_globals) {
    VerifyIsLive(*entry);
  }
}

struct SweepCallbackContext {
  MarkSweep* mark_sweep;
  space::AllocSpace* space;
  Thread* self;
};

class CheckpointMarkThreadRoots : public Closure {
 public:
  explicit CheckpointMarkThreadRoots(MarkSweep* mark_sweep) : mark_sweep_(mark_sweep) {}

  virtual void Run(Thread* thread) NO_THREAD_SAFETY_ANALYSIS {
    // Note: self is not necessarily equal to thread since thread may be suspended.
    Thread* self = Thread::Current();
    CHECK(thread == self || thread->IsSuspended() || thread->GetState() == kWaitingPerformingGc)
        << thread->GetState() << " thread " << thread << " self " << self;
    thread->VisitRoots(MarkSweep::MarkRootParallelCallback, mark_sweep_);
    mark_sweep_->GetBarrier().Pass(self);
  }

 private:
  MarkSweep* mark_sweep_;
};

void MarkSweep::MarkRootsCheckpoint(Thread* self) {
  CheckpointMarkThreadRoots check_point(this);
  timings_.StartSplit("MarkRootsCheckpoint");
  ThreadList* thread_list = Runtime::Current()->GetThreadList();
  // Request that the checkpoint be run on all threads, returning a count of the threads that must
  // run through the barrier including self.
  size_t barrier_count = thread_list->RunCheckpoint(&check_point);
  // Release locks then wait for all mutator threads to pass the barrier.
  // TODO: optimize to not release locks when there are no threads to wait for.
  Locks::heap_bitmap_lock_->ExclusiveUnlock(self);
  Locks::mutator_lock_->SharedUnlock(self);
  ThreadState old_state = self->SetState(kWaitingForCheckPointsToRun);
  CHECK_EQ(old_state, kWaitingPerformingGc);
  gc_barrier_->Increment(self, barrier_count);
  self->SetState(kWaitingPerformingGc);
  Locks::mutator_lock_->SharedLock(self);
  Locks::heap_bitmap_lock_->ExclusiveLock(self);
  timings_.EndSplit();
}

void MarkSweep::SweepCallback(size_t num_ptrs, Object** ptrs, void* arg) {
  SweepCallbackContext* context = static_cast<SweepCallbackContext*>(arg);
  MarkSweep* mark_sweep = context->mark_sweep;
  Heap* heap = mark_sweep->GetHeap();
  space::AllocSpace* space = context->space;
  Thread* self = context->self;
  Locks::heap_bitmap_lock_->AssertExclusiveHeld(self);
  // Use a bulk free that merges consecutive objects before freeing, or free per object?
  // Documentation suggests better free performance with merging, but this may be at the expense
  // of allocation.
  size_t freed_objects = num_ptrs;
  // AllocSpace::FreeList clears the value in ptrs, so perform after clearing the live bit.
  size_t freed_bytes = space->FreeList(self, num_ptrs, ptrs);
  heap->RecordFree(freed_objects, freed_bytes);
  mark_sweep->freed_objects_.fetch_add(freed_objects);
  mark_sweep->freed_bytes_.fetch_add(freed_bytes);
}

void MarkSweep::ZygoteSweepCallback(size_t num_ptrs, Object** ptrs, void* arg) {
  SweepCallbackContext* context = static_cast<SweepCallbackContext*>(arg);
  Locks::heap_bitmap_lock_->AssertExclusiveHeld(context->self);
  Heap* heap = context->mark_sweep->GetHeap();
  // We don't free any actual memory to avoid dirtying the shared zygote pages.
  for (size_t i = 0; i < num_ptrs; ++i) {
    Object* obj = static_cast<Object*>(ptrs[i]);
    heap->GetLiveBitmap()->Clear(obj);
    heap->GetCardTable()->MarkCard(obj);
  }
}

void MarkSweep::SweepArray(accounting::ObjectStack* allocations, bool swap_bitmaps) {
  size_t freed_bytes = 0;
  space::DlMallocSpace* space = heap_->GetAllocSpace();

  // If we don't swap bitmaps then newly allocated Weaks go into the live bitmap but not mark
  // bitmap, resulting in occasional frees of Weaks which are still in use.
  SweepSystemWeaksArray(allocations);

  timings_.StartSplit("SweepArray");
  // Newly allocated objects MUST be in the alloc space and those are the only objects which we are
  // going to free.
  accounting::SpaceBitmap* live_bitmap = space->GetLiveBitmap();
  accounting::SpaceBitmap* mark_bitmap = space->GetMarkBitmap();
  space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
  accounting::SpaceSetMap* large_live_objects = large_object_space->GetLiveObjects();
  accounting::SpaceSetMap* large_mark_objects = large_object_space->GetMarkObjects();
  if (swap_bitmaps) {
    std::swap(live_bitmap, mark_bitmap);
    std::swap(large_live_objects, large_mark_objects);
  }

  size_t freed_objects = 0;
  size_t freed_large_objects = 0;
  size_t count = allocations->Size();
  Object** objects = const_cast<Object**>(allocations->Begin());
  Object** out = objects;
  Object** objects_to_chunk_free = out;

  // Empty the allocation stack.
  Thread* self = Thread::Current();
  for (size_t i = 0; i < count; ++i) {
    Object* obj = objects[i];
    // There should only be objects in the AllocSpace/LargeObjectSpace in the allocation stack.
    if (LIKELY(mark_bitmap->HasAddress(obj))) {
      if (!mark_bitmap->Test(obj)) {
        // Don't bother un-marking since we clear the mark bitmap anyways.
        *(out++) = obj;
        // Free objects in chunks.
        DCHECK_GE(out, objects_to_chunk_free);
        DCHECK_LE(static_cast<size_t>(out - objects_to_chunk_free), kSweepArrayChunkFreeSize);
        if (static_cast<size_t>(out - objects_to_chunk_free) == kSweepArrayChunkFreeSize) {
          // timings_.StartSplit("FreeList");
          size_t chunk_freed_objects = out - objects_to_chunk_free;
          freed_objects += chunk_freed_objects;
          freed_bytes += space->FreeList(self, chunk_freed_objects, objects_to_chunk_free);
          objects_to_chunk_free = out;
          // timings_.EndSplit();
        }
      }
    } else if (!large_mark_objects->Test(obj)) {
      ++freed_large_objects;
      freed_bytes += large_object_space->Free(self, obj);
    }
  }
  // Free the remaining objects in chunks.
  DCHECK_GE(out, objects_to_chunk_free);
  DCHECK_LE(static_cast<size_t>(out - objects_to_chunk_free), kSweepArrayChunkFreeSize);
  if (out - objects_to_chunk_free > 0) {
    // timings_.StartSplit("FreeList");
    size_t chunk_freed_objects = out - objects_to_chunk_free;
    freed_objects += chunk_freed_objects;
    freed_bytes += space->FreeList(self, chunk_freed_objects, objects_to_chunk_free);
    // timings_.EndSplit();
  }
  CHECK_EQ(count, allocations->Size());
  timings_.EndSplit();

  timings_.StartSplit("RecordFree");
  VLOG(heap) << "Freed " << freed_objects << "/" << count
             << " objects with size " << PrettySize(freed_bytes);
  heap_->RecordFree(freed_objects + freed_large_objects, freed_bytes);
  freed_objects_.fetch_add(freed_objects);
  freed_bytes_.fetch_add(freed_bytes);
  timings_.EndSplit();

  timings_.StartSplit("ResetStack");
  allocations->Reset();
  timings_.EndSplit();
}

void MarkSweep::Sweep(bool swap_bitmaps) {
  DCHECK(mark_stack_->IsEmpty());
  base::TimingLogger::ScopedSplit("Sweep", &timings_);

  // If we don't swap bitmaps then newly allocated Weaks go into the live bitmap but not mark
  // bitmap, resulting in occasional frees of Weaks which are still in use.
  SweepSystemWeaks();

  const bool partial = (GetGcType() == kGcTypePartial);
  SweepCallbackContext scc;
  scc.mark_sweep = this;
  scc.self = Thread::Current();
  for (const auto& space : GetHeap()->GetContinuousSpaces()) {
    // We always sweep always-collect spaces.
    bool sweep_space = (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyAlwaysCollect);
    if (!partial && !sweep_space) {
      // We also sweep full-collect spaces when the GC isn't a partial GC (i.e. it is a full GC).
      sweep_space = (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyFullCollect);
    }
    if (sweep_space) {
      uintptr_t begin = reinterpret_cast<uintptr_t>(space->Begin());
      uintptr_t end = reinterpret_cast<uintptr_t>(space->End());
      scc.space = space->AsDlMallocSpace();
      accounting::SpaceBitmap* live_bitmap = space->GetLiveBitmap();
      accounting::SpaceBitmap* mark_bitmap = space->GetMarkBitmap();
      if (swap_bitmaps) {
        std::swap(live_bitmap, mark_bitmap);
      }
      if (!space->IsZygoteSpace()) {
        base::TimingLogger::ScopedSplit split("SweepAllocSpace", &timings_);
        // Bitmaps are pre-swapped for optimization which enables sweeping with the heap unlocked.
        accounting::SpaceBitmap::SweepWalk(*live_bitmap, *mark_bitmap, begin, end,
                                           &SweepCallback, reinterpret_cast<void*>(&scc));
      } else {
        base::TimingLogger::ScopedSplit split("SweepZygote", &timings_);
        // Zygote sweep takes care of dirtying cards and clearing live bits, does not free actual
        // memory.
        accounting::SpaceBitmap::SweepWalk(*live_bitmap, *mark_bitmap, begin, end,
                                           &ZygoteSweepCallback, reinterpret_cast<void*>(&scc));
      }
    }
  }

  SweepLargeObjects(swap_bitmaps);
}

void MarkSweep::SweepLargeObjects(bool swap_bitmaps) {
  base::TimingLogger::ScopedSplit("SweepLargeObjects", &timings_);
  // Sweep large objects.
  space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
  accounting::SpaceSetMap* large_live_objects = large_object_space->GetLiveObjects();
  accounting::SpaceSetMap* large_mark_objects = large_object_space->GetMarkObjects();
  if (swap_bitmaps) {
    std::swap(large_live_objects, large_mark_objects);
  }
  accounting::SpaceSetMap::Objects& live_objects = large_live_objects->GetObjects();
  // O(n*log(n)) but hopefully there are not too many large objects.
  size_t freed_objects = 0;
  size_t freed_bytes = 0;
  Thread* self = Thread::Current();
  for (const Object* obj : live_objects) {
    if (!large_mark_objects->Test(obj)) {
      freed_bytes += large_object_space->Free(self, const_cast<Object*>(obj));
      ++freed_objects;
    }
  }
  freed_objects_.fetch_add(freed_objects);
  freed_bytes_.fetch_add(freed_bytes);
  GetHeap()->RecordFree(freed_objects, freed_bytes);
}

void MarkSweep::CheckReference(const Object* obj, const Object* ref, MemberOffset offset, bool is_static) {
  for (const auto& space : GetHeap()->GetContinuousSpaces()) {
    if (space->IsDlMallocSpace() && space->Contains(ref)) {
      DCHECK(IsMarked(obj));

      bool is_marked = IsMarked(ref);
      if (!is_marked) {
        LOG(INFO) << *space;
        LOG(WARNING) << (is_static ?
"Static ref'" : "Instance ref'") << PrettyTypeOf(ref) 1272 << "' (" << reinterpret_cast<const void*>(ref) << ") in '" << PrettyTypeOf(obj) 1273 << "' (" << reinterpret_cast<const void*>(obj) << ") at offset " 1274 << reinterpret_cast<void*>(offset.Int32Value()) << " wasn't marked"; 1275 1276 const Class* klass = is_static ? obj->AsClass() : obj->GetClass(); 1277 DCHECK(klass != NULL); 1278 const ObjectArray<ArtField>* fields = is_static ? klass->GetSFields() : klass->GetIFields(); 1279 DCHECK(fields != NULL); 1280 bool found = false; 1281 for (int32_t i = 0; i < fields->GetLength(); ++i) { 1282 const ArtField* cur = fields->Get(i); 1283 if (cur->GetOffset().Int32Value() == offset.Int32Value()) { 1284 LOG(WARNING) << "Field referencing the alloc space was " << PrettyField(cur); 1285 found = true; 1286 break; 1287 } 1288 } 1289 if (!found) { 1290 LOG(WARNING) << "Could not find field in object alloc space with offset " << offset.Int32Value(); 1291 } 1292 1293 bool obj_marked = heap_->GetCardTable()->IsDirty(obj); 1294 if (!obj_marked) { 1295 LOG(WARNING) << "Object '" << PrettyTypeOf(obj) << "' " 1296 << "(" << reinterpret_cast<const void*>(obj) << ") contains references to " 1297 << "the alloc space, but wasn't card marked"; 1298 } 1299 } 1300 } 1301 break; 1302 } 1303} 1304 1305// Process the "referent" field in a java.lang.ref.Reference. If the 1306// referent has not yet been marked, put it on the appropriate list in 1307// the heap for later processing. 1308void MarkSweep::DelayReferenceReferent(mirror::Class* klass, Object* obj) { 1309 DCHECK(klass != nullptr); 1310 DCHECK(klass->IsReferenceClass()); 1311 DCHECK(obj != NULL); 1312 Object* referent = heap_->GetReferenceReferent(obj); 1313 if (referent != NULL && !IsMarked(referent)) { 1314 if (kCountJavaLangRefs) { 1315 ++reference_count_; 1316 } 1317 Thread* self = Thread::Current(); 1318 // TODO: Remove these locks, and use atomic stacks for storing references? 1319 if (klass->IsSoftReferenceClass()) { 1320 MutexLock mu(self, *heap_->GetSoftRefQueueLock()); 1321 heap_->EnqueuePendingReference(obj, &soft_reference_list_); 1322 } else if (klass->IsWeakReferenceClass()) { 1323 MutexLock mu(self, *heap_->GetWeakRefQueueLock()); 1324 heap_->EnqueuePendingReference(obj, &weak_reference_list_); 1325 } else if (klass->IsFinalizerReferenceClass()) { 1326 MutexLock mu(self, *heap_->GetFinalizerRefQueueLock()); 1327 heap_->EnqueuePendingReference(obj, &finalizer_reference_list_); 1328 } else if (klass->IsPhantomReferenceClass()) { 1329 MutexLock mu(self, *heap_->GetPhantomRefQueueLock()); 1330 heap_->EnqueuePendingReference(obj, &phantom_reference_list_); 1331 } else { 1332 LOG(FATAL) << "Invalid reference type " << PrettyClass(klass) 1333 << " " << std::hex << klass->GetAccessFlags(); 1334 } 1335 } 1336} 1337 1338void MarkSweep::ScanRoot(const Object* obj) { 1339 ScanObject(obj); 1340} 1341 1342class MarkObjectVisitor { 1343 public: 1344 explicit MarkObjectVisitor(MarkSweep* const mark_sweep) ALWAYS_INLINE : mark_sweep_(mark_sweep) {} 1345 1346 // TODO: Fixme when anotatalysis works with visitors. 
  void operator()(const Object* /* obj */, const Object* ref, const MemberOffset& /* offset */,
                  bool /* is_static */) const ALWAYS_INLINE
      NO_THREAD_SAFETY_ANALYSIS {
    if (kCheckLocks) {
      Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
      Locks::heap_bitmap_lock_->AssertExclusiveHeld(Thread::Current());
    }
    mark_sweep_->MarkObject(ref);
  }

 private:
  MarkSweep* const mark_sweep_;
};

// Scans an object reference. Determines the type of the reference
// and dispatches to a specialized scanning routine.
void MarkSweep::ScanObject(const Object* obj) {
  MarkObjectVisitor visitor(this);
  ScanObjectVisit(obj, visitor);
}

void MarkSweep::ProcessMarkStackParallel(bool paused) {
  Thread* self = Thread::Current();
  ThreadPool* thread_pool = GetHeap()->GetThreadPool();
  const size_t num_threads = thread_pool->GetThreadCount();
  const size_t chunk_size =
      std::min(mark_stack_->Size() / num_threads + 1,
               static_cast<size_t>(MarkStackTask<false>::kMaxSize));
  CHECK_GT(chunk_size, 0U);
  // Split the current mark stack up into work tasks.
  for (mirror::Object **it = mark_stack_->Begin(), **end = mark_stack_->End(); it < end; ) {
    const size_t delta = std::min(static_cast<size_t>(end - it), chunk_size);
    thread_pool->AddTask(self, new MarkStackTask<false>(thread_pool, this, delta,
                                                        const_cast<const mirror::Object**>(it)));
    it += delta;
  }
  thread_pool->StartWorkers(self);
  // Don't do work in the main thread since it is assumed at least one other thread will require
  // CPU time during the GC.
  thread_pool->Wait(self, paused, true);
  thread_pool->StopWorkers(self);
  mark_stack_->Reset();
  CHECK_EQ(work_chunks_created_, work_chunks_deleted_) << " some of the work chunks were leaked";
}

// Scan anything that's on the mark stack.
void MarkSweep::ProcessMarkStack(bool paused) {
  timings_.StartSplit("ProcessMarkStack");
  const bool parallel = kParallelProcessMarkStack && GetHeap()->GetThreadPool() &&
      mark_stack_->Size() >= kMinimumParallelMarkStackSize;
  if (parallel) {
    ProcessMarkStackParallel(paused);
  } else {
    // TODO: Tune this.
    static const size_t kFifoSize = 4;
    BoundedFifoPowerOfTwo<const Object*, kFifoSize> prefetch_fifo;
    for (;;) {
      const Object* obj = NULL;
      if (kUseMarkStackPrefetch) {
        while (!mark_stack_->IsEmpty() && prefetch_fifo.size() < kFifoSize) {
          const Object* obj = mark_stack_->PopBack();
          DCHECK(obj != NULL);
          __builtin_prefetch(obj);
          prefetch_fifo.push_back(obj);
        }
        if (prefetch_fifo.empty()) {
          break;
        }
        obj = prefetch_fifo.front();
        prefetch_fifo.pop_front();
      } else {
        if (mark_stack_->IsEmpty()) {
          break;
        }
        obj = mark_stack_->PopBack();
      }
      DCHECK(obj != NULL);
      ScanObject(obj);
    }
  }
  timings_.EndSplit();
}

// Walks the reference list marking any references subject to the
// reference clearing policy. References with a black referent are
// removed from the list. References with white referents biased
// toward saving are blackened and also removed from the list.
void MarkSweep::PreserveSomeSoftReferences(Object** list) {
  DCHECK(list != NULL);
  Object* clear = NULL;
  size_t counter = 0;

  DCHECK(mark_stack_->IsEmpty());

  timings_.StartSplit("PreserveSomeSoftReferences");
  while (*list != NULL) {
    Object* ref = heap_->DequeuePendingReference(list);
    Object* referent = heap_->GetReferenceReferent(ref);
    if (referent == NULL) {
      // Referent was cleared by the user during marking.
      continue;
    }
    bool is_marked = IsMarked(referent);
    if (!is_marked && ((++counter) & 1)) {
      // Referent is white and biased toward saving, mark it.
      MarkObject(referent);
      is_marked = true;
    }
    if (!is_marked) {
      // Referent is white, queue it for clearing.
      heap_->EnqueuePendingReference(ref, &clear);
    }
  }
  *list = clear;
  timings_.EndSplit();

  // Restart the mark with the newly black references added to the root set.
  ProcessMarkStack(true);
}

inline bool MarkSweep::IsMarked(const Object* object) const
    SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
  if (IsImmune(object)) {
    return true;
  }
  DCHECK(current_mark_bitmap_ != NULL);
  if (current_mark_bitmap_->HasAddress(object)) {
    return current_mark_bitmap_->Test(object);
  }
  return heap_->GetMarkBitmap()->Test(object);
}


// Unlink the reference list, clearing reference objects with white
// referents. Cleared references registered to a reference queue are
// scheduled for appending by the heap worker thread.
void MarkSweep::ClearWhiteReferences(Object** list) {
  DCHECK(list != NULL);
  while (*list != NULL) {
    Object* ref = heap_->DequeuePendingReference(list);
    Object* referent = heap_->GetReferenceReferent(ref);
    if (referent != NULL && !IsMarked(referent)) {
      // Referent is white, clear it.
      heap_->ClearReferenceReferent(ref);
      if (heap_->IsEnqueuable(ref)) {
        heap_->EnqueueReference(ref, &cleared_reference_list_);
      }
    }
  }
  DCHECK(*list == NULL);
}

// Enqueues finalizer references with white referents. White
// referents are blackened, moved to the zombie field, and the
// referent field is cleared.
void MarkSweep::EnqueueFinalizerReferences(Object** list) {
  DCHECK(list != NULL);
  timings_.StartSplit("EnqueueFinalizerReferences");
  MemberOffset zombie_offset = heap_->GetFinalizerReferenceZombieOffset();
  bool has_enqueued = false;
  while (*list != NULL) {
    Object* ref = heap_->DequeuePendingReference(list);
    Object* referent = heap_->GetReferenceReferent(ref);
    if (referent != NULL && !IsMarked(referent)) {
      MarkObject(referent);
      // If the referent is non-null the reference must be enqueuable.
      DCHECK(heap_->IsEnqueuable(ref));
      ref->SetFieldObject(zombie_offset, referent, false);
      heap_->ClearReferenceReferent(ref);
      heap_->EnqueueReference(ref, &cleared_reference_list_);
      has_enqueued = true;
    }
  }
  timings_.EndSplit();
  if (has_enqueued) {
    ProcessMarkStack(true);
  }
  DCHECK(*list == NULL);
}

// Process reference class instances and schedule finalizations.
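// Explanatory note (added): the order below is: optionally preserve some softly reachable
// referents, clear white soft and weak references, enqueue finalizer references (re-marking their
// referents), clear whatever remains white after finalizer marking, and finally clear white
// phantom references.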
void MarkSweep::ProcessReferences(Object** soft_references, bool clear_soft,
                                  Object** weak_references,
                                  Object** finalizer_references,
                                  Object** phantom_references) {
  DCHECK(soft_references != NULL);
  DCHECK(weak_references != NULL);
  DCHECK(finalizer_references != NULL);
  DCHECK(phantom_references != NULL);

  // Unless we are in the zygote or required to clear soft references
  // with white references, preserve some white referents.
  if (!clear_soft && !Runtime::Current()->IsZygote()) {
    PreserveSomeSoftReferences(soft_references);
  }

  timings_.StartSplit("ProcessReferences");
  // Clear all remaining soft and weak references with white
  // referents.
  ClearWhiteReferences(soft_references);
  ClearWhiteReferences(weak_references);
  timings_.EndSplit();

  // Preserve all white objects with finalize methods and schedule
  // them for finalization.
  EnqueueFinalizerReferences(finalizer_references);

  timings_.StartSplit("ProcessReferences");
  // Clear all f-reachable soft and weak references with white
  // referents.
  ClearWhiteReferences(soft_references);
  ClearWhiteReferences(weak_references);

  // Clear all phantom references with white referents.
  ClearWhiteReferences(phantom_references);

  // At this point all reference lists should be empty.
  DCHECK(*soft_references == NULL);
  DCHECK(*weak_references == NULL);
  DCHECK(*finalizer_references == NULL);
  DCHECK(*phantom_references == NULL);
  timings_.EndSplit();
}

void MarkSweep::UnBindBitmaps() {
  base::TimingLogger::ScopedSplit split("UnBindBitmaps", &timings_);
  for (const auto& space : GetHeap()->GetContinuousSpaces()) {
    if (space->IsDlMallocSpace()) {
      space::DlMallocSpace* alloc_space = space->AsDlMallocSpace();
      if (alloc_space->temp_bitmap_.get() != NULL) {
        // At this point, the temp_bitmap holds our old mark bitmap.
        accounting::SpaceBitmap* new_bitmap = alloc_space->temp_bitmap_.release();
        GetHeap()->GetMarkBitmap()->ReplaceBitmap(alloc_space->mark_bitmap_.get(), new_bitmap);
        CHECK_EQ(alloc_space->mark_bitmap_.release(), alloc_space->live_bitmap_.get());
        alloc_space->mark_bitmap_.reset(new_bitmap);
        DCHECK(alloc_space->temp_bitmap_.get() == NULL);
      }
    }
  }
}

void MarkSweep::FinishPhase() {
  base::TimingLogger::ScopedSplit split("FinishPhase", &timings_);
  // Can't enqueue references if we hold the mutator lock.
  Object* cleared_references = GetClearedReferences();
  Heap* heap = GetHeap();
  timings_.NewSplit("EnqueueClearedReferences");
  heap->EnqueueClearedReferences(&cleared_references);

  timings_.NewSplit("PostGcVerification");
  heap->PostGcVerification(this);

  timings_.NewSplit("GrowForUtilization");
  heap->GrowForUtilization(GetGcType(), GetDurationNs());

  timings_.NewSplit("RequestHeapTrim");
  heap->RequestHeapTrim();

  // Update the cumulative statistics.
  total_time_ns_ += GetDurationNs();
  total_paused_time_ns_ += std::accumulate(GetPauseTimes().begin(), GetPauseTimes().end(), 0,
                                           std::plus<uint64_t>());
  total_freed_objects_ += GetFreedObjects();
  total_freed_bytes_ += GetFreedBytes();

  // Ensure that the mark stack is empty.
  CHECK(mark_stack_->IsEmpty());

  if (kCountScannedTypes) {
    VLOG(gc) << "MarkSweep scanned classes=" << class_count_ << " arrays=" << array_count_
             << " other=" << other_count_;
  }

  if (kCountTasks) {
    VLOG(gc) << "Total number of work chunks allocated: " << work_chunks_created_;
  }

  if (kMeasureOverhead) {
    VLOG(gc) << "Overhead time " << PrettyDuration(overhead_time_);
  }

  if (kProfileLargeObjects) {
    VLOG(gc) << "Large objects tested " << large_object_test_ << " marked " << large_object_mark_;
  }

  if (kCountClassesMarked) {
    VLOG(gc) << "Classes marked " << classes_marked_;
  }

  if (kCountJavaLangRefs) {
    VLOG(gc) << "References scanned " << reference_count_;
  }

  // Update the cumulative loggers.
  cumulative_timings_.Start();
  cumulative_timings_.AddLogger(timings_);
  cumulative_timings_.End();

  // Clear all of the spaces' mark bitmaps.
  for (const auto& space : GetHeap()->GetContinuousSpaces()) {
    if (space->GetGcRetentionPolicy() != space::kGcRetentionPolicyNeverCollect) {
      space->GetMarkBitmap()->Clear();
    }
  }
  mark_stack_->Reset();

  // Reset the marked large objects.
  space::LargeObjectSpace* large_objects = GetHeap()->GetLargeObjectsSpace();
  large_objects->GetMarkObjects()->Clear();
}

}  // namespace collector
}  // namespace gc
}  // namespace art