mark_sweep.cc revision 906457c326d505f511fae42fc693cade1656c19e
/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "mark_sweep.h"

#include <functional>
#include <numeric>
#include <climits>
#include <vector>

#include "base/bounded_fifo.h"
#include "base/logging.h"
#include "base/macros.h"
#include "base/mutex-inl.h"
#include "base/timing_logger.h"
#include "gc/accounting/card_table-inl.h"
#include "gc/accounting/heap_bitmap.h"
#include "gc/accounting/mod_union_table.h"
#include "gc/accounting/space_bitmap-inl.h"
#include "gc/heap.h"
#include "gc/space/image_space.h"
#include "gc/space/large_object_space.h"
#include "gc/space/space-inl.h"
#include "indirect_reference_table.h"
#include "intern_table.h"
#include "jni_internal.h"
#include "monitor.h"
#include "mark_sweep-inl.h"
#include "mirror/art_field.h"
#include "mirror/art_field-inl.h"
#include "mirror/class-inl.h"
#include "mirror/class_loader.h"
#include "mirror/dex_cache.h"
#include "mirror/object-inl.h"
#include "mirror/object_array.h"
#include "mirror/object_array-inl.h"
#include "runtime.h"
#include "thread-inl.h"
#include "thread_list.h"
#include "verifier/method_verifier.h"

using ::art::mirror::ArtField;
using ::art::mirror::Class;
using ::art::mirror::Object;
using ::art::mirror::ObjectArray;

namespace art {
namespace gc {
namespace collector {

// Performance options.
constexpr bool kUseRecursiveMark = false;
constexpr bool kUseMarkStackPrefetch = true;
constexpr size_t kSweepArrayChunkFreeSize = 1024;

// Parallelism options.
constexpr bool kParallelCardScan = true;
constexpr bool kParallelRecursiveMark = true;
// Don't attempt to parallelize mark stack processing unless the mark stack is at least n
// elements. This is temporary until we reduce the overhead caused by allocating tasks, etc.. Not
// having this can add overhead in ProcessReferences since we may end up doing many calls of
// ProcessMarkStack with very small mark stacks.
constexpr size_t kMinimumParallelMarkStackSize = 128;
constexpr bool kParallelProcessMarkStack = true;

// Profiling and information flags.
constexpr bool kCountClassesMarked = false;
constexpr bool kProfileLargeObjects = false;
constexpr bool kMeasureOverhead = false;
constexpr bool kCountTasks = false;
constexpr bool kCountJavaLangRefs = false;

// Turn off kCheckLocks when profiling the GC since it slows the GC down by up to 40%.
constexpr bool kCheckLocks = kDebugLocking;

void MarkSweep::ImmuneSpace(space::ContinuousSpace* space) {
  // Bind live to mark bitmap if necessary.
  if (space->GetLiveBitmap() != space->GetMarkBitmap()) {
    BindLiveToMarkBitmap(space);
  }

  // Add the space to the immune region.
  // TODO: Use space limits instead of current end_ since the end_ can be changed by dlmalloc
  // callbacks.
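  // The immune region is a single contiguous [immune_begin_, immune_end_) range: the first
  // immuned space establishes the range, and later spaces can only extend it when they directly
  // follow a space that is already immune (continuous spaces are kept in address order).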
  if (immune_begin_ == NULL) {
    DCHECK(immune_end_ == NULL);
    SetImmuneRange(reinterpret_cast<Object*>(space->Begin()),
                   reinterpret_cast<Object*>(space->End()));
  } else {
    const space::ContinuousSpace* prev_space = nullptr;
    // Find out if the previous space is immune.
    for (const space::ContinuousSpace* cur_space : GetHeap()->GetContinuousSpaces()) {
      if (cur_space == space) {
        break;
      }
      prev_space = cur_space;
    }
    // If previous space was immune, then extend the immune region. Relies on continuous spaces
    // being sorted by Heap::AddContinuousSpace.
    if (prev_space != nullptr && IsImmuneSpace(prev_space)) {
      immune_begin_ = std::min(reinterpret_cast<Object*>(space->Begin()), immune_begin_);
      immune_end_ = std::max(reinterpret_cast<Object*>(space->End()), immune_end_);
    }
  }
}

bool MarkSweep::IsImmuneSpace(const space::ContinuousSpace* space) const {
  return
      immune_begin_ <= reinterpret_cast<Object*>(space->Begin()) &&
      immune_end_ >= reinterpret_cast<Object*>(space->End());
}

void MarkSweep::BindBitmaps() {
  timings_.StartSplit("BindBitmaps");
  WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
  // Mark all of the spaces we never collect as immune.
  for (const auto& space : GetHeap()->GetContinuousSpaces()) {
    if (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyNeverCollect) {
      ImmuneSpace(space);
    }
  }
  timings_.EndSplit();
}

MarkSweep::MarkSweep(Heap* heap, bool is_concurrent, const std::string& name_prefix)
    : GarbageCollector(heap,
                       name_prefix +
                       (is_concurrent ? "concurrent mark sweep" : "mark sweep")),
      current_mark_bitmap_(NULL),
      mark_stack_(NULL),
      immune_begin_(NULL),
      immune_end_(NULL),
      soft_reference_list_(NULL),
      weak_reference_list_(NULL),
      finalizer_reference_list_(NULL),
      phantom_reference_list_(NULL),
      cleared_reference_list_(NULL),
      live_stack_freeze_size_(0),
      gc_barrier_(new Barrier(0)),
      large_object_lock_("mark sweep large object lock", kMarkSweepLargeObjectLock),
      mark_stack_lock_("mark sweep mark stack lock", kMarkSweepMarkStackLock),
      is_concurrent_(is_concurrent) {
}

void MarkSweep::InitializePhase() {
  timings_.Reset();
  base::TimingLogger::ScopedSplit split("InitializePhase", &timings_);
  mark_stack_ = heap_->mark_stack_.get();
  DCHECK(mark_stack_ != nullptr);
  SetImmuneRange(nullptr, nullptr);
  soft_reference_list_ = nullptr;
  weak_reference_list_ = nullptr;
  finalizer_reference_list_ = nullptr;
  phantom_reference_list_ = nullptr;
  cleared_reference_list_ = nullptr;
  class_count_ = 0;
  array_count_ = 0;
  other_count_ = 0;
  large_object_test_ = 0;
  large_object_mark_ = 0;
  classes_marked_ = 0;
  overhead_time_ = 0;
  work_chunks_created_ = 0;
  work_chunks_deleted_ = 0;
  reference_count_ = 0;

  FindDefaultMarkBitmap();

  // Do any pre GC verification.
  timings_.NewSplit("PreGcVerification");
  heap_->PreGcVerification(this);
}

void MarkSweep::ProcessReferences(Thread* self) {
  base::TimingLogger::ScopedSplit split("ProcessReferences", &timings_);
  WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
  ProcessReferences(&soft_reference_list_, clear_soft_references_, &weak_reference_list_,
                    &finalizer_reference_list_, &phantom_reference_list_);
}

bool MarkSweep::HandleDirtyObjectsPhase() {
  base::TimingLogger::ScopedSplit split("HandleDirtyObjectsPhase", &timings_);
  Thread* self = Thread::Current();
  Locks::mutator_lock_->AssertExclusiveHeld(self);

  {
    WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);

    // Re-mark root set.
    ReMarkRoots();

    // Scan dirty objects; this is only required if we are not doing concurrent GC.
    RecursiveMarkDirtyObjects(true, accounting::CardTable::kCardDirty);
  }

  ProcessReferences(self);

  // Only need to do this if we have the card mark verification on, and only during concurrent GC.
  if (GetHeap()->verify_missing_card_marks_ || GetHeap()->verify_pre_gc_heap_ ||
      GetHeap()->verify_post_gc_heap_) {
    WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
    // This second sweep makes sure that we don't have any objects in the live stack which point to
    // freed objects. These cause problems since their references may be previously freed objects.
    SweepArray(GetHeap()->allocation_stack_.get(), false);
  }

  timings_.StartSplit("PreSweepingGcVerification");
  heap_->PreSweepingGcVerification(this);
  timings_.EndSplit();

  // Ensure that nobody inserted items in the live stack after we swapped the stacks.
  ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
  CHECK_GE(live_stack_freeze_size_, GetHeap()->GetLiveStack()->Size());

  // Disallow new system weaks to prevent a race which occurs when someone adds a new system
  // weak before we sweep them. Since this new system weak may not be marked, the GC may
  // incorrectly sweep it. This also fixes a race where interning may attempt to return a strong
  // reference to a string that is about to be swept.
  Runtime::Current()->DisallowNewSystemWeaks();
  return true;
}

bool MarkSweep::IsConcurrent() const {
  return is_concurrent_;
}

void MarkSweep::MarkingPhase() {
  base::TimingLogger::ScopedSplit split("MarkingPhase", &timings_);
  Thread* self = Thread::Current();

  BindBitmaps();
  FindDefaultMarkBitmap();

  // Process dirty cards and add dirty cards to mod union tables.
  heap_->ProcessCards(timings_);

  // Need to do this before the checkpoint since we don't want any threads to add references to
  // the live stack during the recursive mark.
  timings_.NewSplit("SwapStacks");
  heap_->SwapStacks();

  WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
  if (Locks::mutator_lock_->IsExclusiveHeld(self)) {
    // If we exclusively hold the mutator lock, all threads must be suspended.
    MarkRoots();
  } else {
    MarkThreadRoots(self);
    // At this point the live stack should no longer have any mutators which push into it.
    MarkNonThreadRoots();
  }
  live_stack_freeze_size_ = heap_->GetLiveStack()->Size();
  MarkConcurrentRoots();
  UpdateAndMarkModUnion();
  MarkReachableObjects();
}

void MarkSweep::UpdateAndMarkModUnion() {
  for (const auto& space : heap_->GetContinuousSpaces()) {
    if (IsImmuneSpace(space)) {
      const char* name = space->IsZygoteSpace() ? "UpdateAndMarkZygoteModUnionTable" :
          "UpdateAndMarkImageModUnionTable";
      base::TimingLogger::ScopedSplit split(name, &timings_);
      accounting::ModUnionTable* mod_union_table = heap_->FindModUnionTableFromSpace(space);
      CHECK(mod_union_table != nullptr);
      mod_union_table->UpdateAndMarkReferences(MarkRootCallback, this);
    }
  }
}

void MarkSweep::MarkThreadRoots(Thread* self) {
  MarkRootsCheckpoint(self);
}

void MarkSweep::MarkReachableObjects() {
  // Mark everything allocated since the last GC as live so that we can sweep concurrently,
  // knowing that new allocations won't be marked as live.
  timings_.StartSplit("MarkStackAsLive");
  accounting::ObjectStack* live_stack = heap_->GetLiveStack();
  heap_->MarkAllocStackAsLive(live_stack);
  live_stack->Reset();
  timings_.EndSplit();
  // Recursively mark all the non-image bits set in the mark bitmap.
  RecursiveMark();
}

void MarkSweep::ReclaimPhase() {
  base::TimingLogger::ScopedSplit split("ReclaimPhase", &timings_);
  Thread* self = Thread::Current();

  if (!IsConcurrent()) {
    ProcessReferences(self);
  }

  {
    WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
    SweepSystemWeaks();
  }

  if (IsConcurrent()) {
    Runtime::Current()->AllowNewSystemWeaks();

    base::TimingLogger::ScopedSplit split("UnMarkAllocStack", &timings_);
    WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
    accounting::ObjectStack* allocation_stack = GetHeap()->allocation_stack_.get();
    // The allocation stack contains things allocated since the start of the GC. These may have
    // been marked during this GC, meaning they won't be eligible for reclaiming in the next
    // sticky GC. Remove these objects from the mark bitmaps so that they will be eligible for
    // sticky collection.
    // There is a race here which is safely handled. Another thread such as the hprof could
    // have flushed the alloc stack after we resumed the threads. This is safe however, since
    // resetting the allocation stack zeros it out with madvise. This means that we will either
    // read NULLs or attempt to unmark a newly allocated object which will not be marked in the
    // first place.
    mirror::Object** end = allocation_stack->End();
    for (mirror::Object** it = allocation_stack->Begin(); it != end; ++it) {
      const Object* obj = *it;
      if (obj != NULL) {
        UnMarkObjectNonNull(obj);
      }
    }
  }

  // Before freeing anything, let's verify the heap.
  if (kIsDebugBuild) {
    ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
    VerifyImageRoots();
  }

  {
    WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);

    // Reclaim unmarked objects.
    Sweep(false);

    // Swap the live and mark bitmaps for each space which we modified. This is an
    // optimization that enables us to not clear live bits inside of the sweep. Only swaps unbound
    // bitmaps.
    timings_.StartSplit("SwapBitmaps");
    SwapBitmaps();
    timings_.EndSplit();

    // Unbind the live and mark bitmaps.
    UnBindBitmaps();
  }
}

void MarkSweep::SetImmuneRange(Object* begin, Object* end) {
  immune_begin_ = begin;
  immune_end_ = end;
}

void MarkSweep::FindDefaultMarkBitmap() {
  base::TimingLogger::ScopedSplit split("FindDefaultMarkBitmap", &timings_);
  for (const auto& space : GetHeap()->GetContinuousSpaces()) {
    accounting::SpaceBitmap* bitmap = space->GetMarkBitmap();
    if (bitmap != nullptr &&
        space->GetGcRetentionPolicy() == space::kGcRetentionPolicyAlwaysCollect) {
      current_mark_bitmap_ = bitmap;
      CHECK(current_mark_bitmap_ != NULL);
      return;
    }
  }
  GetHeap()->DumpSpaces();
  LOG(FATAL) << "Could not find a default mark bitmap";
}

void MarkSweep::ExpandMarkStack() {
  ResizeMarkStack(mark_stack_->Capacity() * 2);
}

void MarkSweep::ResizeMarkStack(size_t new_size) {
  // Rare case, no need to have Thread::Current be a parameter.
  if (UNLIKELY(mark_stack_->Size() < mark_stack_->Capacity())) {
    // Someone else acquired the lock and expanded the mark stack before us.
    return;
  }
  std::vector<Object*> temp(mark_stack_->Begin(), mark_stack_->End());
  CHECK_LE(mark_stack_->Size(), new_size);
  mark_stack_->Resize(new_size);
  for (const auto& obj : temp) {
    mark_stack_->PushBack(obj);
  }
}

inline void MarkSweep::MarkObjectNonNullParallel(const Object* obj) {
  DCHECK(obj != NULL);
  if (MarkObjectParallel(obj)) {
    MutexLock mu(Thread::Current(), mark_stack_lock_);
    if (UNLIKELY(mark_stack_->Size() >= mark_stack_->Capacity())) {
      ExpandMarkStack();
    }
    // The object must be pushed on to the mark stack.
    mark_stack_->PushBack(const_cast<Object*>(obj));
  }
}

inline void MarkSweep::UnMarkObjectNonNull(const Object* obj) {
  DCHECK(!IsImmune(obj));
  // Try to take advantage of locality of references within a space, failing this find the space
  // the hard way.
  accounting::SpaceBitmap* object_bitmap = current_mark_bitmap_;
  if (UNLIKELY(!object_bitmap->HasAddress(obj))) {
    accounting::SpaceBitmap* new_bitmap = heap_->GetMarkBitmap()->GetContinuousSpaceBitmap(obj);
    if (LIKELY(new_bitmap != NULL)) {
      object_bitmap = new_bitmap;
    } else {
      MarkLargeObject(obj, false);
      return;
    }
  }

  DCHECK(object_bitmap->HasAddress(obj));
  object_bitmap->Clear(obj);
}

inline void MarkSweep::MarkObjectNonNull(const Object* obj) {
  DCHECK(obj != NULL);

  if (IsImmune(obj)) {
    DCHECK(IsMarked(obj));
    return;
  }

  // Try to take advantage of locality of references within a space, failing this find the space
  // the hard way.
  accounting::SpaceBitmap* object_bitmap = current_mark_bitmap_;
  if (UNLIKELY(!object_bitmap->HasAddress(obj))) {
    accounting::SpaceBitmap* new_bitmap = heap_->GetMarkBitmap()->GetContinuousSpaceBitmap(obj);
    if (LIKELY(new_bitmap != NULL)) {
      object_bitmap = new_bitmap;
    } else {
      MarkLargeObject(obj, true);
      return;
    }
  }

  // This object was not previously marked.
  if (!object_bitmap->Test(obj)) {
    object_bitmap->Set(obj);
    if (UNLIKELY(mark_stack_->Size() >= mark_stack_->Capacity())) {
      // Lock is not needed but is here anyways to please annotalysis.
      MutexLock mu(Thread::Current(), mark_stack_lock_);
      ExpandMarkStack();
    }
    // The object must be pushed on to the mark stack.
    mark_stack_->PushBack(const_cast<Object*>(obj));
  }
}

// Rare case, probably not worth inlining since it will increase instruction cache miss rate.
bool MarkSweep::MarkLargeObject(const Object* obj, bool set) {
  // TODO: support >1 discontinuous space.
  space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
  accounting::SpaceSetMap* large_objects = large_object_space->GetMarkObjects();
  if (kProfileLargeObjects) {
    ++large_object_test_;
  }
  if (UNLIKELY(!large_objects->Test(obj))) {
    if (!large_object_space->Contains(obj)) {
      LOG(ERROR) << "Tried to mark " << obj << " not contained by any spaces";
      LOG(ERROR) << "Attempting to see if it's a bad root";
      VerifyRoots();
      LOG(FATAL) << "Can't mark bad root";
    }
    if (kProfileLargeObjects) {
      ++large_object_mark_;
    }
    if (set) {
      large_objects->Set(obj);
    } else {
      large_objects->Clear(obj);
    }
    return true;
  }
  return false;
}

inline bool MarkSweep::MarkObjectParallel(const Object* obj) {
  DCHECK(obj != NULL);

  if (IsImmune(obj)) {
    DCHECK(IsMarked(obj));
    return false;
  }

  // Try to take advantage of locality of references within a space, failing this find the space
  // the hard way.
  accounting::SpaceBitmap* object_bitmap = current_mark_bitmap_;
  if (UNLIKELY(!object_bitmap->HasAddress(obj))) {
    accounting::SpaceBitmap* new_bitmap = heap_->GetMarkBitmap()->GetContinuousSpaceBitmap(obj);
    if (new_bitmap != NULL) {
      object_bitmap = new_bitmap;
    } else {
      // TODO: Remove the Thread::Current here?
      // TODO: Convert this to some kind of atomic marking?
      MutexLock mu(Thread::Current(), large_object_lock_);
      return MarkLargeObject(obj, true);
    }
  }

  // Return true if the object was not previously marked.
  return !object_bitmap->AtomicTestAndSet(obj);
}

// Used to mark objects when recursing. Recursion is done by moving
// the finger across the bitmaps in address order and marking child
// objects. Any newly-marked objects whose addresses are lower than
// the finger won't be visited by the bitmap scan, so those objects
// need to be added to the mark stack.
inline void MarkSweep::MarkObject(const Object* obj) {
  if (obj != NULL) {
    MarkObjectNonNull(obj);
  }
}

void MarkSweep::MarkRoot(const Object* obj) {
  if (obj != NULL) {
    MarkObjectNonNull(obj);
  }
}

Object* MarkSweep::MarkRootParallelCallback(Object* root, void* arg) {
  DCHECK(root != NULL);
  DCHECK(arg != NULL);
  reinterpret_cast<MarkSweep*>(arg)->MarkObjectNonNullParallel(root);
  return root;
}

Object* MarkSweep::MarkRootCallback(Object* root, void* arg) {
  DCHECK(root != nullptr);
  DCHECK(arg != nullptr);
  reinterpret_cast<MarkSweep*>(arg)->MarkObjectNonNull(root);
  return root;
}

void MarkSweep::VerifyRootCallback(const Object* root, void* arg, size_t vreg,
                                   const StackVisitor* visitor) {
  reinterpret_cast<MarkSweep*>(arg)->VerifyRoot(root, vreg, visitor);
}

void MarkSweep::VerifyRoot(const Object* root, size_t vreg, const StackVisitor* visitor) {
  // See if the root is on any space bitmap.
  if (GetHeap()->GetLiveBitmap()->GetContinuousSpaceBitmap(root) == NULL) {
    space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
    if (!large_object_space->Contains(root)) {
      LOG(ERROR) << "Found invalid root: " << root;
      if (visitor != NULL) {
        LOG(ERROR) << visitor->DescribeLocation() << " in VReg: " << vreg;
      }
    }
  }
}

void MarkSweep::VerifyRoots() {
  Runtime::Current()->GetThreadList()->VerifyRoots(VerifyRootCallback, this);
}

// Marks all objects in the root set.
void MarkSweep::MarkRoots() {
  timings_.StartSplit("MarkRoots");
  Runtime::Current()->VisitNonConcurrentRoots(MarkRootCallback, this);
  timings_.EndSplit();
}

void MarkSweep::MarkNonThreadRoots() {
  timings_.StartSplit("MarkNonThreadRoots");
  Runtime::Current()->VisitNonThreadRoots(MarkRootCallback, this);
  timings_.EndSplit();
}

void MarkSweep::MarkConcurrentRoots() {
  timings_.StartSplit("MarkConcurrentRoots");
  // Visit all runtime roots and clear dirty flags.
  Runtime::Current()->VisitConcurrentRoots(MarkRootCallback, this, false, true);
  timings_.EndSplit();
}

void MarkSweep::CheckObject(const Object* obj) {
  DCHECK(obj != NULL);
  VisitObjectReferences(const_cast<Object*>(obj), [this](const Object* obj, const Object* ref,
      MemberOffset offset, bool is_static) NO_THREAD_SAFETY_ANALYSIS {
    Locks::heap_bitmap_lock_->AssertSharedHeld(Thread::Current());
    CheckReference(obj, ref, offset, is_static);
  }, true);
}

void MarkSweep::VerifyImageRootVisitor(Object* root, void* arg) {
  DCHECK(root != NULL);
  DCHECK(arg != NULL);
  MarkSweep* mark_sweep = reinterpret_cast<MarkSweep*>(arg);
  DCHECK(mark_sweep->heap_->GetMarkBitmap()->Test(root));
  mark_sweep->CheckObject(root);
}

void MarkSweep::BindLiveToMarkBitmap(space::ContinuousSpace* space) {
  CHECK(space->IsDlMallocSpace());
  space::DlMallocSpace* alloc_space = space->AsDlMallocSpace();
  accounting::SpaceBitmap* live_bitmap = space->GetLiveBitmap();
  accounting::SpaceBitmap* mark_bitmap = alloc_space->BindLiveToMarkBitmap();
  GetHeap()->GetMarkBitmap()->ReplaceBitmap(mark_bitmap, live_bitmap);
}

class ScanObjectVisitor {
 public:
  explicit ScanObjectVisitor(MarkSweep* const mark_sweep) ALWAYS_INLINE
      : mark_sweep_(mark_sweep) {}

  // TODO: Fix me when annotalysis works with visitors.
  void operator()(Object* obj) const ALWAYS_INLINE NO_THREAD_SAFETY_ANALYSIS {
    if (kCheckLocks) {
      Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
      Locks::heap_bitmap_lock_->AssertExclusiveHeld(Thread::Current());
    }
    mark_sweep_->ScanObject(obj);
  }

 private:
  MarkSweep* const mark_sweep_;
};

template <bool kUseFinger = false>
class MarkStackTask : public Task {
 public:
  MarkStackTask(ThreadPool* thread_pool, MarkSweep* mark_sweep, size_t mark_stack_size,
                const Object** mark_stack)
      : mark_sweep_(mark_sweep),
        thread_pool_(thread_pool),
        mark_stack_pos_(mark_stack_size) {
    // We may have to copy part of an existing mark stack when another mark stack overflows.
    if (mark_stack_size != 0) {
      DCHECK(mark_stack != NULL);
      // TODO: Check performance?
      std::copy(mark_stack, mark_stack + mark_stack_size, mark_stack_);
    }
    if (kCountTasks) {
      ++mark_sweep_->work_chunks_created_;
    }
  }

  static const size_t kMaxSize = 1 * KB;

 protected:
  class ScanObjectParallelVisitor {
   public:
    explicit ScanObjectParallelVisitor(MarkStackTask<kUseFinger>* chunk_task) ALWAYS_INLINE
        : chunk_task_(chunk_task) {}

    void operator()(Object* obj) const {
      MarkSweep* mark_sweep = chunk_task_->mark_sweep_;
      mark_sweep->ScanObjectVisit(obj,
          [mark_sweep, this](Object* /* obj */, Object* ref, const MemberOffset& /* offset */,
              bool /* is_static */) ALWAYS_INLINE {
        if (ref != nullptr && mark_sweep->MarkObjectParallel(ref)) {
          if (kUseFinger) {
            android_memory_barrier();
            if (reinterpret_cast<uintptr_t>(ref) >=
                static_cast<uintptr_t>(mark_sweep->atomic_finger_)) {
              return;
            }
          }
          chunk_task_->MarkStackPush(ref);
        }
      });
    }

   private:
    MarkStackTask<kUseFinger>* const chunk_task_;
  };

  virtual ~MarkStackTask() {
    // Make sure that we have cleared our mark stack.
    DCHECK_EQ(mark_stack_pos_, 0U);
    if (kCountTasks) {
      ++mark_sweep_->work_chunks_deleted_;
    }
  }

  MarkSweep* const mark_sweep_;
  ThreadPool* const thread_pool_;
  // Thread local mark stack for this task.
  const Object* mark_stack_[kMaxSize];
  // Mark stack position.
  size_t mark_stack_pos_;

  void MarkStackPush(const Object* obj) ALWAYS_INLINE {
    if (UNLIKELY(mark_stack_pos_ == kMaxSize)) {
      // Mark stack overflow, give 1/2 the stack to the thread pool as a new work task.
      mark_stack_pos_ /= 2;
      auto* task = new MarkStackTask(thread_pool_, mark_sweep_, kMaxSize - mark_stack_pos_,
                                     mark_stack_ + mark_stack_pos_);
      thread_pool_->AddTask(Thread::Current(), task);
    }
    DCHECK(obj != nullptr);
    DCHECK(mark_stack_pos_ < kMaxSize);
    mark_stack_[mark_stack_pos_++] = obj;
  }

  virtual void Finalize() {
    delete this;
  }

  // Scans all of the objects
  virtual void Run(Thread* self) {
    ScanObjectParallelVisitor visitor(this);
    // TODO: Tune this.
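    // Pop objects into a small FIFO first and issue a prefetch for each one as it is enqueued, so
    // that by the time an object is dequeued and scanned its memory is more likely to already be
    // in the cache. Only used when kUseMarkStackPrefetch is enabled.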
    static const size_t kFifoSize = 4;
    BoundedFifoPowerOfTwo<const Object*, kFifoSize> prefetch_fifo;
    for (;;) {
      const Object* obj = nullptr;
      if (kUseMarkStackPrefetch) {
        while (mark_stack_pos_ != 0 && prefetch_fifo.size() < kFifoSize) {
          const Object* obj = mark_stack_[--mark_stack_pos_];
          DCHECK(obj != nullptr);
          __builtin_prefetch(obj);
          prefetch_fifo.push_back(obj);
        }
        if (UNLIKELY(prefetch_fifo.empty())) {
          break;
        }
        obj = prefetch_fifo.front();
        prefetch_fifo.pop_front();
      } else {
        if (UNLIKELY(mark_stack_pos_ == 0)) {
          break;
        }
        obj = mark_stack_[--mark_stack_pos_];
      }
      DCHECK(obj != nullptr);
      visitor(const_cast<mirror::Object*>(obj));
    }
  }
};

class CardScanTask : public MarkStackTask<false> {
 public:
  CardScanTask(ThreadPool* thread_pool, MarkSweep* mark_sweep, accounting::SpaceBitmap* bitmap,
               byte* begin, byte* end, byte minimum_age, size_t mark_stack_size,
               const Object** mark_stack_obj)
      : MarkStackTask<false>(thread_pool, mark_sweep, mark_stack_size, mark_stack_obj),
        bitmap_(bitmap),
        begin_(begin),
        end_(end),
        minimum_age_(minimum_age) {
  }

 protected:
  accounting::SpaceBitmap* const bitmap_;
  byte* const begin_;
  byte* const end_;
  const byte minimum_age_;

  virtual void Finalize() {
    delete this;
  }

  virtual void Run(Thread* self) NO_THREAD_SAFETY_ANALYSIS {
    ScanObjectParallelVisitor visitor(this);
    accounting::CardTable* card_table = mark_sweep_->GetHeap()->GetCardTable();
    size_t cards_scanned = card_table->Scan(bitmap_, begin_, end_, visitor, minimum_age_);
    VLOG(heap) << "Parallel scanning cards " << reinterpret_cast<void*>(begin_) << " - "
               << reinterpret_cast<void*>(end_) << " = " << cards_scanned;
    // Finish by emptying our local mark stack.
    MarkStackTask::Run(self);
  }
};

size_t MarkSweep::GetThreadCount(bool paused) const {
  if (heap_->GetThreadPool() == nullptr || !heap_->CareAboutPauseTimes()) {
    return 0;
  }
  if (paused) {
    return heap_->GetParallelGCThreadCount() + 1;
  } else {
    return heap_->GetConcGCThreadCount() + 1;
  }
}

void MarkSweep::ScanGrayObjects(bool paused, byte minimum_age) {
  accounting::CardTable* card_table = GetHeap()->GetCardTable();
  ThreadPool* thread_pool = GetHeap()->GetThreadPool();
  size_t thread_count = GetThreadCount(paused);
  // The parallel version with only one thread is faster for card scanning. TODO: fix.
  if (kParallelCardScan && thread_count > 0) {
    Thread* self = Thread::Current();
    // Can't have a different split for each space since multiple spaces can have their cards being
    // scanned at the same time.
    timings_.StartSplit(paused ? "(Paused)ScanGrayObjects" : "ScanGrayObjects");
    // Try to take some of the mark stack since we can pass this off to the worker tasks.
    const Object** mark_stack_begin = const_cast<const Object**>(mark_stack_->Begin());
    const Object** mark_stack_end = const_cast<const Object**>(mark_stack_->End());
    const size_t mark_stack_size = mark_stack_end - mark_stack_begin;
    // Estimated number of work tasks we will create.
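    // Each space contributes roughly thread_count tasks, so every task is handed an even slice of
    // the existing mark stack (capped at half a task's local stack) so that workers start with
    // some pre-seeded work instead of an empty local mark stack.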
    const size_t mark_stack_tasks = GetHeap()->GetContinuousSpaces().size() * thread_count;
    DCHECK_NE(mark_stack_tasks, 0U);
    const size_t mark_stack_delta = std::min(CardScanTask::kMaxSize / 2,
                                             mark_stack_size / mark_stack_tasks + 1);
    for (const auto& space : GetHeap()->GetContinuousSpaces()) {
      if (space->GetMarkBitmap() == nullptr) {
        continue;
      }
      byte* card_begin = space->Begin();
      byte* card_end = space->End();
      // Align up the end address. For example, the image space's end
      // may not be card-size-aligned.
      card_end = AlignUp(card_end, accounting::CardTable::kCardSize);
      DCHECK(IsAligned<accounting::CardTable::kCardSize>(card_begin));
      DCHECK(IsAligned<accounting::CardTable::kCardSize>(card_end));
      // Calculate how many bytes of heap we will scan.
      const size_t address_range = card_end - card_begin;
      // Calculate how much address range each task gets.
      const size_t card_delta = RoundUp(address_range / thread_count + 1,
                                        accounting::CardTable::kCardSize);
      // Create the worker tasks for this space.
      while (card_begin != card_end) {
        // Add a range of cards.
        size_t addr_remaining = card_end - card_begin;
        size_t card_increment = std::min(card_delta, addr_remaining);
        // Take from the back of the mark stack.
        size_t mark_stack_remaining = mark_stack_end - mark_stack_begin;
        size_t mark_stack_increment = std::min(mark_stack_delta, mark_stack_remaining);
        mark_stack_end -= mark_stack_increment;
        mark_stack_->PopBackCount(static_cast<int32_t>(mark_stack_increment));
        DCHECK_EQ(mark_stack_end, mark_stack_->End());
        // Add the new task to the thread pool.
        auto* task = new CardScanTask(thread_pool, this, space->GetMarkBitmap(), card_begin,
                                      card_begin + card_increment, minimum_age,
                                      mark_stack_increment, mark_stack_end);
        thread_pool->AddTask(self, task);
        card_begin += card_increment;
      }
    }

    // Note: the card scan below may dirty new cards (and scan them)
    // as a side effect when a Reference object is encountered and
    // queued during the marking. See b/11465268.
    thread_pool->SetMaxActiveWorkers(thread_count - 1);
    thread_pool->StartWorkers(self);
    thread_pool->Wait(self, true, true);
    thread_pool->StopWorkers(self);
    timings_.EndSplit();
  } else {
    for (const auto& space : GetHeap()->GetContinuousSpaces()) {
      if (space->GetMarkBitmap() != nullptr) {
        // Image spaces are handled properly since live == marked for them.
        switch (space->GetGcRetentionPolicy()) {
          case space::kGcRetentionPolicyNeverCollect:
            timings_.StartSplit(paused ? "(Paused)ScanGrayImageSpaceObjects" :
                                         "ScanGrayImageSpaceObjects");
            break;
          case space::kGcRetentionPolicyFullCollect:
            timings_.StartSplit(paused ? "(Paused)ScanGrayZygoteSpaceObjects" :
                                         "ScanGrayZygoteSpaceObjects");
            break;
          case space::kGcRetentionPolicyAlwaysCollect:
            timings_.StartSplit(paused ?
"(Paused)ScanGrayAllocSpaceObjects" : 869 "ScanGrayAllocSpaceObjects"); 870 break; 871 } 872 ScanObjectVisitor visitor(this); 873 card_table->Scan(space->GetMarkBitmap(), space->Begin(), space->End(), visitor, minimum_age); 874 timings_.EndSplit(); 875 } 876 } 877 } 878} 879 880void MarkSweep::VerifyImageRoots() { 881 // Verify roots ensures that all the references inside the image space point 882 // objects which are either in the image space or marked objects in the alloc 883 // space 884 timings_.StartSplit("VerifyImageRoots"); 885 for (const auto& space : GetHeap()->GetContinuousSpaces()) { 886 if (space->IsImageSpace()) { 887 space::ImageSpace* image_space = space->AsImageSpace(); 888 uintptr_t begin = reinterpret_cast<uintptr_t>(image_space->Begin()); 889 uintptr_t end = reinterpret_cast<uintptr_t>(image_space->End()); 890 accounting::SpaceBitmap* live_bitmap = image_space->GetLiveBitmap(); 891 DCHECK(live_bitmap != NULL); 892 live_bitmap->VisitMarkedRange(begin, end, [this](const Object* obj) { 893 if (kCheckLocks) { 894 Locks::heap_bitmap_lock_->AssertSharedHeld(Thread::Current()); 895 } 896 DCHECK(obj != NULL); 897 CheckObject(obj); 898 }); 899 } 900 } 901 timings_.EndSplit(); 902} 903 904class RecursiveMarkTask : public MarkStackTask<false> { 905 public: 906 RecursiveMarkTask(ThreadPool* thread_pool, MarkSweep* mark_sweep, 907 accounting::SpaceBitmap* bitmap, uintptr_t begin, uintptr_t end) 908 : MarkStackTask<false>(thread_pool, mark_sweep, 0, NULL), 909 bitmap_(bitmap), 910 begin_(begin), 911 end_(end) { 912 } 913 914 protected: 915 accounting::SpaceBitmap* const bitmap_; 916 const uintptr_t begin_; 917 const uintptr_t end_; 918 919 virtual void Finalize() { 920 delete this; 921 } 922 923 // Scans all of the objects 924 virtual void Run(Thread* self) NO_THREAD_SAFETY_ANALYSIS { 925 ScanObjectParallelVisitor visitor(this); 926 bitmap_->VisitMarkedRange(begin_, end_, visitor); 927 // Finish by emptying our local mark stack. 928 MarkStackTask::Run(self); 929 } 930}; 931 932// Populates the mark stack based on the set of marked objects and 933// recursively marks until the mark stack is emptied. 934void MarkSweep::RecursiveMark() { 935 base::TimingLogger::ScopedSplit split("RecursiveMark", &timings_); 936 // RecursiveMark will build the lists of known instances of the Reference classes. 937 // See DelayReferenceReferent for details. 938 CHECK(soft_reference_list_ == NULL); 939 CHECK(weak_reference_list_ == NULL); 940 CHECK(finalizer_reference_list_ == NULL); 941 CHECK(phantom_reference_list_ == NULL); 942 CHECK(cleared_reference_list_ == NULL); 943 944 if (kUseRecursiveMark) { 945 const bool partial = GetGcType() == kGcTypePartial; 946 ScanObjectVisitor scan_visitor(this); 947 auto* self = Thread::Current(); 948 ThreadPool* thread_pool = heap_->GetThreadPool(); 949 size_t thread_count = GetThreadCount(false); 950 const bool parallel = kParallelRecursiveMark && thread_count > 1; 951 mark_stack_->Reset(); 952 for (const auto& space : GetHeap()->GetContinuousSpaces()) { 953 if ((space->GetGcRetentionPolicy() == space::kGcRetentionPolicyAlwaysCollect) || 954 (!partial && space->GetGcRetentionPolicy() == space::kGcRetentionPolicyFullCollect)) { 955 current_mark_bitmap_ = space->GetMarkBitmap(); 956 if (current_mark_bitmap_ == nullptr) { 957 continue; 958 } 959 if (parallel) { 960 // We will use the mark stack the future. 961 // CHECK(mark_stack_->IsEmpty()); 962 // This function does not handle heap end increasing, so we must use the space end. 
          uintptr_t begin = reinterpret_cast<uintptr_t>(space->Begin());
          uintptr_t end = reinterpret_cast<uintptr_t>(space->End());
          atomic_finger_ = static_cast<int32_t>(0xFFFFFFFF);

          // Create a few worker tasks.
          const size_t n = thread_count * 2;
          while (begin != end) {
            uintptr_t start = begin;
            uintptr_t delta = (end - begin) / n;
            delta = RoundUp(delta, KB);
            if (delta < 16 * KB) delta = end - begin;
            begin += delta;
            auto* task = new RecursiveMarkTask(thread_pool, this, current_mark_bitmap_, start,
                                               begin);
            thread_pool->AddTask(self, task);
          }
          thread_pool->SetMaxActiveWorkers(thread_count - 1);
          thread_pool->StartWorkers(self);
          thread_pool->Wait(self, true, true);
          thread_pool->StopWorkers(self);
        } else {
          // This function does not handle heap end increasing, so we must use the space end.
          uintptr_t begin = reinterpret_cast<uintptr_t>(space->Begin());
          uintptr_t end = reinterpret_cast<uintptr_t>(space->End());
          current_mark_bitmap_->VisitMarkedRange(begin, end, scan_visitor);
        }
      }
    }
  }
  ProcessMarkStack(false);
}

mirror::Object* MarkSweep::SystemWeakIsMarkedCallback(Object* object, void* arg) {
  if (reinterpret_cast<MarkSweep*>(arg)->IsMarked(object)) {
    return object;
  }
  return nullptr;
}

void MarkSweep::RecursiveMarkDirtyObjects(bool paused, byte minimum_age) {
  ScanGrayObjects(paused, minimum_age);
  ProcessMarkStack(paused);
}

void MarkSweep::ReMarkRoots() {
  timings_.StartSplit("ReMarkRoots");
  Runtime::Current()->VisitRoots(MarkRootCallback, this, true, true);
  timings_.EndSplit();
}

void MarkSweep::SweepSystemWeaks() {
  Runtime* runtime = Runtime::Current();
  timings_.StartSplit("SweepSystemWeaks");
  runtime->SweepSystemWeaks(SystemWeakIsMarkedCallback, this);
  timings_.EndSplit();
}

mirror::Object* MarkSweep::VerifySystemWeakIsLiveCallback(Object* obj, void* arg) {
  reinterpret_cast<MarkSweep*>(arg)->VerifyIsLive(obj);
  // We don't actually want to sweep the object, so let's return "marked".
  return obj;
}

void MarkSweep::VerifyIsLive(const Object* obj) {
  Heap* heap = GetHeap();
  if (!heap->GetLiveBitmap()->Test(obj)) {
    space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
    if (!large_object_space->GetLiveObjects()->Test(obj)) {
      if (std::find(heap->allocation_stack_->Begin(), heap->allocation_stack_->End(), obj) ==
          heap->allocation_stack_->End()) {
        // Object not found!
        heap->DumpSpaces();
        LOG(FATAL) << "Found dead object " << obj;
      }
    }
  }
}

void MarkSweep::VerifySystemWeaks() {
  // Verify system weaks, uses a special object visitor which returns the input object.
  Runtime::Current()->SweepSystemWeaks(VerifySystemWeakIsLiveCallback, this);
}

struct SweepCallbackContext {
  MarkSweep* mark_sweep;
  space::AllocSpace* space;
  Thread* self;
};

class CheckpointMarkThreadRoots : public Closure {
 public:
  explicit CheckpointMarkThreadRoots(MarkSweep* mark_sweep) : mark_sweep_(mark_sweep) {}

  virtual void Run(Thread* thread) NO_THREAD_SAFETY_ANALYSIS {
    ATRACE_BEGIN("Marking thread roots");
    // Note: self is not necessarily equal to thread since thread may be suspended.
    Thread* self = Thread::Current();
    CHECK(thread == self || thread->IsSuspended() || thread->GetState() == kWaitingPerformingGc)
        << thread->GetState() << " thread " << thread << " self " << self;
    thread->VisitRoots(MarkSweep::MarkRootParallelCallback, mark_sweep_);
    ATRACE_END();
    mark_sweep_->GetBarrier().Pass(self);
  }

 private:
  MarkSweep* mark_sweep_;
};

void MarkSweep::MarkRootsCheckpoint(Thread* self) {
  CheckpointMarkThreadRoots check_point(this);
  timings_.StartSplit("MarkRootsCheckpoint");
  ThreadList* thread_list = Runtime::Current()->GetThreadList();
  // Request that the checkpoint be run on all threads, returning a count of the threads that must
  // run through the barrier, including self.
  size_t barrier_count = thread_list->RunCheckpoint(&check_point);
  // Release locks then wait for all mutator threads to pass the barrier.
  // TODO: optimize to not release locks when there are no threads to wait for.
  Locks::heap_bitmap_lock_->ExclusiveUnlock(self);
  Locks::mutator_lock_->SharedUnlock(self);
  ThreadState old_state = self->SetState(kWaitingForCheckPointsToRun);
  CHECK_EQ(old_state, kWaitingPerformingGc);
  gc_barrier_->Increment(self, barrier_count);
  self->SetState(kWaitingPerformingGc);
  Locks::mutator_lock_->SharedLock(self);
  Locks::heap_bitmap_lock_->ExclusiveLock(self);
  timings_.EndSplit();
}

void MarkSweep::SweepCallback(size_t num_ptrs, Object** ptrs, void* arg) {
  SweepCallbackContext* context = static_cast<SweepCallbackContext*>(arg);
  MarkSweep* mark_sweep = context->mark_sweep;
  Heap* heap = mark_sweep->GetHeap();
  space::AllocSpace* space = context->space;
  Thread* self = context->self;
  Locks::heap_bitmap_lock_->AssertExclusiveHeld(self);
  // Use a bulk free that merges consecutive objects before freeing, or free per object?
  // Documentation suggests better free performance with merging, but this may be at the expense
  // of allocation.
  size_t freed_objects = num_ptrs;
  // AllocSpace::FreeList clears the value in ptrs, so perform after clearing the live bit.
  size_t freed_bytes = space->FreeList(self, num_ptrs, ptrs);
  heap->RecordFree(freed_objects, freed_bytes);
  mark_sweep->freed_objects_.fetch_add(freed_objects);
  mark_sweep->freed_bytes_.fetch_add(freed_bytes);
}

void MarkSweep::ZygoteSweepCallback(size_t num_ptrs, Object** ptrs, void* arg) {
  SweepCallbackContext* context = static_cast<SweepCallbackContext*>(arg);
  Locks::heap_bitmap_lock_->AssertExclusiveHeld(context->self);
  Heap* heap = context->mark_sweep->GetHeap();
  // We don't free any actual memory to avoid dirtying the shared zygote pages.
  for (size_t i = 0; i < num_ptrs; ++i) {
    Object* obj = static_cast<Object*>(ptrs[i]);
    heap->GetLiveBitmap()->Clear(obj);
    heap->GetCardTable()->MarkCard(obj);
  }
}

void MarkSweep::SweepArray(accounting::ObjectStack* allocations, bool swap_bitmaps) {
  space::DlMallocSpace* space = heap_->GetNonMovingSpace();
  timings_.StartSplit("SweepArray");
  // Newly allocated objects MUST be in the alloc space and those are the only objects which we are
  // going to free.
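  // If the caller has already swapped the heap's live and mark bitmaps, swap the local pointers
  // too so that "mark_bitmap" below always refers to the bitmap holding this GC's marks. Unmarked
  // stack entries are then batched into groups of kSweepArrayChunkFreeSize before calling
  // FreeList, which amortizes the cost of the free calls.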
  accounting::SpaceBitmap* live_bitmap = space->GetLiveBitmap();
  accounting::SpaceBitmap* mark_bitmap = space->GetMarkBitmap();
  space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
  accounting::SpaceSetMap* large_live_objects = large_object_space->GetLiveObjects();
  accounting::SpaceSetMap* large_mark_objects = large_object_space->GetMarkObjects();
  if (swap_bitmaps) {
    std::swap(live_bitmap, mark_bitmap);
    std::swap(large_live_objects, large_mark_objects);
  }

  size_t freed_bytes = 0;
  size_t freed_large_object_bytes = 0;
  size_t freed_objects = 0;
  size_t freed_large_objects = 0;
  size_t count = allocations->Size();
  Object** objects = const_cast<Object**>(allocations->Begin());
  Object** out = objects;
  Object** objects_to_chunk_free = out;

  // Empty the allocation stack.
  Thread* self = Thread::Current();
  for (size_t i = 0; i < count; ++i) {
    Object* obj = objects[i];
    // There should only be objects in the AllocSpace/LargeObjectSpace in the allocation stack.
    if (LIKELY(mark_bitmap->HasAddress(obj))) {
      if (!mark_bitmap->Test(obj)) {
        // Don't bother un-marking since we clear the mark bitmap anyways.
        *(out++) = obj;
        // Free objects in chunks.
        DCHECK_GE(out, objects_to_chunk_free);
        DCHECK_LE(static_cast<size_t>(out - objects_to_chunk_free), kSweepArrayChunkFreeSize);
        if (static_cast<size_t>(out - objects_to_chunk_free) == kSweepArrayChunkFreeSize) {
          timings_.StartSplit("FreeList");
          size_t chunk_freed_objects = out - objects_to_chunk_free;
          freed_objects += chunk_freed_objects;
          freed_bytes += space->FreeList(self, chunk_freed_objects, objects_to_chunk_free);
          objects_to_chunk_free = out;
          timings_.EndSplit();
        }
      }
    } else if (!large_mark_objects->Test(obj)) {
      ++freed_large_objects;
      freed_large_object_bytes += large_object_space->Free(self, obj);
    }
  }
  // Free the remaining objects in chunks.
  DCHECK_GE(out, objects_to_chunk_free);
  DCHECK_LE(static_cast<size_t>(out - objects_to_chunk_free), kSweepArrayChunkFreeSize);
  if (out - objects_to_chunk_free > 0) {
    timings_.StartSplit("FreeList");
    size_t chunk_freed_objects = out - objects_to_chunk_free;
    freed_objects += chunk_freed_objects;
    freed_bytes += space->FreeList(self, chunk_freed_objects, objects_to_chunk_free);
    timings_.EndSplit();
  }
  CHECK_EQ(count, allocations->Size());
  timings_.EndSplit();

  timings_.StartSplit("RecordFree");
  VLOG(heap) << "Freed " << freed_objects << "/" << count
             << " objects with size " << PrettySize(freed_bytes);
  heap_->RecordFree(freed_objects + freed_large_objects, freed_bytes + freed_large_object_bytes);
  freed_objects_.fetch_add(freed_objects);
  freed_large_objects_.fetch_add(freed_large_objects);
  freed_bytes_.fetch_add(freed_bytes);
  freed_large_object_bytes_.fetch_add(freed_large_object_bytes);
  timings_.EndSplit();

  timings_.StartSplit("ResetStack");
  allocations->Reset();
  timings_.EndSplit();
}

void MarkSweep::Sweep(bool swap_bitmaps) {
  DCHECK(mark_stack_->IsEmpty());
  base::TimingLogger::ScopedSplit split("Sweep", &timings_);

  const bool partial = (GetGcType() == kGcTypePartial);
  SweepCallbackContext scc;
  scc.mark_sweep = this;
  scc.self = Thread::Current();
  for (const auto& space : GetHeap()->GetContinuousSpaces()) {
    if (!space->IsDlMallocSpace()) {
      continue;
    }
    // We always sweep always-collect spaces.
    bool sweep_space = space->GetGcRetentionPolicy() == space::kGcRetentionPolicyAlwaysCollect;
    if (!partial && !sweep_space) {
      // We sweep full-collect spaces when the GC isn't partial (i.e. it's a full GC).
      sweep_space = (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyFullCollect);
    }
    if (sweep_space) {
      uintptr_t begin = reinterpret_cast<uintptr_t>(space->Begin());
      uintptr_t end = reinterpret_cast<uintptr_t>(space->End());
      scc.space = space->AsDlMallocSpace();
      accounting::SpaceBitmap* live_bitmap = space->GetLiveBitmap();
      accounting::SpaceBitmap* mark_bitmap = space->GetMarkBitmap();
      if (swap_bitmaps) {
        std::swap(live_bitmap, mark_bitmap);
      }
      if (!space->IsZygoteSpace()) {
        base::TimingLogger::ScopedSplit split("SweepAllocSpace", &timings_);
        // Bitmaps are pre-swapped for optimization which enables sweeping with the heap unlocked.
        accounting::SpaceBitmap::SweepWalk(*live_bitmap, *mark_bitmap, begin, end,
                                           &SweepCallback, reinterpret_cast<void*>(&scc));
      } else {
        base::TimingLogger::ScopedSplit split("SweepZygote", &timings_);
        // Zygote sweep takes care of dirtying cards and clearing live bits; it does not free
        // actual memory.
        accounting::SpaceBitmap::SweepWalk(*live_bitmap, *mark_bitmap, begin, end,
                                           &ZygoteSweepCallback, reinterpret_cast<void*>(&scc));
      }
    }
  }

  SweepLargeObjects(swap_bitmaps);
}

void MarkSweep::SweepLargeObjects(bool swap_bitmaps) {
  base::TimingLogger::ScopedSplit split("SweepLargeObjects", &timings_);
  // Sweep large objects.
  space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
  accounting::SpaceSetMap* large_live_objects = large_object_space->GetLiveObjects();
  accounting::SpaceSetMap* large_mark_objects = large_object_space->GetMarkObjects();
  if (swap_bitmaps) {
    std::swap(large_live_objects, large_mark_objects);
  }
  // O(n*log(n)) but hopefully there are not too many large objects.
  size_t freed_objects = 0;
  size_t freed_bytes = 0;
  Thread* self = Thread::Current();
  for (const Object* obj : large_live_objects->GetObjects()) {
    if (!large_mark_objects->Test(obj)) {
      freed_bytes += large_object_space->Free(self, const_cast<Object*>(obj));
      ++freed_objects;
    }
  }
  freed_large_objects_.fetch_add(freed_objects);
  freed_large_object_bytes_.fetch_add(freed_bytes);
  GetHeap()->RecordFree(freed_objects, freed_bytes);
}

void MarkSweep::CheckReference(const Object* obj, const Object* ref, MemberOffset offset, bool is_static) {
  for (const auto& space : GetHeap()->GetContinuousSpaces()) {
    if (space->IsDlMallocSpace() && space->Contains(ref)) {
      DCHECK(IsMarked(obj));

      bool is_marked = IsMarked(ref);
      if (!is_marked) {
        LOG(INFO) << *space;
        LOG(WARNING) << (is_static ? "Static ref'" : "Instance ref'") << PrettyTypeOf(ref)
                     << "' (" << reinterpret_cast<const void*>(ref) << ") in '" << PrettyTypeOf(obj)
                     << "' (" << reinterpret_cast<const void*>(obj) << ") at offset "
                     << reinterpret_cast<void*>(offset.Int32Value()) << " wasn't marked";

        const Class* klass = is_static ? obj->AsClass() : obj->GetClass();
        DCHECK(klass != NULL);
        const ObjectArray<ArtField>* fields = is_static ? klass->GetSFields() : klass->GetIFields();
        DCHECK(fields != NULL);
        bool found = false;
        for (int32_t i = 0; i < fields->GetLength(); ++i) {
          const ArtField* cur = fields->Get(i);
          if (cur->GetOffset().Int32Value() == offset.Int32Value()) {
            LOG(WARNING) << "Field referencing the alloc space was " << PrettyField(cur);
            found = true;
            break;
          }
        }
        if (!found) {
          LOG(WARNING) << "Could not find field in object alloc space with offset " << offset.Int32Value();
        }

        bool obj_marked = heap_->GetCardTable()->IsDirty(obj);
        if (!obj_marked) {
          LOG(WARNING) << "Object '" << PrettyTypeOf(obj) << "' "
                       << "(" << reinterpret_cast<const void*>(obj) << ") contains references to "
                       << "the alloc space, but wasn't card marked";
        }
      }
    }
    break;
  }
}

// Process the "referent" field in a java.lang.ref.Reference. If the
// referent has not yet been marked, put it on the appropriate list in
// the heap for later processing.
void MarkSweep::DelayReferenceReferent(mirror::Class* klass, Object* obj) {
  DCHECK(klass != nullptr);
  DCHECK(klass->IsReferenceClass());
  DCHECK(obj != NULL);
  Object* referent = heap_->GetReferenceReferent(obj);
  if (referent != NULL && !IsMarked(referent)) {
    if (kCountJavaLangRefs) {
      ++reference_count_;
    }
    Thread* self = Thread::Current();
    // TODO: Remove these locks, and use atomic stacks for storing references?
    // We need to check that the references haven't already been enqueued since we can end up
    // scanning the same reference multiple times due to dirty cards.
    if (klass->IsSoftReferenceClass()) {
      MutexLock mu(self, *heap_->GetSoftRefQueueLock());
      if (!heap_->IsEnqueued(obj)) {
        heap_->EnqueuePendingReference(obj, &soft_reference_list_);
      }
    } else if (klass->IsWeakReferenceClass()) {
      MutexLock mu(self, *heap_->GetWeakRefQueueLock());
      if (!heap_->IsEnqueued(obj)) {
        heap_->EnqueuePendingReference(obj, &weak_reference_list_);
      }
    } else if (klass->IsFinalizerReferenceClass()) {
      MutexLock mu(self, *heap_->GetFinalizerRefQueueLock());
      if (!heap_->IsEnqueued(obj)) {
        heap_->EnqueuePendingReference(obj, &finalizer_reference_list_);
      }
    } else if (klass->IsPhantomReferenceClass()) {
      MutexLock mu(self, *heap_->GetPhantomRefQueueLock());
      if (!heap_->IsEnqueued(obj)) {
        heap_->EnqueuePendingReference(obj, &phantom_reference_list_);
      }
    } else {
      LOG(FATAL) << "Invalid reference type " << PrettyClass(klass)
                 << " " << std::hex << klass->GetAccessFlags();
    }
  }
}

class MarkObjectVisitor {
 public:
  explicit MarkObjectVisitor(MarkSweep* const mark_sweep) ALWAYS_INLINE : mark_sweep_(mark_sweep) {}

  // TODO: Fix me when annotalysis works with visitors.
  void operator()(const Object* /* obj */, const Object* ref, const MemberOffset& /* offset */,
                  bool /* is_static */) const ALWAYS_INLINE
      NO_THREAD_SAFETY_ANALYSIS {
    if (kCheckLocks) {
      Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
      Locks::heap_bitmap_lock_->AssertExclusiveHeld(Thread::Current());
    }
    mark_sweep_->MarkObject(ref);
  }

 private:
  MarkSweep* const mark_sweep_;
};

// Scans an object reference. Determines the type of the reference
// and dispatches to a specialized scanning routine.
void MarkSweep::ScanObject(Object* obj) {
  MarkObjectVisitor visitor(this);
  ScanObjectVisit(obj, visitor);
}

void MarkSweep::ProcessMarkStackParallel(size_t thread_count) {
  Thread* self = Thread::Current();
  ThreadPool* thread_pool = GetHeap()->GetThreadPool();
  const size_t chunk_size = std::min(mark_stack_->Size() / thread_count + 1,
                                     static_cast<size_t>(MarkStackTask<false>::kMaxSize));
  CHECK_GT(chunk_size, 0U);
  // Split the current mark stack up into work tasks.
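  // chunk_size divides the stack evenly across the worker threads but never exceeds a task's
  // local stack capacity (MarkStackTask::kMaxSize); each task copies its slice, so the shared
  // mark stack can simply be reset after the workers finish.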
  for (mirror::Object **it = mark_stack_->Begin(), **end = mark_stack_->End(); it < end; ) {
    const size_t delta = std::min(static_cast<size_t>(end - it), chunk_size);
    thread_pool->AddTask(self, new MarkStackTask<false>(thread_pool, this, delta,
                                                        const_cast<const mirror::Object**>(it)));
    it += delta;
  }
  thread_pool->SetMaxActiveWorkers(thread_count - 1);
  thread_pool->StartWorkers(self);
  thread_pool->Wait(self, true, true);
  thread_pool->StopWorkers(self);
  mark_stack_->Reset();
  CHECK_EQ(work_chunks_created_, work_chunks_deleted_) << " some of the work chunks were leaked";
}

// Scan anything that's on the mark stack.
void MarkSweep::ProcessMarkStack(bool paused) {
  timings_.StartSplit("ProcessMarkStack");
  size_t thread_count = GetThreadCount(paused);
  if (kParallelProcessMarkStack && thread_count > 1 &&
      mark_stack_->Size() >= kMinimumParallelMarkStackSize) {
    ProcessMarkStackParallel(thread_count);
  } else {
    // TODO: Tune this.
    static const size_t kFifoSize = 4;
    BoundedFifoPowerOfTwo<Object*, kFifoSize> prefetch_fifo;
    for (;;) {
      Object* obj = NULL;
      if (kUseMarkStackPrefetch) {
        while (!mark_stack_->IsEmpty() && prefetch_fifo.size() < kFifoSize) {
          Object* obj = mark_stack_->PopBack();
          DCHECK(obj != NULL);
          __builtin_prefetch(obj);
          prefetch_fifo.push_back(obj);
        }
        if (prefetch_fifo.empty()) {
          break;
        }
        obj = prefetch_fifo.front();
        prefetch_fifo.pop_front();
      } else {
        if (mark_stack_->IsEmpty()) {
          break;
        }
        obj = mark_stack_->PopBack();
      }
      DCHECK(obj != NULL);
      ScanObject(obj);
    }
  }
  timings_.EndSplit();
}

// Walks the reference list marking any references subject to the
// reference clearing policy. References with a black referent are
// removed from the list. References with white referents biased
// toward saving are blackened and also removed from the list.
void MarkSweep::PreserveSomeSoftReferences(Object** list) {
  DCHECK(list != NULL);
  Object* clear = NULL;
  size_t counter = 0;

  DCHECK(mark_stack_->IsEmpty());

  timings_.StartSplit("PreserveSomeSoftReferences");
  while (*list != NULL) {
    Object* ref = heap_->DequeuePendingReference(list);
    Object* referent = heap_->GetReferenceReferent(ref);
    if (referent == NULL) {
      // Referent was cleared by the user during marking.
      continue;
    }
    bool is_marked = IsMarked(referent);
    if (!is_marked && ((++counter) & 1)) {
      // Referent is white and biased toward saving, mark it.
      MarkObject(referent);
      is_marked = true;
    }
    if (!is_marked) {
      // Referent is white, queue it for clearing.
      heap_->EnqueuePendingReference(ref, &clear);
    }
  }
  *list = clear;
  timings_.EndSplit();

  // Restart the mark with the newly black references added to the root set.
  ProcessMarkStack(true);
}

inline bool MarkSweep::IsMarked(const Object* object) const
    SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
  if (IsImmune(object)) {
    return true;
  }
  DCHECK(current_mark_bitmap_ != NULL);
  if (current_mark_bitmap_->HasAddress(object)) {
    return current_mark_bitmap_->Test(object);
  }
  return heap_->GetMarkBitmap()->Test(object);
}

// Unlink the reference list, clearing reference objects with white
// referents.
// Cleared references registered to a reference queue are
// scheduled for appending by the heap worker thread.
void MarkSweep::ClearWhiteReferences(Object** list) {
  DCHECK(list != NULL);
  while (*list != NULL) {
    Object* ref = heap_->DequeuePendingReference(list);
    Object* referent = heap_->GetReferenceReferent(ref);
    if (referent != NULL && !IsMarked(referent)) {
      // Referent is white, clear it.
      heap_->ClearReferenceReferent(ref);
      if (heap_->IsEnqueuable(ref)) {
        heap_->EnqueueReference(ref, &cleared_reference_list_);
      }
    }
  }
  DCHECK(*list == NULL);
}

// Enqueues finalizer references with white referents. White
// referents are blackened, moved to the zombie field, and the
// referent field is cleared.
void MarkSweep::EnqueueFinalizerReferences(Object** list) {
  DCHECK(list != NULL);
  timings_.StartSplit("EnqueueFinalizerReferences");
  MemberOffset zombie_offset = heap_->GetFinalizerReferenceZombieOffset();
  bool has_enqueued = false;
  while (*list != NULL) {
    Object* ref = heap_->DequeuePendingReference(list);
    Object* referent = heap_->GetReferenceReferent(ref);
    if (referent != NULL && !IsMarked(referent)) {
      MarkObject(referent);
      // If the referent is non-null the reference must be enqueuable.
      DCHECK(heap_->IsEnqueuable(ref));
      ref->SetFieldObject(zombie_offset, referent, false);
      heap_->ClearReferenceReferent(ref);
      heap_->EnqueueReference(ref, &cleared_reference_list_);
      has_enqueued = true;
    }
  }
  timings_.EndSplit();
  if (has_enqueued) {
    ProcessMarkStack(true);
  }
  DCHECK(*list == NULL);
}

// Process reference class instances and schedule finalizations.
void MarkSweep::ProcessReferences(Object** soft_references, bool clear_soft,
                                  Object** weak_references,
                                  Object** finalizer_references,
                                  Object** phantom_references) {
  CHECK(soft_references != NULL);
  CHECK(weak_references != NULL);
  CHECK(finalizer_references != NULL);
  CHECK(phantom_references != NULL);
  CHECK(mark_stack_->IsEmpty());

  // Unless we are in the zygote or required to clear soft references
  // with white references, preserve some white referents.
  if (!clear_soft && !Runtime::Current()->IsZygote()) {
    PreserveSomeSoftReferences(soft_references);
  }

  timings_.StartSplit("ProcessReferences");
  // Clear all remaining soft and weak references with white
  // referents.
  ClearWhiteReferences(soft_references);
  ClearWhiteReferences(weak_references);
  timings_.EndSplit();

  // Preserve all white objects with finalize methods and schedule
  // them for finalization.
  EnqueueFinalizerReferences(finalizer_references);

  timings_.StartSplit("ProcessReferences");
  // Clear all f-reachable soft and weak references with white
  // referents.
  ClearWhiteReferences(soft_references);
  ClearWhiteReferences(weak_references);

  // Clear all phantom references with white referents.
  ClearWhiteReferences(phantom_references);

  // At this point all reference lists should be empty.
  DCHECK(*soft_references == NULL);
  DCHECK(*weak_references == NULL);
  DCHECK(*finalizer_references == NULL);
  DCHECK(*phantom_references == NULL);
  timings_.EndSplit();
}

void MarkSweep::UnBindBitmaps() {
  base::TimingLogger::ScopedSplit split("UnBindBitmaps", &timings_);
  for (const auto& space : GetHeap()->GetContinuousSpaces()) {
    if (space->IsDlMallocSpace()) {
      space::DlMallocSpace* alloc_space = space->AsDlMallocSpace();
      if (alloc_space->temp_bitmap_.get() != NULL) {
        // At this point, the temp_bitmap holds our old mark bitmap.
        accounting::SpaceBitmap* new_bitmap = alloc_space->temp_bitmap_.release();
        GetHeap()->GetMarkBitmap()->ReplaceBitmap(alloc_space->mark_bitmap_.get(), new_bitmap);
        CHECK_EQ(alloc_space->mark_bitmap_.release(), alloc_space->live_bitmap_.get());
        alloc_space->mark_bitmap_.reset(new_bitmap);
        DCHECK(alloc_space->temp_bitmap_.get() == NULL);
      }
    }
  }
}

void MarkSweep::FinishPhase() {
  base::TimingLogger::ScopedSplit split("FinishPhase", &timings_);
  // Can't enqueue references if we hold the mutator lock.
  Object* cleared_references = GetClearedReferences();
  Heap* heap = GetHeap();
  timings_.NewSplit("EnqueueClearedReferences");
  heap->EnqueueClearedReferences(&cleared_references);

  timings_.NewSplit("PostGcVerification");
  heap->PostGcVerification(this);

  timings_.NewSplit("RequestHeapTrim");
  heap->RequestHeapTrim();

  // Update the cumulative statistics.
  total_time_ns_ += GetDurationNs();
  total_paused_time_ns_ += std::accumulate(GetPauseTimes().begin(), GetPauseTimes().end(), 0,
                                           std::plus<uint64_t>());
  total_freed_objects_ += GetFreedObjects() + GetFreedLargeObjects();
  total_freed_bytes_ += GetFreedBytes() + GetFreedLargeObjectBytes();

  // Ensure that the mark stack is empty.
  CHECK(mark_stack_->IsEmpty());

  if (kCountScannedTypes) {
    VLOG(gc) << "MarkSweep scanned classes=" << class_count_ << " arrays=" << array_count_
             << " other=" << other_count_;
  }

  if (kCountTasks) {
    VLOG(gc) << "Total number of work chunks allocated: " << work_chunks_created_;
  }

  if (kMeasureOverhead) {
    VLOG(gc) << "Overhead time " << PrettyDuration(overhead_time_);
  }

  if (kProfileLargeObjects) {
    VLOG(gc) << "Large objects tested " << large_object_test_ << " marked " << large_object_mark_;
  }

  if (kCountClassesMarked) {
    VLOG(gc) << "Classes marked " << classes_marked_;
  }

  if (kCountJavaLangRefs) {
    VLOG(gc) << "References scanned " << reference_count_;
  }

  // Update the cumulative loggers.
  cumulative_timings_.Start();
  cumulative_timings_.AddLogger(timings_);
  cumulative_timings_.End();

  // Clear all of the spaces' mark bitmaps.
  for (const auto& space : GetHeap()->GetContinuousSpaces()) {
    accounting::SpaceBitmap* bitmap = space->GetMarkBitmap();
    if (bitmap != nullptr &&
        space->GetGcRetentionPolicy() != space::kGcRetentionPolicyNeverCollect) {
      bitmap->Clear();
    }
  }
  mark_stack_->Reset();

  // Reset the marked large objects.
  space::LargeObjectSpace* large_objects = GetHeap()->GetLargeObjectsSpace();
  large_objects->GetMarkObjects()->Clear();
}

}  // namespace collector
}  // namespace gc
}  // namespace art