mark_sweep.cc revision eb8167a4f4d27fce0530f6724ab8032610cd146b
/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "mark_sweep.h"

#include <functional>
#include <numeric>
#include <climits>
#include <vector>

#include "base/bounded_fifo.h"
#include "base/logging.h"
#include "base/macros.h"
#include "base/mutex-inl.h"
#include "base/timing_logger.h"
#include "gc/accounting/card_table-inl.h"
#include "gc/accounting/heap_bitmap-inl.h"
#include "gc/accounting/mod_union_table.h"
#include "gc/accounting/space_bitmap-inl.h"
#include "gc/heap.h"
#include "gc/reference_processor.h"
#include "gc/space/image_space.h"
#include "gc/space/large_object_space.h"
#include "gc/space/space-inl.h"
#include "mark_sweep-inl.h"
#include "mirror/art_field-inl.h"
#include "mirror/object-inl.h"
#include "runtime.h"
#include "scoped_thread_state_change.h"
#include "thread-inl.h"
#include "thread_list.h"

using ::art::mirror::ArtField;
using ::art::mirror::Class;
using ::art::mirror::Object;
using ::art::mirror::ObjectArray;

namespace art {
namespace gc {
namespace collector {

// Performance options.
static constexpr bool kUseRecursiveMark = false;
static constexpr bool kUseMarkStackPrefetch = true;
static constexpr size_t kSweepArrayChunkFreeSize = 1024;
static constexpr bool kPreCleanCards = true;

// Parallelism options.
static constexpr bool kParallelCardScan = true;
static constexpr bool kParallelRecursiveMark = true;
// Don't attempt to parallelize mark stack processing unless the mark stack is at least n
// elements. This is temporary until we reduce the overhead caused by allocating tasks, etc.
// Not having this can add overhead in ProcessReferences since we may end up doing many calls
// of ProcessMarkStack with very small mark stacks.
static constexpr size_t kMinimumParallelMarkStackSize = 128;
static constexpr bool kParallelProcessMarkStack = true;

// Profiling and information flags.
static constexpr bool kProfileLargeObjects = false;
static constexpr bool kMeasureOverhead = false;
static constexpr bool kCountTasks = false;
static constexpr bool kCountJavaLangRefs = false;
static constexpr bool kCountMarkedObjects = false;

// Turn off kCheckLocks when profiling the GC since it slows the GC down by up to 40%.
static constexpr bool kCheckLocks = kDebugLocking;
static constexpr bool kVerifyRootsMarked = kIsDebugBuild;

// If true, revoke the rosalloc thread-local buffers at the
// checkpoint, as opposed to during the pause.
static constexpr bool kRevokeRosAllocThreadLocalBuffersAtCheckpoint = true;

void MarkSweep::BindBitmaps() {
  timings_.StartSplit("BindBitmaps");
  WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
  // Mark all of the spaces we never collect as immune.
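  // Objects in immune spaces are treated as already marked, so both marking and sweeping skip
  // them; for mark sweep this covers e.g. the image space, whose retention policy is
  // never-collect.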
  for (const auto& space : GetHeap()->GetContinuousSpaces()) {
    if (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyNeverCollect) {
      CHECK(immune_region_.AddContinuousSpace(space)) << "Failed to add space " << *space;
    }
  }
  timings_.EndSplit();
}

MarkSweep::MarkSweep(Heap* heap, bool is_concurrent, const std::string& name_prefix)
    : GarbageCollector(heap,
                       name_prefix +
                       (is_concurrent ? "concurrent mark sweep" : "mark sweep")),
      gc_barrier_(new Barrier(0)),
      mark_stack_lock_("mark sweep mark stack lock", kMarkSweepMarkStackLock),
      is_concurrent_(is_concurrent) {
}

void MarkSweep::InitializePhase() {
  TimingLogger::ScopedSplit split("InitializePhase", &timings_);
  mark_stack_ = heap_->GetMarkStack();
  DCHECK(mark_stack_ != nullptr);
  immune_region_.Reset();
  class_count_ = 0;
  array_count_ = 0;
  other_count_ = 0;
  large_object_test_ = 0;
  large_object_mark_ = 0;
  overhead_time_ = 0;
  work_chunks_created_ = 0;
  work_chunks_deleted_ = 0;
  reference_count_ = 0;
  mark_null_count_ = 0;
  mark_immune_count_ = 0;
  mark_fastpath_count_ = 0;
  mark_slowpath_count_ = 0;
  {
    // TODO: I don't think we should need heap bitmap lock to Get the mark bitmap.
    ReaderMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
    mark_bitmap_ = heap_->GetMarkBitmap();
  }
  if (!clear_soft_references_) {
    // Always clear soft references for non-sticky collections.
    clear_soft_references_ = GetGcType() != collector::kGcTypeSticky;
  }
}

void MarkSweep::RunPhases() {
  Thread* self = Thread::Current();
  InitializePhase();
  Locks::mutator_lock_->AssertNotHeld(self);
  if (IsConcurrent()) {
    GetHeap()->PreGcVerification(this);
    {
      ReaderMutexLock mu(self, *Locks::mutator_lock_);
      MarkingPhase();
    }
    ScopedPause pause(this);
    GetHeap()->PrePauseRosAllocVerification(this);
    PausePhase();
    RevokeAllThreadLocalBuffers();
  } else {
    ScopedPause pause(this);
    GetHeap()->PreGcVerificationPaused(this);
    MarkingPhase();
    GetHeap()->PrePauseRosAllocVerification(this);
    PausePhase();
    RevokeAllThreadLocalBuffers();
  }
  {
    // Sweeping is always done concurrently, even for non-concurrent mark sweep.
    ReaderMutexLock mu(self, *Locks::mutator_lock_);
    ReclaimPhase();
  }
  GetHeap()->PostGcVerification(this);
  FinishPhase();
}

void MarkSweep::ProcessReferences(Thread* self) {
  TimingLogger::ScopedSplit split("ProcessReferences", &timings_);
  WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
  GetHeap()->GetReferenceProcessor()->ProcessReferences(
      true, &timings_, clear_soft_references_, &IsMarkedCallback, &MarkObjectCallback,
      &ProcessMarkStackCallback, this);
}

void MarkSweep::PausePhase() {
  TimingLogger::ScopedSplit split("(Paused)PausePhase", &timings_);
  Thread* self = Thread::Current();
  Locks::mutator_lock_->AssertExclusiveHeld(self);
  if (IsConcurrent()) {
    // Handle the dirty objects if we are a concurrent GC.
    WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
    // Re-mark root set.
    ReMarkRoots();
    // Scan dirty objects; this is only required for concurrent GC since only a concurrent GC
    // can have cards dirtied by mutators while marking is in progress.
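    // Since mutators are suspended and pre-cleaning already aged the older cards, scanning at
    // minimum age kCardDirty picks up exactly the cards dirtied since the pre-clean pass.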
    RecursiveMarkDirtyObjects(true, accounting::CardTable::kCardDirty);
  }
  {
    TimingLogger::ScopedSplit split("SwapStacks", &timings_);
    WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
    heap_->SwapStacks(self);
    live_stack_freeze_size_ = heap_->GetLiveStack()->Size();
    // Need to revoke all the thread-local allocation stacks since we just swapped the
    // allocation stacks and don't want anybody to allocate into the live stack.
    RevokeAllThreadLocalAllocationStacks(self);
  }
  timings_.StartSplit("PreSweepingGcVerification");
  heap_->PreSweepingGcVerification(this);
  timings_.EndSplit();
  // Disallow new system weaks to prevent a race which occurs when someone adds a new system
  // weak before we sweep them. Since this new system weak may not be marked, the GC may
  // incorrectly sweep it. This also fixes a race where interning may attempt to return a strong
  // reference to a string that is about to be swept.
  Runtime::Current()->DisallowNewSystemWeaks();
  // Enable the reference processing slow path; this needs to be done with mutators paused since
  // there is no lock in the GetReferent fast path.
  GetHeap()->GetReferenceProcessor()->EnableSlowPath();
}

void MarkSweep::PreCleanCards() {
  // Don't do this for non-concurrent GCs since they don't have any dirty cards.
  if (kPreCleanCards && IsConcurrent()) {
    Thread* self = Thread::Current();
    CHECK(!Locks::mutator_lock_->IsExclusiveHeld(self));
    // Process dirty cards and add them to the mod union tables; this also ages cards.
    heap_->ProcessCards(timings_, false);
    // The checkpoint root marking is required to avoid a race condition which occurs if the
    // following happens during a reference write:
    // 1. mutator dirties the card (write barrier)
    // 2. GC ages the card (the above ProcessCards call)
    // 3. GC scans the object (the RecursiveMarkDirtyObjects call below)
    // 4. mutator writes the value (corresponding to the write barrier in 1.)
    // This causes the GC to age the card but not necessarily mark the reference which the
    // mutator wrote into the object stored in the card.
    // Having the checkpoint fixes this issue since it ensures that the card mark and the
    // reference write are visible to the GC before the card is scanned (this is due to locks
    // being acquired / released in the checkpoint code).
    // The other roots are also marked to help reduce the pause.
    MarkRootsCheckpoint(self, false);
    MarkNonThreadRoots();
    MarkConcurrentRoots(
        static_cast<VisitRootFlags>(kVisitRootFlagClearRootLog | kVisitRootFlagNewRoots));
    // Process the newly aged cards.
    RecursiveMarkDirtyObjects(false, accounting::CardTable::kCardDirty - 1);
    // TODO: Empty the allocation stack to reduce the number of objects we need to test / mark
    // as live in the next GC.
  }
}

void MarkSweep::RevokeAllThreadLocalAllocationStacks(Thread* self) {
  if (kUseThreadLocalAllocationStack) {
    timings_.NewSplit("RevokeAllThreadLocalAllocationStacks");
    Locks::mutator_lock_->AssertExclusiveHeld(self);
    heap_->RevokeAllThreadLocalAllocationStacks(self);
  }
}

void MarkSweep::MarkingPhase() {
  TimingLogger::ScopedSplit split("MarkingPhase", &timings_);
  Thread* self = Thread::Current();

  BindBitmaps();
  FindDefaultSpaceBitmap();

  // Process dirty cards and add dirty cards to mod union tables.
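  // For spaces backed by a mod union table (image and zygote spaces), the dirty cards are
  // cleared and recorded in the table; for the remaining spaces the cards are aged, letting
  // later passes distinguish cards dirtied before this point from ones dirtied afterwards.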
  heap_->ProcessCards(timings_, false);

  WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
  MarkRoots(self);
  MarkReachableObjects();
  // Pre-clean dirtied cards to reduce pauses.
  PreCleanCards();
}

void MarkSweep::UpdateAndMarkModUnion() {
  for (const auto& space : heap_->GetContinuousSpaces()) {
    if (immune_region_.ContainsSpace(space)) {
      const char* name = space->IsZygoteSpace() ? "UpdateAndMarkZygoteModUnionTable" :
          "UpdateAndMarkImageModUnionTable";
      TimingLogger::ScopedSplit split(name, &timings_);
      accounting::ModUnionTable* mod_union_table = heap_->FindModUnionTableFromSpace(space);
      CHECK(mod_union_table != nullptr);
      mod_union_table->UpdateAndMarkReferences(MarkHeapReferenceCallback, this);
    }
  }
}

void MarkSweep::MarkReachableObjects() {
  UpdateAndMarkModUnion();
  // Recursively mark all the non-image bits set in the mark bitmap.
  RecursiveMark();
}

void MarkSweep::ReclaimPhase() {
  TimingLogger::ScopedSplit split("ReclaimPhase", &timings_);
  Thread* self = Thread::Current();
  // Process the references concurrently.
  ProcessReferences(self);
  SweepSystemWeaks(self);
  Runtime::Current()->AllowNewSystemWeaks();
  {
    WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);

    // Reclaim unmarked objects.
    Sweep(false);

    // Swap the live and mark bitmaps for each space that we modified. This is an optimization
    // that enables us to not clear live bits inside of the sweep. Only swaps unbound bitmaps.
    timings_.StartSplit("SwapBitmaps");
    SwapBitmaps();
    timings_.EndSplit();

    // Unbind the live and mark bitmaps.
    TimingLogger::ScopedSplit split("UnBindBitmaps", &timings_);
    GetHeap()->UnBindBitmaps();
  }
}

void MarkSweep::FindDefaultSpaceBitmap() {
  TimingLogger::ScopedSplit split("FindDefaultMarkBitmap", &timings_);
  for (const auto& space : GetHeap()->GetContinuousSpaces()) {
    accounting::ContinuousSpaceBitmap* bitmap = space->GetMarkBitmap();
    // We want to have the main space instead of the non-moving space if possible.
    if (bitmap != nullptr &&
        space->GetGcRetentionPolicy() == space::kGcRetentionPolicyAlwaysCollect) {
      current_space_bitmap_ = bitmap;
      // If this is not the non-moving space, exit the loop early since it is good enough.
      if (space != heap_->GetNonMovingSpace()) {
        break;
      }
    }
  }
  if (current_space_bitmap_ == nullptr) {
    heap_->DumpSpaces();
    LOG(FATAL) << "Could not find a default mark bitmap";
  }
}

void MarkSweep::ExpandMarkStack() {
  ResizeMarkStack(mark_stack_->Capacity() * 2);
}

void MarkSweep::ResizeMarkStack(size_t new_size) {
  // Rare case, no need to have Thread::Current be a parameter.
  if (UNLIKELY(mark_stack_->Size() < mark_stack_->Capacity())) {
    // Someone else acquired the lock and expanded the mark stack before us.
    return;
  }
  std::vector<Object*> temp(mark_stack_->Begin(), mark_stack_->End());
  CHECK_LE(mark_stack_->Size(), new_size);
  mark_stack_->Resize(new_size);
  for (const auto& obj : temp) {
    mark_stack_->PushBack(obj);
  }
}

inline void MarkSweep::MarkObjectNonNullParallel(Object* obj) {
  DCHECK(obj != nullptr);
  if (MarkObjectParallel(obj)) {
    MutexLock mu(Thread::Current(), mark_stack_lock_);
    if (UNLIKELY(mark_stack_->Size() >= mark_stack_->Capacity())) {
      ExpandMarkStack();
    }
    // The object must be pushed onto the mark stack.
    mark_stack_->PushBack(obj);
  }
}

mirror::Object* MarkSweep::MarkObjectCallback(mirror::Object* obj, void* arg) {
  MarkSweep* mark_sweep = reinterpret_cast<MarkSweep*>(arg);
  mark_sweep->MarkObject(obj);
  return obj;
}

void MarkSweep::MarkHeapReferenceCallback(mirror::HeapReference<mirror::Object>* ref, void* arg) {
  reinterpret_cast<MarkSweep*>(arg)->MarkObject(ref->AsMirrorPtr());
}

class MarkSweepMarkObjectSlowPath {
 public:
  explicit MarkSweepMarkObjectSlowPath(MarkSweep* mark_sweep) : mark_sweep_(mark_sweep) {
  }

  void operator()(const Object* obj) const ALWAYS_INLINE {
    if (kProfileLargeObjects) {
      // TODO: Differentiate between marking and testing somehow.
      ++mark_sweep_->large_object_test_;
      ++mark_sweep_->large_object_mark_;
    }
    space::LargeObjectSpace* large_object_space = mark_sweep_->GetHeap()->GetLargeObjectsSpace();
    if (UNLIKELY(obj == nullptr || !IsAligned<kPageSize>(obj) ||
                 (kIsDebugBuild && !large_object_space->Contains(obj)))) {
      LOG(ERROR) << "Tried to mark " << obj << " not contained by any spaces";
      LOG(ERROR) << "Attempting to see if it's a bad root";
      mark_sweep_->VerifyRoots();
      LOG(FATAL) << "Can't mark invalid object";
    }
  }

 private:
  MarkSweep* const mark_sweep_;
};

inline void MarkSweep::MarkObjectNonNull(Object* obj) {
  DCHECK(obj != nullptr);
  if (kUseBakerOrBrooksReadBarrier) {
    // Verify that all the objects have the correct pointer installed.
    obj->AssertReadBarrierPointer();
  }
  if (immune_region_.ContainsObject(obj)) {
    if (kCountMarkedObjects) {
      ++mark_immune_count_;
    }
    DCHECK(mark_bitmap_->Test(obj));
  } else if (LIKELY(current_space_bitmap_->HasAddress(obj))) {
    if (kCountMarkedObjects) {
      ++mark_fastpath_count_;
    }
    if (UNLIKELY(!current_space_bitmap_->Set(obj))) {
      PushOnMarkStack(obj);  // This object was not previously marked.
    }
  } else {
    if (kCountMarkedObjects) {
      ++mark_slowpath_count_;
    }
    MarkSweepMarkObjectSlowPath visitor(this);
    // TODO: We already know that the object is not in the current_space_bitmap_ but
    // MarkBitmap::Set will check again.
    if (!mark_bitmap_->Set(obj, visitor)) {
      PushOnMarkStack(obj);  // Was not already marked, push.
    }
  }
}

inline void MarkSweep::PushOnMarkStack(Object* obj) {
  if (UNLIKELY(mark_stack_->Size() >= mark_stack_->Capacity())) {
    // Lock is not needed but is here anyways to please annotalysis.
    MutexLock mu(Thread::Current(), mark_stack_lock_);
    ExpandMarkStack();
  }
  // The object must be pushed onto the mark stack.
  mark_stack_->PushBack(obj);
}

inline bool MarkSweep::MarkObjectParallel(const Object* obj) {
  DCHECK(obj != nullptr);
  if (kUseBakerOrBrooksReadBarrier) {
    // Verify that all the objects have the correct pointer installed.
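    // Same check as in MarkObjectNonNull; the assertion only reads the object header, so it is
    // safe to run concurrently from multiple marking threads.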
    obj->AssertReadBarrierPointer();
  }
  if (immune_region_.ContainsObject(obj)) {
    DCHECK(IsMarked(obj));
    return false;
  }
  // Try to take advantage of locality of references within a space; failing that, find the
  // space the hard way.
  accounting::ContinuousSpaceBitmap* object_bitmap = current_space_bitmap_;
  if (LIKELY(object_bitmap->HasAddress(obj))) {
    return !object_bitmap->AtomicTestAndSet(obj);
  }
  MarkSweepMarkObjectSlowPath visitor(this);
  return !mark_bitmap_->AtomicTestAndSet(obj, visitor);
}

// Used to mark objects when processing the mark stack. If an object is null, it is not marked.
inline void MarkSweep::MarkObject(Object* obj) {
  if (obj != nullptr) {
    MarkObjectNonNull(obj);
  } else if (kCountMarkedObjects) {
    ++mark_null_count_;
  }
}

void MarkSweep::MarkRootParallelCallback(Object** root, void* arg, uint32_t /*thread_id*/,
                                         RootType /*root_type*/) {
  reinterpret_cast<MarkSweep*>(arg)->MarkObjectNonNullParallel(*root);
}

void MarkSweep::VerifyRootMarked(Object** root, void* arg, uint32_t /*thread_id*/,
                                 RootType /*root_type*/) {
  CHECK(reinterpret_cast<MarkSweep*>(arg)->IsMarked(*root));
}

void MarkSweep::MarkRootCallback(Object** root, void* arg, uint32_t /*thread_id*/,
                                 RootType /*root_type*/) {
  reinterpret_cast<MarkSweep*>(arg)->MarkObjectNonNull(*root);
}

void MarkSweep::VerifyRootCallback(const Object* root, void* arg, size_t vreg,
                                   const StackVisitor* visitor, RootType root_type) {
  reinterpret_cast<MarkSweep*>(arg)->VerifyRoot(root, vreg, visitor, root_type);
}

void MarkSweep::VerifyRoot(const Object* root, size_t vreg, const StackVisitor* visitor,
                           RootType root_type) {
  // See if the root is on any space bitmap.
  if (heap_->GetLiveBitmap()->GetContinuousSpaceBitmap(root) == nullptr) {
    space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
    if (!large_object_space->Contains(root)) {
      LOG(ERROR) << "Found invalid root: " << root << " with type " << root_type;
      if (visitor != nullptr) {
        LOG(ERROR) << visitor->DescribeLocation() << " in VReg: " << vreg;
      }
    }
  }
}

void MarkSweep::VerifyRoots() {
  Runtime::Current()->GetThreadList()->VerifyRoots(VerifyRootCallback, this);
}

void MarkSweep::MarkRoots(Thread* self) {
  if (Locks::mutator_lock_->IsExclusiveHeld(self)) {
    // If we exclusively hold the mutator lock, all threads must be suspended.
    timings_.StartSplit("MarkRoots");
    Runtime::Current()->VisitRoots(MarkRootCallback, this);
    timings_.EndSplit();
    RevokeAllThreadLocalAllocationStacks(self);
  } else {
    MarkRootsCheckpoint(self, kRevokeRosAllocThreadLocalBuffersAtCheckpoint);
    // At this point the live stack should no longer have any mutators which push into it.
    MarkNonThreadRoots();
    MarkConcurrentRoots(
        static_cast<VisitRootFlags>(kVisitRootFlagAllRoots | kVisitRootFlagStartLoggingNewRoots));
  }
}

void MarkSweep::MarkNonThreadRoots() {
  timings_.StartSplit("MarkNonThreadRoots");
  Runtime::Current()->VisitNonThreadRoots(MarkRootCallback, this);
  timings_.EndSplit();
}

void MarkSweep::MarkConcurrentRoots(VisitRootFlags flags) {
  timings_.StartSplit("MarkConcurrentRoots");
  // Visit all runtime roots and clear dirty flags.
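  // The flags control which root categories get visited and whether the runtime starts or stops
  // logging newly created roots; the log lets ReMarkRoots revisit only the roots that appeared
  // after this scan instead of all of them.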
  Runtime::Current()->VisitConcurrentRoots(MarkRootCallback, this, flags);
  timings_.EndSplit();
}

class ScanObjectVisitor {
 public:
  explicit ScanObjectVisitor(MarkSweep* const mark_sweep) ALWAYS_INLINE
      : mark_sweep_(mark_sweep) {}

  void operator()(Object* obj) const ALWAYS_INLINE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
    if (kCheckLocks) {
      Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
      Locks::heap_bitmap_lock_->AssertExclusiveHeld(Thread::Current());
    }
    mark_sweep_->ScanObject(obj);
  }

 private:
  MarkSweep* const mark_sweep_;
};

class DelayReferenceReferentVisitor {
 public:
  explicit DelayReferenceReferentVisitor(MarkSweep* collector) : collector_(collector) {
  }

  void operator()(mirror::Class* klass, mirror::Reference* ref) const
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
    collector_->DelayReferenceReferent(klass, ref);
  }

 private:
  MarkSweep* const collector_;
};

template <bool kUseFinger = false>
class MarkStackTask : public Task {
 public:
  MarkStackTask(ThreadPool* thread_pool, MarkSweep* mark_sweep, size_t mark_stack_size,
                Object** mark_stack)
      : mark_sweep_(mark_sweep),
        thread_pool_(thread_pool),
        mark_stack_pos_(mark_stack_size) {
    // We may have to copy part of an existing mark stack when another mark stack overflows.
    if (mark_stack_size != 0) {
      DCHECK(mark_stack != nullptr);
      // TODO: Check performance?
      std::copy(mark_stack, mark_stack + mark_stack_size, mark_stack_);
    }
    if (kCountTasks) {
      ++mark_sweep_->work_chunks_created_;
    }
  }

  static const size_t kMaxSize = 1 * KB;

 protected:
  class MarkObjectParallelVisitor {
   public:
    explicit MarkObjectParallelVisitor(MarkStackTask<kUseFinger>* chunk_task,
                                       MarkSweep* mark_sweep) ALWAYS_INLINE
        : chunk_task_(chunk_task), mark_sweep_(mark_sweep) {}

    void operator()(Object* obj, MemberOffset offset, bool /* static */) const ALWAYS_INLINE
        SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
      mirror::Object* ref = obj->GetFieldObject<mirror::Object>(offset);
      if (ref != nullptr && mark_sweep_->MarkObjectParallel(ref)) {
        if (kUseFinger) {
          android_memory_barrier();
          if (reinterpret_cast<uintptr_t>(ref) >=
              static_cast<uintptr_t>(mark_sweep_->atomic_finger_)) {
            return;
          }
        }
        chunk_task_->MarkStackPush(ref);
      }
    }

   private:
    MarkStackTask<kUseFinger>* const chunk_task_;
    MarkSweep* const mark_sweep_;
  };

  class ScanObjectParallelVisitor {
   public:
    explicit ScanObjectParallelVisitor(MarkStackTask<kUseFinger>* chunk_task) ALWAYS_INLINE
        : chunk_task_(chunk_task) {}

    // No thread safety analysis since multiple threads will use this visitor.
    void operator()(Object* obj) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
        EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
      MarkSweep* const mark_sweep = chunk_task_->mark_sweep_;
      MarkObjectParallelVisitor mark_visitor(chunk_task_, mark_sweep);
      DelayReferenceReferentVisitor ref_visitor(mark_sweep);
      mark_sweep->ScanObjectVisit(obj, mark_visitor, ref_visitor);
    }

   private:
    MarkStackTask<kUseFinger>* const chunk_task_;
  };

  virtual ~MarkStackTask() {
    // Make sure that we have cleared our mark stack.
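    // Run() drains the task-local stack to empty before returning, so a non-zero position here
    // means the task was destroyed without being run.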
    DCHECK_EQ(mark_stack_pos_, 0U);
    if (kCountTasks) {
      ++mark_sweep_->work_chunks_deleted_;
    }
  }

  MarkSweep* const mark_sweep_;
  ThreadPool* const thread_pool_;
  // Thread local mark stack for this task.
  Object* mark_stack_[kMaxSize];
  // Mark stack position.
  size_t mark_stack_pos_;

  void MarkStackPush(Object* obj) ALWAYS_INLINE {
    if (UNLIKELY(mark_stack_pos_ == kMaxSize)) {
      // Mark stack overflow: give 1/2 the stack to the thread pool as a new work task.
      mark_stack_pos_ /= 2;
      auto* task = new MarkStackTask(thread_pool_, mark_sweep_, kMaxSize - mark_stack_pos_,
                                     mark_stack_ + mark_stack_pos_);
      thread_pool_->AddTask(Thread::Current(), task);
    }
    DCHECK(obj != nullptr);
    DCHECK_LT(mark_stack_pos_, kMaxSize);
    mark_stack_[mark_stack_pos_++] = obj;
  }

  virtual void Finalize() {
    delete this;
  }

  // Scans all of the objects.
  virtual void Run(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
    ScanObjectParallelVisitor visitor(this);
    // TODO: Tune this.
    static const size_t kFifoSize = 4;
    BoundedFifoPowerOfTwo<Object*, kFifoSize> prefetch_fifo;
    for (;;) {
      Object* obj = nullptr;
      if (kUseMarkStackPrefetch) {
        while (mark_stack_pos_ != 0 && prefetch_fifo.size() < kFifoSize) {
          Object* obj = mark_stack_[--mark_stack_pos_];
          DCHECK(obj != nullptr);
          __builtin_prefetch(obj);
          prefetch_fifo.push_back(obj);
        }
        if (UNLIKELY(prefetch_fifo.empty())) {
          break;
        }
        obj = prefetch_fifo.front();
        prefetch_fifo.pop_front();
      } else {
        if (UNLIKELY(mark_stack_pos_ == 0)) {
          break;
        }
        obj = mark_stack_[--mark_stack_pos_];
      }
      DCHECK(obj != nullptr);
      visitor(obj);
    }
  }
};

class CardScanTask : public MarkStackTask<false> {
 public:
  CardScanTask(ThreadPool* thread_pool, MarkSweep* mark_sweep,
               accounting::ContinuousSpaceBitmap* bitmap,
               byte* begin, byte* end, byte minimum_age, size_t mark_stack_size,
               Object** mark_stack_obj)
      : MarkStackTask<false>(thread_pool, mark_sweep, mark_stack_size, mark_stack_obj),
        bitmap_(bitmap),
        begin_(begin),
        end_(end),
        minimum_age_(minimum_age) {
  }

 protected:
  accounting::ContinuousSpaceBitmap* const bitmap_;
  byte* const begin_;
  byte* const end_;
  const byte minimum_age_;

  virtual void Finalize() {
    delete this;
  }

  virtual void Run(Thread* self) NO_THREAD_SAFETY_ANALYSIS {
    ScanObjectParallelVisitor visitor(this);
    accounting::CardTable* card_table = mark_sweep_->GetHeap()->GetCardTable();
    size_t cards_scanned = card_table->Scan(bitmap_, begin_, end_, visitor, minimum_age_);
    VLOG(heap) << "Parallel scanning cards " << reinterpret_cast<void*>(begin_) << " - "
               << reinterpret_cast<void*>(end_) << " = " << cards_scanned;
    // Finish by emptying our local mark stack.
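    // The card scan above may have pushed more references than the fixed-size local stack
    // holds; MarkStackPush handles overflow by spinning off new tasks, and the base class Run
    // drains whatever remains.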
    MarkStackTask::Run(self);
  }
};

size_t MarkSweep::GetThreadCount(bool paused) const {
  if (heap_->GetThreadPool() == nullptr || !heap_->CareAboutPauseTimes()) {
    return 1;
  }
  if (paused) {
    return heap_->GetParallelGCThreadCount() + 1;
  } else {
    return heap_->GetConcGCThreadCount() + 1;
  }
}

void MarkSweep::ScanGrayObjects(bool paused, byte minimum_age) {
  accounting::CardTable* card_table = GetHeap()->GetCardTable();
  ThreadPool* thread_pool = GetHeap()->GetThreadPool();
  size_t thread_count = GetThreadCount(paused);
  // The parallel version with only one thread is faster for card scanning. TODO: fix this.
  if (kParallelCardScan && thread_count > 1) {
    Thread* self = Thread::Current();
    // Can't have a different split for each space since multiple spaces can have their cards
    // being scanned at the same time.
    timings_.StartSplit(paused ? "(Paused)ScanGrayObjects" : "ScanGrayObjects");
    // Try to take some of the mark stack since we can pass this off to the worker tasks.
    Object** mark_stack_begin = mark_stack_->Begin();
    Object** mark_stack_end = mark_stack_->End();
    const size_t mark_stack_size = mark_stack_end - mark_stack_begin;
    // Estimated number of work tasks we will create.
    const size_t mark_stack_tasks = GetHeap()->GetContinuousSpaces().size() * thread_count;
    DCHECK_NE(mark_stack_tasks, 0U);
    const size_t mark_stack_delta = std::min(CardScanTask::kMaxSize / 2,
                                             mark_stack_size / mark_stack_tasks + 1);
    for (const auto& space : GetHeap()->GetContinuousSpaces()) {
      if (space->GetMarkBitmap() == nullptr) {
        continue;
      }
      byte* card_begin = space->Begin();
      byte* card_end = space->End();
      // Align up the end address. For example, the image space's end
      // may not be card-size-aligned.
      card_end = AlignUp(card_end, accounting::CardTable::kCardSize);
      DCHECK(IsAligned<accounting::CardTable::kCardSize>(card_begin));
      DCHECK(IsAligned<accounting::CardTable::kCardSize>(card_end));
      // Calculate how many bytes of heap we will scan.
      const size_t address_range = card_end - card_begin;
      // Calculate how much address range each task gets.
      const size_t card_delta = RoundUp(address_range / thread_count + 1,
                                        accounting::CardTable::kCardSize);
      // Create the worker tasks for this space.
      while (card_begin != card_end) {
        // Add a range of cards.
        size_t addr_remaining = card_end - card_begin;
        size_t card_increment = std::min(card_delta, addr_remaining);
        // Take from the back of the mark stack.
        size_t mark_stack_remaining = mark_stack_end - mark_stack_begin;
        size_t mark_stack_increment = std::min(mark_stack_delta, mark_stack_remaining);
        mark_stack_end -= mark_stack_increment;
        mark_stack_->PopBackCount(static_cast<int32_t>(mark_stack_increment));
        DCHECK_EQ(mark_stack_end, mark_stack_->End());
        // Add the new task to the thread pool.
        auto* task = new CardScanTask(thread_pool, this, space->GetMarkBitmap(), card_begin,
                                      card_begin + card_increment, minimum_age,
                                      mark_stack_increment, mark_stack_end);
        thread_pool->AddTask(self, task);
        card_begin += card_increment;
      }
    }

    // Note: the card scan below may dirty new cards (and scan them)
    // as a side effect when a Reference object is encountered and
    // queued during the marking. See b/11465268.
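    // thread_count includes the current thread, which also executes tasks via
    // Wait(self, true, true), so only thread_count - 1 pool workers need to be active.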
    thread_pool->SetMaxActiveWorkers(thread_count - 1);
    thread_pool->StartWorkers(self);
    thread_pool->Wait(self, true, true);
    thread_pool->StopWorkers(self);
    timings_.EndSplit();
  } else {
    for (const auto& space : GetHeap()->GetContinuousSpaces()) {
      if (space->GetMarkBitmap() != nullptr) {
        // Image spaces are handled properly since live == marked for them.
        switch (space->GetGcRetentionPolicy()) {
          case space::kGcRetentionPolicyNeverCollect:
            timings_.StartSplit(paused ? "(Paused)ScanGrayImageSpaceObjects" :
                "ScanGrayImageSpaceObjects");
            break;
          case space::kGcRetentionPolicyFullCollect:
            timings_.StartSplit(paused ? "(Paused)ScanGrayZygoteSpaceObjects" :
                "ScanGrayZygoteSpaceObjects");
            break;
          case space::kGcRetentionPolicyAlwaysCollect:
            timings_.StartSplit(paused ? "(Paused)ScanGrayAllocSpaceObjects" :
                "ScanGrayAllocSpaceObjects");
            break;
        }
        ScanObjectVisitor visitor(this);
        card_table->Scan(space->GetMarkBitmap(), space->Begin(), space->End(), visitor,
                         minimum_age);
        timings_.EndSplit();
      }
    }
  }
}

class RecursiveMarkTask : public MarkStackTask<false> {
 public:
  RecursiveMarkTask(ThreadPool* thread_pool, MarkSweep* mark_sweep,
                    accounting::ContinuousSpaceBitmap* bitmap, uintptr_t begin, uintptr_t end)
      : MarkStackTask<false>(thread_pool, mark_sweep, 0, nullptr),
        bitmap_(bitmap),
        begin_(begin),
        end_(end) {
  }

 protected:
  accounting::ContinuousSpaceBitmap* const bitmap_;
  const uintptr_t begin_;
  const uintptr_t end_;

  virtual void Finalize() {
    delete this;
  }

  // Scans all of the objects.
  virtual void Run(Thread* self) NO_THREAD_SAFETY_ANALYSIS {
    ScanObjectParallelVisitor visitor(this);
    bitmap_->VisitMarkedRange(begin_, end_, visitor);
    // Finish by emptying our local mark stack.
    MarkStackTask::Run(self);
  }
};

// Populates the mark stack based on the set of marked objects and
// recursively marks until the mark stack is emptied.
void MarkSweep::RecursiveMark() {
  TimingLogger::ScopedSplit split("RecursiveMark", &timings_);
  // RecursiveMark will build the lists of known instances of the Reference classes. See
  // DelayReferenceReferent for details.
  if (kUseRecursiveMark) {
    const bool partial = GetGcType() == kGcTypePartial;
    ScanObjectVisitor scan_visitor(this);
    auto* self = Thread::Current();
    ThreadPool* thread_pool = heap_->GetThreadPool();
    size_t thread_count = GetThreadCount(false);
    const bool parallel = kParallelRecursiveMark && thread_count > 1;
    mark_stack_->Reset();
    for (const auto& space : GetHeap()->GetContinuousSpaces()) {
      if ((space->GetGcRetentionPolicy() == space::kGcRetentionPolicyAlwaysCollect) ||
          (!partial && space->GetGcRetentionPolicy() == space::kGcRetentionPolicyFullCollect)) {
        current_space_bitmap_ = space->GetMarkBitmap();
        if (current_space_bitmap_ == nullptr) {
          continue;
        }
        if (parallel) {
          // We will use the mark stack in the future.
          // CHECK(mark_stack_->IsEmpty());
          // This function does not handle heap end increasing, so we must use the space end.
          uintptr_t begin = reinterpret_cast<uintptr_t>(space->Begin());
          uintptr_t end = reinterpret_cast<uintptr_t>(space->End());
          atomic_finger_ = static_cast<int32_t>(0xFFFFFFFF);

          // Create a few worker tasks.
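          // Split the space into roughly two chunks per thread so that uneven marking costs can
          // balance across workers; if a chunk would be under 16 KB, the whole remaining range
          // goes into a single task instead.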
          const size_t n = thread_count * 2;
          while (begin != end) {
            uintptr_t start = begin;
            uintptr_t delta = (end - begin) / n;
            delta = RoundUp(delta, KB);
            if (delta < 16 * KB) delta = end - begin;
            begin += delta;
            auto* task = new RecursiveMarkTask(thread_pool, this, current_space_bitmap_, start,
                                               begin);
            thread_pool->AddTask(self, task);
          }
          thread_pool->SetMaxActiveWorkers(thread_count - 1);
          thread_pool->StartWorkers(self);
          thread_pool->Wait(self, true, true);
          thread_pool->StopWorkers(self);
        } else {
          // This function does not handle heap end increasing, so we must use the space end.
          uintptr_t begin = reinterpret_cast<uintptr_t>(space->Begin());
          uintptr_t end = reinterpret_cast<uintptr_t>(space->End());
          current_space_bitmap_->VisitMarkedRange(begin, end, scan_visitor);
        }
      }
    }
  }
  ProcessMarkStack(false);
}

mirror::Object* MarkSweep::IsMarkedCallback(mirror::Object* object, void* arg) {
  if (reinterpret_cast<MarkSweep*>(arg)->IsMarked(object)) {
    return object;
  }
  return nullptr;
}

void MarkSweep::RecursiveMarkDirtyObjects(bool paused, byte minimum_age) {
  ScanGrayObjects(paused, minimum_age);
  ProcessMarkStack(paused);
}

void MarkSweep::ReMarkRoots() {
  Locks::mutator_lock_->AssertExclusiveHeld(Thread::Current());
  timings_.StartSplit("(Paused)ReMarkRoots");
  Runtime::Current()->VisitRoots(
      MarkRootCallback, this, static_cast<VisitRootFlags>(kVisitRootFlagNewRoots |
                                                          kVisitRootFlagStopLoggingNewRoots |
                                                          kVisitRootFlagClearRootLog));
  timings_.EndSplit();
  if (kVerifyRootsMarked) {
    timings_.StartSplit("(Paused)VerifyRoots");
    Runtime::Current()->VisitRoots(VerifyRootMarked, this);
    timings_.EndSplit();
  }
}

void MarkSweep::SweepSystemWeaks(Thread* self) {
  WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
  timings_.StartSplit("SweepSystemWeaks");
  Runtime::Current()->SweepSystemWeaks(IsMarkedCallback, this);
  timings_.EndSplit();
}

mirror::Object* MarkSweep::VerifySystemWeakIsLiveCallback(Object* obj, void* arg) {
  reinterpret_cast<MarkSweep*>(arg)->VerifyIsLive(obj);
  // We don't actually want to sweep the object, so let's return it as "marked".
  return obj;
}

void MarkSweep::VerifyIsLive(const Object* obj) {
  if (!heap_->GetLiveBitmap()->Test(obj)) {
    if (std::find(heap_->allocation_stack_->Begin(), heap_->allocation_stack_->End(), obj) ==
        heap_->allocation_stack_->End()) {
      // Object not found!
      heap_->DumpSpaces();
      LOG(FATAL) << "Found dead object " << obj;
    }
  }
}

void MarkSweep::VerifySystemWeaks() {
  // Verify system weaks; uses a special object visitor which returns the input object.
  Runtime::Current()->SweepSystemWeaks(VerifySystemWeakIsLiveCallback, this);
}

class CheckpointMarkThreadRoots : public Closure {
 public:
  explicit CheckpointMarkThreadRoots(MarkSweep* mark_sweep,
                                     bool revoke_ros_alloc_thread_local_buffers_at_checkpoint)
      : mark_sweep_(mark_sweep),
        revoke_ros_alloc_thread_local_buffers_at_checkpoint_(
            revoke_ros_alloc_thread_local_buffers_at_checkpoint) {
  }

  virtual void Run(Thread* thread) OVERRIDE NO_THREAD_SAFETY_ANALYSIS {
    ATRACE_BEGIN("Marking thread roots");
    // Note: self is not necessarily equal to thread since thread may be suspended.
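    // RunCheckpoint executes this closure on each runnable thread; for suspended threads it is
    // run on their behalf by the thread requesting the checkpoint.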
    Thread* self = Thread::Current();
    CHECK(thread == self || thread->IsSuspended() || thread->GetState() == kWaitingPerformingGc)
        << thread->GetState() << " thread " << thread << " self " << self;
    thread->VisitRoots(MarkSweep::MarkRootParallelCallback, mark_sweep_);
    ATRACE_END();
    if (revoke_ros_alloc_thread_local_buffers_at_checkpoint_) {
      ATRACE_BEGIN("RevokeRosAllocThreadLocalBuffers");
      mark_sweep_->GetHeap()->RevokeRosAllocThreadLocalBuffers(thread);
      ATRACE_END();
    }
    mark_sweep_->GetBarrier().Pass(self);
  }

 private:
  MarkSweep* const mark_sweep_;
  const bool revoke_ros_alloc_thread_local_buffers_at_checkpoint_;
};

void MarkSweep::MarkRootsCheckpoint(Thread* self,
                                    bool revoke_ros_alloc_thread_local_buffers_at_checkpoint) {
  CheckpointMarkThreadRoots check_point(this, revoke_ros_alloc_thread_local_buffers_at_checkpoint);
  timings_.StartSplit("MarkRootsCheckpoint");
  ThreadList* thread_list = Runtime::Current()->GetThreadList();
  // Request that the checkpoint is run on all threads, returning a count of the threads that
  // must run through the barrier, including self.
  size_t barrier_count = thread_list->RunCheckpoint(&check_point);
  // Release locks then wait for all mutator threads to pass the barrier.
  // TODO: optimize to not release locks when there are no threads to wait for.
  Locks::heap_bitmap_lock_->ExclusiveUnlock(self);
  Locks::mutator_lock_->SharedUnlock(self);
  {
    ScopedThreadStateChange tsc(self, kWaitingForCheckPointsToRun);
    gc_barrier_->Increment(self, barrier_count);
  }
  Locks::mutator_lock_->SharedLock(self);
  Locks::heap_bitmap_lock_->ExclusiveLock(self);
  timings_.EndSplit();
}

void MarkSweep::SweepArray(accounting::ObjectStack* allocations, bool swap_bitmaps) {
  timings_.StartSplit("SweepArray");
  Thread* self = Thread::Current();
  mirror::Object* chunk_free_buffer[kSweepArrayChunkFreeSize];
  size_t chunk_free_pos = 0;
  size_t freed_bytes = 0;
  size_t freed_large_object_bytes = 0;
  size_t freed_objects = 0;
  size_t freed_large_objects = 0;
  // How many objects are left in the array, modified after each space is swept.
  Object** objects = allocations->Begin();
  size_t count = allocations->Size();
  // Change the order so that the non-moving space is swept last as an optimization.
  std::vector<space::ContinuousSpace*> sweep_spaces;
  space::ContinuousSpace* non_moving_space = nullptr;
  for (space::ContinuousSpace* space : heap_->GetContinuousSpaces()) {
    if (space->IsAllocSpace() && !immune_region_.ContainsSpace(space) &&
        space->GetLiveBitmap() != nullptr) {
      if (space == heap_->GetNonMovingSpace()) {
        non_moving_space = space;
      } else {
        sweep_spaces.push_back(space);
      }
    }
  }
  // We are unlikely to sweep a significant amount of non-movable objects, so we do these after
  // the other alloc spaces as an optimization.
  if (non_moving_space != nullptr) {
    sweep_spaces.push_back(non_moving_space);
  }
  // Start by sweeping the continuous spaces.
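  // For each space, walk the remaining allocation stack entries: objects belonging to the space
  // are dropped from the array (and freed in kSweepArrayChunkFreeSize batches if unmarked),
  // while objects from other spaces are compacted to the front so later passes scan fewer
  // entries.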
  for (space::ContinuousSpace* space : sweep_spaces) {
    space::AllocSpace* alloc_space = space->AsAllocSpace();
    accounting::ContinuousSpaceBitmap* live_bitmap = space->GetLiveBitmap();
    accounting::ContinuousSpaceBitmap* mark_bitmap = space->GetMarkBitmap();
    if (swap_bitmaps) {
      std::swap(live_bitmap, mark_bitmap);
    }
    Object** out = objects;
    for (size_t i = 0; i < count; ++i) {
      Object* obj = objects[i];
      if (kUseThreadLocalAllocationStack && obj == nullptr) {
        continue;
      }
      if (space->HasAddress(obj)) {
        // This object is in the space; remove it from the array and add it to the sweep buffer
        // if needed.
        if (!mark_bitmap->Test(obj)) {
          if (chunk_free_pos >= kSweepArrayChunkFreeSize) {
            timings_.StartSplit("FreeList");
            freed_objects += chunk_free_pos;
            freed_bytes += alloc_space->FreeList(self, chunk_free_pos, chunk_free_buffer);
            timings_.EndSplit();
            chunk_free_pos = 0;
          }
          chunk_free_buffer[chunk_free_pos++] = obj;
        }
      } else {
        *(out++) = obj;
      }
    }
    if (chunk_free_pos > 0) {
      timings_.StartSplit("FreeList");
      freed_objects += chunk_free_pos;
      freed_bytes += alloc_space->FreeList(self, chunk_free_pos, chunk_free_buffer);
      timings_.EndSplit();
      chunk_free_pos = 0;
    }
    // All of the references which the space contained are no longer in the allocation stack;
    // update the count.
    count = out - objects;
  }
  // Handle the large object space.
  space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
  accounting::LargeObjectBitmap* large_live_objects = large_object_space->GetLiveBitmap();
  accounting::LargeObjectBitmap* large_mark_objects = large_object_space->GetMarkBitmap();
  if (swap_bitmaps) {
    std::swap(large_live_objects, large_mark_objects);
  }
  for (size_t i = 0; i < count; ++i) {
    Object* obj = objects[i];
    // Handle large objects.
    if (kUseThreadLocalAllocationStack && obj == nullptr) {
      continue;
    }
    if (!large_mark_objects->Test(obj)) {
      ++freed_large_objects;
      freed_large_object_bytes += large_object_space->Free(self, obj);
    }
  }
  timings_.EndSplit();

  timings_.StartSplit("RecordFree");
  VLOG(heap) << "Freed " << freed_objects << "/" << count << " objects with size "
             << PrettySize(freed_bytes);
  RecordFree(freed_objects, freed_bytes);
  RecordFreeLargeObjects(freed_large_objects, freed_large_object_bytes);
  timings_.EndSplit();

  timings_.StartSplit("ResetStack");
  allocations->Reset();
  timings_.EndSplit();
}

void MarkSweep::Sweep(bool swap_bitmaps) {
  // Ensure that nobody inserted items in the live stack after we swapped the stacks.
  CHECK_GE(live_stack_freeze_size_, GetHeap()->GetLiveStack()->Size());
  // Mark everything allocated since the last GC as live so that we can sweep concurrently,
  // knowing that new allocations won't be marked as live.
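  // The frozen live stack holds the objects allocated since the last GC. Setting their live
  // bits lets the sweep free those that were never marked, while objects allocated during the
  // sweep only enter the fresh allocation stack and are not considered.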
  timings_.StartSplit("MarkStackAsLive");
  accounting::ObjectStack* live_stack = heap_->GetLiveStack();
  heap_->MarkAllocStackAsLive(live_stack);
  live_stack->Reset();
  timings_.EndSplit();

  DCHECK(mark_stack_->IsEmpty());
  for (const auto& space : GetHeap()->GetContinuousSpaces()) {
    if (space->IsContinuousMemMapAllocSpace()) {
      space::ContinuousMemMapAllocSpace* alloc_space = space->AsContinuousMemMapAllocSpace();
      TimingLogger::ScopedSplit split(
          alloc_space->IsZygoteSpace() ? "SweepZygoteSpace" : "SweepMallocSpace", &timings_);
      size_t freed_objects = 0;
      size_t freed_bytes = 0;
      alloc_space->Sweep(swap_bitmaps, &freed_objects, &freed_bytes);
      RecordFree(freed_objects, freed_bytes);
    }
  }
  SweepLargeObjects(swap_bitmaps);
}

void MarkSweep::SweepLargeObjects(bool swap_bitmaps) {
  TimingLogger::ScopedSplit split("SweepLargeObjects", &timings_);
  size_t freed_objects = 0;
  size_t freed_bytes = 0;
  heap_->GetLargeObjectsSpace()->Sweep(swap_bitmaps, &freed_objects, &freed_bytes);
  RecordFreeLargeObjects(freed_objects, freed_bytes);
}

// Process the "referent" field in a java.lang.ref.Reference. If the referent has not yet been
// marked, put it on the appropriate list in the heap for later processing.
void MarkSweep::DelayReferenceReferent(mirror::Class* klass, mirror::Reference* ref) {
  DCHECK(klass != nullptr);
  if (kCountJavaLangRefs) {
    ++reference_count_;
  }
  heap_->GetReferenceProcessor()->DelayReferenceReferent(klass, ref, IsMarkedCallback, this);
}

class MarkObjectVisitor {
 public:
  explicit MarkObjectVisitor(MarkSweep* const mark_sweep) ALWAYS_INLINE : mark_sweep_(mark_sweep) {
  }

  void operator()(Object* obj, MemberOffset offset, bool /* is_static */) const
      ALWAYS_INLINE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
    if (kCheckLocks) {
      Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
      Locks::heap_bitmap_lock_->AssertExclusiveHeld(Thread::Current());
    }
    mark_sweep_->MarkObject(obj->GetFieldObject<mirror::Object>(offset));
  }

 private:
  MarkSweep* const mark_sweep_;
};

// Scans an object reference. Determines the type of the reference
// and dispatches to a specialized scanning routine.
void MarkSweep::ScanObject(Object* obj) {
  MarkObjectVisitor mark_visitor(this);
  DelayReferenceReferentVisitor ref_visitor(this);
  ScanObjectVisit(obj, mark_visitor, ref_visitor);
}

void MarkSweep::ProcessMarkStackCallback(void* arg) {
  reinterpret_cast<MarkSweep*>(arg)->ProcessMarkStack(false);
}

void MarkSweep::ProcessMarkStackParallel(size_t thread_count) {
  Thread* self = Thread::Current();
  ThreadPool* thread_pool = GetHeap()->GetThreadPool();
  const size_t chunk_size = std::min(mark_stack_->Size() / thread_count + 1,
                                     static_cast<size_t>(MarkStackTask<false>::kMaxSize));
  CHECK_GT(chunk_size, 0U);
  // Split the current mark stack up into work tasks.
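  // Each task copies up to chunk_size entries into its own fixed-size stack; chunk_size is
  // capped at MarkStackTask::kMaxSize so the copy in the task constructor cannot overflow.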
  for (mirror::Object **it = mark_stack_->Begin(), **end = mark_stack_->End(); it < end; ) {
    const size_t delta = std::min(static_cast<size_t>(end - it), chunk_size);
    thread_pool->AddTask(self, new MarkStackTask<false>(thread_pool, this, delta, it));
    it += delta;
  }
  thread_pool->SetMaxActiveWorkers(thread_count - 1);
  thread_pool->StartWorkers(self);
  thread_pool->Wait(self, true, true);
  thread_pool->StopWorkers(self);
  mark_stack_->Reset();
  CHECK_EQ(work_chunks_created_, work_chunks_deleted_) << " some of the work chunks were leaked";
}

// Scan anything that's on the mark stack.
void MarkSweep::ProcessMarkStack(bool paused) {
  timings_.StartSplit(paused ? "(Paused)ProcessMarkStack" : "ProcessMarkStack");
  size_t thread_count = GetThreadCount(paused);
  if (kParallelProcessMarkStack && thread_count > 1 &&
      mark_stack_->Size() >= kMinimumParallelMarkStackSize) {
    ProcessMarkStackParallel(thread_count);
  } else {
    // TODO: Tune this.
    static const size_t kFifoSize = 4;
    BoundedFifoPowerOfTwo<Object*, kFifoSize> prefetch_fifo;
    for (;;) {
      Object* obj = nullptr;
      if (kUseMarkStackPrefetch) {
        while (!mark_stack_->IsEmpty() && prefetch_fifo.size() < kFifoSize) {
          Object* obj = mark_stack_->PopBack();
          DCHECK(obj != nullptr);
          __builtin_prefetch(obj);
          prefetch_fifo.push_back(obj);
        }
        if (prefetch_fifo.empty()) {
          break;
        }
        obj = prefetch_fifo.front();
        prefetch_fifo.pop_front();
      } else {
        if (mark_stack_->IsEmpty()) {
          break;
        }
        obj = mark_stack_->PopBack();
      }
      DCHECK(obj != nullptr);
      ScanObject(obj);
    }
  }
  timings_.EndSplit();
}

inline bool MarkSweep::IsMarked(const Object* object) const
    SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
  if (immune_region_.ContainsObject(object)) {
    return true;
  }
  if (current_space_bitmap_->HasAddress(object)) {
    return current_space_bitmap_->Test(object);
  }
  return mark_bitmap_->Test(object);
}

void MarkSweep::FinishPhase() {
  TimingLogger::ScopedSplit split("FinishPhase", &timings_);
  if (kCountScannedTypes) {
    VLOG(gc) << "MarkSweep scanned classes=" << class_count_ << " arrays=" << array_count_
             << " other=" << other_count_;
  }
  if (kCountTasks) {
    VLOG(gc) << "Total number of work chunks allocated: " << work_chunks_created_;
  }
  if (kMeasureOverhead) {
    VLOG(gc) << "Overhead time " << PrettyDuration(overhead_time_);
  }
  if (kProfileLargeObjects) {
    VLOG(gc) << "Large objects tested " << large_object_test_ << " marked " << large_object_mark_;
  }
  if (kCountJavaLangRefs) {
    VLOG(gc) << "References scanned " << reference_count_;
  }
  if (kCountMarkedObjects) {
    VLOG(gc) << "Marked: null=" << mark_null_count_ << " immune=" << mark_immune_count_
             << " fastpath=" << mark_fastpath_count_ << " slowpath=" << mark_slowpath_count_;
  }
  CHECK(mark_stack_->IsEmpty());  // Ensure that the mark stack is empty.
  mark_stack_->Reset();
  WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
  heap_->ClearMarkedObjects();
}

void MarkSweep::RevokeAllThreadLocalBuffers() {
  if (kRevokeRosAllocThreadLocalBuffersAtCheckpoint && IsConcurrent()) {
    // If concurrent, rosalloc thread-local buffers are revoked at the
    // thread checkpoint.
    // Bump pointer space thread-local buffers must not be in use.
    GetHeap()->AssertAllBumpPointerSpaceThreadLocalBuffersAreRevoked();
  } else {
    timings_.StartSplit("(Paused)RevokeAllThreadLocalBuffers");
    GetHeap()->RevokeAllThreadLocalBuffers();
    timings_.EndSplit();
  }
}

}  // namespace collector
}  // namespace gc
}  // namespace art