mark_sweep.cc revision b0fa5dc7769c1e054032f39de0a3f6d6dd06f8cf
/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "mark_sweep.h"

#include <functional>
#include <numeric>
#include <climits>
#include <vector>

#include "base/bounded_fifo.h"
#include "base/logging.h"
#include "base/macros.h"
#include "base/mutex-inl.h"
#include "base/timing_logger.h"
#include "gc/accounting/card_table-inl.h"
#include "gc/accounting/heap_bitmap-inl.h"
#include "gc/accounting/mod_union_table.h"
#include "gc/accounting/space_bitmap-inl.h"
#include "gc/heap.h"
#include "gc/space/image_space.h"
#include "gc/space/large_object_space.h"
#include "gc/space/space-inl.h"
#include "mark_sweep-inl.h"
#include "mirror/art_field-inl.h"
#include "mirror/object-inl.h"
#include "runtime.h"
#include "scoped_thread_state_change.h"
#include "thread-inl.h"
#include "thread_list.h"

using ::art::mirror::ArtField;
using ::art::mirror::Class;
using ::art::mirror::Object;
using ::art::mirror::ObjectArray;

namespace art {
namespace gc {
namespace collector {

// Performance options.
static constexpr bool kUseRecursiveMark = false;
static constexpr bool kUseMarkStackPrefetch = true;
static constexpr size_t kSweepArrayChunkFreeSize = 1024;
static constexpr bool kPreCleanCards = true;

// Parallelism options.
static constexpr bool kParallelCardScan = true;
static constexpr bool kParallelRecursiveMark = true;
// Don't attempt to parallelize mark stack processing unless the mark stack is at least n
// elements. This is temporary until we reduce the overhead caused by allocating tasks, etc. Not
// having this can add overhead in ProcessReferences since we may end up doing many calls of
// ProcessMarkStack with very small mark stacks.
static constexpr size_t kMinimumParallelMarkStackSize = 128;
static constexpr bool kParallelProcessMarkStack = true;

// Profiling and information flags.
static constexpr bool kProfileLargeObjects = false;
static constexpr bool kMeasureOverhead = false;
static constexpr bool kCountTasks = false;
static constexpr bool kCountJavaLangRefs = false;
static constexpr bool kCountMarkedObjects = false;

// Turn off kCheckLocks when profiling the GC since it slows the GC down by up to 40%.
static constexpr bool kCheckLocks = kDebugLocking;
static constexpr bool kVerifyRootsMarked = kIsDebugBuild;

// If true, revoke the rosalloc thread-local buffers at the
// checkpoint, as opposed to during the pause.
static constexpr bool kRevokeRosAllocThreadLocalBuffersAtCheckpoint = true;

void MarkSweep::BindBitmaps() {
  timings_.StartSplit("BindBitmaps");
  WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
  // Mark all of the spaces we never collect as immune.
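  // Spaces that are never collected (typically the image space) become part of the immune
  // region; MarkObjectNonNull treats objects there as already marked and only asserts that
  // their mark bitmap bits are set instead of tracing them again.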
  for (const auto& space : GetHeap()->GetContinuousSpaces()) {
    if (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyNeverCollect) {
      CHECK(immune_region_.AddContinuousSpace(space)) << "Failed to add space " << *space;
    }
  }
  timings_.EndSplit();
}

MarkSweep::MarkSweep(Heap* heap, bool is_concurrent, const std::string& name_prefix)
    : GarbageCollector(heap,
                       name_prefix +
                       (is_concurrent ? "concurrent mark sweep": "mark sweep")),
      gc_barrier_(new Barrier(0)),
      mark_stack_lock_("mark sweep mark stack lock", kMarkSweepMarkStackLock),
      is_concurrent_(is_concurrent) {
}

void MarkSweep::InitializePhase() {
  TimingLogger::ScopedSplit split("InitializePhase", &timings_);
  mark_stack_ = heap_->mark_stack_.get();
  DCHECK(mark_stack_ != nullptr);
  immune_region_.Reset();
  class_count_ = 0;
  array_count_ = 0;
  other_count_ = 0;
  large_object_test_ = 0;
  large_object_mark_ = 0;
  overhead_time_ = 0;
  work_chunks_created_ = 0;
  work_chunks_deleted_ = 0;
  reference_count_ = 0;
  mark_null_count_ = 0;
  mark_immune_count_ = 0;
  mark_fastpath_count_ = 0;
  mark_slowpath_count_ = 0;
  {
    // TODO: I don't think we should need heap bitmap lock to get the mark bitmap.
    ReaderMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
    mark_bitmap_ = heap_->GetMarkBitmap();
  }
  if (!clear_soft_references_) {
    // Always clear soft references if this is a non-sticky collection.
    clear_soft_references_ = GetGcType() != collector::kGcTypeSticky;
  }
}

void MarkSweep::RunPhases() {
  Thread* self = Thread::Current();
  InitializePhase();
  Locks::mutator_lock_->AssertNotHeld(self);
  if (IsConcurrent()) {
    GetHeap()->PreGcVerification(this);
    {
      ReaderMutexLock mu(self, *Locks::mutator_lock_);
      MarkingPhase();
    }
    ScopedPause pause(this);
    GetHeap()->PrePauseRosAllocVerification(this);
    PausePhase();
    RevokeAllThreadLocalBuffers();
  } else {
    ScopedPause pause(this);
    GetHeap()->PreGcVerificationPaused(this);
    MarkingPhase();
    GetHeap()->PrePauseRosAllocVerification(this);
    PausePhase();
    RevokeAllThreadLocalBuffers();
  }
  {
    // Sweeping is always done concurrently, even for non-concurrent mark sweep.
    ReaderMutexLock mu(self, *Locks::mutator_lock_);
    ReclaimPhase();
  }
  GetHeap()->PostGcVerification(this);
  FinishPhase();
}

void MarkSweep::ProcessReferences(Thread* self) {
  TimingLogger::ScopedSplit split("ProcessReferences", &timings_);
  WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
  GetHeap()->ProcessReferences(timings_, clear_soft_references_, &IsMarkedCallback,
                               &MarkObjectCallback, &ProcessMarkStackPausedCallback, this);
}

void MarkSweep::PreProcessReferences() {
  if (IsConcurrent()) {
    // No reason to do this for non-concurrent GC since pre-processing soft references only helps
    // pauses.
    timings_.NewSplit("PreProcessReferences");
    GetHeap()->ProcessSoftReferences(timings_, clear_soft_references_, &IsMarkedCallback,
                                     &MarkObjectCallback, &ProcessMarkStackPausedCallback, this);
  }
}

void MarkSweep::PausePhase() {
  TimingLogger::ScopedSplit split("(Paused)PausePhase", &timings_);
  Thread* self = Thread::Current();
  Locks::mutator_lock_->AssertExclusiveHeld(self);
  if (IsConcurrent()) {
    // Handle the dirty objects if we are a concurrent GC.
    WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
    // Re-mark root set.
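    // Roots may have changed since concurrent marking (new roots are logged while marking runs),
    // so they are visited again here under the pause; ReMarkRoots also stops the root logging.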
    ReMarkRoots();
    // Scan dirty objects; this is only required if we are doing a concurrent GC.
    RecursiveMarkDirtyObjects(true, accounting::CardTable::kCardDirty);
  }
  ProcessReferences(self);
  {
    TimingLogger::ScopedSplit split("SwapStacks", &timings_);
    WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
    heap_->SwapStacks(self);
    live_stack_freeze_size_ = heap_->GetLiveStack()->Size();
    // Need to revoke all the thread local allocation stacks since we just swapped the allocation
    // stacks and don't want anybody to allocate into the live stack.
    RevokeAllThreadLocalAllocationStacks(self);
  }
  timings_.StartSplit("PreSweepingGcVerification");
  heap_->PreSweepingGcVerification(this);
  timings_.EndSplit();
  // Disallow new system weaks to prevent a race which occurs when someone adds a new system
  // weak before we sweep them. Since this new system weak may not be marked, the GC may
  // incorrectly sweep it. This also fixes a race where interning may attempt to return a strong
  // reference to a string that is about to be swept.
  Runtime::Current()->DisallowNewSystemWeaks();
}

void MarkSweep::PreCleanCards() {
  // Don't do this for non-concurrent GCs since they don't have any dirty cards.
  if (kPreCleanCards && IsConcurrent()) {
    Thread* self = Thread::Current();
    CHECK(!Locks::mutator_lock_->IsExclusiveHeld(self));
    // Process dirty cards and add dirty cards to mod union tables, also ages cards.
    heap_->ProcessCards(timings_, false);
    // The checkpoint root marking is required to avoid a race condition which occurs if the
    // following happens during a reference write:
    // 1. mutator dirties the card (write barrier)
    // 2. GC ages the card (the above ProcessCards call)
    // 3. GC scans the object (the RecursiveMarkDirtyObjects call below)
    // 4. mutator writes the value (corresponding to the write barrier in 1.)
    // This causes the GC to age the card but not necessarily mark the reference which the mutator
    // wrote into the object stored in the card.
    // Having the checkpoint fixes this issue since it ensures that the card mark and the
    // reference write are visible to the GC before the card is scanned (this is due to locks being
    // acquired / released in the checkpoint code).
    // The other roots are also marked to help reduce the pause.
    MarkRootsCheckpoint(self, false);
    MarkNonThreadRoots();
    MarkConcurrentRoots(
        static_cast<VisitRootFlags>(kVisitRootFlagClearRootLog | kVisitRootFlagNewRoots));
    // Process the newly aged cards.
    RecursiveMarkDirtyObjects(false, accounting::CardTable::kCardDirty - 1);
    // TODO: Empty allocation stack to reduce the number of objects we need to test / mark as live
    // in the next GC.
  }
}

void MarkSweep::RevokeAllThreadLocalAllocationStacks(Thread* self) {
  if (kUseThreadLocalAllocationStack) {
    timings_.NewSplit("RevokeAllThreadLocalAllocationStacks");
    Locks::mutator_lock_->AssertExclusiveHeld(self);
    heap_->RevokeAllThreadLocalAllocationStacks(self);
  }
}

void MarkSweep::MarkingPhase() {
  TimingLogger::ScopedSplit split("MarkingPhase", &timings_);
  Thread* self = Thread::Current();

  BindBitmaps();
  FindDefaultSpaceBitmap();

  // Process dirty cards and add dirty cards to mod union tables.
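  // ProcessCards also ages the remaining dirty cards so that the pre-cleaning and pause-time
  // scans can pick them up later (see PreCleanCards and PausePhase).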
  heap_->ProcessCards(timings_, false);

  WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
  MarkRoots(self);
  MarkReachableObjects();
  // Pre-clean dirtied cards to reduce pauses.
  PreCleanCards();
  PreProcessReferences();
}

void MarkSweep::UpdateAndMarkModUnion() {
  for (const auto& space : heap_->GetContinuousSpaces()) {
    if (immune_region_.ContainsSpace(space)) {
      const char* name = space->IsZygoteSpace() ? "UpdateAndMarkZygoteModUnionTable" :
          "UpdateAndMarkImageModUnionTable";
      TimingLogger::ScopedSplit split(name, &timings_);
      accounting::ModUnionTable* mod_union_table = heap_->FindModUnionTableFromSpace(space);
      CHECK(mod_union_table != nullptr);
      mod_union_table->UpdateAndMarkReferences(MarkHeapReferenceCallback, this);
    }
  }
}

void MarkSweep::MarkReachableObjects() {
  UpdateAndMarkModUnion();
  // Recursively mark all the non-image bits set in the mark bitmap.
  RecursiveMark();
}

void MarkSweep::ReclaimPhase() {
  TimingLogger::ScopedSplit split("ReclaimPhase", &timings_);
  Thread* self = Thread::Current();
  SweepSystemWeaks(self);
  Runtime::Current()->AllowNewSystemWeaks();
  {
    WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);

    // Reclaim unmarked objects.
    Sweep(false);

    // Swap the live and mark bitmaps for each space which we modified. This is an
    // optimization that enables us to not clear live bits inside of the sweep. Only swaps unbound
    // bitmaps.
    timings_.StartSplit("SwapBitmaps");
    SwapBitmaps();
    timings_.EndSplit();

    // Unbind the live and mark bitmaps.
    TimingLogger::ScopedSplit split("UnBindBitmaps", &timings_);
    GetHeap()->UnBindBitmaps();
  }
}

void MarkSweep::FindDefaultSpaceBitmap() {
  TimingLogger::ScopedSplit split("FindDefaultMarkBitmap", &timings_);
  for (const auto& space : GetHeap()->GetContinuousSpaces()) {
    accounting::ContinuousSpaceBitmap* bitmap = space->GetMarkBitmap();
    // We want to have the main space instead of the non-moving space if possible.
    if (bitmap != nullptr &&
        space->GetGcRetentionPolicy() == space::kGcRetentionPolicyAlwaysCollect) {
      current_space_bitmap_ = bitmap;
      // If we are not the non-moving space, exit the loop early since this will be good enough.
      if (space != heap_->GetNonMovingSpace()) {
        break;
      }
    }
  }
  if (current_space_bitmap_ == nullptr) {
    heap_->DumpSpaces();
    LOG(FATAL) << "Could not find a default mark bitmap";
  }
}

void MarkSweep::ExpandMarkStack() {
  ResizeMarkStack(mark_stack_->Capacity() * 2);
}

void MarkSweep::ResizeMarkStack(size_t new_size) {
  // Rare case, no need to have Thread::Current be a parameter.
  if (UNLIKELY(mark_stack_->Size() < mark_stack_->Capacity())) {
    // Someone else acquired the lock and expanded the mark stack before us.
    return;
  }
  std::vector<Object*> temp(mark_stack_->Begin(), mark_stack_->End());
  CHECK_LE(mark_stack_->Size(), new_size);
  mark_stack_->Resize(new_size);
  for (const auto& obj : temp) {
    mark_stack_->PushBack(obj);
  }
}

inline void MarkSweep::MarkObjectNonNullParallel(Object* obj) {
  DCHECK(obj != nullptr);
  if (MarkObjectParallel(obj)) {
    MutexLock mu(Thread::Current(), mark_stack_lock_);
    if (UNLIKELY(mark_stack_->Size() >= mark_stack_->Capacity())) {
      ExpandMarkStack();
    }
    // The object must be pushed on to the mark stack.
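    // Capacity was ensured above while holding mark_stack_lock_, so this push cannot overflow.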
    mark_stack_->PushBack(obj);
  }
}

mirror::Object* MarkSweep::MarkObjectCallback(mirror::Object* obj, void* arg) {
  MarkSweep* mark_sweep = reinterpret_cast<MarkSweep*>(arg);
  mark_sweep->MarkObject(obj);
  return obj;
}

void MarkSweep::MarkHeapReferenceCallback(mirror::HeapReference<mirror::Object>* ref, void* arg) {
  reinterpret_cast<MarkSweep*>(arg)->MarkObject(ref->AsMirrorPtr());
}

class MarkSweepMarkObjectSlowPath {
 public:
  explicit MarkSweepMarkObjectSlowPath(MarkSweep* mark_sweep) : mark_sweep_(mark_sweep) {
  }

  void operator()(const Object* obj) const ALWAYS_INLINE {
    if (kProfileLargeObjects) {
      // TODO: Differentiate between marking and testing somehow.
      ++mark_sweep_->large_object_test_;
      ++mark_sweep_->large_object_mark_;
    }
    space::LargeObjectSpace* large_object_space = mark_sweep_->GetHeap()->GetLargeObjectsSpace();
    if (UNLIKELY(!IsAligned<kPageSize>(obj) ||
                 (kIsDebugBuild && !large_object_space->Contains(obj)))) {
      LOG(ERROR) << "Tried to mark " << obj << " not contained by any spaces";
      LOG(ERROR) << "Attempting to see if it's a bad root";
      mark_sweep_->VerifyRoots();
      LOG(FATAL) << "Can't mark invalid object";
    }
  }

 private:
  MarkSweep* const mark_sweep_;
};

inline void MarkSweep::MarkObjectNonNull(Object* obj) {
  DCHECK(obj != nullptr);
  if (kUseBakerOrBrooksReadBarrier) {
    // Verify all the objects have the correct pointer installed.
    obj->AssertReadBarrierPointer();
  }
  if (immune_region_.ContainsObject(obj)) {
    if (kCountMarkedObjects) {
      ++mark_immune_count_;
    }
    DCHECK(mark_bitmap_->Test(obj));
  } else if (LIKELY(current_space_bitmap_->HasAddress(obj))) {
    if (kCountMarkedObjects) {
      ++mark_fastpath_count_;
    }
    if (UNLIKELY(!current_space_bitmap_->Set(obj))) {
      PushOnMarkStack(obj);  // This object was not previously marked.
    }
  } else {
    if (kCountMarkedObjects) {
      ++mark_slowpath_count_;
    }
    MarkSweepMarkObjectSlowPath visitor(this);
    // TODO: We already know that the object is not in the current_space_bitmap_ but MarkBitmap::Set
    // will check again.
    if (!mark_bitmap_->Set(obj, visitor)) {
      PushOnMarkStack(obj);  // Was not already marked, push.
    }
  }
}

inline void MarkSweep::PushOnMarkStack(Object* obj) {
  if (UNLIKELY(mark_stack_->Size() >= mark_stack_->Capacity())) {
    // Lock is not needed but is here anyways to please annotalysis.
    MutexLock mu(Thread::Current(), mark_stack_lock_);
    ExpandMarkStack();
  }
  // The object must be pushed on to the mark stack.
  mark_stack_->PushBack(obj);
}

inline bool MarkSweep::MarkObjectParallel(const Object* obj) {
  DCHECK(obj != nullptr);
  if (kUseBakerOrBrooksReadBarrier) {
    // Verify all the objects have the correct pointer installed.
    obj->AssertReadBarrierPointer();
  }
  if (immune_region_.ContainsObject(obj)) {
    DCHECK(IsMarked(obj));
    return false;
  }
  // Try to take advantage of locality of references within a space, failing this find the space
  // the hard way.
  accounting::ContinuousSpaceBitmap* object_bitmap = current_space_bitmap_;
  if (LIKELY(object_bitmap->HasAddress(obj))) {
    return !object_bitmap->AtomicTestAndSet(obj);
  }
  MarkSweepMarkObjectSlowPath visitor(this);
  return !mark_bitmap_->AtomicTestAndSet(obj, visitor);
}

// Used to mark objects when processing the mark stack. If an object is null, it is not marked.
inline void MarkSweep::MarkObject(Object* obj) {
  if (obj != nullptr) {
    MarkObjectNonNull(obj);
  } else if (kCountMarkedObjects) {
    ++mark_null_count_;
  }
}

void MarkSweep::MarkRootParallelCallback(Object** root, void* arg, uint32_t /*thread_id*/,
                                         RootType /*root_type*/) {
  reinterpret_cast<MarkSweep*>(arg)->MarkObjectNonNullParallel(*root);
}

void MarkSweep::VerifyRootMarked(Object** root, void* arg, uint32_t /*thread_id*/,
                                 RootType /*root_type*/) {
  CHECK(reinterpret_cast<MarkSweep*>(arg)->IsMarked(*root));
}

void MarkSweep::MarkRootCallback(Object** root, void* arg, uint32_t /*thread_id*/,
                                 RootType /*root_type*/) {
  reinterpret_cast<MarkSweep*>(arg)->MarkObjectNonNull(*root);
}

void MarkSweep::VerifyRootCallback(const Object* root, void* arg, size_t vreg,
                                   const StackVisitor* visitor, RootType root_type) {
  reinterpret_cast<MarkSweep*>(arg)->VerifyRoot(root, vreg, visitor, root_type);
}

void MarkSweep::VerifyRoot(const Object* root, size_t vreg, const StackVisitor* visitor,
                           RootType root_type) {
  // See if the root is on any space bitmap.
  if (heap_->GetLiveBitmap()->GetContinuousSpaceBitmap(root) == nullptr) {
    space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
    if (!large_object_space->Contains(root)) {
      LOG(ERROR) << "Found invalid root: " << root << " with type " << root_type;
      if (visitor != NULL) {
        LOG(ERROR) << visitor->DescribeLocation() << " in VReg: " << vreg;
      }
    }
  }
}

void MarkSweep::VerifyRoots() {
  Runtime::Current()->GetThreadList()->VerifyRoots(VerifyRootCallback, this);
}

void MarkSweep::MarkRoots(Thread* self) {
  if (Locks::mutator_lock_->IsExclusiveHeld(self)) {
    // If we exclusively hold the mutator lock, all threads must be suspended.
    timings_.StartSplit("MarkRoots");
    Runtime::Current()->VisitRoots(MarkRootCallback, this);
    timings_.EndSplit();
    RevokeAllThreadLocalAllocationStacks(self);
  } else {
    MarkRootsCheckpoint(self, kRevokeRosAllocThreadLocalBuffersAtCheckpoint);
    // At this point the live stack should no longer have any mutators which push into it.
    MarkNonThreadRoots();
    MarkConcurrentRoots(
        static_cast<VisitRootFlags>(kVisitRootFlagAllRoots | kVisitRootFlagStartLoggingNewRoots));
  }
}

void MarkSweep::MarkNonThreadRoots() {
  timings_.StartSplit("MarkNonThreadRoots");
  Runtime::Current()->VisitNonThreadRoots(MarkRootCallback, this);
  timings_.EndSplit();
}

void MarkSweep::MarkConcurrentRoots(VisitRootFlags flags) {
  timings_.StartSplit("MarkConcurrentRoots");
  // Visit all runtime roots and clear dirty flags.
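  // The flags argument selects which root categories are visited, e.g. all roots at the start of
  // marking versus only newly logged roots during pre-cleaning (see MarkRoots and PreCleanCards).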
  Runtime::Current()->VisitConcurrentRoots(MarkRootCallback, this, flags);
  timings_.EndSplit();
}

class ScanObjectVisitor {
 public:
  explicit ScanObjectVisitor(MarkSweep* const mark_sweep) ALWAYS_INLINE
      : mark_sweep_(mark_sweep) {}

  void operator()(Object* obj) const ALWAYS_INLINE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
    if (kCheckLocks) {
      Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
      Locks::heap_bitmap_lock_->AssertExclusiveHeld(Thread::Current());
    }
    mark_sweep_->ScanObject(obj);
  }

 private:
  MarkSweep* const mark_sweep_;
};

class DelayReferenceReferentVisitor {
 public:
  explicit DelayReferenceReferentVisitor(MarkSweep* collector) : collector_(collector) {
  }

  void operator()(mirror::Class* klass, mirror::Reference* ref) const
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
    collector_->DelayReferenceReferent(klass, ref);
  }

 private:
  MarkSweep* const collector_;
};

template <bool kUseFinger = false>
class MarkStackTask : public Task {
 public:
  MarkStackTask(ThreadPool* thread_pool, MarkSweep* mark_sweep, size_t mark_stack_size,
                Object** mark_stack)
      : mark_sweep_(mark_sweep),
        thread_pool_(thread_pool),
        mark_stack_pos_(mark_stack_size) {
    // We may have to copy part of an existing mark stack when another mark stack overflows.
    if (mark_stack_size != 0) {
      DCHECK(mark_stack != NULL);
      // TODO: Check performance?
      std::copy(mark_stack, mark_stack + mark_stack_size, mark_stack_);
    }
    if (kCountTasks) {
      ++mark_sweep_->work_chunks_created_;
    }
  }

  static const size_t kMaxSize = 1 * KB;

 protected:
  class MarkObjectParallelVisitor {
   public:
    explicit MarkObjectParallelVisitor(MarkStackTask<kUseFinger>* chunk_task,
                                       MarkSweep* mark_sweep) ALWAYS_INLINE
        : chunk_task_(chunk_task), mark_sweep_(mark_sweep) {}

    void operator()(Object* obj, MemberOffset offset, bool /* static */) const ALWAYS_INLINE
        SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
      mirror::Object* ref = obj->GetFieldObject<mirror::Object>(offset);
      if (ref != nullptr && mark_sweep_->MarkObjectParallel(ref)) {
        if (kUseFinger) {
          android_memory_barrier();
          if (reinterpret_cast<uintptr_t>(ref) >=
              static_cast<uintptr_t>(mark_sweep_->atomic_finger_)) {
            return;
          }
        }
        chunk_task_->MarkStackPush(ref);
      }
    }

   private:
    MarkStackTask<kUseFinger>* const chunk_task_;
    MarkSweep* const mark_sweep_;
  };

  class ScanObjectParallelVisitor {
   public:
    explicit ScanObjectParallelVisitor(MarkStackTask<kUseFinger>* chunk_task) ALWAYS_INLINE
        : chunk_task_(chunk_task) {}

    // No thread safety analysis since multiple threads will use this visitor.
    void operator()(Object* obj) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
        EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
      MarkSweep* const mark_sweep = chunk_task_->mark_sweep_;
      MarkObjectParallelVisitor mark_visitor(chunk_task_, mark_sweep);
      DelayReferenceReferentVisitor ref_visitor(mark_sweep);
      mark_sweep->ScanObjectVisit(obj, mark_visitor, ref_visitor);
    }

   private:
    MarkStackTask<kUseFinger>* const chunk_task_;
  };

  virtual ~MarkStackTask() {
    // Make sure that we have cleared our mark stack.
    DCHECK_EQ(mark_stack_pos_, 0U);
    if (kCountTasks) {
      ++mark_sweep_->work_chunks_deleted_;
    }
  }

  MarkSweep* const mark_sweep_;
  ThreadPool* const thread_pool_;
  // Thread local mark stack for this task.
  Object* mark_stack_[kMaxSize];
  // Mark stack position.
  size_t mark_stack_pos_;

  void MarkStackPush(Object* obj) ALWAYS_INLINE {
    if (UNLIKELY(mark_stack_pos_ == kMaxSize)) {
      // Mark stack overflow, give 1/2 the stack to the thread pool as a new work task.
      mark_stack_pos_ /= 2;
      auto* task = new MarkStackTask(thread_pool_, mark_sweep_, kMaxSize - mark_stack_pos_,
                                     mark_stack_ + mark_stack_pos_);
      thread_pool_->AddTask(Thread::Current(), task);
    }
    DCHECK(obj != nullptr);
    DCHECK_LT(mark_stack_pos_, kMaxSize);
    mark_stack_[mark_stack_pos_++] = obj;
  }

  virtual void Finalize() {
    delete this;
  }

  // Scans all of the objects.
  virtual void Run(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
    ScanObjectParallelVisitor visitor(this);
    // TODO: Tune this.
    static const size_t kFifoSize = 4;
    BoundedFifoPowerOfTwo<Object*, kFifoSize> prefetch_fifo;
    for (;;) {
      Object* obj = nullptr;
      if (kUseMarkStackPrefetch) {
        while (mark_stack_pos_ != 0 && prefetch_fifo.size() < kFifoSize) {
          Object* obj = mark_stack_[--mark_stack_pos_];
          DCHECK(obj != nullptr);
          __builtin_prefetch(obj);
          prefetch_fifo.push_back(obj);
        }
        if (UNLIKELY(prefetch_fifo.empty())) {
          break;
        }
        obj = prefetch_fifo.front();
        prefetch_fifo.pop_front();
      } else {
        if (UNLIKELY(mark_stack_pos_ == 0)) {
          break;
        }
        obj = mark_stack_[--mark_stack_pos_];
      }
      DCHECK(obj != nullptr);
      visitor(obj);
    }
  }
};

class CardScanTask : public MarkStackTask<false> {
 public:
  CardScanTask(ThreadPool* thread_pool, MarkSweep* mark_sweep,
               accounting::ContinuousSpaceBitmap* bitmap,
               byte* begin, byte* end, byte minimum_age, size_t mark_stack_size,
               Object** mark_stack_obj)
      : MarkStackTask<false>(thread_pool, mark_sweep, mark_stack_size, mark_stack_obj),
        bitmap_(bitmap),
        begin_(begin),
        end_(end),
        minimum_age_(minimum_age) {
  }

 protected:
  accounting::ContinuousSpaceBitmap* const bitmap_;
  byte* const begin_;
  byte* const end_;
  const byte minimum_age_;

  virtual void Finalize() {
    delete this;
  }

  virtual void Run(Thread* self) NO_THREAD_SAFETY_ANALYSIS {
    ScanObjectParallelVisitor visitor(this);
    accounting::CardTable* card_table = mark_sweep_->GetHeap()->GetCardTable();
    size_t cards_scanned = card_table->Scan(bitmap_, begin_, end_, visitor, minimum_age_);
    VLOG(heap) << "Parallel scanning cards " << reinterpret_cast<void*>(begin_) << " - "
               << reinterpret_cast<void*>(end_) << " = " << cards_scanned;
    // Finish by emptying our local mark stack.
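    // MarkStackTask::Run recursively marks through every object the card scan pushed above.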
    MarkStackTask::Run(self);
  }
};

size_t MarkSweep::GetThreadCount(bool paused) const {
  if (heap_->GetThreadPool() == nullptr || !heap_->CareAboutPauseTimes()) {
    return 1;
  }
  if (paused) {
    return heap_->GetParallelGCThreadCount() + 1;
  } else {
    return heap_->GetConcGCThreadCount() + 1;
  }
}

void MarkSweep::ScanGrayObjects(bool paused, byte minimum_age) {
  accounting::CardTable* card_table = GetHeap()->GetCardTable();
  ThreadPool* thread_pool = GetHeap()->GetThreadPool();
  size_t thread_count = GetThreadCount(paused);
  // The parallel version with only one thread is faster for card scanning, TODO: fix.
  if (kParallelCardScan && thread_count > 1) {
    Thread* self = Thread::Current();
    // Can't have a different split for each space since multiple spaces can have their cards being
    // scanned at the same time.
    timings_.StartSplit(paused ? "(Paused)ScanGrayObjects" : "ScanGrayObjects");
    // Try to take some of the mark stack since we can pass this off to the worker tasks.
    Object** mark_stack_begin = mark_stack_->Begin();
    Object** mark_stack_end = mark_stack_->End();
    const size_t mark_stack_size = mark_stack_end - mark_stack_begin;
    // Estimated number of work tasks we will create.
    const size_t mark_stack_tasks = GetHeap()->GetContinuousSpaces().size() * thread_count;
    DCHECK_NE(mark_stack_tasks, 0U);
    const size_t mark_stack_delta = std::min(CardScanTask::kMaxSize / 2,
                                             mark_stack_size / mark_stack_tasks + 1);
    for (const auto& space : GetHeap()->GetContinuousSpaces()) {
      if (space->GetMarkBitmap() == nullptr) {
        continue;
      }
      byte* card_begin = space->Begin();
      byte* card_end = space->End();
      // Align up the end address. For example, the image space's end
      // may not be card-size-aligned.
      card_end = AlignUp(card_end, accounting::CardTable::kCardSize);
      DCHECK(IsAligned<accounting::CardTable::kCardSize>(card_begin));
      DCHECK(IsAligned<accounting::CardTable::kCardSize>(card_end));
      // Calculate how many bytes of heap we will scan.
      const size_t address_range = card_end - card_begin;
      // Calculate how much address range each task gets.
      const size_t card_delta = RoundUp(address_range / thread_count + 1,
                                        accounting::CardTable::kCardSize);
      // Create the worker tasks for this space.
      while (card_begin != card_end) {
        // Add a range of cards.
        size_t addr_remaining = card_end - card_begin;
        size_t card_increment = std::min(card_delta, addr_remaining);
        // Take from the back of the mark stack.
        size_t mark_stack_remaining = mark_stack_end - mark_stack_begin;
        size_t mark_stack_increment = std::min(mark_stack_delta, mark_stack_remaining);
        mark_stack_end -= mark_stack_increment;
        mark_stack_->PopBackCount(static_cast<int32_t>(mark_stack_increment));
        DCHECK_EQ(mark_stack_end, mark_stack_->End());
        // Add the new task to the thread pool.
        auto* task = new CardScanTask(thread_pool, this, space->GetMarkBitmap(), card_begin,
                                      card_begin + card_increment, minimum_age,
                                      mark_stack_increment, mark_stack_end);
        thread_pool->AddTask(self, task);
        card_begin += card_increment;
      }
    }

    // Note: the card scan below may dirty new cards (and scan them)
    // as a side effect when a Reference object is encountered and
    // queued during the marking. See b/11465268.
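    // thread_count includes the current thread: thread_count - 1 pool workers are started and
    // Wait(self, true, true) lets this thread help drain the task queue as well.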
    thread_pool->SetMaxActiveWorkers(thread_count - 1);
    thread_pool->StartWorkers(self);
    thread_pool->Wait(self, true, true);
    thread_pool->StopWorkers(self);
    timings_.EndSplit();
  } else {
    for (const auto& space : GetHeap()->GetContinuousSpaces()) {
      if (space->GetMarkBitmap() != nullptr) {
        // Image spaces are handled properly since live == marked for them.
        switch (space->GetGcRetentionPolicy()) {
          case space::kGcRetentionPolicyNeverCollect:
            timings_.StartSplit(paused ? "(Paused)ScanGrayImageSpaceObjects" :
                "ScanGrayImageSpaceObjects");
            break;
          case space::kGcRetentionPolicyFullCollect:
            timings_.StartSplit(paused ? "(Paused)ScanGrayZygoteSpaceObjects" :
                "ScanGrayZygoteSpaceObjects");
            break;
          case space::kGcRetentionPolicyAlwaysCollect:
            timings_.StartSplit(paused ? "(Paused)ScanGrayAllocSpaceObjects" :
                "ScanGrayAllocSpaceObjects");
            break;
        }
        ScanObjectVisitor visitor(this);
        card_table->Scan(space->GetMarkBitmap(), space->Begin(), space->End(), visitor, minimum_age);
        timings_.EndSplit();
      }
    }
  }
}

class RecursiveMarkTask : public MarkStackTask<false> {
 public:
  RecursiveMarkTask(ThreadPool* thread_pool, MarkSweep* mark_sweep,
                    accounting::ContinuousSpaceBitmap* bitmap, uintptr_t begin, uintptr_t end)
      : MarkStackTask<false>(thread_pool, mark_sweep, 0, NULL),
        bitmap_(bitmap),
        begin_(begin),
        end_(end) {
  }

 protected:
  accounting::ContinuousSpaceBitmap* const bitmap_;
  const uintptr_t begin_;
  const uintptr_t end_;

  virtual void Finalize() {
    delete this;
  }

  // Scans all of the objects.
  virtual void Run(Thread* self) NO_THREAD_SAFETY_ANALYSIS {
    ScanObjectParallelVisitor visitor(this);
    bitmap_->VisitMarkedRange(begin_, end_, visitor);
    // Finish by emptying our local mark stack.
    MarkStackTask::Run(self);
  }
};

// Populates the mark stack based on the set of marked objects and
// recursively marks until the mark stack is emptied.
void MarkSweep::RecursiveMark() {
  TimingLogger::ScopedSplit split("RecursiveMark", &timings_);
  // RecursiveMark will build the lists of known instances of the Reference classes. See
  // DelayReferenceReferent for details.
  if (kUseRecursiveMark) {
    const bool partial = GetGcType() == kGcTypePartial;
    ScanObjectVisitor scan_visitor(this);
    auto* self = Thread::Current();
    ThreadPool* thread_pool = heap_->GetThreadPool();
    size_t thread_count = GetThreadCount(false);
    const bool parallel = kParallelRecursiveMark && thread_count > 1;
    mark_stack_->Reset();
    for (const auto& space : GetHeap()->GetContinuousSpaces()) {
      if ((space->GetGcRetentionPolicy() == space::kGcRetentionPolicyAlwaysCollect) ||
          (!partial && space->GetGcRetentionPolicy() == space::kGcRetentionPolicyFullCollect)) {
        current_space_bitmap_ = space->GetMarkBitmap();
        if (current_space_bitmap_ == nullptr) {
          continue;
        }
        if (parallel) {
          // We will use the mark stack in the future.
          // CHECK(mark_stack_->IsEmpty());
          // This function does not handle heap end increasing, so we must use the space end.
          uintptr_t begin = reinterpret_cast<uintptr_t>(space->Begin());
          uintptr_t end = reinterpret_cast<uintptr_t>(space->End());
          atomic_finger_ = static_cast<int32_t>(0xFFFFFFFF);

          // Create a few worker tasks.
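          // The space is split into roughly 2 * thread_count address ranges, each scanned by one
          // RecursiveMarkTask; when the computed per-task range would be under 16 KB, the whole
          // remaining range goes to a single task instead.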
          const size_t n = thread_count * 2;
          while (begin != end) {
            uintptr_t start = begin;
            uintptr_t delta = (end - begin) / n;
            delta = RoundUp(delta, KB);
            if (delta < 16 * KB) delta = end - begin;
            begin += delta;
            auto* task = new RecursiveMarkTask(thread_pool, this, current_space_bitmap_, start,
                                               begin);
            thread_pool->AddTask(self, task);
          }
          thread_pool->SetMaxActiveWorkers(thread_count - 1);
          thread_pool->StartWorkers(self);
          thread_pool->Wait(self, true, true);
          thread_pool->StopWorkers(self);
        } else {
          // This function does not handle heap end increasing, so we must use the space end.
          uintptr_t begin = reinterpret_cast<uintptr_t>(space->Begin());
          uintptr_t end = reinterpret_cast<uintptr_t>(space->End());
          current_space_bitmap_->VisitMarkedRange(begin, end, scan_visitor);
        }
      }
    }
  }
  ProcessMarkStack(false);
}

mirror::Object* MarkSweep::IsMarkedCallback(mirror::Object* object, void* arg) {
  if (reinterpret_cast<MarkSweep*>(arg)->IsMarked(object)) {
    return object;
  }
  return nullptr;
}

void MarkSweep::RecursiveMarkDirtyObjects(bool paused, byte minimum_age) {
  ScanGrayObjects(paused, minimum_age);
  ProcessMarkStack(paused);
}

void MarkSweep::ReMarkRoots() {
  Locks::mutator_lock_->AssertExclusiveHeld(Thread::Current());
  timings_.StartSplit("(Paused)ReMarkRoots");
  Runtime::Current()->VisitRoots(
      MarkRootCallback, this, static_cast<VisitRootFlags>(kVisitRootFlagNewRoots |
                                                          kVisitRootFlagStopLoggingNewRoots |
                                                          kVisitRootFlagClearRootLog));
  timings_.EndSplit();
  if (kVerifyRootsMarked) {
    timings_.StartSplit("(Paused)VerifyRoots");
    Runtime::Current()->VisitRoots(VerifyRootMarked, this);
    timings_.EndSplit();
  }
}

void MarkSweep::SweepSystemWeaks(Thread* self) {
  WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
  timings_.StartSplit("SweepSystemWeaks");
  Runtime::Current()->SweepSystemWeaks(IsMarkedCallback, this);
  timings_.EndSplit();
}

mirror::Object* MarkSweep::VerifySystemWeakIsLiveCallback(Object* obj, void* arg) {
  reinterpret_cast<MarkSweep*>(arg)->VerifyIsLive(obj);
  // We don't actually want to sweep the object, so let's return "marked".
  return obj;
}

void MarkSweep::VerifyIsLive(const Object* obj) {
  if (!heap_->GetLiveBitmap()->Test(obj)) {
    if (std::find(heap_->allocation_stack_->Begin(), heap_->allocation_stack_->End(), obj) ==
        heap_->allocation_stack_->End()) {
      // Object not found!
      heap_->DumpSpaces();
      LOG(FATAL) << "Found dead object " << obj;
    }
  }
}

void MarkSweep::VerifySystemWeaks() {
  // Verify system weaks; uses a special object visitor which returns the input object.
  Runtime::Current()->SweepSystemWeaks(VerifySystemWeakIsLiveCallback, this);
}

class CheckpointMarkThreadRoots : public Closure {
 public:
  explicit CheckpointMarkThreadRoots(MarkSweep* mark_sweep,
                                     bool revoke_ros_alloc_thread_local_buffers_at_checkpoint)
      : mark_sweep_(mark_sweep),
        revoke_ros_alloc_thread_local_buffers_at_checkpoint_(
            revoke_ros_alloc_thread_local_buffers_at_checkpoint) {
  }

  virtual void Run(Thread* thread) OVERRIDE NO_THREAD_SAFETY_ANALYSIS {
    ATRACE_BEGIN("Marking thread roots");
    // Note: self is not necessarily equal to thread since thread may be suspended.
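    // Each thread's stack and thread-local roots are marked here, then the thread passes the GC
    // barrier so that MarkRootsCheckpoint can stop waiting once every thread has run this closure.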
    Thread* self = Thread::Current();
    CHECK(thread == self || thread->IsSuspended() || thread->GetState() == kWaitingPerformingGc)
        << thread->GetState() << " thread " << thread << " self " << self;
    thread->VisitRoots(MarkSweep::MarkRootParallelCallback, mark_sweep_);
    ATRACE_END();
    if (revoke_ros_alloc_thread_local_buffers_at_checkpoint_) {
      ATRACE_BEGIN("RevokeRosAllocThreadLocalBuffers");
      mark_sweep_->GetHeap()->RevokeRosAllocThreadLocalBuffers(thread);
      ATRACE_END();
    }
    mark_sweep_->GetBarrier().Pass(self);
  }

 private:
  MarkSweep* const mark_sweep_;
  const bool revoke_ros_alloc_thread_local_buffers_at_checkpoint_;
};

void MarkSweep::MarkRootsCheckpoint(Thread* self,
                                    bool revoke_ros_alloc_thread_local_buffers_at_checkpoint) {
  CheckpointMarkThreadRoots check_point(this, revoke_ros_alloc_thread_local_buffers_at_checkpoint);
  timings_.StartSplit("MarkRootsCheckpoint");
  ThreadList* thread_list = Runtime::Current()->GetThreadList();
  // Request that the checkpoint be run on all threads, returning a count of the threads that must
  // run through the barrier, including self.
  size_t barrier_count = thread_list->RunCheckpoint(&check_point);
  // Release locks then wait for all mutator threads to pass the barrier.
  // TODO: optimize to not release locks when there are no threads to wait for.
  Locks::heap_bitmap_lock_->ExclusiveUnlock(self);
  Locks::mutator_lock_->SharedUnlock(self);
  {
    ScopedThreadStateChange tsc(self, kWaitingForCheckPointsToRun);
    gc_barrier_->Increment(self, barrier_count);
  }
  Locks::mutator_lock_->SharedLock(self);
  Locks::heap_bitmap_lock_->ExclusiveLock(self);
  timings_.EndSplit();
}

void MarkSweep::SweepArray(accounting::ObjectStack* allocations, bool swap_bitmaps) {
  timings_.StartSplit("SweepArray");
  Thread* self = Thread::Current();
  mirror::Object* chunk_free_buffer[kSweepArrayChunkFreeSize];
  size_t chunk_free_pos = 0;
  size_t freed_bytes = 0;
  size_t freed_large_object_bytes = 0;
  size_t freed_objects = 0;
  size_t freed_large_objects = 0;
  // How many objects are left in the array, modified after each space is swept.
  Object** objects = allocations->Begin();
  size_t count = allocations->Size();
  // Change the order to ensure that the non-moving space is swept last as an optimization.
  std::vector<space::ContinuousSpace*> sweep_spaces;
  space::ContinuousSpace* non_moving_space = nullptr;
  for (space::ContinuousSpace* space : heap_->GetContinuousSpaces()) {
    if (space->IsAllocSpace() && !immune_region_.ContainsSpace(space) &&
        space->GetLiveBitmap() != nullptr) {
      if (space == heap_->GetNonMovingSpace()) {
        non_moving_space = space;
      } else {
        sweep_spaces.push_back(space);
      }
    }
  }
  // Unlikely to sweep a significant amount of non-movable objects, so we do these after
  // the other alloc spaces as an optimization.
  if (non_moving_space != nullptr) {
    sweep_spaces.push_back(non_moving_space);
  }
  // Start by sweeping the continuous spaces.
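  // Unmarked objects found in the allocation stack are batched into chunk_free_buffer and freed
  // kSweepArrayChunkFreeSize at a time; objects belonging to other spaces are compacted to the
  // front of the array so that later spaces only re-examine what is left.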
  for (space::ContinuousSpace* space : sweep_spaces) {
    space::AllocSpace* alloc_space = space->AsAllocSpace();
    accounting::ContinuousSpaceBitmap* live_bitmap = space->GetLiveBitmap();
    accounting::ContinuousSpaceBitmap* mark_bitmap = space->GetMarkBitmap();
    if (swap_bitmaps) {
      std::swap(live_bitmap, mark_bitmap);
    }
    Object** out = objects;
    for (size_t i = 0; i < count; ++i) {
      Object* obj = objects[i];
      if (kUseThreadLocalAllocationStack && obj == nullptr) {
        continue;
      }
      if (space->HasAddress(obj)) {
        // This object is in the space, remove it from the array and add it to the sweep buffer
        // if needed.
        if (!mark_bitmap->Test(obj)) {
          if (chunk_free_pos >= kSweepArrayChunkFreeSize) {
            timings_.StartSplit("FreeList");
            freed_objects += chunk_free_pos;
            freed_bytes += alloc_space->FreeList(self, chunk_free_pos, chunk_free_buffer);
            timings_.EndSplit();
            chunk_free_pos = 0;
          }
          chunk_free_buffer[chunk_free_pos++] = obj;
        }
      } else {
        *(out++) = obj;
      }
    }
    if (chunk_free_pos > 0) {
      timings_.StartSplit("FreeList");
      freed_objects += chunk_free_pos;
      freed_bytes += alloc_space->FreeList(self, chunk_free_pos, chunk_free_buffer);
      timings_.EndSplit();
      chunk_free_pos = 0;
    }
    // All of the references which the space contained are no longer in the allocation stack;
    // update the count.
    count = out - objects;
  }
  // Handle the large object space.
  space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
  accounting::LargeObjectBitmap* large_live_objects = large_object_space->GetLiveBitmap();
  accounting::LargeObjectBitmap* large_mark_objects = large_object_space->GetMarkBitmap();
  if (swap_bitmaps) {
    std::swap(large_live_objects, large_mark_objects);
  }
  for (size_t i = 0; i < count; ++i) {
    Object* obj = objects[i];
    // Handle large objects.
    if (kUseThreadLocalAllocationStack && obj == nullptr) {
      continue;
    }
    if (!large_mark_objects->Test(obj)) {
      ++freed_large_objects;
      freed_large_object_bytes += large_object_space->Free(self, obj);
    }
  }
  timings_.EndSplit();

  timings_.StartSplit("RecordFree");
  VLOG(heap) << "Freed " << freed_objects << "/" << count
             << " objects with size " << PrettySize(freed_bytes);
  heap_->RecordFree(freed_objects + freed_large_objects, freed_bytes + freed_large_object_bytes);
  freed_objects_.FetchAndAdd(freed_objects);
  freed_large_objects_.FetchAndAdd(freed_large_objects);
  freed_bytes_.FetchAndAdd(freed_bytes);
  freed_large_object_bytes_.FetchAndAdd(freed_large_object_bytes);
  timings_.EndSplit();

  timings_.StartSplit("ResetStack");
  allocations->Reset();
  timings_.EndSplit();
}

void MarkSweep::Sweep(bool swap_bitmaps) {
  // Ensure that nobody inserted items in the live stack after we swapped the stacks.
  CHECK_GE(live_stack_freeze_size_, GetHeap()->GetLiveStack()->Size());
  // Mark everything allocated since the last GC as live so that we can sweep concurrently,
  // knowing that new allocations won't be marked as live.
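  // MarkAllocStackAsLive sets the live bitmap bits for everything in the swapped live stack, so
  // the bitmap sweep below will not free objects allocated after the pause.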
  timings_.StartSplit("MarkStackAsLive");
  accounting::ObjectStack* live_stack = heap_->GetLiveStack();
  heap_->MarkAllocStackAsLive(live_stack);
  live_stack->Reset();
  timings_.EndSplit();

  DCHECK(mark_stack_->IsEmpty());
  for (const auto& space : GetHeap()->GetContinuousSpaces()) {
    if (space->IsContinuousMemMapAllocSpace()) {
      space::ContinuousMemMapAllocSpace* alloc_space = space->AsContinuousMemMapAllocSpace();
      TimingLogger::ScopedSplit split(
          alloc_space->IsZygoteSpace() ? "SweepZygoteSpace" : "SweepMallocSpace", &timings_);
      size_t freed_objects = 0;
      size_t freed_bytes = 0;
      alloc_space->Sweep(swap_bitmaps, &freed_objects, &freed_bytes);
      heap_->RecordFree(freed_objects, freed_bytes);
      freed_objects_.FetchAndAdd(freed_objects);
      freed_bytes_.FetchAndAdd(freed_bytes);
    }
  }
  SweepLargeObjects(swap_bitmaps);
}

void MarkSweep::SweepLargeObjects(bool swap_bitmaps) {
  TimingLogger::ScopedSplit split("SweepLargeObjects", &timings_);
  size_t freed_objects = 0;
  size_t freed_bytes = 0;
  heap_->GetLargeObjectsSpace()->Sweep(swap_bitmaps, &freed_objects, &freed_bytes);
  freed_large_objects_.FetchAndAdd(freed_objects);
  freed_large_object_bytes_.FetchAndAdd(freed_bytes);
  heap_->RecordFree(freed_objects, freed_bytes);
}

// Process the "referent" field in a java.lang.ref.Reference. If the referent has not yet been
// marked, put it on the appropriate list in the heap for later processing.
void MarkSweep::DelayReferenceReferent(mirror::Class* klass, mirror::Reference* ref) {
  DCHECK(klass != nullptr);
  if (kCountJavaLangRefs) {
    ++reference_count_;
  }
  heap_->DelayReferenceReferent(klass, ref, IsMarkedCallback, this);
}

class MarkObjectVisitor {
 public:
  explicit MarkObjectVisitor(MarkSweep* const mark_sweep) ALWAYS_INLINE : mark_sweep_(mark_sweep) {
  }

  void operator()(Object* obj, MemberOffset offset, bool /* is_static */) const
      ALWAYS_INLINE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
    if (kCheckLocks) {
      Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
      Locks::heap_bitmap_lock_->AssertExclusiveHeld(Thread::Current());
    }
    mark_sweep_->MarkObject(obj->GetFieldObject<mirror::Object>(offset));
  }

 private:
  MarkSweep* const mark_sweep_;
};

// Scans an object reference. Determines the type of the reference
// and dispatches to a specialized scanning routine.
void MarkSweep::ScanObject(Object* obj) {
  MarkObjectVisitor mark_visitor(this);
  DelayReferenceReferentVisitor ref_visitor(this);
  ScanObjectVisit(obj, mark_visitor, ref_visitor);
}

void MarkSweep::ProcessMarkStackPausedCallback(void* arg) {
  reinterpret_cast<MarkSweep*>(arg)->ProcessMarkStack(true);
}

void MarkSweep::ProcessMarkStackParallel(size_t thread_count) {
  Thread* self = Thread::Current();
  ThreadPool* thread_pool = GetHeap()->GetThreadPool();
  const size_t chunk_size = std::min(mark_stack_->Size() / thread_count + 1,
                                     static_cast<size_t>(MarkStackTask<false>::kMaxSize));
  CHECK_GT(chunk_size, 0U);
  // Split the current mark stack up into work tasks.
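  // Each MarkStackTask copies up to chunk_size entries out of the global mark stack; once the
  // pool has drained every task, the global stack is reset below.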
  for (mirror::Object **it = mark_stack_->Begin(), **end = mark_stack_->End(); it < end; ) {
    const size_t delta = std::min(static_cast<size_t>(end - it), chunk_size);
    thread_pool->AddTask(self, new MarkStackTask<false>(thread_pool, this, delta, it));
    it += delta;
  }
  thread_pool->SetMaxActiveWorkers(thread_count - 1);
  thread_pool->StartWorkers(self);
  thread_pool->Wait(self, true, true);
  thread_pool->StopWorkers(self);
  mark_stack_->Reset();
  CHECK_EQ(work_chunks_created_, work_chunks_deleted_) << " some of the work chunks were leaked";
}

// Scan anything that's on the mark stack.
void MarkSweep::ProcessMarkStack(bool paused) {
  timings_.StartSplit(paused ? "(Paused)ProcessMarkStack" : "ProcessMarkStack");
  size_t thread_count = GetThreadCount(paused);
  if (kParallelProcessMarkStack && thread_count > 1 &&
      mark_stack_->Size() >= kMinimumParallelMarkStackSize) {
    ProcessMarkStackParallel(thread_count);
  } else {
    // TODO: Tune this.
    static const size_t kFifoSize = 4;
    BoundedFifoPowerOfTwo<Object*, kFifoSize> prefetch_fifo;
    for (;;) {
      Object* obj = NULL;
      if (kUseMarkStackPrefetch) {
        while (!mark_stack_->IsEmpty() && prefetch_fifo.size() < kFifoSize) {
          Object* obj = mark_stack_->PopBack();
          DCHECK(obj != NULL);
          __builtin_prefetch(obj);
          prefetch_fifo.push_back(obj);
        }
        if (prefetch_fifo.empty()) {
          break;
        }
        obj = prefetch_fifo.front();
        prefetch_fifo.pop_front();
      } else {
        if (mark_stack_->IsEmpty()) {
          break;
        }
        obj = mark_stack_->PopBack();
      }
      DCHECK(obj != nullptr);
      ScanObject(obj);
    }
  }
  timings_.EndSplit();
}

inline bool MarkSweep::IsMarked(const Object* object) const
    SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
  if (immune_region_.ContainsObject(object)) {
    return true;
  }
  if (current_space_bitmap_->HasAddress(object)) {
    return current_space_bitmap_->Test(object);
  }
  return mark_bitmap_->Test(object);
}

void MarkSweep::FinishPhase() {
  TimingLogger::ScopedSplit split("FinishPhase", &timings_);
  if (kCountScannedTypes) {
    VLOG(gc) << "MarkSweep scanned classes=" << class_count_ << " arrays=" << array_count_
             << " other=" << other_count_;
  }
  if (kCountTasks) {
    VLOG(gc) << "Total number of work chunks allocated: " << work_chunks_created_;
  }
  if (kMeasureOverhead) {
    VLOG(gc) << "Overhead time " << PrettyDuration(overhead_time_);
  }
  if (kProfileLargeObjects) {
    VLOG(gc) << "Large objects tested " << large_object_test_ << " marked " << large_object_mark_;
  }
  if (kCountJavaLangRefs) {
    VLOG(gc) << "References scanned " << reference_count_;
  }
  if (kCountMarkedObjects) {
    VLOG(gc) << "Marked: null=" << mark_null_count_ << " immune=" << mark_immune_count_
             << " fastpath=" << mark_fastpath_count_ << " slowpath=" << mark_slowpath_count_;
  }
  CHECK(mark_stack_->IsEmpty());  // Ensure that the mark stack is empty.
  mark_stack_->Reset();
  WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
  heap_->ClearMarkedObjects();
}

void MarkSweep::RevokeAllThreadLocalBuffers() {
  if (kRevokeRosAllocThreadLocalBuffersAtCheckpoint && IsConcurrent()) {
    // If concurrent, rosalloc thread-local buffers are revoked at the
    // thread checkpoint.
    // Bump pointer space thread-local buffers must not be in use.
    GetHeap()->AssertAllBumpPointerSpaceThreadLocalBuffersAreRevoked();
  } else {
    timings_.StartSplit("(Paused)RevokeAllThreadLocalBuffers");
    GetHeap()->RevokeAllThreadLocalBuffers();
    timings_.EndSplit();
  }
}

}  // namespace collector
}  // namespace gc
}  // namespace art