mark_sweep.cc revision 7bf9f190cd33a7e2f8584299eb889e9df66e0323
/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "mark_sweep.h"

#include <functional>
#include <numeric>
#include <climits>
#include <vector>

#include "base/bounded_fifo.h"
#include "base/logging.h"
#include "base/macros.h"
#include "base/mutex-inl.h"
#include "base/timing_logger.h"
#include "gc/accounting/card_table-inl.h"
#include "gc/accounting/heap_bitmap-inl.h"
#include "gc/accounting/mod_union_table.h"
#include "gc/accounting/space_bitmap-inl.h"
#include "gc/heap.h"
#include "gc/space/image_space.h"
#include "gc/space/large_object_space.h"
#include "gc/space/space-inl.h"
#include "mark_sweep-inl.h"
#include "mirror/art_field-inl.h"
#include "mirror/object-inl.h"
#include "runtime.h"
#include "scoped_thread_state_change.h"
#include "thread-inl.h"
#include "thread_list.h"

using ::art::mirror::ArtField;
using ::art::mirror::Class;
using ::art::mirror::Object;
using ::art::mirror::ObjectArray;

namespace art {
namespace gc {
namespace collector {

// Performance options.
static constexpr bool kUseRecursiveMark = false;
static constexpr bool kUseMarkStackPrefetch = true;
static constexpr size_t kSweepArrayChunkFreeSize = 1024;
static constexpr bool kPreCleanCards = true;

// Parallelism options.
static constexpr bool kParallelCardScan = true;
static constexpr bool kParallelRecursiveMark = true;
// Don't attempt to parallelize mark stack processing unless the mark stack is at least n
// elements. This is temporary until we reduce the overhead caused by allocating tasks, etc.. Not
// having this can add overhead in ProcessReferences since we may end up doing many calls of
// ProcessMarkStack with very small mark stacks.
static constexpr size_t kMinimumParallelMarkStackSize = 128;
static constexpr bool kParallelProcessMarkStack = true;

// Profiling and information flags.
static constexpr bool kProfileLargeObjects = false;
static constexpr bool kMeasureOverhead = false;
static constexpr bool kCountTasks = false;
static constexpr bool kCountJavaLangRefs = false;
static constexpr bool kCountMarkedObjects = false;

// Turn off kCheckLocks when profiling the GC since it slows the GC down by up to 40%.
static constexpr bool kCheckLocks = kDebugLocking;
static constexpr bool kVerifyRootsMarked = kIsDebugBuild;

// If true, revoke the rosalloc thread-local buffers at the
// checkpoint, as opposed to during the pause.
static constexpr bool kRevokeRosAllocThreadLocalBuffersAtCheckpoint = true;

void MarkSweep::BindBitmaps() {
  timings_.StartSplit("BindBitmaps");
  WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
  // Mark all of the spaces we never collect as immune.
  for (const auto& space : GetHeap()->GetContinuousSpaces()) {
    if (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyNeverCollect) {
      CHECK(immune_region_.AddContinuousSpace(space)) << "Failed to add space " << *space;
    }
  }
  timings_.EndSplit();
}

MarkSweep::MarkSweep(Heap* heap, bool is_concurrent, const std::string& name_prefix)
    : GarbageCollector(heap,
                       name_prefix +
                       (is_concurrent ? "concurrent mark sweep": "mark sweep")),
      gc_barrier_(new Barrier(0)),
      large_object_lock_("mark sweep large object lock", kMarkSweepLargeObjectLock),
      mark_stack_lock_("mark sweep mark stack lock", kMarkSweepMarkStackLock),
      is_concurrent_(is_concurrent) {
}

void MarkSweep::InitializePhase() {
  timings_.Reset();
  TimingLogger::ScopedSplit split("InitializePhase", &timings_);
  mark_stack_ = heap_->mark_stack_.get();
  DCHECK(mark_stack_ != nullptr);
  immune_region_.Reset();
  class_count_ = 0;
  array_count_ = 0;
  other_count_ = 0;
  large_object_test_ = 0;
  large_object_mark_ = 0;
  overhead_time_ = 0;
  work_chunks_created_ = 0;
  work_chunks_deleted_ = 0;
  reference_count_ = 0;
  mark_null_count_ = 0;
  mark_immune_count_ = 0;
  mark_fastpath_count_ = 0;
  mark_slowpath_count_ = 0;
  FindDefaultSpaceBitmap();
  {
    // TODO: I don't think we should need heap bitmap lock to get the mark bitmap.
    ReaderMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
    mark_bitmap_ = heap_->GetMarkBitmap();
  }

  // Do any pre GC verification.
  timings_.NewSplit("PreGcVerification");
  heap_->PreGcVerification(this);
}

void MarkSweep::ProcessReferences(Thread* self) {
  TimingLogger::ScopedSplit split("ProcessReferences", &timings_);
  WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
  GetHeap()->ProcessReferences(timings_, clear_soft_references_, &IsMarkedCallback,
                               &MarkObjectCallback, &ProcessMarkStackPausedCallback, this);
}

void MarkSweep::PreProcessReferences() {
  if (IsConcurrent()) {
    // No reason to do this for non-concurrent GC since pre processing soft references only helps
    // pauses.
    timings_.NewSplit("PreProcessReferences");
    GetHeap()->ProcessSoftReferences(timings_, clear_soft_references_, &IsMarkedCallback,
                                     &MarkObjectCallback, &ProcessMarkStackPausedCallback, this);
  }
}

void MarkSweep::PausePhase() {
  TimingLogger::ScopedSplit split("(Paused)PausePhase", &timings_);
  Thread* self = Thread::Current();
  Locks::mutator_lock_->AssertExclusiveHeld(self);
  if (IsConcurrent()) {
    // Handle the dirty objects if we are a concurrent GC.
    WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
    // Re-mark root set.
    ReMarkRoots();
    // Scan dirty objects; this is only required for concurrent GC since mutators may have
    // dirtied cards while marking ran concurrently.
    RecursiveMarkDirtyObjects(true, accounting::CardTable::kCardDirty);
  }
  ProcessReferences(self);
  {
    timings_.NewSplit("SwapStacks");
    WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
    heap_->SwapStacks(self);
    live_stack_freeze_size_ = heap_->GetLiveStack()->Size();
    // Need to revoke all the thread local allocation stacks since we just swapped the allocation
    // stacks and don't want anybody to allocate into the live stack.
    RevokeAllThreadLocalAllocationStacks(self);
  }
  timings_.StartSplit("PreSweepingGcVerification");
  heap_->PreSweepingGcVerification(this);
  timings_.EndSplit();
  if (IsConcurrent()) {
    // Disallow new system weaks to prevent a race which occurs when someone adds a new system
    // weak before we sweep them. Since this new system weak may not be marked, the GC may
    // incorrectly sweep it. This also fixes a race where interning may attempt to return a strong
    // reference to a string that is about to be swept.
    Runtime::Current()->DisallowNewSystemWeaks();
  }
}

void MarkSweep::PreCleanCards() {
  // Don't do this for non-concurrent GCs since they don't have any dirty cards.
  if (kPreCleanCards && IsConcurrent()) {
    Thread* self = Thread::Current();
    CHECK(!Locks::mutator_lock_->IsExclusiveHeld(self));
    // Process dirty cards and add dirty cards to mod union tables; this also ages cards.
    heap_->ProcessCards(timings_, false);
    // The checkpoint root marking is required to avoid a race condition which occurs if the
    // following happens during a reference write:
    // 1. mutator dirties the card (write barrier)
    // 2. GC ages the card (the above ProcessCards call)
    // 3. GC scans the object (the RecursiveMarkDirtyObjects call below)
    // 4. mutator writes the value (corresponding to the write barrier in 1.)
    // This causes the GC to age the card but not necessarily mark the reference which the mutator
    // wrote into the object stored in the card.
    // Having the checkpoint fixes this issue since it ensures that the card mark and the
    // reference write are visible to the GC before the card is scanned (this is due to locks being
    // acquired / released in the checkpoint code).
    // The other roots are also marked to help reduce the pause.
    MarkRootsCheckpoint(self, false);
    MarkNonThreadRoots();
    MarkConcurrentRoots(
        static_cast<VisitRootFlags>(kVisitRootFlagClearRootLog | kVisitRootFlagNewRoots));
    // Process the newly aged cards.
    RecursiveMarkDirtyObjects(false, accounting::CardTable::kCardDirty - 1);
    // TODO: Empty allocation stack to reduce the number of objects we need to test / mark as live
    // in the next GC.
  }
}

void MarkSweep::RevokeAllThreadLocalAllocationStacks(Thread* self) {
  if (kUseThreadLocalAllocationStack) {
    timings_.NewSplit("RevokeAllThreadLocalAllocationStacks");
    Locks::mutator_lock_->AssertExclusiveHeld(self);
    heap_->RevokeAllThreadLocalAllocationStacks(self);
  }
}

void MarkSweep::MarkingPhase() {
  TimingLogger::ScopedSplit split("MarkingPhase", &timings_);
  Thread* self = Thread::Current();

  BindBitmaps();
  FindDefaultSpaceBitmap();

  // Process dirty cards and add dirty cards to mod union tables.
  heap_->ProcessCards(timings_, false);

  WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
  MarkRoots(self);
  MarkReachableObjects();
  // Pre-clean dirtied cards to reduce pauses.
  PreCleanCards();
  PreProcessReferences();
}

void MarkSweep::UpdateAndMarkModUnion() {
  for (const auto& space : heap_->GetContinuousSpaces()) {
    if (immune_region_.ContainsSpace(space)) {
      const char* name = space->IsZygoteSpace() ? "UpdateAndMarkZygoteModUnionTable" :
          "UpdateAndMarkImageModUnionTable";
      TimingLogger::ScopedSplit split(name, &timings_);
      accounting::ModUnionTable* mod_union_table = heap_->FindModUnionTableFromSpace(space);
      CHECK(mod_union_table != nullptr);
      mod_union_table->UpdateAndMarkReferences(MarkHeapReferenceCallback, this);
    }
  }
}

void MarkSweep::MarkReachableObjects() {
  UpdateAndMarkModUnion();
  // Recursively mark all the non-image bits set in the mark bitmap.
  RecursiveMark();
}

void MarkSweep::ReclaimPhase() {
  TimingLogger::ScopedSplit split("ReclaimPhase", &timings_);
  Thread* self = Thread::Current();
  SweepSystemWeaks(self);
  if (IsConcurrent()) {
    Runtime::Current()->AllowNewSystemWeaks();
  }
  {
    WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);

    // Reclaim unmarked objects.
    Sweep(false);

    // Swap the live and mark bitmaps for each space which we modified. This is an
    // optimization that enables us to not clear live bits inside of the sweep. Only swaps unbound
    // bitmaps.
    timings_.StartSplit("SwapBitmaps");
    SwapBitmaps();
    timings_.EndSplit();

    // Unbind the live and mark bitmaps.
    TimingLogger::ScopedSplit split("UnBindBitmaps", &timings_);
    GetHeap()->UnBindBitmaps();
  }
}

void MarkSweep::FindDefaultSpaceBitmap() {
  TimingLogger::ScopedSplit split("FindDefaultMarkBitmap", &timings_);
  for (const auto& space : GetHeap()->GetContinuousSpaces()) {
    accounting::SpaceBitmap* bitmap = space->GetMarkBitmap();
    if (bitmap != nullptr &&
        space->GetGcRetentionPolicy() == space::kGcRetentionPolicyAlwaysCollect) {
      current_space_bitmap_ = bitmap;
      return;
    }
  }
  GetHeap()->DumpSpaces();
  LOG(FATAL) << "Could not find a default mark bitmap";
}

void MarkSweep::ExpandMarkStack() {
  ResizeMarkStack(mark_stack_->Capacity() * 2);
}

void MarkSweep::ResizeMarkStack(size_t new_size) {
  // Rare case, no need to have Thread::Current be a parameter.
  if (UNLIKELY(mark_stack_->Size() < mark_stack_->Capacity())) {
    // Someone else acquired the lock and expanded the mark stack before us.
    return;
  }
  std::vector<Object*> temp(mark_stack_->Begin(), mark_stack_->End());
  CHECK_LE(mark_stack_->Size(), new_size);
  mark_stack_->Resize(new_size);
  for (const auto& obj : temp) {
    mark_stack_->PushBack(obj);
  }
}

inline void MarkSweep::MarkObjectNonNullParallel(Object* obj) {
  DCHECK(obj != NULL);
  if (MarkObjectParallel(obj)) {
    MutexLock mu(Thread::Current(), mark_stack_lock_);
    if (UNLIKELY(mark_stack_->Size() >= mark_stack_->Capacity())) {
      ExpandMarkStack();
    }
    // The object must be pushed on to the mark stack.
    mark_stack_->PushBack(obj);
  }
}

mirror::Object* MarkSweep::MarkObjectCallback(mirror::Object* obj, void* arg) {
  MarkSweep* mark_sweep = reinterpret_cast<MarkSweep*>(arg);
  mark_sweep->MarkObject(obj);
  return obj;
}

void MarkSweep::MarkHeapReferenceCallback(mirror::HeapReference<mirror::Object>* ref, void* arg) {
  reinterpret_cast<MarkSweep*>(arg)->MarkObject(ref->AsMirrorPtr());
}

inline void MarkSweep::MarkObjectNonNull(Object* obj) {
  DCHECK(obj != nullptr);
  if (kUseBakerOrBrooksReadBarrier) {
    // Verify all the objects have the correct pointer installed.
    obj->AssertReadBarrierPointer();
  }
  if (immune_region_.ContainsObject(obj)) {
    if (kCountMarkedObjects) {
      ++mark_immune_count_;
    }
    DCHECK(IsMarked(obj));
    return;
  }
  // Try to take advantage of locality of references within a space, failing this find the space
  // the hard way.
  accounting::SpaceBitmap* object_bitmap = current_space_bitmap_;
  if (UNLIKELY(!object_bitmap->HasAddress(obj))) {
    object_bitmap = mark_bitmap_->GetContinuousSpaceBitmap(obj);
    if (kCountMarkedObjects) {
      ++mark_slowpath_count_;
    }
    if (UNLIKELY(object_bitmap == nullptr)) {
      MarkLargeObject(obj, true);
      return;
    }
  } else if (kCountMarkedObjects) {
    ++mark_fastpath_count_;
  }
  // This object was not previously marked.
  if (!object_bitmap->Set(obj)) {
    PushOnMarkStack(obj);
  }
}

inline void MarkSweep::PushOnMarkStack(Object* obj) {
  if (UNLIKELY(mark_stack_->Size() >= mark_stack_->Capacity())) {
    // Lock is not needed but is here anyway to please annotalysis.
    MutexLock mu(Thread::Current(), mark_stack_lock_);
    ExpandMarkStack();
  }
  // The object must be pushed on to the mark stack.
  mark_stack_->PushBack(obj);
}

// Rare case, probably not worth inlining since it will increase instruction cache miss rate.
bool MarkSweep::MarkLargeObject(const Object* obj, bool set) {
  // TODO: support >1 discontinuous space.
  space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
  accounting::ObjectSet* large_objects = large_object_space->GetMarkObjects();
  if (kProfileLargeObjects) {
    ++large_object_test_;
  }
  if (UNLIKELY(!large_objects->Test(obj))) {
    if (!large_object_space->Contains(obj)) {
      LOG(ERROR) << "Tried to mark " << obj << " not contained by any spaces";
      LOG(ERROR) << "Attempting see if it's a bad root";
      VerifyRoots();
      LOG(FATAL) << "Can't mark bad root";
    }
    if (kProfileLargeObjects) {
      ++large_object_mark_;
    }
    if (set) {
      large_objects->Set(obj);
    } else {
      large_objects->Clear(obj);
    }
    return true;
  }
  return false;
}

inline bool MarkSweep::MarkObjectParallel(const Object* obj) {
  DCHECK(obj != nullptr);
  if (kUseBakerOrBrooksReadBarrier) {
    // Verify all the objects have the correct pointer installed.
    obj->AssertReadBarrierPointer();
  }
  if (immune_region_.ContainsObject(obj)) {
    DCHECK(IsMarked(obj));
    return false;
  }
  // Try to take advantage of locality of references within a space, failing this find the space
  // the hard way.
  accounting::SpaceBitmap* object_bitmap = current_space_bitmap_;
  if (UNLIKELY(!object_bitmap->HasAddress(obj))) {
    accounting::SpaceBitmap* new_bitmap = mark_bitmap_->GetContinuousSpaceBitmap(obj);
    if (new_bitmap != NULL) {
      object_bitmap = new_bitmap;
    } else {
      // TODO: Remove the Thread::Current here?
      // TODO: Convert this to some kind of atomic marking?
      MutexLock mu(Thread::Current(), large_object_lock_);
      return MarkLargeObject(obj, true);
    }
  }
  // Return true if the object was not previously marked.
  return !object_bitmap->AtomicTestAndSet(obj);
}

// Used to mark objects when processing the mark stack. If an object is null, it is not marked.
inline void MarkSweep::MarkObject(Object* obj) {
  if (obj != nullptr) {
    MarkObjectNonNull(obj);
  } else if (kCountMarkedObjects) {
    ++mark_null_count_;
  }
}

void MarkSweep::MarkRootParallelCallback(Object** root, void* arg, uint32_t /*thread_id*/,
                                         RootType /*root_type*/) {
  reinterpret_cast<MarkSweep*>(arg)->MarkObjectNonNullParallel(*root);
}

void MarkSweep::VerifyRootMarked(Object** root, void* arg, uint32_t /*thread_id*/,
                                 RootType /*root_type*/) {
  CHECK(reinterpret_cast<MarkSweep*>(arg)->IsMarked(*root));
}

void MarkSweep::MarkRootCallback(Object** root, void* arg, uint32_t /*thread_id*/,
                                 RootType /*root_type*/) {
  reinterpret_cast<MarkSweep*>(arg)->MarkObjectNonNull(*root);
}

void MarkSweep::VerifyRootCallback(const Object* root, void* arg, size_t vreg,
                                   const StackVisitor* visitor, RootType root_type) {
  reinterpret_cast<MarkSweep*>(arg)->VerifyRoot(root, vreg, visitor, root_type);
}

void MarkSweep::VerifyRoot(const Object* root, size_t vreg, const StackVisitor* visitor,
                           RootType root_type) {
  // See if the root is on any space bitmap.
  if (GetHeap()->GetLiveBitmap()->GetContinuousSpaceBitmap(root) == nullptr) {
    space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
    if (!large_object_space->Contains(root)) {
      LOG(ERROR) << "Found invalid root: " << root << " with type " << root_type;
      if (visitor != NULL) {
        LOG(ERROR) << visitor->DescribeLocation() << " in VReg: " << vreg;
      }
    }
  }
}

void MarkSweep::VerifyRoots() {
  Runtime::Current()->GetThreadList()->VerifyRoots(VerifyRootCallback, this);
}

void MarkSweep::MarkRoots(Thread* self) {
  if (Locks::mutator_lock_->IsExclusiveHeld(self)) {
    // If we exclusively hold the mutator lock, all threads must be suspended.
    timings_.StartSplit("MarkRoots");
    Runtime::Current()->VisitRoots(MarkRootCallback, this);
    timings_.EndSplit();
    RevokeAllThreadLocalAllocationStacks(self);
  } else {
    MarkRootsCheckpoint(self, kRevokeRosAllocThreadLocalBuffersAtCheckpoint);
    // At this point the live stack should no longer have any mutators which push into it.
    MarkNonThreadRoots();
    MarkConcurrentRoots(
        static_cast<VisitRootFlags>(kVisitRootFlagAllRoots | kVisitRootFlagStartLoggingNewRoots));
  }
}

void MarkSweep::MarkNonThreadRoots() {
  timings_.StartSplit("MarkNonThreadRoots");
  Runtime::Current()->VisitNonThreadRoots(MarkRootCallback, this);
  timings_.EndSplit();
}

void MarkSweep::MarkConcurrentRoots(VisitRootFlags flags) {
  timings_.StartSplit("MarkConcurrentRoots");
  // Visit all runtime roots and clear dirty flags.
  Runtime::Current()->VisitConcurrentRoots(MarkRootCallback, this, flags);
  timings_.EndSplit();
}

class ScanObjectVisitor {
 public:
  explicit ScanObjectVisitor(MarkSweep* const mark_sweep) ALWAYS_INLINE
      : mark_sweep_(mark_sweep) {}

  void operator()(Object* obj) const ALWAYS_INLINE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
    if (kCheckLocks) {
      Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
      Locks::heap_bitmap_lock_->AssertExclusiveHeld(Thread::Current());
    }
    mark_sweep_->ScanObject(obj);
  }

 private:
  MarkSweep* const mark_sweep_;
};

class DelayReferenceReferentVisitor {
 public:
  explicit DelayReferenceReferentVisitor(MarkSweep* collector) : collector_(collector) {
  }

  void operator()(mirror::Class* klass, mirror::Reference* ref) const
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
    collector_->DelayReferenceReferent(klass, ref);
  }

 private:
  MarkSweep* const collector_;
};

template <bool kUseFinger = false>
class MarkStackTask : public Task {
 public:
  MarkStackTask(ThreadPool* thread_pool, MarkSweep* mark_sweep, size_t mark_stack_size,
                Object** mark_stack)
      : mark_sweep_(mark_sweep),
        thread_pool_(thread_pool),
        mark_stack_pos_(mark_stack_size) {
    // We may have to copy part of an existing mark stack when another mark stack overflows.
    if (mark_stack_size != 0) {
      DCHECK(mark_stack != NULL);
      // TODO: Check performance?
      std::copy(mark_stack, mark_stack + mark_stack_size, mark_stack_);
    }
    if (kCountTasks) {
      ++mark_sweep_->work_chunks_created_;
    }
  }

  static const size_t kMaxSize = 1 * KB;

 protected:
  class MarkObjectParallelVisitor {
   public:
    explicit MarkObjectParallelVisitor(MarkStackTask<kUseFinger>* chunk_task,
                                       MarkSweep* mark_sweep) ALWAYS_INLINE
        : chunk_task_(chunk_task), mark_sweep_(mark_sweep) {}

    void operator()(Object* obj, MemberOffset offset, bool /* static */) const ALWAYS_INLINE
        SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
      mirror::Object* ref = obj->GetFieldObject<mirror::Object>(offset, false);
      if (ref != nullptr && mark_sweep_->MarkObjectParallel(ref)) {
        if (kUseFinger) {
          android_memory_barrier();
          if (reinterpret_cast<uintptr_t>(ref) >=
              static_cast<uintptr_t>(mark_sweep_->atomic_finger_)) {
            return;
          }
        }
        chunk_task_->MarkStackPush(ref);
      }
    }

   private:
    MarkStackTask<kUseFinger>* const chunk_task_;
    MarkSweep* const mark_sweep_;
  };

  class ScanObjectParallelVisitor {
   public:
    explicit ScanObjectParallelVisitor(MarkStackTask<kUseFinger>* chunk_task) ALWAYS_INLINE
        : chunk_task_(chunk_task) {}

    // No thread safety analysis since multiple threads will use this visitor.
    void operator()(Object* obj) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
        EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
      MarkSweep* const mark_sweep = chunk_task_->mark_sweep_;
      MarkObjectParallelVisitor mark_visitor(chunk_task_, mark_sweep);
      DelayReferenceReferentVisitor ref_visitor(mark_sweep);
      mark_sweep->ScanObjectVisit(obj, mark_visitor, ref_visitor);
    }

   private:
    MarkStackTask<kUseFinger>* const chunk_task_;
  };

  virtual ~MarkStackTask() {
    // Make sure that we have cleared our mark stack.
    DCHECK_EQ(mark_stack_pos_, 0U);
    if (kCountTasks) {
      ++mark_sweep_->work_chunks_deleted_;
    }
  }

  MarkSweep* const mark_sweep_;
  ThreadPool* const thread_pool_;
  // Thread local mark stack for this task.
  Object* mark_stack_[kMaxSize];
  // Mark stack position.
  size_t mark_stack_pos_;

  void MarkStackPush(Object* obj) ALWAYS_INLINE {
    if (UNLIKELY(mark_stack_pos_ == kMaxSize)) {
      // Mark stack overflow, give 1/2 the stack to the thread pool as a new work task.
      mark_stack_pos_ /= 2;
      auto* task = new MarkStackTask(thread_pool_, mark_sweep_, kMaxSize - mark_stack_pos_,
                                     mark_stack_ + mark_stack_pos_);
      thread_pool_->AddTask(Thread::Current(), task);
    }
    DCHECK(obj != nullptr);
    DCHECK_LT(mark_stack_pos_, kMaxSize);
    mark_stack_[mark_stack_pos_++] = obj;
  }

  virtual void Finalize() {
    delete this;
  }

  // Scans all of the objects
  virtual void Run(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
    ScanObjectParallelVisitor visitor(this);
    // TODO: Tune this.
    static const size_t kFifoSize = 4;
    BoundedFifoPowerOfTwo<Object*, kFifoSize> prefetch_fifo;
    for (;;) {
      Object* obj = nullptr;
      if (kUseMarkStackPrefetch) {
        while (mark_stack_pos_ != 0 && prefetch_fifo.size() < kFifoSize) {
          Object* obj = mark_stack_[--mark_stack_pos_];
          DCHECK(obj != nullptr);
          __builtin_prefetch(obj);
          prefetch_fifo.push_back(obj);
        }
        if (UNLIKELY(prefetch_fifo.empty())) {
          break;
        }
        obj = prefetch_fifo.front();
        prefetch_fifo.pop_front();
      } else {
        if (UNLIKELY(mark_stack_pos_ == 0)) {
          break;
        }
        obj = mark_stack_[--mark_stack_pos_];
      }
      DCHECK(obj != nullptr);
      visitor(obj);
    }
  }
};

class CardScanTask : public MarkStackTask<false> {
 public:
  CardScanTask(ThreadPool* thread_pool, MarkSweep* mark_sweep, accounting::SpaceBitmap* bitmap,
               byte* begin, byte* end, byte minimum_age, size_t mark_stack_size,
               Object** mark_stack_obj)
      : MarkStackTask<false>(thread_pool, mark_sweep, mark_stack_size, mark_stack_obj),
        bitmap_(bitmap),
        begin_(begin),
        end_(end),
        minimum_age_(minimum_age) {
  }

 protected:
  accounting::SpaceBitmap* const bitmap_;
  byte* const begin_;
  byte* const end_;
  const byte minimum_age_;

  virtual void Finalize() {
    delete this;
  }

  virtual void Run(Thread* self) NO_THREAD_SAFETY_ANALYSIS {
    ScanObjectParallelVisitor visitor(this);
    accounting::CardTable* card_table = mark_sweep_->GetHeap()->GetCardTable();
    size_t cards_scanned = card_table->Scan(bitmap_, begin_, end_, visitor, minimum_age_);
    VLOG(heap) << "Parallel scanning cards " << reinterpret_cast<void*>(begin_) << " - "
               << reinterpret_cast<void*>(end_) << " = " << cards_scanned;
    // Finish by emptying our local mark stack.
    MarkStackTask::Run(self);
  }
};

size_t MarkSweep::GetThreadCount(bool paused) const {
  if (heap_->GetThreadPool() == nullptr || !heap_->CareAboutPauseTimes()) {
    return 0;
  }
  if (paused) {
    return heap_->GetParallelGCThreadCount() + 1;
  } else {
    return heap_->GetConcGCThreadCount() + 1;
  }
}

void MarkSweep::ScanGrayObjects(bool paused, byte minimum_age) {
  accounting::CardTable* card_table = GetHeap()->GetCardTable();
  ThreadPool* thread_pool = GetHeap()->GetThreadPool();
  size_t thread_count = GetThreadCount(paused);
  // The parallel version with only one thread is faster for card scanning, TODO: fix.
  if (kParallelCardScan && thread_count > 0) {
    Thread* self = Thread::Current();
    // Can't have a different split for each space since multiple spaces can have their cards being
    // scanned at the same time.
    timings_.StartSplit(paused ? "(Paused)ScanGrayObjects" : "ScanGrayObjects");
    // Try to take some of the mark stack since we can pass this off to the worker tasks.
    Object** mark_stack_begin = mark_stack_->Begin();
    Object** mark_stack_end = mark_stack_->End();
    const size_t mark_stack_size = mark_stack_end - mark_stack_begin;
    // Estimated number of work tasks we will create.
    const size_t mark_stack_tasks = GetHeap()->GetContinuousSpaces().size() * thread_count;
    DCHECK_NE(mark_stack_tasks, 0U);
    const size_t mark_stack_delta = std::min(CardScanTask::kMaxSize / 2,
                                             mark_stack_size / mark_stack_tasks + 1);
    for (const auto& space : GetHeap()->GetContinuousSpaces()) {
      if (space->GetMarkBitmap() == nullptr) {
        continue;
      }
      byte* card_begin = space->Begin();
      byte* card_end = space->End();
      // Align up the end address. For example, the image space's end
      // may not be card-size-aligned.
      card_end = AlignUp(card_end, accounting::CardTable::kCardSize);
      DCHECK(IsAligned<accounting::CardTable::kCardSize>(card_begin));
      DCHECK(IsAligned<accounting::CardTable::kCardSize>(card_end));
      // Calculate how many bytes of heap we will scan,
      const size_t address_range = card_end - card_begin;
      // Calculate how much address range each task gets.
      const size_t card_delta = RoundUp(address_range / thread_count + 1,
                                        accounting::CardTable::kCardSize);
      // Create the worker tasks for this space.
      while (card_begin != card_end) {
        // Add a range of cards.
        size_t addr_remaining = card_end - card_begin;
        size_t card_increment = std::min(card_delta, addr_remaining);
        // Take from the back of the mark stack.
        size_t mark_stack_remaining = mark_stack_end - mark_stack_begin;
        size_t mark_stack_increment = std::min(mark_stack_delta, mark_stack_remaining);
        mark_stack_end -= mark_stack_increment;
        mark_stack_->PopBackCount(static_cast<int32_t>(mark_stack_increment));
        DCHECK_EQ(mark_stack_end, mark_stack_->End());
        // Add the new task to the thread pool.
        auto* task = new CardScanTask(thread_pool, this, space->GetMarkBitmap(), card_begin,
                                      card_begin + card_increment, minimum_age,
                                      mark_stack_increment, mark_stack_end);
        thread_pool->AddTask(self, task);
        card_begin += card_increment;
      }
    }

    // Note: the card scan below may dirty new cards (and scan them)
    // as a side effect when a Reference object is encountered and
    // queued during the marking. See b/11465268.
    thread_pool->SetMaxActiveWorkers(thread_count - 1);
    thread_pool->StartWorkers(self);
    thread_pool->Wait(self, true, true);
    thread_pool->StopWorkers(self);
    timings_.EndSplit();
  } else {
    for (const auto& space : GetHeap()->GetContinuousSpaces()) {
      if (space->GetMarkBitmap() != nullptr) {
        // Image spaces are handled properly since live == marked for them.
        switch (space->GetGcRetentionPolicy()) {
          case space::kGcRetentionPolicyNeverCollect:
            timings_.StartSplit(paused ? "(Paused)ScanGrayImageSpaceObjects" :
                "ScanGrayImageSpaceObjects");
            break;
          case space::kGcRetentionPolicyFullCollect:
            timings_.StartSplit(paused ? "(Paused)ScanGrayZygoteSpaceObjects" :
                "ScanGrayZygoteSpaceObjects");
            break;
          case space::kGcRetentionPolicyAlwaysCollect:
            timings_.StartSplit(paused ? "(Paused)ScanGrayAllocSpaceObjects" :
                "ScanGrayAllocSpaceObjects");
            break;
        }
        ScanObjectVisitor visitor(this);
        card_table->Scan(space->GetMarkBitmap(), space->Begin(), space->End(), visitor, minimum_age);
        timings_.EndSplit();
      }
    }
  }
}

class RecursiveMarkTask : public MarkStackTask<false> {
 public:
  RecursiveMarkTask(ThreadPool* thread_pool, MarkSweep* mark_sweep,
                    accounting::SpaceBitmap* bitmap, uintptr_t begin, uintptr_t end)
      : MarkStackTask<false>(thread_pool, mark_sweep, 0, NULL),
        bitmap_(bitmap),
        begin_(begin),
        end_(end) {
  }

 protected:
  accounting::SpaceBitmap* const bitmap_;
  const uintptr_t begin_;
  const uintptr_t end_;

  virtual void Finalize() {
    delete this;
  }

  // Scans all of the objects
  virtual void Run(Thread* self) NO_THREAD_SAFETY_ANALYSIS {
    ScanObjectParallelVisitor visitor(this);
    bitmap_->VisitMarkedRange(begin_, end_, visitor);
    // Finish by emptying our local mark stack.
    MarkStackTask::Run(self);
  }
};

// Populates the mark stack based on the set of marked objects and
// recursively marks until the mark stack is emptied.
void MarkSweep::RecursiveMark() {
  TimingLogger::ScopedSplit split("RecursiveMark", &timings_);
  // RecursiveMark will build the lists of known instances of the Reference classes. See
  // DelayReferenceReferent for details.
  if (kUseRecursiveMark) {
    const bool partial = GetGcType() == kGcTypePartial;
    ScanObjectVisitor scan_visitor(this);
    auto* self = Thread::Current();
    ThreadPool* thread_pool = heap_->GetThreadPool();
    size_t thread_count = GetThreadCount(false);
    const bool parallel = kParallelRecursiveMark && thread_count > 1;
    mark_stack_->Reset();
    for (const auto& space : GetHeap()->GetContinuousSpaces()) {
      if ((space->GetGcRetentionPolicy() == space::kGcRetentionPolicyAlwaysCollect) ||
          (!partial && space->GetGcRetentionPolicy() == space::kGcRetentionPolicyFullCollect)) {
        current_space_bitmap_ = space->GetMarkBitmap();
        if (current_space_bitmap_ == nullptr) {
          continue;
        }
        if (parallel) {
          // We will use the mark stack in the future.
          // CHECK(mark_stack_->IsEmpty());
          // This function does not handle heap end increasing, so we must use the space end.
          uintptr_t begin = reinterpret_cast<uintptr_t>(space->Begin());
          uintptr_t end = reinterpret_cast<uintptr_t>(space->End());
          atomic_finger_ = static_cast<int32_t>(0xFFFFFFFF);

          // Create a few worker tasks.
          const size_t n = thread_count * 2;
          while (begin != end) {
            uintptr_t start = begin;
            uintptr_t delta = (end - begin) / n;
            delta = RoundUp(delta, KB);
            if (delta < 16 * KB) delta = end - begin;
            begin += delta;
            auto* task = new RecursiveMarkTask(thread_pool, this, current_space_bitmap_, start,
                                               begin);
            thread_pool->AddTask(self, task);
          }
          thread_pool->SetMaxActiveWorkers(thread_count - 1);
          thread_pool->StartWorkers(self);
          thread_pool->Wait(self, true, true);
          thread_pool->StopWorkers(self);
        } else {
          // This function does not handle heap end increasing, so we must use the space end.
          uintptr_t begin = reinterpret_cast<uintptr_t>(space->Begin());
          uintptr_t end = reinterpret_cast<uintptr_t>(space->End());
          current_space_bitmap_->VisitMarkedRange(begin, end, scan_visitor);
        }
      }
    }
  }
  ProcessMarkStack(false);
}

mirror::Object* MarkSweep::IsMarkedCallback(mirror::Object* object, void* arg) {
  if (reinterpret_cast<MarkSweep*>(arg)->IsMarked(object)) {
    return object;
  }
  return nullptr;
}

void MarkSweep::RecursiveMarkDirtyObjects(bool paused, byte minimum_age) {
  ScanGrayObjects(paused, minimum_age);
  ProcessMarkStack(paused);
}

void MarkSweep::ReMarkRoots() {
  Locks::mutator_lock_->AssertExclusiveHeld(Thread::Current());
  timings_.StartSplit("(Paused)ReMarkRoots");
  Runtime::Current()->VisitRoots(
      MarkRootCallback, this, static_cast<VisitRootFlags>(kVisitRootFlagNewRoots |
                                                          kVisitRootFlagStopLoggingNewRoots |
                                                          kVisitRootFlagClearRootLog));
  timings_.EndSplit();
  if (kVerifyRootsMarked) {
    timings_.StartSplit("(Paused)VerifyRoots");
    Runtime::Current()->VisitRoots(VerifyRootMarked, this);
    timings_.EndSplit();
  }
}

void MarkSweep::SweepSystemWeaks(Thread* self) {
  WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
  timings_.StartSplit("SweepSystemWeaks");
  Runtime::Current()->SweepSystemWeaks(IsMarkedCallback, this);
  timings_.EndSplit();
}

mirror::Object* MarkSweep::VerifySystemWeakIsLiveCallback(Object* obj, void* arg) {
  reinterpret_cast<MarkSweep*>(arg)->VerifyIsLive(obj);
  // We don't actually want to sweep the object, so let's return "marked".
  return obj;
}

void MarkSweep::VerifyIsLive(const Object* obj) {
  if (!heap_->GetLiveBitmap()->Test(obj)) {
    space::LargeObjectSpace* large_object_space = heap_->GetLargeObjectsSpace();
    if (!large_object_space->GetLiveObjects()->Test(obj)) {
      if (std::find(heap_->allocation_stack_->Begin(), heap_->allocation_stack_->End(), obj) ==
          heap_->allocation_stack_->End()) {
        // Object not found!
        heap_->DumpSpaces();
        LOG(FATAL) << "Found dead object " << obj;
      }
    }
  }
}

void MarkSweep::VerifySystemWeaks() {
  // Verify system weaks, uses a special object visitor which returns the input object.
  Runtime::Current()->SweepSystemWeaks(VerifySystemWeakIsLiveCallback, this);
}

class CheckpointMarkThreadRoots : public Closure {
 public:
  explicit CheckpointMarkThreadRoots(MarkSweep* mark_sweep,
                                     bool revoke_ros_alloc_thread_local_buffers_at_checkpoint)
      : mark_sweep_(mark_sweep),
        revoke_ros_alloc_thread_local_buffers_at_checkpoint_(
            revoke_ros_alloc_thread_local_buffers_at_checkpoint) {
  }

  virtual void Run(Thread* thread) OVERRIDE NO_THREAD_SAFETY_ANALYSIS {
    ATRACE_BEGIN("Marking thread roots");
    // Note: self is not necessarily equal to thread since thread may be suspended.
    Thread* self = Thread::Current();
    CHECK(thread == self || thread->IsSuspended() || thread->GetState() == kWaitingPerformingGc)
        << thread->GetState() << " thread " << thread << " self " << self;
    thread->VisitRoots(MarkSweep::MarkRootParallelCallback, mark_sweep_);
    ATRACE_END();
    if (revoke_ros_alloc_thread_local_buffers_at_checkpoint_) {
      ATRACE_BEGIN("RevokeRosAllocThreadLocalBuffers");
      mark_sweep_->GetHeap()->RevokeRosAllocThreadLocalBuffers(thread);
      ATRACE_END();
    }
    mark_sweep_->GetBarrier().Pass(self);
  }

 private:
  MarkSweep* const mark_sweep_;
  const bool revoke_ros_alloc_thread_local_buffers_at_checkpoint_;
};

void MarkSweep::MarkRootsCheckpoint(Thread* self,
                                    bool revoke_ros_alloc_thread_local_buffers_at_checkpoint) {
  CheckpointMarkThreadRoots check_point(this, revoke_ros_alloc_thread_local_buffers_at_checkpoint);
  timings_.StartSplit("MarkRootsCheckpoint");
  ThreadList* thread_list = Runtime::Current()->GetThreadList();
  // Request that the checkpoint is run on all threads, returning a count of the threads that must
  // run through the barrier, including self.
  size_t barrier_count = thread_list->RunCheckpoint(&check_point);
  // Release locks then wait for all mutator threads to pass the barrier.
  // TODO: optimize to not release locks when there are no threads to wait for.
  Locks::heap_bitmap_lock_->ExclusiveUnlock(self);
  Locks::mutator_lock_->SharedUnlock(self);
  {
    ScopedThreadStateChange tsc(self, kWaitingForCheckPointsToRun);
    gc_barrier_->Increment(self, barrier_count);
  }
  Locks::mutator_lock_->SharedLock(self);
  Locks::heap_bitmap_lock_->ExclusiveLock(self);
  timings_.EndSplit();
}

void MarkSweep::SweepArray(accounting::ObjectStack* allocations, bool swap_bitmaps) {
  timings_.StartSplit("SweepArray");
  Thread* self = Thread::Current();
  mirror::Object* chunk_free_buffer[kSweepArrayChunkFreeSize];
  size_t chunk_free_pos = 0;
  size_t freed_bytes = 0;
  size_t freed_large_object_bytes = 0;
  size_t freed_objects = 0;
  size_t freed_large_objects = 0;
  // How many objects are left in the array, modified after each space is swept.
  Object** objects = allocations->Begin();
  size_t count = allocations->Size();
  // Change the order to ensure that the non-moving space is swept last as an optimization.
  std::vector<space::ContinuousSpace*> sweep_spaces;
  space::ContinuousSpace* non_moving_space = nullptr;
  for (space::ContinuousSpace* space : heap_->GetContinuousSpaces()) {
    if (space->IsAllocSpace() && !immune_region_.ContainsSpace(space) &&
        space->GetLiveBitmap() != nullptr) {
      if (space == heap_->GetNonMovingSpace()) {
        non_moving_space = space;
      } else {
        sweep_spaces.push_back(space);
      }
    }
  }
  // Unlikely to sweep a significant amount of non_movable objects, so we do these after the
  // other alloc spaces as an optimization.
  if (non_moving_space != nullptr) {
    sweep_spaces.push_back(non_moving_space);
  }
  // Start by sweeping the continuous spaces.
  for (space::ContinuousSpace* space : sweep_spaces) {
    space::AllocSpace* alloc_space = space->AsAllocSpace();
    accounting::SpaceBitmap* live_bitmap = space->GetLiveBitmap();
    accounting::SpaceBitmap* mark_bitmap = space->GetMarkBitmap();
    if (swap_bitmaps) {
      std::swap(live_bitmap, mark_bitmap);
    }
    Object** out = objects;
    for (size_t i = 0; i < count; ++i) {
      Object* obj = objects[i];
      if (kUseThreadLocalAllocationStack && obj == nullptr) {
        continue;
      }
      if (space->HasAddress(obj)) {
        // This object is in the space, remove it from the array and add it to the sweep buffer
        // if needed.
        if (!mark_bitmap->Test(obj)) {
          if (chunk_free_pos >= kSweepArrayChunkFreeSize) {
            timings_.StartSplit("FreeList");
            freed_objects += chunk_free_pos;
            freed_bytes += alloc_space->FreeList(self, chunk_free_pos, chunk_free_buffer);
            timings_.EndSplit();
            chunk_free_pos = 0;
          }
          chunk_free_buffer[chunk_free_pos++] = obj;
        }
      } else {
        *(out++) = obj;
      }
    }
    if (chunk_free_pos > 0) {
      timings_.StartSplit("FreeList");
      freed_objects += chunk_free_pos;
      freed_bytes += alloc_space->FreeList(self, chunk_free_pos, chunk_free_buffer);
      timings_.EndSplit();
      chunk_free_pos = 0;
    }
    // All of the references which the space contained are no longer in the allocation stack;
    // update the count.
    count = out - objects;
  }
  // Handle the large object space.
  space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
  accounting::ObjectSet* large_live_objects = large_object_space->GetLiveObjects();
  accounting::ObjectSet* large_mark_objects = large_object_space->GetMarkObjects();
  if (swap_bitmaps) {
    std::swap(large_live_objects, large_mark_objects);
  }
  for (size_t i = 0; i < count; ++i) {
    Object* obj = objects[i];
    // Handle large objects.
    if (kUseThreadLocalAllocationStack && obj == nullptr) {
      continue;
    }
    if (!large_mark_objects->Test(obj)) {
      ++freed_large_objects;
      freed_large_object_bytes += large_object_space->Free(self, obj);
    }
  }
  timings_.EndSplit();

  timings_.StartSplit("RecordFree");
  VLOG(heap) << "Freed " << freed_objects << "/" << count
             << " objects with size " << PrettySize(freed_bytes);
  heap_->RecordFree(freed_objects + freed_large_objects, freed_bytes + freed_large_object_bytes);
  freed_objects_.FetchAndAdd(freed_objects);
  freed_large_objects_.FetchAndAdd(freed_large_objects);
  freed_bytes_.FetchAndAdd(freed_bytes);
  freed_large_object_bytes_.FetchAndAdd(freed_large_object_bytes);
  timings_.EndSplit();

  timings_.StartSplit("ResetStack");
  allocations->Reset();
  timings_.EndSplit();
}

void MarkSweep::Sweep(bool swap_bitmaps) {
  // Ensure that nobody inserted items in the live stack after we swapped the stacks.
  CHECK_GE(live_stack_freeze_size_, GetHeap()->GetLiveStack()->Size());
  // Mark everything allocated since the last GC as live so that we can sweep concurrently,
  // knowing that new allocations won't be marked as live.
  timings_.StartSplit("MarkStackAsLive");
  accounting::ObjectStack* live_stack = heap_->GetLiveStack();
  heap_->MarkAllocStackAsLive(live_stack);
  live_stack->Reset();
  timings_.EndSplit();

  DCHECK(mark_stack_->IsEmpty());
  TimingLogger::ScopedSplit("Sweep", &timings_);
  for (const auto& space : GetHeap()->GetContinuousSpaces()) {
    if (space->IsContinuousMemMapAllocSpace()) {
      space::ContinuousMemMapAllocSpace* alloc_space = space->AsContinuousMemMapAllocSpace();
      TimingLogger::ScopedSplit split(
          alloc_space->IsZygoteSpace() ? "SweepZygoteSpace" : "SweepMallocSpace", &timings_);
      size_t freed_objects = 0;
      size_t freed_bytes = 0;
      alloc_space->Sweep(swap_bitmaps, &freed_objects, &freed_bytes);
      heap_->RecordFree(freed_objects, freed_bytes);
      freed_objects_.FetchAndAdd(freed_objects);
      freed_bytes_.FetchAndAdd(freed_bytes);
    }
  }
  SweepLargeObjects(swap_bitmaps);
}

void MarkSweep::SweepLargeObjects(bool swap_bitmaps) {
  TimingLogger::ScopedSplit("SweepLargeObjects", &timings_);
  size_t freed_objects = 0;
  size_t freed_bytes = 0;
  GetHeap()->GetLargeObjectsSpace()->Sweep(swap_bitmaps, &freed_objects, &freed_bytes);
  freed_large_objects_.FetchAndAdd(freed_objects);
  freed_large_object_bytes_.FetchAndAdd(freed_bytes);
  GetHeap()->RecordFree(freed_objects, freed_bytes);
}

// Process the "referent" field in a java.lang.ref.Reference. If the referent has not yet been
// marked, put it on the appropriate list in the heap for later processing.
void MarkSweep::DelayReferenceReferent(mirror::Class* klass, mirror::Reference* ref) {
  DCHECK(klass != nullptr);
  if (kCountJavaLangRefs) {
    ++reference_count_;
  }
  heap_->DelayReferenceReferent(klass, ref, IsMarkedCallback, this);
}

class MarkObjectVisitor {
 public:
  explicit MarkObjectVisitor(MarkSweep* const mark_sweep) ALWAYS_INLINE : mark_sweep_(mark_sweep) {
  }

  void operator()(Object* obj, MemberOffset offset, bool /* is_static */) const
      ALWAYS_INLINE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
    if (kCheckLocks) {
      Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
      Locks::heap_bitmap_lock_->AssertExclusiveHeld(Thread::Current());
    }
    mark_sweep_->MarkObject(obj->GetFieldObject<mirror::Object>(offset, false));
  }

 private:
  MarkSweep* const mark_sweep_;
};

// Scans an object reference. Determines the type of the reference
// and dispatches to a specialized scanning routine.
void MarkSweep::ScanObject(Object* obj) {
  MarkObjectVisitor mark_visitor(this);
  DelayReferenceReferentVisitor ref_visitor(this);
  ScanObjectVisit(obj, mark_visitor, ref_visitor);
}

void MarkSweep::ProcessMarkStackPausedCallback(void* arg) {
  reinterpret_cast<MarkSweep*>(arg)->ProcessMarkStack(true);
}

void MarkSweep::ProcessMarkStackParallel(size_t thread_count) {
  Thread* self = Thread::Current();
  ThreadPool* thread_pool = GetHeap()->GetThreadPool();
  const size_t chunk_size = std::min(mark_stack_->Size() / thread_count + 1,
                                     static_cast<size_t>(MarkStackTask<false>::kMaxSize));
  CHECK_GT(chunk_size, 0U);
  // Split the current mark stack up into work tasks.
  for (mirror::Object **it = mark_stack_->Begin(), **end = mark_stack_->End(); it < end; ) {
    const size_t delta = std::min(static_cast<size_t>(end - it), chunk_size);
    thread_pool->AddTask(self, new MarkStackTask<false>(thread_pool, this, delta, it));
    it += delta;
  }
  thread_pool->SetMaxActiveWorkers(thread_count - 1);
  thread_pool->StartWorkers(self);
  thread_pool->Wait(self, true, true);
  thread_pool->StopWorkers(self);
  mark_stack_->Reset();
  CHECK_EQ(work_chunks_created_, work_chunks_deleted_) << " some of the work chunks were leaked";
}

// Scan anything that's on the mark stack.
void MarkSweep::ProcessMarkStack(bool paused) {
  timings_.StartSplit(paused ? "(Paused)ProcessMarkStack" : "ProcessMarkStack");
  size_t thread_count = GetThreadCount(paused);
  if (kParallelProcessMarkStack && thread_count > 1 &&
      mark_stack_->Size() >= kMinimumParallelMarkStackSize) {
    ProcessMarkStackParallel(thread_count);
  } else {
    // TODO: Tune this.
    static const size_t kFifoSize = 4;
    BoundedFifoPowerOfTwo<Object*, kFifoSize> prefetch_fifo;
    for (;;) {
      Object* obj = NULL;
      if (kUseMarkStackPrefetch) {
        while (!mark_stack_->IsEmpty() && prefetch_fifo.size() < kFifoSize) {
          Object* obj = mark_stack_->PopBack();
          DCHECK(obj != NULL);
          __builtin_prefetch(obj);
          prefetch_fifo.push_back(obj);
        }
        if (prefetch_fifo.empty()) {
          break;
        }
        obj = prefetch_fifo.front();
        prefetch_fifo.pop_front();
      } else {
        if (mark_stack_->IsEmpty()) {
          break;
        }
        obj = mark_stack_->PopBack();
      }
      DCHECK(obj != nullptr);
      ScanObject(obj);
    }
  }
  timings_.EndSplit();
}

inline bool MarkSweep::IsMarked(const Object* object) const
    SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
  if (immune_region_.ContainsObject(object)) {
    return true;
  }
  if (current_space_bitmap_->HasAddress(object)) {
    return current_space_bitmap_->Test(object);
  }
  return mark_bitmap_->Test(object);
}

void MarkSweep::FinishPhase() {
  TimingLogger::ScopedSplit split("FinishPhase", &timings_);
  // Can't enqueue references if we hold the mutator lock.
  timings_.NewSplit("PostGcVerification");
  heap_->PostGcVerification(this);
  if (kCountScannedTypes) {
    VLOG(gc) << "MarkSweep scanned classes=" << class_count_ << " arrays=" << array_count_
             << " other=" << other_count_;
  }
  if (kCountTasks) {
    VLOG(gc) << "Total number of work chunks allocated: " << work_chunks_created_;
  }
  if (kMeasureOverhead) {
    VLOG(gc) << "Overhead time " << PrettyDuration(overhead_time_);
  }
  if (kProfileLargeObjects) {
    VLOG(gc) << "Large objects tested " << large_object_test_ << " marked " << large_object_mark_;
  }
  if (kCountJavaLangRefs) {
    VLOG(gc) << "References scanned " << reference_count_;
  }
  if (kCountMarkedObjects) {
    VLOG(gc) << "Marked: null=" << mark_null_count_ << " immune=" << mark_immune_count_
             << " fastpath=" << mark_fastpath_count_ << " slowpath=" << mark_slowpath_count_;
  }
  CHECK(mark_stack_->IsEmpty());  // Ensure that the mark stack is empty.
  mark_stack_->Reset();
  WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
  heap_->ClearMarkedObjects();
}

void MarkSweep::RevokeAllThreadLocalBuffers() {
  if (kRevokeRosAllocThreadLocalBuffersAtCheckpoint && IsConcurrent()) {
    // If concurrent, rosalloc thread-local buffers are revoked at the
    // thread checkpoint. Bump pointer space thread-local buffers must
    // not be in use.
    GetHeap()->AssertAllBumpPointerSpaceThreadLocalBuffersAreRevoked();
  } else {
    timings_.StartSplit("(Paused)RevokeAllThreadLocalBuffers");
    GetHeap()->RevokeAllThreadLocalBuffers();
    timings_.EndSplit();
  }
}

}  // namespace collector
}  // namespace gc
}  // namespace art