mark_sweep.cc revision bbd695c71e0bf518f582e84524e1cdeb3de3896c
/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "mark_sweep.h"

#include <functional>
#include <numeric>
#include <climits>
#include <vector>

#include "base/bounded_fifo.h"
#include "base/logging.h"
#include "base/macros.h"
#include "base/mutex-inl.h"
#include "base/timing_logger.h"
#include "gc/accounting/card_table-inl.h"
#include "gc/accounting/heap_bitmap-inl.h"
#include "gc/accounting/mod_union_table.h"
#include "gc/accounting/space_bitmap-inl.h"
#include "gc/heap.h"
#include "gc/space/image_space.h"
#include "gc/space/large_object_space.h"
#include "gc/space/space-inl.h"
#include "mark_sweep-inl.h"
#include "mirror/art_field-inl.h"
#include "mirror/object-inl.h"
#include "runtime.h"
#include "scoped_thread_state_change.h"
#include "thread-inl.h"
#include "thread_list.h"

using ::art::mirror::ArtField;
using ::art::mirror::Class;
using ::art::mirror::Object;
using ::art::mirror::ObjectArray;

namespace art {
namespace gc {
namespace collector {

// Performance options.
static constexpr bool kUseRecursiveMark = false;
static constexpr bool kUseMarkStackPrefetch = true;
static constexpr size_t kSweepArrayChunkFreeSize = 1024;
static constexpr bool kPreCleanCards = true;

// Parallelism options.
static constexpr bool kParallelCardScan = true;
static constexpr bool kParallelRecursiveMark = true;
// Don't attempt to parallelize mark stack processing unless the mark stack is at least n
// elements. This is temporary until we reduce the overhead caused by allocating tasks, etc. Not
// having this can add overhead in ProcessReferences since we may end up doing many calls of
// ProcessMarkStack with very small mark stacks.
static constexpr size_t kMinimumParallelMarkStackSize = 128;
static constexpr bool kParallelProcessMarkStack = true;

// Profiling and information flags.
static constexpr bool kProfileLargeObjects = false;
static constexpr bool kMeasureOverhead = false;
static constexpr bool kCountTasks = false;
static constexpr bool kCountJavaLangRefs = false;
static constexpr bool kCountMarkedObjects = false;

// Turn off kCheckLocks when profiling the GC since it slows the GC down by up to 40%.
static constexpr bool kCheckLocks = kDebugLocking;
static constexpr bool kVerifyRootsMarked = kIsDebugBuild;

// If true, revoke the rosalloc thread-local buffers at the
// checkpoint, as opposed to during the pause.
static constexpr bool kRevokeRosAllocThreadLocalBuffersAtCheckpoint = true;

void MarkSweep::BindBitmaps() {
  timings_.StartSplit("BindBitmaps");
  WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
  // Mark all of the spaces we never collect as immune.
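  // Objects in immune spaces are treated as already marked; the fast path in
  // MarkObjectNonNull can then skip setting bitmap bits for them.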
  for (const auto& space : GetHeap()->GetContinuousSpaces()) {
    if (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyNeverCollect) {
      CHECK(immune_region_.AddContinuousSpace(space)) << "Failed to add space " << *space;
    }
  }
  timings_.EndSplit();
}

MarkSweep::MarkSweep(Heap* heap, bool is_concurrent, const std::string& name_prefix)
    : GarbageCollector(heap,
                       name_prefix +
                       (is_concurrent ? "concurrent mark sweep" : "mark sweep")),
      gc_barrier_(new Barrier(0)),
      mark_stack_lock_("mark sweep mark stack lock", kMarkSweepMarkStackLock),
      is_concurrent_(is_concurrent) {
}

void MarkSweep::InitializePhase() {
  timings_.Reset();
  TimingLogger::ScopedSplit split("InitializePhase", &timings_);
  mark_stack_ = heap_->mark_stack_.get();
  DCHECK(mark_stack_ != nullptr);
  immune_region_.Reset();
  class_count_ = 0;
  array_count_ = 0;
  other_count_ = 0;
  large_object_test_ = 0;
  large_object_mark_ = 0;
  overhead_time_ = 0;
  work_chunks_created_ = 0;
  work_chunks_deleted_ = 0;
  reference_count_ = 0;
  mark_null_count_ = 0;
  mark_immune_count_ = 0;
  mark_fastpath_count_ = 0;
  mark_slowpath_count_ = 0;
  {
    // TODO: I don't think we should need heap bitmap lock to get the mark bitmap.
    ReaderMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
    mark_bitmap_ = heap_->GetMarkBitmap();
  }
  if (!clear_soft_references_) {
    // Always clear soft references for non-sticky collections.
    clear_soft_references_ = GetGcType() != collector::kGcTypeSticky;
  }
  // Do any pre GC verification.
  timings_.NewSplit("PreGcVerification");
  heap_->PreGcVerification(this);
}

void MarkSweep::ProcessReferences(Thread* self) {
  TimingLogger::ScopedSplit split("ProcessReferences", &timings_);
  WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
  GetHeap()->ProcessReferences(timings_, clear_soft_references_, &IsMarkedCallback,
                               &MarkObjectCallback, &ProcessMarkStackPausedCallback, this);
}

void MarkSweep::PreProcessReferences() {
  if (IsConcurrent()) {
    // No reason to do this for non-concurrent GC since pre processing soft references only helps
    // pauses.
    timings_.NewSplit("PreProcessReferences");
    GetHeap()->ProcessSoftReferences(timings_, clear_soft_references_, &IsMarkedCallback,
                                     &MarkObjectCallback, &ProcessMarkStackPausedCallback, this);
  }
}

void MarkSweep::PausePhase() {
  TimingLogger::ScopedSplit split("(Paused)PausePhase", &timings_);
  Thread* self = Thread::Current();
  Locks::mutator_lock_->AssertExclusiveHeld(self);
  if (IsConcurrent()) {
    // Handle the dirty objects if we are a concurrent GC.
    WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
    // Re-mark root set.
    ReMarkRoots();
    // Scan dirty objects; this is required for a concurrent GC since mutators may have dirtied
    // cards while marking ran concurrently.
    RecursiveMarkDirtyObjects(true, accounting::CardTable::kCardDirty);
  }
  ProcessReferences(self);
  {
    timings_.NewSplit("SwapStacks");
    WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
    heap_->SwapStacks(self);
    live_stack_freeze_size_ = heap_->GetLiveStack()->Size();
    // Need to revoke all the thread local allocation stacks since we just swapped the allocation
    // stacks and don't want anybody to allocate into the live stack.
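    // Each thread pushes allocations through a thread-local chunk carved out of the shared
    // allocation stack; revoking returns those chunks so no thread keeps writing into what
    // has just become the live stack.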
    RevokeAllThreadLocalAllocationStacks(self);
  }
  timings_.StartSplit("PreSweepingGcVerification");
  heap_->PreSweepingGcVerification(this);
  timings_.EndSplit();
  if (IsConcurrent()) {
    // Disallow new system weaks to prevent a race which occurs when someone adds a new system
    // weak before we sweep them. Since this new system weak may not be marked, the GC may
    // incorrectly sweep it. This also fixes a race where interning may attempt to return a strong
    // reference to a string that is about to be swept.
    Runtime::Current()->DisallowNewSystemWeaks();
  }
}

void MarkSweep::PreCleanCards() {
  // Don't do this for non-concurrent GCs since they don't have any dirty cards.
  if (kPreCleanCards && IsConcurrent()) {
    Thread* self = Thread::Current();
    CHECK(!Locks::mutator_lock_->IsExclusiveHeld(self));
    // Process dirty cards and add dirty cards to mod union tables, also ages cards.
    heap_->ProcessCards(timings_, false);
    // The checkpoint root marking is required to avoid a race condition which occurs if the
    // following happens during a reference write:
    // 1. mutator dirties the card (write barrier)
    // 2. GC ages the card (the above ProcessCards call)
    // 3. GC scans the object (the RecursiveMarkDirtyObjects call below)
    // 4. mutator writes the value (corresponding to the write barrier in 1.)
    // This causes the GC to age the card but not necessarily mark the reference which the mutator
    // wrote into the object stored in the card.
    // Having the checkpoint fixes this issue since it ensures that the card mark and the
    // reference write are visible to the GC before the card is scanned (this is due to locks being
    // acquired / released in the checkpoint code).
    // The other roots are also marked to help reduce the pause.
    MarkRootsCheckpoint(self, false);
    MarkNonThreadRoots();
    MarkConcurrentRoots(
        static_cast<VisitRootFlags>(kVisitRootFlagClearRootLog | kVisitRootFlagNewRoots));
    // Process the newly aged cards.
    RecursiveMarkDirtyObjects(false, accounting::CardTable::kCardDirty - 1);
    // TODO: Empty allocation stack to reduce the number of objects we need to test / mark as live
    // in the next GC.
  }
}

void MarkSweep::RevokeAllThreadLocalAllocationStacks(Thread* self) {
  if (kUseThreadLocalAllocationStack) {
    timings_.NewSplit("RevokeAllThreadLocalAllocationStacks");
    Locks::mutator_lock_->AssertExclusiveHeld(self);
    heap_->RevokeAllThreadLocalAllocationStacks(self);
  }
}

void MarkSweep::MarkingPhase() {
  TimingLogger::ScopedSplit split("MarkingPhase", &timings_);
  Thread* self = Thread::Current();

  BindBitmaps();
  FindDefaultSpaceBitmap();

  // Process dirty cards and add dirty cards to mod union tables.
  heap_->ProcessCards(timings_, false);

  WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
  MarkRoots(self);
  MarkReachableObjects();
  // Pre-clean dirtied cards to reduce pauses.
  PreCleanCards();
  PreProcessReferences();
}

void MarkSweep::UpdateAndMarkModUnion() {
  for (const auto& space : heap_->GetContinuousSpaces()) {
    if (immune_region_.ContainsSpace(space)) {
      const char* name = space->IsZygoteSpace() ?
          "UpdateAndMarkZygoteModUnionTable" :
          "UpdateAndMarkImageModUnionTable";
      TimingLogger::ScopedSplit split(name, &timings_);
      accounting::ModUnionTable* mod_union_table = heap_->FindModUnionTableFromSpace(space);
      CHECK(mod_union_table != nullptr);
      mod_union_table->UpdateAndMarkReferences(MarkHeapReferenceCallback, this);
    }
  }
}

void MarkSweep::MarkReachableObjects() {
  UpdateAndMarkModUnion();
  // Recursively mark all the non-image bits set in the mark bitmap.
  RecursiveMark();
}

void MarkSweep::ReclaimPhase() {
  TimingLogger::ScopedSplit split("ReclaimPhase", &timings_);
  Thread* self = Thread::Current();
  SweepSystemWeaks(self);
  if (IsConcurrent()) {
    Runtime::Current()->AllowNewSystemWeaks();
  }
  {
    WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);

    // Reclaim unmarked objects.
    Sweep(false);

    // Swap the live and mark bitmaps for each space which we modified. This is an
    // optimization that enables us to not clear live bits inside of the sweep. Only swaps unbound
    // bitmaps.
    timings_.StartSplit("SwapBitmaps");
    SwapBitmaps();
    timings_.EndSplit();

    // Unbind the live and mark bitmaps.
    TimingLogger::ScopedSplit split("UnBindBitmaps", &timings_);
    GetHeap()->UnBindBitmaps();
  }
}

void MarkSweep::FindDefaultSpaceBitmap() {
  TimingLogger::ScopedSplit split("FindDefaultMarkBitmap", &timings_);
  for (const auto& space : GetHeap()->GetContinuousSpaces()) {
    accounting::ContinuousSpaceBitmap* bitmap = space->GetMarkBitmap();
    // We want to have the main space's bitmap instead of the non-moving space's, if possible.
    if (bitmap != nullptr &&
        space->GetGcRetentionPolicy() == space::kGcRetentionPolicyAlwaysCollect) {
      current_space_bitmap_ = bitmap;
      // If this is not the non-moving space, exit the loop early since this will be good enough.
      if (space != heap_->GetNonMovingSpace()) {
        break;
      }
    }
  }
  if (current_space_bitmap_ == nullptr) {
    heap_->DumpSpaces();
    LOG(FATAL) << "Could not find a default mark bitmap";
  }
}

void MarkSweep::ExpandMarkStack() {
  ResizeMarkStack(mark_stack_->Capacity() * 2);
}

void MarkSweep::ResizeMarkStack(size_t new_size) {
  // Rare case, no need to have Thread::Current be a parameter.
  if (UNLIKELY(mark_stack_->Size() < mark_stack_->Capacity())) {
    // Someone else acquired the lock and expanded the mark stack before us.
    return;
  }
  std::vector<Object*> temp(mark_stack_->Begin(), mark_stack_->End());
  CHECK_LE(mark_stack_->Size(), new_size);
  mark_stack_->Resize(new_size);
  for (const auto& obj : temp) {
    mark_stack_->PushBack(obj);
  }
}

inline void MarkSweep::MarkObjectNonNullParallel(Object* obj) {
  DCHECK(obj != nullptr);
  if (MarkObjectParallel(obj)) {
    MutexLock mu(Thread::Current(), mark_stack_lock_);
    if (UNLIKELY(mark_stack_->Size() >= mark_stack_->Capacity())) {
      ExpandMarkStack();
    }
    // The object must be pushed on to the mark stack.
    mark_stack_->PushBack(obj);
  }
}

mirror::Object* MarkSweep::MarkObjectCallback(mirror::Object* obj, void* arg) {
  MarkSweep* mark_sweep = reinterpret_cast<MarkSweep*>(arg);
  mark_sweep->MarkObject(obj);
  return obj;
}

void MarkSweep::MarkHeapReferenceCallback(mirror::HeapReference<mirror::Object>* ref, void* arg) {
  reinterpret_cast<MarkSweep*>(arg)->MarkObject(ref->AsMirrorPtr());
}

class MarkSweepMarkObjectSlowPath {
 public:
  explicit MarkSweepMarkObjectSlowPath(MarkSweep* mark_sweep) : mark_sweep_(mark_sweep) {
  }

  void operator()(const Object* obj) const ALWAYS_INLINE {
    if (kProfileLargeObjects) {
      // TODO: Differentiate between marking and testing somehow.
      ++mark_sweep_->large_object_test_;
      ++mark_sweep_->large_object_mark_;
    }
    space::LargeObjectSpace* large_object_space = mark_sweep_->GetHeap()->GetLargeObjectsSpace();
    if (UNLIKELY(!IsAligned<kPageSize>(obj) ||
                 (kIsDebugBuild && !large_object_space->Contains(obj)))) {
      LOG(ERROR) << "Tried to mark " << obj << " not contained by any spaces";
      LOG(ERROR) << "Attempting to see if it's a bad root";
      mark_sweep_->VerifyRoots();
      LOG(FATAL) << "Can't mark invalid object";
    }
  }

 private:
  MarkSweep* const mark_sweep_;
};

inline void MarkSweep::MarkObjectNonNull(Object* obj) {
  DCHECK(obj != nullptr);
  if (kUseBakerOrBrooksReadBarrier) {
    // Verify all the objects have the correct pointer installed.
    obj->AssertReadBarrierPointer();
  }
  if (immune_region_.ContainsObject(obj)) {
    if (kCountMarkedObjects) {
      ++mark_immune_count_;
    }
    DCHECK(mark_bitmap_->Test(obj));
  } else if (LIKELY(current_space_bitmap_->HasAddress(obj))) {
    if (kCountMarkedObjects) {
      ++mark_fastpath_count_;
    }
    if (UNLIKELY(!current_space_bitmap_->Set(obj))) {
      PushOnMarkStack(obj);  // This object was not previously marked.
    }
  } else {
    if (kCountMarkedObjects) {
      ++mark_slowpath_count_;
    }
    MarkSweepMarkObjectSlowPath visitor(this);
    // TODO: We already know that the object is not in the current_space_bitmap_ but
    // MarkBitmap::Set will check again.
    if (!mark_bitmap_->Set(obj, visitor)) {
      PushOnMarkStack(obj);  // Was not already marked, push.
    }
  }
}

inline void MarkSweep::PushOnMarkStack(Object* obj) {
  if (UNLIKELY(mark_stack_->Size() >= mark_stack_->Capacity())) {
    // Lock is not needed but is here anyways to please annotalysis.
    MutexLock mu(Thread::Current(), mark_stack_lock_);
    ExpandMarkStack();
  }
  // The object must be pushed on to the mark stack.
  mark_stack_->PushBack(obj);
}

inline bool MarkSweep::MarkObjectParallel(const Object* obj) {
  DCHECK(obj != nullptr);
  if (kUseBakerOrBrooksReadBarrier) {
    // Verify all the objects have the correct pointer installed.
    obj->AssertReadBarrierPointer();
  }
  if (immune_region_.ContainsObject(obj)) {
    DCHECK(IsMarked(obj));
    return false;
  }
  // Try to take advantage of locality of references within a space, failing this find the space
  // the hard way.
  accounting::ContinuousSpaceBitmap* object_bitmap = current_space_bitmap_;
  if (LIKELY(object_bitmap->HasAddress(obj))) {
    return !object_bitmap->AtomicTestAndSet(obj);
  }
  MarkSweepMarkObjectSlowPath visitor(this);
  return !mark_bitmap_->AtomicTestAndSet(obj, visitor);
}

// Used to mark objects when processing the mark stack. If an object is null, it is not marked.
inline void MarkSweep::MarkObject(Object* obj) {
  if (obj != nullptr) {
    MarkObjectNonNull(obj);
  } else if (kCountMarkedObjects) {
    ++mark_null_count_;
  }
}

void MarkSweep::MarkRootParallelCallback(Object** root, void* arg, uint32_t /*thread_id*/,
                                         RootType /*root_type*/) {
  reinterpret_cast<MarkSweep*>(arg)->MarkObjectNonNullParallel(*root);
}

void MarkSweep::VerifyRootMarked(Object** root, void* arg, uint32_t /*thread_id*/,
                                 RootType /*root_type*/) {
  CHECK(reinterpret_cast<MarkSweep*>(arg)->IsMarked(*root));
}

void MarkSweep::MarkRootCallback(Object** root, void* arg, uint32_t /*thread_id*/,
                                 RootType /*root_type*/) {
  reinterpret_cast<MarkSweep*>(arg)->MarkObjectNonNull(*root);
}

void MarkSweep::VerifyRootCallback(const Object* root, void* arg, size_t vreg,
                                   const StackVisitor* visitor, RootType root_type) {
  reinterpret_cast<MarkSweep*>(arg)->VerifyRoot(root, vreg, visitor, root_type);
}

void MarkSweep::VerifyRoot(const Object* root, size_t vreg, const StackVisitor* visitor,
                           RootType root_type) {
  // See if the root is on any space bitmap.
  if (heap_->GetLiveBitmap()->GetContinuousSpaceBitmap(root) == nullptr) {
    space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
    if (!large_object_space->Contains(root)) {
      LOG(ERROR) << "Found invalid root: " << root << " with type " << root_type;
      if (visitor != NULL) {
        LOG(ERROR) << visitor->DescribeLocation() << " in VReg: " << vreg;
      }
    }
  }
}

void MarkSweep::VerifyRoots() {
  Runtime::Current()->GetThreadList()->VerifyRoots(VerifyRootCallback, this);
}

void MarkSweep::MarkRoots(Thread* self) {
  if (Locks::mutator_lock_->IsExclusiveHeld(self)) {
    // If we exclusively hold the mutator lock, all threads must be suspended.
    timings_.StartSplit("MarkRoots");
    Runtime::Current()->VisitRoots(MarkRootCallback, this);
    timings_.EndSplit();
    RevokeAllThreadLocalAllocationStacks(self);
  } else {
    MarkRootsCheckpoint(self, kRevokeRosAllocThreadLocalBuffersAtCheckpoint);
    // At this point the live stack should no longer have any mutators which push into it.
    MarkNonThreadRoots();
    MarkConcurrentRoots(
        static_cast<VisitRootFlags>(kVisitRootFlagAllRoots | kVisitRootFlagStartLoggingNewRoots));
  }
}

void MarkSweep::MarkNonThreadRoots() {
  timings_.StartSplit("MarkNonThreadRoots");
  Runtime::Current()->VisitNonThreadRoots(MarkRootCallback, this);
  timings_.EndSplit();
}

void MarkSweep::MarkConcurrentRoots(VisitRootFlags flags) {
  timings_.StartSplit("MarkConcurrentRoots");
  // Visit all runtime roots and clear dirty flags.
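  // Which roots get visited depends on the flags, e.g. kVisitRootFlagNewRoots restricts the
  // visit to roots recorded since root logging was started.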
  Runtime::Current()->VisitConcurrentRoots(MarkRootCallback, this, flags);
  timings_.EndSplit();
}

class ScanObjectVisitor {
 public:
  explicit ScanObjectVisitor(MarkSweep* const mark_sweep) ALWAYS_INLINE
      : mark_sweep_(mark_sweep) {}

  void operator()(Object* obj) const ALWAYS_INLINE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
    if (kCheckLocks) {
      Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
      Locks::heap_bitmap_lock_->AssertExclusiveHeld(Thread::Current());
    }
    mark_sweep_->ScanObject(obj);
  }

 private:
  MarkSweep* const mark_sweep_;
};

class DelayReferenceReferentVisitor {
 public:
  explicit DelayReferenceReferentVisitor(MarkSweep* collector) : collector_(collector) {
  }

  void operator()(mirror::Class* klass, mirror::Reference* ref) const
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
    collector_->DelayReferenceReferent(klass, ref);
  }

 private:
  MarkSweep* const collector_;
};

template <bool kUseFinger = false>
class MarkStackTask : public Task {
 public:
  MarkStackTask(ThreadPool* thread_pool, MarkSweep* mark_sweep, size_t mark_stack_size,
                Object** mark_stack)
      : mark_sweep_(mark_sweep),
        thread_pool_(thread_pool),
        mark_stack_pos_(mark_stack_size) {
    // We may have to copy part of an existing mark stack when another mark stack overflows.
    if (mark_stack_size != 0) {
      DCHECK(mark_stack != NULL);
      // TODO: Check performance?
      std::copy(mark_stack, mark_stack + mark_stack_size, mark_stack_);
    }
    if (kCountTasks) {
      ++mark_sweep_->work_chunks_created_;
    }
  }

  static const size_t kMaxSize = 1 * KB;

 protected:
  class MarkObjectParallelVisitor {
   public:
    explicit MarkObjectParallelVisitor(MarkStackTask<kUseFinger>* chunk_task,
                                       MarkSweep* mark_sweep) ALWAYS_INLINE
        : chunk_task_(chunk_task), mark_sweep_(mark_sweep) {}

    void operator()(Object* obj, MemberOffset offset, bool /* static */) const ALWAYS_INLINE
        SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
      mirror::Object* ref = obj->GetFieldObject<mirror::Object>(offset, false);
      if (ref != nullptr && mark_sweep_->MarkObjectParallel(ref)) {
        if (kUseFinger) {
          android_memory_barrier();
          if (reinterpret_cast<uintptr_t>(ref) >=
              static_cast<uintptr_t>(mark_sweep_->atomic_finger_)) {
            return;
          }
        }
        chunk_task_->MarkStackPush(ref);
      }
    }

   private:
    MarkStackTask<kUseFinger>* const chunk_task_;
    MarkSweep* const mark_sweep_;
  };

  class ScanObjectParallelVisitor {
   public:
    explicit ScanObjectParallelVisitor(MarkStackTask<kUseFinger>* chunk_task) ALWAYS_INLINE
        : chunk_task_(chunk_task) {}

    // No thread safety analysis since multiple threads will use this visitor.
    void operator()(Object* obj) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
        EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
      MarkSweep* const mark_sweep = chunk_task_->mark_sweep_;
      MarkObjectParallelVisitor mark_visitor(chunk_task_, mark_sweep);
      DelayReferenceReferentVisitor ref_visitor(mark_sweep);
      mark_sweep->ScanObjectVisit(obj, mark_visitor, ref_visitor);
    }

   private:
    MarkStackTask<kUseFinger>* const chunk_task_;
  };

  virtual ~MarkStackTask() {
    // Make sure that we have cleared our mark stack.
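    // A non-zero position here would mean some objects were pushed but never scanned.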
    DCHECK_EQ(mark_stack_pos_, 0U);
    if (kCountTasks) {
      ++mark_sweep_->work_chunks_deleted_;
    }
  }

  MarkSweep* const mark_sweep_;
  ThreadPool* const thread_pool_;
  // Thread local mark stack for this task.
  Object* mark_stack_[kMaxSize];
  // Mark stack position.
  size_t mark_stack_pos_;

  void MarkStackPush(Object* obj) ALWAYS_INLINE {
    if (UNLIKELY(mark_stack_pos_ == kMaxSize)) {
      // Mark stack overflow, give 1/2 the stack to the thread pool as a new work task.
      mark_stack_pos_ /= 2;
      auto* task = new MarkStackTask(thread_pool_, mark_sweep_, kMaxSize - mark_stack_pos_,
                                     mark_stack_ + mark_stack_pos_);
      thread_pool_->AddTask(Thread::Current(), task);
    }
    DCHECK(obj != nullptr);
    DCHECK_LT(mark_stack_pos_, kMaxSize);
    mark_stack_[mark_stack_pos_++] = obj;
  }

  virtual void Finalize() {
    delete this;
  }

  // Scans all of the objects.
  virtual void Run(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
    ScanObjectParallelVisitor visitor(this);
    // TODO: Tune this.
    static const size_t kFifoSize = 4;
    BoundedFifoPowerOfTwo<Object*, kFifoSize> prefetch_fifo;
    for (;;) {
      Object* obj = nullptr;
      if (kUseMarkStackPrefetch) {
        while (mark_stack_pos_ != 0 && prefetch_fifo.size() < kFifoSize) {
          Object* obj = mark_stack_[--mark_stack_pos_];
          DCHECK(obj != nullptr);
          __builtin_prefetch(obj);
          prefetch_fifo.push_back(obj);
        }
        if (UNLIKELY(prefetch_fifo.empty())) {
          break;
        }
        obj = prefetch_fifo.front();
        prefetch_fifo.pop_front();
      } else {
        if (UNLIKELY(mark_stack_pos_ == 0)) {
          break;
        }
        obj = mark_stack_[--mark_stack_pos_];
      }
      DCHECK(obj != nullptr);
      visitor(obj);
    }
  }
};

class CardScanTask : public MarkStackTask<false> {
 public:
  CardScanTask(ThreadPool* thread_pool, MarkSweep* mark_sweep,
               accounting::ContinuousSpaceBitmap* bitmap,
               byte* begin, byte* end, byte minimum_age, size_t mark_stack_size,
               Object** mark_stack_obj)
      : MarkStackTask<false>(thread_pool, mark_sweep, mark_stack_size, mark_stack_obj),
        bitmap_(bitmap),
        begin_(begin),
        end_(end),
        minimum_age_(minimum_age) {
  }

 protected:
  accounting::ContinuousSpaceBitmap* const bitmap_;
  byte* const begin_;
  byte* const end_;
  const byte minimum_age_;

  virtual void Finalize() {
    delete this;
  }

  virtual void Run(Thread* self) NO_THREAD_SAFETY_ANALYSIS {
    ScanObjectParallelVisitor visitor(this);
    accounting::CardTable* card_table = mark_sweep_->GetHeap()->GetCardTable();
    size_t cards_scanned = card_table->Scan(bitmap_, begin_, end_, visitor, minimum_age_);
    VLOG(heap) << "Parallel scanning cards " << reinterpret_cast<void*>(begin_) << " - "
               << reinterpret_cast<void*>(end_) << " = " << cards_scanned;
    // Finish by emptying our local mark stack.
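    // The card scan above may have pushed newly marked objects onto this task's local mark
    // stack; the base class Run() drains it.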
    MarkStackTask::Run(self);
  }
};

size_t MarkSweep::GetThreadCount(bool paused) const {
  if (heap_->GetThreadPool() == nullptr || !heap_->CareAboutPauseTimes()) {
    return 1;
  }
  if (paused) {
    return heap_->GetParallelGCThreadCount() + 1;
  } else {
    return heap_->GetConcGCThreadCount() + 1;
  }
}

void MarkSweep::ScanGrayObjects(bool paused, byte minimum_age) {
  accounting::CardTable* card_table = GetHeap()->GetCardTable();
  ThreadPool* thread_pool = GetHeap()->GetThreadPool();
  size_t thread_count = GetThreadCount(paused);
  // The parallel version with only one thread is faster for card scanning. TODO: fix.
  if (kParallelCardScan && thread_count > 1) {
    Thread* self = Thread::Current();
    // Can't have a different split for each space since multiple spaces can have their cards being
    // scanned at the same time.
    timings_.StartSplit(paused ? "(Paused)ScanGrayObjects" : "ScanGrayObjects");
    // Try to take some of the mark stack since we can pass this off to the worker tasks.
    Object** mark_stack_begin = mark_stack_->Begin();
    Object** mark_stack_end = mark_stack_->End();
    const size_t mark_stack_size = mark_stack_end - mark_stack_begin;
    // Estimated number of work tasks we will create.
    const size_t mark_stack_tasks = GetHeap()->GetContinuousSpaces().size() * thread_count;
    DCHECK_NE(mark_stack_tasks, 0U);
    const size_t mark_stack_delta = std::min(CardScanTask::kMaxSize / 2,
                                             mark_stack_size / mark_stack_tasks + 1);
    for (const auto& space : GetHeap()->GetContinuousSpaces()) {
      if (space->GetMarkBitmap() == nullptr) {
        continue;
      }
      byte* card_begin = space->Begin();
      byte* card_end = space->End();
      // Align up the end address. For example, the image space's end
      // may not be card-size-aligned.
      card_end = AlignUp(card_end, accounting::CardTable::kCardSize);
      DCHECK(IsAligned<accounting::CardTable::kCardSize>(card_begin));
      DCHECK(IsAligned<accounting::CardTable::kCardSize>(card_end));
      // Calculate how many bytes of heap we will scan.
      const size_t address_range = card_end - card_begin;
      // Calculate how much address range each task gets.
      const size_t card_delta = RoundUp(address_range / thread_count + 1,
                                        accounting::CardTable::kCardSize);
      // Create the worker tasks for this space.
      while (card_begin != card_end) {
        // Add a range of cards.
        size_t addr_remaining = card_end - card_begin;
        size_t card_increment = std::min(card_delta, addr_remaining);
        // Take from the back of the mark stack.
        size_t mark_stack_remaining = mark_stack_end - mark_stack_begin;
        size_t mark_stack_increment = std::min(mark_stack_delta, mark_stack_remaining);
        mark_stack_end -= mark_stack_increment;
        mark_stack_->PopBackCount(static_cast<int32_t>(mark_stack_increment));
        DCHECK_EQ(mark_stack_end, mark_stack_->End());
        // Add the new task to the thread pool.
        auto* task = new CardScanTask(thread_pool, this, space->GetMarkBitmap(), card_begin,
                                      card_begin + card_increment, minimum_age,
                                      mark_stack_increment, mark_stack_end);
        thread_pool->AddTask(self, task);
        card_begin += card_increment;
      }
    }

    // Note: the card scan below may dirty new cards (and scan them)
    // as a side effect when a Reference object is encountered and
    // queued during the marking. See b/11465268.
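    // Use thread_count - 1 pool workers; the current thread also executes tasks while it
    // waits, for a total of thread_count workers.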
    thread_pool->SetMaxActiveWorkers(thread_count - 1);
    thread_pool->StartWorkers(self);
    thread_pool->Wait(self, true, true);
    thread_pool->StopWorkers(self);
    timings_.EndSplit();
  } else {
    for (const auto& space : GetHeap()->GetContinuousSpaces()) {
      if (space->GetMarkBitmap() != nullptr) {
        // Image spaces are handled properly since live == marked for them.
        switch (space->GetGcRetentionPolicy()) {
          case space::kGcRetentionPolicyNeverCollect:
            timings_.StartSplit(paused ? "(Paused)ScanGrayImageSpaceObjects" :
                "ScanGrayImageSpaceObjects");
            break;
          case space::kGcRetentionPolicyFullCollect:
            timings_.StartSplit(paused ? "(Paused)ScanGrayZygoteSpaceObjects" :
                "ScanGrayZygoteSpaceObjects");
            break;
          case space::kGcRetentionPolicyAlwaysCollect:
            timings_.StartSplit(paused ? "(Paused)ScanGrayAllocSpaceObjects" :
                "ScanGrayAllocSpaceObjects");
            break;
        }
        ScanObjectVisitor visitor(this);
        card_table->Scan(space->GetMarkBitmap(), space->Begin(), space->End(), visitor,
                         minimum_age);
        timings_.EndSplit();
      }
    }
  }
}

class RecursiveMarkTask : public MarkStackTask<false> {
 public:
  RecursiveMarkTask(ThreadPool* thread_pool, MarkSweep* mark_sweep,
                    accounting::ContinuousSpaceBitmap* bitmap, uintptr_t begin, uintptr_t end)
      : MarkStackTask<false>(thread_pool, mark_sweep, 0, NULL),
        bitmap_(bitmap),
        begin_(begin),
        end_(end) {
  }

 protected:
  accounting::ContinuousSpaceBitmap* const bitmap_;
  const uintptr_t begin_;
  const uintptr_t end_;

  virtual void Finalize() {
    delete this;
  }

  // Scans all of the objects.
  virtual void Run(Thread* self) NO_THREAD_SAFETY_ANALYSIS {
    ScanObjectParallelVisitor visitor(this);
    bitmap_->VisitMarkedRange(begin_, end_, visitor);
    // Finish by emptying our local mark stack.
    MarkStackTask::Run(self);
  }
};

// Populates the mark stack based on the set of marked objects and
// recursively marks until the mark stack is emptied.
void MarkSweep::RecursiveMark() {
  TimingLogger::ScopedSplit split("RecursiveMark", &timings_);
  // RecursiveMark will build the lists of known instances of the Reference classes. See
  // DelayReferenceReferent for details.
  if (kUseRecursiveMark) {
    const bool partial = GetGcType() == kGcTypePartial;
    ScanObjectVisitor scan_visitor(this);
    auto* self = Thread::Current();
    ThreadPool* thread_pool = heap_->GetThreadPool();
    size_t thread_count = GetThreadCount(false);
    const bool parallel = kParallelRecursiveMark && thread_count > 1;
    mark_stack_->Reset();
    for (const auto& space : GetHeap()->GetContinuousSpaces()) {
      if ((space->GetGcRetentionPolicy() == space::kGcRetentionPolicyAlwaysCollect) ||
          (!partial && space->GetGcRetentionPolicy() == space::kGcRetentionPolicyFullCollect)) {
        current_space_bitmap_ = space->GetMarkBitmap();
        if (current_space_bitmap_ == nullptr) {
          continue;
        }
        if (parallel) {
          // We will use the mark stack in the future.
          // CHECK(mark_stack_->IsEmpty());
          // This function does not handle heap end increasing, so we must use the space end.
          uintptr_t begin = reinterpret_cast<uintptr_t>(space->Begin());
          uintptr_t end = reinterpret_cast<uintptr_t>(space->End());
          atomic_finger_ = static_cast<int32_t>(0xFFFFFFFF);

          // Create a few worker tasks.
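          // Aim for roughly two chunks per thread so faster workers can pick up extra work;
          // ranges smaller than 16KB are not worth splitting, so take the whole remainder.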
          const size_t n = thread_count * 2;
          while (begin != end) {
            uintptr_t start = begin;
            uintptr_t delta = (end - begin) / n;
            delta = RoundUp(delta, KB);
            if (delta < 16 * KB) delta = end - begin;
            begin += delta;
            auto* task = new RecursiveMarkTask(thread_pool, this, current_space_bitmap_, start,
                                               begin);
            thread_pool->AddTask(self, task);
          }
          thread_pool->SetMaxActiveWorkers(thread_count - 1);
          thread_pool->StartWorkers(self);
          thread_pool->Wait(self, true, true);
          thread_pool->StopWorkers(self);
        } else {
          // This function does not handle heap end increasing, so we must use the space end.
          uintptr_t begin = reinterpret_cast<uintptr_t>(space->Begin());
          uintptr_t end = reinterpret_cast<uintptr_t>(space->End());
          current_space_bitmap_->VisitMarkedRange(begin, end, scan_visitor);
        }
      }
    }
  }
  ProcessMarkStack(false);
}

mirror::Object* MarkSweep::IsMarkedCallback(mirror::Object* object, void* arg) {
  if (reinterpret_cast<MarkSweep*>(arg)->IsMarked(object)) {
    return object;
  }
  return nullptr;
}

void MarkSweep::RecursiveMarkDirtyObjects(bool paused, byte minimum_age) {
  ScanGrayObjects(paused, minimum_age);
  ProcessMarkStack(paused);
}

void MarkSweep::ReMarkRoots() {
  Locks::mutator_lock_->AssertExclusiveHeld(Thread::Current());
  timings_.StartSplit("(Paused)ReMarkRoots");
  Runtime::Current()->VisitRoots(
      MarkRootCallback, this, static_cast<VisitRootFlags>(kVisitRootFlagNewRoots |
                                                          kVisitRootFlagStopLoggingNewRoots |
                                                          kVisitRootFlagClearRootLog));
  timings_.EndSplit();
  if (kVerifyRootsMarked) {
    timings_.StartSplit("(Paused)VerifyRoots");
    Runtime::Current()->VisitRoots(VerifyRootMarked, this);
    timings_.EndSplit();
  }
}

void MarkSweep::SweepSystemWeaks(Thread* self) {
  WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
  timings_.StartSplit("SweepSystemWeaks");
  Runtime::Current()->SweepSystemWeaks(IsMarkedCallback, this);
  timings_.EndSplit();
}

mirror::Object* MarkSweep::VerifySystemWeakIsLiveCallback(Object* obj, void* arg) {
  reinterpret_cast<MarkSweep*>(arg)->VerifyIsLive(obj);
  // We don't actually want to sweep the object, so let's return "marked".
  return obj;
}

void MarkSweep::VerifyIsLive(const Object* obj) {
  if (!heap_->GetLiveBitmap()->Test(obj)) {
    if (std::find(heap_->allocation_stack_->Begin(), heap_->allocation_stack_->End(), obj) ==
        heap_->allocation_stack_->End()) {
      // Object not found!
      heap_->DumpSpaces();
      LOG(FATAL) << "Found dead object " << obj;
    }
  }
}

void MarkSweep::VerifySystemWeaks() {
  // Verify system weaks, uses a special object visitor which returns the input object.
  Runtime::Current()->SweepSystemWeaks(VerifySystemWeakIsLiveCallback, this);
}

class CheckpointMarkThreadRoots : public Closure {
 public:
  explicit CheckpointMarkThreadRoots(MarkSweep* mark_sweep,
                                     bool revoke_ros_alloc_thread_local_buffers_at_checkpoint)
      : mark_sweep_(mark_sweep),
        revoke_ros_alloc_thread_local_buffers_at_checkpoint_(
            revoke_ros_alloc_thread_local_buffers_at_checkpoint) {
  }

  virtual void Run(Thread* thread) OVERRIDE NO_THREAD_SAFETY_ANALYSIS {
    ATRACE_BEGIN("Marking thread roots");
    // Note: self is not necessarily equal to thread since thread may be suspended.
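    // For suspended threads the checkpoint is run on their behalf by the thread that
    // requested it, which is why the CHECK below accepts IsSuspended().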
    Thread* self = Thread::Current();
    CHECK(thread == self || thread->IsSuspended() || thread->GetState() == kWaitingPerformingGc)
        << thread->GetState() << " thread " << thread << " self " << self;
    thread->VisitRoots(MarkSweep::MarkRootParallelCallback, mark_sweep_);
    ATRACE_END();
    if (revoke_ros_alloc_thread_local_buffers_at_checkpoint_) {
      ATRACE_BEGIN("RevokeRosAllocThreadLocalBuffers");
      mark_sweep_->GetHeap()->RevokeRosAllocThreadLocalBuffers(thread);
      ATRACE_END();
    }
    mark_sweep_->GetBarrier().Pass(self);
  }

 private:
  MarkSweep* const mark_sweep_;
  const bool revoke_ros_alloc_thread_local_buffers_at_checkpoint_;
};

void MarkSweep::MarkRootsCheckpoint(Thread* self,
                                    bool revoke_ros_alloc_thread_local_buffers_at_checkpoint) {
  CheckpointMarkThreadRoots check_point(this, revoke_ros_alloc_thread_local_buffers_at_checkpoint);
  timings_.StartSplit("MarkRootsCheckpoint");
  ThreadList* thread_list = Runtime::Current()->GetThreadList();
  // Request that the check point be run on all threads, returning a count of the threads that
  // must run through the barrier including self.
  size_t barrier_count = thread_list->RunCheckpoint(&check_point);
  // Release locks then wait for all mutator threads to pass the barrier.
  // TODO: optimize to not release locks when there are no threads to wait for.
  Locks::heap_bitmap_lock_->ExclusiveUnlock(self);
  Locks::mutator_lock_->SharedUnlock(self);
  {
    ScopedThreadStateChange tsc(self, kWaitingForCheckPointsToRun);
    gc_barrier_->Increment(self, barrier_count);
  }
  Locks::mutator_lock_->SharedLock(self);
  Locks::heap_bitmap_lock_->ExclusiveLock(self);
  timings_.EndSplit();
}

void MarkSweep::SweepArray(accounting::ObjectStack* allocations, bool swap_bitmaps) {
  timings_.StartSplit("SweepArray");
  Thread* self = Thread::Current();
  mirror::Object* chunk_free_buffer[kSweepArrayChunkFreeSize];
  size_t chunk_free_pos = 0;
  size_t freed_bytes = 0;
  size_t freed_large_object_bytes = 0;
  size_t freed_objects = 0;
  size_t freed_large_objects = 0;
  // How many objects are left in the array, modified after each space is swept.
  Object** objects = allocations->Begin();
  size_t count = allocations->Size();
  // Change the order to ensure that the non-moving space is swept last as an optimization.
  std::vector<space::ContinuousSpace*> sweep_spaces;
  space::ContinuousSpace* non_moving_space = nullptr;
  for (space::ContinuousSpace* space : heap_->GetContinuousSpaces()) {
    if (space->IsAllocSpace() && !immune_region_.ContainsSpace(space) &&
        space->GetLiveBitmap() != nullptr) {
      if (space == heap_->GetNonMovingSpace()) {
        non_moving_space = space;
      } else {
        sweep_spaces.push_back(space);
      }
    }
  }
  // Unlikely to sweep a significant amount of non-movable objects, so we sweep the non-moving
  // space after the other alloc spaces as an optimization.
  if (non_moving_space != nullptr) {
    sweep_spaces.push_back(non_moving_space);
  }
  // Start by sweeping the continuous spaces.
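  // For each space, free the unmarked objects that belong to it and compact the survivors to
  // the front of the array, so later spaces only examine objects they might contain.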
  for (space::ContinuousSpace* space : sweep_spaces) {
    space::AllocSpace* alloc_space = space->AsAllocSpace();
    accounting::ContinuousSpaceBitmap* live_bitmap = space->GetLiveBitmap();
    accounting::ContinuousSpaceBitmap* mark_bitmap = space->GetMarkBitmap();
    if (swap_bitmaps) {
      std::swap(live_bitmap, mark_bitmap);
    }
    Object** out = objects;
    for (size_t i = 0; i < count; ++i) {
      Object* obj = objects[i];
      if (kUseThreadLocalAllocationStack && obj == nullptr) {
        continue;
      }
      if (space->HasAddress(obj)) {
        // This object is in the space, remove it from the array and add it to the sweep buffer
        // if needed.
        if (!mark_bitmap->Test(obj)) {
          if (chunk_free_pos >= kSweepArrayChunkFreeSize) {
            timings_.StartSplit("FreeList");
            freed_objects += chunk_free_pos;
            freed_bytes += alloc_space->FreeList(self, chunk_free_pos, chunk_free_buffer);
            timings_.EndSplit();
            chunk_free_pos = 0;
          }
          chunk_free_buffer[chunk_free_pos++] = obj;
        }
      } else {
        *(out++) = obj;
      }
    }
    if (chunk_free_pos > 0) {
      timings_.StartSplit("FreeList");
      freed_objects += chunk_free_pos;
      freed_bytes += alloc_space->FreeList(self, chunk_free_pos, chunk_free_buffer);
      timings_.EndSplit();
      chunk_free_pos = 0;
    }
    // All of the references which the space contained are no longer in the allocation stack;
    // update the count.
    count = out - objects;
  }
  // Handle the large object space.
  space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
  accounting::LargeObjectBitmap* large_live_objects = large_object_space->GetLiveBitmap();
  accounting::LargeObjectBitmap* large_mark_objects = large_object_space->GetMarkBitmap();
  if (swap_bitmaps) {
    std::swap(large_live_objects, large_mark_objects);
  }
  for (size_t i = 0; i < count; ++i) {
    Object* obj = objects[i];
    // Handle large objects.
    if (kUseThreadLocalAllocationStack && obj == nullptr) {
      continue;
    }
    if (!large_mark_objects->Test(obj)) {
      ++freed_large_objects;
      freed_large_object_bytes += large_object_space->Free(self, obj);
    }
  }
  timings_.EndSplit();

  timings_.StartSplit("RecordFree");
  VLOG(heap) << "Freed " << freed_objects << "/" << count
             << " objects with size " << PrettySize(freed_bytes);
  heap_->RecordFree(freed_objects + freed_large_objects, freed_bytes + freed_large_object_bytes);
  freed_objects_.FetchAndAdd(freed_objects);
  freed_large_objects_.FetchAndAdd(freed_large_objects);
  freed_bytes_.FetchAndAdd(freed_bytes);
  freed_large_object_bytes_.FetchAndAdd(freed_large_object_bytes);
  timings_.EndSplit();

  timings_.StartSplit("ResetStack");
  allocations->Reset();
  timings_.EndSplit();
}

void MarkSweep::Sweep(bool swap_bitmaps) {
  // Ensure that nobody inserted items in the live stack after we swapped the stacks.
  CHECK_GE(live_stack_freeze_size_, GetHeap()->GetLiveStack()->Size());
  // Mark everything allocated since the last GC as live so that we can sweep concurrently,
  // knowing that new allocations won't be marked as live.
  timings_.StartSplit("MarkStackAsLive");
  accounting::ObjectStack* live_stack = heap_->GetLiveStack();
  heap_->MarkAllocStackAsLive(live_stack);
  live_stack->Reset();
  timings_.EndSplit();

  DCHECK(mark_stack_->IsEmpty());
  for (const auto& space : GetHeap()->GetContinuousSpaces()) {
    if (space->IsContinuousMemMapAllocSpace()) {
      space::ContinuousMemMapAllocSpace* alloc_space = space->AsContinuousMemMapAllocSpace();
      TimingLogger::ScopedSplit split(
          alloc_space->IsZygoteSpace() ? "SweepZygoteSpace" : "SweepMallocSpace", &timings_);
      size_t freed_objects = 0;
      size_t freed_bytes = 0;
      alloc_space->Sweep(swap_bitmaps, &freed_objects, &freed_bytes);
      heap_->RecordFree(freed_objects, freed_bytes);
      freed_objects_.FetchAndAdd(freed_objects);
      freed_bytes_.FetchAndAdd(freed_bytes);
    }
  }
  SweepLargeObjects(swap_bitmaps);
}

void MarkSweep::SweepLargeObjects(bool swap_bitmaps) {
  TimingLogger::ScopedSplit split("SweepLargeObjects", &timings_);
  size_t freed_objects = 0;
  size_t freed_bytes = 0;
  heap_->GetLargeObjectsSpace()->Sweep(swap_bitmaps, &freed_objects, &freed_bytes);
  freed_large_objects_.FetchAndAdd(freed_objects);
  freed_large_object_bytes_.FetchAndAdd(freed_bytes);
  heap_->RecordFree(freed_objects, freed_bytes);
}

// Process the "referent" field in a java.lang.ref.Reference. If the referent has not yet been
// marked, put it on the appropriate list in the heap for later processing.
void MarkSweep::DelayReferenceReferent(mirror::Class* klass, mirror::Reference* ref) {
  DCHECK(klass != nullptr);
  if (kCountJavaLangRefs) {
    ++reference_count_;
  }
  heap_->DelayReferenceReferent(klass, ref, IsMarkedCallback, this);
}

class MarkObjectVisitor {
 public:
  explicit MarkObjectVisitor(MarkSweep* const mark_sweep) ALWAYS_INLINE : mark_sweep_(mark_sweep) {
  }

  void operator()(Object* obj, MemberOffset offset, bool /* is_static */) const
      ALWAYS_INLINE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
    if (kCheckLocks) {
      Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
      Locks::heap_bitmap_lock_->AssertExclusiveHeld(Thread::Current());
    }
    mark_sweep_->MarkObject(obj->GetFieldObject<mirror::Object>(offset, false));
  }

 private:
  MarkSweep* const mark_sweep_;
};

// Scans an object reference. Determines the type of the reference
// and dispatches to a specialized scanning routine.
void MarkSweep::ScanObject(Object* obj) {
  MarkObjectVisitor mark_visitor(this);
  DelayReferenceReferentVisitor ref_visitor(this);
  ScanObjectVisit(obj, mark_visitor, ref_visitor);
}

void MarkSweep::ProcessMarkStackPausedCallback(void* arg) {
  reinterpret_cast<MarkSweep*>(arg)->ProcessMarkStack(true);
}

void MarkSweep::ProcessMarkStackParallel(size_t thread_count) {
  Thread* self = Thread::Current();
  ThreadPool* thread_pool = GetHeap()->GetThreadPool();
  const size_t chunk_size = std::min(mark_stack_->Size() / thread_count + 1,
                                     static_cast<size_t>(MarkStackTask<false>::kMaxSize));
  CHECK_GT(chunk_size, 0U);
  // Split the current mark stack up into work tasks.
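  // Each task gets at most MarkStackTask<false>::kMaxSize objects so it fits in the task's
  // fixed-size local stack; otherwise the stack is split roughly evenly across the threads.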
  for (mirror::Object **it = mark_stack_->Begin(), **end = mark_stack_->End(); it < end; ) {
    const size_t delta = std::min(static_cast<size_t>(end - it), chunk_size);
    thread_pool->AddTask(self, new MarkStackTask<false>(thread_pool, this, delta, it));
    it += delta;
  }
  thread_pool->SetMaxActiveWorkers(thread_count - 1);
  thread_pool->StartWorkers(self);
  thread_pool->Wait(self, true, true);
  thread_pool->StopWorkers(self);
  mark_stack_->Reset();
  CHECK_EQ(work_chunks_created_, work_chunks_deleted_) << " some of the work chunks were leaked";
}

// Scan anything that's on the mark stack.
void MarkSweep::ProcessMarkStack(bool paused) {
  timings_.StartSplit(paused ? "(Paused)ProcessMarkStack" : "ProcessMarkStack");
  size_t thread_count = GetThreadCount(paused);
  if (kParallelProcessMarkStack && thread_count > 1 &&
      mark_stack_->Size() >= kMinimumParallelMarkStackSize) {
    ProcessMarkStackParallel(thread_count);
  } else {
    // TODO: Tune this.
    static const size_t kFifoSize = 4;
    BoundedFifoPowerOfTwo<Object*, kFifoSize> prefetch_fifo;
    for (;;) {
      Object* obj = NULL;
      if (kUseMarkStackPrefetch) {
        while (!mark_stack_->IsEmpty() && prefetch_fifo.size() < kFifoSize) {
          Object* obj = mark_stack_->PopBack();
          DCHECK(obj != NULL);
          __builtin_prefetch(obj);
          prefetch_fifo.push_back(obj);
        }
        if (prefetch_fifo.empty()) {
          break;
        }
        obj = prefetch_fifo.front();
        prefetch_fifo.pop_front();
      } else {
        if (mark_stack_->IsEmpty()) {
          break;
        }
        obj = mark_stack_->PopBack();
      }
      DCHECK(obj != nullptr);
      ScanObject(obj);
    }
  }
  timings_.EndSplit();
}

inline bool MarkSweep::IsMarked(const Object* object) const
    SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
  if (immune_region_.ContainsObject(object)) {
    return true;
  }
  if (current_space_bitmap_->HasAddress(object)) {
    return current_space_bitmap_->Test(object);
  }
  return mark_bitmap_->Test(object);
}

void MarkSweep::FinishPhase() {
  TimingLogger::ScopedSplit split("FinishPhase", &timings_);
  // Can't enqueue references if we hold the mutator lock.
  timings_.NewSplit("PostGcVerification");
  heap_->PostGcVerification(this);
  if (kCountScannedTypes) {
    VLOG(gc) << "MarkSweep scanned classes=" << class_count_ << " arrays=" << array_count_
             << " other=" << other_count_;
  }
  if (kCountTasks) {
    VLOG(gc) << "Total number of work chunks allocated: " << work_chunks_created_;
  }
  if (kMeasureOverhead) {
    VLOG(gc) << "Overhead time " << PrettyDuration(overhead_time_);
  }
  if (kProfileLargeObjects) {
    VLOG(gc) << "Large objects tested " << large_object_test_ << " marked " << large_object_mark_;
  }
  if (kCountJavaLangRefs) {
    VLOG(gc) << "References scanned " << reference_count_;
  }
  if (kCountMarkedObjects) {
    VLOG(gc) << "Marked: null=" << mark_null_count_ << " immune=" << mark_immune_count_
             << " fastpath=" << mark_fastpath_count_ << " slowpath=" << mark_slowpath_count_;
  }
  CHECK(mark_stack_->IsEmpty());  // Ensure that the mark stack is empty.
  mark_stack_->Reset();
  WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
  heap_->ClearMarkedObjects();
}

void MarkSweep::RevokeAllThreadLocalBuffers() {
  if (kRevokeRosAllocThreadLocalBuffersAtCheckpoint && IsConcurrent()) {
    // If concurrent, rosalloc thread-local buffers are revoked at the
    // thread checkpoint. Bump pointer space thread-local buffers must
    // not be in use.
    GetHeap()->AssertAllBumpPointerSpaceThreadLocalBuffersAreRevoked();
  } else {
    timings_.StartSplit("(Paused)RevokeAllThreadLocalBuffers");
    GetHeap()->RevokeAllThreadLocalBuffers();
    timings_.EndSplit();
  }
}

}  // namespace collector
}  // namespace gc
}  // namespace art