mark_sweep.cc revision a8e8f9c0a8e259a807d7b99a148d14104c24209d
/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "mark_sweep.h"

#include <functional>
#include <numeric>
#include <climits>
#include <vector>

#include "base/bounded_fifo.h"
#include "base/logging.h"
#include "base/macros.h"
#include "base/mutex-inl.h"
#include "base/timing_logger.h"
#include "gc/accounting/card_table-inl.h"
#include "gc/accounting/heap_bitmap-inl.h"
#include "gc/accounting/mod_union_table.h"
#include "gc/accounting/space_bitmap-inl.h"
#include "gc/heap.h"
#include "gc/space/image_space.h"
#include "gc/space/large_object_space.h"
#include "gc/space/space-inl.h"
#include "mark_sweep-inl.h"
#include "mirror/art_field-inl.h"
#include "mirror/object-inl.h"
#include "runtime.h"
#include "scoped_thread_state_change.h"
#include "thread-inl.h"
#include "thread_list.h"

using ::art::mirror::ArtField;
using ::art::mirror::Class;
using ::art::mirror::Object;
using ::art::mirror::ObjectArray;

namespace art {
namespace gc {
namespace collector {

// Performance options.
static constexpr bool kUseRecursiveMark = false;
static constexpr bool kUseMarkStackPrefetch = true;
static constexpr size_t kSweepArrayChunkFreeSize = 1024;
static constexpr bool kPreCleanCards = true;

// Parallelism options.
static constexpr bool kParallelCardScan = true;
static constexpr bool kParallelRecursiveMark = true;
// Don't attempt to parallelize mark stack processing unless the mark stack is at least n
// elements. This is temporary until we reduce the overhead caused by allocating tasks, etc. Not
// having this can add overhead in ProcessReferences since we may end up doing many calls of
// ProcessMarkStack with very small mark stacks.
static constexpr size_t kMinimumParallelMarkStackSize = 128;
static constexpr bool kParallelProcessMarkStack = true;

// Profiling and information flags.
static constexpr bool kProfileLargeObjects = false;
static constexpr bool kMeasureOverhead = false;
static constexpr bool kCountTasks = false;
static constexpr bool kCountJavaLangRefs = false;
static constexpr bool kCountMarkedObjects = false;

// Turn off kCheckLocks when profiling the GC since it slows the GC down by up to 40%.
static constexpr bool kCheckLocks = kDebugLocking;
static constexpr bool kVerifyRootsMarked = kIsDebugBuild;

// If true, revoke the rosalloc thread-local buffers at the
// checkpoint, as opposed to during the pause.
static constexpr bool kRevokeRosAllocThreadLocalBuffersAtCheckpoint = true;

void MarkSweep::BindBitmaps() {
  timings_.StartSplit("BindBitmaps");
  WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
  // Mark all of the spaces we never collect as immune.
  for (const auto& space : GetHeap()->GetContinuousSpaces()) {
    if (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyNeverCollect) {
      CHECK(immune_region_.AddContinuousSpace(space)) << "Failed to add space " << *space;
    }
  }
  timings_.EndSplit();
}

MarkSweep::MarkSweep(Heap* heap, bool is_concurrent, const std::string& name_prefix)
    : GarbageCollector(heap,
                       name_prefix +
                       (is_concurrent ? "concurrent mark sweep": "mark sweep")),
      gc_barrier_(new Barrier(0)),
      large_object_lock_("mark sweep large object lock", kMarkSweepLargeObjectLock),
      mark_stack_lock_("mark sweep mark stack lock", kMarkSweepMarkStackLock),
      is_concurrent_(is_concurrent) {
}

void MarkSweep::InitializePhase() {
  timings_.Reset();
  TimingLogger::ScopedSplit split("InitializePhase", &timings_);
  mark_stack_ = heap_->mark_stack_.get();
  DCHECK(mark_stack_ != nullptr);
  immune_region_.Reset();
  class_count_ = 0;
  array_count_ = 0;
  other_count_ = 0;
  large_object_test_ = 0;
  large_object_mark_ = 0;
  overhead_time_ = 0;
  work_chunks_created_ = 0;
  work_chunks_deleted_ = 0;
  reference_count_ = 0;
  mark_null_count_ = 0;
  mark_immune_count_ = 0;
  mark_fastpath_count_ = 0;
  mark_slowpath_count_ = 0;
  {
    // TODO: I don't think we should need heap bitmap lock to get the mark bitmap.
    ReaderMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
    mark_bitmap_ = heap_->GetMarkBitmap();
  }
  if (!clear_soft_references_) {
    // Always clear soft references for non-sticky collections.
    clear_soft_references_ = GetGcType() != collector::kGcTypeSticky;
  }
  // Do any pre GC verification.
  timings_.NewSplit("PreGcVerification");
  heap_->PreGcVerification(this);
}

void MarkSweep::ProcessReferences(Thread* self) {
  TimingLogger::ScopedSplit split("ProcessReferences", &timings_);
  WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
  GetHeap()->ProcessReferences(timings_, clear_soft_references_, &IsMarkedCallback,
                               &MarkObjectCallback, &ProcessMarkStackPausedCallback, this);
}

void MarkSweep::PreProcessReferences() {
  if (IsConcurrent()) {
    // No reason to do this for non-concurrent GC since pre-processing soft references only helps
    // pauses.
    timings_.NewSplit("PreProcessReferences");
    GetHeap()->ProcessSoftReferences(timings_, clear_soft_references_, &IsMarkedCallback,
                                     &MarkObjectCallback, &ProcessMarkStackPausedCallback, this);
  }
}

void MarkSweep::PausePhase() {
  TimingLogger::ScopedSplit split("(Paused)PausePhase", &timings_);
  Thread* self = Thread::Current();
  Locks::mutator_lock_->AssertExclusiveHeld(self);
  if (IsConcurrent()) {
    // Handle the dirty objects if we are a concurrent GC.
    WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
    // Re-mark root set.
    ReMarkRoots();
    // Scan dirty objects. This re-scan is only required for concurrent GC, where mutators may
    // have dirtied cards while marking was in progress.
    RecursiveMarkDirtyObjects(true, accounting::CardTable::kCardDirty);
  }
  ProcessReferences(self);
  {
    timings_.NewSplit("SwapStacks");
    WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
    heap_->SwapStacks(self);
    live_stack_freeze_size_ = heap_->GetLiveStack()->Size();
    // Need to revoke all the thread-local allocation stacks since we just swapped the allocation
    // stacks and don't want anybody to allocate into the live stack.
    RevokeAllThreadLocalAllocationStacks(self);
  }
  timings_.StartSplit("PreSweepingGcVerification");
  heap_->PreSweepingGcVerification(this);
  timings_.EndSplit();
  if (IsConcurrent()) {
    // Disallow new system weaks to prevent a race which occurs when someone adds a new system
    // weak before we sweep them. Since this new system weak may not be marked, the GC may
    // incorrectly sweep it. This also fixes a race where interning may attempt to return a strong
    // reference to a string that is about to be swept.
    Runtime::Current()->DisallowNewSystemWeaks();
  }
}

void MarkSweep::PreCleanCards() {
  // Don't do this for non-concurrent GCs since they don't have any dirty cards.
  if (kPreCleanCards && IsConcurrent()) {
    Thread* self = Thread::Current();
    CHECK(!Locks::mutator_lock_->IsExclusiveHeld(self));
    // Process dirty cards and add dirty cards to mod union tables; this also ages cards.
    heap_->ProcessCards(timings_, false);
    // The checkpoint root marking is required to avoid a race condition which occurs if the
    // following happens during a reference write:
    // 1. mutator dirties the card (write barrier)
    // 2. GC ages the card (the above ProcessCards call)
    // 3. GC scans the object (the RecursiveMarkDirtyObjects call below)
    // 4. mutator writes the value (corresponding to the write barrier in 1.)
    // This causes the GC to age the card but not necessarily mark the reference which the mutator
    // wrote into the object stored in the card.
    // Having the checkpoint fixes this issue since it ensures that the card mark and the
    // reference write are visible to the GC before the card is scanned (this is due to locks being
    // acquired / released in the checkpoint code).
    // The other roots are also marked to help reduce the pause.
    MarkRootsCheckpoint(self, false);
    MarkNonThreadRoots();
    MarkConcurrentRoots(
        static_cast<VisitRootFlags>(kVisitRootFlagClearRootLog | kVisitRootFlagNewRoots));
    // Process the newly aged cards.
    RecursiveMarkDirtyObjects(false, accounting::CardTable::kCardDirty - 1);
    // TODO: Empty allocation stack to reduce the number of objects we need to test / mark as live
    // in the next GC.
  }
}

void MarkSweep::RevokeAllThreadLocalAllocationStacks(Thread* self) {
  if (kUseThreadLocalAllocationStack) {
    timings_.NewSplit("RevokeAllThreadLocalAllocationStacks");
    Locks::mutator_lock_->AssertExclusiveHeld(self);
    heap_->RevokeAllThreadLocalAllocationStacks(self);
  }
}

void MarkSweep::MarkingPhase() {
  TimingLogger::ScopedSplit split("MarkingPhase", &timings_);
  Thread* self = Thread::Current();

  BindBitmaps();
  FindDefaultSpaceBitmap();

  // Process dirty cards and add dirty cards to mod union tables.
  heap_->ProcessCards(timings_, false);

  WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
  MarkRoots(self);
  MarkReachableObjects();
  // Pre-clean dirtied cards to reduce pauses.
  PreCleanCards();
  PreProcessReferences();
}

void MarkSweep::UpdateAndMarkModUnion() {
  for (const auto& space : heap_->GetContinuousSpaces()) {
    if (immune_region_.ContainsSpace(space)) {
      const char* name = space->IsZygoteSpace() ? "UpdateAndMarkZygoteModUnionTable" :
          "UpdateAndMarkImageModUnionTable";
      TimingLogger::ScopedSplit split(name, &timings_);
      accounting::ModUnionTable* mod_union_table = heap_->FindModUnionTableFromSpace(space);
      CHECK(mod_union_table != nullptr);
      mod_union_table->UpdateAndMarkReferences(MarkHeapReferenceCallback, this);
    }
  }
}

void MarkSweep::MarkReachableObjects() {
  UpdateAndMarkModUnion();
  // Recursively mark all the non-image bits set in the mark bitmap.
  RecursiveMark();
}

void MarkSweep::ReclaimPhase() {
  TimingLogger::ScopedSplit split("ReclaimPhase", &timings_);
  Thread* self = Thread::Current();
  SweepSystemWeaks(self);
  if (IsConcurrent()) {
    Runtime::Current()->AllowNewSystemWeaks();
  }
  {
    WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);

    // Reclaim unmarked objects.
    Sweep(false);

    // Swap the live and mark bitmaps for each space that we modified. This is an
    // optimization that enables us to not clear live bits inside of the sweep. Only swaps unbound
    // bitmaps.
    timings_.StartSplit("SwapBitmaps");
    SwapBitmaps();
    timings_.EndSplit();

    // Unbind the live and mark bitmaps.
    TimingLogger::ScopedSplit split("UnBindBitmaps", &timings_);
    GetHeap()->UnBindBitmaps();
  }
}

void MarkSweep::FindDefaultSpaceBitmap() {
  TimingLogger::ScopedSplit split("FindDefaultMarkBitmap", &timings_);
  for (const auto& space : GetHeap()->GetContinuousSpaces()) {
    accounting::ContinuousSpaceBitmap* bitmap = space->GetMarkBitmap();
    if (bitmap != nullptr &&
        space->GetGcRetentionPolicy() == space::kGcRetentionPolicyAlwaysCollect) {
      current_space_bitmap_ = bitmap;
      return;
    }
  }
  GetHeap()->DumpSpaces();
  LOG(FATAL) << "Could not find a default mark bitmap";
}

void MarkSweep::ExpandMarkStack() {
  ResizeMarkStack(mark_stack_->Capacity() * 2);
}

void MarkSweep::ResizeMarkStack(size_t new_size) {
  // Rare case, no need to have Thread::Current be a parameter.
  if (UNLIKELY(mark_stack_->Size() < mark_stack_->Capacity())) {
    // Someone else acquired the lock and expanded the mark stack before us.
    return;
  }
  std::vector<Object*> temp(mark_stack_->Begin(), mark_stack_->End());
  CHECK_LE(mark_stack_->Size(), new_size);
  mark_stack_->Resize(new_size);
  for (const auto& obj : temp) {
    mark_stack_->PushBack(obj);
  }
}

inline void MarkSweep::MarkObjectNonNullParallel(Object* obj) {
  DCHECK(obj != nullptr);
  if (MarkObjectParallel(obj)) {
    MutexLock mu(Thread::Current(), mark_stack_lock_);
    if (UNLIKELY(mark_stack_->Size() >= mark_stack_->Capacity())) {
      ExpandMarkStack();
    }
    // The object must be pushed on to the mark stack.
    mark_stack_->PushBack(obj);
  }
}

mirror::Object* MarkSweep::MarkObjectCallback(mirror::Object* obj, void* arg) {
  MarkSweep* mark_sweep = reinterpret_cast<MarkSweep*>(arg);
  mark_sweep->MarkObject(obj);
  return obj;
}

void MarkSweep::MarkHeapReferenceCallback(mirror::HeapReference<mirror::Object>* ref, void* arg) {
  reinterpret_cast<MarkSweep*>(arg)->MarkObject(ref->AsMirrorPtr());
}

inline void MarkSweep::MarkObjectNonNull(Object* obj) {
  DCHECK(obj != nullptr);
  if (kUseBakerOrBrooksReadBarrier) {
    // Verify all the objects have the correct pointer installed.
    obj->AssertReadBarrierPointer();
  }
  if (immune_region_.ContainsObject(obj)) {
    if (kCountMarkedObjects) {
      ++mark_immune_count_;
    }
    DCHECK(IsMarked(obj));
    return;
  }
  // Try to take advantage of locality of references within a space, failing this find the space
  // the hard way.
  accounting::ContinuousSpaceBitmap* object_bitmap = current_space_bitmap_;
  if (UNLIKELY(!object_bitmap->HasAddress(obj))) {
    object_bitmap = mark_bitmap_->GetContinuousSpaceBitmap(obj);
    if (kCountMarkedObjects) {
      ++mark_slowpath_count_;
    }
    if (UNLIKELY(object_bitmap == nullptr)) {
      MarkLargeObject(obj, true);
      return;
    }
  } else if (kCountMarkedObjects) {
    ++mark_fastpath_count_;
  }
  // This object was not previously marked.
  if (!object_bitmap->Set(obj)) {
    PushOnMarkStack(obj);
  }
}

inline void MarkSweep::PushOnMarkStack(Object* obj) {
  if (UNLIKELY(mark_stack_->Size() >= mark_stack_->Capacity())) {
    // Lock is not needed but is here anyway to please annotalysis.
    MutexLock mu(Thread::Current(), mark_stack_lock_);
    ExpandMarkStack();
  }
  // The object must be pushed on to the mark stack.
  mark_stack_->PushBack(obj);
}

// Rare case, probably not worth inlining since it will increase instruction cache miss rate.
bool MarkSweep::MarkLargeObject(const Object* obj, bool set) {
  // TODO: support >1 discontinuous space.
  space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
  accounting::ObjectSet* large_objects = large_object_space->GetMarkObjects();
  if (kProfileLargeObjects) {
    ++large_object_test_;
  }
  if (UNLIKELY(!large_objects->Test(obj))) {
    if (!large_object_space->Contains(obj)) {
      LOG(ERROR) << "Tried to mark " << obj << " not contained by any spaces";
      LOG(ERROR) << "Attempting to see if it's a bad root";
      VerifyRoots();
      LOG(FATAL) << "Can't mark bad root";
    }
    if (kProfileLargeObjects) {
      ++large_object_mark_;
    }
    if (set) {
      large_objects->Set(obj);
    } else {
      large_objects->Clear(obj);
    }
    return true;
  }
  return false;
}

inline bool MarkSweep::MarkObjectParallel(const Object* obj) {
  DCHECK(obj != nullptr);
  if (kUseBakerOrBrooksReadBarrier) {
    // Verify all the objects have the correct pointer installed.
    obj->AssertReadBarrierPointer();
  }
  if (immune_region_.ContainsObject(obj)) {
    DCHECK(IsMarked(obj));
    return false;
  }
  // Try to take advantage of locality of references within a space, failing this find the space
  // the hard way.
  accounting::ContinuousSpaceBitmap* object_bitmap = current_space_bitmap_;
  if (UNLIKELY(!object_bitmap->HasAddress(obj))) {
    accounting::ContinuousSpaceBitmap* new_bitmap = mark_bitmap_->GetContinuousSpaceBitmap(obj);
    if (new_bitmap != nullptr) {
      object_bitmap = new_bitmap;
    } else {
      // TODO: Remove the Thread::Current here?
      // TODO: Convert this to some kind of atomic marking?
      MutexLock mu(Thread::Current(), large_object_lock_);
      return MarkLargeObject(obj, true);
    }
  }
  // Return true if the object was not previously marked.
  return !object_bitmap->AtomicTestAndSet(obj);
}

// Used to mark objects when processing the mark stack. If an object is null, it is not marked.
inline void MarkSweep::MarkObject(Object* obj) {
  if (obj != nullptr) {
    MarkObjectNonNull(obj);
  } else if (kCountMarkedObjects) {
    ++mark_null_count_;
  }
}

void MarkSweep::MarkRootParallelCallback(Object** root, void* arg, uint32_t /*thread_id*/,
                                         RootType /*root_type*/) {
  reinterpret_cast<MarkSweep*>(arg)->MarkObjectNonNullParallel(*root);
}

void MarkSweep::VerifyRootMarked(Object** root, void* arg, uint32_t /*thread_id*/,
                                 RootType /*root_type*/) {
  CHECK(reinterpret_cast<MarkSweep*>(arg)->IsMarked(*root));
}

void MarkSweep::MarkRootCallback(Object** root, void* arg, uint32_t /*thread_id*/,
                                 RootType /*root_type*/) {
  reinterpret_cast<MarkSweep*>(arg)->MarkObjectNonNull(*root);
}

void MarkSweep::VerifyRootCallback(const Object* root, void* arg, size_t vreg,
                                   const StackVisitor* visitor, RootType root_type) {
  reinterpret_cast<MarkSweep*>(arg)->VerifyRoot(root, vreg, visitor, root_type);
}

void MarkSweep::VerifyRoot(const Object* root, size_t vreg, const StackVisitor* visitor,
                           RootType root_type) {
  // See if the root is on any space bitmap.
  if (heap_->GetLiveBitmap()->GetContinuousSpaceBitmap(root) == nullptr) {
    space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
    if (!large_object_space->Contains(root)) {
      LOG(ERROR) << "Found invalid root: " << root << " with type " << root_type;
      if (visitor != nullptr) {
        LOG(ERROR) << visitor->DescribeLocation() << " in VReg: " << vreg;
      }
    }
  }
}

void MarkSweep::VerifyRoots() {
  Runtime::Current()->GetThreadList()->VerifyRoots(VerifyRootCallback, this);
}

void MarkSweep::MarkRoots(Thread* self) {
  if (Locks::mutator_lock_->IsExclusiveHeld(self)) {
    // If we exclusively hold the mutator lock, all threads must be suspended.
    timings_.StartSplit("MarkRoots");
    Runtime::Current()->VisitRoots(MarkRootCallback, this);
    timings_.EndSplit();
    RevokeAllThreadLocalAllocationStacks(self);
  } else {
    MarkRootsCheckpoint(self, kRevokeRosAllocThreadLocalBuffersAtCheckpoint);
    // At this point the live stack should no longer have any mutators which push into it.
    MarkNonThreadRoots();
    MarkConcurrentRoots(
        static_cast<VisitRootFlags>(kVisitRootFlagAllRoots | kVisitRootFlagStartLoggingNewRoots));
  }
}

void MarkSweep::MarkNonThreadRoots() {
  timings_.StartSplit("MarkNonThreadRoots");
  Runtime::Current()->VisitNonThreadRoots(MarkRootCallback, this);
  timings_.EndSplit();
}

void MarkSweep::MarkConcurrentRoots(VisitRootFlags flags) {
  timings_.StartSplit("MarkConcurrentRoots");
  // Visit all runtime roots and clear dirty flags.
  Runtime::Current()->VisitConcurrentRoots(MarkRootCallback, this, flags);
  timings_.EndSplit();
}

class ScanObjectVisitor {
 public:
  explicit ScanObjectVisitor(MarkSweep* const mark_sweep) ALWAYS_INLINE
      : mark_sweep_(mark_sweep) {}

  void operator()(Object* obj) const ALWAYS_INLINE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
    if (kCheckLocks) {
      Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
      Locks::heap_bitmap_lock_->AssertExclusiveHeld(Thread::Current());
    }
    mark_sweep_->ScanObject(obj);
  }

 private:
  MarkSweep* const mark_sweep_;
};

class DelayReferenceReferentVisitor {
 public:
  explicit DelayReferenceReferentVisitor(MarkSweep* collector) : collector_(collector) {
  }

  void operator()(mirror::Class* klass, mirror::Reference* ref) const
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
    collector_->DelayReferenceReferent(klass, ref);
  }

 private:
  MarkSweep* const collector_;
};

template <bool kUseFinger = false>
class MarkStackTask : public Task {
 public:
  MarkStackTask(ThreadPool* thread_pool, MarkSweep* mark_sweep, size_t mark_stack_size,
                Object** mark_stack)
      : mark_sweep_(mark_sweep),
        thread_pool_(thread_pool),
        mark_stack_pos_(mark_stack_size) {
    // We may have to copy part of an existing mark stack when another mark stack overflows.
    if (mark_stack_size != 0) {
      DCHECK(mark_stack != nullptr);
      // TODO: Check performance?
      std::copy(mark_stack, mark_stack + mark_stack_size, mark_stack_);
    }
    if (kCountTasks) {
      ++mark_sweep_->work_chunks_created_;
    }
  }

  static const size_t kMaxSize = 1 * KB;

 protected:
  class MarkObjectParallelVisitor {
   public:
    explicit MarkObjectParallelVisitor(MarkStackTask<kUseFinger>* chunk_task,
                                       MarkSweep* mark_sweep) ALWAYS_INLINE
        : chunk_task_(chunk_task), mark_sweep_(mark_sweep) {}

    void operator()(Object* obj, MemberOffset offset, bool /* static */) const ALWAYS_INLINE
        SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
      mirror::Object* ref = obj->GetFieldObject<mirror::Object>(offset, false);
      if (ref != nullptr && mark_sweep_->MarkObjectParallel(ref)) {
        if (kUseFinger) {
          android_memory_barrier();
          if (reinterpret_cast<uintptr_t>(ref) >=
              static_cast<uintptr_t>(mark_sweep_->atomic_finger_)) {
            return;
          }
        }
        chunk_task_->MarkStackPush(ref);
      }
    }

   private:
    MarkStackTask<kUseFinger>* const chunk_task_;
    MarkSweep* const mark_sweep_;
  };

  class ScanObjectParallelVisitor {
   public:
    explicit ScanObjectParallelVisitor(MarkStackTask<kUseFinger>* chunk_task) ALWAYS_INLINE
        : chunk_task_(chunk_task) {}

    // No thread safety analysis since multiple threads will use this visitor.
    void operator()(Object* obj) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
        EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
      MarkSweep* const mark_sweep = chunk_task_->mark_sweep_;
      MarkObjectParallelVisitor mark_visitor(chunk_task_, mark_sweep);
      DelayReferenceReferentVisitor ref_visitor(mark_sweep);
      mark_sweep->ScanObjectVisit(obj, mark_visitor, ref_visitor);
    }

   private:
    MarkStackTask<kUseFinger>* const chunk_task_;
  };

  virtual ~MarkStackTask() {
    // Make sure that we have cleared our mark stack.
    DCHECK_EQ(mark_stack_pos_, 0U);
    if (kCountTasks) {
      ++mark_sweep_->work_chunks_deleted_;
    }
  }

  MarkSweep* const mark_sweep_;
  ThreadPool* const thread_pool_;
  // Thread local mark stack for this task.
  Object* mark_stack_[kMaxSize];
  // Mark stack position.
  size_t mark_stack_pos_;

  void MarkStackPush(Object* obj) ALWAYS_INLINE {
    if (UNLIKELY(mark_stack_pos_ == kMaxSize)) {
      // Mark stack overflow, give 1/2 the stack to the thread pool as a new work task.
      mark_stack_pos_ /= 2;
      auto* task = new MarkStackTask(thread_pool_, mark_sweep_, kMaxSize - mark_stack_pos_,
                                     mark_stack_ + mark_stack_pos_);
      thread_pool_->AddTask(Thread::Current(), task);
    }
    DCHECK(obj != nullptr);
    DCHECK_LT(mark_stack_pos_, kMaxSize);
    mark_stack_[mark_stack_pos_++] = obj;
  }

  virtual void Finalize() {
    delete this;
  }

  // Scans all of the objects.
  virtual void Run(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
    ScanObjectParallelVisitor visitor(this);
    // TODO: Tune this.
    static const size_t kFifoSize = 4;
    BoundedFifoPowerOfTwo<Object*, kFifoSize> prefetch_fifo;
    for (;;) {
      Object* obj = nullptr;
      if (kUseMarkStackPrefetch) {
        while (mark_stack_pos_ != 0 && prefetch_fifo.size() < kFifoSize) {
          Object* obj = mark_stack_[--mark_stack_pos_];
          DCHECK(obj != nullptr);
          __builtin_prefetch(obj);
          prefetch_fifo.push_back(obj);
        }
        if (UNLIKELY(prefetch_fifo.empty())) {
          break;
        }
        obj = prefetch_fifo.front();
        prefetch_fifo.pop_front();
      } else {
        if (UNLIKELY(mark_stack_pos_ == 0)) {
          break;
        }
        obj = mark_stack_[--mark_stack_pos_];
      }
      DCHECK(obj != nullptr);
      visitor(obj);
    }
  }
};

class CardScanTask : public MarkStackTask<false> {
 public:
  CardScanTask(ThreadPool* thread_pool, MarkSweep* mark_sweep,
               accounting::ContinuousSpaceBitmap* bitmap,
               byte* begin, byte* end, byte minimum_age, size_t mark_stack_size,
               Object** mark_stack_obj)
      : MarkStackTask<false>(thread_pool, mark_sweep, mark_stack_size, mark_stack_obj),
        bitmap_(bitmap),
        begin_(begin),
        end_(end),
        minimum_age_(minimum_age) {
  }

 protected:
  accounting::ContinuousSpaceBitmap* const bitmap_;
  byte* const begin_;
  byte* const end_;
  const byte minimum_age_;

  virtual void Finalize() {
    delete this;
  }

  virtual void Run(Thread* self) NO_THREAD_SAFETY_ANALYSIS {
    ScanObjectParallelVisitor visitor(this);
    accounting::CardTable* card_table = mark_sweep_->GetHeap()->GetCardTable();
    size_t cards_scanned = card_table->Scan(bitmap_, begin_, end_, visitor, minimum_age_);
    VLOG(heap) << "Parallel scanning cards " << reinterpret_cast<void*>(begin_) << " - "
               << reinterpret_cast<void*>(end_) << " = " << cards_scanned;
    // Finish by emptying our local mark stack.
    MarkStackTask::Run(self);
  }
};

size_t MarkSweep::GetThreadCount(bool paused) const {
  if (heap_->GetThreadPool() == nullptr || !heap_->CareAboutPauseTimes()) {
    return 0;
  }
  if (paused) {
    return heap_->GetParallelGCThreadCount() + 1;
  } else {
    return heap_->GetConcGCThreadCount() + 1;
  }
}

void MarkSweep::ScanGrayObjects(bool paused, byte minimum_age) {
  accounting::CardTable* card_table = GetHeap()->GetCardTable();
  ThreadPool* thread_pool = GetHeap()->GetThreadPool();
  size_t thread_count = GetThreadCount(paused);
  // The parallel version with only one thread is faster for card scanning. TODO: fix.
  if (kParallelCardScan && thread_count > 0) {
    Thread* self = Thread::Current();
    // Can't have a different split for each space since multiple spaces can have their cards being
    // scanned at the same time.
    timings_.StartSplit(paused ? "(Paused)ScanGrayObjects" : "ScanGrayObjects");
    // Try to take some of the mark stack since we can pass this off to the worker tasks.
    Object** mark_stack_begin = mark_stack_->Begin();
    Object** mark_stack_end = mark_stack_->End();
    const size_t mark_stack_size = mark_stack_end - mark_stack_begin;
    // Estimated number of work tasks we will create.
    const size_t mark_stack_tasks = GetHeap()->GetContinuousSpaces().size() * thread_count;
    DCHECK_NE(mark_stack_tasks, 0U);
    const size_t mark_stack_delta = std::min(CardScanTask::kMaxSize / 2,
                                             mark_stack_size / mark_stack_tasks + 1);
    for (const auto& space : GetHeap()->GetContinuousSpaces()) {
      if (space->GetMarkBitmap() == nullptr) {
        continue;
      }
      byte* card_begin = space->Begin();
      byte* card_end = space->End();
      // Align up the end address. For example, the image space's end
      // may not be card-size-aligned.
      card_end = AlignUp(card_end, accounting::CardTable::kCardSize);
      DCHECK(IsAligned<accounting::CardTable::kCardSize>(card_begin));
      DCHECK(IsAligned<accounting::CardTable::kCardSize>(card_end));
      // Calculate how many bytes of heap we will scan.
      const size_t address_range = card_end - card_begin;
      // Calculate how much address range each task gets.
      const size_t card_delta = RoundUp(address_range / thread_count + 1,
                                        accounting::CardTable::kCardSize);
      // Create the worker tasks for this space.
      while (card_begin != card_end) {
        // Add a range of cards.
        size_t addr_remaining = card_end - card_begin;
        size_t card_increment = std::min(card_delta, addr_remaining);
        // Take from the back of the mark stack.
        size_t mark_stack_remaining = mark_stack_end - mark_stack_begin;
        size_t mark_stack_increment = std::min(mark_stack_delta, mark_stack_remaining);
        mark_stack_end -= mark_stack_increment;
        mark_stack_->PopBackCount(static_cast<int32_t>(mark_stack_increment));
        DCHECK_EQ(mark_stack_end, mark_stack_->End());
        // Add the new task to the thread pool.
        auto* task = new CardScanTask(thread_pool, this, space->GetMarkBitmap(), card_begin,
                                      card_begin + card_increment, minimum_age,
                                      mark_stack_increment, mark_stack_end);
        thread_pool->AddTask(self, task);
        card_begin += card_increment;
      }
    }

    // Note: the card scan below may dirty new cards (and scan them)
    // as a side effect when a Reference object is encountered and
    // queued during the marking. See b/11465268.
    thread_pool->SetMaxActiveWorkers(thread_count - 1);
    thread_pool->StartWorkers(self);
    thread_pool->Wait(self, true, true);
    thread_pool->StopWorkers(self);
    timings_.EndSplit();
  } else {
    for (const auto& space : GetHeap()->GetContinuousSpaces()) {
      if (space->GetMarkBitmap() != nullptr) {
        // Image spaces are handled properly since live == marked for them.
        switch (space->GetGcRetentionPolicy()) {
          case space::kGcRetentionPolicyNeverCollect:
            timings_.StartSplit(paused ? "(Paused)ScanGrayImageSpaceObjects" :
                "ScanGrayImageSpaceObjects");
            break;
          case space::kGcRetentionPolicyFullCollect:
            timings_.StartSplit(paused ? "(Paused)ScanGrayZygoteSpaceObjects" :
                "ScanGrayZygoteSpaceObjects");
            break;
          case space::kGcRetentionPolicyAlwaysCollect:
            timings_.StartSplit(paused ? "(Paused)ScanGrayAllocSpaceObjects" :
                "ScanGrayAllocSpaceObjects");
            break;
        }
        ScanObjectVisitor visitor(this);
        card_table->Scan(space->GetMarkBitmap(), space->Begin(), space->End(), visitor,
                         minimum_age);
        timings_.EndSplit();
      }
    }
  }
}

class RecursiveMarkTask : public MarkStackTask<false> {
 public:
  RecursiveMarkTask(ThreadPool* thread_pool, MarkSweep* mark_sweep,
                    accounting::ContinuousSpaceBitmap* bitmap, uintptr_t begin, uintptr_t end)
      : MarkStackTask<false>(thread_pool, mark_sweep, 0, nullptr),
        bitmap_(bitmap),
        begin_(begin),
        end_(end) {
  }

 protected:
  accounting::ContinuousSpaceBitmap* const bitmap_;
  const uintptr_t begin_;
  const uintptr_t end_;

  virtual void Finalize() {
    delete this;
  }

  // Scans all of the objects.
  virtual void Run(Thread* self) NO_THREAD_SAFETY_ANALYSIS {
    ScanObjectParallelVisitor visitor(this);
    bitmap_->VisitMarkedRange(begin_, end_, visitor);
    // Finish by emptying our local mark stack.
    MarkStackTask::Run(self);
  }
};

// Populates the mark stack based on the set of marked objects and
// recursively marks until the mark stack is emptied.
void MarkSweep::RecursiveMark() {
  TimingLogger::ScopedSplit split("RecursiveMark", &timings_);
  // RecursiveMark will build the lists of known instances of the Reference classes. See
  // DelayReferenceReferent for details.
  if (kUseRecursiveMark) {
    const bool partial = GetGcType() == kGcTypePartial;
    ScanObjectVisitor scan_visitor(this);
    auto* self = Thread::Current();
    ThreadPool* thread_pool = heap_->GetThreadPool();
    size_t thread_count = GetThreadCount(false);
    const bool parallel = kParallelRecursiveMark && thread_count > 1;
    mark_stack_->Reset();
    for (const auto& space : GetHeap()->GetContinuousSpaces()) {
      if ((space->GetGcRetentionPolicy() == space::kGcRetentionPolicyAlwaysCollect) ||
          (!partial && space->GetGcRetentionPolicy() == space::kGcRetentionPolicyFullCollect)) {
        current_space_bitmap_ = space->GetMarkBitmap();
        if (current_space_bitmap_ == nullptr) {
          continue;
        }
        if (parallel) {
          // We will use the mark stack in the future.
          // CHECK(mark_stack_->IsEmpty());
          // This function does not handle heap end increasing, so we must use the space end.
          uintptr_t begin = reinterpret_cast<uintptr_t>(space->Begin());
          uintptr_t end = reinterpret_cast<uintptr_t>(space->End());
          atomic_finger_ = static_cast<int32_t>(0xFFFFFFFF);

          // Create a few worker tasks.
          const size_t n = thread_count * 2;
          while (begin != end) {
            uintptr_t start = begin;
            uintptr_t delta = (end - begin) / n;
            delta = RoundUp(delta, KB);
            if (delta < 16 * KB) delta = end - begin;
            begin += delta;
            auto* task = new RecursiveMarkTask(thread_pool, this, current_space_bitmap_, start,
                                               begin);
            thread_pool->AddTask(self, task);
          }
          thread_pool->SetMaxActiveWorkers(thread_count - 1);
          thread_pool->StartWorkers(self);
          thread_pool->Wait(self, true, true);
          thread_pool->StopWorkers(self);
        } else {
          // This function does not handle heap end increasing, so we must use the space end.
          uintptr_t begin = reinterpret_cast<uintptr_t>(space->Begin());
          uintptr_t end = reinterpret_cast<uintptr_t>(space->End());
          current_space_bitmap_->VisitMarkedRange(begin, end, scan_visitor);
        }
      }
    }
  }
  ProcessMarkStack(false);
}

mirror::Object* MarkSweep::IsMarkedCallback(mirror::Object* object, void* arg) {
  if (reinterpret_cast<MarkSweep*>(arg)->IsMarked(object)) {
    return object;
  }
  return nullptr;
}

void MarkSweep::RecursiveMarkDirtyObjects(bool paused, byte minimum_age) {
  ScanGrayObjects(paused, minimum_age);
  ProcessMarkStack(paused);
}

void MarkSweep::ReMarkRoots() {
  Locks::mutator_lock_->AssertExclusiveHeld(Thread::Current());
  timings_.StartSplit("(Paused)ReMarkRoots");
  Runtime::Current()->VisitRoots(
      MarkRootCallback, this, static_cast<VisitRootFlags>(kVisitRootFlagNewRoots |
                                                          kVisitRootFlagStopLoggingNewRoots |
                                                          kVisitRootFlagClearRootLog));
  timings_.EndSplit();
  if (kVerifyRootsMarked) {
    timings_.StartSplit("(Paused)VerifyRoots");
    Runtime::Current()->VisitRoots(VerifyRootMarked, this);
    timings_.EndSplit();
  }
}

void MarkSweep::SweepSystemWeaks(Thread* self) {
  WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
  timings_.StartSplit("SweepSystemWeaks");
  Runtime::Current()->SweepSystemWeaks(IsMarkedCallback, this);
  timings_.EndSplit();
}

mirror::Object* MarkSweep::VerifySystemWeakIsLiveCallback(Object* obj, void* arg) {
  reinterpret_cast<MarkSweep*>(arg)->VerifyIsLive(obj);
  // We don't actually want to sweep the object, so let's return "marked".
  return obj;
}

void MarkSweep::VerifyIsLive(const Object* obj) {
  if (!heap_->GetLiveBitmap()->Test(obj)) {
    space::LargeObjectSpace* large_object_space = heap_->GetLargeObjectsSpace();
    if (!large_object_space->GetLiveObjects()->Test(obj)) {
      if (std::find(heap_->allocation_stack_->Begin(), heap_->allocation_stack_->End(), obj) ==
          heap_->allocation_stack_->End()) {
        // Object not found!
        heap_->DumpSpaces();
        LOG(FATAL) << "Found dead object " << obj;
      }
    }
  }
}

void MarkSweep::VerifySystemWeaks() {
  // Verify system weaks, uses a special object visitor which returns the input object.
  Runtime::Current()->SweepSystemWeaks(VerifySystemWeakIsLiveCallback, this);
}

class CheckpointMarkThreadRoots : public Closure {
 public:
  explicit CheckpointMarkThreadRoots(MarkSweep* mark_sweep,
                                     bool revoke_ros_alloc_thread_local_buffers_at_checkpoint)
      : mark_sweep_(mark_sweep),
        revoke_ros_alloc_thread_local_buffers_at_checkpoint_(
            revoke_ros_alloc_thread_local_buffers_at_checkpoint) {
  }

  virtual void Run(Thread* thread) OVERRIDE NO_THREAD_SAFETY_ANALYSIS {
    ATRACE_BEGIN("Marking thread roots");
    // Note: self is not necessarily equal to thread since thread may be suspended.
    Thread* self = Thread::Current();
    CHECK(thread == self || thread->IsSuspended() || thread->GetState() == kWaitingPerformingGc)
        << thread->GetState() << " thread " << thread << " self " << self;
    thread->VisitRoots(MarkSweep::MarkRootParallelCallback, mark_sweep_);
    ATRACE_END();
    if (revoke_ros_alloc_thread_local_buffers_at_checkpoint_) {
      ATRACE_BEGIN("RevokeRosAllocThreadLocalBuffers");
      mark_sweep_->GetHeap()->RevokeRosAllocThreadLocalBuffers(thread);
      ATRACE_END();
    }
    mark_sweep_->GetBarrier().Pass(self);
  }

 private:
  MarkSweep* const mark_sweep_;
  const bool revoke_ros_alloc_thread_local_buffers_at_checkpoint_;
};

void MarkSweep::MarkRootsCheckpoint(Thread* self,
                                    bool revoke_ros_alloc_thread_local_buffers_at_checkpoint) {
  CheckpointMarkThreadRoots check_point(this, revoke_ros_alloc_thread_local_buffers_at_checkpoint);
  timings_.StartSplit("MarkRootsCheckpoint");
  ThreadList* thread_list = Runtime::Current()->GetThreadList();
  // Request that the checkpoint be run on all threads, returning a count of the threads that must
  // run through the barrier, including self.
  size_t barrier_count = thread_list->RunCheckpoint(&check_point);
  // Release locks then wait for all mutator threads to pass the barrier.
  // TODO: optimize to not release locks when there are no threads to wait for.
  Locks::heap_bitmap_lock_->ExclusiveUnlock(self);
  Locks::mutator_lock_->SharedUnlock(self);
  {
    ScopedThreadStateChange tsc(self, kWaitingForCheckPointsToRun);
    gc_barrier_->Increment(self, barrier_count);
  }
  Locks::mutator_lock_->SharedLock(self);
  Locks::heap_bitmap_lock_->ExclusiveLock(self);
  timings_.EndSplit();
}

void MarkSweep::SweepArray(accounting::ObjectStack* allocations, bool swap_bitmaps) {
  timings_.StartSplit("SweepArray");
  Thread* self = Thread::Current();
  mirror::Object* chunk_free_buffer[kSweepArrayChunkFreeSize];
  size_t chunk_free_pos = 0;
  size_t freed_bytes = 0;
  size_t freed_large_object_bytes = 0;
  size_t freed_objects = 0;
  size_t freed_large_objects = 0;
  // How many objects are left in the array, modified after each space is swept.
  Object** objects = allocations->Begin();
  size_t count = allocations->Size();
  // Change the order to ensure that the non-moving space is swept last as an optimization.
  std::vector<space::ContinuousSpace*> sweep_spaces;
  space::ContinuousSpace* non_moving_space = nullptr;
  for (space::ContinuousSpace* space : heap_->GetContinuousSpaces()) {
    if (space->IsAllocSpace() && !immune_region_.ContainsSpace(space) &&
        space->GetLiveBitmap() != nullptr) {
      if (space == heap_->GetNonMovingSpace()) {
        non_moving_space = space;
      } else {
        sweep_spaces.push_back(space);
      }
    }
  }
  // We are unlikely to sweep a significant amount of non-movable objects, so we do these after
  // the other alloc spaces as an optimization.
  if (non_moving_space != nullptr) {
    sweep_spaces.push_back(non_moving_space);
  }
  // Start by sweeping the continuous spaces.
  for (space::ContinuousSpace* space : sweep_spaces) {
    space::AllocSpace* alloc_space = space->AsAllocSpace();
    accounting::ContinuousSpaceBitmap* live_bitmap = space->GetLiveBitmap();
    accounting::ContinuousSpaceBitmap* mark_bitmap = space->GetMarkBitmap();
    if (swap_bitmaps) {
      std::swap(live_bitmap, mark_bitmap);
    }
    Object** out = objects;
    for (size_t i = 0; i < count; ++i) {
      Object* obj = objects[i];
      if (kUseThreadLocalAllocationStack && obj == nullptr) {
        continue;
      }
      if (space->HasAddress(obj)) {
        // This object is in the space, remove it from the array and add it to the sweep buffer
        // if needed.
        if (!mark_bitmap->Test(obj)) {
          if (chunk_free_pos >= kSweepArrayChunkFreeSize) {
            timings_.StartSplit("FreeList");
            freed_objects += chunk_free_pos;
            freed_bytes += alloc_space->FreeList(self, chunk_free_pos, chunk_free_buffer);
            timings_.EndSplit();
            chunk_free_pos = 0;
          }
          chunk_free_buffer[chunk_free_pos++] = obj;
        }
      } else {
        *(out++) = obj;
      }
    }
    if (chunk_free_pos > 0) {
      timings_.StartSplit("FreeList");
      freed_objects += chunk_free_pos;
      freed_bytes += alloc_space->FreeList(self, chunk_free_pos, chunk_free_buffer);
      timings_.EndSplit();
      chunk_free_pos = 0;
    }
    // All of the references which the space contained are no longer in the allocation stack;
    // update the count.
    count = out - objects;
  }
  // Handle the large object space.
  space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
  accounting::ObjectSet* large_live_objects = large_object_space->GetLiveObjects();
  accounting::ObjectSet* large_mark_objects = large_object_space->GetMarkObjects();
  if (swap_bitmaps) {
    std::swap(large_live_objects, large_mark_objects);
  }
  for (size_t i = 0; i < count; ++i) {
    Object* obj = objects[i];
    // Handle large objects.
    if (kUseThreadLocalAllocationStack && obj == nullptr) {
      continue;
    }
    if (!large_mark_objects->Test(obj)) {
      ++freed_large_objects;
      freed_large_object_bytes += large_object_space->Free(self, obj);
    }
  }
  timings_.EndSplit();

  timings_.StartSplit("RecordFree");
  VLOG(heap) << "Freed " << freed_objects << "/" << count
             << " objects with size " << PrettySize(freed_bytes);
  heap_->RecordFree(freed_objects + freed_large_objects, freed_bytes + freed_large_object_bytes);
  freed_objects_.FetchAndAdd(freed_objects);
  freed_large_objects_.FetchAndAdd(freed_large_objects);
  freed_bytes_.FetchAndAdd(freed_bytes);
  freed_large_object_bytes_.FetchAndAdd(freed_large_object_bytes);
  timings_.EndSplit();

  timings_.StartSplit("ResetStack");
  allocations->Reset();
  timings_.EndSplit();
}

void MarkSweep::Sweep(bool swap_bitmaps) {
  // Ensure that nobody inserted items in the live stack after we swapped the stacks.
  CHECK_GE(live_stack_freeze_size_, GetHeap()->GetLiveStack()->Size());
  // Mark everything allocated since the last GC as live so that we can sweep concurrently,
  // knowing that new allocations won't be marked as live.
  timings_.StartSplit("MarkStackAsLive");
  accounting::ObjectStack* live_stack = heap_->GetLiveStack();
  heap_->MarkAllocStackAsLive(live_stack);
  live_stack->Reset();
  timings_.EndSplit();

  DCHECK(mark_stack_->IsEmpty());
  // Name the split so the RAII helper spans the sweep loop instead of being destroyed immediately.
  TimingLogger::ScopedSplit split("Sweep", &timings_);
  for (const auto& space : GetHeap()->GetContinuousSpaces()) {
    if (space->IsContinuousMemMapAllocSpace()) {
      space::ContinuousMemMapAllocSpace* alloc_space = space->AsContinuousMemMapAllocSpace();
      TimingLogger::ScopedSplit split(
          alloc_space->IsZygoteSpace() ? "SweepZygoteSpace" : "SweepMallocSpace", &timings_);
      size_t freed_objects = 0;
      size_t freed_bytes = 0;
      alloc_space->Sweep(swap_bitmaps, &freed_objects, &freed_bytes);
      heap_->RecordFree(freed_objects, freed_bytes);
      freed_objects_.FetchAndAdd(freed_objects);
      freed_bytes_.FetchAndAdd(freed_bytes);
    }
  }
  SweepLargeObjects(swap_bitmaps);
}

void MarkSweep::SweepLargeObjects(bool swap_bitmaps) {
  TimingLogger::ScopedSplit split("SweepLargeObjects", &timings_);
  size_t freed_objects = 0;
  size_t freed_bytes = 0;
  GetHeap()->GetLargeObjectsSpace()->Sweep(swap_bitmaps, &freed_objects, &freed_bytes);
  freed_large_objects_.FetchAndAdd(freed_objects);
  freed_large_object_bytes_.FetchAndAdd(freed_bytes);
  GetHeap()->RecordFree(freed_objects, freed_bytes);
}

// Process the "referent" field in a java.lang.ref.Reference. If the referent has not yet been
// marked, put it on the appropriate list in the heap for later processing.
void MarkSweep::DelayReferenceReferent(mirror::Class* klass, mirror::Reference* ref) {
  DCHECK(klass != nullptr);
  if (kCountJavaLangRefs) {
    ++reference_count_;
  }
  heap_->DelayReferenceReferent(klass, ref, IsMarkedCallback, this);
}

class MarkObjectVisitor {
 public:
  explicit MarkObjectVisitor(MarkSweep* const mark_sweep) ALWAYS_INLINE : mark_sweep_(mark_sweep) {
  }

  void operator()(Object* obj, MemberOffset offset, bool /* is_static */) const
      ALWAYS_INLINE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
    if (kCheckLocks) {
      Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
      Locks::heap_bitmap_lock_->AssertExclusiveHeld(Thread::Current());
    }
    mark_sweep_->MarkObject(obj->GetFieldObject<mirror::Object>(offset, false));
  }

 private:
  MarkSweep* const mark_sweep_;
};

// Scans an object reference. Determines the type of the reference
// and dispatches to a specialized scanning routine.
void MarkSweep::ScanObject(Object* obj) {
  MarkObjectVisitor mark_visitor(this);
  DelayReferenceReferentVisitor ref_visitor(this);
  ScanObjectVisit(obj, mark_visitor, ref_visitor);
}

void MarkSweep::ProcessMarkStackPausedCallback(void* arg) {
  reinterpret_cast<MarkSweep*>(arg)->ProcessMarkStack(true);
}

void MarkSweep::ProcessMarkStackParallel(size_t thread_count) {
  Thread* self = Thread::Current();
  ThreadPool* thread_pool = GetHeap()->GetThreadPool();
  const size_t chunk_size = std::min(mark_stack_->Size() / thread_count + 1,
                                     static_cast<size_t>(MarkStackTask<false>::kMaxSize));
  CHECK_GT(chunk_size, 0U);
  // Split the current mark stack up into work tasks.
  for (mirror::Object **it = mark_stack_->Begin(), **end = mark_stack_->End(); it < end; ) {
    const size_t delta = std::min(static_cast<size_t>(end - it), chunk_size);
    thread_pool->AddTask(self, new MarkStackTask<false>(thread_pool, this, delta, it));
    it += delta;
  }
  thread_pool->SetMaxActiveWorkers(thread_count - 1);
  thread_pool->StartWorkers(self);
  thread_pool->Wait(self, true, true);
  thread_pool->StopWorkers(self);
  mark_stack_->Reset();
  CHECK_EQ(work_chunks_created_, work_chunks_deleted_) << " some of the work chunks were leaked";
}

// Scan anything that's on the mark stack.
void MarkSweep::ProcessMarkStack(bool paused) {
  timings_.StartSplit(paused ? "(Paused)ProcessMarkStack" : "ProcessMarkStack");
  size_t thread_count = GetThreadCount(paused);
  if (kParallelProcessMarkStack && thread_count > 1 &&
      mark_stack_->Size() >= kMinimumParallelMarkStackSize) {
    ProcessMarkStackParallel(thread_count);
  } else {
    // TODO: Tune this.
    static const size_t kFifoSize = 4;
    BoundedFifoPowerOfTwo<Object*, kFifoSize> prefetch_fifo;
    for (;;) {
      Object* obj = nullptr;
      if (kUseMarkStackPrefetch) {
        while (!mark_stack_->IsEmpty() && prefetch_fifo.size() < kFifoSize) {
          Object* obj = mark_stack_->PopBack();
          DCHECK(obj != nullptr);
          __builtin_prefetch(obj);
          prefetch_fifo.push_back(obj);
        }
        if (prefetch_fifo.empty()) {
          break;
        }
        obj = prefetch_fifo.front();
        prefetch_fifo.pop_front();
      } else {
        if (mark_stack_->IsEmpty()) {
          break;
        }
        obj = mark_stack_->PopBack();
      }
      DCHECK(obj != nullptr);
      ScanObject(obj);
    }
  }
  timings_.EndSplit();
}

inline bool MarkSweep::IsMarked(const Object* object) const
    SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
  if (immune_region_.ContainsObject(object)) {
    return true;
  }
  if (current_space_bitmap_->HasAddress(object)) {
    return current_space_bitmap_->Test(object);
  }
  return mark_bitmap_->Test(object);
}

void MarkSweep::FinishPhase() {
  TimingLogger::ScopedSplit split("FinishPhase", &timings_);
  // Can't enqueue references if we hold the mutator lock.
  timings_.NewSplit("PostGcVerification");
  heap_->PostGcVerification(this);
  if (kCountScannedTypes) {
    VLOG(gc) << "MarkSweep scanned classes=" << class_count_ << " arrays=" << array_count_
             << " other=" << other_count_;
  }
  if (kCountTasks) {
    VLOG(gc) << "Total number of work chunks allocated: " << work_chunks_created_;
  }
  if (kMeasureOverhead) {
    VLOG(gc) << "Overhead time " << PrettyDuration(overhead_time_);
  }
  if (kProfileLargeObjects) {
    VLOG(gc) << "Large objects tested " << large_object_test_ << " marked " << large_object_mark_;
  }
  if (kCountJavaLangRefs) {
    VLOG(gc) << "References scanned " << reference_count_;
  }
  if (kCountMarkedObjects) {
    VLOG(gc) << "Marked: null=" << mark_null_count_ << " immune=" << mark_immune_count_
             << " fastpath=" << mark_fastpath_count_ << " slowpath=" << mark_slowpath_count_;
  }
  CHECK(mark_stack_->IsEmpty());  // Ensure that the mark stack is empty.
  mark_stack_->Reset();
  WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
  heap_->ClearMarkedObjects();
}

void MarkSweep::RevokeAllThreadLocalBuffers() {
  if (kRevokeRosAllocThreadLocalBuffersAtCheckpoint && IsConcurrent()) {
    // If concurrent, rosalloc thread-local buffers are revoked at the
    // thread checkpoint. Bump pointer space thread-local buffers must
    // not be in use.
    GetHeap()->AssertAllBumpPointerSpaceThreadLocalBuffersAreRevoked();
  } else {
    timings_.StartSplit("(Paused)RevokeAllThreadLocalBuffers");
    GetHeap()->RevokeAllThreadLocalBuffers();
    timings_.EndSplit();
  }
}

}  // namespace collector
}  // namespace gc
}  // namespace art