mark_sweep.cc revision 8016bdee7ca1a066221a5d2fe5e60890de950a5b
1/* 2 * Copyright (C) 2011 The Android Open Source Project 3 * 4 * Licensed under the Apache License, Version 2.0 (the "License"); 5 * you may not use this file except in compliance with the License. 6 * You may obtain a copy of the License at 7 * 8 * http://www.apache.org/licenses/LICENSE-2.0 9 * 10 * Unless required by applicable law or agreed to in writing, software 11 * distributed under the License is distributed on an "AS IS" BASIS, 12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 * See the License for the specific language governing permissions and 14 * limitations under the License. 15 */ 16 17#include "mark_sweep.h" 18 19#include <atomic> 20#include <functional> 21#include <numeric> 22#include <climits> 23#include <vector> 24 25#include "base/bounded_fifo.h" 26#include "base/logging.h" 27#include "base/macros.h" 28#include "base/mutex-inl.h" 29#include "base/systrace.h" 30#include "base/time_utils.h" 31#include "base/timing_logger.h" 32#include "gc/accounting/card_table-inl.h" 33#include "gc/accounting/heap_bitmap-inl.h" 34#include "gc/accounting/mod_union_table.h" 35#include "gc/accounting/space_bitmap-inl.h" 36#include "gc/heap.h" 37#include "gc/reference_processor.h" 38#include "gc/space/large_object_space.h" 39#include "gc/space/space-inl.h" 40#include "mark_sweep-inl.h" 41#include "mirror/object-inl.h" 42#include "runtime.h" 43#include "scoped_thread_state_change.h" 44#include "thread-inl.h" 45#include "thread_list.h" 46 47namespace art { 48namespace gc { 49namespace collector { 50 51// Performance options. 52static constexpr bool kUseRecursiveMark = false; 53static constexpr bool kUseMarkStackPrefetch = true; 54static constexpr size_t kSweepArrayChunkFreeSize = 1024; 55static constexpr bool kPreCleanCards = true; 56 57// Parallelism options. 58static constexpr bool kParallelCardScan = true; 59static constexpr bool kParallelRecursiveMark = true; 60// Don't attempt to parallelize mark stack processing unless the mark stack is at least n 61// elements. This is temporary until we reduce the overhead caused by allocating tasks, etc.. Not 62// having this can add overhead in ProcessReferences since we may end up doing many calls of 63// ProcessMarkStack with very small mark stacks. 64static constexpr size_t kMinimumParallelMarkStackSize = 128; 65static constexpr bool kParallelProcessMarkStack = true; 66 67// Profiling and information flags. 68static constexpr bool kProfileLargeObjects = false; 69static constexpr bool kMeasureOverhead = false; 70static constexpr bool kCountTasks = false; 71static constexpr bool kCountMarkedObjects = false; 72 73// Turn off kCheckLocks when profiling the GC since it slows the GC down by up to 40%. 74static constexpr bool kCheckLocks = kDebugLocking; 75static constexpr bool kVerifyRootsMarked = kIsDebugBuild; 76 77// If true, revoke the rosalloc thread-local buffers at the 78// checkpoint, as opposed to during the pause. 79static constexpr bool kRevokeRosAllocThreadLocalBuffersAtCheckpoint = true; 80 81void MarkSweep::BindBitmaps() { 82 TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings()); 83 WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_); 84 // Mark all of the spaces we never collect as immune. 
85 for (const auto& space : GetHeap()->GetContinuousSpaces()) { 86 if (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyNeverCollect) { 87 immune_spaces_.AddSpace(space); 88 } 89 } 90} 91 92MarkSweep::MarkSweep(Heap* heap, bool is_concurrent, const std::string& name_prefix) 93 : GarbageCollector(heap, 94 name_prefix + 95 (is_concurrent ? "concurrent mark sweep": "mark sweep")), 96 current_space_bitmap_(nullptr), 97 mark_bitmap_(nullptr), 98 mark_stack_(nullptr), 99 gc_barrier_(new Barrier(0)), 100 mark_stack_lock_("mark sweep mark stack lock", kMarkSweepMarkStackLock), 101 is_concurrent_(is_concurrent), 102 live_stack_freeze_size_(0) { 103 std::string error_msg; 104 MemMap* mem_map = MemMap::MapAnonymous( 105 "mark sweep sweep array free buffer", nullptr, 106 RoundUp(kSweepArrayChunkFreeSize * sizeof(mirror::Object*), kPageSize), 107 PROT_READ | PROT_WRITE, false, false, &error_msg); 108 CHECK(mem_map != nullptr) << "Couldn't allocate sweep array free buffer: " << error_msg; 109 sweep_array_free_buffer_mem_map_.reset(mem_map); 110} 111 112void MarkSweep::InitializePhase() { 113 TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings()); 114 mark_stack_ = heap_->GetMarkStack(); 115 DCHECK(mark_stack_ != nullptr); 116 immune_spaces_.Reset(); 117 no_reference_class_count_.StoreRelaxed(0); 118 normal_count_.StoreRelaxed(0); 119 class_count_.StoreRelaxed(0); 120 object_array_count_.StoreRelaxed(0); 121 other_count_.StoreRelaxed(0); 122 reference_count_.StoreRelaxed(0); 123 large_object_test_.StoreRelaxed(0); 124 large_object_mark_.StoreRelaxed(0); 125 overhead_time_ .StoreRelaxed(0); 126 work_chunks_created_.StoreRelaxed(0); 127 work_chunks_deleted_.StoreRelaxed(0); 128 mark_null_count_.StoreRelaxed(0); 129 mark_immune_count_.StoreRelaxed(0); 130 mark_fastpath_count_.StoreRelaxed(0); 131 mark_slowpath_count_.StoreRelaxed(0); 132 { 133 // TODO: I don't think we should need heap bitmap lock to Get the mark bitmap. 134 ReaderMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_); 135 mark_bitmap_ = heap_->GetMarkBitmap(); 136 } 137 if (!GetCurrentIteration()->GetClearSoftReferences()) { 138 // Always clear soft references if a non-sticky collection. 139 GetCurrentIteration()->SetClearSoftReferences(GetGcType() != collector::kGcTypeSticky); 140 } 141} 142 143void MarkSweep::RunPhases() { 144 Thread* self = Thread::Current(); 145 InitializePhase(); 146 Locks::mutator_lock_->AssertNotHeld(self); 147 if (IsConcurrent()) { 148 GetHeap()->PreGcVerification(this); 149 { 150 ReaderMutexLock mu(self, *Locks::mutator_lock_); 151 MarkingPhase(); 152 } 153 ScopedPause pause(this); 154 GetHeap()->PrePauseRosAllocVerification(this); 155 PausePhase(); 156 RevokeAllThreadLocalBuffers(); 157 } else { 158 ScopedPause pause(this); 159 GetHeap()->PreGcVerificationPaused(this); 160 MarkingPhase(); 161 GetHeap()->PrePauseRosAllocVerification(this); 162 PausePhase(); 163 RevokeAllThreadLocalBuffers(); 164 } 165 { 166 // Sweeping always done concurrently, even for non concurrent mark sweep. 
167     ReaderMutexLock mu(self, *Locks::mutator_lock_);
168     ReclaimPhase();
169   }
170   GetHeap()->PostGcVerification(this);
171   FinishPhase();
172 }
173
174 void MarkSweep::ProcessReferences(Thread* self) {
175   WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
176   GetHeap()->GetReferenceProcessor()->ProcessReferences(
177       true,
178       GetTimings(),
179       GetCurrentIteration()->GetClearSoftReferences(),
180       this);
181 }
182
183 void MarkSweep::PausePhase() {
184   TimingLogger::ScopedTiming t("(Paused)PausePhase", GetTimings());
185   Thread* self = Thread::Current();
186   Locks::mutator_lock_->AssertExclusiveHeld(self);
187   if (IsConcurrent()) {
188     // Handle the dirty objects if we are a concurrent GC.
189     WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
190     // Re-mark root set.
191     ReMarkRoots();
192     // Scan dirty objects; this is only needed for a concurrent GC, where mutators dirty cards during marking.
193     RecursiveMarkDirtyObjects(true, accounting::CardTable::kCardDirty);
194   }
195   {
196     TimingLogger::ScopedTiming t2("SwapStacks", GetTimings());
197     WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
198     heap_->SwapStacks();
199     live_stack_freeze_size_ = heap_->GetLiveStack()->Size();
200     // Need to revoke all the thread local allocation stacks since we just swapped the allocation
201     // stacks and don't want anybody to allocate into the live stack.
202     RevokeAllThreadLocalAllocationStacks(self);
203   }
204   heap_->PreSweepingGcVerification(this);
205   // Disallow new system weaks to prevent a race which occurs when someone adds a new system
206   // weak before we sweep them. Since this new system weak may not be marked, the GC may
207   // incorrectly sweep it. This also fixes a race where interning may attempt to return a strong
208   // reference to a string that is about to be swept.
209   Runtime::Current()->DisallowNewSystemWeaks();
210   // Enable the reference processing slow path; this needs to be done with mutators paused since
211   // there is no lock in the GetReferent fast path.
212   GetHeap()->GetReferenceProcessor()->EnableSlowPath();
213 }
214
215 void MarkSweep::PreCleanCards() {
216   // Don't do this for non-concurrent GCs since they don't have any dirty cards.
217   if (kPreCleanCards && IsConcurrent()) {
218     TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
219     Thread* self = Thread::Current();
220     CHECK(!Locks::mutator_lock_->IsExclusiveHeld(self));
221     // Process dirty cards and add dirty cards to mod union tables, also ages cards.
222     heap_->ProcessCards(GetTimings(), false, true, false);
223     // The checkpoint root marking is required to avoid a race condition which occurs if the
224     // following happens during a reference write:
225     // 1. mutator dirties the card (write barrier)
226     // 2. GC ages the card (the above ProcessCards call)
227     // 3. GC scans the object (the RecursiveMarkDirtyObjects call below)
228     // 4. mutator writes the value (corresponding to the write barrier in 1.)
229     // This causes the GC to age the card but not necessarily mark the reference which the mutator
230     // wrote into the object stored in the card.
231     // Having the checkpoint fixes this issue since it ensures that the card mark and the
232     // reference write are visible to the GC before the card is scanned (this is due to locks being
233     // acquired / released in the checkpoint code).
234     // The other roots are also marked to help reduce the pause.
235 MarkRootsCheckpoint(self, false); 236 MarkNonThreadRoots(); 237 MarkConcurrentRoots( 238 static_cast<VisitRootFlags>(kVisitRootFlagClearRootLog | kVisitRootFlagNewRoots)); 239 // Process the newly aged cards. 240 RecursiveMarkDirtyObjects(false, accounting::CardTable::kCardDirty - 1); 241 // TODO: Empty allocation stack to reduce the number of objects we need to test / mark as live 242 // in the next GC. 243 } 244} 245 246void MarkSweep::RevokeAllThreadLocalAllocationStacks(Thread* self) { 247 if (kUseThreadLocalAllocationStack) { 248 TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings()); 249 Locks::mutator_lock_->AssertExclusiveHeld(self); 250 heap_->RevokeAllThreadLocalAllocationStacks(self); 251 } 252} 253 254void MarkSweep::MarkingPhase() { 255 TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings()); 256 Thread* self = Thread::Current(); 257 BindBitmaps(); 258 FindDefaultSpaceBitmap(); 259 // Process dirty cards and add dirty cards to mod union tables. 260 // If the GC type is non sticky, then we just clear the cards instead of ageing them. 261 heap_->ProcessCards(GetTimings(), false, true, GetGcType() != kGcTypeSticky); 262 WriterMutexLock mu(self, *Locks::heap_bitmap_lock_); 263 MarkRoots(self); 264 MarkReachableObjects(); 265 // Pre-clean dirtied cards to reduce pauses. 266 PreCleanCards(); 267} 268 269class MarkSweep::ScanObjectVisitor { 270 public: 271 explicit ScanObjectVisitor(MarkSweep* const mark_sweep) ALWAYS_INLINE 272 : mark_sweep_(mark_sweep) {} 273 274 void operator()(mirror::Object* obj) const 275 ALWAYS_INLINE 276 REQUIRES(Locks::heap_bitmap_lock_) 277 SHARED_REQUIRES(Locks::mutator_lock_) { 278 if (kCheckLocks) { 279 Locks::mutator_lock_->AssertSharedHeld(Thread::Current()); 280 Locks::heap_bitmap_lock_->AssertExclusiveHeld(Thread::Current()); 281 } 282 mark_sweep_->ScanObject(obj); 283 } 284 285 private: 286 MarkSweep* const mark_sweep_; 287}; 288 289void MarkSweep::UpdateAndMarkModUnion() { 290 for (const auto& space : immune_spaces_.GetSpaces()) { 291 const char* name = space->IsZygoteSpace() 292 ? "UpdateAndMarkZygoteModUnionTable" 293 : "UpdateAndMarkImageModUnionTable"; 294 DCHECK(space->IsZygoteSpace() || space->IsImageSpace()) << *space; 295 TimingLogger::ScopedTiming t(name, GetTimings()); 296 accounting::ModUnionTable* mod_union_table = heap_->FindModUnionTableFromSpace(space); 297 if (mod_union_table != nullptr) { 298 mod_union_table->UpdateAndMarkReferences(this); 299 } else { 300 // No mod-union table, scan all the live bits. This can only occur for app images. 301 space->GetLiveBitmap()->VisitMarkedRange(reinterpret_cast<uintptr_t>(space->Begin()), 302 reinterpret_cast<uintptr_t>(space->End()), 303 ScanObjectVisitor(this)); 304 } 305 } 306} 307 308void MarkSweep::MarkReachableObjects() { 309 UpdateAndMarkModUnion(); 310 // Recursively mark all the non-image bits set in the mark bitmap. 311 RecursiveMark(); 312} 313 314void MarkSweep::ReclaimPhase() { 315 TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings()); 316 Thread* const self = Thread::Current(); 317 // Process the references concurrently. 318 ProcessReferences(self); 319 SweepSystemWeaks(self); 320 Runtime* const runtime = Runtime::Current(); 321 runtime->AllowNewSystemWeaks(); 322 // Clean up class loaders after system weaks are swept since that is how we know if class 323 // unloading occurred. 324 runtime->GetClassLinker()->CleanupClassLoaders(); 325 { 326 WriterMutexLock mu(self, *Locks::heap_bitmap_lock_); 327 GetHeap()->RecordFreeRevoke(); 328 // Reclaim unmarked objects. 
329     Sweep(false);
330     // Swap the live and mark bitmaps for each space which we modified. This is an
331     // optimization that enables us to not clear live bits inside of the sweep. Only swaps unbound
332     // bitmaps.
333     SwapBitmaps();
334     // Unbind the live and mark bitmaps.
335     GetHeap()->UnBindBitmaps();
336   }
337 }
338
339 void MarkSweep::FindDefaultSpaceBitmap() {
340   TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
341   for (const auto& space : GetHeap()->GetContinuousSpaces()) {
342     accounting::ContinuousSpaceBitmap* bitmap = space->GetMarkBitmap();
343     // We want to have the main space instead of the non-moving space if possible.
344     if (bitmap != nullptr &&
345         space->GetGcRetentionPolicy() == space::kGcRetentionPolicyAlwaysCollect) {
346       current_space_bitmap_ = bitmap;
347       // If we are not the non-moving space, exit the loop early since this will be good enough.
348       if (space != heap_->GetNonMovingSpace()) {
349         break;
350       }
351     }
352   }
353   CHECK(current_space_bitmap_ != nullptr) << "Could not find a default mark bitmap\n"
354       << heap_->DumpSpaces();
355 }
356
357 void MarkSweep::ExpandMarkStack() {
358   ResizeMarkStack(mark_stack_->Capacity() * 2);
359 }
360
361 void MarkSweep::ResizeMarkStack(size_t new_size) {
362   // Rare case, no need to have Thread::Current be a parameter.
363   if (UNLIKELY(mark_stack_->Size() < mark_stack_->Capacity())) {
364     // Someone else acquired the lock and expanded the mark stack before us.
365     return;
366   }
367   std::vector<StackReference<mirror::Object>> temp(mark_stack_->Begin(), mark_stack_->End());
368   CHECK_LE(mark_stack_->Size(), new_size);
369   mark_stack_->Resize(new_size);
370   for (auto& obj : temp) {
371     mark_stack_->PushBack(obj.AsMirrorPtr());
372   }
373 }
374
375 mirror::Object* MarkSweep::MarkObject(mirror::Object* obj) {
376   MarkObject(obj, nullptr, MemberOffset(0));
377   return obj;
378 }
379
380 inline void MarkSweep::MarkObjectNonNullParallel(mirror::Object* obj) {
381   DCHECK(obj != nullptr);
382   if (MarkObjectParallel(obj)) {
383     MutexLock mu(Thread::Current(), mark_stack_lock_);
384     if (UNLIKELY(mark_stack_->Size() >= mark_stack_->Capacity())) {
385       ExpandMarkStack();
386     }
387     // The object must be pushed on to the mark stack.
388     mark_stack_->PushBack(obj);
389   }
390 }
391
392 bool MarkSweep::IsMarkedHeapReference(mirror::HeapReference<mirror::Object>* ref) {
393   return IsMarked(ref->AsMirrorPtr());
394 }
395
396 class MarkSweep::MarkObjectSlowPath {
397  public:
398   explicit MarkObjectSlowPath(MarkSweep* mark_sweep,
399                               mirror::Object* holder = nullptr,
400                               MemberOffset offset = MemberOffset(0))
401       : mark_sweep_(mark_sweep),
402         holder_(holder),
403         offset_(offset) {}
404
405   void operator()(const mirror::Object* obj) const NO_THREAD_SAFETY_ANALYSIS {
406     if (kProfileLargeObjects) {
407       // TODO: Differentiate between marking and testing somehow.
408       ++mark_sweep_->large_object_test_;
409       ++mark_sweep_->large_object_mark_;
410     }
411     space::LargeObjectSpace* large_object_space = mark_sweep_->GetHeap()->GetLargeObjectsSpace();
412     if (UNLIKELY(obj == nullptr || !IsAligned<kPageSize>(obj) ||
413                  (kIsDebugBuild && large_object_space != nullptr &&
414                   !large_object_space->Contains(obj)))) {
415       LOG(INTERNAL_FATAL) << "Tried to mark " << obj << " not contained by any spaces";
416       if (holder_ != nullptr) {
417         size_t holder_size = holder_->SizeOf();
418         ArtField* field = holder_->FindFieldByOffset(offset_);
419         LOG(INTERNAL_FATAL) << "Field info: "
420                             << " holder=" << holder_
421                             << " holder is "
422                             << (mark_sweep_->GetHeap()->IsLiveObjectLocked(holder_)
423                                 ? "alive" : "dead")
424                             << " holder_size=" << holder_size
425                             << " holder_type=" << PrettyTypeOf(holder_)
426                             << " offset=" << offset_.Uint32Value()
427                             << " field=" << (field != nullptr ? field->GetName() : "nullptr")
428                             << " field_type="
429                             << (field != nullptr ? field->GetTypeDescriptor() : "")
430                             << " first_ref_field_offset="
431                             << (holder_->IsClass()
432                                 ? holder_->AsClass()->GetFirstReferenceStaticFieldOffset(
433                                     sizeof(void*))
434                                 : holder_->GetClass()->GetFirstReferenceInstanceFieldOffset())
435                             << " num_of_ref_fields="
436                             << (holder_->IsClass()
437                                 ? holder_->AsClass()->NumReferenceStaticFields()
438                                 : holder_->GetClass()->NumReferenceInstanceFields())
439                             << "\n";
440         // Print the memory content of the holder.
441         for (size_t i = 0; i < holder_size / sizeof(uint32_t); ++i) {
442           uint32_t* p = reinterpret_cast<uint32_t*>(holder_);
443           LOG(INTERNAL_FATAL) << &p[i] << ": " << "holder+" << (i * sizeof(uint32_t)) << " = "
444                               << std::hex << p[i];
445         }
446       }
447       PrintFileToLog("/proc/self/maps", LogSeverity::INTERNAL_FATAL);
448       MemMap::DumpMaps(LOG(INTERNAL_FATAL), true);
449       {
450         LOG(INTERNAL_FATAL) << "Attempting to see if it's a bad root";
451         Thread* self = Thread::Current();
452         if (Locks::mutator_lock_->IsExclusiveHeld(self)) {
453           mark_sweep_->VerifyRoots();
454         } else {
455           const bool heap_bitmap_exclusive_locked =
456               Locks::heap_bitmap_lock_->IsExclusiveHeld(self);
457           if (heap_bitmap_exclusive_locked) {
458             Locks::heap_bitmap_lock_->ExclusiveUnlock(self);
459           }
460           {
461             ScopedThreadSuspension sts(self, kSuspended);
462             ScopedSuspendAll ssa(__FUNCTION__);
463             mark_sweep_->VerifyRoots();
464           }
465           if (heap_bitmap_exclusive_locked) {
466             Locks::heap_bitmap_lock_->ExclusiveLock(self);
467           }
468         }
469       }
470       LOG(FATAL) << "Can't mark invalid object";
471     }
472   }
473
474  private:
475   MarkSweep* const mark_sweep_;
476   mirror::Object* const holder_;
477   MemberOffset offset_;
478 };
479
480 inline void MarkSweep::MarkObjectNonNull(mirror::Object* obj,
481                                          mirror::Object* holder,
482                                          MemberOffset offset) {
483   DCHECK(obj != nullptr);
484   if (kUseBakerOrBrooksReadBarrier) {
485     // Verify all the objects have the correct pointer installed.
486     obj->AssertReadBarrierPointer();
487   }
488   if (immune_spaces_.IsInImmuneRegion(obj)) {
489     if (kCountMarkedObjects) {
490       ++mark_immune_count_;
491     }
492     DCHECK(mark_bitmap_->Test(obj));
493   } else if (LIKELY(current_space_bitmap_->HasAddress(obj))) {
494     if (kCountMarkedObjects) {
495       ++mark_fastpath_count_;
496     }
497     if (UNLIKELY(!current_space_bitmap_->Set(obj))) {
498       PushOnMarkStack(obj);  // This object was not previously marked.
499 } 500 } else { 501 if (kCountMarkedObjects) { 502 ++mark_slowpath_count_; 503 } 504 MarkObjectSlowPath visitor(this, holder, offset); 505 // TODO: We already know that the object is not in the current_space_bitmap_ but MarkBitmap::Set 506 // will check again. 507 if (!mark_bitmap_->Set(obj, visitor)) { 508 PushOnMarkStack(obj); // Was not already marked, push. 509 } 510 } 511} 512 513inline void MarkSweep::PushOnMarkStack(mirror::Object* obj) { 514 if (UNLIKELY(mark_stack_->Size() >= mark_stack_->Capacity())) { 515 // Lock is not needed but is here anyways to please annotalysis. 516 MutexLock mu(Thread::Current(), mark_stack_lock_); 517 ExpandMarkStack(); 518 } 519 // The object must be pushed on to the mark stack. 520 mark_stack_->PushBack(obj); 521} 522 523inline bool MarkSweep::MarkObjectParallel(mirror::Object* obj) { 524 DCHECK(obj != nullptr); 525 if (kUseBakerOrBrooksReadBarrier) { 526 // Verify all the objects have the correct pointer installed. 527 obj->AssertReadBarrierPointer(); 528 } 529 if (immune_spaces_.IsInImmuneRegion(obj)) { 530 DCHECK(IsMarked(obj) != nullptr); 531 return false; 532 } 533 // Try to take advantage of locality of references within a space, failing this find the space 534 // the hard way. 535 accounting::ContinuousSpaceBitmap* object_bitmap = current_space_bitmap_; 536 if (LIKELY(object_bitmap->HasAddress(obj))) { 537 return !object_bitmap->AtomicTestAndSet(obj); 538 } 539 MarkObjectSlowPath visitor(this); 540 return !mark_bitmap_->AtomicTestAndSet(obj, visitor); 541} 542 543void MarkSweep::MarkHeapReference(mirror::HeapReference<mirror::Object>* ref) { 544 MarkObject(ref->AsMirrorPtr(), nullptr, MemberOffset(0)); 545} 546 547// Used to mark objects when processing the mark stack. If an object is null, it is not marked. 548inline void MarkSweep::MarkObject(mirror::Object* obj, 549 mirror::Object* holder, 550 MemberOffset offset) { 551 if (obj != nullptr) { 552 MarkObjectNonNull(obj, holder, offset); 553 } else if (kCountMarkedObjects) { 554 ++mark_null_count_; 555 } 556} 557 558class MarkSweep::VerifyRootMarkedVisitor : public SingleRootVisitor { 559 public: 560 explicit VerifyRootMarkedVisitor(MarkSweep* collector) : collector_(collector) { } 561 562 void VisitRoot(mirror::Object* root, const RootInfo& info) OVERRIDE 563 SHARED_REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_) { 564 CHECK(collector_->IsMarked(root) != nullptr) << info.ToString(); 565 } 566 567 private: 568 MarkSweep* const collector_; 569}; 570 571void MarkSweep::VisitRoots(mirror::Object*** roots, 572 size_t count, 573 const RootInfo& info ATTRIBUTE_UNUSED) { 574 for (size_t i = 0; i < count; ++i) { 575 MarkObjectNonNull(*roots[i]); 576 } 577} 578 579void MarkSweep::VisitRoots(mirror::CompressedReference<mirror::Object>** roots, 580 size_t count, 581 const RootInfo& info ATTRIBUTE_UNUSED) { 582 for (size_t i = 0; i < count; ++i) { 583 MarkObjectNonNull(roots[i]->AsMirrorPtr()); 584 } 585} 586 587class MarkSweep::VerifyRootVisitor : public SingleRootVisitor { 588 public: 589 void VisitRoot(mirror::Object* root, const RootInfo& info) OVERRIDE 590 SHARED_REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_) { 591 // See if the root is on any space bitmap. 
592 auto* heap = Runtime::Current()->GetHeap(); 593 if (heap->GetLiveBitmap()->GetContinuousSpaceBitmap(root) == nullptr) { 594 space::LargeObjectSpace* large_object_space = heap->GetLargeObjectsSpace(); 595 if (large_object_space != nullptr && !large_object_space->Contains(root)) { 596 LOG(INTERNAL_FATAL) << "Found invalid root: " << root << " " << info; 597 } 598 } 599 } 600}; 601 602void MarkSweep::VerifyRoots() { 603 VerifyRootVisitor visitor; 604 Runtime::Current()->GetThreadList()->VisitRoots(&visitor); 605} 606 607void MarkSweep::MarkRoots(Thread* self) { 608 TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings()); 609 if (Locks::mutator_lock_->IsExclusiveHeld(self)) { 610 // If we exclusively hold the mutator lock, all threads must be suspended. 611 Runtime::Current()->VisitRoots(this); 612 RevokeAllThreadLocalAllocationStacks(self); 613 } else { 614 MarkRootsCheckpoint(self, kRevokeRosAllocThreadLocalBuffersAtCheckpoint); 615 // At this point the live stack should no longer have any mutators which push into it. 616 MarkNonThreadRoots(); 617 MarkConcurrentRoots( 618 static_cast<VisitRootFlags>(kVisitRootFlagAllRoots | kVisitRootFlagStartLoggingNewRoots)); 619 } 620} 621 622void MarkSweep::MarkNonThreadRoots() { 623 TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings()); 624 Runtime::Current()->VisitNonThreadRoots(this); 625} 626 627void MarkSweep::MarkConcurrentRoots(VisitRootFlags flags) { 628 TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings()); 629 // Visit all runtime roots and clear dirty flags. 630 Runtime::Current()->VisitConcurrentRoots( 631 this, static_cast<VisitRootFlags>(flags | kVisitRootFlagNonMoving)); 632} 633 634class MarkSweep::DelayReferenceReferentVisitor { 635 public: 636 explicit DelayReferenceReferentVisitor(MarkSweep* collector) : collector_(collector) {} 637 638 void operator()(mirror::Class* klass, mirror::Reference* ref) const 639 REQUIRES(Locks::heap_bitmap_lock_) 640 SHARED_REQUIRES(Locks::mutator_lock_) { 641 collector_->DelayReferenceReferent(klass, ref); 642 } 643 644 private: 645 MarkSweep* const collector_; 646}; 647 648template <bool kUseFinger = false> 649class MarkSweep::MarkStackTask : public Task { 650 public: 651 MarkStackTask(ThreadPool* thread_pool, 652 MarkSweep* mark_sweep, 653 size_t mark_stack_size, 654 StackReference<mirror::Object>* mark_stack) 655 : mark_sweep_(mark_sweep), 656 thread_pool_(thread_pool), 657 mark_stack_pos_(mark_stack_size) { 658 // We may have to copy part of an existing mark stack when another mark stack overflows. 659 if (mark_stack_size != 0) { 660 DCHECK(mark_stack != nullptr); 661 // TODO: Check performance? 
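      // Copy the handed-off slice of mark stack entries into this task's local buffer.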
662 std::copy(mark_stack, mark_stack + mark_stack_size, mark_stack_); 663 } 664 if (kCountTasks) { 665 ++mark_sweep_->work_chunks_created_; 666 } 667 } 668 669 static const size_t kMaxSize = 1 * KB; 670 671 protected: 672 class MarkObjectParallelVisitor { 673 public: 674 ALWAYS_INLINE MarkObjectParallelVisitor(MarkStackTask<kUseFinger>* chunk_task, 675 MarkSweep* mark_sweep) 676 : chunk_task_(chunk_task), mark_sweep_(mark_sweep) {} 677 678 ALWAYS_INLINE void operator()(mirror::Object* obj, 679 MemberOffset offset, 680 bool is_static ATTRIBUTE_UNUSED) const 681 SHARED_REQUIRES(Locks::mutator_lock_) { 682 Mark(obj->GetFieldObject<mirror::Object>(offset)); 683 } 684 685 void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const 686 SHARED_REQUIRES(Locks::mutator_lock_) { 687 if (!root->IsNull()) { 688 VisitRoot(root); 689 } 690 } 691 692 void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const 693 SHARED_REQUIRES(Locks::mutator_lock_) { 694 if (kCheckLocks) { 695 Locks::mutator_lock_->AssertSharedHeld(Thread::Current()); 696 Locks::heap_bitmap_lock_->AssertExclusiveHeld(Thread::Current()); 697 } 698 Mark(root->AsMirrorPtr()); 699 } 700 701 private: 702 ALWAYS_INLINE void Mark(mirror::Object* ref) const SHARED_REQUIRES(Locks::mutator_lock_) { 703 if (ref != nullptr && mark_sweep_->MarkObjectParallel(ref)) { 704 if (kUseFinger) { 705 std::atomic_thread_fence(std::memory_order_seq_cst); 706 if (reinterpret_cast<uintptr_t>(ref) >= 707 static_cast<uintptr_t>(mark_sweep_->atomic_finger_.LoadRelaxed())) { 708 return; 709 } 710 } 711 chunk_task_->MarkStackPush(ref); 712 } 713 } 714 715 MarkStackTask<kUseFinger>* const chunk_task_; 716 MarkSweep* const mark_sweep_; 717 }; 718 719 class ScanObjectParallelVisitor { 720 public: 721 ALWAYS_INLINE explicit ScanObjectParallelVisitor(MarkStackTask<kUseFinger>* chunk_task) 722 : chunk_task_(chunk_task) {} 723 724 // No thread safety analysis since multiple threads will use this visitor. 725 void operator()(mirror::Object* obj) const 726 REQUIRES(Locks::heap_bitmap_lock_) 727 SHARED_REQUIRES(Locks::mutator_lock_) { 728 MarkSweep* const mark_sweep = chunk_task_->mark_sweep_; 729 MarkObjectParallelVisitor mark_visitor(chunk_task_, mark_sweep); 730 DelayReferenceReferentVisitor ref_visitor(mark_sweep); 731 mark_sweep->ScanObjectVisit(obj, mark_visitor, ref_visitor); 732 } 733 734 private: 735 MarkStackTask<kUseFinger>* const chunk_task_; 736 }; 737 738 virtual ~MarkStackTask() { 739 // Make sure that we have cleared our mark stack. 740 DCHECK_EQ(mark_stack_pos_, 0U); 741 if (kCountTasks) { 742 ++mark_sweep_->work_chunks_deleted_; 743 } 744 } 745 746 MarkSweep* const mark_sweep_; 747 ThreadPool* const thread_pool_; 748 // Thread local mark stack for this task. 749 StackReference<mirror::Object> mark_stack_[kMaxSize]; 750 // Mark stack position. 751 size_t mark_stack_pos_; 752 753 ALWAYS_INLINE void MarkStackPush(mirror::Object* obj) 754 SHARED_REQUIRES(Locks::mutator_lock_) { 755 if (UNLIKELY(mark_stack_pos_ == kMaxSize)) { 756 // Mark stack overflow, give 1/2 the stack to the thread pool as a new work task. 
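      // Keep the lower half of the local buffer for this task; the upper half is handed off to
      // the new task created below.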
757 mark_stack_pos_ /= 2; 758 auto* task = new MarkStackTask(thread_pool_, 759 mark_sweep_, 760 kMaxSize - mark_stack_pos_, 761 mark_stack_ + mark_stack_pos_); 762 thread_pool_->AddTask(Thread::Current(), task); 763 } 764 DCHECK(obj != nullptr); 765 DCHECK_LT(mark_stack_pos_, kMaxSize); 766 mark_stack_[mark_stack_pos_++].Assign(obj); 767 } 768 769 virtual void Finalize() { 770 delete this; 771 } 772 773 // Scans all of the objects 774 virtual void Run(Thread* self ATTRIBUTE_UNUSED) 775 REQUIRES(Locks::heap_bitmap_lock_) 776 SHARED_REQUIRES(Locks::mutator_lock_) { 777 ScanObjectParallelVisitor visitor(this); 778 // TODO: Tune this. 779 static const size_t kFifoSize = 4; 780 BoundedFifoPowerOfTwo<mirror::Object*, kFifoSize> prefetch_fifo; 781 for (;;) { 782 mirror::Object* obj = nullptr; 783 if (kUseMarkStackPrefetch) { 784 while (mark_stack_pos_ != 0 && prefetch_fifo.size() < kFifoSize) { 785 mirror::Object* const mark_stack_obj = mark_stack_[--mark_stack_pos_].AsMirrorPtr(); 786 DCHECK(mark_stack_obj != nullptr); 787 __builtin_prefetch(mark_stack_obj); 788 prefetch_fifo.push_back(mark_stack_obj); 789 } 790 if (UNLIKELY(prefetch_fifo.empty())) { 791 break; 792 } 793 obj = prefetch_fifo.front(); 794 prefetch_fifo.pop_front(); 795 } else { 796 if (UNLIKELY(mark_stack_pos_ == 0)) { 797 break; 798 } 799 obj = mark_stack_[--mark_stack_pos_].AsMirrorPtr(); 800 } 801 DCHECK(obj != nullptr); 802 visitor(obj); 803 } 804 } 805}; 806 807class MarkSweep::CardScanTask : public MarkStackTask<false> { 808 public: 809 CardScanTask(ThreadPool* thread_pool, 810 MarkSweep* mark_sweep, 811 accounting::ContinuousSpaceBitmap* bitmap, 812 uint8_t* begin, 813 uint8_t* end, 814 uint8_t minimum_age, 815 size_t mark_stack_size, 816 StackReference<mirror::Object>* mark_stack_obj, 817 bool clear_card) 818 : MarkStackTask<false>(thread_pool, mark_sweep, mark_stack_size, mark_stack_obj), 819 bitmap_(bitmap), 820 begin_(begin), 821 end_(end), 822 minimum_age_(minimum_age), 823 clear_card_(clear_card) {} 824 825 protected: 826 accounting::ContinuousSpaceBitmap* const bitmap_; 827 uint8_t* const begin_; 828 uint8_t* const end_; 829 const uint8_t minimum_age_; 830 const bool clear_card_; 831 832 virtual void Finalize() { 833 delete this; 834 } 835 836 virtual void Run(Thread* self) NO_THREAD_SAFETY_ANALYSIS { 837 ScanObjectParallelVisitor visitor(this); 838 accounting::CardTable* card_table = mark_sweep_->GetHeap()->GetCardTable(); 839 size_t cards_scanned = clear_card_ 840 ? card_table->Scan<true>(bitmap_, begin_, end_, visitor, minimum_age_) 841 : card_table->Scan<false>(bitmap_, begin_, end_, visitor, minimum_age_); 842 VLOG(heap) << "Parallel scanning cards " << reinterpret_cast<void*>(begin_) << " - " 843 << reinterpret_cast<void*>(end_) << " = " << cards_scanned; 844 // Finish by emptying our local mark stack. 845 MarkStackTask::Run(self); 846 } 847}; 848 849size_t MarkSweep::GetThreadCount(bool paused) const { 850 // Use less threads if we are in a background state (non jank perceptible) since we want to leave 851 // more CPU time for the foreground apps. 852 if (heap_->GetThreadPool() == nullptr || !Runtime::Current()->InJankPerceptibleProcessState()) { 853 return 1; 854 } 855 return (paused ? 
heap_->GetParallelGCThreadCount() : heap_->GetConcGCThreadCount()) + 1; 856} 857 858void MarkSweep::ScanGrayObjects(bool paused, uint8_t minimum_age) { 859 accounting::CardTable* card_table = GetHeap()->GetCardTable(); 860 ThreadPool* thread_pool = GetHeap()->GetThreadPool(); 861 size_t thread_count = GetThreadCount(paused); 862 // The parallel version with only one thread is faster for card scanning, TODO: fix. 863 if (kParallelCardScan && thread_count > 1) { 864 Thread* self = Thread::Current(); 865 // Can't have a different split for each space since multiple spaces can have their cards being 866 // scanned at the same time. 867 TimingLogger::ScopedTiming t(paused ? "(Paused)ScanGrayObjects" : __FUNCTION__, 868 GetTimings()); 869 // Try to take some of the mark stack since we can pass this off to the worker tasks. 870 StackReference<mirror::Object>* mark_stack_begin = mark_stack_->Begin(); 871 StackReference<mirror::Object>* mark_stack_end = mark_stack_->End(); 872 const size_t mark_stack_size = mark_stack_end - mark_stack_begin; 873 // Estimated number of work tasks we will create. 874 const size_t mark_stack_tasks = GetHeap()->GetContinuousSpaces().size() * thread_count; 875 DCHECK_NE(mark_stack_tasks, 0U); 876 const size_t mark_stack_delta = std::min(CardScanTask::kMaxSize / 2, 877 mark_stack_size / mark_stack_tasks + 1); 878 for (const auto& space : GetHeap()->GetContinuousSpaces()) { 879 if (space->GetMarkBitmap() == nullptr) { 880 continue; 881 } 882 uint8_t* card_begin = space->Begin(); 883 uint8_t* card_end = space->End(); 884 // Align up the end address. For example, the image space's end 885 // may not be card-size-aligned. 886 card_end = AlignUp(card_end, accounting::CardTable::kCardSize); 887 DCHECK_ALIGNED(card_begin, accounting::CardTable::kCardSize); 888 DCHECK_ALIGNED(card_end, accounting::CardTable::kCardSize); 889 // Calculate how many bytes of heap we will scan, 890 const size_t address_range = card_end - card_begin; 891 // Calculate how much address range each task gets. 892 const size_t card_delta = RoundUp(address_range / thread_count + 1, 893 accounting::CardTable::kCardSize); 894 // If paused and the space is neither zygote nor image space, we could clear the dirty 895 // cards to avoid accumulating them to increase card scanning load in the following GC 896 // cycles. We need to keep dirty cards of image space and zygote space in order to track 897 // references to the other spaces. 898 bool clear_card = paused && !space->IsZygoteSpace() && !space->IsImageSpace(); 899 // Create the worker tasks for this space. 900 while (card_begin != card_end) { 901 // Add a range of cards. 902 size_t addr_remaining = card_end - card_begin; 903 size_t card_increment = std::min(card_delta, addr_remaining); 904 // Take from the back of the mark stack. 905 size_t mark_stack_remaining = mark_stack_end - mark_stack_begin; 906 size_t mark_stack_increment = std::min(mark_stack_delta, mark_stack_remaining); 907 mark_stack_end -= mark_stack_increment; 908 mark_stack_->PopBackCount(static_cast<int32_t>(mark_stack_increment)); 909 DCHECK_EQ(mark_stack_end, mark_stack_->End()); 910 // Add the new task to the thread pool. 
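        // Each task scans the card range [card_begin, card_begin + card_increment) and also
        // drains the slice of the global mark stack popped off above.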
911 auto* task = new CardScanTask(thread_pool, 912 this, 913 space->GetMarkBitmap(), 914 card_begin, 915 card_begin + card_increment, 916 minimum_age, 917 mark_stack_increment, 918 mark_stack_end, 919 clear_card); 920 thread_pool->AddTask(self, task); 921 card_begin += card_increment; 922 } 923 } 924 925 // Note: the card scan below may dirty new cards (and scan them) 926 // as a side effect when a Reference object is encountered and 927 // queued during the marking. See b/11465268. 928 thread_pool->SetMaxActiveWorkers(thread_count - 1); 929 thread_pool->StartWorkers(self); 930 thread_pool->Wait(self, true, true); 931 thread_pool->StopWorkers(self); 932 } else { 933 for (const auto& space : GetHeap()->GetContinuousSpaces()) { 934 if (space->GetMarkBitmap() != nullptr) { 935 // Image spaces are handled properly since live == marked for them. 936 const char* name = nullptr; 937 switch (space->GetGcRetentionPolicy()) { 938 case space::kGcRetentionPolicyNeverCollect: 939 name = paused ? "(Paused)ScanGrayImageSpaceObjects" : "ScanGrayImageSpaceObjects"; 940 break; 941 case space::kGcRetentionPolicyFullCollect: 942 name = paused ? "(Paused)ScanGrayZygoteSpaceObjects" : "ScanGrayZygoteSpaceObjects"; 943 break; 944 case space::kGcRetentionPolicyAlwaysCollect: 945 name = paused ? "(Paused)ScanGrayAllocSpaceObjects" : "ScanGrayAllocSpaceObjects"; 946 break; 947 default: 948 LOG(FATAL) << "Unreachable"; 949 UNREACHABLE(); 950 } 951 TimingLogger::ScopedTiming t(name, GetTimings()); 952 ScanObjectVisitor visitor(this); 953 bool clear_card = paused && !space->IsZygoteSpace() && !space->IsImageSpace(); 954 if (clear_card) { 955 card_table->Scan<true>(space->GetMarkBitmap(), 956 space->Begin(), 957 space->End(), 958 visitor, 959 minimum_age); 960 } else { 961 card_table->Scan<false>(space->GetMarkBitmap(), 962 space->Begin(), 963 space->End(), 964 visitor, 965 minimum_age); 966 } 967 } 968 } 969 } 970} 971 972class MarkSweep::RecursiveMarkTask : public MarkStackTask<false> { 973 public: 974 RecursiveMarkTask(ThreadPool* thread_pool, 975 MarkSweep* mark_sweep, 976 accounting::ContinuousSpaceBitmap* bitmap, 977 uintptr_t begin, 978 uintptr_t end) 979 : MarkStackTask<false>(thread_pool, mark_sweep, 0, nullptr), 980 bitmap_(bitmap), 981 begin_(begin), 982 end_(end) {} 983 984 protected: 985 accounting::ContinuousSpaceBitmap* const bitmap_; 986 const uintptr_t begin_; 987 const uintptr_t end_; 988 989 virtual void Finalize() { 990 delete this; 991 } 992 993 // Scans all of the objects 994 virtual void Run(Thread* self) NO_THREAD_SAFETY_ANALYSIS { 995 ScanObjectParallelVisitor visitor(this); 996 bitmap_->VisitMarkedRange(begin_, end_, visitor); 997 // Finish by emptying our local mark stack. 998 MarkStackTask::Run(self); 999 } 1000}; 1001 1002// Populates the mark stack based on the set of marked objects and 1003// recursively marks until the mark stack is emptied. 1004void MarkSweep::RecursiveMark() { 1005 TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings()); 1006 // RecursiveMark will build the lists of known instances of the Reference classes. See 1007 // DelayReferenceReferent for details. 
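  // kUseRecursiveMark is constexpr false (see the performance options at the top of the file),
  // so this block is normally skipped and marking is driven by the ProcessMarkStack(false) call
  // at the end of this function.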
1008   if (kUseRecursiveMark) {
1009     const bool partial = GetGcType() == kGcTypePartial;
1010     ScanObjectVisitor scan_visitor(this);
1011     auto* self = Thread::Current();
1012     ThreadPool* thread_pool = heap_->GetThreadPool();
1013     size_t thread_count = GetThreadCount(false);
1014     const bool parallel = kParallelRecursiveMark && thread_count > 1;
1015     mark_stack_->Reset();
1016     for (const auto& space : GetHeap()->GetContinuousSpaces()) {
1017       if ((space->GetGcRetentionPolicy() == space::kGcRetentionPolicyAlwaysCollect) ||
1018           (!partial && space->GetGcRetentionPolicy() == space::kGcRetentionPolicyFullCollect)) {
1019         current_space_bitmap_ = space->GetMarkBitmap();
1020         if (current_space_bitmap_ == nullptr) {
1021           continue;
1022         }
1023         if (parallel) {
1024           // We will use the mark stack in the future.
1025           // CHECK(mark_stack_->IsEmpty());
1026           // This function does not handle heap end increasing, so we must use the space end.
1027           uintptr_t begin = reinterpret_cast<uintptr_t>(space->Begin());
1028           uintptr_t end = reinterpret_cast<uintptr_t>(space->End());
1029           atomic_finger_.StoreRelaxed(AtomicInteger::MaxValue());
1030
1031           // Create a few worker tasks.
1032           const size_t n = thread_count * 2;
1033           while (begin != end) {
1034             uintptr_t start = begin;
1035             uintptr_t delta = (end - begin) / n;
1036             delta = RoundUp(delta, KB);
1037             if (delta < 16 * KB) delta = end - begin;
1038             begin += delta;
1039             auto* task = new RecursiveMarkTask(thread_pool,
1040                                                this,
1041                                                current_space_bitmap_,
1042                                                start,
1043                                                begin);
1044             thread_pool->AddTask(self, task);
1045           }
1046           thread_pool->SetMaxActiveWorkers(thread_count - 1);
1047           thread_pool->StartWorkers(self);
1048           thread_pool->Wait(self, true, true);
1049           thread_pool->StopWorkers(self);
1050         } else {
1051           // This function does not handle heap end increasing, so we must use the space end.
1052 uintptr_t begin = reinterpret_cast<uintptr_t>(space->Begin()); 1053 uintptr_t end = reinterpret_cast<uintptr_t>(space->End()); 1054 current_space_bitmap_->VisitMarkedRange(begin, end, scan_visitor); 1055 } 1056 } 1057 } 1058 } 1059 ProcessMarkStack(false); 1060} 1061 1062void MarkSweep::RecursiveMarkDirtyObjects(bool paused, uint8_t minimum_age) { 1063 ScanGrayObjects(paused, minimum_age); 1064 ProcessMarkStack(paused); 1065} 1066 1067void MarkSweep::ReMarkRoots() { 1068 TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings()); 1069 Locks::mutator_lock_->AssertExclusiveHeld(Thread::Current()); 1070 Runtime::Current()->VisitRoots(this, static_cast<VisitRootFlags>( 1071 kVisitRootFlagNewRoots | kVisitRootFlagStopLoggingNewRoots | kVisitRootFlagClearRootLog)); 1072 if (kVerifyRootsMarked) { 1073 TimingLogger::ScopedTiming t2("(Paused)VerifyRoots", GetTimings()); 1074 VerifyRootMarkedVisitor visitor(this); 1075 Runtime::Current()->VisitRoots(&visitor); 1076 } 1077} 1078 1079void MarkSweep::SweepSystemWeaks(Thread* self) { 1080 TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings()); 1081 ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_); 1082 Runtime::Current()->SweepSystemWeaks(this); 1083} 1084 1085class MarkSweep::VerifySystemWeakVisitor : public IsMarkedVisitor { 1086 public: 1087 explicit VerifySystemWeakVisitor(MarkSweep* mark_sweep) : mark_sweep_(mark_sweep) {} 1088 1089 virtual mirror::Object* IsMarked(mirror::Object* obj) 1090 OVERRIDE 1091 SHARED_REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_) { 1092 mark_sweep_->VerifyIsLive(obj); 1093 return obj; 1094 } 1095 1096 MarkSweep* const mark_sweep_; 1097}; 1098 1099void MarkSweep::VerifyIsLive(const mirror::Object* obj) { 1100 if (!heap_->GetLiveBitmap()->Test(obj)) { 1101 // TODO: Consider live stack? Has this code bitrotted? 1102 CHECK(!heap_->allocation_stack_->Contains(obj)) 1103 << "Found dead object " << obj << "\n" << heap_->DumpSpaces(); 1104 } 1105} 1106 1107void MarkSweep::VerifySystemWeaks() { 1108 TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings()); 1109 // Verify system weaks, uses a special object visitor which returns the input object. 1110 VerifySystemWeakVisitor visitor(this); 1111 Runtime::Current()->SweepSystemWeaks(&visitor); 1112} 1113 1114class MarkSweep::CheckpointMarkThreadRoots : public Closure, public RootVisitor { 1115 public: 1116 CheckpointMarkThreadRoots(MarkSweep* mark_sweep, 1117 bool revoke_ros_alloc_thread_local_buffers_at_checkpoint) 1118 : mark_sweep_(mark_sweep), 1119 revoke_ros_alloc_thread_local_buffers_at_checkpoint_( 1120 revoke_ros_alloc_thread_local_buffers_at_checkpoint) { 1121 } 1122 1123 void VisitRoots(mirror::Object*** roots, size_t count, const RootInfo& info ATTRIBUTE_UNUSED) 1124 OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) 1125 REQUIRES(Locks::heap_bitmap_lock_) { 1126 for (size_t i = 0; i < count; ++i) { 1127 mark_sweep_->MarkObjectNonNullParallel(*roots[i]); 1128 } 1129 } 1130 1131 void VisitRoots(mirror::CompressedReference<mirror::Object>** roots, 1132 size_t count, 1133 const RootInfo& info ATTRIBUTE_UNUSED) 1134 OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) 1135 REQUIRES(Locks::heap_bitmap_lock_) { 1136 for (size_t i = 0; i < count; ++i) { 1137 mark_sweep_->MarkObjectNonNullParallel(roots[i]->AsMirrorPtr()); 1138 } 1139 } 1140 1141 virtual void Run(Thread* thread) OVERRIDE NO_THREAD_SAFETY_ANALYSIS { 1142 ScopedTrace trace("Marking thread roots"); 1143 // Note: self is not necessarily equal to thread since thread may be suspended. 
1144     Thread* const self = Thread::Current();
1145     CHECK(thread == self || thread->IsSuspended() || thread->GetState() == kWaitingPerformingGc)
1146         << thread->GetState() << " thread " << thread << " self " << self;
1147     thread->VisitRoots(this);
1148     if (revoke_ros_alloc_thread_local_buffers_at_checkpoint_) {
1149       ScopedTrace trace2("RevokeRosAllocThreadLocalBuffers");
1150       mark_sweep_->GetHeap()->RevokeRosAllocThreadLocalBuffers(thread);
1151     }
1152     // If thread is a running mutator, then act on behalf of the garbage collector.
1153     // See the code in ThreadList::RunCheckpoint.
1154     mark_sweep_->GetBarrier().Pass(self);
1155   }
1156
1157  private:
1158   MarkSweep* const mark_sweep_;
1159   const bool revoke_ros_alloc_thread_local_buffers_at_checkpoint_;
1160 };
1161
1162 void MarkSweep::MarkRootsCheckpoint(Thread* self,
1163                                     bool revoke_ros_alloc_thread_local_buffers_at_checkpoint) {
1164   TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
1165   CheckpointMarkThreadRoots check_point(this, revoke_ros_alloc_thread_local_buffers_at_checkpoint);
1166   ThreadList* thread_list = Runtime::Current()->GetThreadList();
1167   // Request that the checkpoint be run on all threads, returning a count of the threads that
1168   // must run through the barrier, including self.
1169   size_t barrier_count = thread_list->RunCheckpoint(&check_point);
1170   // Release locks then wait for all mutator threads to pass the barrier.
1171   // If there are no threads to wait for, which implies that all the checkpoint functions have
1172   // already finished, then there is no need to release the locks.
1173   if (barrier_count == 0) {
1174     return;
1175   }
1176   Locks::heap_bitmap_lock_->ExclusiveUnlock(self);
1177   Locks::mutator_lock_->SharedUnlock(self);
1178   {
1179     ScopedThreadStateChange tsc(self, kWaitingForCheckPointsToRun);
1180     gc_barrier_->Increment(self, barrier_count);
1181   }
1182   Locks::mutator_lock_->SharedLock(self);
1183   Locks::heap_bitmap_lock_->ExclusiveLock(self);
1184 }
1185
1186 void MarkSweep::SweepArray(accounting::ObjectStack* allocations, bool swap_bitmaps) {
1187   TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
1188   Thread* self = Thread::Current();
1189   mirror::Object** chunk_free_buffer = reinterpret_cast<mirror::Object**>(
1190       sweep_array_free_buffer_mem_map_->BaseBegin());
1191   size_t chunk_free_pos = 0;
1192   ObjectBytePair freed;
1193   ObjectBytePair freed_los;
1194   // How many objects are left in the array, modified after each space is swept.
1195   StackReference<mirror::Object>* objects = allocations->Begin();
1196   size_t count = allocations->Size();
1197   // Change the order to ensure that the non-moving space is swept last as an optimization.
1198   std::vector<space::ContinuousSpace*> sweep_spaces;
1199   space::ContinuousSpace* non_moving_space = nullptr;
1200   for (space::ContinuousSpace* space : heap_->GetContinuousSpaces()) {
1201     if (space->IsAllocSpace() &&
1202         !immune_spaces_.ContainsSpace(space) &&
1203         space->GetLiveBitmap() != nullptr) {
1204       if (space == heap_->GetNonMovingSpace()) {
1205         non_moving_space = space;
1206       } else {
1207         sweep_spaces.push_back(space);
1208       }
1209     }
1210   }
1211   // Unlikely to sweep a significant number of non-movable objects, so we do these after the
1212   // other alloc spaces as an optimization.
1213   if (non_moving_space != nullptr) {
1214     sweep_spaces.push_back(non_moving_space);
1215   }
1216   // Start by sweeping the continuous spaces.
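  // Each pass frees the unmarked objects that belong to 'space' and compacts the entries that
  // belong to other spaces to the front of the array via 'out', so later passes scan fewer
  // objects.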
1217   for (space::ContinuousSpace* space : sweep_spaces) {
1218     space::AllocSpace* alloc_space = space->AsAllocSpace();
1219     accounting::ContinuousSpaceBitmap* live_bitmap = space->GetLiveBitmap();
1220     accounting::ContinuousSpaceBitmap* mark_bitmap = space->GetMarkBitmap();
1221     if (swap_bitmaps) {
1222       std::swap(live_bitmap, mark_bitmap);
1223     }
1224     StackReference<mirror::Object>* out = objects;
1225     for (size_t i = 0; i < count; ++i) {
1226       mirror::Object* const obj = objects[i].AsMirrorPtr();
1227       if (kUseThreadLocalAllocationStack && obj == nullptr) {
1228         continue;
1229       }
1230       if (space->HasAddress(obj)) {
1231         // This object is in the space, remove it from the array and add it to the sweep buffer
1232         // if needed.
1233         if (!mark_bitmap->Test(obj)) {
1234           if (chunk_free_pos >= kSweepArrayChunkFreeSize) {
1235             TimingLogger::ScopedTiming t2("FreeList", GetTimings());
1236             freed.objects += chunk_free_pos;
1237             freed.bytes += alloc_space->FreeList(self, chunk_free_pos, chunk_free_buffer);
1238             chunk_free_pos = 0;
1239           }
1240           chunk_free_buffer[chunk_free_pos++] = obj;
1241         }
1242       } else {
1243         (out++)->Assign(obj);
1244       }
1245     }
1246     if (chunk_free_pos > 0) {
1247       TimingLogger::ScopedTiming t2("FreeList", GetTimings());
1248       freed.objects += chunk_free_pos;
1249       freed.bytes += alloc_space->FreeList(self, chunk_free_pos, chunk_free_buffer);
1250       chunk_free_pos = 0;
1251     }
1252     // All of the references which the space contained are no longer in the allocation stack,
1253     // update the count.
1254     count = out - objects;
1255   }
1256   // Handle the large object space.
1257   space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
1258   if (large_object_space != nullptr) {
1259     accounting::LargeObjectBitmap* large_live_objects = large_object_space->GetLiveBitmap();
1260     accounting::LargeObjectBitmap* large_mark_objects = large_object_space->GetMarkBitmap();
1261     if (swap_bitmaps) {
1262       std::swap(large_live_objects, large_mark_objects);
1263     }
1264     for (size_t i = 0; i < count; ++i) {
1265       mirror::Object* const obj = objects[i].AsMirrorPtr();
1266       // Handle large objects.
1267       if (kUseThreadLocalAllocationStack && obj == nullptr) {
1268         continue;
1269       }
1270       if (!large_mark_objects->Test(obj)) {
1271         ++freed_los.objects;
1272         freed_los.bytes += large_object_space->Free(self, obj);
1273       }
1274     }
1275   }
1276   {
1277     TimingLogger::ScopedTiming t2("RecordFree", GetTimings());
1278     RecordFree(freed);
1279     RecordFreeLOS(freed_los);
1280     t2.NewTiming("ResetStack");
1281     allocations->Reset();
1282   }
1283   sweep_array_free_buffer_mem_map_->MadviseDontNeedAndZero();
1284 }
1285
1286 void MarkSweep::Sweep(bool swap_bitmaps) {
1287   TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
1288   // Ensure that nobody inserted items in the live stack after we swapped the stacks.
1289   CHECK_GE(live_stack_freeze_size_, GetHeap()->GetLiveStack()->Size());
1290   {
1291     TimingLogger::ScopedTiming t2("MarkAllocStackAsLive", GetTimings());
1292     // Mark everything allocated since the last GC as live so that we can sweep concurrently,
1293     // knowing that new allocations won't be marked as live.
1294 accounting::ObjectStack* live_stack = heap_->GetLiveStack(); 1295 heap_->MarkAllocStackAsLive(live_stack); 1296 live_stack->Reset(); 1297 DCHECK(mark_stack_->IsEmpty()); 1298 } 1299 for (const auto& space : GetHeap()->GetContinuousSpaces()) { 1300 if (space->IsContinuousMemMapAllocSpace()) { 1301 space::ContinuousMemMapAllocSpace* alloc_space = space->AsContinuousMemMapAllocSpace(); 1302 TimingLogger::ScopedTiming split( 1303 alloc_space->IsZygoteSpace() ? "SweepZygoteSpace" : "SweepMallocSpace", 1304 GetTimings()); 1305 RecordFree(alloc_space->Sweep(swap_bitmaps)); 1306 } 1307 } 1308 SweepLargeObjects(swap_bitmaps); 1309} 1310 1311void MarkSweep::SweepLargeObjects(bool swap_bitmaps) { 1312 space::LargeObjectSpace* los = heap_->GetLargeObjectsSpace(); 1313 if (los != nullptr) { 1314 TimingLogger::ScopedTiming split(__FUNCTION__, GetTimings()); 1315 RecordFreeLOS(los->Sweep(swap_bitmaps)); 1316 } 1317} 1318 1319// Process the "referent" field in a java.lang.ref.Reference. If the referent has not yet been 1320// marked, put it on the appropriate list in the heap for later processing. 1321void MarkSweep::DelayReferenceReferent(mirror::Class* klass, mirror::Reference* ref) { 1322 heap_->GetReferenceProcessor()->DelayReferenceReferent(klass, ref, this); 1323} 1324 1325class MarkVisitor { 1326 public: 1327 ALWAYS_INLINE explicit MarkVisitor(MarkSweep* const mark_sweep) : mark_sweep_(mark_sweep) {} 1328 1329 ALWAYS_INLINE void operator()(mirror::Object* obj, 1330 MemberOffset offset, 1331 bool is_static ATTRIBUTE_UNUSED) const 1332 REQUIRES(Locks::heap_bitmap_lock_) 1333 SHARED_REQUIRES(Locks::mutator_lock_) { 1334 if (kCheckLocks) { 1335 Locks::mutator_lock_->AssertSharedHeld(Thread::Current()); 1336 Locks::heap_bitmap_lock_->AssertExclusiveHeld(Thread::Current()); 1337 } 1338 mark_sweep_->MarkObject(obj->GetFieldObject<mirror::Object>(offset), obj, offset); 1339 } 1340 1341 void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const 1342 REQUIRES(Locks::heap_bitmap_lock_) 1343 SHARED_REQUIRES(Locks::mutator_lock_) { 1344 if (!root->IsNull()) { 1345 VisitRoot(root); 1346 } 1347 } 1348 1349 void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const 1350 REQUIRES(Locks::heap_bitmap_lock_) 1351 SHARED_REQUIRES(Locks::mutator_lock_) { 1352 if (kCheckLocks) { 1353 Locks::mutator_lock_->AssertSharedHeld(Thread::Current()); 1354 Locks::heap_bitmap_lock_->AssertExclusiveHeld(Thread::Current()); 1355 } 1356 mark_sweep_->MarkObject(root->AsMirrorPtr()); 1357 } 1358 1359 private: 1360 MarkSweep* const mark_sweep_; 1361}; 1362 1363// Scans an object reference. Determines the type of the reference 1364// and dispatches to a specialized scanning routine. 1365void MarkSweep::ScanObject(mirror::Object* obj) { 1366 MarkVisitor mark_visitor(this); 1367 DelayReferenceReferentVisitor ref_visitor(this); 1368 ScanObjectVisit(obj, mark_visitor, ref_visitor); 1369} 1370 1371void MarkSweep::ProcessMarkStackParallel(size_t thread_count) { 1372 Thread* self = Thread::Current(); 1373 ThreadPool* thread_pool = GetHeap()->GetThreadPool(); 1374 const size_t chunk_size = std::min(mark_stack_->Size() / thread_count + 1, 1375 static_cast<size_t>(MarkStackTask<false>::kMaxSize)); 1376 CHECK_GT(chunk_size, 0U); 1377 // Split the current mark stack up into work tasks. 
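  // Each MarkStackTask below takes ownership of up to chunk_size entries; the stack itself is
  // reset once the thread pool has drained all tasks.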
1378 for (auto* it = mark_stack_->Begin(), *end = mark_stack_->End(); it < end; ) { 1379 const size_t delta = std::min(static_cast<size_t>(end - it), chunk_size); 1380 thread_pool->AddTask(self, new MarkStackTask<false>(thread_pool, this, delta, it)); 1381 it += delta; 1382 } 1383 thread_pool->SetMaxActiveWorkers(thread_count - 1); 1384 thread_pool->StartWorkers(self); 1385 thread_pool->Wait(self, true, true); 1386 thread_pool->StopWorkers(self); 1387 mark_stack_->Reset(); 1388 CHECK_EQ(work_chunks_created_.LoadSequentiallyConsistent(), 1389 work_chunks_deleted_.LoadSequentiallyConsistent()) 1390 << " some of the work chunks were leaked"; 1391} 1392 1393// Scan anything that's on the mark stack. 1394void MarkSweep::ProcessMarkStack(bool paused) { 1395 TimingLogger::ScopedTiming t(paused ? "(Paused)ProcessMarkStack" : __FUNCTION__, GetTimings()); 1396 size_t thread_count = GetThreadCount(paused); 1397 if (kParallelProcessMarkStack && thread_count > 1 && 1398 mark_stack_->Size() >= kMinimumParallelMarkStackSize) { 1399 ProcessMarkStackParallel(thread_count); 1400 } else { 1401 // TODO: Tune this. 1402 static const size_t kFifoSize = 4; 1403 BoundedFifoPowerOfTwo<mirror::Object*, kFifoSize> prefetch_fifo; 1404 for (;;) { 1405 mirror::Object* obj = nullptr; 1406 if (kUseMarkStackPrefetch) { 1407 while (!mark_stack_->IsEmpty() && prefetch_fifo.size() < kFifoSize) { 1408 mirror::Object* mark_stack_obj = mark_stack_->PopBack(); 1409 DCHECK(mark_stack_obj != nullptr); 1410 __builtin_prefetch(mark_stack_obj); 1411 prefetch_fifo.push_back(mark_stack_obj); 1412 } 1413 if (prefetch_fifo.empty()) { 1414 break; 1415 } 1416 obj = prefetch_fifo.front(); 1417 prefetch_fifo.pop_front(); 1418 } else { 1419 if (mark_stack_->IsEmpty()) { 1420 break; 1421 } 1422 obj = mark_stack_->PopBack(); 1423 } 1424 DCHECK(obj != nullptr); 1425 ScanObject(obj); 1426 } 1427 } 1428} 1429 1430inline mirror::Object* MarkSweep::IsMarked(mirror::Object* object) { 1431 if (immune_spaces_.IsInImmuneRegion(object)) { 1432 return object; 1433 } 1434 if (current_space_bitmap_->HasAddress(object)) { 1435 return current_space_bitmap_->Test(object) ? object : nullptr; 1436 } 1437 return mark_bitmap_->Test(object) ? object : nullptr; 1438} 1439 1440void MarkSweep::FinishPhase() { 1441 TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings()); 1442 if (kCountScannedTypes) { 1443 VLOG(gc) 1444 << "MarkSweep scanned" 1445 << " no reference objects=" << no_reference_class_count_.LoadRelaxed() 1446 << " normal objects=" << normal_count_.LoadRelaxed() 1447 << " classes=" << class_count_.LoadRelaxed() 1448 << " object arrays=" << object_array_count_.LoadRelaxed() 1449 << " references=" << reference_count_.LoadRelaxed() 1450 << " other=" << other_count_.LoadRelaxed(); 1451 } 1452 if (kCountTasks) { 1453 VLOG(gc) << "Total number of work chunks allocated: " << work_chunks_created_.LoadRelaxed(); 1454 } 1455 if (kMeasureOverhead) { 1456 VLOG(gc) << "Overhead time " << PrettyDuration(overhead_time_.LoadRelaxed()); 1457 } 1458 if (kProfileLargeObjects) { 1459 VLOG(gc) << "Large objects tested " << large_object_test_.LoadRelaxed() 1460 << " marked " << large_object_mark_.LoadRelaxed(); 1461 } 1462 if (kCountMarkedObjects) { 1463 VLOG(gc) << "Marked: null=" << mark_null_count_.LoadRelaxed() 1464 << " immune=" << mark_immune_count_.LoadRelaxed() 1465 << " fastpath=" << mark_fastpath_count_.LoadRelaxed() 1466 << " slowpath=" << mark_slowpath_count_.LoadRelaxed(); 1467 } 1468 CHECK(mark_stack_->IsEmpty()); // Ensure that the mark stack is empty. 
1469 mark_stack_->Reset(); 1470 Thread* const self = Thread::Current(); 1471 ReaderMutexLock mu(self, *Locks::mutator_lock_); 1472 WriterMutexLock mu2(self, *Locks::heap_bitmap_lock_); 1473 heap_->ClearMarkedObjects(); 1474} 1475 1476void MarkSweep::RevokeAllThreadLocalBuffers() { 1477 if (kRevokeRosAllocThreadLocalBuffersAtCheckpoint && IsConcurrent()) { 1478 // If concurrent, rosalloc thread-local buffers are revoked at the 1479 // thread checkpoint. Bump pointer space thread-local buffers must 1480 // not be in use. 1481 GetHeap()->AssertAllBumpPointerSpaceThreadLocalBuffersAreRevoked(); 1482 } else { 1483 TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings()); 1484 GetHeap()->RevokeAllThreadLocalBuffers(); 1485 } 1486} 1487 1488} // namespace collector 1489} // namespace gc 1490} // namespace art 1491