mark_sweep.cc revision fc80ff7d8025d720906ee43b793d07bd916ec160
/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "mark_sweep.h"

#include <atomic>
#include <functional>
#include <numeric>
#include <climits>
#include <vector>

#include "base/bounded_fifo.h"
#include "base/enums.h"
#include "base/logging.h"
#include "base/macros.h"
#include "base/mutex-inl.h"
#include "base/systrace.h"
#include "base/time_utils.h"
#include "base/timing_logger.h"
#include "gc/accounting/card_table-inl.h"
#include "gc/accounting/heap_bitmap-inl.h"
#include "gc/accounting/mod_union_table.h"
#include "gc/accounting/space_bitmap-inl.h"
#include "gc/heap.h"
#include "gc/reference_processor.h"
#include "gc/space/large_object_space.h"
#include "gc/space/space-inl.h"
#include "mark_sweep-inl.h"
#include "mirror/object-inl.h"
#include "runtime.h"
#include "scoped_thread_state_change-inl.h"
#include "thread-inl.h"
#include "thread_list.h"

namespace art {
namespace gc {
namespace collector {

// Performance options.
static constexpr bool kUseRecursiveMark = false;
static constexpr bool kUseMarkStackPrefetch = true;
static constexpr size_t kSweepArrayChunkFreeSize = 1024;
static constexpr bool kPreCleanCards = true;

// Parallelism options.
static constexpr bool kParallelCardScan = true;
static constexpr bool kParallelRecursiveMark = true;
// Don't attempt to parallelize mark stack processing unless the mark stack is at least n
// elements. This is temporary until we reduce the overhead caused by allocating tasks, etc. Not
// having this can add overhead in ProcessReferences since we may end up doing many calls of
// ProcessMarkStack with very small mark stacks.
static constexpr size_t kMinimumParallelMarkStackSize = 128;
static constexpr bool kParallelProcessMarkStack = true;

// Profiling and information flags.
static constexpr bool kProfileLargeObjects = false;
static constexpr bool kMeasureOverhead = false;
static constexpr bool kCountTasks = false;
static constexpr bool kCountMarkedObjects = false;

// Turn off kCheckLocks when profiling the GC since it slows the GC down by up to 40%.
static constexpr bool kCheckLocks = kDebugLocking;
static constexpr bool kVerifyRootsMarked = kIsDebugBuild;

// If true, revoke the rosalloc thread-local buffers at the
// checkpoint, as opposed to during the pause.
static constexpr bool kRevokeRosAllocThreadLocalBuffersAtCheckpoint = true;

void MarkSweep::BindBitmaps() {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
  // Mark all of the spaces we never collect as immune.
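  // Such spaces are not swept; references from them into the collected spaces are found later
  // through their mod-union tables in UpdateAndMarkModUnion().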
  for (const auto& space : GetHeap()->GetContinuousSpaces()) {
    if (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyNeverCollect) {
      immune_spaces_.AddSpace(space);
    }
  }
}

MarkSweep::MarkSweep(Heap* heap, bool is_concurrent, const std::string& name_prefix)
    : GarbageCollector(heap,
                       name_prefix +
                           (is_concurrent ? "concurrent mark sweep": "mark sweep")),
      current_space_bitmap_(nullptr),
      mark_bitmap_(nullptr),
      mark_stack_(nullptr),
      gc_barrier_(new Barrier(0)),
      mark_stack_lock_("mark sweep mark stack lock", kMarkSweepMarkStackLock),
      is_concurrent_(is_concurrent),
      live_stack_freeze_size_(0) {
  std::string error_msg;
  MemMap* mem_map = MemMap::MapAnonymous(
      "mark sweep sweep array free buffer", nullptr,
      RoundUp(kSweepArrayChunkFreeSize * sizeof(mirror::Object*), kPageSize),
      PROT_READ | PROT_WRITE, false, false, &error_msg);
  CHECK(mem_map != nullptr) << "Couldn't allocate sweep array free buffer: " << error_msg;
  sweep_array_free_buffer_mem_map_.reset(mem_map);
}

void MarkSweep::InitializePhase() {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  mark_stack_ = heap_->GetMarkStack();
  DCHECK(mark_stack_ != nullptr);
  immune_spaces_.Reset();
  no_reference_class_count_.StoreRelaxed(0);
  normal_count_.StoreRelaxed(0);
  class_count_.StoreRelaxed(0);
  object_array_count_.StoreRelaxed(0);
  other_count_.StoreRelaxed(0);
  reference_count_.StoreRelaxed(0);
  large_object_test_.StoreRelaxed(0);
  large_object_mark_.StoreRelaxed(0);
  overhead_time_.StoreRelaxed(0);
  work_chunks_created_.StoreRelaxed(0);
  work_chunks_deleted_.StoreRelaxed(0);
  mark_null_count_.StoreRelaxed(0);
  mark_immune_count_.StoreRelaxed(0);
  mark_fastpath_count_.StoreRelaxed(0);
  mark_slowpath_count_.StoreRelaxed(0);
  {
    // TODO: I don't think we should need heap bitmap lock to Get the mark bitmap.
    ReaderMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
    mark_bitmap_ = heap_->GetMarkBitmap();
  }
  if (!GetCurrentIteration()->GetClearSoftReferences()) {
    // Always clear soft references if a non-sticky collection.
    GetCurrentIteration()->SetClearSoftReferences(GetGcType() != collector::kGcTypeSticky);
  }
}

void MarkSweep::RunPhases() {
  Thread* self = Thread::Current();
  InitializePhase();
  Locks::mutator_lock_->AssertNotHeld(self);
  if (IsConcurrent()) {
    GetHeap()->PreGcVerification(this);
    {
      ReaderMutexLock mu(self, *Locks::mutator_lock_);
      MarkingPhase();
    }
    ScopedPause pause(this);
    GetHeap()->PrePauseRosAllocVerification(this);
    PausePhase();
    RevokeAllThreadLocalBuffers();
  } else {
    ScopedPause pause(this);
    GetHeap()->PreGcVerificationPaused(this);
    MarkingPhase();
    GetHeap()->PrePauseRosAllocVerification(this);
    PausePhase();
    RevokeAllThreadLocalBuffers();
  }
  {
    // Sweeping always done concurrently, even for non concurrent mark sweep.
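    // Holding the mutator lock in shared (reader) mode here means mutators may run while the
    // reclaim phase frees unmarked objects.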
    ReaderMutexLock mu(self, *Locks::mutator_lock_);
    ReclaimPhase();
  }
  GetHeap()->PostGcVerification(this);
  FinishPhase();
}

void MarkSweep::ProcessReferences(Thread* self) {
  WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
  GetHeap()->GetReferenceProcessor()->ProcessReferences(
      true,
      GetTimings(),
      GetCurrentIteration()->GetClearSoftReferences(),
      this);
}

void MarkSweep::PausePhase() {
  TimingLogger::ScopedTiming t("(Paused)PausePhase", GetTimings());
  Thread* self = Thread::Current();
  Locks::mutator_lock_->AssertExclusiveHeld(self);
  if (IsConcurrent()) {
    // Handle the dirty objects if we are a concurrent GC.
    WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
    // Re-mark root set.
    ReMarkRoots();
    // Scan dirty objects; this is only required when doing a concurrent GC.
    RecursiveMarkDirtyObjects(true, accounting::CardTable::kCardDirty);
  }
  {
    TimingLogger::ScopedTiming t2("SwapStacks", GetTimings());
    WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
    heap_->SwapStacks();
    live_stack_freeze_size_ = heap_->GetLiveStack()->Size();
    // Need to revoke all the thread-local allocation stacks since we just swapped the allocation
    // stacks and don't want anybody to allocate into the live stack.
    RevokeAllThreadLocalAllocationStacks(self);
  }
  heap_->PreSweepingGcVerification(this);
  // Disallow new system weaks to prevent a race which occurs when someone adds a new system
  // weak before we sweep them. Since this new system weak may not be marked, the GC may
  // incorrectly sweep it. This also fixes a race where interning may attempt to return a strong
  // reference to a string that is about to be swept.
  Runtime::Current()->DisallowNewSystemWeaks();
  // Enable the reference processing slow path, needs to be done with mutators paused since there
  // is no lock in the GetReferent fast path.
  GetHeap()->GetReferenceProcessor()->EnableSlowPath();
}

void MarkSweep::PreCleanCards() {
  // Don't do this for non concurrent GCs since they don't have any dirty cards.
  if (kPreCleanCards && IsConcurrent()) {
    TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
    Thread* self = Thread::Current();
    CHECK(!Locks::mutator_lock_->IsExclusiveHeld(self));
    // Process dirty cards and add dirty cards to mod union tables, also ages cards.
    heap_->ProcessCards(GetTimings(), false, true, false);
    // The checkpoint root marking is required to avoid a race condition which occurs if the
    // following happens during a reference write:
    // 1. mutator dirties the card (write barrier)
    // 2. GC ages the card (the above ProcessCards call)
    // 3. GC scans the object (the RecursiveMarkDirtyObjects call below)
    // 4. mutator writes the value (corresponding to the write barrier in 1.)
    // This causes the GC to age the card but not necessarily mark the reference which the mutator
    // wrote into the object stored in the card.
    // Having the checkpoint fixes this issue since it ensures that the card mark and the
    // reference write are visible to the GC before the card is scanned (this is due to locks being
    // acquired / released in the checkpoint code).
    // The other roots are also marked to help reduce the pause.
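    // The 'false' argument below means that rosalloc thread-local buffers are not revoked at this
    // checkpoint.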
    MarkRootsCheckpoint(self, false);
    MarkNonThreadRoots();
    MarkConcurrentRoots(
        static_cast<VisitRootFlags>(kVisitRootFlagClearRootLog | kVisitRootFlagNewRoots));
    // Process the newly aged cards.
    RecursiveMarkDirtyObjects(false, accounting::CardTable::kCardDirty - 1);
    // TODO: Empty allocation stack to reduce the number of objects we need to test / mark as live
    // in the next GC.
  }
}

void MarkSweep::RevokeAllThreadLocalAllocationStacks(Thread* self) {
  if (kUseThreadLocalAllocationStack) {
    TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
    Locks::mutator_lock_->AssertExclusiveHeld(self);
    heap_->RevokeAllThreadLocalAllocationStacks(self);
  }
}

void MarkSweep::MarkingPhase() {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  Thread* self = Thread::Current();
  BindBitmaps();
  FindDefaultSpaceBitmap();
  // Process dirty cards and add dirty cards to mod union tables.
  // If the GC type is non-sticky, then we just clear the cards instead of ageing them.
  heap_->ProcessCards(GetTimings(), false, true, GetGcType() != kGcTypeSticky);
  WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
  MarkRoots(self);
  MarkReachableObjects();
  // Pre-clean dirtied cards to reduce pauses.
  PreCleanCards();
}

class MarkSweep::ScanObjectVisitor {
 public:
  explicit ScanObjectVisitor(MarkSweep* const mark_sweep) ALWAYS_INLINE
      : mark_sweep_(mark_sweep) {}

  void operator()(ObjPtr<mirror::Object> obj) const
      ALWAYS_INLINE
      REQUIRES(Locks::heap_bitmap_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_) {
    if (kCheckLocks) {
      Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
      Locks::heap_bitmap_lock_->AssertExclusiveHeld(Thread::Current());
    }
    mark_sweep_->ScanObject(obj.Ptr());
  }

 private:
  MarkSweep* const mark_sweep_;
};

void MarkSweep::UpdateAndMarkModUnion() {
  for (const auto& space : immune_spaces_.GetSpaces()) {
    const char* name = space->IsZygoteSpace()
        ? "UpdateAndMarkZygoteModUnionTable"
        : "UpdateAndMarkImageModUnionTable";
    DCHECK(space->IsZygoteSpace() || space->IsImageSpace()) << *space;
    TimingLogger::ScopedTiming t(name, GetTimings());
    accounting::ModUnionTable* mod_union_table = heap_->FindModUnionTableFromSpace(space);
    if (mod_union_table != nullptr) {
      mod_union_table->UpdateAndMarkReferences(this);
    } else {
      // No mod-union table, scan all the live bits. This can only occur for app images.
      space->GetLiveBitmap()->VisitMarkedRange(reinterpret_cast<uintptr_t>(space->Begin()),
                                               reinterpret_cast<uintptr_t>(space->End()),
                                               ScanObjectVisitor(this));
    }
  }
}

void MarkSweep::MarkReachableObjects() {
  UpdateAndMarkModUnion();
  // Recursively mark all the non-image bits set in the mark bitmap.
  RecursiveMark();
}

void MarkSweep::ReclaimPhase() {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  Thread* const self = Thread::Current();
  // Process the references concurrently.
  ProcessReferences(self);
  SweepSystemWeaks(self);
  Runtime* const runtime = Runtime::Current();
  runtime->AllowNewSystemWeaks();
  // Clean up class loaders after system weaks are swept since that is how we know if class
  // unloading occurred.
  runtime->GetClassLinker()->CleanupClassLoaders();
  {
    WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
    GetHeap()->RecordFreeRevoke();
    // Reclaim unmarked objects.
    Sweep(false);
    // Swap the live and mark bitmaps for each space which we modified. This is an
    // optimization that enables us to not clear live bits inside of the sweep. Only swaps unbound
    // bitmaps.
    SwapBitmaps();
    // Unbind the live and mark bitmaps.
    GetHeap()->UnBindBitmaps();
  }
}

void MarkSweep::FindDefaultSpaceBitmap() {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  for (const auto& space : GetHeap()->GetContinuousSpaces()) {
    accounting::ContinuousSpaceBitmap* bitmap = space->GetMarkBitmap();
    // We want to have the main space instead of non moving if possible.
    if (bitmap != nullptr &&
        space->GetGcRetentionPolicy() == space::kGcRetentionPolicyAlwaysCollect) {
      current_space_bitmap_ = bitmap;
      // If this is not the non moving space, exit the loop early since this will be good enough.
      if (space != heap_->GetNonMovingSpace()) {
        break;
      }
    }
  }
  CHECK(current_space_bitmap_ != nullptr) << "Could not find a default mark bitmap\n"
                                          << heap_->DumpSpaces();
}

void MarkSweep::ExpandMarkStack() {
  ResizeMarkStack(mark_stack_->Capacity() * 2);
}

void MarkSweep::ResizeMarkStack(size_t new_size) {
  // Rare case, no need to have Thread::Current be a parameter.
  if (UNLIKELY(mark_stack_->Size() < mark_stack_->Capacity())) {
    // Someone else acquired the lock and expanded the mark stack before us.
    return;
  }
  std::vector<StackReference<mirror::Object>> temp(mark_stack_->Begin(), mark_stack_->End());
  CHECK_LE(mark_stack_->Size(), new_size);
  mark_stack_->Resize(new_size);
  for (auto& obj : temp) {
    mark_stack_->PushBack(obj.AsMirrorPtr());
  }
}

mirror::Object* MarkSweep::MarkObject(mirror::Object* obj) {
  MarkObject(obj, nullptr, MemberOffset(0));
  return obj;
}

inline void MarkSweep::MarkObjectNonNullParallel(mirror::Object* obj) {
  DCHECK(obj != nullptr);
  if (MarkObjectParallel(obj)) {
    MutexLock mu(Thread::Current(), mark_stack_lock_);
    if (UNLIKELY(mark_stack_->Size() >= mark_stack_->Capacity())) {
      ExpandMarkStack();
    }
    // The object must be pushed on to the mark stack.
    mark_stack_->PushBack(obj);
  }
}

bool MarkSweep::IsMarkedHeapReference(mirror::HeapReference<mirror::Object>* ref) {
  return IsMarked(ref->AsMirrorPtr());
}

class MarkSweep::MarkObjectSlowPath {
 public:
  explicit MarkObjectSlowPath(MarkSweep* mark_sweep,
                              mirror::Object* holder = nullptr,
                              MemberOffset offset = MemberOffset(0))
      : mark_sweep_(mark_sweep),
        holder_(holder),
        offset_(offset) {}

  void operator()(const mirror::Object* obj) const NO_THREAD_SAFETY_ANALYSIS {
    if (kProfileLargeObjects) {
      // TODO: Differentiate between marking and testing somehow.
      ++mark_sweep_->large_object_test_;
      ++mark_sweep_->large_object_mark_;
    }
    space::LargeObjectSpace* large_object_space = mark_sweep_->GetHeap()->GetLargeObjectsSpace();
    if (UNLIKELY(obj == nullptr || !IsAligned<kPageSize>(obj) ||
                 (kIsDebugBuild && large_object_space != nullptr &&
                     !large_object_space->Contains(obj)))) {
      // Lowest priority logging first:
      PrintFileToLog("/proc/self/maps", LogSeverity::FATAL_WITHOUT_ABORT);
      MemMap::DumpMaps(LOG_STREAM(FATAL_WITHOUT_ABORT), true);
      // Buffer the output in the string stream since it is more important than the stack traces
      // and we want it to have log priority. The stack traces are printed from Runtime::Abort
      // which is called from LOG(FATAL) but before the abort message.
      std::ostringstream oss;
      oss << "Tried to mark " << obj << " not contained by any spaces" << std::endl;
      if (holder_ != nullptr) {
        size_t holder_size = holder_->SizeOf();
        ArtField* field = holder_->FindFieldByOffset(offset_);
        oss << "Field info: "
            << " holder=" << holder_
            << " holder is "
            << (mark_sweep_->GetHeap()->IsLiveObjectLocked(holder_)
                    ? "alive" : "dead")
            << " holder_size=" << holder_size
            << " holder_type=" << holder_->PrettyTypeOf()
            << " offset=" << offset_.Uint32Value()
            << " field=" << (field != nullptr ? field->GetName() : "nullptr")
            << " field_type="
            << (field != nullptr ? field->GetTypeDescriptor() : "")
            << " first_ref_field_offset="
            << (holder_->IsClass()
                    ? holder_->AsClass()->GetFirstReferenceStaticFieldOffset(
                        kRuntimePointerSize)
                    : holder_->GetClass()->GetFirstReferenceInstanceFieldOffset())
            << " num_of_ref_fields="
            << (holder_->IsClass()
                    ? holder_->AsClass()->NumReferenceStaticFields()
                    : holder_->GetClass()->NumReferenceInstanceFields())
            << std::endl;
        // Print the memory content of the holder.
        for (size_t i = 0; i < holder_size / sizeof(uint32_t); ++i) {
          uint32_t* p = reinterpret_cast<uint32_t*>(holder_);
          oss << &p[i] << ": " << "holder+" << (i * sizeof(uint32_t)) << " = " << std::hex << p[i]
              << std::endl;
        }
      }
      oss << "Attempting to see if it's a bad thread root" << std::endl;
      mark_sweep_->VerifySuspendedThreadRoots(oss);
      LOG(FATAL) << oss.str();
    }
  }

 private:
  MarkSweep* const mark_sweep_;
  mirror::Object* const holder_;
  MemberOffset offset_;
};

inline void MarkSweep::MarkObjectNonNull(mirror::Object* obj,
                                         mirror::Object* holder,
                                         MemberOffset offset) {
  DCHECK(obj != nullptr);
  if (kUseBakerReadBarrier) {
    // Verify all the objects have the correct state installed.
    obj->AssertReadBarrierState();
  }
  if (immune_spaces_.IsInImmuneRegion(obj)) {
    if (kCountMarkedObjects) {
      ++mark_immune_count_;
    }
    DCHECK(mark_bitmap_->Test(obj));
  } else if (LIKELY(current_space_bitmap_->HasAddress(obj))) {
    if (kCountMarkedObjects) {
      ++mark_fastpath_count_;
    }
    if (UNLIKELY(!current_space_bitmap_->Set(obj))) {
      PushOnMarkStack(obj);  // This object was not previously marked.
    }
  } else {
    if (kCountMarkedObjects) {
      ++mark_slowpath_count_;
    }
    MarkObjectSlowPath visitor(this, holder, offset);
    // TODO: We already know that the object is not in the current_space_bitmap_ but MarkBitmap::Set
    // will check again.
    if (!mark_bitmap_->Set(obj, visitor)) {
      PushOnMarkStack(obj);  // Was not already marked, push.
    }
  }
}

inline void MarkSweep::PushOnMarkStack(mirror::Object* obj) {
  if (UNLIKELY(mark_stack_->Size() >= mark_stack_->Capacity())) {
    // Lock is not needed but is here anyways to please annotalysis.
    MutexLock mu(Thread::Current(), mark_stack_lock_);
    ExpandMarkStack();
  }
  // The object must be pushed on to the mark stack.
  mark_stack_->PushBack(obj);
}

inline bool MarkSweep::MarkObjectParallel(mirror::Object* obj) {
  DCHECK(obj != nullptr);
  if (kUseBakerReadBarrier) {
    // Verify all the objects have the correct state installed.
    obj->AssertReadBarrierState();
  }
  if (immune_spaces_.IsInImmuneRegion(obj)) {
    DCHECK(IsMarked(obj) != nullptr);
    return false;
  }
  // Try to take advantage of locality of references within a space, failing this find the space
  // the hard way.
  accounting::ContinuousSpaceBitmap* object_bitmap = current_space_bitmap_;
  if (LIKELY(object_bitmap->HasAddress(obj))) {
    return !object_bitmap->AtomicTestAndSet(obj);
  }
  MarkObjectSlowPath visitor(this);
  return !mark_bitmap_->AtomicTestAndSet(obj, visitor);
}

void MarkSweep::MarkHeapReference(mirror::HeapReference<mirror::Object>* ref) {
  MarkObject(ref->AsMirrorPtr(), nullptr, MemberOffset(0));
}

// Used to mark objects when processing the mark stack. If an object is null, it is not marked.
inline void MarkSweep::MarkObject(mirror::Object* obj,
                                  mirror::Object* holder,
                                  MemberOffset offset) {
  if (obj != nullptr) {
    MarkObjectNonNull(obj, holder, offset);
  } else if (kCountMarkedObjects) {
    ++mark_null_count_;
  }
}

class MarkSweep::VerifyRootMarkedVisitor : public SingleRootVisitor {
 public:
  explicit VerifyRootMarkedVisitor(MarkSweep* collector) : collector_(collector) { }

  void VisitRoot(mirror::Object* root, const RootInfo& info) OVERRIDE
      REQUIRES_SHARED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
    CHECK(collector_->IsMarked(root) != nullptr) << info.ToString();
  }

 private:
  MarkSweep* const collector_;
};

void MarkSweep::VisitRoots(mirror::Object*** roots,
                           size_t count,
                           const RootInfo& info ATTRIBUTE_UNUSED) {
  for (size_t i = 0; i < count; ++i) {
    MarkObjectNonNull(*roots[i]);
  }
}

void MarkSweep::VisitRoots(mirror::CompressedReference<mirror::Object>** roots,
                           size_t count,
                           const RootInfo& info ATTRIBUTE_UNUSED) {
  for (size_t i = 0; i < count; ++i) {
    MarkObjectNonNull(roots[i]->AsMirrorPtr());
  }
}

class MarkSweep::VerifyRootVisitor : public SingleRootVisitor {
 public:
  explicit VerifyRootVisitor(std::ostream& os) : os_(os) {}

  void VisitRoot(mirror::Object* root, const RootInfo& info) OVERRIDE
      REQUIRES_SHARED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
    // See if the root is on any space bitmap.
    auto* heap = Runtime::Current()->GetHeap();
    if (heap->GetLiveBitmap()->GetContinuousSpaceBitmap(root) == nullptr) {
      space::LargeObjectSpace* large_object_space = heap->GetLargeObjectsSpace();
      if (large_object_space != nullptr && !large_object_space->Contains(root)) {
        os_ << "Found invalid root: " << root << " " << info << std::endl;
      }
    }
  }

 private:
  std::ostream& os_;
};

void MarkSweep::VerifySuspendedThreadRoots(std::ostream& os) {
  VerifyRootVisitor visitor(os);
  Runtime::Current()->GetThreadList()->VisitRootsForSuspendedThreads(&visitor);
}

void MarkSweep::MarkRoots(Thread* self) {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  if (Locks::mutator_lock_->IsExclusiveHeld(self)) {
    // If we exclusively hold the mutator lock, all threads must be suspended.
    Runtime::Current()->VisitRoots(this);
    RevokeAllThreadLocalAllocationStacks(self);
  } else {
    MarkRootsCheckpoint(self, kRevokeRosAllocThreadLocalBuffersAtCheckpoint);
    // At this point the live stack should no longer have any mutators which push into it.
    MarkNonThreadRoots();
    MarkConcurrentRoots(
        static_cast<VisitRootFlags>(kVisitRootFlagAllRoots | kVisitRootFlagStartLoggingNewRoots));
  }
}

void MarkSweep::MarkNonThreadRoots() {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  Runtime::Current()->VisitNonThreadRoots(this);
}

void MarkSweep::MarkConcurrentRoots(VisitRootFlags flags) {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  // Visit all runtime roots and clear dirty flags.
  Runtime::Current()->VisitConcurrentRoots(this, flags);
}

class MarkSweep::DelayReferenceReferentVisitor {
 public:
  explicit DelayReferenceReferentVisitor(MarkSweep* collector) : collector_(collector) {}

  void operator()(ObjPtr<mirror::Class> klass, ObjPtr<mirror::Reference> ref) const
      REQUIRES(Locks::heap_bitmap_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_) {
    collector_->DelayReferenceReferent(klass, ref);
  }

 private:
  MarkSweep* const collector_;
};

template <bool kUseFinger = false>
class MarkSweep::MarkStackTask : public Task {
 public:
  MarkStackTask(ThreadPool* thread_pool,
                MarkSweep* mark_sweep,
                size_t mark_stack_size,
                StackReference<mirror::Object>* mark_stack)
      : mark_sweep_(mark_sweep),
        thread_pool_(thread_pool),
        mark_stack_pos_(mark_stack_size) {
    // We may have to copy part of an existing mark stack when another mark stack overflows.
    if (mark_stack_size != 0) {
      DCHECK(mark_stack != nullptr);
      // TODO: Check performance?
      std::copy(mark_stack, mark_stack + mark_stack_size, mark_stack_);
    }
    if (kCountTasks) {
      ++mark_sweep_->work_chunks_created_;
    }
  }

  static const size_t kMaxSize = 1 * KB;

 protected:
  class MarkObjectParallelVisitor {
   public:
    ALWAYS_INLINE MarkObjectParallelVisitor(MarkStackTask<kUseFinger>* chunk_task,
                                            MarkSweep* mark_sweep)
        : chunk_task_(chunk_task), mark_sweep_(mark_sweep) {}

    ALWAYS_INLINE void operator()(mirror::Object* obj,
                                  MemberOffset offset,
                                  bool is_static ATTRIBUTE_UNUSED) const
        REQUIRES_SHARED(Locks::mutator_lock_) {
      Mark(obj->GetFieldObject<mirror::Object>(offset));
    }

    void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const
        REQUIRES_SHARED(Locks::mutator_lock_) {
      if (!root->IsNull()) {
        VisitRoot(root);
      }
    }

    void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
        REQUIRES_SHARED(Locks::mutator_lock_) {
      if (kCheckLocks) {
        Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
        Locks::heap_bitmap_lock_->AssertExclusiveHeld(Thread::Current());
      }
      Mark(root->AsMirrorPtr());
    }

   private:
    ALWAYS_INLINE void Mark(mirror::Object* ref) const REQUIRES_SHARED(Locks::mutator_lock_) {
      if (ref != nullptr && mark_sweep_->MarkObjectParallel(ref)) {
        if (kUseFinger) {
          std::atomic_thread_fence(std::memory_order_seq_cst);
          if (reinterpret_cast<uintptr_t>(ref) >=
              static_cast<uintptr_t>(mark_sweep_->atomic_finger_.LoadRelaxed())) {
            return;
          }
        }
        chunk_task_->MarkStackPush(ref);
      }
    }

    MarkStackTask<kUseFinger>* const chunk_task_;
    MarkSweep* const mark_sweep_;
  };

  class ScanObjectParallelVisitor {
   public:
    ALWAYS_INLINE explicit ScanObjectParallelVisitor(MarkStackTask<kUseFinger>* chunk_task)
        : chunk_task_(chunk_task) {}

    // No thread safety analysis since multiple threads will use this visitor.
    void operator()(mirror::Object* obj) const
        REQUIRES(Locks::heap_bitmap_lock_)
        REQUIRES_SHARED(Locks::mutator_lock_) {
      MarkSweep* const mark_sweep = chunk_task_->mark_sweep_;
      MarkObjectParallelVisitor mark_visitor(chunk_task_, mark_sweep);
      DelayReferenceReferentVisitor ref_visitor(mark_sweep);
      mark_sweep->ScanObjectVisit(obj, mark_visitor, ref_visitor);
    }

   private:
    MarkStackTask<kUseFinger>* const chunk_task_;
  };

  virtual ~MarkStackTask() {
    // Make sure that we have cleared our mark stack.
    DCHECK_EQ(mark_stack_pos_, 0U);
    if (kCountTasks) {
      ++mark_sweep_->work_chunks_deleted_;
    }
  }

  MarkSweep* const mark_sweep_;
  ThreadPool* const thread_pool_;
  // Thread local mark stack for this task.
  StackReference<mirror::Object> mark_stack_[kMaxSize];
  // Mark stack position.
  size_t mark_stack_pos_;

  ALWAYS_INLINE void MarkStackPush(mirror::Object* obj)
      REQUIRES_SHARED(Locks::mutator_lock_) {
    if (UNLIKELY(mark_stack_pos_ == kMaxSize)) {
      // Mark stack overflow, give 1/2 the stack to the thread pool as a new work task.
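      // The lower half of this task's local stack stays with this task; the upper half is copied
      // into a new MarkStackTask and queued so that other pool workers can process it.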
      mark_stack_pos_ /= 2;
      auto* task = new MarkStackTask(thread_pool_,
                                     mark_sweep_,
                                     kMaxSize - mark_stack_pos_,
                                     mark_stack_ + mark_stack_pos_);
      thread_pool_->AddTask(Thread::Current(), task);
    }
    DCHECK(obj != nullptr);
    DCHECK_LT(mark_stack_pos_, kMaxSize);
    mark_stack_[mark_stack_pos_++].Assign(obj);
  }

  virtual void Finalize() {
    delete this;
  }

  // Scans all of the objects.
  virtual void Run(Thread* self ATTRIBUTE_UNUSED)
      REQUIRES(Locks::heap_bitmap_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_) {
    ScanObjectParallelVisitor visitor(this);
    // TODO: Tune this.
    static const size_t kFifoSize = 4;
    BoundedFifoPowerOfTwo<mirror::Object*, kFifoSize> prefetch_fifo;
    for (;;) {
      mirror::Object* obj = nullptr;
      if (kUseMarkStackPrefetch) {
        while (mark_stack_pos_ != 0 && prefetch_fifo.size() < kFifoSize) {
          mirror::Object* const mark_stack_obj = mark_stack_[--mark_stack_pos_].AsMirrorPtr();
          DCHECK(mark_stack_obj != nullptr);
          __builtin_prefetch(mark_stack_obj);
          prefetch_fifo.push_back(mark_stack_obj);
        }
        if (UNLIKELY(prefetch_fifo.empty())) {
          break;
        }
        obj = prefetch_fifo.front();
        prefetch_fifo.pop_front();
      } else {
        if (UNLIKELY(mark_stack_pos_ == 0)) {
          break;
        }
        obj = mark_stack_[--mark_stack_pos_].AsMirrorPtr();
      }
      DCHECK(obj != nullptr);
      visitor(obj);
    }
  }
};

class MarkSweep::CardScanTask : public MarkStackTask<false> {
 public:
  CardScanTask(ThreadPool* thread_pool,
               MarkSweep* mark_sweep,
               accounting::ContinuousSpaceBitmap* bitmap,
               uint8_t* begin,
               uint8_t* end,
               uint8_t minimum_age,
               size_t mark_stack_size,
               StackReference<mirror::Object>* mark_stack_obj,
               bool clear_card)
      : MarkStackTask<false>(thread_pool, mark_sweep, mark_stack_size, mark_stack_obj),
        bitmap_(bitmap),
        begin_(begin),
        end_(end),
        minimum_age_(minimum_age),
        clear_card_(clear_card) {}

 protected:
  accounting::ContinuousSpaceBitmap* const bitmap_;
  uint8_t* const begin_;
  uint8_t* const end_;
  const uint8_t minimum_age_;
  const bool clear_card_;

  virtual void Finalize() {
    delete this;
  }

  virtual void Run(Thread* self) NO_THREAD_SAFETY_ANALYSIS {
    ScanObjectParallelVisitor visitor(this);
    accounting::CardTable* card_table = mark_sweep_->GetHeap()->GetCardTable();
    size_t cards_scanned = clear_card_
        ? card_table->Scan<true>(bitmap_, begin_, end_, visitor, minimum_age_)
        : card_table->Scan<false>(bitmap_, begin_, end_, visitor, minimum_age_);
    VLOG(heap) << "Parallel scanning cards " << reinterpret_cast<void*>(begin_) << " - "
               << reinterpret_cast<void*>(end_) << " = " << cards_scanned;
    // Finish by emptying our local mark stack.
    MarkStackTask::Run(self);
  }
};

size_t MarkSweep::GetThreadCount(bool paused) const {
  // Use fewer threads if we are in a background state (non jank perceptible) since we want to
  // leave more CPU time for the foreground apps.
  if (heap_->GetThreadPool() == nullptr || !Runtime::Current()->InJankPerceptibleProcessState()) {
    return 1;
  }
  return (paused ?
      heap_->GetParallelGCThreadCount() : heap_->GetConcGCThreadCount()) + 1;
}

void MarkSweep::ScanGrayObjects(bool paused, uint8_t minimum_age) {
  accounting::CardTable* card_table = GetHeap()->GetCardTable();
  ThreadPool* thread_pool = GetHeap()->GetThreadPool();
  size_t thread_count = GetThreadCount(paused);
  // The parallel version with only one thread is faster for card scanning, TODO: fix.
  if (kParallelCardScan && thread_count > 1) {
    Thread* self = Thread::Current();
    // Can't have a different split for each space since multiple spaces can have their cards being
    // scanned at the same time.
    TimingLogger::ScopedTiming t(paused ? "(Paused)ScanGrayObjects" : __FUNCTION__,
                                 GetTimings());
    // Try to take some of the mark stack since we can pass this off to the worker tasks.
    StackReference<mirror::Object>* mark_stack_begin = mark_stack_->Begin();
    StackReference<mirror::Object>* mark_stack_end = mark_stack_->End();
    const size_t mark_stack_size = mark_stack_end - mark_stack_begin;
    // Estimated number of work tasks we will create.
    const size_t mark_stack_tasks = GetHeap()->GetContinuousSpaces().size() * thread_count;
    DCHECK_NE(mark_stack_tasks, 0U);
    const size_t mark_stack_delta = std::min(CardScanTask::kMaxSize / 2,
                                             mark_stack_size / mark_stack_tasks + 1);
    for (const auto& space : GetHeap()->GetContinuousSpaces()) {
      if (space->GetMarkBitmap() == nullptr) {
        continue;
      }
      uint8_t* card_begin = space->Begin();
      uint8_t* card_end = space->End();
      // Align up the end address. For example, the image space's end
      // may not be card-size-aligned.
      card_end = AlignUp(card_end, accounting::CardTable::kCardSize);
      DCHECK_ALIGNED(card_begin, accounting::CardTable::kCardSize);
      DCHECK_ALIGNED(card_end, accounting::CardTable::kCardSize);
      // Calculate how many bytes of heap we will scan.
      const size_t address_range = card_end - card_begin;
      // Calculate how much address range each task gets.
      const size_t card_delta = RoundUp(address_range / thread_count + 1,
                                        accounting::CardTable::kCardSize);
      // If paused and the space is neither zygote nor image space, we could clear the dirty
      // cards to avoid accumulating them to increase card scanning load in the following GC
      // cycles. We need to keep dirty cards of image space and zygote space in order to track
      // references to the other spaces.
      bool clear_card = paused && !space->IsZygoteSpace() && !space->IsImageSpace();
      // Create the worker tasks for this space.
      while (card_begin != card_end) {
        // Add a range of cards.
        size_t addr_remaining = card_end - card_begin;
        size_t card_increment = std::min(card_delta, addr_remaining);
        // Take from the back of the mark stack.
        size_t mark_stack_remaining = mark_stack_end - mark_stack_begin;
        size_t mark_stack_increment = std::min(mark_stack_delta, mark_stack_remaining);
        mark_stack_end -= mark_stack_increment;
        mark_stack_->PopBackCount(static_cast<int32_t>(mark_stack_increment));
        DCHECK_EQ(mark_stack_end, mark_stack_->End());
        // Add the new task to the thread pool.
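        // Each task scans the cards covering [card_begin, card_begin + card_increment) against
        // this space's mark bitmap and also drains the slice of the global mark stack it was given.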
        auto* task = new CardScanTask(thread_pool,
                                      this,
                                      space->GetMarkBitmap(),
                                      card_begin,
                                      card_begin + card_increment,
                                      minimum_age,
                                      mark_stack_increment,
                                      mark_stack_end,
                                      clear_card);
        thread_pool->AddTask(self, task);
        card_begin += card_increment;
      }
    }

    // Note: the card scan below may dirty new cards (and scan them)
    // as a side effect when a Reference object is encountered and
    // queued during the marking. See b/11465268.
    thread_pool->SetMaxActiveWorkers(thread_count - 1);
    thread_pool->StartWorkers(self);
    thread_pool->Wait(self, true, true);
    thread_pool->StopWorkers(self);
  } else {
    for (const auto& space : GetHeap()->GetContinuousSpaces()) {
      if (space->GetMarkBitmap() != nullptr) {
        // Image spaces are handled properly since live == marked for them.
        const char* name = nullptr;
        switch (space->GetGcRetentionPolicy()) {
          case space::kGcRetentionPolicyNeverCollect:
            name = paused ? "(Paused)ScanGrayImageSpaceObjects" : "ScanGrayImageSpaceObjects";
            break;
          case space::kGcRetentionPolicyFullCollect:
            name = paused ? "(Paused)ScanGrayZygoteSpaceObjects" : "ScanGrayZygoteSpaceObjects";
            break;
          case space::kGcRetentionPolicyAlwaysCollect:
            name = paused ? "(Paused)ScanGrayAllocSpaceObjects" : "ScanGrayAllocSpaceObjects";
            break;
          default:
            LOG(FATAL) << "Unreachable";
            UNREACHABLE();
        }
        TimingLogger::ScopedTiming t(name, GetTimings());
        ScanObjectVisitor visitor(this);
        bool clear_card = paused && !space->IsZygoteSpace() && !space->IsImageSpace();
        if (clear_card) {
          card_table->Scan<true>(space->GetMarkBitmap(),
                                 space->Begin(),
                                 space->End(),
                                 visitor,
                                 minimum_age);
        } else {
          card_table->Scan<false>(space->GetMarkBitmap(),
                                  space->Begin(),
                                  space->End(),
                                  visitor,
                                  minimum_age);
        }
      }
    }
  }
}

class MarkSweep::RecursiveMarkTask : public MarkStackTask<false> {
 public:
  RecursiveMarkTask(ThreadPool* thread_pool,
                    MarkSweep* mark_sweep,
                    accounting::ContinuousSpaceBitmap* bitmap,
                    uintptr_t begin,
                    uintptr_t end)
      : MarkStackTask<false>(thread_pool, mark_sweep, 0, nullptr),
        bitmap_(bitmap),
        begin_(begin),
        end_(end) {}

 protected:
  accounting::ContinuousSpaceBitmap* const bitmap_;
  const uintptr_t begin_;
  const uintptr_t end_;

  virtual void Finalize() {
    delete this;
  }

  // Scans all of the objects.
  virtual void Run(Thread* self) NO_THREAD_SAFETY_ANALYSIS {
    ScanObjectParallelVisitor visitor(this);
    bitmap_->VisitMarkedRange(begin_, end_, visitor);
    // Finish by emptying our local mark stack.
    MarkStackTask::Run(self);
  }
};

// Populates the mark stack based on the set of marked objects and
// recursively marks until the mark stack is emptied.
void MarkSweep::RecursiveMark() {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  // RecursiveMark will build the lists of known instances of the Reference classes. See
  // DelayReferenceReferent for details.
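  // Note: kUseRecursiveMark is currently false (see the performance options above), in which case
  // this reduces to draining the mark stack via ProcessMarkStack below.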
  if (kUseRecursiveMark) {
    const bool partial = GetGcType() == kGcTypePartial;
    ScanObjectVisitor scan_visitor(this);
    auto* self = Thread::Current();
    ThreadPool* thread_pool = heap_->GetThreadPool();
    size_t thread_count = GetThreadCount(false);
    const bool parallel = kParallelRecursiveMark && thread_count > 1;
    mark_stack_->Reset();
    for (const auto& space : GetHeap()->GetContinuousSpaces()) {
      if ((space->GetGcRetentionPolicy() == space::kGcRetentionPolicyAlwaysCollect) ||
          (!partial && space->GetGcRetentionPolicy() == space::kGcRetentionPolicyFullCollect)) {
        current_space_bitmap_ = space->GetMarkBitmap();
        if (current_space_bitmap_ == nullptr) {
          continue;
        }
        if (parallel) {
          // We will use the mark stack in the future.
          // CHECK(mark_stack_->IsEmpty());
          // This function does not handle heap end increasing, so we must use the space end.
          uintptr_t begin = reinterpret_cast<uintptr_t>(space->Begin());
          uintptr_t end = reinterpret_cast<uintptr_t>(space->End());
          atomic_finger_.StoreRelaxed(AtomicInteger::MaxValue());

          // Create a few worker tasks.
          const size_t n = thread_count * 2;
          while (begin != end) {
            uintptr_t start = begin;
            uintptr_t delta = (end - begin) / n;
            delta = RoundUp(delta, KB);
            if (delta < 16 * KB) delta = end - begin;
            begin += delta;
            auto* task = new RecursiveMarkTask(thread_pool,
                                               this,
                                               current_space_bitmap_,
                                               start,
                                               begin);
            thread_pool->AddTask(self, task);
          }
          thread_pool->SetMaxActiveWorkers(thread_count - 1);
          thread_pool->StartWorkers(self);
          thread_pool->Wait(self, true, true);
          thread_pool->StopWorkers(self);
        } else {
          // This function does not handle heap end increasing, so we must use the space end.
          uintptr_t begin = reinterpret_cast<uintptr_t>(space->Begin());
          uintptr_t end = reinterpret_cast<uintptr_t>(space->End());
          current_space_bitmap_->VisitMarkedRange(begin, end, scan_visitor);
        }
      }
    }
  }
  ProcessMarkStack(false);
}

void MarkSweep::RecursiveMarkDirtyObjects(bool paused, uint8_t minimum_age) {
  ScanGrayObjects(paused, minimum_age);
  ProcessMarkStack(paused);
}

void MarkSweep::ReMarkRoots() {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  Locks::mutator_lock_->AssertExclusiveHeld(Thread::Current());
  Runtime::Current()->VisitRoots(this, static_cast<VisitRootFlags>(
      kVisitRootFlagNewRoots | kVisitRootFlagStopLoggingNewRoots | kVisitRootFlagClearRootLog));
  if (kVerifyRootsMarked) {
    TimingLogger::ScopedTiming t2("(Paused)VerifyRoots", GetTimings());
    VerifyRootMarkedVisitor visitor(this);
    Runtime::Current()->VisitRoots(&visitor);
  }
}

void MarkSweep::SweepSystemWeaks(Thread* self) {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
  Runtime::Current()->SweepSystemWeaks(this);
}

class MarkSweep::VerifySystemWeakVisitor : public IsMarkedVisitor {
 public:
  explicit VerifySystemWeakVisitor(MarkSweep* mark_sweep) : mark_sweep_(mark_sweep) {}

  virtual mirror::Object* IsMarked(mirror::Object* obj)
      OVERRIDE
      REQUIRES_SHARED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
    mark_sweep_->VerifyIsLive(obj);
    return obj;
  }

  MarkSweep* const mark_sweep_;
};

void MarkSweep::VerifyIsLive(const mirror::Object* obj) {
  if (!heap_->GetLiveBitmap()->Test(obj)) {
    // TODO: Consider live stack? Has this code bitrotted?
    CHECK(!heap_->allocation_stack_->Contains(obj))
        << "Found dead object " << obj << "\n" << heap_->DumpSpaces();
  }
}

void MarkSweep::VerifySystemWeaks() {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  // Verify system weaks, uses a special object visitor which returns the input object.
  VerifySystemWeakVisitor visitor(this);
  Runtime::Current()->SweepSystemWeaks(&visitor);
}

class MarkSweep::CheckpointMarkThreadRoots : public Closure, public RootVisitor {
 public:
  CheckpointMarkThreadRoots(MarkSweep* mark_sweep,
                            bool revoke_ros_alloc_thread_local_buffers_at_checkpoint)
      : mark_sweep_(mark_sweep),
        revoke_ros_alloc_thread_local_buffers_at_checkpoint_(
            revoke_ros_alloc_thread_local_buffers_at_checkpoint) {
  }

  void VisitRoots(mirror::Object*** roots, size_t count, const RootInfo& info ATTRIBUTE_UNUSED)
      OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(Locks::heap_bitmap_lock_) {
    for (size_t i = 0; i < count; ++i) {
      mark_sweep_->MarkObjectNonNullParallel(*roots[i]);
    }
  }

  void VisitRoots(mirror::CompressedReference<mirror::Object>** roots,
                  size_t count,
                  const RootInfo& info ATTRIBUTE_UNUSED)
      OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(Locks::heap_bitmap_lock_) {
    for (size_t i = 0; i < count; ++i) {
      mark_sweep_->MarkObjectNonNullParallel(roots[i]->AsMirrorPtr());
    }
  }

  virtual void Run(Thread* thread) OVERRIDE NO_THREAD_SAFETY_ANALYSIS {
    ScopedTrace trace("Marking thread roots");
    // Note: self is not necessarily equal to thread since thread may be suspended.
    Thread* const self = Thread::Current();
    CHECK(thread == self || thread->IsSuspended() || thread->GetState() == kWaitingPerformingGc)
        << thread->GetState() << " thread " << thread << " self " << self;
    thread->VisitRoots(this);
    if (revoke_ros_alloc_thread_local_buffers_at_checkpoint_) {
      ScopedTrace trace2("RevokeRosAllocThreadLocalBuffers");
      mark_sweep_->GetHeap()->RevokeRosAllocThreadLocalBuffers(thread);
    }
    // If thread is a running mutator, then act on behalf of the garbage collector.
    // See the code in ThreadList::RunCheckpoint.
    mark_sweep_->GetBarrier().Pass(self);
  }

 private:
  MarkSweep* const mark_sweep_;
  const bool revoke_ros_alloc_thread_local_buffers_at_checkpoint_;
};

void MarkSweep::MarkRootsCheckpoint(Thread* self,
                                    bool revoke_ros_alloc_thread_local_buffers_at_checkpoint) {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  CheckpointMarkThreadRoots check_point(this, revoke_ros_alloc_thread_local_buffers_at_checkpoint);
  ThreadList* thread_list = Runtime::Current()->GetThreadList();
  // Request the check point is run on all threads returning a count of the threads that must
  // run through the barrier including self.
  size_t barrier_count = thread_list->RunCheckpoint(&check_point);
  // Release locks then wait for all mutator threads to pass the barrier.
  // If there are no threads to wait for, which implies that all the checkpoint functions have
  // already finished, then there is no need to release the locks.
  if (barrier_count == 0) {
    return;
  }
  Locks::heap_bitmap_lock_->ExclusiveUnlock(self);
  Locks::mutator_lock_->SharedUnlock(self);
  {
    ScopedThreadStateChange tsc(self, kWaitingForCheckPointsToRun);
    gc_barrier_->Increment(self, barrier_count);
  }
  Locks::mutator_lock_->SharedLock(self);
  Locks::heap_bitmap_lock_->ExclusiveLock(self);
}

void MarkSweep::SweepArray(accounting::ObjectStack* allocations, bool swap_bitmaps) {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  Thread* self = Thread::Current();
  mirror::Object** chunk_free_buffer = reinterpret_cast<mirror::Object**>(
      sweep_array_free_buffer_mem_map_->BaseBegin());
  size_t chunk_free_pos = 0;
  ObjectBytePair freed;
  ObjectBytePair freed_los;
  // How many objects are left in the array, modified after each space is swept.
  StackReference<mirror::Object>* objects = allocations->Begin();
  size_t count = allocations->Size();
  // Change the order to ensure that the non-moving space is swept last. This is an optimization.
  std::vector<space::ContinuousSpace*> sweep_spaces;
  space::ContinuousSpace* non_moving_space = nullptr;
  for (space::ContinuousSpace* space : heap_->GetContinuousSpaces()) {
    if (space->IsAllocSpace() &&
        !immune_spaces_.ContainsSpace(space) &&
        space->GetLiveBitmap() != nullptr) {
      if (space == heap_->GetNonMovingSpace()) {
        non_moving_space = space;
      } else {
        sweep_spaces.push_back(space);
      }
    }
  }
  // Unlikely to sweep a significant amount of non-movable objects, so we do these after the
  // other alloc spaces as an optimization.
  if (non_moving_space != nullptr) {
    sweep_spaces.push_back(non_moving_space);
  }
  // Start by sweeping the continuous spaces.
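  // For each space, walk the allocation stack: unmarked objects that belong to the space are
  // batched into chunk_free_buffer and freed kSweepArrayChunkFreeSize at a time, while objects
  // belonging to other spaces are compacted towards the front of the array for the next pass.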
  for (space::ContinuousSpace* space : sweep_spaces) {
    space::AllocSpace* alloc_space = space->AsAllocSpace();
    accounting::ContinuousSpaceBitmap* live_bitmap = space->GetLiveBitmap();
    accounting::ContinuousSpaceBitmap* mark_bitmap = space->GetMarkBitmap();
    if (swap_bitmaps) {
      std::swap(live_bitmap, mark_bitmap);
    }
    StackReference<mirror::Object>* out = objects;
    for (size_t i = 0; i < count; ++i) {
      mirror::Object* const obj = objects[i].AsMirrorPtr();
      if (kUseThreadLocalAllocationStack && obj == nullptr) {
        continue;
      }
      if (space->HasAddress(obj)) {
        // This object is in the space, remove it from the array and add it to the sweep buffer
        // if needed.
        if (!mark_bitmap->Test(obj)) {
          if (chunk_free_pos >= kSweepArrayChunkFreeSize) {
            TimingLogger::ScopedTiming t2("FreeList", GetTimings());
            freed.objects += chunk_free_pos;
            freed.bytes += alloc_space->FreeList(self, chunk_free_pos, chunk_free_buffer);
            chunk_free_pos = 0;
          }
          chunk_free_buffer[chunk_free_pos++] = obj;
        }
      } else {
        (out++)->Assign(obj);
      }
    }
    if (chunk_free_pos > 0) {
      TimingLogger::ScopedTiming t2("FreeList", GetTimings());
      freed.objects += chunk_free_pos;
      freed.bytes += alloc_space->FreeList(self, chunk_free_pos, chunk_free_buffer);
      chunk_free_pos = 0;
    }
    // All of the references which the space contained are no longer in the allocation stack;
    // update the count.
    count = out - objects;
  }
  // Handle the large object space.
  space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
  if (large_object_space != nullptr) {
    accounting::LargeObjectBitmap* large_live_objects = large_object_space->GetLiveBitmap();
    accounting::LargeObjectBitmap* large_mark_objects = large_object_space->GetMarkBitmap();
    if (swap_bitmaps) {
      std::swap(large_live_objects, large_mark_objects);
    }
    for (size_t i = 0; i < count; ++i) {
      mirror::Object* const obj = objects[i].AsMirrorPtr();
      // Handle large objects.
      if (kUseThreadLocalAllocationStack && obj == nullptr) {
        continue;
      }
      if (!large_mark_objects->Test(obj)) {
        ++freed_los.objects;
        freed_los.bytes += large_object_space->Free(self, obj);
      }
    }
  }
  {
    TimingLogger::ScopedTiming t2("RecordFree", GetTimings());
    RecordFree(freed);
    RecordFreeLOS(freed_los);
    t2.NewTiming("ResetStack");
    allocations->Reset();
  }
  sweep_array_free_buffer_mem_map_->MadviseDontNeedAndZero();
}

void MarkSweep::Sweep(bool swap_bitmaps) {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  // Ensure that nobody inserted items in the live stack after we swapped the stacks.
  CHECK_GE(live_stack_freeze_size_, GetHeap()->GetLiveStack()->Size());
  {
    TimingLogger::ScopedTiming t2("MarkAllocStackAsLive", GetTimings());
    // Mark everything allocated since the last GC as live so that we can sweep concurrently,
    // knowing that new allocations won't be marked as live.
    accounting::ObjectStack* live_stack = heap_->GetLiveStack();
    heap_->MarkAllocStackAsLive(live_stack);
    live_stack->Reset();
    DCHECK(mark_stack_->IsEmpty());
  }
  for (const auto& space : GetHeap()->GetContinuousSpaces()) {
    if (space->IsContinuousMemMapAllocSpace()) {
      space::ContinuousMemMapAllocSpace* alloc_space = space->AsContinuousMemMapAllocSpace();
      TimingLogger::ScopedTiming split(
          alloc_space->IsZygoteSpace() ? "SweepZygoteSpace" : "SweepMallocSpace",
          GetTimings());
      RecordFree(alloc_space->Sweep(swap_bitmaps));
    }
  }
  SweepLargeObjects(swap_bitmaps);
}

void MarkSweep::SweepLargeObjects(bool swap_bitmaps) {
  space::LargeObjectSpace* los = heap_->GetLargeObjectsSpace();
  if (los != nullptr) {
    TimingLogger::ScopedTiming split(__FUNCTION__, GetTimings());
    RecordFreeLOS(los->Sweep(swap_bitmaps));
  }
}

// Process the "referent" field in a java.lang.ref.Reference. If the referent has not yet been
// marked, put it on the appropriate list in the heap for later processing.
void MarkSweep::DelayReferenceReferent(ObjPtr<mirror::Class> klass, ObjPtr<mirror::Reference> ref) {
  heap_->GetReferenceProcessor()->DelayReferenceReferent(klass, ref, this);
}

class MarkVisitor {
 public:
  ALWAYS_INLINE explicit MarkVisitor(MarkSweep* const mark_sweep) : mark_sweep_(mark_sweep) {}

  ALWAYS_INLINE void operator()(mirror::Object* obj,
                                MemberOffset offset,
                                bool is_static ATTRIBUTE_UNUSED) const
      REQUIRES(Locks::heap_bitmap_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_) {
    if (kCheckLocks) {
      Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
      Locks::heap_bitmap_lock_->AssertExclusiveHeld(Thread::Current());
    }
    mark_sweep_->MarkObject(obj->GetFieldObject<mirror::Object>(offset), obj, offset);
  }

  void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const
      REQUIRES(Locks::heap_bitmap_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_) {
    if (!root->IsNull()) {
      VisitRoot(root);
    }
  }

  void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
      REQUIRES(Locks::heap_bitmap_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_) {
    if (kCheckLocks) {
      Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
      Locks::heap_bitmap_lock_->AssertExclusiveHeld(Thread::Current());
    }
    mark_sweep_->MarkObject(root->AsMirrorPtr());
  }

 private:
  MarkSweep* const mark_sweep_;
};

// Scans an object reference. Determines the type of the reference
// and dispatches to a specialized scanning routine.
void MarkSweep::ScanObject(mirror::Object* obj) {
  MarkVisitor mark_visitor(this);
  DelayReferenceReferentVisitor ref_visitor(this);
  ScanObjectVisit(obj, mark_visitor, ref_visitor);
}

void MarkSweep::ProcessMarkStackParallel(size_t thread_count) {
  Thread* self = Thread::Current();
  ThreadPool* thread_pool = GetHeap()->GetThreadPool();
  const size_t chunk_size = std::min(mark_stack_->Size() / thread_count + 1,
                                     static_cast<size_t>(MarkStackTask<false>::kMaxSize));
  CHECK_GT(chunk_size, 0U);
  // Split the current mark stack up into work tasks.
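  // Each chunk of up to chunk_size stack references becomes one MarkStackTask; tasks may in turn
  // spawn further tasks if their local mark stacks overflow while scanning.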
  for (auto* it = mark_stack_->Begin(), *end = mark_stack_->End(); it < end; ) {
    const size_t delta = std::min(static_cast<size_t>(end - it), chunk_size);
    thread_pool->AddTask(self, new MarkStackTask<false>(thread_pool, this, delta, it));
    it += delta;
  }
  thread_pool->SetMaxActiveWorkers(thread_count - 1);
  thread_pool->StartWorkers(self);
  thread_pool->Wait(self, true, true);
  thread_pool->StopWorkers(self);
  mark_stack_->Reset();
  CHECK_EQ(work_chunks_created_.LoadSequentiallyConsistent(),
           work_chunks_deleted_.LoadSequentiallyConsistent())
      << " some of the work chunks were leaked";
}

// Scan anything that's on the mark stack.
void MarkSweep::ProcessMarkStack(bool paused) {
  TimingLogger::ScopedTiming t(paused ? "(Paused)ProcessMarkStack" : __FUNCTION__, GetTimings());
  size_t thread_count = GetThreadCount(paused);
  if (kParallelProcessMarkStack && thread_count > 1 &&
      mark_stack_->Size() >= kMinimumParallelMarkStackSize) {
    ProcessMarkStackParallel(thread_count);
  } else {
    // TODO: Tune this.
    static const size_t kFifoSize = 4;
    BoundedFifoPowerOfTwo<mirror::Object*, kFifoSize> prefetch_fifo;
    for (;;) {
      mirror::Object* obj = nullptr;
      if (kUseMarkStackPrefetch) {
        while (!mark_stack_->IsEmpty() && prefetch_fifo.size() < kFifoSize) {
          mirror::Object* mark_stack_obj = mark_stack_->PopBack();
          DCHECK(mark_stack_obj != nullptr);
          __builtin_prefetch(mark_stack_obj);
          prefetch_fifo.push_back(mark_stack_obj);
        }
        if (prefetch_fifo.empty()) {
          break;
        }
        obj = prefetch_fifo.front();
        prefetch_fifo.pop_front();
      } else {
        if (mark_stack_->IsEmpty()) {
          break;
        }
        obj = mark_stack_->PopBack();
      }
      DCHECK(obj != nullptr);
      ScanObject(obj);
    }
  }
}

inline mirror::Object* MarkSweep::IsMarked(mirror::Object* object) {
  if (immune_spaces_.IsInImmuneRegion(object)) {
    return object;
  }
  if (current_space_bitmap_->HasAddress(object)) {
    return current_space_bitmap_->Test(object) ? object : nullptr;
  }
  return mark_bitmap_->Test(object) ? object : nullptr;
}

void MarkSweep::FinishPhase() {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  if (kCountScannedTypes) {
    VLOG(gc)
        << "MarkSweep scanned"
        << " no reference objects=" << no_reference_class_count_.LoadRelaxed()
        << " normal objects=" << normal_count_.LoadRelaxed()
        << " classes=" << class_count_.LoadRelaxed()
        << " object arrays=" << object_array_count_.LoadRelaxed()
        << " references=" << reference_count_.LoadRelaxed()
        << " other=" << other_count_.LoadRelaxed();
  }
  if (kCountTasks) {
    VLOG(gc) << "Total number of work chunks allocated: " << work_chunks_created_.LoadRelaxed();
  }
  if (kMeasureOverhead) {
    VLOG(gc) << "Overhead time " << PrettyDuration(overhead_time_.LoadRelaxed());
  }
  if (kProfileLargeObjects) {
    VLOG(gc) << "Large objects tested " << large_object_test_.LoadRelaxed()
             << " marked " << large_object_mark_.LoadRelaxed();
  }
  if (kCountMarkedObjects) {
    VLOG(gc) << "Marked: null=" << mark_null_count_.LoadRelaxed()
             << " immune=" << mark_immune_count_.LoadRelaxed()
             << " fastpath=" << mark_fastpath_count_.LoadRelaxed()
             << " slowpath=" << mark_slowpath_count_.LoadRelaxed();
  }
  CHECK(mark_stack_->IsEmpty());  // Ensure that the mark stack is empty.
  mark_stack_->Reset();
  Thread* const self = Thread::Current();
  ReaderMutexLock mu(self, *Locks::mutator_lock_);
  WriterMutexLock mu2(self, *Locks::heap_bitmap_lock_);
  heap_->ClearMarkedObjects();
}

void MarkSweep::RevokeAllThreadLocalBuffers() {
  if (kRevokeRosAllocThreadLocalBuffersAtCheckpoint && IsConcurrent()) {
    // If concurrent, rosalloc thread-local buffers are revoked at the
    // thread checkpoint. Bump pointer space thread-local buffers must
    // not be in use.
    GetHeap()->AssertAllBumpPointerSpaceThreadLocalBuffersAreRevoked();
  } else {
    TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
    GetHeap()->RevokeAllThreadLocalBuffers();
  }
}

}  // namespace collector
}  // namespace gc
}  // namespace art