mark_sweep.cc revision 951ec2c93c79c5539cbcc669566f0808d4460338
1/* 2 * Copyright (C) 2011 The Android Open Source Project 3 * 4 * Licensed under the Apache License, Version 2.0 (the "License"); 5 * you may not use this file except in compliance with the License. 6 * You may obtain a copy of the License at 7 * 8 * http://www.apache.org/licenses/LICENSE-2.0 9 * 10 * Unless required by applicable law or agreed to in writing, software 11 * distributed under the License is distributed on an "AS IS" BASIS, 12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 * See the License for the specific language governing permissions and 14 * limitations under the License. 15 */ 16 17#include "mark_sweep.h" 18 19#include <atomic> 20#include <functional> 21#include <numeric> 22#include <climits> 23#include <vector> 24 25#define ATRACE_TAG ATRACE_TAG_DALVIK 26#include "cutils/trace.h" 27 28#include "base/bounded_fifo.h" 29#include "base/logging.h" 30#include "base/macros.h" 31#include "base/mutex-inl.h" 32#include "base/time_utils.h" 33#include "base/timing_logger.h" 34#include "gc/accounting/card_table-inl.h" 35#include "gc/accounting/heap_bitmap-inl.h" 36#include "gc/accounting/mod_union_table.h" 37#include "gc/accounting/space_bitmap-inl.h" 38#include "gc/heap.h" 39#include "gc/reference_processor.h" 40#include "gc/space/large_object_space.h" 41#include "gc/space/space-inl.h" 42#include "mark_sweep-inl.h" 43#include "mirror/object-inl.h" 44#include "runtime.h" 45#include "scoped_thread_state_change.h" 46#include "thread-inl.h" 47#include "thread_list.h" 48 49namespace art { 50namespace gc { 51namespace collector { 52 53// Performance options. 54static constexpr bool kUseRecursiveMark = false; 55static constexpr bool kUseMarkStackPrefetch = true; 56static constexpr size_t kSweepArrayChunkFreeSize = 1024; 57static constexpr bool kPreCleanCards = true; 58 59// Parallelism options. 60static constexpr bool kParallelCardScan = true; 61static constexpr bool kParallelRecursiveMark = true; 62// Don't attempt to parallelize mark stack processing unless the mark stack is at least n 63// elements. This is temporary until we reduce the overhead caused by allocating tasks, etc.. Not 64// having this can add overhead in ProcessReferences since we may end up doing many calls of 65// ProcessMarkStack with very small mark stacks. 66static constexpr size_t kMinimumParallelMarkStackSize = 128; 67static constexpr bool kParallelProcessMarkStack = true; 68 69// Profiling and information flags. 70static constexpr bool kProfileLargeObjects = false; 71static constexpr bool kMeasureOverhead = false; 72static constexpr bool kCountTasks = false; 73static constexpr bool kCountMarkedObjects = false; 74 75// Turn off kCheckLocks when profiling the GC since it slows the GC down by up to 40%. 76static constexpr bool kCheckLocks = kDebugLocking; 77static constexpr bool kVerifyRootsMarked = kIsDebugBuild; 78 79// If true, revoke the rosalloc thread-local buffers at the 80// checkpoint, as opposed to during the pause. 81static constexpr bool kRevokeRosAllocThreadLocalBuffersAtCheckpoint = true; 82 83void MarkSweep::BindBitmaps() { 84 TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings()); 85 WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_); 86 // Mark all of the spaces we never collect as immune. 
87 for (const auto& space : GetHeap()->GetContinuousSpaces()) { 88 if (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyNeverCollect) { 89 CHECK(immune_region_.AddContinuousSpace(space)) << "Failed to add space " << *space; 90 } 91 } 92} 93 94MarkSweep::MarkSweep(Heap* heap, bool is_concurrent, const std::string& name_prefix) 95 : GarbageCollector(heap, 96 name_prefix + 97 (is_concurrent ? "concurrent mark sweep": "mark sweep")), 98 current_space_bitmap_(nullptr), mark_bitmap_(nullptr), mark_stack_(nullptr), 99 gc_barrier_(new Barrier(0)), 100 mark_stack_lock_("mark sweep mark stack lock", kMarkSweepMarkStackLock), 101 is_concurrent_(is_concurrent), live_stack_freeze_size_(0) { 102 std::string error_msg; 103 MemMap* mem_map = MemMap::MapAnonymous( 104 "mark sweep sweep array free buffer", nullptr, 105 RoundUp(kSweepArrayChunkFreeSize * sizeof(mirror::Object*), kPageSize), 106 PROT_READ | PROT_WRITE, false, false, &error_msg); 107 CHECK(mem_map != nullptr) << "Couldn't allocate sweep array free buffer: " << error_msg; 108 sweep_array_free_buffer_mem_map_.reset(mem_map); 109} 110 111void MarkSweep::InitializePhase() { 112 TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings()); 113 mark_stack_ = heap_->GetMarkStack(); 114 DCHECK(mark_stack_ != nullptr); 115 immune_region_.Reset(); 116 no_reference_class_count_.StoreRelaxed(0); 117 normal_count_.StoreRelaxed(0); 118 class_count_.StoreRelaxed(0); 119 object_array_count_.StoreRelaxed(0); 120 other_count_.StoreRelaxed(0); 121 reference_count_.StoreRelaxed(0); 122 large_object_test_.StoreRelaxed(0); 123 large_object_mark_.StoreRelaxed(0); 124 overhead_time_ .StoreRelaxed(0); 125 work_chunks_created_.StoreRelaxed(0); 126 work_chunks_deleted_.StoreRelaxed(0); 127 mark_null_count_.StoreRelaxed(0); 128 mark_immune_count_.StoreRelaxed(0); 129 mark_fastpath_count_.StoreRelaxed(0); 130 mark_slowpath_count_.StoreRelaxed(0); 131 { 132 // TODO: I don't think we should need heap bitmap lock to Get the mark bitmap. 133 ReaderMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_); 134 mark_bitmap_ = heap_->GetMarkBitmap(); 135 } 136 if (!GetCurrentIteration()->GetClearSoftReferences()) { 137 // Always clear soft references if a non-sticky collection. 138 GetCurrentIteration()->SetClearSoftReferences(GetGcType() != collector::kGcTypeSticky); 139 } 140} 141 142void MarkSweep::RunPhases() { 143 Thread* self = Thread::Current(); 144 InitializePhase(); 145 Locks::mutator_lock_->AssertNotHeld(self); 146 if (IsConcurrent()) { 147 GetHeap()->PreGcVerification(this); 148 { 149 ReaderMutexLock mu(self, *Locks::mutator_lock_); 150 MarkingPhase(); 151 } 152 ScopedPause pause(this); 153 GetHeap()->PrePauseRosAllocVerification(this); 154 PausePhase(); 155 RevokeAllThreadLocalBuffers(); 156 } else { 157 ScopedPause pause(this); 158 GetHeap()->PreGcVerificationPaused(this); 159 MarkingPhase(); 160 GetHeap()->PrePauseRosAllocVerification(this); 161 PausePhase(); 162 RevokeAllThreadLocalBuffers(); 163 } 164 { 165 // Sweeping always done concurrently, even for non concurrent mark sweep. 
166 ReaderMutexLock mu(self, *Locks::mutator_lock_); 167 ReclaimPhase(); 168 } 169 GetHeap()->PostGcVerification(this); 170 FinishPhase(); 171} 172 173void MarkSweep::ProcessReferences(Thread* self) { 174 WriterMutexLock mu(self, *Locks::heap_bitmap_lock_); 175 GetHeap()->GetReferenceProcessor()->ProcessReferences( 176 true, GetTimings(), GetCurrentIteration()->GetClearSoftReferences(), this); 177} 178 179void MarkSweep::PausePhase() { 180 TimingLogger::ScopedTiming t("(Paused)PausePhase", GetTimings()); 181 Thread* self = Thread::Current(); 182 Locks::mutator_lock_->AssertExclusiveHeld(self); 183 if (IsConcurrent()) { 184 // Handle the dirty objects if we are a concurrent GC. 185 WriterMutexLock mu(self, *Locks::heap_bitmap_lock_); 186 // Re-mark root set. 187 ReMarkRoots(); 188 // Scan dirty objects; this is only required when doing a concurrent GC. 189 RecursiveMarkDirtyObjects(true, accounting::CardTable::kCardDirty); 190 } 191 { 192 TimingLogger::ScopedTiming t2("SwapStacks", GetTimings()); 193 WriterMutexLock mu(self, *Locks::heap_bitmap_lock_); 194 heap_->SwapStacks(); 195 live_stack_freeze_size_ = heap_->GetLiveStack()->Size(); 196 // Need to revoke all the thread local allocation stacks since we just swapped the allocation 197 // stacks and don't want anybody to allocate into the live stack. 198 RevokeAllThreadLocalAllocationStacks(self); 199 } 200 heap_->PreSweepingGcVerification(this); 201 // Disallow new system weaks to prevent a race which occurs when someone adds a new system 202 // weak before we sweep them. Since this new system weak may not be marked, the GC may 203 // incorrectly sweep it. This also fixes a race where interning may attempt to return a strong 204 // reference to a string that is about to be swept. 205 Runtime::Current()->DisallowNewSystemWeaks(); 206 // Enable the reference processing slow path; this needs to be done with mutators paused since there 207 // is no lock in the GetReferent fast path. 208 GetHeap()->GetReferenceProcessor()->EnableSlowPath(); 209} 210 211void MarkSweep::PreCleanCards() { 212 // Don't do this for non concurrent GCs since they don't have any dirty cards. 213 if (kPreCleanCards && IsConcurrent()) { 214 TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings()); 215 Thread* self = Thread::Current(); 216 CHECK(!Locks::mutator_lock_->IsExclusiveHeld(self)); 217 // Process dirty cards and add dirty cards to mod union tables; this also ages cards. 218 heap_->ProcessCards(GetTimings(), false, true, false); 219 // The checkpoint root marking is required to avoid a race condition which occurs if the 220 // following happens during a reference write: 221 // 1. mutator dirties the card (write barrier) 222 // 2. GC ages the card (the above ProcessCards call) 223 // 3. GC scans the object (the RecursiveMarkDirtyObjects call below) 224 // 4. mutator writes the value (corresponding to the write barrier in 1.) 225 // This causes the GC to age the card but not necessarily mark the reference which the mutator 226 // wrote into the object stored in the card. 227 // Having the checkpoint fixes this issue since it ensures that the card mark and the 228 // reference write are visible to the GC before the card is scanned (this is due to locks being 229 // acquired / released in the checkpoint code). 230 // The other roots are also marked to help reduce the pause.
231 MarkRootsCheckpoint(self, false); 232 MarkNonThreadRoots(); 233 MarkConcurrentRoots( 234 static_cast<VisitRootFlags>(kVisitRootFlagClearRootLog | kVisitRootFlagNewRoots)); 235 // Process the newly aged cards. 236 RecursiveMarkDirtyObjects(false, accounting::CardTable::kCardDirty - 1); 237 // TODO: Empty allocation stack to reduce the number of objects we need to test / mark as live 238 // in the next GC. 239 } 240} 241 242void MarkSweep::RevokeAllThreadLocalAllocationStacks(Thread* self) { 243 if (kUseThreadLocalAllocationStack) { 244 TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings()); 245 Locks::mutator_lock_->AssertExclusiveHeld(self); 246 heap_->RevokeAllThreadLocalAllocationStacks(self); 247 } 248} 249 250void MarkSweep::MarkingPhase() { 251 TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings()); 252 Thread* self = Thread::Current(); 253 BindBitmaps(); 254 FindDefaultSpaceBitmap(); 255 // Process dirty cards and add dirty cards to mod union tables. 256 // If the GC type is non sticky, then we just clear the cards instead of ageing them. 257 heap_->ProcessCards(GetTimings(), false, true, GetGcType() != kGcTypeSticky); 258 WriterMutexLock mu(self, *Locks::heap_bitmap_lock_); 259 MarkRoots(self); 260 MarkReachableObjects(); 261 // Pre-clean dirtied cards to reduce pauses. 262 PreCleanCards(); 263} 264 265void MarkSweep::UpdateAndMarkModUnion() { 266 for (const auto& space : heap_->GetContinuousSpaces()) { 267 if (immune_region_.ContainsSpace(space)) { 268 const char* name = space->IsZygoteSpace() ? "UpdateAndMarkZygoteModUnionTable" : 269 "UpdateAndMarkImageModUnionTable"; 270 TimingLogger::ScopedTiming t(name, GetTimings()); 271 accounting::ModUnionTable* mod_union_table = heap_->FindModUnionTableFromSpace(space); 272 CHECK(mod_union_table != nullptr); 273 mod_union_table->UpdateAndMarkReferences(this); 274 } 275 } 276} 277 278void MarkSweep::MarkReachableObjects() { 279 UpdateAndMarkModUnion(); 280 // Recursively mark all the non-image bits set in the mark bitmap. 281 RecursiveMark(); 282} 283 284void MarkSweep::ReclaimPhase() { 285 TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings()); 286 Thread* const self = Thread::Current(); 287 // Process the references concurrently. 288 ProcessReferences(self); 289 SweepSystemWeaks(self); 290 Runtime* const runtime = Runtime::Current(); 291 runtime->AllowNewSystemWeaks(); 292 // Clean up class loaders after system weaks are swept since that is how we know if class 293 // unloading occurred. 294 runtime->GetClassLinker()->CleanupClassLoaders(); 295 { 296 WriterMutexLock mu(self, *Locks::heap_bitmap_lock_); 297 GetHeap()->RecordFreeRevoke(); 298 // Reclaim unmarked objects. 299 Sweep(false); 300 // Swap the live and mark bitmaps for each space which we modified. This is an 301 // optimization that enables us to not clear live bits inside of the sweep. Only swaps unbound 302 // bitmaps. 303 SwapBitmaps(); 304 // Unbind the live and mark bitmaps. 305 GetHeap()->UnBindBitmaps(); 306 } 307} 308 309void MarkSweep::FindDefaultSpaceBitmap() { 310 TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings()); 311 for (const auto& space : GetHeap()->GetContinuousSpaces()) { 312 accounting::ContinuousSpaceBitmap* bitmap = space->GetMarkBitmap(); 313 // We want to have the main space instead of non moving if possible.
314 if (bitmap != nullptr && 315 space->GetGcRetentionPolicy() == space::kGcRetentionPolicyAlwaysCollect) { 316 current_space_bitmap_ = bitmap; 317 // If we are not the non moving space exit the loop early since this will be good enough. 318 if (space != heap_->GetNonMovingSpace()) { 319 break; 320 } 321 } 322 } 323 CHECK(current_space_bitmap_ != nullptr) << "Could not find a default mark bitmap\n" 324 << heap_->DumpSpaces(); 325} 326 327void MarkSweep::ExpandMarkStack() { 328 ResizeMarkStack(mark_stack_->Capacity() * 2); 329} 330 331void MarkSweep::ResizeMarkStack(size_t new_size) { 332 // Rare case, no need to have Thread::Current be a parameter. 333 if (UNLIKELY(mark_stack_->Size() < mark_stack_->Capacity())) { 334 // Someone else acquired the lock and expanded the mark stack before us. 335 return; 336 } 337 std::vector<StackReference<mirror::Object>> temp(mark_stack_->Begin(), mark_stack_->End()); 338 CHECK_LE(mark_stack_->Size(), new_size); 339 mark_stack_->Resize(new_size); 340 for (auto& obj : temp) { 341 mark_stack_->PushBack(obj.AsMirrorPtr()); 342 } 343} 344 345mirror::Object* MarkSweep::MarkObject(mirror::Object* obj) { 346 MarkObject(obj, nullptr, MemberOffset(0)); 347 return obj; 348} 349 350inline void MarkSweep::MarkObjectNonNullParallel(mirror::Object* obj) { 351 DCHECK(obj != nullptr); 352 if (MarkObjectParallel(obj)) { 353 MutexLock mu(Thread::Current(), mark_stack_lock_); 354 if (UNLIKELY(mark_stack_->Size() >= mark_stack_->Capacity())) { 355 ExpandMarkStack(); 356 } 357 // The object must be pushed on to the mark stack. 358 mark_stack_->PushBack(obj); 359 } 360} 361 362bool MarkSweep::IsMarkedHeapReference(mirror::HeapReference<mirror::Object>* ref) { 363 return IsMarked(ref->AsMirrorPtr()); 364} 365 366class MarkSweepMarkObjectSlowPath { 367 public: 368 explicit MarkSweepMarkObjectSlowPath(MarkSweep* mark_sweep, mirror::Object* holder = nullptr, 369 MemberOffset offset = MemberOffset(0)) 370 : mark_sweep_(mark_sweep), holder_(holder), offset_(offset) { 371 } 372 373 void operator()(const mirror::Object* obj) const NO_THREAD_SAFETY_ANALYSIS { 374 if (kProfileLargeObjects) { 375 // TODO: Differentiate between marking and testing somehow. 376 ++mark_sweep_->large_object_test_; 377 ++mark_sweep_->large_object_mark_; 378 } 379 space::LargeObjectSpace* large_object_space = mark_sweep_->GetHeap()->GetLargeObjectsSpace(); 380 if (UNLIKELY(obj == nullptr || !IsAligned<kPageSize>(obj) || 381 (kIsDebugBuild && large_object_space != nullptr && 382 !large_object_space->Contains(obj)))) { 383 LOG(INTERNAL_FATAL) << "Tried to mark " << obj << " not contained by any spaces"; 384 if (holder_ != nullptr) { 385 size_t holder_size = holder_->SizeOf(); 386 ArtField* field = holder_->FindFieldByOffset(offset_); 387 LOG(INTERNAL_FATAL) << "Field info: " 388 << " holder=" << holder_ 389 << " holder is " 390 << (mark_sweep_->GetHeap()->IsLiveObjectLocked(holder_) 391 ? "alive" : "dead") 392 << " holder_size=" << holder_size 393 << " holder_type=" << PrettyTypeOf(holder_) 394 << " offset=" << offset_.Uint32Value() 395 << " field=" << (field != nullptr ? field->GetName() : "nullptr") 396 << " field_type=" 397 << (field != nullptr ? field->GetTypeDescriptor() : "") 398 << " first_ref_field_offset=" 399 << (holder_->IsClass() 400 ? holder_->AsClass()->GetFirstReferenceStaticFieldOffset( 401 sizeof(void*)) 402 : holder_->GetClass()->GetFirstReferenceInstanceFieldOffset()) 403 << " num_of_ref_fields=" 404 << (holder_->IsClass() 405 ? 
holder_->AsClass()->NumReferenceStaticFields() 406 : holder_->GetClass()->NumReferenceInstanceFields()) 407 << "\n"; 408 // Print the memory content of the holder. 409 for (size_t i = 0; i < holder_size / sizeof(uint32_t); ++i) { 410 uint32_t* p = reinterpret_cast<uint32_t*>(holder_); 411 LOG(INTERNAL_FATAL) << &p[i] << ": " << "holder+" << (i * sizeof(uint32_t)) << " = " 412 << std::hex << p[i]; 413 } 414 } 415 PrintFileToLog("/proc/self/maps", LogSeverity::INTERNAL_FATAL); 416 MemMap::DumpMaps(LOG(INTERNAL_FATAL), true); 417 { 418 LOG(INTERNAL_FATAL) << "Attempting see if it's a bad root"; 419 Thread* self = Thread::Current(); 420 if (Locks::mutator_lock_->IsExclusiveHeld(self)) { 421 mark_sweep_->VerifyRoots(); 422 } else { 423 const bool heap_bitmap_exclusive_locked = 424 Locks::heap_bitmap_lock_->IsExclusiveHeld(self); 425 if (heap_bitmap_exclusive_locked) { 426 Locks::heap_bitmap_lock_->ExclusiveUnlock(self); 427 } 428 { 429 ScopedThreadSuspension(self, kSuspended); 430 ScopedSuspendAll ssa(__FUNCTION__); 431 mark_sweep_->VerifyRoots(); 432 } 433 if (heap_bitmap_exclusive_locked) { 434 Locks::heap_bitmap_lock_->ExclusiveLock(self); 435 } 436 } 437 } 438 LOG(FATAL) << "Can't mark invalid object"; 439 } 440 } 441 442 private: 443 MarkSweep* const mark_sweep_; 444 mirror::Object* const holder_; 445 MemberOffset offset_; 446}; 447 448inline void MarkSweep::MarkObjectNonNull(mirror::Object* obj, mirror::Object* holder, 449 MemberOffset offset) { 450 DCHECK(obj != nullptr); 451 if (kUseBakerOrBrooksReadBarrier) { 452 // Verify all the objects have the correct pointer installed. 453 obj->AssertReadBarrierPointer(); 454 } 455 if (immune_region_.ContainsObject(obj)) { 456 if (kCountMarkedObjects) { 457 ++mark_immune_count_; 458 } 459 DCHECK(mark_bitmap_->Test(obj)); 460 } else if (LIKELY(current_space_bitmap_->HasAddress(obj))) { 461 if (kCountMarkedObjects) { 462 ++mark_fastpath_count_; 463 } 464 if (UNLIKELY(!current_space_bitmap_->Set(obj))) { 465 PushOnMarkStack(obj); // This object was not previously marked. 466 } 467 } else { 468 if (kCountMarkedObjects) { 469 ++mark_slowpath_count_; 470 } 471 MarkSweepMarkObjectSlowPath visitor(this, holder, offset); 472 // TODO: We already know that the object is not in the current_space_bitmap_ but MarkBitmap::Set 473 // will check again. 474 if (!mark_bitmap_->Set(obj, visitor)) { 475 PushOnMarkStack(obj); // Was not already marked, push. 476 } 477 } 478} 479 480inline void MarkSweep::PushOnMarkStack(mirror::Object* obj) { 481 if (UNLIKELY(mark_stack_->Size() >= mark_stack_->Capacity())) { 482 // Lock is not needed but is here anyways to please annotalysis. 483 MutexLock mu(Thread::Current(), mark_stack_lock_); 484 ExpandMarkStack(); 485 } 486 // The object must be pushed on to the mark stack. 487 mark_stack_->PushBack(obj); 488} 489 490inline bool MarkSweep::MarkObjectParallel(mirror::Object* obj) { 491 DCHECK(obj != nullptr); 492 if (kUseBakerOrBrooksReadBarrier) { 493 // Verify all the objects have the correct pointer installed. 494 obj->AssertReadBarrierPointer(); 495 } 496 if (immune_region_.ContainsObject(obj)) { 497 DCHECK(IsMarked(obj) != nullptr); 498 return false; 499 } 500 // Try to take advantage of locality of references within a space, failing this find the space 501 // the hard way. 
502 accounting::ContinuousSpaceBitmap* object_bitmap = current_space_bitmap_; 503 if (LIKELY(object_bitmap->HasAddress(obj))) { 504 return !object_bitmap->AtomicTestAndSet(obj); 505 } 506 MarkSweepMarkObjectSlowPath visitor(this); 507 return !mark_bitmap_->AtomicTestAndSet(obj, visitor); 508} 509 510void MarkSweep::MarkHeapReference(mirror::HeapReference<mirror::Object>* ref) { 511 MarkObject(ref->AsMirrorPtr(), nullptr, MemberOffset(0)); 512} 513 514// Used to mark objects when processing the mark stack. If an object is null, it is not marked. 515inline void MarkSweep::MarkObject(mirror::Object* obj, mirror::Object* holder, 516 MemberOffset offset) { 517 if (obj != nullptr) { 518 MarkObjectNonNull(obj, holder, offset); 519 } else if (kCountMarkedObjects) { 520 ++mark_null_count_; 521 } 522} 523 524class VerifyRootMarkedVisitor : public SingleRootVisitor { 525 public: 526 explicit VerifyRootMarkedVisitor(MarkSweep* collector) : collector_(collector) { } 527 528 void VisitRoot(mirror::Object* root, const RootInfo& info) OVERRIDE 529 SHARED_REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_) { 530 CHECK(collector_->IsMarked(root) != nullptr) << info.ToString(); 531 } 532 533 private: 534 MarkSweep* const collector_; 535}; 536 537void MarkSweep::VisitRoots(mirror::Object*** roots, size_t count, 538 const RootInfo& info ATTRIBUTE_UNUSED) { 539 for (size_t i = 0; i < count; ++i) { 540 MarkObjectNonNull(*roots[i]); 541 } 542} 543 544void MarkSweep::VisitRoots(mirror::CompressedReference<mirror::Object>** roots, size_t count, 545 const RootInfo& info ATTRIBUTE_UNUSED) { 546 for (size_t i = 0; i < count; ++i) { 547 MarkObjectNonNull(roots[i]->AsMirrorPtr()); 548 } 549} 550 551class VerifyRootVisitor : public SingleRootVisitor { 552 public: 553 void VisitRoot(mirror::Object* root, const RootInfo& info) OVERRIDE 554 SHARED_REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_) { 555 // See if the root is on any space bitmap. 556 auto* heap = Runtime::Current()->GetHeap(); 557 if (heap->GetLiveBitmap()->GetContinuousSpaceBitmap(root) == nullptr) { 558 space::LargeObjectSpace* large_object_space = heap->GetLargeObjectsSpace(); 559 if (large_object_space != nullptr && !large_object_space->Contains(root)) { 560 LOG(INTERNAL_FATAL) << "Found invalid root: " << root << " " << info; 561 } 562 } 563 } 564}; 565 566void MarkSweep::VerifyRoots() { 567 VerifyRootVisitor visitor; 568 Runtime::Current()->GetThreadList()->VisitRoots(&visitor); 569} 570 571void MarkSweep::MarkRoots(Thread* self) { 572 TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings()); 573 if (Locks::mutator_lock_->IsExclusiveHeld(self)) { 574 // If we exclusively hold the mutator lock, all threads must be suspended. 575 Runtime::Current()->VisitRoots(this); 576 RevokeAllThreadLocalAllocationStacks(self); 577 } else { 578 MarkRootsCheckpoint(self, kRevokeRosAllocThreadLocalBuffersAtCheckpoint); 579 // At this point the live stack should no longer have any mutators which push into it. 580 MarkNonThreadRoots(); 581 MarkConcurrentRoots( 582 static_cast<VisitRootFlags>(kVisitRootFlagAllRoots | kVisitRootFlagStartLoggingNewRoots)); 583 } 584} 585 586void MarkSweep::MarkNonThreadRoots() { 587 TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings()); 588 Runtime::Current()->VisitNonThreadRoots(this); 589} 590 591void MarkSweep::MarkConcurrentRoots(VisitRootFlags flags) { 592 TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings()); 593 // Visit all runtime roots and clear dirty flags. 
594 Runtime::Current()->VisitConcurrentRoots( 595 this, static_cast<VisitRootFlags>(flags | kVisitRootFlagNonMoving)); 596} 597 598class ScanObjectVisitor { 599 public: 600 explicit ScanObjectVisitor(MarkSweep* const mark_sweep) ALWAYS_INLINE 601 : mark_sweep_(mark_sweep) {} 602 603 void operator()(mirror::Object* obj) const ALWAYS_INLINE 604 SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_) { 605 if (kCheckLocks) { 606 Locks::mutator_lock_->AssertSharedHeld(Thread::Current()); 607 Locks::heap_bitmap_lock_->AssertExclusiveHeld(Thread::Current()); 608 } 609 mark_sweep_->ScanObject(obj); 610 } 611 612 private: 613 MarkSweep* const mark_sweep_; 614}; 615 616class DelayReferenceReferentVisitor { 617 public: 618 explicit DelayReferenceReferentVisitor(MarkSweep* collector) : collector_(collector) { 619 } 620 621 void operator()(mirror::Class* klass, mirror::Reference* ref) const 622 SHARED_REQUIRES(Locks::mutator_lock_) 623 REQUIRES(Locks::heap_bitmap_lock_) { 624 collector_->DelayReferenceReferent(klass, ref); 625 } 626 627 private: 628 MarkSweep* const collector_; 629}; 630 631template <bool kUseFinger = false> 632class MarkStackTask : public Task { 633 public: 634 MarkStackTask(ThreadPool* thread_pool, MarkSweep* mark_sweep, size_t mark_stack_size, 635 StackReference<mirror::Object>* mark_stack) 636 : mark_sweep_(mark_sweep), 637 thread_pool_(thread_pool), 638 mark_stack_pos_(mark_stack_size) { 639 // We may have to copy part of an existing mark stack when another mark stack overflows. 640 if (mark_stack_size != 0) { 641 DCHECK(mark_stack != nullptr); 642 // TODO: Check performance? 643 std::copy(mark_stack, mark_stack + mark_stack_size, mark_stack_); 644 } 645 if (kCountTasks) { 646 ++mark_sweep_->work_chunks_created_; 647 } 648 } 649 650 static const size_t kMaxSize = 1 * KB; 651 652 protected: 653 class MarkObjectParallelVisitor { 654 public: 655 ALWAYS_INLINE MarkObjectParallelVisitor(MarkStackTask<kUseFinger>* chunk_task, 656 MarkSweep* mark_sweep) 657 : chunk_task_(chunk_task), mark_sweep_(mark_sweep) {} 658 659 void operator()(mirror::Object* obj, MemberOffset offset, bool /* static */) const 660 ALWAYS_INLINE SHARED_REQUIRES(Locks::mutator_lock_) { 661 Mark(obj->GetFieldObject<mirror::Object>(offset)); 662 } 663 664 void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const 665 SHARED_REQUIRES(Locks::mutator_lock_) { 666 if (!root->IsNull()) { 667 VisitRoot(root); 668 } 669 } 670 671 void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const 672 SHARED_REQUIRES(Locks::mutator_lock_) { 673 if (kCheckLocks) { 674 Locks::mutator_lock_->AssertSharedHeld(Thread::Current()); 675 Locks::heap_bitmap_lock_->AssertExclusiveHeld(Thread::Current()); 676 } 677 Mark(root->AsMirrorPtr()); 678 } 679 680 private: 681 void Mark(mirror::Object* ref) const ALWAYS_INLINE SHARED_REQUIRES(Locks::mutator_lock_) { 682 if (ref != nullptr && mark_sweep_->MarkObjectParallel(ref)) { 683 if (kUseFinger) { 684 std::atomic_thread_fence(std::memory_order_seq_cst); 685 if (reinterpret_cast<uintptr_t>(ref) >= 686 static_cast<uintptr_t>(mark_sweep_->atomic_finger_.LoadRelaxed())) { 687 return; 688 } 689 } 690 chunk_task_->MarkStackPush(ref); 691 } 692 } 693 694 MarkStackTask<kUseFinger>* const chunk_task_; 695 MarkSweep* const mark_sweep_; 696 }; 697 698 class ScanObjectParallelVisitor { 699 public: 700 explicit ScanObjectParallelVisitor(MarkStackTask<kUseFinger>* chunk_task) ALWAYS_INLINE 701 : chunk_task_(chunk_task) {} 702 703 // No thread safety analysis 
since multiple threads will use this visitor. 704 void operator()(mirror::Object* obj) const SHARED_REQUIRES(Locks::mutator_lock_) 705 REQUIRES(Locks::heap_bitmap_lock_) { 706 MarkSweep* const mark_sweep = chunk_task_->mark_sweep_; 707 MarkObjectParallelVisitor mark_visitor(chunk_task_, mark_sweep); 708 DelayReferenceReferentVisitor ref_visitor(mark_sweep); 709 mark_sweep->ScanObjectVisit(obj, mark_visitor, ref_visitor); 710 } 711 712 private: 713 MarkStackTask<kUseFinger>* const chunk_task_; 714 }; 715 716 virtual ~MarkStackTask() { 717 // Make sure that we have cleared our mark stack. 718 DCHECK_EQ(mark_stack_pos_, 0U); 719 if (kCountTasks) { 720 ++mark_sweep_->work_chunks_deleted_; 721 } 722 } 723 724 MarkSweep* const mark_sweep_; 725 ThreadPool* const thread_pool_; 726 // Thread local mark stack for this task. 727 StackReference<mirror::Object> mark_stack_[kMaxSize]; 728 // Mark stack position. 729 size_t mark_stack_pos_; 730 731 ALWAYS_INLINE void MarkStackPush(mirror::Object* obj) 732 SHARED_REQUIRES(Locks::mutator_lock_) { 733 if (UNLIKELY(mark_stack_pos_ == kMaxSize)) { 734 // Mark stack overflow, give 1/2 the stack to the thread pool as a new work task. 735 mark_stack_pos_ /= 2; 736 auto* task = new MarkStackTask(thread_pool_, mark_sweep_, kMaxSize - mark_stack_pos_, 737 mark_stack_ + mark_stack_pos_); 738 thread_pool_->AddTask(Thread::Current(), task); 739 } 740 DCHECK(obj != nullptr); 741 DCHECK_LT(mark_stack_pos_, kMaxSize); 742 mark_stack_[mark_stack_pos_++].Assign(obj); 743 } 744 745 virtual void Finalize() { 746 delete this; 747 } 748 749 // Scans all of the objects 750 virtual void Run(Thread* self) SHARED_REQUIRES(Locks::mutator_lock_) 751 REQUIRES(Locks::heap_bitmap_lock_) { 752 UNUSED(self); 753 ScanObjectParallelVisitor visitor(this); 754 // TODO: Tune this. 
755 static const size_t kFifoSize = 4; 756 BoundedFifoPowerOfTwo<mirror::Object*, kFifoSize> prefetch_fifo; 757 for (;;) { 758 mirror::Object* obj = nullptr; 759 if (kUseMarkStackPrefetch) { 760 while (mark_stack_pos_ != 0 && prefetch_fifo.size() < kFifoSize) { 761 mirror::Object* const mark_stack_obj = mark_stack_[--mark_stack_pos_].AsMirrorPtr(); 762 DCHECK(mark_stack_obj != nullptr); 763 __builtin_prefetch(mark_stack_obj); 764 prefetch_fifo.push_back(mark_stack_obj); 765 } 766 if (UNLIKELY(prefetch_fifo.empty())) { 767 break; 768 } 769 obj = prefetch_fifo.front(); 770 prefetch_fifo.pop_front(); 771 } else { 772 if (UNLIKELY(mark_stack_pos_ == 0)) { 773 break; 774 } 775 obj = mark_stack_[--mark_stack_pos_].AsMirrorPtr(); 776 } 777 DCHECK(obj != nullptr); 778 visitor(obj); 779 } 780 } 781}; 782 783class CardScanTask : public MarkStackTask<false> { 784 public: 785 CardScanTask(ThreadPool* thread_pool, MarkSweep* mark_sweep, 786 accounting::ContinuousSpaceBitmap* bitmap, 787 uint8_t* begin, uint8_t* end, uint8_t minimum_age, size_t mark_stack_size, 788 StackReference<mirror::Object>* mark_stack_obj, bool clear_card) 789 : MarkStackTask<false>(thread_pool, mark_sweep, mark_stack_size, mark_stack_obj), 790 bitmap_(bitmap), 791 begin_(begin), 792 end_(end), 793 minimum_age_(minimum_age), clear_card_(clear_card) { 794 } 795 796 protected: 797 accounting::ContinuousSpaceBitmap* const bitmap_; 798 uint8_t* const begin_; 799 uint8_t* const end_; 800 const uint8_t minimum_age_; 801 const bool clear_card_; 802 803 virtual void Finalize() { 804 delete this; 805 } 806 807 virtual void Run(Thread* self) NO_THREAD_SAFETY_ANALYSIS { 808 ScanObjectParallelVisitor visitor(this); 809 accounting::CardTable* card_table = mark_sweep_->GetHeap()->GetCardTable(); 810 size_t cards_scanned = clear_card_ ? 811 card_table->Scan<true>(bitmap_, begin_, end_, visitor, minimum_age_) : 812 card_table->Scan<false>(bitmap_, begin_, end_, visitor, minimum_age_); 813 VLOG(heap) << "Parallel scanning cards " << reinterpret_cast<void*>(begin_) << " - " 814 << reinterpret_cast<void*>(end_) << " = " << cards_scanned; 815 // Finish by emptying our local mark stack. 816 MarkStackTask::Run(self); 817 } 818}; 819 820size_t MarkSweep::GetThreadCount(bool paused) const { 821 if (heap_->GetThreadPool() == nullptr || !heap_->CareAboutPauseTimes()) { 822 return 1; 823 } 824 return (paused ? heap_->GetParallelGCThreadCount() : heap_->GetConcGCThreadCount()) + 1; 825} 826 827void MarkSweep::ScanGrayObjects(bool paused, uint8_t minimum_age) { 828 accounting::CardTable* card_table = GetHeap()->GetCardTable(); 829 ThreadPool* thread_pool = GetHeap()->GetThreadPool(); 830 size_t thread_count = GetThreadCount(paused); 831 // The parallel version with only one thread is faster for card scanning, TODO: fix. 832 if (kParallelCardScan && thread_count > 1) { 833 Thread* self = Thread::Current(); 834 // Can't have a different split for each space since multiple spaces can have their cards being 835 // scanned at the same time. 836 TimingLogger::ScopedTiming t(paused ? "(Paused)ScanGrayObjects" : __FUNCTION__, 837 GetTimings()); 838 // Try to take some of the mark stack since we can pass this off to the worker tasks. 839 StackReference<mirror::Object>* mark_stack_begin = mark_stack_->Begin(); 840 StackReference<mirror::Object>* mark_stack_end = mark_stack_->End(); 841 const size_t mark_stack_size = mark_stack_end - mark_stack_begin; 842 // Estimated number of work tasks we will create. 
843 const size_t mark_stack_tasks = GetHeap()->GetContinuousSpaces().size() * thread_count; 844 DCHECK_NE(mark_stack_tasks, 0U); 845 const size_t mark_stack_delta = std::min(CardScanTask::kMaxSize / 2, 846 mark_stack_size / mark_stack_tasks + 1); 847 for (const auto& space : GetHeap()->GetContinuousSpaces()) { 848 if (space->GetMarkBitmap() == nullptr) { 849 continue; 850 } 851 uint8_t* card_begin = space->Begin(); 852 uint8_t* card_end = space->End(); 853 // Align up the end address. For example, the image space's end 854 // may not be card-size-aligned. 855 card_end = AlignUp(card_end, accounting::CardTable::kCardSize); 856 DCHECK_ALIGNED(card_begin, accounting::CardTable::kCardSize); 857 DCHECK_ALIGNED(card_end, accounting::CardTable::kCardSize); 858 // Calculate how many bytes of heap we will scan, 859 const size_t address_range = card_end - card_begin; 860 // Calculate how much address range each task gets. 861 const size_t card_delta = RoundUp(address_range / thread_count + 1, 862 accounting::CardTable::kCardSize); 863 // If paused and the space is neither zygote nor image space, we could clear the dirty 864 // cards to avoid accumulating them to increase card scanning load in the following GC 865 // cycles. We need to keep dirty cards of image space and zygote space in order to track 866 // references to the other spaces. 867 bool clear_card = paused && !space->IsZygoteSpace() && !space->IsImageSpace(); 868 // Create the worker tasks for this space. 869 while (card_begin != card_end) { 870 // Add a range of cards. 871 size_t addr_remaining = card_end - card_begin; 872 size_t card_increment = std::min(card_delta, addr_remaining); 873 // Take from the back of the mark stack. 874 size_t mark_stack_remaining = mark_stack_end - mark_stack_begin; 875 size_t mark_stack_increment = std::min(mark_stack_delta, mark_stack_remaining); 876 mark_stack_end -= mark_stack_increment; 877 mark_stack_->PopBackCount(static_cast<int32_t>(mark_stack_increment)); 878 DCHECK_EQ(mark_stack_end, mark_stack_->End()); 879 // Add the new task to the thread pool. 880 auto* task = new CardScanTask(thread_pool, this, space->GetMarkBitmap(), card_begin, 881 card_begin + card_increment, minimum_age, 882 mark_stack_increment, mark_stack_end, clear_card); 883 thread_pool->AddTask(self, task); 884 card_begin += card_increment; 885 } 886 } 887 888 // Note: the card scan below may dirty new cards (and scan them) 889 // as a side effect when a Reference object is encountered and 890 // queued during the marking. See b/11465268. 891 thread_pool->SetMaxActiveWorkers(thread_count - 1); 892 thread_pool->StartWorkers(self); 893 thread_pool->Wait(self, true, true); 894 thread_pool->StopWorkers(self); 895 } else { 896 for (const auto& space : GetHeap()->GetContinuousSpaces()) { 897 if (space->GetMarkBitmap() != nullptr) { 898 // Image spaces are handled properly since live == marked for them. 899 const char* name = nullptr; 900 switch (space->GetGcRetentionPolicy()) { 901 case space::kGcRetentionPolicyNeverCollect: 902 name = paused ? "(Paused)ScanGrayImageSpaceObjects" : "ScanGrayImageSpaceObjects"; 903 break; 904 case space::kGcRetentionPolicyFullCollect: 905 name = paused ? "(Paused)ScanGrayZygoteSpaceObjects" : "ScanGrayZygoteSpaceObjects"; 906 break; 907 case space::kGcRetentionPolicyAlwaysCollect: 908 name = paused ? 
"(Paused)ScanGrayAllocSpaceObjects" : "ScanGrayAllocSpaceObjects"; 909 break; 910 default: 911 LOG(FATAL) << "Unreachable"; 912 UNREACHABLE(); 913 } 914 TimingLogger::ScopedTiming t(name, GetTimings()); 915 ScanObjectVisitor visitor(this); 916 bool clear_card = paused && !space->IsZygoteSpace() && !space->IsImageSpace(); 917 if (clear_card) { 918 card_table->Scan<true>(space->GetMarkBitmap(), space->Begin(), space->End(), visitor, 919 minimum_age); 920 } else { 921 card_table->Scan<false>(space->GetMarkBitmap(), space->Begin(), space->End(), visitor, 922 minimum_age); 923 } 924 } 925 } 926 } 927} 928 929class RecursiveMarkTask : public MarkStackTask<false> { 930 public: 931 RecursiveMarkTask(ThreadPool* thread_pool, MarkSweep* mark_sweep, 932 accounting::ContinuousSpaceBitmap* bitmap, uintptr_t begin, uintptr_t end) 933 : MarkStackTask<false>(thread_pool, mark_sweep, 0, nullptr), bitmap_(bitmap), begin_(begin), 934 end_(end) { 935 } 936 937 protected: 938 accounting::ContinuousSpaceBitmap* const bitmap_; 939 const uintptr_t begin_; 940 const uintptr_t end_; 941 942 virtual void Finalize() { 943 delete this; 944 } 945 946 // Scans all of the objects 947 virtual void Run(Thread* self) NO_THREAD_SAFETY_ANALYSIS { 948 ScanObjectParallelVisitor visitor(this); 949 bitmap_->VisitMarkedRange(begin_, end_, visitor); 950 // Finish by emptying our local mark stack. 951 MarkStackTask::Run(self); 952 } 953}; 954 955// Populates the mark stack based on the set of marked objects and 956// recursively marks until the mark stack is emptied. 957void MarkSweep::RecursiveMark() { 958 TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings()); 959 // RecursiveMark will build the lists of known instances of the Reference classes. See 960 // DelayReferenceReferent for details. 961 if (kUseRecursiveMark) { 962 const bool partial = GetGcType() == kGcTypePartial; 963 ScanObjectVisitor scan_visitor(this); 964 auto* self = Thread::Current(); 965 ThreadPool* thread_pool = heap_->GetThreadPool(); 966 size_t thread_count = GetThreadCount(false); 967 const bool parallel = kParallelRecursiveMark && thread_count > 1; 968 mark_stack_->Reset(); 969 for (const auto& space : GetHeap()->GetContinuousSpaces()) { 970 if ((space->GetGcRetentionPolicy() == space::kGcRetentionPolicyAlwaysCollect) || 971 (!partial && space->GetGcRetentionPolicy() == space::kGcRetentionPolicyFullCollect)) { 972 current_space_bitmap_ = space->GetMarkBitmap(); 973 if (current_space_bitmap_ == nullptr) { 974 continue; 975 } 976 if (parallel) { 977 // We will use the mark stack the future. 978 // CHECK(mark_stack_->IsEmpty()); 979 // This function does not handle heap end increasing, so we must use the space end. 980 uintptr_t begin = reinterpret_cast<uintptr_t>(space->Begin()); 981 uintptr_t end = reinterpret_cast<uintptr_t>(space->End()); 982 atomic_finger_.StoreRelaxed(AtomicInteger::MaxValue()); 983 984 // Create a few worker tasks. 
985 const size_t n = thread_count * 2; 986 while (begin != end) { 987 uintptr_t start = begin; 988 uintptr_t delta = (end - begin) / n; 989 delta = RoundUp(delta, KB); 990 if (delta < 16 * KB) delta = end - begin; 991 begin += delta; 992 auto* task = new RecursiveMarkTask(thread_pool, this, current_space_bitmap_, start, 993 begin); 994 thread_pool->AddTask(self, task); 995 } 996 thread_pool->SetMaxActiveWorkers(thread_count - 1); 997 thread_pool->StartWorkers(self); 998 thread_pool->Wait(self, true, true); 999 thread_pool->StopWorkers(self); 1000 } else { 1001 // This function does not handle heap end increasing, so we must use the space end. 1002 uintptr_t begin = reinterpret_cast<uintptr_t>(space->Begin()); 1003 uintptr_t end = reinterpret_cast<uintptr_t>(space->End()); 1004 current_space_bitmap_->VisitMarkedRange(begin, end, scan_visitor); 1005 } 1006 } 1007 } 1008 } 1009 ProcessMarkStack(false); 1010} 1011 1012void MarkSweep::RecursiveMarkDirtyObjects(bool paused, uint8_t minimum_age) { 1013 ScanGrayObjects(paused, minimum_age); 1014 ProcessMarkStack(paused); 1015} 1016 1017void MarkSweep::ReMarkRoots() { 1018 TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings()); 1019 Locks::mutator_lock_->AssertExclusiveHeld(Thread::Current()); 1020 Runtime::Current()->VisitRoots(this, static_cast<VisitRootFlags>( 1021 kVisitRootFlagNewRoots | kVisitRootFlagStopLoggingNewRoots | kVisitRootFlagClearRootLog)); 1022 if (kVerifyRootsMarked) { 1023 TimingLogger::ScopedTiming t2("(Paused)VerifyRoots", GetTimings()); 1024 VerifyRootMarkedVisitor visitor(this); 1025 Runtime::Current()->VisitRoots(&visitor); 1026 } 1027} 1028 1029void MarkSweep::SweepSystemWeaks(Thread* self) { 1030 TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings()); 1031 ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_); 1032 Runtime::Current()->SweepSystemWeaks(this); 1033} 1034 1035class VerifySystemWeakVisitor : public IsMarkedVisitor { 1036 public: 1037 explicit VerifySystemWeakVisitor(MarkSweep* mark_sweep) : mark_sweep_(mark_sweep) {} 1038 1039 virtual mirror::Object* IsMarked(mirror::Object* obj) OVERRIDE 1040 SHARED_REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_) { 1041 mark_sweep_->VerifyIsLive(obj); 1042 return obj; 1043 } 1044 1045 MarkSweep* const mark_sweep_; 1046}; 1047 1048void MarkSweep::VerifyIsLive(const mirror::Object* obj) { 1049 if (!heap_->GetLiveBitmap()->Test(obj)) { 1050 // TODO: Consider live stack? Has this code bitrotted? 1051 CHECK(!heap_->allocation_stack_->Contains(obj)) 1052 << "Found dead object " << obj << "\n" << heap_->DumpSpaces(); 1053 } 1054} 1055 1056void MarkSweep::VerifySystemWeaks() { 1057 TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings()); 1058 // Verify system weaks, uses a special object visitor which returns the input object. 
1059 VerifySystemWeakVisitor visitor(this); 1060 Runtime::Current()->SweepSystemWeaks(&visitor); 1061} 1062 1063class CheckpointMarkThreadRoots : public Closure, public RootVisitor { 1064 public: 1065 CheckpointMarkThreadRoots(MarkSweep* mark_sweep, 1066 bool revoke_ros_alloc_thread_local_buffers_at_checkpoint) 1067 : mark_sweep_(mark_sweep), 1068 revoke_ros_alloc_thread_local_buffers_at_checkpoint_( 1069 revoke_ros_alloc_thread_local_buffers_at_checkpoint) { 1070 } 1071 1072 void VisitRoots(mirror::Object*** roots, size_t count, const RootInfo& info ATTRIBUTE_UNUSED) 1073 OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) 1074 REQUIRES(Locks::heap_bitmap_lock_) { 1075 for (size_t i = 0; i < count; ++i) { 1076 mark_sweep_->MarkObjectNonNullParallel(*roots[i]); 1077 } 1078 } 1079 1080 void VisitRoots(mirror::CompressedReference<mirror::Object>** roots, size_t count, 1081 const RootInfo& info ATTRIBUTE_UNUSED) 1082 OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) 1083 REQUIRES(Locks::heap_bitmap_lock_) { 1084 for (size_t i = 0; i < count; ++i) { 1085 mark_sweep_->MarkObjectNonNullParallel(roots[i]->AsMirrorPtr()); 1086 } 1087 } 1088 1089 virtual void Run(Thread* thread) OVERRIDE NO_THREAD_SAFETY_ANALYSIS { 1090 ATRACE_BEGIN("Marking thread roots"); 1091 // Note: self is not necessarily equal to thread since thread may be suspended. 1092 Thread* const self = Thread::Current(); 1093 CHECK(thread == self || thread->IsSuspended() || thread->GetState() == kWaitingPerformingGc) 1094 << thread->GetState() << " thread " << thread << " self " << self; 1095 thread->VisitRoots(this); 1096 ATRACE_END(); 1097 if (revoke_ros_alloc_thread_local_buffers_at_checkpoint_) { 1098 ATRACE_BEGIN("RevokeRosAllocThreadLocalBuffers"); 1099 mark_sweep_->GetHeap()->RevokeRosAllocThreadLocalBuffers(thread); 1100 ATRACE_END(); 1101 } 1102 // If thread is a running mutator, then act on behalf of the garbage collector. 1103 // See the code in ThreadList::RunCheckpoint. 1104 if (thread->GetState() == kRunnable) { 1105 mark_sweep_->GetBarrier().Pass(self); 1106 } 1107 } 1108 1109 private: 1110 MarkSweep* const mark_sweep_; 1111 const bool revoke_ros_alloc_thread_local_buffers_at_checkpoint_; 1112}; 1113 1114void MarkSweep::MarkRootsCheckpoint(Thread* self, 1115 bool revoke_ros_alloc_thread_local_buffers_at_checkpoint) { 1116 TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings()); 1117 CheckpointMarkThreadRoots check_point(this, revoke_ros_alloc_thread_local_buffers_at_checkpoint); 1118 ThreadList* thread_list = Runtime::Current()->GetThreadList(); 1119 // Request that the checkpoint is run on all threads, returning a count of the threads that must 1120 // run through the barrier, including self. 1121 size_t barrier_count = thread_list->RunCheckpoint(&check_point); 1122 // Release locks then wait for all mutator threads to pass the barrier. 1123 // If there are no threads to wait for, which implies that all the checkpoint functions are 1124 // finished, then there is no need to release the locks.
1125 if (barrier_count == 0) { 1126 return; 1127 } 1128 Locks::heap_bitmap_lock_->ExclusiveUnlock(self); 1129 Locks::mutator_lock_->SharedUnlock(self); 1130 { 1131 ScopedThreadStateChange tsc(self, kWaitingForCheckPointsToRun); 1132 gc_barrier_->Increment(self, barrier_count); 1133 } 1134 Locks::mutator_lock_->SharedLock(self); 1135 Locks::heap_bitmap_lock_->ExclusiveLock(self); 1136} 1137 1138void MarkSweep::SweepArray(accounting::ObjectStack* allocations, bool swap_bitmaps) { 1139 TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings()); 1140 Thread* self = Thread::Current(); 1141 mirror::Object** chunk_free_buffer = reinterpret_cast<mirror::Object**>( 1142 sweep_array_free_buffer_mem_map_->BaseBegin()); 1143 size_t chunk_free_pos = 0; 1144 ObjectBytePair freed; 1145 ObjectBytePair freed_los; 1146 // How many objects are left in the array, modified after each space is swept. 1147 StackReference<mirror::Object>* objects = allocations->Begin(); 1148 size_t count = allocations->Size(); 1149 // Change the order to ensure that the non-moving space is swept last as an optimization. 1150 std::vector<space::ContinuousSpace*> sweep_spaces; 1151 space::ContinuousSpace* non_moving_space = nullptr; 1152 for (space::ContinuousSpace* space : heap_->GetContinuousSpaces()) { 1153 if (space->IsAllocSpace() && !immune_region_.ContainsSpace(space) && 1154 space->GetLiveBitmap() != nullptr) { 1155 if (space == heap_->GetNonMovingSpace()) { 1156 non_moving_space = space; 1157 } else { 1158 sweep_spaces.push_back(space); 1159 } 1160 } 1161 } 1162 // Unlikely to sweep a significant amount of non_movable objects, so we do these after 1163 // the other alloc spaces as an optimization. 1164 if (non_moving_space != nullptr) { 1165 sweep_spaces.push_back(non_moving_space); 1166 } 1167 // Start by sweeping the continuous spaces. 1168 for (space::ContinuousSpace* space : sweep_spaces) { 1169 space::AllocSpace* alloc_space = space->AsAllocSpace(); 1170 accounting::ContinuousSpaceBitmap* live_bitmap = space->GetLiveBitmap(); 1171 accounting::ContinuousSpaceBitmap* mark_bitmap = space->GetMarkBitmap(); 1172 if (swap_bitmaps) { 1173 std::swap(live_bitmap, mark_bitmap); 1174 } 1175 StackReference<mirror::Object>* out = objects; 1176 for (size_t i = 0; i < count; ++i) { 1177 mirror::Object* const obj = objects[i].AsMirrorPtr(); 1178 if (kUseThreadLocalAllocationStack && obj == nullptr) { 1179 continue; 1180 } 1181 if (space->HasAddress(obj)) { 1182 // This object is in the space, remove it from the array and add it to the sweep buffer 1183 // if needed. 1184 if (!mark_bitmap->Test(obj)) { 1185 if (chunk_free_pos >= kSweepArrayChunkFreeSize) { 1186 TimingLogger::ScopedTiming t2("FreeList", GetTimings()); 1187 freed.objects += chunk_free_pos; 1188 freed.bytes += alloc_space->FreeList(self, chunk_free_pos, chunk_free_buffer); 1189 chunk_free_pos = 0; 1190 } 1191 chunk_free_buffer[chunk_free_pos++] = obj; 1192 } 1193 } else { 1194 (out++)->Assign(obj); 1195 } 1196 } 1197 if (chunk_free_pos > 0) { 1198 TimingLogger::ScopedTiming t2("FreeList", GetTimings()); 1199 freed.objects += chunk_free_pos; 1200 freed.bytes += alloc_space->FreeList(self, chunk_free_pos, chunk_free_buffer); 1201 chunk_free_pos = 0; 1202 } 1203 // All of the references which the space contained are no longer in the allocation stack; update 1204 // the count. 1205 count = out - objects; 1206 } 1207 // Handle the large object space.
1208 space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace(); 1209 if (large_object_space != nullptr) { 1210 accounting::LargeObjectBitmap* large_live_objects = large_object_space->GetLiveBitmap(); 1211 accounting::LargeObjectBitmap* large_mark_objects = large_object_space->GetMarkBitmap(); 1212 if (swap_bitmaps) { 1213 std::swap(large_live_objects, large_mark_objects); 1214 } 1215 for (size_t i = 0; i < count; ++i) { 1216 mirror::Object* const obj = objects[i].AsMirrorPtr(); 1217 // Handle large objects. 1218 if (kUseThreadLocalAllocationStack && obj == nullptr) { 1219 continue; 1220 } 1221 if (!large_mark_objects->Test(obj)) { 1222 ++freed_los.objects; 1223 freed_los.bytes += large_object_space->Free(self, obj); 1224 } 1225 } 1226 } 1227 { 1228 TimingLogger::ScopedTiming t2("RecordFree", GetTimings()); 1229 RecordFree(freed); 1230 RecordFreeLOS(freed_los); 1231 t2.NewTiming("ResetStack"); 1232 allocations->Reset(); 1233 } 1234 sweep_array_free_buffer_mem_map_->MadviseDontNeedAndZero(); 1235} 1236 1237void MarkSweep::Sweep(bool swap_bitmaps) { 1238 TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings()); 1239 // Ensure that nobody inserted items in the live stack after we swapped the stacks. 1240 CHECK_GE(live_stack_freeze_size_, GetHeap()->GetLiveStack()->Size()); 1241 { 1242 TimingLogger::ScopedTiming t2("MarkAllocStackAsLive", GetTimings()); 1243 // Mark everything allocated since the last as GC live so that we can sweep concurrently, 1244 // knowing that new allocations won't be marked as live. 1245 accounting::ObjectStack* live_stack = heap_->GetLiveStack(); 1246 heap_->MarkAllocStackAsLive(live_stack); 1247 live_stack->Reset(); 1248 DCHECK(mark_stack_->IsEmpty()); 1249 } 1250 for (const auto& space : GetHeap()->GetContinuousSpaces()) { 1251 if (space->IsContinuousMemMapAllocSpace()) { 1252 space::ContinuousMemMapAllocSpace* alloc_space = space->AsContinuousMemMapAllocSpace(); 1253 TimingLogger::ScopedTiming split( 1254 alloc_space->IsZygoteSpace() ? "SweepZygoteSpace" : "SweepMallocSpace", GetTimings()); 1255 RecordFree(alloc_space->Sweep(swap_bitmaps)); 1256 } 1257 } 1258 SweepLargeObjects(swap_bitmaps); 1259} 1260 1261void MarkSweep::SweepLargeObjects(bool swap_bitmaps) { 1262 space::LargeObjectSpace* los = heap_->GetLargeObjectsSpace(); 1263 if (los != nullptr) { 1264 TimingLogger::ScopedTiming split(__FUNCTION__, GetTimings()); 1265 RecordFreeLOS(los->Sweep(swap_bitmaps)); 1266 } 1267} 1268 1269// Process the "referent" field in a java.lang.ref.Reference. If the referent has not yet been 1270// marked, put it on the appropriate list in the heap for later processing. 
1271void MarkSweep::DelayReferenceReferent(mirror::Class* klass, mirror::Reference* ref) { 1272 heap_->GetReferenceProcessor()->DelayReferenceReferent(klass, ref, this); 1273} 1274 1275class MarkVisitor { 1276 public: 1277 explicit MarkVisitor(MarkSweep* const mark_sweep) ALWAYS_INLINE : mark_sweep_(mark_sweep) { 1278 } 1279 1280 void operator()(mirror::Object* obj, MemberOffset offset, bool is_static ATTRIBUTE_UNUSED) const 1281 ALWAYS_INLINE SHARED_REQUIRES(Locks::mutator_lock_) 1282 REQUIRES(Locks::heap_bitmap_lock_) { 1283 if (kCheckLocks) { 1284 Locks::mutator_lock_->AssertSharedHeld(Thread::Current()); 1285 Locks::heap_bitmap_lock_->AssertExclusiveHeld(Thread::Current()); 1286 } 1287 mark_sweep_->MarkObject(obj->GetFieldObject<mirror::Object>(offset), obj, offset); 1288 } 1289 1290 void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const 1291 SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_) { 1292 if (!root->IsNull()) { 1293 VisitRoot(root); 1294 } 1295 } 1296 1297 void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const 1298 SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_) { 1299 if (kCheckLocks) { 1300 Locks::mutator_lock_->AssertSharedHeld(Thread::Current()); 1301 Locks::heap_bitmap_lock_->AssertExclusiveHeld(Thread::Current()); 1302 } 1303 mark_sweep_->MarkObject(root->AsMirrorPtr()); 1304 } 1305 1306 private: 1307 MarkSweep* const mark_sweep_; 1308}; 1309 1310// Scans an object reference. Determines the type of the reference 1311// and dispatches to a specialized scanning routine. 1312void MarkSweep::ScanObject(mirror::Object* obj) { 1313 MarkVisitor mark_visitor(this); 1314 DelayReferenceReferentVisitor ref_visitor(this); 1315 ScanObjectVisit(obj, mark_visitor, ref_visitor); 1316} 1317 1318void MarkSweep::ProcessMarkStackParallel(size_t thread_count) { 1319 Thread* self = Thread::Current(); 1320 ThreadPool* thread_pool = GetHeap()->GetThreadPool(); 1321 const size_t chunk_size = std::min(mark_stack_->Size() / thread_count + 1, 1322 static_cast<size_t>(MarkStackTask<false>::kMaxSize)); 1323 CHECK_GT(chunk_size, 0U); 1324 // Split the current mark stack up into work tasks. 1325 for (auto* it = mark_stack_->Begin(), *end = mark_stack_->End(); it < end; ) { 1326 const size_t delta = std::min(static_cast<size_t>(end - it), chunk_size); 1327 thread_pool->AddTask(self, new MarkStackTask<false>(thread_pool, this, delta, it)); 1328 it += delta; 1329 } 1330 thread_pool->SetMaxActiveWorkers(thread_count - 1); 1331 thread_pool->StartWorkers(self); 1332 thread_pool->Wait(self, true, true); 1333 thread_pool->StopWorkers(self); 1334 mark_stack_->Reset(); 1335 CHECK_EQ(work_chunks_created_.LoadSequentiallyConsistent(), 1336 work_chunks_deleted_.LoadSequentiallyConsistent()) 1337 << " some of the work chunks were leaked"; 1338} 1339 1340// Scan anything that's on the mark stack. 1341void MarkSweep::ProcessMarkStack(bool paused) { 1342 TimingLogger::ScopedTiming t(paused ? "(Paused)ProcessMarkStack" : __FUNCTION__, GetTimings()); 1343 size_t thread_count = GetThreadCount(paused); 1344 if (kParallelProcessMarkStack && thread_count > 1 && 1345 mark_stack_->Size() >= kMinimumParallelMarkStackSize) { 1346 ProcessMarkStackParallel(thread_count); 1347 } else { 1348 // TODO: Tune this. 
1349 static const size_t kFifoSize = 4; 1350 BoundedFifoPowerOfTwo<mirror::Object*, kFifoSize> prefetch_fifo; 1351 for (;;) { 1352 mirror::Object* obj = nullptr; 1353 if (kUseMarkStackPrefetch) { 1354 while (!mark_stack_->IsEmpty() && prefetch_fifo.size() < kFifoSize) { 1355 mirror::Object* mark_stack_obj = mark_stack_->PopBack(); 1356 DCHECK(mark_stack_obj != nullptr); 1357 __builtin_prefetch(mark_stack_obj); 1358 prefetch_fifo.push_back(mark_stack_obj); 1359 } 1360 if (prefetch_fifo.empty()) { 1361 break; 1362 } 1363 obj = prefetch_fifo.front(); 1364 prefetch_fifo.pop_front(); 1365 } else { 1366 if (mark_stack_->IsEmpty()) { 1367 break; 1368 } 1369 obj = mark_stack_->PopBack(); 1370 } 1371 DCHECK(obj != nullptr); 1372 ScanObject(obj); 1373 } 1374 } 1375} 1376 1377inline mirror::Object* MarkSweep::IsMarked(mirror::Object* object) { 1378 if (immune_region_.ContainsObject(object)) { 1379 return object; 1380 } 1381 if (current_space_bitmap_->HasAddress(object)) { 1382 return current_space_bitmap_->Test(object) ? object : nullptr; 1383 } 1384 return mark_bitmap_->Test(object) ? object : nullptr; 1385} 1386 1387void MarkSweep::FinishPhase() { 1388 TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings()); 1389 if (kCountScannedTypes) { 1390 VLOG(gc) 1391 << "MarkSweep scanned" 1392 << " no reference objects=" << no_reference_class_count_.LoadRelaxed() 1393 << " normal objects=" << normal_count_.LoadRelaxed() 1394 << " classes=" << class_count_.LoadRelaxed() 1395 << " object arrays=" << object_array_count_.LoadRelaxed() 1396 << " references=" << reference_count_.LoadRelaxed() 1397 << " other=" << other_count_.LoadRelaxed(); 1398 } 1399 if (kCountTasks) { 1400 VLOG(gc) << "Total number of work chunks allocated: " << work_chunks_created_.LoadRelaxed(); 1401 } 1402 if (kMeasureOverhead) { 1403 VLOG(gc) << "Overhead time " << PrettyDuration(overhead_time_.LoadRelaxed()); 1404 } 1405 if (kProfileLargeObjects) { 1406 VLOG(gc) << "Large objects tested " << large_object_test_.LoadRelaxed() 1407 << " marked " << large_object_mark_.LoadRelaxed(); 1408 } 1409 if (kCountMarkedObjects) { 1410 VLOG(gc) << "Marked: null=" << mark_null_count_.LoadRelaxed() 1411 << " immune=" << mark_immune_count_.LoadRelaxed() 1412 << " fastpath=" << mark_fastpath_count_.LoadRelaxed() 1413 << " slowpath=" << mark_slowpath_count_.LoadRelaxed(); 1414 } 1415 CHECK(mark_stack_->IsEmpty()); // Ensure that the mark stack is empty. 1416 mark_stack_->Reset(); 1417 WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_); 1418 heap_->ClearMarkedObjects(); 1419} 1420 1421void MarkSweep::RevokeAllThreadLocalBuffers() { 1422 if (kRevokeRosAllocThreadLocalBuffersAtCheckpoint && IsConcurrent()) { 1423 // If concurrent, rosalloc thread-local buffers are revoked at the 1424 // thread checkpoint. Bump pointer space thread-local buffers must 1425 // not be in use. 1426 GetHeap()->AssertAllBumpPointerSpaceThreadLocalBuffersAreRevoked(); 1427 } else { 1428 TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings()); 1429 GetHeap()->RevokeAllThreadLocalBuffers(); 1430 } 1431} 1432 1433} // namespace collector 1434} // namespace gc 1435} // namespace art 1436