mark_sweep.cc revision 90443477f9a0061581c420775ce3b7eeae7468bc
/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "mark_sweep.h"

#include <atomic>
#include <functional>
#include <numeric>
#include <climits>
#include <vector>

#define ATRACE_TAG ATRACE_TAG_DALVIK
#include "cutils/trace.h"

#include "base/bounded_fifo.h"
#include "base/logging.h"
#include "base/macros.h"
#include "base/mutex-inl.h"
#include "base/time_utils.h"
#include "base/timing_logger.h"
#include "gc/accounting/card_table-inl.h"
#include "gc/accounting/heap_bitmap-inl.h"
#include "gc/accounting/mod_union_table.h"
#include "gc/accounting/space_bitmap-inl.h"
#include "gc/heap.h"
#include "gc/reference_processor.h"
#include "gc/space/large_object_space.h"
#include "gc/space/space-inl.h"
#include "mark_sweep-inl.h"
#include "mirror/object-inl.h"
#include "runtime.h"
#include "scoped_thread_state_change.h"
#include "thread-inl.h"
#include "thread_list.h"

namespace art {
namespace gc {
namespace collector {

// Performance options.
static constexpr bool kUseRecursiveMark = false;
static constexpr bool kUseMarkStackPrefetch = true;
static constexpr size_t kSweepArrayChunkFreeSize = 1024;
static constexpr bool kPreCleanCards = true;

// Parallelism options.
static constexpr bool kParallelCardScan = true;
static constexpr bool kParallelRecursiveMark = true;
// Don't attempt to parallelize mark stack processing unless the mark stack is at least n
// elements. This is temporary until we reduce the overhead caused by allocating tasks, etc. Not
// having this can add overhead in ProcessReferences since we may end up doing many calls of
// ProcessMarkStack with very small mark stacks.
static constexpr size_t kMinimumParallelMarkStackSize = 128;
static constexpr bool kParallelProcessMarkStack = true;

// Profiling and information flags.
static constexpr bool kProfileLargeObjects = false;
static constexpr bool kMeasureOverhead = false;
static constexpr bool kCountTasks = false;
static constexpr bool kCountJavaLangRefs = false;
static constexpr bool kCountMarkedObjects = false;

// Turn off kCheckLocks when profiling the GC since it slows the GC down by up to 40%.
static constexpr bool kCheckLocks = kDebugLocking;
static constexpr bool kVerifyRootsMarked = kIsDebugBuild;

// If true, revoke the rosalloc thread-local buffers at the
// checkpoint, as opposed to during the pause.
static constexpr bool kRevokeRosAllocThreadLocalBuffersAtCheckpoint = true;

void MarkSweep::BindBitmaps() {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
  // Mark all of the spaces we never collect as immune.
  for (const auto& space : GetHeap()->GetContinuousSpaces()) {
    if (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyNeverCollect) {
      CHECK(immune_region_.AddContinuousSpace(space)) << "Failed to add space " << *space;
    }
  }
}

MarkSweep::MarkSweep(Heap* heap, bool is_concurrent, const std::string& name_prefix)
    : GarbageCollector(heap,
                       name_prefix +
                       (is_concurrent ? "concurrent mark sweep" : "mark sweep")),
      current_space_bitmap_(nullptr), mark_bitmap_(nullptr), mark_stack_(nullptr),
      gc_barrier_(new Barrier(0)),
      mark_stack_lock_("mark sweep mark stack lock", kMarkSweepMarkStackLock),
      is_concurrent_(is_concurrent), live_stack_freeze_size_(0) {
  std::string error_msg;
  MemMap* mem_map = MemMap::MapAnonymous(
      "mark sweep sweep array free buffer", nullptr,
      RoundUp(kSweepArrayChunkFreeSize * sizeof(mirror::Object*), kPageSize),
      PROT_READ | PROT_WRITE, false, false, &error_msg);
  CHECK(mem_map != nullptr) << "Couldn't allocate sweep array free buffer: " << error_msg;
  sweep_array_free_buffer_mem_map_.reset(mem_map);
}

void MarkSweep::InitializePhase() {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  mark_stack_ = heap_->GetMarkStack();
  DCHECK(mark_stack_ != nullptr);
  immune_region_.Reset();
  class_count_.StoreRelaxed(0);
  array_count_.StoreRelaxed(0);
  other_count_.StoreRelaxed(0);
  large_object_test_.StoreRelaxed(0);
  large_object_mark_.StoreRelaxed(0);
  overhead_time_.StoreRelaxed(0);
  work_chunks_created_.StoreRelaxed(0);
  work_chunks_deleted_.StoreRelaxed(0);
  reference_count_.StoreRelaxed(0);
  mark_null_count_.StoreRelaxed(0);
  mark_immune_count_.StoreRelaxed(0);
  mark_fastpath_count_.StoreRelaxed(0);
  mark_slowpath_count_.StoreRelaxed(0);
  {
    // TODO: I don't think we should need heap bitmap lock to Get the mark bitmap.
    ReaderMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
    mark_bitmap_ = heap_->GetMarkBitmap();
  }
  if (!GetCurrentIteration()->GetClearSoftReferences()) {
    // Always clear soft references if this is a non-sticky collection.
    GetCurrentIteration()->SetClearSoftReferences(GetGcType() != collector::kGcTypeSticky);
  }
}

void MarkSweep::RunPhases() {
  Thread* self = Thread::Current();
  InitializePhase();
  Locks::mutator_lock_->AssertNotHeld(self);
  if (IsConcurrent()) {
    GetHeap()->PreGcVerification(this);
    {
      ReaderMutexLock mu(self, *Locks::mutator_lock_);
      MarkingPhase();
    }
    ScopedPause pause(this);
    GetHeap()->PrePauseRosAllocVerification(this);
    PausePhase();
    RevokeAllThreadLocalBuffers();
  } else {
    ScopedPause pause(this);
    GetHeap()->PreGcVerificationPaused(this);
    MarkingPhase();
    GetHeap()->PrePauseRosAllocVerification(this);
    PausePhase();
    RevokeAllThreadLocalBuffers();
  }
  {
    // Sweeping is always done concurrently, even for non-concurrent mark sweep.
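    // Note: taking the mutator lock in shared (reader) mode here is what allows mutator threads
    // to keep running while ReclaimPhase frees unmarked objects.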
    ReaderMutexLock mu(self, *Locks::mutator_lock_);
    ReclaimPhase();
  }
  GetHeap()->PostGcVerification(this);
  FinishPhase();
}

void MarkSweep::ProcessReferences(Thread* self) {
  WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
  GetHeap()->GetReferenceProcessor()->ProcessReferences(
      true, GetTimings(), GetCurrentIteration()->GetClearSoftReferences(), this);
}

void MarkSweep::PausePhase() {
  TimingLogger::ScopedTiming t("(Paused)PausePhase", GetTimings());
  Thread* self = Thread::Current();
  Locks::mutator_lock_->AssertExclusiveHeld(self);
  if (IsConcurrent()) {
    // Handle the dirty objects if we are a concurrent GC.
    WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
    // Re-mark root set.
    ReMarkRoots();
    // Scan dirty objects; this is only required if we are not doing concurrent GC.
    RecursiveMarkDirtyObjects(true, accounting::CardTable::kCardDirty);
  }
  {
    TimingLogger::ScopedTiming t2("SwapStacks", GetTimings());
    WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
    heap_->SwapStacks(self);
    live_stack_freeze_size_ = heap_->GetLiveStack()->Size();
    // Need to revoke all the thread local allocation stacks since we just swapped the allocation
    // stacks and don't want anybody to allocate into the live stack.
    RevokeAllThreadLocalAllocationStacks(self);
  }
  heap_->PreSweepingGcVerification(this);
  // Disallow new system weaks to prevent a race which occurs when someone adds a new system
  // weak before we sweep them. Since this new system weak may not be marked, the GC may
  // incorrectly sweep it. This also fixes a race where interning may attempt to return a strong
  // reference to a string that is about to be swept.
  Runtime::Current()->DisallowNewSystemWeaks();
  // Enable the reference processing slow path; this needs to be done with mutators paused since
  // there is no lock in the GetReferent fast path.
  GetHeap()->GetReferenceProcessor()->EnableSlowPath();
}

void MarkSweep::PreCleanCards() {
  // Don't do this for non-concurrent GCs since they don't have any dirty cards.
  if (kPreCleanCards && IsConcurrent()) {
    TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
    Thread* self = Thread::Current();
    CHECK(!Locks::mutator_lock_->IsExclusiveHeld(self));
    // Process dirty cards and add dirty cards to mod union tables, also ages cards.
    heap_->ProcessCards(GetTimings(), false, true, false);
    // The checkpoint root marking is required to avoid a race condition which occurs if the
    // following happens during a reference write:
    // 1. mutator dirties the card (write barrier)
    // 2. GC ages the card (the above ProcessCards call)
    // 3. GC scans the object (the RecursiveMarkDirtyObjects call below)
    // 4. mutator writes the value (corresponding to the write barrier in 1.)
    // This causes the GC to age the card but not necessarily mark the reference which the mutator
    // wrote into the object stored in the card.
    // Having the checkpoint fixes this issue since it ensures that the card mark and the
    // reference write are visible to the GC before the card is scanned (this is due to locks being
    // acquired / released in the checkpoint code).
    // The other roots are also marked to help reduce the pause.
    MarkRootsCheckpoint(self, false);
    MarkNonThreadRoots();
    MarkConcurrentRoots(
        static_cast<VisitRootFlags>(kVisitRootFlagClearRootLog | kVisitRootFlagNewRoots));
    // Process the newly aged cards.
    RecursiveMarkDirtyObjects(false, accounting::CardTable::kCardDirty - 1);
    // TODO: Empty allocation stack to reduce the number of objects we need to test / mark as live
    // in the next GC.
  }
}

void MarkSweep::RevokeAllThreadLocalAllocationStacks(Thread* self) {
  if (kUseThreadLocalAllocationStack) {
    TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
    Locks::mutator_lock_->AssertExclusiveHeld(self);
    heap_->RevokeAllThreadLocalAllocationStacks(self);
  }
}

void MarkSweep::MarkingPhase() {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  Thread* self = Thread::Current();
  BindBitmaps();
  FindDefaultSpaceBitmap();
  // Process dirty cards and add dirty cards to mod union tables.
  // If the GC type is non-sticky, then we just clear the cards instead of ageing them.
  heap_->ProcessCards(GetTimings(), false, true, GetGcType() != kGcTypeSticky);
  WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
  MarkRoots(self);
  MarkReachableObjects();
  // Pre-clean dirtied cards to reduce pauses.
  PreCleanCards();
}

void MarkSweep::UpdateAndMarkModUnion() {
  for (const auto& space : heap_->GetContinuousSpaces()) {
    if (immune_region_.ContainsSpace(space)) {
      const char* name = space->IsZygoteSpace() ? "UpdateAndMarkZygoteModUnionTable" :
          "UpdateAndMarkImageModUnionTable";
      TimingLogger::ScopedTiming t(name, GetTimings());
      accounting::ModUnionTable* mod_union_table = heap_->FindModUnionTableFromSpace(space);
      CHECK(mod_union_table != nullptr);
      mod_union_table->UpdateAndMarkReferences(this);
    }
  }
}

void MarkSweep::MarkReachableObjects() {
  UpdateAndMarkModUnion();
  // Recursively mark all the non-image bits set in the mark bitmap.
  RecursiveMark();
}

void MarkSweep::ReclaimPhase() {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  Thread* self = Thread::Current();
  // Process the references concurrently.
  ProcessReferences(self);
  SweepSystemWeaks(self);
  Runtime::Current()->AllowNewSystemWeaks();
  {
    WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
    GetHeap()->RecordFreeRevoke();
    // Reclaim unmarked objects.
    Sweep(false);
    // Swap the live and mark bitmaps for each space which we modified. This is an
    // optimization that enables us to not clear live bits inside of the sweep. Only swaps unbound
    // bitmaps.
    SwapBitmaps();
    // Unbind the live and mark bitmaps.
    GetHeap()->UnBindBitmaps();
  }
}

void MarkSweep::FindDefaultSpaceBitmap() {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  for (const auto& space : GetHeap()->GetContinuousSpaces()) {
    accounting::ContinuousSpaceBitmap* bitmap = space->GetMarkBitmap();
    // We want to have the main space instead of the non-moving space if possible.
    if (bitmap != nullptr &&
        space->GetGcRetentionPolicy() == space::kGcRetentionPolicyAlwaysCollect) {
      current_space_bitmap_ = bitmap;
      // If this is not the non-moving space, exit the loop early since it will be good enough.
      if (space != heap_->GetNonMovingSpace()) {
        break;
      }
    }
  }
  CHECK(current_space_bitmap_ != nullptr) << "Could not find a default mark bitmap\n"
      << heap_->DumpSpaces();
}

void MarkSweep::ExpandMarkStack() {
  ResizeMarkStack(mark_stack_->Capacity() * 2);
}

void MarkSweep::ResizeMarkStack(size_t new_size) {
  // Rare case, no need to have Thread::Current be a parameter.
  if (UNLIKELY(mark_stack_->Size() < mark_stack_->Capacity())) {
    // Someone else acquired the lock and expanded the mark stack before us.
    return;
  }
  std::vector<StackReference<mirror::Object>> temp(mark_stack_->Begin(), mark_stack_->End());
  CHECK_LE(mark_stack_->Size(), new_size);
  mark_stack_->Resize(new_size);
  for (auto& obj : temp) {
    mark_stack_->PushBack(obj.AsMirrorPtr());
  }
}

mirror::Object* MarkSweep::MarkObject(mirror::Object* obj) {
  MarkObject(obj, nullptr, MemberOffset(0));
  return obj;
}

inline void MarkSweep::MarkObjectNonNullParallel(mirror::Object* obj) {
  DCHECK(obj != nullptr);
  if (MarkObjectParallel(obj)) {
    MutexLock mu(Thread::Current(), mark_stack_lock_);
    if (UNLIKELY(mark_stack_->Size() >= mark_stack_->Capacity())) {
      ExpandMarkStack();
    }
    // The object must be pushed on to the mark stack.
    mark_stack_->PushBack(obj);
  }
}

bool MarkSweep::IsMarkedHeapReference(mirror::HeapReference<mirror::Object>* ref) {
  return IsMarked(ref->AsMirrorPtr());
}

class MarkSweepMarkObjectSlowPath {
 public:
  explicit MarkSweepMarkObjectSlowPath(MarkSweep* mark_sweep, mirror::Object* holder = nullptr,
                                       MemberOffset offset = MemberOffset(0))
      : mark_sweep_(mark_sweep), holder_(holder), offset_(offset) {
  }

  void operator()(const mirror::Object* obj) const ALWAYS_INLINE NO_THREAD_SAFETY_ANALYSIS {
    if (kProfileLargeObjects) {
      // TODO: Differentiate between marking and testing somehow.
      ++mark_sweep_->large_object_test_;
      ++mark_sweep_->large_object_mark_;
    }
    space::LargeObjectSpace* large_object_space = mark_sweep_->GetHeap()->GetLargeObjectsSpace();
    if (UNLIKELY(obj == nullptr || !IsAligned<kPageSize>(obj) ||
                 (kIsDebugBuild && large_object_space != nullptr &&
                  !large_object_space->Contains(obj)))) {
      LOG(INTERNAL_FATAL) << "Tried to mark " << obj << " not contained by any spaces";
      if (holder_ != nullptr) {
        size_t holder_size = holder_->SizeOf();
        ArtField* field = holder_->FindFieldByOffset(offset_);
        LOG(INTERNAL_FATAL) << "Field info: "
                            << " holder=" << holder_
                            << " holder is "
                            << (mark_sweep_->GetHeap()->IsLiveObjectLocked(holder_)
                                    ? "alive" : "dead")
                            << " holder_size=" << holder_size
                            << " holder_type=" << PrettyTypeOf(holder_)
                            << " offset=" << offset_.Uint32Value()
                            << " field=" << (field != nullptr ? field->GetName() : "nullptr")
                            << " field_type="
                            << (field != nullptr ? field->GetTypeDescriptor() : "")
                            << " first_ref_field_offset="
                            << (holder_->IsClass()
                                    ? holder_->AsClass()->GetFirstReferenceStaticFieldOffset(
                                          sizeof(void*))
                                    : holder_->GetClass()->GetFirstReferenceInstanceFieldOffset())
                            << " num_of_ref_fields="
                            << (holder_->IsClass()
                                    ? holder_->AsClass()->NumReferenceStaticFields()
                                    : holder_->GetClass()->NumReferenceInstanceFields())
                            << "\n";
        // Print the memory content of the holder.
        for (size_t i = 0; i < holder_size / sizeof(uint32_t); ++i) {
          uint32_t* p = reinterpret_cast<uint32_t*>(holder_);
          LOG(INTERNAL_FATAL) << &p[i] << ": " << "holder+" << (i * sizeof(uint32_t)) << " = "
                              << std::hex << p[i];
        }
      }
      PrintFileToLog("/proc/self/maps", LogSeverity::INTERNAL_FATAL);
      MemMap::DumpMaps(LOG(INTERNAL_FATAL), true);
      {
        LOG(INTERNAL_FATAL) << "Attempting to see if it's a bad root";
        Thread* self = Thread::Current();
        if (Locks::mutator_lock_->IsExclusiveHeld(self)) {
          mark_sweep_->VerifyRoots();
        } else {
          const bool heap_bitmap_exclusive_locked =
              Locks::heap_bitmap_lock_->IsExclusiveHeld(self);
          if (heap_bitmap_exclusive_locked) {
            Locks::heap_bitmap_lock_->ExclusiveUnlock(self);
          }
          Locks::mutator_lock_->SharedUnlock(self);
          ThreadList* tl = Runtime::Current()->GetThreadList();
          tl->SuspendAll(__FUNCTION__);
          mark_sweep_->VerifyRoots();
          tl->ResumeAll();
          Locks::mutator_lock_->SharedLock(self);
          if (heap_bitmap_exclusive_locked) {
            Locks::heap_bitmap_lock_->ExclusiveLock(self);
          }
        }
      }
      LOG(FATAL) << "Can't mark invalid object";
    }
  }

 private:
  MarkSweep* const mark_sweep_;
  mirror::Object* const holder_;
  MemberOffset offset_;
};

inline void MarkSweep::MarkObjectNonNull(mirror::Object* obj, mirror::Object* holder,
                                         MemberOffset offset) {
  DCHECK(obj != nullptr);
  if (kUseBakerOrBrooksReadBarrier) {
    // Verify all the objects have the correct pointer installed.
    obj->AssertReadBarrierPointer();
  }
  if (immune_region_.ContainsObject(obj)) {
    if (kCountMarkedObjects) {
      ++mark_immune_count_;
    }
    DCHECK(mark_bitmap_->Test(obj));
  } else if (LIKELY(current_space_bitmap_->HasAddress(obj))) {
    if (kCountMarkedObjects) {
      ++mark_fastpath_count_;
    }
    if (UNLIKELY(!current_space_bitmap_->Set(obj))) {
      PushOnMarkStack(obj);  // This object was not previously marked.
    }
  } else {
    if (kCountMarkedObjects) {
      ++mark_slowpath_count_;
    }
    MarkSweepMarkObjectSlowPath visitor(this, holder, offset);
    // TODO: We already know that the object is not in the current_space_bitmap_ but
    // MarkBitmap::Set will check again.
    if (!mark_bitmap_->Set(obj, visitor)) {
      PushOnMarkStack(obj);  // Was not already marked, push.
    }
  }
}

inline void MarkSweep::PushOnMarkStack(mirror::Object* obj) {
  if (UNLIKELY(mark_stack_->Size() >= mark_stack_->Capacity())) {
    // Lock is not needed but is here anyway to please annotalysis.
    MutexLock mu(Thread::Current(), mark_stack_lock_);
    ExpandMarkStack();
  }
  // The object must be pushed on to the mark stack.
  mark_stack_->PushBack(obj);
}

inline bool MarkSweep::MarkObjectParallel(mirror::Object* obj) {
  DCHECK(obj != nullptr);
  if (kUseBakerOrBrooksReadBarrier) {
    // Verify all the objects have the correct pointer installed.
    obj->AssertReadBarrierPointer();
  }
  if (immune_region_.ContainsObject(obj)) {
    DCHECK(IsMarked(obj) != nullptr);
    return false;
  }
  // Try to take advantage of locality of references within a space, failing this find the space
  // the hard way.
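  // Note: AtomicTestAndSet below returns the previous value of the mark bit, so a true result
  // from this function means the calling thread won the race to mark the object and is
  // responsible for pushing it for scanning.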
  accounting::ContinuousSpaceBitmap* object_bitmap = current_space_bitmap_;
  if (LIKELY(object_bitmap->HasAddress(obj))) {
    return !object_bitmap->AtomicTestAndSet(obj);
  }
  MarkSweepMarkObjectSlowPath visitor(this);
  return !mark_bitmap_->AtomicTestAndSet(obj, visitor);
}

void MarkSweep::MarkHeapReference(mirror::HeapReference<mirror::Object>* ref) {
  MarkObject(ref->AsMirrorPtr(), nullptr, MemberOffset(0));
}

// Used to mark objects when processing the mark stack. If an object is null, it is not marked.
inline void MarkSweep::MarkObject(mirror::Object* obj, mirror::Object* holder,
                                  MemberOffset offset) {
  if (obj != nullptr) {
    MarkObjectNonNull(obj, holder, offset);
  } else if (kCountMarkedObjects) {
    ++mark_null_count_;
  }
}

class VerifyRootMarkedVisitor : public SingleRootVisitor {
 public:
  explicit VerifyRootMarkedVisitor(MarkSweep* collector) : collector_(collector) { }

  void VisitRoot(mirror::Object* root, const RootInfo& info) OVERRIDE
      SHARED_REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
    CHECK(collector_->IsMarked(root) != nullptr) << info.ToString();
  }

 private:
  MarkSweep* const collector_;
};

void MarkSweep::VisitRoots(mirror::Object*** roots, size_t count,
                           const RootInfo& info ATTRIBUTE_UNUSED) {
  for (size_t i = 0; i < count; ++i) {
    MarkObjectNonNull(*roots[i]);
  }
}

void MarkSweep::VisitRoots(mirror::CompressedReference<mirror::Object>** roots, size_t count,
                           const RootInfo& info ATTRIBUTE_UNUSED) {
  for (size_t i = 0; i < count; ++i) {
    MarkObjectNonNull(roots[i]->AsMirrorPtr());
  }
}

class VerifyRootVisitor : public SingleRootVisitor {
 public:
  void VisitRoot(mirror::Object* root, const RootInfo& info) OVERRIDE
      SHARED_REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
    // See if the root is on any space bitmap.
    auto* heap = Runtime::Current()->GetHeap();
    if (heap->GetLiveBitmap()->GetContinuousSpaceBitmap(root) == nullptr) {
      space::LargeObjectSpace* large_object_space = heap->GetLargeObjectsSpace();
      if (large_object_space != nullptr && !large_object_space->Contains(root)) {
        LOG(INTERNAL_FATAL) << "Found invalid root: " << root << " " << info;
      }
    }
  }
};

void MarkSweep::VerifyRoots() {
  VerifyRootVisitor visitor;
  Runtime::Current()->GetThreadList()->VisitRoots(&visitor);
}

void MarkSweep::MarkRoots(Thread* self) {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  if (Locks::mutator_lock_->IsExclusiveHeld(self)) {
    // If we exclusively hold the mutator lock, all threads must be suspended.
    Runtime::Current()->VisitRoots(this);
    RevokeAllThreadLocalAllocationStacks(self);
  } else {
    MarkRootsCheckpoint(self, kRevokeRosAllocThreadLocalBuffersAtCheckpoint);
    // At this point the live stack should no longer have any mutators which push into it.
    MarkNonThreadRoots();
    MarkConcurrentRoots(
        static_cast<VisitRootFlags>(kVisitRootFlagAllRoots | kVisitRootFlagStartLoggingNewRoots));
  }
}

void MarkSweep::MarkNonThreadRoots() {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  Runtime::Current()->VisitNonThreadRoots(this);
}

void MarkSweep::MarkConcurrentRoots(VisitRootFlags flags) {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  // Visit all runtime roots and clear dirty flags.
  Runtime::Current()->VisitConcurrentRoots(
      this, static_cast<VisitRootFlags>(flags | kVisitRootFlagNonMoving));
}

class ScanObjectVisitor {
 public:
  explicit ScanObjectVisitor(MarkSweep* const mark_sweep) ALWAYS_INLINE
      : mark_sweep_(mark_sweep) {}

  void operator()(mirror::Object* obj) const ALWAYS_INLINE
      SHARED_REQUIRES(Locks::mutator_lock_)
      REQUIRES(Locks::heap_bitmap_lock_) {
    if (kCheckLocks) {
      Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
      Locks::heap_bitmap_lock_->AssertExclusiveHeld(Thread::Current());
    }
    mark_sweep_->ScanObject(obj);
  }

 private:
  MarkSweep* const mark_sweep_;
};

class DelayReferenceReferentVisitor {
 public:
  explicit DelayReferenceReferentVisitor(MarkSweep* collector) : collector_(collector) {
  }

  void operator()(mirror::Class* klass, mirror::Reference* ref) const
      SHARED_REQUIRES(Locks::mutator_lock_)
      REQUIRES(Locks::heap_bitmap_lock_) {
    collector_->DelayReferenceReferent(klass, ref);
  }

 private:
  MarkSweep* const collector_;
};

template <bool kUseFinger = false>
class MarkStackTask : public Task {
 public:
  MarkStackTask(ThreadPool* thread_pool, MarkSweep* mark_sweep, size_t mark_stack_size,
                StackReference<mirror::Object>* mark_stack)
      : mark_sweep_(mark_sweep),
        thread_pool_(thread_pool),
        mark_stack_pos_(mark_stack_size) {
    // We may have to copy part of an existing mark stack when another mark stack overflows.
    if (mark_stack_size != 0) {
      DCHECK(mark_stack != nullptr);
      // TODO: Check performance?
      std::copy(mark_stack, mark_stack + mark_stack_size, mark_stack_);
    }
    if (kCountTasks) {
      ++mark_sweep_->work_chunks_created_;
    }
  }

  static const size_t kMaxSize = 1 * KB;

 protected:
  class MarkObjectParallelVisitor {
   public:
    explicit MarkObjectParallelVisitor(MarkStackTask<kUseFinger>* chunk_task,
                                       MarkSweep* mark_sweep) ALWAYS_INLINE
        : chunk_task_(chunk_task), mark_sweep_(mark_sweep) {}

    void operator()(mirror::Object* obj, MemberOffset offset, bool /* static */) const
        ALWAYS_INLINE SHARED_REQUIRES(Locks::mutator_lock_) {
      mirror::Object* ref = obj->GetFieldObject<mirror::Object>(offset);
      if (ref != nullptr && mark_sweep_->MarkObjectParallel(ref)) {
        if (kUseFinger) {
          std::atomic_thread_fence(std::memory_order_seq_cst);
          if (reinterpret_cast<uintptr_t>(ref) >=
              static_cast<uintptr_t>(mark_sweep_->atomic_finger_.LoadRelaxed())) {
            return;
          }
        }
        chunk_task_->MarkStackPush(ref);
      }
    }

   private:
    MarkStackTask<kUseFinger>* const chunk_task_;
    MarkSweep* const mark_sweep_;
  };

  class ScanObjectParallelVisitor {
   public:
    explicit ScanObjectParallelVisitor(MarkStackTask<kUseFinger>* chunk_task) ALWAYS_INLINE
        : chunk_task_(chunk_task) {}

    // No thread safety analysis since multiple threads will use this visitor.
    void operator()(mirror::Object* obj) const SHARED_REQUIRES(Locks::mutator_lock_)
        REQUIRES(Locks::heap_bitmap_lock_) {
      MarkSweep* const mark_sweep = chunk_task_->mark_sweep_;
      MarkObjectParallelVisitor mark_visitor(chunk_task_, mark_sweep);
      DelayReferenceReferentVisitor ref_visitor(mark_sweep);
      mark_sweep->ScanObjectVisit(obj, mark_visitor, ref_visitor);
    }

   private:
    MarkStackTask<kUseFinger>* const chunk_task_;
  };

  virtual ~MarkStackTask() {
    // Make sure that we have cleared our mark stack.
    DCHECK_EQ(mark_stack_pos_, 0U);
    if (kCountTasks) {
      ++mark_sweep_->work_chunks_deleted_;
    }
  }

  MarkSweep* const mark_sweep_;
  ThreadPool* const thread_pool_;
  // Thread local mark stack for this task.
  StackReference<mirror::Object> mark_stack_[kMaxSize];
  // Mark stack position.
  size_t mark_stack_pos_;

  ALWAYS_INLINE void MarkStackPush(mirror::Object* obj)
      SHARED_REQUIRES(Locks::mutator_lock_) {
    if (UNLIKELY(mark_stack_pos_ == kMaxSize)) {
      // Mark stack overflow, give 1/2 the stack to the thread pool as a new work task.
      mark_stack_pos_ /= 2;
      auto* task = new MarkStackTask(thread_pool_, mark_sweep_, kMaxSize - mark_stack_pos_,
                                     mark_stack_ + mark_stack_pos_);
      thread_pool_->AddTask(Thread::Current(), task);
    }
    DCHECK(obj != nullptr);
    DCHECK_LT(mark_stack_pos_, kMaxSize);
    mark_stack_[mark_stack_pos_++].Assign(obj);
  }

  virtual void Finalize() {
    delete this;
  }

  // Scans all of the objects
  virtual void Run(Thread* self) SHARED_REQUIRES(Locks::mutator_lock_)
      REQUIRES(Locks::heap_bitmap_lock_) {
    UNUSED(self);
    ScanObjectParallelVisitor visitor(this);
    // TODO: Tune this.
    static const size_t kFifoSize = 4;
    BoundedFifoPowerOfTwo<mirror::Object*, kFifoSize> prefetch_fifo;
    for (;;) {
      mirror::Object* obj = nullptr;
      if (kUseMarkStackPrefetch) {
        while (mark_stack_pos_ != 0 && prefetch_fifo.size() < kFifoSize) {
          mirror::Object* const mark_stack_obj = mark_stack_[--mark_stack_pos_].AsMirrorPtr();
          DCHECK(mark_stack_obj != nullptr);
          __builtin_prefetch(mark_stack_obj);
          prefetch_fifo.push_back(mark_stack_obj);
        }
        if (UNLIKELY(prefetch_fifo.empty())) {
          break;
        }
        obj = prefetch_fifo.front();
        prefetch_fifo.pop_front();
      } else {
        if (UNLIKELY(mark_stack_pos_ == 0)) {
          break;
        }
        obj = mark_stack_[--mark_stack_pos_].AsMirrorPtr();
      }
      DCHECK(obj != nullptr);
      visitor(obj);
    }
  }
};

class CardScanTask : public MarkStackTask<false> {
 public:
  CardScanTask(ThreadPool* thread_pool, MarkSweep* mark_sweep,
               accounting::ContinuousSpaceBitmap* bitmap,
               uint8_t* begin, uint8_t* end, uint8_t minimum_age, size_t mark_stack_size,
               StackReference<mirror::Object>* mark_stack_obj, bool clear_card)
      : MarkStackTask<false>(thread_pool, mark_sweep, mark_stack_size, mark_stack_obj),
        bitmap_(bitmap),
        begin_(begin),
        end_(end),
        minimum_age_(minimum_age), clear_card_(clear_card) {
  }

 protected:
  accounting::ContinuousSpaceBitmap* const bitmap_;
  uint8_t* const begin_;
  uint8_t* const end_;
  const uint8_t minimum_age_;
  const bool clear_card_;

  virtual void Finalize() {
    delete this;
  }

  virtual void Run(Thread* self) NO_THREAD_SAFETY_ANALYSIS {
    ScanObjectParallelVisitor visitor(this);
    accounting::CardTable* card_table = mark_sweep_->GetHeap()->GetCardTable();
    size_t cards_scanned = clear_card_ ?
        card_table->Scan<true>(bitmap_, begin_, end_, visitor, minimum_age_) :
        card_table->Scan<false>(bitmap_, begin_, end_, visitor, minimum_age_);
    VLOG(heap) << "Parallel scanning cards " << reinterpret_cast<void*>(begin_) << " - "
               << reinterpret_cast<void*>(end_) << " = " << cards_scanned;
    // Finish by emptying our local mark stack.
    MarkStackTask::Run(self);
  }
};

size_t MarkSweep::GetThreadCount(bool paused) const {
  if (heap_->GetThreadPool() == nullptr || !heap_->CareAboutPauseTimes()) {
    return 1;
  }
  return (paused ? heap_->GetParallelGCThreadCount() : heap_->GetConcGCThreadCount()) + 1;
}

void MarkSweep::ScanGrayObjects(bool paused, uint8_t minimum_age) {
  accounting::CardTable* card_table = GetHeap()->GetCardTable();
  ThreadPool* thread_pool = GetHeap()->GetThreadPool();
  size_t thread_count = GetThreadCount(paused);
  // The parallel version with only one thread is faster for card scanning. TODO: fix.
  if (kParallelCardScan && thread_count > 1) {
    Thread* self = Thread::Current();
    // Can't have a different split for each space since multiple spaces can have their cards being
    // scanned at the same time.
    TimingLogger::ScopedTiming t(paused ? "(Paused)ScanGrayObjects" : __FUNCTION__,
                                 GetTimings());
    // Try to take some of the mark stack since we can pass this off to the worker tasks.
    StackReference<mirror::Object>* mark_stack_begin = mark_stack_->Begin();
    StackReference<mirror::Object>* mark_stack_end = mark_stack_->End();
    const size_t mark_stack_size = mark_stack_end - mark_stack_begin;
    // Estimated number of work tasks we will create.
    const size_t mark_stack_tasks = GetHeap()->GetContinuousSpaces().size() * thread_count;
    DCHECK_NE(mark_stack_tasks, 0U);
    const size_t mark_stack_delta = std::min(CardScanTask::kMaxSize / 2,
                                             mark_stack_size / mark_stack_tasks + 1);
    for (const auto& space : GetHeap()->GetContinuousSpaces()) {
      if (space->GetMarkBitmap() == nullptr) {
        continue;
      }
      uint8_t* card_begin = space->Begin();
      uint8_t* card_end = space->End();
      // Align up the end address. For example, the image space's end
      // may not be card-size-aligned.
      card_end = AlignUp(card_end, accounting::CardTable::kCardSize);
      DCHECK_ALIGNED(card_begin, accounting::CardTable::kCardSize);
      DCHECK_ALIGNED(card_end, accounting::CardTable::kCardSize);
      // Calculate how many bytes of heap we will scan.
      const size_t address_range = card_end - card_begin;
      // Calculate how much address range each task gets.
      const size_t card_delta = RoundUp(address_range / thread_count + 1,
                                        accounting::CardTable::kCardSize);
      // If paused and the space is neither zygote nor image space, we could clear the dirty
      // cards to avoid accumulating them and increasing card scanning load in the following GC
      // cycles. We need to keep dirty cards of image space and zygote space in order to track
      // references to the other spaces.
      bool clear_card = paused && !space->IsZygoteSpace() && !space->IsImageSpace();
      // Create the worker tasks for this space.
      while (card_begin != card_end) {
        // Add a range of cards.
        size_t addr_remaining = card_end - card_begin;
        size_t card_increment = std::min(card_delta, addr_remaining);
        // Take from the back of the mark stack.
        size_t mark_stack_remaining = mark_stack_end - mark_stack_begin;
        size_t mark_stack_increment = std::min(mark_stack_delta, mark_stack_remaining);
        mark_stack_end -= mark_stack_increment;
        mark_stack_->PopBackCount(static_cast<int32_t>(mark_stack_increment));
        DCHECK_EQ(mark_stack_end, mark_stack_->End());
        // Add the new task to the thread pool.
        auto* task = new CardScanTask(thread_pool, this, space->GetMarkBitmap(), card_begin,
                                      card_begin + card_increment, minimum_age,
                                      mark_stack_increment, mark_stack_end, clear_card);
        thread_pool->AddTask(self, task);
        card_begin += card_increment;
      }
    }

    // Note: the card scan below may dirty new cards (and scan them)
    // as a side effect when a Reference object is encountered and
    // queued during the marking. See b/11465268.
    thread_pool->SetMaxActiveWorkers(thread_count - 1);
    thread_pool->StartWorkers(self);
    thread_pool->Wait(self, true, true);
    thread_pool->StopWorkers(self);
  } else {
    for (const auto& space : GetHeap()->GetContinuousSpaces()) {
      if (space->GetMarkBitmap() != nullptr) {
        // Image spaces are handled properly since live == marked for them.
        const char* name = nullptr;
        switch (space->GetGcRetentionPolicy()) {
          case space::kGcRetentionPolicyNeverCollect:
            name = paused ? "(Paused)ScanGrayImageSpaceObjects" : "ScanGrayImageSpaceObjects";
            break;
          case space::kGcRetentionPolicyFullCollect:
            name = paused ? "(Paused)ScanGrayZygoteSpaceObjects" : "ScanGrayZygoteSpaceObjects";
            break;
          case space::kGcRetentionPolicyAlwaysCollect:
            name = paused ? "(Paused)ScanGrayAllocSpaceObjects" : "ScanGrayAllocSpaceObjects";
            break;
          default:
            LOG(FATAL) << "Unreachable";
            UNREACHABLE();
        }
        TimingLogger::ScopedTiming t(name, GetTimings());
        ScanObjectVisitor visitor(this);
        bool clear_card = paused && !space->IsZygoteSpace() && !space->IsImageSpace();
        if (clear_card) {
          card_table->Scan<true>(space->GetMarkBitmap(), space->Begin(), space->End(), visitor,
                                 minimum_age);
        } else {
          card_table->Scan<false>(space->GetMarkBitmap(), space->Begin(), space->End(), visitor,
                                  minimum_age);
        }
      }
    }
  }
}

class RecursiveMarkTask : public MarkStackTask<false> {
 public:
  RecursiveMarkTask(ThreadPool* thread_pool, MarkSweep* mark_sweep,
                    accounting::ContinuousSpaceBitmap* bitmap, uintptr_t begin, uintptr_t end)
      : MarkStackTask<false>(thread_pool, mark_sweep, 0, nullptr), bitmap_(bitmap), begin_(begin),
        end_(end) {
  }

 protected:
  accounting::ContinuousSpaceBitmap* const bitmap_;
  const uintptr_t begin_;
  const uintptr_t end_;

  virtual void Finalize() {
    delete this;
  }

  // Scans all of the objects
  virtual void Run(Thread* self) NO_THREAD_SAFETY_ANALYSIS {
    ScanObjectParallelVisitor visitor(this);
    bitmap_->VisitMarkedRange(begin_, end_, visitor);
    // Finish by emptying our local mark stack.
    MarkStackTask::Run(self);
  }
};

// Populates the mark stack based on the set of marked objects and
// recursively marks until the mark stack is emptied.
void MarkSweep::RecursiveMark() {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  // RecursiveMark will build the lists of known instances of the Reference classes. See
  // DelayReferenceReferent for details.
  if (kUseRecursiveMark) {
    const bool partial = GetGcType() == kGcTypePartial;
    ScanObjectVisitor scan_visitor(this);
    auto* self = Thread::Current();
    ThreadPool* thread_pool = heap_->GetThreadPool();
    size_t thread_count = GetThreadCount(false);
    const bool parallel = kParallelRecursiveMark && thread_count > 1;
    mark_stack_->Reset();
    for (const auto& space : GetHeap()->GetContinuousSpaces()) {
      if ((space->GetGcRetentionPolicy() == space::kGcRetentionPolicyAlwaysCollect) ||
          (!partial && space->GetGcRetentionPolicy() == space::kGcRetentionPolicyFullCollect)) {
        current_space_bitmap_ = space->GetMarkBitmap();
        if (current_space_bitmap_ == nullptr) {
          continue;
        }
        if (parallel) {
          // We will use the mark stack in the future.
          // CHECK(mark_stack_->IsEmpty());
          // This function does not handle heap end increasing, so we must use the space end.
          uintptr_t begin = reinterpret_cast<uintptr_t>(space->Begin());
          uintptr_t end = reinterpret_cast<uintptr_t>(space->End());
          atomic_finger_.StoreRelaxed(AtomicInteger::MaxValue());

          // Create a few worker tasks.
          const size_t n = thread_count * 2;
          while (begin != end) {
            uintptr_t start = begin;
            uintptr_t delta = (end - begin) / n;
            delta = RoundUp(delta, KB);
            if (delta < 16 * KB) delta = end - begin;
            begin += delta;
            auto* task = new RecursiveMarkTask(thread_pool, this, current_space_bitmap_, start,
                                               begin);
            thread_pool->AddTask(self, task);
          }
          thread_pool->SetMaxActiveWorkers(thread_count - 1);
          thread_pool->StartWorkers(self);
          thread_pool->Wait(self, true, true);
          thread_pool->StopWorkers(self);
        } else {
          // This function does not handle heap end increasing, so we must use the space end.
          uintptr_t begin = reinterpret_cast<uintptr_t>(space->Begin());
          uintptr_t end = reinterpret_cast<uintptr_t>(space->End());
          current_space_bitmap_->VisitMarkedRange(begin, end, scan_visitor);
        }
      }
    }
  }
  ProcessMarkStack(false);
}

void MarkSweep::RecursiveMarkDirtyObjects(bool paused, uint8_t minimum_age) {
  ScanGrayObjects(paused, minimum_age);
  ProcessMarkStack(paused);
}

void MarkSweep::ReMarkRoots() {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  Locks::mutator_lock_->AssertExclusiveHeld(Thread::Current());
  Runtime::Current()->VisitRoots(this, static_cast<VisitRootFlags>(
      kVisitRootFlagNewRoots | kVisitRootFlagStopLoggingNewRoots | kVisitRootFlagClearRootLog));
  if (kVerifyRootsMarked) {
    TimingLogger::ScopedTiming t2("(Paused)VerifyRoots", GetTimings());
    VerifyRootMarkedVisitor visitor(this);
    Runtime::Current()->VisitRoots(&visitor);
  }
}

void MarkSweep::SweepSystemWeaks(Thread* self) {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
  Runtime::Current()->SweepSystemWeaks(this);
}

class VerifySystemWeakVisitor : public IsMarkedVisitor {
 public:
  explicit VerifySystemWeakVisitor(MarkSweep* mark_sweep) : mark_sweep_(mark_sweep) {}

  virtual mirror::Object* IsMarked(mirror::Object* obj) OVERRIDE
      SHARED_REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
    mark_sweep_->VerifyIsLive(obj);
    return obj;
  }

  MarkSweep* const mark_sweep_;
};

void MarkSweep::VerifyIsLive(const mirror::Object* obj) {
  if (!heap_->GetLiveBitmap()->Test(obj)) {
    // TODO: Consider live stack? Has this code bitrotted?
    CHECK(!heap_->allocation_stack_->Contains(obj))
        << "Found dead object " << obj << "\n" << heap_->DumpSpaces();
  }
}

void MarkSweep::VerifySystemWeaks() {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  // Verify system weaks using a special object visitor which returns the input object.
  VerifySystemWeakVisitor visitor(this);
  Runtime::Current()->SweepSystemWeaks(&visitor);
}

class CheckpointMarkThreadRoots : public Closure, public RootVisitor {
 public:
  explicit CheckpointMarkThreadRoots(MarkSweep* mark_sweep,
                                     bool revoke_ros_alloc_thread_local_buffers_at_checkpoint)
      : mark_sweep_(mark_sweep),
        revoke_ros_alloc_thread_local_buffers_at_checkpoint_(
            revoke_ros_alloc_thread_local_buffers_at_checkpoint) {
  }

  void VisitRoots(mirror::Object*** roots, size_t count, const RootInfo& info ATTRIBUTE_UNUSED)
      OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_)
      REQUIRES(Locks::heap_bitmap_lock_) {
    for (size_t i = 0; i < count; ++i) {
      mark_sweep_->MarkObjectNonNullParallel(*roots[i]);
    }
  }

  void VisitRoots(mirror::CompressedReference<mirror::Object>** roots, size_t count,
                  const RootInfo& info ATTRIBUTE_UNUSED)
      OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_)
      REQUIRES(Locks::heap_bitmap_lock_) {
    for (size_t i = 0; i < count; ++i) {
      mark_sweep_->MarkObjectNonNullParallel(roots[i]->AsMirrorPtr());
    }
  }

  virtual void Run(Thread* thread) OVERRIDE NO_THREAD_SAFETY_ANALYSIS {
    ATRACE_BEGIN("Marking thread roots");
    // Note: self is not necessarily equal to thread since thread may be suspended.
    Thread* const self = Thread::Current();
    CHECK(thread == self || thread->IsSuspended() || thread->GetState() == kWaitingPerformingGc)
        << thread->GetState() << " thread " << thread << " self " << self;
    thread->VisitRoots(this);
    ATRACE_END();
    if (revoke_ros_alloc_thread_local_buffers_at_checkpoint_) {
      ATRACE_BEGIN("RevokeRosAllocThreadLocalBuffers");
      mark_sweep_->GetHeap()->RevokeRosAllocThreadLocalBuffers(thread);
      ATRACE_END();
    }
    // If thread is a running mutator, then act on behalf of the garbage collector.
    // See the code in ThreadList::RunCheckpoint.
    if (thread->GetState() == kRunnable) {
      mark_sweep_->GetBarrier().Pass(self);
    }
  }

 private:
  MarkSweep* const mark_sweep_;
  const bool revoke_ros_alloc_thread_local_buffers_at_checkpoint_;
};

void MarkSweep::MarkRootsCheckpoint(Thread* self,
                                    bool revoke_ros_alloc_thread_local_buffers_at_checkpoint) {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  CheckpointMarkThreadRoots check_point(this, revoke_ros_alloc_thread_local_buffers_at_checkpoint);
  ThreadList* thread_list = Runtime::Current()->GetThreadList();
  // Request that the checkpoint is run on all threads, returning a count of the threads that must
  // run through the barrier, including self.
  size_t barrier_count = thread_list->RunCheckpoint(&check_point);
  // Release locks then wait for all mutator threads to pass the barrier.
  // If there are no threads to wait for, which implies that all the checkpoint functions are
  // finished, then there is no need to release locks.
  if (barrier_count == 0) {
    return;
  }
  Locks::heap_bitmap_lock_->ExclusiveUnlock(self);
  Locks::mutator_lock_->SharedUnlock(self);
  {
    ScopedThreadStateChange tsc(self, kWaitingForCheckPointsToRun);
    gc_barrier_->Increment(self, barrier_count);
  }
  Locks::mutator_lock_->SharedLock(self);
  Locks::heap_bitmap_lock_->ExclusiveLock(self);
}

void MarkSweep::SweepArray(accounting::ObjectStack* allocations, bool swap_bitmaps) {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  Thread* self = Thread::Current();
  mirror::Object** chunk_free_buffer = reinterpret_cast<mirror::Object**>(
      sweep_array_free_buffer_mem_map_->BaseBegin());
  size_t chunk_free_pos = 0;
  ObjectBytePair freed;
  ObjectBytePair freed_los;
  // How many objects are left in the array, modified after each space is swept.
  StackReference<mirror::Object>* objects = allocations->Begin();
  size_t count = allocations->Size();
  // Change the order to ensure that the non-moving space is swept last as an optimization.
  std::vector<space::ContinuousSpace*> sweep_spaces;
  space::ContinuousSpace* non_moving_space = nullptr;
  for (space::ContinuousSpace* space : heap_->GetContinuousSpaces()) {
    if (space->IsAllocSpace() && !immune_region_.ContainsSpace(space) &&
        space->GetLiveBitmap() != nullptr) {
      if (space == heap_->GetNonMovingSpace()) {
        non_moving_space = space;
      } else {
        sweep_spaces.push_back(space);
      }
    }
  }
  // Unlikely to sweep a significant amount of non-movable objects, so we do these after the
  // other alloc spaces as an optimization.
  if (non_moving_space != nullptr) {
    sweep_spaces.push_back(non_moving_space);
  }
  // Start by sweeping the continuous spaces.
  for (space::ContinuousSpace* space : sweep_spaces) {
    space::AllocSpace* alloc_space = space->AsAllocSpace();
    accounting::ContinuousSpaceBitmap* live_bitmap = space->GetLiveBitmap();
    accounting::ContinuousSpaceBitmap* mark_bitmap = space->GetMarkBitmap();
    if (swap_bitmaps) {
      std::swap(live_bitmap, mark_bitmap);
    }
    StackReference<mirror::Object>* out = objects;
    for (size_t i = 0; i < count; ++i) {
      mirror::Object* const obj = objects[i].AsMirrorPtr();
      if (kUseThreadLocalAllocationStack && obj == nullptr) {
        continue;
      }
      if (space->HasAddress(obj)) {
        // This object is in the space, remove it from the array and add it to the sweep buffer
        // if needed.
        if (!mark_bitmap->Test(obj)) {
          if (chunk_free_pos >= kSweepArrayChunkFreeSize) {
            TimingLogger::ScopedTiming t2("FreeList", GetTimings());
            freed.objects += chunk_free_pos;
            freed.bytes += alloc_space->FreeList(self, chunk_free_pos, chunk_free_buffer);
            chunk_free_pos = 0;
          }
          chunk_free_buffer[chunk_free_pos++] = obj;
        }
      } else {
        (out++)->Assign(obj);
      }
    }
    if (chunk_free_pos > 0) {
      TimingLogger::ScopedTiming t2("FreeList", GetTimings());
      freed.objects += chunk_free_pos;
      freed.bytes += alloc_space->FreeList(self, chunk_free_pos, chunk_free_buffer);
      chunk_free_pos = 0;
    }
    // All of the references which the space contained are no longer in the allocation stack;
    // update the count.
    count = out - objects;
  }
  // Handle the large object space.
  space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
  if (large_object_space != nullptr) {
    accounting::LargeObjectBitmap* large_live_objects = large_object_space->GetLiveBitmap();
    accounting::LargeObjectBitmap* large_mark_objects = large_object_space->GetMarkBitmap();
    if (swap_bitmaps) {
      std::swap(large_live_objects, large_mark_objects);
    }
    for (size_t i = 0; i < count; ++i) {
      mirror::Object* const obj = objects[i].AsMirrorPtr();
      // Handle large objects.
      if (kUseThreadLocalAllocationStack && obj == nullptr) {
        continue;
      }
      if (!large_mark_objects->Test(obj)) {
        ++freed_los.objects;
        freed_los.bytes += large_object_space->Free(self, obj);
      }
    }
  }
  {
    TimingLogger::ScopedTiming t2("RecordFree", GetTimings());
    RecordFree(freed);
    RecordFreeLOS(freed_los);
    t2.NewTiming("ResetStack");
    allocations->Reset();
  }
  sweep_array_free_buffer_mem_map_->MadviseDontNeedAndZero();
}

void MarkSweep::Sweep(bool swap_bitmaps) {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  // Ensure that nobody inserted items in the live stack after we swapped the stacks.
  CHECK_GE(live_stack_freeze_size_, GetHeap()->GetLiveStack()->Size());
  {
    TimingLogger::ScopedTiming t2("MarkAllocStackAsLive", GetTimings());
    // Mark everything allocated since the last GC as live so that we can sweep concurrently,
    // knowing that new allocations won't be marked as live.
    accounting::ObjectStack* live_stack = heap_->GetLiveStack();
    heap_->MarkAllocStackAsLive(live_stack);
    live_stack->Reset();
    DCHECK(mark_stack_->IsEmpty());
  }
  for (const auto& space : GetHeap()->GetContinuousSpaces()) {
    if (space->IsContinuousMemMapAllocSpace()) {
      space::ContinuousMemMapAllocSpace* alloc_space = space->AsContinuousMemMapAllocSpace();
      TimingLogger::ScopedTiming split(
          alloc_space->IsZygoteSpace() ? "SweepZygoteSpace" : "SweepMallocSpace", GetTimings());
      RecordFree(alloc_space->Sweep(swap_bitmaps));
    }
  }
  SweepLargeObjects(swap_bitmaps);
}

void MarkSweep::SweepLargeObjects(bool swap_bitmaps) {
  space::LargeObjectSpace* los = heap_->GetLargeObjectsSpace();
  if (los != nullptr) {
    TimingLogger::ScopedTiming split(__FUNCTION__, GetTimings());
    RecordFreeLOS(los->Sweep(swap_bitmaps));
  }
}

// Process the "referent" field in a java.lang.ref.Reference. If the referent has not yet been
// marked, put it on the appropriate list in the heap for later processing.
void MarkSweep::DelayReferenceReferent(mirror::Class* klass, mirror::Reference* ref) {
  if (kCountJavaLangRefs) {
    ++reference_count_;
  }
  heap_->GetReferenceProcessor()->DelayReferenceReferent(klass, ref, this);
}

class MarkVisitor {
 public:
  explicit MarkVisitor(MarkSweep* const mark_sweep) ALWAYS_INLINE : mark_sweep_(mark_sweep) {
  }

  void operator()(mirror::Object* obj, MemberOffset offset, bool /* is_static */) const
      ALWAYS_INLINE SHARED_REQUIRES(Locks::mutator_lock_)
      REQUIRES(Locks::heap_bitmap_lock_) {
    if (kCheckLocks) {
      Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
      Locks::heap_bitmap_lock_->AssertExclusiveHeld(Thread::Current());
    }
    mark_sweep_->MarkObject(obj->GetFieldObject<mirror::Object>(offset), obj, offset);
  }

 private:
  MarkSweep* const mark_sweep_;
};

// Scans an object reference. Determines the type of the reference
// and dispatches to a specialized scanning routine.
void MarkSweep::ScanObject(mirror::Object* obj) {
  MarkVisitor mark_visitor(this);
  DelayReferenceReferentVisitor ref_visitor(this);
  ScanObjectVisit(obj, mark_visitor, ref_visitor);
}

void MarkSweep::ProcessMarkStackParallel(size_t thread_count) {
  Thread* self = Thread::Current();
  ThreadPool* thread_pool = GetHeap()->GetThreadPool();
  const size_t chunk_size = std::min(mark_stack_->Size() / thread_count + 1,
                                     static_cast<size_t>(MarkStackTask<false>::kMaxSize));
  CHECK_GT(chunk_size, 0U);
  // Split the current mark stack up into work tasks.
  for (auto* it = mark_stack_->Begin(), *end = mark_stack_->End(); it < end; ) {
    const size_t delta = std::min(static_cast<size_t>(end - it), chunk_size);
    thread_pool->AddTask(self, new MarkStackTask<false>(thread_pool, this, delta, it));
    it += delta;
  }
  thread_pool->SetMaxActiveWorkers(thread_count - 1);
  thread_pool->StartWorkers(self);
  thread_pool->Wait(self, true, true);
  thread_pool->StopWorkers(self);
  mark_stack_->Reset();
  CHECK_EQ(work_chunks_created_.LoadSequentiallyConsistent(),
           work_chunks_deleted_.LoadSequentiallyConsistent())
      << " some of the work chunks were leaked";
}

// Scan anything that's on the mark stack.
void MarkSweep::ProcessMarkStack(bool paused) {
  TimingLogger::ScopedTiming t(paused ? "(Paused)ProcessMarkStack" : __FUNCTION__, GetTimings());
  size_t thread_count = GetThreadCount(paused);
  if (kParallelProcessMarkStack && thread_count > 1 &&
      mark_stack_->Size() >= kMinimumParallelMarkStackSize) {
    ProcessMarkStackParallel(thread_count);
  } else {
    // TODO: Tune this.
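    // Note: kFifoSize bounds how many mark stack entries are prefetched ahead of the object
    // currently being scanned; the same constant and prefetch pattern appear in
    // MarkStackTask::Run above.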
    static const size_t kFifoSize = 4;
    BoundedFifoPowerOfTwo<mirror::Object*, kFifoSize> prefetch_fifo;
    for (;;) {
      mirror::Object* obj = nullptr;
      if (kUseMarkStackPrefetch) {
        while (!mark_stack_->IsEmpty() && prefetch_fifo.size() < kFifoSize) {
          mirror::Object* mark_stack_obj = mark_stack_->PopBack();
          DCHECK(mark_stack_obj != nullptr);
          __builtin_prefetch(mark_stack_obj);
          prefetch_fifo.push_back(mark_stack_obj);
        }
        if (prefetch_fifo.empty()) {
          break;
        }
        obj = prefetch_fifo.front();
        prefetch_fifo.pop_front();
      } else {
        if (mark_stack_->IsEmpty()) {
          break;
        }
        obj = mark_stack_->PopBack();
      }
      DCHECK(obj != nullptr);
      ScanObject(obj);
    }
  }
}

inline mirror::Object* MarkSweep::IsMarked(mirror::Object* object) {
  if (immune_region_.ContainsObject(object)) {
    return object;
  }
  if (current_space_bitmap_->HasAddress(object)) {
    return current_space_bitmap_->Test(object) ? object : nullptr;
  }
  return mark_bitmap_->Test(object) ? object : nullptr;
}

void MarkSweep::FinishPhase() {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  if (kCountScannedTypes) {
    VLOG(gc) << "MarkSweep scanned classes=" << class_count_.LoadRelaxed()
             << " arrays=" << array_count_.LoadRelaxed()
             << " other=" << other_count_.LoadRelaxed();
  }
  if (kCountTasks) {
    VLOG(gc) << "Total number of work chunks allocated: " << work_chunks_created_.LoadRelaxed();
  }
  if (kMeasureOverhead) {
    VLOG(gc) << "Overhead time " << PrettyDuration(overhead_time_.LoadRelaxed());
  }
  if (kProfileLargeObjects) {
    VLOG(gc) << "Large objects tested " << large_object_test_.LoadRelaxed()
             << " marked " << large_object_mark_.LoadRelaxed();
  }
  if (kCountJavaLangRefs) {
    VLOG(gc) << "References scanned " << reference_count_.LoadRelaxed();
  }
  if (kCountMarkedObjects) {
    VLOG(gc) << "Marked: null=" << mark_null_count_.LoadRelaxed()
             << " immune=" << mark_immune_count_.LoadRelaxed()
             << " fastpath=" << mark_fastpath_count_.LoadRelaxed()
             << " slowpath=" << mark_slowpath_count_.LoadRelaxed();
  }
  CHECK(mark_stack_->IsEmpty());  // Ensure that the mark stack is empty.
  mark_stack_->Reset();
  WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
  heap_->ClearMarkedObjects();
}

void MarkSweep::RevokeAllThreadLocalBuffers() {
  if (kRevokeRosAllocThreadLocalBuffersAtCheckpoint && IsConcurrent()) {
    // If concurrent, rosalloc thread-local buffers are revoked at the
    // thread checkpoint. Bump pointer space thread-local buffers must
    // not be in use.
    GetHeap()->AssertAllBumpPointerSpaceThreadLocalBuffersAreRevoked();
  } else {
    TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
    GetHeap()->RevokeAllThreadLocalBuffers();
  }
}

}  // namespace collector
}  // namespace gc
}  // namespace art