mark_sweep.cc revision 4aeec176eaf11fe03f342aadcbb79142230270ed
1/* 2 * Copyright (C) 2011 The Android Open Source Project 3 * 4 * Licensed under the Apache License, Version 2.0 (the "License"); 5 * you may not use this file except in compliance with the License. 6 * You may obtain a copy of the License at 7 * 8 * http://www.apache.org/licenses/LICENSE-2.0 9 * 10 * Unless required by applicable law or agreed to in writing, software 11 * distributed under the License is distributed on an "AS IS" BASIS, 12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 * See the License for the specific language governing permissions and 14 * limitations under the License. 15 */ 16 17#include "mark_sweep.h" 18 19#include <functional> 20#include <numeric> 21#include <climits> 22#include <vector> 23 24#include "base/bounded_fifo.h" 25#include "base/logging.h" 26#include "base/macros.h" 27#include "base/mutex-inl.h" 28#include "base/timing_logger.h" 29#include "gc/accounting/card_table-inl.h" 30#include "gc/accounting/heap_bitmap-inl.h" 31#include "gc/accounting/mod_union_table.h" 32#include "gc/accounting/space_bitmap-inl.h" 33#include "gc/heap.h" 34#include "gc/space/image_space.h" 35#include "gc/space/large_object_space.h" 36#include "gc/space/space-inl.h" 37#include "mark_sweep-inl.h" 38#include "mirror/art_field-inl.h" 39#include "mirror/object-inl.h" 40#include "runtime.h" 41#include "scoped_thread_state_change.h" 42#include "thread-inl.h" 43#include "thread_list.h" 44 45using ::art::mirror::ArtField; 46using ::art::mirror::Class; 47using ::art::mirror::Object; 48using ::art::mirror::ObjectArray; 49 50namespace art { 51namespace gc { 52namespace collector { 53 54// Performance options. 55static constexpr bool kUseRecursiveMark = false; 56static constexpr bool kUseMarkStackPrefetch = true; 57static constexpr size_t kSweepArrayChunkFreeSize = 1024; 58static constexpr bool kPreCleanCards = true; 59 60// Parallelism options. 61static constexpr bool kParallelCardScan = true; 62static constexpr bool kParallelRecursiveMark = true; 63// Don't attempt to parallelize mark stack processing unless the mark stack is at least n 64// elements. This is temporary until we reduce the overhead caused by allocating tasks, etc.. Not 65// having this can add overhead in ProcessReferences since we may end up doing many calls of 66// ProcessMarkStack with very small mark stacks. 67static constexpr size_t kMinimumParallelMarkStackSize = 128; 68static constexpr bool kParallelProcessMarkStack = true; 69 70// Profiling and information flags. 71static constexpr bool kProfileLargeObjects = false; 72static constexpr bool kMeasureOverhead = false; 73static constexpr bool kCountTasks = false; 74static constexpr bool kCountJavaLangRefs = false; 75static constexpr bool kCountMarkedObjects = false; 76 77// Turn off kCheckLocks when profiling the GC since it slows the GC down by up to 40%. 78static constexpr bool kCheckLocks = kDebugLocking; 79static constexpr bool kVerifyRoots = kIsDebugBuild; 80 81// If true, revoke the rosalloc thread-local buffers at the 82// checkpoint, as opposed to during the pause. 83static constexpr bool kRevokeRosAllocThreadLocalBuffersAtCheckpoint = true; 84 85void MarkSweep::BindBitmaps() { 86 timings_.StartSplit("BindBitmaps"); 87 WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_); 88 // Mark all of the spaces we never collect as immune. 
89 for (const auto& space : GetHeap()->GetContinuousSpaces()) { 90 if (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyNeverCollect) { 91 CHECK(immune_region_.AddContinuousSpace(space)) << "Failed to add space " << *space; 92 } 93 } 94 timings_.EndSplit(); 95} 96 97MarkSweep::MarkSweep(Heap* heap, bool is_concurrent, const std::string& name_prefix) 98 : GarbageCollector(heap, 99 name_prefix + 100 (is_concurrent ? "concurrent mark sweep": "mark sweep")), 101 gc_barrier_(new Barrier(0)), 102 large_object_lock_("mark sweep large object lock", kMarkSweepLargeObjectLock), 103 mark_stack_lock_("mark sweep mark stack lock", kMarkSweepMarkStackLock), 104 is_concurrent_(is_concurrent) { 105} 106 107void MarkSweep::InitializePhase() { 108 timings_.Reset(); 109 TimingLogger::ScopedSplit split("InitializePhase", &timings_); 110 mark_stack_ = heap_->mark_stack_.get(); 111 DCHECK(mark_stack_ != nullptr); 112 immune_region_.Reset(); 113 class_count_ = 0; 114 array_count_ = 0; 115 other_count_ = 0; 116 large_object_test_ = 0; 117 large_object_mark_ = 0; 118 overhead_time_ = 0; 119 work_chunks_created_ = 0; 120 work_chunks_deleted_ = 0; 121 reference_count_ = 0; 122 mark_null_count_ = 0; 123 mark_immune_count_ = 0; 124 mark_fastpath_count_ = 0; 125 mark_slowpath_count_ = 0; 126 FindDefaultSpaceBitmap(); 127 { 128 // TODO: I don't think we should need heap bitmap lock to get the mark bitmap. 129 ReaderMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_); 130 mark_bitmap_ = heap_->GetMarkBitmap(); 131 } 132 133 // Do any pre GC verification. 134 timings_.NewSplit("PreGcVerification"); 135 heap_->PreGcVerification(this); 136} 137 138void MarkSweep::ProcessReferences(Thread* self) { 139 TimingLogger::ScopedSplit split("ProcessReferences", &timings_); 140 WriterMutexLock mu(self, *Locks::heap_bitmap_lock_); 141 GetHeap()->ProcessReferences(timings_, clear_soft_references_, &IsMarkedCallback, 142 &MarkObjectCallback, &ProcessMarkStackPausedCallback, this); 143} 144 145void MarkSweep::PreProcessReferences() { 146 if (IsConcurrent()) { 147 // No reason to do this for non-concurrent GC since pre processing soft references only helps 148 // pauses. 149 timings_.NewSplit("PreProcessReferences"); 150 GetHeap()->ProcessSoftReferences(timings_, clear_soft_references_, &IsMarkedCallback, 151 &MarkObjectCallback, &ProcessMarkStackPausedCallback, this); 152 } 153} 154 155void MarkSweep::HandleDirtyObjectsPhase() { 156 TimingLogger::ScopedSplit split("(Paused)HandleDirtyObjectsPhase", &timings_); 157 Thread* self = Thread::Current(); 158 Locks::mutator_lock_->AssertExclusiveHeld(self); 159 160 { 161 WriterMutexLock mu(self, *Locks::heap_bitmap_lock_); 162 163 // Re-mark root set. 164 ReMarkRoots(); 165 166 // Scan dirty objects, this is only required if we are not doing concurrent GC. 167 RecursiveMarkDirtyObjects(true, accounting::CardTable::kCardDirty); 168 } 169 170 ProcessReferences(self); 171 172 // Only need to do this if we have the card mark verification on, and only during concurrent GC. 173 if (GetHeap()->verify_missing_card_marks_ || GetHeap()->verify_pre_gc_heap_|| 174 GetHeap()->verify_post_gc_heap_) { 175 WriterMutexLock mu(self, *Locks::heap_bitmap_lock_); 176 // This second sweep makes sure that we don't have any objects in the live stack which point to 177 // freed objects. These cause problems since their references may be previously freed objects. 178 SweepArray(GetHeap()->allocation_stack_.get(), false); 179 // Since SweepArray() above resets the (active) allocation 180 // stack. 
Need to revoke the thread-local allocation stacks that 181 // point into it. 182 RevokeAllThreadLocalAllocationStacks(self); 183 } 184 185 timings_.StartSplit("PreSweepingGcVerification"); 186 heap_->PreSweepingGcVerification(this); 187 timings_.EndSplit(); 188 189 // Ensure that nobody inserted items in the live stack after we swapped the stacks. 190 ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_); 191 CHECK_GE(live_stack_freeze_size_, GetHeap()->GetLiveStack()->Size()); 192 193 // Disallow new system weaks to prevent a race which occurs when someone adds a new system 194 // weak before we sweep them. Since this new system weak may not be marked, the GC may 195 // incorrectly sweep it. This also fixes a race where interning may attempt to return a strong 196 // reference to a string that is about to be swept. 197 Runtime::Current()->DisallowNewSystemWeaks(); 198} 199 200void MarkSweep::PreCleanCards() { 201 // Don't do this for non concurrent GCs since they don't have any dirty cards. 202 if (kPreCleanCards && IsConcurrent()) { 203 Thread* self = Thread::Current(); 204 CHECK(!Locks::mutator_lock_->IsExclusiveHeld(self)); 205 // Process dirty cards and add dirty cards to mod union tables, also ages cards. 206 heap_->ProcessCards(timings_, false); 207 // The checkpoint root marking is required to avoid a race condition which occurs if the 208 // following happens during a reference write: 209 // 1. mutator dirties the card (write barrier) 210 // 2. GC ages the card (the above ProcessCards call) 211 // 3. GC scans the object (the RecursiveMarkDirtyObjects call below) 212 // 4. mutator writes the value (corresponding to the write barrier in 1.) 213 // This causes the GC to age the card but not necessarily mark the reference which the mutator 214 // wrote into the object stored in the card. 215 // Having the checkpoint fixes this issue since it ensures that the card mark and the 216 // reference write are visible to the GC before the card is scanned (this is due to locks being 217 // acquired / released in the checkpoint code). 218 // The other roots are also marked to help reduce the pause. 219 MarkThreadRoots(self); 220 MarkNonThreadRoots(); 221 MarkConcurrentRoots( 222 static_cast<VisitRootFlags>(kVisitRootFlagClearRootLog | kVisitRootFlagNewRoots)); 223 // Process the newly aged cards. 224 RecursiveMarkDirtyObjects(false, accounting::CardTable::kCardDirty - 1); 225 // TODO: Empty allocation stack to reduce the number of objects we need to test / mark as live 226 // in the next GC. 227 } 228} 229 230void MarkSweep::RevokeAllThreadLocalAllocationStacks(Thread* self) { 231 if (kUseThreadLocalAllocationStack) { 232 Locks::mutator_lock_->AssertExclusiveHeld(self); 233 heap_->RevokeAllThreadLocalAllocationStacks(self); 234 } 235} 236 237void MarkSweep::MarkingPhase() { 238 TimingLogger::ScopedSplit split("MarkingPhase", &timings_); 239 Thread* self = Thread::Current(); 240 241 BindBitmaps(); 242 FindDefaultSpaceBitmap(); 243 244 // Process dirty cards and add dirty cards to mod union tables. 245 heap_->ProcessCards(timings_, false); 246 247 // Need to do this before the checkpoint since we don't want any threads to add references to 248 // the live stack during the recursive mark. 249 timings_.NewSplit("SwapStacks"); 250 heap_->SwapStacks(self); 251 252 WriterMutexLock mu(self, *Locks::heap_bitmap_lock_); 253 MarkRoots(self); 254 live_stack_freeze_size_ = heap_->GetLiveStack()->Size(); 255 MarkReachableObjects(); 256 // Pre-clean dirtied cards to reduce pauses. 
257   PreCleanCards();
258   PreProcessReferences();
259 }
260
261 void MarkSweep::UpdateAndMarkModUnion() {
262   for (const auto& space : heap_->GetContinuousSpaces()) {
263     if (immune_region_.ContainsSpace(space)) {
264       const char* name = space->IsZygoteSpace() ? "UpdateAndMarkZygoteModUnionTable" :
265           "UpdateAndMarkImageModUnionTable";
266       TimingLogger::ScopedSplit split(name, &timings_);
267       accounting::ModUnionTable* mod_union_table = heap_->FindModUnionTableFromSpace(space);
268       CHECK(mod_union_table != nullptr);
269       mod_union_table->UpdateAndMarkReferences(MarkHeapReferenceCallback, this);
270     }
271   }
272 }
273
274 void MarkSweep::MarkThreadRoots(Thread* self) {
275   MarkRootsCheckpoint(self);
276 }
277
278 void MarkSweep::MarkReachableObjects() {
279   UpdateAndMarkModUnion();
280   // Mark everything allocated since the last GC as live so that we can sweep concurrently,
281   // knowing that new allocations won't be marked as live.
282   timings_.StartSplit("MarkStackAsLive");
283   accounting::ObjectStack* live_stack = heap_->GetLiveStack();
284   heap_->MarkAllocStackAsLive(live_stack);
285   live_stack->Reset();
286   timings_.EndSplit();
287   // Recursively mark all the non-image bits set in the mark bitmap.
288   RecursiveMark();
289 }
290
291 void MarkSweep::ReclaimPhase() {
292   TimingLogger::ScopedSplit split("ReclaimPhase", &timings_);
293   Thread* self = Thread::Current();
294
295   if (!IsConcurrent()) {
296     ProcessReferences(self);
297   }
298
299   {
300     WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
301     SweepSystemWeaks();
302   }
303
304   if (IsConcurrent()) {
305     Runtime::Current()->AllowNewSystemWeaks();
306
307     TimingLogger::ScopedSplit split("UnMarkAllocStack", &timings_);
308     WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
309     accounting::ObjectStack* allocation_stack = GetHeap()->allocation_stack_.get();
310     if (!kPreCleanCards) {
311       // The allocation stack contains things allocated since the start of the GC. These may have
312       // been marked during this GC meaning they won't be eligible for reclaiming in the next
313       // sticky GC. Unmark these objects so that they are eligible for reclaiming in the next
314       // sticky GC.
315       // There is a race here which is safely handled. Another thread such as the hprof could
316       // have flushed the alloc stack after we resumed the threads. This is safe however, since
317       // resetting the allocation stack zeros it out with madvise. This means that we will either
318       // read NULLs or attempt to unmark a newly allocated object which will not be marked in the
319       // first place.
320       // We can't do this if we pre-clean cards since we will unmark objects which are no longer on
321       // a dirty card since we aged cards during the pre-cleaning process.
322       mirror::Object** end = allocation_stack->End();
323       for (mirror::Object** it = allocation_stack->Begin(); it != end; ++it) {
324         const Object* obj = *it;
325         if (obj != nullptr) {
326           UnMarkObjectNonNull(obj);
327         }
328       }
329     }
330   }
331
332   {
333     WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
334
335     // Reclaim unmarked objects.
336     Sweep(false);
337
338     // Swap the live and mark bitmaps for each space which we modified. This is an
339     // optimization that enables us to not clear live bits inside of the sweep. Only swaps unbound
340     // bitmaps.
341     timings_.StartSplit("SwapBitmaps");
342     SwapBitmaps();
343     timings_.EndSplit();
344
345     // Unbind the live and mark bitmaps.
346 TimingLogger::ScopedSplit split("UnBindBitmaps", &timings_); 347 GetHeap()->UnBindBitmaps(); 348 } 349} 350 351void MarkSweep::FindDefaultSpaceBitmap() { 352 TimingLogger::ScopedSplit split("FindDefaultMarkBitmap", &timings_); 353 for (const auto& space : GetHeap()->GetContinuousSpaces()) { 354 accounting::SpaceBitmap* bitmap = space->GetMarkBitmap(); 355 if (bitmap != nullptr && 356 space->GetGcRetentionPolicy() == space::kGcRetentionPolicyAlwaysCollect) { 357 current_space_bitmap_ = bitmap; 358 return; 359 } 360 } 361 GetHeap()->DumpSpaces(); 362 LOG(FATAL) << "Could not find a default mark bitmap"; 363} 364 365void MarkSweep::ExpandMarkStack() { 366 ResizeMarkStack(mark_stack_->Capacity() * 2); 367} 368 369void MarkSweep::ResizeMarkStack(size_t new_size) { 370 // Rare case, no need to have Thread::Current be a parameter. 371 if (UNLIKELY(mark_stack_->Size() < mark_stack_->Capacity())) { 372 // Someone else acquired the lock and expanded the mark stack before us. 373 return; 374 } 375 std::vector<Object*> temp(mark_stack_->Begin(), mark_stack_->End()); 376 CHECK_LE(mark_stack_->Size(), new_size); 377 mark_stack_->Resize(new_size); 378 for (const auto& obj : temp) { 379 mark_stack_->PushBack(obj); 380 } 381} 382 383inline void MarkSweep::MarkObjectNonNullParallel(Object* obj) { 384 DCHECK(obj != NULL); 385 if (MarkObjectParallel(obj)) { 386 MutexLock mu(Thread::Current(), mark_stack_lock_); 387 if (UNLIKELY(mark_stack_->Size() >= mark_stack_->Capacity())) { 388 ExpandMarkStack(); 389 } 390 // The object must be pushed on to the mark stack. 391 mark_stack_->PushBack(obj); 392 } 393} 394 395mirror::Object* MarkSweep::MarkObjectCallback(mirror::Object* obj, void* arg) { 396 MarkSweep* mark_sweep = reinterpret_cast<MarkSweep*>(arg); 397 mark_sweep->MarkObject(obj); 398 return obj; 399} 400 401void MarkSweep::MarkHeapReferenceCallback(mirror::HeapReference<mirror::Object>* ref, void* arg) { 402 reinterpret_cast<MarkSweep*>(arg)->MarkObject(ref->AsMirrorPtr()); 403} 404 405inline void MarkSweep::UnMarkObjectNonNull(const Object* obj) { 406 DCHECK(!immune_region_.ContainsObject(obj)); 407 if (kUseBrooksPointer) { 408 // Verify all the objects have the correct Brooks pointer installed. 409 obj->AssertSelfBrooksPointer(); 410 } 411 // Try to take advantage of locality of references within a space, failing this find the space 412 // the hard way. 413 accounting::SpaceBitmap* object_bitmap = current_space_bitmap_; 414 if (UNLIKELY(!object_bitmap->HasAddress(obj))) { 415 accounting::SpaceBitmap* new_bitmap = mark_bitmap_->GetContinuousSpaceBitmap(obj); 416 if (LIKELY(new_bitmap != NULL)) { 417 object_bitmap = new_bitmap; 418 } else { 419 MarkLargeObject(obj, false); 420 return; 421 } 422 } 423 DCHECK(object_bitmap->HasAddress(obj)); 424 object_bitmap->Clear(obj); 425} 426 427inline void MarkSweep::MarkObjectNonNull(Object* obj) { 428 DCHECK(obj != nullptr); 429 if (kUseBrooksPointer) { 430 // Verify all the objects have the correct Brooks pointer installed. 431 obj->AssertSelfBrooksPointer(); 432 } 433 if (immune_region_.ContainsObject(obj)) { 434 if (kCountMarkedObjects) { 435 ++mark_immune_count_; 436 } 437 DCHECK(IsMarked(obj)); 438 return; 439 } 440 // Try to take advantage of locality of references within a space, failing this find the space 441 // the hard way. 
442 accounting::SpaceBitmap* object_bitmap = current_space_bitmap_; 443 if (UNLIKELY(!object_bitmap->HasAddress(obj))) { 444 object_bitmap = mark_bitmap_->GetContinuousSpaceBitmap(obj); 445 if (kCountMarkedObjects) { 446 ++mark_slowpath_count_; 447 } 448 if (UNLIKELY(object_bitmap == nullptr)) { 449 MarkLargeObject(obj, true); 450 return; 451 } 452 } else if (kCountMarkedObjects) { 453 ++mark_fastpath_count_; 454 } 455 // This object was not previously marked. 456 if (!object_bitmap->Set(obj)) { 457 PushOnMarkStack(obj); 458 } 459} 460 461inline void MarkSweep::PushOnMarkStack(Object* obj) { 462 if (UNLIKELY(mark_stack_->Size() >= mark_stack_->Capacity())) { 463 // Lock is not needed but is here anyways to please annotalysis. 464 MutexLock mu(Thread::Current(), mark_stack_lock_); 465 ExpandMarkStack(); 466 } 467 // The object must be pushed on to the mark stack. 468 mark_stack_->PushBack(obj); 469} 470 471// Rare case, probably not worth inlining since it will increase instruction cache miss rate. 472bool MarkSweep::MarkLargeObject(const Object* obj, bool set) { 473 // TODO: support >1 discontinuous space. 474 space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace(); 475 accounting::ObjectSet* large_objects = large_object_space->GetMarkObjects(); 476 if (kProfileLargeObjects) { 477 ++large_object_test_; 478 } 479 if (UNLIKELY(!large_objects->Test(obj))) { 480 if (!large_object_space->Contains(obj)) { 481 LOG(ERROR) << "Tried to mark " << obj << " not contained by any spaces"; 482 LOG(ERROR) << "Attempting see if it's a bad root"; 483 VerifyRoots(); 484 LOG(FATAL) << "Can't mark bad root"; 485 } 486 if (kProfileLargeObjects) { 487 ++large_object_mark_; 488 } 489 if (set) { 490 large_objects->Set(obj); 491 } else { 492 large_objects->Clear(obj); 493 } 494 return true; 495 } 496 return false; 497} 498 499inline bool MarkSweep::MarkObjectParallel(const Object* obj) { 500 DCHECK(obj != nullptr); 501 if (kUseBrooksPointer) { 502 // Verify all the objects have the correct Brooks pointer installed. 503 obj->AssertSelfBrooksPointer(); 504 } 505 if (immune_region_.ContainsObject(obj)) { 506 DCHECK(IsMarked(obj)); 507 return false; 508 } 509 // Try to take advantage of locality of references within a space, failing this find the space 510 // the hard way. 511 accounting::SpaceBitmap* object_bitmap = current_space_bitmap_; 512 if (UNLIKELY(!object_bitmap->HasAddress(obj))) { 513 accounting::SpaceBitmap* new_bitmap = mark_bitmap_->GetContinuousSpaceBitmap(obj); 514 if (new_bitmap != NULL) { 515 object_bitmap = new_bitmap; 516 } else { 517 // TODO: Remove the Thread::Current here? 518 // TODO: Convert this to some kind of atomic marking? 519 MutexLock mu(Thread::Current(), large_object_lock_); 520 return MarkLargeObject(obj, true); 521 } 522 } 523 // Return true if the object was not previously marked. 524 return !object_bitmap->AtomicTestAndSet(obj); 525} 526 527// Used to mark objects when processing the mark stack. If an object is null, it is not marked. 
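// Note on the marking paths above: both SpaceBitmap::Set() and AtomicTestAndSet() report whether
// the bit was already set, so an object is pushed onto the mark stack only the first time it is
// marked. In the parallel path the atomic test-and-set means that when several workers race on
// the same object, only one of them ends up pushing it.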
528inline void MarkSweep::MarkObject(Object* obj) { 529 if (obj != nullptr) { 530 MarkObjectNonNull(obj); 531 } else if (kCountMarkedObjects) { 532 ++mark_null_count_; 533 } 534} 535 536void MarkSweep::MarkRootParallelCallback(Object** root, void* arg, uint32_t /*thread_id*/, 537 RootType /*root_type*/) { 538 reinterpret_cast<MarkSweep*>(arg)->MarkObjectNonNullParallel(*root); 539} 540 541void MarkSweep::VerifyRootMarked(Object** root, void* arg, uint32_t /*thread_id*/, 542 RootType /*root_type*/) { 543 CHECK(reinterpret_cast<MarkSweep*>(arg)->IsMarked(*root)); 544} 545 546void MarkSweep::MarkRootCallback(Object** root, void* arg, uint32_t /*thread_id*/, 547 RootType /*root_type*/) { 548 reinterpret_cast<MarkSweep*>(arg)->MarkObjectNonNull(*root); 549} 550 551void MarkSweep::VerifyRootCallback(const Object* root, void* arg, size_t vreg, 552 const StackVisitor* visitor) { 553 reinterpret_cast<MarkSweep*>(arg)->VerifyRoot(root, vreg, visitor); 554} 555 556void MarkSweep::VerifyRoot(const Object* root, size_t vreg, const StackVisitor* visitor) { 557 // See if the root is on any space bitmap. 558 if (GetHeap()->GetLiveBitmap()->GetContinuousSpaceBitmap(root) == NULL) { 559 space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace(); 560 if (!large_object_space->Contains(root)) { 561 LOG(ERROR) << "Found invalid root: " << root; 562 if (visitor != NULL) { 563 LOG(ERROR) << visitor->DescribeLocation() << " in VReg: " << vreg; 564 } 565 } 566 } 567} 568 569void MarkSweep::VerifyRoots() { 570 Runtime::Current()->GetThreadList()->VerifyRoots(VerifyRootCallback, this); 571} 572 573void MarkSweep::MarkRoots(Thread* self) { 574 if (Locks::mutator_lock_->IsExclusiveHeld(self)) { 575 // If we exclusively hold the mutator lock, all threads must be suspended. 576 timings_.StartSplit("MarkRoots"); 577 Runtime::Current()->VisitRoots(MarkRootCallback, this); 578 timings_.EndSplit(); 579 RevokeAllThreadLocalAllocationStacks(self); 580 } else { 581 MarkThreadRoots(self); 582 // At this point the live stack should no longer have any mutators which push into it. 583 MarkNonThreadRoots(); 584 MarkConcurrentRoots( 585 static_cast<VisitRootFlags>(kVisitRootFlagAllRoots | kVisitRootFlagStartLoggingNewRoots)); 586 } 587} 588 589void MarkSweep::MarkNonThreadRoots() { 590 timings_.StartSplit("MarkNonThreadRoots"); 591 Runtime::Current()->VisitNonThreadRoots(MarkRootCallback, this); 592 timings_.EndSplit(); 593} 594 595void MarkSweep::MarkConcurrentRoots(VisitRootFlags flags) { 596 timings_.StartSplit("MarkConcurrentRoots"); 597 // Visit all runtime roots and clear dirty flags. 
598 Runtime::Current()->VisitConcurrentRoots(MarkRootCallback, this, flags); 599 timings_.EndSplit(); 600} 601 602class ScanObjectVisitor { 603 public: 604 explicit ScanObjectVisitor(MarkSweep* const mark_sweep) ALWAYS_INLINE 605 : mark_sweep_(mark_sweep) {} 606 607 void operator()(Object* obj) const ALWAYS_INLINE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) 608 EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) { 609 if (kCheckLocks) { 610 Locks::mutator_lock_->AssertSharedHeld(Thread::Current()); 611 Locks::heap_bitmap_lock_->AssertExclusiveHeld(Thread::Current()); 612 } 613 mark_sweep_->ScanObject(obj); 614 } 615 616 private: 617 MarkSweep* const mark_sweep_; 618}; 619 620class DelayReferenceReferentVisitor { 621 public: 622 explicit DelayReferenceReferentVisitor(MarkSweep* collector) : collector_(collector) { 623 } 624 625 void operator()(mirror::Class* klass, mirror::Reference* ref) const 626 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) 627 EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) { 628 collector_->DelayReferenceReferent(klass, ref); 629 } 630 631 private: 632 MarkSweep* const collector_; 633}; 634 635template <bool kUseFinger = false> 636class MarkStackTask : public Task { 637 public: 638 MarkStackTask(ThreadPool* thread_pool, MarkSweep* mark_sweep, size_t mark_stack_size, 639 Object** mark_stack) 640 : mark_sweep_(mark_sweep), 641 thread_pool_(thread_pool), 642 mark_stack_pos_(mark_stack_size) { 643 // We may have to copy part of an existing mark stack when another mark stack overflows. 644 if (mark_stack_size != 0) { 645 DCHECK(mark_stack != NULL); 646 // TODO: Check performance? 647 std::copy(mark_stack, mark_stack + mark_stack_size, mark_stack_); 648 } 649 if (kCountTasks) { 650 ++mark_sweep_->work_chunks_created_; 651 } 652 } 653 654 static const size_t kMaxSize = 1 * KB; 655 656 protected: 657 class MarkObjectParallelVisitor { 658 public: 659 explicit MarkObjectParallelVisitor(MarkStackTask<kUseFinger>* chunk_task, 660 MarkSweep* mark_sweep) ALWAYS_INLINE 661 : chunk_task_(chunk_task), mark_sweep_(mark_sweep) {} 662 663 void operator()(Object* obj, MemberOffset offset, bool /* static */) const ALWAYS_INLINE 664 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { 665 mirror::Object* ref = obj->GetFieldObject<mirror::Object>(offset, false); 666 if (ref != nullptr && mark_sweep_->MarkObjectParallel(ref)) { 667 if (kUseFinger) { 668 android_memory_barrier(); 669 if (reinterpret_cast<uintptr_t>(ref) >= 670 static_cast<uintptr_t>(mark_sweep_->atomic_finger_)) { 671 return; 672 } 673 } 674 chunk_task_->MarkStackPush(ref); 675 } 676 } 677 678 private: 679 MarkStackTask<kUseFinger>* const chunk_task_; 680 MarkSweep* const mark_sweep_; 681 }; 682 683 class ScanObjectParallelVisitor { 684 public: 685 explicit ScanObjectParallelVisitor(MarkStackTask<kUseFinger>* chunk_task) ALWAYS_INLINE 686 : chunk_task_(chunk_task) {} 687 688 // No thread safety analysis since multiple threads will use this visitor. 689 void operator()(Object* obj) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) 690 EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) { 691 MarkSweep* const mark_sweep = chunk_task_->mark_sweep_; 692 MarkObjectParallelVisitor mark_visitor(chunk_task_, mark_sweep); 693 DelayReferenceReferentVisitor ref_visitor(mark_sweep); 694 mark_sweep->ScanObjectVisit(obj, mark_visitor, ref_visitor); 695 } 696 697 private: 698 MarkStackTask<kUseFinger>* const chunk_task_; 699 }; 700 701 virtual ~MarkStackTask() { 702 // Make sure that we have cleared our mark stack. 
703 DCHECK_EQ(mark_stack_pos_, 0U); 704 if (kCountTasks) { 705 ++mark_sweep_->work_chunks_deleted_; 706 } 707 } 708 709 MarkSweep* const mark_sweep_; 710 ThreadPool* const thread_pool_; 711 // Thread local mark stack for this task. 712 Object* mark_stack_[kMaxSize]; 713 // Mark stack position. 714 size_t mark_stack_pos_; 715 716 void MarkStackPush(Object* obj) ALWAYS_INLINE { 717 if (UNLIKELY(mark_stack_pos_ == kMaxSize)) { 718 // Mark stack overflow, give 1/2 the stack to the thread pool as a new work task. 719 mark_stack_pos_ /= 2; 720 auto* task = new MarkStackTask(thread_pool_, mark_sweep_, kMaxSize - mark_stack_pos_, 721 mark_stack_ + mark_stack_pos_); 722 thread_pool_->AddTask(Thread::Current(), task); 723 } 724 DCHECK(obj != nullptr); 725 DCHECK_LT(mark_stack_pos_, kMaxSize); 726 mark_stack_[mark_stack_pos_++] = obj; 727 } 728 729 virtual void Finalize() { 730 delete this; 731 } 732 733 // Scans all of the objects 734 virtual void Run(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) 735 EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) { 736 ScanObjectParallelVisitor visitor(this); 737 // TODO: Tune this. 738 static const size_t kFifoSize = 4; 739 BoundedFifoPowerOfTwo<Object*, kFifoSize> prefetch_fifo; 740 for (;;) { 741 Object* obj = nullptr; 742 if (kUseMarkStackPrefetch) { 743 while (mark_stack_pos_ != 0 && prefetch_fifo.size() < kFifoSize) { 744 Object* obj = mark_stack_[--mark_stack_pos_]; 745 DCHECK(obj != nullptr); 746 __builtin_prefetch(obj); 747 prefetch_fifo.push_back(obj); 748 } 749 if (UNLIKELY(prefetch_fifo.empty())) { 750 break; 751 } 752 obj = prefetch_fifo.front(); 753 prefetch_fifo.pop_front(); 754 } else { 755 if (UNLIKELY(mark_stack_pos_ == 0)) { 756 break; 757 } 758 obj = mark_stack_[--mark_stack_pos_]; 759 } 760 DCHECK(obj != nullptr); 761 visitor(obj); 762 } 763 } 764}; 765 766class CardScanTask : public MarkStackTask<false> { 767 public: 768 CardScanTask(ThreadPool* thread_pool, MarkSweep* mark_sweep, accounting::SpaceBitmap* bitmap, 769 byte* begin, byte* end, byte minimum_age, size_t mark_stack_size, 770 Object** mark_stack_obj) 771 : MarkStackTask<false>(thread_pool, mark_sweep, mark_stack_size, mark_stack_obj), 772 bitmap_(bitmap), 773 begin_(begin), 774 end_(end), 775 minimum_age_(minimum_age) { 776 } 777 778 protected: 779 accounting::SpaceBitmap* const bitmap_; 780 byte* const begin_; 781 byte* const end_; 782 const byte minimum_age_; 783 784 virtual void Finalize() { 785 delete this; 786 } 787 788 virtual void Run(Thread* self) NO_THREAD_SAFETY_ANALYSIS { 789 ScanObjectParallelVisitor visitor(this); 790 accounting::CardTable* card_table = mark_sweep_->GetHeap()->GetCardTable(); 791 size_t cards_scanned = card_table->Scan(bitmap_, begin_, end_, visitor, minimum_age_); 792 VLOG(heap) << "Parallel scanning cards " << reinterpret_cast<void*>(begin_) << " - " 793 << reinterpret_cast<void*>(end_) << " = " << cards_scanned; 794 // Finish by emptying our local mark stack. 
795 MarkStackTask::Run(self); 796 } 797}; 798 799size_t MarkSweep::GetThreadCount(bool paused) const { 800 if (heap_->GetThreadPool() == nullptr || !heap_->CareAboutPauseTimes()) { 801 return 0; 802 } 803 if (paused) { 804 return heap_->GetParallelGCThreadCount() + 1; 805 } else { 806 return heap_->GetConcGCThreadCount() + 1; 807 } 808} 809 810void MarkSweep::ScanGrayObjects(bool paused, byte minimum_age) { 811 accounting::CardTable* card_table = GetHeap()->GetCardTable(); 812 ThreadPool* thread_pool = GetHeap()->GetThreadPool(); 813 size_t thread_count = GetThreadCount(paused); 814 // The parallel version with only one thread is faster for card scanning, TODO: fix. 815 if (kParallelCardScan && thread_count > 0) { 816 Thread* self = Thread::Current(); 817 // Can't have a different split for each space since multiple spaces can have their cards being 818 // scanned at the same time. 819 timings_.StartSplit(paused ? "(Paused)ScanGrayObjects" : "ScanGrayObjects"); 820 // Try to take some of the mark stack since we can pass this off to the worker tasks. 821 Object** mark_stack_begin = mark_stack_->Begin(); 822 Object** mark_stack_end = mark_stack_->End(); 823 const size_t mark_stack_size = mark_stack_end - mark_stack_begin; 824 // Estimated number of work tasks we will create. 825 const size_t mark_stack_tasks = GetHeap()->GetContinuousSpaces().size() * thread_count; 826 DCHECK_NE(mark_stack_tasks, 0U); 827 const size_t mark_stack_delta = std::min(CardScanTask::kMaxSize / 2, 828 mark_stack_size / mark_stack_tasks + 1); 829 for (const auto& space : GetHeap()->GetContinuousSpaces()) { 830 if (space->GetMarkBitmap() == nullptr) { 831 continue; 832 } 833 byte* card_begin = space->Begin(); 834 byte* card_end = space->End(); 835 // Align up the end address. For example, the image space's end 836 // may not be card-size-aligned. 837 card_end = AlignUp(card_end, accounting::CardTable::kCardSize); 838 DCHECK(IsAligned<accounting::CardTable::kCardSize>(card_begin)); 839 DCHECK(IsAligned<accounting::CardTable::kCardSize>(card_end)); 840 // Calculate how many bytes of heap we will scan, 841 const size_t address_range = card_end - card_begin; 842 // Calculate how much address range each task gets. 843 const size_t card_delta = RoundUp(address_range / thread_count + 1, 844 accounting::CardTable::kCardSize); 845 // Create the worker tasks for this space. 846 while (card_begin != card_end) { 847 // Add a range of cards. 848 size_t addr_remaining = card_end - card_begin; 849 size_t card_increment = std::min(card_delta, addr_remaining); 850 // Take from the back of the mark stack. 851 size_t mark_stack_remaining = mark_stack_end - mark_stack_begin; 852 size_t mark_stack_increment = std::min(mark_stack_delta, mark_stack_remaining); 853 mark_stack_end -= mark_stack_increment; 854 mark_stack_->PopBackCount(static_cast<int32_t>(mark_stack_increment)); 855 DCHECK_EQ(mark_stack_end, mark_stack_->End()); 856 // Add the new task to the thread pool. 857 auto* task = new CardScanTask(thread_pool, this, space->GetMarkBitmap(), card_begin, 858 card_begin + card_increment, minimum_age, 859 mark_stack_increment, mark_stack_end); 860 thread_pool->AddTask(self, task); 861 card_begin += card_increment; 862 } 863 } 864 865 // Note: the card scan below may dirty new cards (and scan them) 866 // as a side effect when a Reference object is encountered and 867 // queued during the marking. See b/11465268. 
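// At this point each space's address range has been cut into roughly thread_count card-aligned
// chunks of card_delta bytes, and the tail of the mark stack has been handed out in
// mark_stack_delta-sized slices so that every CardScanTask starts with some seed work. The pool
// is capped at thread_count - 1 workers below, leaving room for the current thread to
// participate while it waits.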
868 thread_pool->SetMaxActiveWorkers(thread_count - 1); 869 thread_pool->StartWorkers(self); 870 thread_pool->Wait(self, true, true); 871 thread_pool->StopWorkers(self); 872 timings_.EndSplit(); 873 } else { 874 for (const auto& space : GetHeap()->GetContinuousSpaces()) { 875 if (space->GetMarkBitmap() != nullptr) { 876 // Image spaces are handled properly since live == marked for them. 877 switch (space->GetGcRetentionPolicy()) { 878 case space::kGcRetentionPolicyNeverCollect: 879 timings_.StartSplit(paused ? "(Paused)ScanGrayImageSpaceObjects" : 880 "ScanGrayImageSpaceObjects"); 881 break; 882 case space::kGcRetentionPolicyFullCollect: 883 timings_.StartSplit(paused ? "(Paused)ScanGrayZygoteSpaceObjects" : 884 "ScanGrayZygoteSpaceObjects"); 885 break; 886 case space::kGcRetentionPolicyAlwaysCollect: 887 timings_.StartSplit(paused ? "(Paused)ScanGrayAllocSpaceObjects" : 888 "ScanGrayAllocSpaceObjects"); 889 break; 890 } 891 ScanObjectVisitor visitor(this); 892 card_table->Scan(space->GetMarkBitmap(), space->Begin(), space->End(), visitor, minimum_age); 893 timings_.EndSplit(); 894 } 895 } 896 } 897} 898 899class RecursiveMarkTask : public MarkStackTask<false> { 900 public: 901 RecursiveMarkTask(ThreadPool* thread_pool, MarkSweep* mark_sweep, 902 accounting::SpaceBitmap* bitmap, uintptr_t begin, uintptr_t end) 903 : MarkStackTask<false>(thread_pool, mark_sweep, 0, NULL), 904 bitmap_(bitmap), 905 begin_(begin), 906 end_(end) { 907 } 908 909 protected: 910 accounting::SpaceBitmap* const bitmap_; 911 const uintptr_t begin_; 912 const uintptr_t end_; 913 914 virtual void Finalize() { 915 delete this; 916 } 917 918 // Scans all of the objects 919 virtual void Run(Thread* self) NO_THREAD_SAFETY_ANALYSIS { 920 ScanObjectParallelVisitor visitor(this); 921 bitmap_->VisitMarkedRange(begin_, end_, visitor); 922 // Finish by emptying our local mark stack. 923 MarkStackTask::Run(self); 924 } 925}; 926 927// Populates the mark stack based on the set of marked objects and 928// recursively marks until the mark stack is emptied. 929void MarkSweep::RecursiveMark() { 930 TimingLogger::ScopedSplit split("RecursiveMark", &timings_); 931 // RecursiveMark will build the lists of known instances of the Reference classes. See 932 // DelayReferenceReferent for details. 933 if (kUseRecursiveMark) { 934 const bool partial = GetGcType() == kGcTypePartial; 935 ScanObjectVisitor scan_visitor(this); 936 auto* self = Thread::Current(); 937 ThreadPool* thread_pool = heap_->GetThreadPool(); 938 size_t thread_count = GetThreadCount(false); 939 const bool parallel = kParallelRecursiveMark && thread_count > 1; 940 mark_stack_->Reset(); 941 for (const auto& space : GetHeap()->GetContinuousSpaces()) { 942 if ((space->GetGcRetentionPolicy() == space::kGcRetentionPolicyAlwaysCollect) || 943 (!partial && space->GetGcRetentionPolicy() == space::kGcRetentionPolicyFullCollect)) { 944 current_space_bitmap_ = space->GetMarkBitmap(); 945 if (current_space_bitmap_ == nullptr) { 946 continue; 947 } 948 if (parallel) { 949 // We will use the mark stack the future. 950 // CHECK(mark_stack_->IsEmpty()); 951 // This function does not handle heap end increasing, so we must use the space end. 952 uintptr_t begin = reinterpret_cast<uintptr_t>(space->Begin()); 953 uintptr_t end = reinterpret_cast<uintptr_t>(space->End()); 954 atomic_finger_ = static_cast<int32_t>(0xFFFFFFFF); 955 956 // Create a few worker tasks. 
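// Each task created below claims 1 / (2 * thread_count) of the *remaining* range, rounded up to
// 1 KB, so chunks taper off as begin advances: earlier tasks get larger ranges, later ones
// smaller, and once a chunk would drop below 16 KB the final task simply takes whatever is left.
// Illustrative numbers: with thread_count = 4 (n = 8) and a 32 MB space, successive chunks are
// roughly 4 MB, 3.5 MB, ~3.1 MB, ... until the space is exhausted.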
957 const size_t n = thread_count * 2; 958 while (begin != end) { 959 uintptr_t start = begin; 960 uintptr_t delta = (end - begin) / n; 961 delta = RoundUp(delta, KB); 962 if (delta < 16 * KB) delta = end - begin; 963 begin += delta; 964 auto* task = new RecursiveMarkTask(thread_pool, this, current_space_bitmap_, start, 965 begin); 966 thread_pool->AddTask(self, task); 967 } 968 thread_pool->SetMaxActiveWorkers(thread_count - 1); 969 thread_pool->StartWorkers(self); 970 thread_pool->Wait(self, true, true); 971 thread_pool->StopWorkers(self); 972 } else { 973 // This function does not handle heap end increasing, so we must use the space end. 974 uintptr_t begin = reinterpret_cast<uintptr_t>(space->Begin()); 975 uintptr_t end = reinterpret_cast<uintptr_t>(space->End()); 976 current_space_bitmap_->VisitMarkedRange(begin, end, scan_visitor); 977 } 978 } 979 } 980 } 981 ProcessMarkStack(false); 982} 983 984mirror::Object* MarkSweep::IsMarkedCallback(mirror::Object* object, void* arg) { 985 if (reinterpret_cast<MarkSweep*>(arg)->IsMarked(object)) { 986 return object; 987 } 988 return nullptr; 989} 990 991void MarkSweep::RecursiveMarkDirtyObjects(bool paused, byte minimum_age) { 992 ScanGrayObjects(paused, minimum_age); 993 ProcessMarkStack(paused); 994} 995 996void MarkSweep::ReMarkRoots() { 997 Locks::mutator_lock_->AssertExclusiveHeld(Thread::Current()); 998 timings_.StartSplit("(Paused)ReMarkRoots"); 999 Runtime::Current()->VisitRoots( 1000 MarkRootCallback, this, static_cast<VisitRootFlags>(kVisitRootFlagNewRoots | 1001 kVisitRootFlagStopLoggingNewRoots | 1002 kVisitRootFlagClearRootLog)); 1003 timings_.EndSplit(); 1004 if (kVerifyRoots) { 1005 timings_.StartSplit("(Paused)VerifyRoots"); 1006 Runtime::Current()->VisitRoots(VerifyRootMarked, this); 1007 timings_.EndSplit(); 1008 } 1009} 1010 1011void MarkSweep::SweepSystemWeaks() { 1012 Runtime* runtime = Runtime::Current(); 1013 timings_.StartSplit("SweepSystemWeaks"); 1014 runtime->SweepSystemWeaks(IsMarkedCallback, this); 1015 timings_.EndSplit(); 1016} 1017 1018mirror::Object* MarkSweep::VerifySystemWeakIsLiveCallback(Object* obj, void* arg) { 1019 reinterpret_cast<MarkSweep*>(arg)->VerifyIsLive(obj); 1020 // We don't actually want to sweep the object, so lets return "marked" 1021 return obj; 1022} 1023 1024void MarkSweep::VerifyIsLive(const Object* obj) { 1025 if (!heap_->GetLiveBitmap()->Test(obj)) { 1026 space::LargeObjectSpace* large_object_space = heap_->GetLargeObjectsSpace(); 1027 if (!large_object_space->GetLiveObjects()->Test(obj)) { 1028 if (std::find(heap_->allocation_stack_->Begin(), heap_->allocation_stack_->End(), obj) == 1029 heap_->allocation_stack_->End()) { 1030 // Object not found! 1031 heap_->DumpSpaces(); 1032 LOG(FATAL) << "Found dead object " << obj; 1033 } 1034 } 1035 } 1036} 1037 1038void MarkSweep::VerifySystemWeaks() { 1039 // Verify system weaks, uses a special object visitor which returns the input object. 1040 Runtime::Current()->SweepSystemWeaks(VerifySystemWeakIsLiveCallback, this); 1041} 1042 1043class CheckpointMarkThreadRoots : public Closure { 1044 public: 1045 explicit CheckpointMarkThreadRoots(MarkSweep* mark_sweep) : mark_sweep_(mark_sweep) {} 1046 1047 virtual void Run(Thread* thread) OVERRIDE NO_THREAD_SAFETY_ANALYSIS { 1048 ATRACE_BEGIN("Marking thread roots"); 1049 // Note: self is not necessarily equal to thread since thread may be suspended. 
1050     Thread* self = Thread::Current();
1051     CHECK(thread == self || thread->IsSuspended() || thread->GetState() == kWaitingPerformingGc)
1052         << thread->GetState() << " thread " << thread << " self " << self;
1053     thread->VisitRoots(MarkSweep::MarkRootParallelCallback, mark_sweep_);
1054     ATRACE_END();
1055     if (kUseThreadLocalAllocationStack) {
1056       thread->RevokeThreadLocalAllocationStack();
1057     }
1058     if (kRevokeRosAllocThreadLocalBuffersAtCheckpoint) {
1059       mark_sweep_->GetHeap()->RevokeRosAllocThreadLocalBuffers(thread);
1060     }
1061     mark_sweep_->GetBarrier().Pass(self);
1062   }
1063
1064  private:
1065   MarkSweep* const mark_sweep_;
1066 };
1067
1068 void MarkSweep::MarkRootsCheckpoint(Thread* self) {
1069   CheckpointMarkThreadRoots check_point(this);
1070   timings_.StartSplit("MarkRootsCheckpoint");
1071   ThreadList* thread_list = Runtime::Current()->GetThreadList();
1072   // Request that the checkpoint be run on all threads, returning a count of the threads that
1073   // must run through the barrier, including self.
1074   size_t barrier_count = thread_list->RunCheckpoint(&check_point);
1075   // Release locks then wait for all mutator threads to pass the barrier.
1076   // TODO: optimize to not release locks when there are no threads to wait for.
1077   Locks::heap_bitmap_lock_->ExclusiveUnlock(self);
1078   Locks::mutator_lock_->SharedUnlock(self);
1079   {
1080     ScopedThreadStateChange tsc(self, kWaitingForCheckPointsToRun);
1081     gc_barrier_->Increment(self, barrier_count);
1082   }
1083   Locks::mutator_lock_->SharedLock(self);
1084   Locks::heap_bitmap_lock_->ExclusiveLock(self);
1085   timings_.EndSplit();
1086 }
1087
1088 void MarkSweep::SweepArray(accounting::ObjectStack* allocations, bool swap_bitmaps) {
1089   timings_.StartSplit("SweepArray");
1090   Thread* self = Thread::Current();
1091   mirror::Object* chunk_free_buffer[kSweepArrayChunkFreeSize];
1092   size_t chunk_free_pos = 0;
1093   size_t freed_bytes = 0;
1094   size_t freed_large_object_bytes = 0;
1095   size_t freed_objects = 0;
1096   size_t freed_large_objects = 0;
1097   // How many objects are left in the array, modified after each space is swept.
1098   Object** objects = const_cast<Object**>(allocations->Begin());
1099   size_t count = allocations->Size();
1100   // Change the order to ensure that the non-moving space is swept last as an optimization.
1101   std::vector<space::ContinuousSpace*> sweep_spaces;
1102   space::ContinuousSpace* non_moving_space = nullptr;
1103   for (space::ContinuousSpace* space : heap_->GetContinuousSpaces()) {
1104     if (space->IsAllocSpace() && !immune_region_.ContainsSpace(space) &&
1105         space->GetLiveBitmap() != nullptr) {
1106       if (space == heap_->GetNonMovingSpace()) {
1107         non_moving_space = space;
1108       } else {
1109         sweep_spaces.push_back(space);
1110       }
1111     }
1112   }
1113   // Unlikely to sweep a significant amount of non-movable objects, so we do these after the
1114   // other alloc spaces as an optimization.
1115   if (non_moving_space != nullptr) {
1116     sweep_spaces.push_back(non_moving_space);
1117   }
1118   // Start by sweeping the continuous spaces.
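// For each space below, unmarked objects belonging to that space are batched into
// chunk_free_buffer and released kSweepArrayChunkFreeSize (1024) entries at a time with a single
// FreeList() call, which amortizes the per-call overhead of the allocator. Objects that live in
// some other space are compacted to the front of the array through 'out' and 'count' shrinks
// accordingly, so each later space (and finally the large object space) only re-examines the
// survivors.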
1119 for (space::ContinuousSpace* space : sweep_spaces) { 1120 space::AllocSpace* alloc_space = space->AsAllocSpace(); 1121 accounting::SpaceBitmap* live_bitmap = space->GetLiveBitmap(); 1122 accounting::SpaceBitmap* mark_bitmap = space->GetMarkBitmap(); 1123 if (swap_bitmaps) { 1124 std::swap(live_bitmap, mark_bitmap); 1125 } 1126 Object** out = objects; 1127 for (size_t i = 0; i < count; ++i) { 1128 Object* obj = objects[i]; 1129 if (kUseThreadLocalAllocationStack && obj == nullptr) { 1130 continue; 1131 } 1132 if (space->HasAddress(obj)) { 1133 // This object is in the space, remove it from the array and add it to the sweep buffer 1134 // if needed. 1135 if (!mark_bitmap->Test(obj)) { 1136 if (chunk_free_pos >= kSweepArrayChunkFreeSize) { 1137 timings_.StartSplit("FreeList"); 1138 freed_objects += chunk_free_pos; 1139 freed_bytes += alloc_space->FreeList(self, chunk_free_pos, chunk_free_buffer); 1140 timings_.EndSplit(); 1141 chunk_free_pos = 0; 1142 } 1143 chunk_free_buffer[chunk_free_pos++] = obj; 1144 } 1145 } else { 1146 *(out++) = obj; 1147 } 1148 } 1149 if (chunk_free_pos > 0) { 1150 timings_.StartSplit("FreeList"); 1151 freed_objects += chunk_free_pos; 1152 freed_bytes += alloc_space->FreeList(self, chunk_free_pos, chunk_free_buffer); 1153 timings_.EndSplit(); 1154 chunk_free_pos = 0; 1155 } 1156 // All of the references which space contained are no longer in the allocation stack, update 1157 // the count. 1158 count = out - objects; 1159 } 1160 // Handle the large object space. 1161 space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace(); 1162 accounting::ObjectSet* large_live_objects = large_object_space->GetLiveObjects(); 1163 accounting::ObjectSet* large_mark_objects = large_object_space->GetMarkObjects(); 1164 if (swap_bitmaps) { 1165 std::swap(large_live_objects, large_mark_objects); 1166 } 1167 for (size_t i = 0; i < count; ++i) { 1168 Object* obj = objects[i]; 1169 // Handle large objects. 1170 if (kUseThreadLocalAllocationStack && obj == nullptr) { 1171 continue; 1172 } 1173 if (!large_mark_objects->Test(obj)) { 1174 ++freed_large_objects; 1175 freed_large_object_bytes += large_object_space->Free(self, obj); 1176 } 1177 } 1178 timings_.EndSplit(); 1179 1180 timings_.StartSplit("RecordFree"); 1181 VLOG(heap) << "Freed " << freed_objects << "/" << count 1182 << " objects with size " << PrettySize(freed_bytes); 1183 heap_->RecordFree(freed_objects + freed_large_objects, freed_bytes + freed_large_object_bytes); 1184 freed_objects_.FetchAndAdd(freed_objects); 1185 freed_large_objects_.FetchAndAdd(freed_large_objects); 1186 freed_bytes_.FetchAndAdd(freed_bytes); 1187 freed_large_object_bytes_.FetchAndAdd(freed_large_object_bytes); 1188 timings_.EndSplit(); 1189 1190 timings_.StartSplit("ResetStack"); 1191 allocations->Reset(); 1192 timings_.EndSplit(); 1193} 1194 1195void MarkSweep::Sweep(bool swap_bitmaps) { 1196 DCHECK(mark_stack_->IsEmpty()); 1197 TimingLogger::ScopedSplit("Sweep", &timings_); 1198 for (const auto& space : GetHeap()->GetContinuousSpaces()) { 1199 if (space->IsContinuousMemMapAllocSpace()) { 1200 space::ContinuousMemMapAllocSpace* alloc_space = space->AsContinuousMemMapAllocSpace(); 1201 TimingLogger::ScopedSplit split( 1202 alloc_space->IsZygoteSpace() ? 
"SweepZygoteSpace" : "SweepMallocSpace", &timings_); 1203 size_t freed_objects = 0; 1204 size_t freed_bytes = 0; 1205 alloc_space->Sweep(swap_bitmaps, &freed_objects, &freed_bytes); 1206 heap_->RecordFree(freed_objects, freed_bytes); 1207 freed_objects_.FetchAndAdd(freed_objects); 1208 freed_bytes_.FetchAndAdd(freed_bytes); 1209 } 1210 } 1211 SweepLargeObjects(swap_bitmaps); 1212} 1213 1214void MarkSweep::SweepLargeObjects(bool swap_bitmaps) { 1215 TimingLogger::ScopedSplit("SweepLargeObjects", &timings_); 1216 size_t freed_objects = 0; 1217 size_t freed_bytes = 0; 1218 GetHeap()->GetLargeObjectsSpace()->Sweep(swap_bitmaps, &freed_objects, &freed_bytes); 1219 freed_large_objects_.FetchAndAdd(freed_objects); 1220 freed_large_object_bytes_.FetchAndAdd(freed_bytes); 1221 GetHeap()->RecordFree(freed_objects, freed_bytes); 1222} 1223 1224// Process the "referent" field in a java.lang.ref.Reference. If the referent has not yet been 1225// marked, put it on the appropriate list in the heap for later processing. 1226void MarkSweep::DelayReferenceReferent(mirror::Class* klass, mirror::Reference* ref) { 1227 DCHECK(klass != nullptr); 1228 if (kCountJavaLangRefs) { 1229 ++reference_count_; 1230 } 1231 heap_->DelayReferenceReferent(klass, ref, IsMarkedCallback, this); 1232} 1233 1234class MarkObjectVisitor { 1235 public: 1236 explicit MarkObjectVisitor(MarkSweep* const mark_sweep) ALWAYS_INLINE : mark_sweep_(mark_sweep) { 1237 } 1238 1239 void operator()(Object* obj, MemberOffset offset, bool /* is_static */) const 1240 ALWAYS_INLINE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) 1241 EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) { 1242 if (kCheckLocks) { 1243 Locks::mutator_lock_->AssertSharedHeld(Thread::Current()); 1244 Locks::heap_bitmap_lock_->AssertExclusiveHeld(Thread::Current()); 1245 } 1246 mark_sweep_->MarkObject(obj->GetFieldObject<mirror::Object>(offset, false)); 1247 } 1248 1249 private: 1250 MarkSweep* const mark_sweep_; 1251}; 1252 1253// Scans an object reference. Determines the type of the reference 1254// and dispatches to a specialized scanning routine. 1255void MarkSweep::ScanObject(Object* obj) { 1256 MarkObjectVisitor mark_visitor(this); 1257 DelayReferenceReferentVisitor ref_visitor(this); 1258 ScanObjectVisit(obj, mark_visitor, ref_visitor); 1259} 1260 1261void MarkSweep::ProcessMarkStackPausedCallback(void* arg) { 1262 reinterpret_cast<MarkSweep*>(arg)->ProcessMarkStack(true); 1263} 1264 1265void MarkSweep::ProcessMarkStackParallel(size_t thread_count) { 1266 Thread* self = Thread::Current(); 1267 ThreadPool* thread_pool = GetHeap()->GetThreadPool(); 1268 const size_t chunk_size = std::min(mark_stack_->Size() / thread_count + 1, 1269 static_cast<size_t>(MarkStackTask<false>::kMaxSize)); 1270 CHECK_GT(chunk_size, 0U); 1271 // Split the current mark stack up into work tasks. 1272 for (mirror::Object **it = mark_stack_->Begin(), **end = mark_stack_->End(); it < end; ) { 1273 const size_t delta = std::min(static_cast<size_t>(end - it), chunk_size); 1274 thread_pool->AddTask(self, new MarkStackTask<false>(thread_pool, this, delta, it)); 1275 it += delta; 1276 } 1277 thread_pool->SetMaxActiveWorkers(thread_count - 1); 1278 thread_pool->StartWorkers(self); 1279 thread_pool->Wait(self, true, true); 1280 thread_pool->StopWorkers(self); 1281 mark_stack_->Reset(); 1282 CHECK_EQ(work_chunks_created_, work_chunks_deleted_) << " some of the work chunks were leaked"; 1283} 1284 1285// Scan anything that's on the mark stack. 
1286void MarkSweep::ProcessMarkStack(bool paused) { 1287 timings_.StartSplit(paused ? "(Paused)ProcessMarkStack" : "ProcessMarkStack"); 1288 size_t thread_count = GetThreadCount(paused); 1289 if (kParallelProcessMarkStack && thread_count > 1 && 1290 mark_stack_->Size() >= kMinimumParallelMarkStackSize) { 1291 ProcessMarkStackParallel(thread_count); 1292 } else { 1293 // TODO: Tune this. 1294 static const size_t kFifoSize = 4; 1295 BoundedFifoPowerOfTwo<Object*, kFifoSize> prefetch_fifo; 1296 for (;;) { 1297 Object* obj = NULL; 1298 if (kUseMarkStackPrefetch) { 1299 while (!mark_stack_->IsEmpty() && prefetch_fifo.size() < kFifoSize) { 1300 Object* obj = mark_stack_->PopBack(); 1301 DCHECK(obj != NULL); 1302 __builtin_prefetch(obj); 1303 prefetch_fifo.push_back(obj); 1304 } 1305 if (prefetch_fifo.empty()) { 1306 break; 1307 } 1308 obj = prefetch_fifo.front(); 1309 prefetch_fifo.pop_front(); 1310 } else { 1311 if (mark_stack_->IsEmpty()) { 1312 break; 1313 } 1314 obj = mark_stack_->PopBack(); 1315 } 1316 DCHECK(obj != nullptr); 1317 ScanObject(obj); 1318 } 1319 } 1320 timings_.EndSplit(); 1321} 1322 1323inline bool MarkSweep::IsMarked(const Object* object) const 1324 SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) { 1325 if (immune_region_.ContainsObject(object)) { 1326 return true; 1327 } 1328 if (current_space_bitmap_->HasAddress(object)) { 1329 return current_space_bitmap_->Test(object); 1330 } 1331 return mark_bitmap_->Test(object); 1332} 1333 1334void MarkSweep::FinishPhase() { 1335 TimingLogger::ScopedSplit split("FinishPhase", &timings_); 1336 // Can't enqueue references if we hold the mutator lock. 1337 timings_.NewSplit("PostGcVerification"); 1338 heap_->PostGcVerification(this); 1339 if (kCountScannedTypes) { 1340 VLOG(gc) << "MarkSweep scanned classes=" << class_count_ << " arrays=" << array_count_ 1341 << " other=" << other_count_; 1342 } 1343 if (kCountTasks) { 1344 VLOG(gc) << "Total number of work chunks allocated: " << work_chunks_created_; 1345 } 1346 if (kMeasureOverhead) { 1347 VLOG(gc) << "Overhead time " << PrettyDuration(overhead_time_); 1348 } 1349 if (kProfileLargeObjects) { 1350 VLOG(gc) << "Large objects tested " << large_object_test_ << " marked " << large_object_mark_; 1351 } 1352 if (kCountJavaLangRefs) { 1353 VLOG(gc) << "References scanned " << reference_count_; 1354 } 1355 if (kCountMarkedObjects) { 1356 VLOG(gc) << "Marked: null=" << mark_null_count_ << " immune=" << mark_immune_count_ 1357 << " fastpath=" << mark_fastpath_count_ << " slowpath=" << mark_slowpath_count_; 1358 } 1359 CHECK(mark_stack_->IsEmpty()); // Ensure that the mark stack is empty. 1360 mark_stack_->Reset(); 1361 WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_); 1362 heap_->ClearMarkedObjects(); 1363} 1364 1365void MarkSweep::RevokeAllThreadLocalBuffers() { 1366 if (kRevokeRosAllocThreadLocalBuffersAtCheckpoint && IsConcurrent()) { 1367 // If concurrent, rosalloc thread-local buffers are revoked at the 1368 // thread checkpoint. Bump pointer space thread-local buffers must 1369 // not be in use. 1370 GetHeap()->AssertAllBumpPointerSpaceThreadLocalBuffersAreRevoked(); 1371 } else { 1372 timings_.StartSplit("(Paused)RevokeAllThreadLocalBuffers"); 1373 GetHeap()->RevokeAllThreadLocalBuffers(); 1374 timings_.EndSplit(); 1375 } 1376} 1377 1378} // namespace collector 1379} // namespace gc 1380} // namespace art 1381