mark_sweep.cc revision 1ad2784ad9f311ebf9fe0677d33818648f423f9c
/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "mark_sweep.h"

#include <functional>
#include <numeric>
#include <climits>
#include <vector>

#include "base/bounded_fifo.h"
#include "base/logging.h"
#include "base/macros.h"
#include "base/mutex-inl.h"
#include "base/timing_logger.h"
#include "gc/accounting/card_table-inl.h"
#include "gc/accounting/heap_bitmap.h"
#include "gc/accounting/mod_union_table.h"
#include "gc/accounting/space_bitmap-inl.h"
#include "gc/heap.h"
#include "gc/space/image_space.h"
#include "gc/space/large_object_space.h"
#include "gc/space/space-inl.h"
#include "indirect_reference_table.h"
#include "intern_table.h"
#include "jni_internal.h"
#include "monitor.h"
#include "mark_sweep-inl.h"
#include "mirror/art_field.h"
#include "mirror/art_field-inl.h"
#include "mirror/class-inl.h"
#include "mirror/class_loader.h"
#include "mirror/dex_cache.h"
#include "mirror/reference-inl.h"
#include "mirror/object-inl.h"
#include "mirror/object_array.h"
#include "mirror/object_array-inl.h"
#include "runtime.h"
#include "thread-inl.h"
#include "thread_list.h"
#include "verifier/method_verifier.h"

using ::art::mirror::ArtField;
using ::art::mirror::Class;
using ::art::mirror::Object;
using ::art::mirror::ObjectArray;

namespace art {
namespace gc {
namespace collector {

// Performance options.
static constexpr bool kUseRecursiveMark = false;
static constexpr bool kUseMarkStackPrefetch = true;
static constexpr size_t kSweepArrayChunkFreeSize = 1024;
static constexpr bool kPreCleanCards = true;

// Parallelism options.
static constexpr bool kParallelCardScan = true;
static constexpr bool kParallelRecursiveMark = true;
// Don't attempt to parallelize mark stack processing unless the mark stack is at least n
// elements. This is temporary until we reduce the overhead caused by allocating tasks, etc. Not
// having this can add overhead in ProcessReferences since we may end up doing many calls to
// ProcessMarkStack with very small mark stacks.
static constexpr size_t kMinimumParallelMarkStackSize = 128;
static constexpr bool kParallelProcessMarkStack = true;

// Profiling and information flags.
static constexpr bool kCountClassesMarked = false;
static constexpr bool kProfileLargeObjects = false;
static constexpr bool kMeasureOverhead = false;
static constexpr bool kCountTasks = false;
static constexpr bool kCountJavaLangRefs = false;

// Turn off kCheckLocks when profiling the GC since it slows the GC down by up to 40%.
static constexpr bool kCheckLocks = kDebugLocking;
static constexpr bool kVerifyRoots = kIsDebugBuild;

void MarkSweep::BindBitmaps() {
  timings_.StartSplit("BindBitmaps");
  WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
  // Mark all of the spaces we never collect as immune.
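  // Spaces we never collect (e.g., image spaces) make up the immune region: their objects are
  // treated as always marked, and references out of them are handled separately via the
  // mod-union tables (see UpdateAndMarkModUnion below).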
  for (const auto& space : GetHeap()->GetContinuousSpaces()) {
    if (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyNeverCollect) {
      CHECK(immune_region_.AddContinuousSpace(space)) << "Failed to add space " << *space;
    }
  }
  timings_.EndSplit();
}

MarkSweep::MarkSweep(Heap* heap, bool is_concurrent, const std::string& name_prefix)
    : GarbageCollector(heap,
                       name_prefix +
                       (is_concurrent ? "concurrent mark sweep" : "mark sweep")),
      current_mark_bitmap_(NULL),
      mark_stack_(NULL),
      live_stack_freeze_size_(0),
      gc_barrier_(new Barrier(0)),
      large_object_lock_("mark sweep large object lock", kMarkSweepLargeObjectLock),
      mark_stack_lock_("mark sweep mark stack lock", kMarkSweepMarkStackLock),
      is_concurrent_(is_concurrent) {
}

void MarkSweep::InitializePhase() {
  timings_.Reset();
  TimingLogger::ScopedSplit split("InitializePhase", &timings_);
  mark_stack_ = heap_->mark_stack_.get();
  DCHECK(mark_stack_ != nullptr);
  immune_region_.Reset();
  class_count_ = 0;
  array_count_ = 0;
  other_count_ = 0;
  large_object_test_ = 0;
  large_object_mark_ = 0;
  classes_marked_ = 0;
  overhead_time_ = 0;
  work_chunks_created_ = 0;
  work_chunks_deleted_ = 0;
  reference_count_ = 0;

  FindDefaultMarkBitmap();

  // Do any pre GC verification.
  timings_.NewSplit("PreGcVerification");
  heap_->PreGcVerification(this);
}

void MarkSweep::ProcessReferences(Thread* self) {
  TimingLogger::ScopedSplit split("ProcessReferences", &timings_);
  WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
  GetHeap()->ProcessReferences(timings_, clear_soft_references_, &IsMarkedCallback,
                               &MarkObjectCallback, &ProcessMarkStackPausedCallback, this);
}

void MarkSweep::PreProcessReferences(Thread* self) {
  timings_.NewSplit("PreProcessReferences");
  GetHeap()->ProcessSoftReferences(timings_, clear_soft_references_, &IsMarkedCallback,
                                   &MarkObjectCallback, &ProcessMarkStackPausedCallback, this);
}

bool MarkSweep::HandleDirtyObjectsPhase() {
  TimingLogger::ScopedSplit split("(Paused)HandleDirtyObjectsPhase", &timings_);
  Thread* self = Thread::Current();
  Locks::mutator_lock_->AssertExclusiveHeld(self);

  {
    WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);

    // Re-mark root set.
    ReMarkRoots();

    // Scan dirty objects; this is only required when doing a concurrent GC.
    RecursiveMarkDirtyObjects(true, accounting::CardTable::kCardDirty);
  }

  ProcessReferences(self);

  // Only need to do this if we have the card mark verification on, and only during concurrent GC.
  if (GetHeap()->verify_missing_card_marks_ || GetHeap()->verify_pre_gc_heap_ ||
      GetHeap()->verify_post_gc_heap_) {
    WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
    // This second sweep makes sure that we don't have any objects in the live stack which point to
    // freed objects. These cause problems since their references may be previously freed objects.
    SweepArray(GetHeap()->allocation_stack_.get(), false);
    // Since SweepArray() above resets the (active) allocation stack, we need to revoke the
    // thread-local allocation stacks that point into it.
    RevokeAllThreadLocalAllocationStacks(self);
  }

  timings_.StartSplit("PreSweepingGcVerification");
  heap_->PreSweepingGcVerification(this);
  timings_.EndSplit();

  // Ensure that nobody inserted items in the live stack after we swapped the stacks.
  ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
  CHECK_GE(live_stack_freeze_size_, GetHeap()->GetLiveStack()->Size());

  // Disallow new system weaks to prevent a race which occurs when someone adds a new system
  // weak before we sweep them. Since this new system weak may not be marked, the GC may
  // incorrectly sweep it. This also fixes a race where interning may attempt to return a strong
  // reference to a string that is about to be swept.
  Runtime::Current()->DisallowNewSystemWeaks();
  return true;
}

bool MarkSweep::IsConcurrent() const {
  return is_concurrent_;
}

void MarkSweep::PreCleanCards() {
  // Don't do this for non-concurrent GCs since they don't have any dirty cards.
  if (kPreCleanCards && IsConcurrent()) {
    Thread* self = Thread::Current();
    CHECK(!Locks::mutator_lock_->IsExclusiveHeld(self));
    // Process dirty cards and add dirty cards to mod union tables; this also ages cards.
    heap_->ProcessCards(timings_, false);
    // The checkpoint root marking is required to avoid a race condition which occurs if the
    // following happens during a reference write:
    // 1. mutator dirties the card (write barrier)
    // 2. GC ages the card (the above ProcessCards call)
    // 3. GC scans the object (the RecursiveMarkDirtyObjects call below)
    // 4. mutator writes the value (corresponding to the write barrier in 1.)
    // This causes the GC to age the card but not necessarily mark the reference which the mutator
    // wrote into the object stored in the card.
    // Having the checkpoint fixes this issue since it ensures that the card mark and the
    // reference write are visible to the GC before the card is scanned (this is due to locks being
    // acquired / released in the checkpoint code).
    // The other roots are also marked to help reduce the pause.
    MarkThreadRoots(self);
    // TODO: Only mark the dirty roots.
    MarkNonThreadRoots();
    MarkConcurrentRoots(
        static_cast<VisitRootFlags>(kVisitRootFlagClearRootLog | kVisitRootFlagNewRoots));
    // Process the newly aged cards.
    RecursiveMarkDirtyObjects(false, accounting::CardTable::kCardDirty - 1);
    // TODO: Empty the allocation stack to reduce the number of objects we need to test / mark as
    // live in the next GC.
  }
}

void MarkSweep::RevokeAllThreadLocalAllocationStacks(Thread* self) {
  if (kUseThreadLocalAllocationStack) {
    Locks::mutator_lock_->AssertExclusiveHeld(self);
    heap_->RevokeAllThreadLocalAllocationStacks(self);
  }
}

void MarkSweep::MarkingPhase() {
  TimingLogger::ScopedSplit split("MarkingPhase", &timings_);
  Thread* self = Thread::Current();

  BindBitmaps();
  FindDefaultMarkBitmap();

  // Process dirty cards and add dirty cards to mod union tables.
  heap_->ProcessCards(timings_, false);

  // Need to do this before the checkpoint since we don't want any threads to add references to
  // the live stack during the recursive mark.
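  // After the swap, the old allocation stack becomes the live stack and new allocations go into
  // the now-empty allocation stack, so the recursive mark works from a stable snapshot.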
  timings_.NewSplit("SwapStacks");
  heap_->SwapStacks(self);

  WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
  MarkRoots(self);
  live_stack_freeze_size_ = heap_->GetLiveStack()->Size();
  UpdateAndMarkModUnion();
  MarkReachableObjects();
  // Pre-clean dirtied cards to reduce pauses.
  PreCleanCards();
  if (IsConcurrent()) {
    // No reason to do this for non-concurrent GC since pre processing soft references only helps
    // pauses.
    PreProcessReferences(self);
  }
}

void MarkSweep::UpdateAndMarkModUnion() {
  for (const auto& space : heap_->GetContinuousSpaces()) {
    if (immune_region_.ContainsSpace(space)) {
      const char* name = space->IsZygoteSpace() ? "UpdateAndMarkZygoteModUnionTable" :
          "UpdateAndMarkImageModUnionTable";
      TimingLogger::ScopedSplit split(name, &timings_);
      accounting::ModUnionTable* mod_union_table = heap_->FindModUnionTableFromSpace(space);
      CHECK(mod_union_table != nullptr);
      mod_union_table->UpdateAndMarkReferences(MarkObjectCallback, this);
    }
  }
}

void MarkSweep::MarkThreadRoots(Thread* self) {
  MarkRootsCheckpoint(self);
}

void MarkSweep::MarkReachableObjects() {
  // Mark everything allocated since the last GC as live so that we can sweep concurrently,
  // knowing that new allocations won't be marked as live.
  timings_.StartSplit("MarkStackAsLive");
  accounting::ObjectStack* live_stack = heap_->GetLiveStack();
  heap_->MarkAllocStackAsLive(live_stack);
  live_stack->Reset();
  timings_.EndSplit();
  // Recursively mark all the non-image bits set in the mark bitmap.
  RecursiveMark();
}

void MarkSweep::ReclaimPhase() {
  TimingLogger::ScopedSplit split("ReclaimPhase", &timings_);
  Thread* self = Thread::Current();

  if (!IsConcurrent()) {
    ProcessReferences(self);
  }

  {
    WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
    SweepSystemWeaks();
  }

  if (IsConcurrent()) {
    Runtime::Current()->AllowNewSystemWeaks();

    TimingLogger::ScopedSplit split("UnMarkAllocStack", &timings_);
    WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
    accounting::ObjectStack* allocation_stack = GetHeap()->allocation_stack_.get();
    if (!kPreCleanCards) {
      // The allocation stack contains things allocated since the start of the GC. These may have
      // been marked during this GC meaning they won't be eligible for reclaiming in the next
      // sticky GC. Unmark these objects so that they are eligible for reclaiming in the next
      // sticky GC.
      // There is a race here which is safely handled. Another thread such as the hprof could
      // have flushed the alloc stack after we resumed the threads. This is safe however, since
      // resetting the allocation stack zeros it out with madvise. This means that we will either
      // read NULLs or attempt to unmark a newly allocated object which will not be marked in the
      // first place.
      // We can't do this if we pre-clean cards since we will unmark objects which are no longer on
      // a dirty card since we aged cards during the pre-cleaning process.
      mirror::Object** end = allocation_stack->End();
      for (mirror::Object** it = allocation_stack->Begin(); it != end; ++it) {
        const Object* obj = *it;
        if (obj != nullptr) {
          UnMarkObjectNonNull(obj);
        }
      }
    }
  }

  {
    WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);

    // Reclaim unmarked objects.
    Sweep(false);

    // Swap the live and mark bitmaps for each space which we modified. This is an
    // optimization that enables us to not clear live bits inside of the sweep. Only swaps unbound
    // bitmaps.
    timings_.StartSplit("SwapBitmaps");
    SwapBitmaps();
    timings_.EndSplit();

    // Unbind the live and mark bitmaps.
    TimingLogger::ScopedSplit split("UnBindBitmaps", &timings_);
    GetHeap()->UnBindBitmaps();
  }
}

void MarkSweep::FindDefaultMarkBitmap() {
  TimingLogger::ScopedSplit split("FindDefaultMarkBitmap", &timings_);
  for (const auto& space : GetHeap()->GetContinuousSpaces()) {
    accounting::SpaceBitmap* bitmap = space->GetMarkBitmap();
    if (bitmap != nullptr &&
        space->GetGcRetentionPolicy() == space::kGcRetentionPolicyAlwaysCollect) {
      current_mark_bitmap_ = bitmap;
      CHECK(current_mark_bitmap_ != NULL);
      return;
    }
  }
  GetHeap()->DumpSpaces();
  LOG(FATAL) << "Could not find a default mark bitmap";
}

void MarkSweep::ExpandMarkStack() {
  ResizeMarkStack(mark_stack_->Capacity() * 2);
}

void MarkSweep::ResizeMarkStack(size_t new_size) {
  // Rare case, no need to have Thread::Current be a parameter.
  if (UNLIKELY(mark_stack_->Size() < mark_stack_->Capacity())) {
    // Someone else acquired the lock and expanded the mark stack before us.
    return;
  }
  std::vector<Object*> temp(mark_stack_->Begin(), mark_stack_->End());
  CHECK_LE(mark_stack_->Size(), new_size);
  mark_stack_->Resize(new_size);
  for (const auto& obj : temp) {
    mark_stack_->PushBack(obj);
  }
}

inline void MarkSweep::MarkObjectNonNullParallel(const Object* obj) {
  DCHECK(obj != NULL);
  if (MarkObjectParallel(obj)) {
    MutexLock mu(Thread::Current(), mark_stack_lock_);
    if (UNLIKELY(mark_stack_->Size() >= mark_stack_->Capacity())) {
      ExpandMarkStack();
    }
    // The object must be pushed on to the mark stack.
    mark_stack_->PushBack(const_cast<Object*>(obj));
  }
}

mirror::Object* MarkSweep::MarkObjectCallback(mirror::Object* obj, void* arg) {
  MarkSweep* mark_sweep = reinterpret_cast<MarkSweep*>(arg);
  mark_sweep->MarkObject(obj);
  return obj;
}

inline void MarkSweep::UnMarkObjectNonNull(const Object* obj) {
  DCHECK(!immune_region_.ContainsObject(obj));

  if (kUseBrooksPointer) {
    // Verify all the objects have the correct Brooks pointer installed.
    obj->AssertSelfBrooksPointer();
  }

  // Try to take advantage of locality of references within a space, failing this find the space
  // the hard way.
  accounting::SpaceBitmap* object_bitmap = current_mark_bitmap_;
  if (UNLIKELY(!object_bitmap->HasAddress(obj))) {
    accounting::SpaceBitmap* new_bitmap = heap_->GetMarkBitmap()->GetContinuousSpaceBitmap(obj);
    if (LIKELY(new_bitmap != NULL)) {
      object_bitmap = new_bitmap;
    } else {
      MarkLargeObject(obj, false);
      return;
    }
  }

  DCHECK(object_bitmap->HasAddress(obj));
  object_bitmap->Clear(obj);
}

inline void MarkSweep::MarkObjectNonNull(const Object* obj) {
  DCHECK(obj != NULL);

  if (kUseBrooksPointer) {
    // Verify all the objects have the correct Brooks pointer installed.
    obj->AssertSelfBrooksPointer();
  }

  if (immune_region_.ContainsObject(obj)) {
    DCHECK(IsMarked(obj));
    return;
  }

  // Try to take advantage of locality of references within a space, failing this find the space
  // the hard way.
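  // current_mark_bitmap_ is usually the default alloc-space bitmap (see FindDefaultMarkBitmap),
  // where most marks are expected to land; only objects outside of it pay for the full
  // GetContinuousSpaceBitmap() lookup.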
  accounting::SpaceBitmap* object_bitmap = current_mark_bitmap_;
  if (UNLIKELY(!object_bitmap->HasAddress(obj))) {
    accounting::SpaceBitmap* new_bitmap = heap_->GetMarkBitmap()->GetContinuousSpaceBitmap(obj);
    if (LIKELY(new_bitmap != NULL)) {
      object_bitmap = new_bitmap;
    } else {
      MarkLargeObject(obj, true);
      return;
    }
  }

  // This object was not previously marked.
  if (!object_bitmap->Test(obj)) {
    object_bitmap->Set(obj);
    if (UNLIKELY(mark_stack_->Size() >= mark_stack_->Capacity())) {
      // Lock is not needed but is here anyways to please annotalysis.
      MutexLock mu(Thread::Current(), mark_stack_lock_);
      ExpandMarkStack();
    }
    // The object must be pushed on to the mark stack.
    mark_stack_->PushBack(const_cast<Object*>(obj));
  }
}

// Rare case, probably not worth inlining since it will increase instruction cache miss rate.
bool MarkSweep::MarkLargeObject(const Object* obj, bool set) {
  // TODO: support >1 discontinuous space.
  space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
  accounting::ObjectSet* large_objects = large_object_space->GetMarkObjects();
  if (kProfileLargeObjects) {
    ++large_object_test_;
  }
  if (UNLIKELY(!large_objects->Test(obj))) {
    if (!large_object_space->Contains(obj)) {
      LOG(ERROR) << "Tried to mark " << obj << " not contained by any spaces";
      LOG(ERROR) << "Attempting to see if it's a bad root";
      VerifyRoots();
      LOG(FATAL) << "Can't mark bad root";
    }
    if (kProfileLargeObjects) {
      ++large_object_mark_;
    }
    if (set) {
      large_objects->Set(obj);
    } else {
      large_objects->Clear(obj);
    }
    return true;
  }
  return false;
}

inline bool MarkSweep::MarkObjectParallel(const Object* obj) {
  DCHECK(obj != NULL);

  if (kUseBrooksPointer) {
    // Verify all the objects have the correct Brooks pointer installed.
    obj->AssertSelfBrooksPointer();
  }

  if (immune_region_.ContainsObject(obj)) {
    DCHECK(IsMarked(obj));
    return false;
  }

  // Try to take advantage of locality of references within a space, failing this find the space
  // the hard way.
  accounting::SpaceBitmap* object_bitmap = current_mark_bitmap_;
  if (UNLIKELY(!object_bitmap->HasAddress(obj))) {
    accounting::SpaceBitmap* new_bitmap = heap_->GetMarkBitmap()->GetContinuousSpaceBitmap(obj);
    if (new_bitmap != NULL) {
      object_bitmap = new_bitmap;
    } else {
      // TODO: Remove the Thread::Current here?
      // TODO: Convert this to some kind of atomic marking?
      MutexLock mu(Thread::Current(), large_object_lock_);
      return MarkLargeObject(obj, true);
    }
  }

  // Return true if the object was not previously marked.
  return !object_bitmap->AtomicTestAndSet(obj);
}

// Used to mark objects when recursing. Recursion is done by moving
// the finger across the bitmaps in address order and marking child
// objects. Any newly-marked objects whose addresses are lower than
// the finger won't be visited by the bitmap scan, so those objects
// need to be added to the mark stack.
inline void MarkSweep::MarkObject(const Object* obj) {
  if (obj != NULL) {
    MarkObjectNonNull(obj);
  }
}

void MarkSweep::MarkRootParallelCallback(mirror::Object** root, void* arg, uint32_t /*thread_id*/,
                                         RootType /*root_type*/) {
  reinterpret_cast<MarkSweep*>(arg)->MarkObjectNonNullParallel(*root);
}

void MarkSweep::VerifyRootMarked(Object** root, void* arg, uint32_t /*thread_id*/,
                                 RootType /*root_type*/) {
  CHECK(reinterpret_cast<MarkSweep*>(arg)->IsMarked(*root));
}

void MarkSweep::MarkRootCallback(Object** root, void* arg, uint32_t /*thread_id*/,
                                 RootType /*root_type*/) {
  reinterpret_cast<MarkSweep*>(arg)->MarkObjectNonNull(*root);
}

void MarkSweep::VerifyRootCallback(const Object* root, void* arg, size_t vreg,
                                   const StackVisitor* visitor) {
  reinterpret_cast<MarkSweep*>(arg)->VerifyRoot(root, vreg, visitor);
}

void MarkSweep::VerifyRoot(const Object* root, size_t vreg, const StackVisitor* visitor) {
  // See if the root is on any space bitmap.
  if (GetHeap()->GetLiveBitmap()->GetContinuousSpaceBitmap(root) == NULL) {
    space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
    if (!large_object_space->Contains(root)) {
      LOG(ERROR) << "Found invalid root: " << root;
      if (visitor != NULL) {
        LOG(ERROR) << visitor->DescribeLocation() << " in VReg: " << vreg;
      }
    }
  }
}

void MarkSweep::VerifyRoots() {
  Runtime::Current()->GetThreadList()->VerifyRoots(VerifyRootCallback, this);
}

void MarkSweep::MarkRoots(Thread* self) {
  if (Locks::mutator_lock_->IsExclusiveHeld(self)) {
    // If we exclusively hold the mutator lock, all threads must be suspended.
    timings_.StartSplit("MarkRoots");
    Runtime::Current()->VisitRoots(MarkRootCallback, this);
    timings_.EndSplit();
    RevokeAllThreadLocalAllocationStacks(self);
  } else {
    MarkThreadRoots(self);
    // At this point the live stack should no longer have any mutators which push into it.
    MarkNonThreadRoots();
    MarkConcurrentRoots(
        static_cast<VisitRootFlags>(kVisitRootFlagAllRoots | kVisitRootFlagStartLoggingNewRoots));
  }
}

void MarkSweep::MarkNonThreadRoots() {
  timings_.StartSplit("MarkNonThreadRoots");
  Runtime::Current()->VisitNonThreadRoots(MarkRootCallback, this);
  timings_.EndSplit();
}

void MarkSweep::MarkConcurrentRoots(VisitRootFlags flags) {
  timings_.StartSplit("MarkConcurrentRoots");
  // Visit all runtime roots and clear dirty flags.
  Runtime::Current()->VisitConcurrentRoots(MarkRootCallback, this, flags);
  timings_.EndSplit();
}

class ScanObjectVisitor {
 public:
  explicit ScanObjectVisitor(MarkSweep* const mark_sweep) ALWAYS_INLINE
      : mark_sweep_(mark_sweep) {}

  // TODO: Fix this when annotalysis works with visitors.
  void operator()(Object* obj) const ALWAYS_INLINE NO_THREAD_SAFETY_ANALYSIS {
    if (kCheckLocks) {
      Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
      Locks::heap_bitmap_lock_->AssertExclusiveHeld(Thread::Current());
    }
    mark_sweep_->ScanObject(obj);
  }

 private:
  MarkSweep* const mark_sweep_;
};

template <bool kUseFinger = false>
class MarkStackTask : public Task {
 public:
  MarkStackTask(ThreadPool* thread_pool, MarkSweep* mark_sweep, size_t mark_stack_size,
                const Object** mark_stack)
      : mark_sweep_(mark_sweep),
        thread_pool_(thread_pool),
        mark_stack_pos_(mark_stack_size) {
    // We may have to copy part of an existing mark stack when another mark stack overflows.
    if (mark_stack_size != 0) {
      DCHECK(mark_stack != NULL);
      // TODO: Check performance?
      std::copy(mark_stack, mark_stack + mark_stack_size, mark_stack_);
    }
    if (kCountTasks) {
      ++mark_sweep_->work_chunks_created_;
    }
  }

  static const size_t kMaxSize = 1 * KB;

 protected:
  class ScanObjectParallelVisitor {
   public:
    explicit ScanObjectParallelVisitor(MarkStackTask<kUseFinger>* chunk_task) ALWAYS_INLINE
        : chunk_task_(chunk_task) {}

    void operator()(Object* obj) const {
      MarkSweep* mark_sweep = chunk_task_->mark_sweep_;
      mark_sweep->ScanObjectVisit(obj,
          [mark_sweep, this](Object* /* obj */, Object* ref, const MemberOffset& /* offset */,
              bool /* is_static */) ALWAYS_INLINE_LAMBDA {
        if (ref != nullptr && mark_sweep->MarkObjectParallel(ref)) {
          if (kUseFinger) {
            android_memory_barrier();
            if (reinterpret_cast<uintptr_t>(ref) >=
                static_cast<uintptr_t>(mark_sweep->atomic_finger_)) {
              return;
            }
          }
          chunk_task_->MarkStackPush(ref);
        }
      });
    }

   private:
    MarkStackTask<kUseFinger>* const chunk_task_;
  };

  virtual ~MarkStackTask() {
    // Make sure that we have cleared our mark stack.
    DCHECK_EQ(mark_stack_pos_, 0U);
    if (kCountTasks) {
      ++mark_sweep_->work_chunks_deleted_;
    }
  }

  MarkSweep* const mark_sweep_;
  ThreadPool* const thread_pool_;
  // Thread local mark stack for this task.
  const Object* mark_stack_[kMaxSize];
  // Mark stack position.
  size_t mark_stack_pos_;

  void MarkStackPush(const Object* obj) ALWAYS_INLINE {
    if (UNLIKELY(mark_stack_pos_ == kMaxSize)) {
      // Mark stack overflow, give 1/2 the stack to the thread pool as a new work task.
      mark_stack_pos_ /= 2;
      auto* task = new MarkStackTask(thread_pool_, mark_sweep_, kMaxSize - mark_stack_pos_,
                                     mark_stack_ + mark_stack_pos_);
      thread_pool_->AddTask(Thread::Current(), task);
    }
    DCHECK(obj != nullptr);
    DCHECK(mark_stack_pos_ < kMaxSize);
    mark_stack_[mark_stack_pos_++] = obj;
  }

  virtual void Finalize() {
    delete this;
  }

  // Scans all of the objects
  virtual void Run(Thread* self) {
    ScanObjectParallelVisitor visitor(this);
    // TODO: Tune this.
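    // Pop a few entries ahead of time and issue prefetches for them, so that an object's header
    // is (hopefully) already in cache by the time the visitor touches it.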
    static const size_t kFifoSize = 4;
    BoundedFifoPowerOfTwo<const Object*, kFifoSize> prefetch_fifo;
    for (;;) {
      const Object* obj = nullptr;
      if (kUseMarkStackPrefetch) {
        while (mark_stack_pos_ != 0 && prefetch_fifo.size() < kFifoSize) {
          const Object* obj = mark_stack_[--mark_stack_pos_];
          DCHECK(obj != nullptr);
          __builtin_prefetch(obj);
          prefetch_fifo.push_back(obj);
        }
        if (UNLIKELY(prefetch_fifo.empty())) {
          break;
        }
        obj = prefetch_fifo.front();
        prefetch_fifo.pop_front();
      } else {
        if (UNLIKELY(mark_stack_pos_ == 0)) {
          break;
        }
        obj = mark_stack_[--mark_stack_pos_];
      }
      DCHECK(obj != nullptr);
      visitor(const_cast<mirror::Object*>(obj));
    }
  }
};

class CardScanTask : public MarkStackTask<false> {
 public:
  CardScanTask(ThreadPool* thread_pool, MarkSweep* mark_sweep, accounting::SpaceBitmap* bitmap,
               byte* begin, byte* end, byte minimum_age, size_t mark_stack_size,
               const Object** mark_stack_obj)
      : MarkStackTask<false>(thread_pool, mark_sweep, mark_stack_size, mark_stack_obj),
        bitmap_(bitmap),
        begin_(begin),
        end_(end),
        minimum_age_(minimum_age) {
  }

 protected:
  accounting::SpaceBitmap* const bitmap_;
  byte* const begin_;
  byte* const end_;
  const byte minimum_age_;

  virtual void Finalize() {
    delete this;
  }

  virtual void Run(Thread* self) NO_THREAD_SAFETY_ANALYSIS {
    ScanObjectParallelVisitor visitor(this);
    accounting::CardTable* card_table = mark_sweep_->GetHeap()->GetCardTable();
    size_t cards_scanned = card_table->Scan(bitmap_, begin_, end_, visitor, minimum_age_);
    VLOG(heap) << "Parallel scanning cards " << reinterpret_cast<void*>(begin_) << " - "
               << reinterpret_cast<void*>(end_) << " = " << cards_scanned;
    // Finish by emptying our local mark stack.
    MarkStackTask::Run(self);
  }
};

size_t MarkSweep::GetThreadCount(bool paused) const {
  if (heap_->GetThreadPool() == nullptr || !heap_->CareAboutPauseTimes()) {
    return 0;
  }
  if (paused) {
    return heap_->GetParallelGCThreadCount() + 1;
  } else {
    return heap_->GetConcGCThreadCount() + 1;
  }
}

void MarkSweep::ScanGrayObjects(bool paused, byte minimum_age) {
  accounting::CardTable* card_table = GetHeap()->GetCardTable();
  ThreadPool* thread_pool = GetHeap()->GetThreadPool();
  size_t thread_count = GetThreadCount(paused);
  // The parallel version with only one thread is faster for card scanning. TODO: fix.
  if (kParallelCardScan && thread_count > 0) {
    Thread* self = Thread::Current();
    // Can't have a different split for each space since multiple spaces can have their cards being
    // scanned at the same time.
    timings_.StartSplit(paused ? "(Paused)ScanGrayObjects" : "ScanGrayObjects");
    // Try to take some of the mark stack since we can pass this off to the worker tasks.
    const Object** mark_stack_begin = const_cast<const Object**>(mark_stack_->Begin());
    const Object** mark_stack_end = const_cast<const Object**>(mark_stack_->End());
    const size_t mark_stack_size = mark_stack_end - mark_stack_begin;
    // Estimated number of work tasks we will create.
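    // Each task receives a slice of the existing mark stack along with its card range, so the
    // estimate below determines how big each slice is.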
    const size_t mark_stack_tasks = GetHeap()->GetContinuousSpaces().size() * thread_count;
    DCHECK_NE(mark_stack_tasks, 0U);
    const size_t mark_stack_delta = std::min(CardScanTask::kMaxSize / 2,
                                             mark_stack_size / mark_stack_tasks + 1);
    for (const auto& space : GetHeap()->GetContinuousSpaces()) {
      if (space->GetMarkBitmap() == nullptr) {
        continue;
      }
      byte* card_begin = space->Begin();
      byte* card_end = space->End();
      // Align up the end address. For example, the image space's end
      // may not be card-size-aligned.
      card_end = AlignUp(card_end, accounting::CardTable::kCardSize);
      DCHECK(IsAligned<accounting::CardTable::kCardSize>(card_begin));
      DCHECK(IsAligned<accounting::CardTable::kCardSize>(card_end));
      // Calculate how many bytes of heap we will scan.
      const size_t address_range = card_end - card_begin;
      // Calculate how much address range each task gets.
      const size_t card_delta = RoundUp(address_range / thread_count + 1,
                                        accounting::CardTable::kCardSize);
      // Create the worker tasks for this space.
      while (card_begin != card_end) {
        // Add a range of cards.
        size_t addr_remaining = card_end - card_begin;
        size_t card_increment = std::min(card_delta, addr_remaining);
        // Take from the back of the mark stack.
        size_t mark_stack_remaining = mark_stack_end - mark_stack_begin;
        size_t mark_stack_increment = std::min(mark_stack_delta, mark_stack_remaining);
        mark_stack_end -= mark_stack_increment;
        mark_stack_->PopBackCount(static_cast<int32_t>(mark_stack_increment));
        DCHECK_EQ(mark_stack_end, const_cast<const art::mirror::Object**>(mark_stack_->End()));
        // Add the new task to the thread pool.
        auto* task = new CardScanTask(thread_pool, this, space->GetMarkBitmap(), card_begin,
                                      card_begin + card_increment, minimum_age,
                                      mark_stack_increment, mark_stack_end);
        thread_pool->AddTask(self, task);
        card_begin += card_increment;
      }
    }

    // Note: the card scan below may dirty new cards (and scan them)
    // as a side effect when a Reference object is encountered and
    // queued during the marking. See b/11465268.
    thread_pool->SetMaxActiveWorkers(thread_count - 1);
    thread_pool->StartWorkers(self);
    thread_pool->Wait(self, true, true);
    thread_pool->StopWorkers(self);
    timings_.EndSplit();
  } else {
    for (const auto& space : GetHeap()->GetContinuousSpaces()) {
      if (space->GetMarkBitmap() != nullptr) {
        // Image spaces are handled properly since live == marked for them.
        switch (space->GetGcRetentionPolicy()) {
          case space::kGcRetentionPolicyNeverCollect:
            timings_.StartSplit(paused ? "(Paused)ScanGrayImageSpaceObjects"
                                       : "ScanGrayImageSpaceObjects");
            break;
          case space::kGcRetentionPolicyFullCollect:
            timings_.StartSplit(paused ? "(Paused)ScanGrayZygoteSpaceObjects"
                                       : "ScanGrayZygoteSpaceObjects");
            break;
          case space::kGcRetentionPolicyAlwaysCollect:
            timings_.StartSplit(paused ? "(Paused)ScanGrayAllocSpaceObjects"
                                       : "ScanGrayAllocSpaceObjects");
            break;
        }
"(Paused)ScanGrayAllocSpaceObjects" : 865 "ScanGrayAllocSpaceObjects"); 866 break; 867 } 868 ScanObjectVisitor visitor(this); 869 card_table->Scan(space->GetMarkBitmap(), space->Begin(), space->End(), visitor, minimum_age); 870 timings_.EndSplit(); 871 } 872 } 873 } 874} 875 876class RecursiveMarkTask : public MarkStackTask<false> { 877 public: 878 RecursiveMarkTask(ThreadPool* thread_pool, MarkSweep* mark_sweep, 879 accounting::SpaceBitmap* bitmap, uintptr_t begin, uintptr_t end) 880 : MarkStackTask<false>(thread_pool, mark_sweep, 0, NULL), 881 bitmap_(bitmap), 882 begin_(begin), 883 end_(end) { 884 } 885 886 protected: 887 accounting::SpaceBitmap* const bitmap_; 888 const uintptr_t begin_; 889 const uintptr_t end_; 890 891 virtual void Finalize() { 892 delete this; 893 } 894 895 // Scans all of the objects 896 virtual void Run(Thread* self) NO_THREAD_SAFETY_ANALYSIS { 897 ScanObjectParallelVisitor visitor(this); 898 bitmap_->VisitMarkedRange(begin_, end_, visitor); 899 // Finish by emptying our local mark stack. 900 MarkStackTask::Run(self); 901 } 902}; 903 904// Populates the mark stack based on the set of marked objects and 905// recursively marks until the mark stack is emptied. 906void MarkSweep::RecursiveMark() { 907 TimingLogger::ScopedSplit split("RecursiveMark", &timings_); 908 // RecursiveMark will build the lists of known instances of the Reference classes. See 909 // DelayReferenceReferent for details. 910 if (kUseRecursiveMark) { 911 const bool partial = GetGcType() == kGcTypePartial; 912 ScanObjectVisitor scan_visitor(this); 913 auto* self = Thread::Current(); 914 ThreadPool* thread_pool = heap_->GetThreadPool(); 915 size_t thread_count = GetThreadCount(false); 916 const bool parallel = kParallelRecursiveMark && thread_count > 1; 917 mark_stack_->Reset(); 918 for (const auto& space : GetHeap()->GetContinuousSpaces()) { 919 if ((space->GetGcRetentionPolicy() == space::kGcRetentionPolicyAlwaysCollect) || 920 (!partial && space->GetGcRetentionPolicy() == space::kGcRetentionPolicyFullCollect)) { 921 current_mark_bitmap_ = space->GetMarkBitmap(); 922 if (current_mark_bitmap_ == nullptr) { 923 continue; 924 } 925 if (parallel) { 926 // We will use the mark stack the future. 927 // CHECK(mark_stack_->IsEmpty()); 928 // This function does not handle heap end increasing, so we must use the space end. 929 uintptr_t begin = reinterpret_cast<uintptr_t>(space->Begin()); 930 uintptr_t end = reinterpret_cast<uintptr_t>(space->End()); 931 atomic_finger_ = static_cast<int32_t>(0xFFFFFFFF); 932 933 // Create a few worker tasks. 934 const size_t n = thread_count * 2; 935 while (begin != end) { 936 uintptr_t start = begin; 937 uintptr_t delta = (end - begin) / n; 938 delta = RoundUp(delta, KB); 939 if (delta < 16 * KB) delta = end - begin; 940 begin += delta; 941 auto* task = new RecursiveMarkTask(thread_pool, this, current_mark_bitmap_, start, 942 begin); 943 thread_pool->AddTask(self, task); 944 } 945 thread_pool->SetMaxActiveWorkers(thread_count - 1); 946 thread_pool->StartWorkers(self); 947 thread_pool->Wait(self, true, true); 948 thread_pool->StopWorkers(self); 949 } else { 950 // This function does not handle heap end increasing, so we must use the space end. 
          uintptr_t begin = reinterpret_cast<uintptr_t>(space->Begin());
          uintptr_t end = reinterpret_cast<uintptr_t>(space->End());
          current_mark_bitmap_->VisitMarkedRange(begin, end, scan_visitor);
        }
      }
    }
  }
  ProcessMarkStack(false);
}

mirror::Object* MarkSweep::IsMarkedCallback(mirror::Object* object, void* arg) {
  if (reinterpret_cast<MarkSweep*>(arg)->IsMarked(object)) {
    return object;
  }
  return nullptr;
}

void MarkSweep::RecursiveMarkDirtyObjects(bool paused, byte minimum_age) {
  ScanGrayObjects(paused, minimum_age);
  ProcessMarkStack(paused);
}

void MarkSweep::ReMarkRoots() {
  Locks::mutator_lock_->AssertExclusiveHeld(Thread::Current());
  timings_.StartSplit("(Paused)ReMarkRoots");
  Runtime::Current()->VisitRoots(
      MarkRootCallback, this, static_cast<VisitRootFlags>(kVisitRootFlagNewRoots |
                                                          kVisitRootFlagStopLoggingNewRoots |
                                                          kVisitRootFlagClearRootLog));
  timings_.EndSplit();
  if (kVerifyRoots) {
    timings_.StartSplit("(Paused)VerifyRoots");
    Runtime::Current()->VisitRoots(VerifyRootMarked, this);
    timings_.EndSplit();
  }
}

void MarkSweep::SweepSystemWeaks() {
  Runtime* runtime = Runtime::Current();
  timings_.StartSplit("SweepSystemWeaks");
  runtime->SweepSystemWeaks(IsMarkedCallback, this);
  timings_.EndSplit();
}

mirror::Object* MarkSweep::VerifySystemWeakIsLiveCallback(Object* obj, void* arg) {
  reinterpret_cast<MarkSweep*>(arg)->VerifyIsLive(obj);
  // We don't actually want to sweep the object, so let's return "marked".
  return obj;
}

void MarkSweep::VerifyIsLive(const Object* obj) {
  Heap* heap = GetHeap();
  if (!heap->GetLiveBitmap()->Test(obj)) {
    space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
    if (!large_object_space->GetLiveObjects()->Test(obj)) {
      if (std::find(heap->allocation_stack_->Begin(), heap->allocation_stack_->End(), obj) ==
          heap->allocation_stack_->End()) {
        // Object not found!
        heap->DumpSpaces();
        LOG(FATAL) << "Found dead object " << obj;
      }
    }
  }
}

void MarkSweep::VerifySystemWeaks() {
  // Verify system weaks, uses a special object visitor which returns the input object.
  Runtime::Current()->SweepSystemWeaks(VerifySystemWeakIsLiveCallback, this);
}

class CheckpointMarkThreadRoots : public Closure {
 public:
  explicit CheckpointMarkThreadRoots(MarkSweep* mark_sweep) : mark_sweep_(mark_sweep) {}

  virtual void Run(Thread* thread) NO_THREAD_SAFETY_ANALYSIS {
    ATRACE_BEGIN("Marking thread roots");
    // Note: self is not necessarily equal to thread since thread may be suspended.
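    // The closure runs either on the target thread itself (at a suspend point) or on behalf of a
    // thread that is already suspended, which is why both cases pass the CHECK below.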
    Thread* self = Thread::Current();
    CHECK(thread == self || thread->IsSuspended() || thread->GetState() == kWaitingPerformingGc)
        << thread->GetState() << " thread " << thread << " self " << self;
    thread->VisitRoots(MarkSweep::MarkRootParallelCallback, mark_sweep_);
    ATRACE_END();
    if (kUseThreadLocalAllocationStack) {
      thread->RevokeThreadLocalAllocationStack();
    }
    mark_sweep_->GetBarrier().Pass(self);
  }

 private:
  MarkSweep* mark_sweep_;
};

void MarkSweep::MarkRootsCheckpoint(Thread* self) {
  CheckpointMarkThreadRoots check_point(this);
  timings_.StartSplit("MarkRootsCheckpoint");
  ThreadList* thread_list = Runtime::Current()->GetThreadList();
  // Request that the checkpoint be run on all threads, returning a count of the threads that
  // must run through the barrier, including self.
  size_t barrier_count = thread_list->RunCheckpoint(&check_point);
  // Release locks then wait for all mutator threads to pass the barrier.
  // TODO: optimize to not release locks when there are no threads to wait for.
  Locks::heap_bitmap_lock_->ExclusiveUnlock(self);
  Locks::mutator_lock_->SharedUnlock(self);
  ThreadState old_state = self->SetState(kWaitingForCheckPointsToRun);
  CHECK_EQ(old_state, kWaitingPerformingGc);
  gc_barrier_->Increment(self, barrier_count);
  self->SetState(kWaitingPerformingGc);
  Locks::mutator_lock_->SharedLock(self);
  Locks::heap_bitmap_lock_->ExclusiveLock(self);
  timings_.EndSplit();
}

void MarkSweep::SweepArray(accounting::ObjectStack* allocations, bool swap_bitmaps) {
  timings_.StartSplit("SweepArray");
  Thread* self = Thread::Current();
  mirror::Object* chunk_free_buffer[kSweepArrayChunkFreeSize];
  size_t chunk_free_pos = 0;
  size_t freed_bytes = 0;
  size_t freed_large_object_bytes = 0;
  size_t freed_objects = 0;
  size_t freed_large_objects = 0;
  // How many objects are left in the array, modified after each space is swept.
  Object** objects = const_cast<Object**>(allocations->Begin());
  size_t count = allocations->Size();
  // Change the order so that the non-moving space is swept last as an optimization.
  std::vector<space::ContinuousSpace*> sweep_spaces;
  space::ContinuousSpace* non_moving_space = nullptr;
  for (space::ContinuousSpace* space : heap_->GetContinuousSpaces()) {
    if (space->IsAllocSpace() && !immune_region_.ContainsSpace(space) &&
        space->GetLiveBitmap() != nullptr) {
      if (space == heap_->GetNonMovingSpace()) {
        non_moving_space = space;
      } else {
        sweep_spaces.push_back(space);
      }
    }
  }
  // We are unlikely to sweep a significant amount of non-movable objects, so we do these after
  // the other alloc spaces as an optimization.
  if (non_moving_space != nullptr) {
    sweep_spaces.push_back(non_moving_space);
  }
  // Start by sweeping the continuous spaces.
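  // The pass below compacts the allocation stack in place: objects in the current space are
  // either freed (unmarked) or dropped (marked), while objects belonging to other spaces are
  // kept in the array for the next iteration.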
  for (space::ContinuousSpace* space : sweep_spaces) {
    space::AllocSpace* alloc_space = space->AsAllocSpace();
    accounting::SpaceBitmap* live_bitmap = space->GetLiveBitmap();
    accounting::SpaceBitmap* mark_bitmap = space->GetMarkBitmap();
    if (swap_bitmaps) {
      std::swap(live_bitmap, mark_bitmap);
    }
    Object** out = objects;
    for (size_t i = 0; i < count; ++i) {
      Object* obj = objects[i];
      if (kUseThreadLocalAllocationStack && obj == nullptr) {
        continue;
      }
      if (space->HasAddress(obj)) {
        // This object is in the space, remove it from the array and add it to the sweep buffer
        // if needed.
        if (!mark_bitmap->Test(obj)) {
          if (chunk_free_pos >= kSweepArrayChunkFreeSize) {
            timings_.StartSplit("FreeList");
            freed_objects += chunk_free_pos;
            freed_bytes += alloc_space->FreeList(self, chunk_free_pos, chunk_free_buffer);
            timings_.EndSplit();
            chunk_free_pos = 0;
          }
          chunk_free_buffer[chunk_free_pos++] = obj;
        }
      } else {
        *(out++) = obj;
      }
    }
    if (chunk_free_pos > 0) {
      timings_.StartSplit("FreeList");
      freed_objects += chunk_free_pos;
      freed_bytes += alloc_space->FreeList(self, chunk_free_pos, chunk_free_buffer);
      timings_.EndSplit();
      chunk_free_pos = 0;
    }
    // All of the references which the space contained are no longer in the allocation stack;
    // update the count.
    count = out - objects;
  }
  // Handle the large object space.
  space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
  accounting::ObjectSet* large_live_objects = large_object_space->GetLiveObjects();
  accounting::ObjectSet* large_mark_objects = large_object_space->GetMarkObjects();
  if (swap_bitmaps) {
    std::swap(large_live_objects, large_mark_objects);
  }
  for (size_t i = 0; i < count; ++i) {
    Object* obj = objects[i];
    // Handle large objects.
    if (kUseThreadLocalAllocationStack && obj == nullptr) {
      continue;
    }
    if (!large_mark_objects->Test(obj)) {
      ++freed_large_objects;
      freed_large_object_bytes += large_object_space->Free(self, obj);
    }
  }
  timings_.EndSplit();

  timings_.StartSplit("RecordFree");
  VLOG(heap) << "Freed " << freed_objects << "/" << count
             << " objects with size " << PrettySize(freed_bytes);
  heap_->RecordFree(freed_objects + freed_large_objects, freed_bytes + freed_large_object_bytes);
  freed_objects_.FetchAndAdd(freed_objects);
  freed_large_objects_.FetchAndAdd(freed_large_objects);
  freed_bytes_.FetchAndAdd(freed_bytes);
  freed_large_object_bytes_.FetchAndAdd(freed_large_object_bytes);
  timings_.EndSplit();

  timings_.StartSplit("ResetStack");
  allocations->Reset();
  timings_.EndSplit();
}

void MarkSweep::Sweep(bool swap_bitmaps) {
  DCHECK(mark_stack_->IsEmpty());
  TimingLogger::ScopedSplit split("Sweep", &timings_);
  for (const auto& space : GetHeap()->GetContinuousSpaces()) {
    if (space->IsContinuousMemMapAllocSpace()) {
      space::ContinuousMemMapAllocSpace* alloc_space = space->AsContinuousMemMapAllocSpace();
"SweepZygoteSpace" : "SweepMallocSpace", &timings_); 1178 size_t freed_objects = 0; 1179 size_t freed_bytes = 0; 1180 alloc_space->Sweep(swap_bitmaps, &freed_objects, &freed_bytes); 1181 heap_->RecordFree(freed_objects, freed_bytes); 1182 freed_objects_.FetchAndAdd(freed_objects); 1183 freed_bytes_.FetchAndAdd(freed_bytes); 1184 } 1185 } 1186 SweepLargeObjects(swap_bitmaps); 1187} 1188 1189void MarkSweep::SweepLargeObjects(bool swap_bitmaps) { 1190 TimingLogger::ScopedSplit("SweepLargeObjects", &timings_); 1191 size_t freed_objects = 0; 1192 size_t freed_bytes = 0; 1193 GetHeap()->GetLargeObjectsSpace()->Sweep(swap_bitmaps, &freed_objects, &freed_bytes); 1194 freed_large_objects_.FetchAndAdd(freed_objects); 1195 freed_large_object_bytes_.FetchAndAdd(freed_bytes); 1196 GetHeap()->RecordFree(freed_objects, freed_bytes); 1197} 1198 1199// Process the "referent" field in a java.lang.ref.Reference. If the 1200// referent has not yet been marked, put it on the appropriate list in 1201// the heap for later processing. 1202void MarkSweep::DelayReferenceReferent(mirror::Class* klass, Object* obj) { 1203 DCHECK(klass != nullptr); 1204 heap_->DelayReferenceReferent(klass, obj->AsReference(), IsMarkedCallback, this); 1205} 1206 1207class MarkObjectVisitor { 1208 public: 1209 explicit MarkObjectVisitor(MarkSweep* const mark_sweep) ALWAYS_INLINE : mark_sweep_(mark_sweep) {} 1210 1211 // TODO: Fixme when anotatalysis works with visitors. 1212 void operator()(const Object* /* obj */, const Object* ref, const MemberOffset& /* offset */, 1213 bool /* is_static */) const ALWAYS_INLINE 1214 NO_THREAD_SAFETY_ANALYSIS { 1215 if (kCheckLocks) { 1216 Locks::mutator_lock_->AssertSharedHeld(Thread::Current()); 1217 Locks::heap_bitmap_lock_->AssertExclusiveHeld(Thread::Current()); 1218 } 1219 mark_sweep_->MarkObject(ref); 1220 } 1221 1222 private: 1223 MarkSweep* const mark_sweep_; 1224}; 1225 1226// Scans an object reference. Determines the type of the reference 1227// and dispatches to a specialized scanning routine. 1228void MarkSweep::ScanObject(Object* obj) { 1229 MarkObjectVisitor visitor(this); 1230 ScanObjectVisit(obj, visitor); 1231} 1232 1233void MarkSweep::ProcessMarkStackPausedCallback(void* arg) { 1234 DCHECK(arg != nullptr); 1235 reinterpret_cast<MarkSweep*>(arg)->ProcessMarkStack(true); 1236} 1237 1238void MarkSweep::ProcessMarkStackParallel(size_t thread_count) { 1239 Thread* self = Thread::Current(); 1240 ThreadPool* thread_pool = GetHeap()->GetThreadPool(); 1241 const size_t chunk_size = std::min(mark_stack_->Size() / thread_count + 1, 1242 static_cast<size_t>(MarkStackTask<false>::kMaxSize)); 1243 CHECK_GT(chunk_size, 0U); 1244 // Split the current mark stack up into work tasks. 1245 for (mirror::Object **it = mark_stack_->Begin(), **end = mark_stack_->End(); it < end; ) { 1246 const size_t delta = std::min(static_cast<size_t>(end - it), chunk_size); 1247 thread_pool->AddTask(self, new MarkStackTask<false>(thread_pool, this, delta, 1248 const_cast<const mirror::Object**>(it))); 1249 it += delta; 1250 } 1251 thread_pool->SetMaxActiveWorkers(thread_count - 1); 1252 thread_pool->StartWorkers(self); 1253 thread_pool->Wait(self, true, true); 1254 thread_pool->StopWorkers(self); 1255 mark_stack_->Reset(); 1256 CHECK_EQ(work_chunks_created_, work_chunks_deleted_) << " some of the work chunks were leaked"; 1257} 1258 1259// Scan anything that's on the mark stack. 1260void MarkSweep::ProcessMarkStack(bool paused) { 1261 timings_.StartSplit(paused ? 
"(Paused)ProcessMarkStack" : "ProcessMarkStack"); 1262 size_t thread_count = GetThreadCount(paused); 1263 if (kParallelProcessMarkStack && thread_count > 1 && 1264 mark_stack_->Size() >= kMinimumParallelMarkStackSize) { 1265 ProcessMarkStackParallel(thread_count); 1266 } else { 1267 // TODO: Tune this. 1268 static const size_t kFifoSize = 4; 1269 BoundedFifoPowerOfTwo<Object*, kFifoSize> prefetch_fifo; 1270 for (;;) { 1271 Object* obj = NULL; 1272 if (kUseMarkStackPrefetch) { 1273 while (!mark_stack_->IsEmpty() && prefetch_fifo.size() < kFifoSize) { 1274 Object* obj = mark_stack_->PopBack(); 1275 DCHECK(obj != NULL); 1276 __builtin_prefetch(obj); 1277 prefetch_fifo.push_back(obj); 1278 } 1279 if (prefetch_fifo.empty()) { 1280 break; 1281 } 1282 obj = prefetch_fifo.front(); 1283 prefetch_fifo.pop_front(); 1284 } else { 1285 if (mark_stack_->IsEmpty()) { 1286 break; 1287 } 1288 obj = mark_stack_->PopBack(); 1289 } 1290 DCHECK(obj != NULL); 1291 ScanObject(obj); 1292 } 1293 } 1294 timings_.EndSplit(); 1295} 1296 1297inline bool MarkSweep::IsMarked(const Object* object) const 1298 SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) { 1299 if (immune_region_.ContainsObject(object)) { 1300 return true; 1301 } 1302 DCHECK(current_mark_bitmap_ != NULL); 1303 if (current_mark_bitmap_->HasAddress(object)) { 1304 return current_mark_bitmap_->Test(object); 1305 } 1306 return heap_->GetMarkBitmap()->Test(object); 1307} 1308 1309void MarkSweep::FinishPhase() { 1310 TimingLogger::ScopedSplit split("FinishPhase", &timings_); 1311 // Can't enqueue references if we hold the mutator lock. 1312 Heap* heap = GetHeap(); 1313 timings_.NewSplit("PostGcVerification"); 1314 heap->PostGcVerification(this); 1315 1316 // Update the cumulative statistics 1317 total_freed_objects_ += GetFreedObjects() + GetFreedLargeObjects(); 1318 total_freed_bytes_ += GetFreedBytes() + GetFreedLargeObjectBytes(); 1319 1320 // Ensure that the mark stack is empty. 1321 CHECK(mark_stack_->IsEmpty()); 1322 1323 if (kCountScannedTypes) { 1324 VLOG(gc) << "MarkSweep scanned classes=" << class_count_ << " arrays=" << array_count_ 1325 << " other=" << other_count_; 1326 } 1327 1328 if (kCountTasks) { 1329 VLOG(gc) << "Total number of work chunks allocated: " << work_chunks_created_; 1330 } 1331 1332 if (kMeasureOverhead) { 1333 VLOG(gc) << "Overhead time " << PrettyDuration(overhead_time_); 1334 } 1335 1336 if (kProfileLargeObjects) { 1337 VLOG(gc) << "Large objects tested " << large_object_test_ << " marked " << large_object_mark_; 1338 } 1339 1340 if (kCountClassesMarked) { 1341 VLOG(gc) << "Classes marked " << classes_marked_; 1342 } 1343 1344 if (kCountJavaLangRefs) { 1345 VLOG(gc) << "References scanned " << reference_count_; 1346 } 1347 1348 // Update the cumulative loggers. 1349 cumulative_timings_.Start(); 1350 cumulative_timings_.AddLogger(timings_); 1351 cumulative_timings_.End(); 1352 1353 // Clear all of the spaces' mark bitmaps. 1354 for (const auto& space : GetHeap()->GetContinuousSpaces()) { 1355 accounting::SpaceBitmap* bitmap = space->GetMarkBitmap(); 1356 if (bitmap != nullptr && 1357 space->GetGcRetentionPolicy() != space::kGcRetentionPolicyNeverCollect) { 1358 bitmap->Clear(); 1359 } 1360 } 1361 mark_stack_->Reset(); 1362 1363 // Reset the marked large objects. 1364 space::LargeObjectSpace* large_objects = GetHeap()->GetLargeObjectsSpace(); 1365 large_objects->GetMarkObjects()->Clear(); 1366} 1367 1368} // namespace collector 1369} // namespace gc 1370} // namespace art 1371