mark_sweep.cc revision c22c59ef8513b4cbbfd25073d1afbf58196b522a
/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "mark_sweep.h"

#include <functional>
#include <numeric>
#include <climits>
#include <vector>

#include "base/bounded_fifo.h"
#include "base/logging.h"
#include "base/macros.h"
#include "base/mutex-inl.h"
#include "base/timing_logger.h"
#include "gc/accounting/card_table-inl.h"
#include "gc/accounting/heap_bitmap.h"
#include "gc/accounting/mod_union_table.h"
#include "gc/accounting/space_bitmap-inl.h"
#include "gc/heap.h"
#include "gc/space/image_space.h"
#include "gc/space/large_object_space.h"
#include "gc/space/space-inl.h"
#include "indirect_reference_table.h"
#include "intern_table.h"
#include "jni_internal.h"
#include "monitor.h"
#include "mark_sweep-inl.h"
#include "mirror/art_field.h"
#include "mirror/art_field-inl.h"
#include "mirror/class-inl.h"
#include "mirror/class_loader.h"
#include "mirror/dex_cache.h"
#include "mirror/object-inl.h"
#include "mirror/object_array.h"
#include "mirror/object_array-inl.h"
#include "runtime.h"
#include "thread-inl.h"
#include "thread_list.h"
#include "verifier/method_verifier.h"

using ::art::mirror::ArtField;
using ::art::mirror::Class;
using ::art::mirror::Object;
using ::art::mirror::ObjectArray;

namespace art {
namespace gc {
namespace collector {

// Performance options.
constexpr bool kUseRecursiveMark = false;
constexpr bool kUseMarkStackPrefetch = true;
constexpr size_t kSweepArrayChunkFreeSize = 1024;
constexpr bool kPreCleanCards = true;

// Parallelism options.
constexpr bool kParallelCardScan = true;
constexpr bool kParallelRecursiveMark = true;
// Don't attempt to parallelize mark stack processing unless the mark stack is at least n
// elements. This is temporary until we reduce the overhead caused by allocating tasks, etc. Not
// having this can add overhead in ProcessReferences since we may end up doing many calls of
// ProcessMarkStack with very small mark stacks.
constexpr size_t kMinimumParallelMarkStackSize = 128;
constexpr bool kParallelProcessMarkStack = true;

// Profiling and information flags.
constexpr bool kCountClassesMarked = false;
constexpr bool kProfileLargeObjects = false;
constexpr bool kMeasureOverhead = false;
constexpr bool kCountTasks = false;
constexpr bool kCountJavaLangRefs = false;

// Turn off kCheckLocks when profiling the GC since it slows the GC down by up to 40%.
constexpr bool kCheckLocks = kDebugLocking;

void MarkSweep::ImmuneSpace(space::ContinuousSpace* space) {
  // Bind live to mark bitmap if necessary.
  if (space->GetLiveBitmap() != space->GetMarkBitmap()) {
    CHECK(space->IsContinuousMemMapAllocSpace());
    space->AsContinuousMemMapAllocSpace()->BindLiveToMarkBitmap();
  }

  // Add the space to the immune region.
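  // The immune region is kept as a single contiguous [immune_begin_, immune_end_) range below:
  // a space either starts the region or, if it directly follows an already-immune space,
  // extends it.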
  // TODO: Use space limits instead of current end_ since the end_ can be changed by dlmalloc
  // callbacks.
  if (immune_begin_ == NULL) {
    DCHECK(immune_end_ == NULL);
    SetImmuneRange(reinterpret_cast<Object*>(space->Begin()),
                   reinterpret_cast<Object*>(space->End()));
  } else {
    const space::ContinuousSpace* prev_space = nullptr;
    // Find out if the previous space is immune.
    for (const space::ContinuousSpace* cur_space : GetHeap()->GetContinuousSpaces()) {
      if (cur_space == space) {
        break;
      }
      prev_space = cur_space;
    }
    // If previous space was immune, then extend the immune region. Relies on continuous spaces
    // being sorted by Heap::AddContinuousSpace.
    if (prev_space != nullptr && IsImmuneSpace(prev_space)) {
      immune_begin_ = std::min(reinterpret_cast<Object*>(space->Begin()), immune_begin_);
      immune_end_ = std::max(reinterpret_cast<Object*>(space->End()), immune_end_);
    }
  }
}

bool MarkSweep::IsImmuneSpace(const space::ContinuousSpace* space) const {
  return
      immune_begin_ <= reinterpret_cast<Object*>(space->Begin()) &&
      immune_end_ >= reinterpret_cast<Object*>(space->End());
}

void MarkSweep::BindBitmaps() {
  timings_.StartSplit("BindBitmaps");
  WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
  // Mark all of the spaces we never collect as immune.
  for (const auto& space : GetHeap()->GetContinuousSpaces()) {
    if (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyNeverCollect) {
      ImmuneSpace(space);
    }
  }
  timings_.EndSplit();
}

MarkSweep::MarkSweep(Heap* heap, bool is_concurrent, const std::string& name_prefix)
    : GarbageCollector(heap,
                       name_prefix +
                       (is_concurrent ? "concurrent mark sweep" : "mark sweep")),
      current_mark_bitmap_(NULL),
      mark_stack_(NULL),
      immune_begin_(NULL),
      immune_end_(NULL),
      live_stack_freeze_size_(0),
      gc_barrier_(new Barrier(0)),
      large_object_lock_("mark sweep large object lock", kMarkSweepLargeObjectLock),
      mark_stack_lock_("mark sweep mark stack lock", kMarkSweepMarkStackLock),
      is_concurrent_(is_concurrent) {
}

void MarkSweep::InitializePhase() {
  timings_.Reset();
  TimingLogger::ScopedSplit split("InitializePhase", &timings_);
  mark_stack_ = heap_->mark_stack_.get();
  DCHECK(mark_stack_ != nullptr);
  SetImmuneRange(nullptr, nullptr);
  class_count_ = 0;
  array_count_ = 0;
  other_count_ = 0;
  large_object_test_ = 0;
  large_object_mark_ = 0;
  classes_marked_ = 0;
  overhead_time_ = 0;
  work_chunks_created_ = 0;
  work_chunks_deleted_ = 0;
  reference_count_ = 0;

  FindDefaultMarkBitmap();

  // Do any pre GC verification.
  timings_.NewSplit("PreGcVerification");
  heap_->PreGcVerification(this);
}

void MarkSweep::ProcessReferences(Thread* self) {
  TimingLogger::ScopedSplit split("ProcessReferences", &timings_);
  WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
  GetHeap()->ProcessReferences(timings_, clear_soft_references_, &IsMarkedCallback,
                               &MarkObjectCallback, &ProcessMarkStackPausedCallback, this);
}

bool MarkSweep::HandleDirtyObjectsPhase() {
  TimingLogger::ScopedSplit split("(Paused)HandleDirtyObjectsPhase", &timings_);
  Thread* self = Thread::Current();
  Locks::mutator_lock_->AssertExclusiveHeld(self);

  {
    WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);

    // Re-mark root set.
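    // Roots may have changed while marking ran concurrently with the mutators, so visit them
    // again now that all threads are suspended.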
    ReMarkRoots();

    // Scan dirty objects; this is only required if we are not doing concurrent GC.
    RecursiveMarkDirtyObjects(true, accounting::CardTable::kCardDirty);
  }

  ProcessReferences(self);

  // Only need to do this if we have the card mark verification on, and only during concurrent GC.
  if (GetHeap()->verify_missing_card_marks_ || GetHeap()->verify_pre_gc_heap_ ||
      GetHeap()->verify_post_gc_heap_) {
    WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
    // This second sweep makes sure that we don't have any objects in the live stack which point to
    // freed objects. These cause problems since their references may be previously freed objects.
    SweepArray(GetHeap()->allocation_stack_.get(), false);
    // Since SweepArray() above resets the (active) allocation stack, we need to revoke the
    // thread-local allocation stacks that point into it.
    RevokeAllThreadLocalAllocationStacks(self);
  }

  timings_.StartSplit("PreSweepingGcVerification");
  heap_->PreSweepingGcVerification(this);
  timings_.EndSplit();

  // Ensure that nobody inserted items in the live stack after we swapped the stacks.
  ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
  CHECK_GE(live_stack_freeze_size_, GetHeap()->GetLiveStack()->Size());

  // Disallow new system weaks to prevent a race which occurs when someone adds a new system
  // weak before we sweep them. Since this new system weak may not be marked, the GC may
  // incorrectly sweep it. This also fixes a race where interning may attempt to return a strong
  // reference to a string that is about to be swept.
  Runtime::Current()->DisallowNewSystemWeaks();
  return true;
}

bool MarkSweep::IsConcurrent() const {
  return is_concurrent_;
}

void MarkSweep::PreCleanCards() {
  // Don't do this for non-concurrent GCs since they don't have any dirty cards.
  if (kPreCleanCards && IsConcurrent()) {
    Thread* self = Thread::Current();
    CHECK(!Locks::mutator_lock_->IsExclusiveHeld(self));
    // Process dirty cards and add dirty cards to mod union tables, also ages cards.
    heap_->ProcessCards(timings_);
    // Required so that we see aged cards before we start scanning the cards.
    MarkThreadRoots(self);
    // TODO: Only mark the dirty roots.
    MarkNonThreadRoots();
    MarkConcurrentRoots();
    // Process the newly aged cards.
    RecursiveMarkDirtyObjects(false, accounting::CardTable::kCardDirty - 1);
    // TODO: Empty allocation stack to reduce the number of objects we need to test / mark as live
    // in the next GC.
  }
}

void MarkSweep::RevokeAllThreadLocalAllocationStacks(Thread* self) {
  if (kUseThreadLocalAllocationStack) {
    Locks::mutator_lock_->AssertExclusiveHeld(self);
    heap_->RevokeAllThreadLocalAllocationStacks(self);
  }
}

void MarkSweep::MarkingPhase() {
  TimingLogger::ScopedSplit split("MarkingPhase", &timings_);
  Thread* self = Thread::Current();

  BindBitmaps();
  FindDefaultMarkBitmap();

  // Process dirty cards and add dirty cards to mod union tables.
  heap_->ProcessCards(timings_);

  // Need to do this before the checkpoint since we don't want any threads to add references to
  // the live stack during the recursive mark.
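  // After the swap, the live stack holds everything allocated before this point; objects
  // allocated from now on go onto the fresh allocation stack.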
  timings_.NewSplit("SwapStacks");
  heap_->SwapStacks(self);

  WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
  if (Locks::mutator_lock_->IsExclusiveHeld(self)) {
    // If we exclusively hold the mutator lock, all threads must be suspended.
    MarkRoots();
    RevokeAllThreadLocalAllocationStacks(self);
  } else {
    MarkThreadRoots(self);
    // At this point the live stack should no longer have any mutators which push into it.
    MarkNonThreadRoots();
  }
  live_stack_freeze_size_ = heap_->GetLiveStack()->Size();
  MarkConcurrentRoots();
  UpdateAndMarkModUnion();
  MarkReachableObjects();
  // Pre-clean dirtied cards to reduce pauses.
  PreCleanCards();
}

void MarkSweep::UpdateAndMarkModUnion() {
  for (const auto& space : heap_->GetContinuousSpaces()) {
    if (IsImmuneSpace(space)) {
      const char* name = space->IsZygoteSpace() ? "UpdateAndMarkZygoteModUnionTable" :
                                                  "UpdateAndMarkImageModUnionTable";
      TimingLogger::ScopedSplit split(name, &timings_);
      accounting::ModUnionTable* mod_union_table = heap_->FindModUnionTableFromSpace(space);
      CHECK(mod_union_table != nullptr);
      mod_union_table->UpdateAndMarkReferences(MarkObjectCallback, this);
    }
  }
}

void MarkSweep::MarkThreadRoots(Thread* self) {
  MarkRootsCheckpoint(self);
}

void MarkSweep::MarkReachableObjects() {
  // Mark everything allocated since the last GC as live so that we can sweep concurrently,
  // knowing that new allocations won't be marked as live.
  timings_.StartSplit("MarkStackAsLive");
  accounting::ObjectStack* live_stack = heap_->GetLiveStack();
  heap_->MarkAllocStackAsLive(live_stack);
  live_stack->Reset();
  timings_.EndSplit();
  // Recursively mark all the non-image bits set in the mark bitmap.
  RecursiveMark();
}

void MarkSweep::ReclaimPhase() {
  TimingLogger::ScopedSplit split("ReclaimPhase", &timings_);
  Thread* self = Thread::Current();

  if (!IsConcurrent()) {
    ProcessReferences(self);
  }

  {
    WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
    SweepSystemWeaks();
  }

  if (IsConcurrent()) {
    Runtime::Current()->AllowNewSystemWeaks();

    TimingLogger::ScopedSplit split("UnMarkAllocStack", &timings_);
    WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
    accounting::ObjectStack* allocation_stack = GetHeap()->allocation_stack_.get();
    if (!kPreCleanCards) {
      // The allocation stack contains things allocated since the start of the GC. These may have
      // been marked during this GC meaning they won't be eligible for reclaiming in the next
      // sticky GC. Unmark these objects so that they are eligible for reclaiming in the next
      // sticky GC.
      // There is a race here which is safely handled. Another thread such as the hprof could
      // have flushed the alloc stack after we resumed the threads. This is safe however, since
      // resetting the allocation stack zeros it out with madvise. This means that we will either
      // read NULLs or attempt to unmark a newly allocated object which will not be marked in the
      // first place.
      // We can't do this if we pre-clean cards since we will unmark objects which are no longer on
      // a dirty card since we aged cards during the pre-cleaning process.
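      // Walk the allocation stack and clear the mark bit of every object still on it.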
      mirror::Object** end = allocation_stack->End();
      for (mirror::Object** it = allocation_stack->Begin(); it != end; ++it) {
        const Object* obj = *it;
        if (obj != nullptr) {
          UnMarkObjectNonNull(obj);
        }
      }
    }
  }

  {
    WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);

    // Reclaim unmarked objects.
    Sweep(false);

    // Swap the live and mark bitmaps for each space which we modified. This is an
    // optimization that enables us to not clear live bits inside of the sweep. Only swaps unbound
    // bitmaps.
    timings_.StartSplit("SwapBitmaps");
    SwapBitmaps();
    timings_.EndSplit();

    // Unbind the live and mark bitmaps.
    TimingLogger::ScopedSplit split("UnBindBitmaps", &timings_);
    GetHeap()->UnBindBitmaps();
  }
}

void MarkSweep::SetImmuneRange(Object* begin, Object* end) {
  immune_begin_ = begin;
  immune_end_ = end;
}

void MarkSweep::FindDefaultMarkBitmap() {
  TimingLogger::ScopedSplit split("FindDefaultMarkBitmap", &timings_);
  for (const auto& space : GetHeap()->GetContinuousSpaces()) {
    accounting::SpaceBitmap* bitmap = space->GetMarkBitmap();
    if (bitmap != nullptr &&
        space->GetGcRetentionPolicy() == space::kGcRetentionPolicyAlwaysCollect) {
      current_mark_bitmap_ = bitmap;
      CHECK(current_mark_bitmap_ != NULL);
      return;
    }
  }
  GetHeap()->DumpSpaces();
  LOG(FATAL) << "Could not find a default mark bitmap";
}

void MarkSweep::ExpandMarkStack() {
  ResizeMarkStack(mark_stack_->Capacity() * 2);
}

void MarkSweep::ResizeMarkStack(size_t new_size) {
  // Rare case, no need to have Thread::Current be a parameter.
  if (UNLIKELY(mark_stack_->Size() < mark_stack_->Capacity())) {
    // Someone else acquired the lock and expanded the mark stack before us.
    return;
  }
  std::vector<Object*> temp(mark_stack_->Begin(), mark_stack_->End());
  CHECK_LE(mark_stack_->Size(), new_size);
  mark_stack_->Resize(new_size);
  for (const auto& obj : temp) {
    mark_stack_->PushBack(obj);
  }
}

inline void MarkSweep::MarkObjectNonNullParallel(const Object* obj) {
  DCHECK(obj != NULL);
  if (MarkObjectParallel(obj)) {
    MutexLock mu(Thread::Current(), mark_stack_lock_);
    if (UNLIKELY(mark_stack_->Size() >= mark_stack_->Capacity())) {
      ExpandMarkStack();
    }
    // The object must be pushed on to the mark stack.
    mark_stack_->PushBack(const_cast<Object*>(obj));
  }
}

mirror::Object* MarkSweep::MarkObjectCallback(mirror::Object* obj, void* arg) {
  MarkSweep* mark_sweep = reinterpret_cast<MarkSweep*>(arg);
  mark_sweep->MarkObject(obj);
  return obj;
}

inline void MarkSweep::UnMarkObjectNonNull(const Object* obj) {
  DCHECK(!IsImmune(obj));
  // Try to take advantage of locality of references within a space, failing this find the space
  // the hard way.
  accounting::SpaceBitmap* object_bitmap = current_mark_bitmap_;
  if (UNLIKELY(!object_bitmap->HasAddress(obj))) {
    accounting::SpaceBitmap* new_bitmap = heap_->GetMarkBitmap()->GetContinuousSpaceBitmap(obj);
    if (LIKELY(new_bitmap != NULL)) {
      object_bitmap = new_bitmap;
    } else {
      MarkLargeObject(obj, false);
      return;
    }
  }

  DCHECK(object_bitmap->HasAddress(obj));
  object_bitmap->Clear(obj);
}

inline void MarkSweep::MarkObjectNonNull(const Object* obj) {
  DCHECK(obj != NULL);

  if (IsImmune(obj)) {
    DCHECK(IsMarked(obj));
    return;
  }

  // Try to take advantage of locality of references within a space, failing this find the space
  // the hard way.
  accounting::SpaceBitmap* object_bitmap = current_mark_bitmap_;
  if (UNLIKELY(!object_bitmap->HasAddress(obj))) {
    accounting::SpaceBitmap* new_bitmap = heap_->GetMarkBitmap()->GetContinuousSpaceBitmap(obj);
    if (LIKELY(new_bitmap != NULL)) {
      object_bitmap = new_bitmap;
    } else {
      MarkLargeObject(obj, true);
      return;
    }
  }

  // This object was not previously marked.
  if (!object_bitmap->Test(obj)) {
    object_bitmap->Set(obj);
    if (UNLIKELY(mark_stack_->Size() >= mark_stack_->Capacity())) {
      // Lock is not needed but is here anyway to please annotalysis.
      MutexLock mu(Thread::Current(), mark_stack_lock_);
      ExpandMarkStack();
    }
    // The object must be pushed on to the mark stack.
    mark_stack_->PushBack(const_cast<Object*>(obj));
  }
}

// Rare case, probably not worth inlining since it will increase instruction cache miss rate.
bool MarkSweep::MarkLargeObject(const Object* obj, bool set) {
  // TODO: support >1 discontinuous space.
  space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
  accounting::ObjectSet* large_objects = large_object_space->GetMarkObjects();
  if (kProfileLargeObjects) {
    ++large_object_test_;
  }
  if (UNLIKELY(!large_objects->Test(obj))) {
    if (!large_object_space->Contains(obj)) {
      LOG(ERROR) << "Tried to mark " << obj << " not contained by any spaces";
      LOG(ERROR) << "Attempting to see if it's a bad root";
      VerifyRoots();
      LOG(FATAL) << "Can't mark bad root";
    }
    if (kProfileLargeObjects) {
      ++large_object_mark_;
    }
    if (set) {
      large_objects->Set(obj);
    } else {
      large_objects->Clear(obj);
    }
    return true;
  }
  return false;
}

inline bool MarkSweep::MarkObjectParallel(const Object* obj) {
  DCHECK(obj != NULL);

  if (IsImmune(obj)) {
    DCHECK(IsMarked(obj));
    return false;
  }

  // Try to take advantage of locality of references within a space, failing this find the space
  // the hard way.
  accounting::SpaceBitmap* object_bitmap = current_mark_bitmap_;
  if (UNLIKELY(!object_bitmap->HasAddress(obj))) {
    accounting::SpaceBitmap* new_bitmap = heap_->GetMarkBitmap()->GetContinuousSpaceBitmap(obj);
    if (new_bitmap != NULL) {
      object_bitmap = new_bitmap;
    } else {
      // TODO: Remove the Thread::Current here?
      // TODO: Convert this to some kind of atomic marking?
      MutexLock mu(Thread::Current(), large_object_lock_);
      return MarkLargeObject(obj, true);
    }
  }

  // Return true if the object was not previously marked.
  return !object_bitmap->AtomicTestAndSet(obj);
}

// Used to mark objects when recursing.
// Recursion is done by moving the finger across the bitmaps in address order and marking child
// objects. Any newly-marked objects whose addresses are lower than the finger won't be visited by
// the bitmap scan, so those objects need to be added to the mark stack.
inline void MarkSweep::MarkObject(const Object* obj) {
  if (obj != NULL) {
    MarkObjectNonNull(obj);
  }
}

void MarkSweep::MarkRoot(const Object* obj) {
  if (obj != NULL) {
    MarkObjectNonNull(obj);
  }
}

void MarkSweep::MarkRootParallelCallback(mirror::Object** root, void* arg, uint32_t /*thread_id*/,
                                         RootType /*root_type*/) {
  reinterpret_cast<MarkSweep*>(arg)->MarkObjectNonNullParallel(*root);
}

void MarkSweep::MarkRootCallback(Object** root, void* arg, uint32_t /*thread_id*/,
                                 RootType /*root_type*/) {
  reinterpret_cast<MarkSweep*>(arg)->MarkObjectNonNull(*root);
}

void MarkSweep::VerifyRootCallback(const Object* root, void* arg, size_t vreg,
                                   const StackVisitor* visitor) {
  reinterpret_cast<MarkSweep*>(arg)->VerifyRoot(root, vreg, visitor);
}

void MarkSweep::VerifyRoot(const Object* root, size_t vreg, const StackVisitor* visitor) {
  // See if the root is on any space bitmap.
  if (GetHeap()->GetLiveBitmap()->GetContinuousSpaceBitmap(root) == NULL) {
    space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
    if (!large_object_space->Contains(root)) {
      LOG(ERROR) << "Found invalid root: " << root;
      if (visitor != NULL) {
        LOG(ERROR) << visitor->DescribeLocation() << " in VReg: " << vreg;
      }
    }
  }
}

void MarkSweep::VerifyRoots() {
  Runtime::Current()->GetThreadList()->VerifyRoots(VerifyRootCallback, this);
}

// Marks all objects in the root set.
void MarkSweep::MarkRoots() {
  timings_.StartSplit("MarkRoots");
  Runtime::Current()->VisitNonConcurrentRoots(MarkRootCallback, this);
  timings_.EndSplit();
}

void MarkSweep::MarkNonThreadRoots() {
  timings_.StartSplit("MarkNonThreadRoots");
  Runtime::Current()->VisitNonThreadRoots(MarkRootCallback, this);
  timings_.EndSplit();
}

void MarkSweep::MarkConcurrentRoots() {
  timings_.StartSplit("MarkConcurrentRoots");
  // Visit all runtime roots and clear dirty flags.
  Runtime::Current()->VisitConcurrentRoots(MarkRootCallback, this, false, true);
  timings_.EndSplit();
}

class ScanObjectVisitor {
 public:
  explicit ScanObjectVisitor(MarkSweep* const mark_sweep) ALWAYS_INLINE
      : mark_sweep_(mark_sweep) {}

  // TODO: Fix me when annotalysis works with visitors.
  void operator()(Object* obj) const ALWAYS_INLINE NO_THREAD_SAFETY_ANALYSIS {
    if (kCheckLocks) {
      Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
      Locks::heap_bitmap_lock_->AssertExclusiveHeld(Thread::Current());
    }
    mark_sweep_->ScanObject(obj);
  }

 private:
  MarkSweep* const mark_sweep_;
};

template <bool kUseFinger = false>
class MarkStackTask : public Task {
 public:
  MarkStackTask(ThreadPool* thread_pool, MarkSweep* mark_sweep, size_t mark_stack_size,
                const Object** mark_stack)
      : mark_sweep_(mark_sweep),
        thread_pool_(thread_pool),
        mark_stack_pos_(mark_stack_size) {
    // We may have to copy part of an existing mark stack when another mark stack overflows.
    if (mark_stack_size != 0) {
      DCHECK(mark_stack != NULL);
      // TODO: Check performance?
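      // Copy the handed-off slice of the parent task's stack into this task's local buffer.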
      std::copy(mark_stack, mark_stack + mark_stack_size, mark_stack_);
    }
    if (kCountTasks) {
      ++mark_sweep_->work_chunks_created_;
    }
  }

  static const size_t kMaxSize = 1 * KB;

 protected:
  class ScanObjectParallelVisitor {
   public:
    explicit ScanObjectParallelVisitor(MarkStackTask<kUseFinger>* chunk_task) ALWAYS_INLINE
        : chunk_task_(chunk_task) {}

    void operator()(Object* obj) const {
      MarkSweep* mark_sweep = chunk_task_->mark_sweep_;
      mark_sweep->ScanObjectVisit(obj,
          [mark_sweep, this](Object* /* obj */, Object* ref, const MemberOffset& /* offset */,
              bool /* is_static */) ALWAYS_INLINE_LAMBDA {
            if (ref != nullptr && mark_sweep->MarkObjectParallel(ref)) {
              if (kUseFinger) {
                android_memory_barrier();
                if (reinterpret_cast<uintptr_t>(ref) >=
                    static_cast<uintptr_t>(mark_sweep->atomic_finger_)) {
                  return;
                }
              }
              chunk_task_->MarkStackPush(ref);
            }
          });
    }

   private:
    MarkStackTask<kUseFinger>* const chunk_task_;
  };

  virtual ~MarkStackTask() {
    // Make sure that we have cleared our mark stack.
    DCHECK_EQ(mark_stack_pos_, 0U);
    if (kCountTasks) {
      ++mark_sweep_->work_chunks_deleted_;
    }
  }

  MarkSweep* const mark_sweep_;
  ThreadPool* const thread_pool_;
  // Thread local mark stack for this task.
  const Object* mark_stack_[kMaxSize];
  // Mark stack position.
  size_t mark_stack_pos_;

  void MarkStackPush(const Object* obj) ALWAYS_INLINE {
    if (UNLIKELY(mark_stack_pos_ == kMaxSize)) {
      // Mark stack overflow, give 1/2 the stack to the thread pool as a new work task.
      mark_stack_pos_ /= 2;
      auto* task = new MarkStackTask(thread_pool_, mark_sweep_, kMaxSize - mark_stack_pos_,
                                     mark_stack_ + mark_stack_pos_);
      thread_pool_->AddTask(Thread::Current(), task);
    }
    DCHECK(obj != nullptr);
    DCHECK(mark_stack_pos_ < kMaxSize);
    mark_stack_[mark_stack_pos_++] = obj;
  }

  virtual void Finalize() {
    delete this;
  }

  // Scans all of the objects
  virtual void Run(Thread* self) {
    ScanObjectParallelVisitor visitor(this);
    // TODO: Tune this.
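    // Objects are popped into a small FIFO and prefetched a few entries ahead of where they are
    // scanned, so an object's memory is likely to be in the cache by the time it is visited.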
    static const size_t kFifoSize = 4;
    BoundedFifoPowerOfTwo<const Object*, kFifoSize> prefetch_fifo;
    for (;;) {
      const Object* obj = nullptr;
      if (kUseMarkStackPrefetch) {
        while (mark_stack_pos_ != 0 && prefetch_fifo.size() < kFifoSize) {
          const Object* obj = mark_stack_[--mark_stack_pos_];
          DCHECK(obj != nullptr);
          __builtin_prefetch(obj);
          prefetch_fifo.push_back(obj);
        }
        if (UNLIKELY(prefetch_fifo.empty())) {
          break;
        }
        obj = prefetch_fifo.front();
        prefetch_fifo.pop_front();
      } else {
        if (UNLIKELY(mark_stack_pos_ == 0)) {
          break;
        }
        obj = mark_stack_[--mark_stack_pos_];
      }
      DCHECK(obj != nullptr);
      visitor(const_cast<mirror::Object*>(obj));
    }
  }
};

class CardScanTask : public MarkStackTask<false> {
 public:
  CardScanTask(ThreadPool* thread_pool, MarkSweep* mark_sweep, accounting::SpaceBitmap* bitmap,
               byte* begin, byte* end, byte minimum_age, size_t mark_stack_size,
               const Object** mark_stack_obj)
      : MarkStackTask<false>(thread_pool, mark_sweep, mark_stack_size, mark_stack_obj),
        bitmap_(bitmap),
        begin_(begin),
        end_(end),
        minimum_age_(minimum_age) {
  }

 protected:
  accounting::SpaceBitmap* const bitmap_;
  byte* const begin_;
  byte* const end_;
  const byte minimum_age_;

  virtual void Finalize() {
    delete this;
  }

  virtual void Run(Thread* self) NO_THREAD_SAFETY_ANALYSIS {
    ScanObjectParallelVisitor visitor(this);
    accounting::CardTable* card_table = mark_sweep_->GetHeap()->GetCardTable();
    size_t cards_scanned = card_table->Scan(bitmap_, begin_, end_, visitor, minimum_age_);
    VLOG(heap) << "Parallel scanning cards " << reinterpret_cast<void*>(begin_) << " - "
               << reinterpret_cast<void*>(end_) << " = " << cards_scanned;
    // Finish by emptying our local mark stack.
    MarkStackTask::Run(self);
  }
};

size_t MarkSweep::GetThreadCount(bool paused) const {
  if (heap_->GetThreadPool() == nullptr || !heap_->CareAboutPauseTimes()) {
    return 0;
  }
  if (paused) {
    return heap_->GetParallelGCThreadCount() + 1;
  } else {
    return heap_->GetConcGCThreadCount() + 1;
  }
}

void MarkSweep::ScanGrayObjects(bool paused, byte minimum_age) {
  accounting::CardTable* card_table = GetHeap()->GetCardTable();
  ThreadPool* thread_pool = GetHeap()->GetThreadPool();
  size_t thread_count = GetThreadCount(paused);
  // The parallel version with only one thread is faster for card scanning. TODO: fix.
  if (kParallelCardScan && thread_count > 0) {
    Thread* self = Thread::Current();
    // Can't have a different split for each space since multiple spaces can have their cards being
    // scanned at the same time.
    timings_.StartSplit(paused ? "(Paused)ScanGrayObjects" : "ScanGrayObjects");
    // Try to take some of the mark stack since we can pass this off to the worker tasks.
    const Object** mark_stack_begin = const_cast<const Object**>(mark_stack_->Begin());
    const Object** mark_stack_end = const_cast<const Object**>(mark_stack_->End());
    const size_t mark_stack_size = mark_stack_end - mark_stack_begin;
    // Estimated number of work tasks we will create.
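    // Each CardScanTask below is seeded with a slice taken from the back of the mark stack so
    // that the workers have marking work available as soon as they start.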
    const size_t mark_stack_tasks = GetHeap()->GetContinuousSpaces().size() * thread_count;
    DCHECK_NE(mark_stack_tasks, 0U);
    const size_t mark_stack_delta = std::min(CardScanTask::kMaxSize / 2,
                                             mark_stack_size / mark_stack_tasks + 1);
    for (const auto& space : GetHeap()->GetContinuousSpaces()) {
      if (space->GetMarkBitmap() == nullptr) {
        continue;
      }
      byte* card_begin = space->Begin();
      byte* card_end = space->End();
      // Align up the end address. For example, the image space's end
      // may not be card-size-aligned.
      card_end = AlignUp(card_end, accounting::CardTable::kCardSize);
      DCHECK(IsAligned<accounting::CardTable::kCardSize>(card_begin));
      DCHECK(IsAligned<accounting::CardTable::kCardSize>(card_end));
      // Calculate how many bytes of heap we will scan.
      const size_t address_range = card_end - card_begin;
      // Calculate how much address range each task gets.
      const size_t card_delta = RoundUp(address_range / thread_count + 1,
                                        accounting::CardTable::kCardSize);
      // Create the worker tasks for this space.
      while (card_begin != card_end) {
        // Add a range of cards.
        size_t addr_remaining = card_end - card_begin;
        size_t card_increment = std::min(card_delta, addr_remaining);
        // Take from the back of the mark stack.
        size_t mark_stack_remaining = mark_stack_end - mark_stack_begin;
        size_t mark_stack_increment = std::min(mark_stack_delta, mark_stack_remaining);
        mark_stack_end -= mark_stack_increment;
        mark_stack_->PopBackCount(static_cast<int32_t>(mark_stack_increment));
        DCHECK_EQ(mark_stack_end, mark_stack_->End());
        // Add the new task to the thread pool.
        auto* task = new CardScanTask(thread_pool, this, space->GetMarkBitmap(), card_begin,
                                      card_begin + card_increment, minimum_age,
                                      mark_stack_increment, mark_stack_end);
        thread_pool->AddTask(self, task);
        card_begin += card_increment;
      }
    }

    // Note: the card scan below may dirty new cards (and scan them)
    // as a side effect when a Reference object is encountered and
    // queued during the marking. See b/11465268.
    thread_pool->SetMaxActiveWorkers(thread_count - 1);
    thread_pool->StartWorkers(self);
    thread_pool->Wait(self, true, true);
    thread_pool->StopWorkers(self);
    timings_.EndSplit();
  } else {
    for (const auto& space : GetHeap()->GetContinuousSpaces()) {
      if (space->GetMarkBitmap() != nullptr) {
        // Image spaces are handled properly since live == marked for them.
        switch (space->GetGcRetentionPolicy()) {
          case space::kGcRetentionPolicyNeverCollect:
            timings_.StartSplit(paused ? "(Paused)ScanGrayImageSpaceObjects" :
                                         "ScanGrayImageSpaceObjects");
            break;
          case space::kGcRetentionPolicyFullCollect:
            timings_.StartSplit(paused ? "(Paused)ScanGrayZygoteSpaceObjects" :
                                         "ScanGrayZygoteSpaceObjects");
            break;
          case space::kGcRetentionPolicyAlwaysCollect:
            timings_.StartSplit(paused ? "(Paused)ScanGrayAllocSpaceObjects" :
"(Paused)ScanGrayAllocSpaceObjects" : 870 "ScanGrayAllocSpaceObjects"); 871 break; 872 } 873 ScanObjectVisitor visitor(this); 874 card_table->Scan(space->GetMarkBitmap(), space->Begin(), space->End(), visitor, minimum_age); 875 timings_.EndSplit(); 876 } 877 } 878 } 879} 880 881class RecursiveMarkTask : public MarkStackTask<false> { 882 public: 883 RecursiveMarkTask(ThreadPool* thread_pool, MarkSweep* mark_sweep, 884 accounting::SpaceBitmap* bitmap, uintptr_t begin, uintptr_t end) 885 : MarkStackTask<false>(thread_pool, mark_sweep, 0, NULL), 886 bitmap_(bitmap), 887 begin_(begin), 888 end_(end) { 889 } 890 891 protected: 892 accounting::SpaceBitmap* const bitmap_; 893 const uintptr_t begin_; 894 const uintptr_t end_; 895 896 virtual void Finalize() { 897 delete this; 898 } 899 900 // Scans all of the objects 901 virtual void Run(Thread* self) NO_THREAD_SAFETY_ANALYSIS { 902 ScanObjectParallelVisitor visitor(this); 903 bitmap_->VisitMarkedRange(begin_, end_, visitor); 904 // Finish by emptying our local mark stack. 905 MarkStackTask::Run(self); 906 } 907}; 908 909// Populates the mark stack based on the set of marked objects and 910// recursively marks until the mark stack is emptied. 911void MarkSweep::RecursiveMark() { 912 TimingLogger::ScopedSplit split("RecursiveMark", &timings_); 913 // RecursiveMark will build the lists of known instances of the Reference classes. See 914 // DelayReferenceReferent for details. 915 if (kUseRecursiveMark) { 916 const bool partial = GetGcType() == kGcTypePartial; 917 ScanObjectVisitor scan_visitor(this); 918 auto* self = Thread::Current(); 919 ThreadPool* thread_pool = heap_->GetThreadPool(); 920 size_t thread_count = GetThreadCount(false); 921 const bool parallel = kParallelRecursiveMark && thread_count > 1; 922 mark_stack_->Reset(); 923 for (const auto& space : GetHeap()->GetContinuousSpaces()) { 924 if ((space->GetGcRetentionPolicy() == space::kGcRetentionPolicyAlwaysCollect) || 925 (!partial && space->GetGcRetentionPolicy() == space::kGcRetentionPolicyFullCollect)) { 926 current_mark_bitmap_ = space->GetMarkBitmap(); 927 if (current_mark_bitmap_ == nullptr) { 928 continue; 929 } 930 if (parallel) { 931 // We will use the mark stack the future. 932 // CHECK(mark_stack_->IsEmpty()); 933 // This function does not handle heap end increasing, so we must use the space end. 934 uintptr_t begin = reinterpret_cast<uintptr_t>(space->Begin()); 935 uintptr_t end = reinterpret_cast<uintptr_t>(space->End()); 936 atomic_finger_ = static_cast<int32_t>(0xFFFFFFFF); 937 938 // Create a few worker tasks. 939 const size_t n = thread_count * 2; 940 while (begin != end) { 941 uintptr_t start = begin; 942 uintptr_t delta = (end - begin) / n; 943 delta = RoundUp(delta, KB); 944 if (delta < 16 * KB) delta = end - begin; 945 begin += delta; 946 auto* task = new RecursiveMarkTask(thread_pool, this, current_mark_bitmap_, start, 947 begin); 948 thread_pool->AddTask(self, task); 949 } 950 thread_pool->SetMaxActiveWorkers(thread_count - 1); 951 thread_pool->StartWorkers(self); 952 thread_pool->Wait(self, true, true); 953 thread_pool->StopWorkers(self); 954 } else { 955 // This function does not handle heap end increasing, so we must use the space end. 
          uintptr_t begin = reinterpret_cast<uintptr_t>(space->Begin());
          uintptr_t end = reinterpret_cast<uintptr_t>(space->End());
          current_mark_bitmap_->VisitMarkedRange(begin, end, scan_visitor);
        }
      }
    }
  }
  ProcessMarkStack(false);
}

mirror::Object* MarkSweep::IsMarkedCallback(mirror::Object* object, void* arg) {
  if (reinterpret_cast<MarkSweep*>(arg)->IsMarked(object)) {
    return object;
  }
  return nullptr;
}

void MarkSweep::RecursiveMarkDirtyObjects(bool paused, byte minimum_age) {
  ScanGrayObjects(paused, minimum_age);
  ProcessMarkStack(paused);
}

void MarkSweep::ReMarkRoots() {
  timings_.StartSplit("(Paused)ReMarkRoots");
  Runtime::Current()->VisitRoots(MarkRootCallback, this, true, true);
  timings_.EndSplit();
}

void MarkSweep::SweepSystemWeaks() {
  Runtime* runtime = Runtime::Current();
  timings_.StartSplit("SweepSystemWeaks");
  runtime->SweepSystemWeaks(IsMarkedCallback, this);
  timings_.EndSplit();
}

mirror::Object* MarkSweep::VerifySystemWeakIsLiveCallback(Object* obj, void* arg) {
  reinterpret_cast<MarkSweep*>(arg)->VerifyIsLive(obj);
  // We don't actually want to sweep the object, so let's return "marked".
  return obj;
}

void MarkSweep::VerifyIsLive(const Object* obj) {
  Heap* heap = GetHeap();
  if (!heap->GetLiveBitmap()->Test(obj)) {
    space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
    if (!large_object_space->GetLiveObjects()->Test(obj)) {
      if (std::find(heap->allocation_stack_->Begin(), heap->allocation_stack_->End(), obj) ==
          heap->allocation_stack_->End()) {
        // Object not found!
        heap->DumpSpaces();
        LOG(FATAL) << "Found dead object " << obj;
      }
    }
  }
}

void MarkSweep::VerifySystemWeaks() {
  // Verify system weaks, using a special object visitor which returns the input object.
  Runtime::Current()->SweepSystemWeaks(VerifySystemWeakIsLiveCallback, this);
}

class CheckpointMarkThreadRoots : public Closure {
 public:
  explicit CheckpointMarkThreadRoots(MarkSweep* mark_sweep) : mark_sweep_(mark_sweep) {}

  virtual void Run(Thread* thread) NO_THREAD_SAFETY_ANALYSIS {
    ATRACE_BEGIN("Marking thread roots");
    // Note: self is not necessarily equal to thread since thread may be suspended.
    Thread* self = Thread::Current();
    CHECK(thread == self || thread->IsSuspended() || thread->GetState() == kWaitingPerformingGc)
        << thread->GetState() << " thread " << thread << " self " << self;
    thread->VisitRoots(MarkSweep::MarkRootParallelCallback, mark_sweep_);
    ATRACE_END();
    if (kUseThreadLocalAllocationStack) {
      thread->RevokeThreadLocalAllocationStack();
    }
    mark_sweep_->GetBarrier().Pass(self);
  }

 private:
  MarkSweep* mark_sweep_;
};

void MarkSweep::MarkRootsCheckpoint(Thread* self) {
  CheckpointMarkThreadRoots check_point(this);
  timings_.StartSplit("MarkRootsCheckpoint");
  ThreadList* thread_list = Runtime::Current()->GetThreadList();
  // Request that the checkpoint be run on all threads, returning a count of the threads that
  // must run through the barrier, including self.
  size_t barrier_count = thread_list->RunCheckpoint(&check_point);
  // Release locks then wait for all mutator threads to pass the barrier.
  // TODO: optimize to not release locks when there are no threads to wait for.
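  // The GC currently holds heap_bitmap_lock_ exclusively and mutator_lock_ shared; both are
  // dropped so that mutator threads can run the checkpoint, then re-acquired once every thread
  // has passed the barrier.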
  Locks::heap_bitmap_lock_->ExclusiveUnlock(self);
  Locks::mutator_lock_->SharedUnlock(self);
  ThreadState old_state = self->SetState(kWaitingForCheckPointsToRun);
  CHECK_EQ(old_state, kWaitingPerformingGc);
  gc_barrier_->Increment(self, barrier_count);
  self->SetState(kWaitingPerformingGc);
  Locks::mutator_lock_->SharedLock(self);
  Locks::heap_bitmap_lock_->ExclusiveLock(self);
  timings_.EndSplit();
}

void MarkSweep::SweepArray(accounting::ObjectStack* allocations, bool swap_bitmaps) {
  timings_.StartSplit("SweepArray");
  Thread* self = Thread::Current();
  mirror::Object* chunk_free_buffer[kSweepArrayChunkFreeSize];
  size_t chunk_free_pos = 0;
  size_t freed_bytes = 0;
  size_t freed_large_object_bytes = 0;
  size_t freed_objects = 0;
  size_t freed_large_objects = 0;
  // How many objects are left in the array, modified after each space is swept.
  Object** objects = const_cast<Object**>(allocations->Begin());
  size_t count = allocations->Size();
  // Change the order to ensure that the non-moving space is swept last as an optimization.
  std::vector<space::ContinuousSpace*> sweep_spaces;
  space::ContinuousSpace* non_moving_space = nullptr;
  for (space::ContinuousSpace* space : heap_->GetContinuousSpaces()) {
    if (space->IsAllocSpace() && !IsImmuneSpace(space) && space->GetLiveBitmap() != nullptr) {
      if (space == heap_->GetNonMovingSpace()) {
        non_moving_space = space;
      } else {
        sweep_spaces.push_back(space);
      }
    }
  }
  // Unlikely to sweep a significant number of non-moving objects, so we do these after the
  // other alloc spaces as an optimization.
  if (non_moving_space != nullptr) {
    sweep_spaces.push_back(non_moving_space);
  }
  // Start by sweeping the continuous spaces.
  for (space::ContinuousSpace* space : sweep_spaces) {
    space::AllocSpace* alloc_space = space->AsAllocSpace();
    accounting::SpaceBitmap* live_bitmap = space->GetLiveBitmap();
    accounting::SpaceBitmap* mark_bitmap = space->GetMarkBitmap();
    if (swap_bitmaps) {
      std::swap(live_bitmap, mark_bitmap);
    }
    Object** out = objects;
    for (size_t i = 0; i < count; ++i) {
      Object* obj = objects[i];
      if (kUseThreadLocalAllocationStack && obj == nullptr) {
        continue;
      }
      if (space->HasAddress(obj)) {
        // This object is in the space, remove it from the array and add it to the sweep buffer
        // if needed.
        if (!mark_bitmap->Test(obj)) {
          if (chunk_free_pos >= kSweepArrayChunkFreeSize) {
            timings_.StartSplit("FreeList");
            freed_objects += chunk_free_pos;
            freed_bytes += alloc_space->FreeList(self, chunk_free_pos, chunk_free_buffer);
            timings_.EndSplit();
            chunk_free_pos = 0;
          }
          chunk_free_buffer[chunk_free_pos++] = obj;
        }
      } else {
        *(out++) = obj;
      }
    }
    if (chunk_free_pos > 0) {
      timings_.StartSplit("FreeList");
      freed_objects += chunk_free_pos;
      freed_bytes += alloc_space->FreeList(self, chunk_free_pos, chunk_free_buffer);
      timings_.EndSplit();
      chunk_free_pos = 0;
    }
    // All of the references which the space contained are no longer in the allocation stack,
    // update the count.
    count = out - objects;
  }
  // Handle the large object space.
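  // Entries that survived the continuous-space passes above are not in any alloc space, so
  // whatever remains in the array can only be large objects.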
  space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
  accounting::ObjectSet* large_live_objects = large_object_space->GetLiveObjects();
  accounting::ObjectSet* large_mark_objects = large_object_space->GetMarkObjects();
  if (swap_bitmaps) {
    std::swap(large_live_objects, large_mark_objects);
  }
  for (size_t i = 0; i < count; ++i) {
    Object* obj = objects[i];
    // Handle large objects.
    if (kUseThreadLocalAllocationStack && obj == nullptr) {
      continue;
    }
    if (!large_mark_objects->Test(obj)) {
      ++freed_large_objects;
      freed_large_object_bytes += large_object_space->Free(self, obj);
    }
  }
  timings_.EndSplit();

  timings_.StartSplit("RecordFree");
  VLOG(heap) << "Freed " << freed_objects << "/" << count
             << " objects with size " << PrettySize(freed_bytes);
  heap_->RecordFree(freed_objects + freed_large_objects, freed_bytes + freed_large_object_bytes);
  freed_objects_.FetchAndAdd(freed_objects);
  freed_large_objects_.FetchAndAdd(freed_large_objects);
  freed_bytes_.FetchAndAdd(freed_bytes);
  freed_large_object_bytes_.FetchAndAdd(freed_large_object_bytes);
  timings_.EndSplit();

  timings_.StartSplit("ResetStack");
  allocations->Reset();
  timings_.EndSplit();
}

void MarkSweep::Sweep(bool swap_bitmaps) {
  DCHECK(mark_stack_->IsEmpty());
  TimingLogger::ScopedSplit("Sweep", &timings_);
  for (const auto& space : GetHeap()->GetContinuousSpaces()) {
    if (space->IsContinuousMemMapAllocSpace()) {
      space::ContinuousMemMapAllocSpace* alloc_space = space->AsContinuousMemMapAllocSpace();
      TimingLogger::ScopedSplit split(
          alloc_space->IsZygoteSpace() ? "SweepZygoteSpace" : "SweepMallocSpace", &timings_);
      size_t freed_objects = 0;
      size_t freed_bytes = 0;
      alloc_space->Sweep(swap_bitmaps, &freed_objects, &freed_bytes);
      heap_->RecordFree(freed_objects, freed_bytes);
      freed_objects_.FetchAndAdd(freed_objects);
      freed_bytes_.FetchAndAdd(freed_bytes);
    }
  }
  SweepLargeObjects(swap_bitmaps);
}

void MarkSweep::SweepLargeObjects(bool swap_bitmaps) {
  TimingLogger::ScopedSplit("SweepLargeObjects", &timings_);
  size_t freed_objects = 0;
  size_t freed_bytes = 0;
  GetHeap()->GetLargeObjectsSpace()->Sweep(swap_bitmaps, &freed_objects, &freed_bytes);
  freed_large_objects_.FetchAndAdd(freed_objects);
  freed_large_object_bytes_.FetchAndAdd(freed_bytes);
  GetHeap()->RecordFree(freed_objects, freed_bytes);
}

// Process the "referent" field in a java.lang.ref.Reference. If the
// referent has not yet been marked, put it on the appropriate list in
// the heap for later processing.
void MarkSweep::DelayReferenceReferent(mirror::Class* klass, Object* obj) {
  DCHECK(klass != nullptr);
  DCHECK(klass->IsReferenceClass());
  DCHECK(obj != NULL);
  heap_->DelayReferenceReferent(klass, obj, IsMarkedCallback, this);
}

class MarkObjectVisitor {
 public:
  explicit MarkObjectVisitor(MarkSweep* const mark_sweep) ALWAYS_INLINE : mark_sweep_(mark_sweep) {}

  // TODO: Fix me when annotalysis works with visitors.
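  // Invoked for each reference field of the object being scanned; marks the referent and, if it
  // was not already marked, pushes it on the mark stack for later scanning.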
  void operator()(const Object* /* obj */, const Object* ref, const MemberOffset& /* offset */,
                  bool /* is_static */) const ALWAYS_INLINE
      NO_THREAD_SAFETY_ANALYSIS {
    if (kCheckLocks) {
      Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
      Locks::heap_bitmap_lock_->AssertExclusiveHeld(Thread::Current());
    }
    mark_sweep_->MarkObject(ref);
  }

 private:
  MarkSweep* const mark_sweep_;
};

// Scans an object reference. Determines the type of the reference
// and dispatches to a specialized scanning routine.
void MarkSweep::ScanObject(Object* obj) {
  MarkObjectVisitor visitor(this);
  ScanObjectVisit(obj, visitor);
}

void MarkSweep::ProcessMarkStackPausedCallback(void* arg) {
  DCHECK(arg != nullptr);
  reinterpret_cast<MarkSweep*>(arg)->ProcessMarkStack(true);
}

void MarkSweep::ProcessMarkStackParallel(size_t thread_count) {
  Thread* self = Thread::Current();
  ThreadPool* thread_pool = GetHeap()->GetThreadPool();
  const size_t chunk_size = std::min(mark_stack_->Size() / thread_count + 1,
                                     static_cast<size_t>(MarkStackTask<false>::kMaxSize));
  CHECK_GT(chunk_size, 0U);
  // Split the current mark stack up into work tasks.
  for (mirror::Object **it = mark_stack_->Begin(), **end = mark_stack_->End(); it < end; ) {
    const size_t delta = std::min(static_cast<size_t>(end - it), chunk_size);
    thread_pool->AddTask(self, new MarkStackTask<false>(thread_pool, this, delta,
                                                        const_cast<const mirror::Object**>(it)));
    it += delta;
  }
  thread_pool->SetMaxActiveWorkers(thread_count - 1);
  thread_pool->StartWorkers(self);
  thread_pool->Wait(self, true, true);
  thread_pool->StopWorkers(self);
  mark_stack_->Reset();
  CHECK_EQ(work_chunks_created_, work_chunks_deleted_) << " some of the work chunks were leaked";
}

// Scan anything that's on the mark stack.
void MarkSweep::ProcessMarkStack(bool paused) {
  timings_.StartSplit(paused ? "(Paused)ProcessMarkStack" : "ProcessMarkStack");
  size_t thread_count = GetThreadCount(paused);
  if (kParallelProcessMarkStack && thread_count > 1 &&
      mark_stack_->Size() >= kMinimumParallelMarkStackSize) {
    ProcessMarkStackParallel(thread_count);
  } else {
    // TODO: Tune this.
    static const size_t kFifoSize = 4;
    BoundedFifoPowerOfTwo<Object*, kFifoSize> prefetch_fifo;
    for (;;) {
      Object* obj = NULL;
      if (kUseMarkStackPrefetch) {
        while (!mark_stack_->IsEmpty() && prefetch_fifo.size() < kFifoSize) {
          Object* obj = mark_stack_->PopBack();
          DCHECK(obj != NULL);
          __builtin_prefetch(obj);
          prefetch_fifo.push_back(obj);
        }
        if (prefetch_fifo.empty()) {
          break;
        }
        obj = prefetch_fifo.front();
        prefetch_fifo.pop_front();
      } else {
        if (mark_stack_->IsEmpty()) {
          break;
        }
        obj = mark_stack_->PopBack();
      }
      DCHECK(obj != NULL);
      ScanObject(obj);
    }
  }
  timings_.EndSplit();
}

inline bool MarkSweep::IsMarked(const Object* object) const
    SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
  if (IsImmune(object)) {
    return true;
  }
  DCHECK(current_mark_bitmap_ != NULL);
  if (current_mark_bitmap_->HasAddress(object)) {
    return current_mark_bitmap_->Test(object);
  }
  return heap_->GetMarkBitmap()->Test(object);
}

void MarkSweep::FinishPhase() {
  TimingLogger::ScopedSplit split("FinishPhase", &timings_);
  // Can't enqueue references if we hold the mutator lock.
  Heap* heap = GetHeap();
  timings_.NewSplit("PostGcVerification");
  heap->PostGcVerification(this);

  timings_.NewSplit("RequestHeapTrim");
  heap->RequestHeapTrim();

  // Update the cumulative statistics.
  total_freed_objects_ += GetFreedObjects() + GetFreedLargeObjects();
  total_freed_bytes_ += GetFreedBytes() + GetFreedLargeObjectBytes();

  // Ensure that the mark stack is empty.
  CHECK(mark_stack_->IsEmpty());

  if (kCountScannedTypes) {
    VLOG(gc) << "MarkSweep scanned classes=" << class_count_ << " arrays=" << array_count_
             << " other=" << other_count_;
  }

  if (kCountTasks) {
    VLOG(gc) << "Total number of work chunks allocated: " << work_chunks_created_;
  }

  if (kMeasureOverhead) {
    VLOG(gc) << "Overhead time " << PrettyDuration(overhead_time_);
  }

  if (kProfileLargeObjects) {
    VLOG(gc) << "Large objects tested " << large_object_test_ << " marked " << large_object_mark_;
  }

  if (kCountClassesMarked) {
    VLOG(gc) << "Classes marked " << classes_marked_;
  }

  if (kCountJavaLangRefs) {
    VLOG(gc) << "References scanned " << reference_count_;
  }

  // Update the cumulative loggers.
  cumulative_timings_.Start();
  cumulative_timings_.AddLogger(timings_);
  cumulative_timings_.End();

  // Clear all of the spaces' mark bitmaps.
  for (const auto& space : GetHeap()->GetContinuousSpaces()) {
    accounting::SpaceBitmap* bitmap = space->GetMarkBitmap();
    if (bitmap != nullptr &&
        space->GetGcRetentionPolicy() != space::kGcRetentionPolicyNeverCollect) {
      bitmap->Clear();
    }
  }
  mark_stack_->Reset();

  // Reset the marked large objects.
  space::LargeObjectSpace* large_objects = GetHeap()->GetLargeObjectsSpace();
  large_objects->GetMarkObjects()->Clear();
}

}  // namespace collector
}  // namespace gc
}  // namespace art