mark_sweep.cc revision 407f702da4f867c074fc3c8c688b8f8c32279eff
/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "mark_sweep.h"

#include <functional>
#include <numeric>
#include <climits>
#include <vector>

#include "base/bounded_fifo.h"
#include "base/logging.h"
#include "base/macros.h"
#include "base/mutex-inl.h"
#include "base/timing_logger.h"
#include "gc/accounting/card_table-inl.h"
#include "gc/accounting/heap_bitmap.h"
#include "gc/accounting/mod_union_table.h"
#include "gc/accounting/space_bitmap-inl.h"
#include "gc/heap.h"
#include "gc/space/image_space.h"
#include "gc/space/large_object_space.h"
#include "gc/space/space-inl.h"
#include "indirect_reference_table.h"
#include "intern_table.h"
#include "jni_internal.h"
#include "monitor.h"
#include "mark_sweep-inl.h"
#include "mirror/art_field.h"
#include "mirror/art_field-inl.h"
#include "mirror/class-inl.h"
#include "mirror/class_loader.h"
#include "mirror/dex_cache.h"
#include "mirror/reference-inl.h"
#include "mirror/object-inl.h"
#include "mirror/object_array.h"
#include "mirror/object_array-inl.h"
#include "runtime.h"
#include "thread-inl.h"
#include "thread_list.h"
#include "verifier/method_verifier.h"

using ::art::mirror::ArtField;
using ::art::mirror::Class;
using ::art::mirror::Object;
using ::art::mirror::ObjectArray;

namespace art {
namespace gc {
namespace collector {

// Performance options.
static constexpr bool kUseRecursiveMark = false;
static constexpr bool kUseMarkStackPrefetch = true;
static constexpr size_t kSweepArrayChunkFreeSize = 1024;
static constexpr bool kPreCleanCards = true;

// Parallelism options.
static constexpr bool kParallelCardScan = true;
static constexpr bool kParallelRecursiveMark = true;
// Don't attempt to parallelize mark stack processing unless the mark stack is at least n
// elements. This is temporary until we reduce the overhead caused by allocating tasks, etc. Not
// having this can add overhead in ProcessReferences since we may end up doing many calls of
// ProcessMarkStack with very small mark stacks.
static constexpr size_t kMinimumParallelMarkStackSize = 128;
static constexpr bool kParallelProcessMarkStack = true;

// Profiling and information flags.
static constexpr bool kProfileLargeObjects = false;
static constexpr bool kMeasureOverhead = false;
static constexpr bool kCountTasks = false;
static constexpr bool kCountJavaLangRefs = false;
static constexpr bool kCountMarkedObjects = false;

// Turn off kCheckLocks when profiling the GC since it slows the GC down by up to 40%.
static constexpr bool kCheckLocks = kDebugLocking;
static constexpr bool kVerifyRoots = kIsDebugBuild;

// If true, revoke the rosalloc thread-local buffers at the
// checkpoint, as opposed to during the pause.
static constexpr bool kRevokeRosAllocThreadLocalBuffersAtCheckpoint = true;

void MarkSweep::BindBitmaps() {
  timings_.StartSplit("BindBitmaps");
  WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
  // Mark all of the spaces we never collect as immune.
  for (const auto& space : GetHeap()->GetContinuousSpaces()) {
    if (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyNeverCollect) {
      CHECK(immune_region_.AddContinuousSpace(space)) << "Failed to add space " << *space;
    }
  }
  timings_.EndSplit();
}

MarkSweep::MarkSweep(Heap* heap, bool is_concurrent, const std::string& name_prefix)
    : GarbageCollector(heap,
                       name_prefix +
                       (is_concurrent ? "concurrent mark sweep" : "mark sweep")),
      gc_barrier_(new Barrier(0)),
      large_object_lock_("mark sweep large object lock", kMarkSweepLargeObjectLock),
      mark_stack_lock_("mark sweep mark stack lock", kMarkSweepMarkStackLock),
      is_concurrent_(is_concurrent) {
}

void MarkSweep::InitializePhase() {
  timings_.Reset();
  TimingLogger::ScopedSplit split("InitializePhase", &timings_);
  mark_stack_ = heap_->mark_stack_.get();
  DCHECK(mark_stack_ != nullptr);
  immune_region_.Reset();
  class_count_ = 0;
  array_count_ = 0;
  other_count_ = 0;
  large_object_test_ = 0;
  large_object_mark_ = 0;
  overhead_time_ = 0;
  work_chunks_created_ = 0;
  work_chunks_deleted_ = 0;
  reference_count_ = 0;
  mark_null_count_ = 0;
  mark_immune_count_ = 0;
  mark_fastpath_count_ = 0;
  mark_slowpath_count_ = 0;
  FindDefaultSpaceBitmap();
  {
    // TODO: I don't think we should need heap bitmap lock to get the mark bitmap.
    ReaderMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
    mark_bitmap_ = heap_->GetMarkBitmap();
  }

  // Do any pre GC verification.
  timings_.NewSplit("PreGcVerification");
  heap_->PreGcVerification(this);
}

void MarkSweep::ProcessReferences(Thread* self) {
  TimingLogger::ScopedSplit split("ProcessReferences", &timings_);
  WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
  GetHeap()->ProcessReferences(timings_, clear_soft_references_, &IsMarkedCallback,
                               &MarkObjectCallback, &ProcessMarkStackPausedCallback, this);
}

void MarkSweep::PreProcessReferences() {
  if (IsConcurrent()) {
    // No reason to do this for non-concurrent GC since pre-processing soft references only
    // helps pauses.
    timings_.NewSplit("PreProcessReferences");
    GetHeap()->ProcessSoftReferences(timings_, clear_soft_references_, &IsMarkedCallback,
                                     &MarkObjectCallback, &ProcessMarkStackPausedCallback, this);
  }
}

void MarkSweep::HandleDirtyObjectsPhase() {
  TimingLogger::ScopedSplit split("(Paused)HandleDirtyObjectsPhase", &timings_);
  Thread* self = Thread::Current();
  Locks::mutator_lock_->AssertExclusiveHeld(self);

  {
    WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);

    // Re-mark root set.
    ReMarkRoots();

    // Scan dirty objects; this is only required if we are not doing concurrent GC.
    RecursiveMarkDirtyObjects(true, accounting::CardTable::kCardDirty);
  }

  ProcessReferences(self);

  // Only need to do this if we have the card mark verification on, and only during concurrent GC.
  if (GetHeap()->verify_missing_card_marks_ || GetHeap()->verify_pre_gc_heap_ ||
      GetHeap()->verify_post_gc_heap_) {
    WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
    // This second sweep makes sure that we don't have any objects in the live stack which point
    // to freed objects. These cause problems since their references may be previously freed
    // objects.
    SweepArray(GetHeap()->allocation_stack_.get(), false);
    // Since SweepArray() above resets the (active) allocation stack, we need to revoke the
    // thread-local allocation stacks that point into it.
    RevokeAllThreadLocalAllocationStacks(self);
  }

  timings_.StartSplit("PreSweepingGcVerification");
  heap_->PreSweepingGcVerification(this);
  timings_.EndSplit();

  // Ensure that nobody inserted items in the live stack after we swapped the stacks.
  ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
  CHECK_GE(live_stack_freeze_size_, GetHeap()->GetLiveStack()->Size());

  // Disallow new system weaks to prevent a race which occurs when someone adds a new system
  // weak before we sweep them. Since this new system weak may not be marked, the GC may
  // incorrectly sweep it. This also fixes a race where interning may attempt to return a strong
  // reference to a string that is about to be swept.
  Runtime::Current()->DisallowNewSystemWeaks();
}

void MarkSweep::PreCleanCards() {
  // Don't do this for non-concurrent GCs since they don't have any dirty cards.
  if (kPreCleanCards && IsConcurrent()) {
    Thread* self = Thread::Current();
    CHECK(!Locks::mutator_lock_->IsExclusiveHeld(self));
    // Process dirty cards and add dirty cards to mod union tables; this also ages cards.
    heap_->ProcessCards(timings_, false);
    // The checkpoint root marking is required to avoid a race condition which occurs if the
    // following happens during a reference write:
    // 1. mutator dirties the card (write barrier)
    // 2. GC ages the card (the above ProcessCards call)
    // 3. GC scans the object (the RecursiveMarkDirtyObjects call below)
    // 4. mutator writes the value (corresponding to the write barrier in 1.)
    // This causes the GC to age the card but not necessarily mark the reference which the
    // mutator wrote into the object stored in the card.
    // Having the checkpoint fixes this issue since it ensures that the card mark and the
    // reference write are visible to the GC before the card is scanned (this is due to locks
    // being acquired / released in the checkpoint code).
    // The other roots are also marked to help reduce the pause.
    MarkThreadRoots(self);
    // TODO: Only mark the dirty roots.
    MarkNonThreadRoots();
    MarkConcurrentRoots(
        static_cast<VisitRootFlags>(kVisitRootFlagClearRootLog | kVisitRootFlagNewRoots));
    // Process the newly aged cards.
    RecursiveMarkDirtyObjects(false, accounting::CardTable::kCardDirty - 1);
    // TODO: Empty the allocation stack to reduce the number of objects we need to test / mark
    // as live in the next GC.
  }
}

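// A minimal illustrative sketch (not ART code) of the card aging that
// RecursiveMarkDirtyObjects(false, kCardDirty - 1) above relies on. The names
// SketchAgeCard / kSketchCardDirty are hypothetical; the real card table
// lives in gc/accounting/card_table.h. Aging demotes "dirty" to "dirty - 1",
// so cards dirtied before the aging pass remain scannable at
// minimum_age = kCardDirty - 1, while cards dirtied afterwards stay at
// kCardDirty for a later pass. The checkpoint above provides the ordering
// guarantee: its lock acquire / release makes a mutator's card mark and the
// reference store behind it visible before the aged card is scanned.
namespace sketch {

static constexpr uint8_t kSketchCardDirty = 0x70;  // Matches CardTable::kCardDirty.

static inline void SketchAgeCard(uint8_t* card) {
  if (*card == kSketchCardDirty) {
    --*card;  // kCardDirty -> kCardDirty - 1: "old" dirt that is safe to pre-clean.
  }
}

}  // namespace sketch
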
void MarkSweep::RevokeAllThreadLocalAllocationStacks(Thread* self) {
  if (kUseThreadLocalAllocationStack) {
    Locks::mutator_lock_->AssertExclusiveHeld(self);
    heap_->RevokeAllThreadLocalAllocationStacks(self);
  }
}

void MarkSweep::MarkingPhase() {
  TimingLogger::ScopedSplit split("MarkingPhase", &timings_);
  Thread* self = Thread::Current();

  BindBitmaps();
  FindDefaultSpaceBitmap();

  // Process dirty cards and add dirty cards to mod union tables.
  heap_->ProcessCards(timings_, false);

  // Need to do this before the checkpoint since we don't want any threads to add references to
  // the live stack during the recursive mark.
  timings_.NewSplit("SwapStacks");
  heap_->SwapStacks(self);

  WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
  MarkRoots(self);
  live_stack_freeze_size_ = heap_->GetLiveStack()->Size();
  UpdateAndMarkModUnion();
  MarkReachableObjects();
  // Pre-clean dirtied cards to reduce pauses.
  PreCleanCards();
  PreProcessReferences();
}

void MarkSweep::UpdateAndMarkModUnion() {
  for (const auto& space : heap_->GetContinuousSpaces()) {
    if (immune_region_.ContainsSpace(space)) {
      const char* name = space->IsZygoteSpace() ? "UpdateAndMarkZygoteModUnionTable" :
          "UpdateAndMarkImageModUnionTable";
      TimingLogger::ScopedSplit split(name, &timings_);
      accounting::ModUnionTable* mod_union_table = heap_->FindModUnionTableFromSpace(space);
      CHECK(mod_union_table != nullptr);
      mod_union_table->UpdateAndMarkReferences(MarkHeapReferenceCallback, this);
    }
  }
}

void MarkSweep::MarkThreadRoots(Thread* self) {
  MarkRootsCheckpoint(self);
}

void MarkSweep::MarkReachableObjects() {
  // Mark everything allocated since the last GC as live so that we can sweep concurrently,
  // knowing that new allocations won't be marked as live.
  timings_.StartSplit("MarkStackAsLive");
  accounting::ObjectStack* live_stack = heap_->GetLiveStack();
  heap_->MarkAllocStackAsLive(live_stack);
  live_stack->Reset();
  timings_.EndSplit();
  // Recursively mark all the non-image bits set in the mark bitmap.
  RecursiveMark();
}

void MarkSweep::ReclaimPhase() {
  TimingLogger::ScopedSplit split("ReclaimPhase", &timings_);
  Thread* self = Thread::Current();

  if (!IsConcurrent()) {
    ProcessReferences(self);
  }

  {
    WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
    SweepSystemWeaks();
  }

  if (IsConcurrent()) {
    Runtime::Current()->AllowNewSystemWeaks();

    TimingLogger::ScopedSplit split("UnMarkAllocStack", &timings_);
    WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
    accounting::ObjectStack* allocation_stack = GetHeap()->allocation_stack_.get();
    if (!kPreCleanCards) {
      // The allocation stack contains things allocated since the start of the GC. These may have
      // been marked during this GC meaning they won't be eligible for reclaiming in the next
      // sticky GC. Unmark these objects so that they are eligible for reclaiming in the next
      // sticky GC.
      // There is a race here which is safely handled. Another thread such as the hprof could
      // have flushed the alloc stack after we resumed the threads. This is safe however, since
      // resetting the allocation stack zeros it out with madvise. This means that we will either
      // read NULLs or attempt to unmark a newly allocated object which will not be marked in the
      // first place.
      // We can't do this if we pre-clean cards since we will unmark objects which are no longer
      // on a dirty card since we aged cards during the pre-cleaning process.
      mirror::Object** end = allocation_stack->End();
      for (mirror::Object** it = allocation_stack->Begin(); it != end; ++it) {
        const Object* obj = *it;
        if (obj != nullptr) {
          UnMarkObjectNonNull(obj);
        }
      }
    }
  }

  {
    WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);

    // Reclaim unmarked objects.
    Sweep(false);

    // Swap the live and mark bitmaps for each space which we modified. This is an optimization
    // that enables us to not clear live bits inside of the sweep. Only swaps unbound bitmaps.
    timings_.StartSplit("SwapBitmaps");
    SwapBitmaps();
    timings_.EndSplit();

    // Unbind the live and mark bitmaps.
    TimingLogger::ScopedSplit split("UnBindBitmaps", &timings_);
    GetHeap()->UnBindBitmaps();
  }
}

void MarkSweep::FindDefaultSpaceBitmap() {
  TimingLogger::ScopedSplit split("FindDefaultMarkBitmap", &timings_);
  for (const auto& space : GetHeap()->GetContinuousSpaces()) {
    accounting::SpaceBitmap* bitmap = space->GetMarkBitmap();
    if (bitmap != nullptr &&
        space->GetGcRetentionPolicy() == space::kGcRetentionPolicyAlwaysCollect) {
      current_space_bitmap_ = bitmap;
      return;
    }
  }
  GetHeap()->DumpSpaces();
  LOG(FATAL) << "Could not find a default mark bitmap";
}

void MarkSweep::ExpandMarkStack() {
  ResizeMarkStack(mark_stack_->Capacity() * 2);
}

void MarkSweep::ResizeMarkStack(size_t new_size) {
  // Rare case, no need to have Thread::Current be a parameter.
  if (UNLIKELY(mark_stack_->Size() < mark_stack_->Capacity())) {
    // Someone else acquired the lock and expanded the mark stack before us.
    return;
  }
  std::vector<Object*> temp(mark_stack_->Begin(), mark_stack_->End());
  CHECK_LE(mark_stack_->Size(), new_size);
  mark_stack_->Resize(new_size);
  for (const auto& obj : temp) {
    mark_stack_->PushBack(obj);
  }
}

inline void MarkSweep::MarkObjectNonNullParallel(Object* obj) {
  DCHECK(obj != NULL);
  if (MarkObjectParallel(obj)) {
    MutexLock mu(Thread::Current(), mark_stack_lock_);
    if (UNLIKELY(mark_stack_->Size() >= mark_stack_->Capacity())) {
      ExpandMarkStack();
    }
    // The object must be pushed on to the mark stack.
    mark_stack_->PushBack(obj);
  }
}

mirror::Object* MarkSweep::MarkObjectCallback(mirror::Object* obj, void* arg) {
  MarkSweep* mark_sweep = reinterpret_cast<MarkSweep*>(arg);
  mark_sweep->MarkObject(obj);
  return obj;
}

void MarkSweep::MarkHeapReferenceCallback(mirror::HeapReference<mirror::Object>* ref, void* arg) {
  reinterpret_cast<MarkSweep*>(arg)->MarkObject(ref->AsMirrorPtr());
}

inline void MarkSweep::UnMarkObjectNonNull(const Object* obj) {
  DCHECK(!immune_region_.ContainsObject(obj));
  if (kUseBrooksPointer) {
    // Verify all the objects have the correct Brooks pointer installed.
    obj->AssertSelfBrooksPointer();
  }
  // Try to take advantage of locality of references within a space, failing this find the space
  // the hard way.
  accounting::SpaceBitmap* object_bitmap = current_space_bitmap_;
  if (UNLIKELY(!object_bitmap->HasAddress(obj))) {
    accounting::SpaceBitmap* new_bitmap = mark_bitmap_->GetContinuousSpaceBitmap(obj);
    if (LIKELY(new_bitmap != NULL)) {
      object_bitmap = new_bitmap;
    } else {
      MarkLargeObject(obj, false);
      return;
    }
  }
  DCHECK(object_bitmap->HasAddress(obj));
  object_bitmap->Clear(obj);
}

inline void MarkSweep::MarkObjectNonNull(Object* obj) {
  DCHECK(obj != nullptr);
  if (kUseBrooksPointer) {
    // Verify all the objects have the correct Brooks pointer installed.
    obj->AssertSelfBrooksPointer();
  }
  if (immune_region_.ContainsObject(obj)) {
    if (kCountMarkedObjects) {
      ++mark_immune_count_;
    }
    DCHECK(IsMarked(obj));
    return;
  }
  // Try to take advantage of locality of references within a space, failing this find the space
  // the hard way.
  accounting::SpaceBitmap* object_bitmap = current_space_bitmap_;
  if (UNLIKELY(!object_bitmap->HasAddress(obj))) {
    object_bitmap = mark_bitmap_->GetContinuousSpaceBitmap(obj);
    if (kCountMarkedObjects) {
      ++mark_slowpath_count_;
    }
    if (UNLIKELY(object_bitmap == nullptr)) {
      MarkLargeObject(obj, true);
      return;
    }
  } else if (kCountMarkedObjects) {
    ++mark_fastpath_count_;
  }
  // This object was not previously marked.
  if (!object_bitmap->Set(obj)) {
    PushOnMarkStack(obj);
  }
}

inline void MarkSweep::PushOnMarkStack(Object* obj) {
  if (UNLIKELY(mark_stack_->Size() >= mark_stack_->Capacity())) {
    // Lock is not needed but is here anyway to please annotalysis.
    MutexLock mu(Thread::Current(), mark_stack_lock_);
    ExpandMarkStack();
  }
  // The object must be pushed on to the mark stack.
  mark_stack_->PushBack(obj);
}

// Rare case, probably not worth inlining since it will increase instruction cache miss rate.
bool MarkSweep::MarkLargeObject(const Object* obj, bool set) {
  // TODO: support >1 discontinuous space.
  space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
  accounting::ObjectSet* large_objects = large_object_space->GetMarkObjects();
  if (kProfileLargeObjects) {
    ++large_object_test_;
  }
  if (UNLIKELY(!large_objects->Test(obj))) {
    if (!large_object_space->Contains(obj)) {
      LOG(ERROR) << "Tried to mark " << obj << " not contained by any spaces";
      LOG(ERROR) << "Attempting to see if it's a bad root";
      VerifyRoots();
      LOG(FATAL) << "Can't mark bad root";
    }
    if (kProfileLargeObjects) {
      ++large_object_mark_;
    }
    if (set) {
      large_objects->Set(obj);
    } else {
      large_objects->Clear(obj);
    }
    return true;
  }
  return false;
}

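// An illustrative sketch (not ART code) of the address arithmetic behind the
// HasAddress / Set / Clear calls above. The field and constant names here are
// assumptions; ART's real implementation is accounting::SpaceBitmap in
// gc/accounting/space_bitmap-inl.h. The idea: one mark bit per kAlignment
// bytes of heap, so an object's bit index is its offset from the space begin
// divided by the alignment.
namespace sketch {

static constexpr size_t kSketchAlignment = 8;  // Bytes of heap per mark bit.
static constexpr size_t kSketchBitsPerWord = sizeof(uintptr_t) * 8;

struct SketchSpaceBitmap {
  uintptr_t heap_begin;
  std::vector<uintptr_t> words;

  size_t BitIndex(uintptr_t addr) const {
    return (addr - heap_begin) / kSketchAlignment;
  }

  bool HasAddress(uintptr_t addr) const {
    return addr >= heap_begin && BitIndex(addr) < words.size() * kSketchBitsPerWord;
  }

  // Like Set() above: returns the old bit, so a false result means "this
  // object was not previously marked" and it must be pushed on the mark stack.
  bool Set(uintptr_t addr) {
    const size_t bit = BitIndex(addr);
    const uintptr_t mask = static_cast<uintptr_t>(1) << (bit % kSketchBitsPerWord);
    uintptr_t& word = words[bit / kSketchBitsPerWord];
    const bool old_bit = (word & mask) != 0;
    word |= mask;
    return old_bit;
  }
};

}  // namespace sketch
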
inline bool MarkSweep::MarkObjectParallel(const Object* obj) {
  DCHECK(obj != nullptr);
  if (kUseBrooksPointer) {
    // Verify all the objects have the correct Brooks pointer installed.
    obj->AssertSelfBrooksPointer();
  }
  if (immune_region_.ContainsObject(obj)) {
    DCHECK(IsMarked(obj));
    return false;
  }
  // Try to take advantage of locality of references within a space, failing this find the space
  // the hard way.
  accounting::SpaceBitmap* object_bitmap = current_space_bitmap_;
  if (UNLIKELY(!object_bitmap->HasAddress(obj))) {
    accounting::SpaceBitmap* new_bitmap = mark_bitmap_->GetContinuousSpaceBitmap(obj);
    if (new_bitmap != NULL) {
      object_bitmap = new_bitmap;
    } else {
      // TODO: Remove the Thread::Current here?
      // TODO: Convert this to some kind of atomic marking?
      MutexLock mu(Thread::Current(), large_object_lock_);
      return MarkLargeObject(obj, true);
    }
  }
  // Return true if the object was not previously marked.
  return !object_bitmap->AtomicTestAndSet(obj);
}

// Used to mark objects when processing the mark stack. If an object is null, it is not marked.
inline void MarkSweep::MarkObject(Object* obj) {
  if (obj != nullptr) {
    MarkObjectNonNull(obj);
  } else if (kCountMarkedObjects) {
    ++mark_null_count_;
  }
}

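// An illustrative sketch (not ART code) of the AtomicTestAndSet used above.
// ART's real implementation lives in SpaceBitmap; this version assumes the
// bitmap word is a std::atomic<uintptr_t> (<atomic> required). A fetch_or
// returns the previous word, so exactly one racing thread observes the
// 0 -> 1 transition for a given object and wins the right to push it.
namespace sketch {

static inline bool SketchAtomicTestAndSet(std::atomic<uintptr_t>* word, uintptr_t mask) {
  const uintptr_t old_word = word->fetch_or(mask, std::memory_order_relaxed);
  return (old_word & mask) != 0;  // True if the bit was already set.
}

}  // namespace sketch
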
void MarkSweep::MarkRootParallelCallback(Object** root, void* arg, uint32_t /*thread_id*/,
                                         RootType /*root_type*/) {
  reinterpret_cast<MarkSweep*>(arg)->MarkObjectNonNullParallel(*root);
}

void MarkSweep::VerifyRootMarked(Object** root, void* arg, uint32_t /*thread_id*/,
                                 RootType /*root_type*/) {
  CHECK(reinterpret_cast<MarkSweep*>(arg)->IsMarked(*root));
}

void MarkSweep::MarkRootCallback(Object** root, void* arg, uint32_t /*thread_id*/,
                                 RootType /*root_type*/) {
  reinterpret_cast<MarkSweep*>(arg)->MarkObjectNonNull(*root);
}

void MarkSweep::VerifyRootCallback(const Object* root, void* arg, size_t vreg,
                                   const StackVisitor* visitor) {
  reinterpret_cast<MarkSweep*>(arg)->VerifyRoot(root, vreg, visitor);
}

void MarkSweep::VerifyRoot(const Object* root, size_t vreg, const StackVisitor* visitor) {
  // See if the root is on any space bitmap.
  if (GetHeap()->GetLiveBitmap()->GetContinuousSpaceBitmap(root) == NULL) {
    space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
    if (!large_object_space->Contains(root)) {
      LOG(ERROR) << "Found invalid root: " << root;
      if (visitor != NULL) {
        LOG(ERROR) << visitor->DescribeLocation() << " in VReg: " << vreg;
      }
    }
  }
}

void MarkSweep::VerifyRoots() {
  Runtime::Current()->GetThreadList()->VerifyRoots(VerifyRootCallback, this);
}

void MarkSweep::MarkRoots(Thread* self) {
  if (Locks::mutator_lock_->IsExclusiveHeld(self)) {
    // If we exclusively hold the mutator lock, all threads must be suspended.
    timings_.StartSplit("MarkRoots");
    Runtime::Current()->VisitRoots(MarkRootCallback, this);
    timings_.EndSplit();
    RevokeAllThreadLocalAllocationStacks(self);
  } else {
    MarkThreadRoots(self);
    // At this point the live stack should no longer have any mutators which push into it.
    MarkNonThreadRoots();
    MarkConcurrentRoots(
        static_cast<VisitRootFlags>(kVisitRootFlagAllRoots | kVisitRootFlagStartLoggingNewRoots));
  }
}

void MarkSweep::MarkNonThreadRoots() {
  timings_.StartSplit("MarkNonThreadRoots");
  Runtime::Current()->VisitNonThreadRoots(MarkRootCallback, this);
  timings_.EndSplit();
}

void MarkSweep::MarkConcurrentRoots(VisitRootFlags flags) {
  timings_.StartSplit("MarkConcurrentRoots");
  // Visit all runtime roots and clear dirty flags.
  Runtime::Current()->VisitConcurrentRoots(MarkRootCallback, this, flags);
  timings_.EndSplit();
}

class ScanObjectVisitor {
 public:
  explicit ScanObjectVisitor(MarkSweep* const mark_sweep) ALWAYS_INLINE
      : mark_sweep_(mark_sweep) {}

  void operator()(Object* obj) const ALWAYS_INLINE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
    if (kCheckLocks) {
      Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
      Locks::heap_bitmap_lock_->AssertExclusiveHeld(Thread::Current());
    }
    mark_sweep_->ScanObject(obj);
  }

 private:
  MarkSweep* const mark_sweep_;
};

class DelayReferenceReferentVisitor {
 public:
  explicit DelayReferenceReferentVisitor(MarkSweep* collector) : collector_(collector) {
  }

  void operator()(mirror::Class* klass, mirror::Reference* ref) const
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
    collector_->DelayReferenceReferent(klass, ref);
  }

 private:
  MarkSweep* const collector_;
};

template <bool kUseFinger = false>
class MarkStackTask : public Task {
 public:
  MarkStackTask(ThreadPool* thread_pool, MarkSweep* mark_sweep, size_t mark_stack_size,
                Object** mark_stack)
      : mark_sweep_(mark_sweep),
        thread_pool_(thread_pool),
        mark_stack_pos_(mark_stack_size) {
    // We may have to copy part of an existing mark stack when another mark stack overflows.
    if (mark_stack_size != 0) {
      DCHECK(mark_stack != NULL);
      // TODO: Check performance?
      std::copy(mark_stack, mark_stack + mark_stack_size, mark_stack_);
    }
    if (kCountTasks) {
      ++mark_sweep_->work_chunks_created_;
    }
  }

  static const size_t kMaxSize = 1 * KB;

 protected:
  class MarkObjectParallelVisitor {
   public:
    explicit MarkObjectParallelVisitor(MarkStackTask<kUseFinger>* chunk_task,
                                       MarkSweep* mark_sweep) ALWAYS_INLINE
        : chunk_task_(chunk_task), mark_sweep_(mark_sweep) {}

    void operator()(Object* obj, MemberOffset offset, bool /* static */) const ALWAYS_INLINE
        SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
      mirror::Object* ref = obj->GetFieldObject<mirror::Object>(offset, false);
      if (ref != nullptr && mark_sweep_->MarkObjectParallel(ref)) {
        if (kUseFinger) {
          android_memory_barrier();
          if (reinterpret_cast<uintptr_t>(ref) >=
              static_cast<uintptr_t>(mark_sweep_->atomic_finger_)) {
            return;
          }
        }
        chunk_task_->MarkStackPush(ref);
      }
    }

   private:
    MarkStackTask<kUseFinger>* const chunk_task_;
    MarkSweep* const mark_sweep_;
  };

  class ScanObjectParallelVisitor {
   public:
    explicit ScanObjectParallelVisitor(MarkStackTask<kUseFinger>* chunk_task) ALWAYS_INLINE
        : chunk_task_(chunk_task) {}

    // No thread safety analysis since multiple threads will use this visitor.
    void operator()(Object* obj) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
        EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
      MarkSweep* const mark_sweep = chunk_task_->mark_sweep_;
      MarkObjectParallelVisitor mark_visitor(chunk_task_, mark_sweep);
      DelayReferenceReferentVisitor ref_visitor(mark_sweep);
      mark_sweep->ScanObjectVisit(obj, mark_visitor, ref_visitor);
    }

   private:
    MarkStackTask<kUseFinger>* const chunk_task_;
  };

  virtual ~MarkStackTask() {
    // Make sure that we have cleared our mark stack.
    DCHECK_EQ(mark_stack_pos_, 0U);
    if (kCountTasks) {
      ++mark_sweep_->work_chunks_deleted_;
    }
  }

  MarkSweep* const mark_sweep_;
  ThreadPool* const thread_pool_;
  // Thread local mark stack for this task.
  Object* mark_stack_[kMaxSize];
  // Mark stack position.
  size_t mark_stack_pos_;

  void MarkStackPush(Object* obj) ALWAYS_INLINE {
    if (UNLIKELY(mark_stack_pos_ == kMaxSize)) {
      // Mark stack overflow, give 1/2 the stack to the thread pool as a new work task.
      mark_stack_pos_ /= 2;
      auto* task = new MarkStackTask(thread_pool_, mark_sweep_, kMaxSize - mark_stack_pos_,
                                     mark_stack_ + mark_stack_pos_);
      thread_pool_->AddTask(Thread::Current(), task);
    }
    DCHECK(obj != nullptr);
    DCHECK_LT(mark_stack_pos_, kMaxSize);
    mark_stack_[mark_stack_pos_++] = obj;
  }

  virtual void Finalize() {
    delete this;
  }

  // Scans all of the objects.
  virtual void Run(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
    ScanObjectParallelVisitor visitor(this);
    // TODO: Tune this.
    static const size_t kFifoSize = 4;
    BoundedFifoPowerOfTwo<Object*, kFifoSize> prefetch_fifo;
    for (;;) {
      Object* obj = nullptr;
      if (kUseMarkStackPrefetch) {
        while (mark_stack_pos_ != 0 && prefetch_fifo.size() < kFifoSize) {
          Object* obj = mark_stack_[--mark_stack_pos_];
          DCHECK(obj != nullptr);
          __builtin_prefetch(obj);
          prefetch_fifo.push_back(obj);
        }
        if (UNLIKELY(prefetch_fifo.empty())) {
          break;
        }
        obj = prefetch_fifo.front();
        prefetch_fifo.pop_front();
      } else {
        if (UNLIKELY(mark_stack_pos_ == 0)) {
          break;
        }
        obj = mark_stack_[--mark_stack_pos_];
      }
      DCHECK(obj != nullptr);
      visitor(obj);
    }
  }
};

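// An illustrative sketch (not ART code) isolating the overflow policy of
// MarkStackTask::MarkStackPush above: when the fixed-size local stack fills
// up, keep the bottom half and donate the top half to the thread pool as a
// fresh task. This bounds each task's stack while feeding idle workers. The
// donate callback is a hypothetical stand-in for ThreadPool::AddTask.
namespace sketch {

template <typename T, size_t kCapacity, typename DonateFn>
size_t SketchSplitOnOverflow(T (&stack)[kCapacity], size_t pos, DonateFn donate) {
  if (pos == kCapacity) {
    const size_t keep = pos / 2;
    donate(stack + keep, kCapacity - keep);  // The new task copies the top half.
    return keep;  // The caller continues pushing at the halved position.
  }
  return pos;
}

}  // namespace sketch
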
class CardScanTask : public MarkStackTask<false> {
 public:
  CardScanTask(ThreadPool* thread_pool, MarkSweep* mark_sweep, accounting::SpaceBitmap* bitmap,
               byte* begin, byte* end, byte minimum_age, size_t mark_stack_size,
               Object** mark_stack_obj)
      : MarkStackTask<false>(thread_pool, mark_sweep, mark_stack_size, mark_stack_obj),
        bitmap_(bitmap),
        begin_(begin),
        end_(end),
        minimum_age_(minimum_age) {
  }

 protected:
  accounting::SpaceBitmap* const bitmap_;
  byte* const begin_;
  byte* const end_;
  const byte minimum_age_;

  virtual void Finalize() {
    delete this;
  }

  virtual void Run(Thread* self) NO_THREAD_SAFETY_ANALYSIS {
    ScanObjectParallelVisitor visitor(this);
    accounting::CardTable* card_table = mark_sweep_->GetHeap()->GetCardTable();
    size_t cards_scanned = card_table->Scan(bitmap_, begin_, end_, visitor, minimum_age_);
    VLOG(heap) << "Parallel scanning cards " << reinterpret_cast<void*>(begin_) << " - "
               << reinterpret_cast<void*>(end_) << " = " << cards_scanned;
    // Finish by emptying our local mark stack.
    MarkStackTask::Run(self);
  }
};

size_t MarkSweep::GetThreadCount(bool paused) const {
  if (heap_->GetThreadPool() == nullptr || !heap_->CareAboutPauseTimes()) {
    return 0;
  }
  if (paused) {
    return heap_->GetParallelGCThreadCount() + 1;
  } else {
    return heap_->GetConcGCThreadCount() + 1;
  }
}

void MarkSweep::ScanGrayObjects(bool paused, byte minimum_age) {
  accounting::CardTable* card_table = GetHeap()->GetCardTable();
  ThreadPool* thread_pool = GetHeap()->GetThreadPool();
  size_t thread_count = GetThreadCount(paused);
  // The parallel version with only one thread is faster for card scanning. TODO: fix.
  if (kParallelCardScan && thread_count > 0) {
    Thread* self = Thread::Current();
    // Can't have a different split for each space since multiple spaces can have their cards
    // being scanned at the same time.
    timings_.StartSplit(paused ? "(Paused)ScanGrayObjects" : "ScanGrayObjects");
    // Try to take some of the mark stack since we can pass this off to the worker tasks.
    Object** mark_stack_begin = mark_stack_->Begin();
    Object** mark_stack_end = mark_stack_->End();
    const size_t mark_stack_size = mark_stack_end - mark_stack_begin;
    // Estimated number of work tasks we will create.
    const size_t mark_stack_tasks = GetHeap()->GetContinuousSpaces().size() * thread_count;
    DCHECK_NE(mark_stack_tasks, 0U);
    const size_t mark_stack_delta = std::min(CardScanTask::kMaxSize / 2,
                                             mark_stack_size / mark_stack_tasks + 1);
    for (const auto& space : GetHeap()->GetContinuousSpaces()) {
      if (space->GetMarkBitmap() == nullptr) {
        continue;
      }
      byte* card_begin = space->Begin();
      byte* card_end = space->End();
      // Align up the end address. For example, the image space's end
      // may not be card-size-aligned.
      card_end = AlignUp(card_end, accounting::CardTable::kCardSize);
      DCHECK(IsAligned<accounting::CardTable::kCardSize>(card_begin));
      DCHECK(IsAligned<accounting::CardTable::kCardSize>(card_end));
      // Calculate how many bytes of heap we will scan.
      const size_t address_range = card_end - card_begin;
      // Calculate how much address range each task gets.
      const size_t card_delta = RoundUp(address_range / thread_count + 1,
                                        accounting::CardTable::kCardSize);
      // Create the worker tasks for this space.
      while (card_begin != card_end) {
        // Add a range of cards.
        size_t addr_remaining = card_end - card_begin;
        size_t card_increment = std::min(card_delta, addr_remaining);
        // Take from the back of the mark stack.
        size_t mark_stack_remaining = mark_stack_end - mark_stack_begin;
        size_t mark_stack_increment = std::min(mark_stack_delta, mark_stack_remaining);
        mark_stack_end -= mark_stack_increment;
        mark_stack_->PopBackCount(static_cast<int32_t>(mark_stack_increment));
        DCHECK_EQ(mark_stack_end, mark_stack_->End());
        // Add the new task to the thread pool.
        auto* task = new CardScanTask(thread_pool, this, space->GetMarkBitmap(), card_begin,
                                      card_begin + card_increment, minimum_age,
                                      mark_stack_increment, mark_stack_end);
        thread_pool->AddTask(self, task);
        card_begin += card_increment;
      }
    }

    // Note: the card scan below may dirty new cards (and scan them)
    // as a side effect when a Reference object is encountered and
    // queued during the marking. See b/11465268.
    thread_pool->SetMaxActiveWorkers(thread_count - 1);
    thread_pool->StartWorkers(self);
    thread_pool->Wait(self, true, true);
    thread_pool->StopWorkers(self);
    timings_.EndSplit();
  } else {
    for (const auto& space : GetHeap()->GetContinuousSpaces()) {
      if (space->GetMarkBitmap() != nullptr) {
        // Image spaces are handled properly since live == marked for them.
        switch (space->GetGcRetentionPolicy()) {
          case space::kGcRetentionPolicyNeverCollect:
            timings_.StartSplit(paused ? "(Paused)ScanGrayImageSpaceObjects" :
                "ScanGrayImageSpaceObjects");
            break;
          case space::kGcRetentionPolicyFullCollect:
            timings_.StartSplit(paused ? "(Paused)ScanGrayZygoteSpaceObjects" :
                "ScanGrayZygoteSpaceObjects");
            break;
          case space::kGcRetentionPolicyAlwaysCollect:
            timings_.StartSplit(paused ? "(Paused)ScanGrayAllocSpaceObjects" :
                "ScanGrayAllocSpaceObjects");
            break;
        }
        ScanObjectVisitor visitor(this);
        card_table->Scan(space->GetMarkBitmap(), space->Begin(), space->End(), visitor,
                         minimum_age);
        timings_.EndSplit();
      }
    }
  }
}

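// An illustrative sketch (not ART code) of the partitioning arithmetic used
// by ScanGrayObjects above. Each space's [begin, end) byte range is cut into
// roughly thread_count card-aligned chunks; the "+ 1" guarantees a non-zero
// chunk size and the round-up keeps every chunk boundary on a card edge.
// The names below are hypothetical; CardTable::kCardSize is 128 bytes at
// this revision.
namespace sketch {

static constexpr size_t kSketchCardSize = 128;

static inline size_t SketchRoundUp(size_t x, size_t n) {  // n must be a power of two.
  return (x + n - 1) & ~(n - 1);
}

static inline size_t SketchCardChunk(size_t address_range, size_t thread_count) {
  return SketchRoundUp(address_range / thread_count + 1, kSketchCardSize);
}

// Example: a 1 MiB space split across 4 threads yields chunks of
// SketchCardChunk(1 << 20, 4) == 262272 bytes (262145 rounded up to a
// multiple of 128).
}  // namespace sketch
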
class RecursiveMarkTask : public MarkStackTask<false> {
 public:
  RecursiveMarkTask(ThreadPool* thread_pool, MarkSweep* mark_sweep,
                    accounting::SpaceBitmap* bitmap, uintptr_t begin, uintptr_t end)
      : MarkStackTask<false>(thread_pool, mark_sweep, 0, NULL),
        bitmap_(bitmap),
        begin_(begin),
        end_(end) {
  }

 protected:
  accounting::SpaceBitmap* const bitmap_;
  const uintptr_t begin_;
  const uintptr_t end_;

  virtual void Finalize() {
    delete this;
  }

  // Scans all of the objects.
  virtual void Run(Thread* self) NO_THREAD_SAFETY_ANALYSIS {
    ScanObjectParallelVisitor visitor(this);
    bitmap_->VisitMarkedRange(begin_, end_, visitor);
    // Finish by emptying our local mark stack.
    MarkStackTask::Run(self);
  }
};

// Populates the mark stack based on the set of marked objects and
// recursively marks until the mark stack is emptied.
void MarkSweep::RecursiveMark() {
  TimingLogger::ScopedSplit split("RecursiveMark", &timings_);
  // RecursiveMark will build the lists of known instances of the Reference classes. See
  // DelayReferenceReferent for details.
  if (kUseRecursiveMark) {
    const bool partial = GetGcType() == kGcTypePartial;
    ScanObjectVisitor scan_visitor(this);
    auto* self = Thread::Current();
    ThreadPool* thread_pool = heap_->GetThreadPool();
    size_t thread_count = GetThreadCount(false);
    const bool parallel = kParallelRecursiveMark && thread_count > 1;
    mark_stack_->Reset();
    for (const auto& space : GetHeap()->GetContinuousSpaces()) {
      if ((space->GetGcRetentionPolicy() == space::kGcRetentionPolicyAlwaysCollect) ||
          (!partial && space->GetGcRetentionPolicy() == space::kGcRetentionPolicyFullCollect)) {
        current_space_bitmap_ = space->GetMarkBitmap();
        if (current_space_bitmap_ == nullptr) {
          continue;
        }
        if (parallel) {
          // We will use the mark stack in the future.
          // CHECK(mark_stack_->IsEmpty());
          // This function does not handle heap end increasing, so we must use the space end.
          uintptr_t begin = reinterpret_cast<uintptr_t>(space->Begin());
          uintptr_t end = reinterpret_cast<uintptr_t>(space->End());
          atomic_finger_ = static_cast<int32_t>(0xFFFFFFFF);

          // Create a few worker tasks.
          const size_t n = thread_count * 2;
          while (begin != end) {
            uintptr_t start = begin;
            uintptr_t delta = (end - begin) / n;
            delta = RoundUp(delta, KB);
            if (delta < 16 * KB) delta = end - begin;
            begin += delta;
            auto* task = new RecursiveMarkTask(thread_pool, this, current_space_bitmap_, start,
                                               begin);
            thread_pool->AddTask(self, task);
          }
          thread_pool->SetMaxActiveWorkers(thread_count - 1);
          thread_pool->StartWorkers(self);
          thread_pool->Wait(self, true, true);
          thread_pool->StopWorkers(self);
        } else {
          // This function does not handle heap end increasing, so we must use the space end.
          uintptr_t begin = reinterpret_cast<uintptr_t>(space->Begin());
          uintptr_t end = reinterpret_cast<uintptr_t>(space->End());
          current_space_bitmap_->VisitMarkedRange(begin, end, scan_visitor);
        }
      }
    }
  }
  ProcessMarkStack(false);
}

mirror::Object* MarkSweep::IsMarkedCallback(mirror::Object* object, void* arg) {
  if (reinterpret_cast<MarkSweep*>(arg)->IsMarked(object)) {
    return object;
  }
  return nullptr;
}

void MarkSweep::RecursiveMarkDirtyObjects(bool paused, byte minimum_age) {
  ScanGrayObjects(paused, minimum_age);
  ProcessMarkStack(paused);
}

void MarkSweep::ReMarkRoots() {
  Locks::mutator_lock_->AssertExclusiveHeld(Thread::Current());
  timings_.StartSplit("(Paused)ReMarkRoots");
  Runtime::Current()->VisitRoots(
      MarkRootCallback, this, static_cast<VisitRootFlags>(kVisitRootFlagNewRoots |
                                                          kVisitRootFlagStopLoggingNewRoots |
                                                          kVisitRootFlagClearRootLog));
  timings_.EndSplit();
  if (kVerifyRoots) {
    timings_.StartSplit("(Paused)VerifyRoots");
    Runtime::Current()->VisitRoots(VerifyRootMarked, this);
    timings_.EndSplit();
  }
}

void MarkSweep::SweepSystemWeaks() {
  Runtime* runtime = Runtime::Current();
  timings_.StartSplit("SweepSystemWeaks");
  runtime->SweepSystemWeaks(IsMarkedCallback, this);
  timings_.EndSplit();
}

mirror::Object* MarkSweep::VerifySystemWeakIsLiveCallback(Object* obj, void* arg) {
  reinterpret_cast<MarkSweep*>(arg)->VerifyIsLive(obj);
  // We don't actually want to sweep the object, so let's return "marked".
  return obj;
}

void MarkSweep::VerifyIsLive(const Object* obj) {
  Heap* heap = GetHeap();
  if (!heap->GetLiveBitmap()->Test(obj)) {
    space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
    if (!large_object_space->GetLiveObjects()->Test(obj)) {
      if (std::find(heap->allocation_stack_->Begin(), heap->allocation_stack_->End(), obj) ==
          heap->allocation_stack_->End()) {
        // Object not found!
        heap->DumpSpaces();
        LOG(FATAL) << "Found dead object " << obj;
      }
    }
  }
}

void MarkSweep::VerifySystemWeaks() {
  // Verify system weaks, uses a special object visitor which returns the input object.
  Runtime::Current()->SweepSystemWeaks(VerifySystemWeakIsLiveCallback, this);
}

class CheckpointMarkThreadRoots : public Closure {
 public:
  explicit CheckpointMarkThreadRoots(MarkSweep* mark_sweep) : mark_sweep_(mark_sweep) {}

  virtual void Run(Thread* thread) NO_THREAD_SAFETY_ANALYSIS {
    ATRACE_BEGIN("Marking thread roots");
    // Note: self is not necessarily equal to thread since thread may be suspended.
    Thread* self = Thread::Current();
    CHECK(thread == self || thread->IsSuspended() || thread->GetState() == kWaitingPerformingGc)
        << thread->GetState() << " thread " << thread << " self " << self;
    thread->VisitRoots(MarkSweep::MarkRootParallelCallback, mark_sweep_);
    ATRACE_END();
    if (kUseThreadLocalAllocationStack) {
      thread->RevokeThreadLocalAllocationStack();
    }
    if (kRevokeRosAllocThreadLocalBuffersAtCheckpoint) {
      mark_sweep_->GetHeap()->RevokeRosAllocThreadLocalBuffers(thread);
    }
    mark_sweep_->GetBarrier().Pass(self);
  }

 private:
  MarkSweep* mark_sweep_;
};

void MarkSweep::MarkRootsCheckpoint(Thread* self) {
  CheckpointMarkThreadRoots check_point(this);
  timings_.StartSplit("MarkRootsCheckpoint");
  ThreadList* thread_list = Runtime::Current()->GetThreadList();
  // Request that the checkpoint be run on all threads, returning a count of the threads that
  // must run through the barrier, including self.
  size_t barrier_count = thread_list->RunCheckpoint(&check_point);
  // Release locks then wait for all mutator threads to pass the barrier.
  // TODO: optimize to not release locks when there are no threads to wait for.
  Locks::heap_bitmap_lock_->ExclusiveUnlock(self);
  Locks::mutator_lock_->SharedUnlock(self);
  ThreadState old_state = self->SetState(kWaitingForCheckPointsToRun);
  CHECK_EQ(old_state, kWaitingPerformingGc);
  gc_barrier_->Increment(self, barrier_count);
  self->SetState(kWaitingPerformingGc);
  Locks::mutator_lock_->SharedLock(self);
  Locks::heap_bitmap_lock_->ExclusiveLock(self);
  timings_.EndSplit();
}

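// An illustrative sketch (not ART code) of the rendezvous in
// MarkRootsCheckpoint above. RunCheckpoint returns how many threads will
// eventually call Pass(); the GC then blocks in Increment() until that many
// have done so. SketchBarrier is a simplified stand-in for art::Barrier and
// assumes <mutex> and <condition_variable> are available.
namespace sketch {

class SketchBarrier {
 public:
  // Called by each mutator thread as it finishes its checkpoint work.
  void Pass() {
    std::lock_guard<std::mutex> lock(mu_);
    ++passed_;
    cv_.notify_all();
  }

  // Called by the GC: block until `expected` threads have passed.
  void Increment(int expected) {
    std::unique_lock<std::mutex> lock(mu_);
    cv_.wait(lock, [&] { return passed_ >= expected; });
    passed_ -= expected;
  }

 private:
  std::mutex mu_;
  std::condition_variable cv_;
  int passed_ = 0;
};

}  // namespace sketch
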
void MarkSweep::SweepArray(accounting::ObjectStack* allocations, bool swap_bitmaps) {
  timings_.StartSplit("SweepArray");
  Thread* self = Thread::Current();
  mirror::Object* chunk_free_buffer[kSweepArrayChunkFreeSize];
  size_t chunk_free_pos = 0;
  size_t freed_bytes = 0;
  size_t freed_large_object_bytes = 0;
  size_t freed_objects = 0;
  size_t freed_large_objects = 0;
  // How many objects are left in the array, modified after each space is swept.
  Object** objects = const_cast<Object**>(allocations->Begin());
  size_t count = allocations->Size();
  // Change the order to ensure that the non-moving space is swept last as an optimization.
  std::vector<space::ContinuousSpace*> sweep_spaces;
  space::ContinuousSpace* non_moving_space = nullptr;
  for (space::ContinuousSpace* space : heap_->GetContinuousSpaces()) {
    if (space->IsAllocSpace() && !immune_region_.ContainsSpace(space) &&
        space->GetLiveBitmap() != nullptr) {
      if (space == heap_->GetNonMovingSpace()) {
        non_moving_space = space;
      } else {
        sweep_spaces.push_back(space);
      }
    }
  }
  // Unlikely to sweep a significant amount of non-movable objects, so we do these after the
  // other alloc spaces as an optimization.
  if (non_moving_space != nullptr) {
    sweep_spaces.push_back(non_moving_space);
  }
  // Start by sweeping the continuous spaces.
  for (space::ContinuousSpace* space : sweep_spaces) {
    space::AllocSpace* alloc_space = space->AsAllocSpace();
    accounting::SpaceBitmap* live_bitmap = space->GetLiveBitmap();
    accounting::SpaceBitmap* mark_bitmap = space->GetMarkBitmap();
    if (swap_bitmaps) {
      std::swap(live_bitmap, mark_bitmap);
    }
    Object** out = objects;
    for (size_t i = 0; i < count; ++i) {
      Object* obj = objects[i];
      if (kUseThreadLocalAllocationStack && obj == nullptr) {
        continue;
      }
      if (space->HasAddress(obj)) {
        // This object is in the space, remove it from the array and add it to the sweep buffer
        // if needed.
        if (!mark_bitmap->Test(obj)) {
          if (chunk_free_pos >= kSweepArrayChunkFreeSize) {
            timings_.StartSplit("FreeList");
            freed_objects += chunk_free_pos;
            freed_bytes += alloc_space->FreeList(self, chunk_free_pos, chunk_free_buffer);
            timings_.EndSplit();
            chunk_free_pos = 0;
          }
          chunk_free_buffer[chunk_free_pos++] = obj;
        }
      } else {
        *(out++) = obj;
      }
    }
    if (chunk_free_pos > 0) {
      timings_.StartSplit("FreeList");
      freed_objects += chunk_free_pos;
      freed_bytes += alloc_space->FreeList(self, chunk_free_pos, chunk_free_buffer);
      timings_.EndSplit();
      chunk_free_pos = 0;
    }
    // All of the references which the space contained are no longer in the allocation stack;
    // update the count.
    count = out - objects;
  }
  // Handle the large object space.
  space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
  accounting::ObjectSet* large_live_objects = large_object_space->GetLiveObjects();
  accounting::ObjectSet* large_mark_objects = large_object_space->GetMarkObjects();
  if (swap_bitmaps) {
    std::swap(large_live_objects, large_mark_objects);
  }
  for (size_t i = 0; i < count; ++i) {
    Object* obj = objects[i];
    // Handle large objects.
    if (kUseThreadLocalAllocationStack && obj == nullptr) {
      continue;
    }
    if (!large_mark_objects->Test(obj)) {
      ++freed_large_objects;
      freed_large_object_bytes += large_object_space->Free(self, obj);
    }
  }
  timings_.EndSplit();

  timings_.StartSplit("RecordFree");
  VLOG(heap) << "Freed " << freed_objects << "/" << count
             << " objects with size " << PrettySize(freed_bytes);
  heap_->RecordFree(freed_objects + freed_large_objects, freed_bytes + freed_large_object_bytes);
  freed_objects_.FetchAndAdd(freed_objects);
  freed_large_objects_.FetchAndAdd(freed_large_objects);
  freed_bytes_.FetchAndAdd(freed_bytes);
  freed_large_object_bytes_.FetchAndAdd(freed_large_object_bytes);
  timings_.EndSplit();

  timings_.StartSplit("ResetStack");
  allocations->Reset();
  timings_.EndSplit();
}

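// An illustrative sketch (not ART code) of the batching idiom in SweepArray
// above. Handing the allocator one large array of dead pointers amortizes
// per-call locking and bookkeeping versus freeing objects one at a time.
// free_batch is a hypothetical allocator hook standing in for
// AllocSpace::FreeList; kSketchChunkSize mirrors kSweepArrayChunkFreeSize.
namespace sketch {

static constexpr size_t kSketchChunkSize = 1024;

template <typename Object, typename FreeBatchFn>
void SketchChunkedFree(Object** dead, size_t count, FreeBatchFn free_batch) {
  Object* buffer[kSketchChunkSize];
  size_t pos = 0;
  for (size_t i = 0; i < count; ++i) {
    if (pos == kSketchChunkSize) {
      free_batch(buffer, pos);  // One allocator call frees the whole chunk.
      pos = 0;
    }
    buffer[pos++] = dead[i];
  }
  if (pos > 0) {
    free_batch(buffer, pos);  // Flush the final partial chunk.
  }
}

}  // namespace sketch
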
"SweepZygoteSpace" : "SweepMallocSpace", &timings_); 1216 size_t freed_objects = 0; 1217 size_t freed_bytes = 0; 1218 alloc_space->Sweep(swap_bitmaps, &freed_objects, &freed_bytes); 1219 heap_->RecordFree(freed_objects, freed_bytes); 1220 freed_objects_.FetchAndAdd(freed_objects); 1221 freed_bytes_.FetchAndAdd(freed_bytes); 1222 } 1223 } 1224 SweepLargeObjects(swap_bitmaps); 1225} 1226 1227void MarkSweep::SweepLargeObjects(bool swap_bitmaps) { 1228 TimingLogger::ScopedSplit("SweepLargeObjects", &timings_); 1229 size_t freed_objects = 0; 1230 size_t freed_bytes = 0; 1231 GetHeap()->GetLargeObjectsSpace()->Sweep(swap_bitmaps, &freed_objects, &freed_bytes); 1232 freed_large_objects_.FetchAndAdd(freed_objects); 1233 freed_large_object_bytes_.FetchAndAdd(freed_bytes); 1234 GetHeap()->RecordFree(freed_objects, freed_bytes); 1235} 1236 1237// Process the "referent" field in a java.lang.ref.Reference. If the referent has not yet been 1238// marked, put it on the appropriate list in the heap for later processing. 1239void MarkSweep::DelayReferenceReferent(mirror::Class* klass, mirror::Reference* ref) { 1240 DCHECK(klass != nullptr); 1241 if (kCountJavaLangRefs) { 1242 ++reference_count_; 1243 } 1244 heap_->DelayReferenceReferent(klass, ref, IsMarkedCallback, this); 1245} 1246 1247class MarkObjectVisitor { 1248 public: 1249 explicit MarkObjectVisitor(MarkSweep* const mark_sweep) ALWAYS_INLINE : mark_sweep_(mark_sweep) { 1250 } 1251 1252 void operator()(Object* obj, MemberOffset offset, bool /* is_static */) const 1253 ALWAYS_INLINE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) 1254 EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) { 1255 if (kCheckLocks) { 1256 Locks::mutator_lock_->AssertSharedHeld(Thread::Current()); 1257 Locks::heap_bitmap_lock_->AssertExclusiveHeld(Thread::Current()); 1258 } 1259 mark_sweep_->MarkObject(obj->GetFieldObject<mirror::Object>(offset, false)); 1260 } 1261 1262 private: 1263 MarkSweep* const mark_sweep_; 1264}; 1265 1266// Scans an object reference. Determines the type of the reference 1267// and dispatches to a specialized scanning routine. 1268void MarkSweep::ScanObject(Object* obj) { 1269 MarkObjectVisitor mark_visitor(this); 1270 DelayReferenceReferentVisitor ref_visitor(this); 1271 ScanObjectVisit(obj, mark_visitor, ref_visitor); 1272} 1273 1274void MarkSweep::ProcessMarkStackPausedCallback(void* arg) { 1275 reinterpret_cast<MarkSweep*>(arg)->ProcessMarkStack(true); 1276} 1277 1278void MarkSweep::ProcessMarkStackParallel(size_t thread_count) { 1279 Thread* self = Thread::Current(); 1280 ThreadPool* thread_pool = GetHeap()->GetThreadPool(); 1281 const size_t chunk_size = std::min(mark_stack_->Size() / thread_count + 1, 1282 static_cast<size_t>(MarkStackTask<false>::kMaxSize)); 1283 CHECK_GT(chunk_size, 0U); 1284 // Split the current mark stack up into work tasks. 1285 for (mirror::Object **it = mark_stack_->Begin(), **end = mark_stack_->End(); it < end; ) { 1286 const size_t delta = std::min(static_cast<size_t>(end - it), chunk_size); 1287 thread_pool->AddTask(self, new MarkStackTask<false>(thread_pool, this, delta, it)); 1288 it += delta; 1289 } 1290 thread_pool->SetMaxActiveWorkers(thread_count - 1); 1291 thread_pool->StartWorkers(self); 1292 thread_pool->Wait(self, true, true); 1293 thread_pool->StopWorkers(self); 1294 mark_stack_->Reset(); 1295 CHECK_EQ(work_chunks_created_, work_chunks_deleted_) << " some of the work chunks were leaked"; 1296} 1297 1298// Scan anything that's on the mark stack. 
void MarkSweep::ProcessMarkStack(bool paused) {
  timings_.StartSplit(paused ? "(Paused)ProcessMarkStack" : "ProcessMarkStack");
  size_t thread_count = GetThreadCount(paused);
  if (kParallelProcessMarkStack && thread_count > 1 &&
      mark_stack_->Size() >= kMinimumParallelMarkStackSize) {
    ProcessMarkStackParallel(thread_count);
  } else {
    // TODO: Tune this.
    static const size_t kFifoSize = 4;
    BoundedFifoPowerOfTwo<Object*, kFifoSize> prefetch_fifo;
    for (;;) {
      Object* obj = NULL;
      if (kUseMarkStackPrefetch) {
        while (!mark_stack_->IsEmpty() && prefetch_fifo.size() < kFifoSize) {
          Object* obj = mark_stack_->PopBack();
          DCHECK(obj != NULL);
          __builtin_prefetch(obj);
          prefetch_fifo.push_back(obj);
        }
        if (prefetch_fifo.empty()) {
          break;
        }
        obj = prefetch_fifo.front();
        prefetch_fifo.pop_front();
      } else {
        if (mark_stack_->IsEmpty()) {
          break;
        }
        obj = mark_stack_->PopBack();
      }
      DCHECK(obj != NULL);
      ScanObject(obj);
    }
  }
  timings_.EndSplit();
}

inline bool MarkSweep::IsMarked(const Object* object) const
    SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
  if (immune_region_.ContainsObject(object)) {
    return true;
  }
  if (current_space_bitmap_->HasAddress(object)) {
    return current_space_bitmap_->Test(object);
  }
  return mark_bitmap_->Test(object);
}

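// An illustrative sketch (not ART code) of the prefetch pipeline used by
// ProcessMarkStack and MarkStackTask::Run above. Popped objects sit in a
// small ring buffer for a few iterations between the __builtin_prefetch and
// the actual field scan, giving the cache line time to arrive.
// kSketchFifoSize mirrors kFifoSize above; Stack is assumed to expose
// empty() / back() / pop_back(), e.g. a std::vector<Object*>.
namespace sketch {

static constexpr size_t kSketchFifoSize = 4;

template <typename Object, typename Stack, typename ScanFn>
void SketchPrefetchDrain(Stack& stack, ScanFn scan) {
  Object* fifo[kSketchFifoSize];
  size_t head = 0;
  size_t size = 0;
  for (;;) {
    // Keep the pipeline full: pop and prefetch until the FIFO is full.
    while (!stack.empty() && size < kSketchFifoSize) {
      Object* obj = stack.back();
      stack.pop_back();
      __builtin_prefetch(obj);  // Warm the cache before we touch the fields.
      fifo[(head + size++) % kSketchFifoSize] = obj;
    }
    if (size == 0) {
      break;
    }
    // Scan the oldest entry; its prefetch was issued several iterations ago.
    Object* obj = fifo[head];
    head = (head + 1) % kSketchFifoSize;
    --size;
    scan(obj);
  }
}

}  // namespace sketch
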
void MarkSweep::FinishPhase() {
  TimingLogger::ScopedSplit split("FinishPhase", &timings_);
  // Can't enqueue references if we hold the mutator lock.
  Heap* heap = GetHeap();
  timings_.NewSplit("PostGcVerification");
  heap->PostGcVerification(this);
  // Update the cumulative statistics.
  total_freed_objects_ += GetFreedObjects() + GetFreedLargeObjects();
  total_freed_bytes_ += GetFreedBytes() + GetFreedLargeObjectBytes();
  // Ensure that the mark stack is empty.
  CHECK(mark_stack_->IsEmpty());
  if (kCountScannedTypes) {
    VLOG(gc) << "MarkSweep scanned classes=" << class_count_ << " arrays=" << array_count_
             << " other=" << other_count_;
  }
  if (kCountTasks) {
    VLOG(gc) << "Total number of work chunks allocated: " << work_chunks_created_;
  }
  if (kMeasureOverhead) {
    VLOG(gc) << "Overhead time " << PrettyDuration(overhead_time_);
  }
  if (kProfileLargeObjects) {
    VLOG(gc) << "Large objects tested " << large_object_test_ << " marked " << large_object_mark_;
  }
  if (kCountJavaLangRefs) {
    VLOG(gc) << "References scanned " << reference_count_;
  }
  if (kCountMarkedObjects) {
    VLOG(gc) << "Marked: null=" << mark_null_count_ << " immune=" << mark_immune_count_
             << " fastpath=" << mark_fastpath_count_ << " slowpath=" << mark_slowpath_count_;
  }
  // Update the cumulative loggers.
  cumulative_timings_.Start();
  cumulative_timings_.AddLogger(timings_);
  cumulative_timings_.End();
  // Clear all of the spaces' mark bitmaps.
  for (const auto& space : GetHeap()->GetContinuousSpaces()) {
    accounting::SpaceBitmap* bitmap = space->GetMarkBitmap();
    if (bitmap != nullptr &&
        space->GetGcRetentionPolicy() != space::kGcRetentionPolicyNeverCollect) {
      bitmap->Clear();
    }
  }
  mark_stack_->Reset();
  // Reset the marked large objects.
  space::LargeObjectSpace* large_objects = GetHeap()->GetLargeObjectsSpace();
  large_objects->GetMarkObjects()->Clear();
}

void MarkSweep::RevokeAllThreadLocalBuffers() {
  if (kRevokeRosAllocThreadLocalBuffersAtCheckpoint && IsConcurrent()) {
    // If concurrent, rosalloc thread-local buffers are revoked at the
    // thread checkpoint. Bump pointer space thread-local buffers must
    // not be in use.
    GetHeap()->AssertAllBumpPointerSpaceThreadLocalBuffersAreRevoked();
  } else {
    timings_.StartSplit("(Paused)RevokeAllThreadLocalBuffers");
    GetHeap()->RevokeAllThreadLocalBuffers();
    timings_.EndSplit();
  }
}

}  // namespace collector
}  // namespace gc
}  // namespace art