mark_sweep.cc revision c93c530efc175954160c3834c93961a1a946a35a
/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "mark_sweep.h"

#include <functional>
#include <numeric>
#include <climits>
#include <vector>

#include "base/bounded_fifo.h"
#include "base/logging.h"
#include "base/macros.h"
#include "base/mutex-inl.h"
#include "base/timing_logger.h"
#include "gc/accounting/card_table-inl.h"
#include "gc/accounting/heap_bitmap.h"
#include "gc/accounting/mod_union_table.h"
#include "gc/accounting/space_bitmap-inl.h"
#include "gc/heap.h"
#include "gc/space/image_space.h"
#include "gc/space/large_object_space.h"
#include "gc/space/space-inl.h"
#include "indirect_reference_table.h"
#include "intern_table.h"
#include "jni_internal.h"
#include "monitor.h"
#include "mark_sweep-inl.h"
#include "mirror/art_field.h"
#include "mirror/art_field-inl.h"
#include "mirror/class-inl.h"
#include "mirror/class_loader.h"
#include "mirror/dex_cache.h"
#include "mirror/reference-inl.h"
#include "mirror/object-inl.h"
#include "mirror/object_array.h"
#include "mirror/object_array-inl.h"
#include "runtime.h"
#include "thread-inl.h"
#include "thread_list.h"
#include "verifier/method_verifier.h"

using ::art::mirror::ArtField;
using ::art::mirror::Class;
using ::art::mirror::Object;
using ::art::mirror::ObjectArray;

namespace art {
namespace gc {
namespace collector {

// Performance options.
static constexpr bool kUseRecursiveMark = false;
static constexpr bool kUseMarkStackPrefetch = true;
static constexpr size_t kSweepArrayChunkFreeSize = 1024;
static constexpr bool kPreCleanCards = true;

// Parallelism options.
static constexpr bool kParallelCardScan = true;
static constexpr bool kParallelRecursiveMark = true;
// Don't attempt to parallelize mark stack processing unless the mark stack is at least n
// elements. This is temporary until we reduce the overhead caused by allocating tasks, etc. Not
// having this can add overhead in ProcessReferences since we may end up doing many calls of
// ProcessMarkStack with very small mark stacks.
static constexpr size_t kMinimumParallelMarkStackSize = 128;
static constexpr bool kParallelProcessMarkStack = true;

// Profiling and information flags.
static constexpr bool kCountClassesMarked = false;
static constexpr bool kProfileLargeObjects = false;
static constexpr bool kMeasureOverhead = false;
static constexpr bool kCountTasks = false;
static constexpr bool kCountJavaLangRefs = false;

// Turn off kCheckLocks when profiling the GC since it slows the GC down by up to 40%.
static constexpr bool kCheckLocks = kDebugLocking;
static constexpr bool kVerifyRoots = kIsDebugBuild;

// If true, revoke the rosalloc thread-local buffers at the
// checkpoint, as opposed to during the pause.
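// Revoking at the checkpoint keeps this work out of the stop-the-world pause; see
// CheckpointMarkThreadRoots::Run below for where the revocation happens.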
static constexpr bool kRevokeRosAllocThreadLocalBuffersAtCheckpoint = true;

void MarkSweep::BindBitmaps() {
  timings_.StartSplit("BindBitmaps");
  WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
  // Mark all of the spaces we never collect as immune.
  for (const auto& space : GetHeap()->GetContinuousSpaces()) {
    if (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyNeverCollect) {
      CHECK(immune_region_.AddContinuousSpace(space)) << "Failed to add space " << *space;
    }
  }
  timings_.EndSplit();
}

MarkSweep::MarkSweep(Heap* heap, bool is_concurrent, const std::string& name_prefix)
    : GarbageCollector(heap,
                       name_prefix +
                       (is_concurrent ? "concurrent mark sweep": "mark sweep")),
      current_mark_bitmap_(NULL),
      mark_stack_(NULL),
      live_stack_freeze_size_(0),
      gc_barrier_(new Barrier(0)),
      large_object_lock_("mark sweep large object lock", kMarkSweepLargeObjectLock),
      mark_stack_lock_("mark sweep mark stack lock", kMarkSweepMarkStackLock),
      is_concurrent_(is_concurrent) {
}

void MarkSweep::InitializePhase() {
  timings_.Reset();
  TimingLogger::ScopedSplit split("InitializePhase", &timings_);
  mark_stack_ = heap_->mark_stack_.get();
  DCHECK(mark_stack_ != nullptr);
  immune_region_.Reset();
  class_count_ = 0;
  array_count_ = 0;
  other_count_ = 0;
  large_object_test_ = 0;
  large_object_mark_ = 0;
  classes_marked_ = 0;
  overhead_time_ = 0;
  work_chunks_created_ = 0;
  work_chunks_deleted_ = 0;
  reference_count_ = 0;

  FindDefaultMarkBitmap();

  // Do any pre GC verification.
  timings_.NewSplit("PreGcVerification");
  heap_->PreGcVerification(this);
}

void MarkSweep::ProcessReferences(Thread* self) {
  TimingLogger::ScopedSplit split("ProcessReferences", &timings_);
  WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
  GetHeap()->ProcessReferences(timings_, clear_soft_references_, &IsMarkedCallback,
                               &MarkObjectCallback, &ProcessMarkStackPausedCallback, this);
}

void MarkSweep::PreProcessReferences() {
  if (IsConcurrent()) {
    // No reason to do this for non-concurrent GC since pre processing soft references only helps
    // pauses.
    timings_.NewSplit("PreProcessReferences");
    GetHeap()->ProcessSoftReferences(timings_, clear_soft_references_, &IsMarkedCallback,
                                     &MarkObjectCallback, &ProcessMarkStackPausedCallback, this);
  }
}

void MarkSweep::HandleDirtyObjectsPhase() {
  TimingLogger::ScopedSplit split("(Paused)HandleDirtyObjectsPhase", &timings_);
  Thread* self = Thread::Current();
  Locks::mutator_lock_->AssertExclusiveHeld(self);

  {
    WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);

    // Re-mark root set.
    ReMarkRoots();

    // Scan dirty objects, this is only required if we are not doing concurrent GC.
    RecursiveMarkDirtyObjects(true, accounting::CardTable::kCardDirty);
  }

  ProcessReferences(self);

  // Only need to do this if we have the card mark verification on, and only during concurrent GC.
  if (GetHeap()->verify_missing_card_marks_ || GetHeap()->verify_pre_gc_heap_||
      GetHeap()->verify_post_gc_heap_) {
    WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
    // This second sweep makes sure that we don't have any objects in the live stack which point to
    // freed objects. These cause problems since their references may be previously freed objects.
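    // Note: swap_bitmaps is false here because the live and mark bitmaps have not been swapped
    // yet at this point in the GC, so SweepArray tests the current mark bitmaps directly.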
    SweepArray(GetHeap()->allocation_stack_.get(), false);
    // Since SweepArray() above resets the (active) allocation stack, we need to revoke the
    // thread-local allocation stacks that point into it.
    RevokeAllThreadLocalAllocationStacks(self);
  }

  timings_.StartSplit("PreSweepingGcVerification");
  heap_->PreSweepingGcVerification(this);
  timings_.EndSplit();

  // Ensure that nobody inserted items in the live stack after we swapped the stacks.
  ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
  CHECK_GE(live_stack_freeze_size_, GetHeap()->GetLiveStack()->Size());

  // Disallow new system weaks to prevent a race which occurs when someone adds a new system
  // weak before we sweep them. Since this new system weak may not be marked, the GC may
  // incorrectly sweep it. This also fixes a race where interning may attempt to return a strong
  // reference to a string that is about to be swept.
  Runtime::Current()->DisallowNewSystemWeaks();
}

void MarkSweep::PreCleanCards() {
  // Don't do this for non-concurrent GCs since they don't have any dirty cards.
  if (kPreCleanCards && IsConcurrent()) {
    Thread* self = Thread::Current();
    CHECK(!Locks::mutator_lock_->IsExclusiveHeld(self));
    // Process dirty cards and add dirty cards to mod union tables, also ages cards.
    heap_->ProcessCards(timings_, false);
    // The checkpoint root marking is required to avoid a race condition which occurs if the
    // following happens during a reference write:
    // 1. mutator dirties the card (write barrier)
    // 2. GC ages the card (the above ProcessCards call)
    // 3. GC scans the object (the RecursiveMarkDirtyObjects call below)
    // 4. mutator writes the value (corresponding to the write barrier in 1.)
    // This causes the GC to age the card but not necessarily mark the reference which the mutator
    // wrote into the object stored in the card.
    // Having the checkpoint fixes this issue since it ensures that the card mark and the
    // reference write are visible to the GC before the card is scanned (this is due to locks being
    // acquired / released in the checkpoint code).
    // The other roots are also marked to help reduce the pause.
    MarkThreadRoots(self);
    // TODO: Only mark the dirty roots.
    MarkNonThreadRoots();
    MarkConcurrentRoots(
        static_cast<VisitRootFlags>(kVisitRootFlagClearRootLog | kVisitRootFlagNewRoots));
    // Process the newly aged cards.
    RecursiveMarkDirtyObjects(false, accounting::CardTable::kCardDirty - 1);
    // TODO: Empty allocation stack to reduce the number of objects we need to test / mark as live
    // in the next GC.
  }
}

void MarkSweep::RevokeAllThreadLocalAllocationStacks(Thread* self) {
  if (kUseThreadLocalAllocationStack) {
    Locks::mutator_lock_->AssertExclusiveHeld(self);
    heap_->RevokeAllThreadLocalAllocationStacks(self);
  }
}

void MarkSweep::MarkingPhase() {
  TimingLogger::ScopedSplit split("MarkingPhase", &timings_);
  Thread* self = Thread::Current();

  BindBitmaps();
  FindDefaultMarkBitmap();

  // Process dirty cards and add dirty cards to mod union tables.
  heap_->ProcessCards(timings_, false);

  // Need to do this before the checkpoint since we don't want any threads to add references to
  // the live stack during the recursive mark.
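  // (SwapStacks exchanges the allocation stack and the live stack; the frozen live stack is then
  // marked as live in MarkReachableObjects, while new allocations go onto the fresh allocation
  // stack.)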
  timings_.NewSplit("SwapStacks");
  heap_->SwapStacks(self);

  WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
  MarkRoots(self);
  live_stack_freeze_size_ = heap_->GetLiveStack()->Size();
  UpdateAndMarkModUnion();
  MarkReachableObjects();
  // Pre-clean dirtied cards to reduce pauses.
  PreCleanCards();
  PreProcessReferences();
}

void MarkSweep::UpdateAndMarkModUnion() {
  for (const auto& space : heap_->GetContinuousSpaces()) {
    if (immune_region_.ContainsSpace(space)) {
      const char* name = space->IsZygoteSpace() ? "UpdateAndMarkZygoteModUnionTable" :
          "UpdateAndMarkImageModUnionTable";
      TimingLogger::ScopedSplit split(name, &timings_);
      accounting::ModUnionTable* mod_union_table = heap_->FindModUnionTableFromSpace(space);
      CHECK(mod_union_table != nullptr);
      mod_union_table->UpdateAndMarkReferences(MarkObjectCallback, this);
    }
  }
}

void MarkSweep::MarkThreadRoots(Thread* self) {
  MarkRootsCheckpoint(self);
}

void MarkSweep::MarkReachableObjects() {
  // Mark everything allocated since the last GC as live so that we can sweep concurrently,
  // knowing that new allocations won't be marked as live.
  timings_.StartSplit("MarkStackAsLive");
  accounting::ObjectStack* live_stack = heap_->GetLiveStack();
  heap_->MarkAllocStackAsLive(live_stack);
  live_stack->Reset();
  timings_.EndSplit();
  // Recursively mark all the non-image bits set in the mark bitmap.
  RecursiveMark();
}

void MarkSweep::ReclaimPhase() {
  TimingLogger::ScopedSplit split("ReclaimPhase", &timings_);
  Thread* self = Thread::Current();

  if (!IsConcurrent()) {
    ProcessReferences(self);
  }

  {
    WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
    SweepSystemWeaks();
  }

  if (IsConcurrent()) {
    Runtime::Current()->AllowNewSystemWeaks();

    TimingLogger::ScopedSplit split("UnMarkAllocStack", &timings_);
    WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
    accounting::ObjectStack* allocation_stack = GetHeap()->allocation_stack_.get();
    if (!kPreCleanCards) {
      // The allocation stack contains things allocated since the start of the GC. These may have
      // been marked during this GC meaning they won't be eligible for reclaiming in the next
      // sticky GC. Unmark these objects so that they are eligible for reclaiming in the next
      // sticky GC.
      // There is a race here which is safely handled. Another thread such as the hprof could
      // have flushed the alloc stack after we resumed the threads. This is safe however, since
      // resetting the allocation stack zeros it out with madvise. This means that we will either
      // read NULLs or attempt to unmark a newly allocated object which will not be marked in the
      // first place.
      // We can't do this if we pre-clean cards since we will unmark objects which are no longer on
      // a dirty card since we aged cards during the pre-cleaning process.
      mirror::Object** end = allocation_stack->End();
      for (mirror::Object** it = allocation_stack->Begin(); it != end; ++it) {
        const Object* obj = *it;
        if (obj != nullptr) {
          UnMarkObjectNonNull(obj);
        }
      }
    }
  }

  {
    WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);

    // Reclaim unmarked objects.
    Sweep(false);

    // Swap the live and mark bitmaps for each space which we modified. This is an optimization
    // that enables us to not clear live bits inside of the sweep. Only swaps unbound bitmaps.
    timings_.StartSplit("SwapBitmaps");
    SwapBitmaps();
    timings_.EndSplit();

    // Unbind the live and mark bitmaps.
    TimingLogger::ScopedSplit split("UnBindBitmaps", &timings_);
    GetHeap()->UnBindBitmaps();
  }
}

void MarkSweep::FindDefaultMarkBitmap() {
  TimingLogger::ScopedSplit split("FindDefaultMarkBitmap", &timings_);
  for (const auto& space : GetHeap()->GetContinuousSpaces()) {
    accounting::SpaceBitmap* bitmap = space->GetMarkBitmap();
    if (bitmap != nullptr &&
        space->GetGcRetentionPolicy() == space::kGcRetentionPolicyAlwaysCollect) {
      current_mark_bitmap_ = bitmap;
      CHECK(current_mark_bitmap_ != NULL);
      return;
    }
  }
  GetHeap()->DumpSpaces();
  LOG(FATAL) << "Could not find a default mark bitmap";
}

void MarkSweep::ExpandMarkStack() {
  ResizeMarkStack(mark_stack_->Capacity() * 2);
}

void MarkSweep::ResizeMarkStack(size_t new_size) {
  // Rare case, no need to have Thread::Current be a parameter.
  if (UNLIKELY(mark_stack_->Size() < mark_stack_->Capacity())) {
    // Someone else acquired the lock and expanded the mark stack before us.
    return;
  }
  std::vector<Object*> temp(mark_stack_->Begin(), mark_stack_->End());
  CHECK_LE(mark_stack_->Size(), new_size);
  mark_stack_->Resize(new_size);
  for (const auto& obj : temp) {
    mark_stack_->PushBack(obj);
  }
}

inline void MarkSweep::MarkObjectNonNullParallel(const Object* obj) {
  DCHECK(obj != NULL);
  if (MarkObjectParallel(obj)) {
    MutexLock mu(Thread::Current(), mark_stack_lock_);
    if (UNLIKELY(mark_stack_->Size() >= mark_stack_->Capacity())) {
      ExpandMarkStack();
    }
    // The object must be pushed on to the mark stack.
    mark_stack_->PushBack(const_cast<Object*>(obj));
  }
}

mirror::Object* MarkSweep::MarkObjectCallback(mirror::Object* obj, void* arg) {
  MarkSweep* mark_sweep = reinterpret_cast<MarkSweep*>(arg);
  mark_sweep->MarkObject(obj);
  return obj;
}

inline void MarkSweep::UnMarkObjectNonNull(const Object* obj) {
  DCHECK(!immune_region_.ContainsObject(obj));

  if (kUseBrooksPointer) {
    // Verify all the objects have the correct Brooks pointer installed.
    obj->AssertSelfBrooksPointer();
  }

  // Try to take advantage of locality of references within a space, failing this find the space
  // the hard way.
  accounting::SpaceBitmap* object_bitmap = current_mark_bitmap_;
  if (UNLIKELY(!object_bitmap->HasAddress(obj))) {
    accounting::SpaceBitmap* new_bitmap = heap_->GetMarkBitmap()->GetContinuousSpaceBitmap(obj);
    if (LIKELY(new_bitmap != NULL)) {
      object_bitmap = new_bitmap;
    } else {
      MarkLargeObject(obj, false);
      return;
    }
  }

  DCHECK(object_bitmap->HasAddress(obj));
  object_bitmap->Clear(obj);
}

inline void MarkSweep::MarkObjectNonNull(const Object* obj) {
  DCHECK(obj != NULL);

  if (kUseBrooksPointer) {
    // Verify all the objects have the correct Brooks pointer installed.
    obj->AssertSelfBrooksPointer();
  }

  if (immune_region_.ContainsObject(obj)) {
    DCHECK(IsMarked(obj));
    return;
  }

  // Try to take advantage of locality of references within a space, failing this find the space
  // the hard way.
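  // current_mark_bitmap_ normally points at the default (alloc space) mark bitmap, so the common
  // case hits here; other objects fall back to a heap-wide bitmap lookup and, failing that, to
  // the large object path.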
  accounting::SpaceBitmap* object_bitmap = current_mark_bitmap_;
  if (UNLIKELY(!object_bitmap->HasAddress(obj))) {
    accounting::SpaceBitmap* new_bitmap = heap_->GetMarkBitmap()->GetContinuousSpaceBitmap(obj);
    if (LIKELY(new_bitmap != NULL)) {
      object_bitmap = new_bitmap;
    } else {
      MarkLargeObject(obj, true);
      return;
    }
  }

  // This object was not previously marked.
  if (!object_bitmap->Test(obj)) {
    object_bitmap->Set(obj);
    if (UNLIKELY(mark_stack_->Size() >= mark_stack_->Capacity())) {
      // Lock is not needed but is here anyways to please annotalysis.
      MutexLock mu(Thread::Current(), mark_stack_lock_);
      ExpandMarkStack();
    }
    // The object must be pushed on to the mark stack.
    mark_stack_->PushBack(const_cast<Object*>(obj));
  }
}

// Rare case, probably not worth inlining since it will increase instruction cache miss rate.
bool MarkSweep::MarkLargeObject(const Object* obj, bool set) {
  // TODO: support >1 discontinuous space.
  space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
  accounting::ObjectSet* large_objects = large_object_space->GetMarkObjects();
  if (kProfileLargeObjects) {
    ++large_object_test_;
  }
  if (UNLIKELY(!large_objects->Test(obj))) {
    if (!large_object_space->Contains(obj)) {
      LOG(ERROR) << "Tried to mark " << obj << " not contained by any spaces";
      LOG(ERROR) << "Attempting to see if it's a bad root";
      VerifyRoots();
      LOG(FATAL) << "Can't mark bad root";
    }
    if (kProfileLargeObjects) {
      ++large_object_mark_;
    }
    if (set) {
      large_objects->Set(obj);
    } else {
      large_objects->Clear(obj);
    }
    return true;
  }
  return false;
}

inline bool MarkSweep::MarkObjectParallel(const Object* obj) {
  DCHECK(obj != NULL);

  if (kUseBrooksPointer) {
    // Verify all the objects have the correct Brooks pointer installed.
    obj->AssertSelfBrooksPointer();
  }

  if (immune_region_.ContainsObject(obj)) {
    DCHECK(IsMarked(obj));
    return false;
  }

  // Try to take advantage of locality of references within a space, failing this find the space
  // the hard way.
  accounting::SpaceBitmap* object_bitmap = current_mark_bitmap_;
  if (UNLIKELY(!object_bitmap->HasAddress(obj))) {
    accounting::SpaceBitmap* new_bitmap = heap_->GetMarkBitmap()->GetContinuousSpaceBitmap(obj);
    if (new_bitmap != NULL) {
      object_bitmap = new_bitmap;
    } else {
      // TODO: Remove the Thread::Current here?
      // TODO: Convert this to some kind of atomic marking?
      MutexLock mu(Thread::Current(), large_object_lock_);
      return MarkLargeObject(obj, true);
    }
  }

  // Return true if the object was not previously marked.
  return !object_bitmap->AtomicTestAndSet(obj);
}

// Used to mark objects when recursing. Recursion is done by moving
// the finger across the bitmaps in address order and marking child
// objects. Any newly-marked objects whose addresses are lower than
// the finger won't be visited by the bitmap scan, so those objects
// need to be added to the mark stack.
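// For example, if the bitmap scan's finger is at address F and scanning an object finds a
// reference to an unmarked object at address A < F, the bitmap walk has already passed A, so
// that object is pushed on the mark stack to be scanned later; references at addresses >= F
// will still be reached by the ongoing bitmap scan and need no push.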
inline void MarkSweep::MarkObject(const Object* obj) {
  if (obj != NULL) {
    MarkObjectNonNull(obj);
  }
}

void MarkSweep::MarkRootParallelCallback(mirror::Object** root, void* arg, uint32_t /*thread_id*/,
                                         RootType /*root_type*/) {
  reinterpret_cast<MarkSweep*>(arg)->MarkObjectNonNullParallel(*root);
}

void MarkSweep::VerifyRootMarked(Object** root, void* arg, uint32_t /*thread_id*/,
                                 RootType /*root_type*/) {
  CHECK(reinterpret_cast<MarkSweep*>(arg)->IsMarked(*root));
}

void MarkSweep::MarkRootCallback(Object** root, void* arg, uint32_t /*thread_id*/,
                                 RootType /*root_type*/) {
  reinterpret_cast<MarkSweep*>(arg)->MarkObjectNonNull(*root);
}

void MarkSweep::VerifyRootCallback(const Object* root, void* arg, size_t vreg,
                                   const StackVisitor* visitor) {
  reinterpret_cast<MarkSweep*>(arg)->VerifyRoot(root, vreg, visitor);
}

void MarkSweep::VerifyRoot(const Object* root, size_t vreg, const StackVisitor* visitor) {
  // See if the root is on any space bitmap.
  if (GetHeap()->GetLiveBitmap()->GetContinuousSpaceBitmap(root) == NULL) {
    space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
    if (!large_object_space->Contains(root)) {
      LOG(ERROR) << "Found invalid root: " << root;
      if (visitor != NULL) {
        LOG(ERROR) << visitor->DescribeLocation() << " in VReg: " << vreg;
      }
    }
  }
}

void MarkSweep::VerifyRoots() {
  Runtime::Current()->GetThreadList()->VerifyRoots(VerifyRootCallback, this);
}

void MarkSweep::MarkRoots(Thread* self) {
  if (Locks::mutator_lock_->IsExclusiveHeld(self)) {
    // If we exclusively hold the mutator lock, all threads must be suspended.
    timings_.StartSplit("MarkRoots");
    Runtime::Current()->VisitRoots(MarkRootCallback, this);
    timings_.EndSplit();
    RevokeAllThreadLocalAllocationStacks(self);
  } else {
    MarkThreadRoots(self);
    // At this point the live stack should no longer have any mutators which push into it.
    MarkNonThreadRoots();
    MarkConcurrentRoots(
        static_cast<VisitRootFlags>(kVisitRootFlagAllRoots | kVisitRootFlagStartLoggingNewRoots));
  }
}

void MarkSweep::MarkNonThreadRoots() {
  timings_.StartSplit("MarkNonThreadRoots");
  Runtime::Current()->VisitNonThreadRoots(MarkRootCallback, this);
  timings_.EndSplit();
}

void MarkSweep::MarkConcurrentRoots(VisitRootFlags flags) {
  timings_.StartSplit("MarkConcurrentRoots");
  // Visit all runtime roots and clear dirty flags.
  Runtime::Current()->VisitConcurrentRoots(MarkRootCallback, this, flags);
  timings_.EndSplit();
}

class ScanObjectVisitor {
 public:
  explicit ScanObjectVisitor(MarkSweep* const mark_sweep) ALWAYS_INLINE
      : mark_sweep_(mark_sweep) {}

  // TODO: Fix this when annotalysis works with visitors.
  void operator()(Object* obj) const ALWAYS_INLINE NO_THREAD_SAFETY_ANALYSIS {
    if (kCheckLocks) {
      Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
      Locks::heap_bitmap_lock_->AssertExclusiveHeld(Thread::Current());
    }
    mark_sweep_->ScanObject(obj);
  }

 private:
  MarkSweep* const mark_sweep_;
};

template <bool kUseFinger = false>
class MarkStackTask : public Task {
 public:
  MarkStackTask(ThreadPool* thread_pool, MarkSweep* mark_sweep, size_t mark_stack_size,
                const Object** mark_stack)
      : mark_sweep_(mark_sweep),
        thread_pool_(thread_pool),
        mark_stack_pos_(mark_stack_size) {
    // We may have to copy part of an existing mark stack when another mark stack overflows.
    if (mark_stack_size != 0) {
      DCHECK(mark_stack != NULL);
      // TODO: Check performance?
      std::copy(mark_stack, mark_stack + mark_stack_size, mark_stack_);
    }
    if (kCountTasks) {
      ++mark_sweep_->work_chunks_created_;
    }
  }

  static const size_t kMaxSize = 1 * KB;

 protected:
  class ScanObjectParallelVisitor {
   public:
    explicit ScanObjectParallelVisitor(MarkStackTask<kUseFinger>* chunk_task) ALWAYS_INLINE
        : chunk_task_(chunk_task) {}

    void operator()(Object* obj) const {
      MarkSweep* mark_sweep = chunk_task_->mark_sweep_;
      mark_sweep->ScanObjectVisit(obj,
          [mark_sweep, this](Object* /* obj */, Object* ref, const MemberOffset& /* offset */,
              bool /* is_static */) ALWAYS_INLINE_LAMBDA {
        if (ref != nullptr && mark_sweep->MarkObjectParallel(ref)) {
          if (kUseFinger) {
            android_memory_barrier();
            if (reinterpret_cast<uintptr_t>(ref) >=
                static_cast<uintptr_t>(mark_sweep->atomic_finger_)) {
              return;
            }
          }
          chunk_task_->MarkStackPush(ref);
        }
      });
    }

   private:
    MarkStackTask<kUseFinger>* const chunk_task_;
  };

  virtual ~MarkStackTask() {
    // Make sure that we have cleared our mark stack.
    DCHECK_EQ(mark_stack_pos_, 0U);
    if (kCountTasks) {
      ++mark_sweep_->work_chunks_deleted_;
    }
  }

  MarkSweep* const mark_sweep_;
  ThreadPool* const thread_pool_;
  // Thread local mark stack for this task.
  const Object* mark_stack_[kMaxSize];
  // Mark stack position.
  size_t mark_stack_pos_;

  void MarkStackPush(const Object* obj) ALWAYS_INLINE {
    if (UNLIKELY(mark_stack_pos_ == kMaxSize)) {
      // Mark stack overflow, give 1/2 the stack to the thread pool as a new work task.
      mark_stack_pos_ /= 2;
      auto* task = new MarkStackTask(thread_pool_, mark_sweep_, kMaxSize - mark_stack_pos_,
                                     mark_stack_ + mark_stack_pos_);
      thread_pool_->AddTask(Thread::Current(), task);
    }
    DCHECK(obj != nullptr);
    DCHECK(mark_stack_pos_ < kMaxSize);
    mark_stack_[mark_stack_pos_++] = obj;
  }

  virtual void Finalize() {
    delete this;
  }

  // Scans all of the objects.
  virtual void Run(Thread* self) {
    ScanObjectParallelVisitor visitor(this);
    // TODO: Tune this.
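    // The loop below pops references from this task's local mark stack into a small FIFO,
    // issuing a prefetch for each entry as it is pushed so that an object's header is more
    // likely to be cache-resident by the time it is actually scanned.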
    static const size_t kFifoSize = 4;
    BoundedFifoPowerOfTwo<const Object*, kFifoSize> prefetch_fifo;
    for (;;) {
      const Object* obj = nullptr;
      if (kUseMarkStackPrefetch) {
        while (mark_stack_pos_ != 0 && prefetch_fifo.size() < kFifoSize) {
          const Object* obj = mark_stack_[--mark_stack_pos_];
          DCHECK(obj != nullptr);
          __builtin_prefetch(obj);
          prefetch_fifo.push_back(obj);
        }
        if (UNLIKELY(prefetch_fifo.empty())) {
          break;
        }
        obj = prefetch_fifo.front();
        prefetch_fifo.pop_front();
      } else {
        if (UNLIKELY(mark_stack_pos_ == 0)) {
          break;
        }
        obj = mark_stack_[--mark_stack_pos_];
      }
      DCHECK(obj != nullptr);
      visitor(const_cast<mirror::Object*>(obj));
    }
  }
};

class CardScanTask : public MarkStackTask<false> {
 public:
  CardScanTask(ThreadPool* thread_pool, MarkSweep* mark_sweep, accounting::SpaceBitmap* bitmap,
               byte* begin, byte* end, byte minimum_age, size_t mark_stack_size,
               const Object** mark_stack_obj)
      : MarkStackTask<false>(thread_pool, mark_sweep, mark_stack_size, mark_stack_obj),
        bitmap_(bitmap),
        begin_(begin),
        end_(end),
        minimum_age_(minimum_age) {
  }

 protected:
  accounting::SpaceBitmap* const bitmap_;
  byte* const begin_;
  byte* const end_;
  const byte minimum_age_;

  virtual void Finalize() {
    delete this;
  }

  virtual void Run(Thread* self) NO_THREAD_SAFETY_ANALYSIS {
    ScanObjectParallelVisitor visitor(this);
    accounting::CardTable* card_table = mark_sweep_->GetHeap()->GetCardTable();
    size_t cards_scanned = card_table->Scan(bitmap_, begin_, end_, visitor, minimum_age_);
    VLOG(heap) << "Parallel scanning cards " << reinterpret_cast<void*>(begin_) << " - "
               << reinterpret_cast<void*>(end_) << " = " << cards_scanned;
    // Finish by emptying our local mark stack.
    MarkStackTask::Run(self);
  }
};

size_t MarkSweep::GetThreadCount(bool paused) const {
  if (heap_->GetThreadPool() == nullptr || !heap_->CareAboutPauseTimes()) {
    return 0;
  }
  if (paused) {
    return heap_->GetParallelGCThreadCount() + 1;
  } else {
    return heap_->GetConcGCThreadCount() + 1;
  }
}

void MarkSweep::ScanGrayObjects(bool paused, byte minimum_age) {
  accounting::CardTable* card_table = GetHeap()->GetCardTable();
  ThreadPool* thread_pool = GetHeap()->GetThreadPool();
  size_t thread_count = GetThreadCount(paused);
  // The parallel version with only one thread is faster for card scanning, TODO: fix.
  if (kParallelCardScan && thread_count > 0) {
    Thread* self = Thread::Current();
    // Can't have a different split for each space since multiple spaces can have their cards being
    // scanned at the same time.
    timings_.StartSplit(paused ? "(Paused)ScanGrayObjects" : "ScanGrayObjects");
    // Try to take some of the mark stack since we can pass this off to the worker tasks.
    const Object** mark_stack_begin = const_cast<const Object**>(mark_stack_->Begin());
    const Object** mark_stack_end = const_cast<const Object**>(mark_stack_->End());
    const size_t mark_stack_size = mark_stack_end - mark_stack_begin;
    // Estimated number of work tasks we will create.
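    // For example, with four continuous spaces and two worker threads this estimates eight card
    // scan tasks, so each task is seeded with roughly one eighth of the current mark stack
    // (capped at CardScanTask::kMaxSize / 2 entries).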
    const size_t mark_stack_tasks = GetHeap()->GetContinuousSpaces().size() * thread_count;
    DCHECK_NE(mark_stack_tasks, 0U);
    const size_t mark_stack_delta = std::min(CardScanTask::kMaxSize / 2,
                                             mark_stack_size / mark_stack_tasks + 1);
    for (const auto& space : GetHeap()->GetContinuousSpaces()) {
      if (space->GetMarkBitmap() == nullptr) {
        continue;
      }
      byte* card_begin = space->Begin();
      byte* card_end = space->End();
      // Align up the end address. For example, the image space's end
      // may not be card-size-aligned.
      card_end = AlignUp(card_end, accounting::CardTable::kCardSize);
      DCHECK(IsAligned<accounting::CardTable::kCardSize>(card_begin));
      DCHECK(IsAligned<accounting::CardTable::kCardSize>(card_end));
      // Calculate how many bytes of heap we will scan.
      const size_t address_range = card_end - card_begin;
      // Calculate how much address range each task gets.
      const size_t card_delta = RoundUp(address_range / thread_count + 1,
                                        accounting::CardTable::kCardSize);
      // Create the worker tasks for this space.
      while (card_begin != card_end) {
        // Add a range of cards.
        size_t addr_remaining = card_end - card_begin;
        size_t card_increment = std::min(card_delta, addr_remaining);
        // Take from the back of the mark stack.
        size_t mark_stack_remaining = mark_stack_end - mark_stack_begin;
        size_t mark_stack_increment = std::min(mark_stack_delta, mark_stack_remaining);
        mark_stack_end -= mark_stack_increment;
        mark_stack_->PopBackCount(static_cast<int32_t>(mark_stack_increment));
        DCHECK_EQ(mark_stack_end, const_cast<const art::mirror::Object**>(mark_stack_->End()));
        // Add the new task to the thread pool.
        auto* task = new CardScanTask(thread_pool, this, space->GetMarkBitmap(), card_begin,
                                      card_begin + card_increment, minimum_age,
                                      mark_stack_increment, mark_stack_end);
        thread_pool->AddTask(self, task);
        card_begin += card_increment;
      }
    }

    // Note: the card scan below may dirty new cards (and scan them)
    // as a side effect when a Reference object is encountered and
    // queued during the marking. See b/11465268.
    thread_pool->SetMaxActiveWorkers(thread_count - 1);
    thread_pool->StartWorkers(self);
    thread_pool->Wait(self, true, true);
    thread_pool->StopWorkers(self);
    timings_.EndSplit();
  } else {
    for (const auto& space : GetHeap()->GetContinuousSpaces()) {
      if (space->GetMarkBitmap() != nullptr) {
        // Image spaces are handled properly since live == marked for them.
        switch (space->GetGcRetentionPolicy()) {
          case space::kGcRetentionPolicyNeverCollect:
            timings_.StartSplit(paused ? "(Paused)ScanGrayImageSpaceObjects" :
                "ScanGrayImageSpaceObjects");
            break;
          case space::kGcRetentionPolicyFullCollect:
            timings_.StartSplit(paused ? "(Paused)ScanGrayZygoteSpaceObjects" :
                "ScanGrayZygoteSpaceObjects");
            break;
          case space::kGcRetentionPolicyAlwaysCollect:
            timings_.StartSplit(paused ? "(Paused)ScanGrayAllocSpaceObjects" :
                "ScanGrayAllocSpaceObjects");
            break;
        }
        ScanObjectVisitor visitor(this);
        card_table->Scan(space->GetMarkBitmap(), space->Begin(), space->End(), visitor,
                         minimum_age);
        timings_.EndSplit();
      }
    }
  }
}

class RecursiveMarkTask : public MarkStackTask<false> {
 public:
  RecursiveMarkTask(ThreadPool* thread_pool, MarkSweep* mark_sweep,
                    accounting::SpaceBitmap* bitmap, uintptr_t begin, uintptr_t end)
      : MarkStackTask<false>(thread_pool, mark_sweep, 0, NULL),
        bitmap_(bitmap),
        begin_(begin),
        end_(end) {
  }

 protected:
  accounting::SpaceBitmap* const bitmap_;
  const uintptr_t begin_;
  const uintptr_t end_;

  virtual void Finalize() {
    delete this;
  }

  // Scans all of the objects.
  virtual void Run(Thread* self) NO_THREAD_SAFETY_ANALYSIS {
    ScanObjectParallelVisitor visitor(this);
    bitmap_->VisitMarkedRange(begin_, end_, visitor);
    // Finish by emptying our local mark stack.
    MarkStackTask::Run(self);
  }
};

// Populates the mark stack based on the set of marked objects and
// recursively marks until the mark stack is emptied.
void MarkSweep::RecursiveMark() {
  TimingLogger::ScopedSplit split("RecursiveMark", &timings_);
  // RecursiveMark will build the lists of known instances of the Reference classes. See
  // DelayReferenceReferent for details.
  if (kUseRecursiveMark) {
    const bool partial = GetGcType() == kGcTypePartial;
    ScanObjectVisitor scan_visitor(this);
    auto* self = Thread::Current();
    ThreadPool* thread_pool = heap_->GetThreadPool();
    size_t thread_count = GetThreadCount(false);
    const bool parallel = kParallelRecursiveMark && thread_count > 1;
    mark_stack_->Reset();
    for (const auto& space : GetHeap()->GetContinuousSpaces()) {
      if ((space->GetGcRetentionPolicy() == space::kGcRetentionPolicyAlwaysCollect) ||
          (!partial && space->GetGcRetentionPolicy() == space::kGcRetentionPolicyFullCollect)) {
        current_mark_bitmap_ = space->GetMarkBitmap();
        if (current_mark_bitmap_ == nullptr) {
          continue;
        }
        if (parallel) {
          // We will use the mark stack in the future.
          // CHECK(mark_stack_->IsEmpty());
          // This function does not handle heap end increasing, so we must use the space end.
          uintptr_t begin = reinterpret_cast<uintptr_t>(space->Begin());
          uintptr_t end = reinterpret_cast<uintptr_t>(space->End());
          atomic_finger_ = static_cast<int32_t>(0xFFFFFFFF);

          // Create a few worker tasks.
          const size_t n = thread_count * 2;
          while (begin != end) {
            uintptr_t start = begin;
            uintptr_t delta = (end - begin) / n;
            delta = RoundUp(delta, KB);
            if (delta < 16 * KB) delta = end - begin;
            begin += delta;
            auto* task = new RecursiveMarkTask(thread_pool, this, current_mark_bitmap_, start,
                                               begin);
            thread_pool->AddTask(self, task);
          }
          thread_pool->SetMaxActiveWorkers(thread_count - 1);
          thread_pool->StartWorkers(self);
          thread_pool->Wait(self, true, true);
          thread_pool->StopWorkers(self);
        } else {
          // This function does not handle heap end increasing, so we must use the space end.
          uintptr_t begin = reinterpret_cast<uintptr_t>(space->Begin());
          uintptr_t end = reinterpret_cast<uintptr_t>(space->End());
          current_mark_bitmap_->VisitMarkedRange(begin, end, scan_visitor);
        }
      }
    }
  }
  ProcessMarkStack(false);
}

mirror::Object* MarkSweep::IsMarkedCallback(mirror::Object* object, void* arg) {
  if (reinterpret_cast<MarkSweep*>(arg)->IsMarked(object)) {
    return object;
  }
  return nullptr;
}

void MarkSweep::RecursiveMarkDirtyObjects(bool paused, byte minimum_age) {
  ScanGrayObjects(paused, minimum_age);
  ProcessMarkStack(paused);
}

void MarkSweep::ReMarkRoots() {
  Locks::mutator_lock_->AssertExclusiveHeld(Thread::Current());
  timings_.StartSplit("(Paused)ReMarkRoots");
  Runtime::Current()->VisitRoots(
      MarkRootCallback, this, static_cast<VisitRootFlags>(kVisitRootFlagNewRoots |
                                                          kVisitRootFlagStopLoggingNewRoots |
                                                          kVisitRootFlagClearRootLog));
  timings_.EndSplit();
  if (kVerifyRoots) {
    timings_.StartSplit("(Paused)VerifyRoots");
    Runtime::Current()->VisitRoots(VerifyRootMarked, this);
    timings_.EndSplit();
  }
}

void MarkSweep::SweepSystemWeaks() {
  Runtime* runtime = Runtime::Current();
  timings_.StartSplit("SweepSystemWeaks");
  runtime->SweepSystemWeaks(IsMarkedCallback, this);
  timings_.EndSplit();
}

mirror::Object* MarkSweep::VerifySystemWeakIsLiveCallback(Object* obj, void* arg) {
  reinterpret_cast<MarkSweep*>(arg)->VerifyIsLive(obj);
  // We don't actually want to sweep the object, so let's return "marked".
  return obj;
}

void MarkSweep::VerifyIsLive(const Object* obj) {
  Heap* heap = GetHeap();
  if (!heap->GetLiveBitmap()->Test(obj)) {
    space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
    if (!large_object_space->GetLiveObjects()->Test(obj)) {
      if (std::find(heap->allocation_stack_->Begin(), heap->allocation_stack_->End(), obj) ==
          heap->allocation_stack_->End()) {
        // Object not found!
        heap->DumpSpaces();
        LOG(FATAL) << "Found dead object " << obj;
      }
    }
  }
}

void MarkSweep::VerifySystemWeaks() {
  // Verify system weaks, uses a special object visitor which returns the input object.
  Runtime::Current()->SweepSystemWeaks(VerifySystemWeakIsLiveCallback, this);
}

class CheckpointMarkThreadRoots : public Closure {
 public:
  explicit CheckpointMarkThreadRoots(MarkSweep* mark_sweep) : mark_sweep_(mark_sweep) {}

  virtual void Run(Thread* thread) NO_THREAD_SAFETY_ANALYSIS {
    ATRACE_BEGIN("Marking thread roots");
    // Note: self is not necessarily equal to thread since thread may be suspended.
    Thread* self = Thread::Current();
    CHECK(thread == self || thread->IsSuspended() || thread->GetState() == kWaitingPerformingGc)
        << thread->GetState() << " thread " << thread << " self " << self;
    thread->VisitRoots(MarkSweep::MarkRootParallelCallback, mark_sweep_);
    ATRACE_END();
    if (kUseThreadLocalAllocationStack) {
      thread->RevokeThreadLocalAllocationStack();
    }
    if (kRevokeRosAllocThreadLocalBuffersAtCheckpoint) {
      mark_sweep_->GetHeap()->RevokeRosAllocThreadLocalBuffers(thread);
    }
    mark_sweep_->GetBarrier().Pass(self);
  }

 private:
  MarkSweep* mark_sweep_;
};

void MarkSweep::MarkRootsCheckpoint(Thread* self) {
  CheckpointMarkThreadRoots check_point(this);
  timings_.StartSplit("MarkRootsCheckpoint");
  ThreadList* thread_list = Runtime::Current()->GetThreadList();
  // Request that the checkpoint be run on all threads, returning a count of the threads that
  // must run through the barrier, including self.
  size_t barrier_count = thread_list->RunCheckpoint(&check_point);
  // Release locks then wait for all mutator threads to pass the barrier.
  // TODO: optimize to not release locks when there are no threads to wait for.
  Locks::heap_bitmap_lock_->ExclusiveUnlock(self);
  Locks::mutator_lock_->SharedUnlock(self);
  ThreadState old_state = self->SetState(kWaitingForCheckPointsToRun);
  CHECK_EQ(old_state, kWaitingPerformingGc);
  gc_barrier_->Increment(self, barrier_count);
  self->SetState(kWaitingPerformingGc);
  Locks::mutator_lock_->SharedLock(self);
  Locks::heap_bitmap_lock_->ExclusiveLock(self);
  timings_.EndSplit();
}

void MarkSweep::SweepArray(accounting::ObjectStack* allocations, bool swap_bitmaps) {
  timings_.StartSplit("SweepArray");
  Thread* self = Thread::Current();
  mirror::Object* chunk_free_buffer[kSweepArrayChunkFreeSize];
  size_t chunk_free_pos = 0;
  size_t freed_bytes = 0;
  size_t freed_large_object_bytes = 0;
  size_t freed_objects = 0;
  size_t freed_large_objects = 0;
  // How many objects are left in the array, modified after each space is swept.
  Object** objects = const_cast<Object**>(allocations->Begin());
  size_t count = allocations->Size();
  // Change the order to ensure that the non-moving space is swept last as an optimization.
  std::vector<space::ContinuousSpace*> sweep_spaces;
  space::ContinuousSpace* non_moving_space = nullptr;
  for (space::ContinuousSpace* space : heap_->GetContinuousSpaces()) {
    if (space->IsAllocSpace() && !immune_region_.ContainsSpace(space) &&
        space->GetLiveBitmap() != nullptr) {
      if (space == heap_->GetNonMovingSpace()) {
        non_moving_space = space;
      } else {
        sweep_spaces.push_back(space);
      }
    }
  }
  // Unlikely to sweep a significant amount of non-movable objects, so we do these after the
  // other alloc spaces as an optimization.
  if (non_moving_space != nullptr) {
    sweep_spaces.push_back(non_moving_space);
  }
  // Start by sweeping the continuous spaces.
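  // For each space we walk the remaining allocation stack entries: objects that belong to the
  // space and are unmarked are batched into chunk_free_buffer and freed in chunks, while objects
  // outside the space are compacted towards the front of the array (via 'out') so that later
  // spaces only examine what is left.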
  for (space::ContinuousSpace* space : sweep_spaces) {
    space::AllocSpace* alloc_space = space->AsAllocSpace();
    accounting::SpaceBitmap* live_bitmap = space->GetLiveBitmap();
    accounting::SpaceBitmap* mark_bitmap = space->GetMarkBitmap();
    if (swap_bitmaps) {
      std::swap(live_bitmap, mark_bitmap);
    }
    Object** out = objects;
    for (size_t i = 0; i < count; ++i) {
      Object* obj = objects[i];
      if (kUseThreadLocalAllocationStack && obj == nullptr) {
        continue;
      }
      if (space->HasAddress(obj)) {
        // This object is in the space, remove it from the array and add it to the sweep buffer
        // if needed.
        if (!mark_bitmap->Test(obj)) {
          if (chunk_free_pos >= kSweepArrayChunkFreeSize) {
            timings_.StartSplit("FreeList");
            freed_objects += chunk_free_pos;
            freed_bytes += alloc_space->FreeList(self, chunk_free_pos, chunk_free_buffer);
            timings_.EndSplit();
            chunk_free_pos = 0;
          }
          chunk_free_buffer[chunk_free_pos++] = obj;
        }
      } else {
        *(out++) = obj;
      }
    }
    if (chunk_free_pos > 0) {
      timings_.StartSplit("FreeList");
      freed_objects += chunk_free_pos;
      freed_bytes += alloc_space->FreeList(self, chunk_free_pos, chunk_free_buffer);
      timings_.EndSplit();
      chunk_free_pos = 0;
    }
    // All of the references which the space contained are no longer in the allocation stack,
    // update the count.
    count = out - objects;
  }
  // Handle the large object space.
  space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
  accounting::ObjectSet* large_live_objects = large_object_space->GetLiveObjects();
  accounting::ObjectSet* large_mark_objects = large_object_space->GetMarkObjects();
  if (swap_bitmaps) {
    std::swap(large_live_objects, large_mark_objects);
  }
  for (size_t i = 0; i < count; ++i) {
    Object* obj = objects[i];
    // Handle large objects.
    if (kUseThreadLocalAllocationStack && obj == nullptr) {
      continue;
    }
    if (!large_mark_objects->Test(obj)) {
      ++freed_large_objects;
      freed_large_object_bytes += large_object_space->Free(self, obj);
    }
  }
  timings_.EndSplit();

  timings_.StartSplit("RecordFree");
  VLOG(heap) << "Freed " << freed_objects << "/" << count
             << " objects with size " << PrettySize(freed_bytes);
  heap_->RecordFree(freed_objects + freed_large_objects, freed_bytes + freed_large_object_bytes);
  freed_objects_.FetchAndAdd(freed_objects);
  freed_large_objects_.FetchAndAdd(freed_large_objects);
  freed_bytes_.FetchAndAdd(freed_bytes);
  freed_large_object_bytes_.FetchAndAdd(freed_large_object_bytes);
  timings_.EndSplit();

  timings_.StartSplit("ResetStack");
  allocations->Reset();
  timings_.EndSplit();
}

void MarkSweep::Sweep(bool swap_bitmaps) {
  DCHECK(mark_stack_->IsEmpty());
  TimingLogger::ScopedSplit("Sweep", &timings_);
  for (const auto& space : GetHeap()->GetContinuousSpaces()) {
    if (space->IsContinuousMemMapAllocSpace()) {
      space::ContinuousMemMapAllocSpace* alloc_space = space->AsContinuousMemMapAllocSpace();
      TimingLogger::ScopedSplit split(
          alloc_space->IsZygoteSpace() ? "SweepZygoteSpace" : "SweepMallocSpace", &timings_);
      size_t freed_objects = 0;
      size_t freed_bytes = 0;
      alloc_space->Sweep(swap_bitmaps, &freed_objects, &freed_bytes);
      heap_->RecordFree(freed_objects, freed_bytes);
      freed_objects_.FetchAndAdd(freed_objects);
      freed_bytes_.FetchAndAdd(freed_bytes);
    }
  }
  SweepLargeObjects(swap_bitmaps);
}

void MarkSweep::SweepLargeObjects(bool swap_bitmaps) {
  TimingLogger::ScopedSplit("SweepLargeObjects", &timings_);
  size_t freed_objects = 0;
  size_t freed_bytes = 0;
  GetHeap()->GetLargeObjectsSpace()->Sweep(swap_bitmaps, &freed_objects, &freed_bytes);
  freed_large_objects_.FetchAndAdd(freed_objects);
  freed_large_object_bytes_.FetchAndAdd(freed_bytes);
  GetHeap()->RecordFree(freed_objects, freed_bytes);
}

// Process the "referent" field in a java.lang.ref.Reference. If the
// referent has not yet been marked, put it on the appropriate list in
// the heap for later processing.
void MarkSweep::DelayReferenceReferent(mirror::Class* klass, Object* obj) {
  DCHECK(klass != nullptr);
  heap_->DelayReferenceReferent(klass, obj->AsReference(), IsMarkedCallback, this);
}

class MarkObjectVisitor {
 public:
  explicit MarkObjectVisitor(MarkSweep* const mark_sweep) ALWAYS_INLINE : mark_sweep_(mark_sweep) {}

  // TODO: Fix this when annotalysis works with visitors.
  void operator()(const Object* /* obj */, const Object* ref, const MemberOffset& /* offset */,
                  bool /* is_static */) const ALWAYS_INLINE
      NO_THREAD_SAFETY_ANALYSIS {
    if (kCheckLocks) {
      Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
      Locks::heap_bitmap_lock_->AssertExclusiveHeld(Thread::Current());
    }
    mark_sweep_->MarkObject(ref);
  }

 private:
  MarkSweep* const mark_sweep_;
};

// Scans an object reference. Determines the type of the reference
// and dispatches to a specialized scanning routine.
void MarkSweep::ScanObject(Object* obj) {
  MarkObjectVisitor visitor(this);
  ScanObjectVisit(obj, visitor);
}

void MarkSweep::ProcessMarkStackPausedCallback(void* arg) {
  DCHECK(arg != nullptr);
  reinterpret_cast<MarkSweep*>(arg)->ProcessMarkStack(true);
}

void MarkSweep::ProcessMarkStackParallel(size_t thread_count) {
  Thread* self = Thread::Current();
  ThreadPool* thread_pool = GetHeap()->GetThreadPool();
  const size_t chunk_size = std::min(mark_stack_->Size() / thread_count + 1,
                                     static_cast<size_t>(MarkStackTask<false>::kMaxSize));
  CHECK_GT(chunk_size, 0U);
  // Split the current mark stack up into work tasks.
  for (mirror::Object **it = mark_stack_->Begin(), **end = mark_stack_->End(); it < end; ) {
    const size_t delta = std::min(static_cast<size_t>(end - it), chunk_size);
    thread_pool->AddTask(self, new MarkStackTask<false>(thread_pool, this, delta,
                                                        const_cast<const mirror::Object**>(it)));
    it += delta;
  }
  thread_pool->SetMaxActiveWorkers(thread_count - 1);
  thread_pool->StartWorkers(self);
  thread_pool->Wait(self, true, true);
  thread_pool->StopWorkers(self);
  mark_stack_->Reset();
  CHECK_EQ(work_chunks_created_, work_chunks_deleted_) << " some of the work chunks were leaked";
}

// Scan anything that's on the mark stack.
void MarkSweep::ProcessMarkStack(bool paused) {
  timings_.StartSplit(paused ? "(Paused)ProcessMarkStack" : "ProcessMarkStack");
  size_t thread_count = GetThreadCount(paused);
  if (kParallelProcessMarkStack && thread_count > 1 &&
      mark_stack_->Size() >= kMinimumParallelMarkStackSize) {
    ProcessMarkStackParallel(thread_count);
  } else {
    // TODO: Tune this.
    static const size_t kFifoSize = 4;
    BoundedFifoPowerOfTwo<Object*, kFifoSize> prefetch_fifo;
    for (;;) {
      Object* obj = NULL;
      if (kUseMarkStackPrefetch) {
        while (!mark_stack_->IsEmpty() && prefetch_fifo.size() < kFifoSize) {
          Object* obj = mark_stack_->PopBack();
          DCHECK(obj != NULL);
          __builtin_prefetch(obj);
          prefetch_fifo.push_back(obj);
        }
        if (prefetch_fifo.empty()) {
          break;
        }
        obj = prefetch_fifo.front();
        prefetch_fifo.pop_front();
      } else {
        if (mark_stack_->IsEmpty()) {
          break;
        }
        obj = mark_stack_->PopBack();
      }
      DCHECK(obj != NULL);
      ScanObject(obj);
    }
  }
  timings_.EndSplit();
}

inline bool MarkSweep::IsMarked(const Object* object) const
    SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
  if (immune_region_.ContainsObject(object)) {
    return true;
  }
  DCHECK(current_mark_bitmap_ != NULL);
  if (current_mark_bitmap_->HasAddress(object)) {
    return current_mark_bitmap_->Test(object);
  }
  return heap_->GetMarkBitmap()->Test(object);
}

void MarkSweep::FinishPhase() {
  TimingLogger::ScopedSplit split("FinishPhase", &timings_);
  // Can't enqueue references if we hold the mutator lock.
  Heap* heap = GetHeap();
  timings_.NewSplit("PostGcVerification");
  heap->PostGcVerification(this);

  // Update the cumulative statistics.
  total_freed_objects_ += GetFreedObjects() + GetFreedLargeObjects();
  total_freed_bytes_ += GetFreedBytes() + GetFreedLargeObjectBytes();

  // Ensure that the mark stack is empty.
  CHECK(mark_stack_->IsEmpty());

  if (kCountScannedTypes) {
    VLOG(gc) << "MarkSweep scanned classes=" << class_count_ << " arrays=" << array_count_
             << " other=" << other_count_;
  }

  if (kCountTasks) {
    VLOG(gc) << "Total number of work chunks allocated: " << work_chunks_created_;
  }

  if (kMeasureOverhead) {
    VLOG(gc) << "Overhead time " << PrettyDuration(overhead_time_);
  }

  if (kProfileLargeObjects) {
    VLOG(gc) << "Large objects tested " << large_object_test_ << " marked " << large_object_mark_;
  }

  if (kCountClassesMarked) {
    VLOG(gc) << "Classes marked " << classes_marked_;
  }

  if (kCountJavaLangRefs) {
    VLOG(gc) << "References scanned " << reference_count_;
  }

  // Update the cumulative loggers.
  cumulative_timings_.Start();
  cumulative_timings_.AddLogger(timings_);
  cumulative_timings_.End();

  // Clear all of the spaces' mark bitmaps.
  for (const auto& space : GetHeap()->GetContinuousSpaces()) {
    accounting::SpaceBitmap* bitmap = space->GetMarkBitmap();
    if (bitmap != nullptr &&
        space->GetGcRetentionPolicy() != space::kGcRetentionPolicyNeverCollect) {
      bitmap->Clear();
    }
  }
  mark_stack_->Reset();

  // Reset the marked large objects.
  space::LargeObjectSpace* large_objects = GetHeap()->GetLargeObjectsSpace();
  large_objects->GetMarkObjects()->Clear();
}

void MarkSweep::RevokeAllThreadLocalBuffers() {
  if (kRevokeRosAllocThreadLocalBuffersAtCheckpoint && IsConcurrent()) {
    // If concurrent, rosalloc thread-local buffers are revoked at the
    // thread checkpoint. Bump pointer space thread-local buffers must
    // not be in use.
    GetHeap()->AssertAllBumpPointerSpaceThreadLocalBuffersAreRevoked();
  } else {
    timings_.StartSplit("(Paused)RevokeAllThreadLocalBuffers");
    GetHeap()->RevokeAllThreadLocalBuffers();
    timings_.EndSplit();
  }
}

}  // namespace collector
}  // namespace gc
}  // namespace art