mark_sweep.cc revision bbdc5bc5fd5141711879a6c85d80ac45b7aad5d0
/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "mark_sweep.h"

#include <functional>
#include <numeric>
#include <climits>
#include <vector>

#include "base/bounded_fifo.h"
#include "base/logging.h"
#include "base/macros.h"
#include "base/mutex-inl.h"
#include "base/timing_logger.h"
#include "gc/accounting/card_table-inl.h"
#include "gc/accounting/heap_bitmap-inl.h"
#include "gc/accounting/mod_union_table.h"
#include "gc/accounting/space_bitmap-inl.h"
#include "gc/heap.h"
#include "gc/reference_processor.h"
#include "gc/space/image_space.h"
#include "gc/space/large_object_space.h"
#include "gc/space/space-inl.h"
#include "mark_sweep-inl.h"
#include "mirror/art_field-inl.h"
#include "mirror/object-inl.h"
#include "runtime.h"
#include "scoped_thread_state_change.h"
#include "thread-inl.h"
#include "thread_list.h"

using ::art::mirror::ArtField;
using ::art::mirror::Class;
using ::art::mirror::Object;
using ::art::mirror::ObjectArray;

namespace art {
namespace gc {
namespace collector {

// Performance options.
static constexpr bool kUseRecursiveMark = false;
static constexpr bool kUseMarkStackPrefetch = true;
static constexpr size_t kSweepArrayChunkFreeSize = 1024;
static constexpr bool kPreCleanCards = true;

// Parallelism options.
static constexpr bool kParallelCardScan = true;
static constexpr bool kParallelRecursiveMark = true;
// Don't attempt to parallelize mark stack processing unless the mark stack is at least n
// elements. This is temporary until we reduce the overhead caused by allocating tasks, etc. Not
// having this can add overhead in ProcessReferences since we may end up doing many calls of
// ProcessMarkStack with very small mark stacks.
static constexpr size_t kMinimumParallelMarkStackSize = 128;
static constexpr bool kParallelProcessMarkStack = true;

// Profiling and information flags.
static constexpr bool kProfileLargeObjects = false;
static constexpr bool kMeasureOverhead = false;
static constexpr bool kCountTasks = false;
static constexpr bool kCountJavaLangRefs = false;
static constexpr bool kCountMarkedObjects = false;

// Turn off kCheckLocks when profiling the GC since it slows the GC down by up to 40%.
static constexpr bool kCheckLocks = kDebugLocking;
static constexpr bool kVerifyRootsMarked = kIsDebugBuild;

// If true, revoke the rosalloc thread-local buffers at the
// checkpoint, as opposed to during the pause.
static constexpr bool kRevokeRosAllocThreadLocalBuffersAtCheckpoint = true;

void MarkSweep::BindBitmaps() {
  timings_.StartSplit("BindBitmaps");
  WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
  // Mark all of the spaces we never collect as immune.
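  // Objects in immune spaces are treated as live for the entire collection and are never
  // swept; MarkObjectNonNull relies on this to skip setting mark bits for them.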
  for (const auto& space : GetHeap()->GetContinuousSpaces()) {
    if (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyNeverCollect) {
      CHECK(immune_region_.AddContinuousSpace(space)) << "Failed to add space " << *space;
    }
  }
  timings_.EndSplit();
}

MarkSweep::MarkSweep(Heap* heap, bool is_concurrent, const std::string& name_prefix)
    : GarbageCollector(heap,
                       name_prefix +
                       (is_concurrent ? "concurrent mark sweep" : "mark sweep")),
      current_space_bitmap_(nullptr), mark_bitmap_(nullptr), mark_stack_(nullptr),
      gc_barrier_(new Barrier(0)),
      mark_stack_lock_("mark sweep mark stack lock", kMarkSweepMarkStackLock),
      is_concurrent_(is_concurrent), live_stack_freeze_size_(0) {
  std::string error_msg;
  MemMap* mem_map = MemMap::MapAnonymous(
      "mark sweep sweep array free buffer", nullptr,
      RoundUp(kSweepArrayChunkFreeSize * sizeof(mirror::Object*), kPageSize),
      PROT_READ | PROT_WRITE, false, &error_msg);
  CHECK(mem_map != nullptr) << "Couldn't allocate sweep array free buffer: " << error_msg;
  sweep_array_free_buffer_mem_map_.reset(mem_map);
}

void MarkSweep::InitializePhase() {
  TimingLogger::ScopedSplit split("InitializePhase", &timings_);
  mark_stack_ = heap_->GetMarkStack();
  DCHECK(mark_stack_ != nullptr);
  immune_region_.Reset();
  class_count_.StoreRelaxed(0);
  array_count_.StoreRelaxed(0);
  other_count_.StoreRelaxed(0);
  large_object_test_.StoreRelaxed(0);
  large_object_mark_.StoreRelaxed(0);
  overhead_time_.StoreRelaxed(0);
  work_chunks_created_.StoreRelaxed(0);
  work_chunks_deleted_.StoreRelaxed(0);
  reference_count_.StoreRelaxed(0);
  mark_null_count_.StoreRelaxed(0);
  mark_immune_count_.StoreRelaxed(0);
  mark_fastpath_count_.StoreRelaxed(0);
  mark_slowpath_count_.StoreRelaxed(0);
  {
    // TODO: I don't think we should need heap bitmap lock to Get the mark bitmap.
    ReaderMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
    mark_bitmap_ = heap_->GetMarkBitmap();
  }
  if (!clear_soft_references_) {
    // Always clear soft references for non-sticky collections.
    clear_soft_references_ = GetGcType() != collector::kGcTypeSticky;
  }
}

void MarkSweep::RunPhases() {
  Thread* self = Thread::Current();
  InitializePhase();
  Locks::mutator_lock_->AssertNotHeld(self);
  if (IsConcurrent()) {
    GetHeap()->PreGcVerification(this);
    {
      ReaderMutexLock mu(self, *Locks::mutator_lock_);
      MarkingPhase();
    }
    ScopedPause pause(this);
    GetHeap()->PrePauseRosAllocVerification(this);
    PausePhase();
    RevokeAllThreadLocalBuffers();
  } else {
    ScopedPause pause(this);
    GetHeap()->PreGcVerificationPaused(this);
    MarkingPhase();
    GetHeap()->PrePauseRosAllocVerification(this);
    PausePhase();
    RevokeAllThreadLocalBuffers();
  }
  {
    // Sweeping is always done concurrently, even for non-concurrent mark sweep.
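    // Taking the mutator lock in shared mode (ReaderMutexLock) is what lets mutators keep
    // running while unmarked objects are reclaimed.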
    ReaderMutexLock mu(self, *Locks::mutator_lock_);
    ReclaimPhase();
  }
  GetHeap()->PostGcVerification(this);
  FinishPhase();
}

void MarkSweep::ProcessReferences(Thread* self) {
  TimingLogger::ScopedSplit split("ProcessReferences", &timings_);
  WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
  GetHeap()->GetReferenceProcessor()->ProcessReferences(
      true, &timings_, clear_soft_references_, &IsMarkedCallback, &MarkObjectCallback,
      &ProcessMarkStackCallback, this);
}

void MarkSweep::PausePhase() {
  TimingLogger::ScopedSplit split("(Paused)PausePhase", &timings_);
  Thread* self = Thread::Current();
  Locks::mutator_lock_->AssertExclusiveHeld(self);
  if (IsConcurrent()) {
    // Handle the dirty objects if we are a concurrent GC.
    WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
    // Re-mark root set.
    ReMarkRoots();
    // Scan dirty objects; this is only required for a concurrent GC, since mutators may have
    // dirtied cards while the marking phase ran concurrently.
    RecursiveMarkDirtyObjects(true, accounting::CardTable::kCardDirty);
  }
  {
    TimingLogger::ScopedSplit split("SwapStacks", &timings_);
    WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
    heap_->SwapStacks(self);
    live_stack_freeze_size_ = heap_->GetLiveStack()->Size();
    // Need to revoke all the thread local allocation stacks since we just swapped the allocation
    // stacks and don't want anybody to allocate into the live stack.
    RevokeAllThreadLocalAllocationStacks(self);
  }
  timings_.StartSplit("PreSweepingGcVerification");
  heap_->PreSweepingGcVerification(this);
  timings_.EndSplit();
  // Disallow new system weaks to prevent a race which occurs when someone adds a new system
  // weak before we sweep them. Since this new system weak may not be marked, the GC may
  // incorrectly sweep it. This also fixes a race where interning may attempt to return a strong
  // reference to a string that is about to be swept.
  Runtime::Current()->DisallowNewSystemWeaks();
  // Enable the reference processing slow path; this needs to be done with mutators paused since
  // there is no lock in the GetReferent fast path.
  GetHeap()->GetReferenceProcessor()->EnableSlowPath();
}

void MarkSweep::PreCleanCards() {
  // Don't do this for non-concurrent GCs since they don't have any dirty cards.
  if (kPreCleanCards && IsConcurrent()) {
    Thread* self = Thread::Current();
    CHECK(!Locks::mutator_lock_->IsExclusiveHeld(self));
    // Process dirty cards and add dirty cards to mod union tables; this also ages cards.
    heap_->ProcessCards(timings_, false);
    // The checkpoint root marking is required to avoid a race condition which occurs if the
    // following happens during a reference write:
    // 1. mutator dirties the card (write barrier)
    // 2. GC ages the card (the above ProcessCards call)
    // 3. GC scans the object (the RecursiveMarkDirtyObjects call below)
    // 4. mutator writes the value (corresponding to the write barrier in 1.)
    // This causes the GC to age the card but not necessarily mark the reference which the mutator
    // wrote into the object stored in the card.
    // Having the checkpoint fixes this issue since it ensures that the card mark and the
    // reference write are visible to the GC before the card is scanned (this is due to locks being
    // acquired / released in the checkpoint code).
    // The other roots are also marked to help reduce the pause.
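    // Note: passing false below means rosalloc thread-local buffers are not revoked at this
    // checkpoint; pre-cleaning only needs the checkpoint's root-visibility side effect.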
    MarkRootsCheckpoint(self, false);
    MarkNonThreadRoots();
    MarkConcurrentRoots(
        static_cast<VisitRootFlags>(kVisitRootFlagClearRootLog | kVisitRootFlagNewRoots));
    // Process the newly aged cards.
    RecursiveMarkDirtyObjects(false, accounting::CardTable::kCardDirty - 1);
    // TODO: Empty allocation stack to reduce the number of objects we need to test / mark as live
    // in the next GC.
  }
}

void MarkSweep::RevokeAllThreadLocalAllocationStacks(Thread* self) {
  if (kUseThreadLocalAllocationStack) {
    timings_.NewSplit("RevokeAllThreadLocalAllocationStacks");
    Locks::mutator_lock_->AssertExclusiveHeld(self);
    heap_->RevokeAllThreadLocalAllocationStacks(self);
  }
}

void MarkSweep::MarkingPhase() {
  TimingLogger::ScopedSplit split("MarkingPhase", &timings_);
  Thread* self = Thread::Current();

  BindBitmaps();
  FindDefaultSpaceBitmap();

  // Process dirty cards and add dirty cards to mod union tables.
  heap_->ProcessCards(timings_, false);

  WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
  MarkRoots(self);
  MarkReachableObjects();
  // Pre-clean dirtied cards to reduce pauses.
  PreCleanCards();
}

void MarkSweep::UpdateAndMarkModUnion() {
  for (const auto& space : heap_->GetContinuousSpaces()) {
    if (immune_region_.ContainsSpace(space)) {
      const char* name = space->IsZygoteSpace() ? "UpdateAndMarkZygoteModUnionTable" :
          "UpdateAndMarkImageModUnionTable";
      TimingLogger::ScopedSplit split(name, &timings_);
      accounting::ModUnionTable* mod_union_table = heap_->FindModUnionTableFromSpace(space);
      CHECK(mod_union_table != nullptr);
      mod_union_table->UpdateAndMarkReferences(MarkHeapReferenceCallback, this);
    }
  }
}

void MarkSweep::MarkReachableObjects() {
  UpdateAndMarkModUnion();
  // Recursively mark all the non-image bits set in the mark bitmap.
  RecursiveMark();
}

void MarkSweep::ReclaimPhase() {
  TimingLogger::ScopedSplit split("ReclaimPhase", &timings_);
  Thread* self = Thread::Current();
  // Process the references concurrently.
  ProcessReferences(self);
  SweepSystemWeaks(self);
  Runtime::Current()->AllowNewSystemWeaks();
  {
    WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);

    // Reclaim unmarked objects.
    Sweep(false);

    // Swap the live and mark bitmaps for each space which we modified. This is an
    // optimization that enables us to not clear live bits inside of the sweep. Only swaps unbound
    // bitmaps.
    timings_.StartSplit("SwapBitmaps");
    SwapBitmaps();
    timings_.EndSplit();

    // Unbind the live and mark bitmaps.
    TimingLogger::ScopedSplit split("UnBindBitmaps", &timings_);
    GetHeap()->UnBindBitmaps();
  }
}

void MarkSweep::FindDefaultSpaceBitmap() {
  TimingLogger::ScopedSplit split("FindDefaultMarkBitmap", &timings_);
  for (const auto& space : GetHeap()->GetContinuousSpaces()) {
    accounting::ContinuousSpaceBitmap* bitmap = space->GetMarkBitmap();
    // We want to have the main space instead of the non-moving space if possible.
    if (bitmap != nullptr &&
        space->GetGcRetentionPolicy() == space::kGcRetentionPolicyAlwaysCollect) {
      current_space_bitmap_ = bitmap;
      // If this is not the non-moving space, exit the loop early since this bitmap is good enough.
      if (space != heap_->GetNonMovingSpace()) {
        break;
      }
    }
  }
  if (current_space_bitmap_ == nullptr) {
    heap_->DumpSpaces();
    LOG(FATAL) << "Could not find a default mark bitmap";
  }
}

void MarkSweep::ExpandMarkStack() {
  ResizeMarkStack(mark_stack_->Capacity() * 2);
}

void MarkSweep::ResizeMarkStack(size_t new_size) {
  // Rare case, no need to have Thread::Current be a parameter.
  if (UNLIKELY(mark_stack_->Size() < mark_stack_->Capacity())) {
    // Someone else acquired the lock and expanded the mark stack before us.
    return;
  }
  std::vector<Object*> temp(mark_stack_->Begin(), mark_stack_->End());
  CHECK_LE(mark_stack_->Size(), new_size);
  mark_stack_->Resize(new_size);
  for (const auto& obj : temp) {
    mark_stack_->PushBack(obj);
  }
}

inline void MarkSweep::MarkObjectNonNullParallel(Object* obj) {
  DCHECK(obj != nullptr);
  if (MarkObjectParallel(obj)) {
    MutexLock mu(Thread::Current(), mark_stack_lock_);
    if (UNLIKELY(mark_stack_->Size() >= mark_stack_->Capacity())) {
      ExpandMarkStack();
    }
    // The object must be pushed onto the mark stack.
    mark_stack_->PushBack(obj);
  }
}

mirror::Object* MarkSweep::MarkObjectCallback(mirror::Object* obj, void* arg) {
  MarkSweep* mark_sweep = reinterpret_cast<MarkSweep*>(arg);
  mark_sweep->MarkObject(obj);
  return obj;
}

void MarkSweep::MarkHeapReferenceCallback(mirror::HeapReference<mirror::Object>* ref, void* arg) {
  reinterpret_cast<MarkSweep*>(arg)->MarkObject(ref->AsMirrorPtr());
}

class MarkSweepMarkObjectSlowPath {
 public:
  explicit MarkSweepMarkObjectSlowPath(MarkSweep* mark_sweep) : mark_sweep_(mark_sweep) {
  }

  void operator()(const Object* obj) const ALWAYS_INLINE {
    if (kProfileLargeObjects) {
      // TODO: Differentiate between marking and testing somehow.
      ++mark_sweep_->large_object_test_;
      ++mark_sweep_->large_object_mark_;
    }
    space::LargeObjectSpace* large_object_space = mark_sweep_->GetHeap()->GetLargeObjectsSpace();
    if (UNLIKELY(obj == nullptr || !IsAligned<kPageSize>(obj) ||
                 (kIsDebugBuild && !large_object_space->Contains(obj)))) {
      LOG(ERROR) << "Tried to mark " << obj << " not contained by any spaces";
      LOG(ERROR) << "Attempting to see if it's a bad root";
      mark_sweep_->VerifyRoots();
      LOG(FATAL) << "Can't mark invalid object";
    }
  }

 private:
  MarkSweep* const mark_sweep_;
};

inline void MarkSweep::MarkObjectNonNull(Object* obj) {
  DCHECK(obj != nullptr);
  if (kUseBakerOrBrooksReadBarrier) {
    // Verify all the objects have the correct pointer installed.
    obj->AssertReadBarrierPointer();
  }
  if (immune_region_.ContainsObject(obj)) {
    if (kCountMarkedObjects) {
      ++mark_immune_count_;
    }
    DCHECK(mark_bitmap_->Test(obj));
  } else if (LIKELY(current_space_bitmap_->HasAddress(obj))) {
    if (kCountMarkedObjects) {
      ++mark_fastpath_count_;
    }
    if (UNLIKELY(!current_space_bitmap_->Set(obj))) {
      PushOnMarkStack(obj);  // This object was not previously marked.
    }
  } else {
    if (kCountMarkedObjects) {
      ++mark_slowpath_count_;
    }
    MarkSweepMarkObjectSlowPath visitor(this);
    // TODO: We already know that the object is not in the current_space_bitmap_ but
    // MarkBitmap::Set will check again.
    if (!mark_bitmap_->Set(obj, visitor)) {
      PushOnMarkStack(obj);  // Was not already marked, push.
    }
  }
}

inline void MarkSweep::PushOnMarkStack(Object* obj) {
  if (UNLIKELY(mark_stack_->Size() >= mark_stack_->Capacity())) {
    // Lock is not needed but is here anyway to please annotalysis.
    MutexLock mu(Thread::Current(), mark_stack_lock_);
    ExpandMarkStack();
  }
  // The object must be pushed onto the mark stack.
  mark_stack_->PushBack(obj);
}

inline bool MarkSweep::MarkObjectParallel(const Object* obj) {
  DCHECK(obj != nullptr);
  if (kUseBakerOrBrooksReadBarrier) {
    // Verify all the objects have the correct pointer installed.
    obj->AssertReadBarrierPointer();
  }
  if (immune_region_.ContainsObject(obj)) {
    DCHECK(IsMarked(obj));
    return false;
  }
  // Try to take advantage of locality of references within a space, failing this find the space
  // the hard way.
  accounting::ContinuousSpaceBitmap* object_bitmap = current_space_bitmap_;
  if (LIKELY(object_bitmap->HasAddress(obj))) {
    return !object_bitmap->AtomicTestAndSet(obj);
  }
  MarkSweepMarkObjectSlowPath visitor(this);
  return !mark_bitmap_->AtomicTestAndSet(obj, visitor);
}

// Used to mark objects when processing the mark stack. If an object is null, it is not marked.
inline void MarkSweep::MarkObject(Object* obj) {
  if (obj != nullptr) {
    MarkObjectNonNull(obj);
  } else if (kCountMarkedObjects) {
    ++mark_null_count_;
  }
}

void MarkSweep::MarkRootParallelCallback(Object** root, void* arg, uint32_t /*thread_id*/,
                                         RootType /*root_type*/) {
  reinterpret_cast<MarkSweep*>(arg)->MarkObjectNonNullParallel(*root);
}

void MarkSweep::VerifyRootMarked(Object** root, void* arg, uint32_t /*thread_id*/,
                                 RootType /*root_type*/) {
  CHECK(reinterpret_cast<MarkSweep*>(arg)->IsMarked(*root));
}

void MarkSweep::MarkRootCallback(Object** root, void* arg, uint32_t /*thread_id*/,
                                 RootType /*root_type*/) {
  reinterpret_cast<MarkSweep*>(arg)->MarkObjectNonNull(*root);
}

void MarkSweep::VerifyRootCallback(const Object* root, void* arg, size_t vreg,
                                   const StackVisitor* visitor, RootType root_type) {
  reinterpret_cast<MarkSweep*>(arg)->VerifyRoot(root, vreg, visitor, root_type);
}

void MarkSweep::VerifyRoot(const Object* root, size_t vreg, const StackVisitor* visitor,
                           RootType root_type) {
  // See if the root is on any space bitmap.
  if (heap_->GetLiveBitmap()->GetContinuousSpaceBitmap(root) == nullptr) {
    space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
    if (!large_object_space->Contains(root)) {
      LOG(ERROR) << "Found invalid root: " << root << " with type " << root_type;
      if (visitor != nullptr) {
        LOG(ERROR) << visitor->DescribeLocation() << " in VReg: " << vreg;
      }
    }
  }
}

void MarkSweep::VerifyRoots() {
  Runtime::Current()->GetThreadList()->VerifyRoots(VerifyRootCallback, this);
}

void MarkSweep::MarkRoots(Thread* self) {
  if (Locks::mutator_lock_->IsExclusiveHeld(self)) {
    // If we exclusively hold the mutator lock, all threads must be suspended.
    timings_.StartSplit("MarkRoots");
    Runtime::Current()->VisitRoots(MarkRootCallback, this);
    timings_.EndSplit();
    RevokeAllThreadLocalAllocationStacks(self);
  } else {
    MarkRootsCheckpoint(self, kRevokeRosAllocThreadLocalBuffersAtCheckpoint);
    // At this point the live stack should no longer have any mutators which push into it.
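    // The checkpoint above had each mutator mark its own thread roots; the remaining global
    // root sets are marked below without suspending threads.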
    MarkNonThreadRoots();
    MarkConcurrentRoots(
        static_cast<VisitRootFlags>(kVisitRootFlagAllRoots | kVisitRootFlagStartLoggingNewRoots));
  }
}

void MarkSweep::MarkNonThreadRoots() {
  timings_.StartSplit("MarkNonThreadRoots");
  Runtime::Current()->VisitNonThreadRoots(MarkRootCallback, this);
  timings_.EndSplit();
}

void MarkSweep::MarkConcurrentRoots(VisitRootFlags flags) {
  timings_.StartSplit("MarkConcurrentRoots");
  // Visit all runtime roots and clear dirty flags.
  Runtime::Current()->VisitConcurrentRoots(MarkRootCallback, this, flags);
  timings_.EndSplit();
}

class ScanObjectVisitor {
 public:
  explicit ScanObjectVisitor(MarkSweep* const mark_sweep) ALWAYS_INLINE
      : mark_sweep_(mark_sweep) {}

  void operator()(Object* obj) const ALWAYS_INLINE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
    if (kCheckLocks) {
      Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
      Locks::heap_bitmap_lock_->AssertExclusiveHeld(Thread::Current());
    }
    mark_sweep_->ScanObject(obj);
  }

 private:
  MarkSweep* const mark_sweep_;
};

class DelayReferenceReferentVisitor {
 public:
  explicit DelayReferenceReferentVisitor(MarkSweep* collector) : collector_(collector) {
  }

  void operator()(mirror::Class* klass, mirror::Reference* ref) const
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
    collector_->DelayReferenceReferent(klass, ref);
  }

 private:
  MarkSweep* const collector_;
};

template <bool kUseFinger = false>
class MarkStackTask : public Task {
 public:
  MarkStackTask(ThreadPool* thread_pool, MarkSweep* mark_sweep, size_t mark_stack_size,
                Object** mark_stack)
      : mark_sweep_(mark_sweep),
        thread_pool_(thread_pool),
        mark_stack_pos_(mark_stack_size) {
    // We may have to copy part of an existing mark stack when another mark stack overflows.
    if (mark_stack_size != 0) {
      DCHECK(mark_stack != nullptr);
      // TODO: Check performance?
      std::copy(mark_stack, mark_stack + mark_stack_size, mark_stack_);
    }
    if (kCountTasks) {
      ++mark_sweep_->work_chunks_created_;
    }
  }

  static const size_t kMaxSize = 1 * KB;

 protected:
  class MarkObjectParallelVisitor {
   public:
    explicit MarkObjectParallelVisitor(MarkStackTask<kUseFinger>* chunk_task,
                                       MarkSweep* mark_sweep) ALWAYS_INLINE
        : chunk_task_(chunk_task), mark_sweep_(mark_sweep) {}

    void operator()(Object* obj, MemberOffset offset, bool /* static */) const ALWAYS_INLINE
        SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
      mirror::Object* ref = obj->GetFieldObject<mirror::Object>(offset);
      if (ref != nullptr && mark_sweep_->MarkObjectParallel(ref)) {
        if (kUseFinger) {
          android_memory_barrier();
          if (reinterpret_cast<uintptr_t>(ref) >=
              static_cast<uintptr_t>(mark_sweep_->atomic_finger_.LoadRelaxed())) {
            return;
          }
        }
        chunk_task_->MarkStackPush(ref);
      }
    }

   private:
    MarkStackTask<kUseFinger>* const chunk_task_;
    MarkSweep* const mark_sweep_;
  };

  class ScanObjectParallelVisitor {
   public:
    explicit ScanObjectParallelVisitor(MarkStackTask<kUseFinger>* chunk_task) ALWAYS_INLINE
        : chunk_task_(chunk_task) {}

    // No thread safety analysis since multiple threads will use this visitor.
    void operator()(Object* obj) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
        EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
      MarkSweep* const mark_sweep = chunk_task_->mark_sweep_;
      MarkObjectParallelVisitor mark_visitor(chunk_task_, mark_sweep);
      DelayReferenceReferentVisitor ref_visitor(mark_sweep);
      mark_sweep->ScanObjectVisit(obj, mark_visitor, ref_visitor);
    }

   private:
    MarkStackTask<kUseFinger>* const chunk_task_;
  };

  virtual ~MarkStackTask() {
    // Make sure that we have cleared our mark stack.
    DCHECK_EQ(mark_stack_pos_, 0U);
    if (kCountTasks) {
      ++mark_sweep_->work_chunks_deleted_;
    }
  }

  MarkSweep* const mark_sweep_;
  ThreadPool* const thread_pool_;
  // Thread local mark stack for this task.
  Object* mark_stack_[kMaxSize];
  // Mark stack position.
  size_t mark_stack_pos_;

  void MarkStackPush(Object* obj) ALWAYS_INLINE {
    if (UNLIKELY(mark_stack_pos_ == kMaxSize)) {
      // Mark stack overflow, give 1/2 the stack to the thread pool as a new work task.
      mark_stack_pos_ /= 2;
      auto* task = new MarkStackTask(thread_pool_, mark_sweep_, kMaxSize - mark_stack_pos_,
                                     mark_stack_ + mark_stack_pos_);
      thread_pool_->AddTask(Thread::Current(), task);
    }
    DCHECK(obj != nullptr);
    DCHECK_LT(mark_stack_pos_, kMaxSize);
    mark_stack_[mark_stack_pos_++] = obj;
  }

  virtual void Finalize() {
    delete this;
  }

  // Scans all of the objects.
  virtual void Run(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
    ScanObjectParallelVisitor visitor(this);
    // TODO: Tune this.
    static const size_t kFifoSize = 4;
    BoundedFifoPowerOfTwo<Object*, kFifoSize> prefetch_fifo;
    for (;;) {
      Object* obj = nullptr;
      if (kUseMarkStackPrefetch) {
        while (mark_stack_pos_ != 0 && prefetch_fifo.size() < kFifoSize) {
          Object* obj = mark_stack_[--mark_stack_pos_];
          DCHECK(obj != nullptr);
          __builtin_prefetch(obj);
          prefetch_fifo.push_back(obj);
        }
        if (UNLIKELY(prefetch_fifo.empty())) {
          break;
        }
        obj = prefetch_fifo.front();
        prefetch_fifo.pop_front();
      } else {
        if (UNLIKELY(mark_stack_pos_ == 0)) {
          break;
        }
        obj = mark_stack_[--mark_stack_pos_];
      }
      DCHECK(obj != nullptr);
      visitor(obj);
    }
  }
};

class CardScanTask : public MarkStackTask<false> {
 public:
  CardScanTask(ThreadPool* thread_pool, MarkSweep* mark_sweep,
               accounting::ContinuousSpaceBitmap* bitmap,
               byte* begin, byte* end, byte minimum_age, size_t mark_stack_size,
               Object** mark_stack_obj)
      : MarkStackTask<false>(thread_pool, mark_sweep, mark_stack_size, mark_stack_obj),
        bitmap_(bitmap),
        begin_(begin),
        end_(end),
        minimum_age_(minimum_age) {
  }

 protected:
  accounting::ContinuousSpaceBitmap* const bitmap_;
  byte* const begin_;
  byte* const end_;
  const byte minimum_age_;

  virtual void Finalize() {
    delete this;
  }

  virtual void Run(Thread* self) NO_THREAD_SAFETY_ANALYSIS {
    ScanObjectParallelVisitor visitor(this);
    accounting::CardTable* card_table = mark_sweep_->GetHeap()->GetCardTable();
    size_t cards_scanned = card_table->Scan(bitmap_, begin_, end_, visitor, minimum_age_);
    VLOG(heap) << "Parallel scanning cards " << reinterpret_cast<void*>(begin_) << " - "
               << reinterpret_cast<void*>(end_) << " = " << cards_scanned;
    // Finish by emptying our local mark stack.
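    // This drains both the stack prefix handed to this task at construction time and any
    // objects pushed by the card scan above.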
    MarkStackTask::Run(self);
  }
};

size_t MarkSweep::GetThreadCount(bool paused) const {
  if (heap_->GetThreadPool() == nullptr || !heap_->CareAboutPauseTimes()) {
    return 1;
  }
  if (paused) {
    return heap_->GetParallelGCThreadCount() + 1;
  } else {
    return heap_->GetConcGCThreadCount() + 1;
  }
}

void MarkSweep::ScanGrayObjects(bool paused, byte minimum_age) {
  accounting::CardTable* card_table = GetHeap()->GetCardTable();
  ThreadPool* thread_pool = GetHeap()->GetThreadPool();
  size_t thread_count = GetThreadCount(paused);
  // The parallel version with only one thread is faster for card scanning; TODO: fix.
  if (kParallelCardScan && thread_count > 1) {
    Thread* self = Thread::Current();
    // Can't have a different split for each space since multiple spaces can have their cards being
    // scanned at the same time.
    timings_.StartSplit(paused ? "(Paused)ScanGrayObjects" : "ScanGrayObjects");
    // Try to take some of the mark stack since we can pass this off to the worker tasks.
    Object** mark_stack_begin = mark_stack_->Begin();
    Object** mark_stack_end = mark_stack_->End();
    const size_t mark_stack_size = mark_stack_end - mark_stack_begin;
    // Estimated number of work tasks we will create.
    const size_t mark_stack_tasks = GetHeap()->GetContinuousSpaces().size() * thread_count;
    DCHECK_NE(mark_stack_tasks, 0U);
    const size_t mark_stack_delta = std::min(CardScanTask::kMaxSize / 2,
                                             mark_stack_size / mark_stack_tasks + 1);
    for (const auto& space : GetHeap()->GetContinuousSpaces()) {
      if (space->GetMarkBitmap() == nullptr) {
        continue;
      }
      byte* card_begin = space->Begin();
      byte* card_end = space->End();
      // Align up the end address. For example, the image space's end
      // may not be card-size-aligned.
      card_end = AlignUp(card_end, accounting::CardTable::kCardSize);
      DCHECK(IsAligned<accounting::CardTable::kCardSize>(card_begin));
      DCHECK(IsAligned<accounting::CardTable::kCardSize>(card_end));
      // Calculate how many bytes of heap we will scan.
      const size_t address_range = card_end - card_begin;
      // Calculate how much address range each task gets.
      const size_t card_delta = RoundUp(address_range / thread_count + 1,
                                        accounting::CardTable::kCardSize);
      // Create the worker tasks for this space.
      while (card_begin != card_end) {
        // Add a range of cards.
        size_t addr_remaining = card_end - card_begin;
        size_t card_increment = std::min(card_delta, addr_remaining);
        // Take from the back of the mark stack.
        size_t mark_stack_remaining = mark_stack_end - mark_stack_begin;
        size_t mark_stack_increment = std::min(mark_stack_delta, mark_stack_remaining);
        mark_stack_end -= mark_stack_increment;
        mark_stack_->PopBackCount(static_cast<int32_t>(mark_stack_increment));
        DCHECK_EQ(mark_stack_end, mark_stack_->End());
        // Add the new task to the thread pool.
        auto* task = new CardScanTask(thread_pool, this, space->GetMarkBitmap(), card_begin,
                                      card_begin + card_increment, minimum_age,
                                      mark_stack_increment, mark_stack_end);
        thread_pool->AddTask(self, task);
        card_begin += card_increment;
      }
    }

    // Note: the card scan below may dirty new cards (and scan them)
    // as a side effect when a Reference object is encountered and
    // queued during the marking. See b/11465268.
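    // GetThreadCount counts the current thread too, so only thread_count - 1 pool workers are
    // needed; the Wait call below also runs tasks on the current thread.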
    thread_pool->SetMaxActiveWorkers(thread_count - 1);
    thread_pool->StartWorkers(self);
    thread_pool->Wait(self, true, true);
    thread_pool->StopWorkers(self);
    timings_.EndSplit();
  } else {
    for (const auto& space : GetHeap()->GetContinuousSpaces()) {
      if (space->GetMarkBitmap() != nullptr) {
        // Image spaces are handled properly since live == marked for them.
        switch (space->GetGcRetentionPolicy()) {
          case space::kGcRetentionPolicyNeverCollect:
            timings_.StartSplit(paused ? "(Paused)ScanGrayImageSpaceObjects" :
                "ScanGrayImageSpaceObjects");
            break;
          case space::kGcRetentionPolicyFullCollect:
            timings_.StartSplit(paused ? "(Paused)ScanGrayZygoteSpaceObjects" :
                "ScanGrayZygoteSpaceObjects");
            break;
          case space::kGcRetentionPolicyAlwaysCollect:
            timings_.StartSplit(paused ? "(Paused)ScanGrayAllocSpaceObjects" :
                "ScanGrayAllocSpaceObjects");
            break;
        }
        ScanObjectVisitor visitor(this);
        card_table->Scan(space->GetMarkBitmap(), space->Begin(), space->End(), visitor, minimum_age);
        timings_.EndSplit();
      }
    }
  }
}

class RecursiveMarkTask : public MarkStackTask<false> {
 public:
  RecursiveMarkTask(ThreadPool* thread_pool, MarkSweep* mark_sweep,
                    accounting::ContinuousSpaceBitmap* bitmap, uintptr_t begin, uintptr_t end)
      : MarkStackTask<false>(thread_pool, mark_sweep, 0, nullptr),
        bitmap_(bitmap),
        begin_(begin),
        end_(end) {
  }

 protected:
  accounting::ContinuousSpaceBitmap* const bitmap_;
  const uintptr_t begin_;
  const uintptr_t end_;

  virtual void Finalize() {
    delete this;
  }

  // Scans all of the objects.
  virtual void Run(Thread* self) NO_THREAD_SAFETY_ANALYSIS {
    ScanObjectParallelVisitor visitor(this);
    bitmap_->VisitMarkedRange(begin_, end_, visitor);
    // Finish by emptying our local mark stack.
    MarkStackTask::Run(self);
  }
};

// Populates the mark stack based on the set of marked objects and
// recursively marks until the mark stack is emptied.
void MarkSweep::RecursiveMark() {
  TimingLogger::ScopedSplit split("RecursiveMark", &timings_);
  // RecursiveMark will build the lists of known instances of the Reference classes. See
  // DelayReferenceReferent for details.
  if (kUseRecursiveMark) {
    const bool partial = GetGcType() == kGcTypePartial;
    ScanObjectVisitor scan_visitor(this);
    auto* self = Thread::Current();
    ThreadPool* thread_pool = heap_->GetThreadPool();
    size_t thread_count = GetThreadCount(false);
    const bool parallel = kParallelRecursiveMark && thread_count > 1;
    mark_stack_->Reset();
    for (const auto& space : GetHeap()->GetContinuousSpaces()) {
      if ((space->GetGcRetentionPolicy() == space::kGcRetentionPolicyAlwaysCollect) ||
          (!partial && space->GetGcRetentionPolicy() == space::kGcRetentionPolicyFullCollect)) {
        current_space_bitmap_ = space->GetMarkBitmap();
        if (current_space_bitmap_ == nullptr) {
          continue;
        }
        if (parallel) {
          // We will use the mark stack in the future.
          // CHECK(mark_stack_->IsEmpty());
          // This function does not handle heap end increasing, so we must use the space end.
          uintptr_t begin = reinterpret_cast<uintptr_t>(space->Begin());
          uintptr_t end = reinterpret_cast<uintptr_t>(space->End());
          atomic_finger_.StoreRelaxed(AtomicInteger::MaxValue());

          // Create a few worker tasks.
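          // Create roughly twice as many tasks as threads, presumably so that ranges with
          // uneven densities of marked objects balance out across the workers.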
          const size_t n = thread_count * 2;
          while (begin != end) {
            uintptr_t start = begin;
            uintptr_t delta = (end - begin) / n;
            delta = RoundUp(delta, KB);
            if (delta < 16 * KB) delta = end - begin;
            begin += delta;
            auto* task = new RecursiveMarkTask(thread_pool, this, current_space_bitmap_, start,
                                               begin);
            thread_pool->AddTask(self, task);
          }
          thread_pool->SetMaxActiveWorkers(thread_count - 1);
          thread_pool->StartWorkers(self);
          thread_pool->Wait(self, true, true);
          thread_pool->StopWorkers(self);
        } else {
          // This function does not handle heap end increasing, so we must use the space end.
          uintptr_t begin = reinterpret_cast<uintptr_t>(space->Begin());
          uintptr_t end = reinterpret_cast<uintptr_t>(space->End());
          current_space_bitmap_->VisitMarkedRange(begin, end, scan_visitor);
        }
      }
    }
  }
  ProcessMarkStack(false);
}

mirror::Object* MarkSweep::IsMarkedCallback(mirror::Object* object, void* arg) {
  if (reinterpret_cast<MarkSweep*>(arg)->IsMarked(object)) {
    return object;
  }
  return nullptr;
}

void MarkSweep::RecursiveMarkDirtyObjects(bool paused, byte minimum_age) {
  ScanGrayObjects(paused, minimum_age);
  ProcessMarkStack(paused);
}

void MarkSweep::ReMarkRoots() {
  Locks::mutator_lock_->AssertExclusiveHeld(Thread::Current());
  timings_.StartSplit("(Paused)ReMarkRoots");
  Runtime::Current()->VisitRoots(
      MarkRootCallback, this, static_cast<VisitRootFlags>(kVisitRootFlagNewRoots |
                                                          kVisitRootFlagStopLoggingNewRoots |
                                                          kVisitRootFlagClearRootLog));
  timings_.EndSplit();
  if (kVerifyRootsMarked) {
    timings_.StartSplit("(Paused)VerifyRoots");
    Runtime::Current()->VisitRoots(VerifyRootMarked, this);
    timings_.EndSplit();
  }
}

void MarkSweep::SweepSystemWeaks(Thread* self) {
  WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
  timings_.StartSplit("SweepSystemWeaks");
  Runtime::Current()->SweepSystemWeaks(IsMarkedCallback, this);
  timings_.EndSplit();
}

mirror::Object* MarkSweep::VerifySystemWeakIsLiveCallback(Object* obj, void* arg) {
  reinterpret_cast<MarkSweep*>(arg)->VerifyIsLive(obj);
  // We don't actually want to sweep the object, so let's return "marked".
  return obj;
}

void MarkSweep::VerifyIsLive(const Object* obj) {
  if (!heap_->GetLiveBitmap()->Test(obj)) {
    if (std::find(heap_->allocation_stack_->Begin(), heap_->allocation_stack_->End(), obj) ==
        heap_->allocation_stack_->End()) {
      // Object not found!
      heap_->DumpSpaces();
      LOG(FATAL) << "Found dead object " << obj;
    }
  }
}

void MarkSweep::VerifySystemWeaks() {
  // Verify system weaks using a special object visitor which returns the input object.
  Runtime::Current()->SweepSystemWeaks(VerifySystemWeakIsLiveCallback, this);
}

class CheckpointMarkThreadRoots : public Closure {
 public:
  explicit CheckpointMarkThreadRoots(MarkSweep* mark_sweep,
                                     bool revoke_ros_alloc_thread_local_buffers_at_checkpoint)
      : mark_sweep_(mark_sweep),
        revoke_ros_alloc_thread_local_buffers_at_checkpoint_(
            revoke_ros_alloc_thread_local_buffers_at_checkpoint) {
  }

  virtual void Run(Thread* thread) OVERRIDE NO_THREAD_SAFETY_ANALYSIS {
    ATRACE_BEGIN("Marking thread roots");
    // Note: self is not necessarily equal to thread since thread may be suspended.
    Thread* self = Thread::Current();
    CHECK(thread == self || thread->IsSuspended() || thread->GetState() == kWaitingPerformingGc)
        << thread->GetState() << " thread " << thread << " self " << self;
    thread->VisitRoots(MarkSweep::MarkRootParallelCallback, mark_sweep_);
    ATRACE_END();
    if (revoke_ros_alloc_thread_local_buffers_at_checkpoint_) {
      ATRACE_BEGIN("RevokeRosAllocThreadLocalBuffers");
      mark_sweep_->GetHeap()->RevokeRosAllocThreadLocalBuffers(thread);
      ATRACE_END();
    }
    mark_sweep_->GetBarrier().Pass(self);
  }

 private:
  MarkSweep* const mark_sweep_;
  const bool revoke_ros_alloc_thread_local_buffers_at_checkpoint_;
};

void MarkSweep::MarkRootsCheckpoint(Thread* self,
                                    bool revoke_ros_alloc_thread_local_buffers_at_checkpoint) {
  CheckpointMarkThreadRoots check_point(this, revoke_ros_alloc_thread_local_buffers_at_checkpoint);
  timings_.StartSplit("MarkRootsCheckpoint");
  ThreadList* thread_list = Runtime::Current()->GetThreadList();
  // Request that the checkpoint be run on all threads, returning a count of the threads that
  // must run through the barrier, including self.
  size_t barrier_count = thread_list->RunCheckpoint(&check_point);
  // Release locks then wait for all mutator threads to pass the barrier.
  // TODO: optimize to not release locks when there are no threads to wait for.
  Locks::heap_bitmap_lock_->ExclusiveUnlock(self);
  Locks::mutator_lock_->SharedUnlock(self);
  {
    ScopedThreadStateChange tsc(self, kWaitingForCheckPointsToRun);
    gc_barrier_->Increment(self, barrier_count);
  }
  Locks::mutator_lock_->SharedLock(self);
  Locks::heap_bitmap_lock_->ExclusiveLock(self);
  timings_.EndSplit();
}

void MarkSweep::SweepArray(accounting::ObjectStack* allocations, bool swap_bitmaps) {
  timings_.StartSplit("SweepArray");
  Thread* self = Thread::Current();
  mirror::Object** chunk_free_buffer = reinterpret_cast<mirror::Object**>(
      sweep_array_free_buffer_mem_map_->BaseBegin());
  size_t chunk_free_pos = 0;
  size_t freed_bytes = 0;
  size_t freed_large_object_bytes = 0;
  size_t freed_objects = 0;
  size_t freed_large_objects = 0;
  // How many objects are left in the array, modified after each space is swept.
  Object** objects = allocations->Begin();
  size_t count = allocations->Size();
  // Change the order so that the non-moving space is swept last as an optimization.
  std::vector<space::ContinuousSpace*> sweep_spaces;
  space::ContinuousSpace* non_moving_space = nullptr;
  for (space::ContinuousSpace* space : heap_->GetContinuousSpaces()) {
    if (space->IsAllocSpace() && !immune_region_.ContainsSpace(space) &&
        space->GetLiveBitmap() != nullptr) {
      if (space == heap_->GetNonMovingSpace()) {
        non_moving_space = space;
      } else {
        sweep_spaces.push_back(space);
      }
    }
  }
  // Unlikely to sweep a significant amount of non-movable objects, so we do these after the
  // other alloc spaces as an optimization.
  if (non_moving_space != nullptr) {
    sweep_spaces.push_back(non_moving_space);
  }
  // Start by sweeping the continuous spaces.
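  // For each space, partition the allocation stack: unmarked objects belonging to the space
  // are batched into chunk_free_buffer and freed kSweepArrayChunkFreeSize at a time, while
  // objects from other spaces are compacted to the front of the array for the next pass.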
  for (space::ContinuousSpace* space : sweep_spaces) {
    space::AllocSpace* alloc_space = space->AsAllocSpace();
    accounting::ContinuousSpaceBitmap* live_bitmap = space->GetLiveBitmap();
    accounting::ContinuousSpaceBitmap* mark_bitmap = space->GetMarkBitmap();
    if (swap_bitmaps) {
      std::swap(live_bitmap, mark_bitmap);
    }
    Object** out = objects;
    for (size_t i = 0; i < count; ++i) {
      Object* obj = objects[i];
      if (kUseThreadLocalAllocationStack && obj == nullptr) {
        continue;
      }
      if (space->HasAddress(obj)) {
        // This object is in the space, remove it from the array and add it to the sweep buffer
        // if needed.
        if (!mark_bitmap->Test(obj)) {
          if (chunk_free_pos >= kSweepArrayChunkFreeSize) {
            timings_.StartSplit("FreeList");
            freed_objects += chunk_free_pos;
            freed_bytes += alloc_space->FreeList(self, chunk_free_pos, chunk_free_buffer);
            timings_.EndSplit();
            chunk_free_pos = 0;
          }
          chunk_free_buffer[chunk_free_pos++] = obj;
        }
      } else {
        *(out++) = obj;
      }
    }
    if (chunk_free_pos > 0) {
      timings_.StartSplit("FreeList");
      freed_objects += chunk_free_pos;
      freed_bytes += alloc_space->FreeList(self, chunk_free_pos, chunk_free_buffer);
      timings_.EndSplit();
      chunk_free_pos = 0;
    }
    // All of the references which the space contained are no longer in the allocation stack;
    // update the count.
    count = out - objects;
  }
  // Handle the large object space.
  space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
  accounting::LargeObjectBitmap* large_live_objects = large_object_space->GetLiveBitmap();
  accounting::LargeObjectBitmap* large_mark_objects = large_object_space->GetMarkBitmap();
  if (swap_bitmaps) {
    std::swap(large_live_objects, large_mark_objects);
  }
  for (size_t i = 0; i < count; ++i) {
    Object* obj = objects[i];
    // Handle large objects.
    if (kUseThreadLocalAllocationStack && obj == nullptr) {
      continue;
    }
    if (!large_mark_objects->Test(obj)) {
      ++freed_large_objects;
      freed_large_object_bytes += large_object_space->Free(self, obj);
    }
  }
  timings_.EndSplit();

  timings_.StartSplit("RecordFree");
  VLOG(heap) << "Freed " << freed_objects << "/" << count << " objects with size "
             << PrettySize(freed_bytes);
  RecordFree(freed_objects, freed_bytes);
  RecordFreeLargeObjects(freed_large_objects, freed_large_object_bytes);
  timings_.EndSplit();

  timings_.StartSplit("ResetStack");
  allocations->Reset();
  timings_.EndSplit();

  int success = madvise(sweep_array_free_buffer_mem_map_->BaseBegin(),
                        sweep_array_free_buffer_mem_map_->BaseSize(), MADV_DONTNEED);
  DCHECK_EQ(success, 0) << "Failed to madvise the sweep array free buffer pages.";
}

void MarkSweep::Sweep(bool swap_bitmaps) {
  // Ensure that nobody inserted items in the live stack after we swapped the stacks.
  CHECK_GE(live_stack_freeze_size_, GetHeap()->GetLiveStack()->Size());
  // Mark everything allocated since the last GC as live so that we can sweep concurrently,
  // knowing that new allocations won't be marked as live.
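  // (Sweeping only frees objects that are in the live bitmap but not the mark bitmap, so
  // objects allocated after the stacks were swapped can never become free candidates.)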
  timings_.StartSplit("MarkStackAsLive");
  accounting::ObjectStack* live_stack = heap_->GetLiveStack();
  heap_->MarkAllocStackAsLive(live_stack);
  live_stack->Reset();
  timings_.EndSplit();

  DCHECK(mark_stack_->IsEmpty());
  for (const auto& space : GetHeap()->GetContinuousSpaces()) {
    if (space->IsContinuousMemMapAllocSpace()) {
      space::ContinuousMemMapAllocSpace* alloc_space = space->AsContinuousMemMapAllocSpace();
      TimingLogger::ScopedSplit split(
          alloc_space->IsZygoteSpace() ? "SweepZygoteSpace" : "SweepMallocSpace", &timings_);
      size_t freed_objects = 0;
      size_t freed_bytes = 0;
      alloc_space->Sweep(swap_bitmaps, &freed_objects, &freed_bytes);
      RecordFree(freed_objects, freed_bytes);
    }
  }
  SweepLargeObjects(swap_bitmaps);
}

void MarkSweep::SweepLargeObjects(bool swap_bitmaps) {
  TimingLogger::ScopedSplit split("SweepLargeObjects", &timings_);
  size_t freed_objects = 0;
  size_t freed_bytes = 0;
  heap_->GetLargeObjectsSpace()->Sweep(swap_bitmaps, &freed_objects, &freed_bytes);
  RecordFreeLargeObjects(freed_objects, freed_bytes);
}

// Process the "referent" field in a java.lang.ref.Reference. If the referent has not yet been
// marked, put it on the appropriate list in the heap for later processing.
void MarkSweep::DelayReferenceReferent(mirror::Class* klass, mirror::Reference* ref) {
  DCHECK(klass != nullptr);
  if (kCountJavaLangRefs) {
    ++reference_count_;
  }
  heap_->GetReferenceProcessor()->DelayReferenceReferent(klass, ref, IsMarkedCallback, this);
}

class MarkObjectVisitor {
 public:
  explicit MarkObjectVisitor(MarkSweep* const mark_sweep) ALWAYS_INLINE : mark_sweep_(mark_sweep) {
  }

  void operator()(Object* obj, MemberOffset offset, bool /* is_static */) const
      ALWAYS_INLINE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
    if (kCheckLocks) {
      Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
      Locks::heap_bitmap_lock_->AssertExclusiveHeld(Thread::Current());
    }
    mark_sweep_->MarkObject(obj->GetFieldObject<mirror::Object>(offset));
  }

 private:
  MarkSweep* const mark_sweep_;
};

// Scans an object reference. Determines the type of the reference
// and dispatches to a specialized scanning routine.
void MarkSweep::ScanObject(Object* obj) {
  MarkObjectVisitor mark_visitor(this);
  DelayReferenceReferentVisitor ref_visitor(this);
  ScanObjectVisit(obj, mark_visitor, ref_visitor);
}

void MarkSweep::ProcessMarkStackCallback(void* arg) {
  reinterpret_cast<MarkSweep*>(arg)->ProcessMarkStack(false);
}

void MarkSweep::ProcessMarkStackParallel(size_t thread_count) {
  Thread* self = Thread::Current();
  ThreadPool* thread_pool = GetHeap()->GetThreadPool();
  const size_t chunk_size = std::min(mark_stack_->Size() / thread_count + 1,
                                     static_cast<size_t>(MarkStackTask<false>::kMaxSize));
  CHECK_GT(chunk_size, 0U);
  // Split the current mark stack up into work tasks.
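  // Each task copies up to chunk_size contiguous entries out of the mark stack; the stack
  // itself is reset once all tasks have been queued and have completed.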
  for (mirror::Object **it = mark_stack_->Begin(), **end = mark_stack_->End(); it < end; ) {
    const size_t delta = std::min(static_cast<size_t>(end - it), chunk_size);
    thread_pool->AddTask(self, new MarkStackTask<false>(thread_pool, this, delta, it));
    it += delta;
  }
  thread_pool->SetMaxActiveWorkers(thread_count - 1);
  thread_pool->StartWorkers(self);
  thread_pool->Wait(self, true, true);
  thread_pool->StopWorkers(self);
  mark_stack_->Reset();
  CHECK_EQ(work_chunks_created_.LoadSequentiallyConsistent(),
           work_chunks_deleted_.LoadSequentiallyConsistent())
      << " some of the work chunks were leaked";
}

// Scan anything that's on the mark stack.
void MarkSweep::ProcessMarkStack(bool paused) {
  timings_.StartSplit(paused ? "(Paused)ProcessMarkStack" : "ProcessMarkStack");
  size_t thread_count = GetThreadCount(paused);
  if (kParallelProcessMarkStack && thread_count > 1 &&
      mark_stack_->Size() >= kMinimumParallelMarkStackSize) {
    ProcessMarkStackParallel(thread_count);
  } else {
    // TODO: Tune this.
    static const size_t kFifoSize = 4;
    BoundedFifoPowerOfTwo<Object*, kFifoSize> prefetch_fifo;
    for (;;) {
      Object* obj = nullptr;
      if (kUseMarkStackPrefetch) {
        while (!mark_stack_->IsEmpty() && prefetch_fifo.size() < kFifoSize) {
          Object* obj = mark_stack_->PopBack();
          DCHECK(obj != nullptr);
          __builtin_prefetch(obj);
          prefetch_fifo.push_back(obj);
        }
        if (prefetch_fifo.empty()) {
          break;
        }
        obj = prefetch_fifo.front();
        prefetch_fifo.pop_front();
      } else {
        if (mark_stack_->IsEmpty()) {
          break;
        }
        obj = mark_stack_->PopBack();
      }
      DCHECK(obj != nullptr);
      ScanObject(obj);
    }
  }
  timings_.EndSplit();
}

inline bool MarkSweep::IsMarked(const Object* object) const
    SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
  if (immune_region_.ContainsObject(object)) {
    return true;
  }
  if (current_space_bitmap_->HasAddress(object)) {
    return current_space_bitmap_->Test(object);
  }
  return mark_bitmap_->Test(object);
}

void MarkSweep::FinishPhase() {
  TimingLogger::ScopedSplit split("FinishPhase", &timings_);
  if (kCountScannedTypes) {
    VLOG(gc) << "MarkSweep scanned classes=" << class_count_.LoadRelaxed()
             << " arrays=" << array_count_.LoadRelaxed() << " other=" << other_count_.LoadRelaxed();
  }
  if (kCountTasks) {
    VLOG(gc) << "Total number of work chunks allocated: " << work_chunks_created_.LoadRelaxed();
  }
  if (kMeasureOverhead) {
    VLOG(gc) << "Overhead time " << PrettyDuration(overhead_time_.LoadRelaxed());
  }
  if (kProfileLargeObjects) {
    VLOG(gc) << "Large objects tested " << large_object_test_.LoadRelaxed()
             << " marked " << large_object_mark_.LoadRelaxed();
  }
  if (kCountJavaLangRefs) {
    VLOG(gc) << "References scanned " << reference_count_.LoadRelaxed();
  }
  if (kCountMarkedObjects) {
    VLOG(gc) << "Marked: null=" << mark_null_count_.LoadRelaxed()
             << " immune=" << mark_immune_count_.LoadRelaxed()
             << " fastpath=" << mark_fastpath_count_.LoadRelaxed()
             << " slowpath=" << mark_slowpath_count_.LoadRelaxed();
  }
  CHECK(mark_stack_->IsEmpty());  // Ensure that the mark stack is empty.
  mark_stack_->Reset();
  WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
  heap_->ClearMarkedObjects();
}

void MarkSweep::RevokeAllThreadLocalBuffers() {
  if (kRevokeRosAllocThreadLocalBuffersAtCheckpoint && IsConcurrent()) {
    // If concurrent, rosalloc thread-local buffers are revoked at the
    // thread checkpoint. Bump pointer space thread-local buffers must
    // not be in use.
    GetHeap()->AssertAllBumpPointerSpaceThreadLocalBuffersAreRevoked();
  } else {
    timings_.StartSplit("(Paused)RevokeAllThreadLocalBuffers");
    GetHeap()->RevokeAllThreadLocalBuffers();
    timings_.EndSplit();
  }
}

}  // namespace collector
}  // namespace gc
}  // namespace art