mark_sweep.cc revision 9086b65b2ad35dd39a8afc62d535be8217208d08
1/* 2 * Copyright (C) 2011 The Android Open Source Project 3 * 4 * Licensed under the Apache License, Version 2.0 (the "License"); 5 * you may not use this file except in compliance with the License. 6 * You may obtain a copy of the License at 7 * 8 * http://www.apache.org/licenses/LICENSE-2.0 9 * 10 * Unless required by applicable law or agreed to in writing, software 11 * distributed under the License is distributed on an "AS IS" BASIS, 12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 * See the License for the specific language governing permissions and 14 * limitations under the License. 15 */ 16 17#include "mark_sweep.h" 18 19#include <functional> 20#include <numeric> 21#include <climits> 22#include <vector> 23 24#define ATRACE_TAG ATRACE_TAG_DALVIK 25#include "cutils/trace.h" 26 27#include "base/bounded_fifo.h" 28#include "base/logging.h" 29#include "base/macros.h" 30#include "base/mutex-inl.h" 31#include "base/timing_logger.h" 32#include "gc/accounting/card_table-inl.h" 33#include "gc/accounting/heap_bitmap-inl.h" 34#include "gc/accounting/mod_union_table.h" 35#include "gc/accounting/space_bitmap-inl.h" 36#include "gc/heap.h" 37#include "gc/reference_processor.h" 38#include "gc/space/image_space.h" 39#include "gc/space/large_object_space.h" 40#include "gc/space/space-inl.h" 41#include "mark_sweep-inl.h" 42#include "mirror/object-inl.h" 43#include "runtime.h" 44#include "scoped_thread_state_change.h" 45#include "thread-inl.h" 46#include "thread_list.h" 47 48using ::art::mirror::Object; 49 50namespace art { 51namespace gc { 52namespace collector { 53 54// Performance options. 55static constexpr bool kUseRecursiveMark = false; 56static constexpr bool kUseMarkStackPrefetch = true; 57static constexpr size_t kSweepArrayChunkFreeSize = 1024; 58static constexpr bool kPreCleanCards = true; 59 60// Parallelism options. 61static constexpr bool kParallelCardScan = true; 62static constexpr bool kParallelRecursiveMark = true; 63// Don't attempt to parallelize mark stack processing unless the mark stack is at least n 64// elements. This is temporary until we reduce the overhead caused by allocating tasks, etc.. Not 65// having this can add overhead in ProcessReferences since we may end up doing many calls of 66// ProcessMarkStack with very small mark stacks. 67static constexpr size_t kMinimumParallelMarkStackSize = 128; 68static constexpr bool kParallelProcessMarkStack = true; 69 70// Profiling and information flags. 71static constexpr bool kProfileLargeObjects = false; 72static constexpr bool kMeasureOverhead = false; 73static constexpr bool kCountTasks = false; 74static constexpr bool kCountJavaLangRefs = false; 75static constexpr bool kCountMarkedObjects = false; 76 77// Turn off kCheckLocks when profiling the GC since it slows the GC down by up to 40%. 78static constexpr bool kCheckLocks = kDebugLocking; 79static constexpr bool kVerifyRootsMarked = kIsDebugBuild; 80 81// If true, revoke the rosalloc thread-local buffers at the 82// checkpoint, as opposed to during the pause. 83static constexpr bool kRevokeRosAllocThreadLocalBuffersAtCheckpoint = true; 84 85void MarkSweep::BindBitmaps() { 86 TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings()); 87 WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_); 88 // Mark all of the spaces we never collect as immune. 
89 for (const auto& space : GetHeap()->GetContinuousSpaces()) { 90 if (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyNeverCollect) { 91 CHECK(immune_region_.AddContinuousSpace(space)) << "Failed to add space " << *space; 92 } 93 } 94} 95 96MarkSweep::MarkSweep(Heap* heap, bool is_concurrent, const std::string& name_prefix) 97 : GarbageCollector(heap, 98 name_prefix + 99 (is_concurrent ? "concurrent mark sweep": "mark sweep")), 100 current_space_bitmap_(nullptr), mark_bitmap_(nullptr), mark_stack_(nullptr), 101 gc_barrier_(new Barrier(0)), 102 mark_stack_lock_("mark sweep mark stack lock", kMarkSweepMarkStackLock), 103 is_concurrent_(is_concurrent), live_stack_freeze_size_(0) { 104 std::string error_msg; 105 MemMap* mem_map = MemMap::MapAnonymous( 106 "mark sweep sweep array free buffer", nullptr, 107 RoundUp(kSweepArrayChunkFreeSize * sizeof(mirror::Object*), kPageSize), 108 PROT_READ | PROT_WRITE, false, false, &error_msg); 109 CHECK(mem_map != nullptr) << "Couldn't allocate sweep array free buffer: " << error_msg; 110 sweep_array_free_buffer_mem_map_.reset(mem_map); 111} 112 113void MarkSweep::InitializePhase() { 114 TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings()); 115 mark_stack_ = heap_->GetMarkStack(); 116 DCHECK(mark_stack_ != nullptr); 117 immune_region_.Reset(); 118 class_count_.StoreRelaxed(0); 119 array_count_.StoreRelaxed(0); 120 other_count_.StoreRelaxed(0); 121 large_object_test_.StoreRelaxed(0); 122 large_object_mark_.StoreRelaxed(0); 123 overhead_time_ .StoreRelaxed(0); 124 work_chunks_created_.StoreRelaxed(0); 125 work_chunks_deleted_.StoreRelaxed(0); 126 reference_count_.StoreRelaxed(0); 127 mark_null_count_.StoreRelaxed(0); 128 mark_immune_count_.StoreRelaxed(0); 129 mark_fastpath_count_.StoreRelaxed(0); 130 mark_slowpath_count_.StoreRelaxed(0); 131 { 132 // TODO: I don't think we should need heap bitmap lock to Get the mark bitmap. 133 ReaderMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_); 134 mark_bitmap_ = heap_->GetMarkBitmap(); 135 } 136 if (!GetCurrentIteration()->GetClearSoftReferences()) { 137 // Always clear soft references if a non-sticky collection. 138 GetCurrentIteration()->SetClearSoftReferences(GetGcType() != collector::kGcTypeSticky); 139 } 140} 141 142void MarkSweep::RunPhases() { 143 Thread* self = Thread::Current(); 144 InitializePhase(); 145 Locks::mutator_lock_->AssertNotHeld(self); 146 if (IsConcurrent()) { 147 GetHeap()->PreGcVerification(this); 148 { 149 ReaderMutexLock mu(self, *Locks::mutator_lock_); 150 MarkingPhase(); 151 } 152 ScopedPause pause(this); 153 GetHeap()->PrePauseRosAllocVerification(this); 154 PausePhase(); 155 RevokeAllThreadLocalBuffers(); 156 } else { 157 ScopedPause pause(this); 158 GetHeap()->PreGcVerificationPaused(this); 159 MarkingPhase(); 160 GetHeap()->PrePauseRosAllocVerification(this); 161 PausePhase(); 162 RevokeAllThreadLocalBuffers(); 163 } 164 { 165 // Sweeping always done concurrently, even for non concurrent mark sweep. 
166 ReaderMutexLock mu(self, *Locks::mutator_lock_); 167 ReclaimPhase(); 168 } 169 GetHeap()->PostGcVerification(this); 170 FinishPhase(); 171} 172 173void MarkSweep::ProcessReferences(Thread* self) { 174 WriterMutexLock mu(self, *Locks::heap_bitmap_lock_); 175 GetHeap()->GetReferenceProcessor()->ProcessReferences( 176 true, GetTimings(), GetCurrentIteration()->GetClearSoftReferences(), 177 &HeapReferenceMarkedCallback, &MarkObjectCallback, &ProcessMarkStackCallback, this); 178} 179 180void MarkSweep::PausePhase() { 181 TimingLogger::ScopedTiming t("(Paused)PausePhase", GetTimings()); 182 Thread* self = Thread::Current(); 183 Locks::mutator_lock_->AssertExclusiveHeld(self); 184 if (IsConcurrent()) { 185 // Handle the dirty objects if we are a concurrent GC. 186 WriterMutexLock mu(self, *Locks::heap_bitmap_lock_); 187 // Re-mark root set. 188 ReMarkRoots(); 189 // Scan dirty objects, this is only required if we are not doing concurrent GC. 190 RecursiveMarkDirtyObjects(true, accounting::CardTable::kCardDirty); 191 } 192 { 193 TimingLogger::ScopedTiming t2("SwapStacks", GetTimings()); 194 WriterMutexLock mu(self, *Locks::heap_bitmap_lock_); 195 heap_->SwapStacks(self); 196 live_stack_freeze_size_ = heap_->GetLiveStack()->Size(); 197 // Need to revoke all the thread local allocation stacks since we just swapped the allocation 198 // stacks and don't want anybody to allocate into the live stack. 199 RevokeAllThreadLocalAllocationStacks(self); 200 } 201 heap_->PreSweepingGcVerification(this); 202 // Disallow new system weaks to prevent a race which occurs when someone adds a new system 203 // weak before we sweep them. Since this new system weak may not be marked, the GC may 204 // incorrectly sweep it. This also fixes a race where interning may attempt to return a strong 205 // reference to a string that is about to be swept. 206 Runtime::Current()->DisallowNewSystemWeaks(); 207 // Enable the reference processing slow path, needs to be done with mutators paused since there 208 // is no lock in the GetReferent fast path. 209 GetHeap()->GetReferenceProcessor()->EnableSlowPath(); 210} 211 212void MarkSweep::PreCleanCards() { 213 // Don't do this for non concurrent GCs since they don't have any dirty cards. 214 if (kPreCleanCards && IsConcurrent()) { 215 TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings()); 216 Thread* self = Thread::Current(); 217 CHECK(!Locks::mutator_lock_->IsExclusiveHeld(self)); 218 // Process dirty cards and add dirty cards to mod union tables, also ages cards. 219 heap_->ProcessCards(GetTimings(), false, true, false); 220 // The checkpoint root marking is required to avoid a race condition which occurs if the 221 // following happens during a reference write: 222 // 1. mutator dirties the card (write barrier) 223 // 2. GC ages the card (the above ProcessCards call) 224 // 3. GC scans the object (the RecursiveMarkDirtyObjects call below) 225 // 4. mutator writes the value (corresponding to the write barrier in 1.) 226 // This causes the GC to age the card but not necessarily mark the reference which the mutator 227 // wrote into the object stored in the card. 228 // Having the checkpoint fixes this issue since it ensures that the card mark and the 229 // reference write are visible to the GC before the card is scanned (this is due to locks being 230 // acquired / released in the checkpoint code). 231 // The other roots are also marked to help reduce the pause. 
232 MarkRootsCheckpoint(self, false); 233 MarkNonThreadRoots(); 234 MarkConcurrentRoots( 235 static_cast<VisitRootFlags>(kVisitRootFlagClearRootLog | kVisitRootFlagNewRoots)); 236 // Process the newly aged cards. 237 RecursiveMarkDirtyObjects(false, accounting::CardTable::kCardDirty - 1); 238 // TODO: Empty allocation stack to reduce the number of objects we need to test / mark as live 239 // in the next GC. 240 } 241} 242 243void MarkSweep::RevokeAllThreadLocalAllocationStacks(Thread* self) { 244 if (kUseThreadLocalAllocationStack) { 245 TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings()); 246 Locks::mutator_lock_->AssertExclusiveHeld(self); 247 heap_->RevokeAllThreadLocalAllocationStacks(self); 248 } 249} 250 251void MarkSweep::MarkingPhase() { 252 TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings()); 253 Thread* self = Thread::Current(); 254 BindBitmaps(); 255 FindDefaultSpaceBitmap(); 256 // Process dirty cards and add dirty cards to mod union tables. 257 // If the GC type is non sticky, then we just clear the cards instead of ageing them. 258 heap_->ProcessCards(GetTimings(), false, true, GetGcType() != kGcTypeSticky); 259 WriterMutexLock mu(self, *Locks::heap_bitmap_lock_); 260 MarkRoots(self); 261 MarkReachableObjects(); 262 // Pre-clean dirtied cards to reduce pauses. 263 PreCleanCards(); 264} 265 266void MarkSweep::UpdateAndMarkModUnion() { 267 for (const auto& space : heap_->GetContinuousSpaces()) { 268 if (immune_region_.ContainsSpace(space)) { 269 const char* name = space->IsZygoteSpace() ? "UpdateAndMarkZygoteModUnionTable" : 270 "UpdateAndMarkImageModUnionTable"; 271 TimingLogger::ScopedTiming t(name, GetTimings()); 272 accounting::ModUnionTable* mod_union_table = heap_->FindModUnionTableFromSpace(space); 273 CHECK(mod_union_table != nullptr); 274 mod_union_table->UpdateAndMarkReferences(MarkHeapReferenceCallback, this); 275 } 276 } 277} 278 279void MarkSweep::MarkReachableObjects() { 280 UpdateAndMarkModUnion(); 281 // Recursively mark all the non-image bits set in the mark bitmap. 282 RecursiveMark(); 283} 284 285void MarkSweep::ReclaimPhase() { 286 TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings()); 287 Thread* self = Thread::Current(); 288 // Process the references concurrently. 289 ProcessReferences(self); 290 SweepSystemWeaks(self); 291 Runtime::Current()->AllowNewSystemWeaks(); 292 { 293 WriterMutexLock mu(self, *Locks::heap_bitmap_lock_); 294 GetHeap()->RecordFreeRevoke(); 295 // Reclaim unmarked objects. 296 Sweep(false); 297 // Swap the live and mark bitmaps for each space which we modified. This is an 298 // optimization that enables us to not clear live bits inside of the sweep. Only swaps unbound 299 // bitmaps. 300 SwapBitmaps(); 301 // Unbind the live and mark bitmaps. 302 GetHeap()->UnBindBitmaps(); 303 } 304} 305 306void MarkSweep::FindDefaultSpaceBitmap() { 307 TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings()); 308 for (const auto& space : GetHeap()->GetContinuousSpaces()) { 309 accounting::ContinuousSpaceBitmap* bitmap = space->GetMarkBitmap(); 310 // We want to have the main space instead of non moving if possible. 311 if (bitmap != nullptr && 312 space->GetGcRetentionPolicy() == space::kGcRetentionPolicyAlwaysCollect) { 313 current_space_bitmap_ = bitmap; 314 // If this is not the non moving space, exit the loop early since this will be good enough. 
315 if (space != heap_->GetNonMovingSpace()) { 316 break; 317 } 318 } 319 } 320 CHECK(current_space_bitmap_ != nullptr) << "Could not find a default mark bitmap\n" 321 << heap_->DumpSpaces(); 322} 323 324void MarkSweep::ExpandMarkStack() { 325 ResizeMarkStack(mark_stack_->Capacity() * 2); 326} 327 328void MarkSweep::ResizeMarkStack(size_t new_size) { 329 // Rare case, no need to have Thread::Current be a parameter. 330 if (UNLIKELY(mark_stack_->Size() < mark_stack_->Capacity())) { 331 // Someone else acquired the lock and expanded the mark stack before us. 332 return; 333 } 334 std::vector<StackReference<Object>> temp(mark_stack_->Begin(), mark_stack_->End()); 335 CHECK_LE(mark_stack_->Size(), new_size); 336 mark_stack_->Resize(new_size); 337 for (auto& obj : temp) { 338 mark_stack_->PushBack(obj.AsMirrorPtr()); 339 } 340} 341 342inline void MarkSweep::MarkObjectNonNullParallel(Object* obj) { 343 DCHECK(obj != nullptr); 344 if (MarkObjectParallel(obj)) { 345 MutexLock mu(Thread::Current(), mark_stack_lock_); 346 if (UNLIKELY(mark_stack_->Size() >= mark_stack_->Capacity())) { 347 ExpandMarkStack(); 348 } 349 // The object must be pushed on to the mark stack. 350 mark_stack_->PushBack(obj); 351 } 352} 353 354mirror::Object* MarkSweep::MarkObjectCallback(mirror::Object* obj, void* arg) { 355 MarkSweep* mark_sweep = reinterpret_cast<MarkSweep*>(arg); 356 mark_sweep->MarkObject(obj); 357 return obj; 358} 359 360void MarkSweep::MarkHeapReferenceCallback(mirror::HeapReference<mirror::Object>* ref, void* arg) { 361 reinterpret_cast<MarkSweep*>(arg)->MarkObject(ref->AsMirrorPtr()); 362} 363 364bool MarkSweep::HeapReferenceMarkedCallback(mirror::HeapReference<mirror::Object>* ref, void* arg) { 365 return reinterpret_cast<MarkSweep*>(arg)->IsMarked(ref->AsMirrorPtr()); 366} 367 368class MarkSweepMarkObjectSlowPath { 369 public: 370 explicit MarkSweepMarkObjectSlowPath(MarkSweep* mark_sweep) : mark_sweep_(mark_sweep) { 371 } 372 373 void operator()(const Object* obj) const ALWAYS_INLINE { 374 if (kProfileLargeObjects) { 375 // TODO: Differentiate between marking and testing somehow. 376 ++mark_sweep_->large_object_test_; 377 ++mark_sweep_->large_object_mark_; 378 } 379 space::LargeObjectSpace* large_object_space = mark_sweep_->GetHeap()->GetLargeObjectsSpace(); 380 if (UNLIKELY(obj == nullptr || !IsAligned<kPageSize>(obj) || 381 (kIsDebugBuild && large_object_space != nullptr && 382 !large_object_space->Contains(obj)))) { 383 LOG(ERROR) << "Tried to mark " << obj << " not contained by any spaces"; 384 LOG(ERROR) << "Attempting see if it's a bad root"; 385 mark_sweep_->VerifyRoots(); 386 LOG(FATAL) << "Can't mark invalid object"; 387 } 388 } 389 390 private: 391 MarkSweep* const mark_sweep_; 392}; 393 394inline void MarkSweep::MarkObjectNonNull(Object* obj) { 395 DCHECK(obj != nullptr); 396 if (kUseBakerOrBrooksReadBarrier) { 397 // Verify all the objects have the correct pointer installed. 398 obj->AssertReadBarrierPointer(); 399 } 400 if (immune_region_.ContainsObject(obj)) { 401 if (kCountMarkedObjects) { 402 ++mark_immune_count_; 403 } 404 DCHECK(mark_bitmap_->Test(obj)); 405 } else if (LIKELY(current_space_bitmap_->HasAddress(obj))) { 406 if (kCountMarkedObjects) { 407 ++mark_fastpath_count_; 408 } 409 if (UNLIKELY(!current_space_bitmap_->Set(obj))) { 410 PushOnMarkStack(obj); // This object was not previously marked. 
411 } 412 } else { 413 if (kCountMarkedObjects) { 414 ++mark_slowpath_count_; 415 } 416 MarkSweepMarkObjectSlowPath visitor(this); 417 // TODO: We already know that the object is not in the current_space_bitmap_ but MarkBitmap::Set 418 // will check again. 419 if (!mark_bitmap_->Set(obj, visitor)) { 420 PushOnMarkStack(obj); // Was not already marked, push. 421 } 422 } 423} 424 425inline void MarkSweep::PushOnMarkStack(Object* obj) { 426 if (UNLIKELY(mark_stack_->Size() >= mark_stack_->Capacity())) { 427 // Lock is not needed but is here anyways to please annotalysis. 428 MutexLock mu(Thread::Current(), mark_stack_lock_); 429 ExpandMarkStack(); 430 } 431 // The object must be pushed on to the mark stack. 432 mark_stack_->PushBack(obj); 433} 434 435inline bool MarkSweep::MarkObjectParallel(const Object* obj) { 436 DCHECK(obj != nullptr); 437 if (kUseBakerOrBrooksReadBarrier) { 438 // Verify all the objects have the correct pointer installed. 439 obj->AssertReadBarrierPointer(); 440 } 441 if (immune_region_.ContainsObject(obj)) { 442 DCHECK(IsMarked(obj)); 443 return false; 444 } 445 // Try to take advantage of locality of references within a space, failing this find the space 446 // the hard way. 447 accounting::ContinuousSpaceBitmap* object_bitmap = current_space_bitmap_; 448 if (LIKELY(object_bitmap->HasAddress(obj))) { 449 return !object_bitmap->AtomicTestAndSet(obj); 450 } 451 MarkSweepMarkObjectSlowPath visitor(this); 452 return !mark_bitmap_->AtomicTestAndSet(obj, visitor); 453} 454 455// Used to mark objects when processing the mark stack. If an object is null, it is not marked. 456inline void MarkSweep::MarkObject(Object* obj) { 457 if (obj != nullptr) { 458 MarkObjectNonNull(obj); 459 } else if (kCountMarkedObjects) { 460 ++mark_null_count_; 461 } 462} 463 464class VerifyRootMarkedVisitor : public SingleRootVisitor { 465 public: 466 explicit VerifyRootMarkedVisitor(MarkSweep* collector) : collector_(collector) { } 467 468 void VisitRoot(mirror::Object* root, const RootInfo& info) OVERRIDE 469 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) { 470 CHECK(collector_->IsMarked(root)) << info.ToString(); 471 } 472 473 private: 474 MarkSweep* const collector_; 475}; 476 477void MarkSweep::VisitRoots(mirror::Object*** roots, size_t count, 478 const RootInfo& info ATTRIBUTE_UNUSED) { 479 for (size_t i = 0; i < count; ++i) { 480 MarkObjectNonNull(*roots[i]); 481 } 482} 483 484void MarkSweep::VisitRoots(mirror::CompressedReference<mirror::Object>** roots, size_t count, 485 const RootInfo& info ATTRIBUTE_UNUSED) { 486 for (size_t i = 0; i < count; ++i) { 487 MarkObjectNonNull(roots[i]->AsMirrorPtr()); 488 } 489} 490 491class VerifyRootVisitor : public SingleRootVisitor { 492 public: 493 void VisitRoot(mirror::Object* root, const RootInfo& info) OVERRIDE 494 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) { 495 // See if the root is on any space bitmap. 
496 auto* heap = Runtime::Current()->GetHeap(); 497 if (heap->GetLiveBitmap()->GetContinuousSpaceBitmap(root) == nullptr) { 498 space::LargeObjectSpace* large_object_space = heap->GetLargeObjectsSpace(); 499 if (large_object_space != nullptr && !large_object_space->Contains(root)) { 500 LOG(ERROR) << "Found invalid root: " << root << " " << info; 501 } 502 } 503 } 504}; 505 506void MarkSweep::VerifyRoots() { 507 VerifyRootVisitor visitor; 508 Runtime::Current()->GetThreadList()->VisitRoots(&visitor); 509} 510 511void MarkSweep::MarkRoots(Thread* self) { 512 TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings()); 513 if (Locks::mutator_lock_->IsExclusiveHeld(self)) { 514 // If we exclusively hold the mutator lock, all threads must be suspended. 515 Runtime::Current()->VisitRoots(this); 516 RevokeAllThreadLocalAllocationStacks(self); 517 } else { 518 MarkRootsCheckpoint(self, kRevokeRosAllocThreadLocalBuffersAtCheckpoint); 519 // At this point the live stack should no longer have any mutators which push into it. 520 MarkNonThreadRoots(); 521 MarkConcurrentRoots( 522 static_cast<VisitRootFlags>(kVisitRootFlagAllRoots | kVisitRootFlagStartLoggingNewRoots)); 523 } 524} 525 526void MarkSweep::MarkNonThreadRoots() { 527 TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings()); 528 Runtime::Current()->VisitNonThreadRoots(this); 529} 530 531void MarkSweep::MarkConcurrentRoots(VisitRootFlags flags) { 532 TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings()); 533 // Visit all runtime roots and clear dirty flags. 534 Runtime::Current()->VisitConcurrentRoots(this, flags); 535} 536 537class ScanObjectVisitor { 538 public: 539 explicit ScanObjectVisitor(MarkSweep* const mark_sweep) ALWAYS_INLINE 540 : mark_sweep_(mark_sweep) {} 541 542 void operator()(Object* obj) const ALWAYS_INLINE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) 543 EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) { 544 if (kCheckLocks) { 545 Locks::mutator_lock_->AssertSharedHeld(Thread::Current()); 546 Locks::heap_bitmap_lock_->AssertExclusiveHeld(Thread::Current()); 547 } 548 mark_sweep_->ScanObject(obj); 549 } 550 551 private: 552 MarkSweep* const mark_sweep_; 553}; 554 555class DelayReferenceReferentVisitor { 556 public: 557 explicit DelayReferenceReferentVisitor(MarkSweep* collector) : collector_(collector) { 558 } 559 560 void operator()(mirror::Class* klass, mirror::Reference* ref) const 561 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) 562 EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) { 563 collector_->DelayReferenceReferent(klass, ref); 564 } 565 566 private: 567 MarkSweep* const collector_; 568}; 569 570template <bool kUseFinger = false> 571class MarkStackTask : public Task { 572 public: 573 MarkStackTask(ThreadPool* thread_pool, MarkSweep* mark_sweep, size_t mark_stack_size, 574 StackReference<Object>* mark_stack) 575 : mark_sweep_(mark_sweep), 576 thread_pool_(thread_pool), 577 mark_stack_pos_(mark_stack_size) { 578 // We may have to copy part of an existing mark stack when another mark stack overflows. 579 if (mark_stack_size != 0) { 580 DCHECK(mark_stack != NULL); 581 // TODO: Check performance? 
582 std::copy(mark_stack, mark_stack + mark_stack_size, mark_stack_); 583 } 584 if (kCountTasks) { 585 ++mark_sweep_->work_chunks_created_; 586 } 587 } 588 589 static const size_t kMaxSize = 1 * KB; 590 591 protected: 592 class MarkObjectParallelVisitor { 593 public: 594 explicit MarkObjectParallelVisitor(MarkStackTask<kUseFinger>* chunk_task, 595 MarkSweep* mark_sweep) ALWAYS_INLINE 596 : chunk_task_(chunk_task), mark_sweep_(mark_sweep) {} 597 598 void operator()(Object* obj, MemberOffset offset, bool /* static */) const ALWAYS_INLINE 599 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { 600 mirror::Object* ref = obj->GetFieldObject<mirror::Object>(offset); 601 if (ref != nullptr && mark_sweep_->MarkObjectParallel(ref)) { 602 if (kUseFinger) { 603 android_memory_barrier(); 604 if (reinterpret_cast<uintptr_t>(ref) >= 605 static_cast<uintptr_t>(mark_sweep_->atomic_finger_.LoadRelaxed())) { 606 return; 607 } 608 } 609 chunk_task_->MarkStackPush(ref); 610 } 611 } 612 613 private: 614 MarkStackTask<kUseFinger>* const chunk_task_; 615 MarkSweep* const mark_sweep_; 616 }; 617 618 class ScanObjectParallelVisitor { 619 public: 620 explicit ScanObjectParallelVisitor(MarkStackTask<kUseFinger>* chunk_task) ALWAYS_INLINE 621 : chunk_task_(chunk_task) {} 622 623 // No thread safety analysis since multiple threads will use this visitor. 624 void operator()(Object* obj) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) 625 EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) { 626 MarkSweep* const mark_sweep = chunk_task_->mark_sweep_; 627 MarkObjectParallelVisitor mark_visitor(chunk_task_, mark_sweep); 628 DelayReferenceReferentVisitor ref_visitor(mark_sweep); 629 mark_sweep->ScanObjectVisit(obj, mark_visitor, ref_visitor); 630 } 631 632 private: 633 MarkStackTask<kUseFinger>* const chunk_task_; 634 }; 635 636 virtual ~MarkStackTask() { 637 // Make sure that we have cleared our mark stack. 638 DCHECK_EQ(mark_stack_pos_, 0U); 639 if (kCountTasks) { 640 ++mark_sweep_->work_chunks_deleted_; 641 } 642 } 643 644 MarkSweep* const mark_sweep_; 645 ThreadPool* const thread_pool_; 646 // Thread local mark stack for this task. 647 StackReference<Object> mark_stack_[kMaxSize]; 648 // Mark stack position. 649 size_t mark_stack_pos_; 650 651 ALWAYS_INLINE void MarkStackPush(Object* obj) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { 652 if (UNLIKELY(mark_stack_pos_ == kMaxSize)) { 653 // Mark stack overflow, give 1/2 the stack to the thread pool as a new work task. 654 mark_stack_pos_ /= 2; 655 auto* task = new MarkStackTask(thread_pool_, mark_sweep_, kMaxSize - mark_stack_pos_, 656 mark_stack_ + mark_stack_pos_); 657 thread_pool_->AddTask(Thread::Current(), task); 658 } 659 DCHECK(obj != nullptr); 660 DCHECK_LT(mark_stack_pos_, kMaxSize); 661 mark_stack_[mark_stack_pos_++].Assign(obj); 662 } 663 664 virtual void Finalize() { 665 delete this; 666 } 667 668 // Scans all of the objects 669 virtual void Run(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) 670 EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) { 671 UNUSED(self); 672 ScanObjectParallelVisitor visitor(this); 673 // TODO: Tune this. 
674 static const size_t kFifoSize = 4; 675 BoundedFifoPowerOfTwo<Object*, kFifoSize> prefetch_fifo; 676 for (;;) { 677 Object* obj = nullptr; 678 if (kUseMarkStackPrefetch) { 679 while (mark_stack_pos_ != 0 && prefetch_fifo.size() < kFifoSize) { 680 Object* const mark_stack_obj = mark_stack_[--mark_stack_pos_].AsMirrorPtr(); 681 DCHECK(mark_stack_obj != nullptr); 682 __builtin_prefetch(mark_stack_obj); 683 prefetch_fifo.push_back(mark_stack_obj); 684 } 685 if (UNLIKELY(prefetch_fifo.empty())) { 686 break; 687 } 688 obj = prefetch_fifo.front(); 689 prefetch_fifo.pop_front(); 690 } else { 691 if (UNLIKELY(mark_stack_pos_ == 0)) { 692 break; 693 } 694 obj = mark_stack_[--mark_stack_pos_].AsMirrorPtr(); 695 } 696 DCHECK(obj != nullptr); 697 visitor(obj); 698 } 699 } 700}; 701 702class CardScanTask : public MarkStackTask<false> { 703 public: 704 CardScanTask(ThreadPool* thread_pool, MarkSweep* mark_sweep, 705 accounting::ContinuousSpaceBitmap* bitmap, 706 uint8_t* begin, uint8_t* end, uint8_t minimum_age, size_t mark_stack_size, 707 StackReference<Object>* mark_stack_obj, bool clear_card) 708 : MarkStackTask<false>(thread_pool, mark_sweep, mark_stack_size, mark_stack_obj), 709 bitmap_(bitmap), 710 begin_(begin), 711 end_(end), 712 minimum_age_(minimum_age), clear_card_(clear_card) { 713 } 714 715 protected: 716 accounting::ContinuousSpaceBitmap* const bitmap_; 717 uint8_t* const begin_; 718 uint8_t* const end_; 719 const uint8_t minimum_age_; 720 const bool clear_card_; 721 722 virtual void Finalize() { 723 delete this; 724 } 725 726 virtual void Run(Thread* self) NO_THREAD_SAFETY_ANALYSIS { 727 ScanObjectParallelVisitor visitor(this); 728 accounting::CardTable* card_table = mark_sweep_->GetHeap()->GetCardTable(); 729 size_t cards_scanned = clear_card_ ? 730 card_table->Scan<true>(bitmap_, begin_, end_, visitor, minimum_age_) : 731 card_table->Scan<false>(bitmap_, begin_, end_, visitor, minimum_age_); 732 VLOG(heap) << "Parallel scanning cards " << reinterpret_cast<void*>(begin_) << " - " 733 << reinterpret_cast<void*>(end_) << " = " << cards_scanned; 734 // Finish by emptying our local mark stack. 735 MarkStackTask::Run(self); 736 } 737}; 738 739size_t MarkSweep::GetThreadCount(bool paused) const { 740 if (heap_->GetThreadPool() == nullptr || !heap_->CareAboutPauseTimes()) { 741 return 1; 742 } 743 if (paused) { 744 return heap_->GetParallelGCThreadCount() + 1; 745 } else { 746 return heap_->GetConcGCThreadCount() + 1; 747 } 748} 749 750void MarkSweep::ScanGrayObjects(bool paused, uint8_t minimum_age) { 751 accounting::CardTable* card_table = GetHeap()->GetCardTable(); 752 ThreadPool* thread_pool = GetHeap()->GetThreadPool(); 753 size_t thread_count = GetThreadCount(paused); 754 // The parallel version with only one thread is faster for card scanning, TODO: fix. 755 if (kParallelCardScan && thread_count > 1) { 756 Thread* self = Thread::Current(); 757 // Can't have a different split for each space since multiple spaces can have their cards being 758 // scanned at the same time. 759 TimingLogger::ScopedTiming t(paused ? "(Paused)ScanGrayObjects" : __FUNCTION__, 760 GetTimings()); 761 // Try to take some of the mark stack since we can pass this off to the worker tasks. 762 StackReference<Object>* mark_stack_begin = mark_stack_->Begin(); 763 StackReference<Object>* mark_stack_end = mark_stack_->End(); 764 const size_t mark_stack_size = mark_stack_end - mark_stack_begin; 765 // Estimated number of work tasks we will create. 
766 const size_t mark_stack_tasks = GetHeap()->GetContinuousSpaces().size() * thread_count; 767 DCHECK_NE(mark_stack_tasks, 0U); 768 const size_t mark_stack_delta = std::min(CardScanTask::kMaxSize / 2, 769 mark_stack_size / mark_stack_tasks + 1); 770 for (const auto& space : GetHeap()->GetContinuousSpaces()) { 771 if (space->GetMarkBitmap() == nullptr) { 772 continue; 773 } 774 uint8_t* card_begin = space->Begin(); 775 uint8_t* card_end = space->End(); 776 // Align up the end address. For example, the image space's end 777 // may not be card-size-aligned. 778 card_end = AlignUp(card_end, accounting::CardTable::kCardSize); 779 DCHECK(IsAligned<accounting::CardTable::kCardSize>(card_begin)); 780 DCHECK(IsAligned<accounting::CardTable::kCardSize>(card_end)); 781 // Calculate how many bytes of heap we will scan, 782 const size_t address_range = card_end - card_begin; 783 // Calculate how much address range each task gets. 784 const size_t card_delta = RoundUp(address_range / thread_count + 1, 785 accounting::CardTable::kCardSize); 786 // If paused and the space is neither zygote nor image space, we could clear the dirty 787 // cards to avoid accumulating them to increase card scanning load in the following GC 788 // cycles. We need to keep dirty cards of image space and zygote space in order to track 789 // references to the other spaces. 790 bool clear_card = paused && !space->IsZygoteSpace() && !space->IsImageSpace(); 791 // Create the worker tasks for this space. 792 while (card_begin != card_end) { 793 // Add a range of cards. 794 size_t addr_remaining = card_end - card_begin; 795 size_t card_increment = std::min(card_delta, addr_remaining); 796 // Take from the back of the mark stack. 797 size_t mark_stack_remaining = mark_stack_end - mark_stack_begin; 798 size_t mark_stack_increment = std::min(mark_stack_delta, mark_stack_remaining); 799 mark_stack_end -= mark_stack_increment; 800 mark_stack_->PopBackCount(static_cast<int32_t>(mark_stack_increment)); 801 DCHECK_EQ(mark_stack_end, mark_stack_->End()); 802 // Add the new task to the thread pool. 803 auto* task = new CardScanTask(thread_pool, this, space->GetMarkBitmap(), card_begin, 804 card_begin + card_increment, minimum_age, 805 mark_stack_increment, mark_stack_end, clear_card); 806 thread_pool->AddTask(self, task); 807 card_begin += card_increment; 808 } 809 } 810 811 // Note: the card scan below may dirty new cards (and scan them) 812 // as a side effect when a Reference object is encountered and 813 // queued during the marking. See b/11465268. 814 thread_pool->SetMaxActiveWorkers(thread_count - 1); 815 thread_pool->StartWorkers(self); 816 thread_pool->Wait(self, true, true); 817 thread_pool->StopWorkers(self); 818 } else { 819 for (const auto& space : GetHeap()->GetContinuousSpaces()) { 820 if (space->GetMarkBitmap() != nullptr) { 821 // Image spaces are handled properly since live == marked for them. 822 const char* name = nullptr; 823 switch (space->GetGcRetentionPolicy()) { 824 case space::kGcRetentionPolicyNeverCollect: 825 name = paused ? "(Paused)ScanGrayImageSpaceObjects" : "ScanGrayImageSpaceObjects"; 826 break; 827 case space::kGcRetentionPolicyFullCollect: 828 name = paused ? "(Paused)ScanGrayZygoteSpaceObjects" : "ScanGrayZygoteSpaceObjects"; 829 break; 830 case space::kGcRetentionPolicyAlwaysCollect: 831 name = paused ? 
"(Paused)ScanGrayAllocSpaceObjects" : "ScanGrayAllocSpaceObjects"; 832 break; 833 default: 834 LOG(FATAL) << "Unreachable"; 835 UNREACHABLE(); 836 } 837 TimingLogger::ScopedTiming t(name, GetTimings()); 838 ScanObjectVisitor visitor(this); 839 bool clear_card = paused && !space->IsZygoteSpace() && !space->IsImageSpace(); 840 if (clear_card) { 841 card_table->Scan<true>(space->GetMarkBitmap(), space->Begin(), space->End(), visitor, 842 minimum_age); 843 } else { 844 card_table->Scan<false>(space->GetMarkBitmap(), space->Begin(), space->End(), visitor, 845 minimum_age); 846 } 847 } 848 } 849 } 850} 851 852class RecursiveMarkTask : public MarkStackTask<false> { 853 public: 854 RecursiveMarkTask(ThreadPool* thread_pool, MarkSweep* mark_sweep, 855 accounting::ContinuousSpaceBitmap* bitmap, uintptr_t begin, uintptr_t end) 856 : MarkStackTask<false>(thread_pool, mark_sweep, 0, NULL), bitmap_(bitmap), begin_(begin), 857 end_(end) { 858 } 859 860 protected: 861 accounting::ContinuousSpaceBitmap* const bitmap_; 862 const uintptr_t begin_; 863 const uintptr_t end_; 864 865 virtual void Finalize() { 866 delete this; 867 } 868 869 // Scans all of the objects 870 virtual void Run(Thread* self) NO_THREAD_SAFETY_ANALYSIS { 871 ScanObjectParallelVisitor visitor(this); 872 bitmap_->VisitMarkedRange(begin_, end_, visitor); 873 // Finish by emptying our local mark stack. 874 MarkStackTask::Run(self); 875 } 876}; 877 878// Populates the mark stack based on the set of marked objects and 879// recursively marks until the mark stack is emptied. 880void MarkSweep::RecursiveMark() { 881 TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings()); 882 // RecursiveMark will build the lists of known instances of the Reference classes. See 883 // DelayReferenceReferent for details. 884 if (kUseRecursiveMark) { 885 const bool partial = GetGcType() == kGcTypePartial; 886 ScanObjectVisitor scan_visitor(this); 887 auto* self = Thread::Current(); 888 ThreadPool* thread_pool = heap_->GetThreadPool(); 889 size_t thread_count = GetThreadCount(false); 890 const bool parallel = kParallelRecursiveMark && thread_count > 1; 891 mark_stack_->Reset(); 892 for (const auto& space : GetHeap()->GetContinuousSpaces()) { 893 if ((space->GetGcRetentionPolicy() == space::kGcRetentionPolicyAlwaysCollect) || 894 (!partial && space->GetGcRetentionPolicy() == space::kGcRetentionPolicyFullCollect)) { 895 current_space_bitmap_ = space->GetMarkBitmap(); 896 if (current_space_bitmap_ == nullptr) { 897 continue; 898 } 899 if (parallel) { 900 // We will use the mark stack the future. 901 // CHECK(mark_stack_->IsEmpty()); 902 // This function does not handle heap end increasing, so we must use the space end. 903 uintptr_t begin = reinterpret_cast<uintptr_t>(space->Begin()); 904 uintptr_t end = reinterpret_cast<uintptr_t>(space->End()); 905 atomic_finger_.StoreRelaxed(AtomicInteger::MaxValue()); 906 907 // Create a few worker tasks. 
908 const size_t n = thread_count * 2; 909 while (begin != end) { 910 uintptr_t start = begin; 911 uintptr_t delta = (end - begin) / n; 912 delta = RoundUp(delta, KB); 913 if (delta < 16 * KB) delta = end - begin; 914 begin += delta; 915 auto* task = new RecursiveMarkTask(thread_pool, this, current_space_bitmap_, start, 916 begin); 917 thread_pool->AddTask(self, task); 918 } 919 thread_pool->SetMaxActiveWorkers(thread_count - 1); 920 thread_pool->StartWorkers(self); 921 thread_pool->Wait(self, true, true); 922 thread_pool->StopWorkers(self); 923 } else { 924 // This function does not handle heap end increasing, so we must use the space end. 925 uintptr_t begin = reinterpret_cast<uintptr_t>(space->Begin()); 926 uintptr_t end = reinterpret_cast<uintptr_t>(space->End()); 927 current_space_bitmap_->VisitMarkedRange(begin, end, scan_visitor); 928 } 929 } 930 } 931 } 932 ProcessMarkStack(false); 933} 934 935mirror::Object* MarkSweep::IsMarkedCallback(mirror::Object* object, void* arg) { 936 if (reinterpret_cast<MarkSweep*>(arg)->IsMarked(object)) { 937 return object; 938 } 939 return nullptr; 940} 941 942void MarkSweep::RecursiveMarkDirtyObjects(bool paused, uint8_t minimum_age) { 943 ScanGrayObjects(paused, minimum_age); 944 ProcessMarkStack(paused); 945} 946 947void MarkSweep::ReMarkRoots() { 948 TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings()); 949 Locks::mutator_lock_->AssertExclusiveHeld(Thread::Current()); 950 Runtime::Current()->VisitRoots(this, static_cast<VisitRootFlags>( 951 kVisitRootFlagNewRoots | kVisitRootFlagStopLoggingNewRoots | kVisitRootFlagClearRootLog)); 952 if (kVerifyRootsMarked) { 953 TimingLogger::ScopedTiming t2("(Paused)VerifyRoots", GetTimings()); 954 VerifyRootMarkedVisitor visitor(this); 955 Runtime::Current()->VisitRoots(&visitor); 956 } 957} 958 959void MarkSweep::SweepSystemWeaks(Thread* self) { 960 TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings()); 961 WriterMutexLock mu(self, *Locks::heap_bitmap_lock_); 962 Runtime::Current()->SweepSystemWeaks(IsMarkedCallback, this); 963} 964 965mirror::Object* MarkSweep::VerifySystemWeakIsLiveCallback(Object* obj, void* arg) { 966 reinterpret_cast<MarkSweep*>(arg)->VerifyIsLive(obj); 967 // We don't actually want to sweep the object, so lets return "marked" 968 return obj; 969} 970 971void MarkSweep::VerifyIsLive(const Object* obj) { 972 if (!heap_->GetLiveBitmap()->Test(obj)) { 973 // TODO: Consider live stack? Has this code bitrotted? 974 CHECK(!heap_->allocation_stack_->Contains(obj)) 975 << "Found dead object " << obj << "\n" << heap_->DumpSpaces(); 976 } 977} 978 979void MarkSweep::VerifySystemWeaks() { 980 TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings()); 981 // Verify system weaks, uses a special object visitor which returns the input object. 
982 Runtime::Current()->SweepSystemWeaks(VerifySystemWeakIsLiveCallback, this); 983} 984 985class CheckpointMarkThreadRoots : public Closure, public RootVisitor { 986 public: 987 explicit CheckpointMarkThreadRoots(MarkSweep* mark_sweep, 988 bool revoke_ros_alloc_thread_local_buffers_at_checkpoint) 989 : mark_sweep_(mark_sweep), 990 revoke_ros_alloc_thread_local_buffers_at_checkpoint_( 991 revoke_ros_alloc_thread_local_buffers_at_checkpoint) { 992 } 993 994 void VisitRoots(mirror::Object*** roots, size_t count, const RootInfo& info ATTRIBUTE_UNUSED) 995 OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) 996 EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) { 997 for (size_t i = 0; i < count; ++i) { 998 mark_sweep_->MarkObjectNonNullParallel(*roots[i]); 999 } 1000 } 1001 1002 void VisitRoots(mirror::CompressedReference<mirror::Object>** roots, size_t count, 1003 const RootInfo& info ATTRIBUTE_UNUSED) 1004 OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) 1005 EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) { 1006 for (size_t i = 0; i < count; ++i) { 1007 mark_sweep_->MarkObjectNonNullParallel(roots[i]->AsMirrorPtr()); 1008 } 1009 } 1010 1011 virtual void Run(Thread* thread) OVERRIDE NO_THREAD_SAFETY_ANALYSIS { 1012 ATRACE_BEGIN("Marking thread roots"); 1013 // Note: self is not necessarily equal to thread since thread may be suspended. 1014 Thread* const self = Thread::Current(); 1015 CHECK(thread == self || thread->IsSuspended() || thread->GetState() == kWaitingPerformingGc) 1016 << thread->GetState() << " thread " << thread << " self " << self; 1017 thread->VisitRoots(this); 1018 ATRACE_END(); 1019 if (revoke_ros_alloc_thread_local_buffers_at_checkpoint_) { 1020 ATRACE_BEGIN("RevokeRosAllocThreadLocalBuffers"); 1021 mark_sweep_->GetHeap()->RevokeRosAllocThreadLocalBuffers(thread); 1022 ATRACE_END(); 1023 } 1024 // If thread is a running mutator, then act on behalf of the garbage collector. 1025 // See the code in ThreadList::RunCheckpoint. 1026 if (thread->GetState() == kRunnable) { 1027 mark_sweep_->GetBarrier().Pass(self); 1028 } 1029 } 1030 1031 private: 1032 MarkSweep* const mark_sweep_; 1033 const bool revoke_ros_alloc_thread_local_buffers_at_checkpoint_; 1034}; 1035 1036void MarkSweep::MarkRootsCheckpoint(Thread* self, 1037 bool revoke_ros_alloc_thread_local_buffers_at_checkpoint) { 1038 TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings()); 1039 CheckpointMarkThreadRoots check_point(this, revoke_ros_alloc_thread_local_buffers_at_checkpoint); 1040 ThreadList* thread_list = Runtime::Current()->GetThreadList(); 1041 // Request that the checkpoint be run on all threads, returning a count of the threads that must 1042 // run through the barrier, including self. 1043 size_t barrier_count = thread_list->RunCheckpoint(&check_point); 1044 // Release locks then wait for all mutator threads to pass the barrier. 1045 // If there are no threads to wait for, which implies that all the checkpoint functions have finished, 1046 // then there is no need to release the locks. 
1047 if (barrier_count == 0) { 1048 return; 1049 } 1050 Locks::heap_bitmap_lock_->ExclusiveUnlock(self); 1051 Locks::mutator_lock_->SharedUnlock(self); 1052 { 1053 ScopedThreadStateChange tsc(self, kWaitingForCheckPointsToRun); 1054 gc_barrier_->Increment(self, barrier_count); 1055 } 1056 Locks::mutator_lock_->SharedLock(self); 1057 Locks::heap_bitmap_lock_->ExclusiveLock(self); 1058} 1059 1060void MarkSweep::SweepArray(accounting::ObjectStack* allocations, bool swap_bitmaps) { 1061 TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings()); 1062 Thread* self = Thread::Current(); 1063 mirror::Object** chunk_free_buffer = reinterpret_cast<mirror::Object**>( 1064 sweep_array_free_buffer_mem_map_->BaseBegin()); 1065 size_t chunk_free_pos = 0; 1066 ObjectBytePair freed; 1067 ObjectBytePair freed_los; 1068 // How many objects are left in the array, modified after each space is swept. 1069 StackReference<Object>* objects = allocations->Begin(); 1070 size_t count = allocations->Size(); 1071 // Change the order to ensure that the non-moving space is swept last as an optimization. 1072 std::vector<space::ContinuousSpace*> sweep_spaces; 1073 space::ContinuousSpace* non_moving_space = nullptr; 1074 for (space::ContinuousSpace* space : heap_->GetContinuousSpaces()) { 1075 if (space->IsAllocSpace() && !immune_region_.ContainsSpace(space) && 1076 space->GetLiveBitmap() != nullptr) { 1077 if (space == heap_->GetNonMovingSpace()) { 1078 non_moving_space = space; 1079 } else { 1080 sweep_spaces.push_back(space); 1081 } 1082 } 1083 } 1084 // Unlikely to sweep a significant amount of non_movable objects, so we do these after 1085 // the other alloc spaces as an optimization. 1086 if (non_moving_space != nullptr) { 1087 sweep_spaces.push_back(non_moving_space); 1088 } 1089 // Start by sweeping the continuous spaces. 1090 for (space::ContinuousSpace* space : sweep_spaces) { 1091 space::AllocSpace* alloc_space = space->AsAllocSpace(); 1092 accounting::ContinuousSpaceBitmap* live_bitmap = space->GetLiveBitmap(); 1093 accounting::ContinuousSpaceBitmap* mark_bitmap = space->GetMarkBitmap(); 1094 if (swap_bitmaps) { 1095 std::swap(live_bitmap, mark_bitmap); 1096 } 1097 StackReference<Object>* out = objects; 1098 for (size_t i = 0; i < count; ++i) { 1099 Object* const obj = objects[i].AsMirrorPtr(); 1100 if (kUseThreadLocalAllocationStack && obj == nullptr) { 1101 continue; 1102 } 1103 if (space->HasAddress(obj)) { 1104 // This object is in the space, remove it from the array and add it to the sweep buffer 1105 // if needed. 1106 if (!mark_bitmap->Test(obj)) { 1107 if (chunk_free_pos >= kSweepArrayChunkFreeSize) { 1108 TimingLogger::ScopedTiming t2("FreeList", GetTimings()); 1109 freed.objects += chunk_free_pos; 1110 freed.bytes += alloc_space->FreeList(self, chunk_free_pos, chunk_free_buffer); 1111 chunk_free_pos = 0; 1112 } 1113 chunk_free_buffer[chunk_free_pos++] = obj; 1114 } 1115 } else { 1116 (out++)->Assign(obj); 1117 } 1118 } 1119 if (chunk_free_pos > 0) { 1120 TimingLogger::ScopedTiming t2("FreeList", GetTimings()); 1121 freed.objects += chunk_free_pos; 1122 freed.bytes += alloc_space->FreeList(self, chunk_free_pos, chunk_free_buffer); 1123 chunk_free_pos = 0; 1124 } 1125 // All of the references which the space contained are no longer in the allocation stack; update 1126 // the count. 1127 count = out - objects; 1128 } 1129 // Handle the large object space. 
1130 space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace(); 1131 if (large_object_space != nullptr) { 1132 accounting::LargeObjectBitmap* large_live_objects = large_object_space->GetLiveBitmap(); 1133 accounting::LargeObjectBitmap* large_mark_objects = large_object_space->GetMarkBitmap(); 1134 if (swap_bitmaps) { 1135 std::swap(large_live_objects, large_mark_objects); 1136 } 1137 for (size_t i = 0; i < count; ++i) { 1138 Object* const obj = objects[i].AsMirrorPtr(); 1139 // Handle large objects. 1140 if (kUseThreadLocalAllocationStack && obj == nullptr) { 1141 continue; 1142 } 1143 if (!large_mark_objects->Test(obj)) { 1144 ++freed_los.objects; 1145 freed_los.bytes += large_object_space->Free(self, obj); 1146 } 1147 } 1148 } 1149 { 1150 TimingLogger::ScopedTiming t2("RecordFree", GetTimings()); 1151 RecordFree(freed); 1152 RecordFreeLOS(freed_los); 1153 t2.NewTiming("ResetStack"); 1154 allocations->Reset(); 1155 } 1156 sweep_array_free_buffer_mem_map_->MadviseDontNeedAndZero(); 1157} 1158 1159void MarkSweep::Sweep(bool swap_bitmaps) { 1160 TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings()); 1161 // Ensure that nobody inserted items in the live stack after we swapped the stacks. 1162 CHECK_GE(live_stack_freeze_size_, GetHeap()->GetLiveStack()->Size()); 1163 { 1164 TimingLogger::ScopedTiming t2("MarkAllocStackAsLive", GetTimings()); 1165 // Mark everything allocated since the last GC as live so that we can sweep concurrently, 1166 // knowing that new allocations won't be marked as live. 1167 accounting::ObjectStack* live_stack = heap_->GetLiveStack(); 1168 heap_->MarkAllocStackAsLive(live_stack); 1169 live_stack->Reset(); 1170 DCHECK(mark_stack_->IsEmpty()); 1171 } 1172 for (const auto& space : GetHeap()->GetContinuousSpaces()) { 1173 if (space->IsContinuousMemMapAllocSpace()) { 1174 space::ContinuousMemMapAllocSpace* alloc_space = space->AsContinuousMemMapAllocSpace(); 1175 TimingLogger::ScopedTiming split( 1176 alloc_space->IsZygoteSpace() ? "SweepZygoteSpace" : "SweepMallocSpace", GetTimings()); 1177 RecordFree(alloc_space->Sweep(swap_bitmaps)); 1178 } 1179 } 1180 SweepLargeObjects(swap_bitmaps); 1181} 1182 1183void MarkSweep::SweepLargeObjects(bool swap_bitmaps) { 1184 space::LargeObjectSpace* los = heap_->GetLargeObjectsSpace(); 1185 if (los != nullptr) { 1186 TimingLogger::ScopedTiming split(__FUNCTION__, GetTimings()); 1187 RecordFreeLOS(los->Sweep(swap_bitmaps)); 1188 } 1189} 1190 1191// Process the "referent" field in a java.lang.ref.Reference. If the referent has not yet been 1192// marked, put it on the appropriate list in the heap for later processing. 
1193void MarkSweep::DelayReferenceReferent(mirror::Class* klass, mirror::Reference* ref) { 1194 if (kCountJavaLangRefs) { 1195 ++reference_count_; 1196 } 1197 heap_->GetReferenceProcessor()->DelayReferenceReferent(klass, ref, &HeapReferenceMarkedCallback, 1198 this); 1199} 1200 1201class MarkObjectVisitor { 1202 public: 1203 explicit MarkObjectVisitor(MarkSweep* const mark_sweep) ALWAYS_INLINE : mark_sweep_(mark_sweep) { 1204 } 1205 1206 void operator()(Object* obj, MemberOffset offset, bool /* is_static */) const 1207 ALWAYS_INLINE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) 1208 EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) { 1209 if (kCheckLocks) { 1210 Locks::mutator_lock_->AssertSharedHeld(Thread::Current()); 1211 Locks::heap_bitmap_lock_->AssertExclusiveHeld(Thread::Current()); 1212 } 1213 mark_sweep_->MarkObject(obj->GetFieldObject<mirror::Object>(offset)); 1214 } 1215 1216 private: 1217 MarkSweep* const mark_sweep_; 1218}; 1219 1220// Scans an object reference. Determines the type of the reference 1221// and dispatches to a specialized scanning routine. 1222void MarkSweep::ScanObject(Object* obj) { 1223 MarkObjectVisitor mark_visitor(this); 1224 DelayReferenceReferentVisitor ref_visitor(this); 1225 ScanObjectVisit(obj, mark_visitor, ref_visitor); 1226} 1227 1228void MarkSweep::ProcessMarkStackCallback(void* arg) { 1229 reinterpret_cast<MarkSweep*>(arg)->ProcessMarkStack(false); 1230} 1231 1232void MarkSweep::ProcessMarkStackParallel(size_t thread_count) { 1233 Thread* self = Thread::Current(); 1234 ThreadPool* thread_pool = GetHeap()->GetThreadPool(); 1235 const size_t chunk_size = std::min(mark_stack_->Size() / thread_count + 1, 1236 static_cast<size_t>(MarkStackTask<false>::kMaxSize)); 1237 CHECK_GT(chunk_size, 0U); 1238 // Split the current mark stack up into work tasks. 1239 for (auto* it = mark_stack_->Begin(), *end = mark_stack_->End(); it < end; ) { 1240 const size_t delta = std::min(static_cast<size_t>(end - it), chunk_size); 1241 thread_pool->AddTask(self, new MarkStackTask<false>(thread_pool, this, delta, it)); 1242 it += delta; 1243 } 1244 thread_pool->SetMaxActiveWorkers(thread_count - 1); 1245 thread_pool->StartWorkers(self); 1246 thread_pool->Wait(self, true, true); 1247 thread_pool->StopWorkers(self); 1248 mark_stack_->Reset(); 1249 CHECK_EQ(work_chunks_created_.LoadSequentiallyConsistent(), 1250 work_chunks_deleted_.LoadSequentiallyConsistent()) 1251 << " some of the work chunks were leaked"; 1252} 1253 1254// Scan anything that's on the mark stack. 1255void MarkSweep::ProcessMarkStack(bool paused) { 1256 TimingLogger::ScopedTiming t(paused ? "(Paused)ProcessMarkStack" : __FUNCTION__, GetTimings()); 1257 size_t thread_count = GetThreadCount(paused); 1258 if (kParallelProcessMarkStack && thread_count > 1 && 1259 mark_stack_->Size() >= kMinimumParallelMarkStackSize) { 1260 ProcessMarkStackParallel(thread_count); 1261 } else { 1262 // TODO: Tune this. 
1263 static const size_t kFifoSize = 4; 1264 BoundedFifoPowerOfTwo<Object*, kFifoSize> prefetch_fifo; 1265 for (;;) { 1266 Object* obj = NULL; 1267 if (kUseMarkStackPrefetch) { 1268 while (!mark_stack_->IsEmpty() && prefetch_fifo.size() < kFifoSize) { 1269 Object* mark_stack_obj = mark_stack_->PopBack(); 1270 DCHECK(mark_stack_obj != NULL); 1271 __builtin_prefetch(mark_stack_obj); 1272 prefetch_fifo.push_back(mark_stack_obj); 1273 } 1274 if (prefetch_fifo.empty()) { 1275 break; 1276 } 1277 obj = prefetch_fifo.front(); 1278 prefetch_fifo.pop_front(); 1279 } else { 1280 if (mark_stack_->IsEmpty()) { 1281 break; 1282 } 1283 obj = mark_stack_->PopBack(); 1284 } 1285 DCHECK(obj != nullptr); 1286 ScanObject(obj); 1287 } 1288 } 1289} 1290 1291inline bool MarkSweep::IsMarked(const Object* object) const { 1292 if (immune_region_.ContainsObject(object)) { 1293 return true; 1294 } 1295 if (current_space_bitmap_->HasAddress(object)) { 1296 return current_space_bitmap_->Test(object); 1297 } 1298 return mark_bitmap_->Test(object); 1299} 1300 1301void MarkSweep::FinishPhase() { 1302 TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings()); 1303 if (kCountScannedTypes) { 1304 VLOG(gc) << "MarkSweep scanned classes=" << class_count_.LoadRelaxed() 1305 << " arrays=" << array_count_.LoadRelaxed() << " other=" << other_count_.LoadRelaxed(); 1306 } 1307 if (kCountTasks) { 1308 VLOG(gc) << "Total number of work chunks allocated: " << work_chunks_created_.LoadRelaxed(); 1309 } 1310 if (kMeasureOverhead) { 1311 VLOG(gc) << "Overhead time " << PrettyDuration(overhead_time_.LoadRelaxed()); 1312 } 1313 if (kProfileLargeObjects) { 1314 VLOG(gc) << "Large objects tested " << large_object_test_.LoadRelaxed() 1315 << " marked " << large_object_mark_.LoadRelaxed(); 1316 } 1317 if (kCountJavaLangRefs) { 1318 VLOG(gc) << "References scanned " << reference_count_.LoadRelaxed(); 1319 } 1320 if (kCountMarkedObjects) { 1321 VLOG(gc) << "Marked: null=" << mark_null_count_.LoadRelaxed() 1322 << " immune=" << mark_immune_count_.LoadRelaxed() 1323 << " fastpath=" << mark_fastpath_count_.LoadRelaxed() 1324 << " slowpath=" << mark_slowpath_count_.LoadRelaxed(); 1325 } 1326 CHECK(mark_stack_->IsEmpty()); // Ensure that the mark stack is empty. 1327 mark_stack_->Reset(); 1328 WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_); 1329 heap_->ClearMarkedObjects(); 1330} 1331 1332void MarkSweep::RevokeAllThreadLocalBuffers() { 1333 if (kRevokeRosAllocThreadLocalBuffersAtCheckpoint && IsConcurrent()) { 1334 // If concurrent, rosalloc thread-local buffers are revoked at the 1335 // thread checkpoint. Bump pointer space thread-local buffers must 1336 // not be in use. 1337 GetHeap()->AssertAllBumpPointerSpaceThreadLocalBuffersAreRevoked(); 1338 } else { 1339 TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings()); 1340 GetHeap()->RevokeAllThreadLocalBuffers(); 1341 } 1342} 1343 1344} // namespace collector 1345} // namespace gc 1346} // namespace art 1347