mark_sweep.cc revision 4460a84be92b5a94ecfb5c650aef4945ab849c93
1/* 2 * Copyright (C) 2011 The Android Open Source Project 3 * 4 * Licensed under the Apache License, Version 2.0 (the "License"); 5 * you may not use this file except in compliance with the License. 6 * You may obtain a copy of the License at 7 * 8 * http://www.apache.org/licenses/LICENSE-2.0 9 * 10 * Unless required by applicable law or agreed to in writing, software 11 * distributed under the License is distributed on an "AS IS" BASIS, 12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 * See the License for the specific language governing permissions and 14 * limitations under the License. 15 */ 16 17#include "mark_sweep.h" 18 19#include <functional> 20#include <numeric> 21#include <climits> 22#include <vector> 23 24#define ATRACE_TAG ATRACE_TAG_DALVIK 25#include "cutils/trace.h" 26 27#include "base/bounded_fifo.h" 28#include "base/logging.h" 29#include "base/macros.h" 30#include "base/mutex-inl.h" 31#include "base/timing_logger.h" 32#include "gc/accounting/card_table-inl.h" 33#include "gc/accounting/heap_bitmap-inl.h" 34#include "gc/accounting/mod_union_table.h" 35#include "gc/accounting/space_bitmap-inl.h" 36#include "gc/heap.h" 37#include "gc/reference_processor.h" 38#include "gc/space/image_space.h" 39#include "gc/space/large_object_space.h" 40#include "gc/space/space-inl.h" 41#include "mark_sweep-inl.h" 42#include "mirror/art_field-inl.h" 43#include "mirror/object-inl.h" 44#include "runtime.h" 45#include "scoped_thread_state_change.h" 46#include "thread-inl.h" 47#include "thread_list.h" 48 49using ::art::mirror::Object; 50 51namespace art { 52namespace gc { 53namespace collector { 54 55// Performance options. 56static constexpr bool kUseRecursiveMark = false; 57static constexpr bool kUseMarkStackPrefetch = true; 58static constexpr size_t kSweepArrayChunkFreeSize = 1024; 59static constexpr bool kPreCleanCards = true; 60 61// Parallelism options. 62static constexpr bool kParallelCardScan = true; 63static constexpr bool kParallelRecursiveMark = true; 64// Don't attempt to parallelize mark stack processing unless the mark stack is at least n 65// elements. This is temporary until we reduce the overhead caused by allocating tasks, etc.. Not 66// having this can add overhead in ProcessReferences since we may end up doing many calls of 67// ProcessMarkStack with very small mark stacks. 68static constexpr size_t kMinimumParallelMarkStackSize = 128; 69static constexpr bool kParallelProcessMarkStack = true; 70 71// Profiling and information flags. 72static constexpr bool kProfileLargeObjects = false; 73static constexpr bool kMeasureOverhead = false; 74static constexpr bool kCountTasks = false; 75static constexpr bool kCountJavaLangRefs = false; 76static constexpr bool kCountMarkedObjects = false; 77 78// Turn off kCheckLocks when profiling the GC since it slows the GC down by up to 40%. 79static constexpr bool kCheckLocks = kDebugLocking; 80static constexpr bool kVerifyRootsMarked = kIsDebugBuild; 81 82// If true, revoke the rosalloc thread-local buffers at the 83// checkpoint, as opposed to during the pause. 84static constexpr bool kRevokeRosAllocThreadLocalBuffersAtCheckpoint = true; 85 86void MarkSweep::BindBitmaps() { 87 TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings()); 88 WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_); 89 // Mark all of the spaces we never collect as immune. 
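  // Immune spaces are never swept by this collector; references they hold into collected spaces
  // are picked up later through their mod union tables (see UpdateAndMarkModUnion).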
90 for (const auto& space : GetHeap()->GetContinuousSpaces()) { 91 if (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyNeverCollect) { 92 CHECK(immune_region_.AddContinuousSpace(space)) << "Failed to add space " << *space; 93 } 94 } 95} 96 97MarkSweep::MarkSweep(Heap* heap, bool is_concurrent, const std::string& name_prefix) 98 : GarbageCollector(heap, 99 name_prefix + 100 (is_concurrent ? "concurrent mark sweep": "mark sweep")), 101 current_space_bitmap_(nullptr), mark_bitmap_(nullptr), mark_stack_(nullptr), 102 gc_barrier_(new Barrier(0)), 103 mark_stack_lock_("mark sweep mark stack lock", kMarkSweepMarkStackLock), 104 is_concurrent_(is_concurrent), live_stack_freeze_size_(0) { 105 std::string error_msg; 106 MemMap* mem_map = MemMap::MapAnonymous( 107 "mark sweep sweep array free buffer", nullptr, 108 RoundUp(kSweepArrayChunkFreeSize * sizeof(mirror::Object*), kPageSize), 109 PROT_READ | PROT_WRITE, false, false, &error_msg); 110 CHECK(mem_map != nullptr) << "Couldn't allocate sweep array free buffer: " << error_msg; 111 sweep_array_free_buffer_mem_map_.reset(mem_map); 112} 113 114void MarkSweep::InitializePhase() { 115 TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings()); 116 mark_stack_ = heap_->GetMarkStack(); 117 DCHECK(mark_stack_ != nullptr); 118 immune_region_.Reset(); 119 class_count_.StoreRelaxed(0); 120 array_count_.StoreRelaxed(0); 121 other_count_.StoreRelaxed(0); 122 large_object_test_.StoreRelaxed(0); 123 large_object_mark_.StoreRelaxed(0); 124 overhead_time_ .StoreRelaxed(0); 125 work_chunks_created_.StoreRelaxed(0); 126 work_chunks_deleted_.StoreRelaxed(0); 127 reference_count_.StoreRelaxed(0); 128 mark_null_count_.StoreRelaxed(0); 129 mark_immune_count_.StoreRelaxed(0); 130 mark_fastpath_count_.StoreRelaxed(0); 131 mark_slowpath_count_.StoreRelaxed(0); 132 { 133 // TODO: I don't think we should need heap bitmap lock to Get the mark bitmap. 134 ReaderMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_); 135 mark_bitmap_ = heap_->GetMarkBitmap(); 136 } 137 if (!GetCurrentIteration()->GetClearSoftReferences()) { 138 // Always clear soft references if a non-sticky collection. 139 GetCurrentIteration()->SetClearSoftReferences(GetGcType() != collector::kGcTypeSticky); 140 } 141} 142 143void MarkSweep::RunPhases() { 144 Thread* self = Thread::Current(); 145 InitializePhase(); 146 Locks::mutator_lock_->AssertNotHeld(self); 147 if (IsConcurrent()) { 148 GetHeap()->PreGcVerification(this); 149 { 150 ReaderMutexLock mu(self, *Locks::mutator_lock_); 151 MarkingPhase(); 152 } 153 ScopedPause pause(this); 154 GetHeap()->PrePauseRosAllocVerification(this); 155 PausePhase(); 156 RevokeAllThreadLocalBuffers(); 157 } else { 158 ScopedPause pause(this); 159 GetHeap()->PreGcVerificationPaused(this); 160 MarkingPhase(); 161 GetHeap()->PrePauseRosAllocVerification(this); 162 PausePhase(); 163 RevokeAllThreadLocalBuffers(); 164 } 165 { 166 // Sweeping always done concurrently, even for non concurrent mark sweep. 
    ReaderMutexLock mu(self, *Locks::mutator_lock_);
    ReclaimPhase();
  }
  GetHeap()->PostGcVerification(this);
  FinishPhase();
}

void MarkSweep::ProcessReferences(Thread* self) {
  WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
  GetHeap()->GetReferenceProcessor()->ProcessReferences(
      true, GetTimings(), GetCurrentIteration()->GetClearSoftReferences(),
      &HeapReferenceMarkedCallback, &MarkObjectCallback, &ProcessMarkStackCallback, this);
}

void MarkSweep::PausePhase() {
  TimingLogger::ScopedTiming t("(Paused)PausePhase", GetTimings());
  Thread* self = Thread::Current();
  Locks::mutator_lock_->AssertExclusiveHeld(self);
  if (IsConcurrent()) {
    // Handle the dirty objects if we are a concurrent GC.
    WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
    // Re-mark root set.
    ReMarkRoots();
    // Scan dirty objects; this is only required when doing a concurrent GC, since mutators can
    // dirty cards while the concurrent marking phase runs.
    RecursiveMarkDirtyObjects(true, accounting::CardTable::kCardDirty);
  }
  {
    TimingLogger::ScopedTiming t2("SwapStacks", GetTimings());
    WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
    heap_->SwapStacks(self);
    live_stack_freeze_size_ = heap_->GetLiveStack()->Size();
    // Need to revoke all the thread local allocation stacks since we just swapped the allocation
    // stacks and don't want anybody to allocate into the live stack.
    RevokeAllThreadLocalAllocationStacks(self);
  }
  heap_->PreSweepingGcVerification(this);
  // Disallow new system weaks to prevent a race which occurs when someone adds a new system
  // weak before we sweep them. Since this new system weak may not be marked, the GC may
  // incorrectly sweep it. This also fixes a race where interning may attempt to return a strong
  // reference to a string that is about to be swept.
  Runtime::Current()->DisallowNewSystemWeaks();
  // Enable the reference processing slow path; this needs to be done with mutators paused since
  // there is no lock in the GetReferent fast path.
  GetHeap()->GetReferenceProcessor()->EnableSlowPath();
}

void MarkSweep::PreCleanCards() {
  // Don't do this for non-concurrent GCs since they don't have any dirty cards.
  if (kPreCleanCards && IsConcurrent()) {
    TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
    Thread* self = Thread::Current();
    CHECK(!Locks::mutator_lock_->IsExclusiveHeld(self));
    // Process dirty cards and add them to the mod union tables; this also ages the cards.
    heap_->ProcessCards(GetTimings(), false, true, false);
    // The checkpoint root marking is required to avoid a race condition which occurs if the
    // following happens during a reference write:
    // 1. mutator dirties the card (write barrier)
    // 2. GC ages the card (the above ProcessCards call)
    // 3. GC scans the object (the RecursiveMarkDirtyObjects call below)
    // 4. mutator writes the value (corresponding to the write barrier in 1.)
    // This causes the GC to age the card but not necessarily mark the reference which the mutator
    // wrote into the object stored in the card.
    // Having the checkpoint fixes this issue since it ensures that the card mark and the
    // reference write are visible to the GC before the card is scanned (this is due to locks being
    // acquired / released in the checkpoint code).
    // The other roots are also marked to help reduce the pause.
    MarkRootsCheckpoint(self, false);
    MarkNonThreadRoots();
    MarkConcurrentRoots(
        static_cast<VisitRootFlags>(kVisitRootFlagClearRootLog | kVisitRootFlagNewRoots));
    // Process the newly aged cards.
    RecursiveMarkDirtyObjects(false, accounting::CardTable::kCardDirty - 1);
    // TODO: Empty allocation stack to reduce the number of objects we need to test / mark as live
    // in the next GC.
  }
}

void MarkSweep::RevokeAllThreadLocalAllocationStacks(Thread* self) {
  if (kUseThreadLocalAllocationStack) {
    TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
    Locks::mutator_lock_->AssertExclusiveHeld(self);
    heap_->RevokeAllThreadLocalAllocationStacks(self);
  }
}

void MarkSweep::MarkingPhase() {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  Thread* self = Thread::Current();
  BindBitmaps();
  FindDefaultSpaceBitmap();
  // Process dirty cards and add dirty cards to mod union tables.
  // If the GC type is non-sticky, then we just clear the cards instead of ageing them.
  heap_->ProcessCards(GetTimings(), false, true, GetGcType() != kGcTypeSticky);
  WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
  MarkRoots(self);
  MarkReachableObjects();
  // Pre-clean dirtied cards to reduce pauses.
  PreCleanCards();
}

void MarkSweep::UpdateAndMarkModUnion() {
  for (const auto& space : heap_->GetContinuousSpaces()) {
    if (immune_region_.ContainsSpace(space)) {
      const char* name = space->IsZygoteSpace() ? "UpdateAndMarkZygoteModUnionTable" :
          "UpdateAndMarkImageModUnionTable";
      TimingLogger::ScopedTiming t(name, GetTimings());
      accounting::ModUnionTable* mod_union_table = heap_->FindModUnionTableFromSpace(space);
      CHECK(mod_union_table != nullptr);
      mod_union_table->UpdateAndMarkReferences(MarkHeapReferenceCallback, this);
    }
  }
}

void MarkSweep::MarkReachableObjects() {
  UpdateAndMarkModUnion();
  // Recursively mark all the non-image bits set in the mark bitmap.
  RecursiveMark();
}

void MarkSweep::ReclaimPhase() {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  Thread* self = Thread::Current();
  // Process the references concurrently.
  ProcessReferences(self);
  SweepSystemWeaks(self);
  Runtime::Current()->AllowNewSystemWeaks();
  {
    WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
    GetHeap()->RecordFreeRevoke();
    // Reclaim unmarked objects.
    Sweep(false);
    // Swap the live and mark bitmaps for each space which we modified. This is an optimization
    // that enables us to not clear live bits inside of the sweep. Only swaps unbound bitmaps.
    SwapBitmaps();
    // Unbind the live and mark bitmaps.
    GetHeap()->UnBindBitmaps();
  }
}

void MarkSweep::FindDefaultSpaceBitmap() {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  for (const auto& space : GetHeap()->GetContinuousSpaces()) {
    accounting::ContinuousSpaceBitmap* bitmap = space->GetMarkBitmap();
    // We want to have the main space rather than the non-moving space if possible.
    if (bitmap != nullptr &&
        space->GetGcRetentionPolicy() == space::kGcRetentionPolicyAlwaysCollect) {
      current_space_bitmap_ = bitmap;
      // If this is not the non-moving space, exit the loop early since this will be good enough.
316 if (space != heap_->GetNonMovingSpace()) { 317 break; 318 } 319 } 320 } 321 CHECK(current_space_bitmap_ != nullptr) << "Could not find a default mark bitmap\n" 322 << heap_->DumpSpaces(); 323} 324 325void MarkSweep::ExpandMarkStack() { 326 ResizeMarkStack(mark_stack_->Capacity() * 2); 327} 328 329void MarkSweep::ResizeMarkStack(size_t new_size) { 330 // Rare case, no need to have Thread::Current be a parameter. 331 if (UNLIKELY(mark_stack_->Size() < mark_stack_->Capacity())) { 332 // Someone else acquired the lock and expanded the mark stack before us. 333 return; 334 } 335 std::vector<StackReference<Object>> temp(mark_stack_->Begin(), mark_stack_->End()); 336 CHECK_LE(mark_stack_->Size(), new_size); 337 mark_stack_->Resize(new_size); 338 for (auto& obj : temp) { 339 mark_stack_->PushBack(obj.AsMirrorPtr()); 340 } 341} 342 343inline void MarkSweep::MarkObjectNonNullParallel(Object* obj) { 344 DCHECK(obj != nullptr); 345 if (MarkObjectParallel(obj)) { 346 MutexLock mu(Thread::Current(), mark_stack_lock_); 347 if (UNLIKELY(mark_stack_->Size() >= mark_stack_->Capacity())) { 348 ExpandMarkStack(); 349 } 350 // The object must be pushed on to the mark stack. 351 mark_stack_->PushBack(obj); 352 } 353} 354 355mirror::Object* MarkSweep::MarkObjectCallback(mirror::Object* obj, void* arg) { 356 MarkSweep* mark_sweep = reinterpret_cast<MarkSweep*>(arg); 357 mark_sweep->MarkObject(obj); 358 return obj; 359} 360 361void MarkSweep::MarkHeapReferenceCallback(mirror::HeapReference<mirror::Object>* ref, void* arg) { 362 reinterpret_cast<MarkSweep*>(arg)->MarkObject(ref->AsMirrorPtr()); 363} 364 365bool MarkSweep::HeapReferenceMarkedCallback(mirror::HeapReference<mirror::Object>* ref, void* arg) { 366 return reinterpret_cast<MarkSweep*>(arg)->IsMarked(ref->AsMirrorPtr()); 367} 368 369class MarkSweepMarkObjectSlowPath { 370 public: 371 explicit MarkSweepMarkObjectSlowPath(MarkSweep* mark_sweep) : mark_sweep_(mark_sweep) { 372 } 373 374 void operator()(const Object* obj) const ALWAYS_INLINE { 375 if (kProfileLargeObjects) { 376 // TODO: Differentiate between marking and testing somehow. 377 ++mark_sweep_->large_object_test_; 378 ++mark_sweep_->large_object_mark_; 379 } 380 space::LargeObjectSpace* large_object_space = mark_sweep_->GetHeap()->GetLargeObjectsSpace(); 381 if (UNLIKELY(obj == nullptr || !IsAligned<kPageSize>(obj) || 382 (kIsDebugBuild && large_object_space != nullptr && 383 !large_object_space->Contains(obj)))) { 384 LOG(ERROR) << "Tried to mark " << obj << " not contained by any spaces"; 385 LOG(ERROR) << "Attempting see if it's a bad root"; 386 mark_sweep_->VerifyRoots(); 387 LOG(FATAL) << "Can't mark invalid object"; 388 } 389 } 390 391 private: 392 MarkSweep* const mark_sweep_; 393}; 394 395inline void MarkSweep::MarkObjectNonNull(Object* obj) { 396 DCHECK(obj != nullptr); 397 if (kUseBakerOrBrooksReadBarrier) { 398 // Verify all the objects have the correct pointer installed. 399 obj->AssertReadBarrierPointer(); 400 } 401 if (immune_region_.ContainsObject(obj)) { 402 if (kCountMarkedObjects) { 403 ++mark_immune_count_; 404 } 405 DCHECK(mark_bitmap_->Test(obj)); 406 } else if (LIKELY(current_space_bitmap_->HasAddress(obj))) { 407 if (kCountMarkedObjects) { 408 ++mark_fastpath_count_; 409 } 410 if (UNLIKELY(!current_space_bitmap_->Set(obj))) { 411 PushOnMarkStack(obj); // This object was not previously marked. 
412 } 413 } else { 414 if (kCountMarkedObjects) { 415 ++mark_slowpath_count_; 416 } 417 MarkSweepMarkObjectSlowPath visitor(this); 418 // TODO: We already know that the object is not in the current_space_bitmap_ but MarkBitmap::Set 419 // will check again. 420 if (!mark_bitmap_->Set(obj, visitor)) { 421 PushOnMarkStack(obj); // Was not already marked, push. 422 } 423 } 424} 425 426inline void MarkSweep::PushOnMarkStack(Object* obj) { 427 if (UNLIKELY(mark_stack_->Size() >= mark_stack_->Capacity())) { 428 // Lock is not needed but is here anyways to please annotalysis. 429 MutexLock mu(Thread::Current(), mark_stack_lock_); 430 ExpandMarkStack(); 431 } 432 // The object must be pushed on to the mark stack. 433 mark_stack_->PushBack(obj); 434} 435 436inline bool MarkSweep::MarkObjectParallel(const Object* obj) { 437 DCHECK(obj != nullptr); 438 if (kUseBakerOrBrooksReadBarrier) { 439 // Verify all the objects have the correct pointer installed. 440 obj->AssertReadBarrierPointer(); 441 } 442 if (immune_region_.ContainsObject(obj)) { 443 DCHECK(IsMarked(obj)); 444 return false; 445 } 446 // Try to take advantage of locality of references within a space, failing this find the space 447 // the hard way. 448 accounting::ContinuousSpaceBitmap* object_bitmap = current_space_bitmap_; 449 if (LIKELY(object_bitmap->HasAddress(obj))) { 450 return !object_bitmap->AtomicTestAndSet(obj); 451 } 452 MarkSweepMarkObjectSlowPath visitor(this); 453 return !mark_bitmap_->AtomicTestAndSet(obj, visitor); 454} 455 456// Used to mark objects when processing the mark stack. If an object is null, it is not marked. 457inline void MarkSweep::MarkObject(Object* obj) { 458 if (obj != nullptr) { 459 MarkObjectNonNull(obj); 460 } else if (kCountMarkedObjects) { 461 ++mark_null_count_; 462 } 463} 464 465void MarkSweep::MarkRootParallelCallback(Object** root, void* arg, const RootInfo& /*root_info*/) { 466 reinterpret_cast<MarkSweep*>(arg)->MarkObjectNonNullParallel(*root); 467} 468 469void MarkSweep::VerifyRootMarked(Object** root, void* arg, const RootInfo& /*root_info*/) { 470 CHECK(reinterpret_cast<MarkSweep*>(arg)->IsMarked(*root)); 471} 472 473void MarkSweep::MarkRootCallback(Object** root, void* arg, const RootInfo& /*root_info*/) { 474 reinterpret_cast<MarkSweep*>(arg)->MarkObjectNonNull(*root); 475} 476 477void MarkSweep::VerifyRootCallback(Object** root, void* arg, const RootInfo& root_info) { 478 reinterpret_cast<MarkSweep*>(arg)->VerifyRoot(*root, root_info); 479} 480 481void MarkSweep::VerifyRoot(const Object* root, const RootInfo& root_info) { 482 // See if the root is on any space bitmap. 483 if (heap_->GetLiveBitmap()->GetContinuousSpaceBitmap(root) == nullptr) { 484 space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace(); 485 if (large_object_space != nullptr && !large_object_space->Contains(root)) { 486 LOG(ERROR) << "Found invalid root: " << root << " "; 487 root_info.Describe(LOG(ERROR)); 488 } 489 } 490} 491 492void MarkSweep::VerifyRoots() { 493 Runtime::Current()->GetThreadList()->VisitRoots(VerifyRootCallback, this); 494} 495 496void MarkSweep::MarkRoots(Thread* self) { 497 TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings()); 498 if (Locks::mutator_lock_->IsExclusiveHeld(self)) { 499 // If we exclusively hold the mutator lock, all threads must be suspended. 
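    // In that case the thread roots can be visited directly here rather than via a checkpoint.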
500 Runtime::Current()->VisitRoots(MarkRootCallback, this); 501 RevokeAllThreadLocalAllocationStacks(self); 502 } else { 503 MarkRootsCheckpoint(self, kRevokeRosAllocThreadLocalBuffersAtCheckpoint); 504 // At this point the live stack should no longer have any mutators which push into it. 505 MarkNonThreadRoots(); 506 MarkConcurrentRoots( 507 static_cast<VisitRootFlags>(kVisitRootFlagAllRoots | kVisitRootFlagStartLoggingNewRoots)); 508 } 509} 510 511void MarkSweep::MarkNonThreadRoots() { 512 TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings()); 513 Runtime::Current()->VisitNonThreadRoots(MarkRootCallback, this); 514} 515 516void MarkSweep::MarkConcurrentRoots(VisitRootFlags flags) { 517 TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings()); 518 // Visit all runtime roots and clear dirty flags. 519 Runtime::Current()->VisitConcurrentRoots(MarkRootCallback, this, flags); 520} 521 522class ScanObjectVisitor { 523 public: 524 explicit ScanObjectVisitor(MarkSweep* const mark_sweep) ALWAYS_INLINE 525 : mark_sweep_(mark_sweep) {} 526 527 void operator()(Object* obj) const ALWAYS_INLINE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) 528 EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) { 529 if (kCheckLocks) { 530 Locks::mutator_lock_->AssertSharedHeld(Thread::Current()); 531 Locks::heap_bitmap_lock_->AssertExclusiveHeld(Thread::Current()); 532 } 533 mark_sweep_->ScanObject(obj); 534 } 535 536 private: 537 MarkSweep* const mark_sweep_; 538}; 539 540class DelayReferenceReferentVisitor { 541 public: 542 explicit DelayReferenceReferentVisitor(MarkSweep* collector) : collector_(collector) { 543 } 544 545 void operator()(mirror::Class* klass, mirror::Reference* ref) const 546 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) 547 EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) { 548 collector_->DelayReferenceReferent(klass, ref); 549 } 550 551 private: 552 MarkSweep* const collector_; 553}; 554 555template <bool kUseFinger = false> 556class MarkStackTask : public Task { 557 public: 558 MarkStackTask(ThreadPool* thread_pool, MarkSweep* mark_sweep, size_t mark_stack_size, 559 StackReference<Object>* mark_stack) 560 : mark_sweep_(mark_sweep), 561 thread_pool_(thread_pool), 562 mark_stack_pos_(mark_stack_size) { 563 // We may have to copy part of an existing mark stack when another mark stack overflows. 564 if (mark_stack_size != 0) { 565 DCHECK(mark_stack != NULL); 566 // TODO: Check performance? 
567 std::copy(mark_stack, mark_stack + mark_stack_size, mark_stack_); 568 } 569 if (kCountTasks) { 570 ++mark_sweep_->work_chunks_created_; 571 } 572 } 573 574 static const size_t kMaxSize = 1 * KB; 575 576 protected: 577 class MarkObjectParallelVisitor { 578 public: 579 explicit MarkObjectParallelVisitor(MarkStackTask<kUseFinger>* chunk_task, 580 MarkSweep* mark_sweep) ALWAYS_INLINE 581 : chunk_task_(chunk_task), mark_sweep_(mark_sweep) {} 582 583 void operator()(Object* obj, MemberOffset offset, bool /* static */) const ALWAYS_INLINE 584 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { 585 mirror::Object* ref = obj->GetFieldObject<mirror::Object>(offset); 586 if (ref != nullptr && mark_sweep_->MarkObjectParallel(ref)) { 587 if (kUseFinger) { 588 android_memory_barrier(); 589 if (reinterpret_cast<uintptr_t>(ref) >= 590 static_cast<uintptr_t>(mark_sweep_->atomic_finger_.LoadRelaxed())) { 591 return; 592 } 593 } 594 chunk_task_->MarkStackPush(ref); 595 } 596 } 597 598 private: 599 MarkStackTask<kUseFinger>* const chunk_task_; 600 MarkSweep* const mark_sweep_; 601 }; 602 603 class ScanObjectParallelVisitor { 604 public: 605 explicit ScanObjectParallelVisitor(MarkStackTask<kUseFinger>* chunk_task) ALWAYS_INLINE 606 : chunk_task_(chunk_task) {} 607 608 // No thread safety analysis since multiple threads will use this visitor. 609 void operator()(Object* obj) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) 610 EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) { 611 MarkSweep* const mark_sweep = chunk_task_->mark_sweep_; 612 MarkObjectParallelVisitor mark_visitor(chunk_task_, mark_sweep); 613 DelayReferenceReferentVisitor ref_visitor(mark_sweep); 614 mark_sweep->ScanObjectVisit(obj, mark_visitor, ref_visitor); 615 } 616 617 private: 618 MarkStackTask<kUseFinger>* const chunk_task_; 619 }; 620 621 virtual ~MarkStackTask() { 622 // Make sure that we have cleared our mark stack. 623 DCHECK_EQ(mark_stack_pos_, 0U); 624 if (kCountTasks) { 625 ++mark_sweep_->work_chunks_deleted_; 626 } 627 } 628 629 MarkSweep* const mark_sweep_; 630 ThreadPool* const thread_pool_; 631 // Thread local mark stack for this task. 632 StackReference<Object> mark_stack_[kMaxSize]; 633 // Mark stack position. 634 size_t mark_stack_pos_; 635 636 ALWAYS_INLINE void MarkStackPush(Object* obj) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { 637 if (UNLIKELY(mark_stack_pos_ == kMaxSize)) { 638 // Mark stack overflow, give 1/2 the stack to the thread pool as a new work task. 639 mark_stack_pos_ /= 2; 640 auto* task = new MarkStackTask(thread_pool_, mark_sweep_, kMaxSize - mark_stack_pos_, 641 mark_stack_ + mark_stack_pos_); 642 thread_pool_->AddTask(Thread::Current(), task); 643 } 644 DCHECK(obj != nullptr); 645 DCHECK_LT(mark_stack_pos_, kMaxSize); 646 mark_stack_[mark_stack_pos_++].Assign(obj); 647 } 648 649 virtual void Finalize() { 650 delete this; 651 } 652 653 // Scans all of the objects 654 virtual void Run(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) 655 EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) { 656 UNUSED(self); 657 ScanObjectParallelVisitor visitor(this); 658 // TODO: Tune this. 
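    // The FIFO lets each object be prefetched a few iterations before it is scanned, hiding part
    // of the cache-miss latency of the visit.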
659 static const size_t kFifoSize = 4; 660 BoundedFifoPowerOfTwo<Object*, kFifoSize> prefetch_fifo; 661 for (;;) { 662 Object* obj = nullptr; 663 if (kUseMarkStackPrefetch) { 664 while (mark_stack_pos_ != 0 && prefetch_fifo.size() < kFifoSize) { 665 Object* const mark_stack_obj = mark_stack_[--mark_stack_pos_].AsMirrorPtr(); 666 DCHECK(mark_stack_obj != nullptr); 667 __builtin_prefetch(mark_stack_obj); 668 prefetch_fifo.push_back(mark_stack_obj); 669 } 670 if (UNLIKELY(prefetch_fifo.empty())) { 671 break; 672 } 673 obj = prefetch_fifo.front(); 674 prefetch_fifo.pop_front(); 675 } else { 676 if (UNLIKELY(mark_stack_pos_ == 0)) { 677 break; 678 } 679 obj = mark_stack_[--mark_stack_pos_].AsMirrorPtr(); 680 } 681 DCHECK(obj != nullptr); 682 visitor(obj); 683 } 684 } 685}; 686 687class CardScanTask : public MarkStackTask<false> { 688 public: 689 CardScanTask(ThreadPool* thread_pool, MarkSweep* mark_sweep, 690 accounting::ContinuousSpaceBitmap* bitmap, 691 uint8_t* begin, uint8_t* end, uint8_t minimum_age, size_t mark_stack_size, 692 StackReference<Object>* mark_stack_obj, bool clear_card) 693 : MarkStackTask<false>(thread_pool, mark_sweep, mark_stack_size, mark_stack_obj), 694 bitmap_(bitmap), 695 begin_(begin), 696 end_(end), 697 minimum_age_(minimum_age), clear_card_(clear_card) { 698 } 699 700 protected: 701 accounting::ContinuousSpaceBitmap* const bitmap_; 702 uint8_t* const begin_; 703 uint8_t* const end_; 704 const uint8_t minimum_age_; 705 const bool clear_card_; 706 707 virtual void Finalize() { 708 delete this; 709 } 710 711 virtual void Run(Thread* self) NO_THREAD_SAFETY_ANALYSIS { 712 ScanObjectParallelVisitor visitor(this); 713 accounting::CardTable* card_table = mark_sweep_->GetHeap()->GetCardTable(); 714 size_t cards_scanned = clear_card_ ? 715 card_table->Scan<true>(bitmap_, begin_, end_, visitor, minimum_age_) : 716 card_table->Scan<false>(bitmap_, begin_, end_, visitor, minimum_age_); 717 VLOG(heap) << "Parallel scanning cards " << reinterpret_cast<void*>(begin_) << " - " 718 << reinterpret_cast<void*>(end_) << " = " << cards_scanned; 719 // Finish by emptying our local mark stack. 720 MarkStackTask::Run(self); 721 } 722}; 723 724size_t MarkSweep::GetThreadCount(bool paused) const { 725 if (heap_->GetThreadPool() == nullptr || !heap_->CareAboutPauseTimes()) { 726 return 1; 727 } 728 if (paused) { 729 return heap_->GetParallelGCThreadCount() + 1; 730 } else { 731 return heap_->GetConcGCThreadCount() + 1; 732 } 733} 734 735void MarkSweep::ScanGrayObjects(bool paused, uint8_t minimum_age) { 736 accounting::CardTable* card_table = GetHeap()->GetCardTable(); 737 ThreadPool* thread_pool = GetHeap()->GetThreadPool(); 738 size_t thread_count = GetThreadCount(paused); 739 // The parallel version with only one thread is faster for card scanning, TODO: fix. 740 if (kParallelCardScan && thread_count > 1) { 741 Thread* self = Thread::Current(); 742 // Can't have a different split for each space since multiple spaces can have their cards being 743 // scanned at the same time. 744 TimingLogger::ScopedTiming t(paused ? "(Paused)ScanGrayObjects" : __FUNCTION__, 745 GetTimings()); 746 // Try to take some of the mark stack since we can pass this off to the worker tasks. 747 StackReference<Object>* mark_stack_begin = mark_stack_->Begin(); 748 StackReference<Object>* mark_stack_end = mark_stack_->End(); 749 const size_t mark_stack_size = mark_stack_end - mark_stack_begin; 750 // Estimated number of work tasks we will create. 
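    // This bounds how many mark stack entries are handed off to each CardScanTask below.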
751 const size_t mark_stack_tasks = GetHeap()->GetContinuousSpaces().size() * thread_count; 752 DCHECK_NE(mark_stack_tasks, 0U); 753 const size_t mark_stack_delta = std::min(CardScanTask::kMaxSize / 2, 754 mark_stack_size / mark_stack_tasks + 1); 755 for (const auto& space : GetHeap()->GetContinuousSpaces()) { 756 if (space->GetMarkBitmap() == nullptr) { 757 continue; 758 } 759 uint8_t* card_begin = space->Begin(); 760 uint8_t* card_end = space->End(); 761 // Align up the end address. For example, the image space's end 762 // may not be card-size-aligned. 763 card_end = AlignUp(card_end, accounting::CardTable::kCardSize); 764 DCHECK(IsAligned<accounting::CardTable::kCardSize>(card_begin)); 765 DCHECK(IsAligned<accounting::CardTable::kCardSize>(card_end)); 766 // Calculate how many bytes of heap we will scan, 767 const size_t address_range = card_end - card_begin; 768 // Calculate how much address range each task gets. 769 const size_t card_delta = RoundUp(address_range / thread_count + 1, 770 accounting::CardTable::kCardSize); 771 // If paused and the space is neither zygote nor image space, we could clear the dirty 772 // cards to avoid accumulating them to increase card scanning load in the following GC 773 // cycles. We need to keep dirty cards of image space and zygote space in order to track 774 // references to the other spaces. 775 bool clear_card = paused && !space->IsZygoteSpace() && !space->IsImageSpace(); 776 // Create the worker tasks for this space. 777 while (card_begin != card_end) { 778 // Add a range of cards. 779 size_t addr_remaining = card_end - card_begin; 780 size_t card_increment = std::min(card_delta, addr_remaining); 781 // Take from the back of the mark stack. 782 size_t mark_stack_remaining = mark_stack_end - mark_stack_begin; 783 size_t mark_stack_increment = std::min(mark_stack_delta, mark_stack_remaining); 784 mark_stack_end -= mark_stack_increment; 785 mark_stack_->PopBackCount(static_cast<int32_t>(mark_stack_increment)); 786 DCHECK_EQ(mark_stack_end, mark_stack_->End()); 787 // Add the new task to the thread pool. 788 auto* task = new CardScanTask(thread_pool, this, space->GetMarkBitmap(), card_begin, 789 card_begin + card_increment, minimum_age, 790 mark_stack_increment, mark_stack_end, clear_card); 791 thread_pool->AddTask(self, task); 792 card_begin += card_increment; 793 } 794 } 795 796 // Note: the card scan below may dirty new cards (and scan them) 797 // as a side effect when a Reference object is encountered and 798 // queued during the marking. See b/11465268. 799 thread_pool->SetMaxActiveWorkers(thread_count - 1); 800 thread_pool->StartWorkers(self); 801 thread_pool->Wait(self, true, true); 802 thread_pool->StopWorkers(self); 803 } else { 804 for (const auto& space : GetHeap()->GetContinuousSpaces()) { 805 if (space->GetMarkBitmap() != nullptr) { 806 // Image spaces are handled properly since live == marked for them. 807 const char* name = nullptr; 808 switch (space->GetGcRetentionPolicy()) { 809 case space::kGcRetentionPolicyNeverCollect: 810 name = paused ? "(Paused)ScanGrayImageSpaceObjects" : "ScanGrayImageSpaceObjects"; 811 break; 812 case space::kGcRetentionPolicyFullCollect: 813 name = paused ? "(Paused)ScanGrayZygoteSpaceObjects" : "ScanGrayZygoteSpaceObjects"; 814 break; 815 case space::kGcRetentionPolicyAlwaysCollect: 816 name = paused ? 
"(Paused)ScanGrayAllocSpaceObjects" : "ScanGrayAllocSpaceObjects"; 817 break; 818 default: 819 LOG(FATAL) << "Unreachable"; 820 UNREACHABLE(); 821 } 822 TimingLogger::ScopedTiming t(name, GetTimings()); 823 ScanObjectVisitor visitor(this); 824 bool clear_card = paused && !space->IsZygoteSpace() && !space->IsImageSpace(); 825 if (clear_card) { 826 card_table->Scan<true>(space->GetMarkBitmap(), space->Begin(), space->End(), visitor, 827 minimum_age); 828 } else { 829 card_table->Scan<false>(space->GetMarkBitmap(), space->Begin(), space->End(), visitor, 830 minimum_age); 831 } 832 } 833 } 834 } 835} 836 837class RecursiveMarkTask : public MarkStackTask<false> { 838 public: 839 RecursiveMarkTask(ThreadPool* thread_pool, MarkSweep* mark_sweep, 840 accounting::ContinuousSpaceBitmap* bitmap, uintptr_t begin, uintptr_t end) 841 : MarkStackTask<false>(thread_pool, mark_sweep, 0, NULL), bitmap_(bitmap), begin_(begin), 842 end_(end) { 843 } 844 845 protected: 846 accounting::ContinuousSpaceBitmap* const bitmap_; 847 const uintptr_t begin_; 848 const uintptr_t end_; 849 850 virtual void Finalize() { 851 delete this; 852 } 853 854 // Scans all of the objects 855 virtual void Run(Thread* self) NO_THREAD_SAFETY_ANALYSIS { 856 ScanObjectParallelVisitor visitor(this); 857 bitmap_->VisitMarkedRange(begin_, end_, visitor); 858 // Finish by emptying our local mark stack. 859 MarkStackTask::Run(self); 860 } 861}; 862 863// Populates the mark stack based on the set of marked objects and 864// recursively marks until the mark stack is emptied. 865void MarkSweep::RecursiveMark() { 866 TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings()); 867 // RecursiveMark will build the lists of known instances of the Reference classes. See 868 // DelayReferenceReferent for details. 869 if (kUseRecursiveMark) { 870 const bool partial = GetGcType() == kGcTypePartial; 871 ScanObjectVisitor scan_visitor(this); 872 auto* self = Thread::Current(); 873 ThreadPool* thread_pool = heap_->GetThreadPool(); 874 size_t thread_count = GetThreadCount(false); 875 const bool parallel = kParallelRecursiveMark && thread_count > 1; 876 mark_stack_->Reset(); 877 for (const auto& space : GetHeap()->GetContinuousSpaces()) { 878 if ((space->GetGcRetentionPolicy() == space::kGcRetentionPolicyAlwaysCollect) || 879 (!partial && space->GetGcRetentionPolicy() == space::kGcRetentionPolicyFullCollect)) { 880 current_space_bitmap_ = space->GetMarkBitmap(); 881 if (current_space_bitmap_ == nullptr) { 882 continue; 883 } 884 if (parallel) { 885 // We will use the mark stack the future. 886 // CHECK(mark_stack_->IsEmpty()); 887 // This function does not handle heap end increasing, so we must use the space end. 888 uintptr_t begin = reinterpret_cast<uintptr_t>(space->Begin()); 889 uintptr_t end = reinterpret_cast<uintptr_t>(space->End()); 890 atomic_finger_.StoreRelaxed(AtomicInteger::MaxValue()); 891 892 // Create a few worker tasks. 
893 const size_t n = thread_count * 2; 894 while (begin != end) { 895 uintptr_t start = begin; 896 uintptr_t delta = (end - begin) / n; 897 delta = RoundUp(delta, KB); 898 if (delta < 16 * KB) delta = end - begin; 899 begin += delta; 900 auto* task = new RecursiveMarkTask(thread_pool, this, current_space_bitmap_, start, 901 begin); 902 thread_pool->AddTask(self, task); 903 } 904 thread_pool->SetMaxActiveWorkers(thread_count - 1); 905 thread_pool->StartWorkers(self); 906 thread_pool->Wait(self, true, true); 907 thread_pool->StopWorkers(self); 908 } else { 909 // This function does not handle heap end increasing, so we must use the space end. 910 uintptr_t begin = reinterpret_cast<uintptr_t>(space->Begin()); 911 uintptr_t end = reinterpret_cast<uintptr_t>(space->End()); 912 current_space_bitmap_->VisitMarkedRange(begin, end, scan_visitor); 913 } 914 } 915 } 916 } 917 ProcessMarkStack(false); 918} 919 920mirror::Object* MarkSweep::IsMarkedCallback(mirror::Object* object, void* arg) { 921 if (reinterpret_cast<MarkSweep*>(arg)->IsMarked(object)) { 922 return object; 923 } 924 return nullptr; 925} 926 927void MarkSweep::RecursiveMarkDirtyObjects(bool paused, uint8_t minimum_age) { 928 ScanGrayObjects(paused, minimum_age); 929 ProcessMarkStack(paused); 930} 931 932void MarkSweep::ReMarkRoots() { 933 TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings()); 934 Locks::mutator_lock_->AssertExclusiveHeld(Thread::Current()); 935 Runtime::Current()->VisitRoots( 936 MarkRootCallback, this, static_cast<VisitRootFlags>(kVisitRootFlagNewRoots | 937 kVisitRootFlagStopLoggingNewRoots | 938 kVisitRootFlagClearRootLog)); 939 if (kVerifyRootsMarked) { 940 TimingLogger::ScopedTiming t2("(Paused)VerifyRoots", GetTimings()); 941 Runtime::Current()->VisitRoots(VerifyRootMarked, this); 942 } 943} 944 945void MarkSweep::SweepSystemWeaks(Thread* self) { 946 TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings()); 947 WriterMutexLock mu(self, *Locks::heap_bitmap_lock_); 948 Runtime::Current()->SweepSystemWeaks(IsMarkedCallback, this); 949} 950 951mirror::Object* MarkSweep::VerifySystemWeakIsLiveCallback(Object* obj, void* arg) { 952 reinterpret_cast<MarkSweep*>(arg)->VerifyIsLive(obj); 953 // We don't actually want to sweep the object, so lets return "marked" 954 return obj; 955} 956 957void MarkSweep::VerifyIsLive(const Object* obj) { 958 if (!heap_->GetLiveBitmap()->Test(obj)) { 959 // TODO: Consider live stack? Has this code bitrotted? 960 CHECK(!heap_->allocation_stack_->Contains(obj)) 961 << "Found dead object " << obj << "\n" << heap_->DumpSpaces(); 962 } 963} 964 965void MarkSweep::VerifySystemWeaks() { 966 TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings()); 967 // Verify system weaks, uses a special object visitor which returns the input object. 968 Runtime::Current()->SweepSystemWeaks(VerifySystemWeakIsLiveCallback, this); 969} 970 971class CheckpointMarkThreadRoots : public Closure { 972 public: 973 explicit CheckpointMarkThreadRoots(MarkSweep* mark_sweep, 974 bool revoke_ros_alloc_thread_local_buffers_at_checkpoint) 975 : mark_sweep_(mark_sweep), 976 revoke_ros_alloc_thread_local_buffers_at_checkpoint_( 977 revoke_ros_alloc_thread_local_buffers_at_checkpoint) { 978 } 979 980 virtual void Run(Thread* thread) OVERRIDE NO_THREAD_SAFETY_ANALYSIS { 981 ATRACE_BEGIN("Marking thread roots"); 982 // Note: self is not necessarily equal to thread since thread may be suspended. 
    Thread* self = Thread::Current();
    CHECK(thread == self || thread->IsSuspended() || thread->GetState() == kWaitingPerformingGc)
        << thread->GetState() << " thread " << thread << " self " << self;
    thread->VisitRoots(MarkSweep::MarkRootParallelCallback, mark_sweep_);
    ATRACE_END();
    if (revoke_ros_alloc_thread_local_buffers_at_checkpoint_) {
      ATRACE_BEGIN("RevokeRosAllocThreadLocalBuffers");
      mark_sweep_->GetHeap()->RevokeRosAllocThreadLocalBuffers(thread);
      ATRACE_END();
    }
    // If thread is a running mutator, then act on behalf of the garbage collector.
    // See the code in ThreadList::RunCheckpoint.
    if (thread->GetState() == kRunnable) {
      mark_sweep_->GetBarrier().Pass(self);
    }
  }

 private:
  MarkSweep* const mark_sweep_;
  const bool revoke_ros_alloc_thread_local_buffers_at_checkpoint_;
};

void MarkSweep::MarkRootsCheckpoint(Thread* self,
                                    bool revoke_ros_alloc_thread_local_buffers_at_checkpoint) {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  CheckpointMarkThreadRoots check_point(this, revoke_ros_alloc_thread_local_buffers_at_checkpoint);
  ThreadList* thread_list = Runtime::Current()->GetThreadList();
  // Request that the checkpoint be run on all threads, returning a count of the threads that must
  // run through the barrier, including self.
  size_t barrier_count = thread_list->RunCheckpoint(&check_point);
  // Release locks then wait for all mutator threads to pass the barrier.
  // If there are no threads to wait for, which implies that all the checkpoint functions have
  // finished, then there is no need to release the locks.
  if (barrier_count == 0) {
    return;
  }
  Locks::heap_bitmap_lock_->ExclusiveUnlock(self);
  Locks::mutator_lock_->SharedUnlock(self);
  {
    ScopedThreadStateChange tsc(self, kWaitingForCheckPointsToRun);
    gc_barrier_->Increment(self, barrier_count);
  }
  Locks::mutator_lock_->SharedLock(self);
  Locks::heap_bitmap_lock_->ExclusiveLock(self);
}

void MarkSweep::SweepArray(accounting::ObjectStack* allocations, bool swap_bitmaps) {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  Thread* self = Thread::Current();
  mirror::Object** chunk_free_buffer = reinterpret_cast<mirror::Object**>(
      sweep_array_free_buffer_mem_map_->BaseBegin());
  size_t chunk_free_pos = 0;
  ObjectBytePair freed;
  ObjectBytePair freed_los;
  // How many objects are left in the array, modified after each space is swept.
  StackReference<Object>* objects = allocations->Begin();
  size_t count = allocations->Size();
  // Change the order to ensure that the non-moving space is swept last, as an optimization.
  std::vector<space::ContinuousSpace*> sweep_spaces;
  space::ContinuousSpace* non_moving_space = nullptr;
  for (space::ContinuousSpace* space : heap_->GetContinuousSpaces()) {
    if (space->IsAllocSpace() && !immune_region_.ContainsSpace(space) &&
        space->GetLiveBitmap() != nullptr) {
      if (space == heap_->GetNonMovingSpace()) {
        non_moving_space = space;
      } else {
        sweep_spaces.push_back(space);
      }
    }
  }
  // Unlikely to sweep a significant amount of non-movable objects, so we do these after the
  // other alloc spaces as an optimization.
  if (non_moving_space != nullptr) {
    sweep_spaces.push_back(non_moving_space);
  }
  // Start by sweeping the continuous spaces.
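  // For each space, walk the remaining allocation stack entries: unmarked objects that belong to
  // the space are freed in batches of kSweepArrayChunkFreeSize, while objects outside the space
  // are kept for the following spaces.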
1059 for (space::ContinuousSpace* space : sweep_spaces) { 1060 space::AllocSpace* alloc_space = space->AsAllocSpace(); 1061 accounting::ContinuousSpaceBitmap* live_bitmap = space->GetLiveBitmap(); 1062 accounting::ContinuousSpaceBitmap* mark_bitmap = space->GetMarkBitmap(); 1063 if (swap_bitmaps) { 1064 std::swap(live_bitmap, mark_bitmap); 1065 } 1066 StackReference<Object>* out = objects; 1067 for (size_t i = 0; i < count; ++i) { 1068 Object* const obj = objects[i].AsMirrorPtr(); 1069 if (kUseThreadLocalAllocationStack && obj == nullptr) { 1070 continue; 1071 } 1072 if (space->HasAddress(obj)) { 1073 // This object is in the space, remove it from the array and add it to the sweep buffer 1074 // if needed. 1075 if (!mark_bitmap->Test(obj)) { 1076 if (chunk_free_pos >= kSweepArrayChunkFreeSize) { 1077 TimingLogger::ScopedTiming t2("FreeList", GetTimings()); 1078 freed.objects += chunk_free_pos; 1079 freed.bytes += alloc_space->FreeList(self, chunk_free_pos, chunk_free_buffer); 1080 chunk_free_pos = 0; 1081 } 1082 chunk_free_buffer[chunk_free_pos++] = obj; 1083 } 1084 } else { 1085 (out++)->Assign(obj); 1086 } 1087 } 1088 if (chunk_free_pos > 0) { 1089 TimingLogger::ScopedTiming t2("FreeList", GetTimings()); 1090 freed.objects += chunk_free_pos; 1091 freed.bytes += alloc_space->FreeList(self, chunk_free_pos, chunk_free_buffer); 1092 chunk_free_pos = 0; 1093 } 1094 // All of the references which space contained are no longer in the allocation stack, update 1095 // the count. 1096 count = out - objects; 1097 } 1098 // Handle the large object space. 1099 space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace(); 1100 if (large_object_space != nullptr) { 1101 accounting::LargeObjectBitmap* large_live_objects = large_object_space->GetLiveBitmap(); 1102 accounting::LargeObjectBitmap* large_mark_objects = large_object_space->GetMarkBitmap(); 1103 if (swap_bitmaps) { 1104 std::swap(large_live_objects, large_mark_objects); 1105 } 1106 for (size_t i = 0; i < count; ++i) { 1107 Object* const obj = objects[i].AsMirrorPtr(); 1108 // Handle large objects. 1109 if (kUseThreadLocalAllocationStack && obj == nullptr) { 1110 continue; 1111 } 1112 if (!large_mark_objects->Test(obj)) { 1113 ++freed_los.objects; 1114 freed_los.bytes += large_object_space->Free(self, obj); 1115 } 1116 } 1117 } 1118 { 1119 TimingLogger::ScopedTiming t2("RecordFree", GetTimings()); 1120 RecordFree(freed); 1121 RecordFreeLOS(freed_los); 1122 t2.NewTiming("ResetStack"); 1123 allocations->Reset(); 1124 } 1125 sweep_array_free_buffer_mem_map_->MadviseDontNeedAndZero(); 1126} 1127 1128void MarkSweep::Sweep(bool swap_bitmaps) { 1129 TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings()); 1130 // Ensure that nobody inserted items in the live stack after we swapped the stacks. 1131 CHECK_GE(live_stack_freeze_size_, GetHeap()->GetLiveStack()->Size()); 1132 { 1133 TimingLogger::ScopedTiming t2("MarkAllocStackAsLive", GetTimings()); 1134 // Mark everything allocated since the last as GC live so that we can sweep concurrently, 1135 // knowing that new allocations won't be marked as live. 
1136 accounting::ObjectStack* live_stack = heap_->GetLiveStack(); 1137 heap_->MarkAllocStackAsLive(live_stack); 1138 live_stack->Reset(); 1139 DCHECK(mark_stack_->IsEmpty()); 1140 } 1141 for (const auto& space : GetHeap()->GetContinuousSpaces()) { 1142 if (space->IsContinuousMemMapAllocSpace()) { 1143 space::ContinuousMemMapAllocSpace* alloc_space = space->AsContinuousMemMapAllocSpace(); 1144 TimingLogger::ScopedTiming split( 1145 alloc_space->IsZygoteSpace() ? "SweepZygoteSpace" : "SweepMallocSpace", GetTimings()); 1146 RecordFree(alloc_space->Sweep(swap_bitmaps)); 1147 } 1148 } 1149 SweepLargeObjects(swap_bitmaps); 1150} 1151 1152void MarkSweep::SweepLargeObjects(bool swap_bitmaps) { 1153 space::LargeObjectSpace* los = heap_->GetLargeObjectsSpace(); 1154 if (los != nullptr) { 1155 TimingLogger::ScopedTiming split(__FUNCTION__, GetTimings()); 1156 RecordFreeLOS(los->Sweep(swap_bitmaps)); 1157 } 1158} 1159 1160// Process the "referent" field in a java.lang.ref.Reference. If the referent has not yet been 1161// marked, put it on the appropriate list in the heap for later processing. 1162void MarkSweep::DelayReferenceReferent(mirror::Class* klass, mirror::Reference* ref) { 1163 if (kCountJavaLangRefs) { 1164 ++reference_count_; 1165 } 1166 heap_->GetReferenceProcessor()->DelayReferenceReferent(klass, ref, &HeapReferenceMarkedCallback, 1167 this); 1168} 1169 1170class MarkObjectVisitor { 1171 public: 1172 explicit MarkObjectVisitor(MarkSweep* const mark_sweep) ALWAYS_INLINE : mark_sweep_(mark_sweep) { 1173 } 1174 1175 void operator()(Object* obj, MemberOffset offset, bool /* is_static */) const 1176 ALWAYS_INLINE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) 1177 EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) { 1178 if (kCheckLocks) { 1179 Locks::mutator_lock_->AssertSharedHeld(Thread::Current()); 1180 Locks::heap_bitmap_lock_->AssertExclusiveHeld(Thread::Current()); 1181 } 1182 mark_sweep_->MarkObject(obj->GetFieldObject<mirror::Object>(offset)); 1183 } 1184 1185 private: 1186 MarkSweep* const mark_sweep_; 1187}; 1188 1189// Scans an object reference. Determines the type of the reference 1190// and dispatches to a specialized scanning routine. 1191void MarkSweep::ScanObject(Object* obj) { 1192 MarkObjectVisitor mark_visitor(this); 1193 DelayReferenceReferentVisitor ref_visitor(this); 1194 ScanObjectVisit(obj, mark_visitor, ref_visitor); 1195} 1196 1197void MarkSweep::ProcessMarkStackCallback(void* arg) { 1198 reinterpret_cast<MarkSweep*>(arg)->ProcessMarkStack(false); 1199} 1200 1201void MarkSweep::ProcessMarkStackParallel(size_t thread_count) { 1202 Thread* self = Thread::Current(); 1203 ThreadPool* thread_pool = GetHeap()->GetThreadPool(); 1204 const size_t chunk_size = std::min(mark_stack_->Size() / thread_count + 1, 1205 static_cast<size_t>(MarkStackTask<false>::kMaxSize)); 1206 CHECK_GT(chunk_size, 0U); 1207 // Split the current mark stack up into work tasks. 
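  // Each task takes at most MarkStackTask<false>::kMaxSize entries; a task that overflows its
  // local mark stack hands half of it back to the thread pool as a new task (see MarkStackPush).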
1208 for (auto* it = mark_stack_->Begin(), *end = mark_stack_->End(); it < end; ) { 1209 const size_t delta = std::min(static_cast<size_t>(end - it), chunk_size); 1210 thread_pool->AddTask(self, new MarkStackTask<false>(thread_pool, this, delta, it)); 1211 it += delta; 1212 } 1213 thread_pool->SetMaxActiveWorkers(thread_count - 1); 1214 thread_pool->StartWorkers(self); 1215 thread_pool->Wait(self, true, true); 1216 thread_pool->StopWorkers(self); 1217 mark_stack_->Reset(); 1218 CHECK_EQ(work_chunks_created_.LoadSequentiallyConsistent(), 1219 work_chunks_deleted_.LoadSequentiallyConsistent()) 1220 << " some of the work chunks were leaked"; 1221} 1222 1223// Scan anything that's on the mark stack. 1224void MarkSweep::ProcessMarkStack(bool paused) { 1225 TimingLogger::ScopedTiming t(paused ? "(Paused)ProcessMarkStack" : __FUNCTION__, GetTimings()); 1226 size_t thread_count = GetThreadCount(paused); 1227 if (kParallelProcessMarkStack && thread_count > 1 && 1228 mark_stack_->Size() >= kMinimumParallelMarkStackSize) { 1229 ProcessMarkStackParallel(thread_count); 1230 } else { 1231 // TODO: Tune this. 1232 static const size_t kFifoSize = 4; 1233 BoundedFifoPowerOfTwo<Object*, kFifoSize> prefetch_fifo; 1234 for (;;) { 1235 Object* obj = NULL; 1236 if (kUseMarkStackPrefetch) { 1237 while (!mark_stack_->IsEmpty() && prefetch_fifo.size() < kFifoSize) { 1238 Object* mark_stack_obj = mark_stack_->PopBack(); 1239 DCHECK(mark_stack_obj != NULL); 1240 __builtin_prefetch(mark_stack_obj); 1241 prefetch_fifo.push_back(mark_stack_obj); 1242 } 1243 if (prefetch_fifo.empty()) { 1244 break; 1245 } 1246 obj = prefetch_fifo.front(); 1247 prefetch_fifo.pop_front(); 1248 } else { 1249 if (mark_stack_->IsEmpty()) { 1250 break; 1251 } 1252 obj = mark_stack_->PopBack(); 1253 } 1254 DCHECK(obj != nullptr); 1255 ScanObject(obj); 1256 } 1257 } 1258} 1259 1260inline bool MarkSweep::IsMarked(const Object* object) const { 1261 if (immune_region_.ContainsObject(object)) { 1262 return true; 1263 } 1264 if (current_space_bitmap_->HasAddress(object)) { 1265 return current_space_bitmap_->Test(object); 1266 } 1267 return mark_bitmap_->Test(object); 1268} 1269 1270void MarkSweep::FinishPhase() { 1271 TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings()); 1272 if (kCountScannedTypes) { 1273 VLOG(gc) << "MarkSweep scanned classes=" << class_count_.LoadRelaxed() 1274 << " arrays=" << array_count_.LoadRelaxed() << " other=" << other_count_.LoadRelaxed(); 1275 } 1276 if (kCountTasks) { 1277 VLOG(gc) << "Total number of work chunks allocated: " << work_chunks_created_.LoadRelaxed(); 1278 } 1279 if (kMeasureOverhead) { 1280 VLOG(gc) << "Overhead time " << PrettyDuration(overhead_time_.LoadRelaxed()); 1281 } 1282 if (kProfileLargeObjects) { 1283 VLOG(gc) << "Large objects tested " << large_object_test_.LoadRelaxed() 1284 << " marked " << large_object_mark_.LoadRelaxed(); 1285 } 1286 if (kCountJavaLangRefs) { 1287 VLOG(gc) << "References scanned " << reference_count_.LoadRelaxed(); 1288 } 1289 if (kCountMarkedObjects) { 1290 VLOG(gc) << "Marked: null=" << mark_null_count_.LoadRelaxed() 1291 << " immune=" << mark_immune_count_.LoadRelaxed() 1292 << " fastpath=" << mark_fastpath_count_.LoadRelaxed() 1293 << " slowpath=" << mark_slowpath_count_.LoadRelaxed(); 1294 } 1295 CHECK(mark_stack_->IsEmpty()); // Ensure that the mark stack is empty. 
1296 mark_stack_->Reset(); 1297 WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_); 1298 heap_->ClearMarkedObjects(); 1299} 1300 1301void MarkSweep::RevokeAllThreadLocalBuffers() { 1302 if (kRevokeRosAllocThreadLocalBuffersAtCheckpoint && IsConcurrent()) { 1303 // If concurrent, rosalloc thread-local buffers are revoked at the 1304 // thread checkpoint. Bump pointer space thread-local buffers must 1305 // not be in use. 1306 GetHeap()->AssertAllBumpPointerSpaceThreadLocalBuffersAreRevoked(); 1307 } else { 1308 TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings()); 1309 GetHeap()->RevokeAllThreadLocalBuffers(); 1310 } 1311} 1312 1313} // namespace collector 1314} // namespace gc 1315} // namespace art 1316