mark_sweep.cc revision 13735955f39b3b304c37d2b2840663c131262c18
/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "mark_sweep.h"

#include <functional>
#include <numeric>
#include <climits>
#include <vector>

#include "base/bounded_fifo.h"
#include "base/logging.h"
#include "base/macros.h"
#include "base/mutex-inl.h"
#include "base/timing_logger.h"
#include "gc/accounting/card_table-inl.h"
#include "gc/accounting/heap_bitmap-inl.h"
#include "gc/accounting/mod_union_table.h"
#include "gc/accounting/space_bitmap-inl.h"
#include "gc/heap.h"
#include "gc/reference_processor.h"
#include "gc/space/image_space.h"
#include "gc/space/large_object_space.h"
#include "gc/space/space-inl.h"
#include "mark_sweep-inl.h"
#include "mirror/art_field-inl.h"
#include "mirror/object-inl.h"
#include "runtime.h"
#include "scoped_thread_state_change.h"
#include "thread-inl.h"
#include "thread_list.h"

using ::art::mirror::Object;

namespace art {
namespace gc {
namespace collector {

// Performance options.
static constexpr bool kUseRecursiveMark = false;
static constexpr bool kUseMarkStackPrefetch = true;
static constexpr size_t kSweepArrayChunkFreeSize = 1024;
static constexpr bool kPreCleanCards = true;

// Parallelism options.
static constexpr bool kParallelCardScan = true;
static constexpr bool kParallelRecursiveMark = true;
// Don't attempt to parallelize mark stack processing unless the mark stack has at least n
// elements. This is temporary until we reduce the overhead caused by allocating tasks, etc. Not
// having this can add overhead in ProcessReferences since we may end up doing many calls of
// ProcessMarkStack with very small mark stacks.
static constexpr size_t kMinimumParallelMarkStackSize = 128;
static constexpr bool kParallelProcessMarkStack = true;

// Profiling and information flags.
static constexpr bool kProfileLargeObjects = false;
static constexpr bool kMeasureOverhead = false;
static constexpr bool kCountTasks = false;
static constexpr bool kCountJavaLangRefs = false;
static constexpr bool kCountMarkedObjects = false;

// Turn off kCheckLocks when profiling the GC since it slows the GC down by up to 40%.
static constexpr bool kCheckLocks = kDebugLocking;
static constexpr bool kVerifyRootsMarked = kIsDebugBuild;

// If true, revoke the rosalloc thread-local buffers at the
// checkpoint, as opposed to during the pause.
static constexpr bool kRevokeRosAllocThreadLocalBuffersAtCheckpoint = true;

void MarkSweep::BindBitmaps() {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
  // Mark all of the spaces we never collect as immune.
  for (const auto& space : GetHeap()->GetContinuousSpaces()) {
    if (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyNeverCollect) {
      CHECK(immune_region_.AddContinuousSpace(space)) << "Failed to add space " << *space;
    }
  }
}

MarkSweep::MarkSweep(Heap* heap, bool is_concurrent, const std::string& name_prefix)
    : GarbageCollector(heap,
                       name_prefix +
                       (is_concurrent ? "concurrent mark sweep" : "mark sweep")),
      current_space_bitmap_(nullptr), mark_bitmap_(nullptr), mark_stack_(nullptr),
      gc_barrier_(new Barrier(0)),
      mark_stack_lock_("mark sweep mark stack lock", kMarkSweepMarkStackLock),
      is_concurrent_(is_concurrent), live_stack_freeze_size_(0) {
  std::string error_msg;
  MemMap* mem_map = MemMap::MapAnonymous(
      "mark sweep sweep array free buffer", nullptr,
      RoundUp(kSweepArrayChunkFreeSize * sizeof(mirror::Object*), kPageSize),
      PROT_READ | PROT_WRITE, false, &error_msg);
  CHECK(mem_map != nullptr) << "Couldn't allocate sweep array free buffer: " << error_msg;
  sweep_array_free_buffer_mem_map_.reset(mem_map);
}

void MarkSweep::InitializePhase() {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  mark_stack_ = heap_->GetMarkStack();
  DCHECK(mark_stack_ != nullptr);
  immune_region_.Reset();
  class_count_.StoreRelaxed(0);
  array_count_.StoreRelaxed(0);
  other_count_.StoreRelaxed(0);
  large_object_test_.StoreRelaxed(0);
  large_object_mark_.StoreRelaxed(0);
  overhead_time_.StoreRelaxed(0);
  work_chunks_created_.StoreRelaxed(0);
  work_chunks_deleted_.StoreRelaxed(0);
  reference_count_.StoreRelaxed(0);
  mark_null_count_.StoreRelaxed(0);
  mark_immune_count_.StoreRelaxed(0);
  mark_fastpath_count_.StoreRelaxed(0);
  mark_slowpath_count_.StoreRelaxed(0);
  {
    // TODO: I don't think we should need the heap bitmap lock to get the mark bitmap.
    ReaderMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
    mark_bitmap_ = heap_->GetMarkBitmap();
  }
  if (!GetCurrentIteration()->GetClearSoftReferences()) {
    // Always clear soft references if this is a non-sticky collection.
    GetCurrentIteration()->SetClearSoftReferences(GetGcType() != collector::kGcTypeSticky);
  }
}

void MarkSweep::RunPhases() {
  Thread* self = Thread::Current();
  InitializePhase();
  Locks::mutator_lock_->AssertNotHeld(self);
  if (IsConcurrent()) {
    GetHeap()->PreGcVerification(this);
    {
      ReaderMutexLock mu(self, *Locks::mutator_lock_);
      MarkingPhase();
    }
    ScopedPause pause(this);
    GetHeap()->PrePauseRosAllocVerification(this);
    PausePhase();
    RevokeAllThreadLocalBuffers();
  } else {
    ScopedPause pause(this);
    GetHeap()->PreGcVerificationPaused(this);
    MarkingPhase();
    GetHeap()->PrePauseRosAllocVerification(this);
    PausePhase();
    RevokeAllThreadLocalBuffers();
  }
  {
    // Sweeping is always done concurrently, even for non-concurrent mark sweep.
    ReaderMutexLock mu(self, *Locks::mutator_lock_);
    ReclaimPhase();
  }
  GetHeap()->PostGcVerification(this);
  FinishPhase();
}

void MarkSweep::ProcessReferences(Thread* self) {
  WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
  GetHeap()->GetReferenceProcessor()->ProcessReferences(
      true, GetTimings(), GetCurrentIteration()->GetClearSoftReferences(),
      &HeapReferenceMarkedCallback, &MarkObjectCallback, &ProcessMarkStackCallback, this);
}

void MarkSweep::PausePhase() {
  TimingLogger::ScopedTiming t("(Paused)PausePhase", GetTimings());
  Thread* self = Thread::Current();
  Locks::mutator_lock_->AssertExclusiveHeld(self);
  if (IsConcurrent()) {
    // Handle the dirty objects if we are a concurrent GC.
    WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
    // Re-mark root set.
    ReMarkRoots();
    // Scan dirty objects; this is only required if we are doing a concurrent GC.
    RecursiveMarkDirtyObjects(true, accounting::CardTable::kCardDirty);
  }
  {
    TimingLogger::ScopedTiming t2("SwapStacks", GetTimings());
    WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
    heap_->SwapStacks(self);
    live_stack_freeze_size_ = heap_->GetLiveStack()->Size();
    // Need to revoke all the thread-local allocation stacks since we just swapped the allocation
    // stacks and don't want anybody to allocate into the live stack.
    RevokeAllThreadLocalAllocationStacks(self);
  }
  heap_->PreSweepingGcVerification(this);
  // Disallow new system weaks to prevent a race which occurs when someone adds a new system
  // weak before we sweep them. Since this new system weak may not be marked, the GC may
  // incorrectly sweep it. This also fixes a race where interning may attempt to return a strong
  // reference to a string that is about to be swept.
  Runtime::Current()->DisallowNewSystemWeaks();
  // Enable the reference processing slow path; this needs to be done with mutators paused since
  // there is no lock in the GetReferent fast path.
  GetHeap()->GetReferenceProcessor()->EnableSlowPath();
}

void MarkSweep::PreCleanCards() {
  // Don't do this for non-concurrent GCs since they don't have any dirty cards.
  if (kPreCleanCards && IsConcurrent()) {
    TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
    Thread* self = Thread::Current();
    CHECK(!Locks::mutator_lock_->IsExclusiveHeld(self));
    // Process dirty cards and add dirty cards to mod union tables; this also ages cards.
    heap_->ProcessCards(GetTimings(), false);
    // The checkpoint root marking is required to avoid a race condition which occurs if the
    // following happens during a reference write:
    // 1. mutator dirties the card (write barrier)
    // 2. GC ages the card (the above ProcessCards call)
    // 3. GC scans the object (the RecursiveMarkDirtyObjects call below)
    // 4. mutator writes the value (corresponding to the write barrier in 1.)
    // This causes the GC to age the card but not necessarily mark the reference which the mutator
    // wrote into the object stored in the card.
    // Having the checkpoint fixes this issue since it ensures that the card mark and the
    // reference write are visible to the GC before the card is scanned (this is due to locks being
    // acquired / released in the checkpoint code).
    // The other roots are also marked to help reduce the pause.
    MarkRootsCheckpoint(self, false);
    MarkNonThreadRoots();
    MarkConcurrentRoots(
        static_cast<VisitRootFlags>(kVisitRootFlagClearRootLog | kVisitRootFlagNewRoots));
    // Process the newly aged cards.
    RecursiveMarkDirtyObjects(false, accounting::CardTable::kCardDirty - 1);
    // TODO: Empty the allocation stack to reduce the number of objects we need to test / mark as
    // live in the next GC.
  }
}

void MarkSweep::RevokeAllThreadLocalAllocationStacks(Thread* self) {
  if (kUseThreadLocalAllocationStack) {
    TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
    Locks::mutator_lock_->AssertExclusiveHeld(self);
    heap_->RevokeAllThreadLocalAllocationStacks(self);
  }
}

void MarkSweep::MarkingPhase() {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  Thread* self = Thread::Current();
  BindBitmaps();
  FindDefaultSpaceBitmap();
  // Process dirty cards and add dirty cards to mod union tables.
  heap_->ProcessCards(GetTimings(), false);
  WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
  MarkRoots(self);
  MarkReachableObjects();
  // Pre-clean dirtied cards to reduce pauses.
  PreCleanCards();
}

void MarkSweep::UpdateAndMarkModUnion() {
  for (const auto& space : heap_->GetContinuousSpaces()) {
    if (immune_region_.ContainsSpace(space)) {
      const char* name = space->IsZygoteSpace() ? "UpdateAndMarkZygoteModUnionTable" :
                                                  "UpdateAndMarkImageModUnionTable";
      TimingLogger::ScopedTiming t(name, GetTimings());
      accounting::ModUnionTable* mod_union_table = heap_->FindModUnionTableFromSpace(space);
      CHECK(mod_union_table != nullptr);
      mod_union_table->UpdateAndMarkReferences(MarkHeapReferenceCallback, this);
    }
  }
}

void MarkSweep::MarkReachableObjects() {
  UpdateAndMarkModUnion();
  // Recursively mark all the non-image bits set in the mark bitmap.
  RecursiveMark();
}

void MarkSweep::ReclaimPhase() {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  Thread* self = Thread::Current();
  // Process the references concurrently.
  ProcessReferences(self);
  SweepSystemWeaks(self);
  Runtime::Current()->AllowNewSystemWeaks();
  {
    WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
    // Reclaim unmarked objects.
    Sweep(false);
    // Swap the live and mark bitmaps for each space which we modified. This is an optimization
    // that enables us to not clear live bits inside of the sweep. Only swaps unbound bitmaps.
    SwapBitmaps();
    // Unbind the live and mark bitmaps.
    GetHeap()->UnBindBitmaps();
  }
}

void MarkSweep::FindDefaultSpaceBitmap() {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  for (const auto& space : GetHeap()->GetContinuousSpaces()) {
    accounting::ContinuousSpaceBitmap* bitmap = space->GetMarkBitmap();
    // We want to have the main space instead of the non-moving space if possible.
    if (bitmap != nullptr &&
        space->GetGcRetentionPolicy() == space::kGcRetentionPolicyAlwaysCollect) {
      current_space_bitmap_ = bitmap;
      // If we are not the non-moving space, exit the loop early since this will be good enough.
      if (space != heap_->GetNonMovingSpace()) {
        break;
      }
    }
  }
  CHECK(current_space_bitmap_ != nullptr) << "Could not find a default mark bitmap\n"
      << heap_->DumpSpaces();
}

void MarkSweep::ExpandMarkStack() {
  ResizeMarkStack(mark_stack_->Capacity() * 2);
}

void MarkSweep::ResizeMarkStack(size_t new_size) {
  // Rare case, no need to have Thread::Current be a parameter.
  if (UNLIKELY(mark_stack_->Size() < mark_stack_->Capacity())) {
    // Someone else acquired the lock and expanded the mark stack before us.
    return;
  }
  std::vector<Object*> temp(mark_stack_->Begin(), mark_stack_->End());
  CHECK_LE(mark_stack_->Size(), new_size);
  mark_stack_->Resize(new_size);
  for (const auto& obj : temp) {
    mark_stack_->PushBack(obj);
  }
}

inline void MarkSweep::MarkObjectNonNullParallel(Object* obj) {
  DCHECK(obj != nullptr);
  if (MarkObjectParallel(obj)) {
    MutexLock mu(Thread::Current(), mark_stack_lock_);
    if (UNLIKELY(mark_stack_->Size() >= mark_stack_->Capacity())) {
      ExpandMarkStack();
    }
    // The object must be pushed onto the mark stack.
    mark_stack_->PushBack(obj);
  }
}

mirror::Object* MarkSweep::MarkObjectCallback(mirror::Object* obj, void* arg) {
  MarkSweep* mark_sweep = reinterpret_cast<MarkSweep*>(arg);
  mark_sweep->MarkObject(obj);
  return obj;
}

void MarkSweep::MarkHeapReferenceCallback(mirror::HeapReference<mirror::Object>* ref, void* arg) {
  reinterpret_cast<MarkSweep*>(arg)->MarkObject(ref->AsMirrorPtr());
}

bool MarkSweep::HeapReferenceMarkedCallback(mirror::HeapReference<mirror::Object>* ref, void* arg) {
  return reinterpret_cast<MarkSweep*>(arg)->IsMarked(ref->AsMirrorPtr());
}

class MarkSweepMarkObjectSlowPath {
 public:
  explicit MarkSweepMarkObjectSlowPath(MarkSweep* mark_sweep) : mark_sweep_(mark_sweep) {
  }

  void operator()(const Object* obj) const ALWAYS_INLINE {
    if (kProfileLargeObjects) {
      // TODO: Differentiate between marking and testing somehow.
      ++mark_sweep_->large_object_test_;
      ++mark_sweep_->large_object_mark_;
    }
    space::LargeObjectSpace* large_object_space = mark_sweep_->GetHeap()->GetLargeObjectsSpace();
    if (UNLIKELY(obj == nullptr || !IsAligned<kPageSize>(obj) ||
                 (kIsDebugBuild && large_object_space != nullptr &&
                  !large_object_space->Contains(obj)))) {
      LOG(ERROR) << "Tried to mark " << obj << " not contained by any spaces";
      LOG(ERROR) << "Attempting to see if it's a bad root";
      mark_sweep_->VerifyRoots();
      LOG(FATAL) << "Can't mark invalid object";
    }
  }

 private:
  MarkSweep* const mark_sweep_;
};

inline void MarkSweep::MarkObjectNonNull(Object* obj) {
  DCHECK(obj != nullptr);
  if (kUseBakerOrBrooksReadBarrier) {
    // Verify all the objects have the correct pointer installed.
    obj->AssertReadBarrierPointer();
  }
  if (immune_region_.ContainsObject(obj)) {
    if (kCountMarkedObjects) {
      ++mark_immune_count_;
    }
    DCHECK(mark_bitmap_->Test(obj));
  } else if (LIKELY(current_space_bitmap_->HasAddress(obj))) {
    if (kCountMarkedObjects) {
      ++mark_fastpath_count_;
    }
    if (UNLIKELY(!current_space_bitmap_->Set(obj))) {
      PushOnMarkStack(obj);  // This object was not previously marked.
    }
  } else {
    if (kCountMarkedObjects) {
      ++mark_slowpath_count_;
    }
    MarkSweepMarkObjectSlowPath visitor(this);
    // TODO: We already know that the object is not in the current_space_bitmap_ but
    // MarkBitmap::Set will check again.
    if (!mark_bitmap_->Set(obj, visitor)) {
      PushOnMarkStack(obj);  // Was not already marked, push.
    }
  }
}

inline void MarkSweep::PushOnMarkStack(Object* obj) {
  if (UNLIKELY(mark_stack_->Size() >= mark_stack_->Capacity())) {
    // Lock is not needed but is here anyway to please annotalysis.
    MutexLock mu(Thread::Current(), mark_stack_lock_);
    ExpandMarkStack();
  }
  // The object must be pushed onto the mark stack.
  mark_stack_->PushBack(obj);
}

inline bool MarkSweep::MarkObjectParallel(const Object* obj) {
  DCHECK(obj != nullptr);
  if (kUseBakerOrBrooksReadBarrier) {
    // Verify all the objects have the correct pointer installed.
    obj->AssertReadBarrierPointer();
  }
  if (immune_region_.ContainsObject(obj)) {
    DCHECK(IsMarked(obj));
    return false;
  }
  // Try to take advantage of locality of references within a space; failing this, find the space
  // the hard way.
  accounting::ContinuousSpaceBitmap* object_bitmap = current_space_bitmap_;
  if (LIKELY(object_bitmap->HasAddress(obj))) {
    return !object_bitmap->AtomicTestAndSet(obj);
  }
  MarkSweepMarkObjectSlowPath visitor(this);
  return !mark_bitmap_->AtomicTestAndSet(obj, visitor);
}

// Used to mark objects when processing the mark stack. If an object is null, it is not marked.
inline void MarkSweep::MarkObject(Object* obj) {
  if (obj != nullptr) {
    MarkObjectNonNull(obj);
  } else if (kCountMarkedObjects) {
    ++mark_null_count_;
  }
}

void MarkSweep::MarkRootParallelCallback(Object** root, void* arg, uint32_t /*thread_id*/,
                                         RootType /*root_type*/) {
  reinterpret_cast<MarkSweep*>(arg)->MarkObjectNonNullParallel(*root);
}

void MarkSweep::VerifyRootMarked(Object** root, void* arg, uint32_t /*thread_id*/,
                                 RootType /*root_type*/) {
  CHECK(reinterpret_cast<MarkSweep*>(arg)->IsMarked(*root));
}

void MarkSweep::MarkRootCallback(Object** root, void* arg, uint32_t /*thread_id*/,
                                 RootType /*root_type*/) {
  reinterpret_cast<MarkSweep*>(arg)->MarkObjectNonNull(*root);
}

void MarkSweep::VerifyRootCallback(const Object* root, void* arg, size_t vreg,
                                   const StackVisitor* visitor, RootType root_type) {
  reinterpret_cast<MarkSweep*>(arg)->VerifyRoot(root, vreg, visitor, root_type);
}

void MarkSweep::VerifyRoot(const Object* root, size_t vreg, const StackVisitor* visitor,
                           RootType root_type) {
  // See if the root is on any space bitmap.
  if (heap_->GetLiveBitmap()->GetContinuousSpaceBitmap(root) == nullptr) {
    space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
    if (large_object_space != nullptr && !large_object_space->Contains(root)) {
      LOG(ERROR) << "Found invalid root: " << root << " with type " << root_type;
      if (visitor != NULL) {
        LOG(ERROR) << visitor->DescribeLocation() << " in VReg: " << vreg;
      }
    }
  }
}

void MarkSweep::VerifyRoots() {
  Runtime::Current()->GetThreadList()->VerifyRoots(VerifyRootCallback, this);
}

void MarkSweep::MarkRoots(Thread* self) {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  if (Locks::mutator_lock_->IsExclusiveHeld(self)) {
    // If we exclusively hold the mutator lock, all threads must be suspended.
    Runtime::Current()->VisitRoots(MarkRootCallback, this);
    RevokeAllThreadLocalAllocationStacks(self);
  } else {
    MarkRootsCheckpoint(self, kRevokeRosAllocThreadLocalBuffersAtCheckpoint);
    // At this point the live stack should no longer have any mutators which push into it.
    MarkNonThreadRoots();
    MarkConcurrentRoots(
        static_cast<VisitRootFlags>(kVisitRootFlagAllRoots | kVisitRootFlagStartLoggingNewRoots));
  }
}

void MarkSweep::MarkNonThreadRoots() {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  Runtime::Current()->VisitNonThreadRoots(MarkRootCallback, this);
}

void MarkSweep::MarkConcurrentRoots(VisitRootFlags flags) {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  // Visit all runtime roots and clear dirty flags.
  Runtime::Current()->VisitConcurrentRoots(MarkRootCallback, this, flags);
}

class ScanObjectVisitor {
 public:
  explicit ScanObjectVisitor(MarkSweep* const mark_sweep) ALWAYS_INLINE
      : mark_sweep_(mark_sweep) {}

  void operator()(Object* obj) const ALWAYS_INLINE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
    if (kCheckLocks) {
      Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
      Locks::heap_bitmap_lock_->AssertExclusiveHeld(Thread::Current());
    }
    mark_sweep_->ScanObject(obj);
  }

 private:
  MarkSweep* const mark_sweep_;
};

class DelayReferenceReferentVisitor {
 public:
  explicit DelayReferenceReferentVisitor(MarkSweep* collector) : collector_(collector) {
  }

  void operator()(mirror::Class* klass, mirror::Reference* ref) const
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
    collector_->DelayReferenceReferent(klass, ref);
  }

 private:
  MarkSweep* const collector_;
};

template <bool kUseFinger = false>
class MarkStackTask : public Task {
 public:
  MarkStackTask(ThreadPool* thread_pool, MarkSweep* mark_sweep, size_t mark_stack_size,
                Object** mark_stack)
      : mark_sweep_(mark_sweep),
        thread_pool_(thread_pool),
        mark_stack_pos_(mark_stack_size) {
    // We may have to copy part of an existing mark stack when another mark stack overflows.
    if (mark_stack_size != 0) {
      DCHECK(mark_stack != NULL);
      // TODO: Check performance?
      std::copy(mark_stack, mark_stack + mark_stack_size, mark_stack_);
    }
    if (kCountTasks) {
      ++mark_sweep_->work_chunks_created_;
    }
  }

  static const size_t kMaxSize = 1 * KB;

 protected:
  class MarkObjectParallelVisitor {
   public:
    explicit MarkObjectParallelVisitor(MarkStackTask<kUseFinger>* chunk_task,
                                       MarkSweep* mark_sweep) ALWAYS_INLINE
        : chunk_task_(chunk_task), mark_sweep_(mark_sweep) {}

    void operator()(Object* obj, MemberOffset offset, bool /* static */) const ALWAYS_INLINE
        SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
      mirror::Object* ref = obj->GetFieldObject<mirror::Object>(offset);
      if (ref != nullptr && mark_sweep_->MarkObjectParallel(ref)) {
        if (kUseFinger) {
          android_memory_barrier();
          if (reinterpret_cast<uintptr_t>(ref) >=
              static_cast<uintptr_t>(mark_sweep_->atomic_finger_.LoadRelaxed())) {
            return;
          }
        }
        chunk_task_->MarkStackPush(ref);
      }
    }

   private:
    MarkStackTask<kUseFinger>* const chunk_task_;
    MarkSweep* const mark_sweep_;
  };

  class ScanObjectParallelVisitor {
   public:
    explicit ScanObjectParallelVisitor(MarkStackTask<kUseFinger>* chunk_task) ALWAYS_INLINE
        : chunk_task_(chunk_task) {}

    // No thread safety analysis since multiple threads will use this visitor.
    void operator()(Object* obj) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
        EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
      MarkSweep* const mark_sweep = chunk_task_->mark_sweep_;
      MarkObjectParallelVisitor mark_visitor(chunk_task_, mark_sweep);
      DelayReferenceReferentVisitor ref_visitor(mark_sweep);
      mark_sweep->ScanObjectVisit(obj, mark_visitor, ref_visitor);
    }

   private:
    MarkStackTask<kUseFinger>* const chunk_task_;
  };

  virtual ~MarkStackTask() {
    // Make sure that we have cleared our mark stack.
    DCHECK_EQ(mark_stack_pos_, 0U);
    if (kCountTasks) {
      ++mark_sweep_->work_chunks_deleted_;
    }
  }

  MarkSweep* const mark_sweep_;
  ThreadPool* const thread_pool_;
  // Thread local mark stack for this task.
  Object* mark_stack_[kMaxSize];
  // Mark stack position.
  size_t mark_stack_pos_;

  void MarkStackPush(Object* obj) ALWAYS_INLINE {
    if (UNLIKELY(mark_stack_pos_ == kMaxSize)) {
      // Mark stack overflow, give 1/2 the stack to the thread pool as a new work task.
      mark_stack_pos_ /= 2;
      auto* task = new MarkStackTask(thread_pool_, mark_sweep_, kMaxSize - mark_stack_pos_,
                                     mark_stack_ + mark_stack_pos_);
      thread_pool_->AddTask(Thread::Current(), task);
    }
    DCHECK(obj != nullptr);
    DCHECK_LT(mark_stack_pos_, kMaxSize);
    mark_stack_[mark_stack_pos_++] = obj;
  }

  virtual void Finalize() {
    delete this;
  }

  // Scans all of the objects.
  virtual void Run(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
    ScanObjectParallelVisitor visitor(this);
    // TODO: Tune this.
    static const size_t kFifoSize = 4;
    BoundedFifoPowerOfTwo<Object*, kFifoSize> prefetch_fifo;
    for (;;) {
      Object* obj = nullptr;
      if (kUseMarkStackPrefetch) {
        while (mark_stack_pos_ != 0 && prefetch_fifo.size() < kFifoSize) {
          Object* obj = mark_stack_[--mark_stack_pos_];
          DCHECK(obj != nullptr);
          __builtin_prefetch(obj);
          prefetch_fifo.push_back(obj);
        }
        if (UNLIKELY(prefetch_fifo.empty())) {
          break;
        }
        obj = prefetch_fifo.front();
        prefetch_fifo.pop_front();
      } else {
        if (UNLIKELY(mark_stack_pos_ == 0)) {
          break;
        }
        obj = mark_stack_[--mark_stack_pos_];
      }
      DCHECK(obj != nullptr);
      visitor(obj);
    }
  }
};

class CardScanTask : public MarkStackTask<false> {
 public:
  CardScanTask(ThreadPool* thread_pool, MarkSweep* mark_sweep,
               accounting::ContinuousSpaceBitmap* bitmap,
               uint8_t* begin, uint8_t* end, uint8_t minimum_age, size_t mark_stack_size,
               Object** mark_stack_obj)
      : MarkStackTask<false>(thread_pool, mark_sweep, mark_stack_size, mark_stack_obj),
        bitmap_(bitmap),
        begin_(begin),
        end_(end),
        minimum_age_(minimum_age) {
  }

 protected:
  accounting::ContinuousSpaceBitmap* const bitmap_;
  uint8_t* const begin_;
  uint8_t* const end_;
  const uint8_t minimum_age_;

  virtual void Finalize() {
    delete this;
  }

  virtual void Run(Thread* self) NO_THREAD_SAFETY_ANALYSIS {
    ScanObjectParallelVisitor visitor(this);
    accounting::CardTable* card_table = mark_sweep_->GetHeap()->GetCardTable();
    size_t cards_scanned = card_table->Scan(bitmap_, begin_, end_, visitor, minimum_age_);
    VLOG(heap) << "Parallel scanning cards " << reinterpret_cast<void*>(begin_) << " - "
               << reinterpret_cast<void*>(end_) << " = " << cards_scanned;
    // Finish by emptying our local mark stack.
    MarkStackTask::Run(self);
  }
};

size_t MarkSweep::GetThreadCount(bool paused) const {
  if (heap_->GetThreadPool() == nullptr || !heap_->CareAboutPauseTimes()) {
    return 1;
  }
  if (paused) {
    return heap_->GetParallelGCThreadCount() + 1;
  } else {
    return heap_->GetConcGCThreadCount() + 1;
  }
}

void MarkSweep::ScanGrayObjects(bool paused, uint8_t minimum_age) {
  accounting::CardTable* card_table = GetHeap()->GetCardTable();
  ThreadPool* thread_pool = GetHeap()->GetThreadPool();
  size_t thread_count = GetThreadCount(paused);
  // The parallel version with only one thread is faster for card scanning. TODO: fix.
  if (kParallelCardScan && thread_count > 1) {
    Thread* self = Thread::Current();
    // Can't have a different split for each space since multiple spaces can have their cards being
    // scanned at the same time.
    TimingLogger::ScopedTiming t(paused ? "(Paused)ScanGrayObjects" : __FUNCTION__,
                                 GetTimings());
    // Try to take some of the mark stack since we can pass this off to the worker tasks.
    Object** mark_stack_begin = mark_stack_->Begin();
    Object** mark_stack_end = mark_stack_->End();
    const size_t mark_stack_size = mark_stack_end - mark_stack_begin;
    // Estimated number of work tasks we will create.
    const size_t mark_stack_tasks = GetHeap()->GetContinuousSpaces().size() * thread_count;
    DCHECK_NE(mark_stack_tasks, 0U);
    const size_t mark_stack_delta = std::min(CardScanTask::kMaxSize / 2,
                                             mark_stack_size / mark_stack_tasks + 1);
    for (const auto& space : GetHeap()->GetContinuousSpaces()) {
      if (space->GetMarkBitmap() == nullptr) {
        continue;
      }
      uint8_t* card_begin = space->Begin();
      uint8_t* card_end = space->End();
      // Align up the end address. For example, the image space's end
      // may not be card-size-aligned.
      card_end = AlignUp(card_end, accounting::CardTable::kCardSize);
      DCHECK(IsAligned<accounting::CardTable::kCardSize>(card_begin));
      DCHECK(IsAligned<accounting::CardTable::kCardSize>(card_end));
      // Calculate how many bytes of heap we will scan.
      const size_t address_range = card_end - card_begin;
      // Calculate how much address range each task gets.
      const size_t card_delta = RoundUp(address_range / thread_count + 1,
                                        accounting::CardTable::kCardSize);
      // Create the worker tasks for this space.
      while (card_begin != card_end) {
        // Add a range of cards.
        size_t addr_remaining = card_end - card_begin;
        size_t card_increment = std::min(card_delta, addr_remaining);
        // Take from the back of the mark stack.
        size_t mark_stack_remaining = mark_stack_end - mark_stack_begin;
        size_t mark_stack_increment = std::min(mark_stack_delta, mark_stack_remaining);
        mark_stack_end -= mark_stack_increment;
        mark_stack_->PopBackCount(static_cast<int32_t>(mark_stack_increment));
        DCHECK_EQ(mark_stack_end, mark_stack_->End());
        // Add the new task to the thread pool.
        auto* task = new CardScanTask(thread_pool, this, space->GetMarkBitmap(), card_begin,
                                      card_begin + card_increment, minimum_age,
                                      mark_stack_increment, mark_stack_end);
        thread_pool->AddTask(self, task);
        card_begin += card_increment;
      }
    }

    // Note: the card scan below may dirty new cards (and scan them)
    // as a side effect when a Reference object is encountered and
    // queued during the marking. See b/11465268.
    thread_pool->SetMaxActiveWorkers(thread_count - 1);
    thread_pool->StartWorkers(self);
    thread_pool->Wait(self, true, true);
    thread_pool->StopWorkers(self);
  } else {
    for (const auto& space : GetHeap()->GetContinuousSpaces()) {
      if (space->GetMarkBitmap() != nullptr) {
        // Image spaces are handled properly since live == marked for them.
        const char* name = nullptr;
        switch (space->GetGcRetentionPolicy()) {
          case space::kGcRetentionPolicyNeverCollect:
            name = paused ? "(Paused)ScanGrayImageSpaceObjects" : "ScanGrayImageSpaceObjects";
            break;
          case space::kGcRetentionPolicyFullCollect:
            name = paused ? "(Paused)ScanGrayZygoteSpaceObjects" : "ScanGrayZygoteSpaceObjects";
            break;
          case space::kGcRetentionPolicyAlwaysCollect:
            name = paused ?
"(Paused)ScanGrayAllocSpaceObjects" : "ScanGrayAllocSpaceObjects"; 810 break; 811 default: 812 LOG(FATAL) << "Unreachable"; 813 } 814 TimingLogger::ScopedTiming t(name, GetTimings()); 815 ScanObjectVisitor visitor(this); 816 card_table->Scan(space->GetMarkBitmap(), space->Begin(), space->End(), visitor, 817 minimum_age); 818 } 819 } 820 } 821} 822 823class RecursiveMarkTask : public MarkStackTask<false> { 824 public: 825 RecursiveMarkTask(ThreadPool* thread_pool, MarkSweep* mark_sweep, 826 accounting::ContinuousSpaceBitmap* bitmap, uintptr_t begin, uintptr_t end) 827 : MarkStackTask<false>(thread_pool, mark_sweep, 0, NULL), bitmap_(bitmap), begin_(begin), 828 end_(end) { 829 } 830 831 protected: 832 accounting::ContinuousSpaceBitmap* const bitmap_; 833 const uintptr_t begin_; 834 const uintptr_t end_; 835 836 virtual void Finalize() { 837 delete this; 838 } 839 840 // Scans all of the objects 841 virtual void Run(Thread* self) NO_THREAD_SAFETY_ANALYSIS { 842 ScanObjectParallelVisitor visitor(this); 843 bitmap_->VisitMarkedRange(begin_, end_, visitor); 844 // Finish by emptying our local mark stack. 845 MarkStackTask::Run(self); 846 } 847}; 848 849// Populates the mark stack based on the set of marked objects and 850// recursively marks until the mark stack is emptied. 851void MarkSweep::RecursiveMark() { 852 TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings()); 853 // RecursiveMark will build the lists of known instances of the Reference classes. See 854 // DelayReferenceReferent for details. 855 if (kUseRecursiveMark) { 856 const bool partial = GetGcType() == kGcTypePartial; 857 ScanObjectVisitor scan_visitor(this); 858 auto* self = Thread::Current(); 859 ThreadPool* thread_pool = heap_->GetThreadPool(); 860 size_t thread_count = GetThreadCount(false); 861 const bool parallel = kParallelRecursiveMark && thread_count > 1; 862 mark_stack_->Reset(); 863 for (const auto& space : GetHeap()->GetContinuousSpaces()) { 864 if ((space->GetGcRetentionPolicy() == space::kGcRetentionPolicyAlwaysCollect) || 865 (!partial && space->GetGcRetentionPolicy() == space::kGcRetentionPolicyFullCollect)) { 866 current_space_bitmap_ = space->GetMarkBitmap(); 867 if (current_space_bitmap_ == nullptr) { 868 continue; 869 } 870 if (parallel) { 871 // We will use the mark stack the future. 872 // CHECK(mark_stack_->IsEmpty()); 873 // This function does not handle heap end increasing, so we must use the space end. 874 uintptr_t begin = reinterpret_cast<uintptr_t>(space->Begin()); 875 uintptr_t end = reinterpret_cast<uintptr_t>(space->End()); 876 atomic_finger_.StoreRelaxed(AtomicInteger::MaxValue()); 877 878 // Create a few worker tasks. 879 const size_t n = thread_count * 2; 880 while (begin != end) { 881 uintptr_t start = begin; 882 uintptr_t delta = (end - begin) / n; 883 delta = RoundUp(delta, KB); 884 if (delta < 16 * KB) delta = end - begin; 885 begin += delta; 886 auto* task = new RecursiveMarkTask(thread_pool, this, current_space_bitmap_, start, 887 begin); 888 thread_pool->AddTask(self, task); 889 } 890 thread_pool->SetMaxActiveWorkers(thread_count - 1); 891 thread_pool->StartWorkers(self); 892 thread_pool->Wait(self, true, true); 893 thread_pool->StopWorkers(self); 894 } else { 895 // This function does not handle heap end increasing, so we must use the space end. 
          uintptr_t begin = reinterpret_cast<uintptr_t>(space->Begin());
          uintptr_t end = reinterpret_cast<uintptr_t>(space->End());
          current_space_bitmap_->VisitMarkedRange(begin, end, scan_visitor);
        }
      }
    }
  }
  ProcessMarkStack(false);
}

mirror::Object* MarkSweep::IsMarkedCallback(mirror::Object* object, void* arg) {
  if (reinterpret_cast<MarkSweep*>(arg)->IsMarked(object)) {
    return object;
  }
  return nullptr;
}

void MarkSweep::RecursiveMarkDirtyObjects(bool paused, uint8_t minimum_age) {
  ScanGrayObjects(paused, minimum_age);
  ProcessMarkStack(paused);
}

void MarkSweep::ReMarkRoots() {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  Locks::mutator_lock_->AssertExclusiveHeld(Thread::Current());
  Runtime::Current()->VisitRoots(
      MarkRootCallback, this, static_cast<VisitRootFlags>(kVisitRootFlagNewRoots |
                                                          kVisitRootFlagStopLoggingNewRoots |
                                                          kVisitRootFlagClearRootLog));
  if (kVerifyRootsMarked) {
    TimingLogger::ScopedTiming t("(Paused)VerifyRoots", GetTimings());
    Runtime::Current()->VisitRoots(VerifyRootMarked, this);
  }
}

void MarkSweep::SweepSystemWeaks(Thread* self) {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
  Runtime::Current()->SweepSystemWeaks(IsMarkedCallback, this);
}

mirror::Object* MarkSweep::VerifySystemWeakIsLiveCallback(Object* obj, void* arg) {
  reinterpret_cast<MarkSweep*>(arg)->VerifyIsLive(obj);
  // We don't actually want to sweep the object, so let's return "marked".
  return obj;
}

void MarkSweep::VerifyIsLive(const Object* obj) {
  if (!heap_->GetLiveBitmap()->Test(obj)) {
    accounting::ObjectStack* allocation_stack = heap_->allocation_stack_.get();
    CHECK(std::find(allocation_stack->Begin(), allocation_stack->End(), obj) !=
          allocation_stack->End()) << "Found dead object " << obj << "\n" << heap_->DumpSpaces();
  }
}

void MarkSweep::VerifySystemWeaks() {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  // Verify system weaks; uses a special object visitor which returns the input object.
  Runtime::Current()->SweepSystemWeaks(VerifySystemWeakIsLiveCallback, this);
}

class CheckpointMarkThreadRoots : public Closure {
 public:
  explicit CheckpointMarkThreadRoots(MarkSweep* mark_sweep,
                                     bool revoke_ros_alloc_thread_local_buffers_at_checkpoint)
      : mark_sweep_(mark_sweep),
        revoke_ros_alloc_thread_local_buffers_at_checkpoint_(
            revoke_ros_alloc_thread_local_buffers_at_checkpoint) {
  }

  virtual void Run(Thread* thread) OVERRIDE NO_THREAD_SAFETY_ANALYSIS {
    ATRACE_BEGIN("Marking thread roots");
    // Note: self is not necessarily equal to thread since thread may be suspended.
    Thread* self = Thread::Current();
    CHECK(thread == self || thread->IsSuspended() || thread->GetState() == kWaitingPerformingGc)
        << thread->GetState() << " thread " << thread << " self " << self;
    thread->VisitRoots(MarkSweep::MarkRootParallelCallback, mark_sweep_);
    ATRACE_END();
    if (revoke_ros_alloc_thread_local_buffers_at_checkpoint_) {
      ATRACE_BEGIN("RevokeRosAllocThreadLocalBuffers");
      mark_sweep_->GetHeap()->RevokeRosAllocThreadLocalBuffers(thread);
      ATRACE_END();
    }
    mark_sweep_->GetBarrier().Pass(self);
  }

 private:
  MarkSweep* const mark_sweep_;
  const bool revoke_ros_alloc_thread_local_buffers_at_checkpoint_;
};

void MarkSweep::MarkRootsCheckpoint(Thread* self,
                                    bool revoke_ros_alloc_thread_local_buffers_at_checkpoint) {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  CheckpointMarkThreadRoots check_point(this, revoke_ros_alloc_thread_local_buffers_at_checkpoint);
  ThreadList* thread_list = Runtime::Current()->GetThreadList();
  // Request that the checkpoint be run on all threads, returning a count of the threads that
  // must run through the barrier, including self.
  size_t barrier_count = thread_list->RunCheckpoint(&check_point);
  // Release locks then wait for all mutator threads to pass the barrier.
  // TODO: optimize to not release locks when there are no threads to wait for.
  Locks::heap_bitmap_lock_->ExclusiveUnlock(self);
  Locks::mutator_lock_->SharedUnlock(self);
  {
    ScopedThreadStateChange tsc(self, kWaitingForCheckPointsToRun);
    gc_barrier_->Increment(self, barrier_count);
  }
  Locks::mutator_lock_->SharedLock(self);
  Locks::heap_bitmap_lock_->ExclusiveLock(self);
}

void MarkSweep::SweepArray(accounting::ObjectStack* allocations, bool swap_bitmaps) {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  Thread* self = Thread::Current();
  mirror::Object** chunk_free_buffer = reinterpret_cast<mirror::Object**>(
      sweep_array_free_buffer_mem_map_->BaseBegin());
  size_t chunk_free_pos = 0;
  ObjectBytePair freed;
  ObjectBytePair freed_los;
  // How many objects are left in the array; modified after each space is swept.
  Object** objects = allocations->Begin();
  size_t count = allocations->Size();
  // Change the order to ensure that the non-moving space is swept last as an optimization.
  std::vector<space::ContinuousSpace*> sweep_spaces;
  space::ContinuousSpace* non_moving_space = nullptr;
  for (space::ContinuousSpace* space : heap_->GetContinuousSpaces()) {
    if (space->IsAllocSpace() && !immune_region_.ContainsSpace(space) &&
        space->GetLiveBitmap() != nullptr) {
      if (space == heap_->GetNonMovingSpace()) {
        non_moving_space = space;
      } else {
        sweep_spaces.push_back(space);
      }
    }
  }
  // Unlikely to sweep a significant number of non-movable objects, so we sweep them after the
  // other alloc spaces as an optimization.
  if (non_moving_space != nullptr) {
    sweep_spaces.push_back(non_moving_space);
  }
  // Start by sweeping the continuous spaces.
  for (space::ContinuousSpace* space : sweep_spaces) {
    space::AllocSpace* alloc_space = space->AsAllocSpace();
    accounting::ContinuousSpaceBitmap* live_bitmap = space->GetLiveBitmap();
    accounting::ContinuousSpaceBitmap* mark_bitmap = space->GetMarkBitmap();
    if (swap_bitmaps) {
      std::swap(live_bitmap, mark_bitmap);
    }
    Object** out = objects;
    for (size_t i = 0; i < count; ++i) {
      Object* obj = objects[i];
      if (kUseThreadLocalAllocationStack && obj == nullptr) {
        continue;
      }
      if (space->HasAddress(obj)) {
        // This object is in the space; remove it from the array and add it to the sweep buffer
        // if needed.
        if (!mark_bitmap->Test(obj)) {
          if (chunk_free_pos >= kSweepArrayChunkFreeSize) {
            TimingLogger::ScopedTiming t("FreeList", GetTimings());
            freed.objects += chunk_free_pos;
            freed.bytes += alloc_space->FreeList(self, chunk_free_pos, chunk_free_buffer);
            chunk_free_pos = 0;
          }
          chunk_free_buffer[chunk_free_pos++] = obj;
        }
      } else {
        *(out++) = obj;
      }
    }
    if (chunk_free_pos > 0) {
      TimingLogger::ScopedTiming t("FreeList", GetTimings());
      freed.objects += chunk_free_pos;
      freed.bytes += alloc_space->FreeList(self, chunk_free_pos, chunk_free_buffer);
      chunk_free_pos = 0;
    }
    // All of the references which the space contained are no longer in the allocation stack;
    // update the count.
    count = out - objects;
  }
  // Handle the large object space.
  space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
  if (large_object_space != nullptr) {
    accounting::LargeObjectBitmap* large_live_objects = large_object_space->GetLiveBitmap();
    accounting::LargeObjectBitmap* large_mark_objects = large_object_space->GetMarkBitmap();
    if (swap_bitmaps) {
      std::swap(large_live_objects, large_mark_objects);
    }
    for (size_t i = 0; i < count; ++i) {
      Object* obj = objects[i];
      // Handle large objects.
      if (kUseThreadLocalAllocationStack && obj == nullptr) {
        continue;
      }
      if (!large_mark_objects->Test(obj)) {
        ++freed_los.objects;
        freed_los.bytes += large_object_space->Free(self, obj);
      }
    }
  }
  {
    TimingLogger::ScopedTiming t("RecordFree", GetTimings());
    RecordFree(freed);
    RecordFreeLOS(freed_los);
    t.NewTiming("ResetStack");
    allocations->Reset();
  }
  sweep_array_free_buffer_mem_map_->MadviseDontNeedAndZero();
}

void MarkSweep::Sweep(bool swap_bitmaps) {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  // Ensure that nobody inserted items in the live stack after we swapped the stacks.
  CHECK_GE(live_stack_freeze_size_, GetHeap()->GetLiveStack()->Size());
  {
    TimingLogger::ScopedTiming t2("MarkAllocStackAsLive", GetTimings());
    // Mark everything allocated since the last GC as live so that we can sweep concurrently,
    // knowing that new allocations won't be marked as live.
    accounting::ObjectStack* live_stack = heap_->GetLiveStack();
    heap_->MarkAllocStackAsLive(live_stack);
    live_stack->Reset();
    DCHECK(mark_stack_->IsEmpty());
  }
  for (const auto& space : GetHeap()->GetContinuousSpaces()) {
    if (space->IsContinuousMemMapAllocSpace()) {
      space::ContinuousMemMapAllocSpace* alloc_space = space->AsContinuousMemMapAllocSpace();
      TimingLogger::ScopedTiming split(
          alloc_space->IsZygoteSpace() ?
"SweepZygoteSpace" : "SweepMallocSpace", GetTimings()); 1124 RecordFree(alloc_space->Sweep(swap_bitmaps)); 1125 } 1126 } 1127 SweepLargeObjects(swap_bitmaps); 1128} 1129 1130void MarkSweep::SweepLargeObjects(bool swap_bitmaps) { 1131 space::LargeObjectSpace* los = heap_->GetLargeObjectsSpace(); 1132 if (los != nullptr) { 1133 TimingLogger::ScopedTiming split(__FUNCTION__, GetTimings()); 1134 RecordFreeLOS(los->Sweep(swap_bitmaps)); 1135 } 1136} 1137 1138// Process the "referent" field in a java.lang.ref.Reference. If the referent has not yet been 1139// marked, put it on the appropriate list in the heap for later processing. 1140void MarkSweep::DelayReferenceReferent(mirror::Class* klass, mirror::Reference* ref) { 1141 if (kCountJavaLangRefs) { 1142 ++reference_count_; 1143 } 1144 heap_->GetReferenceProcessor()->DelayReferenceReferent(klass, ref, &HeapReferenceMarkedCallback, 1145 this); 1146} 1147 1148class MarkObjectVisitor { 1149 public: 1150 explicit MarkObjectVisitor(MarkSweep* const mark_sweep) ALWAYS_INLINE : mark_sweep_(mark_sweep) { 1151 } 1152 1153 void operator()(Object* obj, MemberOffset offset, bool /* is_static */) const 1154 ALWAYS_INLINE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) 1155 EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) { 1156 if (kCheckLocks) { 1157 Locks::mutator_lock_->AssertSharedHeld(Thread::Current()); 1158 Locks::heap_bitmap_lock_->AssertExclusiveHeld(Thread::Current()); 1159 } 1160 mark_sweep_->MarkObject(obj->GetFieldObject<mirror::Object>(offset)); 1161 } 1162 1163 private: 1164 MarkSweep* const mark_sweep_; 1165}; 1166 1167// Scans an object reference. Determines the type of the reference 1168// and dispatches to a specialized scanning routine. 1169void MarkSweep::ScanObject(Object* obj) { 1170 MarkObjectVisitor mark_visitor(this); 1171 DelayReferenceReferentVisitor ref_visitor(this); 1172 ScanObjectVisit(obj, mark_visitor, ref_visitor); 1173} 1174 1175void MarkSweep::ProcessMarkStackCallback(void* arg) { 1176 reinterpret_cast<MarkSweep*>(arg)->ProcessMarkStack(false); 1177} 1178 1179void MarkSweep::ProcessMarkStackParallel(size_t thread_count) { 1180 Thread* self = Thread::Current(); 1181 ThreadPool* thread_pool = GetHeap()->GetThreadPool(); 1182 const size_t chunk_size = std::min(mark_stack_->Size() / thread_count + 1, 1183 static_cast<size_t>(MarkStackTask<false>::kMaxSize)); 1184 CHECK_GT(chunk_size, 0U); 1185 // Split the current mark stack up into work tasks. 1186 for (mirror::Object **it = mark_stack_->Begin(), **end = mark_stack_->End(); it < end; ) { 1187 const size_t delta = std::min(static_cast<size_t>(end - it), chunk_size); 1188 thread_pool->AddTask(self, new MarkStackTask<false>(thread_pool, this, delta, it)); 1189 it += delta; 1190 } 1191 thread_pool->SetMaxActiveWorkers(thread_count - 1); 1192 thread_pool->StartWorkers(self); 1193 thread_pool->Wait(self, true, true); 1194 thread_pool->StopWorkers(self); 1195 mark_stack_->Reset(); 1196 CHECK_EQ(work_chunks_created_.LoadSequentiallyConsistent(), 1197 work_chunks_deleted_.LoadSequentiallyConsistent()) 1198 << " some of the work chunks were leaked"; 1199} 1200 1201// Scan anything that's on the mark stack. 1202void MarkSweep::ProcessMarkStack(bool paused) { 1203 TimingLogger::ScopedTiming t(paused ? 
"(Paused)ProcessMarkStack" : __FUNCTION__, GetTimings()); 1204 size_t thread_count = GetThreadCount(paused); 1205 if (kParallelProcessMarkStack && thread_count > 1 && 1206 mark_stack_->Size() >= kMinimumParallelMarkStackSize) { 1207 ProcessMarkStackParallel(thread_count); 1208 } else { 1209 // TODO: Tune this. 1210 static const size_t kFifoSize = 4; 1211 BoundedFifoPowerOfTwo<Object*, kFifoSize> prefetch_fifo; 1212 for (;;) { 1213 Object* obj = NULL; 1214 if (kUseMarkStackPrefetch) { 1215 while (!mark_stack_->IsEmpty() && prefetch_fifo.size() < kFifoSize) { 1216 Object* obj = mark_stack_->PopBack(); 1217 DCHECK(obj != NULL); 1218 __builtin_prefetch(obj); 1219 prefetch_fifo.push_back(obj); 1220 } 1221 if (prefetch_fifo.empty()) { 1222 break; 1223 } 1224 obj = prefetch_fifo.front(); 1225 prefetch_fifo.pop_front(); 1226 } else { 1227 if (mark_stack_->IsEmpty()) { 1228 break; 1229 } 1230 obj = mark_stack_->PopBack(); 1231 } 1232 DCHECK(obj != nullptr); 1233 ScanObject(obj); 1234 } 1235 } 1236} 1237 1238inline bool MarkSweep::IsMarked(const Object* object) const { 1239 if (immune_region_.ContainsObject(object)) { 1240 return true; 1241 } 1242 if (current_space_bitmap_->HasAddress(object)) { 1243 return current_space_bitmap_->Test(object); 1244 } 1245 return mark_bitmap_->Test(object); 1246} 1247 1248void MarkSweep::FinishPhase() { 1249 TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings()); 1250 if (kCountScannedTypes) { 1251 VLOG(gc) << "MarkSweep scanned classes=" << class_count_.LoadRelaxed() 1252 << " arrays=" << array_count_.LoadRelaxed() << " other=" << other_count_.LoadRelaxed(); 1253 } 1254 if (kCountTasks) { 1255 VLOG(gc) << "Total number of work chunks allocated: " << work_chunks_created_.LoadRelaxed(); 1256 } 1257 if (kMeasureOverhead) { 1258 VLOG(gc) << "Overhead time " << PrettyDuration(overhead_time_.LoadRelaxed()); 1259 } 1260 if (kProfileLargeObjects) { 1261 VLOG(gc) << "Large objects tested " << large_object_test_.LoadRelaxed() 1262 << " marked " << large_object_mark_.LoadRelaxed(); 1263 } 1264 if (kCountJavaLangRefs) { 1265 VLOG(gc) << "References scanned " << reference_count_.LoadRelaxed(); 1266 } 1267 if (kCountMarkedObjects) { 1268 VLOG(gc) << "Marked: null=" << mark_null_count_.LoadRelaxed() 1269 << " immune=" << mark_immune_count_.LoadRelaxed() 1270 << " fastpath=" << mark_fastpath_count_.LoadRelaxed() 1271 << " slowpath=" << mark_slowpath_count_.LoadRelaxed(); 1272 } 1273 CHECK(mark_stack_->IsEmpty()); // Ensure that the mark stack is empty. 1274 mark_stack_->Reset(); 1275 WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_); 1276 heap_->ClearMarkedObjects(); 1277} 1278 1279void MarkSweep::RevokeAllThreadLocalBuffers() { 1280 if (kRevokeRosAllocThreadLocalBuffersAtCheckpoint && IsConcurrent()) { 1281 // If concurrent, rosalloc thread-local buffers are revoked at the 1282 // thread checkpoint. Bump pointer space thread-local buffers must 1283 // not be in use. 1284 GetHeap()->AssertAllBumpPointerSpaceThreadLocalBuffersAreRevoked(); 1285 } else { 1286 TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings()); 1287 GetHeap()->RevokeAllThreadLocalBuffers(); 1288 } 1289} 1290 1291} // namespace collector 1292} // namespace gc 1293} // namespace art 1294