concurrent_copying.cc revision 19eab409b3efab3889885b71db708fbe56594088
/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "concurrent_copying.h"

#include "art_field-inl.h"
#include "base/stl_util.h"
#include "debugger.h"
#include "gc/accounting/heap_bitmap-inl.h"
#include "gc/accounting/space_bitmap-inl.h"
#include "gc/reference_processor.h"
#include "gc/space/image_space.h"
#include "gc/space/space.h"
#include "intern_table.h"
#include "mirror/class-inl.h"
#include "mirror/object-inl.h"
#include "scoped_thread_state_change.h"
#include "thread-inl.h"
#include "thread_list.h"
#include "well_known_classes.h"

namespace art {
namespace gc {
namespace collector {

static constexpr size_t kDefaultGcMarkStackSize = 2 * MB;

ConcurrentCopying::ConcurrentCopying(Heap* heap, const std::string& name_prefix)
    : GarbageCollector(heap,
                       name_prefix + (name_prefix.empty() ? "" : " ") +
                       "concurrent copying + mark sweep"),
      region_space_(nullptr), gc_barrier_(new Barrier(0)),
      gc_mark_stack_(accounting::ObjectStack::Create("concurrent copying gc mark stack",
                                                     kDefaultGcMarkStackSize,
                                                     kDefaultGcMarkStackSize)),
      mark_stack_lock_("concurrent copying mark stack lock", kMarkSweepMarkStackLock),
      thread_running_gc_(nullptr),
      is_marking_(false), is_active_(false), is_asserting_to_space_invariant_(false),
      heap_mark_bitmap_(nullptr), live_stack_freeze_size_(0), mark_stack_mode_(kMarkStackModeOff),
      weak_ref_access_enabled_(true),
      skipped_blocks_lock_("concurrent copying bytes blocks lock", kMarkSweepMarkStackLock),
      rb_table_(heap_->GetReadBarrierTable()),
      force_evacuate_all_(false) {
  static_assert(space::RegionSpace::kRegionSize == accounting::ReadBarrierTable::kRegionSize,
                "The region space size and the read barrier table region size must match");
  cc_heap_bitmap_.reset(new accounting::HeapBitmap(heap));
  Thread* self = Thread::Current();
  {
    ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
    // Cache this so that we won't have to lock heap_bitmap_lock_ in Mark(), which could cause a
    // nested lock on heap_bitmap_lock_ when a read barrier is triggered while doing GC, or a
    // lock order violation (class_linker_lock_ and heap_bitmap_lock_).
    heap_mark_bitmap_ = heap->GetMarkBitmap();
  }
  {
    MutexLock mu(self, mark_stack_lock_);
    for (size_t i = 0; i < kMarkStackPoolSize; ++i) {
      accounting::AtomicStack<mirror::Object>* mark_stack =
          accounting::AtomicStack<mirror::Object>::Create(
              "thread local mark stack", kMarkStackSize, kMarkStackSize);
      pooled_mark_stacks_.push_back(mark_stack);
    }
  }
}

void ConcurrentCopying::MarkHeapReference(mirror::HeapReference<mirror::Object>* from_ref) {
  // Used for preserving soft references. It should be OK to not have a CAS here since there
  // should be no other threads which can trigger read barriers on the same referent during
  // reference processing.
  from_ref->Assign(Mark(from_ref->AsMirrorPtr()));
  DCHECK(!from_ref->IsNull());
}

ConcurrentCopying::~ConcurrentCopying() {
  STLDeleteElements(&pooled_mark_stacks_);
}

void ConcurrentCopying::RunPhases() {
  CHECK(kUseBakerReadBarrier || kUseTableLookupReadBarrier);
  CHECK(!is_active_);
  is_active_ = true;
  Thread* self = Thread::Current();
  thread_running_gc_ = self;
  Locks::mutator_lock_->AssertNotHeld(self);
  {
    ReaderMutexLock mu(self, *Locks::mutator_lock_);
    InitializePhase();
  }
  FlipThreadRoots();
  {
    ReaderMutexLock mu(self, *Locks::mutator_lock_);
    MarkingPhase();
  }
  // Verify no from-space refs. This causes a pause.
  if (kEnableNoFromSpaceRefsVerification || kIsDebugBuild) {
    TimingLogger::ScopedTiming split("(Paused)VerifyNoFromSpaceReferences", GetTimings());
    ScopedPause pause(this);
    CheckEmptyMarkStack();
    if (kVerboseMode) {
      LOG(INFO) << "Verifying no from-space refs";
    }
    VerifyNoFromSpaceReferences();
    if (kVerboseMode) {
      LOG(INFO) << "Done verifying no from-space refs";
    }
    CheckEmptyMarkStack();
  }
  {
    ReaderMutexLock mu(self, *Locks::mutator_lock_);
    ReclaimPhase();
  }
  FinishPhase();
  CHECK(is_active_);
  is_active_ = false;
  thread_running_gc_ = nullptr;
}

void ConcurrentCopying::BindBitmaps() {
  Thread* self = Thread::Current();
  WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
  // Mark all of the spaces we never collect as immune.
  for (const auto& space : heap_->GetContinuousSpaces()) {
    if (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyNeverCollect
        || space->GetGcRetentionPolicy() == space::kGcRetentionPolicyFullCollect) {
      CHECK(space->IsZygoteSpace() || space->IsImageSpace());
      CHECK(immune_region_.AddContinuousSpace(space)) << "Failed to add space " << *space;
      const char* bitmap_name = space->IsImageSpace() ? "cc image space bitmap" :
          "cc zygote space bitmap";
      // TODO: try avoiding using bitmaps for image/zygote to save space.
      accounting::ContinuousSpaceBitmap* bitmap =
          accounting::ContinuousSpaceBitmap::Create(bitmap_name, space->Begin(), space->Capacity());
      cc_heap_bitmap_->AddContinuousSpaceBitmap(bitmap);
      cc_bitmaps_.push_back(bitmap);
    } else if (space == region_space_) {
      accounting::ContinuousSpaceBitmap* bitmap =
          accounting::ContinuousSpaceBitmap::Create("cc region space bitmap",
                                                    space->Begin(), space->Capacity());
      cc_heap_bitmap_->AddContinuousSpaceBitmap(bitmap);
      cc_bitmaps_.push_back(bitmap);
      region_space_bitmap_ = bitmap;
    }
  }
}

void ConcurrentCopying::InitializePhase() {
  TimingLogger::ScopedTiming split("InitializePhase", GetTimings());
  if (kVerboseMode) {
    LOG(INFO) << "GC InitializePhase";
    LOG(INFO) << "Region-space : " << reinterpret_cast<void*>(region_space_->Begin()) << "-"
              << reinterpret_cast<void*>(region_space_->Limit());
  }
  CheckEmptyMarkStack();
  immune_region_.Reset();
  bytes_moved_.StoreRelaxed(0);
  objects_moved_.StoreRelaxed(0);
  if (GetCurrentIteration()->GetGcCause() == kGcCauseExplicit ||
      GetCurrentIteration()->GetGcCause() == kGcCauseForNativeAlloc ||
      GetCurrentIteration()->GetClearSoftReferences()) {
    force_evacuate_all_ = true;
  } else {
    force_evacuate_all_ = false;
  }
  BindBitmaps();
  if (kVerboseMode) {
    LOG(INFO) << "force_evacuate_all=" << force_evacuate_all_;
    LOG(INFO) << "Immune region: " << immune_region_.Begin() << "-" << immune_region_.End();
    LOG(INFO) << "GC end of InitializePhase";
  }
}

// Used to switch the thread roots of a thread from from-space refs to to-space refs.
class ThreadFlipVisitor : public Closure {
 public:
  ThreadFlipVisitor(ConcurrentCopying* concurrent_copying, bool use_tlab)
      : concurrent_copying_(concurrent_copying), use_tlab_(use_tlab) {
  }

  virtual void Run(Thread* thread) OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
    // Note: self is not necessarily equal to thread since thread may be suspended.
    Thread* self = Thread::Current();
    CHECK(thread == self || thread->IsSuspended() || thread->GetState() == kWaitingPerformingGc)
        << thread->GetState() << " thread " << thread << " self " << self;
    thread->SetIsGcMarking(true);
    if (use_tlab_ && thread->HasTlab()) {
      if (ConcurrentCopying::kEnableFromSpaceAccountingCheck) {
        // This must come before the revoke.
        size_t thread_local_objects = thread->GetThreadLocalObjectsAllocated();
        concurrent_copying_->region_space_->RevokeThreadLocalBuffers(thread);
        reinterpret_cast<Atomic<size_t>*>(&concurrent_copying_->from_space_num_objects_at_first_pause_)->
            FetchAndAddSequentiallyConsistent(thread_local_objects);
      } else {
        concurrent_copying_->region_space_->RevokeThreadLocalBuffers(thread);
      }
    }
    if (kUseThreadLocalAllocationStack) {
      thread->RevokeThreadLocalAllocationStack();
    }
    ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
    thread->VisitRoots(concurrent_copying_);
    concurrent_copying_->GetBarrier().Pass(self);
  }

 private:
  ConcurrentCopying* const concurrent_copying_;
  const bool use_tlab_;
};

// Called back from Runtime::FlipThreadRoots() during a pause.
class FlipCallback : public Closure {
 public:
  explicit FlipCallback(ConcurrentCopying* concurrent_copying)
      : concurrent_copying_(concurrent_copying) {
  }

  virtual void Run(Thread* thread) OVERRIDE REQUIRES(Locks::mutator_lock_) {
    ConcurrentCopying* cc = concurrent_copying_;
    TimingLogger::ScopedTiming split("(Paused)FlipCallback", cc->GetTimings());
    // Note: self is not necessarily equal to thread since thread may be suspended.
    Thread* self = Thread::Current();
    CHECK(thread == self);
    Locks::mutator_lock_->AssertExclusiveHeld(self);
    cc->region_space_->SetFromSpace(cc->rb_table_, cc->force_evacuate_all_);
    cc->SwapStacks();
    if (ConcurrentCopying::kEnableFromSpaceAccountingCheck) {
      cc->RecordLiveStackFreezeSize(self);
      cc->from_space_num_objects_at_first_pause_ = cc->region_space_->GetObjectsAllocated();
      cc->from_space_num_bytes_at_first_pause_ = cc->region_space_->GetBytesAllocated();
    }
    cc->is_marking_ = true;
    cc->mark_stack_mode_.StoreRelaxed(ConcurrentCopying::kMarkStackModeThreadLocal);
    if (UNLIKELY(Runtime::Current()->IsActiveTransaction())) {
      CHECK(Runtime::Current()->IsAotCompiler());
      TimingLogger::ScopedTiming split2("(Paused)VisitTransactionRoots", cc->GetTimings());
      Runtime::Current()->VisitTransactionRoots(cc);
    }
  }

 private:
  ConcurrentCopying* const concurrent_copying_;
};

// Switch thread roots from from-space refs to to-space refs. Forward/mark the thread roots.
void ConcurrentCopying::FlipThreadRoots() {
  TimingLogger::ScopedTiming split("FlipThreadRoots", GetTimings());
  if (kVerboseMode) {
    LOG(INFO) << "time=" << region_space_->Time();
    region_space_->DumpNonFreeRegions(LOG(INFO));
  }
  Thread* self = Thread::Current();
  Locks::mutator_lock_->AssertNotHeld(self);
  gc_barrier_->Init(self, 0);
  ThreadFlipVisitor thread_flip_visitor(this, heap_->use_tlab_);
  FlipCallback flip_callback(this);
  heap_->ThreadFlipBegin(self);  // Sync with JNI critical calls.
  size_t barrier_count = Runtime::Current()->FlipThreadRoots(
      &thread_flip_visitor, &flip_callback, this);
  heap_->ThreadFlipEnd(self);
  {
    ScopedThreadStateChange tsc(self, kWaitingForCheckPointsToRun);
    gc_barrier_->Increment(self, barrier_count);
  }
  is_asserting_to_space_invariant_ = true;
  QuasiAtomic::ThreadFenceForConstructor();
  if (kVerboseMode) {
    LOG(INFO) << "time=" << region_space_->Time();
    region_space_->DumpNonFreeRegions(LOG(INFO));
    LOG(INFO) << "GC end of FlipThreadRoots";
  }
}

void ConcurrentCopying::SwapStacks() {
  heap_->SwapStacks();
}

void ConcurrentCopying::RecordLiveStackFreezeSize(Thread* self) {
  WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
  live_stack_freeze_size_ = heap_->GetLiveStack()->Size();
}

// Used to visit objects in the immune spaces.
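// Immune-space objects are never copied; the visitor below only grays them, marks them in the
// cc bitmap, and pushes newly marked ones onto the mark stack so that their fields get scanned
// and the to-space invariant holds for the references they contain.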
class ConcurrentCopyingImmuneSpaceObjVisitor {
 public:
  explicit ConcurrentCopyingImmuneSpaceObjVisitor(ConcurrentCopying* cc)
      : collector_(cc) {}

  void operator()(mirror::Object* obj) const SHARED_REQUIRES(Locks::mutator_lock_)
      SHARED_REQUIRES(Locks::heap_bitmap_lock_) {
    DCHECK(obj != nullptr);
    DCHECK(collector_->immune_region_.ContainsObject(obj));
    accounting::ContinuousSpaceBitmap* cc_bitmap =
        collector_->cc_heap_bitmap_->GetContinuousSpaceBitmap(obj);
    DCHECK(cc_bitmap != nullptr)
        << "An immune space object must have a bitmap";
    if (kIsDebugBuild) {
      DCHECK(collector_->heap_->GetMarkBitmap()->Test(obj))
          << "Immune space object must be already marked";
    }
    // This may or may not succeed, which is ok.
    if (kUseBakerReadBarrier) {
      obj->AtomicSetReadBarrierPointer(ReadBarrier::WhitePtr(), ReadBarrier::GrayPtr());
    }
    if (cc_bitmap->AtomicTestAndSet(obj)) {
      // Already marked. Do nothing.
    } else {
      // Newly marked. Set the gray bit and push it onto the mark stack.
      CHECK(!kUseBakerReadBarrier || obj->GetReadBarrierPointer() == ReadBarrier::GrayPtr());
      collector_->PushOntoMarkStack(obj);
    }
  }

 private:
  ConcurrentCopying* const collector_;
};

class EmptyCheckpoint : public Closure {
 public:
  explicit EmptyCheckpoint(ConcurrentCopying* concurrent_copying)
      : concurrent_copying_(concurrent_copying) {
  }

  virtual void Run(Thread* thread) OVERRIDE NO_THREAD_SAFETY_ANALYSIS {
    // Note: self is not necessarily equal to thread since thread may be suspended.
    Thread* self = Thread::Current();
    CHECK(thread == self || thread->IsSuspended() || thread->GetState() == kWaitingPerformingGc)
        << thread->GetState() << " thread " << thread << " self " << self;
    // If thread is a running mutator, then act on behalf of the garbage collector.
    // See the code in ThreadList::RunCheckpoint.
    if (thread->GetState() == kRunnable) {
      concurrent_copying_->GetBarrier().Pass(self);
    }
  }

 private:
  ConcurrentCopying* const concurrent_copying_;
};

// Concurrently mark roots that are guarded by read barriers and process the mark stack.
void ConcurrentCopying::MarkingPhase() {
  TimingLogger::ScopedTiming split("MarkingPhase", GetTimings());
  if (kVerboseMode) {
    LOG(INFO) << "GC MarkingPhase";
  }
  CHECK(weak_ref_access_enabled_);
  {
    // Mark the image root. The WB-based collectors do not need to scan the image objects from
    // roots by relying on the card table, but it's necessary for the RB to-space invariant to
    // hold.
    TimingLogger::ScopedTiming split1("VisitImageRoots", GetTimings());
    gc::space::ImageSpace* image = heap_->GetImageSpace();
    if (image != nullptr) {
      mirror::ObjectArray<mirror::Object>* image_root = image->GetImageHeader().GetImageRoots();
      mirror::Object* marked_image_root = Mark(image_root);
      CHECK_EQ(image_root, marked_image_root) << "An image object does not move";
      if (ReadBarrier::kEnableToSpaceInvariantChecks) {
        AssertToSpaceInvariant(nullptr, MemberOffset(0), marked_image_root);
      }
    }
  }
  // TODO: Other garbage collectors use Runtime::VisitConcurrentRoots(); refactor this part to
  // also use the same function.
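  // The root categories visited concurrently below are the constant roots, the intern table,
  // the class linker, the non-thread roots, the debugger roots, and the allocation records.
  // Thread roots were already flipped and marked during FlipThreadRoots().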
  {
    TimingLogger::ScopedTiming split2("VisitConstantRoots", GetTimings());
    Runtime::Current()->VisitConstantRoots(this);
  }
  {
    TimingLogger::ScopedTiming split3("VisitInternTableRoots", GetTimings());
    Runtime::Current()->GetInternTable()->VisitRoots(this, kVisitRootFlagAllRoots);
  }
  {
    TimingLogger::ScopedTiming split4("VisitClassLinkerRoots", GetTimings());
    Runtime::Current()->GetClassLinker()->VisitRoots(this, kVisitRootFlagAllRoots);
  }
  {
    // TODO: don't visit the transaction roots if it's not active.
    TimingLogger::ScopedTiming split5("VisitNonThreadRoots", GetTimings());
    Runtime::Current()->VisitNonThreadRoots(this);
  }
  {
    TimingLogger::ScopedTiming split6("Dbg::VisitRoots", GetTimings());
    Dbg::VisitRoots(this);
  }
  Runtime::Current()->GetHeap()->VisitAllocationRecords(this);

  // Immune spaces.
  for (auto& space : heap_->GetContinuousSpaces()) {
    if (immune_region_.ContainsSpace(space)) {
      DCHECK(space->IsImageSpace() || space->IsZygoteSpace());
      accounting::ContinuousSpaceBitmap* live_bitmap = space->GetLiveBitmap();
      ConcurrentCopyingImmuneSpaceObjVisitor visitor(this);
      live_bitmap->VisitMarkedRange(reinterpret_cast<uintptr_t>(space->Begin()),
                                    reinterpret_cast<uintptr_t>(space->Limit()),
                                    visitor);
    }
  }

  Thread* self = Thread::Current();
  {
    TimingLogger::ScopedTiming split7("ProcessMarkStack", GetTimings());
    // We transition through three mark stack modes (thread-local, shared, GC-exclusive). The
    // primary reasons are that we need to use a checkpoint to process thread-local mark stacks,
    // but once we disable weak ref accesses, we can't use a checkpoint because of a potential
    // deadlock (running threads may be blocking at WaitHoldingLocks), and that once we reach the
    // point where we process weak references, we can avoid using a lock when accessing the GC
    // mark stack, which makes mark stack processing more efficient.

    // Process the mark stack once in the thread-local stack mode. This marks most of the live
    // objects, aside from weak ref accesses with read barriers (Reference::GetReferent() and
    // system weaks) that may happen concurrently while we are processing the mark stack and that
    // newly mark/gray objects and push refs onto the mark stack.
    ProcessMarkStack();
    // Switch to the shared mark stack mode. That is, revoke and process thread-local mark stacks
    // for the last time before transitioning to the shared mark stack mode, which would process
    // new refs that may have been concurrently pushed onto the mark stack during the
    // ProcessMarkStack() call above. At the same time, disable weak ref accesses using a
    // per-thread flag. It's important to do these together in a single checkpoint so that we can
    // ensure that mutators won't newly gray objects and push new refs onto the mark stack due to
    // weak ref accesses, and that mutators safely transition to the shared mark stack mode
    // (without leaving unprocessed refs on the thread-local mark stacks), without a race. This is
    // why we use a thread-local weak ref access flag Thread::tls32_.weak_ref_access_enabled_
    // instead of the global ones.
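    // In summary, the mark stack mode moves through
    //   kMarkStackModeThreadLocal -> kMarkStackModeShared -> kMarkStackModeGcExclusive
    // below, and is finally set to kMarkStackModeOff in DisableMarking().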
    SwitchToSharedMarkStackMode();
    CHECK(!self->GetWeakRefAccessEnabled());
    // Now that weak ref accesses are disabled, once we exhaust the shared mark stack again here
    // (which may be non-empty if there were refs found on thread-local mark stacks during the
    // above SwitchToSharedMarkStackMode() call), we won't have new refs to process; that is,
    // mutators (via read barriers) have no way to produce any more refs to process. Marking
    // converges once before we process weak refs below.
    ProcessMarkStack();
    CheckEmptyMarkStack();
    // Switch to the GC exclusive mark stack mode so that we can process the mark stack without a
    // lock from this point on.
    SwitchToGcExclusiveMarkStackMode();
    CheckEmptyMarkStack();
    if (kVerboseMode) {
      LOG(INFO) << "ProcessReferences";
    }
    // Process weak references. This may produce new refs to process and have them processed via
    // ProcessMarkStack (in the GC exclusive mark stack mode).
    ProcessReferences(self);
    CheckEmptyMarkStack();
    if (kVerboseMode) {
      LOG(INFO) << "SweepSystemWeaks";
    }
    SweepSystemWeaks(self);
    if (kVerboseMode) {
      LOG(INFO) << "SweepSystemWeaks done";
    }
    // Process the mark stack here one last time because the above SweepSystemWeaks() call may
    // have marked some objects (strings) alive, as hash_set::Erase() can call the hash function
    // for arbitrary elements in the weak intern table in InternTable::Table::SweepWeaks().
    ProcessMarkStack();
    CheckEmptyMarkStack();
    // Re-enable weak ref accesses.
    ReenableWeakRefAccess(self);
    // Free data for class loaders that we unloaded.
    Runtime::Current()->GetClassLinker()->CleanupClassLoaders();
    // Marking is done. Disable marking.
    DisableMarking();
    CheckEmptyMarkStack();
  }

  CHECK(weak_ref_access_enabled_);
  if (kVerboseMode) {
    LOG(INFO) << "GC end of MarkingPhase";
  }
}

void ConcurrentCopying::ReenableWeakRefAccess(Thread* self) {
  if (kVerboseMode) {
    LOG(INFO) << "ReenableWeakRefAccess";
  }
  weak_ref_access_enabled_.StoreRelaxed(true);  // This is for new threads.
  QuasiAtomic::ThreadFenceForConstructor();
  // Iterate all threads (don't need to or can't use a checkpoint) and re-enable weak ref access.
  {
    MutexLock mu(self, *Locks::thread_list_lock_);
    std::list<Thread*> thread_list = Runtime::Current()->GetThreadList()->GetList();
    for (Thread* thread : thread_list) {
      thread->SetWeakRefAccessEnabled(true);
    }
  }
  // Unblock blocking threads.
  GetHeap()->GetReferenceProcessor()->BroadcastForSlowPath(self);
  Runtime::Current()->BroadcastForNewSystemWeaks();
}

class DisableMarkingCheckpoint : public Closure {
 public:
  explicit DisableMarkingCheckpoint(ConcurrentCopying* concurrent_copying)
      : concurrent_copying_(concurrent_copying) {
  }

  void Run(Thread* thread) OVERRIDE NO_THREAD_SAFETY_ANALYSIS {
    // Note: self is not necessarily equal to thread since thread may be suspended.
    Thread* self = Thread::Current();
    DCHECK(thread == self || thread->IsSuspended() || thread->GetState() == kWaitingPerformingGc)
        << thread->GetState() << " thread " << thread << " self " << self;
    // Disable the thread-local is_gc_marking flag.
    // Note a thread that has just started right before this checkpoint may have already set this
    // flag to false, which is ok.
    thread->SetIsGcMarking(false);
    // If thread is a running mutator, then act on behalf of the garbage collector.
    // See the code in ThreadList::RunCheckpoint.
    if (thread->GetState() == kRunnable) {
      concurrent_copying_->GetBarrier().Pass(self);
    }
  }

 private:
  ConcurrentCopying* const concurrent_copying_;
};

void ConcurrentCopying::IssueDisableMarkingCheckpoint() {
  Thread* self = Thread::Current();
  DisableMarkingCheckpoint check_point(this);
  ThreadList* thread_list = Runtime::Current()->GetThreadList();
  gc_barrier_->Init(self, 0);
  size_t barrier_count = thread_list->RunCheckpoint(&check_point);
  // If there are no threads to wait for, which implies that all the checkpoint functions have
  // finished, then there is no need to release the mutator lock.
  if (barrier_count == 0) {
    return;
  }
  // Release locks then wait for all mutator threads to pass the barrier.
  Locks::mutator_lock_->SharedUnlock(self);
  {
    ScopedThreadStateChange tsc(self, kWaitingForCheckPointsToRun);
    gc_barrier_->Increment(self, barrier_count);
  }
  Locks::mutator_lock_->SharedLock(self);
}

void ConcurrentCopying::DisableMarking() {
  // Change the global is_marking flag to false. Do a fence before doing a checkpoint to update
  // the thread-local flags so that a new thread starting up will get the correct is_marking flag.
  is_marking_ = false;
  QuasiAtomic::ThreadFenceForConstructor();
  // Use a checkpoint to turn off the thread-local is_gc_marking flags and to ensure no threads
  // are still in the middle of a read barrier which may have a from-space ref cached in a local
  // variable.
  IssueDisableMarkingCheckpoint();
  if (kUseTableLookupReadBarrier) {
    heap_->rb_table_->ClearAll();
    DCHECK(heap_->rb_table_->IsAllCleared());
  }
  is_mark_stack_push_disallowed_.StoreSequentiallyConsistent(1);
  mark_stack_mode_.StoreSequentiallyConsistent(kMarkStackModeOff);
}

void ConcurrentCopying::IssueEmptyCheckpoint() {
  Thread* self = Thread::Current();
  EmptyCheckpoint check_point(this);
  ThreadList* thread_list = Runtime::Current()->GetThreadList();
  gc_barrier_->Init(self, 0);
  size_t barrier_count = thread_list->RunCheckpoint(&check_point);
  // If there are no threads to wait for, which implies that all the checkpoint functions have
  // finished, then there is no need to release the mutator lock.
  if (barrier_count == 0) {
    return;
  }
  // Release locks then wait for all mutator threads to pass the barrier.
  Locks::mutator_lock_->SharedUnlock(self);
  {
    ScopedThreadStateChange tsc(self, kWaitingForCheckPointsToRun);
    gc_barrier_->Increment(self, barrier_count);
  }
  Locks::mutator_lock_->SharedLock(self);
}

void ConcurrentCopying::ExpandGcMarkStack() {
  DCHECK(gc_mark_stack_->IsFull());
  const size_t new_size = gc_mark_stack_->Capacity() * 2;
  std::vector<StackReference<mirror::Object>> temp(gc_mark_stack_->Begin(),
                                                   gc_mark_stack_->End());
  gc_mark_stack_->Resize(new_size);
  for (auto& ref : temp) {
    gc_mark_stack_->PushBack(ref.AsMirrorPtr());
  }
  DCHECK(!gc_mark_stack_->IsFull());
}

void ConcurrentCopying::PushOntoMarkStack(mirror::Object* to_ref) {
  CHECK_EQ(is_mark_stack_push_disallowed_.LoadRelaxed(), 0)
      << " " << to_ref << " " << PrettyTypeOf(to_ref);
  Thread* self = Thread::Current();  // TODO: pass self as an argument from call sites?
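  // Where the ref goes depends on the current mark stack mode: in the thread-local mode, mutators
  // push onto per-thread stacks while the GC thread pushes onto gc_mark_stack_; in the shared
  // mode, everyone pushes onto gc_mark_stack_ under mark_stack_lock_; in the GC-exclusive mode,
  // only the GC thread pushes, without a lock.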
  CHECK(thread_running_gc_ != nullptr);
  MarkStackMode mark_stack_mode = mark_stack_mode_.LoadRelaxed();
  if (mark_stack_mode == kMarkStackModeThreadLocal) {
    if (self == thread_running_gc_) {
      // If GC-running thread, use the GC mark stack instead of a thread-local mark stack.
      CHECK(self->GetThreadLocalMarkStack() == nullptr);
      if (UNLIKELY(gc_mark_stack_->IsFull())) {
        ExpandGcMarkStack();
      }
      gc_mark_stack_->PushBack(to_ref);
    } else {
      // Otherwise, use a thread-local mark stack.
      accounting::AtomicStack<mirror::Object>* tl_mark_stack = self->GetThreadLocalMarkStack();
      if (UNLIKELY(tl_mark_stack == nullptr || tl_mark_stack->IsFull())) {
        MutexLock mu(self, mark_stack_lock_);
        // Get a new thread local mark stack.
        accounting::AtomicStack<mirror::Object>* new_tl_mark_stack;
        if (!pooled_mark_stacks_.empty()) {
          // Use a pooled mark stack.
          new_tl_mark_stack = pooled_mark_stacks_.back();
          pooled_mark_stacks_.pop_back();
        } else {
          // None pooled. Create a new one.
          new_tl_mark_stack =
              accounting::AtomicStack<mirror::Object>::Create(
                  "thread local mark stack", 4 * KB, 4 * KB);
        }
        DCHECK(new_tl_mark_stack != nullptr);
        DCHECK(new_tl_mark_stack->IsEmpty());
        new_tl_mark_stack->PushBack(to_ref);
        self->SetThreadLocalMarkStack(new_tl_mark_stack);
        if (tl_mark_stack != nullptr) {
          // Store the old full stack into a vector.
          revoked_mark_stacks_.push_back(tl_mark_stack);
        }
      } else {
        tl_mark_stack->PushBack(to_ref);
      }
    }
  } else if (mark_stack_mode == kMarkStackModeShared) {
    // Access the shared GC mark stack with a lock.
    MutexLock mu(self, mark_stack_lock_);
    if (UNLIKELY(gc_mark_stack_->IsFull())) {
      ExpandGcMarkStack();
    }
    gc_mark_stack_->PushBack(to_ref);
  } else {
    CHECK_EQ(static_cast<uint32_t>(mark_stack_mode),
             static_cast<uint32_t>(kMarkStackModeGcExclusive))
        << "ref=" << to_ref
        << " self->gc_marking=" << self->GetIsGcMarking()
        << " cc->is_marking=" << is_marking_;
    CHECK(self == thread_running_gc_)
        << "Only GC-running thread should access the mark stack "
        << "in the GC exclusive mark stack mode";
    // Access the GC mark stack without a lock.
    if (UNLIKELY(gc_mark_stack_->IsFull())) {
      ExpandGcMarkStack();
    }
    gc_mark_stack_->PushBack(to_ref);
  }
}

accounting::ObjectStack* ConcurrentCopying::GetAllocationStack() {
  return heap_->allocation_stack_.get();
}

accounting::ObjectStack* ConcurrentCopying::GetLiveStack() {
  return heap_->live_stack_.get();
}

inline mirror::Object* ConcurrentCopying::GetFwdPtr(mirror::Object* from_ref) {
  DCHECK(region_space_->IsInFromSpace(from_ref));
  LockWord lw = from_ref->GetLockWord(false);
  if (lw.GetState() == LockWord::kForwardingAddress) {
    mirror::Object* fwd_ptr = reinterpret_cast<mirror::Object*>(lw.ForwardingAddress());
    CHECK(fwd_ptr != nullptr);
    return fwd_ptr;
  } else {
    return nullptr;
  }
}

// The following visitors are used to verify that there are no references to the from-space left
// after marking.
class ConcurrentCopyingVerifyNoFromSpaceRefsVisitor : public SingleRootVisitor {
 public:
  explicit ConcurrentCopyingVerifyNoFromSpaceRefsVisitor(ConcurrentCopying* collector)
      : collector_(collector) {}

  void operator()(mirror::Object* ref) const
      SHARED_REQUIRES(Locks::mutator_lock_) ALWAYS_INLINE {
    if (ref == nullptr) {
      // OK.
      return;
    }
    collector_->AssertToSpaceInvariant(nullptr, MemberOffset(0), ref);
    if (kUseBakerReadBarrier) {
      if (collector_->RegionSpace()->IsInToSpace(ref)) {
        CHECK(ref->GetReadBarrierPointer() == nullptr)
            << "To-space ref " << ref << " " << PrettyTypeOf(ref)
            << " has non-white rb_ptr " << ref->GetReadBarrierPointer();
      } else {
        CHECK(ref->GetReadBarrierPointer() == ReadBarrier::BlackPtr() ||
              (ref->GetReadBarrierPointer() == ReadBarrier::WhitePtr() &&
               collector_->IsOnAllocStack(ref)))
            << "Non-moving/unevac from space ref " << ref << " " << PrettyTypeOf(ref)
            << " has non-black rb_ptr " << ref->GetReadBarrierPointer()
            << " but isn't on the alloc stack (and has white rb_ptr)."
            << " Is it in the non-moving space="
            << (collector_->GetHeap()->GetNonMovingSpace()->HasAddress(ref));
      }
    }
  }

  void VisitRoot(mirror::Object* root, const RootInfo& info ATTRIBUTE_UNUSED)
      OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
    DCHECK(root != nullptr);
    operator()(root);
  }

 private:
  ConcurrentCopying* const collector_;
};

class ConcurrentCopyingVerifyNoFromSpaceRefsFieldVisitor {
 public:
  explicit ConcurrentCopyingVerifyNoFromSpaceRefsFieldVisitor(ConcurrentCopying* collector)
      : collector_(collector) {}

  void operator()(mirror::Object* obj, MemberOffset offset, bool is_static ATTRIBUTE_UNUSED) const
      SHARED_REQUIRES(Locks::mutator_lock_) ALWAYS_INLINE {
    mirror::Object* ref =
        obj->GetFieldObject<mirror::Object, kDefaultVerifyFlags, kWithoutReadBarrier>(offset);
    ConcurrentCopyingVerifyNoFromSpaceRefsVisitor visitor(collector_);
    visitor(ref);
  }
  void operator()(mirror::Class* klass, mirror::Reference* ref) const
      SHARED_REQUIRES(Locks::mutator_lock_) ALWAYS_INLINE {
    CHECK(klass->IsTypeOfReferenceClass());
    this->operator()(ref, mirror::Reference::ReferentOffset(), false);
  }

  void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const
      SHARED_REQUIRES(Locks::mutator_lock_) {
    if (!root->IsNull()) {
      VisitRoot(root);
    }
  }

  void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
      SHARED_REQUIRES(Locks::mutator_lock_) {
    ConcurrentCopyingVerifyNoFromSpaceRefsVisitor visitor(collector_);
    visitor(root->AsMirrorPtr());
  }

 private:
  ConcurrentCopying* const collector_;
};

class ConcurrentCopyingVerifyNoFromSpaceRefsObjectVisitor {
 public:
  explicit ConcurrentCopyingVerifyNoFromSpaceRefsObjectVisitor(ConcurrentCopying* collector)
      : collector_(collector) {}
  void operator()(mirror::Object* obj) const
      SHARED_REQUIRES(Locks::mutator_lock_) {
    ObjectCallback(obj, collector_);
  }
  static void ObjectCallback(mirror::Object* obj, void *arg)
      SHARED_REQUIRES(Locks::mutator_lock_) {
    CHECK(obj != nullptr);
    ConcurrentCopying* collector = reinterpret_cast<ConcurrentCopying*>(arg);
    space::RegionSpace* region_space = collector->RegionSpace();
    CHECK(!region_space->IsInFromSpace(obj)) << "Scanning object " << obj << " in from space";
    ConcurrentCopyingVerifyNoFromSpaceRefsFieldVisitor visitor(collector);
    obj->VisitReferences(visitor, visitor);
    if (kUseBakerReadBarrier) {
      if (collector->RegionSpace()->IsInToSpace(obj)) {
        CHECK(obj->GetReadBarrierPointer() == nullptr)
            << "obj=" << obj << " non-white rb_ptr " << obj->GetReadBarrierPointer();
      } else {
        CHECK(obj->GetReadBarrierPointer() == ReadBarrier::BlackPtr() ||
              (obj->GetReadBarrierPointer() == ReadBarrier::WhitePtr() &&
               collector->IsOnAllocStack(obj)))
            << "Non-moving space/unevac from space ref " << obj << " " << PrettyTypeOf(obj)
            << " has non-black rb_ptr " << obj->GetReadBarrierPointer()
            << " but isn't on the alloc stack (and has white rb_ptr). Is it in the non-moving space="
            << (collector->GetHeap()->GetNonMovingSpace()->HasAddress(obj));
      }
    }
  }

 private:
  ConcurrentCopying* const collector_;
};

// Verify there are no from-space references left after the marking phase.
void ConcurrentCopying::VerifyNoFromSpaceReferences() {
  Thread* self = Thread::Current();
  DCHECK(Locks::mutator_lock_->IsExclusiveHeld(self));
  // Verify that all threads have is_gc_marking set to false.
  {
    MutexLock mu(self, *Locks::thread_list_lock_);
    std::list<Thread*> thread_list = Runtime::Current()->GetThreadList()->GetList();
    for (Thread* thread : thread_list) {
      CHECK(!thread->GetIsGcMarking());
    }
  }
  ConcurrentCopyingVerifyNoFromSpaceRefsObjectVisitor visitor(this);
  // Roots.
  {
    ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
    ConcurrentCopyingVerifyNoFromSpaceRefsVisitor ref_visitor(this);
    Runtime::Current()->VisitRoots(&ref_visitor);
  }
  // The to-space.
  region_space_->WalkToSpace(ConcurrentCopyingVerifyNoFromSpaceRefsObjectVisitor::ObjectCallback,
                             this);
  // Non-moving spaces.
  {
    WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
    heap_->GetMarkBitmap()->Visit(visitor);
  }
  // The alloc stack.
  {
    ConcurrentCopyingVerifyNoFromSpaceRefsVisitor ref_visitor(this);
    for (auto* it = heap_->allocation_stack_->Begin(), *end = heap_->allocation_stack_->End();
         it < end; ++it) {
      mirror::Object* const obj = it->AsMirrorPtr();
      if (obj != nullptr && obj->GetClass() != nullptr) {
        // TODO: need to call this only if obj is alive?
        ref_visitor(obj);
        visitor(obj);
      }
    }
  }
  // TODO: LOS. But only refs in LOS are classes.
}

// The following visitors are used to assert the to-space invariant.
class ConcurrentCopyingAssertToSpaceInvariantRefsVisitor {
 public:
  explicit ConcurrentCopyingAssertToSpaceInvariantRefsVisitor(ConcurrentCopying* collector)
      : collector_(collector) {}

  void operator()(mirror::Object* ref) const
      SHARED_REQUIRES(Locks::mutator_lock_) ALWAYS_INLINE {
    if (ref == nullptr) {
      // OK.
      return;
    }
    collector_->AssertToSpaceInvariant(nullptr, MemberOffset(0), ref);
  }

 private:
  ConcurrentCopying* const collector_;
};

class ConcurrentCopyingAssertToSpaceInvariantFieldVisitor {
 public:
  explicit ConcurrentCopyingAssertToSpaceInvariantFieldVisitor(ConcurrentCopying* collector)
      : collector_(collector) {}

  void operator()(mirror::Object* obj, MemberOffset offset, bool is_static ATTRIBUTE_UNUSED) const
      SHARED_REQUIRES(Locks::mutator_lock_) ALWAYS_INLINE {
    mirror::Object* ref =
        obj->GetFieldObject<mirror::Object, kDefaultVerifyFlags, kWithoutReadBarrier>(offset);
    ConcurrentCopyingAssertToSpaceInvariantRefsVisitor visitor(collector_);
    visitor(ref);
  }
  void operator()(mirror::Class* klass, mirror::Reference* ref ATTRIBUTE_UNUSED) const
      SHARED_REQUIRES(Locks::mutator_lock_) ALWAYS_INLINE {
    CHECK(klass->IsTypeOfReferenceClass());
  }

  void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const
      SHARED_REQUIRES(Locks::mutator_lock_) {
    if (!root->IsNull()) {
      VisitRoot(root);
    }
  }

  void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
      SHARED_REQUIRES(Locks::mutator_lock_) {
    ConcurrentCopyingAssertToSpaceInvariantRefsVisitor visitor(collector_);
    visitor(root->AsMirrorPtr());
  }

 private:
  ConcurrentCopying* const collector_;
};

class ConcurrentCopyingAssertToSpaceInvariantObjectVisitor {
 public:
  explicit ConcurrentCopyingAssertToSpaceInvariantObjectVisitor(ConcurrentCopying* collector)
      : collector_(collector) {}
  void operator()(mirror::Object* obj) const
      SHARED_REQUIRES(Locks::mutator_lock_) {
    ObjectCallback(obj, collector_);
  }
  static void ObjectCallback(mirror::Object* obj, void *arg)
      SHARED_REQUIRES(Locks::mutator_lock_) {
    CHECK(obj != nullptr);
    ConcurrentCopying* collector = reinterpret_cast<ConcurrentCopying*>(arg);
    space::RegionSpace* region_space = collector->RegionSpace();
    CHECK(!region_space->IsInFromSpace(obj)) << "Scanning object " << obj << " in from space";
    collector->AssertToSpaceInvariant(nullptr, MemberOffset(0), obj);
    ConcurrentCopyingAssertToSpaceInvariantFieldVisitor visitor(collector);
    obj->VisitReferences(visitor, visitor);
  }

 private:
  ConcurrentCopying* const collector_;
};

class RevokeThreadLocalMarkStackCheckpoint : public Closure {
 public:
  RevokeThreadLocalMarkStackCheckpoint(ConcurrentCopying* concurrent_copying,
                                       bool disable_weak_ref_access)
      : concurrent_copying_(concurrent_copying),
        disable_weak_ref_access_(disable_weak_ref_access) {
  }

  virtual void Run(Thread* thread) OVERRIDE NO_THREAD_SAFETY_ANALYSIS {
    // Note: self is not necessarily equal to thread since thread may be suspended.
    Thread* self = Thread::Current();
    CHECK(thread == self || thread->IsSuspended() || thread->GetState() == kWaitingPerformingGc)
        << thread->GetState() << " thread " << thread << " self " << self;
    // Revoke thread local mark stacks.
    accounting::AtomicStack<mirror::Object>* tl_mark_stack = thread->GetThreadLocalMarkStack();
    if (tl_mark_stack != nullptr) {
      MutexLock mu(self, concurrent_copying_->mark_stack_lock_);
      concurrent_copying_->revoked_mark_stacks_.push_back(tl_mark_stack);
      thread->SetThreadLocalMarkStack(nullptr);
    }
    // Disable weak ref access.
    if (disable_weak_ref_access_) {
      thread->SetWeakRefAccessEnabled(false);
    }
    // If thread is a running mutator, then act on behalf of the garbage collector.
    // See the code in ThreadList::RunCheckpoint.
    if (thread->GetState() == kRunnable) {
      concurrent_copying_->GetBarrier().Pass(self);
    }
  }

 private:
  ConcurrentCopying* const concurrent_copying_;
  const bool disable_weak_ref_access_;
};

void ConcurrentCopying::RevokeThreadLocalMarkStacks(bool disable_weak_ref_access) {
  Thread* self = Thread::Current();
  RevokeThreadLocalMarkStackCheckpoint check_point(this, disable_weak_ref_access);
  ThreadList* thread_list = Runtime::Current()->GetThreadList();
  gc_barrier_->Init(self, 0);
  size_t barrier_count = thread_list->RunCheckpoint(&check_point);
  // If there are no threads to wait for, which implies that all the checkpoint functions have
  // finished, then there is no need to release the mutator lock.
  if (barrier_count == 0) {
    return;
  }
  Locks::mutator_lock_->SharedUnlock(self);
  {
    ScopedThreadStateChange tsc(self, kWaitingForCheckPointsToRun);
    gc_barrier_->Increment(self, barrier_count);
  }
  Locks::mutator_lock_->SharedLock(self);
}

void ConcurrentCopying::RevokeThreadLocalMarkStack(Thread* thread) {
  Thread* self = Thread::Current();
  CHECK_EQ(self, thread);
  accounting::AtomicStack<mirror::Object>* tl_mark_stack = thread->GetThreadLocalMarkStack();
  if (tl_mark_stack != nullptr) {
    CHECK(is_marking_);
    MutexLock mu(self, mark_stack_lock_);
    revoked_mark_stacks_.push_back(tl_mark_stack);
    thread->SetThreadLocalMarkStack(nullptr);
  }
}

void ConcurrentCopying::ProcessMarkStack() {
  if (kVerboseMode) {
    LOG(INFO) << "ProcessMarkStack. ";
  }
  bool empty_prev = false;
  while (true) {
    bool empty = ProcessMarkStackOnce();
    if (empty_prev && empty) {
      // Saw empty mark stack for a second time, done.
      break;
    }
    empty_prev = empty;
  }
}

bool ConcurrentCopying::ProcessMarkStackOnce() {
  Thread* self = Thread::Current();
  CHECK(thread_running_gc_ != nullptr);
  CHECK(self == thread_running_gc_);
  CHECK(self->GetThreadLocalMarkStack() == nullptr);
  size_t count = 0;
  MarkStackMode mark_stack_mode = mark_stack_mode_.LoadRelaxed();
  if (mark_stack_mode == kMarkStackModeThreadLocal) {
    // Process the thread-local mark stacks and the GC mark stack.
    count += ProcessThreadLocalMarkStacks(false);
    while (!gc_mark_stack_->IsEmpty()) {
      mirror::Object* to_ref = gc_mark_stack_->PopBack();
      ProcessMarkStackRef(to_ref);
      ++count;
    }
    gc_mark_stack_->Reset();
  } else if (mark_stack_mode == kMarkStackModeShared) {
    // Process the shared GC mark stack with a lock.
    {
      MutexLock mu(self, mark_stack_lock_);
      CHECK(revoked_mark_stacks_.empty());
    }
    while (true) {
      std::vector<mirror::Object*> refs;
      {
        // Copy refs with lock. Note the number of refs should be small.
        MutexLock mu(self, mark_stack_lock_);
        if (gc_mark_stack_->IsEmpty()) {
          break;
        }
        for (StackReference<mirror::Object>* p = gc_mark_stack_->Begin();
             p != gc_mark_stack_->End(); ++p) {
          refs.push_back(p->AsMirrorPtr());
        }
        gc_mark_stack_->Reset();
      }
      for (mirror::Object* ref : refs) {
        ProcessMarkStackRef(ref);
        ++count;
      }
    }
  } else {
    CHECK_EQ(static_cast<uint32_t>(mark_stack_mode),
             static_cast<uint32_t>(kMarkStackModeGcExclusive));
    {
      MutexLock mu(self, mark_stack_lock_);
      CHECK(revoked_mark_stacks_.empty());
    }
    // Process the GC mark stack in the exclusive mode. No need to take the lock.
    while (!gc_mark_stack_->IsEmpty()) {
      mirror::Object* to_ref = gc_mark_stack_->PopBack();
      ProcessMarkStackRef(to_ref);
      ++count;
    }
    gc_mark_stack_->Reset();
  }

  // Return true if the stack was empty.
  return count == 0;
}

size_t ConcurrentCopying::ProcessThreadLocalMarkStacks(bool disable_weak_ref_access) {
  // Run a checkpoint to collect all thread local mark stacks and iterate over them all.
  RevokeThreadLocalMarkStacks(disable_weak_ref_access);
  size_t count = 0;
  std::vector<accounting::AtomicStack<mirror::Object>*> mark_stacks;
  {
    MutexLock mu(Thread::Current(), mark_stack_lock_);
    // Make a copy of the mark stack vector.
    mark_stacks = revoked_mark_stacks_;
    revoked_mark_stacks_.clear();
  }
  for (accounting::AtomicStack<mirror::Object>* mark_stack : mark_stacks) {
    for (StackReference<mirror::Object>* p = mark_stack->Begin(); p != mark_stack->End(); ++p) {
      mirror::Object* to_ref = p->AsMirrorPtr();
      ProcessMarkStackRef(to_ref);
      ++count;
    }
    {
      MutexLock mu(Thread::Current(), mark_stack_lock_);
      if (pooled_mark_stacks_.size() >= kMarkStackPoolSize) {
        // The pool has enough. Delete it.
        delete mark_stack;
      } else {
        // Otherwise, put it into the pool for later reuse.
        mark_stack->Reset();
        pooled_mark_stacks_.push_back(mark_stack);
      }
    }
  }
  return count;
}

void ConcurrentCopying::ProcessMarkStackRef(mirror::Object* to_ref) {
  DCHECK(!region_space_->IsInFromSpace(to_ref));
  if (kUseBakerReadBarrier) {
    DCHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr())
        << " " << to_ref << " " << to_ref->GetReadBarrierPointer()
        << " is_marked=" << IsMarked(to_ref);
  }
  // Scan ref fields.
  Scan(to_ref);
  // Mark the gray ref as white or black.
  if (kUseBakerReadBarrier) {
    DCHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr())
        << " " << to_ref << " " << to_ref->GetReadBarrierPointer()
        << " is_marked=" << IsMarked(to_ref);
  }
  if (to_ref->GetClass<kVerifyNone, kWithoutReadBarrier>()->IsTypeOfReferenceClass() &&
      to_ref->AsReference()->GetReferent<kWithoutReadBarrier>() != nullptr &&
      !IsInToSpace(to_ref->AsReference()->GetReferent<kWithoutReadBarrier>())) {
    // Leave this Reference gray in the queue so that GetReferent() will trigger a read barrier.
    // We will change it to black or white later in ReferenceQueue::DequeuePendingReference().
    CHECK(to_ref->AsReference()->IsEnqueued()) << "Left unenqueued ref gray " << to_ref;
  } else {
    // We may occasionally leave a Reference black or white in the queue if its referent happens
    // to be concurrently marked after the Scan() call above has enqueued the Reference, in which
    // case the above IsInToSpace() evaluates to true and we change the color from gray to black
    // or white here in this else block.
#ifdef USE_BAKER_OR_BROOKS_READ_BARRIER
    if (kUseBakerReadBarrier) {
      if (region_space_->IsInToSpace(to_ref)) {
        // If to-space, change from gray to white.
        bool success = to_ref->AtomicSetReadBarrierPointer(ReadBarrier::GrayPtr(),
                                                           ReadBarrier::WhitePtr());
        CHECK(success) << "Must succeed as we won the race.";
        CHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::WhitePtr());
      } else {
        // If non-moving space/unevac from space, change from gray to black. We can't change gray
        // to white because it's not safe to use CAS if two threads change values in opposite
        // directions (A->B and B->A). So, we change it to black to indicate non-moving objects
        // that have been marked through. Note we'd need to change from black to white later
        // (concurrently).
        bool success = to_ref->AtomicSetReadBarrierPointer(ReadBarrier::GrayPtr(),
                                                           ReadBarrier::BlackPtr());
        CHECK(success) << "Must succeed as we won the race.";
        CHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::BlackPtr());
      }
    }
#else
    DCHECK(!kUseBakerReadBarrier);
#endif
  }
  if (ReadBarrier::kEnableToSpaceInvariantChecks || kIsDebugBuild) {
    ConcurrentCopyingAssertToSpaceInvariantObjectVisitor visitor(this);
    visitor(to_ref);
  }
}

void ConcurrentCopying::SwitchToSharedMarkStackMode() {
  Thread* self = Thread::Current();
  CHECK(thread_running_gc_ != nullptr);
  CHECK_EQ(self, thread_running_gc_);
  CHECK(self->GetThreadLocalMarkStack() == nullptr);
  MarkStackMode before_mark_stack_mode = mark_stack_mode_.LoadRelaxed();
  CHECK_EQ(static_cast<uint32_t>(before_mark_stack_mode),
           static_cast<uint32_t>(kMarkStackModeThreadLocal));
  mark_stack_mode_.StoreRelaxed(kMarkStackModeShared);
  CHECK(weak_ref_access_enabled_.LoadRelaxed());
  weak_ref_access_enabled_.StoreRelaxed(false);
  QuasiAtomic::ThreadFenceForConstructor();
  // Process the thread-local mark stacks one last time after switching to the shared mark stack
  // mode and disabling weak ref accesses.
  ProcessThreadLocalMarkStacks(true);
  if (kVerboseMode) {
    LOG(INFO) << "Switched to shared mark stack mode and disabled weak ref access";
  }
}

void ConcurrentCopying::SwitchToGcExclusiveMarkStackMode() {
  Thread* self = Thread::Current();
  CHECK(thread_running_gc_ != nullptr);
  CHECK_EQ(self, thread_running_gc_);
  CHECK(self->GetThreadLocalMarkStack() == nullptr);
  MarkStackMode before_mark_stack_mode = mark_stack_mode_.LoadRelaxed();
  CHECK_EQ(static_cast<uint32_t>(before_mark_stack_mode),
           static_cast<uint32_t>(kMarkStackModeShared));
  mark_stack_mode_.StoreRelaxed(kMarkStackModeGcExclusive);
  QuasiAtomic::ThreadFenceForConstructor();
  if (kVerboseMode) {
    LOG(INFO) << "Switched to GC exclusive mark stack mode";
  }
}

void ConcurrentCopying::CheckEmptyMarkStack() {
  Thread* self = Thread::Current();
  CHECK(thread_running_gc_ != nullptr);
  CHECK_EQ(self, thread_running_gc_);
  CHECK(self->GetThreadLocalMarkStack() == nullptr);
  MarkStackMode mark_stack_mode = mark_stack_mode_.LoadRelaxed();
  if (mark_stack_mode == kMarkStackModeThreadLocal) {
    // Thread-local mark stack mode.
    RevokeThreadLocalMarkStacks(false);
    MutexLock mu(Thread::Current(), mark_stack_lock_);
    if (!revoked_mark_stacks_.empty()) {
      for (accounting::AtomicStack<mirror::Object>* mark_stack : revoked_mark_stacks_) {
        while (!mark_stack->IsEmpty()) {
          mirror::Object* obj = mark_stack->PopBack();
          if (kUseBakerReadBarrier) {
            mirror::Object* rb_ptr = obj->GetReadBarrierPointer();
            LOG(INFO) << "On mark queue : " << obj << " " << PrettyTypeOf(obj) << " rb_ptr=" << rb_ptr
                      << " is_marked=" << IsMarked(obj);
          } else {
            LOG(INFO) << "On mark queue : " << obj << " " << PrettyTypeOf(obj)
                      << " is_marked=" << IsMarked(obj);
          }
        }
      }
      LOG(FATAL) << "mark stack is not empty";
    }
  } else {
    // Shared, GC-exclusive, or off.
    MutexLock mu(Thread::Current(), mark_stack_lock_);
    CHECK(gc_mark_stack_->IsEmpty());
    CHECK(revoked_mark_stacks_.empty());
  }
}

void ConcurrentCopying::SweepSystemWeaks(Thread* self) {
  TimingLogger::ScopedTiming split("SweepSystemWeaks", GetTimings());
  ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
  Runtime::Current()->SweepSystemWeaks(this);
}

void ConcurrentCopying::Sweep(bool swap_bitmaps) {
  {
    TimingLogger::ScopedTiming t("MarkStackAsLive", GetTimings());
    accounting::ObjectStack* live_stack = heap_->GetLiveStack();
    if (kEnableFromSpaceAccountingCheck) {
      CHECK_GE(live_stack_freeze_size_, live_stack->Size());
    }
    heap_->MarkAllocStackAsLive(live_stack);
    live_stack->Reset();
  }
  CheckEmptyMarkStack();
  TimingLogger::ScopedTiming split("Sweep", GetTimings());
  for (const auto& space : GetHeap()->GetContinuousSpaces()) {
    if (space->IsContinuousMemMapAllocSpace()) {
      space::ContinuousMemMapAllocSpace* alloc_space = space->AsContinuousMemMapAllocSpace();
      if (space == region_space_ || immune_region_.ContainsSpace(space)) {
        continue;
      }
      TimingLogger::ScopedTiming split2(
          alloc_space->IsZygoteSpace() ? "SweepZygoteSpace" : "SweepAllocSpace", GetTimings());
      RecordFree(alloc_space->Sweep(swap_bitmaps));
    }
  }
  SweepLargeObjects(swap_bitmaps);
}

void ConcurrentCopying::SweepLargeObjects(bool swap_bitmaps) {
  TimingLogger::ScopedTiming split("SweepLargeObjects", GetTimings());
  RecordFreeLOS(heap_->GetLargeObjectsSpace()->Sweep(swap_bitmaps));
}

class ConcurrentCopyingClearBlackPtrsVisitor {
 public:
  explicit ConcurrentCopyingClearBlackPtrsVisitor(ConcurrentCopying* cc)
      : collector_(cc) {}
#ifndef USE_BAKER_OR_BROOKS_READ_BARRIER
  NO_RETURN
#endif
  void operator()(mirror::Object* obj) const SHARED_REQUIRES(Locks::mutator_lock_)
      SHARED_REQUIRES(Locks::heap_bitmap_lock_) {
    DCHECK(obj != nullptr);
    DCHECK(collector_->heap_->GetMarkBitmap()->Test(obj)) << obj;
    DCHECK_EQ(obj->GetReadBarrierPointer(), ReadBarrier::BlackPtr()) << obj;
    obj->AtomicSetReadBarrierPointer(ReadBarrier::BlackPtr(), ReadBarrier::WhitePtr());
    DCHECK_EQ(obj->GetReadBarrierPointer(), ReadBarrier::WhitePtr()) << obj;
  }

 private:
  ConcurrentCopying* const collector_;
};

// Clear the black ptrs in non-moving objects back to white.
void ConcurrentCopying::ClearBlackPtrs() {
  CHECK(kUseBakerReadBarrier);
  TimingLogger::ScopedTiming split("ClearBlackPtrs", GetTimings());
  ConcurrentCopyingClearBlackPtrsVisitor visitor(this);
  for (auto& space : heap_->GetContinuousSpaces()) {
    if (space == region_space_) {
      continue;
    }
    accounting::ContinuousSpaceBitmap* mark_bitmap = space->GetMarkBitmap();
    if (kVerboseMode) {
      LOG(INFO) << "ClearBlackPtrs: " << *space << " bitmap: " << *mark_bitmap;
    }
    mark_bitmap->VisitMarkedRange(reinterpret_cast<uintptr_t>(space->Begin()),
                                  reinterpret_cast<uintptr_t>(space->Limit()),
                                  visitor);
  }
  space::LargeObjectSpace* large_object_space = heap_->GetLargeObjectsSpace();
  large_object_space->GetMarkBitmap()->VisitMarkedRange(
      reinterpret_cast<uintptr_t>(large_object_space->Begin()),
      reinterpret_cast<uintptr_t>(large_object_space->End()),
      visitor);
  // Objects on the allocation stack?
  if (ReadBarrier::kEnableReadBarrierInvariantChecks || kIsDebugBuild) {
    size_t count = GetAllocationStack()->Size();
    auto* it = GetAllocationStack()->Begin();
    auto* end = GetAllocationStack()->End();
    for (size_t i = 0; i < count; ++i, ++it) {
      CHECK_LT(it, end);
      mirror::Object* obj = it->AsMirrorPtr();
      if (obj != nullptr) {
        // Must have been cleared above.
        CHECK_EQ(obj->GetReadBarrierPointer(), ReadBarrier::WhitePtr()) << obj;
      }
    }
  }
}

void ConcurrentCopying::ReclaimPhase() {
  TimingLogger::ScopedTiming split("ReclaimPhase", GetTimings());
  if (kVerboseMode) {
    LOG(INFO) << "GC ReclaimPhase";
  }
  Thread* self = Thread::Current();

  {
    // Double-check that the mark stack is empty.
    // Note: need to set this after VerifyNoFromSpaceRef().
    is_asserting_to_space_invariant_ = false;
    QuasiAtomic::ThreadFenceForConstructor();
    if (kVerboseMode) {
      LOG(INFO) << "Issue an empty check point. ";
    }
    IssueEmptyCheckpoint();
    // Disable the check.
    is_mark_stack_push_disallowed_.StoreSequentiallyConsistent(0);
    CheckEmptyMarkStack();
  }

  {
    // Record freed objects.
    TimingLogger::ScopedTiming split2("RecordFree", GetTimings());
    // Don't include thread-locals that are in the to-space.
    uint64_t from_bytes = region_space_->GetBytesAllocatedInFromSpace();
    uint64_t from_objects = region_space_->GetObjectsAllocatedInFromSpace();
    uint64_t unevac_from_bytes = region_space_->GetBytesAllocatedInUnevacFromSpace();
    uint64_t unevac_from_objects = region_space_->GetObjectsAllocatedInUnevacFromSpace();
    uint64_t to_bytes = bytes_moved_.LoadSequentiallyConsistent();
    uint64_t to_objects = objects_moved_.LoadSequentiallyConsistent();
    if (kEnableFromSpaceAccountingCheck) {
      CHECK_EQ(from_space_num_objects_at_first_pause_, from_objects + unevac_from_objects);
      CHECK_EQ(from_space_num_bytes_at_first_pause_, from_bytes + unevac_from_bytes);
    }
    CHECK_LE(to_objects, from_objects);
    CHECK_LE(to_bytes, from_bytes);
    int64_t freed_bytes = from_bytes - to_bytes;
    int64_t freed_objects = from_objects - to_objects;
    if (kVerboseMode) {
      LOG(INFO) << "RecordFree:"
                << " from_bytes=" << from_bytes << " from_objects=" << from_objects
                << " unevac_from_bytes=" << unevac_from_bytes
                << " unevac_from_objects=" << unevac_from_objects
                << " to_bytes=" << to_bytes << " to_objects=" << to_objects
                << " freed_bytes=" << freed_bytes << " freed_objects=" << freed_objects
                << " from_space size=" << region_space_->FromSpaceSize()
                << " unevac_from_space size=" << region_space_->UnevacFromSpaceSize()
                << " to_space size=" << region_space_->ToSpaceSize();
      LOG(INFO) << "(before) num_bytes_allocated="
                << heap_->num_bytes_allocated_.LoadSequentiallyConsistent();
    }
    RecordFree(ObjectBytePair(freed_objects, freed_bytes));
    if (kVerboseMode) {
      LOG(INFO) << "(after) num_bytes_allocated="
                << heap_->num_bytes_allocated_.LoadSequentiallyConsistent();
    }
  }

  {
    TimingLogger::ScopedTiming split3("ComputeUnevacFromSpaceLiveRatio", GetTimings());
    ComputeUnevacFromSpaceLiveRatio();
  }

  {
    TimingLogger::ScopedTiming split4("ClearFromSpace", GetTimings());
    region_space_->ClearFromSpace();
  }

  {
    WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
    if (kUseBakerReadBarrier) {
      ClearBlackPtrs();
    }
    Sweep(false);
    SwapBitmaps();
    heap_->UnBindBitmaps();

    // Remove bitmaps for the immune spaces.
    while (!cc_bitmaps_.empty()) {
      accounting::ContinuousSpaceBitmap* cc_bitmap = cc_bitmaps_.back();
      cc_heap_bitmap_->RemoveContinuousSpaceBitmap(cc_bitmap);
      delete cc_bitmap;
      cc_bitmaps_.pop_back();
    }
    region_space_bitmap_ = nullptr;
  }

  CheckEmptyMarkStack();

  if (kVerboseMode) {
    LOG(INFO) << "GC end of ReclaimPhase";
  }
}

class ConcurrentCopyingComputeUnevacFromSpaceLiveRatioVisitor {
 public:
  explicit ConcurrentCopyingComputeUnevacFromSpaceLiveRatioVisitor(ConcurrentCopying* cc)
      : collector_(cc) {}
  void operator()(mirror::Object* ref) const SHARED_REQUIRES(Locks::mutator_lock_)
      SHARED_REQUIRES(Locks::heap_bitmap_lock_) {
    DCHECK(ref != nullptr);
    DCHECK(collector_->region_space_bitmap_->Test(ref)) << ref;
    DCHECK(collector_->region_space_->IsInUnevacFromSpace(ref)) << ref;
    if (kUseBakerReadBarrier) {
      DCHECK_EQ(ref->GetReadBarrierPointer(), ReadBarrier::BlackPtr()) << ref;
      // Clear the black ptr.
1415 ref->AtomicSetReadBarrierPointer(ReadBarrier::BlackPtr(), ReadBarrier::WhitePtr()); 1416 DCHECK_EQ(ref->GetReadBarrierPointer(), ReadBarrier::WhitePtr()) << ref; 1417 } 1418 size_t obj_size = ref->SizeOf(); 1419 size_t alloc_size = RoundUp(obj_size, space::RegionSpace::kAlignment); 1420 collector_->region_space_->AddLiveBytes(ref, alloc_size); 1421 } 1422 1423 private: 1424 ConcurrentCopying* const collector_; 1425}; 1426 1427// Compute how much live objects are left in regions. 1428void ConcurrentCopying::ComputeUnevacFromSpaceLiveRatio() { 1429 region_space_->AssertAllRegionLiveBytesZeroOrCleared(); 1430 ConcurrentCopyingComputeUnevacFromSpaceLiveRatioVisitor visitor(this); 1431 region_space_bitmap_->VisitMarkedRange(reinterpret_cast<uintptr_t>(region_space_->Begin()), 1432 reinterpret_cast<uintptr_t>(region_space_->Limit()), 1433 visitor); 1434} 1435 1436// Assert the to-space invariant. 1437void ConcurrentCopying::AssertToSpaceInvariant(mirror::Object* obj, MemberOffset offset, 1438 mirror::Object* ref) { 1439 CHECK(heap_->collector_type_ == kCollectorTypeCC) << static_cast<size_t>(heap_->collector_type_); 1440 if (is_asserting_to_space_invariant_) { 1441 if (region_space_->IsInToSpace(ref)) { 1442 // OK. 1443 return; 1444 } else if (region_space_->IsInUnevacFromSpace(ref)) { 1445 CHECK(region_space_bitmap_->Test(ref)) << ref; 1446 } else if (region_space_->IsInFromSpace(ref)) { 1447 // Not OK. Do extra logging. 1448 if (obj != nullptr) { 1449 LogFromSpaceRefHolder(obj, offset); 1450 } 1451 ref->GetLockWord(false).Dump(LOG(INTERNAL_FATAL)); 1452 CHECK(false) << "Found from-space ref " << ref << " " << PrettyTypeOf(ref); 1453 } else { 1454 AssertToSpaceInvariantInNonMovingSpace(obj, ref); 1455 } 1456 } 1457} 1458 1459class RootPrinter { 1460 public: 1461 RootPrinter() { } 1462 1463 template <class MirrorType> 1464 ALWAYS_INLINE void VisitRootIfNonNull(mirror::CompressedReference<MirrorType>* root) 1465 SHARED_REQUIRES(Locks::mutator_lock_) { 1466 if (!root->IsNull()) { 1467 VisitRoot(root); 1468 } 1469 } 1470 1471 template <class MirrorType> 1472 void VisitRoot(mirror::Object** root) 1473 SHARED_REQUIRES(Locks::mutator_lock_) { 1474 LOG(INTERNAL_FATAL) << "root=" << root << " ref=" << *root; 1475 } 1476 1477 template <class MirrorType> 1478 void VisitRoot(mirror::CompressedReference<MirrorType>* root) 1479 SHARED_REQUIRES(Locks::mutator_lock_) { 1480 LOG(INTERNAL_FATAL) << "root=" << root << " ref=" << root->AsMirrorPtr(); 1481 } 1482}; 1483 1484void ConcurrentCopying::AssertToSpaceInvariant(GcRootSource* gc_root_source, 1485 mirror::Object* ref) { 1486 CHECK(heap_->collector_type_ == kCollectorTypeCC) << static_cast<size_t>(heap_->collector_type_); 1487 if (is_asserting_to_space_invariant_) { 1488 if (region_space_->IsInToSpace(ref)) { 1489 // OK. 1490 return; 1491 } else if (region_space_->IsInUnevacFromSpace(ref)) { 1492 CHECK(region_space_bitmap_->Test(ref)) << ref; 1493 } else if (region_space_->IsInFromSpace(ref)) { 1494 // Not OK. Do extra logging. 1495 if (gc_root_source == nullptr) { 1496 // No info. 
1497 } else if (gc_root_source->HasArtField()) { 1498 ArtField* field = gc_root_source->GetArtField(); 1499 LOG(INTERNAL_FATAL) << "gc root in field " << field << " " << PrettyField(field); 1500 RootPrinter root_printer; 1501 field->VisitRoots(root_printer); 1502 } else if (gc_root_source->HasArtMethod()) { 1503 ArtMethod* method = gc_root_source->GetArtMethod(); 1504 LOG(INTERNAL_FATAL) << "gc root in method " << method << " " << PrettyMethod(method); 1505 RootPrinter root_printer; 1506 method->VisitRoots(root_printer, sizeof(void*)); 1507 } 1508 ref->GetLockWord(false).Dump(LOG(INTERNAL_FATAL)); 1509 region_space_->DumpNonFreeRegions(LOG(INTERNAL_FATAL)); 1510 PrintFileToLog("/proc/self/maps", LogSeverity::INTERNAL_FATAL); 1511 MemMap::DumpMaps(LOG(INTERNAL_FATAL), true); 1512 CHECK(false) << "Found from-space ref " << ref << " " << PrettyTypeOf(ref); 1513 } else { 1514 AssertToSpaceInvariantInNonMovingSpace(nullptr, ref); 1515 } 1516 } 1517} 1518 1519void ConcurrentCopying::LogFromSpaceRefHolder(mirror::Object* obj, MemberOffset offset) { 1520 if (kUseBakerReadBarrier) { 1521 LOG(INFO) << "holder=" << obj << " " << PrettyTypeOf(obj) 1522 << " holder rb_ptr=" << obj->GetReadBarrierPointer(); 1523 } else { 1524 LOG(INFO) << "holder=" << obj << " " << PrettyTypeOf(obj); 1525 } 1526 if (region_space_->IsInFromSpace(obj)) { 1527 LOG(INFO) << "holder is in the from-space."; 1528 } else if (region_space_->IsInToSpace(obj)) { 1529 LOG(INFO) << "holder is in the to-space."; 1530 } else if (region_space_->IsInUnevacFromSpace(obj)) { 1531 LOG(INFO) << "holder is in the unevac from-space."; 1532 if (region_space_bitmap_->Test(obj)) { 1533 LOG(INFO) << "holder is marked in the region space bitmap."; 1534 } else { 1535 LOG(INFO) << "holder is not marked in the region space bitmap."; 1536 } 1537 } else { 1538 // In a non-moving space. 1539 if (immune_region_.ContainsObject(obj)) { 1540 LOG(INFO) << "holder is in the image or the zygote space."; 1541 accounting::ContinuousSpaceBitmap* cc_bitmap = 1542 cc_heap_bitmap_->GetContinuousSpaceBitmap(obj); 1543 CHECK(cc_bitmap != nullptr) 1544 << "An immune space object must have a bitmap."; 1545 if (cc_bitmap->Test(obj)) { 1546 LOG(INFO) << "holder is marked in the bit map."; 1547 } else { 1548 LOG(INFO) << "holder is NOT marked in the bit map."; 1549 } 1550 } else { 1551 LOG(INFO) << "holder is in a non-moving (or main) space."; 1552 accounting::ContinuousSpaceBitmap* mark_bitmap = 1553 heap_mark_bitmap_->GetContinuousSpaceBitmap(obj); 1554 accounting::LargeObjectBitmap* los_bitmap = 1555 heap_mark_bitmap_->GetLargeObjectBitmap(obj); 1556 CHECK(los_bitmap != nullptr) << "LOS bitmap covers the entire address range"; 1557 bool is_los = mark_bitmap == nullptr; 1558 if (!is_los && mark_bitmap->Test(obj)) { 1559 LOG(INFO) << "holder is marked in the mark bit map."; 1560 } else if (is_los && los_bitmap->Test(obj)) { 1561 LOG(INFO) << "holder is marked in the los bit map."; 1562 } else { 1563 // If ref is on the allocation stack, then it is considered 1564 // mark/alive (but not necessarily on the live stack.) 1565 if (IsOnAllocStack(obj)) { 1566 LOG(INFO) << "holder is on the alloc stack."; 1567 } else { 1568 LOG(INFO) << "holder is not marked or on the alloc stack."; 1569 } 1570 } 1571 } 1572 } 1573 LOG(INFO) << "offset=" << offset.SizeValue(); 1574} 1575 1576void ConcurrentCopying::AssertToSpaceInvariantInNonMovingSpace(mirror::Object* obj, 1577 mirror::Object* ref) { 1578 // In a non-moving spaces. Check that the ref is marked. 
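  // An object outside the region space counts as marked if any of the
  // following holds (checked in this order below):
  //   1) it is in an immune (image/zygote) space and its bit is set in the
  //      corresponding cc bitmap;
  //   2) its bit is set in the mark bitmap of its continuous space, or in the
  //      large object bitmap when no continuous-space bitmap covers it;
  //   3) it is on the allocation stack, i.e. it was allocated during this GC
  //      and is therefore implicitly alive even without a bitmap bit.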
1579 if (immune_region_.ContainsObject(ref)) { 1580 accounting::ContinuousSpaceBitmap* cc_bitmap = 1581 cc_heap_bitmap_->GetContinuousSpaceBitmap(ref); 1582 CHECK(cc_bitmap != nullptr) 1583 << "An immune space ref must have a bitmap. " << ref; 1584 if (kUseBakerReadBarrier) { 1585 CHECK(cc_bitmap->Test(ref)) 1586 << "Unmarked immune space ref. obj=" << obj << " rb_ptr=" 1587 << obj->GetReadBarrierPointer() << " ref=" << ref; 1588 } else { 1589 CHECK(cc_bitmap->Test(ref)) 1590 << "Unmarked immune space ref. obj=" << obj << " ref=" << ref; 1591 } 1592 } else { 1593 accounting::ContinuousSpaceBitmap* mark_bitmap = 1594 heap_mark_bitmap_->GetContinuousSpaceBitmap(ref); 1595 accounting::LargeObjectBitmap* los_bitmap = 1596 heap_mark_bitmap_->GetLargeObjectBitmap(ref); 1597 CHECK(los_bitmap != nullptr) << "LOS bitmap covers the entire address range"; 1598 bool is_los = mark_bitmap == nullptr; 1599 if ((!is_los && mark_bitmap->Test(ref)) || 1600 (is_los && los_bitmap->Test(ref))) { 1601 // OK. 1602 } else { 1603 // If ref is on the allocation stack, then it may not be 1604 // marked live, but considered marked/alive (but not 1605 // necessarily on the live stack). 1606 CHECK(IsOnAllocStack(ref)) << "Unmarked ref that's not on the allocation stack. " 1607 << "obj=" << obj << " ref=" << ref; 1608 } 1609 } 1610} 1611 1612// Used to scan ref fields of an object. 1613class ConcurrentCopyingRefFieldsVisitor { 1614 public: 1615 explicit ConcurrentCopyingRefFieldsVisitor(ConcurrentCopying* collector) 1616 : collector_(collector) {} 1617 1618 void operator()(mirror::Object* obj, MemberOffset offset, bool /* is_static */) 1619 const ALWAYS_INLINE SHARED_REQUIRES(Locks::mutator_lock_) 1620 SHARED_REQUIRES(Locks::heap_bitmap_lock_) { 1621 collector_->Process(obj, offset); 1622 } 1623 1624 void operator()(mirror::Class* klass, mirror::Reference* ref) const 1625 SHARED_REQUIRES(Locks::mutator_lock_) ALWAYS_INLINE { 1626 CHECK(klass->IsTypeOfReferenceClass()); 1627 collector_->DelayReferenceReferent(klass, ref); 1628 } 1629 1630 void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const 1631 SHARED_REQUIRES(Locks::mutator_lock_) { 1632 if (!root->IsNull()) { 1633 VisitRoot(root); 1634 } 1635 } 1636 1637 void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const 1638 SHARED_REQUIRES(Locks::mutator_lock_) { 1639 collector_->MarkRoot(root); 1640 } 1641 1642 private: 1643 ConcurrentCopying* const collector_; 1644}; 1645 1646// Scan ref fields of an object. 1647void ConcurrentCopying::Scan(mirror::Object* to_ref) { 1648 DCHECK(!region_space_->IsInFromSpace(to_ref)); 1649 ConcurrentCopyingRefFieldsVisitor visitor(this); 1650 to_ref->VisitReferences(visitor, visitor); 1651} 1652 1653// Process a field. 1654inline void ConcurrentCopying::Process(mirror::Object* obj, MemberOffset offset) { 1655 mirror::Object* ref = obj->GetFieldObject< 1656 mirror::Object, kVerifyNone, kWithoutReadBarrier, false>(offset); 1657 if (ref == nullptr || region_space_->IsInToSpace(ref)) { 1658 return; 1659 } 1660 mirror::Object* to_ref = Mark(ref); 1661 if (to_ref == ref) { 1662 return; 1663 } 1664 // This may fail if the mutator writes to the field at the same time. But it's ok. 1665 mirror::Object* expected_ref = ref; 1666 mirror::Object* new_ref = to_ref; 1667 do { 1668 if (expected_ref != 1669 obj->GetFieldObject<mirror::Object, kVerifyNone, kWithoutReadBarrier, false>(offset)) { 1670 // It was updated by the mutator. 
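        // Leaving the mutator's value in place is safe: during marking any
        // reference the mutator stores is either one it obtained through a
        // read barrier (hence already a to-space/marked reference) or a newly
        // allocated object, so no further fix-up is needed here.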
1671 break; 1672 } 1673 } while (!obj->CasFieldWeakSequentiallyConsistentObjectWithoutWriteBarrier< 1674 false, false, kVerifyNone>(offset, expected_ref, new_ref)); 1675} 1676 1677// Process some roots. 1678void ConcurrentCopying::VisitRoots( 1679 mirror::Object*** roots, size_t count, const RootInfo& info ATTRIBUTE_UNUSED) { 1680 for (size_t i = 0; i < count; ++i) { 1681 mirror::Object** root = roots[i]; 1682 mirror::Object* ref = *root; 1683 if (ref == nullptr || region_space_->IsInToSpace(ref)) { 1684 continue; 1685 } 1686 mirror::Object* to_ref = Mark(ref); 1687 if (to_ref == ref) { 1688 continue; 1689 } 1690 Atomic<mirror::Object*>* addr = reinterpret_cast<Atomic<mirror::Object*>*>(root); 1691 mirror::Object* expected_ref = ref; 1692 mirror::Object* new_ref = to_ref; 1693 do { 1694 if (expected_ref != addr->LoadRelaxed()) { 1695 // It was updated by the mutator. 1696 break; 1697 } 1698 } while (!addr->CompareExchangeWeakSequentiallyConsistent(expected_ref, new_ref)); 1699 } 1700} 1701 1702void ConcurrentCopying::MarkRoot(mirror::CompressedReference<mirror::Object>* root) { 1703 DCHECK(!root->IsNull()); 1704 mirror::Object* const ref = root->AsMirrorPtr(); 1705 if (region_space_->IsInToSpace(ref)) { 1706 return; 1707 } 1708 mirror::Object* to_ref = Mark(ref); 1709 if (to_ref != ref) { 1710 auto* addr = reinterpret_cast<Atomic<mirror::CompressedReference<mirror::Object>>*>(root); 1711 auto expected_ref = mirror::CompressedReference<mirror::Object>::FromMirrorPtr(ref); 1712 auto new_ref = mirror::CompressedReference<mirror::Object>::FromMirrorPtr(to_ref); 1713 // If the cas fails, then it was updated by the mutator. 1714 do { 1715 if (ref != addr->LoadRelaxed().AsMirrorPtr()) { 1716 // It was updated by the mutator. 1717 break; 1718 } 1719 } while (!addr->CompareExchangeWeakSequentiallyConsistent(expected_ref, new_ref)); 1720 } 1721} 1722 1723void ConcurrentCopying::VisitRoots( 1724 mirror::CompressedReference<mirror::Object>** roots, size_t count, 1725 const RootInfo& info ATTRIBUTE_UNUSED) { 1726 for (size_t i = 0; i < count; ++i) { 1727 mirror::CompressedReference<mirror::Object>* const root = roots[i]; 1728 if (!root->IsNull()) { 1729 MarkRoot(root); 1730 } 1731 } 1732} 1733 1734// Fill the given memory block with a dummy object. Used to fill in a 1735// copy of objects that was lost in race. 1736void ConcurrentCopying::FillWithDummyObject(mirror::Object* dummy_obj, size_t byte_size) { 1737 CHECK_ALIGNED(byte_size, kObjectAlignment); 1738 memset(dummy_obj, 0, byte_size); 1739 mirror::Class* int_array_class = mirror::IntArray::GetArrayClass(); 1740 CHECK(int_array_class != nullptr); 1741 AssertToSpaceInvariant(nullptr, MemberOffset(0), int_array_class); 1742 size_t component_size = int_array_class->GetComponentSize(); 1743 CHECK_EQ(component_size, sizeof(int32_t)); 1744 size_t data_offset = mirror::Array::DataOffset(component_size).SizeValue(); 1745 if (data_offset > byte_size) { 1746 // An int array is too big. Use java.lang.Object. 1747 mirror::Class* java_lang_Object = WellKnownClasses::ToClass(WellKnownClasses::java_lang_Object); 1748 AssertToSpaceInvariant(nullptr, MemberOffset(0), java_lang_Object); 1749 CHECK_EQ(byte_size, java_lang_Object->GetObjectSize()); 1750 dummy_obj->SetClass(java_lang_Object); 1751 CHECK_EQ(byte_size, dummy_obj->SizeOf()); 1752 } else { 1753 // Use an int array. 
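    // The length is chosen so that the dummy array's SizeOf() equals byte_size
    // exactly (verified by the CHECKs below). Purely illustrative arithmetic,
    // assuming a 12-byte int-array data offset: a 64-byte hole would get
    // length = (64 - 12) / 4 = 13.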
1754 dummy_obj->SetClass(int_array_class); 1755 CHECK(dummy_obj->IsArrayInstance()); 1756 int32_t length = (byte_size - data_offset) / component_size; 1757 dummy_obj->AsArray()->SetLength(length); 1758 CHECK_EQ(dummy_obj->AsArray()->GetLength(), length) 1759 << "byte_size=" << byte_size << " length=" << length 1760 << " component_size=" << component_size << " data_offset=" << data_offset; 1761 CHECK_EQ(byte_size, dummy_obj->SizeOf()) 1762 << "byte_size=" << byte_size << " length=" << length 1763 << " component_size=" << component_size << " data_offset=" << data_offset; 1764 } 1765} 1766 1767// Reuse the memory blocks that were copy of objects that were lost in race. 1768mirror::Object* ConcurrentCopying::AllocateInSkippedBlock(size_t alloc_size) { 1769 // Try to reuse the blocks that were unused due to CAS failures. 1770 CHECK_ALIGNED(alloc_size, space::RegionSpace::kAlignment); 1771 Thread* self = Thread::Current(); 1772 size_t min_object_size = RoundUp(sizeof(mirror::Object), space::RegionSpace::kAlignment); 1773 MutexLock mu(self, skipped_blocks_lock_); 1774 auto it = skipped_blocks_map_.lower_bound(alloc_size); 1775 if (it == skipped_blocks_map_.end()) { 1776 // Not found. 1777 return nullptr; 1778 } 1779 { 1780 size_t byte_size = it->first; 1781 CHECK_GE(byte_size, alloc_size); 1782 if (byte_size > alloc_size && byte_size - alloc_size < min_object_size) { 1783 // If remainder would be too small for a dummy object, retry with a larger request size. 1784 it = skipped_blocks_map_.lower_bound(alloc_size + min_object_size); 1785 if (it == skipped_blocks_map_.end()) { 1786 // Not found. 1787 return nullptr; 1788 } 1789 CHECK_ALIGNED(it->first - alloc_size, space::RegionSpace::kAlignment); 1790 CHECK_GE(it->first - alloc_size, min_object_size) 1791 << "byte_size=" << byte_size << " it->first=" << it->first << " alloc_size=" << alloc_size; 1792 } 1793 } 1794 // Found a block. 1795 CHECK(it != skipped_blocks_map_.end()); 1796 size_t byte_size = it->first; 1797 uint8_t* addr = it->second; 1798 CHECK_GE(byte_size, alloc_size); 1799 CHECK(region_space_->IsInToSpace(reinterpret_cast<mirror::Object*>(addr))); 1800 CHECK_ALIGNED(byte_size, space::RegionSpace::kAlignment); 1801 if (kVerboseMode) { 1802 LOG(INFO) << "Reusing skipped bytes : " << reinterpret_cast<void*>(addr) << ", " << byte_size; 1803 } 1804 skipped_blocks_map_.erase(it); 1805 memset(addr, 0, byte_size); 1806 if (byte_size > alloc_size) { 1807 // Return the remainder to the map. 1808 CHECK_ALIGNED(byte_size - alloc_size, space::RegionSpace::kAlignment); 1809 CHECK_GE(byte_size - alloc_size, min_object_size); 1810 FillWithDummyObject(reinterpret_cast<mirror::Object*>(addr + alloc_size), 1811 byte_size - alloc_size); 1812 CHECK(region_space_->IsInToSpace(reinterpret_cast<mirror::Object*>(addr + alloc_size))); 1813 skipped_blocks_map_.insert(std::make_pair(byte_size - alloc_size, addr + alloc_size)); 1814 } 1815 return reinterpret_cast<mirror::Object*>(addr); 1816} 1817 1818mirror::Object* ConcurrentCopying::Copy(mirror::Object* from_ref) { 1819 DCHECK(region_space_->IsInFromSpace(from_ref)); 1820 // No read barrier to avoid nested RB that might violate the to-space 1821 // invariant. Note that from_ref is a from space ref so the SizeOf() 1822 // call will access the from-space meta objects, but it's ok and necessary. 
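  // Copy() proceeds in three steps:
  //   1) Allocate a destination: first in the region (to-)space, then from a
  //      block skipped by an earlier lost race, and finally, as a last resort,
  //      in the non-moving space.
  //   2) memcpy the object and try to CAS a forwarding address into the
  //      from-space object's lock word.
  //   3) If the CAS is lost to another thread, turn the unused destination
  //      into a dummy object (or free it) and return the winner's copy.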
  size_t obj_size = from_ref->SizeOf<kDefaultVerifyFlags, kWithoutReadBarrier>();
  size_t region_space_alloc_size = RoundUp(obj_size, space::RegionSpace::kAlignment);
  size_t region_space_bytes_allocated = 0U;
  size_t non_moving_space_bytes_allocated = 0U;
  size_t bytes_allocated = 0U;
  size_t dummy;
  mirror::Object* to_ref = region_space_->AllocNonvirtual<true>(
      region_space_alloc_size, &region_space_bytes_allocated, nullptr, &dummy);
  bytes_allocated = region_space_bytes_allocated;
  if (to_ref != nullptr) {
    DCHECK_EQ(region_space_alloc_size, region_space_bytes_allocated);
  }
  bool fall_back_to_non_moving = false;
  if (UNLIKELY(to_ref == nullptr)) {
    // Failed to allocate in the region space. Try the skipped blocks.
    to_ref = AllocateInSkippedBlock(region_space_alloc_size);
    if (to_ref != nullptr) {
      // Successfully allocated from a skipped block.
      if (heap_->use_tlab_) {
        // This is necessary for the TLAB case as the allocation is not accounted in the space.
        region_space_->RecordAlloc(to_ref);
      }
      bytes_allocated = region_space_alloc_size;
    } else {
      // Fall back to the non-moving space.
      fall_back_to_non_moving = true;
      if (kVerboseMode) {
        LOG(INFO) << "Out of memory in the to-space. Fall back to non-moving. skipped_bytes="
                  << to_space_bytes_skipped_.LoadSequentiallyConsistent()
                  << " skipped_objects=" << to_space_objects_skipped_.LoadSequentiallyConsistent();
      }
      to_ref = heap_->non_moving_space_->Alloc(Thread::Current(), obj_size,
                                               &non_moving_space_bytes_allocated, nullptr, &dummy);
      CHECK(to_ref != nullptr) << "Fall-back non-moving space allocation failed";
      bytes_allocated = non_moving_space_bytes_allocated;
      // Mark it in the mark bitmap.
      accounting::ContinuousSpaceBitmap* mark_bitmap =
          heap_mark_bitmap_->GetContinuousSpaceBitmap(to_ref);
      CHECK(mark_bitmap != nullptr);
      CHECK(!mark_bitmap->AtomicTestAndSet(to_ref));
    }
  }
  DCHECK(to_ref != nullptr);

  // Attempt to install the forwarding pointer. This is in a loop as the
  // lock word atomic write can fail.
  while (true) {
    // Copy the object. TODO: copy only the lockword in the second iteration and on?
    memcpy(to_ref, from_ref, obj_size);

    LockWord old_lock_word = to_ref->GetLockWord(false);

    if (old_lock_word.GetState() == LockWord::kForwardingAddress) {
      // Lost the race. Another thread (either GC or mutator) stored
      // the forwarding pointer first. Make the lost copy (to_ref)
      // look like a valid but dead (dummy) object and keep it for
      // future reuse.
      FillWithDummyObject(to_ref, bytes_allocated);
      if (!fall_back_to_non_moving) {
        DCHECK(region_space_->IsInToSpace(to_ref));
        if (bytes_allocated > space::RegionSpace::kRegionSize) {
          // Free the large alloc.
          region_space_->FreeLarge(to_ref, bytes_allocated);
        } else {
          // Record the lost copy for later reuse.
1889 heap_->num_bytes_allocated_.FetchAndAddSequentiallyConsistent(bytes_allocated); 1890 to_space_bytes_skipped_.FetchAndAddSequentiallyConsistent(bytes_allocated); 1891 to_space_objects_skipped_.FetchAndAddSequentiallyConsistent(1); 1892 MutexLock mu(Thread::Current(), skipped_blocks_lock_); 1893 skipped_blocks_map_.insert(std::make_pair(bytes_allocated, 1894 reinterpret_cast<uint8_t*>(to_ref))); 1895 } 1896 } else { 1897 DCHECK(heap_->non_moving_space_->HasAddress(to_ref)); 1898 DCHECK_EQ(bytes_allocated, non_moving_space_bytes_allocated); 1899 // Free the non-moving-space chunk. 1900 accounting::ContinuousSpaceBitmap* mark_bitmap = 1901 heap_mark_bitmap_->GetContinuousSpaceBitmap(to_ref); 1902 CHECK(mark_bitmap != nullptr); 1903 CHECK(mark_bitmap->Clear(to_ref)); 1904 heap_->non_moving_space_->Free(Thread::Current(), to_ref); 1905 } 1906 1907 // Get the winner's forward ptr. 1908 mirror::Object* lost_fwd_ptr = to_ref; 1909 to_ref = reinterpret_cast<mirror::Object*>(old_lock_word.ForwardingAddress()); 1910 CHECK(to_ref != nullptr); 1911 CHECK_NE(to_ref, lost_fwd_ptr); 1912 CHECK(region_space_->IsInToSpace(to_ref) || heap_->non_moving_space_->HasAddress(to_ref)); 1913 CHECK_NE(to_ref->GetLockWord(false).GetState(), LockWord::kForwardingAddress); 1914 return to_ref; 1915 } 1916 1917 // Set the gray ptr. 1918 if (kUseBakerReadBarrier) { 1919 to_ref->SetReadBarrierPointer(ReadBarrier::GrayPtr()); 1920 } 1921 1922 LockWord new_lock_word = LockWord::FromForwardingAddress(reinterpret_cast<size_t>(to_ref)); 1923 1924 // Try to atomically write the fwd ptr. 1925 bool success = from_ref->CasLockWordWeakSequentiallyConsistent(old_lock_word, new_lock_word); 1926 if (LIKELY(success)) { 1927 // The CAS succeeded. 1928 objects_moved_.FetchAndAddSequentiallyConsistent(1); 1929 bytes_moved_.FetchAndAddSequentiallyConsistent(region_space_alloc_size); 1930 if (LIKELY(!fall_back_to_non_moving)) { 1931 DCHECK(region_space_->IsInToSpace(to_ref)); 1932 } else { 1933 DCHECK(heap_->non_moving_space_->HasAddress(to_ref)); 1934 DCHECK_EQ(bytes_allocated, non_moving_space_bytes_allocated); 1935 } 1936 if (kUseBakerReadBarrier) { 1937 DCHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr()); 1938 } 1939 DCHECK(GetFwdPtr(from_ref) == to_ref); 1940 CHECK_NE(to_ref->GetLockWord(false).GetState(), LockWord::kForwardingAddress); 1941 PushOntoMarkStack(to_ref); 1942 return to_ref; 1943 } else { 1944 // The CAS failed. It may have lost the race or may have failed 1945 // due to monitor/hashcode ops. Either way, retry. 1946 } 1947 } 1948} 1949 1950mirror::Object* ConcurrentCopying::IsMarked(mirror::Object* from_ref) { 1951 DCHECK(from_ref != nullptr); 1952 space::RegionSpace::RegionType rtype = region_space_->GetRegionType(from_ref); 1953 if (rtype == space::RegionSpace::RegionType::kRegionTypeToSpace) { 1954 // It's already marked. 1955 return from_ref; 1956 } 1957 mirror::Object* to_ref; 1958 if (rtype == space::RegionSpace::RegionType::kRegionTypeFromSpace) { 1959 to_ref = GetFwdPtr(from_ref); 1960 DCHECK(to_ref == nullptr || region_space_->IsInToSpace(to_ref) || 1961 heap_->non_moving_space_->HasAddress(to_ref)) 1962 << "from_ref=" << from_ref << " to_ref=" << to_ref; 1963 } else if (rtype == space::RegionSpace::RegionType::kRegionTypeUnevacFromSpace) { 1964 if (region_space_bitmap_->Test(from_ref)) { 1965 to_ref = from_ref; 1966 } else { 1967 to_ref = nullptr; 1968 } 1969 } else { 1970 // from_ref is in a non-moving space. 
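      // This branch only consults the immune-space cc bitmap, the space mark
      // bitmap / LOS bitmap, or the allocation stack. Unlike Mark(), it never
      // sets any bits: IsMarked() is a read-only query used during reference
      // processing.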
1971 if (immune_region_.ContainsObject(from_ref)) { 1972 accounting::ContinuousSpaceBitmap* cc_bitmap = 1973 cc_heap_bitmap_->GetContinuousSpaceBitmap(from_ref); 1974 DCHECK(cc_bitmap != nullptr) 1975 << "An immune space object must have a bitmap"; 1976 if (kIsDebugBuild) { 1977 DCHECK(heap_mark_bitmap_->GetContinuousSpaceBitmap(from_ref)->Test(from_ref)) 1978 << "Immune space object must be already marked"; 1979 } 1980 if (cc_bitmap->Test(from_ref)) { 1981 // Already marked. 1982 to_ref = from_ref; 1983 } else { 1984 // Newly marked. 1985 to_ref = nullptr; 1986 } 1987 } else { 1988 // Non-immune non-moving space. Use the mark bitmap. 1989 accounting::ContinuousSpaceBitmap* mark_bitmap = 1990 heap_mark_bitmap_->GetContinuousSpaceBitmap(from_ref); 1991 accounting::LargeObjectBitmap* los_bitmap = 1992 heap_mark_bitmap_->GetLargeObjectBitmap(from_ref); 1993 CHECK(los_bitmap != nullptr) << "LOS bitmap covers the entire address range"; 1994 bool is_los = mark_bitmap == nullptr; 1995 if (!is_los && mark_bitmap->Test(from_ref)) { 1996 // Already marked. 1997 to_ref = from_ref; 1998 } else if (is_los && los_bitmap->Test(from_ref)) { 1999 // Already marked in LOS. 2000 to_ref = from_ref; 2001 } else { 2002 // Not marked. 2003 if (IsOnAllocStack(from_ref)) { 2004 // If on the allocation stack, it's considered marked. 2005 to_ref = from_ref; 2006 } else { 2007 // Not marked. 2008 to_ref = nullptr; 2009 } 2010 } 2011 } 2012 } 2013 return to_ref; 2014} 2015 2016bool ConcurrentCopying::IsOnAllocStack(mirror::Object* ref) { 2017 QuasiAtomic::ThreadFenceAcquire(); 2018 accounting::ObjectStack* alloc_stack = GetAllocationStack(); 2019 return alloc_stack->Contains(ref); 2020} 2021 2022mirror::Object* ConcurrentCopying::Mark(mirror::Object* from_ref) { 2023 if (from_ref == nullptr) { 2024 return nullptr; 2025 } 2026 DCHECK(from_ref != nullptr); 2027 DCHECK(heap_->collector_type_ == kCollectorTypeCC); 2028 if (kUseBakerReadBarrier && !is_active_) { 2029 // In the lock word forward address state, the read barrier bits 2030 // in the lock word are part of the stored forwarding address and 2031 // invalid. This is usually OK as the from-space copy of objects 2032 // aren't accessed by mutators due to the to-space 2033 // invariant. However, during the dex2oat image writing relocation 2034 // and the zygote compaction, objects can be in the forward 2035 // address state (to store the forward/relocation addresses) and 2036 // they can still be accessed and the invalid read barrier bits 2037 // are consulted. If they look like gray but aren't really, the 2038 // read barriers slow path can trigger when it shouldn't. To guard 2039 // against this, return here if the CC collector isn't running. 2040 return from_ref; 2041 } 2042 DCHECK(region_space_ != nullptr) << "Read barrier slow path taken when CC isn't running?"; 2043 space::RegionSpace::RegionType rtype = region_space_->GetRegionType(from_ref); 2044 if (rtype == space::RegionSpace::RegionType::kRegionTypeToSpace) { 2045 // It's already marked. 2046 return from_ref; 2047 } 2048 mirror::Object* to_ref; 2049 if (rtype == space::RegionSpace::RegionType::kRegionTypeFromSpace) { 2050 to_ref = GetFwdPtr(from_ref); 2051 if (kUseBakerReadBarrier) { 2052 DCHECK(to_ref != ReadBarrier::GrayPtr()) << "from_ref=" << from_ref << " to_ref=" << to_ref; 2053 } 2054 if (to_ref == nullptr) { 2055 // It isn't marked yet. Mark it by copying it to the to-space. 
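      // Copy() races with other threads via the lock-word CAS, so the object
      // it returns is always the single agreed-upon to-space copy even when
      // several threads take this slow path for the same from_ref.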
2056 to_ref = Copy(from_ref); 2057 } 2058 DCHECK(region_space_->IsInToSpace(to_ref) || heap_->non_moving_space_->HasAddress(to_ref)) 2059 << "from_ref=" << from_ref << " to_ref=" << to_ref; 2060 } else if (rtype == space::RegionSpace::RegionType::kRegionTypeUnevacFromSpace) { 2061 // This may or may not succeed, which is ok. 2062 if (kUseBakerReadBarrier) { 2063 from_ref->AtomicSetReadBarrierPointer(ReadBarrier::WhitePtr(), ReadBarrier::GrayPtr()); 2064 } 2065 if (region_space_bitmap_->AtomicTestAndSet(from_ref)) { 2066 // Already marked. 2067 to_ref = from_ref; 2068 } else { 2069 // Newly marked. 2070 to_ref = from_ref; 2071 if (kUseBakerReadBarrier) { 2072 DCHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr()); 2073 } 2074 PushOntoMarkStack(to_ref); 2075 } 2076 } else { 2077 // from_ref is in a non-moving space. 2078 DCHECK(!region_space_->HasAddress(from_ref)) << from_ref; 2079 if (immune_region_.ContainsObject(from_ref)) { 2080 accounting::ContinuousSpaceBitmap* cc_bitmap = 2081 cc_heap_bitmap_->GetContinuousSpaceBitmap(from_ref); 2082 DCHECK(cc_bitmap != nullptr) 2083 << "An immune space object must have a bitmap"; 2084 if (kIsDebugBuild) { 2085 DCHECK(heap_mark_bitmap_->GetContinuousSpaceBitmap(from_ref)->Test(from_ref)) 2086 << "Immune space object must be already marked"; 2087 } 2088 // This may or may not succeed, which is ok. 2089 if (kUseBakerReadBarrier) { 2090 from_ref->AtomicSetReadBarrierPointer(ReadBarrier::WhitePtr(), ReadBarrier::GrayPtr()); 2091 } 2092 if (cc_bitmap->AtomicTestAndSet(from_ref)) { 2093 // Already marked. 2094 to_ref = from_ref; 2095 } else { 2096 // Newly marked. 2097 to_ref = from_ref; 2098 if (kUseBakerReadBarrier) { 2099 DCHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr()); 2100 } 2101 PushOntoMarkStack(to_ref); 2102 } 2103 } else { 2104 // Use the mark bitmap. 2105 accounting::ContinuousSpaceBitmap* mark_bitmap = 2106 heap_mark_bitmap_->GetContinuousSpaceBitmap(from_ref); 2107 accounting::LargeObjectBitmap* los_bitmap = 2108 heap_mark_bitmap_->GetLargeObjectBitmap(from_ref); 2109 CHECK(los_bitmap != nullptr) << "LOS bitmap covers the entire address range"; 2110 bool is_los = mark_bitmap == nullptr; 2111 if (!is_los && mark_bitmap->Test(from_ref)) { 2112 // Already marked. 2113 to_ref = from_ref; 2114 if (kUseBakerReadBarrier) { 2115 DCHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr() || 2116 to_ref->GetReadBarrierPointer() == ReadBarrier::BlackPtr()); 2117 } 2118 } else if (is_los && los_bitmap->Test(from_ref)) { 2119 // Already marked in LOS. 2120 to_ref = from_ref; 2121 if (kUseBakerReadBarrier) { 2122 DCHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr() || 2123 to_ref->GetReadBarrierPointer() == ReadBarrier::BlackPtr()); 2124 } 2125 } else { 2126 // Not marked. 2127 if (IsOnAllocStack(from_ref)) { 2128 // If it's on the allocation stack, it's considered marked. Keep it white. 2129 to_ref = from_ref; 2130 // Objects on the allocation stack need not be marked. 2131 if (!is_los) { 2132 DCHECK(!mark_bitmap->Test(to_ref)); 2133 } else { 2134 DCHECK(!los_bitmap->Test(to_ref)); 2135 } 2136 if (kUseBakerReadBarrier) { 2137 DCHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::WhitePtr()); 2138 } 2139 } else { 2140 // Not marked or on the allocation stack. Try to mark it. 2141 // This may or may not succeed, which is ok. 
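          // The white -> gray rb_ptr transition below may be attempted by
          // several threads; the AtomicTestAndSet on the bitmap picks a single
          // winner, and only that winner pushes the object onto the mark stack
          // so that its fields are scanned exactly once.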
2142 if (kUseBakerReadBarrier) { 2143 from_ref->AtomicSetReadBarrierPointer(ReadBarrier::WhitePtr(), ReadBarrier::GrayPtr()); 2144 } 2145 if (!is_los && mark_bitmap->AtomicTestAndSet(from_ref)) { 2146 // Already marked. 2147 to_ref = from_ref; 2148 } else if (is_los && los_bitmap->AtomicTestAndSet(from_ref)) { 2149 // Already marked in LOS. 2150 to_ref = from_ref; 2151 } else { 2152 // Newly marked. 2153 to_ref = from_ref; 2154 if (kUseBakerReadBarrier) { 2155 DCHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr()); 2156 } 2157 PushOntoMarkStack(to_ref); 2158 } 2159 } 2160 } 2161 } 2162 } 2163 return to_ref; 2164} 2165 2166void ConcurrentCopying::FinishPhase() { 2167 { 2168 MutexLock mu(Thread::Current(), mark_stack_lock_); 2169 CHECK_EQ(pooled_mark_stacks_.size(), kMarkStackPoolSize); 2170 } 2171 region_space_ = nullptr; 2172 { 2173 MutexLock mu(Thread::Current(), skipped_blocks_lock_); 2174 skipped_blocks_map_.clear(); 2175 } 2176 WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_); 2177 heap_->ClearMarkedObjects(); 2178} 2179 2180bool ConcurrentCopying::IsMarkedHeapReference(mirror::HeapReference<mirror::Object>* field) { 2181 mirror::Object* from_ref = field->AsMirrorPtr(); 2182 mirror::Object* to_ref = IsMarked(from_ref); 2183 if (to_ref == nullptr) { 2184 return false; 2185 } 2186 if (from_ref != to_ref) { 2187 QuasiAtomic::ThreadFenceRelease(); 2188 field->Assign(to_ref); 2189 QuasiAtomic::ThreadFenceSequentiallyConsistent(); 2190 } 2191 return true; 2192} 2193 2194mirror::Object* ConcurrentCopying::MarkObject(mirror::Object* from_ref) { 2195 return Mark(from_ref); 2196} 2197 2198void ConcurrentCopying::DelayReferenceReferent(mirror::Class* klass, mirror::Reference* reference) { 2199 heap_->GetReferenceProcessor()->DelayReferenceReferent(klass, reference, this); 2200} 2201 2202void ConcurrentCopying::ProcessReferences(Thread* self) { 2203 TimingLogger::ScopedTiming split("ProcessReferences", GetTimings()); 2204 // We don't really need to lock the heap bitmap lock as we use CAS to mark in bitmaps. 2205 WriterMutexLock mu(self, *Locks::heap_bitmap_lock_); 2206 GetHeap()->GetReferenceProcessor()->ProcessReferences( 2207 true /*concurrent*/, GetTimings(), GetCurrentIteration()->GetClearSoftReferences(), this); 2208} 2209 2210void ConcurrentCopying::RevokeAllThreadLocalBuffers() { 2211 TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings()); 2212 region_space_->RevokeAllThreadLocalBuffers(); 2213} 2214 2215} // namespace collector 2216} // namespace gc 2217} // namespace art 2218