concurrent_copying.cc revision ed70b4a439acea537d55266bb9b1f309de8cbec9
1/* 2 * Copyright (C) 2014 The Android Open Source Project 3 * 4 * Licensed under the Apache License, Version 2.0 (the "License"); 5 * you may not use this file except in compliance with the License. 6 * You may obtain a copy of the License at 7 * 8 * http://www.apache.org/licenses/LICENSE-2.0 9 * 10 * Unless required by applicable law or agreed to in writing, software 11 * distributed under the License is distributed on an "AS IS" BASIS, 12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 * See the License for the specific language governing permissions and 14 * limitations under the License. 15 */ 16 17#include "concurrent_copying.h" 18 19#include "art_field-inl.h" 20#include "base/stl_util.h" 21#include "debugger.h" 22#include "gc/accounting/heap_bitmap-inl.h" 23#include "gc/accounting/space_bitmap-inl.h" 24#include "gc/reference_processor.h" 25#include "gc/space/image_space.h" 26#include "gc/space/space-inl.h" 27#include "intern_table.h" 28#include "mirror/class-inl.h" 29#include "mirror/object-inl.h" 30#include "scoped_thread_state_change.h" 31#include "thread-inl.h" 32#include "thread_list.h" 33#include "well_known_classes.h" 34 35namespace art { 36namespace gc { 37namespace collector { 38 39static constexpr size_t kDefaultGcMarkStackSize = 2 * MB; 40 41ConcurrentCopying::ConcurrentCopying(Heap* heap, const std::string& name_prefix) 42 : GarbageCollector(heap, 43 name_prefix + (name_prefix.empty() ? "" : " ") + 44 "concurrent copying + mark sweep"), 45 region_space_(nullptr), gc_barrier_(new Barrier(0)), 46 gc_mark_stack_(accounting::ObjectStack::Create("concurrent copying gc mark stack", 47 kDefaultGcMarkStackSize, 48 kDefaultGcMarkStackSize)), 49 mark_stack_lock_("concurrent copying mark stack lock", kMarkSweepMarkStackLock), 50 thread_running_gc_(nullptr), 51 is_marking_(false), is_active_(false), is_asserting_to_space_invariant_(false), 52 heap_mark_bitmap_(nullptr), live_stack_freeze_size_(0), mark_stack_mode_(kMarkStackModeOff), 53 weak_ref_access_enabled_(true), 54 skipped_blocks_lock_("concurrent copying bytes blocks lock", kMarkSweepMarkStackLock), 55 rb_table_(heap_->GetReadBarrierTable()), 56 force_evacuate_all_(false) { 57 static_assert(space::RegionSpace::kRegionSize == accounting::ReadBarrierTable::kRegionSize, 58 "The region space size and the read barrier table region size must match"); 59 cc_heap_bitmap_.reset(new accounting::HeapBitmap(heap)); 60 Thread* self = Thread::Current(); 61 { 62 ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_); 63 // Cache this so that we won't have to lock heap_bitmap_lock_ in 64 // Mark() which could cause a nested lock on heap_bitmap_lock_ 65 // when GC causes a RB while doing GC or a lock order violation 66 // (class_linker_lock_ and heap_bitmap_lock_). 67 heap_mark_bitmap_ = heap->GetMarkBitmap(); 68 } 69 { 70 MutexLock mu(self, mark_stack_lock_); 71 for (size_t i = 0; i < kMarkStackPoolSize; ++i) { 72 accounting::AtomicStack<mirror::Object>* mark_stack = 73 accounting::AtomicStack<mirror::Object>::Create( 74 "thread local mark stack", kMarkStackSize, kMarkStackSize); 75 pooled_mark_stacks_.push_back(mark_stack); 76 } 77 } 78} 79 80void ConcurrentCopying::MarkHeapReference(mirror::HeapReference<mirror::Object>* from_ref) { 81 // Used for preserving soft references, should be OK to not have a CAS here since there should be 82 // no other threads which can trigger read barriers on the same referent during reference 83 // processing. 
84 from_ref->Assign(Mark(from_ref->AsMirrorPtr())); 85 DCHECK(!from_ref->IsNull()); 86} 87 88ConcurrentCopying::~ConcurrentCopying() { 89 STLDeleteElements(&pooled_mark_stacks_); 90} 91 92void ConcurrentCopying::RunPhases() { 93 CHECK(kUseBakerReadBarrier || kUseTableLookupReadBarrier); 94 CHECK(!is_active_); 95 is_active_ = true; 96 Thread* self = Thread::Current(); 97 thread_running_gc_ = self; 98 Locks::mutator_lock_->AssertNotHeld(self); 99 { 100 ReaderMutexLock mu(self, *Locks::mutator_lock_); 101 InitializePhase(); 102 } 103 FlipThreadRoots(); 104 { 105 ReaderMutexLock mu(self, *Locks::mutator_lock_); 106 MarkingPhase(); 107 } 108 // Verify no from space refs. This causes a pause. 109 if (kEnableNoFromSpaceRefsVerification || kIsDebugBuild) { 110 TimingLogger::ScopedTiming split("(Paused)VerifyNoFromSpaceReferences", GetTimings()); 111 ScopedPause pause(this); 112 CheckEmptyMarkStack(); 113 if (kVerboseMode) { 114 LOG(INFO) << "Verifying no from-space refs"; 115 } 116 VerifyNoFromSpaceReferences(); 117 if (kVerboseMode) { 118 LOG(INFO) << "Done verifying no from-space refs"; 119 } 120 CheckEmptyMarkStack(); 121 } 122 { 123 ReaderMutexLock mu(self, *Locks::mutator_lock_); 124 ReclaimPhase(); 125 } 126 FinishPhase(); 127 CHECK(is_active_); 128 is_active_ = false; 129 thread_running_gc_ = nullptr; 130} 131 132void ConcurrentCopying::BindBitmaps() { 133 Thread* self = Thread::Current(); 134 WriterMutexLock mu(self, *Locks::heap_bitmap_lock_); 135 // Mark all of the spaces we never collect as immune. 136 for (const auto& space : heap_->GetContinuousSpaces()) { 137 if (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyNeverCollect 138 || space->GetGcRetentionPolicy() == space::kGcRetentionPolicyFullCollect) { 139 CHECK(space->IsZygoteSpace() || space->IsImageSpace()); 140 CHECK(immune_region_.AddContinuousSpace(space)) << "Failed to add space " << *space; 141 const char* bitmap_name = space->IsImageSpace() ? "cc image space bitmap" : 142 "cc zygote space bitmap"; 143 // TODO: try avoiding using bitmaps for image/zygote to save space. 
144 accounting::ContinuousSpaceBitmap* bitmap = 145 accounting::ContinuousSpaceBitmap::Create(bitmap_name, space->Begin(), space->Capacity()); 146 cc_heap_bitmap_->AddContinuousSpaceBitmap(bitmap); 147 cc_bitmaps_.push_back(bitmap); 148 } else if (space == region_space_) { 149 accounting::ContinuousSpaceBitmap* bitmap = 150 accounting::ContinuousSpaceBitmap::Create("cc region space bitmap", 151 space->Begin(), space->Capacity()); 152 cc_heap_bitmap_->AddContinuousSpaceBitmap(bitmap); 153 cc_bitmaps_.push_back(bitmap); 154 region_space_bitmap_ = bitmap; 155 } 156 } 157} 158 159void ConcurrentCopying::InitializePhase() { 160 TimingLogger::ScopedTiming split("InitializePhase", GetTimings()); 161 if (kVerboseMode) { 162 LOG(INFO) << "GC InitializePhase"; 163 LOG(INFO) << "Region-space : " << reinterpret_cast<void*>(region_space_->Begin()) << "-" 164 << reinterpret_cast<void*>(region_space_->Limit()); 165 } 166 CheckEmptyMarkStack(); 167 immune_region_.Reset(); 168 bytes_moved_.StoreRelaxed(0); 169 objects_moved_.StoreRelaxed(0); 170 if (GetCurrentIteration()->GetGcCause() == kGcCauseExplicit || 171 GetCurrentIteration()->GetGcCause() == kGcCauseForNativeAlloc || 172 GetCurrentIteration()->GetClearSoftReferences()) { 173 force_evacuate_all_ = true; 174 } else { 175 force_evacuate_all_ = false; 176 } 177 BindBitmaps(); 178 if (kVerboseMode) { 179 LOG(INFO) << "force_evacuate_all=" << force_evacuate_all_; 180 LOG(INFO) << "Immune region: " << immune_region_.Begin() << "-" << immune_region_.End(); 181 LOG(INFO) << "GC end of InitializePhase"; 182 } 183} 184 185// Used to switch the thread roots of a thread from from-space refs to to-space refs. 186class ThreadFlipVisitor : public Closure { 187 public: 188 ThreadFlipVisitor(ConcurrentCopying* concurrent_copying, bool use_tlab) 189 : concurrent_copying_(concurrent_copying), use_tlab_(use_tlab) { 190 } 191 192 virtual void Run(Thread* thread) OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) { 193 // Note: self is not necessarily equal to thread since thread may be suspended. 194 Thread* self = Thread::Current(); 195 CHECK(thread == self || thread->IsSuspended() || thread->GetState() == kWaitingPerformingGc) 196 << thread->GetState() << " thread " << thread << " self " << self; 197 thread->SetIsGcMarking(true); 198 if (use_tlab_ && thread->HasTlab()) { 199 if (ConcurrentCopying::kEnableFromSpaceAccountingCheck) { 200 // This must come before the revoke. 201 size_t thread_local_objects = thread->GetThreadLocalObjectsAllocated(); 202 concurrent_copying_->region_space_->RevokeThreadLocalBuffers(thread); 203 reinterpret_cast<Atomic<size_t>*>(&concurrent_copying_->from_space_num_objects_at_first_pause_)-> 204 FetchAndAddSequentiallyConsistent(thread_local_objects); 205 } else { 206 concurrent_copying_->region_space_->RevokeThreadLocalBuffers(thread); 207 } 208 } 209 if (kUseThreadLocalAllocationStack) { 210 thread->RevokeThreadLocalAllocationStack(); 211 } 212 ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_); 213 thread->VisitRoots(concurrent_copying_); 214 concurrent_copying_->GetBarrier().Pass(self); 215 } 216 217 private: 218 ConcurrentCopying* const concurrent_copying_; 219 const bool use_tlab_; 220}; 221 222// Called back from Runtime::FlipThreadRoots() during a pause. 
223class FlipCallback : public Closure { 224 public: 225 explicit FlipCallback(ConcurrentCopying* concurrent_copying) 226 : concurrent_copying_(concurrent_copying) { 227 } 228 229 virtual void Run(Thread* thread) OVERRIDE REQUIRES(Locks::mutator_lock_) { 230 ConcurrentCopying* cc = concurrent_copying_; 231 TimingLogger::ScopedTiming split("(Paused)FlipCallback", cc->GetTimings()); 232 // Note: self is not necessarily equal to thread since thread may be suspended. 233 Thread* self = Thread::Current(); 234 CHECK(thread == self); 235 Locks::mutator_lock_->AssertExclusiveHeld(self); 236 cc->region_space_->SetFromSpace(cc->rb_table_, cc->force_evacuate_all_); 237 cc->SwapStacks(); 238 if (ConcurrentCopying::kEnableFromSpaceAccountingCheck) { 239 cc->RecordLiveStackFreezeSize(self); 240 cc->from_space_num_objects_at_first_pause_ = cc->region_space_->GetObjectsAllocated(); 241 cc->from_space_num_bytes_at_first_pause_ = cc->region_space_->GetBytesAllocated(); 242 } 243 cc->is_marking_ = true; 244 cc->mark_stack_mode_.StoreRelaxed(ConcurrentCopying::kMarkStackModeThreadLocal); 245 if (UNLIKELY(Runtime::Current()->IsActiveTransaction())) { 246 CHECK(Runtime::Current()->IsAotCompiler()); 247 TimingLogger::ScopedTiming split2("(Paused)VisitTransactionRoots", cc->GetTimings()); 248 Runtime::Current()->VisitTransactionRoots(cc); 249 } 250 } 251 252 private: 253 ConcurrentCopying* const concurrent_copying_; 254}; 255 256// Switch threads that from from-space to to-space refs. Forward/mark the thread roots. 257void ConcurrentCopying::FlipThreadRoots() { 258 TimingLogger::ScopedTiming split("FlipThreadRoots", GetTimings()); 259 if (kVerboseMode) { 260 LOG(INFO) << "time=" << region_space_->Time(); 261 region_space_->DumpNonFreeRegions(LOG(INFO)); 262 } 263 Thread* self = Thread::Current(); 264 Locks::mutator_lock_->AssertNotHeld(self); 265 gc_barrier_->Init(self, 0); 266 ThreadFlipVisitor thread_flip_visitor(this, heap_->use_tlab_); 267 FlipCallback flip_callback(this); 268 heap_->ThreadFlipBegin(self); // Sync with JNI critical calls. 269 size_t barrier_count = Runtime::Current()->FlipThreadRoots( 270 &thread_flip_visitor, &flip_callback, this); 271 heap_->ThreadFlipEnd(self); 272 { 273 ScopedThreadStateChange tsc(self, kWaitingForCheckPointsToRun); 274 gc_barrier_->Increment(self, barrier_count); 275 } 276 is_asserting_to_space_invariant_ = true; 277 QuasiAtomic::ThreadFenceForConstructor(); 278 if (kVerboseMode) { 279 LOG(INFO) << "time=" << region_space_->Time(); 280 region_space_->DumpNonFreeRegions(LOG(INFO)); 281 LOG(INFO) << "GC end of FlipThreadRoots"; 282 } 283} 284 285void ConcurrentCopying::SwapStacks() { 286 heap_->SwapStacks(); 287} 288 289void ConcurrentCopying::RecordLiveStackFreezeSize(Thread* self) { 290 WriterMutexLock mu(self, *Locks::heap_bitmap_lock_); 291 live_stack_freeze_size_ = heap_->GetLiveStack()->Size(); 292} 293 294// Used to visit objects in the immune spaces. 
295class ConcurrentCopyingImmuneSpaceObjVisitor { 296 public: 297 explicit ConcurrentCopyingImmuneSpaceObjVisitor(ConcurrentCopying* cc) 298 : collector_(cc) {} 299 300 void operator()(mirror::Object* obj) const SHARED_REQUIRES(Locks::mutator_lock_) 301 SHARED_REQUIRES(Locks::heap_bitmap_lock_) { 302 DCHECK(obj != nullptr); 303 DCHECK(collector_->immune_region_.ContainsObject(obj)); 304 accounting::ContinuousSpaceBitmap* cc_bitmap = 305 collector_->cc_heap_bitmap_->GetContinuousSpaceBitmap(obj); 306 DCHECK(cc_bitmap != nullptr) 307 << "An immune space object must have a bitmap"; 308 if (kIsDebugBuild) { 309 DCHECK(collector_->heap_->GetMarkBitmap()->Test(obj)) 310 << "Immune space object must be already marked"; 311 } 312 // This may or may not succeed, which is ok. 313 if (kUseBakerReadBarrier) { 314 obj->AtomicSetReadBarrierPointer(ReadBarrier::WhitePtr(), ReadBarrier::GrayPtr()); 315 } 316 if (cc_bitmap->AtomicTestAndSet(obj)) { 317 // Already marked. Do nothing. 318 } else { 319 // Newly marked. Set the gray bit and push it onto the mark stack. 320 CHECK(!kUseBakerReadBarrier || obj->GetReadBarrierPointer() == ReadBarrier::GrayPtr()); 321 collector_->PushOntoMarkStack(obj); 322 } 323 } 324 325 private: 326 ConcurrentCopying* const collector_; 327}; 328 329class EmptyCheckpoint : public Closure { 330 public: 331 explicit EmptyCheckpoint(ConcurrentCopying* concurrent_copying) 332 : concurrent_copying_(concurrent_copying) { 333 } 334 335 virtual void Run(Thread* thread) OVERRIDE NO_THREAD_SAFETY_ANALYSIS { 336 // Note: self is not necessarily equal to thread since thread may be suspended. 337 Thread* self = Thread::Current(); 338 CHECK(thread == self || thread->IsSuspended() || thread->GetState() == kWaitingPerformingGc) 339 << thread->GetState() << " thread " << thread << " self " << self; 340 // If thread is a running mutator, then act on behalf of the garbage collector. 341 // See the code in ThreadList::RunCheckpoint. 342 concurrent_copying_->GetBarrier().Pass(self); 343 } 344 345 private: 346 ConcurrentCopying* const concurrent_copying_; 347}; 348 349// Concurrently mark roots that are guarded by read barriers and process the mark stack. 350void ConcurrentCopying::MarkingPhase() { 351 TimingLogger::ScopedTiming split("MarkingPhase", GetTimings()); 352 if (kVerboseMode) { 353 LOG(INFO) << "GC MarkingPhase"; 354 } 355 CHECK(weak_ref_access_enabled_); 356 { 357 // Mark the image root. The WB-based collectors do not need to 358 // scan the image objects from roots by relying on the card table, 359 // but it's necessary for the RB to-space invariant to hold. 360 TimingLogger::ScopedTiming split1("VisitImageRoots", GetTimings()); 361 for (space::ContinuousSpace* space : heap_->GetContinuousSpaces()) { 362 if (space->IsImageSpace()) { 363 gc::space::ImageSpace* image = space->AsImageSpace(); 364 if (image != nullptr) { 365 mirror::ObjectArray<mirror::Object>* image_root = image->GetImageHeader().GetImageRoots(); 366 mirror::Object* marked_image_root = Mark(image_root); 367 CHECK_EQ(image_root, marked_image_root) << "An image object does not move"; 368 if (ReadBarrier::kEnableToSpaceInvariantChecks) { 369 AssertToSpaceInvariant(nullptr, MemberOffset(0), marked_image_root); 370 } 371 } 372 } 373 } 374 } 375 { 376 TimingLogger::ScopedTiming split2("VisitConcurrentRoots", GetTimings()); 377 Runtime::Current()->VisitConcurrentRoots(this, kVisitRootFlagAllRoots); 378 } 379 { 380 // TODO: don't visit the transaction roots if it's not active. 
381 TimingLogger::ScopedTiming split5("VisitNonThreadRoots", GetTimings()); 382 Runtime::Current()->VisitNonThreadRoots(this); 383 } 384 385 // Immune spaces. 386 for (auto& space : heap_->GetContinuousSpaces()) { 387 if (immune_region_.ContainsSpace(space)) { 388 DCHECK(space->IsImageSpace() || space->IsZygoteSpace()); 389 accounting::ContinuousSpaceBitmap* live_bitmap = space->GetLiveBitmap(); 390 ConcurrentCopyingImmuneSpaceObjVisitor visitor(this); 391 live_bitmap->VisitMarkedRange(reinterpret_cast<uintptr_t>(space->Begin()), 392 reinterpret_cast<uintptr_t>(space->Limit()), 393 visitor); 394 } 395 } 396 397 Thread* self = Thread::Current(); 398 { 399 TimingLogger::ScopedTiming split7("ProcessMarkStack", GetTimings()); 400 // We transition through three mark stack modes (thread-local, shared, GC-exclusive). The 401 // primary reasons are the fact that we need to use a checkpoint to process thread-local mark 402 // stacks, but after we disable weak refs accesses, we can't use a checkpoint due to a deadlock 403 // issue because running threads potentially blocking at WaitHoldingLocks, and that once we 404 // reach the point where we process weak references, we can avoid using a lock when accessing 405 // the GC mark stack, which makes mark stack processing more efficient. 406 407 // Process the mark stack once in the thread local stack mode. This marks most of the live 408 // objects, aside from weak ref accesses with read barriers (Reference::GetReferent() and system 409 // weaks) that may happen concurrently while we processing the mark stack and newly mark/gray 410 // objects and push refs on the mark stack. 411 ProcessMarkStack(); 412 // Switch to the shared mark stack mode. That is, revoke and process thread-local mark stacks 413 // for the last time before transitioning to the shared mark stack mode, which would process new 414 // refs that may have been concurrently pushed onto the mark stack during the ProcessMarkStack() 415 // call above. At the same time, disable weak ref accesses using a per-thread flag. It's 416 // important to do these together in a single checkpoint so that we can ensure that mutators 417 // won't newly gray objects and push new refs onto the mark stack due to weak ref accesses and 418 // mutators safely transition to the shared mark stack mode (without leaving unprocessed refs on 419 // the thread-local mark stacks), without a race. This is why we use a thread-local weak ref 420 // access flag Thread::tls32_.weak_ref_access_enabled_ instead of the global ones. 421 SwitchToSharedMarkStackMode(); 422 CHECK(!self->GetWeakRefAccessEnabled()); 423 // Now that weak refs accesses are disabled, once we exhaust the shared mark stack again here 424 // (which may be non-empty if there were refs found on thread-local mark stacks during the above 425 // SwitchToSharedMarkStackMode() call), we won't have new refs to process, that is, mutators 426 // (via read barriers) have no way to produce any more refs to process. Marking converges once 427 // before we process weak refs below. 428 ProcessMarkStack(); 429 CheckEmptyMarkStack(); 430 // Switch to the GC exclusive mark stack mode so that we can process the mark stack without a 431 // lock from this point on. 432 SwitchToGcExclusiveMarkStackMode(); 433 CheckEmptyMarkStack(); 434 if (kVerboseMode) { 435 LOG(INFO) << "ProcessReferences"; 436 } 437 // Process weak references. This may produce new refs to process and have them processed via 438 // ProcessMarkStack (in the GC exclusive mark stack mode). 
439 ProcessReferences(self); 440 CheckEmptyMarkStack(); 441 if (kVerboseMode) { 442 LOG(INFO) << "SweepSystemWeaks"; 443 } 444 SweepSystemWeaks(self); 445 if (kVerboseMode) { 446 LOG(INFO) << "SweepSystemWeaks done"; 447 } 448 // Process the mark stack here one last time because the above SweepSystemWeaks() call may have 449 // marked some objects (strings alive) as hash_set::Erase() can call the hash function for 450 // arbitrary elements in the weak intern table in InternTable::Table::SweepWeaks(). 451 ProcessMarkStack(); 452 CheckEmptyMarkStack(); 453 // Re-enable weak ref accesses. 454 ReenableWeakRefAccess(self); 455 // Free data for class loaders that we unloaded. 456 Runtime::Current()->GetClassLinker()->CleanupClassLoaders(); 457 // Marking is done. Disable marking. 458 DisableMarking(); 459 CheckEmptyMarkStack(); 460 } 461 462 CHECK(weak_ref_access_enabled_); 463 if (kVerboseMode) { 464 LOG(INFO) << "GC end of MarkingPhase"; 465 } 466} 467 468void ConcurrentCopying::ReenableWeakRefAccess(Thread* self) { 469 if (kVerboseMode) { 470 LOG(INFO) << "ReenableWeakRefAccess"; 471 } 472 weak_ref_access_enabled_.StoreRelaxed(true); // This is for new threads. 473 QuasiAtomic::ThreadFenceForConstructor(); 474 // Iterate all threads (don't need to or can't use a checkpoint) and re-enable weak ref access. 475 { 476 MutexLock mu(self, *Locks::thread_list_lock_); 477 std::list<Thread*> thread_list = Runtime::Current()->GetThreadList()->GetList(); 478 for (Thread* thread : thread_list) { 479 thread->SetWeakRefAccessEnabled(true); 480 } 481 } 482 // Unblock blocking threads. 483 GetHeap()->GetReferenceProcessor()->BroadcastForSlowPath(self); 484 Runtime::Current()->BroadcastForNewSystemWeaks(); 485} 486 487class DisableMarkingCheckpoint : public Closure { 488 public: 489 explicit DisableMarkingCheckpoint(ConcurrentCopying* concurrent_copying) 490 : concurrent_copying_(concurrent_copying) { 491 } 492 493 void Run(Thread* thread) OVERRIDE NO_THREAD_SAFETY_ANALYSIS { 494 // Note: self is not necessarily equal to thread since thread may be suspended. 495 Thread* self = Thread::Current(); 496 DCHECK(thread == self || thread->IsSuspended() || thread->GetState() == kWaitingPerformingGc) 497 << thread->GetState() << " thread " << thread << " self " << self; 498 // Disable the thread-local is_gc_marking flag. 499 // Note a thread that has just started right before this checkpoint may have already this flag 500 // set to false, which is ok. 501 thread->SetIsGcMarking(false); 502 // If thread is a running mutator, then act on behalf of the garbage collector. 503 // See the code in ThreadList::RunCheckpoint. 504 concurrent_copying_->GetBarrier().Pass(self); 505 } 506 507 private: 508 ConcurrentCopying* const concurrent_copying_; 509}; 510 511void ConcurrentCopying::IssueDisableMarkingCheckpoint() { 512 Thread* self = Thread::Current(); 513 DisableMarkingCheckpoint check_point(this); 514 ThreadList* thread_list = Runtime::Current()->GetThreadList(); 515 gc_barrier_->Init(self, 0); 516 size_t barrier_count = thread_list->RunCheckpoint(&check_point); 517 // If there are no threads to wait which implies that all the checkpoint functions are finished, 518 // then no need to release the mutator lock. 519 if (barrier_count == 0) { 520 return; 521 } 522 // Release locks then wait for all mutator threads to pass the barrier. 
523 Locks::mutator_lock_->SharedUnlock(self); 524 { 525 ScopedThreadStateChange tsc(self, kWaitingForCheckPointsToRun); 526 gc_barrier_->Increment(self, barrier_count); 527 } 528 Locks::mutator_lock_->SharedLock(self); 529} 530 531void ConcurrentCopying::DisableMarking() { 532 // Change the global is_marking flag to false. Do a fence before doing a checkpoint to update the 533 // thread-local flags so that a new thread starting up will get the correct is_marking flag. 534 is_marking_ = false; 535 QuasiAtomic::ThreadFenceForConstructor(); 536 // Use a checkpoint to turn off the thread-local is_gc_marking flags and to ensure no threads are 537 // still in the middle of a read barrier which may have a from-space ref cached in a local 538 // variable. 539 IssueDisableMarkingCheckpoint(); 540 if (kUseTableLookupReadBarrier) { 541 heap_->rb_table_->ClearAll(); 542 DCHECK(heap_->rb_table_->IsAllCleared()); 543 } 544 is_mark_stack_push_disallowed_.StoreSequentiallyConsistent(1); 545 mark_stack_mode_.StoreSequentiallyConsistent(kMarkStackModeOff); 546} 547 548void ConcurrentCopying::IssueEmptyCheckpoint() { 549 Thread* self = Thread::Current(); 550 EmptyCheckpoint check_point(this); 551 ThreadList* thread_list = Runtime::Current()->GetThreadList(); 552 gc_barrier_->Init(self, 0); 553 size_t barrier_count = thread_list->RunCheckpoint(&check_point); 554 // If there are no threads to wait which implys that all the checkpoint functions are finished, 555 // then no need to release the mutator lock. 556 if (barrier_count == 0) { 557 return; 558 } 559 // Release locks then wait for all mutator threads to pass the barrier. 560 Locks::mutator_lock_->SharedUnlock(self); 561 { 562 ScopedThreadStateChange tsc(self, kWaitingForCheckPointsToRun); 563 gc_barrier_->Increment(self, barrier_count); 564 } 565 Locks::mutator_lock_->SharedLock(self); 566} 567 568void ConcurrentCopying::ExpandGcMarkStack() { 569 DCHECK(gc_mark_stack_->IsFull()); 570 const size_t new_size = gc_mark_stack_->Capacity() * 2; 571 std::vector<StackReference<mirror::Object>> temp(gc_mark_stack_->Begin(), 572 gc_mark_stack_->End()); 573 gc_mark_stack_->Resize(new_size); 574 for (auto& ref : temp) { 575 gc_mark_stack_->PushBack(ref.AsMirrorPtr()); 576 } 577 DCHECK(!gc_mark_stack_->IsFull()); 578} 579 580void ConcurrentCopying::PushOntoMarkStack(mirror::Object* to_ref) { 581 CHECK_EQ(is_mark_stack_push_disallowed_.LoadRelaxed(), 0) 582 << " " << to_ref << " " << PrettyTypeOf(to_ref); 583 Thread* self = Thread::Current(); // TODO: pass self as an argument from call sites? 584 CHECK(thread_running_gc_ != nullptr); 585 MarkStackMode mark_stack_mode = mark_stack_mode_.LoadRelaxed(); 586 if (LIKELY(mark_stack_mode == kMarkStackModeThreadLocal)) { 587 if (LIKELY(self == thread_running_gc_)) { 588 // If GC-running thread, use the GC mark stack instead of a thread-local mark stack. 589 CHECK(self->GetThreadLocalMarkStack() == nullptr); 590 if (UNLIKELY(gc_mark_stack_->IsFull())) { 591 ExpandGcMarkStack(); 592 } 593 gc_mark_stack_->PushBack(to_ref); 594 } else { 595 // Otherwise, use a thread-local mark stack. 596 accounting::AtomicStack<mirror::Object>* tl_mark_stack = self->GetThreadLocalMarkStack(); 597 if (UNLIKELY(tl_mark_stack == nullptr || tl_mark_stack->IsFull())) { 598 MutexLock mu(self, mark_stack_lock_); 599 // Get a new thread local mark stack. 600 accounting::AtomicStack<mirror::Object>* new_tl_mark_stack; 601 if (!pooled_mark_stacks_.empty()) { 602 // Use a pooled mark stack. 
603 new_tl_mark_stack = pooled_mark_stacks_.back(); 604 pooled_mark_stacks_.pop_back(); 605 } else { 606 // None pooled. Create a new one. 607 new_tl_mark_stack = 608 accounting::AtomicStack<mirror::Object>::Create( 609 "thread local mark stack", 4 * KB, 4 * KB); 610 } 611 DCHECK(new_tl_mark_stack != nullptr); 612 DCHECK(new_tl_mark_stack->IsEmpty()); 613 new_tl_mark_stack->PushBack(to_ref); 614 self->SetThreadLocalMarkStack(new_tl_mark_stack); 615 if (tl_mark_stack != nullptr) { 616 // Store the old full stack into a vector. 617 revoked_mark_stacks_.push_back(tl_mark_stack); 618 } 619 } else { 620 tl_mark_stack->PushBack(to_ref); 621 } 622 } 623 } else if (mark_stack_mode == kMarkStackModeShared) { 624 // Access the shared GC mark stack with a lock. 625 MutexLock mu(self, mark_stack_lock_); 626 if (UNLIKELY(gc_mark_stack_->IsFull())) { 627 ExpandGcMarkStack(); 628 } 629 gc_mark_stack_->PushBack(to_ref); 630 } else { 631 CHECK_EQ(static_cast<uint32_t>(mark_stack_mode), 632 static_cast<uint32_t>(kMarkStackModeGcExclusive)) 633 << "ref=" << to_ref 634 << " self->gc_marking=" << self->GetIsGcMarking() 635 << " cc->is_marking=" << is_marking_; 636 CHECK(self == thread_running_gc_) 637 << "Only GC-running thread should access the mark stack " 638 << "in the GC exclusive mark stack mode"; 639 // Access the GC mark stack without a lock. 640 if (UNLIKELY(gc_mark_stack_->IsFull())) { 641 ExpandGcMarkStack(); 642 } 643 gc_mark_stack_->PushBack(to_ref); 644 } 645} 646 647accounting::ObjectStack* ConcurrentCopying::GetAllocationStack() { 648 return heap_->allocation_stack_.get(); 649} 650 651accounting::ObjectStack* ConcurrentCopying::GetLiveStack() { 652 return heap_->live_stack_.get(); 653} 654 655// The following visitors are that used to verify that there's no 656// references to the from-space left after marking. 657class ConcurrentCopyingVerifyNoFromSpaceRefsVisitor : public SingleRootVisitor { 658 public: 659 explicit ConcurrentCopyingVerifyNoFromSpaceRefsVisitor(ConcurrentCopying* collector) 660 : collector_(collector) {} 661 662 void operator()(mirror::Object* ref) const 663 SHARED_REQUIRES(Locks::mutator_lock_) ALWAYS_INLINE { 664 if (ref == nullptr) { 665 // OK. 666 return; 667 } 668 collector_->AssertToSpaceInvariant(nullptr, MemberOffset(0), ref); 669 if (kUseBakerReadBarrier) { 670 if (collector_->RegionSpace()->IsInToSpace(ref)) { 671 CHECK(ref->GetReadBarrierPointer() == nullptr) 672 << "To-space ref " << ref << " " << PrettyTypeOf(ref) 673 << " has non-white rb_ptr " << ref->GetReadBarrierPointer(); 674 } else { 675 CHECK(ref->GetReadBarrierPointer() == ReadBarrier::BlackPtr() || 676 (ref->GetReadBarrierPointer() == ReadBarrier::WhitePtr() && 677 collector_->IsOnAllocStack(ref))) 678 << "Non-moving/unevac from space ref " << ref << " " << PrettyTypeOf(ref) 679 << " has non-black rb_ptr " << ref->GetReadBarrierPointer() 680 << " but isn't on the alloc stack (and has white rb_ptr)." 
681 << " Is it in the non-moving space=" 682 << (collector_->GetHeap()->GetNonMovingSpace()->HasAddress(ref)); 683 } 684 } 685 } 686 687 void VisitRoot(mirror::Object* root, const RootInfo& info ATTRIBUTE_UNUSED) 688 OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) { 689 DCHECK(root != nullptr); 690 operator()(root); 691 } 692 693 private: 694 ConcurrentCopying* const collector_; 695}; 696 697class ConcurrentCopyingVerifyNoFromSpaceRefsFieldVisitor { 698 public: 699 explicit ConcurrentCopyingVerifyNoFromSpaceRefsFieldVisitor(ConcurrentCopying* collector) 700 : collector_(collector) {} 701 702 void operator()(mirror::Object* obj, MemberOffset offset, bool is_static ATTRIBUTE_UNUSED) const 703 SHARED_REQUIRES(Locks::mutator_lock_) ALWAYS_INLINE { 704 mirror::Object* ref = 705 obj->GetFieldObject<mirror::Object, kDefaultVerifyFlags, kWithoutReadBarrier>(offset); 706 ConcurrentCopyingVerifyNoFromSpaceRefsVisitor visitor(collector_); 707 visitor(ref); 708 } 709 void operator()(mirror::Class* klass, mirror::Reference* ref) const 710 SHARED_REQUIRES(Locks::mutator_lock_) ALWAYS_INLINE { 711 CHECK(klass->IsTypeOfReferenceClass()); 712 this->operator()(ref, mirror::Reference::ReferentOffset(), false); 713 } 714 715 void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const 716 SHARED_REQUIRES(Locks::mutator_lock_) { 717 if (!root->IsNull()) { 718 VisitRoot(root); 719 } 720 } 721 722 void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const 723 SHARED_REQUIRES(Locks::mutator_lock_) { 724 ConcurrentCopyingVerifyNoFromSpaceRefsVisitor visitor(collector_); 725 visitor(root->AsMirrorPtr()); 726 } 727 728 private: 729 ConcurrentCopying* const collector_; 730}; 731 732class ConcurrentCopyingVerifyNoFromSpaceRefsObjectVisitor { 733 public: 734 explicit ConcurrentCopyingVerifyNoFromSpaceRefsObjectVisitor(ConcurrentCopying* collector) 735 : collector_(collector) {} 736 void operator()(mirror::Object* obj) const 737 SHARED_REQUIRES(Locks::mutator_lock_) { 738 ObjectCallback(obj, collector_); 739 } 740 static void ObjectCallback(mirror::Object* obj, void *arg) 741 SHARED_REQUIRES(Locks::mutator_lock_) { 742 CHECK(obj != nullptr); 743 ConcurrentCopying* collector = reinterpret_cast<ConcurrentCopying*>(arg); 744 space::RegionSpace* region_space = collector->RegionSpace(); 745 CHECK(!region_space->IsInFromSpace(obj)) << "Scanning object " << obj << " in from space"; 746 ConcurrentCopyingVerifyNoFromSpaceRefsFieldVisitor visitor(collector); 747 obj->VisitReferences(visitor, visitor); 748 if (kUseBakerReadBarrier) { 749 if (collector->RegionSpace()->IsInToSpace(obj)) { 750 CHECK(obj->GetReadBarrierPointer() == nullptr) 751 << "obj=" << obj << " non-white rb_ptr " << obj->GetReadBarrierPointer(); 752 } else { 753 CHECK(obj->GetReadBarrierPointer() == ReadBarrier::BlackPtr() || 754 (obj->GetReadBarrierPointer() == ReadBarrier::WhitePtr() && 755 collector->IsOnAllocStack(obj))) 756 << "Non-moving space/unevac from space ref " << obj << " " << PrettyTypeOf(obj) 757 << " has non-black rb_ptr " << obj->GetReadBarrierPointer() 758 << " but isn't on the alloc stack (and has white rb_ptr). Is it in the non-moving space=" 759 << (collector->GetHeap()->GetNonMovingSpace()->HasAddress(obj)); 760 } 761 } 762 } 763 764 private: 765 ConcurrentCopying* const collector_; 766}; 767 768// Verify there's no from-space references left after the marking phase. 
769void ConcurrentCopying::VerifyNoFromSpaceReferences() { 770 Thread* self = Thread::Current(); 771 DCHECK(Locks::mutator_lock_->IsExclusiveHeld(self)); 772 // Verify all threads have is_gc_marking to be false 773 { 774 MutexLock mu(self, *Locks::thread_list_lock_); 775 std::list<Thread*> thread_list = Runtime::Current()->GetThreadList()->GetList(); 776 for (Thread* thread : thread_list) { 777 CHECK(!thread->GetIsGcMarking()); 778 } 779 } 780 ConcurrentCopyingVerifyNoFromSpaceRefsObjectVisitor visitor(this); 781 // Roots. 782 { 783 ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_); 784 ConcurrentCopyingVerifyNoFromSpaceRefsVisitor ref_visitor(this); 785 Runtime::Current()->VisitRoots(&ref_visitor); 786 } 787 // The to-space. 788 region_space_->WalkToSpace(ConcurrentCopyingVerifyNoFromSpaceRefsObjectVisitor::ObjectCallback, 789 this); 790 // Non-moving spaces. 791 { 792 WriterMutexLock mu(self, *Locks::heap_bitmap_lock_); 793 heap_->GetMarkBitmap()->Visit(visitor); 794 } 795 // The alloc stack. 796 { 797 ConcurrentCopyingVerifyNoFromSpaceRefsVisitor ref_visitor(this); 798 for (auto* it = heap_->allocation_stack_->Begin(), *end = heap_->allocation_stack_->End(); 799 it < end; ++it) { 800 mirror::Object* const obj = it->AsMirrorPtr(); 801 if (obj != nullptr && obj->GetClass() != nullptr) { 802 // TODO: need to call this only if obj is alive? 803 ref_visitor(obj); 804 visitor(obj); 805 } 806 } 807 } 808 // TODO: LOS. But only refs in LOS are classes. 809} 810 811// The following visitors are used to assert the to-space invariant. 812class ConcurrentCopyingAssertToSpaceInvariantRefsVisitor { 813 public: 814 explicit ConcurrentCopyingAssertToSpaceInvariantRefsVisitor(ConcurrentCopying* collector) 815 : collector_(collector) {} 816 817 void operator()(mirror::Object* ref) const 818 SHARED_REQUIRES(Locks::mutator_lock_) ALWAYS_INLINE { 819 if (ref == nullptr) { 820 // OK. 
821 return; 822 } 823 collector_->AssertToSpaceInvariant(nullptr, MemberOffset(0), ref); 824 } 825 826 private: 827 ConcurrentCopying* const collector_; 828}; 829 830class ConcurrentCopyingAssertToSpaceInvariantFieldVisitor { 831 public: 832 explicit ConcurrentCopyingAssertToSpaceInvariantFieldVisitor(ConcurrentCopying* collector) 833 : collector_(collector) {} 834 835 void operator()(mirror::Object* obj, MemberOffset offset, bool is_static ATTRIBUTE_UNUSED) const 836 SHARED_REQUIRES(Locks::mutator_lock_) ALWAYS_INLINE { 837 mirror::Object* ref = 838 obj->GetFieldObject<mirror::Object, kDefaultVerifyFlags, kWithoutReadBarrier>(offset); 839 ConcurrentCopyingAssertToSpaceInvariantRefsVisitor visitor(collector_); 840 visitor(ref); 841 } 842 void operator()(mirror::Class* klass, mirror::Reference* ref ATTRIBUTE_UNUSED) const 843 SHARED_REQUIRES(Locks::mutator_lock_) ALWAYS_INLINE { 844 CHECK(klass->IsTypeOfReferenceClass()); 845 } 846 847 void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const 848 SHARED_REQUIRES(Locks::mutator_lock_) { 849 if (!root->IsNull()) { 850 VisitRoot(root); 851 } 852 } 853 854 void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const 855 SHARED_REQUIRES(Locks::mutator_lock_) { 856 ConcurrentCopyingAssertToSpaceInvariantRefsVisitor visitor(collector_); 857 visitor(root->AsMirrorPtr()); 858 } 859 860 private: 861 ConcurrentCopying* const collector_; 862}; 863 864class ConcurrentCopyingAssertToSpaceInvariantObjectVisitor { 865 public: 866 explicit ConcurrentCopyingAssertToSpaceInvariantObjectVisitor(ConcurrentCopying* collector) 867 : collector_(collector) {} 868 void operator()(mirror::Object* obj) const 869 SHARED_REQUIRES(Locks::mutator_lock_) { 870 ObjectCallback(obj, collector_); 871 } 872 static void ObjectCallback(mirror::Object* obj, void *arg) 873 SHARED_REQUIRES(Locks::mutator_lock_) { 874 CHECK(obj != nullptr); 875 ConcurrentCopying* collector = reinterpret_cast<ConcurrentCopying*>(arg); 876 space::RegionSpace* region_space = collector->RegionSpace(); 877 CHECK(!region_space->IsInFromSpace(obj)) << "Scanning object " << obj << " in from space"; 878 collector->AssertToSpaceInvariant(nullptr, MemberOffset(0), obj); 879 ConcurrentCopyingAssertToSpaceInvariantFieldVisitor visitor(collector); 880 obj->VisitReferences(visitor, visitor); 881 } 882 883 private: 884 ConcurrentCopying* const collector_; 885}; 886 887class RevokeThreadLocalMarkStackCheckpoint : public Closure { 888 public: 889 RevokeThreadLocalMarkStackCheckpoint(ConcurrentCopying* concurrent_copying, 890 bool disable_weak_ref_access) 891 : concurrent_copying_(concurrent_copying), 892 disable_weak_ref_access_(disable_weak_ref_access) { 893 } 894 895 virtual void Run(Thread* thread) OVERRIDE NO_THREAD_SAFETY_ANALYSIS { 896 // Note: self is not necessarily equal to thread since thread may be suspended. 897 Thread* self = Thread::Current(); 898 CHECK(thread == self || thread->IsSuspended() || thread->GetState() == kWaitingPerformingGc) 899 << thread->GetState() << " thread " << thread << " self " << self; 900 // Revoke thread local mark stacks. 901 accounting::AtomicStack<mirror::Object>* tl_mark_stack = thread->GetThreadLocalMarkStack(); 902 if (tl_mark_stack != nullptr) { 903 MutexLock mu(self, concurrent_copying_->mark_stack_lock_); 904 concurrent_copying_->revoked_mark_stacks_.push_back(tl_mark_stack); 905 thread->SetThreadLocalMarkStack(nullptr); 906 } 907 // Disable weak ref access. 
908 if (disable_weak_ref_access_) { 909 thread->SetWeakRefAccessEnabled(false); 910 } 911 // If thread is a running mutator, then act on behalf of the garbage collector. 912 // See the code in ThreadList::RunCheckpoint. 913 concurrent_copying_->GetBarrier().Pass(self); 914 } 915 916 private: 917 ConcurrentCopying* const concurrent_copying_; 918 const bool disable_weak_ref_access_; 919}; 920 921void ConcurrentCopying::RevokeThreadLocalMarkStacks(bool disable_weak_ref_access) { 922 Thread* self = Thread::Current(); 923 RevokeThreadLocalMarkStackCheckpoint check_point(this, disable_weak_ref_access); 924 ThreadList* thread_list = Runtime::Current()->GetThreadList(); 925 gc_barrier_->Init(self, 0); 926 size_t barrier_count = thread_list->RunCheckpoint(&check_point); 927 // If there are no threads to wait which implys that all the checkpoint functions are finished, 928 // then no need to release the mutator lock. 929 if (barrier_count == 0) { 930 return; 931 } 932 Locks::mutator_lock_->SharedUnlock(self); 933 { 934 ScopedThreadStateChange tsc(self, kWaitingForCheckPointsToRun); 935 gc_barrier_->Increment(self, barrier_count); 936 } 937 Locks::mutator_lock_->SharedLock(self); 938} 939 940void ConcurrentCopying::RevokeThreadLocalMarkStack(Thread* thread) { 941 Thread* self = Thread::Current(); 942 CHECK_EQ(self, thread); 943 accounting::AtomicStack<mirror::Object>* tl_mark_stack = thread->GetThreadLocalMarkStack(); 944 if (tl_mark_stack != nullptr) { 945 CHECK(is_marking_); 946 MutexLock mu(self, mark_stack_lock_); 947 revoked_mark_stacks_.push_back(tl_mark_stack); 948 thread->SetThreadLocalMarkStack(nullptr); 949 } 950} 951 952void ConcurrentCopying::ProcessMarkStack() { 953 if (kVerboseMode) { 954 LOG(INFO) << "ProcessMarkStack. "; 955 } 956 bool empty_prev = false; 957 while (true) { 958 bool empty = ProcessMarkStackOnce(); 959 if (empty_prev && empty) { 960 // Saw empty mark stack for a second time, done. 961 break; 962 } 963 empty_prev = empty; 964 } 965} 966 967bool ConcurrentCopying::ProcessMarkStackOnce() { 968 Thread* self = Thread::Current(); 969 CHECK(thread_running_gc_ != nullptr); 970 CHECK(self == thread_running_gc_); 971 CHECK(self->GetThreadLocalMarkStack() == nullptr); 972 size_t count = 0; 973 MarkStackMode mark_stack_mode = mark_stack_mode_.LoadRelaxed(); 974 if (mark_stack_mode == kMarkStackModeThreadLocal) { 975 // Process the thread-local mark stacks and the GC mark stack. 976 count += ProcessThreadLocalMarkStacks(false); 977 while (!gc_mark_stack_->IsEmpty()) { 978 mirror::Object* to_ref = gc_mark_stack_->PopBack(); 979 ProcessMarkStackRef(to_ref); 980 ++count; 981 } 982 gc_mark_stack_->Reset(); 983 } else if (mark_stack_mode == kMarkStackModeShared) { 984 // Process the shared GC mark stack with a lock. 985 { 986 MutexLock mu(self, mark_stack_lock_); 987 CHECK(revoked_mark_stacks_.empty()); 988 } 989 while (true) { 990 std::vector<mirror::Object*> refs; 991 { 992 // Copy refs with lock. Note the number of refs should be small. 
993 MutexLock mu(self, mark_stack_lock_); 994 if (gc_mark_stack_->IsEmpty()) { 995 break; 996 } 997 for (StackReference<mirror::Object>* p = gc_mark_stack_->Begin(); 998 p != gc_mark_stack_->End(); ++p) { 999 refs.push_back(p->AsMirrorPtr()); 1000 } 1001 gc_mark_stack_->Reset(); 1002 } 1003 for (mirror::Object* ref : refs) { 1004 ProcessMarkStackRef(ref); 1005 ++count; 1006 } 1007 } 1008 } else { 1009 CHECK_EQ(static_cast<uint32_t>(mark_stack_mode), 1010 static_cast<uint32_t>(kMarkStackModeGcExclusive)); 1011 { 1012 MutexLock mu(self, mark_stack_lock_); 1013 CHECK(revoked_mark_stacks_.empty()); 1014 } 1015 // Process the GC mark stack in the exclusive mode. No need to take the lock. 1016 while (!gc_mark_stack_->IsEmpty()) { 1017 mirror::Object* to_ref = gc_mark_stack_->PopBack(); 1018 ProcessMarkStackRef(to_ref); 1019 ++count; 1020 } 1021 gc_mark_stack_->Reset(); 1022 } 1023 1024 // Return true if the stack was empty. 1025 return count == 0; 1026} 1027 1028size_t ConcurrentCopying::ProcessThreadLocalMarkStacks(bool disable_weak_ref_access) { 1029 // Run a checkpoint to collect all thread local mark stacks and iterate over them all. 1030 RevokeThreadLocalMarkStacks(disable_weak_ref_access); 1031 size_t count = 0; 1032 std::vector<accounting::AtomicStack<mirror::Object>*> mark_stacks; 1033 { 1034 MutexLock mu(Thread::Current(), mark_stack_lock_); 1035 // Make a copy of the mark stack vector. 1036 mark_stacks = revoked_mark_stacks_; 1037 revoked_mark_stacks_.clear(); 1038 } 1039 for (accounting::AtomicStack<mirror::Object>* mark_stack : mark_stacks) { 1040 for (StackReference<mirror::Object>* p = mark_stack->Begin(); p != mark_stack->End(); ++p) { 1041 mirror::Object* to_ref = p->AsMirrorPtr(); 1042 ProcessMarkStackRef(to_ref); 1043 ++count; 1044 } 1045 { 1046 MutexLock mu(Thread::Current(), mark_stack_lock_); 1047 if (pooled_mark_stacks_.size() >= kMarkStackPoolSize) { 1048 // The pool has enough. Delete it. 1049 delete mark_stack; 1050 } else { 1051 // Otherwise, put it into the pool for later reuse. 1052 mark_stack->Reset(); 1053 pooled_mark_stacks_.push_back(mark_stack); 1054 } 1055 } 1056 } 1057 return count; 1058} 1059 1060inline void ConcurrentCopying::ProcessMarkStackRef(mirror::Object* to_ref) { 1061 DCHECK(!region_space_->IsInFromSpace(to_ref)); 1062 if (kUseBakerReadBarrier) { 1063 DCHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr()) 1064 << " " << to_ref << " " << to_ref->GetReadBarrierPointer() 1065 << " is_marked=" << IsMarked(to_ref); 1066 } 1067 // Scan ref fields. 1068 Scan(to_ref); 1069 // Mark the gray ref as white or black. 1070 if (kUseBakerReadBarrier) { 1071 DCHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr()) 1072 << " " << to_ref << " " << to_ref->GetReadBarrierPointer() 1073 << " is_marked=" << IsMarked(to_ref); 1074 } 1075#ifdef USE_BAKER_OR_BROOKS_READ_BARRIER 1076 if (UNLIKELY((to_ref->GetClass<kVerifyNone, kWithoutReadBarrier>()->IsTypeOfReferenceClass() && 1077 to_ref->AsReference()->GetReferent<kWithoutReadBarrier>() != nullptr && 1078 !IsInToSpace(to_ref->AsReference()->GetReferent<kWithoutReadBarrier>())))) { 1079 // Leave this Reference gray in the queue so that GetReferent() will trigger a read barrier. We 1080 // will change it to black or white later in ReferenceQueue::DequeuePendingReference(). 
1081 DCHECK(to_ref->AsReference()->IsEnqueued()) << "Left unenqueued ref gray " << to_ref; 1082 } else { 1083 // We may occasionally leave a Reference black or white in the queue if its referent happens to 1084 // be concurrently marked after the Scan() call above has enqueued the Reference, in which case 1085 // the above IsInToSpace() evaluates to true and we change the color from gray to black or white 1086 // here in this else block. 1087 if (kUseBakerReadBarrier) { 1088 if (region_space_->IsInToSpace(to_ref)) { 1089 // If to-space, change from gray to white. 1090 bool success = to_ref->AtomicSetReadBarrierPointer</*kCasRelease*/true>( 1091 ReadBarrier::GrayPtr(), 1092 ReadBarrier::WhitePtr()); 1093 DCHECK(success) << "Must succeed as we won the race."; 1094 DCHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::WhitePtr()); 1095 } else { 1096 // If non-moving space/unevac from space, change from gray 1097 // to black. We can't change gray to white because it's not 1098 // safe to use CAS if two threads change values in opposite 1099 // directions (A->B and B->A). So, we change it to black to 1100 // indicate non-moving objects that have been marked 1101 // through. Note we'd need to change from black to white 1102 // later (concurrently). 1103 bool success = to_ref->AtomicSetReadBarrierPointer</*kCasRelease*/true>( 1104 ReadBarrier::GrayPtr(), 1105 ReadBarrier::BlackPtr()); 1106 DCHECK(success) << "Must succeed as we won the race."; 1107 DCHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::BlackPtr()); 1108 } 1109 } 1110 } 1111#else 1112 DCHECK(!kUseBakerReadBarrier); 1113#endif 1114 if (ReadBarrier::kEnableToSpaceInvariantChecks || kIsDebugBuild) { 1115 ConcurrentCopyingAssertToSpaceInvariantObjectVisitor visitor(this); 1116 visitor(to_ref); 1117 } 1118} 1119 1120void ConcurrentCopying::SwitchToSharedMarkStackMode() { 1121 Thread* self = Thread::Current(); 1122 CHECK(thread_running_gc_ != nullptr); 1123 CHECK_EQ(self, thread_running_gc_); 1124 CHECK(self->GetThreadLocalMarkStack() == nullptr); 1125 MarkStackMode before_mark_stack_mode = mark_stack_mode_.LoadRelaxed(); 1126 CHECK_EQ(static_cast<uint32_t>(before_mark_stack_mode), 1127 static_cast<uint32_t>(kMarkStackModeThreadLocal)); 1128 mark_stack_mode_.StoreRelaxed(kMarkStackModeShared); 1129 CHECK(weak_ref_access_enabled_.LoadRelaxed()); 1130 weak_ref_access_enabled_.StoreRelaxed(false); 1131 QuasiAtomic::ThreadFenceForConstructor(); 1132 // Process the thread local mark stacks one last time after switching to the shared mark stack 1133 // mode and disable weak ref accesses. 
1134 ProcessThreadLocalMarkStacks(true); 1135 if (kVerboseMode) { 1136 LOG(INFO) << "Switched to shared mark stack mode and disabled weak ref access"; 1137 } 1138} 1139 1140void ConcurrentCopying::SwitchToGcExclusiveMarkStackMode() { 1141 Thread* self = Thread::Current(); 1142 CHECK(thread_running_gc_ != nullptr); 1143 CHECK_EQ(self, thread_running_gc_); 1144 CHECK(self->GetThreadLocalMarkStack() == nullptr); 1145 MarkStackMode before_mark_stack_mode = mark_stack_mode_.LoadRelaxed(); 1146 CHECK_EQ(static_cast<uint32_t>(before_mark_stack_mode), 1147 static_cast<uint32_t>(kMarkStackModeShared)); 1148 mark_stack_mode_.StoreRelaxed(kMarkStackModeGcExclusive); 1149 QuasiAtomic::ThreadFenceForConstructor(); 1150 if (kVerboseMode) { 1151 LOG(INFO) << "Switched to GC exclusive mark stack mode"; 1152 } 1153} 1154 1155void ConcurrentCopying::CheckEmptyMarkStack() { 1156 Thread* self = Thread::Current(); 1157 CHECK(thread_running_gc_ != nullptr); 1158 CHECK_EQ(self, thread_running_gc_); 1159 CHECK(self->GetThreadLocalMarkStack() == nullptr); 1160 MarkStackMode mark_stack_mode = mark_stack_mode_.LoadRelaxed(); 1161 if (mark_stack_mode == kMarkStackModeThreadLocal) { 1162 // Thread-local mark stack mode. 1163 RevokeThreadLocalMarkStacks(false); 1164 MutexLock mu(Thread::Current(), mark_stack_lock_); 1165 if (!revoked_mark_stacks_.empty()) { 1166 for (accounting::AtomicStack<mirror::Object>* mark_stack : revoked_mark_stacks_) { 1167 while (!mark_stack->IsEmpty()) { 1168 mirror::Object* obj = mark_stack->PopBack(); 1169 if (kUseBakerReadBarrier) { 1170 mirror::Object* rb_ptr = obj->GetReadBarrierPointer(); 1171 LOG(INFO) << "On mark queue : " << obj << " " << PrettyTypeOf(obj) << " rb_ptr=" << rb_ptr 1172 << " is_marked=" << IsMarked(obj); 1173 } else { 1174 LOG(INFO) << "On mark queue : " << obj << " " << PrettyTypeOf(obj) 1175 << " is_marked=" << IsMarked(obj); 1176 } 1177 } 1178 } 1179 LOG(FATAL) << "mark stack is not empty"; 1180 } 1181 } else { 1182 // Shared, GC-exclusive, or off. 1183 MutexLock mu(Thread::Current(), mark_stack_lock_); 1184 CHECK(gc_mark_stack_->IsEmpty()); 1185 CHECK(revoked_mark_stacks_.empty()); 1186 } 1187} 1188 1189void ConcurrentCopying::SweepSystemWeaks(Thread* self) { 1190 TimingLogger::ScopedTiming split("SweepSystemWeaks", GetTimings()); 1191 ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_); 1192 Runtime::Current()->SweepSystemWeaks(this); 1193} 1194 1195void ConcurrentCopying::Sweep(bool swap_bitmaps) { 1196 { 1197 TimingLogger::ScopedTiming t("MarkStackAsLive", GetTimings()); 1198 accounting::ObjectStack* live_stack = heap_->GetLiveStack(); 1199 if (kEnableFromSpaceAccountingCheck) { 1200 CHECK_GE(live_stack_freeze_size_, live_stack->Size()); 1201 } 1202 heap_->MarkAllocStackAsLive(live_stack); 1203 live_stack->Reset(); 1204 } 1205 CheckEmptyMarkStack(); 1206 TimingLogger::ScopedTiming split("Sweep", GetTimings()); 1207 for (const auto& space : GetHeap()->GetContinuousSpaces()) { 1208 if (space->IsContinuousMemMapAllocSpace()) { 1209 space::ContinuousMemMapAllocSpace* alloc_space = space->AsContinuousMemMapAllocSpace(); 1210 if (space == region_space_ || immune_region_.ContainsSpace(space)) { 1211 continue; 1212 } 1213 TimingLogger::ScopedTiming split2( 1214 alloc_space->IsZygoteSpace() ? 
"SweepZygoteSpace" : "SweepAllocSpace", GetTimings()); 1215 RecordFree(alloc_space->Sweep(swap_bitmaps)); 1216 } 1217 } 1218 SweepLargeObjects(swap_bitmaps); 1219} 1220 1221void ConcurrentCopying::SweepLargeObjects(bool swap_bitmaps) { 1222 TimingLogger::ScopedTiming split("SweepLargeObjects", GetTimings()); 1223 RecordFreeLOS(heap_->GetLargeObjectsSpace()->Sweep(swap_bitmaps)); 1224} 1225 1226class ConcurrentCopyingClearBlackPtrsVisitor { 1227 public: 1228 explicit ConcurrentCopyingClearBlackPtrsVisitor(ConcurrentCopying* cc) 1229 : collector_(cc) {} 1230 void operator()(mirror::Object* obj) const SHARED_REQUIRES(Locks::mutator_lock_) 1231 SHARED_REQUIRES(Locks::heap_bitmap_lock_) { 1232 DCHECK(obj != nullptr); 1233 DCHECK(collector_->heap_->GetMarkBitmap()->Test(obj)) << obj; 1234 DCHECK_EQ(obj->GetReadBarrierPointer(), ReadBarrier::BlackPtr()) << obj; 1235 obj->AtomicSetReadBarrierPointer(ReadBarrier::BlackPtr(), ReadBarrier::WhitePtr()); 1236 DCHECK_EQ(obj->GetReadBarrierPointer(), ReadBarrier::WhitePtr()) << obj; 1237 } 1238 1239 private: 1240 ConcurrentCopying* const collector_; 1241}; 1242 1243// Clear the black ptrs in non-moving objects back to white. 1244void ConcurrentCopying::ClearBlackPtrs() { 1245 CHECK(kUseBakerReadBarrier); 1246 TimingLogger::ScopedTiming split("ClearBlackPtrs", GetTimings()); 1247 ConcurrentCopyingClearBlackPtrsVisitor visitor(this); 1248 for (auto& space : heap_->GetContinuousSpaces()) { 1249 if (space == region_space_) { 1250 continue; 1251 } 1252 accounting::ContinuousSpaceBitmap* mark_bitmap = space->GetMarkBitmap(); 1253 if (kVerboseMode) { 1254 LOG(INFO) << "ClearBlackPtrs: " << *space << " bitmap: " << *mark_bitmap; 1255 } 1256 mark_bitmap->VisitMarkedRange(reinterpret_cast<uintptr_t>(space->Begin()), 1257 reinterpret_cast<uintptr_t>(space->Limit()), 1258 visitor); 1259 } 1260 space::LargeObjectSpace* large_object_space = heap_->GetLargeObjectsSpace(); 1261 large_object_space->GetMarkBitmap()->VisitMarkedRange( 1262 reinterpret_cast<uintptr_t>(large_object_space->Begin()), 1263 reinterpret_cast<uintptr_t>(large_object_space->End()), 1264 visitor); 1265 // Objects on the allocation stack? 1266 if (ReadBarrier::kEnableReadBarrierInvariantChecks || kIsDebugBuild) { 1267 size_t count = GetAllocationStack()->Size(); 1268 auto* it = GetAllocationStack()->Begin(); 1269 auto* end = GetAllocationStack()->End(); 1270 for (size_t i = 0; i < count; ++i, ++it) { 1271 CHECK_LT(it, end); 1272 mirror::Object* obj = it->AsMirrorPtr(); 1273 if (obj != nullptr) { 1274 // Must have been cleared above. 1275 CHECK_EQ(obj->GetReadBarrierPointer(), ReadBarrier::WhitePtr()) << obj; 1276 } 1277 } 1278 } 1279} 1280 1281void ConcurrentCopying::ReclaimPhase() { 1282 TimingLogger::ScopedTiming split("ReclaimPhase", GetTimings()); 1283 if (kVerboseMode) { 1284 LOG(INFO) << "GC ReclaimPhase"; 1285 } 1286 Thread* self = Thread::Current(); 1287 1288 { 1289 // Double-check that the mark stack is empty. 1290 // Note: need to set this after VerifyNoFromSpaceRef(). 1291 is_asserting_to_space_invariant_ = false; 1292 QuasiAtomic::ThreadFenceForConstructor(); 1293 if (kVerboseMode) { 1294 LOG(INFO) << "Issue an empty check point. "; 1295 } 1296 IssueEmptyCheckpoint(); 1297 // Disable the check. 1298 is_mark_stack_push_disallowed_.StoreSequentiallyConsistent(0); 1299 CheckEmptyMarkStack(); 1300 } 1301 1302 { 1303 // Record freed objects. 1304 TimingLogger::ScopedTiming split2("RecordFree", GetTimings()); 1305 // Don't include thread-locals that are in the to-space. 
1306 uint64_t from_bytes = region_space_->GetBytesAllocatedInFromSpace(); 1307 uint64_t from_objects = region_space_->GetObjectsAllocatedInFromSpace(); 1308 uint64_t unevac_from_bytes = region_space_->GetBytesAllocatedInUnevacFromSpace(); 1309 uint64_t unevac_from_objects = region_space_->GetObjectsAllocatedInUnevacFromSpace(); 1310 uint64_t to_bytes = bytes_moved_.LoadSequentiallyConsistent(); 1311 uint64_t to_objects = objects_moved_.LoadSequentiallyConsistent(); 1312 if (kEnableFromSpaceAccountingCheck) { 1313 CHECK_EQ(from_space_num_objects_at_first_pause_, from_objects + unevac_from_objects); 1314 CHECK_EQ(from_space_num_bytes_at_first_pause_, from_bytes + unevac_from_bytes); 1315 } 1316 CHECK_LE(to_objects, from_objects); 1317 CHECK_LE(to_bytes, from_bytes); 1318 int64_t freed_bytes = from_bytes - to_bytes; 1319 int64_t freed_objects = from_objects - to_objects; 1320 if (kVerboseMode) { 1321 LOG(INFO) << "RecordFree:" 1322 << " from_bytes=" << from_bytes << " from_objects=" << from_objects 1323 << " unevac_from_bytes=" << unevac_from_bytes << " unevac_from_objects=" << unevac_from_objects 1324 << " to_bytes=" << to_bytes << " to_objects=" << to_objects 1325 << " freed_bytes=" << freed_bytes << " freed_objects=" << freed_objects 1326 << " from_space size=" << region_space_->FromSpaceSize() 1327 << " unevac_from_space size=" << region_space_->UnevacFromSpaceSize() 1328 << " to_space size=" << region_space_->ToSpaceSize(); 1329 LOG(INFO) << "(before) num_bytes_allocated=" << heap_->num_bytes_allocated_.LoadSequentiallyConsistent(); 1330 } 1331 RecordFree(ObjectBytePair(freed_objects, freed_bytes)); 1332 if (kVerboseMode) { 1333 LOG(INFO) << "(after) num_bytes_allocated=" << heap_->num_bytes_allocated_.LoadSequentiallyConsistent(); 1334 } 1335 } 1336 1337 { 1338 TimingLogger::ScopedTiming split3("ComputeUnevacFromSpaceLiveRatio", GetTimings()); 1339 ComputeUnevacFromSpaceLiveRatio(); 1340 } 1341 1342 { 1343 TimingLogger::ScopedTiming split4("ClearFromSpace", GetTimings()); 1344 region_space_->ClearFromSpace(); 1345 } 1346 1347 { 1348 WriterMutexLock mu(self, *Locks::heap_bitmap_lock_); 1349 if (kUseBakerReadBarrier) { 1350 ClearBlackPtrs(); 1351 } 1352 Sweep(false); 1353 SwapBitmaps(); 1354 heap_->UnBindBitmaps(); 1355 1356 // Remove bitmaps for the immune spaces. 1357 while (!cc_bitmaps_.empty()) { 1358 accounting::ContinuousSpaceBitmap* cc_bitmap = cc_bitmaps_.back(); 1359 cc_heap_bitmap_->RemoveContinuousSpaceBitmap(cc_bitmap); 1360 delete cc_bitmap; 1361 cc_bitmaps_.pop_back(); 1362 } 1363 region_space_bitmap_ = nullptr; 1364 } 1365 1366 CheckEmptyMarkStack(); 1367 1368 if (kVerboseMode) { 1369 LOG(INFO) << "GC end of ReclaimPhase"; 1370 } 1371} 1372 1373class ConcurrentCopyingComputeUnevacFromSpaceLiveRatioVisitor { 1374 public: 1375 explicit ConcurrentCopyingComputeUnevacFromSpaceLiveRatioVisitor(ConcurrentCopying* cc) 1376 : collector_(cc) {} 1377 void operator()(mirror::Object* ref) const SHARED_REQUIRES(Locks::mutator_lock_) 1378 SHARED_REQUIRES(Locks::heap_bitmap_lock_) { 1379 DCHECK(ref != nullptr); 1380 DCHECK(collector_->region_space_bitmap_->Test(ref)) << ref; 1381 DCHECK(collector_->region_space_->IsInUnevacFromSpace(ref)) << ref; 1382 if (kUseBakerReadBarrier) { 1383 DCHECK_EQ(ref->GetReadBarrierPointer(), ReadBarrier::BlackPtr()) << ref; 1384 // Clear the black ptr. 
1385 ref->AtomicSetReadBarrierPointer(ReadBarrier::BlackPtr(), ReadBarrier::WhitePtr()); 1386 DCHECK_EQ(ref->GetReadBarrierPointer(), ReadBarrier::WhitePtr()) << ref; 1387 } 1388 size_t obj_size = ref->SizeOf(); 1389 size_t alloc_size = RoundUp(obj_size, space::RegionSpace::kAlignment); 1390 collector_->region_space_->AddLiveBytes(ref, alloc_size); 1391 } 1392 1393 private: 1394 ConcurrentCopying* const collector_; 1395}; 1396 1397// Compute how much live objects are left in regions. 1398void ConcurrentCopying::ComputeUnevacFromSpaceLiveRatio() { 1399 region_space_->AssertAllRegionLiveBytesZeroOrCleared(); 1400 ConcurrentCopyingComputeUnevacFromSpaceLiveRatioVisitor visitor(this); 1401 region_space_bitmap_->VisitMarkedRange(reinterpret_cast<uintptr_t>(region_space_->Begin()), 1402 reinterpret_cast<uintptr_t>(region_space_->Limit()), 1403 visitor); 1404} 1405 1406// Assert the to-space invariant. 1407void ConcurrentCopying::AssertToSpaceInvariant(mirror::Object* obj, MemberOffset offset, 1408 mirror::Object* ref) { 1409 CHECK(heap_->collector_type_ == kCollectorTypeCC) << static_cast<size_t>(heap_->collector_type_); 1410 if (is_asserting_to_space_invariant_) { 1411 if (region_space_->IsInToSpace(ref)) { 1412 // OK. 1413 return; 1414 } else if (region_space_->IsInUnevacFromSpace(ref)) { 1415 CHECK(region_space_bitmap_->Test(ref)) << ref; 1416 } else if (region_space_->IsInFromSpace(ref)) { 1417 // Not OK. Do extra logging. 1418 if (obj != nullptr) { 1419 LogFromSpaceRefHolder(obj, offset); 1420 } 1421 ref->GetLockWord(false).Dump(LOG(INTERNAL_FATAL)); 1422 CHECK(false) << "Found from-space ref " << ref << " " << PrettyTypeOf(ref); 1423 } else { 1424 AssertToSpaceInvariantInNonMovingSpace(obj, ref); 1425 } 1426 } 1427} 1428 1429class RootPrinter { 1430 public: 1431 RootPrinter() { } 1432 1433 template <class MirrorType> 1434 ALWAYS_INLINE void VisitRootIfNonNull(mirror::CompressedReference<MirrorType>* root) 1435 SHARED_REQUIRES(Locks::mutator_lock_) { 1436 if (!root->IsNull()) { 1437 VisitRoot(root); 1438 } 1439 } 1440 1441 template <class MirrorType> 1442 void VisitRoot(mirror::Object** root) 1443 SHARED_REQUIRES(Locks::mutator_lock_) { 1444 LOG(INTERNAL_FATAL) << "root=" << root << " ref=" << *root; 1445 } 1446 1447 template <class MirrorType> 1448 void VisitRoot(mirror::CompressedReference<MirrorType>* root) 1449 SHARED_REQUIRES(Locks::mutator_lock_) { 1450 LOG(INTERNAL_FATAL) << "root=" << root << " ref=" << root->AsMirrorPtr(); 1451 } 1452}; 1453 1454void ConcurrentCopying::AssertToSpaceInvariant(GcRootSource* gc_root_source, 1455 mirror::Object* ref) { 1456 CHECK(heap_->collector_type_ == kCollectorTypeCC) << static_cast<size_t>(heap_->collector_type_); 1457 if (is_asserting_to_space_invariant_) { 1458 if (region_space_->IsInToSpace(ref)) { 1459 // OK. 1460 return; 1461 } else if (region_space_->IsInUnevacFromSpace(ref)) { 1462 CHECK(region_space_bitmap_->Test(ref)) << ref; 1463 } else if (region_space_->IsInFromSpace(ref)) { 1464 // Not OK. Do extra logging. 1465 if (gc_root_source == nullptr) { 1466 // No info. 
class RootPrinter {
 public:
  RootPrinter() { }

  template <class MirrorType>
  ALWAYS_INLINE void VisitRootIfNonNull(mirror::CompressedReference<MirrorType>* root)
      SHARED_REQUIRES(Locks::mutator_lock_) {
    if (!root->IsNull()) {
      VisitRoot(root);
    }
  }

  template <class MirrorType>
  void VisitRoot(mirror::Object** root)
      SHARED_REQUIRES(Locks::mutator_lock_) {
    LOG(INTERNAL_FATAL) << "root=" << root << " ref=" << *root;
  }

  template <class MirrorType>
  void VisitRoot(mirror::CompressedReference<MirrorType>* root)
      SHARED_REQUIRES(Locks::mutator_lock_) {
    LOG(INTERNAL_FATAL) << "root=" << root << " ref=" << root->AsMirrorPtr();
  }
};

void ConcurrentCopying::AssertToSpaceInvariant(GcRootSource* gc_root_source,
                                               mirror::Object* ref) {
  CHECK(heap_->collector_type_ == kCollectorTypeCC) << static_cast<size_t>(heap_->collector_type_);
  if (is_asserting_to_space_invariant_) {
    if (region_space_->IsInToSpace(ref)) {
      // OK.
      return;
    } else if (region_space_->IsInUnevacFromSpace(ref)) {
      CHECK(region_space_bitmap_->Test(ref)) << ref;
    } else if (region_space_->IsInFromSpace(ref)) {
      // Not OK. Do extra logging.
      if (gc_root_source == nullptr) {
        // No info.
      } else if (gc_root_source->HasArtField()) {
        ArtField* field = gc_root_source->GetArtField();
        LOG(INTERNAL_FATAL) << "gc root in field " << field << " " << PrettyField(field);
        RootPrinter root_printer;
        field->VisitRoots(root_printer);
      } else if (gc_root_source->HasArtMethod()) {
        ArtMethod* method = gc_root_source->GetArtMethod();
        LOG(INTERNAL_FATAL) << "gc root in method " << method << " " << PrettyMethod(method);
        RootPrinter root_printer;
        method->VisitRoots(root_printer, sizeof(void*));
      }
      ref->GetLockWord(false).Dump(LOG(INTERNAL_FATAL));
      region_space_->DumpNonFreeRegions(LOG(INTERNAL_FATAL));
      PrintFileToLog("/proc/self/maps", LogSeverity::INTERNAL_FATAL);
      MemMap::DumpMaps(LOG(INTERNAL_FATAL), true);
      CHECK(false) << "Found from-space ref " << ref << " " << PrettyTypeOf(ref);
    } else {
      AssertToSpaceInvariantInNonMovingSpace(nullptr, ref);
    }
  }
}

void ConcurrentCopying::LogFromSpaceRefHolder(mirror::Object* obj, MemberOffset offset) {
  if (kUseBakerReadBarrier) {
    LOG(INFO) << "holder=" << obj << " " << PrettyTypeOf(obj)
              << " holder rb_ptr=" << obj->GetReadBarrierPointer();
  } else {
    LOG(INFO) << "holder=" << obj << " " << PrettyTypeOf(obj);
  }
  if (region_space_->IsInFromSpace(obj)) {
    LOG(INFO) << "holder is in the from-space.";
  } else if (region_space_->IsInToSpace(obj)) {
    LOG(INFO) << "holder is in the to-space.";
  } else if (region_space_->IsInUnevacFromSpace(obj)) {
    LOG(INFO) << "holder is in the unevac from-space.";
    if (region_space_bitmap_->Test(obj)) {
      LOG(INFO) << "holder is marked in the region space bitmap.";
    } else {
      LOG(INFO) << "holder is not marked in the region space bitmap.";
    }
  } else {
    // In a non-moving space.
    if (immune_region_.ContainsObject(obj)) {
      LOG(INFO) << "holder is in the image or the zygote space.";
      accounting::ContinuousSpaceBitmap* cc_bitmap =
          cc_heap_bitmap_->GetContinuousSpaceBitmap(obj);
      CHECK(cc_bitmap != nullptr)
          << "An immune space object must have a bitmap.";
      if (cc_bitmap->Test(obj)) {
        LOG(INFO) << "holder is marked in the bit map.";
      } else {
        LOG(INFO) << "holder is NOT marked in the bit map.";
      }
    } else {
      LOG(INFO) << "holder is in a non-moving (or main) space.";
      accounting::ContinuousSpaceBitmap* mark_bitmap =
          heap_mark_bitmap_->GetContinuousSpaceBitmap(obj);
      accounting::LargeObjectBitmap* los_bitmap =
          heap_mark_bitmap_->GetLargeObjectBitmap(obj);
      CHECK(los_bitmap != nullptr) << "LOS bitmap covers the entire address range";
      bool is_los = mark_bitmap == nullptr;
      if (!is_los && mark_bitmap->Test(obj)) {
        LOG(INFO) << "holder is marked in the mark bit map.";
      } else if (is_los && los_bitmap->Test(obj)) {
        LOG(INFO) << "holder is marked in the los bit map.";
      } else {
        // If ref is on the allocation stack, then it is considered
        // marked/alive (but not necessarily on the live stack).
        if (IsOnAllocStack(obj)) {
          LOG(INFO) << "holder is on the alloc stack.";
        } else {
          LOG(INFO) << "holder is not marked or on the alloc stack.";
        }
      }
    }
  }
  LOG(INFO) << "offset=" << offset.SizeValue();
}
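
// Several routines in this file (the logging above, AssertToSpaceInvariantInNonMovingSpace,
// IsMarked, MarkNonMoving) apply the same marked-ness rule for objects outside the region
// space. A minimal restatement of that rule; the parameters below are hypothetical
// stand-ins for the bitmap tests and the allocation-stack lookup:
#if 0  // Illustrative sketch only; not compiled with the collector.
// An object outside the region space counts as marked/alive if its covering bitmap says
// so (the continuous-space mark bitmap, or the LOS bitmap when no continuous space covers
// it), or if it was allocated recently enough to still sit on the allocation stack.
inline bool ConsideredMarked(bool is_los,
                             bool marked_in_space_bitmap,
                             bool marked_in_los_bitmap,
                             bool on_alloc_stack) {
  if (!is_los && marked_in_space_bitmap) {
    return true;
  }
  if (is_los && marked_in_los_bitmap) {
    return true;
  }
  return on_alloc_stack;
}
#endif
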
void ConcurrentCopying::AssertToSpaceInvariantInNonMovingSpace(mirror::Object* obj,
                                                               mirror::Object* ref) {
  // In a non-moving space. Check that the ref is marked.
  if (immune_region_.ContainsObject(ref)) {
    accounting::ContinuousSpaceBitmap* cc_bitmap =
        cc_heap_bitmap_->GetContinuousSpaceBitmap(ref);
    CHECK(cc_bitmap != nullptr)
        << "An immune space ref must have a bitmap. " << ref;
    if (kUseBakerReadBarrier) {
      CHECK(cc_bitmap->Test(ref))
          << "Unmarked immune space ref. obj=" << obj << " rb_ptr="
          << obj->GetReadBarrierPointer() << " ref=" << ref;
    } else {
      CHECK(cc_bitmap->Test(ref))
          << "Unmarked immune space ref. obj=" << obj << " ref=" << ref;
    }
  } else {
    accounting::ContinuousSpaceBitmap* mark_bitmap =
        heap_mark_bitmap_->GetContinuousSpaceBitmap(ref);
    accounting::LargeObjectBitmap* los_bitmap =
        heap_mark_bitmap_->GetLargeObjectBitmap(ref);
    CHECK(los_bitmap != nullptr) << "LOS bitmap covers the entire address range";
    bool is_los = mark_bitmap == nullptr;
    if ((!is_los && mark_bitmap->Test(ref)) ||
        (is_los && los_bitmap->Test(ref))) {
      // OK.
    } else {
      // If ref is on the allocation stack, then it may not be
      // marked live, but considered marked/alive (but not
      // necessarily on the live stack).
      CHECK(IsOnAllocStack(ref)) << "Unmarked ref that's not on the allocation stack. "
                                 << "obj=" << obj << " ref=" << ref;
    }
  }
}

// Used to scan ref fields of an object.
class ConcurrentCopyingRefFieldsVisitor {
 public:
  explicit ConcurrentCopyingRefFieldsVisitor(ConcurrentCopying* collector)
      : collector_(collector) {}

  void operator()(mirror::Object* obj, MemberOffset offset, bool /* is_static */)
      const ALWAYS_INLINE SHARED_REQUIRES(Locks::mutator_lock_)
      SHARED_REQUIRES(Locks::heap_bitmap_lock_) {
    collector_->Process(obj, offset);
  }

  void operator()(mirror::Class* klass, mirror::Reference* ref) const
      SHARED_REQUIRES(Locks::mutator_lock_) ALWAYS_INLINE {
    CHECK(klass->IsTypeOfReferenceClass());
    collector_->DelayReferenceReferent(klass, ref);
  }

  void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const
      ALWAYS_INLINE
      SHARED_REQUIRES(Locks::mutator_lock_) {
    if (!root->IsNull()) {
      VisitRoot(root);
    }
  }

  void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
      ALWAYS_INLINE
      SHARED_REQUIRES(Locks::mutator_lock_) {
    collector_->MarkRoot(root);
  }

 private:
  ConcurrentCopying* const collector_;
};

// Scan ref fields of an object.
inline void ConcurrentCopying::Scan(mirror::Object* to_ref) {
  DCHECK(!region_space_->IsInFromSpace(to_ref));
  ConcurrentCopyingRefFieldsVisitor visitor(this);
  to_ref->VisitReferences(visitor, visitor);
}

// Process a field.
inline void ConcurrentCopying::Process(mirror::Object* obj, MemberOffset offset) {
  mirror::Object* ref = obj->GetFieldObject<
      mirror::Object, kVerifyNone, kWithoutReadBarrier, false>(offset);
  mirror::Object* to_ref = Mark(ref);
  if (to_ref == ref) {
    return;
  }
  // This may fail if the mutator writes to the field at the same time. But it's ok.
  mirror::Object* expected_ref = ref;
  mirror::Object* new_ref = to_ref;
  do {
    if (expected_ref !=
        obj->GetFieldObject<mirror::Object, kVerifyNone, kWithoutReadBarrier, false>(offset)) {
      // It was updated by the mutator.
      break;
    }
  } while (!obj->CasFieldWeakRelaxedObjectWithoutWriteBarrier<
      false, false, kVerifyNone>(offset, expected_ref, new_ref));
}
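
// Process() above (and the root-visiting code that follows) publishes the forwarded
// reference with a weak CAS and simply gives up if a mutator races ahead and stores a
// different value into the field, which is safe because the mutator can only have written
// a value that already satisfies the to-space invariant. A self-contained sketch of that
// retry-or-abandon pattern using std::atomic; UpdateIfStillEqual is a hypothetical name
// used only for this example.
#if 0  // Illustrative sketch only; not compiled with the collector.
#include <atomic>

// Attempts to replace 'expected' with 'desired' in 'field'. Returns true if this call
// performed the update, false if the field no longer holds 'expected' (for the collector
// that means a concurrent write already happened, which is equally acceptable).
template <typename T>
bool UpdateIfStillEqual(std::atomic<T*>& field, T* expected, T* desired) {
  T* current = field.load(std::memory_order_relaxed);
  while (current == expected) {
    // compare_exchange_weak may fail spuriously; on failure it reloads 'current', so the
    // loop exits as soon as another thread has genuinely changed the field.
    if (field.compare_exchange_weak(current, desired, std::memory_order_relaxed)) {
      return true;
    }
  }
  return false;
}
#endif
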
// Process some roots.
inline void ConcurrentCopying::VisitRoots(
    mirror::Object*** roots, size_t count, const RootInfo& info ATTRIBUTE_UNUSED) {
  for (size_t i = 0; i < count; ++i) {
    mirror::Object** root = roots[i];
    mirror::Object* ref = *root;
    mirror::Object* to_ref = Mark(ref);
    if (to_ref == ref) {
      continue;
    }
    Atomic<mirror::Object*>* addr = reinterpret_cast<Atomic<mirror::Object*>*>(root);
    mirror::Object* expected_ref = ref;
    mirror::Object* new_ref = to_ref;
    do {
      if (expected_ref != addr->LoadRelaxed()) {
        // It was updated by the mutator.
        break;
      }
    } while (!addr->CompareExchangeWeakRelaxed(expected_ref, new_ref));
  }
}

inline void ConcurrentCopying::MarkRoot(mirror::CompressedReference<mirror::Object>* root) {
  DCHECK(!root->IsNull());
  mirror::Object* const ref = root->AsMirrorPtr();
  mirror::Object* to_ref = Mark(ref);
  if (to_ref != ref) {
    auto* addr = reinterpret_cast<Atomic<mirror::CompressedReference<mirror::Object>>*>(root);
    auto expected_ref = mirror::CompressedReference<mirror::Object>::FromMirrorPtr(ref);
    auto new_ref = mirror::CompressedReference<mirror::Object>::FromMirrorPtr(to_ref);
    // If the CAS fails, then it was updated by the mutator.
    do {
      if (ref != addr->LoadRelaxed().AsMirrorPtr()) {
        // It was updated by the mutator.
        break;
      }
    } while (!addr->CompareExchangeWeakRelaxed(expected_ref, new_ref));
  }
}

inline void ConcurrentCopying::VisitRoots(
    mirror::CompressedReference<mirror::Object>** roots, size_t count,
    const RootInfo& info ATTRIBUTE_UNUSED) {
  for (size_t i = 0; i < count; ++i) {
    mirror::CompressedReference<mirror::Object>* const root = roots[i];
    if (!root->IsNull()) {
      MarkRoot(root);
    }
  }
}

// Fill the given memory block with a dummy object. Used to fill in a copy of an
// object that was lost in a race.
void ConcurrentCopying::FillWithDummyObject(mirror::Object* dummy_obj, size_t byte_size) {
  CHECK_ALIGNED(byte_size, kObjectAlignment);
  memset(dummy_obj, 0, byte_size);
  mirror::Class* int_array_class = mirror::IntArray::GetArrayClass();
  CHECK(int_array_class != nullptr);
  AssertToSpaceInvariant(nullptr, MemberOffset(0), int_array_class);
  size_t component_size = int_array_class->GetComponentSize();
  CHECK_EQ(component_size, sizeof(int32_t));
  size_t data_offset = mirror::Array::DataOffset(component_size).SizeValue();
  if (data_offset > byte_size) {
    // An int array is too big. Use java.lang.Object.
    mirror::Class* java_lang_Object = WellKnownClasses::ToClass(WellKnownClasses::java_lang_Object);
    AssertToSpaceInvariant(nullptr, MemberOffset(0), java_lang_Object);
    CHECK_EQ(byte_size, java_lang_Object->GetObjectSize());
    dummy_obj->SetClass(java_lang_Object);
    CHECK_EQ(byte_size, dummy_obj->SizeOf());
  } else {
    // Use an int array.
    dummy_obj->SetClass(int_array_class);
    CHECK(dummy_obj->IsArrayInstance());
    int32_t length = (byte_size - data_offset) / component_size;
    dummy_obj->AsArray()->SetLength(length);
    CHECK_EQ(dummy_obj->AsArray()->GetLength(), length)
        << "byte_size=" << byte_size << " length=" << length
        << " component_size=" << component_size << " data_offset=" << data_offset;
    CHECK_EQ(byte_size, dummy_obj->SizeOf())
        << "byte_size=" << byte_size << " length=" << length
        << " component_size=" << component_size << " data_offset=" << data_offset;
  }
}
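
// The filler logic above uses java.lang.Object for holes too small to hold an int array
// header, and otherwise sizes an int[] so that header plus elements cover the hole exactly.
// A standalone sketch of that length computation; the constants below are illustrative
// assumptions for the example, not ART's real header sizes.
#if 0  // Illustrative sketch only; not compiled with the collector.
#include <cassert>
#include <cstddef>
#include <cstdint>

constexpr size_t kExampleArrayDataOffset = 16;  // Hypothetical int[] header size.
constexpr size_t kExampleComponentSize = sizeof(int32_t);

// Returns the element count an int array needs so that header + elements fill byte_size
// exactly. Only valid when the hole is at least one header long and the remainder is a
// multiple of the component size.
inline int32_t FillerArrayLength(size_t byte_size) {
  assert(byte_size >= kExampleArrayDataOffset);
  assert((byte_size - kExampleArrayDataOffset) % kExampleComponentSize == 0);
  return static_cast<int32_t>((byte_size - kExampleArrayDataOffset) / kExampleComponentSize);
}
#endif
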
// Reuse memory blocks that were copies of objects lost in races.
mirror::Object* ConcurrentCopying::AllocateInSkippedBlock(size_t alloc_size) {
  // Try to reuse the blocks that were unused due to CAS failures.
  CHECK_ALIGNED(alloc_size, space::RegionSpace::kAlignment);
  Thread* self = Thread::Current();
  size_t min_object_size = RoundUp(sizeof(mirror::Object), space::RegionSpace::kAlignment);
  MutexLock mu(self, skipped_blocks_lock_);
  auto it = skipped_blocks_map_.lower_bound(alloc_size);
  if (it == skipped_blocks_map_.end()) {
    // Not found.
    return nullptr;
  }
  {
    size_t byte_size = it->first;
    CHECK_GE(byte_size, alloc_size);
    if (byte_size > alloc_size && byte_size - alloc_size < min_object_size) {
      // If the remainder would be too small for a dummy object, retry with a larger request size.
      it = skipped_blocks_map_.lower_bound(alloc_size + min_object_size);
      if (it == skipped_blocks_map_.end()) {
        // Not found.
        return nullptr;
      }
      CHECK_ALIGNED(it->first - alloc_size, space::RegionSpace::kAlignment);
      CHECK_GE(it->first - alloc_size, min_object_size)
          << "byte_size=" << byte_size << " it->first=" << it->first
          << " alloc_size=" << alloc_size;
    }
  }
  // Found a block.
  CHECK(it != skipped_blocks_map_.end());
  size_t byte_size = it->first;
  uint8_t* addr = it->second;
  CHECK_GE(byte_size, alloc_size);
  CHECK(region_space_->IsInToSpace(reinterpret_cast<mirror::Object*>(addr)));
  CHECK_ALIGNED(byte_size, space::RegionSpace::kAlignment);
  if (kVerboseMode) {
    LOG(INFO) << "Reusing skipped bytes : " << reinterpret_cast<void*>(addr) << ", " << byte_size;
  }
  skipped_blocks_map_.erase(it);
  memset(addr, 0, byte_size);
  if (byte_size > alloc_size) {
    // Return the remainder to the map.
    CHECK_ALIGNED(byte_size - alloc_size, space::RegionSpace::kAlignment);
    CHECK_GE(byte_size - alloc_size, min_object_size);
    FillWithDummyObject(reinterpret_cast<mirror::Object*>(addr + alloc_size),
                        byte_size - alloc_size);
    CHECK(region_space_->IsInToSpace(reinterpret_cast<mirror::Object*>(addr + alloc_size)));
    skipped_blocks_map_.insert(std::make_pair(byte_size - alloc_size, addr + alloc_size));
  }
  return reinterpret_cast<mirror::Object*>(addr);
}
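
// A self-contained sketch of the best-fit reuse strategy above, with a plain std::multimap
// standing in for skipped_blocks_map_ and a fixed minimum object size (the names and sizes
// here are illustrative only). The key subtlety is the second lookup: a block may not be
// split if the tail would be too small to describe with a filler object.
#if 0  // Illustrative sketch only; not compiled with the collector.
#include <cstddef>
#include <cstdint>
#include <map>
#include <utility>

constexpr size_t kExampleMinObjectSize = 16;

using BlockMap = std::multimap<size_t, uint8_t*>;  // Byte size -> start address.

// Returns a block of at least alloc_size bytes, or nullptr. Any usable remainder is
// returned to the map so later allocations can reuse it.
inline uint8_t* TakeBestFit(BlockMap& blocks, size_t alloc_size) {
  auto it = blocks.lower_bound(alloc_size);
  if (it == blocks.end()) {
    return nullptr;
  }
  if (it->first > alloc_size && it->first - alloc_size < kExampleMinObjectSize) {
    // Splitting would leave an un-describable sliver; look for a strictly larger block.
    it = blocks.lower_bound(alloc_size + kExampleMinObjectSize);
    if (it == blocks.end()) {
      return nullptr;
    }
  }
  size_t byte_size = it->first;
  uint8_t* addr = it->second;
  blocks.erase(it);
  if (byte_size > alloc_size) {
    blocks.insert(std::make_pair(byte_size - alloc_size, addr + alloc_size));
  }
  return addr;
}
#endif
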
mirror::Object* ConcurrentCopying::Copy(mirror::Object* from_ref) {
  DCHECK(region_space_->IsInFromSpace(from_ref));
  // No read barrier to avoid nested RB that might violate the to-space
  // invariant. Note that from_ref is a from-space ref so the SizeOf()
  // call will access the from-space meta objects, but it's ok and necessary.
  size_t obj_size = from_ref->SizeOf<kDefaultVerifyFlags, kWithoutReadBarrier>();
  size_t region_space_alloc_size = RoundUp(obj_size, space::RegionSpace::kAlignment);
  size_t region_space_bytes_allocated = 0U;
  size_t non_moving_space_bytes_allocated = 0U;
  size_t bytes_allocated = 0U;
  size_t dummy;
  mirror::Object* to_ref = region_space_->AllocNonvirtual<true>(
      region_space_alloc_size, &region_space_bytes_allocated, nullptr, &dummy);
  bytes_allocated = region_space_bytes_allocated;
  if (to_ref != nullptr) {
    DCHECK_EQ(region_space_alloc_size, region_space_bytes_allocated);
  }
  bool fall_back_to_non_moving = false;
  if (UNLIKELY(to_ref == nullptr)) {
    // Failed to allocate in the region space. Try the skipped blocks.
    to_ref = AllocateInSkippedBlock(region_space_alloc_size);
    if (to_ref != nullptr) {
      // Succeeded in allocating from a skipped block.
      if (heap_->use_tlab_) {
        // This is necessary for the tlab case as it's not accounted in the space.
        region_space_->RecordAlloc(to_ref);
      }
      bytes_allocated = region_space_alloc_size;
    } else {
      // Fall back to the non-moving space.
      fall_back_to_non_moving = true;
      if (kVerboseMode) {
        LOG(INFO) << "Out of memory in the to-space. Fall back to non-moving. skipped_bytes="
                  << to_space_bytes_skipped_.LoadSequentiallyConsistent()
                  << " skipped_objects=" << to_space_objects_skipped_.LoadSequentiallyConsistent();
      }
      to_ref = heap_->non_moving_space_->Alloc(Thread::Current(), obj_size,
                                               &non_moving_space_bytes_allocated, nullptr, &dummy);
      CHECK(to_ref != nullptr) << "Fall-back non-moving space allocation failed";
      bytes_allocated = non_moving_space_bytes_allocated;
      // Mark it in the mark bitmap.
      accounting::ContinuousSpaceBitmap* mark_bitmap =
          heap_mark_bitmap_->GetContinuousSpaceBitmap(to_ref);
      CHECK(mark_bitmap != nullptr);
      CHECK(!mark_bitmap->AtomicTestAndSet(to_ref));
    }
  }
  DCHECK(to_ref != nullptr);

  // Attempt to install the forwarding pointer. This is in a loop as the
  // lock word atomic write can fail.
  while (true) {
    // Copy the object. TODO: copy only the lockword in the second iteration and on?
    memcpy(to_ref, from_ref, obj_size);

    LockWord old_lock_word = to_ref->GetLockWord(false);

    if (old_lock_word.GetState() == LockWord::kForwardingAddress) {
      // Lost the race. Another thread (either GC or mutator) stored
      // the forwarding pointer first. Make the lost copy (to_ref)
      // look like a valid but dead (dummy) object and keep it for
      // future reuse.
      FillWithDummyObject(to_ref, bytes_allocated);
      if (!fall_back_to_non_moving) {
        DCHECK(region_space_->IsInToSpace(to_ref));
        if (bytes_allocated > space::RegionSpace::kRegionSize) {
          // Free the large alloc.
          region_space_->FreeLarge(to_ref, bytes_allocated);
        } else {
          // Record the lost copy for later reuse.
          heap_->num_bytes_allocated_.FetchAndAddSequentiallyConsistent(bytes_allocated);
          to_space_bytes_skipped_.FetchAndAddSequentiallyConsistent(bytes_allocated);
          to_space_objects_skipped_.FetchAndAddSequentiallyConsistent(1);
          MutexLock mu(Thread::Current(), skipped_blocks_lock_);
          skipped_blocks_map_.insert(std::make_pair(bytes_allocated,
                                                    reinterpret_cast<uint8_t*>(to_ref)));
        }
      } else {
        DCHECK(heap_->non_moving_space_->HasAddress(to_ref));
        DCHECK_EQ(bytes_allocated, non_moving_space_bytes_allocated);
        // Free the non-moving-space chunk.
        accounting::ContinuousSpaceBitmap* mark_bitmap =
            heap_mark_bitmap_->GetContinuousSpaceBitmap(to_ref);
        CHECK(mark_bitmap != nullptr);
        CHECK(mark_bitmap->Clear(to_ref));
        heap_->non_moving_space_->Free(Thread::Current(), to_ref);
      }

      // Get the winner's forward ptr.
      mirror::Object* lost_fwd_ptr = to_ref;
      to_ref = reinterpret_cast<mirror::Object*>(old_lock_word.ForwardingAddress());
      CHECK(to_ref != nullptr);
      CHECK_NE(to_ref, lost_fwd_ptr);
      CHECK(region_space_->IsInToSpace(to_ref) || heap_->non_moving_space_->HasAddress(to_ref));
      CHECK_NE(to_ref->GetLockWord(false).GetState(), LockWord::kForwardingAddress);
      return to_ref;
    }

    // Set the gray ptr.
    if (kUseBakerReadBarrier) {
      to_ref->SetReadBarrierPointer(ReadBarrier::GrayPtr());
    }

    LockWord new_lock_word = LockWord::FromForwardingAddress(reinterpret_cast<size_t>(to_ref));

    // Try to atomically write the fwd ptr.
    bool success = from_ref->CasLockWordWeakSequentiallyConsistent(old_lock_word, new_lock_word);
    if (LIKELY(success)) {
      // The CAS succeeded.
      objects_moved_.FetchAndAddSequentiallyConsistent(1);
      bytes_moved_.FetchAndAddSequentiallyConsistent(region_space_alloc_size);
      if (LIKELY(!fall_back_to_non_moving)) {
        DCHECK(region_space_->IsInToSpace(to_ref));
      } else {
        DCHECK(heap_->non_moving_space_->HasAddress(to_ref));
        DCHECK_EQ(bytes_allocated, non_moving_space_bytes_allocated);
      }
      if (kUseBakerReadBarrier) {
        DCHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr());
      }
      DCHECK(GetFwdPtr(from_ref) == to_ref);
      CHECK_NE(to_ref->GetLockWord(false).GetState(), LockWord::kForwardingAddress);
      PushOntoMarkStack(to_ref);
      return to_ref;
    } else {
      // The CAS failed. It may have lost the race or may have failed
      // due to monitor/hashcode ops. Either way, retry.
    }
  }
}
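
// A condensed, standalone sketch of the forwarding-pointer race that Copy() resolves above.
// The types and names below are hypothetical; ART really multiplexes the forwarding address
// into the object's lock word and recycles the losing copy via FillWithDummyObject() and the
// skipped-blocks map, and it also retries when the CAS fails for monitor/hash-code reasons
// rather than forwarding. This sketch collapses the lock word to a pure forwarding slot.
#if 0  // Illustrative sketch only; not compiled with the collector.
#include <atomic>
#include <cstdint>

struct ExampleObject {
  std::atomic<uintptr_t> forwarding{0};  // 0 means "not forwarded yet".
  // ... payload ...
};

// Each racing thread allocates and fills its own candidate copy, then tries to be the one
// that publishes it. Exactly one CAS wins; every loser discards its copy and adopts the
// winner's.
inline ExampleObject* Forward(ExampleObject* from, ExampleObject* my_copy,
                              void (*discard)(ExampleObject*)) {
  uintptr_t expected = 0;
  if (from->forwarding.compare_exchange_strong(expected,
                                               reinterpret_cast<uintptr_t>(my_copy),
                                               std::memory_order_seq_cst)) {
    return my_copy;  // This thread installed the forwarding pointer.
  }
  discard(my_copy);  // Lost the race: recycle or abandon the redundant copy.
  return reinterpret_cast<ExampleObject*>(expected);  // 'expected' now holds the winner.
}
#endif
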
mirror::Object* ConcurrentCopying::IsMarked(mirror::Object* from_ref) {
  DCHECK(from_ref != nullptr);
  space::RegionSpace::RegionType rtype = region_space_->GetRegionType(from_ref);
  if (rtype == space::RegionSpace::RegionType::kRegionTypeToSpace) {
    // It's already marked.
    return from_ref;
  }
  mirror::Object* to_ref;
  if (rtype == space::RegionSpace::RegionType::kRegionTypeFromSpace) {
    to_ref = GetFwdPtr(from_ref);
    DCHECK(to_ref == nullptr || region_space_->IsInToSpace(to_ref) ||
           heap_->non_moving_space_->HasAddress(to_ref))
        << "from_ref=" << from_ref << " to_ref=" << to_ref;
  } else if (rtype == space::RegionSpace::RegionType::kRegionTypeUnevacFromSpace) {
    if (region_space_bitmap_->Test(from_ref)) {
      to_ref = from_ref;
    } else {
      to_ref = nullptr;
    }
  } else {
    // from_ref is in a non-moving space.
    if (immune_region_.ContainsObject(from_ref)) {
      accounting::ContinuousSpaceBitmap* cc_bitmap =
          cc_heap_bitmap_->GetContinuousSpaceBitmap(from_ref);
      DCHECK(cc_bitmap != nullptr)
          << "An immune space object must have a bitmap";
      if (kIsDebugBuild) {
        DCHECK(heap_mark_bitmap_->GetContinuousSpaceBitmap(from_ref)->Test(from_ref))
            << "Immune space object must be already marked";
      }
      if (cc_bitmap->Test(from_ref)) {
        // Already marked.
        to_ref = from_ref;
      } else {
        // Not marked yet.
        to_ref = nullptr;
      }
    } else {
      // Non-immune non-moving space. Use the mark bitmap.
      accounting::ContinuousSpaceBitmap* mark_bitmap =
          heap_mark_bitmap_->GetContinuousSpaceBitmap(from_ref);
      accounting::LargeObjectBitmap* los_bitmap =
          heap_mark_bitmap_->GetLargeObjectBitmap(from_ref);
      CHECK(los_bitmap != nullptr) << "LOS bitmap covers the entire address range";
      bool is_los = mark_bitmap == nullptr;
      if (!is_los && mark_bitmap->Test(from_ref)) {
        // Already marked.
        to_ref = from_ref;
      } else if (is_los && los_bitmap->Test(from_ref)) {
        // Already marked in LOS.
        to_ref = from_ref;
      } else {
        // Not marked.
        if (IsOnAllocStack(from_ref)) {
          // If on the allocation stack, it's considered marked.
          to_ref = from_ref;
        } else {
          // Not marked.
          to_ref = nullptr;
        }
      }
    }
  }
  return to_ref;
}

bool ConcurrentCopying::IsOnAllocStack(mirror::Object* ref) {
  QuasiAtomic::ThreadFenceAcquire();
  accounting::ObjectStack* alloc_stack = GetAllocationStack();
  return alloc_stack->Contains(ref);
}
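
// A compact sketch of the decision table IsMarked() implements above. All parameters are
// hypothetical stand-ins for the region-type query, the forwarding lookup, the bitmaps and
// the allocation stack; nullptr means "not (yet) marked".
#if 0  // Illustrative sketch only; not compiled with the collector.
#include <cstddef>

enum class ExampleRegionType { kToSpace, kFromSpace, kUnevacFromSpace, kNonMoving };

template <typename Object>
Object* ResolveMarked(ExampleRegionType type, Object* ref, Object* forwarded,
                      bool marked_in_bitmap, bool on_alloc_stack) {
  switch (type) {
    case ExampleRegionType::kToSpace:
      return ref;                                // Anything in to-space is marked.
    case ExampleRegionType::kFromSpace:
      return forwarded;                          // Forwarding ptr, or nullptr if not copied yet.
    case ExampleRegionType::kUnevacFromSpace:
      return marked_in_bitmap ? ref : nullptr;   // Marked in place, not moved.
    case ExampleRegionType::kNonMoving:
      return (marked_in_bitmap || on_alloc_stack) ? ref : nullptr;
  }
  return nullptr;
}
#endif
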
mirror::Object* ConcurrentCopying::MarkNonMoving(mirror::Object* ref) {
  // ref is in a non-moving space (from_ref == to_ref).
  DCHECK(!region_space_->HasAddress(ref)) << ref;
  if (immune_region_.ContainsObject(ref)) {
    accounting::ContinuousSpaceBitmap* cc_bitmap =
        cc_heap_bitmap_->GetContinuousSpaceBitmap(ref);
    DCHECK(cc_bitmap != nullptr)
        << "An immune space object must have a bitmap";
    if (kIsDebugBuild) {
      DCHECK(heap_mark_bitmap_->GetContinuousSpaceBitmap(ref)->Test(ref))
          << "Immune space object must be already marked";
    }
    // This may or may not succeed, which is ok.
    if (kUseBakerReadBarrier) {
      ref->AtomicSetReadBarrierPointer(ReadBarrier::WhitePtr(), ReadBarrier::GrayPtr());
    }
    if (cc_bitmap->AtomicTestAndSet(ref)) {
      // Already marked.
    } else {
      // Newly marked.
      if (kUseBakerReadBarrier) {
        DCHECK_EQ(ref->GetReadBarrierPointer(), ReadBarrier::GrayPtr());
      }
      PushOntoMarkStack(ref);
    }
  } else {
    // Use the mark bitmap.
    accounting::ContinuousSpaceBitmap* mark_bitmap =
        heap_mark_bitmap_->GetContinuousSpaceBitmap(ref);
    accounting::LargeObjectBitmap* los_bitmap =
        heap_mark_bitmap_->GetLargeObjectBitmap(ref);
    CHECK(los_bitmap != nullptr) << "LOS bitmap covers the entire address range";
    bool is_los = mark_bitmap == nullptr;
    if (!is_los && mark_bitmap->Test(ref)) {
      // Already marked.
      if (kUseBakerReadBarrier) {
        DCHECK(ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr() ||
               ref->GetReadBarrierPointer() == ReadBarrier::BlackPtr());
      }
    } else if (is_los && los_bitmap->Test(ref)) {
      // Already marked in LOS.
      if (kUseBakerReadBarrier) {
        DCHECK(ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr() ||
               ref->GetReadBarrierPointer() == ReadBarrier::BlackPtr());
      }
    } else {
      // Not marked.
      if (IsOnAllocStack(ref)) {
        // If it's on the allocation stack, it's considered marked. Keep it white.
        // Objects on the allocation stack need not be marked.
        if (!is_los) {
          DCHECK(!mark_bitmap->Test(ref));
        } else {
          DCHECK(!los_bitmap->Test(ref));
        }
        if (kUseBakerReadBarrier) {
          DCHECK_EQ(ref->GetReadBarrierPointer(), ReadBarrier::WhitePtr());
        }
      } else {
        // Not marked or on the allocation stack. Try to mark it.
        // This may or may not succeed, which is ok.
        if (kUseBakerReadBarrier) {
          ref->AtomicSetReadBarrierPointer(ReadBarrier::WhitePtr(), ReadBarrier::GrayPtr());
        }
        if (!is_los && mark_bitmap->AtomicTestAndSet(ref)) {
          // Already marked.
        } else if (is_los && los_bitmap->AtomicTestAndSet(ref)) {
          // Already marked in LOS.
        } else {
          // Newly marked.
          if (kUseBakerReadBarrier) {
            DCHECK_EQ(ref->GetReadBarrierPointer(), ReadBarrier::GrayPtr());
          }
          PushOntoMarkStack(ref);
        }
      }
    }
  }
  return ref;
}
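
// When the Baker read barrier is enabled, MarkNonMoving() above first tries to flip the
// object's read-barrier pointer from white to gray and only pushes the object on the mark
// stack when the mark bit was newly set. A standalone sketch of that "claim by color, then
// publish work" pattern; ExampleColor, ExampleHeader and ClaimForMarking are hypothetical
// names for this example (ART stores the color as a read-barrier pointer in the object
// header and uses per-space bitmaps rather than a per-object mark bit).
#if 0  // Illustrative sketch only; not compiled with the collector.
#include <atomic>

enum class ExampleColor : int { kWhite = 0, kGray = 1, kBlack = 2 };

struct ExampleHeader {
  std::atomic<ExampleColor> color{ExampleColor::kWhite};
  std::atomic<bool> mark_bit{false};
};

// Returns true exactly once per object: the caller that newly set the mark bit is the one
// responsible for scanning it (i.e. pushing it onto the mark stack).
inline bool ClaimForMarking(ExampleHeader& h) {
  ExampleColor expected = ExampleColor::kWhite;
  // This may or may not succeed, which is fine: another thread may already have grayed it.
  h.color.compare_exchange_strong(expected, ExampleColor::kGray, std::memory_order_relaxed);
  return !h.mark_bit.exchange(true, std::memory_order_relaxed);
}
#endif
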
void ConcurrentCopying::FinishPhase() {
  {
    MutexLock mu(Thread::Current(), mark_stack_lock_);
    CHECK_EQ(pooled_mark_stacks_.size(), kMarkStackPoolSize);
  }
  region_space_ = nullptr;
  {
    MutexLock mu(Thread::Current(), skipped_blocks_lock_);
    skipped_blocks_map_.clear();
  }
  WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
  heap_->ClearMarkedObjects();
}

bool ConcurrentCopying::IsMarkedHeapReference(mirror::HeapReference<mirror::Object>* field) {
  mirror::Object* from_ref = field->AsMirrorPtr();
  mirror::Object* to_ref = IsMarked(from_ref);
  if (to_ref == nullptr) {
    return false;
  }
  if (from_ref != to_ref) {
    QuasiAtomic::ThreadFenceRelease();
    field->Assign(to_ref);
    QuasiAtomic::ThreadFenceSequentiallyConsistent();
  }
  return true;
}

mirror::Object* ConcurrentCopying::MarkObject(mirror::Object* from_ref) {
  return Mark(from_ref);
}

void ConcurrentCopying::DelayReferenceReferent(mirror::Class* klass, mirror::Reference* reference) {
  heap_->GetReferenceProcessor()->DelayReferenceReferent(klass, reference, this);
}

void ConcurrentCopying::ProcessReferences(Thread* self) {
  TimingLogger::ScopedTiming split("ProcessReferences", GetTimings());
  // We don't really need to lock the heap bitmap lock as we use CAS to mark in bitmaps.
  WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
  GetHeap()->GetReferenceProcessor()->ProcessReferences(
      true /*concurrent*/, GetTimings(), GetCurrentIteration()->GetClearSoftReferences(), this);
}

void ConcurrentCopying::RevokeAllThreadLocalBuffers() {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  region_space_->RevokeAllThreadLocalBuffers();
}

}  // namespace collector
}  // namespace gc
}  // namespace art