concurrent_copying.cc revision fdbd13c7af91a042eda753e436eeebf0e1937250
1/* 2 * Copyright (C) 2014 The Android Open Source Project 3 * 4 * Licensed under the Apache License, Version 2.0 (the "License"); 5 * you may not use this file except in compliance with the License. 6 * You may obtain a copy of the License at 7 * 8 * http://www.apache.org/licenses/LICENSE-2.0 9 * 10 * Unless required by applicable law or agreed to in writing, software 11 * distributed under the License is distributed on an "AS IS" BASIS, 12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 * See the License for the specific language governing permissions and 14 * limitations under the License. 15 */ 16 17#include "concurrent_copying.h" 18 19#include "art_field-inl.h" 20#include "base/stl_util.h" 21#include "gc/accounting/heap_bitmap-inl.h" 22#include "gc/accounting/space_bitmap-inl.h" 23#include "gc/reference_processor.h" 24#include "gc/space/image_space.h" 25#include "gc/space/space.h" 26#include "intern_table.h" 27#include "mirror/class-inl.h" 28#include "mirror/object-inl.h" 29#include "scoped_thread_state_change.h" 30#include "thread-inl.h" 31#include "thread_list.h" 32#include "well_known_classes.h" 33 34namespace art { 35namespace gc { 36namespace collector { 37 38ConcurrentCopying::ConcurrentCopying(Heap* heap, const std::string& name_prefix) 39 : GarbageCollector(heap, 40 name_prefix + (name_prefix.empty() ? "" : " ") + 41 "concurrent copying + mark sweep"), 42 region_space_(nullptr), gc_barrier_(new Barrier(0)), 43 gc_mark_stack_(accounting::ObjectStack::Create("concurrent copying gc mark stack", 44 2 * MB, 2 * MB)), 45 mark_stack_lock_("concurrent copying mark stack lock", kMarkSweepMarkStackLock), 46 thread_running_gc_(nullptr), 47 is_marking_(false), is_active_(false), is_asserting_to_space_invariant_(false), 48 heap_mark_bitmap_(nullptr), live_stack_freeze_size_(0), mark_stack_mode_(kMarkStackModeOff), 49 weak_ref_access_enabled_(true), 50 skipped_blocks_lock_("concurrent copying bytes blocks lock", kMarkSweepMarkStackLock), 51 rb_table_(heap_->GetReadBarrierTable()), 52 force_evacuate_all_(false) { 53 static_assert(space::RegionSpace::kRegionSize == accounting::ReadBarrierTable::kRegionSize, 54 "The region space size and the read barrier table region size must match"); 55 cc_heap_bitmap_.reset(new accounting::HeapBitmap(heap)); 56 Thread* self = Thread::Current(); 57 { 58 ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_); 59 // Cache this so that we won't have to lock heap_bitmap_lock_ in 60 // Mark() which could cause a nested lock on heap_bitmap_lock_ 61 // when GC causes a RB while doing GC or a lock order violation 62 // (class_linker_lock_ and heap_bitmap_lock_). 63 heap_mark_bitmap_ = heap->GetMarkBitmap(); 64 } 65 { 66 MutexLock mu(self, mark_stack_lock_); 67 for (size_t i = 0; i < kMarkStackPoolSize; ++i) { 68 accounting::AtomicStack<mirror::Object>* mark_stack = 69 accounting::AtomicStack<mirror::Object>::Create( 70 "thread local mark stack", kMarkStackSize, kMarkStackSize); 71 pooled_mark_stacks_.push_back(mark_stack); 72 } 73 } 74} 75 76void ConcurrentCopying::MarkHeapReference(mirror::HeapReference<mirror::Object>* from_ref) { 77 // Used for preserving soft references, should be OK to not have a CAS here since there should be 78 // no other threads which can trigger read barriers on the same referent during reference 79 // processing. 
80 from_ref->Assign(Mark(from_ref->AsMirrorPtr())); 81 DCHECK(!from_ref->IsNull()); 82} 83 84ConcurrentCopying::~ConcurrentCopying() { 85 STLDeleteElements(&pooled_mark_stacks_); 86} 87 88void ConcurrentCopying::RunPhases() { 89 CHECK(kUseBakerReadBarrier || kUseTableLookupReadBarrier); 90 CHECK(!is_active_); 91 is_active_ = true; 92 Thread* self = Thread::Current(); 93 thread_running_gc_ = self; 94 Locks::mutator_lock_->AssertNotHeld(self); 95 { 96 ReaderMutexLock mu(self, *Locks::mutator_lock_); 97 InitializePhase(); 98 } 99 FlipThreadRoots(); 100 { 101 ReaderMutexLock mu(self, *Locks::mutator_lock_); 102 MarkingPhase(); 103 } 104 // Verify no from space refs. This causes a pause. 105 if (kEnableNoFromSpaceRefsVerification || kIsDebugBuild) { 106 TimingLogger::ScopedTiming split("(Paused)VerifyNoFromSpaceReferences", GetTimings()); 107 ScopedPause pause(this); 108 CheckEmptyMarkStack(); 109 if (kVerboseMode) { 110 LOG(INFO) << "Verifying no from-space refs"; 111 } 112 VerifyNoFromSpaceReferences(); 113 if (kVerboseMode) { 114 LOG(INFO) << "Done verifying no from-space refs"; 115 } 116 CheckEmptyMarkStack(); 117 } 118 { 119 ReaderMutexLock mu(self, *Locks::mutator_lock_); 120 ReclaimPhase(); 121 } 122 FinishPhase(); 123 CHECK(is_active_); 124 is_active_ = false; 125 thread_running_gc_ = nullptr; 126} 127 128void ConcurrentCopying::BindBitmaps() { 129 Thread* self = Thread::Current(); 130 WriterMutexLock mu(self, *Locks::heap_bitmap_lock_); 131 // Mark all of the spaces we never collect as immune. 132 for (const auto& space : heap_->GetContinuousSpaces()) { 133 if (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyNeverCollect 134 || space->GetGcRetentionPolicy() == space::kGcRetentionPolicyFullCollect) { 135 CHECK(space->IsZygoteSpace() || space->IsImageSpace()); 136 CHECK(immune_region_.AddContinuousSpace(space)) << "Failed to add space " << *space; 137 const char* bitmap_name = space->IsImageSpace() ? "cc image space bitmap" : 138 "cc zygote space bitmap"; 139 // TODO: try avoiding using bitmaps for image/zygote to save space. 
140 accounting::ContinuousSpaceBitmap* bitmap = 141 accounting::ContinuousSpaceBitmap::Create(bitmap_name, space->Begin(), space->Capacity()); 142 cc_heap_bitmap_->AddContinuousSpaceBitmap(bitmap); 143 cc_bitmaps_.push_back(bitmap); 144 } else if (space == region_space_) { 145 accounting::ContinuousSpaceBitmap* bitmap = 146 accounting::ContinuousSpaceBitmap::Create("cc region space bitmap", 147 space->Begin(), space->Capacity()); 148 cc_heap_bitmap_->AddContinuousSpaceBitmap(bitmap); 149 cc_bitmaps_.push_back(bitmap); 150 region_space_bitmap_ = bitmap; 151 } 152 } 153} 154 155void ConcurrentCopying::InitializePhase() { 156 TimingLogger::ScopedTiming split("InitializePhase", GetTimings()); 157 if (kVerboseMode) { 158 LOG(INFO) << "GC InitializePhase"; 159 LOG(INFO) << "Region-space : " << reinterpret_cast<void*>(region_space_->Begin()) << "-" 160 << reinterpret_cast<void*>(region_space_->Limit()); 161 } 162 CheckEmptyMarkStack(); 163 immune_region_.Reset(); 164 bytes_moved_.StoreRelaxed(0); 165 objects_moved_.StoreRelaxed(0); 166 if (GetCurrentIteration()->GetGcCause() == kGcCauseExplicit || 167 GetCurrentIteration()->GetGcCause() == kGcCauseForNativeAlloc || 168 GetCurrentIteration()->GetClearSoftReferences()) { 169 force_evacuate_all_ = true; 170 } else { 171 force_evacuate_all_ = false; 172 } 173 BindBitmaps(); 174 if (kVerboseMode) { 175 LOG(INFO) << "force_evacuate_all=" << force_evacuate_all_; 176 LOG(INFO) << "Immune region: " << immune_region_.Begin() << "-" << immune_region_.End(); 177 LOG(INFO) << "GC end of InitializePhase"; 178 } 179} 180 181// Used to switch the thread roots of a thread from from-space refs to to-space refs. 182class ThreadFlipVisitor : public Closure { 183 public: 184 ThreadFlipVisitor(ConcurrentCopying* concurrent_copying, bool use_tlab) 185 : concurrent_copying_(concurrent_copying), use_tlab_(use_tlab) { 186 } 187 188 virtual void Run(Thread* thread) OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) { 189 // Note: self is not necessarily equal to thread since thread may be suspended. 190 Thread* self = Thread::Current(); 191 CHECK(thread == self || thread->IsSuspended() || thread->GetState() == kWaitingPerformingGc) 192 << thread->GetState() << " thread " << thread << " self " << self; 193 thread->SetIsGcMarking(true); 194 if (use_tlab_ && thread->HasTlab()) { 195 if (ConcurrentCopying::kEnableFromSpaceAccountingCheck) { 196 // This must come before the revoke. 197 size_t thread_local_objects = thread->GetThreadLocalObjectsAllocated(); 198 concurrent_copying_->region_space_->RevokeThreadLocalBuffers(thread); 199 reinterpret_cast<Atomic<size_t>*>(&concurrent_copying_->from_space_num_objects_at_first_pause_)-> 200 FetchAndAddSequentiallyConsistent(thread_local_objects); 201 } else { 202 concurrent_copying_->region_space_->RevokeThreadLocalBuffers(thread); 203 } 204 } 205 if (kUseThreadLocalAllocationStack) { 206 thread->RevokeThreadLocalAllocationStack(); 207 } 208 ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_); 209 thread->VisitRoots(concurrent_copying_); 210 concurrent_copying_->GetBarrier().Pass(self); 211 } 212 213 private: 214 ConcurrentCopying* const concurrent_copying_; 215 const bool use_tlab_; 216}; 217 218// Called back from Runtime::FlipThreadRoots() during a pause. 
class FlipCallback : public Closure {
 public:
  explicit FlipCallback(ConcurrentCopying* concurrent_copying)
      : concurrent_copying_(concurrent_copying) {
  }

  virtual void Run(Thread* thread) OVERRIDE REQUIRES(Locks::mutator_lock_) {
    ConcurrentCopying* cc = concurrent_copying_;
    TimingLogger::ScopedTiming split("(Paused)FlipCallback", cc->GetTimings());
    // Note: self is not necessarily equal to thread since thread may be suspended.
    Thread* self = Thread::Current();
    CHECK(thread == self);
    Locks::mutator_lock_->AssertExclusiveHeld(self);
    cc->region_space_->SetFromSpace(cc->rb_table_, cc->force_evacuate_all_);
    cc->SwapStacks();
    if (ConcurrentCopying::kEnableFromSpaceAccountingCheck) {
      cc->RecordLiveStackFreezeSize(self);
      cc->from_space_num_objects_at_first_pause_ = cc->region_space_->GetObjectsAllocated();
      cc->from_space_num_bytes_at_first_pause_ = cc->region_space_->GetBytesAllocated();
    }
    cc->is_marking_ = true;
    cc->mark_stack_mode_.StoreRelaxed(ConcurrentCopying::kMarkStackModeThreadLocal);
    if (UNLIKELY(Runtime::Current()->IsActiveTransaction())) {
      CHECK(Runtime::Current()->IsAotCompiler());
      TimingLogger::ScopedTiming split2("(Paused)VisitTransactionRoots", cc->GetTimings());
      Runtime::Current()->VisitTransactionRoots(cc);
    }
  }

 private:
  ConcurrentCopying* const concurrent_copying_;
};

// Switch the thread roots from from-space refs to to-space refs. Forward/mark the thread roots.
void ConcurrentCopying::FlipThreadRoots() {
  TimingLogger::ScopedTiming split("FlipThreadRoots", GetTimings());
  if (kVerboseMode) {
    LOG(INFO) << "time=" << region_space_->Time();
    region_space_->DumpNonFreeRegions(LOG(INFO));
  }
  Thread* self = Thread::Current();
  Locks::mutator_lock_->AssertNotHeld(self);
  gc_barrier_->Init(self, 0);
  ThreadFlipVisitor thread_flip_visitor(this, heap_->use_tlab_);
  FlipCallback flip_callback(this);
  heap_->ThreadFlipBegin(self);  // Sync with JNI critical calls.
  size_t barrier_count = Runtime::Current()->FlipThreadRoots(
      &thread_flip_visitor, &flip_callback, this);
  heap_->ThreadFlipEnd(self);
  {
    ScopedThreadStateChange tsc(self, kWaitingForCheckPointsToRun);
    gc_barrier_->Increment(self, barrier_count);
  }
  is_asserting_to_space_invariant_ = true;
  QuasiAtomic::ThreadFenceForConstructor();
  if (kVerboseMode) {
    LOG(INFO) << "time=" << region_space_->Time();
    region_space_->DumpNonFreeRegions(LOG(INFO));
    LOG(INFO) << "GC end of FlipThreadRoots";
  }
}

void ConcurrentCopying::SwapStacks() {
  heap_->SwapStacks();
}

void ConcurrentCopying::RecordLiveStackFreezeSize(Thread* self) {
  WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
  live_stack_freeze_size_ = heap_->GetLiveStack()->Size();
}

// Used to visit objects in the immune spaces.
291class ConcurrentCopyingImmuneSpaceObjVisitor { 292 public: 293 explicit ConcurrentCopyingImmuneSpaceObjVisitor(ConcurrentCopying* cc) 294 : collector_(cc) {} 295 296 void operator()(mirror::Object* obj) const SHARED_REQUIRES(Locks::mutator_lock_) 297 SHARED_REQUIRES(Locks::heap_bitmap_lock_) { 298 DCHECK(obj != nullptr); 299 DCHECK(collector_->immune_region_.ContainsObject(obj)); 300 accounting::ContinuousSpaceBitmap* cc_bitmap = 301 collector_->cc_heap_bitmap_->GetContinuousSpaceBitmap(obj); 302 DCHECK(cc_bitmap != nullptr) 303 << "An immune space object must have a bitmap"; 304 if (kIsDebugBuild) { 305 DCHECK(collector_->heap_->GetMarkBitmap()->Test(obj)) 306 << "Immune space object must be already marked"; 307 } 308 // This may or may not succeed, which is ok. 309 if (kUseBakerReadBarrier) { 310 obj->AtomicSetReadBarrierPointer(ReadBarrier::WhitePtr(), ReadBarrier::GrayPtr()); 311 } 312 if (cc_bitmap->AtomicTestAndSet(obj)) { 313 // Already marked. Do nothing. 314 } else { 315 // Newly marked. Set the gray bit and push it onto the mark stack. 316 CHECK(!kUseBakerReadBarrier || obj->GetReadBarrierPointer() == ReadBarrier::GrayPtr()); 317 collector_->PushOntoMarkStack(obj); 318 } 319 } 320 321 private: 322 ConcurrentCopying* const collector_; 323}; 324 325class EmptyCheckpoint : public Closure { 326 public: 327 explicit EmptyCheckpoint(ConcurrentCopying* concurrent_copying) 328 : concurrent_copying_(concurrent_copying) { 329 } 330 331 virtual void Run(Thread* thread) OVERRIDE NO_THREAD_SAFETY_ANALYSIS { 332 // Note: self is not necessarily equal to thread since thread may be suspended. 333 Thread* self = Thread::Current(); 334 CHECK(thread == self || thread->IsSuspended() || thread->GetState() == kWaitingPerformingGc) 335 << thread->GetState() << " thread " << thread << " self " << self; 336 // If thread is a running mutator, then act on behalf of the garbage collector. 337 // See the code in ThreadList::RunCheckpoint. 338 if (thread->GetState() == kRunnable) { 339 concurrent_copying_->GetBarrier().Pass(self); 340 } 341 } 342 343 private: 344 ConcurrentCopying* const concurrent_copying_; 345}; 346 347// Concurrently mark roots that are guarded by read barriers and process the mark stack. 348void ConcurrentCopying::MarkingPhase() { 349 TimingLogger::ScopedTiming split("MarkingPhase", GetTimings()); 350 if (kVerboseMode) { 351 LOG(INFO) << "GC MarkingPhase"; 352 } 353 CHECK(weak_ref_access_enabled_); 354 { 355 // Mark the image root. The WB-based collectors do not need to 356 // scan the image objects from roots by relying on the card table, 357 // but it's necessary for the RB to-space invariant to hold. 358 TimingLogger::ScopedTiming split1("VisitImageRoots", GetTimings()); 359 gc::space::ImageSpace* image = heap_->GetImageSpace(); 360 if (image != nullptr) { 361 mirror::ObjectArray<mirror::Object>* image_root = image->GetImageHeader().GetImageRoots(); 362 mirror::Object* marked_image_root = Mark(image_root); 363 CHECK_EQ(image_root, marked_image_root) << "An image object does not move"; 364 if (ReadBarrier::kEnableToSpaceInvariantChecks) { 365 AssertToSpaceInvariant(nullptr, MemberOffset(0), marked_image_root); 366 } 367 } 368 } 369 // TODO: Other garbage collectors uses Runtime::VisitConcurrentRoots(), refactor this part 370 // to also use the same function. 
  {
    TimingLogger::ScopedTiming split2("VisitConstantRoots", GetTimings());
    Runtime::Current()->VisitConstantRoots(this);
  }
  {
    TimingLogger::ScopedTiming split3("VisitInternTableRoots", GetTimings());
    Runtime::Current()->GetInternTable()->VisitRoots(this, kVisitRootFlagAllRoots);
  }
  {
    TimingLogger::ScopedTiming split4("VisitClassLinkerRoots", GetTimings());
    Runtime::Current()->GetClassLinker()->VisitRoots(this, kVisitRootFlagAllRoots);
  }
  {
    // TODO: don't visit the transaction roots if it's not active.
    TimingLogger::ScopedTiming split5("VisitNonThreadRoots", GetTimings());
    Runtime::Current()->VisitNonThreadRoots(this);
  }
  Runtime::Current()->GetHeap()->VisitAllocationRecords(this);

  // Immune spaces.
  for (auto& space : heap_->GetContinuousSpaces()) {
    if (immune_region_.ContainsSpace(space)) {
      DCHECK(space->IsImageSpace() || space->IsZygoteSpace());
      accounting::ContinuousSpaceBitmap* live_bitmap = space->GetLiveBitmap();
      ConcurrentCopyingImmuneSpaceObjVisitor visitor(this);
      live_bitmap->VisitMarkedRange(reinterpret_cast<uintptr_t>(space->Begin()),
                                    reinterpret_cast<uintptr_t>(space->Limit()),
                                    visitor);
    }
  }

  Thread* self = Thread::Current();
  {
    TimingLogger::ScopedTiming split6("ProcessMarkStack", GetTimings());
    // We transition through three mark stack modes (thread-local, shared, GC-exclusive). The
    // primary reasons are that we need to use a checkpoint to process thread-local mark stacks;
    // that after we disable weak ref accesses, we can't use a checkpoint due to a deadlock issue
    // (running threads may be blocked at WaitHoldingLocks); and that once we reach the point
    // where we process weak references, we can avoid using a lock when accessing the GC mark
    // stack, which makes mark stack processing more efficient.

    // Process the mark stack once in the thread-local stack mode. This marks most of the live
    // objects, aside from weak ref accesses with read barriers (Reference::GetReferent() and
    // system weaks) that may happen concurrently while we process the mark stack and that newly
    // mark/gray objects and push refs onto the mark stack.
    ProcessMarkStack();
    // Switch to the shared mark stack mode. That is, revoke and process thread-local mark stacks
    // for the last time before transitioning to the shared mark stack mode, which would process
    // new refs that may have been concurrently pushed onto the mark stack during the
    // ProcessMarkStack() call above. At the same time, disable weak ref accesses using a
    // per-thread flag. It's important to do these together in a single checkpoint so that we can
    // ensure that mutators won't newly gray objects and push new refs onto the mark stack due to
    // weak ref accesses, and that mutators safely transition to the shared mark stack mode
    // (without leaving unprocessed refs on the thread-local mark stacks), without a race. This is
    // why we use a thread-local weak ref access flag Thread::tls32_.weak_ref_access_enabled_
    // instead of the global ones.
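    // In outline, the mark stack mode and the weak ref access flag move through roughly the
    // following sequence over the remainder of this block (values as used elsewhere in this
    // file):
    //
    //   kMarkStackModeThreadLocal + weak refs enabled   -> ProcessMarkStack() (checkpoint-based)
    //   kMarkStackModeShared      + weak refs disabled  -> ProcessMarkStack() (mark_stack_lock_)
    //   kMarkStackModeGcExclusive + weak refs disabled  -> ProcessReferences()/SweepSystemWeaks()
    //   weak refs re-enabled, then DisableMarking() sets kMarkStackModeOff.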
    SwitchToSharedMarkStackMode();
    CHECK(!self->GetWeakRefAccessEnabled());
    // Now that weak ref accesses are disabled, once we exhaust the shared mark stack again here
    // (which may be non-empty if there were refs found on thread-local mark stacks during the
    // above SwitchToSharedMarkStackMode() call), we won't have new refs to process, that is,
    // mutators (via read barriers) have no way to produce any more refs to process. Marking
    // converges here, before we process weak refs below.
    ProcessMarkStack();
    CheckEmptyMarkStack();
    // Switch to the GC exclusive mark stack mode so that we can process the mark stack without a
    // lock from this point on.
    SwitchToGcExclusiveMarkStackMode();
    CheckEmptyMarkStack();
    if (kVerboseMode) {
      LOG(INFO) << "ProcessReferences";
    }
    // Process weak references. This may produce new refs to process and have them processed via
    // ProcessMarkStack (in the GC exclusive mark stack mode).
    ProcessReferences(self);
    CheckEmptyMarkStack();
    if (kVerboseMode) {
      LOG(INFO) << "SweepSystemWeaks";
    }
    SweepSystemWeaks(self);
    if (kVerboseMode) {
      LOG(INFO) << "SweepSystemWeaks done";
    }
    // Process the mark stack here one last time because the above SweepSystemWeaks() call may
    // have marked some objects (kept some strings alive), as hash_set::Erase() can call the hash
    // function for arbitrary elements in the weak intern table in InternTable::Table::SweepWeaks().
    ProcessMarkStack();
    CheckEmptyMarkStack();
    // Re-enable weak ref accesses.
    ReenableWeakRefAccess(self);
    // Marking is done. Disable marking.
    DisableMarking();
    CheckEmptyMarkStack();
  }

  CHECK(weak_ref_access_enabled_);
  if (kVerboseMode) {
    LOG(INFO) << "GC end of MarkingPhase";
  }
}

void ConcurrentCopying::ReenableWeakRefAccess(Thread* self) {
  if (kVerboseMode) {
    LOG(INFO) << "ReenableWeakRefAccess";
  }
  weak_ref_access_enabled_.StoreRelaxed(true);  // This is for new threads.
  QuasiAtomic::ThreadFenceForConstructor();
  // Iterate all threads (don't need to or can't use a checkpoint) and re-enable weak ref access.
  {
    MutexLock mu(self, *Locks::thread_list_lock_);
    std::list<Thread*> thread_list = Runtime::Current()->GetThreadList()->GetList();
    for (Thread* thread : thread_list) {
      thread->SetWeakRefAccessEnabled(true);
    }
  }
  // Unblock blocking threads.
  GetHeap()->GetReferenceProcessor()->BroadcastForSlowPath(self);
  Runtime::Current()->BroadcastForNewSystemWeaks();
}

class DisableMarkingCheckpoint : public Closure {
 public:
  explicit DisableMarkingCheckpoint(ConcurrentCopying* concurrent_copying)
      : concurrent_copying_(concurrent_copying) {
  }

  void Run(Thread* thread) OVERRIDE NO_THREAD_SAFETY_ANALYSIS {
    // Note: self is not necessarily equal to thread since thread may be suspended.
    Thread* self = Thread::Current();
    DCHECK(thread == self || thread->IsSuspended() || thread->GetState() == kWaitingPerformingGc)
        << thread->GetState() << " thread " << thread << " self " << self;
    // Disable the thread-local is_gc_marking flag.
    // Note a thread that has just started right before this checkpoint may already have this
    // flag set to false, which is ok.
    thread->SetIsGcMarking(false);
    // If thread is a running mutator, then act on behalf of the garbage collector.
    // See the code in ThreadList::RunCheckpoint.
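    // The surrounding checkpoint/barrier handshake, as used by IssueDisableMarkingCheckpoint(),
    // IssueEmptyCheckpoint() and RevokeThreadLocalMarkStacks() below, has roughly this shape:
    //
    //   gc_barrier_->Init(self, 0);
    //   size_t barrier_count = thread_list->RunCheckpoint(&check_point);
    //   if (barrier_count != 0) {
    //     Locks::mutator_lock_->SharedUnlock(self);     // Don't block mutators while waiting.
    //     gc_barrier_->Increment(self, barrier_count);  // Wait for barrier_count Pass() calls.
    //     Locks::mutator_lock_->SharedLock(self);
    //   }
    //
    // Pass() (just below) is only called when the closure runs for a runnable thread, which is
    // what Increment() waits on.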
    if (thread->GetState() == kRunnable) {
      concurrent_copying_->GetBarrier().Pass(self);
    }
  }

 private:
  ConcurrentCopying* const concurrent_copying_;
};

void ConcurrentCopying::IssueDisableMarkingCheckpoint() {
  Thread* self = Thread::Current();
  DisableMarkingCheckpoint check_point(this);
  ThreadList* thread_list = Runtime::Current()->GetThreadList();
  gc_barrier_->Init(self, 0);
  size_t barrier_count = thread_list->RunCheckpoint(&check_point);
  // If there are no threads to wait for, which implies that all the checkpoint functions have
  // finished, then there is no need to release the mutator lock.
  if (barrier_count == 0) {
    return;
  }
  // Release locks then wait for all mutator threads to pass the barrier.
  Locks::mutator_lock_->SharedUnlock(self);
  {
    ScopedThreadStateChange tsc(self, kWaitingForCheckPointsToRun);
    gc_barrier_->Increment(self, barrier_count);
  }
  Locks::mutator_lock_->SharedLock(self);
}

void ConcurrentCopying::DisableMarking() {
  // Change the global is_marking flag to false. Do a fence before doing a checkpoint to update the
  // thread-local flags so that a new thread starting up will get the correct is_marking flag.
  is_marking_ = false;
  QuasiAtomic::ThreadFenceForConstructor();
  // Use a checkpoint to turn off the thread-local is_gc_marking flags and to ensure no threads are
  // still in the middle of a read barrier which may have a from-space ref cached in a local
  // variable.
  IssueDisableMarkingCheckpoint();
  if (kUseTableLookupReadBarrier) {
    heap_->rb_table_->ClearAll();
    DCHECK(heap_->rb_table_->IsAllCleared());
  }
  is_mark_stack_push_disallowed_.StoreSequentiallyConsistent(1);
  mark_stack_mode_.StoreSequentiallyConsistent(kMarkStackModeOff);
}

void ConcurrentCopying::IssueEmptyCheckpoint() {
  Thread* self = Thread::Current();
  EmptyCheckpoint check_point(this);
  ThreadList* thread_list = Runtime::Current()->GetThreadList();
  gc_barrier_->Init(self, 0);
  size_t barrier_count = thread_list->RunCheckpoint(&check_point);
  // If there are no threads to wait for, which implies that all the checkpoint functions have
  // finished, then there is no need to release the mutator lock.
  if (barrier_count == 0) {
    return;
  }
  // Release locks then wait for all mutator threads to pass the barrier.
  Locks::mutator_lock_->SharedUnlock(self);
  {
    ScopedThreadStateChange tsc(self, kWaitingForCheckPointsToRun);
    gc_barrier_->Increment(self, barrier_count);
  }
  Locks::mutator_lock_->SharedLock(self);
}

void ConcurrentCopying::PushOntoMarkStack(mirror::Object* to_ref) {
  CHECK_EQ(is_mark_stack_push_disallowed_.LoadRelaxed(), 0)
      << " " << to_ref << " " << PrettyTypeOf(to_ref);
  Thread* self = Thread::Current();  // TODO: pass self as an argument from call sites?
  CHECK(thread_running_gc_ != nullptr);
  MarkStackMode mark_stack_mode = mark_stack_mode_.LoadRelaxed();
  if (mark_stack_mode == kMarkStackModeThreadLocal) {
    if (self == thread_running_gc_) {
      // If GC-running thread, use the GC mark stack instead of a thread-local mark stack.
      CHECK(self->GetThreadLocalMarkStack() == nullptr);
      CHECK(!gc_mark_stack_->IsFull());
      gc_mark_stack_->PushBack(to_ref);
    } else {
      // Otherwise, use a thread-local mark stack.
      accounting::AtomicStack<mirror::Object>* tl_mark_stack = self->GetThreadLocalMarkStack();
      if (UNLIKELY(tl_mark_stack == nullptr || tl_mark_stack->IsFull())) {
        MutexLock mu(self, mark_stack_lock_);
        // Get a new thread local mark stack.
        accounting::AtomicStack<mirror::Object>* new_tl_mark_stack;
        if (!pooled_mark_stacks_.empty()) {
          // Use a pooled mark stack.
          new_tl_mark_stack = pooled_mark_stacks_.back();
          pooled_mark_stacks_.pop_back();
        } else {
          // None pooled. Create a new one.
          new_tl_mark_stack =
              accounting::AtomicStack<mirror::Object>::Create(
                  "thread local mark stack", 4 * KB, 4 * KB);
        }
        DCHECK(new_tl_mark_stack != nullptr);
        DCHECK(new_tl_mark_stack->IsEmpty());
        new_tl_mark_stack->PushBack(to_ref);
        self->SetThreadLocalMarkStack(new_tl_mark_stack);
        if (tl_mark_stack != nullptr) {
          // Store the old full stack into a vector.
          revoked_mark_stacks_.push_back(tl_mark_stack);
        }
      } else {
        tl_mark_stack->PushBack(to_ref);
      }
    }
  } else if (mark_stack_mode == kMarkStackModeShared) {
    // Access the shared GC mark stack with a lock.
    MutexLock mu(self, mark_stack_lock_);
    CHECK(!gc_mark_stack_->IsFull());
    gc_mark_stack_->PushBack(to_ref);
  } else {
    CHECK_EQ(static_cast<uint32_t>(mark_stack_mode),
             static_cast<uint32_t>(kMarkStackModeGcExclusive));
    CHECK(self == thread_running_gc_)
        << "Only GC-running thread should access the mark stack "
        << "in the GC exclusive mark stack mode";
    // Access the GC mark stack without a lock.
    CHECK(!gc_mark_stack_->IsFull());
    gc_mark_stack_->PushBack(to_ref);
  }
}

accounting::ObjectStack* ConcurrentCopying::GetAllocationStack() {
  return heap_->allocation_stack_.get();
}

accounting::ObjectStack* ConcurrentCopying::GetLiveStack() {
  return heap_->live_stack_.get();
}

inline mirror::Object* ConcurrentCopying::GetFwdPtr(mirror::Object* from_ref) {
  DCHECK(region_space_->IsInFromSpace(from_ref));
  LockWord lw = from_ref->GetLockWord(false);
  if (lw.GetState() == LockWord::kForwardingAddress) {
    mirror::Object* fwd_ptr = reinterpret_cast<mirror::Object*>(lw.ForwardingAddress());
    CHECK(fwd_ptr != nullptr);
    return fwd_ptr;
  } else {
    return nullptr;
  }
}

// The following visitors are used to verify that there are no references to the from-space left
// after marking.
class ConcurrentCopyingVerifyNoFromSpaceRefsVisitor : public SingleRootVisitor {
 public:
  explicit ConcurrentCopyingVerifyNoFromSpaceRefsVisitor(ConcurrentCopying* collector)
      : collector_(collector) {}

  void operator()(mirror::Object* ref) const
      SHARED_REQUIRES(Locks::mutator_lock_) ALWAYS_INLINE {
    if (ref == nullptr) {
      // OK.
662 return; 663 } 664 collector_->AssertToSpaceInvariant(nullptr, MemberOffset(0), ref); 665 if (kUseBakerReadBarrier) { 666 if (collector_->RegionSpace()->IsInToSpace(ref)) { 667 CHECK(ref->GetReadBarrierPointer() == nullptr) 668 << "To-space ref " << ref << " " << PrettyTypeOf(ref) 669 << " has non-white rb_ptr " << ref->GetReadBarrierPointer(); 670 } else { 671 CHECK(ref->GetReadBarrierPointer() == ReadBarrier::BlackPtr() || 672 (ref->GetReadBarrierPointer() == ReadBarrier::WhitePtr() && 673 collector_->IsOnAllocStack(ref))) 674 << "Non-moving/unevac from space ref " << ref << " " << PrettyTypeOf(ref) 675 << " has non-black rb_ptr " << ref->GetReadBarrierPointer() 676 << " but isn't on the alloc stack (and has white rb_ptr)." 677 << " Is it in the non-moving space=" 678 << (collector_->GetHeap()->GetNonMovingSpace()->HasAddress(ref)); 679 } 680 } 681 } 682 683 void VisitRoot(mirror::Object* root, const RootInfo& info ATTRIBUTE_UNUSED) 684 OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) { 685 DCHECK(root != nullptr); 686 operator()(root); 687 } 688 689 private: 690 ConcurrentCopying* const collector_; 691}; 692 693class ConcurrentCopyingVerifyNoFromSpaceRefsFieldVisitor { 694 public: 695 explicit ConcurrentCopyingVerifyNoFromSpaceRefsFieldVisitor(ConcurrentCopying* collector) 696 : collector_(collector) {} 697 698 void operator()(mirror::Object* obj, MemberOffset offset, bool is_static ATTRIBUTE_UNUSED) const 699 SHARED_REQUIRES(Locks::mutator_lock_) ALWAYS_INLINE { 700 mirror::Object* ref = 701 obj->GetFieldObject<mirror::Object, kDefaultVerifyFlags, kWithoutReadBarrier>(offset); 702 ConcurrentCopyingVerifyNoFromSpaceRefsVisitor visitor(collector_); 703 visitor(ref); 704 } 705 void operator()(mirror::Class* klass, mirror::Reference* ref) const 706 SHARED_REQUIRES(Locks::mutator_lock_) ALWAYS_INLINE { 707 CHECK(klass->IsTypeOfReferenceClass()); 708 this->operator()(ref, mirror::Reference::ReferentOffset(), false); 709 } 710 711 void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const 712 SHARED_REQUIRES(Locks::mutator_lock_) { 713 if (!root->IsNull()) { 714 VisitRoot(root); 715 } 716 } 717 718 void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const 719 SHARED_REQUIRES(Locks::mutator_lock_) { 720 ConcurrentCopyingVerifyNoFromSpaceRefsVisitor visitor(collector_); 721 visitor(root->AsMirrorPtr()); 722 } 723 724 private: 725 ConcurrentCopying* const collector_; 726}; 727 728class ConcurrentCopyingVerifyNoFromSpaceRefsObjectVisitor { 729 public: 730 explicit ConcurrentCopyingVerifyNoFromSpaceRefsObjectVisitor(ConcurrentCopying* collector) 731 : collector_(collector) {} 732 void operator()(mirror::Object* obj) const 733 SHARED_REQUIRES(Locks::mutator_lock_) { 734 ObjectCallback(obj, collector_); 735 } 736 static void ObjectCallback(mirror::Object* obj, void *arg) 737 SHARED_REQUIRES(Locks::mutator_lock_) { 738 CHECK(obj != nullptr); 739 ConcurrentCopying* collector = reinterpret_cast<ConcurrentCopying*>(arg); 740 space::RegionSpace* region_space = collector->RegionSpace(); 741 CHECK(!region_space->IsInFromSpace(obj)) << "Scanning object " << obj << " in from space"; 742 ConcurrentCopyingVerifyNoFromSpaceRefsFieldVisitor visitor(collector); 743 obj->VisitReferences(visitor, visitor); 744 if (kUseBakerReadBarrier) { 745 if (collector->RegionSpace()->IsInToSpace(obj)) { 746 CHECK(obj->GetReadBarrierPointer() == nullptr) 747 << "obj=" << obj << " non-white rb_ptr " << obj->GetReadBarrierPointer(); 748 } else { 749 CHECK(obj->GetReadBarrierPointer() == 
ReadBarrier::BlackPtr() || 750 (obj->GetReadBarrierPointer() == ReadBarrier::WhitePtr() && 751 collector->IsOnAllocStack(obj))) 752 << "Non-moving space/unevac from space ref " << obj << " " << PrettyTypeOf(obj) 753 << " has non-black rb_ptr " << obj->GetReadBarrierPointer() 754 << " but isn't on the alloc stack (and has white rb_ptr). Is it in the non-moving space=" 755 << (collector->GetHeap()->GetNonMovingSpace()->HasAddress(obj)); 756 } 757 } 758 } 759 760 private: 761 ConcurrentCopying* const collector_; 762}; 763 764// Verify there's no from-space references left after the marking phase. 765void ConcurrentCopying::VerifyNoFromSpaceReferences() { 766 Thread* self = Thread::Current(); 767 DCHECK(Locks::mutator_lock_->IsExclusiveHeld(self)); 768 // Verify all threads have is_gc_marking to be false 769 { 770 MutexLock mu(self, *Locks::thread_list_lock_); 771 std::list<Thread*> thread_list = Runtime::Current()->GetThreadList()->GetList(); 772 for (Thread* thread : thread_list) { 773 CHECK(!thread->GetIsGcMarking()); 774 } 775 } 776 ConcurrentCopyingVerifyNoFromSpaceRefsObjectVisitor visitor(this); 777 // Roots. 778 { 779 ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_); 780 ConcurrentCopyingVerifyNoFromSpaceRefsVisitor ref_visitor(this); 781 Runtime::Current()->VisitRoots(&ref_visitor); 782 } 783 // The to-space. 784 region_space_->WalkToSpace(ConcurrentCopyingVerifyNoFromSpaceRefsObjectVisitor::ObjectCallback, 785 this); 786 // Non-moving spaces. 787 { 788 WriterMutexLock mu(self, *Locks::heap_bitmap_lock_); 789 heap_->GetMarkBitmap()->Visit(visitor); 790 } 791 // The alloc stack. 792 { 793 ConcurrentCopyingVerifyNoFromSpaceRefsVisitor ref_visitor(this); 794 for (auto* it = heap_->allocation_stack_->Begin(), *end = heap_->allocation_stack_->End(); 795 it < end; ++it) { 796 mirror::Object* const obj = it->AsMirrorPtr(); 797 if (obj != nullptr && obj->GetClass() != nullptr) { 798 // TODO: need to call this only if obj is alive? 799 ref_visitor(obj); 800 visitor(obj); 801 } 802 } 803 } 804 // TODO: LOS. But only refs in LOS are classes. 805} 806 807// The following visitors are used to assert the to-space invariant. 808class ConcurrentCopyingAssertToSpaceInvariantRefsVisitor { 809 public: 810 explicit ConcurrentCopyingAssertToSpaceInvariantRefsVisitor(ConcurrentCopying* collector) 811 : collector_(collector) {} 812 813 void operator()(mirror::Object* ref) const 814 SHARED_REQUIRES(Locks::mutator_lock_) ALWAYS_INLINE { 815 if (ref == nullptr) { 816 // OK. 
817 return; 818 } 819 collector_->AssertToSpaceInvariant(nullptr, MemberOffset(0), ref); 820 } 821 822 private: 823 ConcurrentCopying* const collector_; 824}; 825 826class ConcurrentCopyingAssertToSpaceInvariantFieldVisitor { 827 public: 828 explicit ConcurrentCopyingAssertToSpaceInvariantFieldVisitor(ConcurrentCopying* collector) 829 : collector_(collector) {} 830 831 void operator()(mirror::Object* obj, MemberOffset offset, bool is_static ATTRIBUTE_UNUSED) const 832 SHARED_REQUIRES(Locks::mutator_lock_) ALWAYS_INLINE { 833 mirror::Object* ref = 834 obj->GetFieldObject<mirror::Object, kDefaultVerifyFlags, kWithoutReadBarrier>(offset); 835 ConcurrentCopyingAssertToSpaceInvariantRefsVisitor visitor(collector_); 836 visitor(ref); 837 } 838 void operator()(mirror::Class* klass, mirror::Reference* ref ATTRIBUTE_UNUSED) const 839 SHARED_REQUIRES(Locks::mutator_lock_) ALWAYS_INLINE { 840 CHECK(klass->IsTypeOfReferenceClass()); 841 } 842 843 void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const 844 SHARED_REQUIRES(Locks::mutator_lock_) { 845 if (!root->IsNull()) { 846 VisitRoot(root); 847 } 848 } 849 850 void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const 851 SHARED_REQUIRES(Locks::mutator_lock_) { 852 ConcurrentCopyingAssertToSpaceInvariantRefsVisitor visitor(collector_); 853 visitor(root->AsMirrorPtr()); 854 } 855 856 private: 857 ConcurrentCopying* const collector_; 858}; 859 860class ConcurrentCopyingAssertToSpaceInvariantObjectVisitor { 861 public: 862 explicit ConcurrentCopyingAssertToSpaceInvariantObjectVisitor(ConcurrentCopying* collector) 863 : collector_(collector) {} 864 void operator()(mirror::Object* obj) const 865 SHARED_REQUIRES(Locks::mutator_lock_) { 866 ObjectCallback(obj, collector_); 867 } 868 static void ObjectCallback(mirror::Object* obj, void *arg) 869 SHARED_REQUIRES(Locks::mutator_lock_) { 870 CHECK(obj != nullptr); 871 ConcurrentCopying* collector = reinterpret_cast<ConcurrentCopying*>(arg); 872 space::RegionSpace* region_space = collector->RegionSpace(); 873 CHECK(!region_space->IsInFromSpace(obj)) << "Scanning object " << obj << " in from space"; 874 collector->AssertToSpaceInvariant(nullptr, MemberOffset(0), obj); 875 ConcurrentCopyingAssertToSpaceInvariantFieldVisitor visitor(collector); 876 obj->VisitReferences(visitor, visitor); 877 } 878 879 private: 880 ConcurrentCopying* const collector_; 881}; 882 883class RevokeThreadLocalMarkStackCheckpoint : public Closure { 884 public: 885 RevokeThreadLocalMarkStackCheckpoint(ConcurrentCopying* concurrent_copying, 886 bool disable_weak_ref_access) 887 : concurrent_copying_(concurrent_copying), 888 disable_weak_ref_access_(disable_weak_ref_access) { 889 } 890 891 virtual void Run(Thread* thread) OVERRIDE NO_THREAD_SAFETY_ANALYSIS { 892 // Note: self is not necessarily equal to thread since thread may be suspended. 893 Thread* self = Thread::Current(); 894 CHECK(thread == self || thread->IsSuspended() || thread->GetState() == kWaitingPerformingGc) 895 << thread->GetState() << " thread " << thread << " self " << self; 896 // Revoke thread local mark stacks. 897 accounting::AtomicStack<mirror::Object>* tl_mark_stack = thread->GetThreadLocalMarkStack(); 898 if (tl_mark_stack != nullptr) { 899 MutexLock mu(self, concurrent_copying_->mark_stack_lock_); 900 concurrent_copying_->revoked_mark_stacks_.push_back(tl_mark_stack); 901 thread->SetThreadLocalMarkStack(nullptr); 902 } 903 // Disable weak ref access. 
    if (disable_weak_ref_access_) {
      thread->SetWeakRefAccessEnabled(false);
    }
    // If thread is a running mutator, then act on behalf of the garbage collector.
    // See the code in ThreadList::RunCheckpoint.
    if (thread->GetState() == kRunnable) {
      concurrent_copying_->GetBarrier().Pass(self);
    }
  }

 private:
  ConcurrentCopying* const concurrent_copying_;
  const bool disable_weak_ref_access_;
};

void ConcurrentCopying::RevokeThreadLocalMarkStacks(bool disable_weak_ref_access) {
  Thread* self = Thread::Current();
  RevokeThreadLocalMarkStackCheckpoint check_point(this, disable_weak_ref_access);
  ThreadList* thread_list = Runtime::Current()->GetThreadList();
  gc_barrier_->Init(self, 0);
  size_t barrier_count = thread_list->RunCheckpoint(&check_point);
  // If there are no threads to wait for, which implies that all the checkpoint functions have
  // finished, then there is no need to release the mutator lock.
  if (barrier_count == 0) {
    return;
  }
  Locks::mutator_lock_->SharedUnlock(self);
  {
    ScopedThreadStateChange tsc(self, kWaitingForCheckPointsToRun);
    gc_barrier_->Increment(self, barrier_count);
  }
  Locks::mutator_lock_->SharedLock(self);
}

void ConcurrentCopying::RevokeThreadLocalMarkStack(Thread* thread) {
  Thread* self = Thread::Current();
  CHECK_EQ(self, thread);
  accounting::AtomicStack<mirror::Object>* tl_mark_stack = thread->GetThreadLocalMarkStack();
  if (tl_mark_stack != nullptr) {
    CHECK(is_marking_);
    MutexLock mu(self, mark_stack_lock_);
    revoked_mark_stacks_.push_back(tl_mark_stack);
    thread->SetThreadLocalMarkStack(nullptr);
  }
}

void ConcurrentCopying::ProcessMarkStack() {
  if (kVerboseMode) {
    LOG(INFO) << "ProcessMarkStack. ";
  }
  bool empty_prev = false;
  while (true) {
    bool empty = ProcessMarkStackOnce();
    if (empty_prev && empty) {
      // Saw empty mark stack for a second time, done.
      break;
    }
    empty_prev = empty;
  }
}

bool ConcurrentCopying::ProcessMarkStackOnce() {
  Thread* self = Thread::Current();
  CHECK(thread_running_gc_ != nullptr);
  CHECK(self == thread_running_gc_);
  CHECK(self->GetThreadLocalMarkStack() == nullptr);
  size_t count = 0;
  MarkStackMode mark_stack_mode = mark_stack_mode_.LoadRelaxed();
  if (mark_stack_mode == kMarkStackModeThreadLocal) {
    // Process the thread-local mark stacks and the GC mark stack.
    count += ProcessThreadLocalMarkStacks(false);
    while (!gc_mark_stack_->IsEmpty()) {
      mirror::Object* to_ref = gc_mark_stack_->PopBack();
      ProcessMarkStackRef(to_ref);
      ++count;
    }
    gc_mark_stack_->Reset();
  } else if (mark_stack_mode == kMarkStackModeShared) {
    // Process the shared GC mark stack with a lock.
    {
      MutexLock mu(self, mark_stack_lock_);
      CHECK(revoked_mark_stacks_.empty());
    }
    while (true) {
      std::vector<mirror::Object*> refs;
      {
        // Copy refs with lock. Note the number of refs should be small.
991 MutexLock mu(self, mark_stack_lock_); 992 if (gc_mark_stack_->IsEmpty()) { 993 break; 994 } 995 for (StackReference<mirror::Object>* p = gc_mark_stack_->Begin(); 996 p != gc_mark_stack_->End(); ++p) { 997 refs.push_back(p->AsMirrorPtr()); 998 } 999 gc_mark_stack_->Reset(); 1000 } 1001 for (mirror::Object* ref : refs) { 1002 ProcessMarkStackRef(ref); 1003 ++count; 1004 } 1005 } 1006 } else { 1007 CHECK_EQ(static_cast<uint32_t>(mark_stack_mode), 1008 static_cast<uint32_t>(kMarkStackModeGcExclusive)); 1009 { 1010 MutexLock mu(self, mark_stack_lock_); 1011 CHECK(revoked_mark_stacks_.empty()); 1012 } 1013 // Process the GC mark stack in the exclusive mode. No need to take the lock. 1014 while (!gc_mark_stack_->IsEmpty()) { 1015 mirror::Object* to_ref = gc_mark_stack_->PopBack(); 1016 ProcessMarkStackRef(to_ref); 1017 ++count; 1018 } 1019 gc_mark_stack_->Reset(); 1020 } 1021 1022 // Return true if the stack was empty. 1023 return count == 0; 1024} 1025 1026size_t ConcurrentCopying::ProcessThreadLocalMarkStacks(bool disable_weak_ref_access) { 1027 // Run a checkpoint to collect all thread local mark stacks and iterate over them all. 1028 RevokeThreadLocalMarkStacks(disable_weak_ref_access); 1029 size_t count = 0; 1030 std::vector<accounting::AtomicStack<mirror::Object>*> mark_stacks; 1031 { 1032 MutexLock mu(Thread::Current(), mark_stack_lock_); 1033 // Make a copy of the mark stack vector. 1034 mark_stacks = revoked_mark_stacks_; 1035 revoked_mark_stacks_.clear(); 1036 } 1037 for (accounting::AtomicStack<mirror::Object>* mark_stack : mark_stacks) { 1038 for (StackReference<mirror::Object>* p = mark_stack->Begin(); p != mark_stack->End(); ++p) { 1039 mirror::Object* to_ref = p->AsMirrorPtr(); 1040 ProcessMarkStackRef(to_ref); 1041 ++count; 1042 } 1043 { 1044 MutexLock mu(Thread::Current(), mark_stack_lock_); 1045 if (pooled_mark_stacks_.size() >= kMarkStackPoolSize) { 1046 // The pool has enough. Delete it. 1047 delete mark_stack; 1048 } else { 1049 // Otherwise, put it into the pool for later reuse. 1050 mark_stack->Reset(); 1051 pooled_mark_stacks_.push_back(mark_stack); 1052 } 1053 } 1054 } 1055 return count; 1056} 1057 1058void ConcurrentCopying::ProcessMarkStackRef(mirror::Object* to_ref) { 1059 DCHECK(!region_space_->IsInFromSpace(to_ref)); 1060 if (kUseBakerReadBarrier) { 1061 DCHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr()) 1062 << " " << to_ref << " " << to_ref->GetReadBarrierPointer() 1063 << " is_marked=" << IsMarked(to_ref); 1064 } 1065 // Scan ref fields. 1066 Scan(to_ref); 1067 // Mark the gray ref as white or black. 1068 if (kUseBakerReadBarrier) { 1069 DCHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr()) 1070 << " " << to_ref << " " << to_ref->GetReadBarrierPointer() 1071 << " is_marked=" << IsMarked(to_ref); 1072 } 1073 if (to_ref->GetClass<kVerifyNone, kWithoutReadBarrier>()->IsTypeOfReferenceClass() && 1074 to_ref->AsReference()->GetReferent<kWithoutReadBarrier>() != nullptr && 1075 !IsInToSpace(to_ref->AsReference()->GetReferent<kWithoutReadBarrier>())) { 1076 // Leave References gray so that GetReferent() will trigger RB. 1077 CHECK(to_ref->AsReference()->IsEnqueued()) << "Left unenqueued ref gray " << to_ref; 1078 } else { 1079#ifdef USE_BAKER_OR_BROOKS_READ_BARRIER 1080 if (kUseBakerReadBarrier) { 1081 if (region_space_->IsInToSpace(to_ref)) { 1082 // If to-space, change from gray to white. 
1083 bool success = to_ref->AtomicSetReadBarrierPointer(ReadBarrier::GrayPtr(), 1084 ReadBarrier::WhitePtr()); 1085 CHECK(success) << "Must succeed as we won the race."; 1086 CHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::WhitePtr()); 1087 } else { 1088 // If non-moving space/unevac from space, change from gray 1089 // to black. We can't change gray to white because it's not 1090 // safe to use CAS if two threads change values in opposite 1091 // directions (A->B and B->A). So, we change it to black to 1092 // indicate non-moving objects that have been marked 1093 // through. Note we'd need to change from black to white 1094 // later (concurrently). 1095 bool success = to_ref->AtomicSetReadBarrierPointer(ReadBarrier::GrayPtr(), 1096 ReadBarrier::BlackPtr()); 1097 CHECK(success) << "Must succeed as we won the race."; 1098 CHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::BlackPtr()); 1099 } 1100 } 1101#else 1102 DCHECK(!kUseBakerReadBarrier); 1103#endif 1104 } 1105 if (ReadBarrier::kEnableToSpaceInvariantChecks || kIsDebugBuild) { 1106 ConcurrentCopyingAssertToSpaceInvariantObjectVisitor visitor(this); 1107 visitor(to_ref); 1108 } 1109} 1110 1111void ConcurrentCopying::SwitchToSharedMarkStackMode() { 1112 Thread* self = Thread::Current(); 1113 CHECK(thread_running_gc_ != nullptr); 1114 CHECK_EQ(self, thread_running_gc_); 1115 CHECK(self->GetThreadLocalMarkStack() == nullptr); 1116 MarkStackMode before_mark_stack_mode = mark_stack_mode_.LoadRelaxed(); 1117 CHECK_EQ(static_cast<uint32_t>(before_mark_stack_mode), 1118 static_cast<uint32_t>(kMarkStackModeThreadLocal)); 1119 mark_stack_mode_.StoreRelaxed(kMarkStackModeShared); 1120 CHECK(weak_ref_access_enabled_.LoadRelaxed()); 1121 weak_ref_access_enabled_.StoreRelaxed(false); 1122 QuasiAtomic::ThreadFenceForConstructor(); 1123 // Process the thread local mark stacks one last time after switching to the shared mark stack 1124 // mode and disable weak ref accesses. 1125 ProcessThreadLocalMarkStacks(true); 1126 if (kVerboseMode) { 1127 LOG(INFO) << "Switched to shared mark stack mode and disabled weak ref access"; 1128 } 1129} 1130 1131void ConcurrentCopying::SwitchToGcExclusiveMarkStackMode() { 1132 Thread* self = Thread::Current(); 1133 CHECK(thread_running_gc_ != nullptr); 1134 CHECK_EQ(self, thread_running_gc_); 1135 CHECK(self->GetThreadLocalMarkStack() == nullptr); 1136 MarkStackMode before_mark_stack_mode = mark_stack_mode_.LoadRelaxed(); 1137 CHECK_EQ(static_cast<uint32_t>(before_mark_stack_mode), 1138 static_cast<uint32_t>(kMarkStackModeShared)); 1139 mark_stack_mode_.StoreRelaxed(kMarkStackModeGcExclusive); 1140 QuasiAtomic::ThreadFenceForConstructor(); 1141 if (kVerboseMode) { 1142 LOG(INFO) << "Switched to GC exclusive mark stack mode"; 1143 } 1144} 1145 1146void ConcurrentCopying::CheckEmptyMarkStack() { 1147 Thread* self = Thread::Current(); 1148 CHECK(thread_running_gc_ != nullptr); 1149 CHECK_EQ(self, thread_running_gc_); 1150 CHECK(self->GetThreadLocalMarkStack() == nullptr); 1151 MarkStackMode mark_stack_mode = mark_stack_mode_.LoadRelaxed(); 1152 if (mark_stack_mode == kMarkStackModeThreadLocal) { 1153 // Thread-local mark stack mode. 
1154 RevokeThreadLocalMarkStacks(false); 1155 MutexLock mu(Thread::Current(), mark_stack_lock_); 1156 if (!revoked_mark_stacks_.empty()) { 1157 for (accounting::AtomicStack<mirror::Object>* mark_stack : revoked_mark_stacks_) { 1158 while (!mark_stack->IsEmpty()) { 1159 mirror::Object* obj = mark_stack->PopBack(); 1160 if (kUseBakerReadBarrier) { 1161 mirror::Object* rb_ptr = obj->GetReadBarrierPointer(); 1162 LOG(INFO) << "On mark queue : " << obj << " " << PrettyTypeOf(obj) << " rb_ptr=" << rb_ptr 1163 << " is_marked=" << IsMarked(obj); 1164 } else { 1165 LOG(INFO) << "On mark queue : " << obj << " " << PrettyTypeOf(obj) 1166 << " is_marked=" << IsMarked(obj); 1167 } 1168 } 1169 } 1170 LOG(FATAL) << "mark stack is not empty"; 1171 } 1172 } else { 1173 // Shared, GC-exclusive, or off. 1174 MutexLock mu(Thread::Current(), mark_stack_lock_); 1175 CHECK(gc_mark_stack_->IsEmpty()); 1176 CHECK(revoked_mark_stacks_.empty()); 1177 } 1178} 1179 1180void ConcurrentCopying::SweepSystemWeaks(Thread* self) { 1181 TimingLogger::ScopedTiming split("SweepSystemWeaks", GetTimings()); 1182 ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_); 1183 Runtime::Current()->SweepSystemWeaks(this); 1184} 1185 1186void ConcurrentCopying::Sweep(bool swap_bitmaps) { 1187 { 1188 TimingLogger::ScopedTiming t("MarkStackAsLive", GetTimings()); 1189 accounting::ObjectStack* live_stack = heap_->GetLiveStack(); 1190 if (kEnableFromSpaceAccountingCheck) { 1191 CHECK_GE(live_stack_freeze_size_, live_stack->Size()); 1192 } 1193 heap_->MarkAllocStackAsLive(live_stack); 1194 live_stack->Reset(); 1195 } 1196 CheckEmptyMarkStack(); 1197 TimingLogger::ScopedTiming split("Sweep", GetTimings()); 1198 for (const auto& space : GetHeap()->GetContinuousSpaces()) { 1199 if (space->IsContinuousMemMapAllocSpace()) { 1200 space::ContinuousMemMapAllocSpace* alloc_space = space->AsContinuousMemMapAllocSpace(); 1201 if (space == region_space_ || immune_region_.ContainsSpace(space)) { 1202 continue; 1203 } 1204 TimingLogger::ScopedTiming split2( 1205 alloc_space->IsZygoteSpace() ? "SweepZygoteSpace" : "SweepAllocSpace", GetTimings()); 1206 RecordFree(alloc_space->Sweep(swap_bitmaps)); 1207 } 1208 } 1209 SweepLargeObjects(swap_bitmaps); 1210} 1211 1212void ConcurrentCopying::SweepLargeObjects(bool swap_bitmaps) { 1213 TimingLogger::ScopedTiming split("SweepLargeObjects", GetTimings()); 1214 RecordFreeLOS(heap_->GetLargeObjectsSpace()->Sweep(swap_bitmaps)); 1215} 1216 1217class ConcurrentCopyingClearBlackPtrsVisitor { 1218 public: 1219 explicit ConcurrentCopyingClearBlackPtrsVisitor(ConcurrentCopying* cc) 1220 : collector_(cc) {} 1221#ifndef USE_BAKER_OR_BROOKS_READ_BARRIER 1222 NO_RETURN 1223#endif 1224 void operator()(mirror::Object* obj) const SHARED_REQUIRES(Locks::mutator_lock_) 1225 SHARED_REQUIRES(Locks::heap_bitmap_lock_) { 1226 DCHECK(obj != nullptr); 1227 DCHECK(collector_->heap_->GetMarkBitmap()->Test(obj)) << obj; 1228 DCHECK_EQ(obj->GetReadBarrierPointer(), ReadBarrier::BlackPtr()) << obj; 1229 obj->AtomicSetReadBarrierPointer(ReadBarrier::BlackPtr(), ReadBarrier::WhitePtr()); 1230 DCHECK_EQ(obj->GetReadBarrierPointer(), ReadBarrier::WhitePtr()) << obj; 1231 } 1232 1233 private: 1234 ConcurrentCopying* const collector_; 1235}; 1236 1237// Clear the black ptrs in non-moving objects back to white. 
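// In rough outline, the read barrier pointer ("color") transitions used in this file are:
//   White -> Gray   when an object is newly marked and pushed onto a mark stack
//                   (e.g. in ConcurrentCopyingImmuneSpaceObjVisitor),
//   Gray  -> White  in ProcessMarkStackRef() for to-space (region space) objects once scanned,
//   Gray  -> Black  in ProcessMarkStackRef() for non-moving/unevac from-space objects once scanned,
//   Black -> White  here and in ConcurrentCopyingComputeUnevacFromSpaceLiveRatioVisitor below,
// with References whose referent is not yet in the to-space left gray so that GetReferent()
// still triggers a read barrier.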
1238void ConcurrentCopying::ClearBlackPtrs() { 1239 CHECK(kUseBakerReadBarrier); 1240 TimingLogger::ScopedTiming split("ClearBlackPtrs", GetTimings()); 1241 ConcurrentCopyingClearBlackPtrsVisitor visitor(this); 1242 for (auto& space : heap_->GetContinuousSpaces()) { 1243 if (space == region_space_) { 1244 continue; 1245 } 1246 accounting::ContinuousSpaceBitmap* mark_bitmap = space->GetMarkBitmap(); 1247 if (kVerboseMode) { 1248 LOG(INFO) << "ClearBlackPtrs: " << *space << " bitmap: " << *mark_bitmap; 1249 } 1250 mark_bitmap->VisitMarkedRange(reinterpret_cast<uintptr_t>(space->Begin()), 1251 reinterpret_cast<uintptr_t>(space->Limit()), 1252 visitor); 1253 } 1254 space::LargeObjectSpace* large_object_space = heap_->GetLargeObjectsSpace(); 1255 large_object_space->GetMarkBitmap()->VisitMarkedRange( 1256 reinterpret_cast<uintptr_t>(large_object_space->Begin()), 1257 reinterpret_cast<uintptr_t>(large_object_space->End()), 1258 visitor); 1259 // Objects on the allocation stack? 1260 if (ReadBarrier::kEnableReadBarrierInvariantChecks || kIsDebugBuild) { 1261 size_t count = GetAllocationStack()->Size(); 1262 auto* it = GetAllocationStack()->Begin(); 1263 auto* end = GetAllocationStack()->End(); 1264 for (size_t i = 0; i < count; ++i, ++it) { 1265 CHECK_LT(it, end); 1266 mirror::Object* obj = it->AsMirrorPtr(); 1267 if (obj != nullptr) { 1268 // Must have been cleared above. 1269 CHECK_EQ(obj->GetReadBarrierPointer(), ReadBarrier::WhitePtr()) << obj; 1270 } 1271 } 1272 } 1273} 1274 1275void ConcurrentCopying::ReclaimPhase() { 1276 TimingLogger::ScopedTiming split("ReclaimPhase", GetTimings()); 1277 if (kVerboseMode) { 1278 LOG(INFO) << "GC ReclaimPhase"; 1279 } 1280 Thread* self = Thread::Current(); 1281 1282 { 1283 // Double-check that the mark stack is empty. 1284 // Note: need to set this after VerifyNoFromSpaceRef(). 1285 is_asserting_to_space_invariant_ = false; 1286 QuasiAtomic::ThreadFenceForConstructor(); 1287 if (kVerboseMode) { 1288 LOG(INFO) << "Issue an empty check point. "; 1289 } 1290 IssueEmptyCheckpoint(); 1291 // Disable the check. 1292 is_mark_stack_push_disallowed_.StoreSequentiallyConsistent(0); 1293 CheckEmptyMarkStack(); 1294 } 1295 1296 { 1297 // Record freed objects. 1298 TimingLogger::ScopedTiming split2("RecordFree", GetTimings()); 1299 // Don't include thread-locals that are in the to-space. 
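    // Hypothetical worked example (numbers made up for illustration): if the evacuated from-space
    // held 64 MB in 200000 objects, the unevacuated from-space another 16 MB in 50000 objects
    // (so 80 MB / 250000 objects at the flip), and the copying pass moved 8 MB / 30000 objects to
    // the to-space, then
    //   freed_bytes   = from_bytes   - to_bytes   = 64 MB  - 8 MB  = 56 MB
    //   freed_objects = from_objects - to_objects = 200000 - 30000 = 170000
    // i.e. the unevac from-space numbers only feed the accounting CHECKs below, not RecordFree().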
1300 uint64_t from_bytes = region_space_->GetBytesAllocatedInFromSpace(); 1301 uint64_t from_objects = region_space_->GetObjectsAllocatedInFromSpace(); 1302 uint64_t unevac_from_bytes = region_space_->GetBytesAllocatedInUnevacFromSpace(); 1303 uint64_t unevac_from_objects = region_space_->GetObjectsAllocatedInUnevacFromSpace(); 1304 uint64_t to_bytes = bytes_moved_.LoadSequentiallyConsistent(); 1305 uint64_t to_objects = objects_moved_.LoadSequentiallyConsistent(); 1306 if (kEnableFromSpaceAccountingCheck) { 1307 CHECK_EQ(from_space_num_objects_at_first_pause_, from_objects + unevac_from_objects); 1308 CHECK_EQ(from_space_num_bytes_at_first_pause_, from_bytes + unevac_from_bytes); 1309 } 1310 CHECK_LE(to_objects, from_objects); 1311 CHECK_LE(to_bytes, from_bytes); 1312 int64_t freed_bytes = from_bytes - to_bytes; 1313 int64_t freed_objects = from_objects - to_objects; 1314 if (kVerboseMode) { 1315 LOG(INFO) << "RecordFree:" 1316 << " from_bytes=" << from_bytes << " from_objects=" << from_objects 1317 << " unevac_from_bytes=" << unevac_from_bytes << " unevac_from_objects=" << unevac_from_objects 1318 << " to_bytes=" << to_bytes << " to_objects=" << to_objects 1319 << " freed_bytes=" << freed_bytes << " freed_objects=" << freed_objects 1320 << " from_space size=" << region_space_->FromSpaceSize() 1321 << " unevac_from_space size=" << region_space_->UnevacFromSpaceSize() 1322 << " to_space size=" << region_space_->ToSpaceSize(); 1323 LOG(INFO) << "(before) num_bytes_allocated=" << heap_->num_bytes_allocated_.LoadSequentiallyConsistent(); 1324 } 1325 RecordFree(ObjectBytePair(freed_objects, freed_bytes)); 1326 if (kVerboseMode) { 1327 LOG(INFO) << "(after) num_bytes_allocated=" << heap_->num_bytes_allocated_.LoadSequentiallyConsistent(); 1328 } 1329 } 1330 1331 { 1332 TimingLogger::ScopedTiming split3("ComputeUnevacFromSpaceLiveRatio", GetTimings()); 1333 ComputeUnevacFromSpaceLiveRatio(); 1334 } 1335 1336 { 1337 TimingLogger::ScopedTiming split4("ClearFromSpace", GetTimings()); 1338 region_space_->ClearFromSpace(); 1339 } 1340 1341 { 1342 WriterMutexLock mu(self, *Locks::heap_bitmap_lock_); 1343 if (kUseBakerReadBarrier) { 1344 ClearBlackPtrs(); 1345 } 1346 Sweep(false); 1347 SwapBitmaps(); 1348 heap_->UnBindBitmaps(); 1349 1350 // Remove bitmaps for the immune spaces. 1351 while (!cc_bitmaps_.empty()) { 1352 accounting::ContinuousSpaceBitmap* cc_bitmap = cc_bitmaps_.back(); 1353 cc_heap_bitmap_->RemoveContinuousSpaceBitmap(cc_bitmap); 1354 delete cc_bitmap; 1355 cc_bitmaps_.pop_back(); 1356 } 1357 region_space_bitmap_ = nullptr; 1358 } 1359 1360 CheckEmptyMarkStack(); 1361 1362 if (kVerboseMode) { 1363 LOG(INFO) << "GC end of ReclaimPhase"; 1364 } 1365} 1366 1367class ConcurrentCopyingComputeUnevacFromSpaceLiveRatioVisitor { 1368 public: 1369 explicit ConcurrentCopyingComputeUnevacFromSpaceLiveRatioVisitor(ConcurrentCopying* cc) 1370 : collector_(cc) {} 1371 void operator()(mirror::Object* ref) const SHARED_REQUIRES(Locks::mutator_lock_) 1372 SHARED_REQUIRES(Locks::heap_bitmap_lock_) { 1373 DCHECK(ref != nullptr); 1374 DCHECK(collector_->region_space_bitmap_->Test(ref)) << ref; 1375 DCHECK(collector_->region_space_->IsInUnevacFromSpace(ref)) << ref; 1376 if (kUseBakerReadBarrier) { 1377 DCHECK_EQ(ref->GetReadBarrierPointer(), ReadBarrier::BlackPtr()) << ref; 1378 // Clear the black ptr. 
1379 ref->AtomicSetReadBarrierPointer(ReadBarrier::BlackPtr(), ReadBarrier::WhitePtr()); 1380 DCHECK_EQ(ref->GetReadBarrierPointer(), ReadBarrier::WhitePtr()) << ref; 1381 } 1382 size_t obj_size = ref->SizeOf(); 1383 size_t alloc_size = RoundUp(obj_size, space::RegionSpace::kAlignment); 1384 collector_->region_space_->AddLiveBytes(ref, alloc_size); 1385 } 1386 1387 private: 1388 ConcurrentCopying* const collector_; 1389}; 1390 1391// Compute how much live objects are left in regions. 1392void ConcurrentCopying::ComputeUnevacFromSpaceLiveRatio() { 1393 region_space_->AssertAllRegionLiveBytesZeroOrCleared(); 1394 ConcurrentCopyingComputeUnevacFromSpaceLiveRatioVisitor visitor(this); 1395 region_space_bitmap_->VisitMarkedRange(reinterpret_cast<uintptr_t>(region_space_->Begin()), 1396 reinterpret_cast<uintptr_t>(region_space_->Limit()), 1397 visitor); 1398} 1399 1400// Assert the to-space invariant. 1401void ConcurrentCopying::AssertToSpaceInvariant(mirror::Object* obj, MemberOffset offset, 1402 mirror::Object* ref) { 1403 CHECK(heap_->collector_type_ == kCollectorTypeCC) << static_cast<size_t>(heap_->collector_type_); 1404 if (is_asserting_to_space_invariant_) { 1405 if (region_space_->IsInToSpace(ref)) { 1406 // OK. 1407 return; 1408 } else if (region_space_->IsInUnevacFromSpace(ref)) { 1409 CHECK(region_space_bitmap_->Test(ref)) << ref; 1410 } else if (region_space_->IsInFromSpace(ref)) { 1411 // Not OK. Do extra logging. 1412 if (obj != nullptr) { 1413 LogFromSpaceRefHolder(obj, offset); 1414 } 1415 ref->GetLockWord(false).Dump(LOG(INTERNAL_FATAL)); 1416 CHECK(false) << "Found from-space ref " << ref << " " << PrettyTypeOf(ref); 1417 } else { 1418 AssertToSpaceInvariantInNonMovingSpace(obj, ref); 1419 } 1420 } 1421} 1422 1423class RootPrinter { 1424 public: 1425 RootPrinter() { } 1426 1427 template <class MirrorType> 1428 ALWAYS_INLINE void VisitRootIfNonNull(mirror::CompressedReference<MirrorType>* root) 1429 SHARED_REQUIRES(Locks::mutator_lock_) { 1430 if (!root->IsNull()) { 1431 VisitRoot(root); 1432 } 1433 } 1434 1435 template <class MirrorType> 1436 void VisitRoot(mirror::Object** root) 1437 SHARED_REQUIRES(Locks::mutator_lock_) { 1438 LOG(INTERNAL_FATAL) << "root=" << root << " ref=" << *root; 1439 } 1440 1441 template <class MirrorType> 1442 void VisitRoot(mirror::CompressedReference<MirrorType>* root) 1443 SHARED_REQUIRES(Locks::mutator_lock_) { 1444 LOG(INTERNAL_FATAL) << "root=" << root << " ref=" << root->AsMirrorPtr(); 1445 } 1446}; 1447 1448void ConcurrentCopying::AssertToSpaceInvariant(GcRootSource* gc_root_source, 1449 mirror::Object* ref) { 1450 CHECK(heap_->collector_type_ == kCollectorTypeCC) << static_cast<size_t>(heap_->collector_type_); 1451 if (is_asserting_to_space_invariant_) { 1452 if (region_space_->IsInToSpace(ref)) { 1453 // OK. 1454 return; 1455 } else if (region_space_->IsInUnevacFromSpace(ref)) { 1456 CHECK(region_space_bitmap_->Test(ref)) << ref; 1457 } else if (region_space_->IsInFromSpace(ref)) { 1458 // Not OK. Do extra logging. 1459 if (gc_root_source == nullptr) { 1460 // No info. 
1461       } else if (gc_root_source->HasArtField()) {
1462         ArtField* field = gc_root_source->GetArtField();
1463         LOG(INTERNAL_FATAL) << "gc root in field " << field << " " << PrettyField(field);
1464         RootPrinter root_printer;
1465         field->VisitRoots(root_printer);
1466       } else if (gc_root_source->HasArtMethod()) {
1467         ArtMethod* method = gc_root_source->GetArtMethod();
1468         LOG(INTERNAL_FATAL) << "gc root in method " << method << " " << PrettyMethod(method);
1469         RootPrinter root_printer;
1470         method->VisitRoots(root_printer);
1471       }
1472       ref->GetLockWord(false).Dump(LOG(INTERNAL_FATAL));
1473       region_space_->DumpNonFreeRegions(LOG(INTERNAL_FATAL));
1474       PrintFileToLog("/proc/self/maps", LogSeverity::INTERNAL_FATAL);
1475       MemMap::DumpMaps(LOG(INTERNAL_FATAL), true);
1476       CHECK(false) << "Found from-space ref " << ref << " " << PrettyTypeOf(ref);
1477     } else {
1478       AssertToSpaceInvariantInNonMovingSpace(nullptr, ref);
1479     }
1480   }
1481 }
1482 
1483 void ConcurrentCopying::LogFromSpaceRefHolder(mirror::Object* obj, MemberOffset offset) {
1484   if (kUseBakerReadBarrier) {
1485     LOG(INFO) << "holder=" << obj << " " << PrettyTypeOf(obj)
1486         << " holder rb_ptr=" << obj->GetReadBarrierPointer();
1487   } else {
1488     LOG(INFO) << "holder=" << obj << " " << PrettyTypeOf(obj);
1489   }
1490   if (region_space_->IsInFromSpace(obj)) {
1491     LOG(INFO) << "holder is in the from-space.";
1492   } else if (region_space_->IsInToSpace(obj)) {
1493     LOG(INFO) << "holder is in the to-space.";
1494   } else if (region_space_->IsInUnevacFromSpace(obj)) {
1495     LOG(INFO) << "holder is in the unevac from-space.";
1496     if (region_space_bitmap_->Test(obj)) {
1497       LOG(INFO) << "holder is marked in the region space bitmap.";
1498     } else {
1499       LOG(INFO) << "holder is not marked in the region space bitmap.";
1500     }
1501   } else {
1502     // In a non-moving space.
1503     if (immune_region_.ContainsObject(obj)) {
1504       LOG(INFO) << "holder is in the image or the zygote space.";
1505       accounting::ContinuousSpaceBitmap* cc_bitmap =
1506           cc_heap_bitmap_->GetContinuousSpaceBitmap(obj);
1507       CHECK(cc_bitmap != nullptr)
1508           << "An immune space object must have a bitmap.";
1509       if (cc_bitmap->Test(obj)) {
1510         LOG(INFO) << "holder is marked in the bit map.";
1511       } else {
1512         LOG(INFO) << "holder is NOT marked in the bit map.";
1513       }
1514     } else {
1515       LOG(INFO) << "holder is in a non-moving (or main) space.";
1516       accounting::ContinuousSpaceBitmap* mark_bitmap =
1517           heap_mark_bitmap_->GetContinuousSpaceBitmap(obj);
1518       accounting::LargeObjectBitmap* los_bitmap =
1519           heap_mark_bitmap_->GetLargeObjectBitmap(obj);
1520       CHECK(los_bitmap != nullptr) << "LOS bitmap covers the entire address range";
1521       bool is_los = mark_bitmap == nullptr;
1522       if (!is_los && mark_bitmap->Test(obj)) {
1523         LOG(INFO) << "holder is marked in the mark bit map.";
1524       } else if (is_los && los_bitmap->Test(obj)) {
1525         LOG(INFO) << "holder is marked in the los bit map.";
1526       } else {
1527         // If ref is on the allocation stack, then it is considered
1528         // marked/alive (but not necessarily on the live stack).
1529         if (IsOnAllocStack(obj)) {
1530           LOG(INFO) << "holder is on the alloc stack.";
1531         } else {
1532           LOG(INFO) << "holder is not marked or on the alloc stack.";
1533         }
1534       }
1535     }
1536   }
1537   LOG(INFO) << "offset=" << offset.SizeValue();
1538 }
1539 
1540 void ConcurrentCopying::AssertToSpaceInvariantInNonMovingSpace(mirror::Object* obj,
1541     mirror::Object* ref) {
1542   // In a non-moving space. Check that the ref is marked.
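  // Immune (image/zygote) spaces are tracked with the collector-local cc_heap_bitmap_;
  // other non-moving spaces rely on the heap mark bitmap, the LOS bitmap, or the
  // allocation stack as evidence that the ref is marked/alive.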
1543 if (immune_region_.ContainsObject(ref)) { 1544 accounting::ContinuousSpaceBitmap* cc_bitmap = 1545 cc_heap_bitmap_->GetContinuousSpaceBitmap(ref); 1546 CHECK(cc_bitmap != nullptr) 1547 << "An immune space ref must have a bitmap. " << ref; 1548 if (kUseBakerReadBarrier) { 1549 CHECK(cc_bitmap->Test(ref)) 1550 << "Unmarked immune space ref. obj=" << obj << " rb_ptr=" 1551 << obj->GetReadBarrierPointer() << " ref=" << ref; 1552 } else { 1553 CHECK(cc_bitmap->Test(ref)) 1554 << "Unmarked immune space ref. obj=" << obj << " ref=" << ref; 1555 } 1556 } else { 1557 accounting::ContinuousSpaceBitmap* mark_bitmap = 1558 heap_mark_bitmap_->GetContinuousSpaceBitmap(ref); 1559 accounting::LargeObjectBitmap* los_bitmap = 1560 heap_mark_bitmap_->GetLargeObjectBitmap(ref); 1561 CHECK(los_bitmap != nullptr) << "LOS bitmap covers the entire address range"; 1562 bool is_los = mark_bitmap == nullptr; 1563 if ((!is_los && mark_bitmap->Test(ref)) || 1564 (is_los && los_bitmap->Test(ref))) { 1565 // OK. 1566 } else { 1567 // If ref is on the allocation stack, then it may not be 1568 // marked live, but considered marked/alive (but not 1569 // necessarily on the live stack). 1570 CHECK(IsOnAllocStack(ref)) << "Unmarked ref that's not on the allocation stack. " 1571 << "obj=" << obj << " ref=" << ref; 1572 } 1573 } 1574} 1575 1576// Used to scan ref fields of an object. 1577class ConcurrentCopyingRefFieldsVisitor { 1578 public: 1579 explicit ConcurrentCopyingRefFieldsVisitor(ConcurrentCopying* collector) 1580 : collector_(collector) {} 1581 1582 void operator()(mirror::Object* obj, MemberOffset offset, bool /* is_static */) 1583 const ALWAYS_INLINE SHARED_REQUIRES(Locks::mutator_lock_) 1584 SHARED_REQUIRES(Locks::heap_bitmap_lock_) { 1585 collector_->Process(obj, offset); 1586 } 1587 1588 void operator()(mirror::Class* klass, mirror::Reference* ref) const 1589 SHARED_REQUIRES(Locks::mutator_lock_) ALWAYS_INLINE { 1590 CHECK(klass->IsTypeOfReferenceClass()); 1591 collector_->DelayReferenceReferent(klass, ref); 1592 } 1593 1594 void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const 1595 SHARED_REQUIRES(Locks::mutator_lock_) { 1596 if (!root->IsNull()) { 1597 VisitRoot(root); 1598 } 1599 } 1600 1601 void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const 1602 SHARED_REQUIRES(Locks::mutator_lock_) { 1603 collector_->MarkRoot(root); 1604 } 1605 1606 private: 1607 ConcurrentCopying* const collector_; 1608}; 1609 1610// Scan ref fields of an object. 1611void ConcurrentCopying::Scan(mirror::Object* to_ref) { 1612 DCHECK(!region_space_->IsInFromSpace(to_ref)); 1613 ConcurrentCopyingRefFieldsVisitor visitor(this); 1614 to_ref->VisitReferences(visitor, visitor); 1615} 1616 1617// Process a field. 1618inline void ConcurrentCopying::Process(mirror::Object* obj, MemberOffset offset) { 1619 mirror::Object* ref = obj->GetFieldObject< 1620 mirror::Object, kVerifyNone, kWithoutReadBarrier, false>(offset); 1621 if (ref == nullptr || region_space_->IsInToSpace(ref)) { 1622 return; 1623 } 1624 mirror::Object* to_ref = Mark(ref); 1625 if (to_ref == ref) { 1626 return; 1627 } 1628 // This may fail if the mutator writes to the field at the same time. But it's ok. 1629 mirror::Object* expected_ref = ref; 1630 mirror::Object* new_ref = to_ref; 1631 do { 1632 if (expected_ref != 1633 obj->GetFieldObject<mirror::Object, kVerifyNone, kWithoutReadBarrier, false>(offset)) { 1634 // It was updated by the mutator. 
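      // A concurrent store to this field by a mutator went through the read barrier, so
      // the value it wrote should already be a to-space reference; abandoning the update
      // here is expected to be safe.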
1635       break;
1636     }
1637   } while (!obj->CasFieldWeakSequentiallyConsistentObjectWithoutWriteBarrier<
1638       false, false, kVerifyNone>(offset, expected_ref, new_ref));
1639 }
1640 
1641 // Process some roots.
1642 void ConcurrentCopying::VisitRoots(
1643     mirror::Object*** roots, size_t count, const RootInfo& info ATTRIBUTE_UNUSED) {
1644   for (size_t i = 0; i < count; ++i) {
1645     mirror::Object** root = roots[i];
1646     mirror::Object* ref = *root;
1647     if (ref == nullptr || region_space_->IsInToSpace(ref)) {
1648       continue;
1649     }
1650     mirror::Object* to_ref = Mark(ref);
1651     if (to_ref == ref) {
1652       continue;
1653     }
1654     Atomic<mirror::Object*>* addr = reinterpret_cast<Atomic<mirror::Object*>*>(root);
1655     mirror::Object* expected_ref = ref;
1656     mirror::Object* new_ref = to_ref;
1657     do {
1658       if (expected_ref != addr->LoadRelaxed()) {
1659         // It was updated by the mutator.
1660         break;
1661       }
1662     } while (!addr->CompareExchangeWeakSequentiallyConsistent(expected_ref, new_ref));
1663   }
1664 }
1665 
1666 void ConcurrentCopying::MarkRoot(mirror::CompressedReference<mirror::Object>* root) {
1667   DCHECK(!root->IsNull());
1668   mirror::Object* const ref = root->AsMirrorPtr();
1669   if (region_space_->IsInToSpace(ref)) {
1670     return;
1671   }
1672   mirror::Object* to_ref = Mark(ref);
1673   if (to_ref != ref) {
1674     auto* addr = reinterpret_cast<Atomic<mirror::CompressedReference<mirror::Object>>*>(root);
1675     auto expected_ref = mirror::CompressedReference<mirror::Object>::FromMirrorPtr(ref);
1676     auto new_ref = mirror::CompressedReference<mirror::Object>::FromMirrorPtr(to_ref);
1677     // If the CAS fails, then it was updated by the mutator.
1678     do {
1679       if (ref != addr->LoadRelaxed().AsMirrorPtr()) {
1680         // It was updated by the mutator.
1681         break;
1682       }
1683     } while (!addr->CompareExchangeWeakSequentiallyConsistent(expected_ref, new_ref));
1684   }
1685 }
1686 
1687 void ConcurrentCopying::VisitRoots(
1688     mirror::CompressedReference<mirror::Object>** roots, size_t count,
1689     const RootInfo& info ATTRIBUTE_UNUSED) {
1690   for (size_t i = 0; i < count; ++i) {
1691     mirror::CompressedReference<mirror::Object>* const root = roots[i];
1692     if (!root->IsNull()) {
1693       MarkRoot(root);
1694     }
1695   }
1696 }
1697 
1698 // Fill the given memory block with a dummy object. Used to fill in a
1699 // copy of an object that was lost in a race.
1700 void ConcurrentCopying::FillWithDummyObject(mirror::Object* dummy_obj, size_t byte_size) {
1701   CHECK_ALIGNED(byte_size, kObjectAlignment);
1702   memset(dummy_obj, 0, byte_size);
1703   mirror::Class* int_array_class = mirror::IntArray::GetArrayClass();
1704   CHECK(int_array_class != nullptr);
1705   AssertToSpaceInvariant(nullptr, MemberOffset(0), int_array_class);
1706   size_t component_size = int_array_class->GetComponentSize();
1707   CHECK_EQ(component_size, sizeof(int32_t));
1708   size_t data_offset = mirror::Array::DataOffset(component_size).SizeValue();
1709   if (data_offset > byte_size) {
1710     // An int array is too big. Use java.lang.Object.
1711     mirror::Class* java_lang_Object = WellKnownClasses::ToClass(WellKnownClasses::java_lang_Object);
1712     AssertToSpaceInvariant(nullptr, MemberOffset(0), java_lang_Object);
1713     CHECK_EQ(byte_size, java_lang_Object->GetObjectSize());
1714     dummy_obj->SetClass(java_lang_Object);
1715     CHECK_EQ(byte_size, dummy_obj->SizeOf());
1716   } else {
1717     // Use an int array.
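    // Pick the array length so that the resulting int[] fills exactly byte_size bytes:
    // data_offset bytes of header plus length elements of component_size bytes each.
    // The CHECKs below verify the exact fit.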
1718     dummy_obj->SetClass(int_array_class);
1719     CHECK(dummy_obj->IsArrayInstance());
1720     int32_t length = (byte_size - data_offset) / component_size;
1721     dummy_obj->AsArray()->SetLength(length);
1722     CHECK_EQ(dummy_obj->AsArray()->GetLength(), length)
1723         << "byte_size=" << byte_size << " length=" << length
1724         << " component_size=" << component_size << " data_offset=" << data_offset;
1725     CHECK_EQ(byte_size, dummy_obj->SizeOf())
1726         << "byte_size=" << byte_size << " length=" << length
1727         << " component_size=" << component_size << " data_offset=" << data_offset;
1728   }
1729 }
1730 
1731 // Reuse the memory blocks that were copies of objects lost in the race.
1732 mirror::Object* ConcurrentCopying::AllocateInSkippedBlock(size_t alloc_size) {
1733   // Try to reuse the blocks that were unused due to CAS failures.
1734   CHECK_ALIGNED(alloc_size, space::RegionSpace::kAlignment);
1735   Thread* self = Thread::Current();
1736   size_t min_object_size = RoundUp(sizeof(mirror::Object), space::RegionSpace::kAlignment);
1737   MutexLock mu(self, skipped_blocks_lock_);
1738   auto it = skipped_blocks_map_.lower_bound(alloc_size);
1739   if (it == skipped_blocks_map_.end()) {
1740     // Not found.
1741     return nullptr;
1742   }
1743   {
1744     size_t byte_size = it->first;
1745     CHECK_GE(byte_size, alloc_size);
1746     if (byte_size > alloc_size && byte_size - alloc_size < min_object_size) {
1747       // If the remainder would be too small for a dummy object, retry with a larger request size.
1748       it = skipped_blocks_map_.lower_bound(alloc_size + min_object_size);
1749       if (it == skipped_blocks_map_.end()) {
1750         // Not found.
1751         return nullptr;
1752       }
1753       CHECK_ALIGNED(it->first - alloc_size, space::RegionSpace::kAlignment);
1754       CHECK_GE(it->first - alloc_size, min_object_size)
1755           << "byte_size=" << byte_size << " it->first=" << it->first << " alloc_size=" << alloc_size;
1756     }
1757   }
1758   // Found a block.
1759   CHECK(it != skipped_blocks_map_.end());
1760   size_t byte_size = it->first;
1761   uint8_t* addr = it->second;
1762   CHECK_GE(byte_size, alloc_size);
1763   CHECK(region_space_->IsInToSpace(reinterpret_cast<mirror::Object*>(addr)));
1764   CHECK_ALIGNED(byte_size, space::RegionSpace::kAlignment);
1765   if (kVerboseMode) {
1766     LOG(INFO) << "Reusing skipped bytes : " << reinterpret_cast<void*>(addr) << ", " << byte_size;
1767   }
1768   skipped_blocks_map_.erase(it);
1769   memset(addr, 0, byte_size);
1770   if (byte_size > alloc_size) {
1771     // Return the remainder to the map.
1772     CHECK_ALIGNED(byte_size - alloc_size, space::RegionSpace::kAlignment);
1773     CHECK_GE(byte_size - alloc_size, min_object_size);
1774     FillWithDummyObject(reinterpret_cast<mirror::Object*>(addr + alloc_size),
1775         byte_size - alloc_size);
1776     CHECK(region_space_->IsInToSpace(reinterpret_cast<mirror::Object*>(addr + alloc_size)));
1777     skipped_blocks_map_.insert(std::make_pair(byte_size - alloc_size, addr + alloc_size));
1778   }
1779   return reinterpret_cast<mirror::Object*>(addr);
1780 }
1781 
1782 mirror::Object* ConcurrentCopying::Copy(mirror::Object* from_ref) {
1783   DCHECK(region_space_->IsInFromSpace(from_ref));
1784   // No read barrier to avoid nested RB that might violate the to-space
1785   // invariant. Note that from_ref is a from-space ref so the SizeOf()
1786   // call will access the from-space meta objects, but that is OK and necessary.
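  // Reading the size here is presumably safe even if a forwarding address is later
  // installed in from_ref's lock word, since the class pointer that SizeOf() consults
  // stays intact in the from-space copy.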
1787   size_t obj_size = from_ref->SizeOf<kDefaultVerifyFlags, kWithoutReadBarrier>();
1788   size_t region_space_alloc_size = RoundUp(obj_size, space::RegionSpace::kAlignment);
1789   size_t region_space_bytes_allocated = 0U;
1790   size_t non_moving_space_bytes_allocated = 0U;
1791   size_t bytes_allocated = 0U;
1792   size_t dummy;
1793   mirror::Object* to_ref = region_space_->AllocNonvirtual<true>(
1794       region_space_alloc_size, &region_space_bytes_allocated, nullptr, &dummy);
1795   bytes_allocated = region_space_bytes_allocated;
1796   if (to_ref != nullptr) {
1797     DCHECK_EQ(region_space_alloc_size, region_space_bytes_allocated);
1798   }
1799   bool fall_back_to_non_moving = false;
1800   if (UNLIKELY(to_ref == nullptr)) {
1801     // Failed to allocate in the region space. Try the skipped blocks.
1802     to_ref = AllocateInSkippedBlock(region_space_alloc_size);
1803     if (to_ref != nullptr) {
1804       // Succeeded to allocate in a skipped block.
1805       if (heap_->use_tlab_) {
1806         // This is necessary for the tlab case as it's not accounted in the space.
1807         region_space_->RecordAlloc(to_ref);
1808       }
1809       bytes_allocated = region_space_alloc_size;
1810     } else {
1811       // Fall back to the non-moving space.
1812       fall_back_to_non_moving = true;
1813       if (kVerboseMode) {
1814         LOG(INFO) << "Out of memory in the to-space. Fall back to non-moving. skipped_bytes="
1815             << to_space_bytes_skipped_.LoadSequentiallyConsistent()
1816             << " skipped_objects=" << to_space_objects_skipped_.LoadSequentiallyConsistent();
1817       }
1818       fall_back_to_non_moving = true;
1819       to_ref = heap_->non_moving_space_->Alloc(Thread::Current(), obj_size,
1820           &non_moving_space_bytes_allocated, nullptr, &dummy);
1821       CHECK(to_ref != nullptr) << "Fall-back non-moving space allocation failed";
1822       bytes_allocated = non_moving_space_bytes_allocated;
1823       // Mark it in the mark bitmap.
1824       accounting::ContinuousSpaceBitmap* mark_bitmap =
1825           heap_mark_bitmap_->GetContinuousSpaceBitmap(to_ref);
1826       CHECK(mark_bitmap != nullptr);
1827       CHECK(!mark_bitmap->AtomicTestAndSet(to_ref));
1828     }
1829   }
1830   DCHECK(to_ref != nullptr);
1831 
1832   // Attempt to install the forward pointer. This is in a loop as the
1833   // lock word atomic write can fail.
1834   while (true) {
1835     // Copy the object. TODO: copy only the lockword in the second iteration and on?
1836     memcpy(to_ref, from_ref, obj_size);
1837 
1838     LockWord old_lock_word = to_ref->GetLockWord(false);
1839 
1840     if (old_lock_word.GetState() == LockWord::kForwardingAddress) {
1841       // Lost the race. Another thread (either GC or mutator) stored
1842       // the forwarding pointer first. Make the lost copy (to_ref)
1843       // look like a valid but dead (dummy) object and keep it for
1844       // future reuse.
1845       FillWithDummyObject(to_ref, bytes_allocated);
1846       if (!fall_back_to_non_moving) {
1847         DCHECK(region_space_->IsInToSpace(to_ref));
1848         if (bytes_allocated > space::RegionSpace::kRegionSize) {
1849           // Free the large alloc.
1850           region_space_->FreeLarge(to_ref, bytes_allocated);
1851         } else {
1852           // Record the lost copy for later reuse.
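          // The lost to-space copy stays allocated (it now holds a dummy object), so its
          // bytes are added to num_bytes_allocated and the block is stashed in
          // skipped_blocks_map_ for AllocateInSkippedBlock() to reuse.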
1853 heap_->num_bytes_allocated_.FetchAndAddSequentiallyConsistent(bytes_allocated); 1854 to_space_bytes_skipped_.FetchAndAddSequentiallyConsistent(bytes_allocated); 1855 to_space_objects_skipped_.FetchAndAddSequentiallyConsistent(1); 1856 MutexLock mu(Thread::Current(), skipped_blocks_lock_); 1857 skipped_blocks_map_.insert(std::make_pair(bytes_allocated, 1858 reinterpret_cast<uint8_t*>(to_ref))); 1859 } 1860 } else { 1861 DCHECK(heap_->non_moving_space_->HasAddress(to_ref)); 1862 DCHECK_EQ(bytes_allocated, non_moving_space_bytes_allocated); 1863 // Free the non-moving-space chunk. 1864 accounting::ContinuousSpaceBitmap* mark_bitmap = 1865 heap_mark_bitmap_->GetContinuousSpaceBitmap(to_ref); 1866 CHECK(mark_bitmap != nullptr); 1867 CHECK(mark_bitmap->Clear(to_ref)); 1868 heap_->non_moving_space_->Free(Thread::Current(), to_ref); 1869 } 1870 1871 // Get the winner's forward ptr. 1872 mirror::Object* lost_fwd_ptr = to_ref; 1873 to_ref = reinterpret_cast<mirror::Object*>(old_lock_word.ForwardingAddress()); 1874 CHECK(to_ref != nullptr); 1875 CHECK_NE(to_ref, lost_fwd_ptr); 1876 CHECK(region_space_->IsInToSpace(to_ref) || heap_->non_moving_space_->HasAddress(to_ref)); 1877 CHECK_NE(to_ref->GetLockWord(false).GetState(), LockWord::kForwardingAddress); 1878 return to_ref; 1879 } 1880 1881 // Set the gray ptr. 1882 if (kUseBakerReadBarrier) { 1883 to_ref->SetReadBarrierPointer(ReadBarrier::GrayPtr()); 1884 } 1885 1886 LockWord new_lock_word = LockWord::FromForwardingAddress(reinterpret_cast<size_t>(to_ref)); 1887 1888 // Try to atomically write the fwd ptr. 1889 bool success = from_ref->CasLockWordWeakSequentiallyConsistent(old_lock_word, new_lock_word); 1890 if (LIKELY(success)) { 1891 // The CAS succeeded. 1892 objects_moved_.FetchAndAddSequentiallyConsistent(1); 1893 bytes_moved_.FetchAndAddSequentiallyConsistent(region_space_alloc_size); 1894 if (LIKELY(!fall_back_to_non_moving)) { 1895 DCHECK(region_space_->IsInToSpace(to_ref)); 1896 } else { 1897 DCHECK(heap_->non_moving_space_->HasAddress(to_ref)); 1898 DCHECK_EQ(bytes_allocated, non_moving_space_bytes_allocated); 1899 } 1900 if (kUseBakerReadBarrier) { 1901 DCHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr()); 1902 } 1903 DCHECK(GetFwdPtr(from_ref) == to_ref); 1904 CHECK_NE(to_ref->GetLockWord(false).GetState(), LockWord::kForwardingAddress); 1905 PushOntoMarkStack(to_ref); 1906 return to_ref; 1907 } else { 1908 // The CAS failed. It may have lost the race or may have failed 1909 // due to monitor/hashcode ops. Either way, retry. 1910 } 1911 } 1912} 1913 1914mirror::Object* ConcurrentCopying::IsMarked(mirror::Object* from_ref) { 1915 DCHECK(from_ref != nullptr); 1916 space::RegionSpace::RegionType rtype = region_space_->GetRegionType(from_ref); 1917 if (rtype == space::RegionSpace::RegionType::kRegionTypeToSpace) { 1918 // It's already marked. 1919 return from_ref; 1920 } 1921 mirror::Object* to_ref; 1922 if (rtype == space::RegionSpace::RegionType::kRegionTypeFromSpace) { 1923 to_ref = GetFwdPtr(from_ref); 1924 DCHECK(to_ref == nullptr || region_space_->IsInToSpace(to_ref) || 1925 heap_->non_moving_space_->HasAddress(to_ref)) 1926 << "from_ref=" << from_ref << " to_ref=" << to_ref; 1927 } else if (rtype == space::RegionSpace::RegionType::kRegionTypeUnevacFromSpace) { 1928 if (region_space_bitmap_->Test(from_ref)) { 1929 to_ref = from_ref; 1930 } else { 1931 to_ref = nullptr; 1932 } 1933 } else { 1934 // from_ref is in a non-moving space. 
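    // Non-moving objects are never forwarded; here "marked" means being set in the
    // appropriate bitmap or sitting on the allocation stack.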
1935     if (immune_region_.ContainsObject(from_ref)) {
1936       accounting::ContinuousSpaceBitmap* cc_bitmap =
1937           cc_heap_bitmap_->GetContinuousSpaceBitmap(from_ref);
1938       DCHECK(cc_bitmap != nullptr)
1939           << "An immune space object must have a bitmap";
1940       if (kIsDebugBuild) {
1941         DCHECK(heap_mark_bitmap_->GetContinuousSpaceBitmap(from_ref)->Test(from_ref))
1942             << "Immune space object must be already marked";
1943       }
1944       if (cc_bitmap->Test(from_ref)) {
1945         // Already marked.
1946         to_ref = from_ref;
1947       } else {
1948         // Not marked.
1949         to_ref = nullptr;
1950       }
1951     } else {
1952       // Non-immune non-moving space. Use the mark bitmap.
1953       accounting::ContinuousSpaceBitmap* mark_bitmap =
1954           heap_mark_bitmap_->GetContinuousSpaceBitmap(from_ref);
1955       accounting::LargeObjectBitmap* los_bitmap =
1956           heap_mark_bitmap_->GetLargeObjectBitmap(from_ref);
1957       CHECK(los_bitmap != nullptr) << "LOS bitmap covers the entire address range";
1958       bool is_los = mark_bitmap == nullptr;
1959       if (!is_los && mark_bitmap->Test(from_ref)) {
1960         // Already marked.
1961         to_ref = from_ref;
1962       } else if (is_los && los_bitmap->Test(from_ref)) {
1963         // Already marked in LOS.
1964         to_ref = from_ref;
1965       } else {
1966         // Not marked.
1967         if (IsOnAllocStack(from_ref)) {
1968           // If on the allocation stack, it's considered marked.
1969           to_ref = from_ref;
1970         } else {
1971           // Not marked.
1972           to_ref = nullptr;
1973         }
1974       }
1975     }
1976   }
1977   return to_ref;
1978 }
1979 
1980 bool ConcurrentCopying::IsOnAllocStack(mirror::Object* ref) {
1981   QuasiAtomic::ThreadFenceAcquire();
1982   accounting::ObjectStack* alloc_stack = GetAllocationStack();
1983   return alloc_stack->Contains(ref);
1984 }
1985 
1986 mirror::Object* ConcurrentCopying::Mark(mirror::Object* from_ref) {
1987   if (from_ref == nullptr) {
1988     return nullptr;
1989   }
1990   DCHECK(from_ref != nullptr);
1991   DCHECK(heap_->collector_type_ == kCollectorTypeCC);
1992   if (kUseBakerReadBarrier && !is_active_) {
1993     // In the lock word forward address state, the read barrier bits
1994     // in the lock word are part of the stored forwarding address and
1995     // invalid. This is usually OK as the from-space copies of objects
1996     // aren't accessed by mutators due to the to-space
1997     // invariant. However, during the dex2oat image writing relocation
1998     // and the zygote compaction, objects can be in the forward
1999     // address state (to store the forward/relocation addresses) and
2000     // they can still be accessed and the invalid read barrier bits
2001     // are consulted. If they happen to look gray but aren't really, the
2002     // read barrier slow path can trigger when it shouldn't. To guard
2003     // against this, return here if the CC collector isn't running.
2004     return from_ref;
2005   }
2006   DCHECK(region_space_ != nullptr) << "Read barrier slow path taken when CC isn't running?";
2007   space::RegionSpace::RegionType rtype = region_space_->GetRegionType(from_ref);
2008   if (rtype == space::RegionSpace::RegionType::kRegionTypeToSpace) {
2009     // It's already marked.
2010     return from_ref;
2011   }
2012   mirror::Object* to_ref;
2013   if (rtype == space::RegionSpace::RegionType::kRegionTypeFromSpace) {
2014     to_ref = GetFwdPtr(from_ref);
2015     if (kUseBakerReadBarrier) {
2016       DCHECK(to_ref != ReadBarrier::GrayPtr()) << "from_ref=" << from_ref << " to_ref=" << to_ref;
2017     }
2018     if (to_ref == nullptr) {
2019       // It isn't marked yet. Mark it by copying it to the to-space.
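      // Copy() races to install a forwarding pointer in from_ref's lock word; concurrent
      // callers that reach this point all end up returning the single winning to-space copy.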
2020 to_ref = Copy(from_ref); 2021 } 2022 DCHECK(region_space_->IsInToSpace(to_ref) || heap_->non_moving_space_->HasAddress(to_ref)) 2023 << "from_ref=" << from_ref << " to_ref=" << to_ref; 2024 } else if (rtype == space::RegionSpace::RegionType::kRegionTypeUnevacFromSpace) { 2025 // This may or may not succeed, which is ok. 2026 if (kUseBakerReadBarrier) { 2027 from_ref->AtomicSetReadBarrierPointer(ReadBarrier::WhitePtr(), ReadBarrier::GrayPtr()); 2028 } 2029 if (region_space_bitmap_->AtomicTestAndSet(from_ref)) { 2030 // Already marked. 2031 to_ref = from_ref; 2032 } else { 2033 // Newly marked. 2034 to_ref = from_ref; 2035 if (kUseBakerReadBarrier) { 2036 DCHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr()); 2037 } 2038 PushOntoMarkStack(to_ref); 2039 } 2040 } else { 2041 // from_ref is in a non-moving space. 2042 DCHECK(!region_space_->HasAddress(from_ref)) << from_ref; 2043 if (immune_region_.ContainsObject(from_ref)) { 2044 accounting::ContinuousSpaceBitmap* cc_bitmap = 2045 cc_heap_bitmap_->GetContinuousSpaceBitmap(from_ref); 2046 DCHECK(cc_bitmap != nullptr) 2047 << "An immune space object must have a bitmap"; 2048 if (kIsDebugBuild) { 2049 DCHECK(heap_mark_bitmap_->GetContinuousSpaceBitmap(from_ref)->Test(from_ref)) 2050 << "Immune space object must be already marked"; 2051 } 2052 // This may or may not succeed, which is ok. 2053 if (kUseBakerReadBarrier) { 2054 from_ref->AtomicSetReadBarrierPointer(ReadBarrier::WhitePtr(), ReadBarrier::GrayPtr()); 2055 } 2056 if (cc_bitmap->AtomicTestAndSet(from_ref)) { 2057 // Already marked. 2058 to_ref = from_ref; 2059 } else { 2060 // Newly marked. 2061 to_ref = from_ref; 2062 if (kUseBakerReadBarrier) { 2063 DCHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr()); 2064 } 2065 PushOntoMarkStack(to_ref); 2066 } 2067 } else { 2068 // Use the mark bitmap. 2069 accounting::ContinuousSpaceBitmap* mark_bitmap = 2070 heap_mark_bitmap_->GetContinuousSpaceBitmap(from_ref); 2071 accounting::LargeObjectBitmap* los_bitmap = 2072 heap_mark_bitmap_->GetLargeObjectBitmap(from_ref); 2073 CHECK(los_bitmap != nullptr) << "LOS bitmap covers the entire address range"; 2074 bool is_los = mark_bitmap == nullptr; 2075 if (!is_los && mark_bitmap->Test(from_ref)) { 2076 // Already marked. 2077 to_ref = from_ref; 2078 if (kUseBakerReadBarrier) { 2079 DCHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr() || 2080 to_ref->GetReadBarrierPointer() == ReadBarrier::BlackPtr()); 2081 } 2082 } else if (is_los && los_bitmap->Test(from_ref)) { 2083 // Already marked in LOS. 2084 to_ref = from_ref; 2085 if (kUseBakerReadBarrier) { 2086 DCHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr() || 2087 to_ref->GetReadBarrierPointer() == ReadBarrier::BlackPtr()); 2088 } 2089 } else { 2090 // Not marked. 2091 if (IsOnAllocStack(from_ref)) { 2092 // If it's on the allocation stack, it's considered marked. Keep it white. 2093 to_ref = from_ref; 2094 // Objects on the allocation stack need not be marked. 2095 if (!is_los) { 2096 DCHECK(!mark_bitmap->Test(to_ref)); 2097 } else { 2098 DCHECK(!los_bitmap->Test(to_ref)); 2099 } 2100 if (kUseBakerReadBarrier) { 2101 DCHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::WhitePtr()); 2102 } 2103 } else { 2104 // Not marked or on the allocation stack. Try to mark it. 2105 // This may or may not succeed, which is ok. 
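          // The AtomicTestAndSet on the bitmap below ensures that exactly one thread
          // observes the object as newly marked and pushes it onto the mark stack.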
2106 if (kUseBakerReadBarrier) { 2107 from_ref->AtomicSetReadBarrierPointer(ReadBarrier::WhitePtr(), ReadBarrier::GrayPtr()); 2108 } 2109 if (!is_los && mark_bitmap->AtomicTestAndSet(from_ref)) { 2110 // Already marked. 2111 to_ref = from_ref; 2112 } else if (is_los && los_bitmap->AtomicTestAndSet(from_ref)) { 2113 // Already marked in LOS. 2114 to_ref = from_ref; 2115 } else { 2116 // Newly marked. 2117 to_ref = from_ref; 2118 if (kUseBakerReadBarrier) { 2119 DCHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr()); 2120 } 2121 PushOntoMarkStack(to_ref); 2122 } 2123 } 2124 } 2125 } 2126 } 2127 return to_ref; 2128} 2129 2130void ConcurrentCopying::FinishPhase() { 2131 { 2132 MutexLock mu(Thread::Current(), mark_stack_lock_); 2133 CHECK_EQ(pooled_mark_stacks_.size(), kMarkStackPoolSize); 2134 } 2135 region_space_ = nullptr; 2136 { 2137 MutexLock mu(Thread::Current(), skipped_blocks_lock_); 2138 skipped_blocks_map_.clear(); 2139 } 2140 WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_); 2141 heap_->ClearMarkedObjects(); 2142} 2143 2144bool ConcurrentCopying::IsMarkedHeapReference(mirror::HeapReference<mirror::Object>* field) { 2145 mirror::Object* from_ref = field->AsMirrorPtr(); 2146 mirror::Object* to_ref = IsMarked(from_ref); 2147 if (to_ref == nullptr) { 2148 return false; 2149 } 2150 if (from_ref != to_ref) { 2151 QuasiAtomic::ThreadFenceRelease(); 2152 field->Assign(to_ref); 2153 QuasiAtomic::ThreadFenceSequentiallyConsistent(); 2154 } 2155 return true; 2156} 2157 2158mirror::Object* ConcurrentCopying::MarkObject(mirror::Object* from_ref) { 2159 return Mark(from_ref); 2160} 2161 2162void ConcurrentCopying::DelayReferenceReferent(mirror::Class* klass, mirror::Reference* reference) { 2163 heap_->GetReferenceProcessor()->DelayReferenceReferent(klass, reference, this); 2164} 2165 2166void ConcurrentCopying::ProcessReferences(Thread* self) { 2167 TimingLogger::ScopedTiming split("ProcessReferences", GetTimings()); 2168 // We don't really need to lock the heap bitmap lock as we use CAS to mark in bitmaps. 2169 WriterMutexLock mu(self, *Locks::heap_bitmap_lock_); 2170 GetHeap()->GetReferenceProcessor()->ProcessReferences( 2171 true /*concurrent*/, GetTimings(), GetCurrentIteration()->GetClearSoftReferences(), this); 2172} 2173 2174void ConcurrentCopying::RevokeAllThreadLocalBuffers() { 2175 TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings()); 2176 region_space_->RevokeAllThreadLocalBuffers(); 2177} 2178 2179} // namespace collector 2180} // namespace gc 2181} // namespace art 2182