concurrent_copying.cc revision b19ccb1d88f0bff7371c9b72f265148677c18e95
/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "concurrent_copying.h"

#include "art_field-inl.h"
#include "base/stl_util.h"
#include "gc/accounting/heap_bitmap-inl.h"
#include "gc/accounting/space_bitmap-inl.h"
#include "gc/reference_processor.h"
#include "gc/space/image_space.h"
#include "gc/space/space.h"
#include "intern_table.h"
#include "mirror/class-inl.h"
#include "mirror/object-inl.h"
#include "scoped_thread_state_change.h"
#include "thread-inl.h"
#include "thread_list.h"
#include "well_known_classes.h"

namespace art {
namespace gc {
namespace collector {

ConcurrentCopying::ConcurrentCopying(Heap* heap, const std::string& name_prefix)
    : GarbageCollector(heap,
                       name_prefix + (name_prefix.empty() ? "" : " ") +
                       "concurrent copying + mark sweep"),
      region_space_(nullptr), gc_barrier_(new Barrier(0)),
      gc_mark_stack_(accounting::ObjectStack::Create("concurrent copying gc mark stack",
                                                     2 * MB, 2 * MB)),
      mark_stack_lock_("concurrent copying mark stack lock", kMarkSweepMarkStackLock),
      thread_running_gc_(nullptr),
      is_marking_(false), is_active_(false), is_asserting_to_space_invariant_(false),
      heap_mark_bitmap_(nullptr), live_stack_freeze_size_(0), mark_stack_mode_(kMarkStackModeOff),
      weak_ref_access_enabled_(true),
      skipped_blocks_lock_("concurrent copying bytes blocks lock", kMarkSweepMarkStackLock),
      rb_table_(heap_->GetReadBarrierTable()),
      force_evacuate_all_(false) {
  static_assert(space::RegionSpace::kRegionSize == accounting::ReadBarrierTable::kRegionSize,
                "The region space size and the read barrier table region size must match");
  cc_heap_bitmap_.reset(new accounting::HeapBitmap(heap));
  Thread* self = Thread::Current();
  {
    ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
    // Cache this so that we won't have to lock heap_bitmap_lock_ in
    // Mark(), which could otherwise cause a nested lock on heap_bitmap_lock_
    // (when a read barrier is triggered during GC) or a lock order violation
    // (class_linker_lock_ and heap_bitmap_lock_).
    heap_mark_bitmap_ = heap->GetMarkBitmap();
  }
  {
    MutexLock mu(self, mark_stack_lock_);
    for (size_t i = 0; i < kMarkStackPoolSize; ++i) {
      accounting::AtomicStack<mirror::Object>* mark_stack =
          accounting::AtomicStack<mirror::Object>::Create(
              "thread local mark stack", kMarkStackSize, kMarkStackSize);
      pooled_mark_stacks_.push_back(mark_stack);
    }
  }
}

void ConcurrentCopying::MarkHeapReference(mirror::HeapReference<mirror::Object>* from_ref) {
  // Used for preserving soft references. It should be OK not to use a CAS here since there should
  // be no other threads which can trigger read barriers on the same referent during reference
  // processing.
  from_ref->Assign(Mark(from_ref->AsMirrorPtr()));
}

ConcurrentCopying::~ConcurrentCopying() {
  STLDeleteElements(&pooled_mark_stacks_);
}

void ConcurrentCopying::RunPhases() {
  CHECK(kUseBakerReadBarrier || kUseTableLookupReadBarrier);
  CHECK(!is_active_);
  is_active_ = true;
  Thread* self = Thread::Current();
  thread_running_gc_ = self;
  Locks::mutator_lock_->AssertNotHeld(self);
  {
    ReaderMutexLock mu(self, *Locks::mutator_lock_);
    InitializePhase();
  }
  FlipThreadRoots();
  {
    ReaderMutexLock mu(self, *Locks::mutator_lock_);
    MarkingPhase();
  }
  // Verify no from-space refs. This causes a pause.
  if (kEnableNoFromSpaceRefsVerification || kIsDebugBuild) {
    TimingLogger::ScopedTiming split("(Paused)VerifyNoFromSpaceReferences", GetTimings());
    ScopedPause pause(this);
    CheckEmptyMarkStack();
    if (kVerboseMode) {
      LOG(INFO) << "Verifying no from-space refs";
    }
    VerifyNoFromSpaceReferences();
    if (kVerboseMode) {
      LOG(INFO) << "Done verifying no from-space refs";
    }
    CheckEmptyMarkStack();
  }
  {
    ReaderMutexLock mu(self, *Locks::mutator_lock_);
    ReclaimPhase();
  }
  FinishPhase();
  CHECK(is_active_);
  is_active_ = false;
  thread_running_gc_ = nullptr;
}

void ConcurrentCopying::BindBitmaps() {
  Thread* self = Thread::Current();
  WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
  // Mark all of the spaces we never collect as immune.
  for (const auto& space : heap_->GetContinuousSpaces()) {
    if (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyNeverCollect
        || space->GetGcRetentionPolicy() == space::kGcRetentionPolicyFullCollect) {
      CHECK(space->IsZygoteSpace() || space->IsImageSpace());
      CHECK(immune_region_.AddContinuousSpace(space)) << "Failed to add space " << *space;
      const char* bitmap_name = space->IsImageSpace() ? "cc image space bitmap" :
          "cc zygote space bitmap";
      // TODO: try avoiding using bitmaps for image/zygote to save space.
      accounting::ContinuousSpaceBitmap* bitmap =
          accounting::ContinuousSpaceBitmap::Create(bitmap_name, space->Begin(), space->Capacity());
      cc_heap_bitmap_->AddContinuousSpaceBitmap(bitmap);
      cc_bitmaps_.push_back(bitmap);
    } else if (space == region_space_) {
      accounting::ContinuousSpaceBitmap* bitmap =
          accounting::ContinuousSpaceBitmap::Create("cc region space bitmap",
                                                    space->Begin(), space->Capacity());
      cc_heap_bitmap_->AddContinuousSpaceBitmap(bitmap);
      cc_bitmaps_.push_back(bitmap);
      region_space_bitmap_ = bitmap;
    }
  }
}

void ConcurrentCopying::InitializePhase() {
  TimingLogger::ScopedTiming split("InitializePhase", GetTimings());
  if (kVerboseMode) {
    LOG(INFO) << "GC InitializePhase";
    LOG(INFO) << "Region-space : " << reinterpret_cast<void*>(region_space_->Begin()) << "-"
              << reinterpret_cast<void*>(region_space_->Limit());
  }
  CheckEmptyMarkStack();
  immune_region_.Reset();
  bytes_moved_.StoreRelaxed(0);
  objects_moved_.StoreRelaxed(0);
  if (GetCurrentIteration()->GetGcCause() == kGcCauseExplicit ||
      GetCurrentIteration()->GetGcCause() == kGcCauseForNativeAlloc ||
      GetCurrentIteration()->GetClearSoftReferences()) {
    force_evacuate_all_ = true;
  } else {
    force_evacuate_all_ = false;
  }
  BindBitmaps();
  if (kVerboseMode) {
    LOG(INFO) << "force_evacuate_all=" << force_evacuate_all_;
    LOG(INFO) << "Immune region: " << immune_region_.Begin() << "-" << immune_region_.End();
    LOG(INFO) << "GC end of InitializePhase";
  }
}

// Used to switch the thread roots of a thread from from-space refs to to-space refs.
class ThreadFlipVisitor : public Closure {
 public:
  explicit ThreadFlipVisitor(ConcurrentCopying* concurrent_copying, bool use_tlab)
      : concurrent_copying_(concurrent_copying), use_tlab_(use_tlab) {
  }

  virtual void Run(Thread* thread) OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    // Note: self is not necessarily equal to thread since thread may be suspended.
    Thread* self = Thread::Current();
    CHECK(thread == self || thread->IsSuspended() || thread->GetState() == kWaitingPerformingGc)
        << thread->GetState() << " thread " << thread << " self " << self;
    if (use_tlab_ && thread->HasTlab()) {
      if (ConcurrentCopying::kEnableFromSpaceAccountingCheck) {
        // This must come before the revoke.
        size_t thread_local_objects = thread->GetThreadLocalObjectsAllocated();
        concurrent_copying_->region_space_->RevokeThreadLocalBuffers(thread);
        reinterpret_cast<Atomic<size_t>*>(
            &concurrent_copying_->from_space_num_objects_at_first_pause_)->
                FetchAndAddSequentiallyConsistent(thread_local_objects);
      } else {
        concurrent_copying_->region_space_->RevokeThreadLocalBuffers(thread);
      }
    }
    if (kUseThreadLocalAllocationStack) {
      thread->RevokeThreadLocalAllocationStack();
    }
    ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
    thread->VisitRoots(concurrent_copying_);
    concurrent_copying_->GetBarrier().Pass(self);
  }

 private:
  ConcurrentCopying* const concurrent_copying_;
  const bool use_tlab_;
};

// Called back from Runtime::FlipThreadRoots() during a pause.
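// FlipCallback runs on the GC thread while all mutators are suspended (the mutator lock is held
// exclusively): it designates the current regions as from-space, swaps the allocation/live
// stacks, and enables marking, so that once mutators resume, their read barriers uphold the
// to-space invariant incrementally.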
class FlipCallback : public Closure {
 public:
  explicit FlipCallback(ConcurrentCopying* concurrent_copying)
      : concurrent_copying_(concurrent_copying) {
  }

  virtual void Run(Thread* thread) OVERRIDE EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_) {
    ConcurrentCopying* cc = concurrent_copying_;
    TimingLogger::ScopedTiming split("(Paused)FlipCallback", cc->GetTimings());
    // Note: self is not necessarily equal to thread since thread may be suspended.
    Thread* self = Thread::Current();
    CHECK(thread == self);
    Locks::mutator_lock_->AssertExclusiveHeld(self);
    cc->region_space_->SetFromSpace(cc->rb_table_, cc->force_evacuate_all_);
    cc->SwapStacks(self);
    if (ConcurrentCopying::kEnableFromSpaceAccountingCheck) {
      cc->RecordLiveStackFreezeSize(self);
      cc->from_space_num_objects_at_first_pause_ = cc->region_space_->GetObjectsAllocated();
      cc->from_space_num_bytes_at_first_pause_ = cc->region_space_->GetBytesAllocated();
    }
    cc->is_marking_ = true;
    cc->mark_stack_mode_.StoreRelaxed(ConcurrentCopying::kMarkStackModeThreadLocal);
    if (UNLIKELY(Runtime::Current()->IsActiveTransaction())) {
      CHECK(Runtime::Current()->IsAotCompiler());
      TimingLogger::ScopedTiming split2("(Paused)VisitTransactionRoots", cc->GetTimings());
      Runtime::Current()->VisitTransactionRoots(cc);
    }
  }

 private:
  ConcurrentCopying* const concurrent_copying_;
};

// Switch thread roots from from-space refs to to-space refs. Forward/mark the thread roots.
void ConcurrentCopying::FlipThreadRoots() {
  TimingLogger::ScopedTiming split("FlipThreadRoots", GetTimings());
  if (kVerboseMode) {
    LOG(INFO) << "time=" << region_space_->Time();
    region_space_->DumpNonFreeRegions(LOG(INFO));
  }
  Thread* self = Thread::Current();
  Locks::mutator_lock_->AssertNotHeld(self);
  gc_barrier_->Init(self, 0);
  ThreadFlipVisitor thread_flip_visitor(this, heap_->use_tlab_);
  FlipCallback flip_callback(this);
  size_t barrier_count = Runtime::Current()->FlipThreadRoots(
      &thread_flip_visitor, &flip_callback, this);
  {
    ScopedThreadStateChange tsc(self, kWaitingForCheckPointsToRun);
    gc_barrier_->Increment(self, barrier_count);
  }
  is_asserting_to_space_invariant_ = true;
  QuasiAtomic::ThreadFenceForConstructor();
  if (kVerboseMode) {
    LOG(INFO) << "time=" << region_space_->Time();
    region_space_->DumpNonFreeRegions(LOG(INFO));
    LOG(INFO) << "GC end of FlipThreadRoots";
  }
}

void ConcurrentCopying::SwapStacks(Thread* self) {
  heap_->SwapStacks(self);
}

void ConcurrentCopying::RecordLiveStackFreezeSize(Thread* self) {
  WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
  live_stack_freeze_size_ = heap_->GetLiveStack()->Size();
}

// Used to visit objects in the immune spaces.
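// Immune-space objects are never moved or freed by this GC. Under the Baker read barrier, the
// visitor below grays each object (white -> gray) and sets its bit in the per-GC cc_bitmap;
// only a newly marked object is pushed onto the mark stack, so its fields get scanned exactly
// once.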
class ConcurrentCopyingImmuneSpaceObjVisitor {
 public:
  explicit ConcurrentCopyingImmuneSpaceObjVisitor(ConcurrentCopying* cc)
      : collector_(cc) {}

  void operator()(mirror::Object* obj) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
    DCHECK(obj != nullptr);
    DCHECK(collector_->immune_region_.ContainsObject(obj));
    accounting::ContinuousSpaceBitmap* cc_bitmap =
        collector_->cc_heap_bitmap_->GetContinuousSpaceBitmap(obj);
    DCHECK(cc_bitmap != nullptr)
        << "An immune space object must have a bitmap";
    if (kIsDebugBuild) {
      DCHECK(collector_->heap_->GetMarkBitmap()->Test(obj))
          << "Immune space object must be already marked";
    }
    // This may or may not succeed, which is OK.
    if (kUseBakerReadBarrier) {
      obj->AtomicSetReadBarrierPointer(ReadBarrier::WhitePtr(), ReadBarrier::GrayPtr());
    }
    if (cc_bitmap->AtomicTestAndSet(obj)) {
      // Already marked. Do nothing.
    } else {
      // Newly marked. Set the gray bit and push it onto the mark stack.
      CHECK(!kUseBakerReadBarrier || obj->GetReadBarrierPointer() == ReadBarrier::GrayPtr());
      collector_->PushOntoMarkStack(obj);
    }
  }

 private:
  ConcurrentCopying* const collector_;
};

class EmptyCheckpoint : public Closure {
 public:
  explicit EmptyCheckpoint(ConcurrentCopying* concurrent_copying)
      : concurrent_copying_(concurrent_copying) {
  }

  virtual void Run(Thread* thread) OVERRIDE NO_THREAD_SAFETY_ANALYSIS {
    // Note: self is not necessarily equal to thread since thread may be suspended.
    Thread* self = Thread::Current();
    CHECK(thread == self || thread->IsSuspended() || thread->GetState() == kWaitingPerformingGc)
        << thread->GetState() << " thread " << thread << " self " << self;
    // If thread is a running mutator, then act on behalf of the garbage collector.
    // See the code in ThreadList::RunCheckpoint.
    if (thread->GetState() == kRunnable) {
      concurrent_copying_->GetBarrier().Pass(self);
    }
  }

 private:
  ConcurrentCopying* const concurrent_copying_;
};

// Concurrently mark roots that are guarded by read barriers and process the mark stack.
void ConcurrentCopying::MarkingPhase() {
  TimingLogger::ScopedTiming split("MarkingPhase", GetTimings());
  if (kVerboseMode) {
    LOG(INFO) << "GC MarkingPhase";
  }
  CHECK(weak_ref_access_enabled_);
  {
    // Mark the image root. The WB-based collectors do not need to
    // scan the image objects from roots by relying on the card table,
    // but it's necessary for the RB to-space invariant to hold.
    TimingLogger::ScopedTiming split1("VisitImageRoots", GetTimings());
    gc::space::ImageSpace* image = heap_->GetImageSpace();
    if (image != nullptr) {
      mirror::ObjectArray<mirror::Object>* image_root = image->GetImageHeader().GetImageRoots();
      mirror::Object* marked_image_root = Mark(image_root);
      CHECK_EQ(image_root, marked_image_root) << "An image object does not move";
      if (ReadBarrier::kEnableToSpaceInvariantChecks) {
        AssertToSpaceInvariant(nullptr, MemberOffset(0), marked_image_root);
      }
    }
  }
  // TODO: Other garbage collectors use Runtime::VisitConcurrentRoots(); refactor this part
  // to use the same function.
  {
    TimingLogger::ScopedTiming split2("VisitConstantRoots", GetTimings());
    Runtime::Current()->VisitConstantRoots(this);
  }
  {
    TimingLogger::ScopedTiming split3("VisitInternTableRoots", GetTimings());
    Runtime::Current()->GetInternTable()->VisitRoots(this, kVisitRootFlagAllRoots);
  }
  {
    TimingLogger::ScopedTiming split4("VisitClassLinkerRoots", GetTimings());
    Runtime::Current()->GetClassLinker()->VisitRoots(this, kVisitRootFlagAllRoots);
  }
  {
    // TODO: don't visit the transaction roots if it's not active.
    TimingLogger::ScopedTiming split5("VisitNonThreadRoots", GetTimings());
    Runtime::Current()->VisitNonThreadRoots(this);
  }
  Runtime::Current()->GetHeap()->VisitAllocationRecords(this);

  // Immune spaces.
  for (auto& space : heap_->GetContinuousSpaces()) {
    if (immune_region_.ContainsSpace(space)) {
      DCHECK(space->IsImageSpace() || space->IsZygoteSpace());
      accounting::ContinuousSpaceBitmap* live_bitmap = space->GetLiveBitmap();
      ConcurrentCopyingImmuneSpaceObjVisitor visitor(this);
      live_bitmap->VisitMarkedRange(reinterpret_cast<uintptr_t>(space->Begin()),
                                    reinterpret_cast<uintptr_t>(space->Limit()),
                                    visitor);
    }
  }

  Thread* self = Thread::Current();
  {
    TimingLogger::ScopedTiming split6("ProcessMarkStack", GetTimings());
    // We transition through three mark stack modes (thread-local, shared, GC-exclusive). The
    // primary reasons are that we need to use a checkpoint to process thread-local mark stacks,
    // but after we disable weak ref accesses, we can't use a checkpoint due to a deadlock issue
    // (running threads could be blocked in WaitHoldingLocks), and that once we reach the point
    // where we process weak references, we can avoid using a lock when accessing the GC mark
    // stack, which makes mark stack processing more efficient.

    // Process the mark stack once in the thread-local stack mode. This marks most of the live
    // objects, aside from weak ref accesses with read barriers (Reference::GetReferent() and
    // system weaks) that may happen concurrently while we process the mark stack and newly
    // mark/gray objects and push refs onto the mark stack.
    ProcessMarkStack();
    // Switch to the shared mark stack mode. That is, revoke and process thread-local mark stacks
    // for the last time before transitioning to the shared mark stack mode, which processes any
    // new refs that may have been concurrently pushed onto the mark stack during the
    // ProcessMarkStack() call above. At the same time, disable weak ref accesses using a
    // per-thread flag. It's important to do these together in a single checkpoint so that we can
    // ensure that mutators won't newly gray objects and push new refs onto the mark stack due to
    // weak ref accesses, and that mutators safely transition to the shared mark stack mode
    // (without leaving unprocessed refs on the thread-local mark stacks), without a race. This is
    // why we use a thread-local weak ref access flag, Thread::tls32_.weak_ref_access_enabled_,
    // instead of the global one.
    SwitchToSharedMarkStackMode();
    CHECK(!self->GetWeakRefAccessEnabled());
    // Now that weak ref accesses are disabled, once we exhaust the shared mark stack again here
    // (which may be non-empty if there were refs found on thread-local mark stacks during the
    // above SwitchToSharedMarkStackMode() call), we won't have new refs to process; that is,
    // mutators (via read barriers) have no way to produce any more refs to process. Marking
    // reaches a fixed point here, before we process weak refs below.
    ProcessMarkStack();
    CheckEmptyMarkStack();
    // Switch to the GC-exclusive mark stack mode so that we can process the mark stack without a
    // lock from this point on.
    SwitchToGcExclusiveMarkStackMode();
    CheckEmptyMarkStack();
    if (kVerboseMode) {
      LOG(INFO) << "ProcessReferences";
    }
    // Process weak references. This may produce new refs to process, which are processed via
    // ProcessMarkStack (in the GC-exclusive mark stack mode).
    ProcessReferences(self);
    CheckEmptyMarkStack();
    if (kVerboseMode) {
      LOG(INFO) << "SweepSystemWeaks";
    }
    SweepSystemWeaks(self);
    if (kVerboseMode) {
      LOG(INFO) << "SweepSystemWeaks done";
    }
    // Process the mark stack here one last time because the above SweepSystemWeaks() call may
    // have marked some objects (strings) alive, as hash_set::Erase() can call the hash function
    // for arbitrary elements in the weak intern table in InternTable::Table::SweepWeaks().
    ProcessMarkStack();
    CheckEmptyMarkStack();
    // Re-enable weak ref accesses.
    ReenableWeakRefAccess(self);
    // Issue an empty checkpoint to ensure no threads are still in the middle of a read barrier
    // which may have a from-space ref cached in a local variable.
    IssueEmptyCheckpoint();
    // Marking is done. Disable marking.
    if (kUseTableLookupReadBarrier) {
      heap_->rb_table_->ClearAll();
      DCHECK(heap_->rb_table_->IsAllCleared());
    }
    is_mark_stack_push_disallowed_.StoreSequentiallyConsistent(1);
    is_marking_ = false;  // This disables the read barrier/marking of weak roots.
    mark_stack_mode_.StoreSequentiallyConsistent(kMarkStackModeOff);
    CheckEmptyMarkStack();
  }

  CHECK(weak_ref_access_enabled_);
  if (kVerboseMode) {
    LOG(INFO) << "GC end of MarkingPhase";
  }
}

void ConcurrentCopying::ReenableWeakRefAccess(Thread* self) {
  if (kVerboseMode) {
    LOG(INFO) << "ReenableWeakRefAccess";
  }
  weak_ref_access_enabled_.StoreRelaxed(true);  // This is for new threads.
  QuasiAtomic::ThreadFenceForConstructor();
  // Iterate over all threads (we don't need to, and can't, use a checkpoint) and re-enable weak
  // ref access.
  {
    MutexLock mu(self, *Locks::thread_list_lock_);
    std::list<Thread*> thread_list = Runtime::Current()->GetThreadList()->GetList();
    for (Thread* thread : thread_list) {
      thread->SetWeakRefAccessEnabled(true);
    }
  }
  // Unblock threads that are blocked waiting for weak ref access.
  GetHeap()->GetReferenceProcessor()->BroadcastForSlowPath(self);
  Runtime::Current()->BroadcastForNewSystemWeaks();
}

void ConcurrentCopying::IssueEmptyCheckpoint() {
  Thread* self = Thread::Current();
  EmptyCheckpoint check_point(this);
  ThreadList* thread_list = Runtime::Current()->GetThreadList();
  gc_barrier_->Init(self, 0);
  size_t barrier_count = thread_list->RunCheckpoint(&check_point);
  // If there are no threads to wait for, which implies that all the checkpoint functions have
  // already finished, then there is no need to release the mutator lock.
  if (barrier_count == 0) {
    return;
  }
  // Release locks, then wait for all mutator threads to pass the barrier.
  Locks::mutator_lock_->SharedUnlock(self);
  {
    ScopedThreadStateChange tsc(self, kWaitingForCheckPointsToRun);
    gc_barrier_->Increment(self, barrier_count);
  }
  Locks::mutator_lock_->SharedLock(self);
}

void ConcurrentCopying::PushOntoMarkStack(mirror::Object* to_ref) {
  CHECK_EQ(is_mark_stack_push_disallowed_.LoadRelaxed(), 0)
      << " " << to_ref << " " << PrettyTypeOf(to_ref);
  Thread* self = Thread::Current();  // TODO: pass self as an argument from call sites?
  CHECK(thread_running_gc_ != nullptr);
  MarkStackMode mark_stack_mode = mark_stack_mode_.LoadRelaxed();
  if (mark_stack_mode == kMarkStackModeThreadLocal) {
    if (self == thread_running_gc_) {
      // If GC-running thread, use the GC mark stack instead of a thread-local mark stack.
      CHECK(self->GetThreadLocalMarkStack() == nullptr);
      CHECK(!gc_mark_stack_->IsFull());
      gc_mark_stack_->PushBack(to_ref);
    } else {
      // Otherwise, use a thread-local mark stack.
      accounting::AtomicStack<mirror::Object>* tl_mark_stack = self->GetThreadLocalMarkStack();
      if (UNLIKELY(tl_mark_stack == nullptr || tl_mark_stack->IsFull())) {
        MutexLock mu(self, mark_stack_lock_);
        // Get a new thread-local mark stack.
        accounting::AtomicStack<mirror::Object>* new_tl_mark_stack;
        if (!pooled_mark_stacks_.empty()) {
          // Use a pooled mark stack.
          new_tl_mark_stack = pooled_mark_stacks_.back();
          pooled_mark_stacks_.pop_back();
        } else {
          // None pooled. Create a new one.
          new_tl_mark_stack =
              accounting::AtomicStack<mirror::Object>::Create(
                  "thread local mark stack", 4 * KB, 4 * KB);
        }
        DCHECK(new_tl_mark_stack != nullptr);
        DCHECK(new_tl_mark_stack->IsEmpty());
        new_tl_mark_stack->PushBack(to_ref);
        self->SetThreadLocalMarkStack(new_tl_mark_stack);
        if (tl_mark_stack != nullptr) {
          // Store the old full stack into the revoked stack vector.
          revoked_mark_stacks_.push_back(tl_mark_stack);
        }
      } else {
        tl_mark_stack->PushBack(to_ref);
      }
    }
  } else if (mark_stack_mode == kMarkStackModeShared) {
    // Access the shared GC mark stack with a lock.
    MutexLock mu(self, mark_stack_lock_);
    CHECK(!gc_mark_stack_->IsFull());
    gc_mark_stack_->PushBack(to_ref);
  } else {
    CHECK_EQ(static_cast<uint32_t>(mark_stack_mode),
             static_cast<uint32_t>(kMarkStackModeGcExclusive));
    CHECK(self == thread_running_gc_)
        << "Only GC-running thread should access the mark stack "
        << "in the GC exclusive mark stack mode";
    // Access the GC mark stack without a lock.
    CHECK(!gc_mark_stack_->IsFull());
    gc_mark_stack_->PushBack(to_ref);
  }
}

accounting::ObjectStack* ConcurrentCopying::GetAllocationStack() {
  return heap_->allocation_stack_.get();
}

accounting::ObjectStack* ConcurrentCopying::GetLiveStack() {
  return heap_->live_stack_.get();
}

inline mirror::Object* ConcurrentCopying::GetFwdPtr(mirror::Object* from_ref) {
  DCHECK(region_space_->IsInFromSpace(from_ref));
  LockWord lw = from_ref->GetLockWord(false);
  if (lw.GetState() == LockWord::kForwardingAddress) {
    mirror::Object* fwd_ptr = reinterpret_cast<mirror::Object*>(lw.ForwardingAddress());
    CHECK(fwd_ptr != nullptr);
    return fwd_ptr;
  } else {
    return nullptr;
  }
}

// The following visitors are used to verify that there are no references to the from-space
// left after marking.
class ConcurrentCopyingVerifyNoFromSpaceRefsVisitor : public SingleRootVisitor {
 public:
  explicit ConcurrentCopyingVerifyNoFromSpaceRefsVisitor(ConcurrentCopying* collector)
      : collector_(collector) {}

  void operator()(mirror::Object* ref) const
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) ALWAYS_INLINE {
    if (ref == nullptr) {
      // OK.
      return;
    }
    collector_->AssertToSpaceInvariant(nullptr, MemberOffset(0), ref);
    if (kUseBakerReadBarrier) {
      if (collector_->RegionSpace()->IsInToSpace(ref)) {
        CHECK(ref->GetReadBarrierPointer() == nullptr)
            << "To-space ref " << ref << " " << PrettyTypeOf(ref)
            << " has non-white rb_ptr " << ref->GetReadBarrierPointer();
      } else {
        CHECK(ref->GetReadBarrierPointer() == ReadBarrier::BlackPtr() ||
              (ref->GetReadBarrierPointer() == ReadBarrier::WhitePtr() &&
               collector_->IsOnAllocStack(ref)))
            << "Non-moving/unevac from space ref " << ref << " " << PrettyTypeOf(ref)
            << " has non-black rb_ptr " << ref->GetReadBarrierPointer()
            << " but isn't on the alloc stack (and has white rb_ptr)."
            << " Is it in the non-moving space="
            << (collector_->GetHeap()->GetNonMovingSpace()->HasAddress(ref));
      }
    }
  }

  void VisitRoot(mirror::Object* root, const RootInfo& info ATTRIBUTE_UNUSED)
      OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    DCHECK(root != nullptr);
    operator()(root);
  }

 private:
  ConcurrentCopying* const collector_;
};

class ConcurrentCopyingVerifyNoFromSpaceRefsFieldVisitor {
 public:
  explicit ConcurrentCopyingVerifyNoFromSpaceRefsFieldVisitor(ConcurrentCopying* collector)
      : collector_(collector) {}

  void operator()(mirror::Object* obj, MemberOffset offset, bool /* is_static */) const
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) ALWAYS_INLINE {
    mirror::Object* ref =
        obj->GetFieldObject<mirror::Object, kDefaultVerifyFlags, kWithoutReadBarrier>(offset);
    ConcurrentCopyingVerifyNoFromSpaceRefsVisitor visitor(collector_);
    visitor(ref);
  }
  void operator()(mirror::Class* klass, mirror::Reference* ref) const
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) ALWAYS_INLINE {
    CHECK(klass->IsTypeOfReferenceClass());
    this->operator()(ref, mirror::Reference::ReferentOffset(), false);
  }

 private:
  ConcurrentCopying* const collector_;
};

class ConcurrentCopyingVerifyNoFromSpaceRefsObjectVisitor {
 public:
  explicit ConcurrentCopyingVerifyNoFromSpaceRefsObjectVisitor(ConcurrentCopying* collector)
      : collector_(collector) {}
  void operator()(mirror::Object* obj) const
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    ObjectCallback(obj, collector_);
  }
  static void ObjectCallback(mirror::Object* obj, void* arg)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    CHECK(obj != nullptr);
    ConcurrentCopying* collector = reinterpret_cast<ConcurrentCopying*>(arg);
    space::RegionSpace* region_space = collector->RegionSpace();
    CHECK(!region_space->IsInFromSpace(obj)) << "Scanning object " << obj << " in from space";
    ConcurrentCopyingVerifyNoFromSpaceRefsFieldVisitor visitor(collector);
    obj->VisitReferences<true>(visitor, visitor);
    if (kUseBakerReadBarrier) {
      if (collector->RegionSpace()->IsInToSpace(obj)) {
        CHECK(obj->GetReadBarrierPointer() == nullptr)
            << "obj=" << obj << " non-white rb_ptr " << obj->GetReadBarrierPointer();
      } else {
        CHECK(obj->GetReadBarrierPointer() == ReadBarrier::BlackPtr() ||
              (obj->GetReadBarrierPointer() == ReadBarrier::WhitePtr() &&
               collector->IsOnAllocStack(obj)))
            << "Non-moving space/unevac from space ref " << obj << " " << PrettyTypeOf(obj)
            << " has non-black rb_ptr " << obj->GetReadBarrierPointer()
            << " but isn't on the alloc stack (and has white rb_ptr). Is it in the non-moving space="
            << (collector->GetHeap()->GetNonMovingSpace()->HasAddress(obj));
      }
    }
  }

 private:
  ConcurrentCopying* const collector_;
};

// Verify there are no from-space references left after the marking phase.
void ConcurrentCopying::VerifyNoFromSpaceReferences() {
  Thread* self = Thread::Current();
  DCHECK(Locks::mutator_lock_->IsExclusiveHeld(self));
  ConcurrentCopyingVerifyNoFromSpaceRefsObjectVisitor visitor(this);
  // Roots.
  {
    ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
    ConcurrentCopyingVerifyNoFromSpaceRefsVisitor ref_visitor(this);
    Runtime::Current()->VisitRoots(&ref_visitor);
  }
  // The to-space.
  region_space_->WalkToSpace(ConcurrentCopyingVerifyNoFromSpaceRefsObjectVisitor::ObjectCallback,
                             this);
  // Non-moving spaces.
  {
    WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
    heap_->GetMarkBitmap()->Visit(visitor);
  }
  // The alloc stack.
  {
    ConcurrentCopyingVerifyNoFromSpaceRefsVisitor ref_visitor(this);
    for (auto* it = heap_->allocation_stack_->Begin(), *end = heap_->allocation_stack_->End();
        it < end; ++it) {
      mirror::Object* const obj = it->AsMirrorPtr();
      if (obj != nullptr && obj->GetClass() != nullptr) {
        // TODO: need to call this only if obj is alive?
        ref_visitor(obj);
        visitor(obj);
      }
    }
  }
  // TODO: LOS. But only refs in LOS are classes.
}

// The following visitors are used to assert the to-space invariant.
class ConcurrentCopyingAssertToSpaceInvariantRefsVisitor {
 public:
  explicit ConcurrentCopyingAssertToSpaceInvariantRefsVisitor(ConcurrentCopying* collector)
      : collector_(collector) {}

  void operator()(mirror::Object* ref) const
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) ALWAYS_INLINE {
    if (ref == nullptr) {
      // OK.
      return;
    }
    collector_->AssertToSpaceInvariant(nullptr, MemberOffset(0), ref);
  }

 private:
  ConcurrentCopying* const collector_;
};

class ConcurrentCopyingAssertToSpaceInvariantFieldVisitor {
 public:
  explicit ConcurrentCopyingAssertToSpaceInvariantFieldVisitor(ConcurrentCopying* collector)
      : collector_(collector) {}

  void operator()(mirror::Object* obj, MemberOffset offset, bool /* is_static */) const
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) ALWAYS_INLINE {
    mirror::Object* ref =
        obj->GetFieldObject<mirror::Object, kDefaultVerifyFlags, kWithoutReadBarrier>(offset);
    ConcurrentCopyingAssertToSpaceInvariantRefsVisitor visitor(collector_);
    visitor(ref);
  }
  void operator()(mirror::Class* klass, mirror::Reference* /* ref */) const
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) ALWAYS_INLINE {
    CHECK(klass->IsTypeOfReferenceClass());
  }

 private:
  ConcurrentCopying* const collector_;
};

class ConcurrentCopyingAssertToSpaceInvariantObjectVisitor {
 public:
  explicit ConcurrentCopyingAssertToSpaceInvariantObjectVisitor(ConcurrentCopying* collector)
      : collector_(collector) {}
  void operator()(mirror::Object* obj) const
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    ObjectCallback(obj, collector_);
  }
  static void ObjectCallback(mirror::Object* obj, void* arg)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    CHECK(obj != nullptr);
    ConcurrentCopying* collector = reinterpret_cast<ConcurrentCopying*>(arg);
    space::RegionSpace* region_space = collector->RegionSpace();
    CHECK(!region_space->IsInFromSpace(obj)) << "Scanning object " << obj << " in from space";
    collector->AssertToSpaceInvariant(nullptr, MemberOffset(0), obj);
    ConcurrentCopyingAssertToSpaceInvariantFieldVisitor visitor(collector);
    obj->VisitReferences<true>(visitor, visitor);
  }

 private:
  ConcurrentCopying* const collector_;
};

class RevokeThreadLocalMarkStackCheckpoint : public Closure {
 public:
  explicit RevokeThreadLocalMarkStackCheckpoint(ConcurrentCopying* concurrent_copying,
                                                bool disable_weak_ref_access)
      : concurrent_copying_(concurrent_copying),
        disable_weak_ref_access_(disable_weak_ref_access) {
  }

  virtual void Run(Thread* thread) OVERRIDE NO_THREAD_SAFETY_ANALYSIS {
    // Note: self is not necessarily equal to thread since thread may be suspended.
    Thread* self = Thread::Current();
    CHECK(thread == self || thread->IsSuspended() || thread->GetState() == kWaitingPerformingGc)
        << thread->GetState() << " thread " << thread << " self " << self;
    // Revoke thread-local mark stacks.
    accounting::AtomicStack<mirror::Object>* tl_mark_stack = thread->GetThreadLocalMarkStack();
    if (tl_mark_stack != nullptr) {
      MutexLock mu(self, concurrent_copying_->mark_stack_lock_);
      concurrent_copying_->revoked_mark_stacks_.push_back(tl_mark_stack);
      thread->SetThreadLocalMarkStack(nullptr);
    }
    // Disable weak ref access.
    if (disable_weak_ref_access_) {
      thread->SetWeakRefAccessEnabled(false);
    }
    // If thread is a running mutator, then act on behalf of the garbage collector.
    // See the code in ThreadList::RunCheckpoint.
    if (thread->GetState() == kRunnable) {
      concurrent_copying_->GetBarrier().Pass(self);
    }
  }

 private:
  ConcurrentCopying* const concurrent_copying_;
  const bool disable_weak_ref_access_;
};

void ConcurrentCopying::RevokeThreadLocalMarkStacks(bool disable_weak_ref_access) {
  Thread* self = Thread::Current();
  RevokeThreadLocalMarkStackCheckpoint check_point(this, disable_weak_ref_access);
  ThreadList* thread_list = Runtime::Current()->GetThreadList();
  gc_barrier_->Init(self, 0);
  size_t barrier_count = thread_list->RunCheckpoint(&check_point);
  // If there are no threads to wait for, which implies that all the checkpoint functions have
  // already finished, then there is no need to release the mutator lock.
  if (barrier_count == 0) {
    return;
  }
  Locks::mutator_lock_->SharedUnlock(self);
  {
    ScopedThreadStateChange tsc(self, kWaitingForCheckPointsToRun);
    gc_barrier_->Increment(self, barrier_count);
  }
  Locks::mutator_lock_->SharedLock(self);
}

void ConcurrentCopying::RevokeThreadLocalMarkStack(Thread* thread) {
  Thread* self = Thread::Current();
  CHECK_EQ(self, thread);
  accounting::AtomicStack<mirror::Object>* tl_mark_stack = thread->GetThreadLocalMarkStack();
  if (tl_mark_stack != nullptr) {
    CHECK(is_marking_);
    MutexLock mu(self, mark_stack_lock_);
    revoked_mark_stacks_.push_back(tl_mark_stack);
    thread->SetThreadLocalMarkStack(nullptr);
  }
}

void ConcurrentCopying::ProcessMarkStack() {
  if (kVerboseMode) {
    LOG(INFO) << "ProcessMarkStack. ";
  }
  bool empty_prev = false;
  while (true) {
    bool empty = ProcessMarkStackOnce();
    if (empty_prev && empty) {
      // Saw an empty mark stack for a second time; we're done.
      break;
    }
    empty_prev = empty;
  }
}

bool ConcurrentCopying::ProcessMarkStackOnce() {
  Thread* self = Thread::Current();
  CHECK(thread_running_gc_ != nullptr);
  CHECK(self == thread_running_gc_);
  CHECK(self->GetThreadLocalMarkStack() == nullptr);
  size_t count = 0;
  MarkStackMode mark_stack_mode = mark_stack_mode_.LoadRelaxed();
  if (mark_stack_mode == kMarkStackModeThreadLocal) {
    // Process the thread-local mark stacks and the GC mark stack.
    count += ProcessThreadLocalMarkStacks(false);
    while (!gc_mark_stack_->IsEmpty()) {
      mirror::Object* to_ref = gc_mark_stack_->PopBack();
      ProcessMarkStackRef(to_ref);
      ++count;
    }
    gc_mark_stack_->Reset();
  } else if (mark_stack_mode == kMarkStackModeShared) {
    // Process the shared GC mark stack with a lock.
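    // Mutators may push onto the shared stack concurrently, so each round copies the stack's
    // contents out under the lock, resets the stack, and processes the copied refs without the
    // lock held; the loop ends when a locked check finds the stack empty.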
    {
      MutexLock mu(self, mark_stack_lock_);
      CHECK(revoked_mark_stacks_.empty());
    }
    while (true) {
      std::vector<mirror::Object*> refs;
      {
        // Copy refs with lock. Note the number of refs should be small.
        MutexLock mu(self, mark_stack_lock_);
        if (gc_mark_stack_->IsEmpty()) {
          break;
        }
        for (StackReference<mirror::Object>* p = gc_mark_stack_->Begin();
             p != gc_mark_stack_->End(); ++p) {
          refs.push_back(p->AsMirrorPtr());
        }
        gc_mark_stack_->Reset();
      }
      for (mirror::Object* ref : refs) {
        ProcessMarkStackRef(ref);
        ++count;
      }
    }
  } else {
    CHECK_EQ(static_cast<uint32_t>(mark_stack_mode),
             static_cast<uint32_t>(kMarkStackModeGcExclusive));
    {
      MutexLock mu(self, mark_stack_lock_);
      CHECK(revoked_mark_stacks_.empty());
    }
    // Process the GC mark stack in the exclusive mode. No need to take the lock.
    while (!gc_mark_stack_->IsEmpty()) {
      mirror::Object* to_ref = gc_mark_stack_->PopBack();
      ProcessMarkStackRef(to_ref);
      ++count;
    }
    gc_mark_stack_->Reset();
  }

  // Return true if the stack was empty.
  return count == 0;
}

size_t ConcurrentCopying::ProcessThreadLocalMarkStacks(bool disable_weak_ref_access) {
  // Run a checkpoint to collect all thread-local mark stacks and iterate over them all.
  RevokeThreadLocalMarkStacks(disable_weak_ref_access);
  size_t count = 0;
  std::vector<accounting::AtomicStack<mirror::Object>*> mark_stacks;
  {
    MutexLock mu(Thread::Current(), mark_stack_lock_);
    // Make a copy of the mark stack vector.
    mark_stacks = revoked_mark_stacks_;
    revoked_mark_stacks_.clear();
  }
  for (accounting::AtomicStack<mirror::Object>* mark_stack : mark_stacks) {
    for (StackReference<mirror::Object>* p = mark_stack->Begin(); p != mark_stack->End(); ++p) {
      mirror::Object* to_ref = p->AsMirrorPtr();
      ProcessMarkStackRef(to_ref);
      ++count;
    }
    {
      MutexLock mu(Thread::Current(), mark_stack_lock_);
      if (pooled_mark_stacks_.size() >= kMarkStackPoolSize) {
        // The pool has enough stacks. Delete this one.
        delete mark_stack;
      } else {
        // Otherwise, put it into the pool for later reuse.
        mark_stack->Reset();
        pooled_mark_stacks_.push_back(mark_stack);
      }
    }
  }
  return count;
}

void ConcurrentCopying::ProcessMarkStackRef(mirror::Object* to_ref) {
  DCHECK(!region_space_->IsInFromSpace(to_ref));
  if (kUseBakerReadBarrier) {
    DCHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr())
        << " " << to_ref << " " << to_ref->GetReadBarrierPointer()
        << " is_marked=" << IsMarked(to_ref);
  }
  // Scan ref fields.
  Scan(to_ref);
  // Mark the gray ref as white or black.
  if (kUseBakerReadBarrier) {
    DCHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr())
        << " " << to_ref << " " << to_ref->GetReadBarrierPointer()
        << " is_marked=" << IsMarked(to_ref);
  }
  if (to_ref->GetClass<kVerifyNone, kWithoutReadBarrier>()->IsTypeOfReferenceClass() &&
      to_ref->AsReference()->GetReferent<kWithoutReadBarrier>() != nullptr &&
      !IsInToSpace(to_ref->AsReference()->GetReferent<kWithoutReadBarrier>())) {
    // Leave References gray so that GetReferent() will trigger the read barrier.
    CHECK(to_ref->AsReference()->IsEnqueued()) << "Left unenqueued ref gray " << to_ref;
  } else {
#ifdef USE_BAKER_OR_BROOKS_READ_BARRIER
    if (kUseBakerReadBarrier) {
      if (region_space_->IsInToSpace(to_ref)) {
        // If to-space, change from gray to white.
        bool success = to_ref->AtomicSetReadBarrierPointer(ReadBarrier::GrayPtr(),
                                                           ReadBarrier::WhitePtr());
        CHECK(success) << "Must succeed as we won the race.";
        CHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::WhitePtr());
      } else {
        // If non-moving space/unevac from space, change from gray
        // to black. We can't change gray to white because it's not
        // safe to use CAS if two threads change values in opposite
        // directions (A->B and B->A). So, we change it to black to
        // indicate non-moving objects that have been marked
        // through. Note we'd need to change from black to white
        // later (concurrently).
        bool success = to_ref->AtomicSetReadBarrierPointer(ReadBarrier::GrayPtr(),
                                                           ReadBarrier::BlackPtr());
        CHECK(success) << "Must succeed as we won the race.";
        CHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::BlackPtr());
      }
    }
#else
    DCHECK(!kUseBakerReadBarrier);
#endif
  }
  if (ReadBarrier::kEnableToSpaceInvariantChecks || kIsDebugBuild) {
    ConcurrentCopyingAssertToSpaceInvariantObjectVisitor visitor(this);
    visitor(to_ref);
  }
}

void ConcurrentCopying::SwitchToSharedMarkStackMode() {
  Thread* self = Thread::Current();
  CHECK(thread_running_gc_ != nullptr);
  CHECK_EQ(self, thread_running_gc_);
  CHECK(self->GetThreadLocalMarkStack() == nullptr);
  MarkStackMode before_mark_stack_mode = mark_stack_mode_.LoadRelaxed();
  CHECK_EQ(static_cast<uint32_t>(before_mark_stack_mode),
           static_cast<uint32_t>(kMarkStackModeThreadLocal));
  mark_stack_mode_.StoreRelaxed(kMarkStackModeShared);
  CHECK(weak_ref_access_enabled_.LoadRelaxed());
  weak_ref_access_enabled_.StoreRelaxed(false);
  QuasiAtomic::ThreadFenceForConstructor();
  // Process the thread-local mark stacks one last time after switching to the shared mark stack
  // mode and disabling weak ref accesses.
  ProcessThreadLocalMarkStacks(true);
  if (kVerboseMode) {
    LOG(INFO) << "Switched to shared mark stack mode and disabled weak ref access";
  }
}

void ConcurrentCopying::SwitchToGcExclusiveMarkStackMode() {
  Thread* self = Thread::Current();
  CHECK(thread_running_gc_ != nullptr);
  CHECK_EQ(self, thread_running_gc_);
  CHECK(self->GetThreadLocalMarkStack() == nullptr);
  MarkStackMode before_mark_stack_mode = mark_stack_mode_.LoadRelaxed();
  CHECK_EQ(static_cast<uint32_t>(before_mark_stack_mode),
           static_cast<uint32_t>(kMarkStackModeShared));
  mark_stack_mode_.StoreRelaxed(kMarkStackModeGcExclusive);
  QuasiAtomic::ThreadFenceForConstructor();
  if (kVerboseMode) {
    LOG(INFO) << "Switched to GC exclusive mark stack mode";
  }
}

void ConcurrentCopying::CheckEmptyMarkStack() {
  Thread* self = Thread::Current();
  CHECK(thread_running_gc_ != nullptr);
  CHECK_EQ(self, thread_running_gc_);
  CHECK(self->GetThreadLocalMarkStack() == nullptr);
  MarkStackMode mark_stack_mode = mark_stack_mode_.LoadRelaxed();
  if (mark_stack_mode == kMarkStackModeThreadLocal) {
    // Thread-local mark stack mode.
    RevokeThreadLocalMarkStacks(false);
    MutexLock mu(Thread::Current(), mark_stack_lock_);
    if (!revoked_mark_stacks_.empty()) {
      for (accounting::AtomicStack<mirror::Object>* mark_stack : revoked_mark_stacks_) {
        while (!mark_stack->IsEmpty()) {
          mirror::Object* obj = mark_stack->PopBack();
          if (kUseBakerReadBarrier) {
            mirror::Object* rb_ptr = obj->GetReadBarrierPointer();
            LOG(INFO) << "On mark queue : " << obj << " " << PrettyTypeOf(obj)
                      << " rb_ptr=" << rb_ptr << " is_marked=" << IsMarked(obj);
          } else {
            LOG(INFO) << "On mark queue : " << obj << " " << PrettyTypeOf(obj)
                      << " is_marked=" << IsMarked(obj);
          }
        }
      }
      LOG(FATAL) << "mark stack is not empty";
    }
  } else {
    // Shared, GC-exclusive, or off.
    MutexLock mu(Thread::Current(), mark_stack_lock_);
    CHECK(gc_mark_stack_->IsEmpty());
    CHECK(revoked_mark_stacks_.empty());
  }
}

void ConcurrentCopying::SweepSystemWeaks(Thread* self) {
  TimingLogger::ScopedTiming split("SweepSystemWeaks", GetTimings());
  ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
  Runtime::Current()->SweepSystemWeaks(this);
}

void ConcurrentCopying::Sweep(bool swap_bitmaps) {
  {
    TimingLogger::ScopedTiming t("MarkStackAsLive", GetTimings());
    accounting::ObjectStack* live_stack = heap_->GetLiveStack();
    if (kEnableFromSpaceAccountingCheck) {
      CHECK_GE(live_stack_freeze_size_, live_stack->Size());
    }
    heap_->MarkAllocStackAsLive(live_stack);
    live_stack->Reset();
  }
  CheckEmptyMarkStack();
  TimingLogger::ScopedTiming split("Sweep", GetTimings());
  for (const auto& space : GetHeap()->GetContinuousSpaces()) {
    if (space->IsContinuousMemMapAllocSpace()) {
      space::ContinuousMemMapAllocSpace* alloc_space = space->AsContinuousMemMapAllocSpace();
      if (space == region_space_ || immune_region_.ContainsSpace(space)) {
        continue;
      }
      TimingLogger::ScopedTiming split2(
          alloc_space->IsZygoteSpace() ? "SweepZygoteSpace" : "SweepAllocSpace", GetTimings());
      RecordFree(alloc_space->Sweep(swap_bitmaps));
    }
  }
  SweepLargeObjects(swap_bitmaps);
}

void ConcurrentCopying::SweepLargeObjects(bool swap_bitmaps) {
  TimingLogger::ScopedTiming split("SweepLargeObjects", GetTimings());
  RecordFreeLOS(heap_->GetLargeObjectsSpace()->Sweep(swap_bitmaps));
}

class ConcurrentCopyingClearBlackPtrsVisitor {
 public:
  explicit ConcurrentCopyingClearBlackPtrsVisitor(ConcurrentCopying* cc)
      : collector_(cc) {}
#ifndef USE_BAKER_OR_BROOKS_READ_BARRIER
  NO_RETURN
#endif
  void operator()(mirror::Object* obj) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
    DCHECK(obj != nullptr);
    DCHECK(collector_->heap_->GetMarkBitmap()->Test(obj)) << obj;
    DCHECK_EQ(obj->GetReadBarrierPointer(), ReadBarrier::BlackPtr()) << obj;
    obj->AtomicSetReadBarrierPointer(ReadBarrier::BlackPtr(), ReadBarrier::WhitePtr());
    DCHECK_EQ(obj->GetReadBarrierPointer(), ReadBarrier::WhitePtr()) << obj;
  }

 private:
  ConcurrentCopying* const collector_;
};

// Clear the black ptrs in non-moving objects back to white.
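// Non-moving objects were marked black during this GC (see ProcessMarkStackRef()); resetting
// them to white here prepares their read barrier state for the next GC cycle.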
void ConcurrentCopying::ClearBlackPtrs() {
  CHECK(kUseBakerReadBarrier);
  TimingLogger::ScopedTiming split("ClearBlackPtrs", GetTimings());
  ConcurrentCopyingClearBlackPtrsVisitor visitor(this);
  for (auto& space : heap_->GetContinuousSpaces()) {
    if (space == region_space_) {
      continue;
    }
    accounting::ContinuousSpaceBitmap* mark_bitmap = space->GetMarkBitmap();
    if (kVerboseMode) {
      LOG(INFO) << "ClearBlackPtrs: " << *space << " bitmap: " << *mark_bitmap;
    }
    mark_bitmap->VisitMarkedRange(reinterpret_cast<uintptr_t>(space->Begin()),
                                  reinterpret_cast<uintptr_t>(space->Limit()),
                                  visitor);
  }
  space::LargeObjectSpace* large_object_space = heap_->GetLargeObjectsSpace();
  large_object_space->GetMarkBitmap()->VisitMarkedRange(
      reinterpret_cast<uintptr_t>(large_object_space->Begin()),
      reinterpret_cast<uintptr_t>(large_object_space->End()),
      visitor);
  // Objects on the allocation stack?
  if (ReadBarrier::kEnableReadBarrierInvariantChecks || kIsDebugBuild) {
    size_t count = GetAllocationStack()->Size();
    auto* it = GetAllocationStack()->Begin();
    auto* end = GetAllocationStack()->End();
    for (size_t i = 0; i < count; ++i, ++it) {
      CHECK_LT(it, end);
      mirror::Object* obj = it->AsMirrorPtr();
      if (obj != nullptr) {
        // Must have been cleared above.
        CHECK_EQ(obj->GetReadBarrierPointer(), ReadBarrier::WhitePtr()) << obj;
      }
    }
  }
}

void ConcurrentCopying::ReclaimPhase() {
  TimingLogger::ScopedTiming split("ReclaimPhase", GetTimings());
  if (kVerboseMode) {
    LOG(INFO) << "GC ReclaimPhase";
  }
  Thread* self = Thread::Current();

  {
    // Double-check that the mark stack is empty.
    // Note: need to set this after VerifyNoFromSpaceRef().
    is_asserting_to_space_invariant_ = false;
    QuasiAtomic::ThreadFenceForConstructor();
    if (kVerboseMode) {
      LOG(INFO) << "Issue an empty check point. ";
    }
    IssueEmptyCheckpoint();
    // Disable the check.
    is_mark_stack_push_disallowed_.StoreSequentiallyConsistent(0);
    CheckEmptyMarkStack();
  }

  {
    // Record freed objects.
    TimingLogger::ScopedTiming split2("RecordFree", GetTimings());
    // Don't include thread-locals that are in the to-space.
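    // Everything allocated in the from-space at the flip either was moved to the to-space
    // (tracked by bytes_moved_/objects_moved_) or died, so the freed amounts fall out as the
    // differences: freed_bytes = from_bytes - to_bytes and freed_objects = from_objects -
    // to_objects.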
    uint64_t from_bytes = region_space_->GetBytesAllocatedInFromSpace();
    uint64_t from_objects = region_space_->GetObjectsAllocatedInFromSpace();
    uint64_t unevac_from_bytes = region_space_->GetBytesAllocatedInUnevacFromSpace();
    uint64_t unevac_from_objects = region_space_->GetObjectsAllocatedInUnevacFromSpace();
    uint64_t to_bytes = bytes_moved_.LoadSequentiallyConsistent();
    uint64_t to_objects = objects_moved_.LoadSequentiallyConsistent();
    if (kEnableFromSpaceAccountingCheck) {
      CHECK_EQ(from_space_num_objects_at_first_pause_, from_objects + unevac_from_objects);
      CHECK_EQ(from_space_num_bytes_at_first_pause_, from_bytes + unevac_from_bytes);
    }
    CHECK_LE(to_objects, from_objects);
    CHECK_LE(to_bytes, from_bytes);
    int64_t freed_bytes = from_bytes - to_bytes;
    int64_t freed_objects = from_objects - to_objects;
    if (kVerboseMode) {
      LOG(INFO) << "RecordFree:"
                << " from_bytes=" << from_bytes << " from_objects=" << from_objects
                << " unevac_from_bytes=" << unevac_from_bytes
                << " unevac_from_objects=" << unevac_from_objects
                << " to_bytes=" << to_bytes << " to_objects=" << to_objects
                << " freed_bytes=" << freed_bytes << " freed_objects=" << freed_objects
                << " from_space size=" << region_space_->FromSpaceSize()
                << " unevac_from_space size=" << region_space_->UnevacFromSpaceSize()
                << " to_space size=" << region_space_->ToSpaceSize();
      LOG(INFO) << "(before) num_bytes_allocated="
                << heap_->num_bytes_allocated_.LoadSequentiallyConsistent();
    }
    RecordFree(ObjectBytePair(freed_objects, freed_bytes));
    if (kVerboseMode) {
      LOG(INFO) << "(after) num_bytes_allocated="
                << heap_->num_bytes_allocated_.LoadSequentiallyConsistent();
    }
  }

  {
    TimingLogger::ScopedTiming split3("ComputeUnevacFromSpaceLiveRatio", GetTimings());
    ComputeUnevacFromSpaceLiveRatio();
  }

  {
    TimingLogger::ScopedTiming split4("ClearFromSpace", GetTimings());
    region_space_->ClearFromSpace();
  }

  {
    WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
    if (kUseBakerReadBarrier) {
      ClearBlackPtrs();
    }
    Sweep(false);
    SwapBitmaps();
    heap_->UnBindBitmaps();

    // Remove bitmaps for the immune spaces.
    while (!cc_bitmaps_.empty()) {
      accounting::ContinuousSpaceBitmap* cc_bitmap = cc_bitmaps_.back();
      cc_heap_bitmap_->RemoveContinuousSpaceBitmap(cc_bitmap);
      delete cc_bitmap;
      cc_bitmaps_.pop_back();
    }
    region_space_bitmap_ = nullptr;
  }

  CheckEmptyMarkStack();

  if (kVerboseMode) {
    LOG(INFO) << "GC end of ReclaimPhase";
  }
}

class ConcurrentCopyingComputeUnevacFromSpaceLiveRatioVisitor {
 public:
  explicit ConcurrentCopyingComputeUnevacFromSpaceLiveRatioVisitor(ConcurrentCopying* cc)
      : collector_(cc) {}
  void operator()(mirror::Object* ref) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
    DCHECK(ref != nullptr);
    DCHECK(collector_->region_space_bitmap_->Test(ref)) << ref;
    DCHECK(collector_->region_space_->IsInUnevacFromSpace(ref)) << ref;
    if (kUseBakerReadBarrier) {
      DCHECK_EQ(ref->GetReadBarrierPointer(), ReadBarrier::BlackPtr()) << ref;
      // Clear the black ptr.
      ref->AtomicSetReadBarrierPointer(ReadBarrier::BlackPtr(), ReadBarrier::WhitePtr());
      DCHECK_EQ(ref->GetReadBarrierPointer(), ReadBarrier::WhitePtr()) << ref;
    }
    size_t obj_size = ref->SizeOf();
    size_t alloc_size = RoundUp(obj_size, space::RegionSpace::kAlignment);
    collector_->region_space_->AddLiveBytes(ref, alloc_size);
  }

 private:
  ConcurrentCopying* const collector_;
};

// Compute how many live objects are left in regions.
void ConcurrentCopying::ComputeUnevacFromSpaceLiveRatio() {
  region_space_->AssertAllRegionLiveBytesZeroOrCleared();
  ConcurrentCopyingComputeUnevacFromSpaceLiveRatioVisitor visitor(this);
  region_space_bitmap_->VisitMarkedRange(reinterpret_cast<uintptr_t>(region_space_->Begin()),
                                         reinterpret_cast<uintptr_t>(region_space_->Limit()),
                                         visitor);
}

// Assert the to-space invariant.
void ConcurrentCopying::AssertToSpaceInvariant(mirror::Object* obj, MemberOffset offset,
                                               mirror::Object* ref) {
  CHECK(heap_->collector_type_ == kCollectorTypeCC) << static_cast<size_t>(heap_->collector_type_);
  if (is_asserting_to_space_invariant_) {
    if (region_space_->IsInToSpace(ref)) {
      // OK.
      return;
    } else if (region_space_->IsInUnevacFromSpace(ref)) {
      CHECK(region_space_bitmap_->Test(ref)) << ref;
    } else if (region_space_->IsInFromSpace(ref)) {
      // Not OK. Do extra logging.
      if (obj != nullptr) {
        LogFromSpaceRefHolder(obj, offset);
      }
      ref->GetLockWord(false).Dump(LOG(INTERNAL_FATAL));
      CHECK(false) << "Found from-space ref " << ref << " " << PrettyTypeOf(ref);
    } else {
      AssertToSpaceInvariantInNonMovingSpace(obj, ref);
    }
  }
}

class RootPrinter {
 public:
  RootPrinter() { }

  template <class MirrorType>
  ALWAYS_INLINE void VisitRootIfNonNull(mirror::CompressedReference<MirrorType>* root)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    if (!root->IsNull()) {
      VisitRoot(root);
    }
  }

  template <class MirrorType>
  void VisitRoot(mirror::Object** root)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    LOG(INTERNAL_FATAL) << "root=" << root << " ref=" << *root;
  }

  template <class MirrorType>
  void VisitRoot(mirror::CompressedReference<MirrorType>* root)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    LOG(INTERNAL_FATAL) << "root=" << root << " ref=" << root->AsMirrorPtr();
  }
};

void ConcurrentCopying::AssertToSpaceInvariant(GcRootSource* gc_root_source,
                                               mirror::Object* ref) {
  CHECK(heap_->collector_type_ == kCollectorTypeCC) << static_cast<size_t>(heap_->collector_type_);
  if (is_asserting_to_space_invariant_) {
    if (region_space_->IsInToSpace(ref)) {
      // OK.
      return;
    } else if (region_space_->IsInUnevacFromSpace(ref)) {
      CHECK(region_space_bitmap_->Test(ref)) << ref;
    } else if (region_space_->IsInFromSpace(ref)) {
      // Not OK. Do extra logging.
      if (gc_root_source == nullptr) {
        // No info.
      } else if (gc_root_source->HasArtField()) {
        ArtField* field = gc_root_source->GetArtField();
        LOG(INTERNAL_FATAL) << "gc root in field " << field << " " << PrettyField(field);
        RootPrinter root_printer;
        field->VisitRoots(root_printer);
      } else if (gc_root_source->HasArtMethod()) {
        ArtMethod* method = gc_root_source->GetArtMethod();
        LOG(INTERNAL_FATAL) << "gc root in method " << method << " " << PrettyMethod(method);
        RootPrinter root_printer;
        method->VisitRoots(root_printer);
      }
      ref->GetLockWord(false).Dump(LOG(INTERNAL_FATAL));
      region_space_->DumpNonFreeRegions(LOG(INTERNAL_FATAL));
      PrintFileToLog("/proc/self/maps", LogSeverity::INTERNAL_FATAL);
      MemMap::DumpMaps(LOG(INTERNAL_FATAL), true);
      CHECK(false) << "Found from-space ref " << ref << " " << PrettyTypeOf(ref);
    } else {
      AssertToSpaceInvariantInNonMovingSpace(nullptr, ref);
    }
  }
}

void ConcurrentCopying::LogFromSpaceRefHolder(mirror::Object* obj, MemberOffset offset) {
  if (kUseBakerReadBarrier) {
    LOG(INFO) << "holder=" << obj << " " << PrettyTypeOf(obj)
              << " holder rb_ptr=" << obj->GetReadBarrierPointer();
  } else {
    LOG(INFO) << "holder=" << obj << " " << PrettyTypeOf(obj);
  }
  if (region_space_->IsInFromSpace(obj)) {
    LOG(INFO) << "holder is in the from-space.";
  } else if (region_space_->IsInToSpace(obj)) {
    LOG(INFO) << "holder is in the to-space.";
  } else if (region_space_->IsInUnevacFromSpace(obj)) {
    LOG(INFO) << "holder is in the unevac from-space.";
    if (region_space_bitmap_->Test(obj)) {
      LOG(INFO) << "holder is marked in the region space bitmap.";
    } else {
      LOG(INFO) << "holder is not marked in the region space bitmap.";
    }
  } else {
    // In a non-moving space.
    if (immune_region_.ContainsObject(obj)) {
      LOG(INFO) << "holder is in the image or the zygote space.";
      accounting::ContinuousSpaceBitmap* cc_bitmap =
          cc_heap_bitmap_->GetContinuousSpaceBitmap(obj);
      CHECK(cc_bitmap != nullptr)
          << "An immune space object must have a bitmap.";
      if (cc_bitmap->Test(obj)) {
        LOG(INFO) << "holder is marked in the bit map.";
      } else {
        LOG(INFO) << "holder is NOT marked in the bit map.";
      }
    } else {
      LOG(INFO) << "holder is in a non-moving (or main) space.";
      accounting::ContinuousSpaceBitmap* mark_bitmap =
          heap_mark_bitmap_->GetContinuousSpaceBitmap(obj);
      accounting::LargeObjectBitmap* los_bitmap =
          heap_mark_bitmap_->GetLargeObjectBitmap(obj);
      CHECK(los_bitmap != nullptr) << "LOS bitmap covers the entire address range";
      bool is_los = mark_bitmap == nullptr;
      if (!is_los && mark_bitmap->Test(obj)) {
        LOG(INFO) << "holder is marked in the mark bit map.";
      } else if (is_los && los_bitmap->Test(obj)) {
        LOG(INFO) << "holder is marked in the los bit map.";
      } else {
        // If ref is on the allocation stack, then it is considered
        // marked/alive (but not necessarily on the live stack.)
        if (IsOnAllocStack(obj)) {
          LOG(INFO) << "holder is on the alloc stack.";
        } else {
          LOG(INFO) << "holder is not marked or on the alloc stack.";
        }
      }
    }
  }
  LOG(INFO) << "offset=" << offset.SizeValue();
}

void ConcurrentCopying::AssertToSpaceInvariantInNonMovingSpace(mirror::Object* obj,
                                                               mirror::Object* ref) {
  // In a non-moving space. Check that the ref is marked.
  if (immune_region_.ContainsObject(ref)) {
    accounting::ContinuousSpaceBitmap* cc_bitmap =
        cc_heap_bitmap_->GetContinuousSpaceBitmap(ref);
    CHECK(cc_bitmap != nullptr)
        << "An immune space ref must have a bitmap. " << ref;
    if (kUseBakerReadBarrier) {
      CHECK(cc_bitmap->Test(ref))
          << "Unmarked immune space ref. obj=" << obj << " rb_ptr="
          << obj->GetReadBarrierPointer() << " ref=" << ref;
    } else {
      CHECK(cc_bitmap->Test(ref))
          << "Unmarked immune space ref. obj=" << obj << " ref=" << ref;
    }
  } else {
    accounting::ContinuousSpaceBitmap* mark_bitmap =
        heap_mark_bitmap_->GetContinuousSpaceBitmap(ref);
    accounting::LargeObjectBitmap* los_bitmap =
        heap_mark_bitmap_->GetLargeObjectBitmap(ref);
    CHECK(los_bitmap != nullptr) << "LOS bitmap covers the entire address range";
    bool is_los = mark_bitmap == nullptr;
    if ((!is_los && mark_bitmap->Test(ref)) ||
        (is_los && los_bitmap->Test(ref))) {
      // OK.
    } else {
      // If ref is on the allocation stack, then it may not be
      // marked live, but it is still considered marked/alive (though not
      // necessarily on the live stack).
      CHECK(IsOnAllocStack(ref)) << "Unmarked ref that's not on the allocation stack. "
                                 << "obj=" << obj << " ref=" << ref;
    }
  }
}

// Used to scan the ref fields of an object.
class ConcurrentCopyingRefFieldsVisitor {
 public:
  explicit ConcurrentCopyingRefFieldsVisitor(ConcurrentCopying* collector)
      : collector_(collector) {}

  void operator()(mirror::Object* obj, MemberOffset offset, bool /* is_static */)
      const ALWAYS_INLINE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
    collector_->Process(obj, offset);
  }

  void operator()(mirror::Class* klass, mirror::Reference* ref) const
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) ALWAYS_INLINE {
    CHECK(klass->IsTypeOfReferenceClass());
    collector_->DelayReferenceReferent(klass, ref);
  }

 private:
  ConcurrentCopying* const collector_;
};

// Scan the ref fields of an object.
void ConcurrentCopying::Scan(mirror::Object* to_ref) {
  DCHECK(!region_space_->IsInFromSpace(to_ref));
  ConcurrentCopyingRefFieldsVisitor visitor(this);
  to_ref->VisitReferences<true>(visitor, visitor);
}

// Process a field.
inline void ConcurrentCopying::Process(mirror::Object* obj, MemberOffset offset) {
  mirror::Object* ref =
      obj->GetFieldObject<mirror::Object, kVerifyNone, kWithoutReadBarrier, false>(offset);
  if (ref == nullptr || region_space_->IsInToSpace(ref)) {
    return;
  }
  mirror::Object* to_ref = Mark(ref);
  if (to_ref == ref) {
    return;
  }
  // This may fail if the mutator writes to the field at the same time,
  // but that is OK: the mutator's update wins.
  mirror::Object* expected_ref = ref;
  mirror::Object* new_ref = to_ref;
  do {
    if (expected_ref !=
        obj->GetFieldObject<mirror::Object, kVerifyNone, kWithoutReadBarrier, false>(offset)) {
      // It was updated by the mutator.
      break;
    }
  } while (!obj->CasFieldWeakSequentiallyConsistentObjectWithoutWriteBarrier<
               false, false, kVerifyNone>(offset, expected_ref, new_ref));
}

// Process some roots.
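// Roots are updated in place: if the mark moved the object, the root slot is
// rewritten with a weak CAS, and a concurrent mutator store to the same slot
// simply wins the race.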
void ConcurrentCopying::VisitRoots(
    mirror::Object*** roots, size_t count, const RootInfo& info ATTRIBUTE_UNUSED) {
  for (size_t i = 0; i < count; ++i) {
    mirror::Object** root = roots[i];
    mirror::Object* ref = *root;
    if (ref == nullptr || region_space_->IsInToSpace(ref)) {
      continue;
    }
    mirror::Object* to_ref = Mark(ref);
    if (to_ref == ref) {
      continue;
    }
    Atomic<mirror::Object*>* addr = reinterpret_cast<Atomic<mirror::Object*>*>(root);
    mirror::Object* expected_ref = ref;
    mirror::Object* new_ref = to_ref;
    do {
      if (expected_ref != addr->LoadRelaxed()) {
        // It was updated by the mutator.
        break;
      }
    } while (!addr->CompareExchangeWeakSequentiallyConsistent(expected_ref, new_ref));
  }
}

void ConcurrentCopying::VisitRoots(
    mirror::CompressedReference<mirror::Object>** roots, size_t count,
    const RootInfo& info ATTRIBUTE_UNUSED) {
  for (size_t i = 0; i < count; ++i) {
    mirror::CompressedReference<mirror::Object>* root = roots[i];
    mirror::Object* ref = root->AsMirrorPtr();
    if (ref == nullptr || region_space_->IsInToSpace(ref)) {
      continue;
    }
    mirror::Object* to_ref = Mark(ref);
    if (to_ref == ref) {
      continue;
    }
    auto* addr = reinterpret_cast<Atomic<mirror::CompressedReference<mirror::Object>>*>(root);
    auto expected_ref = mirror::CompressedReference<mirror::Object>::FromMirrorPtr(ref);
    auto new_ref = mirror::CompressedReference<mirror::Object>::FromMirrorPtr(to_ref);
    do {
      if (ref != addr->LoadRelaxed().AsMirrorPtr()) {
        // It was updated by the mutator.
        break;
      }
    } while (!addr->CompareExchangeWeakSequentiallyConsistent(expected_ref, new_ref));
  }
}

// Fill the given memory block with a dummy object. Used to fill in copies
// of objects that were lost in the race to install a forwarding pointer.
void ConcurrentCopying::FillWithDummyObject(mirror::Object* dummy_obj, size_t byte_size) {
  CHECK(IsAligned<kObjectAlignment>(byte_size));
  memset(dummy_obj, 0, byte_size);
  mirror::Class* int_array_class = mirror::IntArray::GetArrayClass();
  CHECK(int_array_class != nullptr);
  AssertToSpaceInvariant(nullptr, MemberOffset(0), int_array_class);
  size_t component_size = int_array_class->GetComponentSize();
  CHECK_EQ(component_size, sizeof(int32_t));
  size_t data_offset = mirror::Array::DataOffset(component_size).SizeValue();
  if (data_offset > byte_size) {
    // The block is too small to hold even an empty int array. Use a plain
    // java.lang.Object, which must then fit exactly.
    mirror::Class* java_lang_Object = WellKnownClasses::ToClass(WellKnownClasses::java_lang_Object);
    AssertToSpaceInvariant(nullptr, MemberOffset(0), java_lang_Object);
    CHECK_EQ(byte_size, java_lang_Object->GetObjectSize());
    dummy_obj->SetClass(java_lang_Object);
    CHECK_EQ(byte_size, dummy_obj->SizeOf());
  } else {
    // Use an int array.
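    // Pick the array length so that header + length * component_size equals
    // byte_size exactly; SizeOf() on the dummy then covers the whole block.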
    dummy_obj->SetClass(int_array_class);
    CHECK(dummy_obj->IsArrayInstance());
    int32_t length = (byte_size - data_offset) / component_size;
    dummy_obj->AsArray()->SetLength(length);
    CHECK_EQ(dummy_obj->AsArray()->GetLength(), length)
        << "byte_size=" << byte_size << " length=" << length
        << " component_size=" << component_size << " data_offset=" << data_offset;
    CHECK_EQ(byte_size, dummy_obj->SizeOf())
        << "byte_size=" << byte_size << " length=" << length
        << " component_size=" << component_size << " data_offset=" << data_offset;
  }
}

// Reuse the memory blocks that were copies of objects that were lost in the race.
mirror::Object* ConcurrentCopying::AllocateInSkippedBlock(size_t alloc_size) {
  // Try to reuse the blocks that were unused due to CAS failures.
  CHECK(IsAligned<space::RegionSpace::kAlignment>(alloc_size));
  Thread* self = Thread::Current();
  size_t min_object_size = RoundUp(sizeof(mirror::Object), space::RegionSpace::kAlignment);
  MutexLock mu(self, skipped_blocks_lock_);
  auto it = skipped_blocks_map_.lower_bound(alloc_size);
  if (it == skipped_blocks_map_.end()) {
    // Not found.
    return nullptr;
  }
  {
    size_t byte_size = it->first;
    CHECK_GE(byte_size, alloc_size);
    if (byte_size > alloc_size && byte_size - alloc_size < min_object_size) {
      // If the remainder would be too small for a dummy object, retry with a
      // larger request size.
      it = skipped_blocks_map_.lower_bound(alloc_size + min_object_size);
      if (it == skipped_blocks_map_.end()) {
        // Not found.
        return nullptr;
      }
      CHECK(IsAligned<space::RegionSpace::kAlignment>(it->first - alloc_size));
      CHECK_GE(it->first - alloc_size, min_object_size)
          << "byte_size=" << byte_size << " it->first=" << it->first
          << " alloc_size=" << alloc_size;
    }
  }
  // Found a block.
  CHECK(it != skipped_blocks_map_.end());
  size_t byte_size = it->first;
  uint8_t* addr = it->second;
  CHECK_GE(byte_size, alloc_size);
  CHECK(region_space_->IsInToSpace(reinterpret_cast<mirror::Object*>(addr)));
  CHECK(IsAligned<space::RegionSpace::kAlignment>(byte_size));
  if (kVerboseMode) {
    LOG(INFO) << "Reusing skipped bytes : " << reinterpret_cast<void*>(addr) << ", " << byte_size;
  }
  skipped_blocks_map_.erase(it);
  memset(addr, 0, byte_size);
  if (byte_size > alloc_size) {
    // Return the remainder to the map.
    CHECK(IsAligned<space::RegionSpace::kAlignment>(byte_size - alloc_size));
    CHECK_GE(byte_size - alloc_size, min_object_size);
    FillWithDummyObject(reinterpret_cast<mirror::Object*>(addr + alloc_size),
                        byte_size - alloc_size);
    CHECK(region_space_->IsInToSpace(reinterpret_cast<mirror::Object*>(addr + alloc_size)));
    skipped_blocks_map_.insert(std::make_pair(byte_size - alloc_size, addr + alloc_size));
  }
  return reinterpret_cast<mirror::Object*>(addr);
}

mirror::Object* ConcurrentCopying::Copy(mirror::Object* from_ref) {
  DCHECK(region_space_->IsInFromSpace(from_ref));
  // No read barrier to avoid nested RB that might violate the to-space
  // invariant. Note that from_ref is a from-space ref so the SizeOf()
  // call will access the from-space meta objects, but it's OK and necessary.
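  // Overall protocol: allocate a to-space block, memcpy the contents, then
  // try to install a forwarding pointer in the from-space object's lock word
  // with a CAS. Exactly one thread wins; losers recycle or free their copy.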
  size_t obj_size = from_ref->SizeOf<kDefaultVerifyFlags, kWithoutReadBarrier>();
  size_t region_space_alloc_size = RoundUp(obj_size, space::RegionSpace::kAlignment);
  size_t region_space_bytes_allocated = 0U;
  size_t non_moving_space_bytes_allocated = 0U;
  size_t bytes_allocated = 0U;
  size_t dummy;
  mirror::Object* to_ref = region_space_->AllocNonvirtual<true>(
      region_space_alloc_size, &region_space_bytes_allocated, nullptr, &dummy);
  bytes_allocated = region_space_bytes_allocated;
  if (to_ref != nullptr) {
    DCHECK_EQ(region_space_alloc_size, region_space_bytes_allocated);
  }
  bool fall_back_to_non_moving = false;
  if (UNLIKELY(to_ref == nullptr)) {
    // Failed to allocate in the region space. Try the skipped blocks.
    to_ref = AllocateInSkippedBlock(region_space_alloc_size);
    if (to_ref != nullptr) {
      // Succeeded in allocating from a skipped block.
      if (heap_->use_tlab_) {
        // This is necessary for the TLAB case as it's not accounted in the space.
        region_space_->RecordAlloc(to_ref);
      }
      bytes_allocated = region_space_alloc_size;
    } else {
      // Fall back to the non-moving space.
      fall_back_to_non_moving = true;
      if (kVerboseMode) {
        LOG(INFO) << "Out of memory in the to-space. Fall back to non-moving. skipped_bytes="
                  << to_space_bytes_skipped_.LoadSequentiallyConsistent()
                  << " skipped_objects=" << to_space_objects_skipped_.LoadSequentiallyConsistent();
      }
      to_ref = heap_->non_moving_space_->Alloc(Thread::Current(), obj_size,
                                               &non_moving_space_bytes_allocated, nullptr, &dummy);
      CHECK(to_ref != nullptr) << "Fall-back non-moving space allocation failed";
      bytes_allocated = non_moving_space_bytes_allocated;
      // Mark it in the mark bitmap.
      accounting::ContinuousSpaceBitmap* mark_bitmap =
          heap_mark_bitmap_->GetContinuousSpaceBitmap(to_ref);
      CHECK(mark_bitmap != nullptr);
      CHECK(!mark_bitmap->AtomicTestAndSet(to_ref));
    }
  }
  DCHECK(to_ref != nullptr);

  // Attempt to install the forwarding pointer. This is in a loop as the
  // lock word atomic write can fail.
  while (true) {
    // Copy the object. TODO: copy only the lock word in the second iteration and on?
    memcpy(to_ref, from_ref, obj_size);

    LockWord old_lock_word = to_ref->GetLockWord(false);

    if (old_lock_word.GetState() == LockWord::kForwardingAddress) {
      // Lost the race. Another thread (either GC or mutator) stored
      // the forwarding pointer first. Make the lost copy (to_ref)
      // look like a valid but dead (dummy) object and keep it for
      // future reuse.
      FillWithDummyObject(to_ref, bytes_allocated);
      if (!fall_back_to_non_moving) {
        DCHECK(region_space_->IsInToSpace(to_ref));
        if (bytes_allocated > space::RegionSpace::kRegionSize) {
          // Free the large alloc.
          region_space_->FreeLarge(to_ref, bytes_allocated);
        } else {
          // Record the lost copy for later reuse.
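          // skipped_blocks_map_ is keyed by block size so that
          // AllocateInSkippedBlock() can find a best-fit block with
          // lower_bound() on a later copy attempt.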
          heap_->num_bytes_allocated_.FetchAndAddSequentiallyConsistent(bytes_allocated);
          to_space_bytes_skipped_.FetchAndAddSequentiallyConsistent(bytes_allocated);
          to_space_objects_skipped_.FetchAndAddSequentiallyConsistent(1);
          MutexLock mu(Thread::Current(), skipped_blocks_lock_);
          skipped_blocks_map_.insert(std::make_pair(bytes_allocated,
                                                    reinterpret_cast<uint8_t*>(to_ref)));
        }
      } else {
        DCHECK(heap_->non_moving_space_->HasAddress(to_ref));
        DCHECK_EQ(bytes_allocated, non_moving_space_bytes_allocated);
        // Free the non-moving-space chunk.
        accounting::ContinuousSpaceBitmap* mark_bitmap =
            heap_mark_bitmap_->GetContinuousSpaceBitmap(to_ref);
        CHECK(mark_bitmap != nullptr);
        CHECK(mark_bitmap->Clear(to_ref));
        heap_->non_moving_space_->Free(Thread::Current(), to_ref);
      }

      // Get the winner's forwarding ptr.
      mirror::Object* lost_fwd_ptr = to_ref;
      to_ref = reinterpret_cast<mirror::Object*>(old_lock_word.ForwardingAddress());
      CHECK(to_ref != nullptr);
      CHECK_NE(to_ref, lost_fwd_ptr);
      CHECK(region_space_->IsInToSpace(to_ref) || heap_->non_moving_space_->HasAddress(to_ref));
      CHECK_NE(to_ref->GetLockWord(false).GetState(), LockWord::kForwardingAddress);
      return to_ref;
    }

    // Set the gray ptr.
    if (kUseBakerReadBarrier) {
      to_ref->SetReadBarrierPointer(ReadBarrier::GrayPtr());
    }

    LockWord new_lock_word = LockWord::FromForwardingAddress(reinterpret_cast<size_t>(to_ref));

    // Try to atomically write the fwd ptr.
    bool success = from_ref->CasLockWordWeakSequentiallyConsistent(old_lock_word, new_lock_word);
    if (LIKELY(success)) {
      // The CAS succeeded.
      objects_moved_.FetchAndAddSequentiallyConsistent(1);
      bytes_moved_.FetchAndAddSequentiallyConsistent(region_space_alloc_size);
      if (LIKELY(!fall_back_to_non_moving)) {
        DCHECK(region_space_->IsInToSpace(to_ref));
      } else {
        DCHECK(heap_->non_moving_space_->HasAddress(to_ref));
        DCHECK_EQ(bytes_allocated, non_moving_space_bytes_allocated);
      }
      if (kUseBakerReadBarrier) {
        DCHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr());
      }
      DCHECK(GetFwdPtr(from_ref) == to_ref);
      CHECK_NE(to_ref->GetLockWord(false).GetState(), LockWord::kForwardingAddress);
      PushOntoMarkStack(to_ref);
      return to_ref;
    } else {
      // The CAS failed. It may have lost the race or may have failed
      // due to monitor/hashcode ops. Either way, retry.
    }
  }
}

mirror::Object* ConcurrentCopying::IsMarked(mirror::Object* from_ref) {
  DCHECK(from_ref != nullptr);
  space::RegionSpace::RegionType rtype = region_space_->GetRegionType(from_ref);
  if (rtype == space::RegionSpace::RegionType::kRegionTypeToSpace) {
    // It's already marked.
    return from_ref;
  }
  mirror::Object* to_ref;
  if (rtype == space::RegionSpace::RegionType::kRegionTypeFromSpace) {
    to_ref = GetFwdPtr(from_ref);
    DCHECK(to_ref == nullptr || region_space_->IsInToSpace(to_ref) ||
           heap_->non_moving_space_->HasAddress(to_ref))
        << "from_ref=" << from_ref << " to_ref=" << to_ref;
  } else if (rtype == space::RegionSpace::RegionType::kRegionTypeUnevacFromSpace) {
    if (region_space_bitmap_->Test(from_ref)) {
      to_ref = from_ref;
    } else {
      to_ref = nullptr;
    }
  } else {
    // from_ref is in a non-moving space.
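    // Non-moving objects are never relocated, so "marked" resolves to either
    // from_ref itself (already marked, or treated as such via the alloc
    // stack) or nullptr (not marked).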
    if (immune_region_.ContainsObject(from_ref)) {
      accounting::ContinuousSpaceBitmap* cc_bitmap =
          cc_heap_bitmap_->GetContinuousSpaceBitmap(from_ref);
      DCHECK(cc_bitmap != nullptr)
          << "An immune space object must have a bitmap";
      if (kIsDebugBuild) {
        DCHECK(heap_mark_bitmap_->GetContinuousSpaceBitmap(from_ref)->Test(from_ref))
            << "Immune space object must be already marked";
      }
      if (cc_bitmap->Test(from_ref)) {
        // Already marked.
        to_ref = from_ref;
      } else {
        // Not marked.
        to_ref = nullptr;
      }
    } else {
      // Non-immune non-moving space. Use the mark bitmap.
      accounting::ContinuousSpaceBitmap* mark_bitmap =
          heap_mark_bitmap_->GetContinuousSpaceBitmap(from_ref);
      accounting::LargeObjectBitmap* los_bitmap =
          heap_mark_bitmap_->GetLargeObjectBitmap(from_ref);
      CHECK(los_bitmap != nullptr) << "LOS bitmap covers the entire address range";
      bool is_los = mark_bitmap == nullptr;
      if (!is_los && mark_bitmap->Test(from_ref)) {
        // Already marked.
        to_ref = from_ref;
      } else if (is_los && los_bitmap->Test(from_ref)) {
        // Already marked in LOS.
        to_ref = from_ref;
      } else {
        // Not marked.
        if (IsOnAllocStack(from_ref)) {
          // If on the allocation stack, it's considered marked.
          to_ref = from_ref;
        } else {
          // Not marked.
          to_ref = nullptr;
        }
      }
    }
  }
  return to_ref;
}

bool ConcurrentCopying::IsOnAllocStack(mirror::Object* ref) {
  QuasiAtomic::ThreadFenceAcquire();
  accounting::ObjectStack* alloc_stack = GetAllocationStack();
  return alloc_stack->Contains(ref);
}

mirror::Object* ConcurrentCopying::Mark(mirror::Object* from_ref) {
  if (from_ref == nullptr) {
    return nullptr;
  }
  DCHECK(heap_->collector_type_ == kCollectorTypeCC);
  if (kUseBakerReadBarrier && !is_active_) {
    // In the lock word forwarding-address state, the read barrier bits
    // in the lock word are part of the stored forwarding address and
    // invalid. This is usually OK as the from-space copies of objects
    // aren't accessed by mutators due to the to-space
    // invariant. However, during the dex2oat image writing relocation
    // and the zygote compaction, objects can be in the forwarding-
    // address state (to store the forward/relocation addresses) and
    // they can still be accessed and the invalid read barrier bits
    // are consulted. If they look gray but aren't really, the
    // read barrier slow path can trigger when it shouldn't. To guard
    // against this, return here if the CC collector isn't running.
    return from_ref;
  }
  DCHECK(region_space_ != nullptr) << "Read barrier slow path taken when CC isn't running?";
  space::RegionSpace::RegionType rtype = region_space_->GetRegionType(from_ref);
  if (rtype == space::RegionSpace::RegionType::kRegionTypeToSpace) {
    // It's already marked.
    return from_ref;
  }
  mirror::Object* to_ref;
  if (rtype == space::RegionSpace::RegionType::kRegionTypeFromSpace) {
    to_ref = GetFwdPtr(from_ref);
    if (kUseBakerReadBarrier) {
      DCHECK(to_ref != ReadBarrier::GrayPtr()) << "from_ref=" << from_ref << " to_ref=" << to_ref;
    }
    if (to_ref == nullptr) {
      // It isn't marked yet. Mark it by copying it to the to-space.
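      // Copy() may race with other GC threads and with mutators taking the
      // read barrier slow path; whoever installs the forwarding pointer
      // first wins, and the losers adopt the winner's copy.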
      to_ref = Copy(from_ref);
    }
    DCHECK(region_space_->IsInToSpace(to_ref) || heap_->non_moving_space_->HasAddress(to_ref))
        << "from_ref=" << from_ref << " to_ref=" << to_ref;
  } else if (rtype == space::RegionSpace::RegionType::kRegionTypeUnevacFromSpace) {
    // This may or may not succeed, which is ok.
    if (kUseBakerReadBarrier) {
      from_ref->AtomicSetReadBarrierPointer(ReadBarrier::WhitePtr(), ReadBarrier::GrayPtr());
    }
    if (region_space_bitmap_->AtomicTestAndSet(from_ref)) {
      // Already marked.
      to_ref = from_ref;
    } else {
      // Newly marked.
      to_ref = from_ref;
      if (kUseBakerReadBarrier) {
        DCHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr());
      }
      PushOntoMarkStack(to_ref);
    }
  } else {
    // from_ref is in a non-moving space.
    DCHECK(!region_space_->HasAddress(from_ref)) << from_ref;
    if (immune_region_.ContainsObject(from_ref)) {
      accounting::ContinuousSpaceBitmap* cc_bitmap =
          cc_heap_bitmap_->GetContinuousSpaceBitmap(from_ref);
      DCHECK(cc_bitmap != nullptr)
          << "An immune space object must have a bitmap";
      if (kIsDebugBuild) {
        DCHECK(heap_mark_bitmap_->GetContinuousSpaceBitmap(from_ref)->Test(from_ref))
            << "Immune space object must be already marked";
      }
      // This may or may not succeed, which is ok.
      if (kUseBakerReadBarrier) {
        from_ref->AtomicSetReadBarrierPointer(ReadBarrier::WhitePtr(), ReadBarrier::GrayPtr());
      }
      if (cc_bitmap->AtomicTestAndSet(from_ref)) {
        // Already marked.
        to_ref = from_ref;
      } else {
        // Newly marked.
        to_ref = from_ref;
        if (kUseBakerReadBarrier) {
          DCHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr());
        }
        PushOntoMarkStack(to_ref);
      }
    } else {
      // Use the mark bitmap.
      accounting::ContinuousSpaceBitmap* mark_bitmap =
          heap_mark_bitmap_->GetContinuousSpaceBitmap(from_ref);
      accounting::LargeObjectBitmap* los_bitmap =
          heap_mark_bitmap_->GetLargeObjectBitmap(from_ref);
      CHECK(los_bitmap != nullptr) << "LOS bitmap covers the entire address range";
      bool is_los = mark_bitmap == nullptr;
      if (!is_los && mark_bitmap->Test(from_ref)) {
        // Already marked.
        to_ref = from_ref;
        if (kUseBakerReadBarrier) {
          DCHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr() ||
                 to_ref->GetReadBarrierPointer() == ReadBarrier::BlackPtr());
        }
      } else if (is_los && los_bitmap->Test(from_ref)) {
        // Already marked in LOS.
        to_ref = from_ref;
        if (kUseBakerReadBarrier) {
          DCHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr() ||
                 to_ref->GetReadBarrierPointer() == ReadBarrier::BlackPtr());
        }
      } else {
        // Not marked.
        if (IsOnAllocStack(from_ref)) {
          // If it's on the allocation stack, it's considered marked. Keep it white.
          to_ref = from_ref;
          // Objects on the allocation stack need not be marked.
          if (!is_los) {
            DCHECK(!mark_bitmap->Test(to_ref));
          } else {
            DCHECK(!los_bitmap->Test(to_ref));
          }
          if (kUseBakerReadBarrier) {
            DCHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::WhitePtr());
          }
        } else {
          // Not marked or on the allocation stack. Try to mark it.
          // This may or may not succeed, which is ok.
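          // Baker color protocol as used in this file: white = unmarked,
          // gray = marked but not yet scanned (pushed on a mark stack),
          // black = marked and scanned. The white-to-gray transition below
          // is attempted before the bitmap CAS that actually decides which
          // thread pushes the object.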
          if (kUseBakerReadBarrier) {
            from_ref->AtomicSetReadBarrierPointer(ReadBarrier::WhitePtr(), ReadBarrier::GrayPtr());
          }
          if (!is_los && mark_bitmap->AtomicTestAndSet(from_ref)) {
            // Already marked.
            to_ref = from_ref;
          } else if (is_los && los_bitmap->AtomicTestAndSet(from_ref)) {
            // Already marked in LOS.
            to_ref = from_ref;
          } else {
            // Newly marked.
            to_ref = from_ref;
            if (kUseBakerReadBarrier) {
              DCHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr());
            }
            PushOntoMarkStack(to_ref);
          }
        }
      }
    }
  }
  return to_ref;
}

void ConcurrentCopying::FinishPhase() {
  {
    MutexLock mu(Thread::Current(), mark_stack_lock_);
    CHECK_EQ(pooled_mark_stacks_.size(), kMarkStackPoolSize);
  }
  region_space_ = nullptr;
  {
    MutexLock mu(Thread::Current(), skipped_blocks_lock_);
    skipped_blocks_map_.clear();
  }
  WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
  heap_->ClearMarkedObjects();
}

bool ConcurrentCopying::IsMarkedHeapReference(mirror::HeapReference<mirror::Object>* field) {
  mirror::Object* from_ref = field->AsMirrorPtr();
  mirror::Object* to_ref = IsMarked(from_ref);
  if (to_ref == nullptr) {
    return false;
  }
  if (from_ref != to_ref) {
    QuasiAtomic::ThreadFenceRelease();
    field->Assign(to_ref);
    QuasiAtomic::ThreadFenceSequentiallyConsistent();
  }
  return true;
}

mirror::Object* ConcurrentCopying::MarkObject(mirror::Object* from_ref) {
  return Mark(from_ref);
}

void ConcurrentCopying::DelayReferenceReferent(mirror::Class* klass,
                                               mirror::Reference* reference) {
  heap_->GetReferenceProcessor()->DelayReferenceReferent(klass, reference, this);
}

void ConcurrentCopying::ProcessReferences(Thread* self) {
  TimingLogger::ScopedTiming split("ProcessReferences", GetTimings());
  // We don't really need to lock the heap bitmap lock as we use CAS to mark in bitmaps.
  WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
  GetHeap()->GetReferenceProcessor()->ProcessReferences(
      true /*concurrent*/, GetTimings(), GetCurrentIteration()->GetClearSoftReferences(), this);
}

void ConcurrentCopying::RevokeAllThreadLocalBuffers() {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  region_space_->RevokeAllThreadLocalBuffers();
}

}  // namespace collector
}  // namespace gc
}  // namespace art
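// Illustrative sketch (not part of the original file): a minimal model of the
// forwarding-pointer race that Copy() above resolves. The names below are
// hypothetical stand-ins; in ART the forwarding state lives in the object's
// lock word and is installed with CasLockWordWeakSequentiallyConsistent().
//
//   std::atomic<Object*> forwarding{nullptr};  // conceptually one per object
//
//   Object* Forward(Object* from) {
//     Object* copy = AllocAndCopy(from);            // every racer makes a copy
//     Object* expected = nullptr;
//     if (forwarding.compare_exchange_strong(expected, copy)) {
//       return copy;                                // won the race
//     }
//     Recycle(copy);  // lost: fill with a dummy object and keep for reuse
//     return expected;                              // adopt the winner's copy
//   }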