concurrent_copying.cc revision 3cf225386e8129dcbe32b289279ecb87ec255318
/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "concurrent_copying.h"

#include "art_field-inl.h"
#include "gc/accounting/heap_bitmap-inl.h"
#include "gc/accounting/space_bitmap-inl.h"
#include "gc/reference_processor.h"
#include "gc/space/image_space.h"
#include "gc/space/space.h"
#include "intern_table.h"
#include "mirror/class-inl.h"
#include "mirror/object-inl.h"
#include "scoped_thread_state_change.h"
#include "thread-inl.h"
#include "thread_list.h"
#include "well_known_classes.h"

namespace art {
namespace gc {
namespace collector {

ConcurrentCopying::ConcurrentCopying(Heap* heap, const std::string& name_prefix)
    : GarbageCollector(heap,
                       name_prefix + (name_prefix.empty() ? "" : " ") +
                       "concurrent copying + mark sweep"),
      region_space_(nullptr), gc_barrier_(new Barrier(0)), mark_queue_(2 * MB),
      is_marking_(false), is_active_(false), is_asserting_to_space_invariant_(false),
      heap_mark_bitmap_(nullptr), live_stack_freeze_size_(0),
      skipped_blocks_lock_("concurrent copying bytes blocks lock", kMarkSweepMarkStackLock),
      rb_table_(heap_->GetReadBarrierTable()),
      force_evacuate_all_(false) {
  static_assert(space::RegionSpace::kRegionSize == accounting::ReadBarrierTable::kRegionSize,
                "The region space size and the read barrier table region size must match");
  cc_heap_bitmap_.reset(new accounting::HeapBitmap(heap));
  {
    Thread* self = Thread::Current();
    ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
    // Cache this so that we won't have to lock heap_bitmap_lock_ in
    // Mark() which could cause a nested lock on heap_bitmap_lock_
    // when GC causes a RB while doing GC or a lock order violation
    // (class_linker_lock_ and heap_bitmap_lock_).
    heap_mark_bitmap_ = heap->GetMarkBitmap();
  }
}

ConcurrentCopying::~ConcurrentCopying() {
}

void ConcurrentCopying::RunPhases() {
  CHECK(kUseBakerReadBarrier || kUseTableLookupReadBarrier);
  CHECK(!is_active_);
  is_active_ = true;
  Thread* self = Thread::Current();
  Locks::mutator_lock_->AssertNotHeld(self);
  {
    ReaderMutexLock mu(self, *Locks::mutator_lock_);
    InitializePhase();
  }
  FlipThreadRoots();
  {
    ReaderMutexLock mu(self, *Locks::mutator_lock_);
    MarkingPhase();
  }
  // Verify no from space refs. This causes a pause.
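  // The verification below suspends all mutators (ScopedPause) and re-walks the roots, the
  // to-space, the non-moving spaces and the allocation stack to check that no from-space
  // reference survived marking, so it only runs when kEnableNoFromSpaceRefsVerification
  // or kIsDebugBuild is set.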
  if (kEnableNoFromSpaceRefsVerification || kIsDebugBuild) {
    TimingLogger::ScopedTiming split("(Paused)VerifyNoFromSpaceReferences", GetTimings());
    ScopedPause pause(this);
    CheckEmptyMarkQueue();
    if (kVerboseMode) {
      LOG(INFO) << "Verifying no from-space refs";
    }
    VerifyNoFromSpaceReferences();
    if (kVerboseMode) {
      LOG(INFO) << "Done verifying no from-space refs";
    }
    CheckEmptyMarkQueue();
  }
  {
    ReaderMutexLock mu(self, *Locks::mutator_lock_);
    ReclaimPhase();
  }
  FinishPhase();
  CHECK(is_active_);
  is_active_ = false;
}

void ConcurrentCopying::BindBitmaps() {
  Thread* self = Thread::Current();
  WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
  // Mark all of the spaces we never collect as immune.
  for (const auto& space : heap_->GetContinuousSpaces()) {
    if (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyNeverCollect
        || space->GetGcRetentionPolicy() == space::kGcRetentionPolicyFullCollect) {
      CHECK(space->IsZygoteSpace() || space->IsImageSpace());
      CHECK(immune_region_.AddContinuousSpace(space)) << "Failed to add space " << *space;
      const char* bitmap_name = space->IsImageSpace() ? "cc image space bitmap" :
          "cc zygote space bitmap";
      // TODO: try avoiding using bitmaps for image/zygote to save space.
      accounting::ContinuousSpaceBitmap* bitmap =
          accounting::ContinuousSpaceBitmap::Create(bitmap_name, space->Begin(), space->Capacity());
      cc_heap_bitmap_->AddContinuousSpaceBitmap(bitmap);
      cc_bitmaps_.push_back(bitmap);
    } else if (space == region_space_) {
      accounting::ContinuousSpaceBitmap* bitmap =
          accounting::ContinuousSpaceBitmap::Create("cc region space bitmap",
                                                    space->Begin(), space->Capacity());
      cc_heap_bitmap_->AddContinuousSpaceBitmap(bitmap);
      cc_bitmaps_.push_back(bitmap);
      region_space_bitmap_ = bitmap;
    }
  }
}

void ConcurrentCopying::InitializePhase() {
  TimingLogger::ScopedTiming split("InitializePhase", GetTimings());
  if (kVerboseMode) {
    LOG(INFO) << "GC InitializePhase";
    LOG(INFO) << "Region-space : " << reinterpret_cast<void*>(region_space_->Begin()) << "-"
              << reinterpret_cast<void*>(region_space_->Limit());
  }
  CHECK(mark_queue_.IsEmpty());
  immune_region_.Reset();
  bytes_moved_.StoreRelaxed(0);
  objects_moved_.StoreRelaxed(0);
  if (GetCurrentIteration()->GetGcCause() == kGcCauseExplicit ||
      GetCurrentIteration()->GetGcCause() == kGcCauseForNativeAlloc ||
      GetCurrentIteration()->GetClearSoftReferences()) {
    force_evacuate_all_ = true;
  } else {
    force_evacuate_all_ = false;
  }
  BindBitmaps();
  if (kVerboseMode) {
    LOG(INFO) << "force_evacuate_all=" << force_evacuate_all_;
    LOG(INFO) << "Immune region: " << immune_region_.Begin() << "-" << immune_region_.End();
    LOG(INFO) << "GC end of InitializePhase";
  }
}

// Used to switch the thread roots of a thread from from-space refs to to-space refs.
class ThreadFlipVisitor : public Closure {
 public:
  explicit ThreadFlipVisitor(ConcurrentCopying* concurrent_copying, bool use_tlab)
      : concurrent_copying_(concurrent_copying), use_tlab_(use_tlab) {
  }

  virtual void Run(Thread* thread) OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    // Note: self is not necessarily equal to thread since thread may be suspended.
    Thread* self = Thread::Current();
    CHECK(thread == self || thread->IsSuspended() || thread->GetState() == kWaitingPerformingGc)
        << thread->GetState() << " thread " << thread << " self " << self;
    if (use_tlab_ && thread->HasTlab()) {
      if (ConcurrentCopying::kEnableFromSpaceAccountingCheck) {
        // This must come before the revoke.
        size_t thread_local_objects = thread->GetThreadLocalObjectsAllocated();
        concurrent_copying_->region_space_->RevokeThreadLocalBuffers(thread);
        reinterpret_cast<Atomic<size_t>*>(&concurrent_copying_->from_space_num_objects_at_first_pause_)->
            FetchAndAddSequentiallyConsistent(thread_local_objects);
      } else {
        concurrent_copying_->region_space_->RevokeThreadLocalBuffers(thread);
      }
    }
    if (kUseThreadLocalAllocationStack) {
      thread->RevokeThreadLocalAllocationStack();
    }
    ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
    thread->VisitRoots(concurrent_copying_);
    concurrent_copying_->GetBarrier().Pass(self);
  }

 private:
  ConcurrentCopying* const concurrent_copying_;
  const bool use_tlab_;
};

// Called back from Runtime::FlipThreadRoots() during a pause.
class FlipCallback : public Closure {
 public:
  explicit FlipCallback(ConcurrentCopying* concurrent_copying)
      : concurrent_copying_(concurrent_copying) {
  }

  virtual void Run(Thread* thread) OVERRIDE EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_) {
    ConcurrentCopying* cc = concurrent_copying_;
    TimingLogger::ScopedTiming split("(Paused)FlipCallback", cc->GetTimings());
    // Note: self is not necessarily equal to thread since thread may be suspended.
    Thread* self = Thread::Current();
    CHECK(thread == self);
    Locks::mutator_lock_->AssertExclusiveHeld(self);
    cc->region_space_->SetFromSpace(cc->rb_table_, cc->force_evacuate_all_);
    cc->SwapStacks(self);
    if (ConcurrentCopying::kEnableFromSpaceAccountingCheck) {
      cc->RecordLiveStackFreezeSize(self);
      cc->from_space_num_objects_at_first_pause_ = cc->region_space_->GetObjectsAllocated();
      cc->from_space_num_bytes_at_first_pause_ = cc->region_space_->GetBytesAllocated();
    }
    cc->is_marking_ = true;
    if (UNLIKELY(Runtime::Current()->IsActiveTransaction())) {
      CHECK(Runtime::Current()->IsAotCompiler());
      TimingLogger::ScopedTiming split2("(Paused)VisitTransactionRoots", cc->GetTimings());
      Runtime::Current()->VisitTransactionRoots(cc);
    }
  }

 private:
  ConcurrentCopying* const concurrent_copying_;
};

// Switch thread roots from from-space refs to to-space refs. Forward/mark the thread roots.
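// Roughly: Runtime::FlipThreadRoots() runs FlipCallback once with the mutator lock held
// exclusively (setting up the from-space and swapping the stacks), and arranges for
// ThreadFlipVisitor to run on each thread to revoke its TLAB and forward its roots; the
// gc_barrier_ below is then used to wait until every thread has done so.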
void ConcurrentCopying::FlipThreadRoots() {
  TimingLogger::ScopedTiming split("FlipThreadRoots", GetTimings());
  if (kVerboseMode) {
    LOG(INFO) << "time=" << region_space_->Time();
    region_space_->DumpNonFreeRegions(LOG(INFO));
  }
  Thread* self = Thread::Current();
  Locks::mutator_lock_->AssertNotHeld(self);
  gc_barrier_->Init(self, 0);
  ThreadFlipVisitor thread_flip_visitor(this, heap_->use_tlab_);
  FlipCallback flip_callback(this);
  size_t barrier_count = Runtime::Current()->FlipThreadRoots(
      &thread_flip_visitor, &flip_callback, this);
  {
    ScopedThreadStateChange tsc(self, kWaitingForCheckPointsToRun);
    gc_barrier_->Increment(self, barrier_count);
  }
  is_asserting_to_space_invariant_ = true;
  QuasiAtomic::ThreadFenceForConstructor();
  if (kVerboseMode) {
    LOG(INFO) << "time=" << region_space_->Time();
    region_space_->DumpNonFreeRegions(LOG(INFO));
    LOG(INFO) << "GC end of FlipThreadRoots";
  }
}

void ConcurrentCopying::SwapStacks(Thread* self) {
  heap_->SwapStacks(self);
}

void ConcurrentCopying::RecordLiveStackFreezeSize(Thread* self) {
  WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
  live_stack_freeze_size_ = heap_->GetLiveStack()->Size();
}

// Used to visit objects in the immune spaces.
class ConcurrentCopyingImmuneSpaceObjVisitor {
 public:
  explicit ConcurrentCopyingImmuneSpaceObjVisitor(ConcurrentCopying* cc)
      : collector_(cc) {}

  void operator()(mirror::Object* obj) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
    DCHECK(obj != nullptr);
    DCHECK(collector_->immune_region_.ContainsObject(obj));
    accounting::ContinuousSpaceBitmap* cc_bitmap =
        collector_->cc_heap_bitmap_->GetContinuousSpaceBitmap(obj);
    DCHECK(cc_bitmap != nullptr)
        << "An immune space object must have a bitmap";
    if (kIsDebugBuild) {
      DCHECK(collector_->heap_->GetMarkBitmap()->Test(obj))
          << "Immune space object must be already marked";
    }
    // This may or may not succeed, which is ok.
    if (kUseBakerReadBarrier) {
      obj->AtomicSetReadBarrierPointer(ReadBarrier::WhitePtr(), ReadBarrier::GrayPtr());
    }
    if (cc_bitmap->AtomicTestAndSet(obj)) {
      // Already marked. Do nothing.
    } else {
      // Newly marked. Set the gray bit and push it onto the mark stack.
      CHECK(!kUseBakerReadBarrier || obj->GetReadBarrierPointer() == ReadBarrier::GrayPtr());
      collector_->PushOntoMarkStack<true>(obj);
    }
  }

 private:
  ConcurrentCopying* collector_;
};

class EmptyCheckpoint : public Closure {
 public:
  explicit EmptyCheckpoint(ConcurrentCopying* concurrent_copying)
      : concurrent_copying_(concurrent_copying) {
  }

  virtual void Run(Thread* thread) OVERRIDE NO_THREAD_SAFETY_ANALYSIS {
    // Note: self is not necessarily equal to thread since thread may be suspended.
    Thread* self = Thread::Current();
    CHECK(thread == self || thread->IsSuspended() || thread->GetState() == kWaitingPerformingGc)
        << thread->GetState() << " thread " << thread << " self " << self;
    // If thread is a running mutator, then act on behalf of the garbage collector.
    // See the code in ThreadList::RunCheckpoint.
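    // The checkpoint itself does no work; running it just forces every mutator thread
    // through a synchronization point. See MarkingPhase(): after processing the mark
    // stack, an empty checkpoint is issued and the stack is re-checked, so that objects
    // marked concurrently by mutators (via the read barrier) are not missed.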
    if (thread->GetState() == kRunnable) {
      concurrent_copying_->GetBarrier().Pass(self);
    }
  }

 private:
  ConcurrentCopying* const concurrent_copying_;
};

// Concurrently mark roots that are guarded by read barriers and process the mark stack.
void ConcurrentCopying::MarkingPhase() {
  TimingLogger::ScopedTiming split("MarkingPhase", GetTimings());
  if (kVerboseMode) {
    LOG(INFO) << "GC MarkingPhase";
  }
  {
    // Mark the image root. The WB-based collectors do not need to
    // scan the image objects from roots by relying on the card table,
    // but it's necessary for the RB to-space invariant to hold.
    TimingLogger::ScopedTiming split1("VisitImageRoots", GetTimings());
    gc::space::ImageSpace* image = heap_->GetImageSpace();
    if (image != nullptr) {
      mirror::ObjectArray<mirror::Object>* image_root = image->GetImageHeader().GetImageRoots();
      mirror::Object* marked_image_root = Mark(image_root);
      CHECK_EQ(image_root, marked_image_root) << "An image object does not move";
      if (ReadBarrier::kEnableToSpaceInvariantChecks) {
        AssertToSpaceInvariant(nullptr, MemberOffset(0), marked_image_root);
      }
    }
  }
  // TODO: Other garbage collectors use Runtime::VisitConcurrentRoots(); refactor this part
  // to also use the same function.
  {
    TimingLogger::ScopedTiming split2("VisitConstantRoots", GetTimings());
    Runtime::Current()->VisitConstantRoots(this);
  }
  {
    TimingLogger::ScopedTiming split3("VisitInternTableRoots", GetTimings());
    Runtime::Current()->GetInternTable()->VisitRoots(this, kVisitRootFlagAllRoots);
  }
  {
    TimingLogger::ScopedTiming split4("VisitClassLinkerRoots", GetTimings());
    Runtime::Current()->GetClassLinker()->VisitRoots(this, kVisitRootFlagAllRoots);
  }
  {
    // TODO: don't visit the transaction roots if it's not active.
    TimingLogger::ScopedTiming split5("VisitNonThreadRoots", GetTimings());
    Runtime::Current()->VisitNonThreadRoots(this);
  }
  Runtime::Current()->GetHeap()->VisitAllocationRecords(this);

  // Immune spaces.
  for (auto& space : heap_->GetContinuousSpaces()) {
    if (immune_region_.ContainsSpace(space)) {
      DCHECK(space->IsImageSpace() || space->IsZygoteSpace());
      accounting::ContinuousSpaceBitmap* live_bitmap = space->GetLiveBitmap();
      ConcurrentCopyingImmuneSpaceObjVisitor visitor(this);
      live_bitmap->VisitMarkedRange(reinterpret_cast<uintptr_t>(space->Begin()),
                                    reinterpret_cast<uintptr_t>(space->Limit()),
                                    visitor);
    }
  }

  Thread* self = Thread::Current();
  {
    TimingLogger::ScopedTiming split6("ProcessMarkStack", GetTimings());
    // Process the mark stack and issue an empty check point. If the
    // mark stack is still empty after the check point, we're
    // done. Otherwise, repeat.
    ProcessMarkStack();
    size_t count = 0;
    while (!ProcessMarkStack()) {
      ++count;
      if (kVerboseMode) {
        LOG(INFO) << "Issue an empty check point. " << count;
      }
      IssueEmptyCheckpoint();
    }
    // Need to ensure the mark stack is empty before reference
    // processing to get rid of non-reference gray objects.
    CheckEmptyMarkQueue();
    // Enable the GetReference slow path and disallow access to the system weaks.
    GetHeap()->GetReferenceProcessor()->EnableSlowPath();
    Runtime::Current()->DisallowNewSystemWeaks();
    QuasiAtomic::ThreadFenceForConstructor();
    // Lock-unlock the system weak locks so that there's no thread in
    // the middle of accessing system weaks.
    Runtime::Current()->EnsureNewSystemWeaksDisallowed();
    // Note: Do not issue a checkpoint from here to the
    // SweepSystemWeaks call or else a deadlock due to
    // WaitHoldingLocks() would occur.
    if (kVerboseMode) {
      LOG(INFO) << "Enabled the ref proc slow path & disabled access to system weaks.";
      LOG(INFO) << "ProcessReferences";
    }
    ProcessReferences(self, true);
    CheckEmptyMarkQueue();
    if (kVerboseMode) {
      LOG(INFO) << "SweepSystemWeaks";
    }
    SweepSystemWeaks(self);
    if (kVerboseMode) {
      LOG(INFO) << "SweepSystemWeaks done";
    }
    // Because hash_set::Erase() can call the hash function for
    // arbitrary elements in the weak intern table in
    // InternTable::Table::SweepWeaks(), the above SweepSystemWeaks()
    // call may have marked some objects (strings) alive. So process
    // the mark stack here once again.
    ProcessMarkStack();
    CheckEmptyMarkQueue();
    if (kVerboseMode) {
      LOG(INFO) << "AllowNewSystemWeaks";
    }
    Runtime::Current()->AllowNewSystemWeaks();
    IssueEmptyCheckpoint();
    // Disable marking.
    if (kUseTableLookupReadBarrier) {
      heap_->rb_table_->ClearAll();
      DCHECK(heap_->rb_table_->IsAllCleared());
    }
    is_mark_queue_push_disallowed_.StoreSequentiallyConsistent(1);
    is_marking_ = false;
    CheckEmptyMarkQueue();
  }

  if (kVerboseMode) {
    LOG(INFO) << "GC end of MarkingPhase";
  }
}

void ConcurrentCopying::IssueEmptyCheckpoint() {
  Thread* self = Thread::Current();
  EmptyCheckpoint check_point(this);
  ThreadList* thread_list = Runtime::Current()->GetThreadList();
  gc_barrier_->Init(self, 0);
  size_t barrier_count = thread_list->RunCheckpoint(&check_point);
  // If there are no threads to wait for, which implies that all the checkpoint functions
  // have already finished, then there is no need to release the mutator lock.
  if (barrier_count == 0) {
    return;
  }
  // Release locks then wait for all mutator threads to pass the barrier.
  Locks::mutator_lock_->SharedUnlock(self);
  {
    ScopedThreadStateChange tsc(self, kWaitingForCheckPointsToRun);
    gc_barrier_->Increment(self, barrier_count);
  }
  Locks::mutator_lock_->SharedLock(self);
}

mirror::Object* ConcurrentCopying::PopOffMarkStack() {
  return mark_queue_.Dequeue();
}

template<bool kThreadSafe>
void ConcurrentCopying::PushOntoMarkStack(mirror::Object* to_ref) {
  CHECK_EQ(is_mark_queue_push_disallowed_.LoadRelaxed(), 0)
      << " " << to_ref << " " << PrettyTypeOf(to_ref);
  if (kThreadSafe) {
    CHECK(mark_queue_.Enqueue(to_ref)) << "Mark queue overflow";
  } else {
    CHECK(mark_queue_.EnqueueThreadUnsafe(to_ref)) << "Mark queue overflow";
  }
}

accounting::ObjectStack* ConcurrentCopying::GetAllocationStack() {
  return heap_->allocation_stack_.get();
}

accounting::ObjectStack* ConcurrentCopying::GetLiveStack() {
  return heap_->live_stack_.get();
}

inline mirror::Object* ConcurrentCopying::GetFwdPtr(mirror::Object* from_ref) {
  DCHECK(region_space_->IsInFromSpace(from_ref));
  LockWord lw = from_ref->GetLockWord(false);
  if (lw.GetState() == LockWord::kForwardingAddress) {
    mirror::Object* fwd_ptr = reinterpret_cast<mirror::Object*>(lw.ForwardingAddress());
    CHECK(fwd_ptr != nullptr);
    return fwd_ptr;
  } else {
    return nullptr;
  }
}

// The following visitors are used to verify that there are no references to the
// from-space left after marking.
class ConcurrentCopyingVerifyNoFromSpaceRefsVisitor : public SingleRootVisitor {
 public:
  explicit ConcurrentCopyingVerifyNoFromSpaceRefsVisitor(ConcurrentCopying* collector)
      : collector_(collector) {}

  void operator()(mirror::Object* ref) const
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) ALWAYS_INLINE {
    if (ref == nullptr) {
      // OK.
      return;
    }
    collector_->AssertToSpaceInvariant(nullptr, MemberOffset(0), ref);
    if (kUseBakerReadBarrier) {
      if (collector_->RegionSpace()->IsInToSpace(ref)) {
        CHECK(ref->GetReadBarrierPointer() == nullptr)
            << "To-space ref " << ref << " " << PrettyTypeOf(ref)
            << " has non-white rb_ptr " << ref->GetReadBarrierPointer();
      } else {
        CHECK(ref->GetReadBarrierPointer() == ReadBarrier::BlackPtr() ||
              (ref->GetReadBarrierPointer() == ReadBarrier::WhitePtr() &&
               collector_->IsOnAllocStack(ref)))
            << "Non-moving/unevac from space ref " << ref << " " << PrettyTypeOf(ref)
            << " has non-black rb_ptr " << ref->GetReadBarrierPointer()
            << " but isn't on the alloc stack (and has white rb_ptr)."
            << " Is it in the non-moving space="
            << (collector_->GetHeap()->GetNonMovingSpace()->HasAddress(ref));
      }
    }
  }

  void VisitRoot(mirror::Object* root, const RootInfo& info ATTRIBUTE_UNUSED)
      OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    DCHECK(root != nullptr);
    operator()(root);
  }

 private:
  ConcurrentCopying* const collector_;
};

class ConcurrentCopyingVerifyNoFromSpaceRefsFieldVisitor {
 public:
  explicit ConcurrentCopyingVerifyNoFromSpaceRefsFieldVisitor(ConcurrentCopying* collector)
      : collector_(collector) {}

  void operator()(mirror::Object* obj, MemberOffset offset, bool /* is_static */) const
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) ALWAYS_INLINE {
    mirror::Object* ref =
        obj->GetFieldObject<mirror::Object, kDefaultVerifyFlags, kWithoutReadBarrier>(offset);
    ConcurrentCopyingVerifyNoFromSpaceRefsVisitor visitor(collector_);
    visitor(ref);
  }
  void operator()(mirror::Class* klass, mirror::Reference* ref) const
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) ALWAYS_INLINE {
    CHECK(klass->IsTypeOfReferenceClass());
    this->operator()(ref, mirror::Reference::ReferentOffset(), false);
  }

 private:
  ConcurrentCopying* collector_;
};

class ConcurrentCopyingVerifyNoFromSpaceRefsObjectVisitor {
 public:
  explicit ConcurrentCopyingVerifyNoFromSpaceRefsObjectVisitor(ConcurrentCopying* collector)
      : collector_(collector) {}
  void operator()(mirror::Object* obj) const
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    ObjectCallback(obj, collector_);
  }
  static void ObjectCallback(mirror::Object* obj, void *arg)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    CHECK(obj != nullptr);
    ConcurrentCopying* collector = reinterpret_cast<ConcurrentCopying*>(arg);
    space::RegionSpace* region_space = collector->RegionSpace();
    CHECK(!region_space->IsInFromSpace(obj)) << "Scanning object " << obj << " in from space";
    ConcurrentCopyingVerifyNoFromSpaceRefsFieldVisitor visitor(collector);
    obj->VisitReferences<true>(visitor, visitor);
    if (kUseBakerReadBarrier) {
      if (collector->RegionSpace()->IsInToSpace(obj)) {
        CHECK(obj->GetReadBarrierPointer() == nullptr)
            << "obj=" << obj << " non-white rb_ptr " << obj->GetReadBarrierPointer();
      } else {
        CHECK(obj->GetReadBarrierPointer() == ReadBarrier::BlackPtr() ||
              (obj->GetReadBarrierPointer() == ReadBarrier::WhitePtr() &&
               collector->IsOnAllocStack(obj)))
            << "Non-moving space/unevac from space ref " << obj << " " << PrettyTypeOf(obj)
            << " has non-black rb_ptr " << obj->GetReadBarrierPointer()
            << " but isn't on the alloc stack (and has white rb_ptr). Is it in the non-moving space="
            << (collector->GetHeap()->GetNonMovingSpace()->HasAddress(obj));
      }
    }
  }

 private:
  ConcurrentCopying* const collector_;
};

// Verify that there are no from-space references left after the marking phase.
void ConcurrentCopying::VerifyNoFromSpaceReferences() {
  Thread* self = Thread::Current();
  DCHECK(Locks::mutator_lock_->IsExclusiveHeld(self));
  ConcurrentCopyingVerifyNoFromSpaceRefsObjectVisitor visitor(this);
  // Roots.
  {
    ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
    ConcurrentCopyingVerifyNoFromSpaceRefsVisitor ref_visitor(this);
    Runtime::Current()->VisitRoots(&ref_visitor);
  }
  // The to-space.
  region_space_->WalkToSpace(ConcurrentCopyingVerifyNoFromSpaceRefsObjectVisitor::ObjectCallback,
                             this);
  // Non-moving spaces.
  {
    WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
    heap_->GetMarkBitmap()->Visit(visitor);
  }
  // The alloc stack.
  {
    ConcurrentCopyingVerifyNoFromSpaceRefsVisitor ref_visitor(this);
    for (auto* it = heap_->allocation_stack_->Begin(), *end = heap_->allocation_stack_->End();
        it < end; ++it) {
      mirror::Object* const obj = it->AsMirrorPtr();
      if (obj != nullptr && obj->GetClass() != nullptr) {
        // TODO: need to call this only if obj is alive?
        ref_visitor(obj);
        visitor(obj);
      }
    }
  }
  // TODO: LOS. But only refs in LOS are classes.
}

// The following visitors are used to assert the to-space invariant.
class ConcurrentCopyingAssertToSpaceInvariantRefsVisitor {
 public:
  explicit ConcurrentCopyingAssertToSpaceInvariantRefsVisitor(ConcurrentCopying* collector)
      : collector_(collector) {}

  void operator()(mirror::Object* ref) const
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) ALWAYS_INLINE {
    if (ref == nullptr) {
      // OK.
      return;
    }
    collector_->AssertToSpaceInvariant(nullptr, MemberOffset(0), ref);
  }
  static void RootCallback(mirror::Object** root, void *arg, const RootInfo& /*root_info*/)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    ConcurrentCopying* collector = reinterpret_cast<ConcurrentCopying*>(arg);
    ConcurrentCopyingAssertToSpaceInvariantRefsVisitor visitor(collector);
    DCHECK(root != nullptr);
    visitor(*root);
  }

 private:
  ConcurrentCopying* collector_;
};

class ConcurrentCopyingAssertToSpaceInvariantFieldVisitor {
 public:
  explicit ConcurrentCopyingAssertToSpaceInvariantFieldVisitor(ConcurrentCopying* collector)
      : collector_(collector) {}

  void operator()(mirror::Object* obj, MemberOffset offset, bool /* is_static */) const
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) ALWAYS_INLINE {
    mirror::Object* ref =
        obj->GetFieldObject<mirror::Object, kDefaultVerifyFlags, kWithoutReadBarrier>(offset);
    ConcurrentCopyingAssertToSpaceInvariantRefsVisitor visitor(collector_);
    visitor(ref);
  }
  void operator()(mirror::Class* klass, mirror::Reference* /* ref */) const
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) ALWAYS_INLINE {
    CHECK(klass->IsTypeOfReferenceClass());
  }

 private:
  ConcurrentCopying* collector_;
};

class ConcurrentCopyingAssertToSpaceInvariantObjectVisitor {
 public:
  explicit ConcurrentCopyingAssertToSpaceInvariantObjectVisitor(ConcurrentCopying* collector)
      : collector_(collector) {}
  void operator()(mirror::Object* obj) const
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    ObjectCallback(obj, collector_);
  }
  static void ObjectCallback(mirror::Object* obj, void *arg)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    CHECK(obj != nullptr);
    ConcurrentCopying* collector = reinterpret_cast<ConcurrentCopying*>(arg);
    space::RegionSpace* region_space = collector->RegionSpace();
    CHECK(!region_space->IsInFromSpace(obj)) << "Scanning object " << obj << " in from space";
    collector->AssertToSpaceInvariant(nullptr, MemberOffset(0), obj);
    ConcurrentCopyingAssertToSpaceInvariantFieldVisitor visitor(collector);
    obj->VisitReferences<true>(visitor, visitor);
  }

 private:
  ConcurrentCopying* collector_;
};

bool ConcurrentCopying::ProcessMarkStack() {
  if (kVerboseMode) {
    LOG(INFO) << "ProcessMarkStack. ";
  }
  size_t count = 0;
  mirror::Object* to_ref;
  while ((to_ref = PopOffMarkStack()) != nullptr) {
    ++count;
    DCHECK(!region_space_->IsInFromSpace(to_ref));
    if (kUseBakerReadBarrier) {
      DCHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr())
          << " " << to_ref << " " << to_ref->GetReadBarrierPointer()
          << " is_marked=" << IsMarked(to_ref);
    }
    // Scan ref fields.
    Scan(to_ref);
    // Mark the gray ref as white or black.
    if (kUseBakerReadBarrier) {
      DCHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr())
          << " " << to_ref << " " << to_ref->GetReadBarrierPointer()
          << " is_marked=" << IsMarked(to_ref);
    }
    if (to_ref->GetClass<kVerifyNone, kWithoutReadBarrier>()->IsTypeOfReferenceClass() &&
        to_ref->AsReference()->GetReferent<kWithoutReadBarrier>() != nullptr &&
        !IsInToSpace(to_ref->AsReference()->GetReferent<kWithoutReadBarrier>())) {
      // Leave References gray so that GetReferent() will trigger RB.
      CHECK(to_ref->AsReference()->IsEnqueued()) << "Left unenqueued ref gray " << to_ref;
    } else {
#ifdef USE_BAKER_OR_BROOKS_READ_BARRIER
      if (kUseBakerReadBarrier) {
        if (region_space_->IsInToSpace(to_ref)) {
          // If to-space, change from gray to white.
          bool success = to_ref->AtomicSetReadBarrierPointer(ReadBarrier::GrayPtr(),
                                                             ReadBarrier::WhitePtr());
          CHECK(success) << "Must succeed as we won the race.";
          CHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::WhitePtr());
        } else {
          // If non-moving space/unevac from space, change from gray
          // to black. We can't change gray to white because it's not
          // safe to use CAS if two threads change values in opposite
          // directions (A->B and B->A). So, we change it to black to
          // indicate non-moving objects that have been marked
          // through. Note we'd need to change from black to white
          // later (concurrently).
          bool success = to_ref->AtomicSetReadBarrierPointer(ReadBarrier::GrayPtr(),
                                                             ReadBarrier::BlackPtr());
          CHECK(success) << "Must succeed as we won the race.";
          CHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::BlackPtr());
        }
      }
#else
      DCHECK(!kUseBakerReadBarrier);
#endif
    }
    if (ReadBarrier::kEnableToSpaceInvariantChecks || kIsDebugBuild) {
      ConcurrentCopyingAssertToSpaceInvariantObjectVisitor visitor(this);
      visitor(to_ref);
    }
  }
  // Return true if the stack was empty.
  return count == 0;
}

void ConcurrentCopying::CheckEmptyMarkQueue() {
  if (!mark_queue_.IsEmpty()) {
    while (!mark_queue_.IsEmpty()) {
      mirror::Object* obj = mark_queue_.Dequeue();
      if (kUseBakerReadBarrier) {
        mirror::Object* rb_ptr = obj->GetReadBarrierPointer();
        LOG(INFO) << "On mark queue : " << obj << " " << PrettyTypeOf(obj) << " rb_ptr=" << rb_ptr
                  << " is_marked=" << IsMarked(obj);
      } else {
        LOG(INFO) << "On mark queue : " << obj << " " << PrettyTypeOf(obj)
                  << " is_marked=" << IsMarked(obj);
      }
    }
    LOG(FATAL) << "mark queue is not empty";
  }
}

void ConcurrentCopying::SweepSystemWeaks(Thread* self) {
  TimingLogger::ScopedTiming split("SweepSystemWeaks", GetTimings());
  ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
  Runtime::Current()->SweepSystemWeaks(IsMarkedCallback, this);
}

void ConcurrentCopying::Sweep(bool swap_bitmaps) {
  {
    TimingLogger::ScopedTiming t("MarkStackAsLive", GetTimings());
    accounting::ObjectStack* live_stack = heap_->GetLiveStack();
    if (kEnableFromSpaceAccountingCheck) {
      CHECK_GE(live_stack_freeze_size_, live_stack->Size());
    }
    heap_->MarkAllocStackAsLive(live_stack);
    live_stack->Reset();
  }
  CHECK(mark_queue_.IsEmpty());
  TimingLogger::ScopedTiming split("Sweep", GetTimings());
  for (const auto& space : GetHeap()->GetContinuousSpaces()) {
    if (space->IsContinuousMemMapAllocSpace()) {
      space::ContinuousMemMapAllocSpace* alloc_space = space->AsContinuousMemMapAllocSpace();
      if (space == region_space_ || immune_region_.ContainsSpace(space)) {
        continue;
      }
      TimingLogger::ScopedTiming split2(
          alloc_space->IsZygoteSpace() ? "SweepZygoteSpace" : "SweepAllocSpace", GetTimings());
      RecordFree(alloc_space->Sweep(swap_bitmaps));
    }
  }
  SweepLargeObjects(swap_bitmaps);
}

void ConcurrentCopying::SweepLargeObjects(bool swap_bitmaps) {
  TimingLogger::ScopedTiming split("SweepLargeObjects", GetTimings());
  RecordFreeLOS(heap_->GetLargeObjectsSpace()->Sweep(swap_bitmaps));
}

class ConcurrentCopyingClearBlackPtrsVisitor {
 public:
  explicit ConcurrentCopyingClearBlackPtrsVisitor(ConcurrentCopying* cc)
      : collector_(cc) {}
#ifndef USE_BAKER_OR_BROOKS_READ_BARRIER
  NO_RETURN
#endif
  void operator()(mirror::Object* obj) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
    DCHECK(obj != nullptr);
    DCHECK(collector_->heap_->GetMarkBitmap()->Test(obj)) << obj;
    DCHECK_EQ(obj->GetReadBarrierPointer(), ReadBarrier::BlackPtr()) << obj;
    obj->AtomicSetReadBarrierPointer(ReadBarrier::BlackPtr(), ReadBarrier::WhitePtr());
    DCHECK_EQ(obj->GetReadBarrierPointer(), ReadBarrier::WhitePtr()) << obj;
  }

 private:
  ConcurrentCopying* const collector_;
};

// Clear the black ptrs in non-moving objects back to white.
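// Non-moving objects were marked black (rather than white) in ProcessMarkStack() because a
// gray->white CAS racing with a white->gray CAS would be unsafe; once the cycle is over
// those black pointers have to be reset to white so the next GC cycle starts clean.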
void ConcurrentCopying::ClearBlackPtrs() {
  CHECK(kUseBakerReadBarrier);
  TimingLogger::ScopedTiming split("ClearBlackPtrs", GetTimings());
  ConcurrentCopyingClearBlackPtrsVisitor visitor(this);
  for (auto& space : heap_->GetContinuousSpaces()) {
    if (space == region_space_) {
      continue;
    }
    accounting::ContinuousSpaceBitmap* mark_bitmap = space->GetMarkBitmap();
    if (kVerboseMode) {
      LOG(INFO) << "ClearBlackPtrs: " << *space << " bitmap: " << *mark_bitmap;
    }
    mark_bitmap->VisitMarkedRange(reinterpret_cast<uintptr_t>(space->Begin()),
                                  reinterpret_cast<uintptr_t>(space->Limit()),
                                  visitor);
  }
  space::LargeObjectSpace* large_object_space = heap_->GetLargeObjectsSpace();
  large_object_space->GetMarkBitmap()->VisitMarkedRange(
      reinterpret_cast<uintptr_t>(large_object_space->Begin()),
      reinterpret_cast<uintptr_t>(large_object_space->End()),
      visitor);
  // Objects on the allocation stack?
  if (ReadBarrier::kEnableReadBarrierInvariantChecks || kIsDebugBuild) {
    size_t count = GetAllocationStack()->Size();
    auto* it = GetAllocationStack()->Begin();
    auto* end = GetAllocationStack()->End();
    for (size_t i = 0; i < count; ++i, ++it) {
      CHECK_LT(it, end);
      mirror::Object* obj = it->AsMirrorPtr();
      if (obj != nullptr) {
        // Must have been cleared above.
        CHECK_EQ(obj->GetReadBarrierPointer(), ReadBarrier::WhitePtr()) << obj;
      }
    }
  }
}

void ConcurrentCopying::ReclaimPhase() {
  TimingLogger::ScopedTiming split("ReclaimPhase", GetTimings());
  if (kVerboseMode) {
    LOG(INFO) << "GC ReclaimPhase";
  }
  Thread* self = Thread::Current();

  {
    // Double-check that the mark stack is empty.
    // Note: need to set this after VerifyNoFromSpaceRef().
    is_asserting_to_space_invariant_ = false;
    QuasiAtomic::ThreadFenceForConstructor();
    if (kVerboseMode) {
      LOG(INFO) << "Issue an empty check point. ";
    }
    IssueEmptyCheckpoint();
    // Disable the check.
    is_mark_queue_push_disallowed_.StoreSequentiallyConsistent(0);
    CheckEmptyMarkQueue();
  }

  {
    // Record freed objects.
    TimingLogger::ScopedTiming split2("RecordFree", GetTimings());
    // Don't include thread-locals that are in the to-space.
    uint64_t from_bytes = region_space_->GetBytesAllocatedInFromSpace();
    uint64_t from_objects = region_space_->GetObjectsAllocatedInFromSpace();
    uint64_t unevac_from_bytes = region_space_->GetBytesAllocatedInUnevacFromSpace();
    uint64_t unevac_from_objects = region_space_->GetObjectsAllocatedInUnevacFromSpace();
    uint64_t to_bytes = bytes_moved_.LoadSequentiallyConsistent();
    uint64_t to_objects = objects_moved_.LoadSequentiallyConsistent();
    if (kEnableFromSpaceAccountingCheck) {
      CHECK_EQ(from_space_num_objects_at_first_pause_, from_objects + unevac_from_objects);
      CHECK_EQ(from_space_num_bytes_at_first_pause_, from_bytes + unevac_from_bytes);
    }
    CHECK_LE(to_objects, from_objects);
    CHECK_LE(to_bytes, from_bytes);
    int64_t freed_bytes = from_bytes - to_bytes;
    int64_t freed_objects = from_objects - to_objects;
    if (kVerboseMode) {
      LOG(INFO) << "RecordFree:"
                << " from_bytes=" << from_bytes << " from_objects=" << from_objects
                << " unevac_from_bytes=" << unevac_from_bytes
                << " unevac_from_objects=" << unevac_from_objects
                << " to_bytes=" << to_bytes << " to_objects=" << to_objects
                << " freed_bytes=" << freed_bytes << " freed_objects=" << freed_objects
                << " from_space size=" << region_space_->FromSpaceSize()
                << " unevac_from_space size=" << region_space_->UnevacFromSpaceSize()
                << " to_space size=" << region_space_->ToSpaceSize();
      LOG(INFO) << "(before) num_bytes_allocated="
                << heap_->num_bytes_allocated_.LoadSequentiallyConsistent();
    }
    RecordFree(ObjectBytePair(freed_objects, freed_bytes));
    if (kVerboseMode) {
      LOG(INFO) << "(after) num_bytes_allocated="
                << heap_->num_bytes_allocated_.LoadSequentiallyConsistent();
    }
  }

  {
    TimingLogger::ScopedTiming split3("ComputeUnevacFromSpaceLiveRatio", GetTimings());
    ComputeUnevacFromSpaceLiveRatio();
  }

  {
    TimingLogger::ScopedTiming split4("ClearFromSpace", GetTimings());
    region_space_->ClearFromSpace();
  }

  {
    WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
    if (kUseBakerReadBarrier) {
      ClearBlackPtrs();
    }
    Sweep(false);
    SwapBitmaps();
    heap_->UnBindBitmaps();

    // Remove bitmaps for the immune spaces.
    while (!cc_bitmaps_.empty()) {
      accounting::ContinuousSpaceBitmap* cc_bitmap = cc_bitmaps_.back();
      cc_heap_bitmap_->RemoveContinuousSpaceBitmap(cc_bitmap);
      delete cc_bitmap;
      cc_bitmaps_.pop_back();
    }
    region_space_bitmap_ = nullptr;
  }

  if (kVerboseMode) {
    LOG(INFO) << "GC end of ReclaimPhase";
  }
}

class ConcurrentCopyingComputeUnevacFromSpaceLiveRatioVisitor {
 public:
  explicit ConcurrentCopyingComputeUnevacFromSpaceLiveRatioVisitor(ConcurrentCopying* cc)
      : collector_(cc) {}
  void operator()(mirror::Object* ref) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
    DCHECK(ref != nullptr);
    DCHECK(collector_->region_space_bitmap_->Test(ref)) << ref;
    DCHECK(collector_->region_space_->IsInUnevacFromSpace(ref)) << ref;
    if (kUseBakerReadBarrier) {
      DCHECK_EQ(ref->GetReadBarrierPointer(), ReadBarrier::BlackPtr()) << ref;
      // Clear the black ptr.
      ref->AtomicSetReadBarrierPointer(ReadBarrier::BlackPtr(), ReadBarrier::WhitePtr());
      DCHECK_EQ(ref->GetReadBarrierPointer(), ReadBarrier::WhitePtr()) << ref;
    }
    size_t obj_size = ref->SizeOf();
    size_t alloc_size = RoundUp(obj_size, space::RegionSpace::kAlignment);
    collector_->region_space_->AddLiveBytes(ref, alloc_size);
  }

 private:
  ConcurrentCopying* collector_;
};

// Compute how many live objects are left in regions.
void ConcurrentCopying::ComputeUnevacFromSpaceLiveRatio() {
  region_space_->AssertAllRegionLiveBytesZeroOrCleared();
  ConcurrentCopyingComputeUnevacFromSpaceLiveRatioVisitor visitor(this);
  region_space_bitmap_->VisitMarkedRange(reinterpret_cast<uintptr_t>(region_space_->Begin()),
                                         reinterpret_cast<uintptr_t>(region_space_->Limit()),
                                         visitor);
}

// Assert the to-space invariant.
void ConcurrentCopying::AssertToSpaceInvariant(mirror::Object* obj, MemberOffset offset,
                                               mirror::Object* ref) {
  CHECK(heap_->collector_type_ == kCollectorTypeCC) << static_cast<size_t>(heap_->collector_type_);
  if (is_asserting_to_space_invariant_) {
    if (region_space_->IsInToSpace(ref)) {
      // OK.
      return;
    } else if (region_space_->IsInUnevacFromSpace(ref)) {
      CHECK(region_space_bitmap_->Test(ref)) << ref;
    } else if (region_space_->IsInFromSpace(ref)) {
      // Not OK. Do extra logging.
      if (obj != nullptr) {
        LogFromSpaceRefHolder(obj, offset);
      }
      ref->GetLockWord(false).Dump(LOG(INTERNAL_FATAL));
      CHECK(false) << "Found from-space ref " << ref << " " << PrettyTypeOf(ref);
    } else {
      AssertToSpaceInvariantInNonMovingSpace(obj, ref);
    }
  }
}

class RootPrinter {
 public:
  RootPrinter() { }

  template <class MirrorType>
  ALWAYS_INLINE void VisitRootIfNonNull(mirror::CompressedReference<MirrorType>* root)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    if (!root->IsNull()) {
      VisitRoot(root);
    }
  }

  template <class MirrorType>
  void VisitRoot(mirror::Object** root)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    LOG(INTERNAL_FATAL) << "root=" << root << " ref=" << *root;
  }

  template <class MirrorType>
  void VisitRoot(mirror::CompressedReference<MirrorType>* root)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    LOG(INTERNAL_FATAL) << "root=" << root << " ref=" << root->AsMirrorPtr();
  }
};

void ConcurrentCopying::AssertToSpaceInvariant(GcRootSource* gc_root_source,
                                               mirror::Object* ref) {
  CHECK(heap_->collector_type_ == kCollectorTypeCC) << static_cast<size_t>(heap_->collector_type_);
  if (is_asserting_to_space_invariant_) {
    if (region_space_->IsInToSpace(ref)) {
      // OK.
      return;
    } else if (region_space_->IsInUnevacFromSpace(ref)) {
      CHECK(region_space_bitmap_->Test(ref)) << ref;
    } else if (region_space_->IsInFromSpace(ref)) {
      // Not OK. Do extra logging.
      if (gc_root_source == nullptr) {
        // No info.
      } else if (gc_root_source->HasArtField()) {
        ArtField* field = gc_root_source->GetArtField();
        LOG(INTERNAL_FATAL) << "gc root in field " << field << " " << PrettyField(field);
        RootPrinter root_printer;
        field->VisitRoots(root_printer);
      } else if (gc_root_source->HasArtMethod()) {
        ArtMethod* method = gc_root_source->GetArtMethod();
        LOG(INTERNAL_FATAL) << "gc root in method " << method << " " << PrettyMethod(method);
        RootPrinter root_printer;
        method->VisitRoots(root_printer);
      }
      ref->GetLockWord(false).Dump(LOG(INTERNAL_FATAL));
      region_space_->DumpNonFreeRegions(LOG(INTERNAL_FATAL));
      PrintFileToLog("/proc/self/maps", LogSeverity::INTERNAL_FATAL);
      MemMap::DumpMaps(LOG(INTERNAL_FATAL), true);
      CHECK(false) << "Found from-space ref " << ref << " " << PrettyTypeOf(ref);
    } else {
      AssertToSpaceInvariantInNonMovingSpace(nullptr, ref);
    }
  }
}

void ConcurrentCopying::LogFromSpaceRefHolder(mirror::Object* obj, MemberOffset offset) {
  if (kUseBakerReadBarrier) {
    LOG(INFO) << "holder=" << obj << " " << PrettyTypeOf(obj)
              << " holder rb_ptr=" << obj->GetReadBarrierPointer();
  } else {
    LOG(INFO) << "holder=" << obj << " " << PrettyTypeOf(obj);
  }
  if (region_space_->IsInFromSpace(obj)) {
    LOG(INFO) << "holder is in the from-space.";
  } else if (region_space_->IsInToSpace(obj)) {
    LOG(INFO) << "holder is in the to-space.";
  } else if (region_space_->IsInUnevacFromSpace(obj)) {
    LOG(INFO) << "holder is in the unevac from-space.";
    if (region_space_bitmap_->Test(obj)) {
      LOG(INFO) << "holder is marked in the region space bitmap.";
    } else {
      LOG(INFO) << "holder is not marked in the region space bitmap.";
    }
  } else {
    // In a non-moving space.
    if (immune_region_.ContainsObject(obj)) {
      LOG(INFO) << "holder is in the image or the zygote space.";
      accounting::ContinuousSpaceBitmap* cc_bitmap =
          cc_heap_bitmap_->GetContinuousSpaceBitmap(obj);
      CHECK(cc_bitmap != nullptr)
          << "An immune space object must have a bitmap.";
      if (cc_bitmap->Test(obj)) {
        LOG(INFO) << "holder is marked in the bit map.";
      } else {
        LOG(INFO) << "holder is NOT marked in the bit map.";
      }
    } else {
      LOG(INFO) << "holder is in a non-moving (or main) space.";
      accounting::ContinuousSpaceBitmap* mark_bitmap =
          heap_mark_bitmap_->GetContinuousSpaceBitmap(obj);
      accounting::LargeObjectBitmap* los_bitmap =
          heap_mark_bitmap_->GetLargeObjectBitmap(obj);
      CHECK(los_bitmap != nullptr) << "LOS bitmap covers the entire address range";
      bool is_los = mark_bitmap == nullptr;
      if (!is_los && mark_bitmap->Test(obj)) {
        LOG(INFO) << "holder is marked in the mark bit map.";
      } else if (is_los && los_bitmap->Test(obj)) {
        LOG(INFO) << "holder is marked in the los bit map.";
      } else {
        // If ref is on the allocation stack, then it is considered
        // mark/alive (but not necessarily on the live stack.)
        if (IsOnAllocStack(obj)) {
          LOG(INFO) << "holder is on the alloc stack.";
        } else {
          LOG(INFO) << "holder is not marked or on the alloc stack.";
        }
      }
    }
  }
  LOG(INFO) << "offset=" << offset.SizeValue();
}

void ConcurrentCopying::AssertToSpaceInvariantInNonMovingSpace(mirror::Object* obj,
                                                               mirror::Object* ref) {
  // In a non-moving space. Check that the ref is marked.
  if (immune_region_.ContainsObject(ref)) {
    accounting::ContinuousSpaceBitmap* cc_bitmap =
        cc_heap_bitmap_->GetContinuousSpaceBitmap(ref);
    CHECK(cc_bitmap != nullptr)
        << "An immune space ref must have a bitmap. " << ref;
    if (kUseBakerReadBarrier) {
      CHECK(cc_bitmap->Test(ref))
          << "Unmarked immune space ref. obj=" << obj << " rb_ptr="
          << obj->GetReadBarrierPointer() << " ref=" << ref;
    } else {
      CHECK(cc_bitmap->Test(ref))
          << "Unmarked immune space ref. obj=" << obj << " ref=" << ref;
    }
  } else {
    accounting::ContinuousSpaceBitmap* mark_bitmap =
        heap_mark_bitmap_->GetContinuousSpaceBitmap(ref);
    accounting::LargeObjectBitmap* los_bitmap =
        heap_mark_bitmap_->GetLargeObjectBitmap(ref);
    CHECK(los_bitmap != nullptr) << "LOS bitmap covers the entire address range";
    bool is_los = mark_bitmap == nullptr;
    if ((!is_los && mark_bitmap->Test(ref)) ||
        (is_los && los_bitmap->Test(ref))) {
      // OK.
    } else {
      // If ref is on the allocation stack, then it may not be
      // marked live, but considered marked/alive (but not
      // necessarily on the live stack).
      CHECK(IsOnAllocStack(ref)) << "Unmarked ref that's not on the allocation stack. "
                                 << "obj=" << obj << " ref=" << ref;
    }
  }
}

// Used to scan ref fields of an object.
class ConcurrentCopyingRefFieldsVisitor {
 public:
  explicit ConcurrentCopyingRefFieldsVisitor(ConcurrentCopying* collector)
      : collector_(collector) {}

  void operator()(mirror::Object* obj, MemberOffset offset, bool /* is_static */)
      const ALWAYS_INLINE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
    collector_->Process(obj, offset);
  }

  void operator()(mirror::Class* klass, mirror::Reference* ref) const
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) ALWAYS_INLINE {
    CHECK(klass->IsTypeOfReferenceClass());
    collector_->DelayReferenceReferent(klass, ref);
  }

 private:
  ConcurrentCopying* const collector_;
};

// Scan ref fields of an object.
void ConcurrentCopying::Scan(mirror::Object* to_ref) {
  DCHECK(!region_space_->IsInFromSpace(to_ref));
  ConcurrentCopyingRefFieldsVisitor visitor(this);
  to_ref->VisitReferences<true>(visitor, visitor);
}

// Process a field.
inline void ConcurrentCopying::Process(mirror::Object* obj, MemberOffset offset) {
  mirror::Object* ref = obj->GetFieldObject<mirror::Object, kVerifyNone, kWithoutReadBarrier, false>(offset);
  if (ref == nullptr || region_space_->IsInToSpace(ref)) {
    return;
  }
  mirror::Object* to_ref = Mark(ref);
  if (to_ref == ref) {
    return;
  }
  // This may fail if the mutator writes to the field at the same time. But it's ok.
  mirror::Object* expected_ref = ref;
  mirror::Object* new_ref = to_ref;
  do {
    if (expected_ref !=
        obj->GetFieldObject<mirror::Object, kVerifyNone, kWithoutReadBarrier, false>(offset)) {
      // It was updated by the mutator.
      break;
    }
  } while (!obj->CasFieldWeakSequentiallyConsistentObjectWithoutWriteBarrier<false, false, kVerifyNone>(
      offset, expected_ref, new_ref));
}

// Process some roots.
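// Both VisitRoots overloads below follow the same pattern as Process(): mark the referent,
// and if it moved, CAS the root slot from the old (from-space) value to the new (to-space)
// value, giving up if a mutator has already overwritten the slot in the meantime.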
void ConcurrentCopying::VisitRoots(
    mirror::Object*** roots, size_t count, const RootInfo& info ATTRIBUTE_UNUSED) {
  for (size_t i = 0; i < count; ++i) {
    mirror::Object** root = roots[i];
    mirror::Object* ref = *root;
    if (ref == nullptr || region_space_->IsInToSpace(ref)) {
      continue;
    }
    mirror::Object* to_ref = Mark(ref);
    if (to_ref == ref) {
      continue;
    }
    Atomic<mirror::Object*>* addr = reinterpret_cast<Atomic<mirror::Object*>*>(root);
    mirror::Object* expected_ref = ref;
    mirror::Object* new_ref = to_ref;
    do {
      if (expected_ref != addr->LoadRelaxed()) {
        // It was updated by the mutator.
        break;
      }
    } while (!addr->CompareExchangeWeakSequentiallyConsistent(expected_ref, new_ref));
  }
}

void ConcurrentCopying::VisitRoots(
    mirror::CompressedReference<mirror::Object>** roots, size_t count,
    const RootInfo& info ATTRIBUTE_UNUSED) {
  for (size_t i = 0; i < count; ++i) {
    mirror::CompressedReference<mirror::Object>* root = roots[i];
    mirror::Object* ref = root->AsMirrorPtr();
    if (ref == nullptr || region_space_->IsInToSpace(ref)) {
      continue;
    }
    mirror::Object* to_ref = Mark(ref);
    if (to_ref == ref) {
      continue;
    }
    auto* addr = reinterpret_cast<Atomic<mirror::CompressedReference<mirror::Object>>*>(root);
    auto expected_ref = mirror::CompressedReference<mirror::Object>::FromMirrorPtr(ref);
    auto new_ref = mirror::CompressedReference<mirror::Object>::FromMirrorPtr(to_ref);
    do {
      if (ref != addr->LoadRelaxed().AsMirrorPtr()) {
        // It was updated by the mutator.
        break;
      }
    } while (!addr->CompareExchangeWeakSequentiallyConsistent(expected_ref, new_ref));
  }
}

// Fill the given memory block with a dummy object. Used to fill in a copy of an object
// that was lost in the race to install the forwarding pointer.
void ConcurrentCopying::FillWithDummyObject(mirror::Object* dummy_obj, size_t byte_size) {
  CHECK(IsAligned<kObjectAlignment>(byte_size));
  memset(dummy_obj, 0, byte_size);
  mirror::Class* int_array_class = mirror::IntArray::GetArrayClass();
  CHECK(int_array_class != nullptr);
  AssertToSpaceInvariant(nullptr, MemberOffset(0), int_array_class);
  size_t component_size = int_array_class->GetComponentSize();
  CHECK_EQ(component_size, sizeof(int32_t));
  size_t data_offset = mirror::Array::DataOffset(component_size).SizeValue();
  if (data_offset > byte_size) {
    // An int array is too big. Use java.lang.Object.
    mirror::Class* java_lang_Object = WellKnownClasses::ToClass(WellKnownClasses::java_lang_Object);
    AssertToSpaceInvariant(nullptr, MemberOffset(0), java_lang_Object);
    CHECK_EQ(byte_size, java_lang_Object->GetObjectSize());
    dummy_obj->SetClass(java_lang_Object);
    CHECK_EQ(byte_size, dummy_obj->SizeOf());
  } else {
    // Use an int array.
    dummy_obj->SetClass(int_array_class);
    CHECK(dummy_obj->IsArrayInstance());
    int32_t length = (byte_size - data_offset) / component_size;
    dummy_obj->AsArray()->SetLength(length);
    CHECK_EQ(dummy_obj->AsArray()->GetLength(), length)
        << "byte_size=" << byte_size << " length=" << length
        << " component_size=" << component_size << " data_offset=" << data_offset;
    CHECK_EQ(byte_size, dummy_obj->SizeOf())
        << "byte_size=" << byte_size << " length=" << length
        << " component_size=" << component_size << " data_offset=" << data_offset;
  }
}

// Reuse the memory blocks that were copies of objects lost in the race to install
// the forwarding pointer.
mirror::Object* ConcurrentCopying::AllocateInSkippedBlock(size_t alloc_size) {
  // Try to reuse the blocks that were unused due to CAS failures.
  CHECK(IsAligned<space::RegionSpace::kAlignment>(alloc_size));
  Thread* self = Thread::Current();
  size_t min_object_size = RoundUp(sizeof(mirror::Object), space::RegionSpace::kAlignment);
  MutexLock mu(self, skipped_blocks_lock_);
  auto it = skipped_blocks_map_.lower_bound(alloc_size);
  if (it == skipped_blocks_map_.end()) {
    // Not found.
    return nullptr;
  }
  {
    size_t byte_size = it->first;
    CHECK_GE(byte_size, alloc_size);
    if (byte_size > alloc_size && byte_size - alloc_size < min_object_size) {
      // If remainder would be too small for a dummy object, retry with a larger request size.
      it = skipped_blocks_map_.lower_bound(alloc_size + min_object_size);
      if (it == skipped_blocks_map_.end()) {
        // Not found.
        return nullptr;
      }
      CHECK(IsAligned<space::RegionSpace::kAlignment>(it->first - alloc_size));
      CHECK_GE(it->first - alloc_size, min_object_size)
          << "byte_size=" << byte_size << " it->first=" << it->first
          << " alloc_size=" << alloc_size;
    }
  }
  // Found a block.
  CHECK(it != skipped_blocks_map_.end());
  size_t byte_size = it->first;
  uint8_t* addr = it->second;
  CHECK_GE(byte_size, alloc_size);
  CHECK(region_space_->IsInToSpace(reinterpret_cast<mirror::Object*>(addr)));
  CHECK(IsAligned<space::RegionSpace::kAlignment>(byte_size));
  if (kVerboseMode) {
    LOG(INFO) << "Reusing skipped bytes : " << reinterpret_cast<void*>(addr) << ", " << byte_size;
  }
  skipped_blocks_map_.erase(it);
  memset(addr, 0, byte_size);
  if (byte_size > alloc_size) {
    // Return the remainder to the map.
    CHECK(IsAligned<space::RegionSpace::kAlignment>(byte_size - alloc_size));
    CHECK_GE(byte_size - alloc_size, min_object_size);
    FillWithDummyObject(reinterpret_cast<mirror::Object*>(addr + alloc_size),
                        byte_size - alloc_size);
    CHECK(region_space_->IsInToSpace(reinterpret_cast<mirror::Object*>(addr + alloc_size)));
    skipped_blocks_map_.insert(std::make_pair(byte_size - alloc_size, addr + alloc_size));
  }
  return reinterpret_cast<mirror::Object*>(addr);
}

mirror::Object* ConcurrentCopying::Copy(mirror::Object* from_ref) {
  DCHECK(region_space_->IsInFromSpace(from_ref));
  // No read barrier to avoid nested RB that might violate the to-space
  // invariant. Note that from_ref is a from space ref so the SizeOf()
  // call will access the from-space meta objects, but it's ok and necessary.
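  // Overall protocol (see the loop below): allocate a to-space copy, memcpy the contents,
  // then try to CAS the forwarding address into the from-space object's lock word. If the
  // CAS is lost to another thread, the local copy is turned into a dummy object and
  // remembered in skipped_blocks_map_ for reuse; the winner's copy is returned instead.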
  size_t obj_size = from_ref->SizeOf<kDefaultVerifyFlags, kWithoutReadBarrier>();
  size_t region_space_alloc_size = RoundUp(obj_size, space::RegionSpace::kAlignment);
  size_t region_space_bytes_allocated = 0U;
  size_t non_moving_space_bytes_allocated = 0U;
  size_t bytes_allocated = 0U;
  size_t dummy;
  mirror::Object* to_ref = region_space_->AllocNonvirtual<true>(
      region_space_alloc_size, &region_space_bytes_allocated, nullptr, &dummy);
  bytes_allocated = region_space_bytes_allocated;
  if (to_ref != nullptr) {
    DCHECK_EQ(region_space_alloc_size, region_space_bytes_allocated);
  }
  bool fall_back_to_non_moving = false;
  if (UNLIKELY(to_ref == nullptr)) {
    // Failed to allocate in the region space. Try the skipped blocks.
    to_ref = AllocateInSkippedBlock(region_space_alloc_size);
    if (to_ref != nullptr) {
      // Succeeded to allocate in a skipped block.
      if (heap_->use_tlab_) {
        // This is necessary for the tlab case as it's not accounted in the space.
        region_space_->RecordAlloc(to_ref);
      }
      bytes_allocated = region_space_alloc_size;
    } else {
      // Fall back to the non-moving space.
      fall_back_to_non_moving = true;
      if (kVerboseMode) {
        LOG(INFO) << "Out of memory in the to-space. Fall back to non-moving. skipped_bytes="
                  << to_space_bytes_skipped_.LoadSequentiallyConsistent()
                  << " skipped_objects=" << to_space_objects_skipped_.LoadSequentiallyConsistent();
      }
      fall_back_to_non_moving = true;
      to_ref = heap_->non_moving_space_->Alloc(Thread::Current(), obj_size,
                                               &non_moving_space_bytes_allocated, nullptr, &dummy);
      CHECK(to_ref != nullptr) << "Fall-back non-moving space allocation failed";
      bytes_allocated = non_moving_space_bytes_allocated;
      // Mark it in the mark bitmap.
      accounting::ContinuousSpaceBitmap* mark_bitmap =
          heap_mark_bitmap_->GetContinuousSpaceBitmap(to_ref);
      CHECK(mark_bitmap != nullptr);
      CHECK(!mark_bitmap->AtomicTestAndSet(to_ref));
    }
  }
  DCHECK(to_ref != nullptr);

  // Attempt to install the forward pointer. This is in a loop as the
  // lock word atomic write can fail.
  while (true) {
    // Copy the object. TODO: copy only the lockword in the second iteration and on?
    memcpy(to_ref, from_ref, obj_size);

    LockWord old_lock_word = to_ref->GetLockWord(false);

    if (old_lock_word.GetState() == LockWord::kForwardingAddress) {
      // Lost the race. Another thread (either GC or mutator) stored
      // the forwarding pointer first. Make the lost copy (to_ref)
      // look like a valid but dead (dummy) object and keep it for
      // future reuse.
      FillWithDummyObject(to_ref, bytes_allocated);
      if (!fall_back_to_non_moving) {
        DCHECK(region_space_->IsInToSpace(to_ref));
        if (bytes_allocated > space::RegionSpace::kRegionSize) {
          // Free the large alloc.
          region_space_->FreeLarge(to_ref, bytes_allocated);
        } else {
          // Record the lost copy for later reuse.
          heap_->num_bytes_allocated_.FetchAndAddSequentiallyConsistent(bytes_allocated);
          to_space_bytes_skipped_.FetchAndAddSequentiallyConsistent(bytes_allocated);
          to_space_objects_skipped_.FetchAndAddSequentiallyConsistent(1);
          MutexLock mu(Thread::Current(), skipped_blocks_lock_);
          skipped_blocks_map_.insert(std::make_pair(bytes_allocated,
                                                    reinterpret_cast<uint8_t*>(to_ref)));
        }
      } else {
        DCHECK(heap_->non_moving_space_->HasAddress(to_ref));
        DCHECK_EQ(bytes_allocated, non_moving_space_bytes_allocated);
        // Free the non-moving-space chunk.
        accounting::ContinuousSpaceBitmap* mark_bitmap =
            heap_mark_bitmap_->GetContinuousSpaceBitmap(to_ref);
        CHECK(mark_bitmap != nullptr);
        CHECK(mark_bitmap->Clear(to_ref));
        heap_->non_moving_space_->Free(Thread::Current(), to_ref);
      }

      // Get the winner's forward ptr.
      mirror::Object* lost_fwd_ptr = to_ref;
      to_ref = reinterpret_cast<mirror::Object*>(old_lock_word.ForwardingAddress());
      CHECK(to_ref != nullptr);
      CHECK_NE(to_ref, lost_fwd_ptr);
      CHECK(region_space_->IsInToSpace(to_ref) || heap_->non_moving_space_->HasAddress(to_ref));
      CHECK_NE(to_ref->GetLockWord(false).GetState(), LockWord::kForwardingAddress);
      return to_ref;
    }

    // Set the gray ptr.
    if (kUseBakerReadBarrier) {
      to_ref->SetReadBarrierPointer(ReadBarrier::GrayPtr());
    }

    LockWord new_lock_word = LockWord::FromForwardingAddress(reinterpret_cast<size_t>(to_ref));

    // Try to atomically write the fwd ptr.
    bool success = from_ref->CasLockWordWeakSequentiallyConsistent(old_lock_word, new_lock_word);
    if (LIKELY(success)) {
      // The CAS succeeded.
      objects_moved_.FetchAndAddSequentiallyConsistent(1);
      bytes_moved_.FetchAndAddSequentiallyConsistent(region_space_alloc_size);
      if (LIKELY(!fall_back_to_non_moving)) {
        DCHECK(region_space_->IsInToSpace(to_ref));
      } else {
        DCHECK(heap_->non_moving_space_->HasAddress(to_ref));
        DCHECK_EQ(bytes_allocated, non_moving_space_bytes_allocated);
      }
      if (kUseBakerReadBarrier) {
        DCHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr());
      }
      DCHECK(GetFwdPtr(from_ref) == to_ref);
      CHECK_NE(to_ref->GetLockWord(false).GetState(), LockWord::kForwardingAddress);
      PushOntoMarkStack<true>(to_ref);
      return to_ref;
    } else {
      // The CAS failed. It may have lost the race or may have failed
      // due to monitor/hashcode ops. Either way, retry.
    }
  }
}

mirror::Object* ConcurrentCopying::IsMarked(mirror::Object* from_ref) {
  DCHECK(from_ref != nullptr);
  space::RegionSpace::RegionType rtype = region_space_->GetRegionType(from_ref);
  if (rtype == space::RegionSpace::RegionType::kRegionTypeToSpace) {
    // It's already marked.
    return from_ref;
  }
  mirror::Object* to_ref;
  if (rtype == space::RegionSpace::RegionType::kRegionTypeFromSpace) {
    to_ref = GetFwdPtr(from_ref);
    DCHECK(to_ref == nullptr || region_space_->IsInToSpace(to_ref) ||
           heap_->non_moving_space_->HasAddress(to_ref))
        << "from_ref=" << from_ref << " to_ref=" << to_ref;
  } else if (rtype == space::RegionSpace::RegionType::kRegionTypeUnevacFromSpace) {
    if (region_space_bitmap_->Test(from_ref)) {
      to_ref = from_ref;
    } else {
      to_ref = nullptr;
    }
  } else {
    // from_ref is in a non-moving space.
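    // Nothing is copied here: for non-moving objects, "marked" is decided by
    // consulting the immune-space bitmap, the space's mark bitmap (or the LOS
    // bitmap), or membership in the allocation stack.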
    if (immune_region_.ContainsObject(from_ref)) {
      accounting::ContinuousSpaceBitmap* cc_bitmap =
          cc_heap_bitmap_->GetContinuousSpaceBitmap(from_ref);
      DCHECK(cc_bitmap != nullptr)
          << "An immune space object must have a bitmap";
      if (kIsDebugBuild) {
        DCHECK(heap_mark_bitmap_->GetContinuousSpaceBitmap(from_ref)->Test(from_ref))
            << "Immune space object must be already marked";
      }
      if (cc_bitmap->Test(from_ref)) {
        // Already marked.
        to_ref = from_ref;
      } else {
        // Not marked.
        to_ref = nullptr;
      }
    } else {
      // Non-immune non-moving space. Use the mark bitmap.
      accounting::ContinuousSpaceBitmap* mark_bitmap =
          heap_mark_bitmap_->GetContinuousSpaceBitmap(from_ref);
      accounting::LargeObjectBitmap* los_bitmap =
          heap_mark_bitmap_->GetLargeObjectBitmap(from_ref);
      CHECK(los_bitmap != nullptr) << "LOS bitmap covers the entire address range";
      bool is_los = mark_bitmap == nullptr;
      if (!is_los && mark_bitmap->Test(from_ref)) {
        // Already marked.
        to_ref = from_ref;
      } else if (is_los && los_bitmap->Test(from_ref)) {
        // Already marked in LOS.
        to_ref = from_ref;
      } else {
        // Not marked.
        if (IsOnAllocStack(from_ref)) {
          // If on the allocation stack, it's considered marked.
          to_ref = from_ref;
        } else {
          // Not marked.
          to_ref = nullptr;
        }
      }
    }
  }
  return to_ref;
}

bool ConcurrentCopying::IsOnAllocStack(mirror::Object* ref) {
  QuasiAtomic::ThreadFenceAcquire();
  accounting::ObjectStack* alloc_stack = GetAllocationStack();
  return alloc_stack->Contains(ref);
}

mirror::Object* ConcurrentCopying::Mark(mirror::Object* from_ref) {
  if (from_ref == nullptr) {
    return nullptr;
  }
  DCHECK(from_ref != nullptr);
  DCHECK(heap_->collector_type_ == kCollectorTypeCC);
  if (kUseBakerReadBarrier && !is_active_) {
    // In the lock word forwarding-address state, the read barrier bits
    // in the lock word are part of the stored forwarding address and
    // therefore invalid. This is usually OK because the from-space
    // copies of objects aren't accessed by mutators due to the
    // to-space invariant. However, during the dex2oat image writing
    // relocation and the zygote compaction, objects can be in the
    // forwarding-address state (to store the forward/relocation
    // addresses), they can still be accessed, and the invalid read
    // barrier bits are consulted. If they appear to be gray when they
    // aren't, the read barrier slow path can trigger when it
    // shouldn't. To guard against this, return here if the CC
    // collector isn't running.
    return from_ref;
  }
  DCHECK(region_space_ != nullptr) << "Read barrier slow path taken when CC isn't running?";
  space::RegionSpace::RegionType rtype = region_space_->GetRegionType(from_ref);
  if (rtype == space::RegionSpace::RegionType::kRegionTypeToSpace) {
    // It's already marked.
    return from_ref;
  }
  mirror::Object* to_ref;
  if (rtype == space::RegionSpace::RegionType::kRegionTypeFromSpace) {
    to_ref = GetFwdPtr(from_ref);
    if (kUseBakerReadBarrier) {
      DCHECK(to_ref != ReadBarrier::GrayPtr()) << "from_ref=" << from_ref << " to_ref=" << to_ref;
    }
    if (to_ref == nullptr) {
      // It isn't marked yet. Mark it by copying it to the to-space.
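      // Copy() may race with other GC threads and with mutators hitting the
      // read barrier on the same object; the forwarding-pointer CAS inside
      // Copy() ensures exactly one to-space copy wins and the losers recycle
      // their blocks.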
      to_ref = Copy(from_ref);
    }
    DCHECK(region_space_->IsInToSpace(to_ref) || heap_->non_moving_space_->HasAddress(to_ref))
        << "from_ref=" << from_ref << " to_ref=" << to_ref;
  } else if (rtype == space::RegionSpace::RegionType::kRegionTypeUnevacFromSpace) {
    // This may or may not succeed, which is ok.
    if (kUseBakerReadBarrier) {
      from_ref->AtomicSetReadBarrierPointer(ReadBarrier::WhitePtr(), ReadBarrier::GrayPtr());
    }
    if (region_space_bitmap_->AtomicTestAndSet(from_ref)) {
      // Already marked.
      to_ref = from_ref;
    } else {
      // Newly marked.
      to_ref = from_ref;
      if (kUseBakerReadBarrier) {
        DCHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr());
      }
      PushOntoMarkStack<true>(to_ref);
    }
  } else {
    // from_ref is in a non-moving space.
    DCHECK(!region_space_->HasAddress(from_ref)) << from_ref;
    if (immune_region_.ContainsObject(from_ref)) {
      accounting::ContinuousSpaceBitmap* cc_bitmap =
          cc_heap_bitmap_->GetContinuousSpaceBitmap(from_ref);
      DCHECK(cc_bitmap != nullptr)
          << "An immune space object must have a bitmap";
      if (kIsDebugBuild) {
        DCHECK(heap_mark_bitmap_->GetContinuousSpaceBitmap(from_ref)->Test(from_ref))
            << "Immune space object must be already marked";
      }
      // This may or may not succeed, which is ok.
      if (kUseBakerReadBarrier) {
        from_ref->AtomicSetReadBarrierPointer(ReadBarrier::WhitePtr(), ReadBarrier::GrayPtr());
      }
      if (cc_bitmap->AtomicTestAndSet(from_ref)) {
        // Already marked.
        to_ref = from_ref;
      } else {
        // Newly marked.
        to_ref = from_ref;
        if (kUseBakerReadBarrier) {
          DCHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr());
        }
        PushOntoMarkStack<true>(to_ref);
      }
    } else {
      // Use the mark bitmap.
      accounting::ContinuousSpaceBitmap* mark_bitmap =
          heap_mark_bitmap_->GetContinuousSpaceBitmap(from_ref);
      accounting::LargeObjectBitmap* los_bitmap =
          heap_mark_bitmap_->GetLargeObjectBitmap(from_ref);
      CHECK(los_bitmap != nullptr) << "LOS bitmap covers the entire address range";
      bool is_los = mark_bitmap == nullptr;
      if (!is_los && mark_bitmap->Test(from_ref)) {
        // Already marked.
        to_ref = from_ref;
        if (kUseBakerReadBarrier) {
          DCHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr() ||
                 to_ref->GetReadBarrierPointer() == ReadBarrier::BlackPtr());
        }
      } else if (is_los && los_bitmap->Test(from_ref)) {
        // Already marked in LOS.
        to_ref = from_ref;
        if (kUseBakerReadBarrier) {
          DCHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr() ||
                 to_ref->GetReadBarrierPointer() == ReadBarrier::BlackPtr());
        }
      } else {
        // Not marked.
        if (IsOnAllocStack(from_ref)) {
          // If it's on the allocation stack, it's considered marked. Keep it white.
          to_ref = from_ref;
          // Objects on the allocation stack need not be marked.
          if (!is_los) {
            DCHECK(!mark_bitmap->Test(to_ref));
          } else {
            DCHECK(!los_bitmap->Test(to_ref));
          }
          if (kUseBakerReadBarrier) {
            DCHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::WhitePtr());
          }
        } else {
          // Not marked or on the allocation stack. Try to mark it.
          // This may or may not succeed, which is ok.
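          // The object is first turned from white to gray (under the Baker
          // read barrier) and then its mark bit is set; if another thread has
          // already done either step, the object is simply treated as already
          // marked below.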
          if (kUseBakerReadBarrier) {
            from_ref->AtomicSetReadBarrierPointer(ReadBarrier::WhitePtr(), ReadBarrier::GrayPtr());
          }
          if (!is_los && mark_bitmap->AtomicTestAndSet(from_ref)) {
            // Already marked.
            to_ref = from_ref;
          } else if (is_los && los_bitmap->AtomicTestAndSet(from_ref)) {
            // Already marked in LOS.
            to_ref = from_ref;
          } else {
            // Newly marked.
            to_ref = from_ref;
            if (kUseBakerReadBarrier) {
              DCHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr());
            }
            PushOntoMarkStack<true>(to_ref);
          }
        }
      }
    }
  }
  return to_ref;
}

void ConcurrentCopying::FinishPhase() {
  region_space_ = nullptr;
  CHECK(mark_queue_.IsEmpty());
  mark_queue_.Clear();
  {
    MutexLock mu(Thread::Current(), skipped_blocks_lock_);
    skipped_blocks_map_.clear();
  }
  WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
  heap_->ClearMarkedObjects();
}

mirror::Object* ConcurrentCopying::IsMarkedCallback(mirror::Object* from_ref, void* arg) {
  return reinterpret_cast<ConcurrentCopying*>(arg)->IsMarked(from_ref);
}

bool ConcurrentCopying::IsHeapReferenceMarkedCallback(
    mirror::HeapReference<mirror::Object>* field, void* arg) {
  mirror::Object* from_ref = field->AsMirrorPtr();
  mirror::Object* to_ref = reinterpret_cast<ConcurrentCopying*>(arg)->IsMarked(from_ref);
  if (to_ref == nullptr) {
    return false;
  }
  if (from_ref != to_ref) {
    QuasiAtomic::ThreadFenceRelease();
    field->Assign(to_ref);
    QuasiAtomic::ThreadFenceSequentiallyConsistent();
  }
  return true;
}

mirror::Object* ConcurrentCopying::MarkCallback(mirror::Object* from_ref, void* arg) {
  return reinterpret_cast<ConcurrentCopying*>(arg)->Mark(from_ref);
}

void ConcurrentCopying::ProcessMarkStackCallback(void* arg) {
  reinterpret_cast<ConcurrentCopying*>(arg)->ProcessMarkStack();
}

void ConcurrentCopying::DelayReferenceReferent(mirror::Class* klass, mirror::Reference* reference) {
  heap_->GetReferenceProcessor()->DelayReferenceReferent(
      klass, reference, &IsHeapReferenceMarkedCallback, this);
}

void ConcurrentCopying::ProcessReferences(Thread* self, bool concurrent) {
  TimingLogger::ScopedTiming split("ProcessReferences", GetTimings());
  WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
  GetHeap()->GetReferenceProcessor()->ProcessReferences(
      concurrent, GetTimings(), GetCurrentIteration()->GetClearSoftReferences(),
      &IsHeapReferenceMarkedCallback, &MarkCallback, &ProcessMarkStackCallback, this);
}

void ConcurrentCopying::RevokeAllThreadLocalBuffers() {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  region_space_->RevokeAllThreadLocalBuffers();
}

}  // namespace collector
}  // namespace gc
}  // namespace art
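// Illustrative sketch (not ART code, and not compiled): the best-fit reuse
// strategy that AllocateInSkippedBlock() implements above, reduced to a
// standalone standard-library example. The names (SkippedBlockPool, kAlign,
// kMinBlock) are hypothetical stand-ins for space::RegionSpace::kAlignment
// and the minimum dummy-object size; the caller is assumed to pass blocks of
// writable memory it owns.
#if 0
#include <cstddef>
#include <cstdint>
#include <cstring>
#include <map>

class SkippedBlockPool {
 public:
  static constexpr size_t kAlign = 8;      // Stand-in for region alignment.
  static constexpr size_t kMinBlock = 16;  // Stand-in for the minimum dummy object size.

  // Record an unused block for later reuse, keyed by its size.
  void Add(uint8_t* addr, size_t byte_size) {
    blocks_.emplace(byte_size, addr);
  }

  // Best-fit allocation: take the smallest block >= alloc_size. If the
  // remainder would be too small to describe as a free block, look for a
  // larger block instead. Any remainder goes back into the pool.
  uint8_t* Allocate(size_t alloc_size) {
    auto it = blocks_.lower_bound(alloc_size);
    if (it == blocks_.end()) {
      return nullptr;
    }
    if (it->first > alloc_size && it->first - alloc_size < kMinBlock) {
      it = blocks_.lower_bound(alloc_size + kMinBlock);
      if (it == blocks_.end()) {
        return nullptr;
      }
    }
    size_t byte_size = it->first;
    uint8_t* addr = it->second;
    blocks_.erase(it);
    std::memset(addr, 0, byte_size);  // Zero the whole block, as the collector does.
    if (byte_size > alloc_size) {
      Add(addr + alloc_size, byte_size - alloc_size);
    }
    return addr;
  }

 private:
  // Keyed by block size so lower_bound() finds the smallest sufficient block.
  std::multimap<size_t, uint8_t*> blocks_;
};
#endif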