concurrent_copying.cc revision 720e71af6c5f92fbcddd0cff5b94d02366b74f89
/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "concurrent_copying.h"

#include "gc/accounting/heap_bitmap-inl.h"
#include "gc/accounting/space_bitmap-inl.h"
#include "gc/space/image_space.h"
#include "gc/space/space.h"
#include "intern_table.h"
#include "mirror/art_field-inl.h"
#include "mirror/object-inl.h"
#include "scoped_thread_state_change.h"
#include "thread-inl.h"
#include "thread_list.h"
#include "well_known_classes.h"

namespace art {
namespace gc {
namespace collector {

ConcurrentCopying::ConcurrentCopying(Heap* heap, const std::string& name_prefix)
    : GarbageCollector(heap,
                       name_prefix + (name_prefix.empty() ? "" : " ") +
                       "concurrent copying + mark sweep"),
      region_space_(nullptr), gc_barrier_(new Barrier(0)), mark_queue_(2 * MB),
      is_marking_(false), is_active_(false), is_asserting_to_space_invariant_(false),
      heap_mark_bitmap_(nullptr), live_stack_freeze_size_(0),
      skipped_blocks_lock_("concurrent copying bytes blocks lock", kMarkSweepMarkStackLock),
      rb_table_(heap_->GetReadBarrierTable()),
      force_evacuate_all_(false) {
  static_assert(space::RegionSpace::kRegionSize == accounting::ReadBarrierTable::kRegionSize,
                "The region space size and the read barrier table region size must match");
  cc_heap_bitmap_.reset(new accounting::HeapBitmap(heap));
  {
    Thread* self = Thread::Current();
    ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
    // Cache this so that we won't have to lock heap_bitmap_lock_ in
    // Mark(), which could cause a nested lock on heap_bitmap_lock_
    // when a read barrier is triggered during GC, or a lock order
    // violation (class_linker_lock_ and heap_bitmap_lock_).
    heap_mark_bitmap_ = heap->GetMarkBitmap();
  }
}

ConcurrentCopying::~ConcurrentCopying() {
}

void ConcurrentCopying::RunPhases() {
  CHECK(kUseBakerReadBarrier || kUseTableLookupReadBarrier);
  CHECK(!is_active_);
  is_active_ = true;
  Thread* self = Thread::Current();
  Locks::mutator_lock_->AssertNotHeld(self);
  {
    ReaderMutexLock mu(self, *Locks::mutator_lock_);
    InitializePhase();
  }
  FlipThreadRoots();
  {
    ReaderMutexLock mu(self, *Locks::mutator_lock_);
    MarkingPhase();
  }
  // Verify no from-space refs. This causes a pause.
  if (kEnableNoFromSpaceRefsVerification || kIsDebugBuild) {
    TimingLogger::ScopedTiming split("(Paused)VerifyNoFromSpaceReferences", GetTimings());
    ScopedPause pause(this);
    CheckEmptyMarkQueue();
    if (kVerboseMode) {
      LOG(INFO) << "Verifying no from-space refs";
    }
    VerifyNoFromSpaceReferences();
    if (kVerboseMode) {
      LOG(INFO) << "Done verifying no from-space refs";
    }
    CheckEmptyMarkQueue();
  }
  {
    ReaderMutexLock mu(self, *Locks::mutator_lock_);
    ReclaimPhase();
  }
  FinishPhase();
  CHECK(is_active_);
  is_active_ = false;
}

void ConcurrentCopying::BindBitmaps() {
  Thread* self = Thread::Current();
  WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
  // Mark all of the spaces we never collect as immune.
  for (const auto& space : heap_->GetContinuousSpaces()) {
    if (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyNeverCollect
        || space->GetGcRetentionPolicy() == space::kGcRetentionPolicyFullCollect) {
      CHECK(space->IsZygoteSpace() || space->IsImageSpace());
      CHECK(immune_region_.AddContinuousSpace(space)) << "Failed to add space " << *space;
      const char* bitmap_name = space->IsImageSpace() ? "cc image space bitmap" :
          "cc zygote space bitmap";
      // TODO: try avoiding using bitmaps for image/zygote to save space.
      accounting::ContinuousSpaceBitmap* bitmap =
          accounting::ContinuousSpaceBitmap::Create(bitmap_name, space->Begin(), space->Capacity());
      cc_heap_bitmap_->AddContinuousSpaceBitmap(bitmap);
      cc_bitmaps_.push_back(bitmap);
    } else if (space == region_space_) {
      accounting::ContinuousSpaceBitmap* bitmap =
          accounting::ContinuousSpaceBitmap::Create("cc region space bitmap",
                                                    space->Begin(), space->Capacity());
      cc_heap_bitmap_->AddContinuousSpaceBitmap(bitmap);
      cc_bitmaps_.push_back(bitmap);
      region_space_bitmap_ = bitmap;
    }
  }
}

void ConcurrentCopying::InitializePhase() {
  TimingLogger::ScopedTiming split("InitializePhase", GetTimings());
  if (kVerboseMode) {
    LOG(INFO) << "GC InitializePhase";
    LOG(INFO) << "Region-space : " << reinterpret_cast<void*>(region_space_->Begin()) << "-"
              << reinterpret_cast<void*>(region_space_->Limit());
  }
  CHECK(mark_queue_.IsEmpty());
  immune_region_.Reset();
  bytes_moved_.StoreRelaxed(0);
  objects_moved_.StoreRelaxed(0);
  if (GetCurrentIteration()->GetGcCause() == kGcCauseExplicit ||
      GetCurrentIteration()->GetGcCause() == kGcCauseForNativeAlloc ||
      GetCurrentIteration()->GetClearSoftReferences()) {
    force_evacuate_all_ = true;
  } else {
    force_evacuate_all_ = false;
  }
  BindBitmaps();
  if (kVerboseMode) {
    LOG(INFO) << "force_evacuate_all=" << force_evacuate_all_;
    LOG(INFO) << "Immune region: " << immune_region_.Begin() << "-" << immune_region_.End();
    LOG(INFO) << "GC end of InitializePhase";
  }
}

// Used to switch the thread roots of a thread from from-space refs to to-space refs.
class ThreadFlipVisitor : public Closure {
 public:
  explicit ThreadFlipVisitor(ConcurrentCopying* concurrent_copying, bool use_tlab)
      : concurrent_copying_(concurrent_copying), use_tlab_(use_tlab) {
  }

  virtual void Run(Thread* thread) OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    // Note: self is not necessarily equal to thread since thread may be suspended.
    Thread* self = Thread::Current();
    CHECK(thread == self || thread->IsSuspended() || thread->GetState() == kWaitingPerformingGc)
        << thread->GetState() << " thread " << thread << " self " << self;
    if (use_tlab_ && thread->HasTlab()) {
      if (ConcurrentCopying::kEnableFromSpaceAccountingCheck) {
        // This must come before the revoke.
        size_t thread_local_objects = thread->GetThreadLocalObjectsAllocated();
        concurrent_copying_->region_space_->RevokeThreadLocalBuffers(thread);
        reinterpret_cast<Atomic<size_t>*>(&concurrent_copying_->from_space_num_objects_at_first_pause_)->
            FetchAndAddSequentiallyConsistent(thread_local_objects);
      } else {
        concurrent_copying_->region_space_->RevokeThreadLocalBuffers(thread);
      }
    }
    if (kUseThreadLocalAllocationStack) {
      thread->RevokeThreadLocalAllocationStack();
    }
    ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
    thread->VisitRoots(concurrent_copying_);
    concurrent_copying_->GetBarrier().Pass(self);
  }

 private:
  ConcurrentCopying* const concurrent_copying_;
  const bool use_tlab_;
};

// Called back from Runtime::FlipThreadRoots() during a pause.
class FlipCallback : public Closure {
 public:
  explicit FlipCallback(ConcurrentCopying* concurrent_copying)
      : concurrent_copying_(concurrent_copying) {
  }

  virtual void Run(Thread* thread) OVERRIDE EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_) {
    ConcurrentCopying* cc = concurrent_copying_;
    TimingLogger::ScopedTiming split("(Paused)FlipCallback", cc->GetTimings());
    // Note: self is not necessarily equal to thread since thread may be suspended.
    Thread* self = Thread::Current();
    CHECK(thread == self);
    Locks::mutator_lock_->AssertExclusiveHeld(self);
    cc->region_space_->SetFromSpace(cc->rb_table_, cc->force_evacuate_all_);
    cc->SwapStacks(self);
    if (ConcurrentCopying::kEnableFromSpaceAccountingCheck) {
      cc->RecordLiveStackFreezeSize(self);
      cc->from_space_num_objects_at_first_pause_ = cc->region_space_->GetObjectsAllocated();
      cc->from_space_num_bytes_at_first_pause_ = cc->region_space_->GetBytesAllocated();
    }
    cc->is_marking_ = true;
    if (UNLIKELY(Runtime::Current()->IsActiveTransaction())) {
      CHECK(Runtime::Current()->IsAotCompiler());
      TimingLogger::ScopedTiming split2("(Paused)VisitTransactionRoots", cc->GetTimings());
      Runtime::Current()->VisitTransactionRoots(cc);
    }
  }

 private:
  ConcurrentCopying* const concurrent_copying_;
};

// Switch thread roots from from-space refs to to-space refs. Forward/mark the thread roots.
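// Runs the ThreadFlipVisitor checkpoint on each thread and the FlipCallback during
// the pause, then waits on gc_barrier_ until every thread has passed the barrier.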
void ConcurrentCopying::FlipThreadRoots() {
  TimingLogger::ScopedTiming split("FlipThreadRoots", GetTimings());
  if (kVerboseMode) {
    LOG(INFO) << "time=" << region_space_->Time();
    region_space_->DumpNonFreeRegions(LOG(INFO));
  }
  Thread* self = Thread::Current();
  Locks::mutator_lock_->AssertNotHeld(self);
  gc_barrier_->Init(self, 0);
  ThreadFlipVisitor thread_flip_visitor(this, heap_->use_tlab_);
  FlipCallback flip_callback(this);
  size_t barrier_count = Runtime::Current()->FlipThreadRoots(
      &thread_flip_visitor, &flip_callback, this);
  {
    ScopedThreadStateChange tsc(self, kWaitingForCheckPointsToRun);
    gc_barrier_->Increment(self, barrier_count);
  }
  is_asserting_to_space_invariant_ = true;
  QuasiAtomic::ThreadFenceForConstructor();
  if (kVerboseMode) {
    LOG(INFO) << "time=" << region_space_->Time();
    region_space_->DumpNonFreeRegions(LOG(INFO));
    LOG(INFO) << "GC end of FlipThreadRoots";
  }
}

void ConcurrentCopying::SwapStacks(Thread* self) {
  heap_->SwapStacks(self);
}

void ConcurrentCopying::RecordLiveStackFreezeSize(Thread* self) {
  WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
  live_stack_freeze_size_ = heap_->GetLiveStack()->Size();
}

// Used to visit objects in the immune spaces.
class ConcurrentCopyingImmuneSpaceObjVisitor {
 public:
  explicit ConcurrentCopyingImmuneSpaceObjVisitor(ConcurrentCopying* cc)
      : collector_(cc) {}

  void operator()(mirror::Object* obj) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
    DCHECK(obj != nullptr);
    DCHECK(collector_->immune_region_.ContainsObject(obj));
    accounting::ContinuousSpaceBitmap* cc_bitmap =
        collector_->cc_heap_bitmap_->GetContinuousSpaceBitmap(obj);
    DCHECK(cc_bitmap != nullptr)
        << "An immune space object must have a bitmap";
    if (kIsDebugBuild) {
      DCHECK(collector_->heap_->GetMarkBitmap()->Test(obj))
          << "Immune space object must be already marked";
    }
    // This may or may not succeed, which is ok.
    if (kUseBakerReadBarrier) {
      obj->AtomicSetReadBarrierPointer(ReadBarrier::WhitePtr(), ReadBarrier::GrayPtr());
    }
    if (cc_bitmap->AtomicTestAndSet(obj)) {
      // Already marked. Do nothing.
    } else {
      // Newly marked. Set the gray bit and push it onto the mark stack.
      CHECK(!kUseBakerReadBarrier || obj->GetReadBarrierPointer() == ReadBarrier::GrayPtr());
      collector_->PushOntoMarkStack<true>(obj);
    }
  }

 private:
  ConcurrentCopying* collector_;
};

class EmptyCheckpoint : public Closure {
 public:
  explicit EmptyCheckpoint(ConcurrentCopying* concurrent_copying)
      : concurrent_copying_(concurrent_copying) {
  }

  virtual void Run(Thread* thread) OVERRIDE NO_THREAD_SAFETY_ANALYSIS {
    // Note: self is not necessarily equal to thread since thread may be suspended.
    Thread* self = Thread::Current();
    CHECK(thread == self || thread->IsSuspended() || thread->GetState() == kWaitingPerformingGc)
        << thread->GetState() << " thread " << thread << " self " << self;
    // If thread is a running mutator, then act on behalf of the garbage collector.
    // See the code in ThreadList::RunCheckpoint.
    if (thread->GetState() == kRunnable) {
      concurrent_copying_->GetBarrier().Pass(self);
    }
  }

 private:
  ConcurrentCopying* const concurrent_copying_;
};

// Concurrently mark roots that are guarded by read barriers and process the mark stack.
void ConcurrentCopying::MarkingPhase() {
  TimingLogger::ScopedTiming split("MarkingPhase", GetTimings());
  if (kVerboseMode) {
    LOG(INFO) << "GC MarkingPhase";
  }
  {
    // Mark the image root. The WB-based collectors do not need to
    // scan the image objects from roots by relying on the card table,
    // but it's necessary for the RB to-space invariant to hold.
    TimingLogger::ScopedTiming split1("VisitImageRoots", GetTimings());
    gc::space::ImageSpace* image = heap_->GetImageSpace();
    if (image != nullptr) {
      mirror::ObjectArray<mirror::Object>* image_root = image->GetImageHeader().GetImageRoots();
      mirror::Object* marked_image_root = Mark(image_root);
      CHECK_EQ(image_root, marked_image_root) << "An image object does not move";
      if (ReadBarrier::kEnableToSpaceInvariantChecks) {
        AssertToSpaceInvariant(nullptr, MemberOffset(0), marked_image_root);
      }
    }
  }
  {
    TimingLogger::ScopedTiming split2("VisitConstantRoots", GetTimings());
    Runtime::Current()->VisitConstantRoots(this);
  }
  {
    TimingLogger::ScopedTiming split3("VisitInternTableRoots", GetTimings());
    Runtime::Current()->GetInternTable()->VisitRoots(this, kVisitRootFlagAllRoots);
  }
  {
    TimingLogger::ScopedTiming split4("VisitClassLinkerRoots", GetTimings());
    Runtime::Current()->GetClassLinker()->VisitRoots(this, kVisitRootFlagAllRoots);
  }
  {
    // TODO: don't visit the transaction roots if it's not active.
    TimingLogger::ScopedTiming split5("VisitNonThreadRoots", GetTimings());
    Runtime::Current()->VisitNonThreadRoots(this);
  }

  // Immune spaces.
  for (auto& space : heap_->GetContinuousSpaces()) {
    if (immune_region_.ContainsSpace(space)) {
      DCHECK(space->IsImageSpace() || space->IsZygoteSpace());
      accounting::ContinuousSpaceBitmap* live_bitmap = space->GetLiveBitmap();
      ConcurrentCopyingImmuneSpaceObjVisitor visitor(this);
      live_bitmap->VisitMarkedRange(reinterpret_cast<uintptr_t>(space->Begin()),
                                    reinterpret_cast<uintptr_t>(space->Limit()),
                                    visitor);
    }
  }

  Thread* self = Thread::Current();
  {
    TimingLogger::ScopedTiming split6("ProcessMarkStack", GetTimings());
    // Process the mark stack and issue an empty check point. If the
    // mark stack is still empty after the check point, we're
    // done. Otherwise, repeat.
    ProcessMarkStack();
    size_t count = 0;
    while (!ProcessMarkStack()) {
      ++count;
      if (kVerboseMode) {
        LOG(INFO) << "Issue an empty check point. " << count;
      }
      IssueEmptyCheckpoint();
    }
    // Need to ensure the mark stack is empty before reference
    // processing to get rid of non-reference gray objects.
    CheckEmptyMarkQueue();
    // Enable the GetReference slow path and disallow access to the system weaks.
    GetHeap()->GetReferenceProcessor()->EnableSlowPath();
    Runtime::Current()->DisallowNewSystemWeaks();
    QuasiAtomic::ThreadFenceForConstructor();
    // Lock-unlock the system weak locks so that there's no thread in
    // the middle of accessing system weaks.
    Runtime::Current()->EnsureNewSystemWeaksDisallowed();
    // Note: Do not issue a checkpoint from here to the
    // SweepSystemWeaks call or else a deadlock due to
    // WaitHoldingLocks() would occur.
    if (kVerboseMode) {
      LOG(INFO) << "Enabled the ref proc slow path & disabled access to system weaks.";
      LOG(INFO) << "ProcessReferences";
    }
    ProcessReferences(self, true);
    CheckEmptyMarkQueue();
    if (kVerboseMode) {
      LOG(INFO) << "SweepSystemWeaks";
    }
    SweepSystemWeaks(self);
    if (kVerboseMode) {
      LOG(INFO) << "SweepSystemWeaks done";
    }
    // Because hash_set::Erase() can call the hash function for
    // arbitrary elements in the weak intern table in
    // InternTable::Table::SweepWeaks(), the above SweepSystemWeaks()
    // call may have marked some objects (strings) alive. So process
    // the mark stack here once again.
    ProcessMarkStack();
    CheckEmptyMarkQueue();
    // Disable marking.
    if (kUseTableLookupReadBarrier) {
      heap_->rb_table_->ClearAll();
      DCHECK(heap_->rb_table_->IsAllCleared());
    }
    is_mark_queue_push_disallowed_.StoreSequentiallyConsistent(1);
    is_marking_ = false;
    if (kVerboseMode) {
      LOG(INFO) << "AllowNewSystemWeaks";
    }
    Runtime::Current()->AllowNewSystemWeaks();
    CheckEmptyMarkQueue();
  }

  if (kVerboseMode) {
    LOG(INFO) << "GC end of MarkingPhase";
  }
}

void ConcurrentCopying::IssueEmptyCheckpoint() {
  Thread* self = Thread::Current();
  EmptyCheckpoint check_point(this);
  ThreadList* thread_list = Runtime::Current()->GetThreadList();
  gc_barrier_->Init(self, 0);
  size_t barrier_count = thread_list->RunCheckpoint(&check_point);
  // If there are no threads to wait for, which implies that all the checkpoint functions
  // have finished, then there is no need to release the mutator lock.
  if (barrier_count == 0) {
    return;
  }
  // Release locks then wait for all mutator threads to pass the barrier.
  Locks::mutator_lock_->SharedUnlock(self);
  {
    ScopedThreadStateChange tsc(self, kWaitingForCheckPointsToRun);
    gc_barrier_->Increment(self, barrier_count);
  }
  Locks::mutator_lock_->SharedLock(self);
}

mirror::Object* ConcurrentCopying::PopOffMarkStack() {
  return mark_queue_.Dequeue();
}

template<bool kThreadSafe>
void ConcurrentCopying::PushOntoMarkStack(mirror::Object* to_ref) {
  CHECK_EQ(is_mark_queue_push_disallowed_.LoadRelaxed(), 0)
      << " " << to_ref << " " << PrettyTypeOf(to_ref);
  if (kThreadSafe) {
    CHECK(mark_queue_.Enqueue(to_ref)) << "Mark queue overflow";
  } else {
    CHECK(mark_queue_.EnqueueThreadUnsafe(to_ref)) << "Mark queue overflow";
  }
}

accounting::ObjectStack* ConcurrentCopying::GetAllocationStack() {
  return heap_->allocation_stack_.get();
}

accounting::ObjectStack* ConcurrentCopying::GetLiveStack() {
  return heap_->live_stack_.get();
}

inline mirror::Object* ConcurrentCopying::GetFwdPtr(mirror::Object* from_ref) {
  DCHECK(region_space_->IsInFromSpace(from_ref));
  LockWord lw = from_ref->GetLockWord(false);
  if (lw.GetState() == LockWord::kForwardingAddress) {
    mirror::Object* fwd_ptr = reinterpret_cast<mirror::Object*>(lw.ForwardingAddress());
    CHECK(fwd_ptr != nullptr);
    return fwd_ptr;
  } else {
    return nullptr;
  }
}

// The following visitors are used to verify that there are no references to the
// from-space left after marking.
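// With the Baker read barrier, a to-space object is expected to have a white (null)
// rb_ptr at this point, and a non-moving or unevac from-space object is expected to
// be black, or white if it is only on the allocation stack.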
class ConcurrentCopyingVerifyNoFromSpaceRefsVisitor : public SingleRootVisitor {
 public:
  explicit ConcurrentCopyingVerifyNoFromSpaceRefsVisitor(ConcurrentCopying* collector)
      : collector_(collector) {}

  void operator()(mirror::Object* ref) const
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) ALWAYS_INLINE {
    if (ref == nullptr) {
      // OK.
      return;
    }
    collector_->AssertToSpaceInvariant(nullptr, MemberOffset(0), ref);
    if (kUseBakerReadBarrier) {
      if (collector_->RegionSpace()->IsInToSpace(ref)) {
        CHECK(ref->GetReadBarrierPointer() == nullptr)
            << "To-space ref " << ref << " " << PrettyTypeOf(ref)
            << " has non-white rb_ptr " << ref->GetReadBarrierPointer();
      } else {
        CHECK(ref->GetReadBarrierPointer() == ReadBarrier::BlackPtr() ||
              (ref->GetReadBarrierPointer() == ReadBarrier::WhitePtr() &&
               collector_->IsOnAllocStack(ref)))
            << "Non-moving/unevac from space ref " << ref << " " << PrettyTypeOf(ref)
            << " has non-black rb_ptr " << ref->GetReadBarrierPointer()
            << " but isn't on the alloc stack (and has white rb_ptr)."
            << " Is it in the non-moving space="
            << (collector_->GetHeap()->GetNonMovingSpace()->HasAddress(ref));
      }
    }
  }

  void VisitRoot(mirror::Object* root, const RootInfo& info ATTRIBUTE_UNUSED)
      OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    DCHECK(root != nullptr);
    operator()(root);
  }

 private:
  ConcurrentCopying* const collector_;
};

class ConcurrentCopyingVerifyNoFromSpaceRefsFieldVisitor {
 public:
  explicit ConcurrentCopyingVerifyNoFromSpaceRefsFieldVisitor(ConcurrentCopying* collector)
      : collector_(collector) {}

  void operator()(mirror::Object* obj, MemberOffset offset, bool /* is_static */) const
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) ALWAYS_INLINE {
    mirror::Object* ref =
        obj->GetFieldObject<mirror::Object, kDefaultVerifyFlags, kWithoutReadBarrier>(offset);
    ConcurrentCopyingVerifyNoFromSpaceRefsVisitor visitor(collector_);
    visitor(ref);
  }
  void operator()(mirror::Class* klass, mirror::Reference* ref) const
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) ALWAYS_INLINE {
    CHECK(klass->IsTypeOfReferenceClass());
    this->operator()(ref, mirror::Reference::ReferentOffset(), false);
  }

 private:
  ConcurrentCopying* collector_;
};

class ConcurrentCopyingVerifyNoFromSpaceRefsObjectVisitor {
 public:
  explicit ConcurrentCopyingVerifyNoFromSpaceRefsObjectVisitor(ConcurrentCopying* collector)
      : collector_(collector) {}
  void operator()(mirror::Object* obj) const
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    ObjectCallback(obj, collector_);
  }
  static void ObjectCallback(mirror::Object* obj, void *arg)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    CHECK(obj != nullptr);
    ConcurrentCopying* collector = reinterpret_cast<ConcurrentCopying*>(arg);
    space::RegionSpace* region_space = collector->RegionSpace();
    CHECK(!region_space->IsInFromSpace(obj)) << "Scanning object " << obj << " in from space";
    ConcurrentCopyingVerifyNoFromSpaceRefsFieldVisitor visitor(collector);
    obj->VisitReferences<true>(visitor, visitor);
    if (kUseBakerReadBarrier) {
      if (collector->RegionSpace()->IsInToSpace(obj)) {
        CHECK(obj->GetReadBarrierPointer() == nullptr)
            << "obj=" << obj << " non-white rb_ptr " << obj->GetReadBarrierPointer();
      } else {
        CHECK(obj->GetReadBarrierPointer() == ReadBarrier::BlackPtr() ||
              (obj->GetReadBarrierPointer() == ReadBarrier::WhitePtr() &&
               collector->IsOnAllocStack(obj)))
            << "Non-moving space/unevac from space ref " << obj << " " << PrettyTypeOf(obj)
            << " has non-black rb_ptr " << obj->GetReadBarrierPointer()
            << " but isn't on the alloc stack (and has white rb_ptr). Is it in the non-moving space="
            << (collector->GetHeap()->GetNonMovingSpace()->HasAddress(obj));
      }
    }
  }

 private:
  ConcurrentCopying* const collector_;
};

// Verify there are no from-space references left after the marking phase.
void ConcurrentCopying::VerifyNoFromSpaceReferences() {
  Thread* self = Thread::Current();
  DCHECK(Locks::mutator_lock_->IsExclusiveHeld(self));
  ConcurrentCopyingVerifyNoFromSpaceRefsObjectVisitor visitor(this);
  // Roots.
  {
    ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
    ConcurrentCopyingVerifyNoFromSpaceRefsVisitor ref_visitor(this);
    Runtime::Current()->VisitRoots(&ref_visitor);
  }
  // The to-space.
  region_space_->WalkToSpace(ConcurrentCopyingVerifyNoFromSpaceRefsObjectVisitor::ObjectCallback,
                             this);
  // Non-moving spaces.
  {
    WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
    heap_->GetMarkBitmap()->Visit(visitor);
  }
  // The alloc stack.
  {
    ConcurrentCopyingVerifyNoFromSpaceRefsVisitor ref_visitor(this);
    for (auto* it = heap_->allocation_stack_->Begin(), *end = heap_->allocation_stack_->End();
         it < end; ++it) {
      mirror::Object* const obj = it->AsMirrorPtr();
      if (obj != nullptr && obj->GetClass() != nullptr) {
        // TODO: need to call this only if obj is alive?
        ref_visitor(obj);
        visitor(obj);
      }
    }
  }
  // TODO: LOS. But only refs in LOS are classes.
}

// The following visitors are used to assert the to-space invariant.
class ConcurrentCopyingAssertToSpaceInvariantRefsVisitor {
 public:
  explicit ConcurrentCopyingAssertToSpaceInvariantRefsVisitor(ConcurrentCopying* collector)
      : collector_(collector) {}

  void operator()(mirror::Object* ref) const
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) ALWAYS_INLINE {
    if (ref == nullptr) {
      // OK.
      return;
    }
    collector_->AssertToSpaceInvariant(nullptr, MemberOffset(0), ref);
  }
  static void RootCallback(mirror::Object** root, void *arg, const RootInfo& /*root_info*/)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    ConcurrentCopying* collector = reinterpret_cast<ConcurrentCopying*>(arg);
    ConcurrentCopyingAssertToSpaceInvariantRefsVisitor visitor(collector);
    DCHECK(root != nullptr);
    visitor(*root);
  }

 private:
  ConcurrentCopying* collector_;
};

class ConcurrentCopyingAssertToSpaceInvariantFieldVisitor {
 public:
  explicit ConcurrentCopyingAssertToSpaceInvariantFieldVisitor(ConcurrentCopying* collector)
      : collector_(collector) {}

  void operator()(mirror::Object* obj, MemberOffset offset, bool /* is_static */) const
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) ALWAYS_INLINE {
    mirror::Object* ref =
        obj->GetFieldObject<mirror::Object, kDefaultVerifyFlags, kWithoutReadBarrier>(offset);
    ConcurrentCopyingAssertToSpaceInvariantRefsVisitor visitor(collector_);
    visitor(ref);
  }
  void operator()(mirror::Class* klass, mirror::Reference* /* ref */) const
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) ALWAYS_INLINE {
    CHECK(klass->IsTypeOfReferenceClass());
  }

 private:
  ConcurrentCopying* collector_;
};

class ConcurrentCopyingAssertToSpaceInvariantObjectVisitor {
 public:
  explicit ConcurrentCopyingAssertToSpaceInvariantObjectVisitor(ConcurrentCopying* collector)
      : collector_(collector) {}
  void operator()(mirror::Object* obj) const
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    ObjectCallback(obj, collector_);
  }
  static void ObjectCallback(mirror::Object* obj, void *arg)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    CHECK(obj != nullptr);
    ConcurrentCopying* collector = reinterpret_cast<ConcurrentCopying*>(arg);
    space::RegionSpace* region_space = collector->RegionSpace();
    CHECK(!region_space->IsInFromSpace(obj)) << "Scanning object " << obj << " in from space";
    collector->AssertToSpaceInvariant(nullptr, MemberOffset(0), obj);
    ConcurrentCopyingAssertToSpaceInvariantFieldVisitor visitor(collector);
    obj->VisitReferences<true>(visitor, visitor);
  }

 private:
  ConcurrentCopying* collector_;
};

bool ConcurrentCopying::ProcessMarkStack() {
  if (kVerboseMode) {
    LOG(INFO) << "ProcessMarkStack. ";
  }
  size_t count = 0;
  mirror::Object* to_ref;
  while ((to_ref = PopOffMarkStack()) != nullptr) {
    ++count;
    DCHECK(!region_space_->IsInFromSpace(to_ref));
    if (kUseBakerReadBarrier) {
      DCHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr())
          << " " << to_ref << " " << to_ref->GetReadBarrierPointer()
          << " is_marked=" << IsMarked(to_ref);
    }
    // Scan ref fields.
    Scan(to_ref);
    // Mark the gray ref as white or black.
    if (kUseBakerReadBarrier) {
      DCHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr())
          << " " << to_ref << " " << to_ref->GetReadBarrierPointer()
          << " is_marked=" << IsMarked(to_ref);
    }
    if (to_ref->GetClass<kVerifyNone, kWithoutReadBarrier>()->IsTypeOfReferenceClass() &&
        to_ref->AsReference()->GetReferent<kWithoutReadBarrier>() != nullptr &&
        !IsInToSpace(to_ref->AsReference()->GetReferent<kWithoutReadBarrier>())) {
      // Leave References gray so that GetReferent() will trigger RB.
      CHECK(to_ref->AsReference()->IsEnqueued()) << "Left unenqueued ref gray " << to_ref;
    } else {
      if (kUseBakerReadBarrier) {
        if (region_space_->IsInToSpace(to_ref)) {
          // If to-space, change from gray to white.
          bool success = to_ref->AtomicSetReadBarrierPointer(ReadBarrier::GrayPtr(),
                                                             ReadBarrier::WhitePtr());
          CHECK(success) << "Must succeed as we won the race.";
          CHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::WhitePtr());
        } else {
          // If non-moving space/unevac from space, change from gray
          // to black. We can't change gray to white because it's not
          // safe to use CAS if two threads change values in opposite
          // directions (A->B and B->A). So, we change it to black to
          // indicate non-moving objects that have been marked
          // through. Note we'd need to change from black to white
          // later (concurrently).
          bool success = to_ref->AtomicSetReadBarrierPointer(ReadBarrier::GrayPtr(),
                                                             ReadBarrier::BlackPtr());
          CHECK(success) << "Must succeed as we won the race.";
          CHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::BlackPtr());
        }
      }
    }
    if (ReadBarrier::kEnableToSpaceInvariantChecks || kIsDebugBuild) {
      ConcurrentCopyingAssertToSpaceInvariantObjectVisitor visitor(this);
      visitor(to_ref);
    }
  }
  // Return true if the stack was empty.
  return count == 0;
}

void ConcurrentCopying::CheckEmptyMarkQueue() {
  if (!mark_queue_.IsEmpty()) {
    while (!mark_queue_.IsEmpty()) {
      mirror::Object* obj = mark_queue_.Dequeue();
      if (kUseBakerReadBarrier) {
        mirror::Object* rb_ptr = obj->GetReadBarrierPointer();
        LOG(INFO) << "On mark queue : " << obj << " " << PrettyTypeOf(obj) << " rb_ptr=" << rb_ptr
                  << " is_marked=" << IsMarked(obj);
      } else {
        LOG(INFO) << "On mark queue : " << obj << " " << PrettyTypeOf(obj)
                  << " is_marked=" << IsMarked(obj);
      }
    }
    LOG(FATAL) << "mark queue is not empty";
  }
}

void ConcurrentCopying::SweepSystemWeaks(Thread* self) {
  TimingLogger::ScopedTiming split("SweepSystemWeaks", GetTimings());
  ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
  Runtime::Current()->SweepSystemWeaks(IsMarkedCallback, this);
}

void ConcurrentCopying::Sweep(bool swap_bitmaps) {
  {
    TimingLogger::ScopedTiming t("MarkStackAsLive", GetTimings());
    accounting::ObjectStack* live_stack = heap_->GetLiveStack();
    if (kEnableFromSpaceAccountingCheck) {
      CHECK_GE(live_stack_freeze_size_, live_stack->Size());
    }
    heap_->MarkAllocStackAsLive(live_stack);
    live_stack->Reset();
  }
  CHECK(mark_queue_.IsEmpty());
  TimingLogger::ScopedTiming split("Sweep", GetTimings());
  for (const auto& space : GetHeap()->GetContinuousSpaces()) {
    if (space->IsContinuousMemMapAllocSpace()) {
      space::ContinuousMemMapAllocSpace* alloc_space = space->AsContinuousMemMapAllocSpace();
      if (space == region_space_ || immune_region_.ContainsSpace(space)) {
        continue;
      }
      TimingLogger::ScopedTiming split2(
          alloc_space->IsZygoteSpace() ?
"SweepZygoteSpace" : "SweepAllocSpace", GetTimings()); 795 RecordFree(alloc_space->Sweep(swap_bitmaps)); 796 } 797 } 798 SweepLargeObjects(swap_bitmaps); 799} 800 801void ConcurrentCopying::SweepLargeObjects(bool swap_bitmaps) { 802 TimingLogger::ScopedTiming split("SweepLargeObjects", GetTimings()); 803 RecordFreeLOS(heap_->GetLargeObjectsSpace()->Sweep(swap_bitmaps)); 804} 805 806class ConcurrentCopyingClearBlackPtrsVisitor { 807 public: 808 explicit ConcurrentCopyingClearBlackPtrsVisitor(ConcurrentCopying* cc) 809 : collector_(cc) {} 810 void operator()(mirror::Object* obj) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) 811 SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) { 812 DCHECK(obj != nullptr); 813 DCHECK(collector_->heap_->GetMarkBitmap()->Test(obj)) << obj; 814 DCHECK_EQ(obj->GetReadBarrierPointer(), ReadBarrier::BlackPtr()) << obj; 815 obj->SetReadBarrierPointer(ReadBarrier::WhitePtr()); 816 DCHECK_EQ(obj->GetReadBarrierPointer(), ReadBarrier::WhitePtr()) << obj; 817 } 818 819 private: 820 ConcurrentCopying* const collector_; 821}; 822 823// Clear the black ptrs in non-moving objects back to white. 824void ConcurrentCopying::ClearBlackPtrs() { 825 CHECK(kUseBakerReadBarrier); 826 TimingLogger::ScopedTiming split("ClearBlackPtrs", GetTimings()); 827 ConcurrentCopyingClearBlackPtrsVisitor visitor(this); 828 for (auto& space : heap_->GetContinuousSpaces()) { 829 if (space == region_space_) { 830 continue; 831 } 832 accounting::ContinuousSpaceBitmap* mark_bitmap = space->GetMarkBitmap(); 833 if (kVerboseMode) { 834 LOG(INFO) << "ClearBlackPtrs: " << *space << " bitmap: " << *mark_bitmap; 835 } 836 mark_bitmap->VisitMarkedRange(reinterpret_cast<uintptr_t>(space->Begin()), 837 reinterpret_cast<uintptr_t>(space->Limit()), 838 visitor); 839 } 840 space::LargeObjectSpace* large_object_space = heap_->GetLargeObjectsSpace(); 841 large_object_space->GetMarkBitmap()->VisitMarkedRange( 842 reinterpret_cast<uintptr_t>(large_object_space->Begin()), 843 reinterpret_cast<uintptr_t>(large_object_space->End()), 844 visitor); 845 // Objects on the allocation stack? 846 if (ReadBarrier::kEnableReadBarrierInvariantChecks || kIsDebugBuild) { 847 size_t count = GetAllocationStack()->Size(); 848 auto* it = GetAllocationStack()->Begin(); 849 auto* end = GetAllocationStack()->End(); 850 for (size_t i = 0; i < count; ++i, ++it) { 851 CHECK_LT(it, end); 852 mirror::Object* obj = it->AsMirrorPtr(); 853 if (obj != nullptr) { 854 // Must have been cleared above. 855 CHECK_EQ(obj->GetReadBarrierPointer(), ReadBarrier::WhitePtr()) << obj; 856 } 857 } 858 } 859} 860 861void ConcurrentCopying::ReclaimPhase() { 862 TimingLogger::ScopedTiming split("ReclaimPhase", GetTimings()); 863 if (kVerboseMode) { 864 LOG(INFO) << "GC ReclaimPhase"; 865 } 866 Thread* self = Thread::Current(); 867 868 { 869 // Double-check that the mark stack is empty. 870 // Note: need to set this after VerifyNoFromSpaceRef(). 871 is_asserting_to_space_invariant_ = false; 872 QuasiAtomic::ThreadFenceForConstructor(); 873 if (kVerboseMode) { 874 LOG(INFO) << "Issue an empty check point. "; 875 } 876 IssueEmptyCheckpoint(); 877 // Disable the check. 878 is_mark_queue_push_disallowed_.StoreSequentiallyConsistent(0); 879 CheckEmptyMarkQueue(); 880 } 881 882 { 883 // Record freed objects. 884 TimingLogger::ScopedTiming split2("RecordFree", GetTimings()); 885 // Don't include thread-locals that are in the to-space. 
    uint64_t from_bytes = region_space_->GetBytesAllocatedInFromSpace();
    uint64_t from_objects = region_space_->GetObjectsAllocatedInFromSpace();
    uint64_t unevac_from_bytes = region_space_->GetBytesAllocatedInUnevacFromSpace();
    uint64_t unevac_from_objects = region_space_->GetObjectsAllocatedInUnevacFromSpace();
    uint64_t to_bytes = bytes_moved_.LoadSequentiallyConsistent();
    uint64_t to_objects = objects_moved_.LoadSequentiallyConsistent();
    if (kEnableFromSpaceAccountingCheck) {
      CHECK_EQ(from_space_num_objects_at_first_pause_, from_objects + unevac_from_objects);
      CHECK_EQ(from_space_num_bytes_at_first_pause_, from_bytes + unevac_from_bytes);
    }
    CHECK_LE(to_objects, from_objects);
    CHECK_LE(to_bytes, from_bytes);
    int64_t freed_bytes = from_bytes - to_bytes;
    int64_t freed_objects = from_objects - to_objects;
    if (kVerboseMode) {
      LOG(INFO) << "RecordFree:"
                << " from_bytes=" << from_bytes << " from_objects=" << from_objects
                << " unevac_from_bytes=" << unevac_from_bytes
                << " unevac_from_objects=" << unevac_from_objects
                << " to_bytes=" << to_bytes << " to_objects=" << to_objects
                << " freed_bytes=" << freed_bytes << " freed_objects=" << freed_objects
                << " from_space size=" << region_space_->FromSpaceSize()
                << " unevac_from_space size=" << region_space_->UnevacFromSpaceSize()
                << " to_space size=" << region_space_->ToSpaceSize();
      LOG(INFO) << "(before) num_bytes_allocated="
                << heap_->num_bytes_allocated_.LoadSequentiallyConsistent();
    }
    RecordFree(ObjectBytePair(freed_objects, freed_bytes));
    if (kVerboseMode) {
      LOG(INFO) << "(after) num_bytes_allocated="
                << heap_->num_bytes_allocated_.LoadSequentiallyConsistent();
    }
  }

  {
    TimingLogger::ScopedTiming split3("ComputeUnevacFromSpaceLiveRatio", GetTimings());
    ComputeUnevacFromSpaceLiveRatio();
  }

  {
    TimingLogger::ScopedTiming split4("ClearFromSpace", GetTimings());
    region_space_->ClearFromSpace();
  }

  {
    WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
    if (kUseBakerReadBarrier) {
      ClearBlackPtrs();
    }
    Sweep(false);
    SwapBitmaps();
    heap_->UnBindBitmaps();

    // Remove bitmaps for the immune spaces.
    while (!cc_bitmaps_.empty()) {
      accounting::ContinuousSpaceBitmap* cc_bitmap = cc_bitmaps_.back();
      cc_heap_bitmap_->RemoveContinuousSpaceBitmap(cc_bitmap);
      delete cc_bitmap;
      cc_bitmaps_.pop_back();
    }
    region_space_bitmap_ = nullptr;
  }

  if (kVerboseMode) {
    LOG(INFO) << "GC end of ReclaimPhase";
  }
}

class ConcurrentCopyingComputeUnevacFromSpaceLiveRatioVisitor {
 public:
  explicit ConcurrentCopyingComputeUnevacFromSpaceLiveRatioVisitor(ConcurrentCopying* cc)
      : collector_(cc) {}
  void operator()(mirror::Object* ref) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
    DCHECK(ref != nullptr);
    DCHECK(collector_->region_space_bitmap_->Test(ref)) << ref;
    DCHECK(collector_->region_space_->IsInUnevacFromSpace(ref)) << ref;
    if (kUseBakerReadBarrier) {
      DCHECK_EQ(ref->GetReadBarrierPointer(), ReadBarrier::BlackPtr()) << ref;
      // Clear the black ptr.
      ref->SetReadBarrierPointer(ReadBarrier::WhitePtr());
    }
    size_t obj_size = ref->SizeOf();
    size_t alloc_size = RoundUp(obj_size, space::RegionSpace::kAlignment);
    collector_->region_space_->AddLiveBytes(ref, alloc_size);
  }

 private:
  ConcurrentCopying* collector_;
};

// Compute how much live data is left in the unevacuated from-space regions.
void ConcurrentCopying::ComputeUnevacFromSpaceLiveRatio() {
  region_space_->AssertAllRegionLiveBytesZeroOrCleared();
  ConcurrentCopyingComputeUnevacFromSpaceLiveRatioVisitor visitor(this);
  region_space_bitmap_->VisitMarkedRange(reinterpret_cast<uintptr_t>(region_space_->Begin()),
                                         reinterpret_cast<uintptr_t>(region_space_->Limit()),
                                         visitor);
}

// Assert the to-space invariant.
void ConcurrentCopying::AssertToSpaceInvariant(mirror::Object* obj, MemberOffset offset,
                                               mirror::Object* ref) {
  CHECK(heap_->collector_type_ == kCollectorTypeCC) << static_cast<size_t>(heap_->collector_type_);
  if (is_asserting_to_space_invariant_) {
    if (region_space_->IsInToSpace(ref)) {
      // OK.
      return;
    } else if (region_space_->IsInUnevacFromSpace(ref)) {
      CHECK(region_space_bitmap_->Test(ref)) << ref;
    } else if (region_space_->IsInFromSpace(ref)) {
      // Not OK. Do extra logging.
      if (obj != nullptr) {
        if (kUseBakerReadBarrier) {
          LOG(INFO) << "holder=" << obj << " " << PrettyTypeOf(obj)
                    << " holder rb_ptr=" << obj->GetReadBarrierPointer();
        } else {
          LOG(INFO) << "holder=" << obj << " " << PrettyTypeOf(obj);
        }
        if (region_space_->IsInFromSpace(obj)) {
          LOG(INFO) << "holder is in the from-space.";
        } else if (region_space_->IsInToSpace(obj)) {
          LOG(INFO) << "holder is in the to-space.";
        } else if (region_space_->IsInUnevacFromSpace(obj)) {
          LOG(INFO) << "holder is in the unevac from-space.";
          if (region_space_bitmap_->Test(obj)) {
            LOG(INFO) << "holder is marked in the region space bitmap.";
          } else {
            LOG(INFO) << "holder is not marked in the region space bitmap.";
          }
        } else {
          // In a non-moving space.
          if (immune_region_.ContainsObject(obj)) {
            LOG(INFO) << "holder is in the image or the zygote space.";
            accounting::ContinuousSpaceBitmap* cc_bitmap =
                cc_heap_bitmap_->GetContinuousSpaceBitmap(obj);
            CHECK(cc_bitmap != nullptr)
                << "An immune space object must have a bitmap.";
            if (cc_bitmap->Test(obj)) {
              LOG(INFO) << "holder is marked in the bit map.";
            } else {
              LOG(INFO) << "holder is NOT marked in the bit map.";
            }
          } else {
            LOG(INFO) << "holder is in a non-moving (or main) space.";
            accounting::ContinuousSpaceBitmap* mark_bitmap =
                heap_mark_bitmap_->GetContinuousSpaceBitmap(obj);
            accounting::LargeObjectBitmap* los_bitmap =
                heap_mark_bitmap_->GetLargeObjectBitmap(obj);
            CHECK(los_bitmap != nullptr) << "LOS bitmap covers the entire address range";
            bool is_los = mark_bitmap == nullptr;
            if (!is_los && mark_bitmap->Test(obj)) {
              LOG(INFO) << "holder is marked in the mark bit map.";
            } else if (is_los && los_bitmap->Test(obj)) {
              LOG(INFO) << "holder is marked in the los bit map.";
            } else {
              // If the holder is on the allocation stack, then it is considered
              // marked/alive (but not necessarily on the live stack.)
              if (IsOnAllocStack(obj)) {
                LOG(INFO) << "holder is on the alloc stack.";
              } else {
                LOG(INFO) << "holder is not marked or on the alloc stack.";
              }
            }
          }
        }
        LOG(INFO) << "offset=" << offset.SizeValue();
      }
      CHECK(false) << "Found from-space ref " << ref << " " << PrettyTypeOf(ref);
    } else {
      // In a non-moving space. Check that the ref is marked.
      if (immune_region_.ContainsObject(ref)) {
        accounting::ContinuousSpaceBitmap* cc_bitmap =
            cc_heap_bitmap_->GetContinuousSpaceBitmap(ref);
        CHECK(cc_bitmap != nullptr)
            << "An immune space ref must have a bitmap. " << ref;
        if (kUseBakerReadBarrier) {
          CHECK(cc_bitmap->Test(ref))
              << "Unmarked immune space ref. obj=" << obj << " rb_ptr="
              << obj->GetReadBarrierPointer() << " ref=" << ref;
        } else {
          CHECK(cc_bitmap->Test(ref))
              << "Unmarked immune space ref. obj=" << obj << " ref=" << ref;
        }
      } else {
        accounting::ContinuousSpaceBitmap* mark_bitmap =
            heap_mark_bitmap_->GetContinuousSpaceBitmap(ref);
        accounting::LargeObjectBitmap* los_bitmap =
            heap_mark_bitmap_->GetLargeObjectBitmap(ref);
        CHECK(los_bitmap != nullptr) << "LOS bitmap covers the entire address range";
        bool is_los = mark_bitmap == nullptr;
        if ((!is_los && mark_bitmap->Test(ref)) ||
            (is_los && los_bitmap->Test(ref))) {
          // OK.
        } else {
          // If ref is on the allocation stack, then it may not be
          // marked live, but considered marked/alive (but not
          // necessarily on the live stack).
          CHECK(IsOnAllocStack(ref)) << "Unmarked ref that's not on the allocation stack. "
                                     << "obj=" << obj << " ref=" << ref;
        }
      }
    }
  }
}

// Used to scan ref fields of an object.
class ConcurrentCopyingRefFieldsVisitor {
 public:
  explicit ConcurrentCopyingRefFieldsVisitor(ConcurrentCopying* collector)
      : collector_(collector) {}

  void operator()(mirror::Object* obj, MemberOffset offset, bool /* is_static */)
      const ALWAYS_INLINE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
    collector_->Process(obj, offset);
  }

  void operator()(mirror::Class* klass, mirror::Reference* ref) const
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) ALWAYS_INLINE {
    CHECK(klass->IsTypeOfReferenceClass());
    collector_->DelayReferenceReferent(klass, ref);
  }

 private:
  ConcurrentCopying* const collector_;
};

// Scan ref fields of an object.
void ConcurrentCopying::Scan(mirror::Object* to_ref) {
  DCHECK(!region_space_->IsInFromSpace(to_ref));
  ConcurrentCopyingRefFieldsVisitor visitor(this);
  to_ref->VisitReferences<true>(visitor, visitor);
}

// Process a field.
inline void ConcurrentCopying::Process(mirror::Object* obj, MemberOffset offset) {
  mirror::Object* ref =
      obj->GetFieldObject<mirror::Object, kVerifyNone, kWithoutReadBarrier, false>(offset);
  if (ref == nullptr || region_space_->IsInToSpace(ref)) {
    return;
  }
  mirror::Object* to_ref = Mark(ref);
  if (to_ref == ref) {
    return;
  }
  // This may fail if the mutator writes to the field at the same time. But it's ok.
  mirror::Object* expected_ref = ref;
  mirror::Object* new_ref = to_ref;
  do {
    if (expected_ref !=
        obj->GetFieldObject<mirror::Object, kVerifyNone, kWithoutReadBarrier, false>(offset)) {
      // It was updated by the mutator.
      break;
    }
  } while (!obj->CasFieldWeakSequentiallyConsistentObjectWithoutWriteBarrier<false, false, kVerifyNone>(
      offset, expected_ref, new_ref));
}

// Process some roots.
void ConcurrentCopying::VisitRoots(
    mirror::Object*** roots, size_t count, const RootInfo& info ATTRIBUTE_UNUSED) {
  for (size_t i = 0; i < count; ++i) {
    mirror::Object** root = roots[i];
    mirror::Object* ref = *root;
    if (ref == nullptr || region_space_->IsInToSpace(ref)) {
      return;
    }
    mirror::Object* to_ref = Mark(ref);
    if (to_ref == ref) {
      return;
    }
    Atomic<mirror::Object*>* addr = reinterpret_cast<Atomic<mirror::Object*>*>(root);
    mirror::Object* expected_ref = ref;
    mirror::Object* new_ref = to_ref;
    do {
      if (expected_ref != addr->LoadRelaxed()) {
        // It was updated by the mutator.
        break;
      }
    } while (!addr->CompareExchangeWeakSequentiallyConsistent(expected_ref, new_ref));
  }
}

void ConcurrentCopying::VisitRoots(
    mirror::CompressedReference<mirror::Object>** roots, size_t count,
    const RootInfo& info ATTRIBUTE_UNUSED) {
  for (size_t i = 0; i < count; ++i) {
    mirror::CompressedReference<mirror::Object>* root = roots[i];
    mirror::Object* ref = root->AsMirrorPtr();
    if (ref == nullptr || region_space_->IsInToSpace(ref)) {
      return;
    }
    mirror::Object* to_ref = Mark(ref);
    if (to_ref == ref) {
      return;
    }
    auto* addr = reinterpret_cast<Atomic<mirror::CompressedReference<mirror::Object>>*>(root);
    auto expected_ref = mirror::CompressedReference<mirror::Object>::FromMirrorPtr(ref);
    auto new_ref = mirror::CompressedReference<mirror::Object>::FromMirrorPtr(to_ref);
    do {
      if (ref != addr->LoadRelaxed().AsMirrorPtr()) {
        // It was updated by the mutator.
        break;
      }
    } while (!addr->CompareExchangeWeakSequentiallyConsistent(expected_ref, new_ref));
  }
}

// Fill the given memory block with a dummy object. Used to fill in a
// copy of an object that was lost in a race.
void ConcurrentCopying::FillWithDummyObject(mirror::Object* dummy_obj, size_t byte_size) {
  CHECK(IsAligned<kObjectAlignment>(byte_size));
  memset(dummy_obj, 0, byte_size);
  mirror::Class* int_array_class = mirror::IntArray::GetArrayClass();
  CHECK(int_array_class != nullptr);
  AssertToSpaceInvariant(nullptr, MemberOffset(0), int_array_class);
  size_t component_size = int_array_class->GetComponentSize();
  CHECK_EQ(component_size, sizeof(int32_t));
  size_t data_offset = mirror::Array::DataOffset(component_size).SizeValue();
  if (data_offset > byte_size) {
    // An int array is too big. Use java.lang.Object.
    mirror::Class* java_lang_Object = WellKnownClasses::ToClass(WellKnownClasses::java_lang_Object);
    AssertToSpaceInvariant(nullptr, MemberOffset(0), java_lang_Object);
    CHECK_EQ(byte_size, java_lang_Object->GetObjectSize());
    dummy_obj->SetClass(java_lang_Object);
    CHECK_EQ(byte_size, dummy_obj->SizeOf());
  } else {
    // Use an int array.
    dummy_obj->SetClass(int_array_class);
    CHECK(dummy_obj->IsArrayInstance());
    int32_t length = (byte_size - data_offset) / component_size;
    dummy_obj->AsArray()->SetLength(length);
    CHECK_EQ(dummy_obj->AsArray()->GetLength(), length)
        << "byte_size=" << byte_size << " length=" << length
        << " component_size=" << component_size << " data_offset=" << data_offset;
    CHECK_EQ(byte_size, dummy_obj->SizeOf())
        << "byte_size=" << byte_size << " length=" << length
        << " component_size=" << component_size << " data_offset=" << data_offset;
  }
}

// Reuse the memory blocks that were copies of objects lost in races.
mirror::Object* ConcurrentCopying::AllocateInSkippedBlock(size_t alloc_size) {
  // Try to reuse the blocks that were unused due to CAS failures.
  CHECK(IsAligned<space::RegionSpace::kAlignment>(alloc_size));
  Thread* self = Thread::Current();
  size_t min_object_size = RoundUp(sizeof(mirror::Object), space::RegionSpace::kAlignment);
  MutexLock mu(self, skipped_blocks_lock_);
  auto it = skipped_blocks_map_.lower_bound(alloc_size);
  if (it == skipped_blocks_map_.end()) {
    // Not found.
    return nullptr;
  }
  {
    size_t byte_size = it->first;
    CHECK_GE(byte_size, alloc_size);
    if (byte_size > alloc_size && byte_size - alloc_size < min_object_size) {
      // If remainder would be too small for a dummy object, retry with a larger request size.
      it = skipped_blocks_map_.lower_bound(alloc_size + min_object_size);
      if (it == skipped_blocks_map_.end()) {
        // Not found.
        return nullptr;
      }
      CHECK(IsAligned<space::RegionSpace::kAlignment>(it->first - alloc_size));
      CHECK_GE(it->first - alloc_size, min_object_size)
          << "byte_size=" << byte_size << " it->first=" << it->first
          << " alloc_size=" << alloc_size;
    }
  }
  // Found a block.
  CHECK(it != skipped_blocks_map_.end());
  size_t byte_size = it->first;
  uint8_t* addr = it->second;
  CHECK_GE(byte_size, alloc_size);
  CHECK(region_space_->IsInToSpace(reinterpret_cast<mirror::Object*>(addr)));
  CHECK(IsAligned<space::RegionSpace::kAlignment>(byte_size));
  if (kVerboseMode) {
    LOG(INFO) << "Reusing skipped bytes : " << reinterpret_cast<void*>(addr) << ", " << byte_size;
  }
  skipped_blocks_map_.erase(it);
  memset(addr, 0, byte_size);
  if (byte_size > alloc_size) {
    // Return the remainder to the map.
    CHECK(IsAligned<space::RegionSpace::kAlignment>(byte_size - alloc_size));
    CHECK_GE(byte_size - alloc_size, min_object_size);
    FillWithDummyObject(reinterpret_cast<mirror::Object*>(addr + alloc_size),
                        byte_size - alloc_size);
    CHECK(region_space_->IsInToSpace(reinterpret_cast<mirror::Object*>(addr + alloc_size)));
    skipped_blocks_map_.insert(std::make_pair(byte_size - alloc_size, addr + alloc_size));
  }
  return reinterpret_cast<mirror::Object*>(addr);
}

mirror::Object* ConcurrentCopying::Copy(mirror::Object* from_ref) {
  DCHECK(region_space_->IsInFromSpace(from_ref));
  // No read barrier to avoid nested RB that might violate the to-space
  // invariant. Note that from_ref is a from space ref so the SizeOf()
  // call will access the from-space meta objects, but it's ok and necessary.
  size_t obj_size = from_ref->SizeOf<kDefaultVerifyFlags, kWithoutReadBarrier>();
  size_t region_space_alloc_size = RoundUp(obj_size, space::RegionSpace::kAlignment);
  size_t region_space_bytes_allocated = 0U;
  size_t non_moving_space_bytes_allocated = 0U;
  size_t bytes_allocated = 0U;
  size_t dummy;
  mirror::Object* to_ref = region_space_->AllocNonvirtual<true>(
      region_space_alloc_size, &region_space_bytes_allocated, nullptr, &dummy);
  bytes_allocated = region_space_bytes_allocated;
  if (to_ref != nullptr) {
    DCHECK_EQ(region_space_alloc_size, region_space_bytes_allocated);
  }
  bool fall_back_to_non_moving = false;
  if (UNLIKELY(to_ref == nullptr)) {
    // Failed to allocate in the region space. Try the skipped blocks.
    to_ref = AllocateInSkippedBlock(region_space_alloc_size);
    if (to_ref != nullptr) {
      // Succeeded to allocate in a skipped block.
      if (heap_->use_tlab_) {
        // This is necessary for the tlab case as it's not accounted in the space.
        region_space_->RecordAlloc(to_ref);
      }
      bytes_allocated = region_space_alloc_size;
    } else {
      // Fall back to the non-moving space.
      fall_back_to_non_moving = true;
      if (kVerboseMode) {
        LOG(INFO) << "Out of memory in the to-space. Fall back to non-moving. skipped_bytes="
                  << to_space_bytes_skipped_.LoadSequentiallyConsistent()
                  << " skipped_objects=" << to_space_objects_skipped_.LoadSequentiallyConsistent();
      }
      fall_back_to_non_moving = true;
      to_ref = heap_->non_moving_space_->Alloc(Thread::Current(), obj_size,
                                               &non_moving_space_bytes_allocated, nullptr, &dummy);
      CHECK(to_ref != nullptr) << "Fall-back non-moving space allocation failed";
      bytes_allocated = non_moving_space_bytes_allocated;
      // Mark it in the mark bitmap.
      accounting::ContinuousSpaceBitmap* mark_bitmap =
          heap_mark_bitmap_->GetContinuousSpaceBitmap(to_ref);
      CHECK(mark_bitmap != nullptr);
      CHECK(!mark_bitmap->AtomicTestAndSet(to_ref));
    }
  }
  DCHECK(to_ref != nullptr);

  // Attempt to install the forward pointer. This is in a loop as the
  // lock word atomic write can fail.
  while (true) {
    // Copy the object. TODO: copy only the lockword in the second iteration and on?
    memcpy(to_ref, from_ref, obj_size);
    // Set the gray ptr.
    if (kUseBakerReadBarrier) {
      to_ref->SetReadBarrierPointer(ReadBarrier::GrayPtr());
    }

    LockWord old_lock_word = to_ref->GetLockWord(false);

    if (old_lock_word.GetState() == LockWord::kForwardingAddress) {
      // Lost the race. Another thread (either GC or mutator) stored
      // the forwarding pointer first. Make the lost copy (to_ref)
      // look like a valid but dead (dummy) object and keep it for
      // future reuse.
      FillWithDummyObject(to_ref, bytes_allocated);
      if (!fall_back_to_non_moving) {
        DCHECK(region_space_->IsInToSpace(to_ref));
        if (bytes_allocated > space::RegionSpace::kRegionSize) {
          // Free the large alloc.
          region_space_->FreeLarge(to_ref, bytes_allocated);
        } else {
          // Record the lost copy for later reuse.
          heap_->num_bytes_allocated_.FetchAndAddSequentiallyConsistent(bytes_allocated);
          to_space_bytes_skipped_.FetchAndAddSequentiallyConsistent(bytes_allocated);
          to_space_objects_skipped_.FetchAndAddSequentiallyConsistent(1);
          MutexLock mu(Thread::Current(), skipped_blocks_lock_);
          skipped_blocks_map_.insert(std::make_pair(bytes_allocated,
                                                    reinterpret_cast<uint8_t*>(to_ref)));
        }
      } else {
        DCHECK(heap_->non_moving_space_->HasAddress(to_ref));
        DCHECK_EQ(bytes_allocated, non_moving_space_bytes_allocated);
        // Free the non-moving-space chunk.
        accounting::ContinuousSpaceBitmap* mark_bitmap =
            heap_mark_bitmap_->GetContinuousSpaceBitmap(to_ref);
        CHECK(mark_bitmap != nullptr);
        CHECK(mark_bitmap->Clear(to_ref));
        heap_->non_moving_space_->Free(Thread::Current(), to_ref);
      }

      // Get the winner's forward ptr.
      mirror::Object* lost_fwd_ptr = to_ref;
      to_ref = reinterpret_cast<mirror::Object*>(old_lock_word.ForwardingAddress());
      CHECK(to_ref != nullptr);
      CHECK_NE(to_ref, lost_fwd_ptr);
      CHECK(region_space_->IsInToSpace(to_ref) || heap_->non_moving_space_->HasAddress(to_ref));
      CHECK_NE(to_ref->GetLockWord(false).GetState(), LockWord::kForwardingAddress);
      return to_ref;
    }

    LockWord new_lock_word = LockWord::FromForwardingAddress(reinterpret_cast<size_t>(to_ref));

    // Try to atomically write the fwd ptr.
    bool success = from_ref->CasLockWordWeakSequentiallyConsistent(old_lock_word, new_lock_word);
    if (LIKELY(success)) {
      // The CAS succeeded.
      objects_moved_.FetchAndAddSequentiallyConsistent(1);
      bytes_moved_.FetchAndAddSequentiallyConsistent(region_space_alloc_size);
      if (LIKELY(!fall_back_to_non_moving)) {
        DCHECK(region_space_->IsInToSpace(to_ref));
      } else {
        DCHECK(heap_->non_moving_space_->HasAddress(to_ref));
        DCHECK_EQ(bytes_allocated, non_moving_space_bytes_allocated);
      }
      if (kUseBakerReadBarrier) {
        DCHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr());
      }
      DCHECK(GetFwdPtr(from_ref) == to_ref);
      CHECK_NE(to_ref->GetLockWord(false).GetState(), LockWord::kForwardingAddress);
      PushOntoMarkStack<true>(to_ref);
      return to_ref;
    } else {
      // The CAS failed. It may have lost the race or may have failed
      // due to monitor/hashcode ops. Either way, retry.
    }
  }
}

mirror::Object* ConcurrentCopying::IsMarked(mirror::Object* from_ref) {
  DCHECK(from_ref != nullptr);
  space::RegionSpace::RegionType rtype = region_space_->GetRegionType(from_ref);
  if (rtype == space::RegionSpace::RegionType::kRegionTypeToSpace) {
    // It's already marked.
    return from_ref;
  }
  mirror::Object* to_ref;
  if (rtype == space::RegionSpace::RegionType::kRegionTypeFromSpace) {
    to_ref = GetFwdPtr(from_ref);
    DCHECK(to_ref == nullptr || region_space_->IsInToSpace(to_ref) ||
           heap_->non_moving_space_->HasAddress(to_ref))
        << "from_ref=" << from_ref << " to_ref=" << to_ref;
  } else if (rtype == space::RegionSpace::RegionType::kRegionTypeUnevacFromSpace) {
    if (region_space_bitmap_->Test(from_ref)) {
      to_ref = from_ref;
    } else {
      to_ref = nullptr;
    }
  } else {
    // from_ref is in a non-moving space.
    if (immune_region_.ContainsObject(from_ref)) {
      accounting::ContinuousSpaceBitmap* cc_bitmap =
          cc_heap_bitmap_->GetContinuousSpaceBitmap(from_ref);
      DCHECK(cc_bitmap != nullptr)
          << "An immune space object must have a bitmap";
      if (kIsDebugBuild) {
        DCHECK(heap_mark_bitmap_->GetContinuousSpaceBitmap(from_ref)->Test(from_ref))
            << "Immune space object must be already marked";
      }
      if (cc_bitmap->Test(from_ref)) {
        // Already marked.
        to_ref = from_ref;
      } else {
        // Not marked.
        to_ref = nullptr;
      }
    } else {
      // Non-immune non-moving space. Use the mark bitmap.
      accounting::ContinuousSpaceBitmap* mark_bitmap =
          heap_mark_bitmap_->GetContinuousSpaceBitmap(from_ref);
      accounting::LargeObjectBitmap* los_bitmap =
          heap_mark_bitmap_->GetLargeObjectBitmap(from_ref);
      CHECK(los_bitmap != nullptr) << "LOS bitmap covers the entire address range";
      bool is_los = mark_bitmap == nullptr;
      if (!is_los && mark_bitmap->Test(from_ref)) {
        // Already marked.
        to_ref = from_ref;
      } else if (is_los && los_bitmap->Test(from_ref)) {
        // Already marked in LOS.
        to_ref = from_ref;
      } else {
        // Not marked.
        if (IsOnAllocStack(from_ref)) {
          // If on the allocation stack, it's considered marked.
          to_ref = from_ref;
        } else {
          // Neither marked nor on the allocation stack.
          to_ref = nullptr;
        }
      }
    }
  }
  return to_ref;
}

bool ConcurrentCopying::IsOnAllocStack(mirror::Object* ref) {
  QuasiAtomic::ThreadFenceAcquire();
  accounting::ObjectStack* alloc_stack = GetAllocationStack();
  return alloc_stack->Contains(ref);
}

mirror::Object* ConcurrentCopying::Mark(mirror::Object* from_ref) {
  if (from_ref == nullptr) {
    return nullptr;
  }
  DCHECK(from_ref != nullptr);
  DCHECK(heap_->collector_type_ == kCollectorTypeCC);
  space::RegionSpace::RegionType rtype = region_space_->GetRegionType(from_ref);
  if (rtype == space::RegionSpace::RegionType::kRegionTypeToSpace) {
    // It's already marked.
    return from_ref;
  }
  mirror::Object* to_ref;
  if (rtype == space::RegionSpace::RegionType::kRegionTypeFromSpace) {
    to_ref = GetFwdPtr(from_ref);
    if (kUseBakerReadBarrier) {
      DCHECK(to_ref != ReadBarrier::GrayPtr()) << "from_ref=" << from_ref << " to_ref=" << to_ref;
    }
    if (to_ref == nullptr) {
      // It isn't marked yet. Mark it by copying it to the to-space.
      to_ref = Copy(from_ref);
    }
    DCHECK(region_space_->IsInToSpace(to_ref) || heap_->non_moving_space_->HasAddress(to_ref))
        << "from_ref=" << from_ref << " to_ref=" << to_ref;
  } else if (rtype == space::RegionSpace::RegionType::kRegionTypeUnevacFromSpace) {
    // This may or may not succeed, which is ok.
    if (kUseBakerReadBarrier) {
      from_ref->AtomicSetReadBarrierPointer(ReadBarrier::WhitePtr(), ReadBarrier::GrayPtr());
    }
    if (region_space_bitmap_->AtomicTestAndSet(from_ref)) {
      // Already marked.
      to_ref = from_ref;
    } else {
      // Newly marked.
      to_ref = from_ref;
      if (kUseBakerReadBarrier) {
        DCHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr());
      }
      PushOntoMarkStack<true>(to_ref);
    }
  } else {
    // from_ref is in a non-moving space.
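    // Non-moving objects are marked in place rather than copied. When one needs
    // marking, the Baker read-barrier pointer is first CASed from white to gray,
    // then the relevant bitmap bit is set with an atomic test-and-set; only the
    // thread that newly sets the bit pushes the object onto the mark stack.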
    DCHECK(!region_space_->HasAddress(from_ref)) << from_ref;
    if (immune_region_.ContainsObject(from_ref)) {
      accounting::ContinuousSpaceBitmap* cc_bitmap =
          cc_heap_bitmap_->GetContinuousSpaceBitmap(from_ref);
      DCHECK(cc_bitmap != nullptr)
          << "An immune space object must have a bitmap";
      if (kIsDebugBuild) {
        DCHECK(heap_mark_bitmap_->GetContinuousSpaceBitmap(from_ref)->Test(from_ref))
            << "Immune space object must be already marked";
      }
      // This may or may not succeed, which is ok.
      if (kUseBakerReadBarrier) {
        from_ref->AtomicSetReadBarrierPointer(ReadBarrier::WhitePtr(), ReadBarrier::GrayPtr());
      }
      if (cc_bitmap->AtomicTestAndSet(from_ref)) {
        // Already marked.
        to_ref = from_ref;
      } else {
        // Newly marked.
        to_ref = from_ref;
        if (kUseBakerReadBarrier) {
          DCHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr());
        }
        PushOntoMarkStack<true>(to_ref);
      }
    } else {
      // Use the mark bitmap.
      accounting::ContinuousSpaceBitmap* mark_bitmap =
          heap_mark_bitmap_->GetContinuousSpaceBitmap(from_ref);
      accounting::LargeObjectBitmap* los_bitmap =
          heap_mark_bitmap_->GetLargeObjectBitmap(from_ref);
      CHECK(los_bitmap != nullptr) << "LOS bitmap covers the entire address range";
      bool is_los = mark_bitmap == nullptr;
      if (!is_los && mark_bitmap->Test(from_ref)) {
        // Already marked.
        to_ref = from_ref;
        if (kUseBakerReadBarrier) {
          DCHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr() ||
                 to_ref->GetReadBarrierPointer() == ReadBarrier::BlackPtr());
        }
      } else if (is_los && los_bitmap->Test(from_ref)) {
        // Already marked in LOS.
        to_ref = from_ref;
        if (kUseBakerReadBarrier) {
          DCHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr() ||
                 to_ref->GetReadBarrierPointer() == ReadBarrier::BlackPtr());
        }
      } else {
        // Not marked.
        if (IsOnAllocStack(from_ref)) {
          // If it's on the allocation stack, it's considered marked. Keep it white.
          to_ref = from_ref;
          // Objects on the allocation stack need not be marked.
          if (!is_los) {
            DCHECK(!mark_bitmap->Test(to_ref));
          } else {
            DCHECK(!los_bitmap->Test(to_ref));
          }
          if (kUseBakerReadBarrier) {
            DCHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::WhitePtr());
          }
        } else {
          // Neither marked nor on the allocation stack. Try to mark it.
          // This may or may not succeed, which is ok.
          if (kUseBakerReadBarrier) {
            from_ref->AtomicSetReadBarrierPointer(ReadBarrier::WhitePtr(), ReadBarrier::GrayPtr());
          }
          if (!is_los && mark_bitmap->AtomicTestAndSet(from_ref)) {
            // Already marked.
            to_ref = from_ref;
          } else if (is_los && los_bitmap->AtomicTestAndSet(from_ref)) {
            // Already marked in LOS.
            to_ref = from_ref;
          } else {
            // Newly marked.
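            // This thread won the test-and-set race, so it alone queues the
            // object for scanning; racing threads take the branches above and
            // simply return the same (unmoved) reference.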
            to_ref = from_ref;
            if (kUseBakerReadBarrier) {
              DCHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr());
            }
            PushOntoMarkStack<true>(to_ref);
          }
        }
      }
    }
  }
  return to_ref;
}

void ConcurrentCopying::FinishPhase() {
  region_space_ = nullptr;
  CHECK(mark_queue_.IsEmpty());
  mark_queue_.Clear();
  {
    MutexLock mu(Thread::Current(), skipped_blocks_lock_);
    skipped_blocks_map_.clear();
  }
  WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
  heap_->ClearMarkedObjects();
}

mirror::Object* ConcurrentCopying::IsMarkedCallback(mirror::Object* from_ref, void* arg) {
  return reinterpret_cast<ConcurrentCopying*>(arg)->IsMarked(from_ref);
}

bool ConcurrentCopying::IsHeapReferenceMarkedCallback(
    mirror::HeapReference<mirror::Object>* field, void* arg) {
  mirror::Object* from_ref = field->AsMirrorPtr();
  mirror::Object* to_ref = reinterpret_cast<ConcurrentCopying*>(arg)->IsMarked(from_ref);
  if (to_ref == nullptr) {
    return false;
  }
  if (from_ref != to_ref) {
    QuasiAtomic::ThreadFenceRelease();
    field->Assign(to_ref);
    QuasiAtomic::ThreadFenceSequentiallyConsistent();
  }
  return true;
}

mirror::Object* ConcurrentCopying::MarkCallback(mirror::Object* from_ref, void* arg) {
  return reinterpret_cast<ConcurrentCopying*>(arg)->Mark(from_ref);
}

void ConcurrentCopying::ProcessMarkStackCallback(void* arg) {
  reinterpret_cast<ConcurrentCopying*>(arg)->ProcessMarkStack();
}

void ConcurrentCopying::DelayReferenceReferent(mirror::Class* klass, mirror::Reference* reference) {
  heap_->GetReferenceProcessor()->DelayReferenceReferent(
      klass, reference, &IsHeapReferenceMarkedCallback, this);
}

void ConcurrentCopying::ProcessReferences(Thread* self, bool concurrent) {
  TimingLogger::ScopedTiming split("ProcessReferences", GetTimings());
  WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
  GetHeap()->GetReferenceProcessor()->ProcessReferences(
      concurrent, GetTimings(), GetCurrentIteration()->GetClearSoftReferences(),
      &IsHeapReferenceMarkedCallback, &MarkCallback, &ProcessMarkStackCallback, this);
}

void ConcurrentCopying::RevokeAllThreadLocalBuffers() {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  region_space_->RevokeAllThreadLocalBuffers();
}

}  // namespace collector
}  // namespace gc
}  // namespace art