concurrent_copying.cc revision dd9943d4466b052ef6c5ee5b32187adb48cbce74
/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "concurrent_copying.h"

#include "gc/accounting/heap_bitmap-inl.h"
#include "gc/accounting/space_bitmap-inl.h"
#include "gc/space/image_space.h"
#include "gc/space/space.h"
#include "intern_table.h"
#include "mirror/art_field-inl.h"
#include "mirror/object-inl.h"
#include "scoped_thread_state_change.h"
#include "thread-inl.h"
#include "thread_list.h"
#include "well_known_classes.h"

namespace art {
namespace gc {
namespace collector {

ConcurrentCopying::ConcurrentCopying(Heap* heap, const std::string& name_prefix)
    : GarbageCollector(heap,
                       name_prefix + (name_prefix.empty() ? "" : " ") +
                       "concurrent copying + mark sweep"),
      region_space_(nullptr), gc_barrier_(new Barrier(0)), mark_queue_(2 * MB),
      is_marking_(false), is_active_(false), is_asserting_to_space_invariant_(false),
      heap_mark_bitmap_(nullptr), live_stack_freeze_size_(0),
      skipped_blocks_lock_("concurrent copying bytes blocks lock", kMarkSweepMarkStackLock),
      rb_table_(heap_->GetReadBarrierTable()),
      force_evacuate_all_(false) {
  static_assert(space::RegionSpace::kRegionSize == accounting::ReadBarrierTable::kRegionSize,
                "The region space size and the read barrier table region size must match");
  cc_heap_bitmap_.reset(new accounting::HeapBitmap(heap));
  {
    Thread* self = Thread::Current();
    ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
    // Cache this so that we won't have to lock heap_bitmap_lock_ in Mark(),
    // which could otherwise cause a nested acquisition of heap_bitmap_lock_
    // (when a read barrier runs while the GC is in progress) or a lock order
    // violation (class_linker_lock_ and heap_bitmap_lock_).
    heap_mark_bitmap_ = heap->GetMarkBitmap();
  }
}

ConcurrentCopying::~ConcurrentCopying() {
}

void ConcurrentCopying::RunPhases() {
  CHECK(kUseBakerReadBarrier || kUseTableLookupReadBarrier);
  CHECK(!is_active_);
  is_active_ = true;
  Thread* self = Thread::Current();
  Locks::mutator_lock_->AssertNotHeld(self);
  {
    ReaderMutexLock mu(self, *Locks::mutator_lock_);
    InitializePhase();
  }
  FlipThreadRoots();
  {
    ReaderMutexLock mu(self, *Locks::mutator_lock_);
    MarkingPhase();
  }
  // Verify no from-space refs. This causes a pause.
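  // This verification pass only runs in debug builds or when explicitly enabled. It suspends
  // the mutators again and re-walks the roots, the to-space, the non-moving spaces, and the
  // allocation stack to check that no from-space pointer survived the marking phase.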
  if (kEnableNoFromSpaceRefsVerification || kIsDebugBuild) {
    TimingLogger::ScopedTiming split("(Paused)VerifyNoFromSpaceReferences", GetTimings());
    ScopedPause pause(this);
    CheckEmptyMarkQueue();
    if (kVerboseMode) {
      LOG(INFO) << "Verifying no from-space refs";
    }
    VerifyNoFromSpaceReferences();
    CheckEmptyMarkQueue();
  }
  {
    ReaderMutexLock mu(self, *Locks::mutator_lock_);
    ReclaimPhase();
  }
  FinishPhase();
  CHECK(is_active_);
  is_active_ = false;
}

void ConcurrentCopying::BindBitmaps() {
  Thread* self = Thread::Current();
  WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
  // Mark all of the spaces we never collect as immune.
  for (const auto& space : heap_->GetContinuousSpaces()) {
    if (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyNeverCollect
        || space->GetGcRetentionPolicy() == space::kGcRetentionPolicyFullCollect) {
      CHECK(space->IsZygoteSpace() || space->IsImageSpace());
      CHECK(immune_region_.AddContinuousSpace(space)) << "Failed to add space " << *space;
      const char* bitmap_name = space->IsImageSpace() ? "cc image space bitmap" :
          "cc zygote space bitmap";
      // TODO: try avoiding using bitmaps for image/zygote to save space.
      accounting::ContinuousSpaceBitmap* bitmap =
          accounting::ContinuousSpaceBitmap::Create(bitmap_name, space->Begin(), space->Capacity());
      cc_heap_bitmap_->AddContinuousSpaceBitmap(bitmap);
      cc_bitmaps_.push_back(bitmap);
    } else if (space == region_space_) {
      accounting::ContinuousSpaceBitmap* bitmap =
          accounting::ContinuousSpaceBitmap::Create("cc region space bitmap",
                                                    space->Begin(), space->Capacity());
      cc_heap_bitmap_->AddContinuousSpaceBitmap(bitmap);
      cc_bitmaps_.push_back(bitmap);
      region_space_bitmap_ = bitmap;
    }
  }
}

void ConcurrentCopying::InitializePhase() {
  TimingLogger::ScopedTiming split("InitializePhase", GetTimings());
  if (kVerboseMode) {
    LOG(INFO) << "GC InitializePhase";
    LOG(INFO) << "Region-space : " << reinterpret_cast<void*>(region_space_->Begin()) << "-"
              << reinterpret_cast<void*>(region_space_->Limit());
  }
  CHECK(mark_queue_.IsEmpty());
  immune_region_.Reset();
  bytes_moved_.StoreRelaxed(0);
  objects_moved_.StoreRelaxed(0);
  if (GetCurrentIteration()->GetGcCause() == kGcCauseExplicit ||
      GetCurrentIteration()->GetGcCause() == kGcCauseForNativeAlloc ||
      GetCurrentIteration()->GetClearSoftReferences()) {
    force_evacuate_all_ = true;
  } else {
    force_evacuate_all_ = false;
  }
  BindBitmaps();
  if (kVerboseMode) {
    LOG(INFO) << "force_evacuate_all=" << force_evacuate_all_;
    LOG(INFO) << "Immune region: " << immune_region_.Begin() << "-" << immune_region_.End();
    LOG(INFO) << "GC end of InitializePhase";
  }
}

// Used to switch the thread roots of a thread from from-space refs to to-space refs.
class ThreadFlipVisitor : public Closure {
 public:
  explicit ThreadFlipVisitor(ConcurrentCopying* concurrent_copying, bool use_tlab)
      : concurrent_copying_(concurrent_copying), use_tlab_(use_tlab) {
  }

  virtual void Run(Thread* thread) OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    // Note: self is not necessarily equal to thread since thread may be suspended.
    Thread* self = Thread::Current();
    CHECK(thread == self || thread->IsSuspended() || thread->GetState() == kWaitingPerformingGc)
        << thread->GetState() << " thread " << thread << " self " << self;
    if (use_tlab_ && thread->HasTlab()) {
      if (ConcurrentCopying::kEnableFromSpaceAccountingCheck) {
        // This must come before the revoke.
        size_t thread_local_objects = thread->GetThreadLocalObjectsAllocated();
        concurrent_copying_->region_space_->RevokeThreadLocalBuffers(thread);
        reinterpret_cast<Atomic<size_t>*>(&concurrent_copying_->from_space_num_objects_at_first_pause_)->
            FetchAndAddSequentiallyConsistent(thread_local_objects);
      } else {
        concurrent_copying_->region_space_->RevokeThreadLocalBuffers(thread);
      }
    }
    if (kUseThreadLocalAllocationStack) {
      thread->RevokeThreadLocalAllocationStack();
    }
    ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
    thread->VisitRoots(ConcurrentCopying::ProcessRootCallback, concurrent_copying_);
    concurrent_copying_->GetBarrier().Pass(self);
  }

 private:
  ConcurrentCopying* const concurrent_copying_;
  const bool use_tlab_;
};

// Called back from Runtime::FlipThreadRoots() during a pause.
class FlipCallback : public Closure {
 public:
  explicit FlipCallback(ConcurrentCopying* concurrent_copying)
      : concurrent_copying_(concurrent_copying) {
  }

  virtual void Run(Thread* thread) OVERRIDE EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_) {
    ConcurrentCopying* cc = concurrent_copying_;
    TimingLogger::ScopedTiming split("(Paused)FlipCallback", cc->GetTimings());
    // Note: self is not necessarily equal to thread since thread may be suspended.
    Thread* self = Thread::Current();
    CHECK(thread == self);
    Locks::mutator_lock_->AssertExclusiveHeld(self);
    cc->region_space_->SetFromSpace(cc->rb_table_, cc->force_evacuate_all_);
    cc->SwapStacks(self);
    if (ConcurrentCopying::kEnableFromSpaceAccountingCheck) {
      cc->RecordLiveStackFreezeSize(self);
      cc->from_space_num_objects_at_first_pause_ = cc->region_space_->GetObjectsAllocated();
      cc->from_space_num_bytes_at_first_pause_ = cc->region_space_->GetBytesAllocated();
    }
    cc->is_marking_ = true;
    if (UNLIKELY(Runtime::Current()->IsActiveTransaction())) {
      CHECK(Runtime::Current()->IsCompiler());
      TimingLogger::ScopedTiming split2("(Paused)VisitTransactionRoots", cc->GetTimings());
      Runtime::Current()->VisitTransactionRoots(ConcurrentCopying::ProcessRootCallback, cc);
    }
  }

 private:
  ConcurrentCopying* const concurrent_copying_;
};

// Switch thread roots from from-space refs to to-space refs. Forward/mark the thread roots.
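// Runtime::FlipThreadRoots() runs FlipCallback with the mutator lock held exclusively (it sets
// up the from-space and swaps the allocation stacks) and arranges for ThreadFlipVisitor to be
// run on every mutator thread to forward that thread's roots; each visitor signals gc_barrier_,
// and the Increment() call below waits until all of them have done so.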
void ConcurrentCopying::FlipThreadRoots() {
  TimingLogger::ScopedTiming split("FlipThreadRoots", GetTimings());
  if (kVerboseMode) {
    LOG(INFO) << "time=" << region_space_->Time();
    region_space_->DumpNonFreeRegions(LOG(INFO));
  }
  Thread* self = Thread::Current();
  Locks::mutator_lock_->AssertNotHeld(self);
  gc_barrier_->Init(self, 0);
  ThreadFlipVisitor thread_flip_visitor(this, heap_->use_tlab_);
  FlipCallback flip_callback(this);
  size_t barrier_count = Runtime::Current()->FlipThreadRoots(
      &thread_flip_visitor, &flip_callback, this);
  {
    ScopedThreadStateChange tsc(self, kWaitingForCheckPointsToRun);
    gc_barrier_->Increment(self, barrier_count);
  }
  is_asserting_to_space_invariant_ = true;
  QuasiAtomic::ThreadFenceForConstructor();
  if (kVerboseMode) {
    LOG(INFO) << "time=" << region_space_->Time();
    region_space_->DumpNonFreeRegions(LOG(INFO));
    LOG(INFO) << "GC end of FlipThreadRoots";
  }
}

void ConcurrentCopying::SwapStacks(Thread* self) {
  heap_->SwapStacks(self);
}

void ConcurrentCopying::RecordLiveStackFreezeSize(Thread* self) {
  WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
  live_stack_freeze_size_ = heap_->GetLiveStack()->Size();
}

// Used to visit objects in the immune spaces.
class ConcurrentCopyingImmuneSpaceObjVisitor {
 public:
  explicit ConcurrentCopyingImmuneSpaceObjVisitor(ConcurrentCopying* cc)
      : collector_(cc) {}

  void operator()(mirror::Object* obj) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
    DCHECK(obj != nullptr);
    DCHECK(collector_->immune_region_.ContainsObject(obj));
    accounting::ContinuousSpaceBitmap* cc_bitmap =
        collector_->cc_heap_bitmap_->GetContinuousSpaceBitmap(obj);
    DCHECK(cc_bitmap != nullptr)
        << "An immune space object must have a bitmap";
    if (kIsDebugBuild) {
      DCHECK(collector_->heap_->GetMarkBitmap()->Test(obj))
          << "Immune space object must be already marked";
    }
    // This may or may not succeed, which is ok.
    if (kUseBakerReadBarrier) {
      obj->AtomicSetReadBarrierPointer(ReadBarrier::WhitePtr(), ReadBarrier::GrayPtr());
    }
    if (cc_bitmap->AtomicTestAndSet(obj)) {
      // Already marked. Do nothing.
    } else {
      // Newly marked. Set the gray bit and push it onto the mark stack.
      CHECK(!kUseBakerReadBarrier || obj->GetReadBarrierPointer() == ReadBarrier::GrayPtr());
      collector_->PushOntoMarkStack<true>(obj);
    }
  }

 private:
  ConcurrentCopying* collector_;
};

class EmptyCheckpoint : public Closure {
 public:
  explicit EmptyCheckpoint(ConcurrentCopying* concurrent_copying)
      : concurrent_copying_(concurrent_copying) {
  }

  virtual void Run(Thread* thread) OVERRIDE NO_THREAD_SAFETY_ANALYSIS {
    // Note: self is not necessarily equal to thread since thread may be suspended.
    Thread* self = Thread::Current();
    CHECK(thread == self || thread->IsSuspended() || thread->GetState() == kWaitingPerformingGc)
        << thread->GetState() << " thread " << thread << " self " << self;
    // If thread is a running mutator, then act on behalf of the garbage collector.
    // See the code in ThreadList::RunCheckpoint.
    if (thread->GetState() == kRunnable) {
      concurrent_copying_->GetBarrier().Pass(self);
    }
  }

 private:
  ConcurrentCopying* const concurrent_copying_;
};

// Concurrently mark roots that are guarded by read barriers and process the mark stack.
void ConcurrentCopying::MarkingPhase() {
  TimingLogger::ScopedTiming split("MarkingPhase", GetTimings());
  if (kVerboseMode) {
    LOG(INFO) << "GC MarkingPhase";
  }
  {
    // Mark the image root. The WB-based collectors do not need to
    // scan the image objects from roots by relying on the card table,
    // but it's necessary for the RB to-space invariant to hold.
    TimingLogger::ScopedTiming split1("VisitImageRoots", GetTimings());
    gc::space::ImageSpace* image = heap_->GetImageSpace();
    if (image != nullptr) {
      mirror::ObjectArray<mirror::Object>* image_root = image->GetImageHeader().GetImageRoots();
      mirror::Object* marked_image_root = Mark(image_root);
      CHECK_EQ(image_root, marked_image_root) << "An image object does not move";
      if (ReadBarrier::kEnableToSpaceInvariantChecks) {
        AssertToSpaceInvariant(nullptr, MemberOffset(0), marked_image_root);
      }
    }
  }
  {
    TimingLogger::ScopedTiming split2("VisitConstantRoots", GetTimings());
    Runtime::Current()->VisitConstantRoots(ProcessRootCallback, this);
  }
  {
    TimingLogger::ScopedTiming split3("VisitInternTableRoots", GetTimings());
    Runtime::Current()->GetInternTable()->VisitRoots(ProcessRootCallback,
                                                     this, kVisitRootFlagAllRoots);
  }
  {
    TimingLogger::ScopedTiming split4("VisitClassLinkerRoots", GetTimings());
    Runtime::Current()->GetClassLinker()->VisitRoots(ProcessRootCallback,
                                                     this, kVisitRootFlagAllRoots);
  }
  {
    // TODO: don't visit the transaction roots if it's not active.
    TimingLogger::ScopedTiming split5("VisitNonThreadRoots", GetTimings());
    Runtime::Current()->VisitNonThreadRoots(ProcessRootCallback, this);
  }

  // Immune spaces.
  for (auto& space : heap_->GetContinuousSpaces()) {
    if (immune_region_.ContainsSpace(space)) {
      DCHECK(space->IsImageSpace() || space->IsZygoteSpace());
      accounting::ContinuousSpaceBitmap* live_bitmap = space->GetLiveBitmap();
      ConcurrentCopyingImmuneSpaceObjVisitor visitor(this);
      live_bitmap->VisitMarkedRange(reinterpret_cast<uintptr_t>(space->Begin()),
                                    reinterpret_cast<uintptr_t>(space->Limit()),
                                    visitor);
    }
  }

  Thread* self = Thread::Current();
  {
    TimingLogger::ScopedTiming split6("ProcessMarkStack", GetTimings());
    // Process the mark stack and issue an empty check point. If the
    // mark stack is still empty after the check point, we're
    // done. Otherwise, repeat.
    ProcessMarkStack();
    size_t count = 0;
    while (!ProcessMarkStack()) {
      ++count;
      if (kVerboseMode) {
        LOG(INFO) << "Issue an empty check point. " << count;
      }
      IssueEmptyCheckpoint();
    }
    // Need to ensure the mark stack is empty before reference
    // processing to get rid of non-reference gray objects.
    CheckEmptyMarkQueue();
    // Enable the GetReference slow path and disallow access to the system weaks.
    GetHeap()->GetReferenceProcessor()->EnableSlowPath();
    Runtime::Current()->DisallowNewSystemWeaks();
    QuasiAtomic::ThreadFenceForConstructor();
    // Lock-unlock the system weak locks so that there's no thread in
    // the middle of accessing system weaks.
    Runtime::Current()->EnsureNewSystemWeaksDisallowed();
    // Note: Do not issue a checkpoint from here to the
    // SweepSystemWeaks call or else a deadlock due to
    // WaitHoldingLocks() would occur.
    if (kVerboseMode) {
      LOG(INFO) << "Enabled the ref proc slow path & disabled access to system weaks.";
      LOG(INFO) << "ProcessReferences";
    }
    ProcessReferences(self, true);
    CheckEmptyMarkQueue();
    if (kVerboseMode) {
      LOG(INFO) << "SweepSystemWeaks";
    }
    SweepSystemWeaks(self);
    if (kVerboseMode) {
      LOG(INFO) << "SweepSystemWeaks done";
    }
    // Because hash_set::Erase() can call the hash function for
    // arbitrary elements in the weak intern table in
    // InternTable::Table::SweepWeaks(), the above SweepSystemWeaks()
    // call may have marked some objects (strings) alive. So process
    // the mark stack here once again.
    ProcessMarkStack();
    CheckEmptyMarkQueue();
    // Disable marking.
    if (kUseTableLookupReadBarrier) {
      heap_->rb_table_->ClearAll();
      DCHECK(heap_->rb_table_->IsAllCleared());
    }
    is_mark_queue_push_disallowed_.StoreSequentiallyConsistent(1);
    is_marking_ = false;
    if (kVerboseMode) {
      LOG(INFO) << "AllowNewSystemWeaks";
    }
    Runtime::Current()->AllowNewSystemWeaks();
    CheckEmptyMarkQueue();
  }

  if (kVerboseMode) {
    LOG(INFO) << "GC end of MarkingPhase";
  }
}

void ConcurrentCopying::IssueEmptyCheckpoint() {
  Thread* self = Thread::Current();
  EmptyCheckpoint check_point(this);
  ThreadList* thread_list = Runtime::Current()->GetThreadList();
  gc_barrier_->Init(self, 0);
  size_t barrier_count = thread_list->RunCheckpoint(&check_point);
  // If there are no threads to wait for, which implies that all the checkpoint
  // functions have already finished, there is no need to release the mutator lock.
  if (barrier_count == 0) {
    return;
  }
  // Release locks then wait for all mutator threads to pass the barrier.
  Locks::mutator_lock_->SharedUnlock(self);
  {
    ScopedThreadStateChange tsc(self, kWaitingForCheckPointsToRun);
    gc_barrier_->Increment(self, barrier_count);
  }
  Locks::mutator_lock_->SharedLock(self);
}

mirror::Object* ConcurrentCopying::PopOffMarkStack() {
  return mark_queue_.Dequeue();
}

template<bool kThreadSafe>
void ConcurrentCopying::PushOntoMarkStack(mirror::Object* to_ref) {
  CHECK_EQ(is_mark_queue_push_disallowed_.LoadRelaxed(), 0)
      << " " << to_ref << " " << PrettyTypeOf(to_ref);
  if (kThreadSafe) {
    CHECK(mark_queue_.Enqueue(to_ref)) << "Mark queue overflow";
  } else {
    CHECK(mark_queue_.EnqueueThreadUnsafe(to_ref)) << "Mark queue overflow";
  }
}

accounting::ObjectStack* ConcurrentCopying::GetAllocationStack() {
  return heap_->allocation_stack_.get();
}

accounting::ObjectStack* ConcurrentCopying::GetLiveStack() {
  return heap_->live_stack_.get();
}

inline mirror::Object* ConcurrentCopying::GetFwdPtr(mirror::Object* from_ref) {
  DCHECK(region_space_->IsInFromSpace(from_ref));
  LockWord lw = from_ref->GetLockWord(false);
  if (lw.GetState() == LockWord::kForwardingAddress) {
    mirror::Object* fwd_ptr = reinterpret_cast<mirror::Object*>(lw.ForwardingAddress());
    CHECK(fwd_ptr != nullptr);
    return fwd_ptr;
  } else {
    return nullptr;
  }
}

inline void ConcurrentCopying::SetFwdPtr(mirror::Object* from_ref, mirror::Object* to_ref) {
  DCHECK(region_space_->IsInFromSpace(from_ref));
  DCHECK(region_space_->IsInToSpace(to_ref) || heap_->GetNonMovingSpace()->HasAddress(to_ref));
  LockWord lw = from_ref->GetLockWord(false);
  DCHECK_NE(lw.GetState(), LockWord::kForwardingAddress);
  from_ref->SetLockWord(LockWord::FromForwardingAddress(reinterpret_cast<size_t>(to_ref)), false);
}

// The following visitors are used to verify that there are no references to the
// from-space left after marking.
class ConcurrentCopyingVerifyNoFromSpaceRefsVisitor {
 public:
  explicit ConcurrentCopyingVerifyNoFromSpaceRefsVisitor(ConcurrentCopying* collector)
      : collector_(collector) {}

  void operator()(mirror::Object* ref) const
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) ALWAYS_INLINE {
    if (ref == nullptr) {
      // OK.
      return;
    }
    collector_->AssertToSpaceInvariant(nullptr, MemberOffset(0), ref);
    if (kUseBakerReadBarrier) {
      if (collector_->RegionSpace()->IsInToSpace(ref)) {
        CHECK(ref->GetReadBarrierPointer() == nullptr)
            << "To-space ref " << ref << " " << PrettyTypeOf(ref)
            << " has non-white rb_ptr " << ref->GetReadBarrierPointer();
      } else {
        CHECK(ref->GetReadBarrierPointer() == ReadBarrier::BlackPtr() ||
              (ref->GetReadBarrierPointer() == ReadBarrier::WhitePtr() &&
               collector_->IsOnAllocStack(ref)))
            << "Non-moving/unevac from space ref " << ref << " " << PrettyTypeOf(ref)
            << " has non-black rb_ptr " << ref->GetReadBarrierPointer()
            << " but isn't on the alloc stack (and has white rb_ptr)."
            << " Is it in the non-moving space="
            << (collector_->GetHeap()->GetNonMovingSpace()->HasAddress(ref));
      }
    }
  }

  static void RootCallback(mirror::Object** root, void *arg, const RootInfo& /*root_info*/)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    ConcurrentCopying* collector = reinterpret_cast<ConcurrentCopying*>(arg);
    ConcurrentCopyingVerifyNoFromSpaceRefsVisitor visitor(collector);
    DCHECK(root != nullptr);
    visitor(*root);
  }

 private:
  ConcurrentCopying* collector_;
};

class ConcurrentCopyingVerifyNoFromSpaceRefsFieldVisitor {
 public:
  explicit ConcurrentCopyingVerifyNoFromSpaceRefsFieldVisitor(ConcurrentCopying* collector)
      : collector_(collector) {}

  void operator()(mirror::Object* obj, MemberOffset offset, bool /* is_static */) const
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) ALWAYS_INLINE {
    mirror::Object* ref =
        obj->GetFieldObject<mirror::Object, kDefaultVerifyFlags, kWithoutReadBarrier>(offset);
    ConcurrentCopyingVerifyNoFromSpaceRefsVisitor visitor(collector_);
    visitor(ref);
  }
  void operator()(mirror::Class* klass, mirror::Reference* ref) const
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) ALWAYS_INLINE {
    CHECK(klass->IsTypeOfReferenceClass());
    this->operator()(ref, mirror::Reference::ReferentOffset(), false);
  }

 private:
  ConcurrentCopying* collector_;
};

class ConcurrentCopyingVerifyNoFromSpaceRefsObjectVisitor {
 public:
  explicit ConcurrentCopyingVerifyNoFromSpaceRefsObjectVisitor(ConcurrentCopying* collector)
      : collector_(collector) {}
  void operator()(mirror::Object* obj) const
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    ObjectCallback(obj, collector_);
  }
  static void ObjectCallback(mirror::Object* obj, void *arg)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    CHECK(obj != nullptr);
    ConcurrentCopying* collector = reinterpret_cast<ConcurrentCopying*>(arg);
    space::RegionSpace* region_space = collector->RegionSpace();
    CHECK(!region_space->IsInFromSpace(obj)) << "Scanning object " << obj << " in from space";
    ConcurrentCopyingVerifyNoFromSpaceRefsFieldVisitor visitor(collector);
    obj->VisitReferences<true>(visitor, visitor);
    if (kUseBakerReadBarrier) {
      if (collector->RegionSpace()->IsInToSpace(obj)) {
        CHECK(obj->GetReadBarrierPointer() == nullptr)
            << "obj=" << obj << " non-white rb_ptr " << obj->GetReadBarrierPointer();
      } else {
        CHECK(obj->GetReadBarrierPointer() == ReadBarrier::BlackPtr() ||
              (obj->GetReadBarrierPointer() == ReadBarrier::WhitePtr() &&
               collector->IsOnAllocStack(obj)))
            << "Non-moving space/unevac from space ref " << obj << " " << PrettyTypeOf(obj)
            << " has non-black rb_ptr " << obj->GetReadBarrierPointer()
            << " but isn't on the alloc stack (and has white rb_ptr). Is it in the non-moving space="
            << (collector->GetHeap()->GetNonMovingSpace()->HasAddress(obj));
      }
    }
  }

 private:
  ConcurrentCopying* const collector_;
};

// Verify there are no from-space references left after the marking phase.
void ConcurrentCopying::VerifyNoFromSpaceReferences() {
  Thread* self = Thread::Current();
  DCHECK(Locks::mutator_lock_->IsExclusiveHeld(self));
  ConcurrentCopyingVerifyNoFromSpaceRefsObjectVisitor visitor(this);
  // Roots.
  {
    ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
    Runtime::Current()->VisitRoots(
        ConcurrentCopyingVerifyNoFromSpaceRefsVisitor::RootCallback, this);
  }
  // The to-space.
  region_space_->WalkToSpace(ConcurrentCopyingVerifyNoFromSpaceRefsObjectVisitor::ObjectCallback,
                             this);
  // Non-moving spaces.
  {
    WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
    heap_->GetMarkBitmap()->Visit(visitor);
  }
  // The alloc stack.
  {
    ConcurrentCopyingVerifyNoFromSpaceRefsVisitor ref_visitor(this);
    for (auto* it = heap_->allocation_stack_->Begin(), *end = heap_->allocation_stack_->End();
         it < end; ++it) {
      mirror::Object* const obj = it->AsMirrorPtr();
      if (obj != nullptr && obj->GetClass() != nullptr) {
        // TODO: need to call this only if obj is alive?
        ref_visitor(obj);
        visitor(obj);
      }
    }
  }
  // TODO: LOS. But only refs in LOS are classes.
}

// The following visitors are used to assert the to-space invariant.
class ConcurrentCopyingAssertToSpaceInvariantRefsVisitor {
 public:
  explicit ConcurrentCopyingAssertToSpaceInvariantRefsVisitor(ConcurrentCopying* collector)
      : collector_(collector) {}

  void operator()(mirror::Object* ref) const
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) ALWAYS_INLINE {
    if (ref == nullptr) {
      // OK.
      return;
    }
    collector_->AssertToSpaceInvariant(nullptr, MemberOffset(0), ref);
  }
  static void RootCallback(mirror::Object** root, void *arg, const RootInfo& /*root_info*/)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    ConcurrentCopying* collector = reinterpret_cast<ConcurrentCopying*>(arg);
    ConcurrentCopyingAssertToSpaceInvariantRefsVisitor visitor(collector);
    DCHECK(root != nullptr);
    visitor(*root);
  }

 private:
  ConcurrentCopying* collector_;
};

class ConcurrentCopyingAssertToSpaceInvariantFieldVisitor {
 public:
  explicit ConcurrentCopyingAssertToSpaceInvariantFieldVisitor(ConcurrentCopying* collector)
      : collector_(collector) {}

  void operator()(mirror::Object* obj, MemberOffset offset, bool /* is_static */) const
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) ALWAYS_INLINE {
    mirror::Object* ref =
        obj->GetFieldObject<mirror::Object, kDefaultVerifyFlags, kWithoutReadBarrier>(offset);
    ConcurrentCopyingAssertToSpaceInvariantRefsVisitor visitor(collector_);
    visitor(ref);
  }
  void operator()(mirror::Class* klass, mirror::Reference* /* ref */) const
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) ALWAYS_INLINE {
    CHECK(klass->IsTypeOfReferenceClass());
  }

 private:
  ConcurrentCopying* collector_;
};

class ConcurrentCopyingAssertToSpaceInvariantObjectVisitor {
 public:
  explicit ConcurrentCopyingAssertToSpaceInvariantObjectVisitor(ConcurrentCopying* collector)
      : collector_(collector) {}
  void operator()(mirror::Object* obj) const
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    ObjectCallback(obj, collector_);
  }
  static void ObjectCallback(mirror::Object* obj, void *arg)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    CHECK(obj != nullptr);
    ConcurrentCopying* collector = reinterpret_cast<ConcurrentCopying*>(arg);
    space::RegionSpace* region_space = collector->RegionSpace();
    CHECK(!region_space->IsInFromSpace(obj)) << "Scanning object " << obj << " in from space";
    collector->AssertToSpaceInvariant(nullptr, MemberOffset(0), obj);
    ConcurrentCopyingAssertToSpaceInvariantFieldVisitor visitor(collector);
    obj->VisitReferences<true>(visitor, visitor);
  }

 private:
  ConcurrentCopying* collector_;
};

bool ConcurrentCopying::ProcessMarkStack() {
  if (kVerboseMode) {
    LOG(INFO) << "ProcessMarkStack. ";
  }
  size_t count = 0;
  mirror::Object* to_ref;
  while ((to_ref = PopOffMarkStack()) != nullptr) {
    ++count;
    DCHECK(!region_space_->IsInFromSpace(to_ref));
    if (kUseBakerReadBarrier) {
      DCHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr())
          << " " << to_ref << " " << to_ref->GetReadBarrierPointer()
          << " is_marked=" << IsMarked(to_ref);
    }
    // Scan ref fields.
    Scan(to_ref);
    // Mark the gray ref as white or black.
    if (kUseBakerReadBarrier) {
      DCHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr())
          << " " << to_ref << " " << to_ref->GetReadBarrierPointer()
          << " is_marked=" << IsMarked(to_ref);
    }
    if (to_ref->GetClass<kVerifyNone, kWithoutReadBarrier>()->IsTypeOfReferenceClass() &&
        to_ref->AsReference()->GetReferent<kWithoutReadBarrier>() != nullptr &&
        !IsInToSpace(to_ref->AsReference()->GetReferent<kWithoutReadBarrier>())) {
      // Leave References gray so that GetReferent() will trigger RB.
      CHECK(to_ref->AsReference()->IsEnqueued()) << "Left unenqueued ref gray " << to_ref;
    } else {
      if (kUseBakerReadBarrier) {
        if (region_space_->IsInToSpace(to_ref)) {
          // If to-space, change from gray to white.
          bool success = to_ref->AtomicSetReadBarrierPointer(ReadBarrier::GrayPtr(),
                                                             ReadBarrier::WhitePtr());
          CHECK(success) << "Must succeed as we won the race.";
          CHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::WhitePtr());
        } else {
          // If non-moving space/unevac from space, change from gray
          // to black. We can't change gray to white because it's not
          // safe to use CAS if two threads change values in opposite
          // directions (A->B and B->A). So, we change it to black to
          // indicate non-moving objects that have been marked
          // through. Note we'd need to change from black to white
          // later (concurrently).
          bool success = to_ref->AtomicSetReadBarrierPointer(ReadBarrier::GrayPtr(),
                                                             ReadBarrier::BlackPtr());
          CHECK(success) << "Must succeed as we won the race.";
          CHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::BlackPtr());
        }
      }
    }
    if (ReadBarrier::kEnableToSpaceInvariantChecks || kIsDebugBuild) {
      ConcurrentCopyingAssertToSpaceInvariantObjectVisitor visitor(this);
      visitor(to_ref);
    }
  }
  // Return true if the stack was empty.
  return count == 0;
}

void ConcurrentCopying::CheckEmptyMarkQueue() {
  if (!mark_queue_.IsEmpty()) {
    while (!mark_queue_.IsEmpty()) {
      mirror::Object* obj = mark_queue_.Dequeue();
      if (kUseBakerReadBarrier) {
        mirror::Object* rb_ptr = obj->GetReadBarrierPointer();
        LOG(INFO) << "On mark queue : " << obj << " " << PrettyTypeOf(obj) << " rb_ptr=" << rb_ptr
                  << " is_marked=" << IsMarked(obj);
      } else {
        LOG(INFO) << "On mark queue : " << obj << " " << PrettyTypeOf(obj)
                  << " is_marked=" << IsMarked(obj);
      }
    }
    LOG(FATAL) << "mark queue is not empty";
  }
}

void ConcurrentCopying::SweepSystemWeaks(Thread* self) {
  TimingLogger::ScopedTiming split("SweepSystemWeaks", GetTimings());
  ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
  Runtime::Current()->SweepSystemWeaks(IsMarkedCallback, this);
}

void ConcurrentCopying::Sweep(bool swap_bitmaps) {
  {
    TimingLogger::ScopedTiming t("MarkStackAsLive", GetTimings());
    accounting::ObjectStack* live_stack = heap_->GetLiveStack();
    if (kEnableFromSpaceAccountingCheck) {
      CHECK_GE(live_stack_freeze_size_, live_stack->Size());
    }
    heap_->MarkAllocStackAsLive(live_stack);
    live_stack->Reset();
  }
  CHECK(mark_queue_.IsEmpty());
  TimingLogger::ScopedTiming split("Sweep", GetTimings());
  for (const auto& space : GetHeap()->GetContinuousSpaces()) {
    if (space->IsContinuousMemMapAllocSpace()) {
      space::ContinuousMemMapAllocSpace* alloc_space = space->AsContinuousMemMapAllocSpace();
      if (space == region_space_ || immune_region_.ContainsSpace(space)) {
        continue;
      }
      TimingLogger::ScopedTiming split2(
          alloc_space->IsZygoteSpace() ? "SweepZygoteSpace" : "SweepAllocSpace", GetTimings());
      RecordFree(alloc_space->Sweep(swap_bitmaps));
    }
  }
  SweepLargeObjects(swap_bitmaps);
}

void ConcurrentCopying::SweepLargeObjects(bool swap_bitmaps) {
  TimingLogger::ScopedTiming split("SweepLargeObjects", GetTimings());
  RecordFreeLOS(heap_->GetLargeObjectsSpace()->Sweep(swap_bitmaps));
}

class ConcurrentCopyingClearBlackPtrsVisitor {
 public:
  explicit ConcurrentCopyingClearBlackPtrsVisitor(ConcurrentCopying* cc)
      : collector_(cc) {}
  void operator()(mirror::Object* obj) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
    DCHECK(obj != nullptr);
    CHECK(collector_->heap_->GetMarkBitmap()->Test(obj)) << obj;
    CHECK_EQ(obj->GetReadBarrierPointer(), ReadBarrier::BlackPtr()) << obj;
    obj->SetReadBarrierPointer(ReadBarrier::WhitePtr());
    CHECK_EQ(obj->GetReadBarrierPointer(), ReadBarrier::WhitePtr()) << obj;
  }

 private:
  ConcurrentCopying* const collector_;
};

// Clear the black ptrs in non-moving objects back to white.
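// Black is used only as a "marked through" state for objects the collector does not move
// (non-moving spaces and unevacuated regions), because a gray->white CAS could race with a
// concurrent white->gray transition (see ProcessMarkStack). Those pointers have to be reset
// to white here so the next cycle starts from a clean state.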
void ConcurrentCopying::ClearBlackPtrs() {
  CHECK(kUseBakerReadBarrier);
  TimingLogger::ScopedTiming split("ClearBlackPtrs", GetTimings());
  ConcurrentCopyingClearBlackPtrsVisitor visitor(this);
  for (auto& space : heap_->GetContinuousSpaces()) {
    if (space == region_space_) {
      continue;
    }
    accounting::ContinuousSpaceBitmap* mark_bitmap = space->GetMarkBitmap();
    if (kVerboseMode) {
      LOG(INFO) << "ClearBlackPtrs: " << *space << " bitmap: " << *mark_bitmap;
    }
    mark_bitmap->VisitMarkedRange(reinterpret_cast<uintptr_t>(space->Begin()),
                                  reinterpret_cast<uintptr_t>(space->Limit()),
                                  visitor);
  }
  space::LargeObjectSpace* large_object_space = heap_->GetLargeObjectsSpace();
  large_object_space->GetMarkBitmap()->VisitMarkedRange(
      reinterpret_cast<uintptr_t>(large_object_space->Begin()),
      reinterpret_cast<uintptr_t>(large_object_space->End()),
      visitor);
  // Objects on the allocation stack?
  if (ReadBarrier::kEnableReadBarrierInvariantChecks || kIsDebugBuild) {
    size_t count = GetAllocationStack()->Size();
    auto* it = GetAllocationStack()->Begin();
    auto* end = GetAllocationStack()->End();
    for (size_t i = 0; i < count; ++i, ++it) {
      CHECK_LT(it, end);
      mirror::Object* obj = it->AsMirrorPtr();
      if (obj != nullptr) {
        // Must have been cleared above.
        CHECK_EQ(obj->GetReadBarrierPointer(), ReadBarrier::WhitePtr()) << obj;
      }
    }
  }
}

void ConcurrentCopying::ReclaimPhase() {
  TimingLogger::ScopedTiming split("ReclaimPhase", GetTimings());
  if (kVerboseMode) {
    LOG(INFO) << "GC ReclaimPhase";
  }
  Thread* self = Thread::Current();

  {
    // Double-check that the mark stack is empty.
    // Note: need to set this after VerifyNoFromSpaceRef().
    is_asserting_to_space_invariant_ = false;
    QuasiAtomic::ThreadFenceForConstructor();
    if (kVerboseMode) {
      LOG(INFO) << "Issue an empty check point. ";
    }
    IssueEmptyCheckpoint();
    // Disable the check.
    is_mark_queue_push_disallowed_.StoreSequentiallyConsistent(0);
    CheckEmptyMarkQueue();
  }

  {
    // Record freed objects.
    TimingLogger::ScopedTiming split2("RecordFree", GetTimings());
    // Don't include thread-locals that are in the to-space.
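    // Everything that was allocated in the evacuated from-space regions, minus what the
    // collector re-allocated for the surviving copies (bytes_moved_/objects_moved_), is
    // reported as freed. Unevacuated from-space regions are accounted separately and are not
    // freed here.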
    uint64_t from_bytes = region_space_->GetBytesAllocatedInFromSpace();
    uint64_t from_objects = region_space_->GetObjectsAllocatedInFromSpace();
    uint64_t unevac_from_bytes = region_space_->GetBytesAllocatedInUnevacFromSpace();
    uint64_t unevac_from_objects = region_space_->GetObjectsAllocatedInUnevacFromSpace();
    uint64_t to_bytes = bytes_moved_.LoadSequentiallyConsistent();
    uint64_t to_objects = objects_moved_.LoadSequentiallyConsistent();
    if (kEnableFromSpaceAccountingCheck) {
      CHECK_EQ(from_space_num_objects_at_first_pause_, from_objects + unevac_from_objects);
      CHECK_EQ(from_space_num_bytes_at_first_pause_, from_bytes + unevac_from_bytes);
    }
    CHECK_LE(to_objects, from_objects);
    CHECK_LE(to_bytes, from_bytes);
    int64_t freed_bytes = from_bytes - to_bytes;
    int64_t freed_objects = from_objects - to_objects;
    if (kVerboseMode) {
      LOG(INFO) << "RecordFree:"
                << " from_bytes=" << from_bytes << " from_objects=" << from_objects
                << " unevac_from_bytes=" << unevac_from_bytes << " unevac_from_objects=" << unevac_from_objects
                << " to_bytes=" << to_bytes << " to_objects=" << to_objects
                << " freed_bytes=" << freed_bytes << " freed_objects=" << freed_objects
                << " from_space size=" << region_space_->FromSpaceSize()
                << " unevac_from_space size=" << region_space_->UnevacFromSpaceSize()
                << " to_space size=" << region_space_->ToSpaceSize();
      LOG(INFO) << "(before) num_bytes_allocated=" << heap_->num_bytes_allocated_.LoadSequentiallyConsistent();
    }
    RecordFree(ObjectBytePair(freed_objects, freed_bytes));
    if (kVerboseMode) {
      LOG(INFO) << "(after) num_bytes_allocated=" << heap_->num_bytes_allocated_.LoadSequentiallyConsistent();
    }
  }

  {
    TimingLogger::ScopedTiming split3("ComputeUnevacFromSpaceLiveRatio", GetTimings());
    ComputeUnevacFromSpaceLiveRatio();
  }

  {
    TimingLogger::ScopedTiming split4("ClearFromSpace", GetTimings());
    region_space_->ClearFromSpace();
  }

  {
    WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
    if (kUseBakerReadBarrier) {
      ClearBlackPtrs();
    }
    Sweep(false);
    SwapBitmaps();
    heap_->UnBindBitmaps();

    // Remove bitmaps for the immune spaces.
    while (!cc_bitmaps_.empty()) {
      accounting::ContinuousSpaceBitmap* cc_bitmap = cc_bitmaps_.back();
      cc_heap_bitmap_->RemoveContinuousSpaceBitmap(cc_bitmap);
      delete cc_bitmap;
      cc_bitmaps_.pop_back();
    }
    region_space_bitmap_ = nullptr;
  }

  if (kVerboseMode) {
    LOG(INFO) << "GC end of ReclaimPhase";
  }
}

class ConcurrentCopyingComputeUnevacFromSpaceLiveRatioVisitor {
 public:
  explicit ConcurrentCopyingComputeUnevacFromSpaceLiveRatioVisitor(ConcurrentCopying* cc)
      : collector_(cc) {}
  void operator()(mirror::Object* ref) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
    DCHECK(ref != nullptr);
    CHECK(collector_->region_space_bitmap_->Test(ref)) << ref;
    CHECK(collector_->region_space_->IsInUnevacFromSpace(ref)) << ref;
    if (kUseBakerReadBarrier) {
      CHECK(ref->GetReadBarrierPointer() == ReadBarrier::BlackPtr()) << ref;
      // Clear the black ptr.
      ref->SetReadBarrierPointer(ReadBarrier::WhitePtr());
    }
    size_t obj_size = ref->SizeOf();
    size_t alloc_size = RoundUp(obj_size, space::RegionSpace::kAlignment);
    collector_->region_space_->AddLiveBytes(ref, alloc_size);
  }

 private:
  ConcurrentCopying* collector_;
};

// Compute how many live objects are left in regions.
void ConcurrentCopying::ComputeUnevacFromSpaceLiveRatio() {
  region_space_->AssertAllRegionLiveBytesZeroOrCleared();
  ConcurrentCopyingComputeUnevacFromSpaceLiveRatioVisitor visitor(this);
  region_space_bitmap_->VisitMarkedRange(reinterpret_cast<uintptr_t>(region_space_->Begin()),
                                         reinterpret_cast<uintptr_t>(region_space_->Limit()),
                                         visitor);
}

// Assert the to-space invariant.
void ConcurrentCopying::AssertToSpaceInvariant(mirror::Object* obj, MemberOffset offset,
                                               mirror::Object* ref) {
  CHECK(heap_->collector_type_ == kCollectorTypeCC) << static_cast<size_t>(heap_->collector_type_);
  if (is_asserting_to_space_invariant_) {
    if (region_space_->IsInToSpace(ref)) {
      // OK.
      return;
    } else if (region_space_->IsInUnevacFromSpace(ref)) {
      CHECK(region_space_bitmap_->Test(ref)) << ref;
    } else if (region_space_->IsInFromSpace(ref)) {
      // Not OK. Do extra logging.
      if (obj != nullptr) {
        if (kUseBakerReadBarrier) {
          LOG(INFO) << "holder=" << obj << " " << PrettyTypeOf(obj)
                    << " holder rb_ptr=" << obj->GetReadBarrierPointer();
        } else {
          LOG(INFO) << "holder=" << obj << " " << PrettyTypeOf(obj);
        }
        if (region_space_->IsInFromSpace(obj)) {
          LOG(INFO) << "holder is in the from-space.";
        } else if (region_space_->IsInToSpace(obj)) {
          LOG(INFO) << "holder is in the to-space.";
        } else if (region_space_->IsInUnevacFromSpace(obj)) {
          LOG(INFO) << "holder is in the unevac from-space.";
          if (region_space_bitmap_->Test(obj)) {
            LOG(INFO) << "holder is marked in the region space bitmap.";
          } else {
            LOG(INFO) << "holder is not marked in the region space bitmap.";
          }
        } else {
          // In a non-moving space.
          if (immune_region_.ContainsObject(obj)) {
            LOG(INFO) << "holder is in the image or the zygote space.";
            accounting::ContinuousSpaceBitmap* cc_bitmap =
                cc_heap_bitmap_->GetContinuousSpaceBitmap(obj);
            CHECK(cc_bitmap != nullptr)
                << "An immune space object must have a bitmap.";
            if (cc_bitmap->Test(obj)) {
              LOG(INFO) << "holder is marked in the bit map.";
            } else {
              LOG(INFO) << "holder is NOT marked in the bit map.";
            }
          } else {
            LOG(INFO) << "holder is in a non-moving (or main) space.";
            accounting::ContinuousSpaceBitmap* mark_bitmap =
                heap_mark_bitmap_->GetContinuousSpaceBitmap(obj);
            accounting::LargeObjectBitmap* los_bitmap =
                heap_mark_bitmap_->GetLargeObjectBitmap(obj);
            CHECK(los_bitmap != nullptr) << "LOS bitmap covers the entire address range";
            bool is_los = mark_bitmap == nullptr;
            if (!is_los && mark_bitmap->Test(obj)) {
              LOG(INFO) << "holder is marked in the mark bit map.";
            } else if (is_los && los_bitmap->Test(obj)) {
              LOG(INFO) << "holder is marked in the los bit map.";
            } else {
              // If ref is on the allocation stack, then it is considered
              // mark/alive (but not necessarily on the live stack.)
              if (IsOnAllocStack(obj)) {
                LOG(INFO) << "holder is on the alloc stack.";
              } else {
                LOG(INFO) << "holder is not marked or on the alloc stack.";
              }
            }
          }
        }
        LOG(INFO) << "offset=" << offset.SizeValue();
      }
      CHECK(false) << "Found from-space ref " << ref << " " << PrettyTypeOf(ref);
    } else {
      // In a non-moving space. Check that the ref is marked.
      if (immune_region_.ContainsObject(ref)) {
        accounting::ContinuousSpaceBitmap* cc_bitmap =
            cc_heap_bitmap_->GetContinuousSpaceBitmap(ref);
        CHECK(cc_bitmap != nullptr)
            << "An immune space ref must have a bitmap. " << ref;
        if (kUseBakerReadBarrier) {
          CHECK(cc_bitmap->Test(ref))
              << "Unmarked immune space ref. obj=" << obj << " rb_ptr="
              << obj->GetReadBarrierPointer() << " ref=" << ref;
        } else {
          CHECK(cc_bitmap->Test(ref))
              << "Unmarked immune space ref. obj=" << obj << " ref=" << ref;
        }
      } else {
        accounting::ContinuousSpaceBitmap* mark_bitmap =
            heap_mark_bitmap_->GetContinuousSpaceBitmap(ref);
        accounting::LargeObjectBitmap* los_bitmap =
            heap_mark_bitmap_->GetLargeObjectBitmap(ref);
        CHECK(los_bitmap != nullptr) << "LOS bitmap covers the entire address range";
        bool is_los = mark_bitmap == nullptr;
        if ((!is_los && mark_bitmap->Test(ref)) ||
            (is_los && los_bitmap->Test(ref))) {
          // OK.
        } else {
          // If ref is on the allocation stack, then it may not be
          // marked live, but considered marked/alive (but not
          // necessarily on the live stack).
          CHECK(IsOnAllocStack(ref)) << "Unmarked ref that's not on the allocation stack. "
                                     << "obj=" << obj << " ref=" << ref;
        }
      }
    }
  }
}

void ConcurrentCopying::ProcessRootCallback(mirror::Object** root, void* arg,
                                            const RootInfo& /*root_info*/) {
  reinterpret_cast<ConcurrentCopying*>(arg)->Process(root);
}

// Used to scan ref fields of an object.
class ConcurrentCopyingRefFieldsVisitor {
 public:
  explicit ConcurrentCopyingRefFieldsVisitor(ConcurrentCopying* collector)
      : collector_(collector) {}

  void operator()(mirror::Object* obj, MemberOffset offset, bool /* is_static */)
      const ALWAYS_INLINE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
    collector_->Process(obj, offset);
  }

  void operator()(mirror::Class* klass, mirror::Reference* ref) const
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) ALWAYS_INLINE {
    CHECK(klass->IsTypeOfReferenceClass());
    collector_->DelayReferenceReferent(klass, ref);
  }

 private:
  ConcurrentCopying* const collector_;
};

// Scan ref fields of an object.
void ConcurrentCopying::Scan(mirror::Object* to_ref) {
  DCHECK(!region_space_->IsInFromSpace(to_ref));
  ConcurrentCopyingRefFieldsVisitor visitor(this);
  to_ref->VisitReferences<true>(visitor, visitor);
}

// Process a field.
inline void ConcurrentCopying::Process(mirror::Object* obj, MemberOffset offset) {
  mirror::Object* ref = obj->GetFieldObject<mirror::Object, kVerifyNone, kWithoutReadBarrier, false>(offset);
  if (ref == nullptr || region_space_->IsInToSpace(ref)) {
    return;
  }
  mirror::Object* to_ref = Mark(ref);
  if (to_ref == ref) {
    return;
  }
  // This may fail if the mutator writes to the field at the same time. But it's ok.
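  // Retry loop for the weak CAS: keep trying to install the forwarded reference while the
  // field still holds the old from-space reference, and stop as soon as the mutator is seen
  // to have stored a different value (a weak CAS may also fail spuriously, hence the loop).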
  mirror::Object* expected_ref = ref;
  mirror::Object* new_ref = to_ref;
  do {
    if (expected_ref !=
        obj->GetFieldObject<mirror::Object, kVerifyNone, kWithoutReadBarrier, false>(offset)) {
      // It was updated by the mutator.
      break;
    }
  } while (!obj->CasFieldWeakSequentiallyConsistentObjectWithoutWriteBarrier<false, false, kVerifyNone>(
      offset, expected_ref, new_ref));
}

// Process a root.
void ConcurrentCopying::Process(mirror::Object** root) {
  mirror::Object* ref = *root;
  if (ref == nullptr || region_space_->IsInToSpace(ref)) {
    return;
  }
  mirror::Object* to_ref = Mark(ref);
  if (to_ref == ref) {
    return;
  }
  Atomic<mirror::Object*>* addr = reinterpret_cast<Atomic<mirror::Object*>*>(root);
  mirror::Object* expected_ref = ref;
  mirror::Object* new_ref = to_ref;
  do {
    if (expected_ref != addr->LoadRelaxed()) {
      // It was updated by the mutator.
      break;
    }
  } while (!addr->CompareExchangeWeakSequentiallyConsistent(expected_ref, new_ref));
}

// Fill the given memory block with a dummy object. Used to fill in a
// copy of an object that was lost in a race.
void ConcurrentCopying::FillWithDummyObject(mirror::Object* dummy_obj, size_t byte_size) {
  CHECK(IsAligned<kObjectAlignment>(byte_size));
  memset(dummy_obj, 0, byte_size);
  mirror::Class* int_array_class = mirror::IntArray::GetArrayClass();
  CHECK(int_array_class != nullptr);
  AssertToSpaceInvariant(nullptr, MemberOffset(0), int_array_class);
  size_t component_size = int_array_class->GetComponentSize();
  CHECK_EQ(component_size, sizeof(int32_t));
  size_t data_offset = mirror::Array::DataOffset(component_size).SizeValue();
  if (data_offset > byte_size) {
    // An int array is too big. Use java.lang.Object.
    mirror::Class* java_lang_Object = WellKnownClasses::ToClass(WellKnownClasses::java_lang_Object);
    AssertToSpaceInvariant(nullptr, MemberOffset(0), java_lang_Object);
    CHECK_EQ(byte_size, java_lang_Object->GetObjectSize());
    dummy_obj->SetClass(java_lang_Object);
    CHECK_EQ(byte_size, dummy_obj->SizeOf());
  } else {
    // Use an int array.
    dummy_obj->SetClass(int_array_class);
    CHECK(dummy_obj->IsArrayInstance());
    int32_t length = (byte_size - data_offset) / component_size;
    dummy_obj->AsArray()->SetLength(length);
    CHECK_EQ(dummy_obj->AsArray()->GetLength(), length)
        << "byte_size=" << byte_size << " length=" << length
        << " component_size=" << component_size << " data_offset=" << data_offset;
    CHECK_EQ(byte_size, dummy_obj->SizeOf())
        << "byte_size=" << byte_size << " length=" << length
        << " component_size=" << component_size << " data_offset=" << data_offset;
  }
}

// Reuse the memory blocks that were copies of objects lost in races.
mirror::Object* ConcurrentCopying::AllocateInSkippedBlock(size_t alloc_size) {
  // Try to reuse the blocks that were unused due to CAS failures.
  CHECK(IsAligned<space::RegionSpace::kAlignment>(alloc_size));
  Thread* self = Thread::Current();
  size_t min_object_size = RoundUp(sizeof(mirror::Object), space::RegionSpace::kAlignment);
  MutexLock mu(self, skipped_blocks_lock_);
  auto it = skipped_blocks_map_.lower_bound(alloc_size);
  if (it == skipped_blocks_map_.end()) {
    // Not found.
    return nullptr;
  }
  {
    size_t byte_size = it->first;
    CHECK_GE(byte_size, alloc_size);
    if (byte_size > alloc_size && byte_size - alloc_size < min_object_size) {
      // If remainder would be too small for a dummy object, retry with a larger request size.
      it = skipped_blocks_map_.lower_bound(alloc_size + min_object_size);
      if (it == skipped_blocks_map_.end()) {
        // Not found.
        return nullptr;
      }
      CHECK(IsAligned<space::RegionSpace::kAlignment>(it->first - alloc_size));
      CHECK_GE(it->first - alloc_size, min_object_size)
          << "byte_size=" << byte_size << " it->first=" << it->first << " alloc_size=" << alloc_size;
    }
  }
  // Found a block.
  CHECK(it != skipped_blocks_map_.end());
  size_t byte_size = it->first;
  uint8_t* addr = it->second;
  CHECK_GE(byte_size, alloc_size);
  CHECK(region_space_->IsInToSpace(reinterpret_cast<mirror::Object*>(addr)));
  CHECK(IsAligned<space::RegionSpace::kAlignment>(byte_size));
  if (kVerboseMode) {
    LOG(INFO) << "Reusing skipped bytes : " << reinterpret_cast<void*>(addr) << ", " << byte_size;
  }
  skipped_blocks_map_.erase(it);
  memset(addr, 0, byte_size);
  if (byte_size > alloc_size) {
    // Return the remainder to the map.
    CHECK(IsAligned<space::RegionSpace::kAlignment>(byte_size - alloc_size));
    CHECK_GE(byte_size - alloc_size, min_object_size);
    FillWithDummyObject(reinterpret_cast<mirror::Object*>(addr + alloc_size),
                        byte_size - alloc_size);
    CHECK(region_space_->IsInToSpace(reinterpret_cast<mirror::Object*>(addr + alloc_size)));
    skipped_blocks_map_.insert(std::make_pair(byte_size - alloc_size, addr + alloc_size));
  }
  return reinterpret_cast<mirror::Object*>(addr);
}

mirror::Object* ConcurrentCopying::Copy(mirror::Object* from_ref) {
  DCHECK(region_space_->IsInFromSpace(from_ref));
  // No read barrier to avoid nested RB that might violate the to-space
  // invariant. Note that from_ref is a from space ref so the SizeOf()
  // call will access the from-space meta objects, but it's ok and necessary.
  size_t obj_size = from_ref->SizeOf<kDefaultVerifyFlags, kWithoutReadBarrier>();
  size_t region_space_alloc_size = RoundUp(obj_size, space::RegionSpace::kAlignment);
  size_t region_space_bytes_allocated = 0U;
  size_t non_moving_space_bytes_allocated = 0U;
  size_t bytes_allocated = 0U;
  mirror::Object* to_ref = region_space_->AllocNonvirtual<true>(
      region_space_alloc_size, &region_space_bytes_allocated, nullptr);
  bytes_allocated = region_space_bytes_allocated;
  if (to_ref != nullptr) {
    DCHECK_EQ(region_space_alloc_size, region_space_bytes_allocated);
  }
  bool fall_back_to_non_moving = false;
  if (UNLIKELY(to_ref == nullptr)) {
    // Failed to allocate in the region space. Try the skipped blocks.
    to_ref = AllocateInSkippedBlock(region_space_alloc_size);
    if (to_ref != nullptr) {
      // Succeeded to allocate in a skipped block.
      if (heap_->use_tlab_) {
        // This is necessary for the tlab case as it's not accounted in the space.
        region_space_->RecordAlloc(to_ref);
      }
      bytes_allocated = region_space_alloc_size;
    } else {
      // Fall back to the non-moving space.
      fall_back_to_non_moving = true;
      if (kVerboseMode) {
        LOG(INFO) << "Out of memory in the to-space. Fall back to non-moving. skipped_bytes="
                  << to_space_bytes_skipped_.LoadSequentiallyConsistent()
                  << " skipped_objects=" << to_space_objects_skipped_.LoadSequentiallyConsistent();
      }
      fall_back_to_non_moving = true;
      to_ref = heap_->non_moving_space_->Alloc(Thread::Current(), obj_size,
                                               &non_moving_space_bytes_allocated, nullptr);
      CHECK(to_ref != nullptr) << "Fall-back non-moving space allocation failed";
      bytes_allocated = non_moving_space_bytes_allocated;
      // Mark it in the mark bitmap.
      accounting::ContinuousSpaceBitmap* mark_bitmap =
          heap_mark_bitmap_->GetContinuousSpaceBitmap(to_ref);
      CHECK(mark_bitmap != nullptr);
      CHECK(!mark_bitmap->AtomicTestAndSet(to_ref));
    }
  }
  DCHECK(to_ref != nullptr);

  // Attempt to install the forward pointer. This is in a loop as the
  // lock word atomic write can fail.
  while (true) {
    // Copy the object. TODO: copy only the lockword in the second iteration and on?
    memcpy(to_ref, from_ref, obj_size);
    // Set the gray ptr.
    if (kUseBakerReadBarrier) {
      to_ref->SetReadBarrierPointer(ReadBarrier::GrayPtr());
    }

    LockWord old_lock_word = to_ref->GetLockWord(false);

    if (old_lock_word.GetState() == LockWord::kForwardingAddress) {
      // Lost the race. Another thread (either GC or mutator) stored
      // the forwarding pointer first. Make the lost copy (to_ref)
      // look like a valid but dead (dummy) object and keep it for
      // future reuse.
      FillWithDummyObject(to_ref, bytes_allocated);
      if (!fall_back_to_non_moving) {
        DCHECK(region_space_->IsInToSpace(to_ref));
        if (bytes_allocated > space::RegionSpace::kRegionSize) {
          // Free the large alloc.
          region_space_->FreeLarge(to_ref, bytes_allocated);
        } else {
          // Record the lost copy for later reuse.
          heap_->num_bytes_allocated_.FetchAndAddSequentiallyConsistent(bytes_allocated);
          to_space_bytes_skipped_.FetchAndAddSequentiallyConsistent(bytes_allocated);
          to_space_objects_skipped_.FetchAndAddSequentiallyConsistent(1);
          MutexLock mu(Thread::Current(), skipped_blocks_lock_);
          skipped_blocks_map_.insert(std::make_pair(bytes_allocated,
                                                    reinterpret_cast<uint8_t*>(to_ref)));
        }
      } else {
        DCHECK(heap_->non_moving_space_->HasAddress(to_ref));
        DCHECK_EQ(bytes_allocated, non_moving_space_bytes_allocated);
        // Free the non-moving-space chunk.
        accounting::ContinuousSpaceBitmap* mark_bitmap =
            heap_mark_bitmap_->GetContinuousSpaceBitmap(to_ref);
        CHECK(mark_bitmap != nullptr);
        CHECK(mark_bitmap->Clear(to_ref));
        heap_->non_moving_space_->Free(Thread::Current(), to_ref);
      }

      // Get the winner's forward ptr.
      mirror::Object* lost_fwd_ptr = to_ref;
      to_ref = reinterpret_cast<mirror::Object*>(old_lock_word.ForwardingAddress());
      CHECK(to_ref != nullptr);
      CHECK_NE(to_ref, lost_fwd_ptr);
      CHECK(region_space_->IsInToSpace(to_ref) || heap_->non_moving_space_->HasAddress(to_ref));
      CHECK_NE(to_ref->GetLockWord(false).GetState(), LockWord::kForwardingAddress);
      return to_ref;
    }

    LockWord new_lock_word = LockWord::FromForwardingAddress(reinterpret_cast<size_t>(to_ref));

    // Try to atomically write the fwd ptr.
    bool success = from_ref->CasLockWordWeakSequentiallyConsistent(old_lock_word, new_lock_word);
    if (LIKELY(success)) {
      // The CAS succeeded.
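      // This thread won the race to install the forwarding address, so it owns the copy:
      // account the moved bytes/objects and publish to_ref (still gray) on the mark stack so
      // that its fields get scanned.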
      objects_moved_.FetchAndAddSequentiallyConsistent(1);
      bytes_moved_.FetchAndAddSequentiallyConsistent(region_space_alloc_size);
      if (LIKELY(!fall_back_to_non_moving)) {
        DCHECK(region_space_->IsInToSpace(to_ref));
      } else {
        DCHECK(heap_->non_moving_space_->HasAddress(to_ref));
        DCHECK_EQ(bytes_allocated, non_moving_space_bytes_allocated);
      }
      if (kUseBakerReadBarrier) {
        DCHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr());
      }
      DCHECK(GetFwdPtr(from_ref) == to_ref);
      CHECK_NE(to_ref->GetLockWord(false).GetState(), LockWord::kForwardingAddress);
      PushOntoMarkStack<true>(to_ref);
      return to_ref;
    } else {
      // The CAS failed. It may have lost the race or may have failed
      // due to monitor/hashcode ops. Either way, retry.
    }
  }
}

mirror::Object* ConcurrentCopying::IsMarked(mirror::Object* from_ref) {
  DCHECK(from_ref != nullptr);
  if (region_space_->IsInToSpace(from_ref)) {
    // It's already marked.
    return from_ref;
  }
  mirror::Object* to_ref;
  if (region_space_->IsInFromSpace(from_ref)) {
    to_ref = GetFwdPtr(from_ref);
    DCHECK(to_ref == nullptr || region_space_->IsInToSpace(to_ref) ||
           heap_->non_moving_space_->HasAddress(to_ref))
        << "from_ref=" << from_ref << " to_ref=" << to_ref;
  } else if (region_space_->IsInUnevacFromSpace(from_ref)) {
    if (region_space_bitmap_->Test(from_ref)) {
      to_ref = from_ref;
    } else {
      to_ref = nullptr;
    }
  } else {
    // from_ref is in a non-moving space.
    if (immune_region_.ContainsObject(from_ref)) {
      accounting::ContinuousSpaceBitmap* cc_bitmap =
          cc_heap_bitmap_->GetContinuousSpaceBitmap(from_ref);
      DCHECK(cc_bitmap != nullptr)
          << "An immune space object must have a bitmap";
      if (kIsDebugBuild) {
        DCHECK(heap_mark_bitmap_->GetContinuousSpaceBitmap(from_ref)->Test(from_ref))
            << "Immune space object must be already marked";
      }
      if (cc_bitmap->Test(from_ref)) {
        // Already marked.
        to_ref = from_ref;
      } else {
        // Not marked.
        to_ref = nullptr;
      }
    } else {
      // Non-immune non-moving space. Use the mark bitmap.
      accounting::ContinuousSpaceBitmap* mark_bitmap =
          heap_mark_bitmap_->GetContinuousSpaceBitmap(from_ref);
      accounting::LargeObjectBitmap* los_bitmap =
          heap_mark_bitmap_->GetLargeObjectBitmap(from_ref);
      CHECK(los_bitmap != nullptr) << "LOS bitmap covers the entire address range";
      bool is_los = mark_bitmap == nullptr;
      if (!is_los && mark_bitmap->Test(from_ref)) {
        // Already marked.
        to_ref = from_ref;
      } else if (is_los && los_bitmap->Test(from_ref)) {
        // Already marked in LOS.
        to_ref = from_ref;
      } else {
        // Not marked.
        if (IsOnAllocStack(from_ref)) {
          // If on the allocation stack, it's considered marked.
          to_ref = from_ref;
        } else {
          // Not marked.

bool ConcurrentCopying::IsOnAllocStack(mirror::Object* ref) {
  QuasiAtomic::ThreadFenceAcquire();
  accounting::ObjectStack* alloc_stack = GetAllocationStack();
  return alloc_stack->Contains(ref);
}

mirror::Object* ConcurrentCopying::Mark(mirror::Object* from_ref) {
  if (from_ref == nullptr) {
    return nullptr;
  }
  DCHECK(from_ref != nullptr);
  DCHECK(heap_->collector_type_ == kCollectorTypeCC);
  if (region_space_->IsInToSpace(from_ref)) {
    // It's already marked.
    return from_ref;
  }
  mirror::Object* to_ref;
  if (region_space_->IsInFromSpace(from_ref)) {
    to_ref = GetFwdPtr(from_ref);
    if (kUseBakerReadBarrier) {
      DCHECK(to_ref != ReadBarrier::GrayPtr()) << "from_ref=" << from_ref << " to_ref=" << to_ref;
    }
    if (to_ref == nullptr) {
      // It isn't marked yet. Mark it by copying it to the to-space.
      to_ref = Copy(from_ref);
    }
    DCHECK(region_space_->IsInToSpace(to_ref) || heap_->non_moving_space_->HasAddress(to_ref))
        << "from_ref=" << from_ref << " to_ref=" << to_ref;
  } else if (region_space_->IsInUnevacFromSpace(from_ref)) {
    // This may or may not succeed, which is ok.
    if (kUseBakerReadBarrier) {
      from_ref->AtomicSetReadBarrierPointer(ReadBarrier::WhitePtr(), ReadBarrier::GrayPtr());
    }
    if (region_space_bitmap_->AtomicTestAndSet(from_ref)) {
      // Already marked.
      to_ref = from_ref;
    } else {
      // Newly marked.
      to_ref = from_ref;
      if (kUseBakerReadBarrier) {
        DCHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr());
      }
      PushOntoMarkStack<true>(to_ref);
    }
  } else {
    // from_ref is in a non-moving space.
    DCHECK(!region_space_->HasAddress(from_ref)) << from_ref;
    if (immune_region_.ContainsObject(from_ref)) {
      accounting::ContinuousSpaceBitmap* cc_bitmap =
          cc_heap_bitmap_->GetContinuousSpaceBitmap(from_ref);
      DCHECK(cc_bitmap != nullptr)
          << "An immune space object must have a bitmap";
      if (kIsDebugBuild) {
        DCHECK(heap_mark_bitmap_->GetContinuousSpaceBitmap(from_ref)->Test(from_ref))
            << "Immune space object must be already marked";
      }
      // This may or may not succeed, which is ok.
      if (kUseBakerReadBarrier) {
        from_ref->AtomicSetReadBarrierPointer(ReadBarrier::WhitePtr(), ReadBarrier::GrayPtr());
      }
      if (cc_bitmap->AtomicTestAndSet(from_ref)) {
        // Already marked.
        to_ref = from_ref;
      } else {
        // Newly marked.
        to_ref = from_ref;
        if (kUseBakerReadBarrier) {
          DCHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr());
        }
        PushOntoMarkStack<true>(to_ref);
      }
    } else {
      // Use the mark bitmap.
      accounting::ContinuousSpaceBitmap* mark_bitmap =
          heap_mark_bitmap_->GetContinuousSpaceBitmap(from_ref);
      accounting::LargeObjectBitmap* los_bitmap =
          heap_mark_bitmap_->GetLargeObjectBitmap(from_ref);
      CHECK(los_bitmap != nullptr) << "LOS bitmap covers the entire address range";
      bool is_los = mark_bitmap == nullptr;
      if (!is_los && mark_bitmap->Test(from_ref)) {
        // Already marked.
        to_ref = from_ref;
        if (kUseBakerReadBarrier) {
          DCHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr() ||
                 to_ref->GetReadBarrierPointer() == ReadBarrier::BlackPtr());
        }
      } else if (is_los && los_bitmap->Test(from_ref)) {
        // Already marked in LOS.
        to_ref = from_ref;
        if (kUseBakerReadBarrier) {
          DCHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr() ||
                 to_ref->GetReadBarrierPointer() == ReadBarrier::BlackPtr());
        }
      } else {
        // Not marked.
        if (IsOnAllocStack(from_ref)) {
          // If it's on the allocation stack, it's considered marked. Keep it white.
          to_ref = from_ref;
          // Objects on the allocation stack need not be marked.
          if (!is_los) {
            DCHECK(!mark_bitmap->Test(to_ref));
          } else {
            DCHECK(!los_bitmap->Test(to_ref));
          }
          if (kUseBakerReadBarrier) {
            DCHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::WhitePtr());
          }
        } else {
          // Neither marked nor on the allocation stack. Try to mark it.
          // This may or may not succeed, which is ok.
          if (kUseBakerReadBarrier) {
            from_ref->AtomicSetReadBarrierPointer(ReadBarrier::WhitePtr(), ReadBarrier::GrayPtr());
          }
          if (!is_los && mark_bitmap->AtomicTestAndSet(from_ref)) {
            // Already marked.
            to_ref = from_ref;
          } else if (is_los && los_bitmap->AtomicTestAndSet(from_ref)) {
            // Already marked in LOS.
            to_ref = from_ref;
          } else {
            // Newly marked.
            to_ref = from_ref;
            if (kUseBakerReadBarrier) {
              DCHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr());
            }
            PushOntoMarkStack<true>(to_ref);
          }
        }
      }
    }
  }
  return to_ref;
}
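
// Illustrative sketch (guarded out of the build, not part of the collector):
// with a Baker-style barrier, marking moves an object's read-barrier state
// from white to gray with a CAS before its fields are scanned. In Mark()
// above it is the mark bitmap, not the color CAS, that decides which thread
// pushes the object onto the mark stack; the sketch below collapses the two
// for brevity. RbState, RbObject and MarkGray are hypothetical names and
// <atomic>/<cstdint> are assumed.
#if 0
#include <atomic>
#include <cstdint>

enum class RbState : uint32_t { kWhite = 0, kGray = 1, kBlack = 2 };

struct RbObject {
  std::atomic<RbState> rb_state{RbState::kWhite};
};

// Attempts the white -> gray transition. Returns true if this caller won the
// race and is therefore responsible for scanning the object and pushing it
// onto the mark stack; false means another thread got there first, or the
// object was already gray or black.
bool MarkGray(RbObject* obj) {
  RbState expected = RbState::kWhite;
  return obj->rb_state.compare_exchange_strong(expected, RbState::kGray,
                                               std::memory_order_acq_rel,
                                               std::memory_order_relaxed);
}
#endif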

void ConcurrentCopying::FinishPhase() {
  region_space_ = nullptr;
  CHECK(mark_queue_.IsEmpty());
  mark_queue_.Clear();
  {
    MutexLock mu(Thread::Current(), skipped_blocks_lock_);
    skipped_blocks_map_.clear();
  }
  WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
  heap_->ClearMarkedObjects();
}

mirror::Object* ConcurrentCopying::IsMarkedCallback(mirror::Object* from_ref, void* arg) {
  return reinterpret_cast<ConcurrentCopying*>(arg)->IsMarked(from_ref);
}

bool ConcurrentCopying::IsHeapReferenceMarkedCallback(
    mirror::HeapReference<mirror::Object>* field, void* arg) {
  mirror::Object* from_ref = field->AsMirrorPtr();
  mirror::Object* to_ref = reinterpret_cast<ConcurrentCopying*>(arg)->IsMarked(from_ref);
  if (to_ref == nullptr) {
    return false;
  }
  if (from_ref != to_ref) {
    QuasiAtomic::ThreadFenceRelease();
    field->Assign(to_ref);
    QuasiAtomic::ThreadFenceSequentiallyConsistent();
  }
  return true;
}

mirror::Object* ConcurrentCopying::MarkCallback(mirror::Object* from_ref, void* arg) {
  return reinterpret_cast<ConcurrentCopying*>(arg)->Mark(from_ref);
}

void ConcurrentCopying::ProcessMarkStackCallback(void* arg) {
  reinterpret_cast<ConcurrentCopying*>(arg)->ProcessMarkStack();
}

void ConcurrentCopying::DelayReferenceReferent(mirror::Class* klass, mirror::Reference* reference) {
  heap_->GetReferenceProcessor()->DelayReferenceReferent(
      klass, reference, &IsHeapReferenceMarkedCallback, this);
}

void ConcurrentCopying::ProcessReferences(Thread* self, bool concurrent) {
  TimingLogger::ScopedTiming split("ProcessReferences", GetTimings());
  WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
  GetHeap()->GetReferenceProcessor()->ProcessReferences(
      concurrent, GetTimings(), GetCurrentIteration()->GetClearSoftReferences(),
      &IsHeapReferenceMarkedCallback, &MarkCallback, &ProcessMarkStackCallback, this);
}
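
// Illustrative sketch (guarded out of the build, not part of the collector):
// the static *Callback functions above are the usual C-style adapter pattern:
// a function with an extra void* parameter that is cast back to the instance,
// letting the ReferenceProcessor invoke collector members without knowing the
// ConcurrentCopying type. Widget and ApplyToAll are hypothetical names.
#if 0
#include <cstddef>

class Widget {
 public:
  int Frob(int x) { return x + bias_; }

  // Adapter with a plain function-pointer-compatible signature; 'arg' smuggles
  // the instance through an interface that only understands free functions.
  static int FrobCallback(int x, void* arg) {
    return reinterpret_cast<Widget*>(arg)->Frob(x);
  }

 private:
  int bias_ = 42;
};

// A consumer that knows nothing about Widget, only about the callback shape.
int ApplyToAll(const int* values, size_t count, int (*fn)(int, void*), void* arg) {
  int sum = 0;
  for (size_t i = 0; i < count; ++i) {
    sum += fn(values[i], arg);
  }
  return sum;
}

// Usage: Widget w; int total = ApplyToAll(data, n, &Widget::FrobCallback, &w);
#endif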

void ConcurrentCopying::RevokeAllThreadLocalBuffers() {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  region_space_->RevokeAllThreadLocalBuffers();
}

}  // namespace collector
}  // namespace gc
}  // namespace art