concurrent_copying.cc revision 184c9dc3bfc500134fdb2fbea0a7fab290c0abb0
/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "concurrent_copying.h"

#include "gc/accounting/heap_bitmap-inl.h"
#include "gc/accounting/space_bitmap-inl.h"
#include "gc/space/image_space.h"
#include "gc/space/space.h"
#include "intern_table.h"
#include "mirror/art_field-inl.h"
#include "mirror/object-inl.h"
#include "scoped_thread_state_change.h"
#include "thread-inl.h"
#include "thread_list.h"
#include "well_known_classes.h"

namespace art {
namespace gc {
namespace collector {

ConcurrentCopying::ConcurrentCopying(Heap* heap, const std::string& name_prefix)
    : GarbageCollector(heap,
                       name_prefix + (name_prefix.empty() ? "" : " ") +
                       "concurrent copying + mark sweep"),
      region_space_(nullptr), gc_barrier_(new Barrier(0)), mark_queue_(2 * MB),
      is_marking_(false), is_active_(false), is_asserting_to_space_invariant_(false),
      heap_mark_bitmap_(nullptr), live_stack_freeze_size_(0),
      skipped_blocks_lock_("concurrent copying bytes blocks lock", kMarkSweepMarkStackLock),
      rb_table_(heap_->GetReadBarrierTable()),
      force_evacuate_all_(false) {
  static_assert(space::RegionSpace::kRegionSize == accounting::ReadBarrierTable::kRegionSize,
                "The region space size and the read barrier table region size must match");
  cc_heap_bitmap_.reset(new accounting::HeapBitmap(heap));
  {
    Thread* self = Thread::Current();
    ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
    // Cache this so that we won't have to lock heap_bitmap_lock_ in
    // Mark(), which could otherwise cause a nested lock on
    // heap_bitmap_lock_ when a read barrier runs during GC, or a lock
    // order violation (class_linker_lock_ and heap_bitmap_lock_).
    heap_mark_bitmap_ = heap->GetMarkBitmap();
  }
}

ConcurrentCopying::~ConcurrentCopying() {
}

void ConcurrentCopying::RunPhases() {
  CHECK(kUseBakerReadBarrier || kUseTableLookupReadBarrier);
  CHECK(!is_active_);
  is_active_ = true;
  Thread* self = Thread::Current();
  Locks::mutator_lock_->AssertNotHeld(self);
  {
    ReaderMutexLock mu(self, *Locks::mutator_lock_);
    InitializePhase();
  }
  FlipThreadRoots();
  {
    ReaderMutexLock mu(self, *Locks::mutator_lock_);
    MarkingPhase();
  }
  // Verify no from space refs. This causes a pause.
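  // The verification below re-walks the roots, the to-space, and the
  // non-moving spaces in a stop-the-world pause and fails hard on any
  // lingering from-space pointer, so it is only enabled in debug builds
  // or when kEnableNoFromSpaceRefsVerification is set.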
  if (kEnableNoFromSpaceRefsVerification || kIsDebugBuild) {
    TimingLogger::ScopedTiming split("(Paused)VerifyNoFromSpaceReferences", GetTimings());
    ScopedPause pause(this);
    CheckEmptyMarkQueue();
    if (kVerboseMode) {
      LOG(INFO) << "Verifying no from-space refs";
    }
    VerifyNoFromSpaceReferences();
    CheckEmptyMarkQueue();
  }
  {
    ReaderMutexLock mu(self, *Locks::mutator_lock_);
    ReclaimPhase();
  }
  FinishPhase();
  CHECK(is_active_);
  is_active_ = false;
}

void ConcurrentCopying::BindBitmaps() {
  Thread* self = Thread::Current();
  WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
  // Mark all of the spaces we never collect as immune.
  for (const auto& space : heap_->GetContinuousSpaces()) {
    if (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyNeverCollect
        || space->GetGcRetentionPolicy() == space::kGcRetentionPolicyFullCollect) {
      CHECK(space->IsZygoteSpace() || space->IsImageSpace());
      CHECK(immune_region_.AddContinuousSpace(space)) << "Failed to add space " << *space;
      const char* bitmap_name = space->IsImageSpace() ? "cc image space bitmap" :
          "cc zygote space bitmap";
      // TODO: try avoiding using bitmaps for image/zygote to save space.
      accounting::ContinuousSpaceBitmap* bitmap =
          accounting::ContinuousSpaceBitmap::Create(bitmap_name, space->Begin(), space->Capacity());
      cc_heap_bitmap_->AddContinuousSpaceBitmap(bitmap);
      cc_bitmaps_.push_back(bitmap);
    } else if (space == region_space_) {
      accounting::ContinuousSpaceBitmap* bitmap =
          accounting::ContinuousSpaceBitmap::Create("cc region space bitmap",
                                                    space->Begin(), space->Capacity());
      cc_heap_bitmap_->AddContinuousSpaceBitmap(bitmap);
      cc_bitmaps_.push_back(bitmap);
      region_space_bitmap_ = bitmap;
    }
  }
}

void ConcurrentCopying::InitializePhase() {
  TimingLogger::ScopedTiming split("InitializePhase", GetTimings());
  if (kVerboseMode) {
    LOG(INFO) << "GC InitializePhase";
    LOG(INFO) << "Region-space : " << reinterpret_cast<void*>(region_space_->Begin()) << "-"
              << reinterpret_cast<void*>(region_space_->Limit());
  }
  CHECK(mark_queue_.IsEmpty());
  immune_region_.Reset();
  bytes_moved_.StoreRelaxed(0);
  objects_moved_.StoreRelaxed(0);
  if (GetCurrentIteration()->GetGcCause() == kGcCauseExplicit ||
      GetCurrentIteration()->GetGcCause() == kGcCauseForNativeAlloc ||
      GetCurrentIteration()->GetClearSoftReferences()) {
    force_evacuate_all_ = true;
  } else {
    force_evacuate_all_ = false;
  }
  BindBitmaps();
  if (kVerboseMode) {
    LOG(INFO) << "force_evacuate_all=" << force_evacuate_all_;
    LOG(INFO) << "Immune region: " << immune_region_.Begin() << "-" << immune_region_.End();
    LOG(INFO) << "GC end of InitializePhase";
  }
}

// Used to switch the thread roots of a thread from from-space refs to to-space refs.
class ThreadFlipVisitor : public Closure {
 public:
  explicit ThreadFlipVisitor(ConcurrentCopying* concurrent_copying, bool use_tlab)
      : concurrent_copying_(concurrent_copying), use_tlab_(use_tlab) {
  }

  virtual void Run(Thread* thread) OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    // Note: self is not necessarily equal to thread since thread may be suspended.
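    // This closure runs once per mutator thread during the flip: either
    // on the thread itself or, if the thread stays suspended, on its
    // behalf. It revokes the thread's TLAB and thread-local allocation
    // stack and forwards the thread's roots to their to-space copies.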
    Thread* self = Thread::Current();
    CHECK(thread == self || thread->IsSuspended() || thread->GetState() == kWaitingPerformingGc)
        << thread->GetState() << " thread " << thread << " self " << self;
    if (use_tlab_ && thread->HasTlab()) {
      if (ConcurrentCopying::kEnableFromSpaceAccountingCheck) {
        // This must come before the revoke.
        size_t thread_local_objects = thread->GetThreadLocalObjectsAllocated();
        concurrent_copying_->region_space_->RevokeThreadLocalBuffers(thread);
        reinterpret_cast<Atomic<size_t>*>(
            &concurrent_copying_->from_space_num_objects_at_first_pause_)->
                FetchAndAddSequentiallyConsistent(thread_local_objects);
      } else {
        concurrent_copying_->region_space_->RevokeThreadLocalBuffers(thread);
      }
    }
    if (kUseThreadLocalAllocationStack) {
      thread->RevokeThreadLocalAllocationStack();
    }
    ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
    thread->VisitRoots(ConcurrentCopying::ProcessRootCallback, concurrent_copying_);
    concurrent_copying_->GetBarrier().Pass(self);
  }

 private:
  ConcurrentCopying* const concurrent_copying_;
  const bool use_tlab_;
};

// Called back from Runtime::FlipThreadRoots() during a pause.
class FlipCallback : public Closure {
 public:
  explicit FlipCallback(ConcurrentCopying* concurrent_copying)
      : concurrent_copying_(concurrent_copying) {
  }

  virtual void Run(Thread* thread) OVERRIDE EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_) {
    ConcurrentCopying* cc = concurrent_copying_;
    TimingLogger::ScopedTiming split("(Paused)FlipCallback", cc->GetTimings());
    // Note: self is not necessarily equal to thread since thread may be suspended.
    Thread* self = Thread::Current();
    CHECK(thread == self);
    Locks::mutator_lock_->AssertExclusiveHeld(self);
    cc->region_space_->SetFromSpace(cc->rb_table_, cc->force_evacuate_all_);
    cc->SwapStacks(self);
    if (ConcurrentCopying::kEnableFromSpaceAccountingCheck) {
      cc->RecordLiveStackFreezeSize(self);
      cc->from_space_num_objects_at_first_pause_ = cc->region_space_->GetObjectsAllocated();
      cc->from_space_num_bytes_at_first_pause_ = cc->region_space_->GetBytesAllocated();
    }
    cc->is_marking_ = true;
    if (UNLIKELY(Runtime::Current()->IsActiveTransaction())) {
      CHECK(Runtime::Current()->IsAotCompiler());
      TimingLogger::ScopedTiming split2("(Paused)VisitTransactionRoots", cc->GetTimings());
      Runtime::Current()->VisitTransactionRoots(ConcurrentCopying::ProcessRootCallback, cc);
    }
  }

 private:
  ConcurrentCopying* const concurrent_copying_;
};

// Switch thread roots from from-space refs to to-space refs. Forward/mark the thread roots.
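// Runtime::FlipThreadRoots() suspends everything once, runs FlipCallback
// (which flips the region space and swaps the allocation/live stacks),
// and arranges for each mutator thread's roots to be flipped, either by
// the thread itself as it resumes or by the GC on its behalf.
// barrier_count below is the number of threads flipping their own roots;
// the GC blocks on gc_barrier_ until each one has called Pass().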
void ConcurrentCopying::FlipThreadRoots() {
  TimingLogger::ScopedTiming split("FlipThreadRoots", GetTimings());
  if (kVerboseMode) {
    LOG(INFO) << "time=" << region_space_->Time();
    region_space_->DumpNonFreeRegions(LOG(INFO));
  }
  Thread* self = Thread::Current();
  Locks::mutator_lock_->AssertNotHeld(self);
  gc_barrier_->Init(self, 0);
  ThreadFlipVisitor thread_flip_visitor(this, heap_->use_tlab_);
  FlipCallback flip_callback(this);
  size_t barrier_count = Runtime::Current()->FlipThreadRoots(
      &thread_flip_visitor, &flip_callback, this);
  {
    ScopedThreadStateChange tsc(self, kWaitingForCheckPointsToRun);
    gc_barrier_->Increment(self, barrier_count);
  }
  is_asserting_to_space_invariant_ = true;
  QuasiAtomic::ThreadFenceForConstructor();
  if (kVerboseMode) {
    LOG(INFO) << "time=" << region_space_->Time();
    region_space_->DumpNonFreeRegions(LOG(INFO));
    LOG(INFO) << "GC end of FlipThreadRoots";
  }
}

void ConcurrentCopying::SwapStacks(Thread* self) {
  heap_->SwapStacks(self);
}

void ConcurrentCopying::RecordLiveStackFreezeSize(Thread* self) {
  WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
  live_stack_freeze_size_ = heap_->GetLiveStack()->Size();
}

// Used to visit objects in the immune spaces.
class ConcurrentCopyingImmuneSpaceObjVisitor {
 public:
  explicit ConcurrentCopyingImmuneSpaceObjVisitor(ConcurrentCopying* cc)
      : collector_(cc) {}

  void operator()(mirror::Object* obj) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
    DCHECK(obj != nullptr);
    DCHECK(collector_->immune_region_.ContainsObject(obj));
    accounting::ContinuousSpaceBitmap* cc_bitmap =
        collector_->cc_heap_bitmap_->GetContinuousSpaceBitmap(obj);
    DCHECK(cc_bitmap != nullptr)
        << "An immune space object must have a bitmap";
    if (kIsDebugBuild) {
      DCHECK(collector_->heap_->GetMarkBitmap()->Test(obj))
          << "Immune space object must be already marked";
    }
    // This may or may not succeed, which is ok.
    if (kUseBakerReadBarrier) {
      obj->AtomicSetReadBarrierPointer(ReadBarrier::WhitePtr(), ReadBarrier::GrayPtr());
    }
    if (cc_bitmap->AtomicTestAndSet(obj)) {
      // Already marked. Do nothing.
    } else {
      // Newly marked. Set the gray bit and push it onto the mark stack.
      CHECK(!kUseBakerReadBarrier || obj->GetReadBarrierPointer() == ReadBarrier::GrayPtr());
      collector_->PushOntoMarkStack<true>(obj);
    }
  }

 private:
  ConcurrentCopying* collector_;
};

class EmptyCheckpoint : public Closure {
 public:
  explicit EmptyCheckpoint(ConcurrentCopying* concurrent_copying)
      : concurrent_copying_(concurrent_copying) {
  }

  virtual void Run(Thread* thread) OVERRIDE NO_THREAD_SAFETY_ANALYSIS {
    // Note: self is not necessarily equal to thread since thread may be suspended.
    Thread* self = Thread::Current();
    CHECK(thread == self || thread->IsSuspended() || thread->GetState() == kWaitingPerformingGc)
        << thread->GetState() << " thread " << thread << " self " << self;
    // If thread is a running mutator, then act on behalf of the garbage collector.
    // See the code in ThreadList::RunCheckpoint.
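    // The checkpoint body is intentionally empty: its only effect is to
    // drive every runnable thread through a suspend point, so that any
    // in-flight read-barrier marking becomes visible to the GC before it
    // re-examines the mark stack.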
    if (thread->GetState() == kRunnable) {
      concurrent_copying_->GetBarrier().Pass(self);
    }
  }

 private:
  ConcurrentCopying* const concurrent_copying_;
};

// Concurrently mark roots that are guarded by read barriers and process the mark stack.
void ConcurrentCopying::MarkingPhase() {
  TimingLogger::ScopedTiming split("MarkingPhase", GetTimings());
  if (kVerboseMode) {
    LOG(INFO) << "GC MarkingPhase";
  }
  {
    // Mark the image root. The WB-based collectors do not need to
    // scan the image objects from roots by relying on the card table,
    // but it's necessary for the RB to-space invariant to hold.
    TimingLogger::ScopedTiming split1("VisitImageRoots", GetTimings());
    gc::space::ImageSpace* image = heap_->GetImageSpace();
    if (image != nullptr) {
      mirror::ObjectArray<mirror::Object>* image_root = image->GetImageHeader().GetImageRoots();
      mirror::Object* marked_image_root = Mark(image_root);
      CHECK_EQ(image_root, marked_image_root) << "An image object does not move";
      if (ReadBarrier::kEnableToSpaceInvariantChecks) {
        AssertToSpaceInvariant(nullptr, MemberOffset(0), marked_image_root);
      }
    }
  }
  {
    TimingLogger::ScopedTiming split2("VisitConstantRoots", GetTimings());
    Runtime::Current()->VisitConstantRoots(ProcessRootCallback, this);
  }
  {
    TimingLogger::ScopedTiming split3("VisitInternTableRoots", GetTimings());
    Runtime::Current()->GetInternTable()->VisitRoots(ProcessRootCallback,
                                                     this, kVisitRootFlagAllRoots);
  }
  {
    TimingLogger::ScopedTiming split4("VisitClassLinkerRoots", GetTimings());
    Runtime::Current()->GetClassLinker()->VisitRoots(ProcessRootCallback,
                                                     this, kVisitRootFlagAllRoots);
  }
  {
    // TODO: don't visit the transaction roots if it's not active.
    TimingLogger::ScopedTiming split5("VisitNonThreadRoots", GetTimings());
    Runtime::Current()->VisitNonThreadRoots(ProcessRootCallback, this);
  }

  // Immune spaces.
  for (auto& space : heap_->GetContinuousSpaces()) {
    if (immune_region_.ContainsSpace(space)) {
      DCHECK(space->IsImageSpace() || space->IsZygoteSpace());
      accounting::ContinuousSpaceBitmap* live_bitmap = space->GetLiveBitmap();
      ConcurrentCopyingImmuneSpaceObjVisitor visitor(this);
      live_bitmap->VisitMarkedRange(reinterpret_cast<uintptr_t>(space->Begin()),
                                    reinterpret_cast<uintptr_t>(space->Limit()),
                                    visitor);
    }
  }

  Thread* self = Thread::Current();
  {
    TimingLogger::ScopedTiming split6("ProcessMarkStack", GetTimings());
    // Process the mark stack and issue an empty check point. If the
    // mark stack is still empty after the check point, we're
    // done. Otherwise, repeat.
    ProcessMarkStack();
    size_t count = 0;
    while (!ProcessMarkStack()) {
      ++count;
      if (kVerboseMode) {
        LOG(INFO) << "Issue an empty check point. " << count;
      }
      IssueEmptyCheckpoint();
    }
    // Need to ensure the mark stack is empty before reference
    // processing to get rid of non-reference gray objects.
    CheckEmptyMarkQueue();
    // Enable the GetReference slow path and disallow access to the system weaks.
    GetHeap()->GetReferenceProcessor()->EnableSlowPath();
    Runtime::Current()->DisallowNewSystemWeaks();
    QuasiAtomic::ThreadFenceForConstructor();
    // Lock-unlock the system weak locks so that there's no thread in
    // the middle of accessing system weaks.
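    // EnsureNewSystemWeaksDisallowed() cycles each system weak lock once.
    // Combined with the fence above, any mutator that read the old
    // "allowed" state has drained before the weaks are swept below.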
    Runtime::Current()->EnsureNewSystemWeaksDisallowed();
    // Note: Do not issue a checkpoint from here to the
    // SweepSystemWeaks call or else a deadlock due to
    // WaitHoldingLocks() would occur.
    if (kVerboseMode) {
      LOG(INFO) << "Enabled the ref proc slow path & disabled access to system weaks.";
      LOG(INFO) << "ProcessReferences";
    }
    ProcessReferences(self, true);
    CheckEmptyMarkQueue();
    if (kVerboseMode) {
      LOG(INFO) << "SweepSystemWeaks";
    }
    SweepSystemWeaks(self);
    if (kVerboseMode) {
      LOG(INFO) << "SweepSystemWeaks done";
    }
    // Because hash_set::Erase() can call the hash function for
    // arbitrary elements in the weak intern table in
    // InternTable::Table::SweepWeaks(), the above SweepSystemWeaks()
    // call may have marked some objects (strings) alive. So process
    // the mark stack here once again.
    ProcessMarkStack();
    CheckEmptyMarkQueue();
    // Disable marking.
    if (kUseTableLookupReadBarrier) {
      heap_->rb_table_->ClearAll();
      DCHECK(heap_->rb_table_->IsAllCleared());
    }
    is_mark_queue_push_disallowed_.StoreSequentiallyConsistent(1);
    is_marking_ = false;
    if (kVerboseMode) {
      LOG(INFO) << "AllowNewSystemWeaks";
    }
    Runtime::Current()->AllowNewSystemWeaks();
    CheckEmptyMarkQueue();
  }

  if (kVerboseMode) {
    LOG(INFO) << "GC end of MarkingPhase";
  }
}

void ConcurrentCopying::IssueEmptyCheckpoint() {
  Thread* self = Thread::Current();
  EmptyCheckpoint check_point(this);
  ThreadList* thread_list = Runtime::Current()->GetThreadList();
  gc_barrier_->Init(self, 0);
  size_t barrier_count = thread_list->RunCheckpoint(&check_point);
  // If there are no threads to wait for, which implies that all the
  // checkpoint functions have finished, then there is no need to
  // release the mutator lock.
  if (barrier_count == 0) {
    return;
  }
  // Release locks then wait for all mutator threads to pass the barrier.
  Locks::mutator_lock_->SharedUnlock(self);
  {
    ScopedThreadStateChange tsc(self, kWaitingForCheckPointsToRun);
    gc_barrier_->Increment(self, barrier_count);
  }
  Locks::mutator_lock_->SharedLock(self);
}

mirror::Object* ConcurrentCopying::PopOffMarkStack() {
  return mark_queue_.Dequeue();
}

template<bool kThreadSafe>
void ConcurrentCopying::PushOntoMarkStack(mirror::Object* to_ref) {
  CHECK_EQ(is_mark_queue_push_disallowed_.LoadRelaxed(), 0)
      << " " << to_ref << " " << PrettyTypeOf(to_ref);
  if (kThreadSafe) {
    CHECK(mark_queue_.Enqueue(to_ref)) << "Mark queue overflow";
  } else {
    CHECK(mark_queue_.EnqueueThreadUnsafe(to_ref)) << "Mark queue overflow";
  }
}

accounting::ObjectStack* ConcurrentCopying::GetAllocationStack() {
  return heap_->allocation_stack_.get();
}

accounting::ObjectStack* ConcurrentCopying::GetLiveStack() {
  return heap_->live_stack_.get();
}

inline mirror::Object* ConcurrentCopying::GetFwdPtr(mirror::Object* from_ref) {
  DCHECK(region_space_->IsInFromSpace(from_ref));
  LockWord lw = from_ref->GetLockWord(false);
  if (lw.GetState() == LockWord::kForwardingAddress) {
    mirror::Object* fwd_ptr = reinterpret_cast<mirror::Object*>(lw.ForwardingAddress());
    CHECK(fwd_ptr != nullptr);
    return fwd_ptr;
  } else {
    return nullptr;
  }
}

// The following visitors are used to verify that there are no
// references to the from-space left after marking.
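// Under the Baker read barrier the checks below encode the expected
// colors at this point: a to-space object must be white (null rb_ptr),
// while a non-moving or unevac object must be black (marked through),
// or white if it is only considered live via the allocation stack.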
class ConcurrentCopyingVerifyNoFromSpaceRefsVisitor {
 public:
  explicit ConcurrentCopyingVerifyNoFromSpaceRefsVisitor(ConcurrentCopying* collector)
      : collector_(collector) {}

  void operator()(mirror::Object* ref) const
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) ALWAYS_INLINE {
    if (ref == nullptr) {
      // OK.
      return;
    }
    collector_->AssertToSpaceInvariant(nullptr, MemberOffset(0), ref);
    if (kUseBakerReadBarrier) {
      if (collector_->RegionSpace()->IsInToSpace(ref)) {
        CHECK(ref->GetReadBarrierPointer() == nullptr)
            << "To-space ref " << ref << " " << PrettyTypeOf(ref)
            << " has non-white rb_ptr " << ref->GetReadBarrierPointer();
      } else {
        CHECK(ref->GetReadBarrierPointer() == ReadBarrier::BlackPtr() ||
              (ref->GetReadBarrierPointer() == ReadBarrier::WhitePtr() &&
               collector_->IsOnAllocStack(ref)))
            << "Non-moving/unevac from space ref " << ref << " " << PrettyTypeOf(ref)
            << " has non-black rb_ptr " << ref->GetReadBarrierPointer()
            << " but isn't on the alloc stack (and has white rb_ptr)."
            << " Is it in the non-moving space="
            << (collector_->GetHeap()->GetNonMovingSpace()->HasAddress(ref));
      }
    }
  }

  static void RootCallback(mirror::Object** root, void* arg, const RootInfo& /*root_info*/)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    ConcurrentCopying* collector = reinterpret_cast<ConcurrentCopying*>(arg);
    ConcurrentCopyingVerifyNoFromSpaceRefsVisitor visitor(collector);
    DCHECK(root != nullptr);
    visitor(*root);
  }

 private:
  ConcurrentCopying* collector_;
};

class ConcurrentCopyingVerifyNoFromSpaceRefsFieldVisitor {
 public:
  explicit ConcurrentCopyingVerifyNoFromSpaceRefsFieldVisitor(ConcurrentCopying* collector)
      : collector_(collector) {}

  void operator()(mirror::Object* obj, MemberOffset offset, bool /* is_static */) const
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) ALWAYS_INLINE {
    mirror::Object* ref =
        obj->GetFieldObject<mirror::Object, kDefaultVerifyFlags, kWithoutReadBarrier>(offset);
    ConcurrentCopyingVerifyNoFromSpaceRefsVisitor visitor(collector_);
    visitor(ref);
  }
  void operator()(mirror::Class* klass, mirror::Reference* ref) const
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) ALWAYS_INLINE {
    CHECK(klass->IsTypeOfReferenceClass());
    this->operator()(ref, mirror::Reference::ReferentOffset(), false);
  }

 private:
  ConcurrentCopying* collector_;
};

class ConcurrentCopyingVerifyNoFromSpaceRefsObjectVisitor {
 public:
  explicit ConcurrentCopyingVerifyNoFromSpaceRefsObjectVisitor(ConcurrentCopying* collector)
      : collector_(collector) {}
  void operator()(mirror::Object* obj) const
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    ObjectCallback(obj, collector_);
  }
  static void ObjectCallback(mirror::Object* obj, void* arg)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    CHECK(obj != nullptr);
    ConcurrentCopying* collector = reinterpret_cast<ConcurrentCopying*>(arg);
    space::RegionSpace* region_space = collector->RegionSpace();
    CHECK(!region_space->IsInFromSpace(obj)) << "Scanning object " << obj << " in from space";
    ConcurrentCopyingVerifyNoFromSpaceRefsFieldVisitor visitor(collector);
    obj->VisitReferences<true>(visitor, visitor);
    if (kUseBakerReadBarrier) {
      if (collector->RegionSpace()->IsInToSpace(obj)) {
        CHECK(obj->GetReadBarrierPointer() == nullptr)
            << "obj=" << obj << " non-white rb_ptr " <<
            obj->GetReadBarrierPointer();
      } else {
        CHECK(obj->GetReadBarrierPointer() == ReadBarrier::BlackPtr() ||
              (obj->GetReadBarrierPointer() == ReadBarrier::WhitePtr() &&
               collector->IsOnAllocStack(obj)))
            << "Non-moving space/unevac from space ref " << obj << " " << PrettyTypeOf(obj)
            << " has non-black rb_ptr " << obj->GetReadBarrierPointer()
            << " but isn't on the alloc stack (and has white rb_ptr). Is it in the non-moving space="
            << (collector->GetHeap()->GetNonMovingSpace()->HasAddress(obj));
      }
    }
  }

 private:
  ConcurrentCopying* const collector_;
};

// Verify there are no from-space references left after the marking phase.
void ConcurrentCopying::VerifyNoFromSpaceReferences() {
  Thread* self = Thread::Current();
  DCHECK(Locks::mutator_lock_->IsExclusiveHeld(self));
  ConcurrentCopyingVerifyNoFromSpaceRefsObjectVisitor visitor(this);
  // Roots.
  {
    ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
    Runtime::Current()->VisitRoots(
        ConcurrentCopyingVerifyNoFromSpaceRefsVisitor::RootCallback, this);
  }
  // The to-space.
  region_space_->WalkToSpace(ConcurrentCopyingVerifyNoFromSpaceRefsObjectVisitor::ObjectCallback,
                             this);
  // Non-moving spaces.
  {
    WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
    heap_->GetMarkBitmap()->Visit(visitor);
  }
  // The alloc stack.
  {
    ConcurrentCopyingVerifyNoFromSpaceRefsVisitor ref_visitor(this);
    for (auto* it = heap_->allocation_stack_->Begin(), *end = heap_->allocation_stack_->End();
         it < end; ++it) {
      mirror::Object* const obj = it->AsMirrorPtr();
      if (obj != nullptr && obj->GetClass() != nullptr) {
        // TODO: need to call this only if obj is alive?
        ref_visitor(obj);
        visitor(obj);
      }
    }
  }
  // TODO: LOS. But only refs in LOS are classes.
}

// The following visitors are used to assert the to-space invariant.
class ConcurrentCopyingAssertToSpaceInvariantRefsVisitor {
 public:
  explicit ConcurrentCopyingAssertToSpaceInvariantRefsVisitor(ConcurrentCopying* collector)
      : collector_(collector) {}

  void operator()(mirror::Object* ref) const
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) ALWAYS_INLINE {
    if (ref == nullptr) {
      // OK.
      return;
    }
    collector_->AssertToSpaceInvariant(nullptr, MemberOffset(0), ref);
  }
  static void RootCallback(mirror::Object** root, void* arg, const RootInfo& /*root_info*/)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    ConcurrentCopying* collector = reinterpret_cast<ConcurrentCopying*>(arg);
    ConcurrentCopyingAssertToSpaceInvariantRefsVisitor visitor(collector);
    DCHECK(root != nullptr);
    visitor(*root);
  }

 private:
  ConcurrentCopying* collector_;
};

class ConcurrentCopyingAssertToSpaceInvariantFieldVisitor {
 public:
  explicit ConcurrentCopyingAssertToSpaceInvariantFieldVisitor(ConcurrentCopying* collector)
      : collector_(collector) {}

  void operator()(mirror::Object* obj, MemberOffset offset, bool /* is_static */) const
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) ALWAYS_INLINE {
    mirror::Object* ref =
        obj->GetFieldObject<mirror::Object, kDefaultVerifyFlags, kWithoutReadBarrier>(offset);
    ConcurrentCopyingAssertToSpaceInvariantRefsVisitor visitor(collector_);
    visitor(ref);
  }
  void operator()(mirror::Class* klass, mirror::Reference* /* ref */) const
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) ALWAYS_INLINE {
    CHECK(klass->IsTypeOfReferenceClass());
  }

 private:
  ConcurrentCopying* collector_;
};

class ConcurrentCopyingAssertToSpaceInvariantObjectVisitor {
 public:
  explicit ConcurrentCopyingAssertToSpaceInvariantObjectVisitor(ConcurrentCopying* collector)
      : collector_(collector) {}
  void operator()(mirror::Object* obj) const
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    ObjectCallback(obj, collector_);
  }
  static void ObjectCallback(mirror::Object* obj, void* arg)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    CHECK(obj != nullptr);
    ConcurrentCopying* collector = reinterpret_cast<ConcurrentCopying*>(arg);
    space::RegionSpace* region_space = collector->RegionSpace();
    CHECK(!region_space->IsInFromSpace(obj)) << "Scanning object " << obj << " in from space";
    collector->AssertToSpaceInvariant(nullptr, MemberOffset(0), obj);
    ConcurrentCopyingAssertToSpaceInvariantFieldVisitor visitor(collector);
    obj->VisitReferences<true>(visitor, visitor);
  }

 private:
  ConcurrentCopying* collector_;
};

bool ConcurrentCopying::ProcessMarkStack() {
  if (kVerboseMode) {
    LOG(INFO) << "ProcessMarkStack. ";
  }
  size_t count = 0;
  mirror::Object* to_ref;
  while ((to_ref = PopOffMarkStack()) != nullptr) {
    ++count;
    DCHECK(!region_space_->IsInFromSpace(to_ref));
    if (kUseBakerReadBarrier) {
      DCHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr())
          << " " << to_ref << " " << to_ref->GetReadBarrierPointer()
          << " is_marked=" << IsMarked(to_ref);
    }
    // Scan ref fields.
    Scan(to_ref);
    // Mark the gray ref as white or black.
    if (kUseBakerReadBarrier) {
      DCHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr())
          << " " << to_ref << " " << to_ref->GetReadBarrierPointer()
          << " is_marked=" << IsMarked(to_ref);
    }
    if (to_ref->GetClass<kVerifyNone, kWithoutReadBarrier>()->IsTypeOfReferenceClass() &&
        to_ref->AsReference()->GetReferent<kWithoutReadBarrier>() != nullptr &&
        !IsInToSpace(to_ref->AsReference()->GetReferent<kWithoutReadBarrier>())) {
      // Leave References gray so that GetReferent() will trigger RB.
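      // Keeping a Reference with a still-white referent gray forces any
      // later GetReferent() call through the read-barrier slow path,
      // where the reference processor resolves the referent consistently.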
      CHECK(to_ref->AsReference()->IsEnqueued()) << "Left unenqueued ref gray " << to_ref;
    } else {
      if (kUseBakerReadBarrier) {
        if (region_space_->IsInToSpace(to_ref)) {
          // If to-space, change from gray to white.
          bool success = to_ref->AtomicSetReadBarrierPointer(ReadBarrier::GrayPtr(),
                                                             ReadBarrier::WhitePtr());
          CHECK(success) << "Must succeed as we won the race.";
          CHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::WhitePtr());
        } else {
          // If non-moving space/unevac from space, change from gray
          // to black. We can't change gray to white because it's not
          // safe to use CAS if two threads change values in opposite
          // directions (A->B and B->A). So, we change it to black to
          // indicate non-moving objects that have been marked
          // through. Note we'd need to change from black to white
          // later (concurrently).
          bool success = to_ref->AtomicSetReadBarrierPointer(ReadBarrier::GrayPtr(),
                                                             ReadBarrier::BlackPtr());
          CHECK(success) << "Must succeed as we won the race.";
          CHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::BlackPtr());
        }
      }
    }
    if (ReadBarrier::kEnableToSpaceInvariantChecks || kIsDebugBuild) {
      ConcurrentCopyingAssertToSpaceInvariantObjectVisitor visitor(this);
      visitor(to_ref);
    }
  }
  // Return true if the stack was empty.
  return count == 0;
}

void ConcurrentCopying::CheckEmptyMarkQueue() {
  if (!mark_queue_.IsEmpty()) {
    while (!mark_queue_.IsEmpty()) {
      mirror::Object* obj = mark_queue_.Dequeue();
      if (kUseBakerReadBarrier) {
        mirror::Object* rb_ptr = obj->GetReadBarrierPointer();
        LOG(INFO) << "On mark queue : " << obj << " " << PrettyTypeOf(obj) << " rb_ptr=" << rb_ptr
                  << " is_marked=" << IsMarked(obj);
      } else {
        LOG(INFO) << "On mark queue : " << obj << " " << PrettyTypeOf(obj)
                  << " is_marked=" << IsMarked(obj);
      }
    }
    LOG(FATAL) << "mark queue is not empty";
  }
}

void ConcurrentCopying::SweepSystemWeaks(Thread* self) {
  TimingLogger::ScopedTiming split("SweepSystemWeaks", GetTimings());
  ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
  Runtime::Current()->SweepSystemWeaks(IsMarkedCallback, this);
}

void ConcurrentCopying::Sweep(bool swap_bitmaps) {
  {
    TimingLogger::ScopedTiming t("MarkStackAsLive", GetTimings());
    accounting::ObjectStack* live_stack = heap_->GetLiveStack();
    if (kEnableFromSpaceAccountingCheck) {
      CHECK_GE(live_stack_freeze_size_, live_stack->Size());
    }
    heap_->MarkAllocStackAsLive(live_stack);
    live_stack->Reset();
  }
  CHECK(mark_queue_.IsEmpty());
  TimingLogger::ScopedTiming split("Sweep", GetTimings());
  for (const auto& space : GetHeap()->GetContinuousSpaces()) {
    if (space->IsContinuousMemMapAllocSpace()) {
      space::ContinuousMemMapAllocSpace* alloc_space = space->AsContinuousMemMapAllocSpace();
      if (space == region_space_ || immune_region_.ContainsSpace(space)) {
        continue;
      }
      TimingLogger::ScopedTiming split2(
          alloc_space->IsZygoteSpace() ?
"SweepZygoteSpace" : "SweepAllocSpace", GetTimings()); 796 RecordFree(alloc_space->Sweep(swap_bitmaps)); 797 } 798 } 799 SweepLargeObjects(swap_bitmaps); 800} 801 802void ConcurrentCopying::SweepLargeObjects(bool swap_bitmaps) { 803 TimingLogger::ScopedTiming split("SweepLargeObjects", GetTimings()); 804 RecordFreeLOS(heap_->GetLargeObjectsSpace()->Sweep(swap_bitmaps)); 805} 806 807class ConcurrentCopyingClearBlackPtrsVisitor { 808 public: 809 explicit ConcurrentCopyingClearBlackPtrsVisitor(ConcurrentCopying* cc) 810 : collector_(cc) {} 811 void operator()(mirror::Object* obj) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) 812 SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) { 813 DCHECK(obj != nullptr); 814 DCHECK(collector_->heap_->GetMarkBitmap()->Test(obj)) << obj; 815 DCHECK_EQ(obj->GetReadBarrierPointer(), ReadBarrier::BlackPtr()) << obj; 816 obj->SetReadBarrierPointer(ReadBarrier::WhitePtr()); 817 DCHECK_EQ(obj->GetReadBarrierPointer(), ReadBarrier::WhitePtr()) << obj; 818 } 819 820 private: 821 ConcurrentCopying* const collector_; 822}; 823 824// Clear the black ptrs in non-moving objects back to white. 825void ConcurrentCopying::ClearBlackPtrs() { 826 CHECK(kUseBakerReadBarrier); 827 TimingLogger::ScopedTiming split("ClearBlackPtrs", GetTimings()); 828 ConcurrentCopyingClearBlackPtrsVisitor visitor(this); 829 for (auto& space : heap_->GetContinuousSpaces()) { 830 if (space == region_space_) { 831 continue; 832 } 833 accounting::ContinuousSpaceBitmap* mark_bitmap = space->GetMarkBitmap(); 834 if (kVerboseMode) { 835 LOG(INFO) << "ClearBlackPtrs: " << *space << " bitmap: " << *mark_bitmap; 836 } 837 mark_bitmap->VisitMarkedRange(reinterpret_cast<uintptr_t>(space->Begin()), 838 reinterpret_cast<uintptr_t>(space->Limit()), 839 visitor); 840 } 841 space::LargeObjectSpace* large_object_space = heap_->GetLargeObjectsSpace(); 842 large_object_space->GetMarkBitmap()->VisitMarkedRange( 843 reinterpret_cast<uintptr_t>(large_object_space->Begin()), 844 reinterpret_cast<uintptr_t>(large_object_space->End()), 845 visitor); 846 // Objects on the allocation stack? 847 if (ReadBarrier::kEnableReadBarrierInvariantChecks || kIsDebugBuild) { 848 size_t count = GetAllocationStack()->Size(); 849 auto* it = GetAllocationStack()->Begin(); 850 auto* end = GetAllocationStack()->End(); 851 for (size_t i = 0; i < count; ++i, ++it) { 852 CHECK_LT(it, end); 853 mirror::Object* obj = it->AsMirrorPtr(); 854 if (obj != nullptr) { 855 // Must have been cleared above. 856 CHECK_EQ(obj->GetReadBarrierPointer(), ReadBarrier::WhitePtr()) << obj; 857 } 858 } 859 } 860} 861 862void ConcurrentCopying::ReclaimPhase() { 863 TimingLogger::ScopedTiming split("ReclaimPhase", GetTimings()); 864 if (kVerboseMode) { 865 LOG(INFO) << "GC ReclaimPhase"; 866 } 867 Thread* self = Thread::Current(); 868 869 { 870 // Double-check that the mark stack is empty. 871 // Note: need to set this after VerifyNoFromSpaceRef(). 872 is_asserting_to_space_invariant_ = false; 873 QuasiAtomic::ThreadFenceForConstructor(); 874 if (kVerboseMode) { 875 LOG(INFO) << "Issue an empty check point. "; 876 } 877 IssueEmptyCheckpoint(); 878 // Disable the check. 879 is_mark_queue_push_disallowed_.StoreSequentiallyConsistent(0); 880 CheckEmptyMarkQueue(); 881 } 882 883 { 884 // Record freed objects. 885 TimingLogger::ScopedTiming split2("RecordFree", GetTimings()); 886 // Don't include thread-locals that are in the to-space. 
    uint64_t from_bytes = region_space_->GetBytesAllocatedInFromSpace();
    uint64_t from_objects = region_space_->GetObjectsAllocatedInFromSpace();
    uint64_t unevac_from_bytes = region_space_->GetBytesAllocatedInUnevacFromSpace();
    uint64_t unevac_from_objects = region_space_->GetObjectsAllocatedInUnevacFromSpace();
    uint64_t to_bytes = bytes_moved_.LoadSequentiallyConsistent();
    uint64_t to_objects = objects_moved_.LoadSequentiallyConsistent();
    if (kEnableFromSpaceAccountingCheck) {
      CHECK_EQ(from_space_num_objects_at_first_pause_, from_objects + unevac_from_objects);
      CHECK_EQ(from_space_num_bytes_at_first_pause_, from_bytes + unevac_from_bytes);
    }
    CHECK_LE(to_objects, from_objects);
    CHECK_LE(to_bytes, from_bytes);
    int64_t freed_bytes = from_bytes - to_bytes;
    int64_t freed_objects = from_objects - to_objects;
    if (kVerboseMode) {
      LOG(INFO) << "RecordFree:"
                << " from_bytes=" << from_bytes << " from_objects=" << from_objects
                << " unevac_from_bytes=" << unevac_from_bytes
                << " unevac_from_objects=" << unevac_from_objects
                << " to_bytes=" << to_bytes << " to_objects=" << to_objects
                << " freed_bytes=" << freed_bytes << " freed_objects=" << freed_objects
                << " from_space size=" << region_space_->FromSpaceSize()
                << " unevac_from_space size=" << region_space_->UnevacFromSpaceSize()
                << " to_space size=" << region_space_->ToSpaceSize();
      LOG(INFO) << "(before) num_bytes_allocated="
                << heap_->num_bytes_allocated_.LoadSequentiallyConsistent();
    }
    RecordFree(ObjectBytePair(freed_objects, freed_bytes));
    if (kVerboseMode) {
      LOG(INFO) << "(after) num_bytes_allocated="
                << heap_->num_bytes_allocated_.LoadSequentiallyConsistent();
    }
  }

  {
    TimingLogger::ScopedTiming split3("ComputeUnevacFromSpaceLiveRatio", GetTimings());
    ComputeUnevacFromSpaceLiveRatio();
  }

  {
    TimingLogger::ScopedTiming split4("ClearFromSpace", GetTimings());
    region_space_->ClearFromSpace();
  }

  {
    WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
    if (kUseBakerReadBarrier) {
      ClearBlackPtrs();
    }
    Sweep(false);
    SwapBitmaps();
    heap_->UnBindBitmaps();

    // Remove bitmaps for the immune spaces.
    while (!cc_bitmaps_.empty()) {
      accounting::ContinuousSpaceBitmap* cc_bitmap = cc_bitmaps_.back();
      cc_heap_bitmap_->RemoveContinuousSpaceBitmap(cc_bitmap);
      delete cc_bitmap;
      cc_bitmaps_.pop_back();
    }
    region_space_bitmap_ = nullptr;
  }

  if (kVerboseMode) {
    LOG(INFO) << "GC end of ReclaimPhase";
  }
}

class ConcurrentCopyingComputeUnevacFromSpaceLiveRatioVisitor {
 public:
  explicit ConcurrentCopyingComputeUnevacFromSpaceLiveRatioVisitor(ConcurrentCopying* cc)
      : collector_(cc) {}
  void operator()(mirror::Object* ref) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
    DCHECK(ref != nullptr);
    DCHECK(collector_->region_space_bitmap_->Test(ref)) << ref;
    DCHECK(collector_->region_space_->IsInUnevacFromSpace(ref)) << ref;
    if (kUseBakerReadBarrier) {
      DCHECK_EQ(ref->GetReadBarrierPointer(), ReadBarrier::BlackPtr()) << ref;
      // Clear the black ptr.
      ref->SetReadBarrierPointer(ReadBarrier::WhitePtr());
    }
    size_t obj_size = ref->SizeOf();
    size_t alloc_size = RoundUp(obj_size, space::RegionSpace::kAlignment);
    collector_->region_space_->AddLiveBytes(ref, alloc_size);
  }

 private:
  ConcurrentCopying* collector_;
};

// Compute how many live objects are left in regions.
void ConcurrentCopying::ComputeUnevacFromSpaceLiveRatio() {
  region_space_->AssertAllRegionLiveBytesZeroOrCleared();
  ConcurrentCopyingComputeUnevacFromSpaceLiveRatioVisitor visitor(this);
  region_space_bitmap_->VisitMarkedRange(reinterpret_cast<uintptr_t>(region_space_->Begin()),
                                         reinterpret_cast<uintptr_t>(region_space_->Limit()),
                                         visitor);
}

// Assert the to-space invariant.
void ConcurrentCopying::AssertToSpaceInvariant(mirror::Object* obj, MemberOffset offset,
                                               mirror::Object* ref) {
  CHECK(heap_->collector_type_ == kCollectorTypeCC) << static_cast<size_t>(heap_->collector_type_);
  if (is_asserting_to_space_invariant_) {
    if (region_space_->IsInToSpace(ref)) {
      // OK.
      return;
    } else if (region_space_->IsInUnevacFromSpace(ref)) {
      CHECK(region_space_bitmap_->Test(ref)) << ref;
    } else if (region_space_->IsInFromSpace(ref)) {
      // Not OK. Do extra logging.
      if (obj != nullptr) {
        if (kUseBakerReadBarrier) {
          LOG(INFO) << "holder=" << obj << " " << PrettyTypeOf(obj)
                    << " holder rb_ptr=" << obj->GetReadBarrierPointer();
        } else {
          LOG(INFO) << "holder=" << obj << " " << PrettyTypeOf(obj);
        }
        if (region_space_->IsInFromSpace(obj)) {
          LOG(INFO) << "holder is in the from-space.";
        } else if (region_space_->IsInToSpace(obj)) {
          LOG(INFO) << "holder is in the to-space.";
        } else if (region_space_->IsInUnevacFromSpace(obj)) {
          LOG(INFO) << "holder is in the unevac from-space.";
          if (region_space_bitmap_->Test(obj)) {
            LOG(INFO) << "holder is marked in the region space bitmap.";
          } else {
            LOG(INFO) << "holder is not marked in the region space bitmap.";
          }
        } else {
          // In a non-moving space.
          if (immune_region_.ContainsObject(obj)) {
            LOG(INFO) << "holder is in the image or the zygote space.";
            accounting::ContinuousSpaceBitmap* cc_bitmap =
                cc_heap_bitmap_->GetContinuousSpaceBitmap(obj);
            CHECK(cc_bitmap != nullptr)
                << "An immune space object must have a bitmap.";
            if (cc_bitmap->Test(obj)) {
              LOG(INFO) << "holder is marked in the bit map.";
            } else {
              LOG(INFO) << "holder is NOT marked in the bit map.";
            }
          } else {
            LOG(INFO) << "holder is in a non-moving (or main) space.";
            accounting::ContinuousSpaceBitmap* mark_bitmap =
                heap_mark_bitmap_->GetContinuousSpaceBitmap(obj);
            accounting::LargeObjectBitmap* los_bitmap =
                heap_mark_bitmap_->GetLargeObjectBitmap(obj);
            CHECK(los_bitmap != nullptr) << "LOS bitmap covers the entire address range";
            bool is_los = mark_bitmap == nullptr;
            if (!is_los && mark_bitmap->Test(obj)) {
              LOG(INFO) << "holder is marked in the mark bit map.";
            } else if (is_los && los_bitmap->Test(obj)) {
              LOG(INFO) << "holder is marked in the los bit map.";
            } else {
              // If ref is on the allocation stack, then it is considered
              // marked/alive (but not necessarily on the live stack).
              if (IsOnAllocStack(obj)) {
                LOG(INFO) << "holder is on the alloc stack.";
              } else {
                LOG(INFO) << "holder is not marked or on the alloc stack.";
              }
            }
          }
        }
        LOG(INFO) << "offset=" << offset.SizeValue();
      }
      CHECK(false) << "Found from-space ref " << ref << " " << PrettyTypeOf(ref);
    } else {
      // In a non-moving space. Check that the ref is marked.
      if (immune_region_.ContainsObject(ref)) {
        accounting::ContinuousSpaceBitmap* cc_bitmap =
            cc_heap_bitmap_->GetContinuousSpaceBitmap(ref);
        CHECK(cc_bitmap != nullptr)
            << "An immune space ref must have a bitmap. " << ref;
        if (kUseBakerReadBarrier) {
          CHECK(cc_bitmap->Test(ref))
              << "Unmarked immune space ref. obj=" << obj << " rb_ptr="
              << obj->GetReadBarrierPointer() << " ref=" << ref;
        } else {
          CHECK(cc_bitmap->Test(ref))
              << "Unmarked immune space ref. obj=" << obj << " ref=" << ref;
        }
      } else {
        accounting::ContinuousSpaceBitmap* mark_bitmap =
            heap_mark_bitmap_->GetContinuousSpaceBitmap(ref);
        accounting::LargeObjectBitmap* los_bitmap =
            heap_mark_bitmap_->GetLargeObjectBitmap(ref);
        CHECK(los_bitmap != nullptr) << "LOS bitmap covers the entire address range";
        bool is_los = mark_bitmap == nullptr;
        if ((!is_los && mark_bitmap->Test(ref)) ||
            (is_los && los_bitmap->Test(ref))) {
          // OK.
        } else {
          // If ref is on the allocation stack, then it may not be
          // marked live, but considered marked/alive (but not
          // necessarily on the live stack).
          CHECK(IsOnAllocStack(ref)) << "Unmarked ref that's not on the allocation stack. "
                                     << "obj=" << obj << " ref=" << ref;
        }
      }
    }
  }
}

void ConcurrentCopying::ProcessRootCallback(mirror::Object** root, void* arg,
                                            const RootInfo& /*root_info*/) {
  reinterpret_cast<ConcurrentCopying*>(arg)->Process(root);
}

// Used to scan ref fields of an object.
class ConcurrentCopyingRefFieldsVisitor {
 public:
  explicit ConcurrentCopyingRefFieldsVisitor(ConcurrentCopying* collector)
      : collector_(collector) {}

  void operator()(mirror::Object* obj, MemberOffset offset, bool /* is_static */)
      const ALWAYS_INLINE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
    collector_->Process(obj, offset);
  }

  void operator()(mirror::Class* klass, mirror::Reference* ref) const
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) ALWAYS_INLINE {
    CHECK(klass->IsTypeOfReferenceClass());
    collector_->DelayReferenceReferent(klass, ref);
  }

 private:
  ConcurrentCopying* const collector_;
};

// Scan ref fields of an object.
void ConcurrentCopying::Scan(mirror::Object* to_ref) {
  DCHECK(!region_space_->IsInFromSpace(to_ref));
  ConcurrentCopyingRefFieldsVisitor visitor(this);
  to_ref->VisitReferences<true>(visitor, visitor);
}

// Process a field.
inline void ConcurrentCopying::Process(mirror::Object* obj, MemberOffset offset) {
  mirror::Object* ref =
      obj->GetFieldObject<mirror::Object, kVerifyNone, kWithoutReadBarrier, false>(offset);
  if (ref == nullptr || region_space_->IsInToSpace(ref)) {
    return;
  }
  mirror::Object* to_ref = Mark(ref);
  if (to_ref == ref) {
    return;
  }
  // This may fail if the mutator writes to the field at the same time. But it's ok.
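  // The CAS loop below only installs to_ref while the field still holds
  // the from-space ref we read. If a mutator stores a different value
  // concurrently, that value already satisfies the to-space invariant
  // (after the flip, mutators only write to-space refs), so the GC backs
  // off rather than clobbering it. Roughly:
  //   retry: if (field != ref) done;
  //          if (!cas(&field, ref, to_ref)) goto retry;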
  mirror::Object* expected_ref = ref;
  mirror::Object* new_ref = to_ref;
  do {
    if (expected_ref !=
        obj->GetFieldObject<mirror::Object, kVerifyNone, kWithoutReadBarrier, false>(offset)) {
      // It was updated by the mutator.
      break;
    }
  } while (!obj->CasFieldWeakSequentiallyConsistentObjectWithoutWriteBarrier<
      false, false, kVerifyNone>(offset, expected_ref, new_ref));
}

// Process a root.
void ConcurrentCopying::Process(mirror::Object** root) {
  mirror::Object* ref = *root;
  if (ref == nullptr || region_space_->IsInToSpace(ref)) {
    return;
  }
  mirror::Object* to_ref = Mark(ref);
  if (to_ref == ref) {
    return;
  }
  Atomic<mirror::Object*>* addr = reinterpret_cast<Atomic<mirror::Object*>*>(root);
  mirror::Object* expected_ref = ref;
  mirror::Object* new_ref = to_ref;
  do {
    if (expected_ref != addr->LoadRelaxed()) {
      // It was updated by the mutator.
      break;
    }
  } while (!addr->CompareExchangeWeakSequentiallyConsistent(expected_ref, new_ref));
}

// Fill the given memory block with a dummy object. Used to fill in copies
// of objects that were lost in the race.
void ConcurrentCopying::FillWithDummyObject(mirror::Object* dummy_obj, size_t byte_size) {
  CHECK(IsAligned<kObjectAlignment>(byte_size));
  memset(dummy_obj, 0, byte_size);
  mirror::Class* int_array_class = mirror::IntArray::GetArrayClass();
  CHECK(int_array_class != nullptr);
  AssertToSpaceInvariant(nullptr, MemberOffset(0), int_array_class);
  size_t component_size = int_array_class->GetComponentSize();
  CHECK_EQ(component_size, sizeof(int32_t));
  size_t data_offset = mirror::Array::DataOffset(component_size).SizeValue();
  if (data_offset > byte_size) {
    // An int array is too big. Use java.lang.Object.
    mirror::Class* java_lang_Object = WellKnownClasses::ToClass(WellKnownClasses::java_lang_Object);
    AssertToSpaceInvariant(nullptr, MemberOffset(0), java_lang_Object);
    CHECK_EQ(byte_size, java_lang_Object->GetObjectSize());
    dummy_obj->SetClass(java_lang_Object);
    CHECK_EQ(byte_size, dummy_obj->SizeOf());
  } else {
    // Use an int array.
    dummy_obj->SetClass(int_array_class);
    CHECK(dummy_obj->IsArrayInstance());
    int32_t length = (byte_size - data_offset) / component_size;
    dummy_obj->AsArray()->SetLength(length);
    CHECK_EQ(dummy_obj->AsArray()->GetLength(), length)
        << "byte_size=" << byte_size << " length=" << length
        << " component_size=" << component_size << " data_offset=" << data_offset;
    CHECK_EQ(byte_size, dummy_obj->SizeOf())
        << "byte_size=" << byte_size << " length=" << length
        << " component_size=" << component_size << " data_offset=" << data_offset;
  }
}

// Reuse the memory blocks that were copies of objects lost in the race.
mirror::Object* ConcurrentCopying::AllocateInSkippedBlock(size_t alloc_size) {
  // Try to reuse the blocks that were unused due to CAS failures.
  CHECK(IsAligned<space::RegionSpace::kAlignment>(alloc_size));
  Thread* self = Thread::Current();
  size_t min_object_size = RoundUp(sizeof(mirror::Object), space::RegionSpace::kAlignment);
  MutexLock mu(self, skipped_blocks_lock_);
  auto it = skipped_blocks_map_.lower_bound(alloc_size);
  if (it == skipped_blocks_map_.end()) {
    // Not found.
    return nullptr;
  }
  {
    size_t byte_size = it->first;
    CHECK_GE(byte_size, alloc_size);
    if (byte_size > alloc_size && byte_size - alloc_size < min_object_size) {
      // If the remainder would be too small for a dummy object, retry with a larger request size.
      it = skipped_blocks_map_.lower_bound(alloc_size + min_object_size);
      if (it == skipped_blocks_map_.end()) {
        // Not found.
        return nullptr;
      }
      CHECK(IsAligned<space::RegionSpace::kAlignment>(it->first - alloc_size));
      CHECK_GE(it->first - alloc_size, min_object_size)
          << "byte_size=" << byte_size << " it->first=" << it->first
          << " alloc_size=" << alloc_size;
    }
  }
  // Found a block.
  CHECK(it != skipped_blocks_map_.end());
  size_t byte_size = it->first;
  uint8_t* addr = it->second;
  CHECK_GE(byte_size, alloc_size);
  CHECK(region_space_->IsInToSpace(reinterpret_cast<mirror::Object*>(addr)));
  CHECK(IsAligned<space::RegionSpace::kAlignment>(byte_size));
  if (kVerboseMode) {
    LOG(INFO) << "Reusing skipped bytes : " << reinterpret_cast<void*>(addr) << ", " << byte_size;
  }
  skipped_blocks_map_.erase(it);
  memset(addr, 0, byte_size);
  if (byte_size > alloc_size) {
    // Return the remainder to the map.
    CHECK(IsAligned<space::RegionSpace::kAlignment>(byte_size - alloc_size));
    CHECK_GE(byte_size - alloc_size, min_object_size);
    FillWithDummyObject(reinterpret_cast<mirror::Object*>(addr + alloc_size),
                        byte_size - alloc_size);
    CHECK(region_space_->IsInToSpace(reinterpret_cast<mirror::Object*>(addr + alloc_size)));
    skipped_blocks_map_.insert(std::make_pair(byte_size - alloc_size, addr + alloc_size));
  }
  return reinterpret_cast<mirror::Object*>(addr);
}

mirror::Object* ConcurrentCopying::Copy(mirror::Object* from_ref) {
  DCHECK(region_space_->IsInFromSpace(from_ref));
  // No read barrier to avoid nested RB that might violate the to-space
  // invariant. Note that from_ref is a from space ref so the SizeOf()
  // call will access the from-space meta objects, but it's ok and necessary.
  size_t obj_size = from_ref->SizeOf<kDefaultVerifyFlags, kWithoutReadBarrier>();
  size_t region_space_alloc_size = RoundUp(obj_size, space::RegionSpace::kAlignment);
  size_t region_space_bytes_allocated = 0U;
  size_t non_moving_space_bytes_allocated = 0U;
  size_t bytes_allocated = 0U;
  mirror::Object* to_ref = region_space_->AllocNonvirtual<true>(
      region_space_alloc_size, &region_space_bytes_allocated, nullptr);
  bytes_allocated = region_space_bytes_allocated;
  if (to_ref != nullptr) {
    DCHECK_EQ(region_space_alloc_size, region_space_bytes_allocated);
  }
  bool fall_back_to_non_moving = false;
  if (UNLIKELY(to_ref == nullptr)) {
    // Failed to allocate in the region space. Try the skipped blocks.
    to_ref = AllocateInSkippedBlock(region_space_alloc_size);
    if (to_ref != nullptr) {
      // Succeeded in allocating from a skipped block.
      if (heap_->use_tlab_) {
        // This is necessary for the tlab case as it's not accounted in the space.
        region_space_->RecordAlloc(to_ref);
      }
      bytes_allocated = region_space_alloc_size;
    } else {
      // Fall back to the non-moving space.
      fall_back_to_non_moving = true;
      if (kVerboseMode) {
        LOG(INFO) << "Out of memory in the to-space. Fall back to non-moving. "
skipped_bytes=" 1284 << to_space_bytes_skipped_.LoadSequentiallyConsistent() 1285 << " skipped_objects=" << to_space_objects_skipped_.LoadSequentiallyConsistent(); 1286 } 1287 fall_back_to_non_moving = true; 1288 to_ref = heap_->non_moving_space_->Alloc(Thread::Current(), obj_size, 1289 &non_moving_space_bytes_allocated, nullptr); 1290 CHECK(to_ref != nullptr) << "Fall-back non-moving space allocation failed"; 1291 bytes_allocated = non_moving_space_bytes_allocated; 1292 // Mark it in the mark bitmap. 1293 accounting::ContinuousSpaceBitmap* mark_bitmap = 1294 heap_mark_bitmap_->GetContinuousSpaceBitmap(to_ref); 1295 CHECK(mark_bitmap != nullptr); 1296 CHECK(!mark_bitmap->AtomicTestAndSet(to_ref)); 1297 } 1298 } 1299 DCHECK(to_ref != nullptr); 1300 1301 // Attempt to install the forward pointer. This is in a loop as the 1302 // lock word atomic write can fail. 1303 while (true) { 1304 // Copy the object. TODO: copy only the lockword in the second iteration and on? 1305 memcpy(to_ref, from_ref, obj_size); 1306 // Set the gray ptr. 1307 if (kUseBakerReadBarrier) { 1308 to_ref->SetReadBarrierPointer(ReadBarrier::GrayPtr()); 1309 } 1310 1311 LockWord old_lock_word = to_ref->GetLockWord(false); 1312 1313 if (old_lock_word.GetState() == LockWord::kForwardingAddress) { 1314 // Lost the race. Another thread (either GC or mutator) stored 1315 // the forwarding pointer first. Make the lost copy (to_ref) 1316 // look like a valid but dead (dummy) object and keep it for 1317 // future reuse. 1318 FillWithDummyObject(to_ref, bytes_allocated); 1319 if (!fall_back_to_non_moving) { 1320 DCHECK(region_space_->IsInToSpace(to_ref)); 1321 if (bytes_allocated > space::RegionSpace::kRegionSize) { 1322 // Free the large alloc. 1323 region_space_->FreeLarge(to_ref, bytes_allocated); 1324 } else { 1325 // Record the lost copy for later reuse. 1326 heap_->num_bytes_allocated_.FetchAndAddSequentiallyConsistent(bytes_allocated); 1327 to_space_bytes_skipped_.FetchAndAddSequentiallyConsistent(bytes_allocated); 1328 to_space_objects_skipped_.FetchAndAddSequentiallyConsistent(1); 1329 MutexLock mu(Thread::Current(), skipped_blocks_lock_); 1330 skipped_blocks_map_.insert(std::make_pair(bytes_allocated, 1331 reinterpret_cast<uint8_t*>(to_ref))); 1332 } 1333 } else { 1334 DCHECK(heap_->non_moving_space_->HasAddress(to_ref)); 1335 DCHECK_EQ(bytes_allocated, non_moving_space_bytes_allocated); 1336 // Free the non-moving-space chunk. 1337 accounting::ContinuousSpaceBitmap* mark_bitmap = 1338 heap_mark_bitmap_->GetContinuousSpaceBitmap(to_ref); 1339 CHECK(mark_bitmap != nullptr); 1340 CHECK(mark_bitmap->Clear(to_ref)); 1341 heap_->non_moving_space_->Free(Thread::Current(), to_ref); 1342 } 1343 1344 // Get the winner's forward ptr. 1345 mirror::Object* lost_fwd_ptr = to_ref; 1346 to_ref = reinterpret_cast<mirror::Object*>(old_lock_word.ForwardingAddress()); 1347 CHECK(to_ref != nullptr); 1348 CHECK_NE(to_ref, lost_fwd_ptr); 1349 CHECK(region_space_->IsInToSpace(to_ref) || heap_->non_moving_space_->HasAddress(to_ref)); 1350 CHECK_NE(to_ref->GetLockWord(false).GetState(), LockWord::kForwardingAddress); 1351 return to_ref; 1352 } 1353 1354 LockWord new_lock_word = LockWord::FromForwardingAddress(reinterpret_cast<size_t>(to_ref)); 1355 1356 // Try to atomically write the fwd ptr. 1357 bool success = from_ref->CasLockWordWeakSequentiallyConsistent(old_lock_word, new_lock_word); 1358 if (LIKELY(success)) { 1359 // The CAS succeeded. 
      objects_moved_.FetchAndAddSequentiallyConsistent(1);
      bytes_moved_.FetchAndAddSequentiallyConsistent(region_space_alloc_size);
      if (LIKELY(!fall_back_to_non_moving)) {
        DCHECK(region_space_->IsInToSpace(to_ref));
      } else {
        DCHECK(heap_->non_moving_space_->HasAddress(to_ref));
        DCHECK_EQ(bytes_allocated, non_moving_space_bytes_allocated);
      }
      if (kUseBakerReadBarrier) {
        DCHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr());
      }
      DCHECK(GetFwdPtr(from_ref) == to_ref);
      CHECK_NE(to_ref->GetLockWord(false).GetState(), LockWord::kForwardingAddress);
      PushOntoMarkStack<true>(to_ref);
      return to_ref;
    } else {
      // The CAS failed. It may have lost the race or may have failed
      // due to monitor/hashcode ops. Either way, retry.
    }
  }
}

mirror::Object* ConcurrentCopying::IsMarked(mirror::Object* from_ref) {
  DCHECK(from_ref != nullptr);
  space::RegionSpace::RegionType rtype = region_space_->GetRegionType(from_ref);
  if (rtype == space::RegionSpace::RegionType::kRegionTypeToSpace) {
    // It's already marked.
    return from_ref;
  }
  mirror::Object* to_ref;
  if (rtype == space::RegionSpace::RegionType::kRegionTypeFromSpace) {
    to_ref = GetFwdPtr(from_ref);
    DCHECK(to_ref == nullptr || region_space_->IsInToSpace(to_ref) ||
           heap_->non_moving_space_->HasAddress(to_ref))
        << "from_ref=" << from_ref << " to_ref=" << to_ref;
  } else if (rtype == space::RegionSpace::RegionType::kRegionTypeUnevacFromSpace) {
    if (region_space_bitmap_->Test(from_ref)) {
      to_ref = from_ref;
    } else {
      to_ref = nullptr;
    }
  } else {
    // from_ref is in a non-moving space.
    if (immune_region_.ContainsObject(from_ref)) {
      accounting::ContinuousSpaceBitmap* cc_bitmap =
          cc_heap_bitmap_->GetContinuousSpaceBitmap(from_ref);
      DCHECK(cc_bitmap != nullptr)
          << "An immune space object must have a bitmap";
      if (kIsDebugBuild) {
        DCHECK(heap_mark_bitmap_->GetContinuousSpaceBitmap(from_ref)->Test(from_ref))
            << "Immune space object must be already marked";
      }
      if (cc_bitmap->Test(from_ref)) {
        // Already marked.
        to_ref = from_ref;
      } else {
        // Not marked.
        to_ref = nullptr;
      }
    } else {
      // Non-immune non-moving space. Use the mark bitmap.
      accounting::ContinuousSpaceBitmap* mark_bitmap =
          heap_mark_bitmap_->GetContinuousSpaceBitmap(from_ref);
      accounting::LargeObjectBitmap* los_bitmap =
          heap_mark_bitmap_->GetLargeObjectBitmap(from_ref);
      CHECK(los_bitmap != nullptr) << "LOS bitmap covers the entire address range";
      bool is_los = mark_bitmap == nullptr;
      if (!is_los && mark_bitmap->Test(from_ref)) {
        // Already marked.
        to_ref = from_ref;
      } else if (is_los && los_bitmap->Test(from_ref)) {
        // Already marked in LOS.
        to_ref = from_ref;
      } else {
        // Not marked.
        if (IsOnAllocStack(from_ref)) {
          // If on the allocation stack, it's considered marked.
          to_ref = from_ref;
        } else {
          // Not marked.
          to_ref = nullptr;
        }
      }
    }
  }
  return to_ref;
}

bool ConcurrentCopying::IsOnAllocStack(mirror::Object* ref) {
  QuasiAtomic::ThreadFenceAcquire();
  accounting::ObjectStack* alloc_stack = GetAllocationStack();
  return alloc_stack->Contains(ref);
}

mirror::Object* ConcurrentCopying::Mark(mirror::Object* from_ref) {
  if (from_ref == nullptr) {
    return nullptr;
  }
  DCHECK(from_ref != nullptr);
  DCHECK(heap_->collector_type_ == kCollectorTypeCC);
  space::RegionSpace::RegionType rtype = region_space_->GetRegionType(from_ref);
  if (rtype == space::RegionSpace::RegionType::kRegionTypeToSpace) {
    // It's already marked.
    return from_ref;
  }
  mirror::Object* to_ref;
  if (rtype == space::RegionSpace::RegionType::kRegionTypeFromSpace) {
    to_ref = GetFwdPtr(from_ref);
    if (kUseBakerReadBarrier) {
      DCHECK(to_ref != ReadBarrier::GrayPtr()) << "from_ref=" << from_ref << " to_ref=" << to_ref;
    }
    if (to_ref == nullptr) {
      // It isn't marked yet. Mark it by copying it to the to-space.
      to_ref = Copy(from_ref);
    }
    DCHECK(region_space_->IsInToSpace(to_ref) || heap_->non_moving_space_->HasAddress(to_ref))
        << "from_ref=" << from_ref << " to_ref=" << to_ref;
  } else if (rtype == space::RegionSpace::RegionType::kRegionTypeUnevacFromSpace) {
    // This may or may not succeed, which is ok.
    if (kUseBakerReadBarrier) {
      from_ref->AtomicSetReadBarrierPointer(ReadBarrier::WhitePtr(), ReadBarrier::GrayPtr());
    }
    if (region_space_bitmap_->AtomicTestAndSet(from_ref)) {
      // Already marked.
      to_ref = from_ref;
    } else {
      // Newly marked.
      to_ref = from_ref;
      if (kUseBakerReadBarrier) {
        DCHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr());
      }
      PushOntoMarkStack<true>(to_ref);
    }
  } else {
    // from_ref is in a non-moving space.
    DCHECK(!region_space_->HasAddress(from_ref)) << from_ref;
    if (immune_region_.ContainsObject(from_ref)) {
      accounting::ContinuousSpaceBitmap* cc_bitmap =
          cc_heap_bitmap_->GetContinuousSpaceBitmap(from_ref);
      DCHECK(cc_bitmap != nullptr)
          << "An immune space object must have a bitmap";
      if (kIsDebugBuild) {
        DCHECK(heap_mark_bitmap_->GetContinuousSpaceBitmap(from_ref)->Test(from_ref))
            << "Immune space object must be already marked";
      }
      // This may or may not succeed, which is ok.
      if (kUseBakerReadBarrier) {
        from_ref->AtomicSetReadBarrierPointer(ReadBarrier::WhitePtr(), ReadBarrier::GrayPtr());
      }
      if (cc_bitmap->AtomicTestAndSet(from_ref)) {
        // Already marked.
        to_ref = from_ref;
      } else {
        // Newly marked.
        to_ref = from_ref;
        if (kUseBakerReadBarrier) {
          DCHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr());
        }
        PushOntoMarkStack<true>(to_ref);
      }
    } else {
      // Use the mark bitmap.
      accounting::ContinuousSpaceBitmap* mark_bitmap =
          heap_mark_bitmap_->GetContinuousSpaceBitmap(from_ref);
      accounting::LargeObjectBitmap* los_bitmap =
          heap_mark_bitmap_->GetLargeObjectBitmap(from_ref);
      CHECK(los_bitmap != nullptr) << "LOS bitmap covers the entire address range";
      bool is_los = mark_bitmap == nullptr;
      if (!is_los && mark_bitmap->Test(from_ref)) {
        // Already marked.
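        // May be gray (marked but not yet scanned) or black (already
        // scanned), so either read barrier state is acceptable here.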
        to_ref = from_ref;
        if (kUseBakerReadBarrier) {
          DCHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr() ||
                 to_ref->GetReadBarrierPointer() == ReadBarrier::BlackPtr());
        }
      } else if (is_los && los_bitmap->Test(from_ref)) {
        // Already marked in LOS.
        to_ref = from_ref;
        if (kUseBakerReadBarrier) {
          DCHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr() ||
                 to_ref->GetReadBarrierPointer() == ReadBarrier::BlackPtr());
        }
      } else {
        // Not marked.
        if (IsOnAllocStack(from_ref)) {
          // If it's on the allocation stack, it's considered marked. Keep it white.
          to_ref = from_ref;
          // Objects on the allocation stack need not be marked.
          if (!is_los) {
            DCHECK(!mark_bitmap->Test(to_ref));
          } else {
            DCHECK(!los_bitmap->Test(to_ref));
          }
          if (kUseBakerReadBarrier) {
            DCHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::WhitePtr());
          }
        } else {
          // Neither marked nor on the allocation stack. Try to mark it.
          // This may or may not succeed, which is ok.
          if (kUseBakerReadBarrier) {
            from_ref->AtomicSetReadBarrierPointer(ReadBarrier::WhitePtr(), ReadBarrier::GrayPtr());
          }
          if (!is_los && mark_bitmap->AtomicTestAndSet(from_ref)) {
            // Already marked.
            to_ref = from_ref;
          } else if (is_los && los_bitmap->AtomicTestAndSet(from_ref)) {
            // Already marked in LOS.
            to_ref = from_ref;
          } else {
            // Newly marked.
            to_ref = from_ref;
            if (kUseBakerReadBarrier) {
              DCHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr());
            }
            PushOntoMarkStack<true>(to_ref);
          }
        }
      }
    }
  }
  return to_ref;
}

void ConcurrentCopying::FinishPhase() {
  region_space_ = nullptr;
  CHECK(mark_queue_.IsEmpty());
  mark_queue_.Clear();
  {
    MutexLock mu(Thread::Current(), skipped_blocks_lock_);
    skipped_blocks_map_.clear();
  }
  WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
  heap_->ClearMarkedObjects();
}

mirror::Object* ConcurrentCopying::IsMarkedCallback(mirror::Object* from_ref, void* arg) {
  return reinterpret_cast<ConcurrentCopying*>(arg)->IsMarked(from_ref);
}

bool ConcurrentCopying::IsHeapReferenceMarkedCallback(
    mirror::HeapReference<mirror::Object>* field, void* arg) {
  mirror::Object* from_ref = field->AsMirrorPtr();
  mirror::Object* to_ref = reinterpret_cast<ConcurrentCopying*>(arg)->IsMarked(from_ref);
  if (to_ref == nullptr) {
    return false;
  }
  if (from_ref != to_ref) {
    QuasiAtomic::ThreadFenceRelease();
    field->Assign(to_ref);
    QuasiAtomic::ThreadFenceSequentiallyConsistent();
  }
  return true;
}

mirror::Object* ConcurrentCopying::MarkCallback(mirror::Object* from_ref, void* arg) {
  return reinterpret_cast<ConcurrentCopying*>(arg)->Mark(from_ref);
}

void ConcurrentCopying::ProcessMarkStackCallback(void* arg) {
  reinterpret_cast<ConcurrentCopying*>(arg)->ProcessMarkStack();
}

void ConcurrentCopying::DelayReferenceReferent(mirror::Class* klass, mirror::Reference* reference) {
  heap_->GetReferenceProcessor()->DelayReferenceReferent(
      klass, reference, &IsHeapReferenceMarkedCallback, this);
}

void ConcurrentCopying::ProcessReferences(Thread* self, bool concurrent) {
  TimingLogger::ScopedTiming split("ProcessReferences", GetTimings());
  WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
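  // The marking callbacks used below may test and set mark bitmaps, so the
  // heap bitmap lock is held exclusively across reference processing.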
  GetHeap()->GetReferenceProcessor()->ProcessReferences(
      concurrent, GetTimings(), GetCurrentIteration()->GetClearSoftReferences(),
      &IsHeapReferenceMarkedCallback, &MarkCallback, &ProcessMarkStackCallback, this);
}

void ConcurrentCopying::RevokeAllThreadLocalBuffers() {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  region_space_->RevokeAllThreadLocalBuffers();
}

}  // namespace collector
}  // namespace gc
}  // namespace art