concurrent_copying.cc revision d25f84250700c35f006d5a1d295231af174c3734
1/* 2 * Copyright (C) 2014 The Android Open Source Project 3 * 4 * Licensed under the Apache License, Version 2.0 (the "License"); 5 * you may not use this file except in compliance with the License. 6 * You may obtain a copy of the License at 7 * 8 * http://www.apache.org/licenses/LICENSE-2.0 9 * 10 * Unless required by applicable law or agreed to in writing, software 11 * distributed under the License is distributed on an "AS IS" BASIS, 12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 * See the License for the specific language governing permissions and 14 * limitations under the License. 15 */ 16 17#include "concurrent_copying.h" 18 19#include "gc/accounting/heap_bitmap-inl.h" 20#include "gc/accounting/space_bitmap-inl.h" 21#include "gc/space/image_space.h" 22#include "gc/space/space.h" 23#include "intern_table.h" 24#include "mirror/art_field-inl.h" 25#include "mirror/object-inl.h" 26#include "scoped_thread_state_change.h" 27#include "thread-inl.h" 28#include "thread_list.h" 29#include "well_known_classes.h" 30 31namespace art { 32namespace gc { 33namespace collector { 34 35ConcurrentCopying::ConcurrentCopying(Heap* heap, const std::string& name_prefix) 36 : GarbageCollector(heap, 37 name_prefix + (name_prefix.empty() ? "" : " ") + 38 "concurrent copying + mark sweep"), 39 region_space_(nullptr), gc_barrier_(new Barrier(0)), mark_queue_(2 * MB), 40 is_marking_(false), is_active_(false), is_asserting_to_space_invariant_(false), 41 heap_mark_bitmap_(nullptr), live_stack_freeze_size_(0), 42 skipped_blocks_lock_("concurrent copying bytes blocks lock", kMarkSweepMarkStackLock), 43 rb_table_(heap_->GetReadBarrierTable()), 44 force_evacuate_all_(false) { 45 static_assert(space::RegionSpace::kRegionSize == accounting::ReadBarrierTable::kRegionSize, 46 "The region space size and the read barrier table region size must match"); 47 cc_heap_bitmap_.reset(new accounting::HeapBitmap(heap)); 48 { 49 Thread* self = Thread::Current(); 50 ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_); 51 // Cache this so that we won't have to lock heap_bitmap_lock_ in 52 // Mark() which could cause a nested lock on heap_bitmap_lock_ 53 // when GC causes a RB while doing GC or a lock order violation 54 // (class_linker_lock_ and heap_bitmap_lock_). 55 heap_mark_bitmap_ = heap->GetMarkBitmap(); 56 } 57} 58 59ConcurrentCopying::~ConcurrentCopying() { 60} 61 62void ConcurrentCopying::RunPhases() { 63 CHECK(kUseBakerReadBarrier || kUseTableLookupReadBarrier); 64 CHECK(!is_active_); 65 is_active_ = true; 66 Thread* self = Thread::Current(); 67 Locks::mutator_lock_->AssertNotHeld(self); 68 { 69 ReaderMutexLock mu(self, *Locks::mutator_lock_); 70 InitializePhase(); 71 } 72 FlipThreadRoots(); 73 { 74 ReaderMutexLock mu(self, *Locks::mutator_lock_); 75 MarkingPhase(); 76 } 77 // Verify no from space refs. This causes a pause. 
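  // Verification is gated on kEnableNoFromSpaceRefsVerification or a debug
  // build. It needs a stable view of the heap, so it runs under ScopedPause
  // and then walks the runtime roots, the to-space, the non-moving spaces and
  // the allocation stack looking for any surviving from-space reference (see
  // VerifyNoFromSpaceReferences() below).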
78 if (kEnableNoFromSpaceRefsVerification || kIsDebugBuild) { 79 TimingLogger::ScopedTiming split("(Paused)VerifyNoFromSpaceReferences", GetTimings()); 80 ScopedPause pause(this); 81 CheckEmptyMarkQueue(); 82 if (kVerboseMode) { 83 LOG(INFO) << "Verifying no from-space refs"; 84 } 85 VerifyNoFromSpaceReferences(); 86 CheckEmptyMarkQueue(); 87 } 88 { 89 ReaderMutexLock mu(self, *Locks::mutator_lock_); 90 ReclaimPhase(); 91 } 92 FinishPhase(); 93 CHECK(is_active_); 94 is_active_ = false; 95} 96 97void ConcurrentCopying::BindBitmaps() { 98 Thread* self = Thread::Current(); 99 WriterMutexLock mu(self, *Locks::heap_bitmap_lock_); 100 // Mark all of the spaces we never collect as immune. 101 for (const auto& space : heap_->GetContinuousSpaces()) { 102 if (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyNeverCollect 103 || space->GetGcRetentionPolicy() == space::kGcRetentionPolicyFullCollect) { 104 CHECK(space->IsZygoteSpace() || space->IsImageSpace()); 105 CHECK(immune_region_.AddContinuousSpace(space)) << "Failed to add space " << *space; 106 const char* bitmap_name = space->IsImageSpace() ? "cc image space bitmap" : 107 "cc zygote space bitmap"; 108 // TODO: try avoiding using bitmaps for image/zygote to save space. 109 accounting::ContinuousSpaceBitmap* bitmap = 110 accounting::ContinuousSpaceBitmap::Create(bitmap_name, space->Begin(), space->Capacity()); 111 cc_heap_bitmap_->AddContinuousSpaceBitmap(bitmap); 112 cc_bitmaps_.push_back(bitmap); 113 } else if (space == region_space_) { 114 accounting::ContinuousSpaceBitmap* bitmap = 115 accounting::ContinuousSpaceBitmap::Create("cc region space bitmap", 116 space->Begin(), space->Capacity()); 117 cc_heap_bitmap_->AddContinuousSpaceBitmap(bitmap); 118 cc_bitmaps_.push_back(bitmap); 119 region_space_bitmap_ = bitmap; 120 } 121 } 122} 123 124void ConcurrentCopying::InitializePhase() { 125 TimingLogger::ScopedTiming split("InitializePhase", GetTimings()); 126 if (kVerboseMode) { 127 LOG(INFO) << "GC InitializePhase"; 128 LOG(INFO) << "Region-space : " << reinterpret_cast<void*>(region_space_->Begin()) << "-" 129 << reinterpret_cast<void*>(region_space_->Limit()); 130 } 131 CHECK(mark_queue_.IsEmpty()); 132 immune_region_.Reset(); 133 bytes_moved_.StoreRelaxed(0); 134 objects_moved_.StoreRelaxed(0); 135 if (GetCurrentIteration()->GetGcCause() == kGcCauseExplicit || 136 GetCurrentIteration()->GetGcCause() == kGcCauseForNativeAlloc || 137 GetCurrentIteration()->GetClearSoftReferences()) { 138 force_evacuate_all_ = true; 139 } else { 140 force_evacuate_all_ = false; 141 } 142 BindBitmaps(); 143 if (kVerboseMode) { 144 LOG(INFO) << "force_evacuate_all=" << force_evacuate_all_; 145 LOG(INFO) << "Immune region: " << immune_region_.Begin() << "-" << immune_region_.End(); 146 LOG(INFO) << "GC end of InitializePhase"; 147 } 148} 149 150// Used to switch the thread roots of a thread from from-space refs to to-space refs. 151class ThreadFlipVisitor : public Closure { 152 public: 153 explicit ThreadFlipVisitor(ConcurrentCopying* concurrent_copying, bool use_tlab) 154 : concurrent_copying_(concurrent_copying), use_tlab_(use_tlab) { 155 } 156 157 virtual void Run(Thread* thread) OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { 158 // Note: self is not necessarily equal to thread since thread may be suspended. 
159 Thread* self = Thread::Current(); 160 CHECK(thread == self || thread->IsSuspended() || thread->GetState() == kWaitingPerformingGc) 161 << thread->GetState() << " thread " << thread << " self " << self; 162 if (use_tlab_ && thread->HasTlab()) { 163 if (ConcurrentCopying::kEnableFromSpaceAccountingCheck) { 164 // This must come before the revoke. 165 size_t thread_local_objects = thread->GetThreadLocalObjectsAllocated(); 166 concurrent_copying_->region_space_->RevokeThreadLocalBuffers(thread); 167 reinterpret_cast<Atomic<size_t>*>(&concurrent_copying_->from_space_num_objects_at_first_pause_)-> 168 FetchAndAddSequentiallyConsistent(thread_local_objects); 169 } else { 170 concurrent_copying_->region_space_->RevokeThreadLocalBuffers(thread); 171 } 172 } 173 if (kUseThreadLocalAllocationStack) { 174 thread->RevokeThreadLocalAllocationStack(); 175 } 176 ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_); 177 thread->VisitRoots(ConcurrentCopying::ProcessRootCallback, concurrent_copying_); 178 concurrent_copying_->GetBarrier().Pass(self); 179 } 180 181 private: 182 ConcurrentCopying* const concurrent_copying_; 183 const bool use_tlab_; 184}; 185 186// Called back from Runtime::FlipThreadRoots() during a pause. 187class FlipCallback : public Closure { 188 public: 189 explicit FlipCallback(ConcurrentCopying* concurrent_copying) 190 : concurrent_copying_(concurrent_copying) { 191 } 192 193 virtual void Run(Thread* thread) OVERRIDE EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_) { 194 ConcurrentCopying* cc = concurrent_copying_; 195 TimingLogger::ScopedTiming split("(Paused)FlipCallback", cc->GetTimings()); 196 // Note: self is not necessarily equal to thread since thread may be suspended. 197 Thread* self = Thread::Current(); 198 CHECK(thread == self); 199 Locks::mutator_lock_->AssertExclusiveHeld(self); 200 cc->region_space_->SetFromSpace(cc->rb_table_, cc->force_evacuate_all_); 201 cc->SwapStacks(self); 202 if (ConcurrentCopying::kEnableFromSpaceAccountingCheck) { 203 cc->RecordLiveStackFreezeSize(self); 204 cc->from_space_num_objects_at_first_pause_ = cc->region_space_->GetObjectsAllocated(); 205 cc->from_space_num_bytes_at_first_pause_ = cc->region_space_->GetBytesAllocated(); 206 } 207 cc->is_marking_ = true; 208 if (UNLIKELY(Runtime::Current()->IsActiveTransaction())) { 209 CHECK(Runtime::Current()->IsCompiler()); 210 TimingLogger::ScopedTiming split2("(Paused)VisitTransactionRoots", cc->GetTimings()); 211 Runtime::Current()->VisitTransactionRoots(ConcurrentCopying::ProcessRootCallback, cc); 212 } 213 } 214 215 private: 216 ConcurrentCopying* const concurrent_copying_; 217}; 218 219// Switch threads that from from-space to to-space refs. Forward/mark the thread roots. 
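// FlipThreadRoots() below drives the flip with the two closures above:
// ThreadFlipVisitor runs once per mutator thread, revoking its TLAB and
// thread-local allocation stack and visiting that thread's roots through
// ProcessRootCallback, while FlipCallback runs exactly once with the mutator
// lock held exclusively to set up the from-space, swap the allocation/live
// stacks and turn marking on. Every ThreadFlipVisitor invocation passes
// gc_barrier_, and the count returned by Runtime::FlipThreadRoots() is how
// many passes FlipThreadRoots() then waits for.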
220void ConcurrentCopying::FlipThreadRoots() { 221 TimingLogger::ScopedTiming split("FlipThreadRoots", GetTimings()); 222 if (kVerboseMode) { 223 LOG(INFO) << "time=" << region_space_->Time(); 224 region_space_->DumpNonFreeRegions(LOG(INFO)); 225 } 226 Thread* self = Thread::Current(); 227 Locks::mutator_lock_->AssertNotHeld(self); 228 gc_barrier_->Init(self, 0); 229 ThreadFlipVisitor thread_flip_visitor(this, heap_->use_tlab_); 230 FlipCallback flip_callback(this); 231 size_t barrier_count = Runtime::Current()->FlipThreadRoots( 232 &thread_flip_visitor, &flip_callback, this); 233 { 234 ScopedThreadStateChange tsc(self, kWaitingForCheckPointsToRun); 235 gc_barrier_->Increment(self, barrier_count); 236 } 237 is_asserting_to_space_invariant_ = true; 238 QuasiAtomic::ThreadFenceForConstructor(); 239 if (kVerboseMode) { 240 LOG(INFO) << "time=" << region_space_->Time(); 241 region_space_->DumpNonFreeRegions(LOG(INFO)); 242 LOG(INFO) << "GC end of FlipThreadRoots"; 243 } 244} 245 246void ConcurrentCopying::SwapStacks(Thread* self) { 247 heap_->SwapStacks(self); 248} 249 250void ConcurrentCopying::RecordLiveStackFreezeSize(Thread* self) { 251 WriterMutexLock mu(self, *Locks::heap_bitmap_lock_); 252 live_stack_freeze_size_ = heap_->GetLiveStack()->Size(); 253} 254 255// Used to visit objects in the immune spaces. 256class ConcurrentCopyingImmuneSpaceObjVisitor { 257 public: 258 explicit ConcurrentCopyingImmuneSpaceObjVisitor(ConcurrentCopying* cc) 259 : collector_(cc) {} 260 261 void operator()(mirror::Object* obj) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) 262 SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) { 263 DCHECK(obj != nullptr); 264 DCHECK(collector_->immune_region_.ContainsObject(obj)); 265 accounting::ContinuousSpaceBitmap* cc_bitmap = 266 collector_->cc_heap_bitmap_->GetContinuousSpaceBitmap(obj); 267 DCHECK(cc_bitmap != nullptr) 268 << "An immune space object must have a bitmap"; 269 if (kIsDebugBuild) { 270 DCHECK(collector_->heap_->GetMarkBitmap()->Test(obj)) 271 << "Immune space object must be already marked"; 272 } 273 // This may or may not succeed, which is ok. 274 if (kUseBakerReadBarrier) { 275 obj->AtomicSetReadBarrierPointer(ReadBarrier::WhitePtr(), ReadBarrier::GrayPtr()); 276 } 277 if (cc_bitmap->AtomicTestAndSet(obj)) { 278 // Already marked. Do nothing. 279 } else { 280 // Newly marked. Set the gray bit and push it onto the mark stack. 281 CHECK(!kUseBakerReadBarrier || obj->GetReadBarrierPointer() == ReadBarrier::GrayPtr()); 282 collector_->PushOntoMarkStack<true>(obj); 283 } 284 } 285 286 private: 287 ConcurrentCopying* collector_; 288}; 289 290class EmptyCheckpoint : public Closure { 291 public: 292 explicit EmptyCheckpoint(ConcurrentCopying* concurrent_copying) 293 : concurrent_copying_(concurrent_copying) { 294 } 295 296 virtual void Run(Thread* thread) OVERRIDE NO_THREAD_SAFETY_ANALYSIS { 297 // Note: self is not necessarily equal to thread since thread may be suspended. 298 Thread* self = Thread::Current(); 299 CHECK(thread == self || thread->IsSuspended() || thread->GetState() == kWaitingPerformingGc) 300 << thread->GetState() << " thread " << thread << " self " << self; 301 concurrent_copying_->GetBarrier().Pass(self); 302 } 303 304 private: 305 ConcurrentCopying* const concurrent_copying_; 306}; 307 308// Concurrently mark roots that are guarded by read barriers and process the mark stack. 
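// MarkingPhase() first marks the image root, the constant roots, the intern
// table, the class linker roots and the remaining non-thread roots (thread
// roots were already forwarded during the flip), then scans objects in the
// immune spaces via their live bitmaps, and finally drains the mark stack,
// issuing empty checkpoints until no thread has published further work.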
309void ConcurrentCopying::MarkingPhase() { 310 TimingLogger::ScopedTiming split("MarkingPhase", GetTimings()); 311 if (kVerboseMode) { 312 LOG(INFO) << "GC MarkingPhase"; 313 } 314 { 315 // Mark the image root. The WB-based collectors do not need to 316 // scan the image objects from roots by relying on the card table, 317 // but it's necessary for the RB to-space invariant to hold. 318 TimingLogger::ScopedTiming split1("VisitImageRoots", GetTimings()); 319 gc::space::ImageSpace* image = heap_->GetImageSpace(); 320 if (image != nullptr) { 321 mirror::ObjectArray<mirror::Object>* image_root = image->GetImageHeader().GetImageRoots(); 322 mirror::Object* marked_image_root = Mark(image_root); 323 CHECK_EQ(image_root, marked_image_root) << "An image object does not move"; 324 if (ReadBarrier::kEnableToSpaceInvariantChecks) { 325 AssertToSpaceInvariant(nullptr, MemberOffset(0), marked_image_root); 326 } 327 } 328 } 329 { 330 TimingLogger::ScopedTiming split2("VisitConstantRoots", GetTimings()); 331 Runtime::Current()->VisitConstantRoots(ProcessRootCallback, this); 332 } 333 { 334 TimingLogger::ScopedTiming split3("VisitInternTableRoots", GetTimings()); 335 Runtime::Current()->GetInternTable()->VisitRoots(ProcessRootCallback, 336 this, kVisitRootFlagAllRoots); 337 } 338 { 339 TimingLogger::ScopedTiming split4("VisitClassLinkerRoots", GetTimings()); 340 Runtime::Current()->GetClassLinker()->VisitRoots(ProcessRootCallback, 341 this, kVisitRootFlagAllRoots); 342 } 343 { 344 // TODO: don't visit the transaction roots if it's not active. 345 TimingLogger::ScopedTiming split5("VisitNonThreadRoots", GetTimings()); 346 Runtime::Current()->VisitNonThreadRoots(ProcessRootCallback, this); 347 } 348 349 // Immune spaces. 350 for (auto& space : heap_->GetContinuousSpaces()) { 351 if (immune_region_.ContainsSpace(space)) { 352 DCHECK(space->IsImageSpace() || space->IsZygoteSpace()); 353 accounting::ContinuousSpaceBitmap* live_bitmap = space->GetLiveBitmap(); 354 ConcurrentCopyingImmuneSpaceObjVisitor visitor(this); 355 live_bitmap->VisitMarkedRange(reinterpret_cast<uintptr_t>(space->Begin()), 356 reinterpret_cast<uintptr_t>(space->Limit()), 357 visitor); 358 } 359 } 360 361 Thread* self = Thread::Current(); 362 { 363 TimingLogger::ScopedTiming split6("ProcessMarkStack", GetTimings()); 364 // Process the mark stack and issue an empty check point. If the 365 // mark stack is still empty after the check point, we're 366 // done. Otherwise, repeat. 367 ProcessMarkStack(); 368 size_t count = 0; 369 while (!ProcessMarkStack()) { 370 ++count; 371 if (kVerboseMode) { 372 LOG(INFO) << "Issue an empty check point. " << count; 373 } 374 IssueEmptyCheckpoint(); 375 } 376 // Need to ensure the mark stack is empty before reference 377 // processing to get rid of non-reference gray objects. 378 CheckEmptyMarkQueue(); 379 // Enable the GetReference slow path and disallow access to the system weaks. 380 GetHeap()->GetReferenceProcessor()->EnableSlowPath(); 381 Runtime::Current()->DisallowNewSystemWeaks(); 382 QuasiAtomic::ThreadFenceForConstructor(); 383 // Lock-unlock the system weak locks so that there's no thread in 384 // the middle of accessing system weaks. 385 Runtime::Current()->EnsureNewSystemWeaksDisallowed(); 386 // Note: Do not issue a checkpoint from here to the 387 // SweepSystemWeaks call or else a deadlock due to 388 // WaitHoldingLocks() would occur. 
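    // At this point the transitive closure over strong references is complete
    // and the mark stack is empty, so reference processing and the
    // system-weak sweep below can decide liveness from the mark state alone.
    // SweepSystemWeaks() may itself mark a few more objects (see the comment
    // above the ProcessMarkStack() call that follows it), which is why the
    // mark stack is drained once more afterwards.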
389 if (kVerboseMode) { 390 LOG(INFO) << "Enabled the ref proc slow path & disabled access to system weaks."; 391 LOG(INFO) << "ProcessReferences"; 392 } 393 ProcessReferences(self, true); 394 CheckEmptyMarkQueue(); 395 if (kVerboseMode) { 396 LOG(INFO) << "SweepSystemWeaks"; 397 } 398 SweepSystemWeaks(self); 399 if (kVerboseMode) { 400 LOG(INFO) << "SweepSystemWeaks done"; 401 } 402 // Because hash_set::Erase() can call the hash function for 403 // arbitrary elements in the weak intern table in 404 // InternTable::Table::SweepWeaks(), the above SweepSystemWeaks() 405 // call may have marked some objects (strings) alive. So process 406 // the mark stack here once again. 407 ProcessMarkStack(); 408 CheckEmptyMarkQueue(); 409 // Disable marking. 410 if (kUseTableLookupReadBarrier) { 411 heap_->rb_table_->ClearAll(); 412 DCHECK(heap_->rb_table_->IsAllCleared()); 413 } 414 is_mark_queue_push_disallowed_.StoreSequentiallyConsistent(1); 415 is_marking_ = false; 416 if (kVerboseMode) { 417 LOG(INFO) << "AllowNewSystemWeaks"; 418 } 419 Runtime::Current()->AllowNewSystemWeaks(); 420 CheckEmptyMarkQueue(); 421 } 422 423 if (kVerboseMode) { 424 LOG(INFO) << "GC end of MarkingPhase"; 425 } 426} 427 428void ConcurrentCopying::IssueEmptyCheckpoint() { 429 Thread* self = Thread::Current(); 430 EmptyCheckpoint check_point(this); 431 ThreadList* thread_list = Runtime::Current()->GetThreadList(); 432 gc_barrier_->Init(self, 0); 433 size_t barrier_count = thread_list->RunCheckpoint(&check_point); 434 // Release locks then wait for all mutator threads to pass the barrier. 435 Locks::mutator_lock_->SharedUnlock(self); 436 { 437 ScopedThreadStateChange tsc(self, kWaitingForCheckPointsToRun); 438 gc_barrier_->Increment(self, barrier_count); 439 } 440 Locks::mutator_lock_->SharedLock(self); 441} 442 443mirror::Object* ConcurrentCopying::PopOffMarkStack() { 444 return mark_queue_.Dequeue(); 445} 446 447template<bool kThreadSafe> 448void ConcurrentCopying::PushOntoMarkStack(mirror::Object* to_ref) { 449 CHECK_EQ(is_mark_queue_push_disallowed_.LoadRelaxed(), 0) 450 << " " << to_ref << " " << PrettyTypeOf(to_ref); 451 if (kThreadSafe) { 452 CHECK(mark_queue_.Enqueue(to_ref)) << "Mark queue overflow"; 453 } else { 454 CHECK(mark_queue_.EnqueueThreadUnsafe(to_ref)) << "Mark queue overflow"; 455 } 456} 457 458accounting::ObjectStack* ConcurrentCopying::GetAllocationStack() { 459 return heap_->allocation_stack_.get(); 460} 461 462accounting::ObjectStack* ConcurrentCopying::GetLiveStack() { 463 return heap_->live_stack_.get(); 464} 465 466inline mirror::Object* ConcurrentCopying::GetFwdPtr(mirror::Object* from_ref) { 467 DCHECK(region_space_->IsInFromSpace(from_ref)); 468 LockWord lw = from_ref->GetLockWord(false); 469 if (lw.GetState() == LockWord::kForwardingAddress) { 470 mirror::Object* fwd_ptr = reinterpret_cast<mirror::Object*>(lw.ForwardingAddress()); 471 CHECK(fwd_ptr != nullptr); 472 return fwd_ptr; 473 } else { 474 return nullptr; 475 } 476} 477 478inline void ConcurrentCopying::SetFwdPtr(mirror::Object* from_ref, mirror::Object* to_ref) { 479 DCHECK(region_space_->IsInFromSpace(from_ref)); 480 DCHECK(region_space_->IsInToSpace(to_ref) || heap_->GetNonMovingSpace()->HasAddress(to_ref)); 481 LockWord lw = from_ref->GetLockWord(false); 482 DCHECK_NE(lw.GetState(), LockWord::kForwardingAddress); 483 from_ref->SetLockWord(LockWord::FromForwardingAddress(reinterpret_cast<size_t>(to_ref)), false); 484} 485 486// The following visitors are that used to verify that there's no 487// references to the from-space left 
after marking. 488class ConcurrentCopyingVerifyNoFromSpaceRefsVisitor { 489 public: 490 explicit ConcurrentCopyingVerifyNoFromSpaceRefsVisitor(ConcurrentCopying* collector) 491 : collector_(collector) {} 492 493 void operator()(mirror::Object* ref) const 494 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) ALWAYS_INLINE { 495 if (ref == nullptr) { 496 // OK. 497 return; 498 } 499 collector_->AssertToSpaceInvariant(nullptr, MemberOffset(0), ref); 500 if (kUseBakerReadBarrier) { 501 if (collector_->RegionSpace()->IsInToSpace(ref)) { 502 CHECK(ref->GetReadBarrierPointer() == nullptr) 503 << "To-space ref " << ref << " " << PrettyTypeOf(ref) 504 << " has non-white rb_ptr " << ref->GetReadBarrierPointer(); 505 } else { 506 CHECK(ref->GetReadBarrierPointer() == ReadBarrier::BlackPtr() || 507 (ref->GetReadBarrierPointer() == ReadBarrier::WhitePtr() && 508 collector_->IsOnAllocStack(ref))) 509 << "Non-moving/unevac from space ref " << ref << " " << PrettyTypeOf(ref) 510 << " has non-black rb_ptr " << ref->GetReadBarrierPointer() 511 << " but isn't on the alloc stack (and has white rb_ptr)." 512 << " Is it in the non-moving space=" 513 << (collector_->GetHeap()->GetNonMovingSpace()->HasAddress(ref)); 514 } 515 } 516 } 517 518 static void RootCallback(mirror::Object** root, void *arg, const RootInfo& /*root_info*/) 519 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { 520 ConcurrentCopying* collector = reinterpret_cast<ConcurrentCopying*>(arg); 521 ConcurrentCopyingVerifyNoFromSpaceRefsVisitor visitor(collector); 522 DCHECK(root != nullptr); 523 visitor(*root); 524 } 525 526 private: 527 ConcurrentCopying* collector_; 528}; 529 530class ConcurrentCopyingVerifyNoFromSpaceRefsFieldVisitor { 531 public: 532 explicit ConcurrentCopyingVerifyNoFromSpaceRefsFieldVisitor(ConcurrentCopying* collector) 533 : collector_(collector) {} 534 535 void operator()(mirror::Object* obj, MemberOffset offset, bool /* is_static */) const 536 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) ALWAYS_INLINE { 537 mirror::Object* ref = 538 obj->GetFieldObject<mirror::Object, kDefaultVerifyFlags, kWithoutReadBarrier>(offset); 539 ConcurrentCopyingVerifyNoFromSpaceRefsVisitor visitor(collector_); 540 visitor(ref); 541 } 542 void operator()(mirror::Class* klass, mirror::Reference* ref) const 543 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) ALWAYS_INLINE { 544 CHECK(klass->IsTypeOfReferenceClass()); 545 this->operator()(ref, mirror::Reference::ReferentOffset(), false); 546 } 547 548 private: 549 ConcurrentCopying* collector_; 550}; 551 552class ConcurrentCopyingVerifyNoFromSpaceRefsObjectVisitor { 553 public: 554 explicit ConcurrentCopyingVerifyNoFromSpaceRefsObjectVisitor(ConcurrentCopying* collector) 555 : collector_(collector) {} 556 void operator()(mirror::Object* obj) const 557 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { 558 ObjectCallback(obj, collector_); 559 } 560 static void ObjectCallback(mirror::Object* obj, void *arg) 561 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { 562 CHECK(obj != nullptr); 563 ConcurrentCopying* collector = reinterpret_cast<ConcurrentCopying*>(arg); 564 space::RegionSpace* region_space = collector->RegionSpace(); 565 CHECK(!region_space->IsInFromSpace(obj)) << "Scanning object " << obj << " in from space"; 566 ConcurrentCopyingVerifyNoFromSpaceRefsFieldVisitor visitor(collector); 567 obj->VisitReferences<true>(visitor, visitor); 568 if (kUseBakerReadBarrier) { 569 if (collector->RegionSpace()->IsInToSpace(obj)) { 570 CHECK(obj->GetReadBarrierPointer() == nullptr) 571 << "obj=" << obj << " non-white rb_ptr " << 
obj->GetReadBarrierPointer(); 572 } else { 573 CHECK(obj->GetReadBarrierPointer() == ReadBarrier::BlackPtr() || 574 (obj->GetReadBarrierPointer() == ReadBarrier::WhitePtr() && 575 collector->IsOnAllocStack(obj))) 576 << "Non-moving space/unevac from space ref " << obj << " " << PrettyTypeOf(obj) 577 << " has non-black rb_ptr " << obj->GetReadBarrierPointer() 578 << " but isn't on the alloc stack (and has white rb_ptr). Is it in the non-moving space=" 579 << (collector->GetHeap()->GetNonMovingSpace()->HasAddress(obj)); 580 } 581 } 582 } 583 584 private: 585 ConcurrentCopying* const collector_; 586}; 587 588// Verify there's no from-space references left after the marking phase. 589void ConcurrentCopying::VerifyNoFromSpaceReferences() { 590 Thread* self = Thread::Current(); 591 DCHECK(Locks::mutator_lock_->IsExclusiveHeld(self)); 592 ConcurrentCopyingVerifyNoFromSpaceRefsObjectVisitor visitor(this); 593 // Roots. 594 { 595 ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_); 596 Runtime::Current()->VisitRoots( 597 ConcurrentCopyingVerifyNoFromSpaceRefsVisitor::RootCallback, this); 598 } 599 // The to-space. 600 region_space_->WalkToSpace(ConcurrentCopyingVerifyNoFromSpaceRefsObjectVisitor::ObjectCallback, 601 this); 602 // Non-moving spaces. 603 { 604 WriterMutexLock mu(self, *Locks::heap_bitmap_lock_); 605 heap_->GetMarkBitmap()->Visit(visitor); 606 } 607 // The alloc stack. 608 { 609 ConcurrentCopyingVerifyNoFromSpaceRefsVisitor ref_visitor(this); 610 for (auto* it = heap_->allocation_stack_->Begin(), *end = heap_->allocation_stack_->End(); 611 it < end; ++it) { 612 mirror::Object* const obj = it->AsMirrorPtr(); 613 if (obj != nullptr && obj->GetClass() != nullptr) { 614 // TODO: need to call this only if obj is alive? 615 ref_visitor(obj); 616 visitor(obj); 617 } 618 } 619 } 620 // TODO: LOS. But only refs in LOS are classes. 621} 622 623// The following visitors are used to assert the to-space invariant. 624class ConcurrentCopyingAssertToSpaceInvariantRefsVisitor { 625 public: 626 explicit ConcurrentCopyingAssertToSpaceInvariantRefsVisitor(ConcurrentCopying* collector) 627 : collector_(collector) {} 628 629 void operator()(mirror::Object* ref) const 630 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) ALWAYS_INLINE { 631 if (ref == nullptr) { 632 // OK. 
633 return; 634 } 635 collector_->AssertToSpaceInvariant(nullptr, MemberOffset(0), ref); 636 } 637 static void RootCallback(mirror::Object** root, void *arg, const RootInfo& /*root_info*/) 638 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { 639 ConcurrentCopying* collector = reinterpret_cast<ConcurrentCopying*>(arg); 640 ConcurrentCopyingAssertToSpaceInvariantRefsVisitor visitor(collector); 641 DCHECK(root != nullptr); 642 visitor(*root); 643 } 644 645 private: 646 ConcurrentCopying* collector_; 647}; 648 649class ConcurrentCopyingAssertToSpaceInvariantFieldVisitor { 650 public: 651 explicit ConcurrentCopyingAssertToSpaceInvariantFieldVisitor(ConcurrentCopying* collector) 652 : collector_(collector) {} 653 654 void operator()(mirror::Object* obj, MemberOffset offset, bool /* is_static */) const 655 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) ALWAYS_INLINE { 656 mirror::Object* ref = 657 obj->GetFieldObject<mirror::Object, kDefaultVerifyFlags, kWithoutReadBarrier>(offset); 658 ConcurrentCopyingAssertToSpaceInvariantRefsVisitor visitor(collector_); 659 visitor(ref); 660 } 661 void operator()(mirror::Class* klass, mirror::Reference* /* ref */) const 662 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) ALWAYS_INLINE { 663 CHECK(klass->IsTypeOfReferenceClass()); 664 } 665 666 private: 667 ConcurrentCopying* collector_; 668}; 669 670class ConcurrentCopyingAssertToSpaceInvariantObjectVisitor { 671 public: 672 explicit ConcurrentCopyingAssertToSpaceInvariantObjectVisitor(ConcurrentCopying* collector) 673 : collector_(collector) {} 674 void operator()(mirror::Object* obj) const 675 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { 676 ObjectCallback(obj, collector_); 677 } 678 static void ObjectCallback(mirror::Object* obj, void *arg) 679 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { 680 CHECK(obj != nullptr); 681 ConcurrentCopying* collector = reinterpret_cast<ConcurrentCopying*>(arg); 682 space::RegionSpace* region_space = collector->RegionSpace(); 683 CHECK(!region_space->IsInFromSpace(obj)) << "Scanning object " << obj << " in from space"; 684 collector->AssertToSpaceInvariant(nullptr, MemberOffset(0), obj); 685 ConcurrentCopyingAssertToSpaceInvariantFieldVisitor visitor(collector); 686 obj->VisitReferences<true>(visitor, visitor); 687 } 688 689 private: 690 ConcurrentCopying* collector_; 691}; 692 693bool ConcurrentCopying::ProcessMarkStack() { 694 if (kVerboseMode) { 695 LOG(INFO) << "ProcessMarkStack. "; 696 } 697 size_t count = 0; 698 mirror::Object* to_ref; 699 while ((to_ref = PopOffMarkStack()) != nullptr) { 700 ++count; 701 DCHECK(!region_space_->IsInFromSpace(to_ref)); 702 if (kUseBakerReadBarrier) { 703 DCHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr()) 704 << " " << to_ref << " " << to_ref->GetReadBarrierPointer() 705 << " is_marked=" << IsMarked(to_ref); 706 } 707 // Scan ref fields. 708 Scan(to_ref); 709 // Mark the gray ref as white or black. 710 if (kUseBakerReadBarrier) { 711 DCHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr()) 712 << " " << to_ref << " " << to_ref->GetReadBarrierPointer() 713 << " is_marked=" << IsMarked(to_ref); 714 } 715 if (to_ref->GetClass<kVerifyNone, kWithoutReadBarrier>()->IsTypeOfReferenceClass() && 716 to_ref->AsReference()->GetReferent<kWithoutReadBarrier>() != nullptr && 717 !IsInToSpace(to_ref->AsReference()->GetReferent<kWithoutReadBarrier>())) { 718 // Leave References gray so that GetReferent() will trigger RB. 
719 CHECK(to_ref->AsReference()->IsEnqueued()) << "Left unenqueued ref gray " << to_ref; 720 } else { 721 if (kUseBakerReadBarrier) { 722 if (region_space_->IsInToSpace(to_ref)) { 723 // If to-space, change from gray to white. 724 bool success = to_ref->AtomicSetReadBarrierPointer(ReadBarrier::GrayPtr(), 725 ReadBarrier::WhitePtr()); 726 CHECK(success) << "Must succeed as we won the race."; 727 CHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::WhitePtr()); 728 } else { 729 // If non-moving space/unevac from space, change from gray 730 // to black. We can't change gray to white because it's not 731 // safe to use CAS if two threads change values in opposite 732 // directions (A->B and B->A). So, we change it to black to 733 // indicate non-moving objects that have been marked 734 // through. Note we'd need to change from black to white 735 // later (concurrently). 736 bool success = to_ref->AtomicSetReadBarrierPointer(ReadBarrier::GrayPtr(), 737 ReadBarrier::BlackPtr()); 738 CHECK(success) << "Must succeed as we won the race."; 739 CHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::BlackPtr()); 740 } 741 } 742 } 743 if (ReadBarrier::kEnableToSpaceInvariantChecks || kIsDebugBuild) { 744 ConcurrentCopyingAssertToSpaceInvariantObjectVisitor visitor(this); 745 visitor(to_ref); 746 } 747 } 748 // Return true if the stack was empty. 749 return count == 0; 750} 751 752void ConcurrentCopying::CheckEmptyMarkQueue() { 753 if (!mark_queue_.IsEmpty()) { 754 while (!mark_queue_.IsEmpty()) { 755 mirror::Object* obj = mark_queue_.Dequeue(); 756 if (kUseBakerReadBarrier) { 757 mirror::Object* rb_ptr = obj->GetReadBarrierPointer(); 758 LOG(INFO) << "On mark queue : " << obj << " " << PrettyTypeOf(obj) << " rb_ptr=" << rb_ptr 759 << " is_marked=" << IsMarked(obj); 760 } else { 761 LOG(INFO) << "On mark queue : " << obj << " " << PrettyTypeOf(obj) 762 << " is_marked=" << IsMarked(obj); 763 } 764 } 765 LOG(FATAL) << "mark queue is not empty"; 766 } 767} 768 769void ConcurrentCopying::SweepSystemWeaks(Thread* self) { 770 TimingLogger::ScopedTiming split("SweepSystemWeaks", GetTimings()); 771 ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_); 772 Runtime::Current()->SweepSystemWeaks(IsMarkedCallback, this); 773} 774 775void ConcurrentCopying::Sweep(bool swap_bitmaps) { 776 { 777 TimingLogger::ScopedTiming t("MarkStackAsLive", GetTimings()); 778 accounting::ObjectStack* live_stack = heap_->GetLiveStack(); 779 if (kEnableFromSpaceAccountingCheck) { 780 CHECK_GE(live_stack_freeze_size_, live_stack->Size()); 781 } 782 heap_->MarkAllocStackAsLive(live_stack); 783 live_stack->Reset(); 784 } 785 CHECK(mark_queue_.IsEmpty()); 786 TimingLogger::ScopedTiming split("Sweep", GetTimings()); 787 for (const auto& space : GetHeap()->GetContinuousSpaces()) { 788 if (space->IsContinuousMemMapAllocSpace()) { 789 space::ContinuousMemMapAllocSpace* alloc_space = space->AsContinuousMemMapAllocSpace(); 790 if (space == region_space_ || immune_region_.ContainsSpace(space)) { 791 continue; 792 } 793 TimingLogger::ScopedTiming split2( 794 alloc_space->IsZygoteSpace() ? 
"SweepZygoteSpace" : "SweepAllocSpace", GetTimings()); 795 RecordFree(alloc_space->Sweep(swap_bitmaps)); 796 } 797 } 798 SweepLargeObjects(swap_bitmaps); 799} 800 801void ConcurrentCopying::SweepLargeObjects(bool swap_bitmaps) { 802 TimingLogger::ScopedTiming split("SweepLargeObjects", GetTimings()); 803 RecordFreeLOS(heap_->GetLargeObjectsSpace()->Sweep(swap_bitmaps)); 804} 805 806class ConcurrentCopyingClearBlackPtrsVisitor { 807 public: 808 explicit ConcurrentCopyingClearBlackPtrsVisitor(ConcurrentCopying* cc) 809 : collector_(cc) {} 810 void operator()(mirror::Object* obj) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) 811 SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) { 812 DCHECK(obj != nullptr); 813 DCHECK(collector_->heap_->GetMarkBitmap()->Test(obj)) << obj; 814 DCHECK_EQ(obj->GetReadBarrierPointer(), ReadBarrier::BlackPtr()) << obj; 815 obj->SetReadBarrierPointer(ReadBarrier::WhitePtr()); 816 DCHECK_EQ(obj->GetReadBarrierPointer(), ReadBarrier::WhitePtr()) << obj; 817 } 818 819 private: 820 ConcurrentCopying* const collector_; 821}; 822 823// Clear the black ptrs in non-moving objects back to white. 824void ConcurrentCopying::ClearBlackPtrs() { 825 CHECK(kUseBakerReadBarrier); 826 TimingLogger::ScopedTiming split("ClearBlackPtrs", GetTimings()); 827 ConcurrentCopyingClearBlackPtrsVisitor visitor(this); 828 for (auto& space : heap_->GetContinuousSpaces()) { 829 if (space == region_space_) { 830 continue; 831 } 832 accounting::ContinuousSpaceBitmap* mark_bitmap = space->GetMarkBitmap(); 833 if (kVerboseMode) { 834 LOG(INFO) << "ClearBlackPtrs: " << *space << " bitmap: " << *mark_bitmap; 835 } 836 mark_bitmap->VisitMarkedRange(reinterpret_cast<uintptr_t>(space->Begin()), 837 reinterpret_cast<uintptr_t>(space->Limit()), 838 visitor); 839 } 840 space::LargeObjectSpace* large_object_space = heap_->GetLargeObjectsSpace(); 841 large_object_space->GetMarkBitmap()->VisitMarkedRange( 842 reinterpret_cast<uintptr_t>(large_object_space->Begin()), 843 reinterpret_cast<uintptr_t>(large_object_space->End()), 844 visitor); 845 // Objects on the allocation stack? 846 if (ReadBarrier::kEnableReadBarrierInvariantChecks || kIsDebugBuild) { 847 size_t count = GetAllocationStack()->Size(); 848 auto* it = GetAllocationStack()->Begin(); 849 auto* end = GetAllocationStack()->End(); 850 for (size_t i = 0; i < count; ++i, ++it) { 851 CHECK_LT(it, end); 852 mirror::Object* obj = it->AsMirrorPtr(); 853 if (obj != nullptr) { 854 // Must have been cleared above. 855 CHECK_EQ(obj->GetReadBarrierPointer(), ReadBarrier::WhitePtr()) << obj; 856 } 857 } 858 } 859} 860 861void ConcurrentCopying::ReclaimPhase() { 862 TimingLogger::ScopedTiming split("ReclaimPhase", GetTimings()); 863 if (kVerboseMode) { 864 LOG(INFO) << "GC ReclaimPhase"; 865 } 866 Thread* self = Thread::Current(); 867 868 { 869 // Double-check that the mark stack is empty. 870 // Note: need to set this after VerifyNoFromSpaceRef(). 871 is_asserting_to_space_invariant_ = false; 872 QuasiAtomic::ThreadFenceForConstructor(); 873 if (kVerboseMode) { 874 LOG(INFO) << "Issue an empty check point. "; 875 } 876 IssueEmptyCheckpoint(); 877 // Disable the check. 878 is_mark_queue_push_disallowed_.StoreSequentiallyConsistent(0); 879 CheckEmptyMarkQueue(); 880 } 881 882 { 883 // Record freed objects. 884 TimingLogger::ScopedTiming split2("RecordFree", GetTimings()); 885 // Don't include thread-locals that are in the to-space. 
886 uint64_t from_bytes = region_space_->GetBytesAllocatedInFromSpace(); 887 uint64_t from_objects = region_space_->GetObjectsAllocatedInFromSpace(); 888 uint64_t unevac_from_bytes = region_space_->GetBytesAllocatedInUnevacFromSpace(); 889 uint64_t unevac_from_objects = region_space_->GetObjectsAllocatedInUnevacFromSpace(); 890 uint64_t to_bytes = bytes_moved_.LoadSequentiallyConsistent(); 891 uint64_t to_objects = objects_moved_.LoadSequentiallyConsistent(); 892 if (kEnableFromSpaceAccountingCheck) { 893 CHECK_EQ(from_space_num_objects_at_first_pause_, from_objects + unevac_from_objects); 894 CHECK_EQ(from_space_num_bytes_at_first_pause_, from_bytes + unevac_from_bytes); 895 } 896 CHECK_LE(to_objects, from_objects); 897 CHECK_LE(to_bytes, from_bytes); 898 int64_t freed_bytes = from_bytes - to_bytes; 899 int64_t freed_objects = from_objects - to_objects; 900 if (kVerboseMode) { 901 LOG(INFO) << "RecordFree:" 902 << " from_bytes=" << from_bytes << " from_objects=" << from_objects 903 << " unevac_from_bytes=" << unevac_from_bytes << " unevac_from_objects=" << unevac_from_objects 904 << " to_bytes=" << to_bytes << " to_objects=" << to_objects 905 << " freed_bytes=" << freed_bytes << " freed_objects=" << freed_objects 906 << " from_space size=" << region_space_->FromSpaceSize() 907 << " unevac_from_space size=" << region_space_->UnevacFromSpaceSize() 908 << " to_space size=" << region_space_->ToSpaceSize(); 909 LOG(INFO) << "(before) num_bytes_allocated=" << heap_->num_bytes_allocated_.LoadSequentiallyConsistent(); 910 } 911 RecordFree(ObjectBytePair(freed_objects, freed_bytes)); 912 if (kVerboseMode) { 913 LOG(INFO) << "(after) num_bytes_allocated=" << heap_->num_bytes_allocated_.LoadSequentiallyConsistent(); 914 } 915 } 916 917 { 918 TimingLogger::ScopedTiming split3("ComputeUnevacFromSpaceLiveRatio", GetTimings()); 919 ComputeUnevacFromSpaceLiveRatio(); 920 } 921 922 { 923 TimingLogger::ScopedTiming split4("ClearFromSpace", GetTimings()); 924 region_space_->ClearFromSpace(); 925 } 926 927 { 928 WriterMutexLock mu(self, *Locks::heap_bitmap_lock_); 929 if (kUseBakerReadBarrier) { 930 ClearBlackPtrs(); 931 } 932 Sweep(false); 933 SwapBitmaps(); 934 heap_->UnBindBitmaps(); 935 936 // Remove bitmaps for the immune spaces. 937 while (!cc_bitmaps_.empty()) { 938 accounting::ContinuousSpaceBitmap* cc_bitmap = cc_bitmaps_.back(); 939 cc_heap_bitmap_->RemoveContinuousSpaceBitmap(cc_bitmap); 940 delete cc_bitmap; 941 cc_bitmaps_.pop_back(); 942 } 943 region_space_bitmap_ = nullptr; 944 } 945 946 if (kVerboseMode) { 947 LOG(INFO) << "GC end of ReclaimPhase"; 948 } 949} 950 951class ConcurrentCopyingComputeUnevacFromSpaceLiveRatioVisitor { 952 public: 953 explicit ConcurrentCopyingComputeUnevacFromSpaceLiveRatioVisitor(ConcurrentCopying* cc) 954 : collector_(cc) {} 955 void operator()(mirror::Object* ref) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) 956 SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) { 957 DCHECK(ref != nullptr); 958 DCHECK(collector_->region_space_bitmap_->Test(ref)) << ref; 959 DCHECK(collector_->region_space_->IsInUnevacFromSpace(ref)) << ref; 960 if (kUseBakerReadBarrier) { 961 DCHECK_EQ(ref->GetReadBarrierPointer(), ReadBarrier::BlackPtr()) << ref; 962 // Clear the black ptr. 
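      // Unevac from-space objects were blackened in ProcessMarkStack() to
      // mark them through; they are reset to white here (objects in the
      // non-moving spaces are reset in ClearBlackPtrs()), so the heap is
      // all-white again before the next cycle. The rounded-up object size is
      // then added to the region's live bytes via AddLiveBytes() below, which
      // is the "live ratio" the function name refers to.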
963 ref->SetReadBarrierPointer(ReadBarrier::WhitePtr()); 964 } 965 size_t obj_size = ref->SizeOf(); 966 size_t alloc_size = RoundUp(obj_size, space::RegionSpace::kAlignment); 967 collector_->region_space_->AddLiveBytes(ref, alloc_size); 968 } 969 970 private: 971 ConcurrentCopying* collector_; 972}; 973 974// Compute how much live objects are left in regions. 975void ConcurrentCopying::ComputeUnevacFromSpaceLiveRatio() { 976 region_space_->AssertAllRegionLiveBytesZeroOrCleared(); 977 ConcurrentCopyingComputeUnevacFromSpaceLiveRatioVisitor visitor(this); 978 region_space_bitmap_->VisitMarkedRange(reinterpret_cast<uintptr_t>(region_space_->Begin()), 979 reinterpret_cast<uintptr_t>(region_space_->Limit()), 980 visitor); 981} 982 983// Assert the to-space invariant. 984void ConcurrentCopying::AssertToSpaceInvariant(mirror::Object* obj, MemberOffset offset, 985 mirror::Object* ref) { 986 CHECK(heap_->collector_type_ == kCollectorTypeCC) << static_cast<size_t>(heap_->collector_type_); 987 if (is_asserting_to_space_invariant_) { 988 if (region_space_->IsInToSpace(ref)) { 989 // OK. 990 return; 991 } else if (region_space_->IsInUnevacFromSpace(ref)) { 992 CHECK(region_space_bitmap_->Test(ref)) << ref; 993 } else if (region_space_->IsInFromSpace(ref)) { 994 // Not OK. Do extra logging. 995 if (obj != nullptr) { 996 if (kUseBakerReadBarrier) { 997 LOG(INFO) << "holder=" << obj << " " << PrettyTypeOf(obj) 998 << " holder rb_ptr=" << obj->GetReadBarrierPointer(); 999 } else { 1000 LOG(INFO) << "holder=" << obj << " " << PrettyTypeOf(obj); 1001 } 1002 if (region_space_->IsInFromSpace(obj)) { 1003 LOG(INFO) << "holder is in the from-space."; 1004 } else if (region_space_->IsInToSpace(obj)) { 1005 LOG(INFO) << "holder is in the to-space."; 1006 } else if (region_space_->IsInUnevacFromSpace(obj)) { 1007 LOG(INFO) << "holder is in the unevac from-space."; 1008 if (region_space_bitmap_->Test(obj)) { 1009 LOG(INFO) << "holder is marked in the region space bitmap."; 1010 } else { 1011 LOG(INFO) << "holder is not marked in the region space bitmap."; 1012 } 1013 } else { 1014 // In a non-moving space. 1015 if (immune_region_.ContainsObject(obj)) { 1016 LOG(INFO) << "holder is in the image or the zygote space."; 1017 accounting::ContinuousSpaceBitmap* cc_bitmap = 1018 cc_heap_bitmap_->GetContinuousSpaceBitmap(obj); 1019 CHECK(cc_bitmap != nullptr) 1020 << "An immune space object must have a bitmap."; 1021 if (cc_bitmap->Test(obj)) { 1022 LOG(INFO) << "holder is marked in the bit map."; 1023 } else { 1024 LOG(INFO) << "holder is NOT marked in the bit map."; 1025 } 1026 } else { 1027 LOG(INFO) << "holder is in a non-moving (or main) space."; 1028 accounting::ContinuousSpaceBitmap* mark_bitmap = 1029 heap_mark_bitmap_->GetContinuousSpaceBitmap(obj); 1030 accounting::LargeObjectBitmap* los_bitmap = 1031 heap_mark_bitmap_->GetLargeObjectBitmap(obj); 1032 CHECK(los_bitmap != nullptr) << "LOS bitmap covers the entire address range"; 1033 bool is_los = mark_bitmap == nullptr; 1034 if (!is_los && mark_bitmap->Test(obj)) { 1035 LOG(INFO) << "holder is marked in the mark bit map."; 1036 } else if (is_los && los_bitmap->Test(obj)) { 1037 LOG(INFO) << "holder is marked in the los bit map."; 1038 } else { 1039 // If ref is on the allocation stack, then it is considered 1040 // mark/alive (but not necessarily on the live stack.) 
1041 if (IsOnAllocStack(obj)) { 1042 LOG(INFO) << "holder is on the alloc stack."; 1043 } else { 1044 LOG(INFO) << "holder is not marked or on the alloc stack."; 1045 } 1046 } 1047 } 1048 } 1049 LOG(INFO) << "offset=" << offset.SizeValue(); 1050 } 1051 CHECK(false) << "Found from-space ref " << ref << " " << PrettyTypeOf(ref); 1052 } else { 1053 // In a non-moving spaces. Check that the ref is marked. 1054 if (immune_region_.ContainsObject(ref)) { 1055 accounting::ContinuousSpaceBitmap* cc_bitmap = 1056 cc_heap_bitmap_->GetContinuousSpaceBitmap(ref); 1057 CHECK(cc_bitmap != nullptr) 1058 << "An immune space ref must have a bitmap. " << ref; 1059 if (kUseBakerReadBarrier) { 1060 CHECK(cc_bitmap->Test(ref)) 1061 << "Unmarked immune space ref. obj=" << obj << " rb_ptr=" 1062 << obj->GetReadBarrierPointer() << " ref=" << ref; 1063 } else { 1064 CHECK(cc_bitmap->Test(ref)) 1065 << "Unmarked immune space ref. obj=" << obj << " ref=" << ref; 1066 } 1067 } else { 1068 accounting::ContinuousSpaceBitmap* mark_bitmap = 1069 heap_mark_bitmap_->GetContinuousSpaceBitmap(ref); 1070 accounting::LargeObjectBitmap* los_bitmap = 1071 heap_mark_bitmap_->GetLargeObjectBitmap(ref); 1072 CHECK(los_bitmap != nullptr) << "LOS bitmap covers the entire address range"; 1073 bool is_los = mark_bitmap == nullptr; 1074 if ((!is_los && mark_bitmap->Test(ref)) || 1075 (is_los && los_bitmap->Test(ref))) { 1076 // OK. 1077 } else { 1078 // If ref is on the allocation stack, then it may not be 1079 // marked live, but considered marked/alive (but not 1080 // necessarily on the live stack). 1081 CHECK(IsOnAllocStack(ref)) << "Unmarked ref that's not on the allocation stack. " 1082 << "obj=" << obj << " ref=" << ref; 1083 } 1084 } 1085 } 1086 } 1087} 1088 1089void ConcurrentCopying::ProcessRootCallback(mirror::Object** root, void* arg, 1090 const RootInfo& /*root_info*/) { 1091 reinterpret_cast<ConcurrentCopying*>(arg)->Process(root); 1092} 1093 1094// Used to scan ref fields of an object. 1095class ConcurrentCopyingRefFieldsVisitor { 1096 public: 1097 explicit ConcurrentCopyingRefFieldsVisitor(ConcurrentCopying* collector) 1098 : collector_(collector) {} 1099 1100 void operator()(mirror::Object* obj, MemberOffset offset, bool /* is_static */) 1101 const ALWAYS_INLINE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) 1102 SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) { 1103 collector_->Process(obj, offset); 1104 } 1105 1106 void operator()(mirror::Class* klass, mirror::Reference* ref) const 1107 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) ALWAYS_INLINE { 1108 CHECK(klass->IsTypeOfReferenceClass()); 1109 collector_->DelayReferenceReferent(klass, ref); 1110 } 1111 1112 private: 1113 ConcurrentCopying* const collector_; 1114}; 1115 1116// Scan ref fields of an object. 1117void ConcurrentCopying::Scan(mirror::Object* to_ref) { 1118 DCHECK(!region_space_->IsInFromSpace(to_ref)); 1119 ConcurrentCopyingRefFieldsVisitor visitor(this); 1120 to_ref->VisitReferences<true>(visitor, visitor); 1121} 1122 1123// Process a field. 1124inline void ConcurrentCopying::Process(mirror::Object* obj, MemberOffset offset) { 1125 mirror::Object* ref = obj->GetFieldObject<mirror::Object, kVerifyNone, kWithoutReadBarrier, false>(offset); 1126 if (ref == nullptr || region_space_->IsInToSpace(ref)) { 1127 return; 1128 } 1129 mirror::Object* to_ref = Mark(ref); 1130 if (to_ref == ref) { 1131 return; 1132 } 1133 // This may fail if the mutator writes to the field at the same time. But it's ok. 
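  // The field is updated with a weak CAS and no write barrier: to_ref is only
  // installed if the slot still holds the from-space ref that was read above.
  // If the mutator has already overwritten the slot (with a to-space ref, per
  // the to-space invariant), the update is simply skipped; if the weak CAS
  // fails spuriously while the value is unchanged, it is retried.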
  mirror::Object* expected_ref = ref;
  mirror::Object* new_ref = to_ref;
  do {
    if (expected_ref !=
        obj->GetFieldObject<mirror::Object, kVerifyNone, kWithoutReadBarrier, false>(offset)) {
      // It was updated by the mutator.
      break;
    }
  } while (!obj->CasFieldWeakSequentiallyConsistentObjectWithoutWriteBarrier<false, false, kVerifyNone>(
      offset, expected_ref, new_ref));
}

// Process a root.
void ConcurrentCopying::Process(mirror::Object** root) {
  mirror::Object* ref = *root;
  if (ref == nullptr || region_space_->IsInToSpace(ref)) {
    return;
  }
  mirror::Object* to_ref = Mark(ref);
  if (to_ref == ref) {
    return;
  }
  Atomic<mirror::Object*>* addr = reinterpret_cast<Atomic<mirror::Object*>*>(root);
  mirror::Object* expected_ref = ref;
  mirror::Object* new_ref = to_ref;
  do {
    if (expected_ref != addr->LoadRelaxed()) {
      // It was updated by the mutator.
      break;
    }
  } while (!addr->CompareExchangeWeakSequentiallyConsistent(expected_ref, new_ref));
}

// Fill the given memory block with a dummy object. Used to fill in
// copies of objects that were lost in a race.
void ConcurrentCopying::FillWithDummyObject(mirror::Object* dummy_obj, size_t byte_size) {
  CHECK(IsAligned<kObjectAlignment>(byte_size));
  memset(dummy_obj, 0, byte_size);
  mirror::Class* int_array_class = mirror::IntArray::GetArrayClass();
  CHECK(int_array_class != nullptr);
  AssertToSpaceInvariant(nullptr, MemberOffset(0), int_array_class);
  size_t component_size = int_array_class->GetComponentSize();
  CHECK_EQ(component_size, sizeof(int32_t));
  size_t data_offset = mirror::Array::DataOffset(component_size).SizeValue();
  if (data_offset > byte_size) {
    // An int array is too big. Use java.lang.Object.
    mirror::Class* java_lang_Object = WellKnownClasses::ToClass(WellKnownClasses::java_lang_Object);
    AssertToSpaceInvariant(nullptr, MemberOffset(0), java_lang_Object);
    CHECK_EQ(byte_size, java_lang_Object->GetObjectSize());
    dummy_obj->SetClass(java_lang_Object);
    CHECK_EQ(byte_size, dummy_obj->SizeOf());
  } else {
    // Use an int array.
    dummy_obj->SetClass(int_array_class);
    CHECK(dummy_obj->IsArrayInstance());
    int32_t length = (byte_size - data_offset) / component_size;
    dummy_obj->AsArray()->SetLength(length);
    CHECK_EQ(dummy_obj->AsArray()->GetLength(), length)
        << "byte_size=" << byte_size << " length=" << length
        << " component_size=" << component_size << " data_offset=" << data_offset;
    CHECK_EQ(byte_size, dummy_obj->SizeOf())
        << "byte_size=" << byte_size << " length=" << length
        << " component_size=" << component_size << " data_offset=" << data_offset;
  }
}

// Reuse memory blocks that held copies of objects lost in a race.
mirror::Object* ConcurrentCopying::AllocateInSkippedBlock(size_t alloc_size) {
  // Try to reuse the blocks that were unused due to CAS failures.
  CHECK(IsAligned<space::RegionSpace::kAlignment>(alloc_size));
  Thread* self = Thread::Current();
  size_t min_object_size = RoundUp(sizeof(mirror::Object), space::RegionSpace::kAlignment);
  MutexLock mu(self, skipped_blocks_lock_);
  auto it = skipped_blocks_map_.lower_bound(alloc_size);
  if (it == skipped_blocks_map_.end()) {
    // Not found.
    return nullptr;
  }
  {
    size_t byte_size = it->first;
    CHECK_GE(byte_size, alloc_size);
    if (byte_size > alloc_size && byte_size - alloc_size < min_object_size) {
      // If the remainder would be too small for a dummy object, retry with a larger request size.
      it = skipped_blocks_map_.lower_bound(alloc_size + min_object_size);
      if (it == skipped_blocks_map_.end()) {
        // Not found.
        return nullptr;
      }
      CHECK(IsAligned<space::RegionSpace::kAlignment>(it->first - alloc_size));
      CHECK_GE(it->first - alloc_size, min_object_size)
          << "byte_size=" << byte_size << " it->first=" << it->first << " alloc_size=" << alloc_size;
    }
  }
  // Found a block.
  CHECK(it != skipped_blocks_map_.end());
  size_t byte_size = it->first;
  uint8_t* addr = it->second;
  CHECK_GE(byte_size, alloc_size);
  CHECK(region_space_->IsInToSpace(reinterpret_cast<mirror::Object*>(addr)));
  CHECK(IsAligned<space::RegionSpace::kAlignment>(byte_size));
  if (kVerboseMode) {
    LOG(INFO) << "Reusing skipped bytes : " << reinterpret_cast<void*>(addr) << ", " << byte_size;
  }
  skipped_blocks_map_.erase(it);
  memset(addr, 0, byte_size);
  if (byte_size > alloc_size) {
    // Return the remainder to the map.
    CHECK(IsAligned<space::RegionSpace::kAlignment>(byte_size - alloc_size));
    CHECK_GE(byte_size - alloc_size, min_object_size);
    FillWithDummyObject(reinterpret_cast<mirror::Object*>(addr + alloc_size),
                        byte_size - alloc_size);
    CHECK(region_space_->IsInToSpace(reinterpret_cast<mirror::Object*>(addr + alloc_size)));
    skipped_blocks_map_.insert(std::make_pair(byte_size - alloc_size, addr + alloc_size));
  }
  return reinterpret_cast<mirror::Object*>(addr);
}

mirror::Object* ConcurrentCopying::Copy(mirror::Object* from_ref) {
  DCHECK(region_space_->IsInFromSpace(from_ref));
  // No read barrier to avoid nested RB that might violate the to-space
  // invariant. Note that from_ref is a from space ref so the SizeOf()
  // call will access the from-space meta objects, but it's ok and necessary.
  size_t obj_size = from_ref->SizeOf<kDefaultVerifyFlags, kWithoutReadBarrier>();
  size_t region_space_alloc_size = RoundUp(obj_size, space::RegionSpace::kAlignment);
  size_t region_space_bytes_allocated = 0U;
  size_t non_moving_space_bytes_allocated = 0U;
  size_t bytes_allocated = 0U;
  mirror::Object* to_ref = region_space_->AllocNonvirtual<true>(
      region_space_alloc_size, &region_space_bytes_allocated, nullptr);
  bytes_allocated = region_space_bytes_allocated;
  if (to_ref != nullptr) {
    DCHECK_EQ(region_space_alloc_size, region_space_bytes_allocated);
  }
  bool fall_back_to_non_moving = false;
  if (UNLIKELY(to_ref == nullptr)) {
    // Failed to allocate in the region space. Try the skipped blocks.
    to_ref = AllocateInSkippedBlock(region_space_alloc_size);
    if (to_ref != nullptr) {
      // Succeeded in allocating from a skipped block.
      if (heap_->use_tlab_) {
        // This is necessary for the tlab case as it's not accounted in the space.
        region_space_->RecordAlloc(to_ref);
      }
      bytes_allocated = region_space_alloc_size;
    } else {
      // Fall back to the non-moving space.
      fall_back_to_non_moving = true;
      if (kVerboseMode) {
        LOG(INFO) << "Out of memory in the to-space. Fall back to non-moving. 
skipped_bytes=" 1283 << to_space_bytes_skipped_.LoadSequentiallyConsistent() 1284 << " skipped_objects=" << to_space_objects_skipped_.LoadSequentiallyConsistent(); 1285 } 1286 fall_back_to_non_moving = true; 1287 to_ref = heap_->non_moving_space_->Alloc(Thread::Current(), obj_size, 1288 &non_moving_space_bytes_allocated, nullptr); 1289 CHECK(to_ref != nullptr) << "Fall-back non-moving space allocation failed"; 1290 bytes_allocated = non_moving_space_bytes_allocated; 1291 // Mark it in the mark bitmap. 1292 accounting::ContinuousSpaceBitmap* mark_bitmap = 1293 heap_mark_bitmap_->GetContinuousSpaceBitmap(to_ref); 1294 CHECK(mark_bitmap != nullptr); 1295 CHECK(!mark_bitmap->AtomicTestAndSet(to_ref)); 1296 } 1297 } 1298 DCHECK(to_ref != nullptr); 1299 1300 // Attempt to install the forward pointer. This is in a loop as the 1301 // lock word atomic write can fail. 1302 while (true) { 1303 // Copy the object. TODO: copy only the lockword in the second iteration and on? 1304 memcpy(to_ref, from_ref, obj_size); 1305 // Set the gray ptr. 1306 if (kUseBakerReadBarrier) { 1307 to_ref->SetReadBarrierPointer(ReadBarrier::GrayPtr()); 1308 } 1309 1310 LockWord old_lock_word = to_ref->GetLockWord(false); 1311 1312 if (old_lock_word.GetState() == LockWord::kForwardingAddress) { 1313 // Lost the race. Another thread (either GC or mutator) stored 1314 // the forwarding pointer first. Make the lost copy (to_ref) 1315 // look like a valid but dead (dummy) object and keep it for 1316 // future reuse. 1317 FillWithDummyObject(to_ref, bytes_allocated); 1318 if (!fall_back_to_non_moving) { 1319 DCHECK(region_space_->IsInToSpace(to_ref)); 1320 if (bytes_allocated > space::RegionSpace::kRegionSize) { 1321 // Free the large alloc. 1322 region_space_->FreeLarge(to_ref, bytes_allocated); 1323 } else { 1324 // Record the lost copy for later reuse. 1325 heap_->num_bytes_allocated_.FetchAndAddSequentiallyConsistent(bytes_allocated); 1326 to_space_bytes_skipped_.FetchAndAddSequentiallyConsistent(bytes_allocated); 1327 to_space_objects_skipped_.FetchAndAddSequentiallyConsistent(1); 1328 MutexLock mu(Thread::Current(), skipped_blocks_lock_); 1329 skipped_blocks_map_.insert(std::make_pair(bytes_allocated, 1330 reinterpret_cast<uint8_t*>(to_ref))); 1331 } 1332 } else { 1333 DCHECK(heap_->non_moving_space_->HasAddress(to_ref)); 1334 DCHECK_EQ(bytes_allocated, non_moving_space_bytes_allocated); 1335 // Free the non-moving-space chunk. 1336 accounting::ContinuousSpaceBitmap* mark_bitmap = 1337 heap_mark_bitmap_->GetContinuousSpaceBitmap(to_ref); 1338 CHECK(mark_bitmap != nullptr); 1339 CHECK(mark_bitmap->Clear(to_ref)); 1340 heap_->non_moving_space_->Free(Thread::Current(), to_ref); 1341 } 1342 1343 // Get the winner's forward ptr. 1344 mirror::Object* lost_fwd_ptr = to_ref; 1345 to_ref = reinterpret_cast<mirror::Object*>(old_lock_word.ForwardingAddress()); 1346 CHECK(to_ref != nullptr); 1347 CHECK_NE(to_ref, lost_fwd_ptr); 1348 CHECK(region_space_->IsInToSpace(to_ref) || heap_->non_moving_space_->HasAddress(to_ref)); 1349 CHECK_NE(to_ref->GetLockWord(false).GetState(), LockWord::kForwardingAddress); 1350 return to_ref; 1351 } 1352 1353 LockWord new_lock_word = LockWord::FromForwardingAddress(reinterpret_cast<size_t>(to_ref)); 1354 1355 // Try to atomically write the fwd ptr. 1356 bool success = from_ref->CasLockWordWeakSequentiallyConsistent(old_lock_word, new_lock_word); 1357 if (LIKELY(success)) { 1358 // The CAS succeeded. 
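      // Winning the race makes this copy the canonical to-space object for
      // from_ref: record the moved object/bytes, leave it gray and push it
      // onto the mark stack so that its fields get scanned, then return it.
      // A thread that loses the race finds the winner's forwarding address in
      // the lock word and discards its own copy (the kForwardingAddress
      // branch above).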
1359 objects_moved_.FetchAndAddSequentiallyConsistent(1); 1360 bytes_moved_.FetchAndAddSequentiallyConsistent(region_space_alloc_size); 1361 if (LIKELY(!fall_back_to_non_moving)) { 1362 DCHECK(region_space_->IsInToSpace(to_ref)); 1363 } else { 1364 DCHECK(heap_->non_moving_space_->HasAddress(to_ref)); 1365 DCHECK_EQ(bytes_allocated, non_moving_space_bytes_allocated); 1366 } 1367 if (kUseBakerReadBarrier) { 1368 DCHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr()); 1369 } 1370 DCHECK(GetFwdPtr(from_ref) == to_ref); 1371 CHECK_NE(to_ref->GetLockWord(false).GetState(), LockWord::kForwardingAddress); 1372 PushOntoMarkStack<true>(to_ref); 1373 return to_ref; 1374 } else { 1375 // The CAS failed. It may have lost the race or may have failed 1376 // due to monitor/hashcode ops. Either way, retry. 1377 } 1378 } 1379} 1380 1381mirror::Object* ConcurrentCopying::IsMarked(mirror::Object* from_ref) { 1382 DCHECK(from_ref != nullptr); 1383 space::RegionSpace::RegionType rtype = region_space_->GetRegionType(from_ref); 1384 if (rtype == space::RegionSpace::RegionType::kRegionTypeToSpace) { 1385 // It's already marked. 1386 return from_ref; 1387 } 1388 mirror::Object* to_ref; 1389 if (rtype == space::RegionSpace::RegionType::kRegionTypeFromSpace) { 1390 to_ref = GetFwdPtr(from_ref); 1391 DCHECK(to_ref == nullptr || region_space_->IsInToSpace(to_ref) || 1392 heap_->non_moving_space_->HasAddress(to_ref)) 1393 << "from_ref=" << from_ref << " to_ref=" << to_ref; 1394 } else if (rtype == space::RegionSpace::RegionType::kRegionTypeUnevacFromSpace) { 1395 if (region_space_bitmap_->Test(from_ref)) { 1396 to_ref = from_ref; 1397 } else { 1398 to_ref = nullptr; 1399 } 1400 } else { 1401 // from_ref is in a non-moving space. 1402 if (immune_region_.ContainsObject(from_ref)) { 1403 accounting::ContinuousSpaceBitmap* cc_bitmap = 1404 cc_heap_bitmap_->GetContinuousSpaceBitmap(from_ref); 1405 DCHECK(cc_bitmap != nullptr) 1406 << "An immune space object must have a bitmap"; 1407 if (kIsDebugBuild) { 1408 DCHECK(heap_mark_bitmap_->GetContinuousSpaceBitmap(from_ref)->Test(from_ref)) 1409 << "Immune space object must be already marked"; 1410 } 1411 if (cc_bitmap->Test(from_ref)) { 1412 // Already marked. 1413 to_ref = from_ref; 1414 } else { 1415 // Newly marked. 1416 to_ref = nullptr; 1417 } 1418 } else { 1419 // Non-immune non-moving space. Use the mark bitmap. 1420 accounting::ContinuousSpaceBitmap* mark_bitmap = 1421 heap_mark_bitmap_->GetContinuousSpaceBitmap(from_ref); 1422 accounting::LargeObjectBitmap* los_bitmap = 1423 heap_mark_bitmap_->GetLargeObjectBitmap(from_ref); 1424 CHECK(los_bitmap != nullptr) << "LOS bitmap covers the entire address range"; 1425 bool is_los = mark_bitmap == nullptr; 1426 if (!is_los && mark_bitmap->Test(from_ref)) { 1427 // Already marked. 1428 to_ref = from_ref; 1429 } else if (is_los && los_bitmap->Test(from_ref)) { 1430 // Already marked in LOS. 1431 to_ref = from_ref; 1432 } else { 1433 // Not marked. 1434 if (IsOnAllocStack(from_ref)) { 1435 // If on the allocation stack, it's considered marked. 1436 to_ref = from_ref; 1437 } else { 1438 // Not marked. 
          to_ref = nullptr;
        }
      }
    }
  }
  return to_ref;
}

bool ConcurrentCopying::IsOnAllocStack(mirror::Object* ref) {
  QuasiAtomic::ThreadFenceAcquire();
  accounting::ObjectStack* alloc_stack = GetAllocationStack();
  return alloc_stack->Contains(ref);
}

mirror::Object* ConcurrentCopying::Mark(mirror::Object* from_ref) {
  if (from_ref == nullptr) {
    return nullptr;
  }
  DCHECK(from_ref != nullptr);
  DCHECK(heap_->collector_type_ == kCollectorTypeCC);
  space::RegionSpace::RegionType rtype = region_space_->GetRegionType(from_ref);
  if (rtype == space::RegionSpace::RegionType::kRegionTypeToSpace) {
    // It's already marked.
    return from_ref;
  }
  mirror::Object* to_ref;
  if (rtype == space::RegionSpace::RegionType::kRegionTypeFromSpace) {
    to_ref = GetFwdPtr(from_ref);
    if (kUseBakerReadBarrier) {
      DCHECK(to_ref != ReadBarrier::GrayPtr()) << "from_ref=" << from_ref << " to_ref=" << to_ref;
    }
    if (to_ref == nullptr) {
      // It isn't marked yet. Mark it by copying it to the to-space.
      to_ref = Copy(from_ref);
    }
    DCHECK(region_space_->IsInToSpace(to_ref) || heap_->non_moving_space_->HasAddress(to_ref))
        << "from_ref=" << from_ref << " to_ref=" << to_ref;
  } else if (rtype == space::RegionSpace::RegionType::kRegionTypeUnevacFromSpace) {
    // This may or may not succeed, which is ok.
    if (kUseBakerReadBarrier) {
      from_ref->AtomicSetReadBarrierPointer(ReadBarrier::WhitePtr(), ReadBarrier::GrayPtr());
    }
    if (region_space_bitmap_->AtomicTestAndSet(from_ref)) {
      // Already marked.
      to_ref = from_ref;
    } else {
      // Newly marked.
      to_ref = from_ref;
      if (kUseBakerReadBarrier) {
        DCHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr());
      }
      PushOntoMarkStack<true>(to_ref);
    }
  } else {
    // from_ref is in a non-moving space.
    DCHECK(!region_space_->HasAddress(from_ref)) << from_ref;
    if (immune_region_.ContainsObject(from_ref)) {
      accounting::ContinuousSpaceBitmap* cc_bitmap =
          cc_heap_bitmap_->GetContinuousSpaceBitmap(from_ref);
      DCHECK(cc_bitmap != nullptr)
          << "An immune space object must have a bitmap";
      if (kIsDebugBuild) {
        DCHECK(heap_mark_bitmap_->GetContinuousSpaceBitmap(from_ref)->Test(from_ref))
            << "Immune space object must be already marked";
      }
      // This may or may not succeed, which is ok.
      if (kUseBakerReadBarrier) {
        from_ref->AtomicSetReadBarrierPointer(ReadBarrier::WhitePtr(), ReadBarrier::GrayPtr());
      }
      if (cc_bitmap->AtomicTestAndSet(from_ref)) {
        // Already marked.
        to_ref = from_ref;
      } else {
        // Newly marked.
        to_ref = from_ref;
        if (kUseBakerReadBarrier) {
          DCHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr());
        }
        PushOntoMarkStack<true>(to_ref);
      }
    } else {
      // Use the mark bitmap.
      accounting::ContinuousSpaceBitmap* mark_bitmap =
          heap_mark_bitmap_->GetContinuousSpaceBitmap(from_ref);
      accounting::LargeObjectBitmap* los_bitmap =
          heap_mark_bitmap_->GetLargeObjectBitmap(from_ref);
      CHECK(los_bitmap != nullptr) << "LOS bitmap covers the entire address range";
      bool is_los = mark_bitmap == nullptr;
      if (!is_los && mark_bitmap->Test(from_ref)) {
        // Already marked.
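        // Non-moving objects are marked in place, so the object itself is the
        // to-space reference.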
        to_ref = from_ref;
        if (kUseBakerReadBarrier) {
          DCHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr() ||
                 to_ref->GetReadBarrierPointer() == ReadBarrier::BlackPtr());
        }
      } else if (is_los && los_bitmap->Test(from_ref)) {
        // Already marked in LOS.
        to_ref = from_ref;
        if (kUseBakerReadBarrier) {
          DCHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr() ||
                 to_ref->GetReadBarrierPointer() == ReadBarrier::BlackPtr());
        }
      } else {
        // Not marked.
        if (IsOnAllocStack(from_ref)) {
          // If it's on the allocation stack, it's considered marked. Keep it white.
          to_ref = from_ref;
          // Objects on the allocation stack need not be marked.
          if (!is_los) {
            DCHECK(!mark_bitmap->Test(to_ref));
          } else {
            DCHECK(!los_bitmap->Test(to_ref));
          }
          if (kUseBakerReadBarrier) {
            DCHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::WhitePtr());
          }
        } else {
          // Neither marked nor on the allocation stack. Try to mark it.
          // This may or may not succeed, which is ok.
          if (kUseBakerReadBarrier) {
            from_ref->AtomicSetReadBarrierPointer(ReadBarrier::WhitePtr(), ReadBarrier::GrayPtr());
          }
          if (!is_los && mark_bitmap->AtomicTestAndSet(from_ref)) {
            // Already marked.
            to_ref = from_ref;
          } else if (is_los && los_bitmap->AtomicTestAndSet(from_ref)) {
            // Already marked in LOS.
            to_ref = from_ref;
          } else {
            // Newly marked.
            to_ref = from_ref;
            if (kUseBakerReadBarrier) {
              DCHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr());
            }
            PushOntoMarkStack<true>(to_ref);
          }
        }
      }
    }
  }
  return to_ref;
}

void ConcurrentCopying::FinishPhase() {
  region_space_ = nullptr;
  CHECK(mark_queue_.IsEmpty());
  mark_queue_.Clear();
  {
    MutexLock mu(Thread::Current(), skipped_blocks_lock_);
    skipped_blocks_map_.clear();
  }
  WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
  heap_->ClearMarkedObjects();
}

mirror::Object* ConcurrentCopying::IsMarkedCallback(mirror::Object* from_ref, void* arg) {
  return reinterpret_cast<ConcurrentCopying*>(arg)->IsMarked(from_ref);
}

bool ConcurrentCopying::IsHeapReferenceMarkedCallback(
    mirror::HeapReference<mirror::Object>* field, void* arg) {
  mirror::Object* from_ref = field->AsMirrorPtr();
  mirror::Object* to_ref = reinterpret_cast<ConcurrentCopying*>(arg)->IsMarked(from_ref);
  if (to_ref == nullptr) {
    return false;
  }
  if (from_ref != to_ref) {
    QuasiAtomic::ThreadFenceRelease();
    field->Assign(to_ref);
    QuasiAtomic::ThreadFenceSequentiallyConsistent();
  }
  return true;
}

mirror::Object* ConcurrentCopying::MarkCallback(mirror::Object* from_ref, void* arg) {
  return reinterpret_cast<ConcurrentCopying*>(arg)->Mark(from_ref);
}

void ConcurrentCopying::ProcessMarkStackCallback(void* arg) {
  reinterpret_cast<ConcurrentCopying*>(arg)->ProcessMarkStack();
}

void ConcurrentCopying::DelayReferenceReferent(mirror::Class* klass, mirror::Reference* reference) {
  heap_->GetReferenceProcessor()->DelayReferenceReferent(
      klass, reference, &IsHeapReferenceMarkedCallback, this);
}

void ConcurrentCopying::ProcessReferences(Thread* self, bool concurrent) {
  TimingLogger::ScopedTiming split("ProcessReferences", GetTimings());
  WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
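  // heap_bitmap_lock_ is held exclusively for the duration of reference processing.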
  GetHeap()->GetReferenceProcessor()->ProcessReferences(
      concurrent, GetTimings(), GetCurrentIteration()->GetClearSoftReferences(),
      &IsHeapReferenceMarkedCallback, &MarkCallback, &ProcessMarkStackCallback, this);
}

void ConcurrentCopying::RevokeAllThreadLocalBuffers() {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  region_space_->RevokeAllThreadLocalBuffers();
}

}  // namespace collector
}  // namespace gc
}  // namespace art