/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "concurrent_copying.h"

#include "art_field-inl.h"
#include "base/enums.h"
#include "base/histogram-inl.h"
#include "base/stl_util.h"
#include "base/systrace.h"
#include "debugger.h"
#include "gc/accounting/atomic_stack.h"
#include "gc/accounting/heap_bitmap-inl.h"
#include "gc/accounting/mod_union_table-inl.h"
#include "gc/accounting/read_barrier_table.h"
#include "gc/accounting/space_bitmap-inl.h"
#include "gc/gc_pause_listener.h"
#include "gc/reference_processor.h"
#include "gc/space/image_space.h"
#include "gc/space/space-inl.h"
#include "gc/verification.h"
#include "image-inl.h"
#include "intern_table.h"
#include "mirror/class-inl.h"
#include "mirror/object-inl.h"
#include "mirror/object-refvisitor-inl.h"
#include "scoped_thread_state_change-inl.h"
#include "thread-inl.h"
#include "thread_list.h"
#include "well_known_classes.h"

namespace art {
namespace gc {
namespace collector {

static constexpr size_t kDefaultGcMarkStackSize = 2 * MB;
// If kFilterModUnionCards is true then we attempt to filter out cards that don't need to be dirty
// in the mod-union table. Disabled since it does not seem to help the pause much.
static constexpr bool kFilterModUnionCards = kIsDebugBuild;
// If kDisallowReadBarrierDuringScan is true then the GC aborts if any read barriers occur during
// ConcurrentCopying::Scan. May be used to diagnose possibly unnecessary read barriers.
// Only enabled for kIsDebugBuild to avoid a performance hit.
static constexpr bool kDisallowReadBarrierDuringScan = kIsDebugBuild;
// Slow path mark stack size; increase this if the stack is getting full and it is causing
// performance problems.
static constexpr size_t kReadBarrierMarkStackSize = 512 * KB;
// Verify that there are no missing card marks.
static constexpr bool kVerifyNoMissingCardMarks = kIsDebugBuild;

ConcurrentCopying::ConcurrentCopying(Heap* heap,
                                     const std::string& name_prefix,
                                     bool measure_read_barrier_slow_path)
    : GarbageCollector(heap,
"" : " ") + 68 "concurrent copying"), 69 region_space_(nullptr), gc_barrier_(new Barrier(0)), 70 gc_mark_stack_(accounting::ObjectStack::Create("concurrent copying gc mark stack", 71 kDefaultGcMarkStackSize, 72 kDefaultGcMarkStackSize)), 73 rb_mark_bit_stack_(accounting::ObjectStack::Create("rb copying gc mark stack", 74 kReadBarrierMarkStackSize, 75 kReadBarrierMarkStackSize)), 76 rb_mark_bit_stack_full_(false), 77 mark_stack_lock_("concurrent copying mark stack lock", kMarkSweepMarkStackLock), 78 thread_running_gc_(nullptr), 79 is_marking_(false), 80 is_active_(false), 81 is_asserting_to_space_invariant_(false), 82 region_space_bitmap_(nullptr), 83 heap_mark_bitmap_(nullptr), 84 live_stack_freeze_size_(0), 85 from_space_num_objects_at_first_pause_(0), 86 from_space_num_bytes_at_first_pause_(0), 87 mark_stack_mode_(kMarkStackModeOff), 88 weak_ref_access_enabled_(true), 89 skipped_blocks_lock_("concurrent copying bytes blocks lock", kMarkSweepMarkStackLock), 90 measure_read_barrier_slow_path_(measure_read_barrier_slow_path), 91 mark_from_read_barrier_measurements_(false), 92 rb_slow_path_ns_(0), 93 rb_slow_path_count_(0), 94 rb_slow_path_count_gc_(0), 95 rb_slow_path_histogram_lock_("Read barrier histogram lock"), 96 rb_slow_path_time_histogram_("Mutator time in read barrier slow path", 500, 32), 97 rb_slow_path_count_total_(0), 98 rb_slow_path_count_gc_total_(0), 99 rb_table_(heap_->GetReadBarrierTable()), 100 force_evacuate_all_(false), 101 gc_grays_immune_objects_(false), 102 immune_gray_stack_lock_("concurrent copying immune gray stack lock", 103 kMarkSweepMarkStackLock) { 104 static_assert(space::RegionSpace::kRegionSize == accounting::ReadBarrierTable::kRegionSize, 105 "The region space size and the read barrier table region size must match"); 106 Thread* self = Thread::Current(); 107 { 108 ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_); 109 // Cache this so that we won't have to lock heap_bitmap_lock_ in 110 // Mark() which could cause a nested lock on heap_bitmap_lock_ 111 // when GC causes a RB while doing GC or a lock order violation 112 // (class_linker_lock_ and heap_bitmap_lock_). 113 heap_mark_bitmap_ = heap->GetMarkBitmap(); 114 } 115 { 116 MutexLock mu(self, mark_stack_lock_); 117 for (size_t i = 0; i < kMarkStackPoolSize; ++i) { 118 accounting::AtomicStack<mirror::Object>* mark_stack = 119 accounting::AtomicStack<mirror::Object>::Create( 120 "thread local mark stack", kMarkStackSize, kMarkStackSize); 121 pooled_mark_stacks_.push_back(mark_stack); 122 } 123 } 124} 125 126void ConcurrentCopying::MarkHeapReference(mirror::HeapReference<mirror::Object>* field, 127 bool do_atomic_update) { 128 if (UNLIKELY(do_atomic_update)) { 129 // Used to mark the referent in DelayReferenceReferent in transaction mode. 130 mirror::Object* from_ref = field->AsMirrorPtr(); 131 if (from_ref == nullptr) { 132 return; 133 } 134 mirror::Object* to_ref = Mark(from_ref); 135 if (from_ref != to_ref) { 136 do { 137 if (field->AsMirrorPtr() != from_ref) { 138 // Concurrently overwritten by a mutator. 139 break; 140 } 141 } while (!field->CasWeakRelaxed(from_ref, to_ref)); 142 } 143 } else { 144 // Used for preserving soft references, should be OK to not have a CAS here since there should be 145 // no other threads which can trigger read barriers on the same referent during reference 146 // processing. 
    field->Assign(Mark(field->AsMirrorPtr()));
  }
}

ConcurrentCopying::~ConcurrentCopying() {
  STLDeleteElements(&pooled_mark_stacks_);
}

void ConcurrentCopying::RunPhases() {
  CHECK(kUseBakerReadBarrier || kUseTableLookupReadBarrier);
  CHECK(!is_active_);
  is_active_ = true;
  Thread* self = Thread::Current();
  thread_running_gc_ = self;
  Locks::mutator_lock_->AssertNotHeld(self);
  {
    ReaderMutexLock mu(self, *Locks::mutator_lock_);
    InitializePhase();
  }
  FlipThreadRoots();
  {
    ReaderMutexLock mu(self, *Locks::mutator_lock_);
    MarkingPhase();
  }
  // Verify no from-space refs. This causes a pause.
  if (kEnableNoFromSpaceRefsVerification) {
    TimingLogger::ScopedTiming split("(Paused)VerifyNoFromSpaceReferences", GetTimings());
    ScopedPause pause(this, false);
    CheckEmptyMarkStack();
    if (kVerboseMode) {
      LOG(INFO) << "Verifying no from-space refs";
    }
    VerifyNoFromSpaceReferences();
    if (kVerboseMode) {
      LOG(INFO) << "Done verifying no from-space refs";
    }
    CheckEmptyMarkStack();
  }
  {
    ReaderMutexLock mu(self, *Locks::mutator_lock_);
    ReclaimPhase();
  }
  FinishPhase();
  CHECK(is_active_);
  is_active_ = false;
  thread_running_gc_ = nullptr;
}

void ConcurrentCopying::BindBitmaps() {
  Thread* self = Thread::Current();
  WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
  // Mark all of the spaces we never collect as immune.
  for (const auto& space : heap_->GetContinuousSpaces()) {
    if (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyNeverCollect ||
        space->GetGcRetentionPolicy() == space::kGcRetentionPolicyFullCollect) {
      CHECK(space->IsZygoteSpace() || space->IsImageSpace());
      immune_spaces_.AddSpace(space);
    } else if (space == region_space_) {
      // It is OK to clear the bitmap with mutators running since the only place it is read is
      // VisitObjects, which has exclusion with CC.
      region_space_bitmap_ = region_space_->GetMarkBitmap();
      region_space_bitmap_->Clear();
    }
  }
}

void ConcurrentCopying::InitializePhase() {
  TimingLogger::ScopedTiming split("InitializePhase", GetTimings());
  if (kVerboseMode) {
    LOG(INFO) << "GC InitializePhase";
    LOG(INFO) << "Region-space : " << reinterpret_cast<void*>(region_space_->Begin()) << "-"
              << reinterpret_cast<void*>(region_space_->Limit());
  }
  CheckEmptyMarkStack();
  if (kIsDebugBuild) {
    MutexLock mu(Thread::Current(), mark_stack_lock_);
    CHECK(false_gray_stack_.empty());
  }

  rb_mark_bit_stack_full_ = false;
  mark_from_read_barrier_measurements_ = measure_read_barrier_slow_path_;
  if (measure_read_barrier_slow_path_) {
    rb_slow_path_ns_.StoreRelaxed(0);
    rb_slow_path_count_.StoreRelaxed(0);
    rb_slow_path_count_gc_.StoreRelaxed(0);
  }

  immune_spaces_.Reset();
  bytes_moved_.StoreRelaxed(0);
  objects_moved_.StoreRelaxed(0);
  GcCause gc_cause = GetCurrentIteration()->GetGcCause();
  if (gc_cause == kGcCauseExplicit ||
      gc_cause == kGcCauseForNativeAlloc ||
      gc_cause == kGcCauseCollectorTransition ||
      GetCurrentIteration()->GetClearSoftReferences()) {
    force_evacuate_all_ = true;
  } else {
    force_evacuate_all_ = false;
  }
  if (kUseBakerReadBarrier) {
    updated_all_immune_objects_.StoreRelaxed(false);
    // GC may gray immune objects in the thread flip.
    gc_grays_immune_objects_ = true;
    if (kIsDebugBuild) {
      MutexLock mu(Thread::Current(), immune_gray_stack_lock_);
      DCHECK(immune_gray_stack_.empty());
    }
  }
  BindBitmaps();
  if (kVerboseMode) {
    LOG(INFO) << "force_evacuate_all=" << force_evacuate_all_;
    LOG(INFO) << "Largest immune region: " << immune_spaces_.GetLargestImmuneRegion().Begin()
              << "-" << immune_spaces_.GetLargestImmuneRegion().End();
    for (space::ContinuousSpace* space : immune_spaces_.GetSpaces()) {
      LOG(INFO) << "Immune space: " << *space;
    }
    LOG(INFO) << "GC end of InitializePhase";
  }
  // Mark all of the zygote large objects without graying them.
  MarkZygoteLargeObjects();
}

// Used to switch the thread roots of a thread from from-space refs to to-space refs.
class ConcurrentCopying::ThreadFlipVisitor : public Closure, public RootVisitor {
 public:
  ThreadFlipVisitor(ConcurrentCopying* concurrent_copying, bool use_tlab)
      : concurrent_copying_(concurrent_copying), use_tlab_(use_tlab) {
  }

  virtual void Run(Thread* thread) OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
    // Note: self is not necessarily equal to thread since thread may be suspended.
    Thread* self = Thread::Current();
    CHECK(thread == self || thread->IsSuspended() || thread->GetState() == kWaitingPerformingGc)
        << thread->GetState() << " thread " << thread << " self " << self;
    thread->SetIsGcMarkingAndUpdateEntrypoints(true);
    if (use_tlab_ && thread->HasTlab()) {
      if (ConcurrentCopying::kEnableFromSpaceAccountingCheck) {
        // This must come before the revoke.
        size_t thread_local_objects = thread->GetThreadLocalObjectsAllocated();
        concurrent_copying_->region_space_->RevokeThreadLocalBuffers(thread);
        reinterpret_cast<Atomic<size_t>*>(
            &concurrent_copying_->from_space_num_objects_at_first_pause_)->
                FetchAndAddSequentiallyConsistent(thread_local_objects);
      } else {
        concurrent_copying_->region_space_->RevokeThreadLocalBuffers(thread);
      }
    }
    if (kUseThreadLocalAllocationStack) {
      thread->RevokeThreadLocalAllocationStack();
    }
    ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
    // We can use the non-CAS VisitRoots functions below because we update thread-local GC roots
    // only.
    thread->VisitRoots(this);
    concurrent_copying_->GetBarrier().Pass(self);
  }

  void VisitRoots(mirror::Object*** roots,
                  size_t count,
                  const RootInfo& info ATTRIBUTE_UNUSED)
      REQUIRES_SHARED(Locks::mutator_lock_) {
    for (size_t i = 0; i < count; ++i) {
      mirror::Object** root = roots[i];
      mirror::Object* ref = *root;
      if (ref != nullptr) {
        mirror::Object* to_ref = concurrent_copying_->Mark(ref);
        if (to_ref != ref) {
          *root = to_ref;
        }
      }
    }
  }

  void VisitRoots(mirror::CompressedReference<mirror::Object>** roots,
                  size_t count,
                  const RootInfo& info ATTRIBUTE_UNUSED)
      REQUIRES_SHARED(Locks::mutator_lock_) {
    for (size_t i = 0; i < count; ++i) {
      mirror::CompressedReference<mirror::Object>* const root = roots[i];
      if (!root->IsNull()) {
        mirror::Object* ref = root->AsMirrorPtr();
        mirror::Object* to_ref = concurrent_copying_->Mark(ref);
        if (to_ref != ref) {
          root->Assign(to_ref);
        }
      }
    }
  }

 private:
  ConcurrentCopying* const concurrent_copying_;
  const bool use_tlab_;
};
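
// ThreadFlipVisitor above and FlipCallback below work as a pair inside
// Runtime::FlipThreadRoots(): FlipCallback runs once with the mutator lock exclusively held to
// set up the from-space and swap the stacks, while ThreadFlipVisitor runs for each thread to
// install the marking entrypoints and forward that thread's roots. Each ThreadFlipVisitor::Run()
// ends with GetBarrier().Pass(), which FlipThreadRoots() (further below) waits on via
// gc_barrier_->Increment().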
// Called back from Runtime::FlipThreadRoots() during a pause.
class ConcurrentCopying::FlipCallback : public Closure {
 public:
  explicit FlipCallback(ConcurrentCopying* concurrent_copying)
      : concurrent_copying_(concurrent_copying) {
  }

  virtual void Run(Thread* thread) OVERRIDE REQUIRES(Locks::mutator_lock_) {
    ConcurrentCopying* cc = concurrent_copying_;
    TimingLogger::ScopedTiming split("(Paused)FlipCallback", cc->GetTimings());
    // Note: self is not necessarily equal to thread since thread may be suspended.
    Thread* self = Thread::Current();
    if (kVerifyNoMissingCardMarks) {
      cc->VerifyNoMissingCardMarks();
    }
    CHECK(thread == self);
    Locks::mutator_lock_->AssertExclusiveHeld(self);
    cc->region_space_->SetFromSpace(cc->rb_table_, cc->force_evacuate_all_);
    cc->SwapStacks();
    if (ConcurrentCopying::kEnableFromSpaceAccountingCheck) {
      cc->RecordLiveStackFreezeSize(self);
      cc->from_space_num_objects_at_first_pause_ = cc->region_space_->GetObjectsAllocated();
      cc->from_space_num_bytes_at_first_pause_ = cc->region_space_->GetBytesAllocated();
    }
    cc->is_marking_ = true;
    cc->mark_stack_mode_.StoreRelaxed(ConcurrentCopying::kMarkStackModeThreadLocal);
    if (kIsDebugBuild) {
      cc->region_space_->AssertAllRegionLiveBytesZeroOrCleared();
    }
    if (UNLIKELY(Runtime::Current()->IsActiveTransaction())) {
      CHECK(Runtime::Current()->IsAotCompiler());
      TimingLogger::ScopedTiming split2("(Paused)VisitTransactionRoots", cc->GetTimings());
      Runtime::Current()->VisitTransactionRoots(cc);
    }
    if (kUseBakerReadBarrier && kGrayDirtyImmuneObjects) {
      cc->GrayAllDirtyImmuneObjects();
      if (kIsDebugBuild) {
        // Check that all non-gray immune objects only reference immune objects.
        cc->VerifyGrayImmuneObjects();
      }
    }
    cc->java_lang_Object_ = down_cast<mirror::Class*>(cc->Mark(
        WellKnownClasses::ToClass(WellKnownClasses::java_lang_Object).Ptr()));
  }

 private:
  ConcurrentCopying* const concurrent_copying_;
};

class ConcurrentCopying::VerifyGrayImmuneObjectsVisitor {
 public:
  explicit VerifyGrayImmuneObjectsVisitor(ConcurrentCopying* collector)
      : collector_(collector) {}

  void operator()(ObjPtr<mirror::Object> obj, MemberOffset offset, bool /* is_static */)
      const ALWAYS_INLINE REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES_SHARED(Locks::heap_bitmap_lock_) {
    CheckReference(obj->GetFieldObject<mirror::Object, kVerifyNone, kWithoutReadBarrier>(offset),
                   obj, offset);
  }

  void operator()(ObjPtr<mirror::Class> klass, ObjPtr<mirror::Reference> ref) const
      REQUIRES_SHARED(Locks::mutator_lock_) ALWAYS_INLINE {
    CHECK(klass->IsTypeOfReferenceClass());
    CheckReference(ref->GetReferent<kWithoutReadBarrier>(),
                   ref,
                   mirror::Reference::ReferentOffset());
  }

  void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const
      ALWAYS_INLINE
      REQUIRES_SHARED(Locks::mutator_lock_) {
    if (!root->IsNull()) {
      VisitRoot(root);
    }
  }

  void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
      ALWAYS_INLINE
      REQUIRES_SHARED(Locks::mutator_lock_) {
    CheckReference(root->AsMirrorPtr(), nullptr, MemberOffset(0));
  }

 private:
  ConcurrentCopying* const collector_;

  void CheckReference(ObjPtr<mirror::Object> ref,
                      ObjPtr<mirror::Object> holder,
                      MemberOffset offset) const
      REQUIRES_SHARED(Locks::mutator_lock_) {
    if (ref != nullptr) {
      if (!collector_->immune_spaces_.ContainsObject(ref.Ptr())) {
        // Not immune, must be a zygote large object.
        CHECK(Runtime::Current()->GetHeap()->GetLargeObjectsSpace()->IsZygoteLargeObject(
            Thread::Current(), ref.Ptr()))
            << "Non-gray object references non-immune, non-zygote large object " << ref << " "
            << mirror::Object::PrettyTypeOf(ref) << " in holder " << holder << " "
            << mirror::Object::PrettyTypeOf(holder) << " offset=" << offset.Uint32Value();
      } else {
        // Make sure the large object class is immune since we will never scan the large object.
        CHECK(collector_->immune_spaces_.ContainsObject(
            ref->GetClass<kVerifyNone, kWithoutReadBarrier>()));
      }
    }
  }
};

void ConcurrentCopying::VerifyGrayImmuneObjects() {
  TimingLogger::ScopedTiming split(__FUNCTION__, GetTimings());
  for (auto& space : immune_spaces_.GetSpaces()) {
    DCHECK(space->IsImageSpace() || space->IsZygoteSpace());
    accounting::ContinuousSpaceBitmap* live_bitmap = space->GetLiveBitmap();
    VerifyGrayImmuneObjectsVisitor visitor(this);
    live_bitmap->VisitMarkedRange(reinterpret_cast<uintptr_t>(space->Begin()),
                                  reinterpret_cast<uintptr_t>(space->Limit()),
                                  [&visitor](mirror::Object* obj)
        REQUIRES_SHARED(Locks::mutator_lock_) {
      // If an object is not gray, it should only have references to things in the immune spaces.
      if (obj->GetReadBarrierState() != ReadBarrier::GrayState()) {
        obj->VisitReferences</*kVisitNativeRoots*/true,
                            kDefaultVerifyFlags,
                            kWithoutReadBarrier>(visitor, visitor);
      }
    });
  }
}

class ConcurrentCopying::VerifyNoMissingCardMarkVisitor {
 public:
  VerifyNoMissingCardMarkVisitor(ConcurrentCopying* cc, ObjPtr<mirror::Object> holder)
      : cc_(cc),
        holder_(holder) {}

  void operator()(ObjPtr<mirror::Object> obj,
                  MemberOffset offset,
                  bool is_static ATTRIBUTE_UNUSED) const
      REQUIRES_SHARED(Locks::mutator_lock_) ALWAYS_INLINE {
    if (offset.Uint32Value() != mirror::Object::ClassOffset().Uint32Value()) {
      CheckReference(obj->GetFieldObject<mirror::Object, kDefaultVerifyFlags, kWithoutReadBarrier>(
          offset), offset.Uint32Value());
    }
  }

  void operator()(ObjPtr<mirror::Class> klass,
                  ObjPtr<mirror::Reference> ref) const
      REQUIRES_SHARED(Locks::mutator_lock_) ALWAYS_INLINE {
    CHECK(klass->IsTypeOfReferenceClass());
    this->operator()(ref, mirror::Reference::ReferentOffset(), false);
  }

  void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const
      REQUIRES_SHARED(Locks::mutator_lock_) {
    if (!root->IsNull()) {
      VisitRoot(root);
    }
  }

  void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
      REQUIRES_SHARED(Locks::mutator_lock_) {
    CheckReference(root->AsMirrorPtr());
  }

  void CheckReference(mirror::Object* ref, int32_t offset = -1) const
      REQUIRES_SHARED(Locks::mutator_lock_) {
    CHECK(ref == nullptr || !cc_->region_space_->IsInNewlyAllocatedRegion(ref))
        << holder_->PrettyTypeOf() << "(" << holder_.Ptr() << ") references object "
        << ref->PrettyTypeOf() << "(" << ref << ") in newly allocated region at offset=" << offset;
  }

 private:
  ConcurrentCopying* const cc_;
  ObjPtr<mirror::Object> const holder_;
};

void ConcurrentCopying::VerifyNoMissingCardMarkCallback(mirror::Object* obj, void* arg) {
  auto* collector = reinterpret_cast<ConcurrentCopying*>(arg);
  // Objects not on dirty cards should never have references to newly allocated regions.
  if (!collector->heap_->GetCardTable()->IsDirty(obj)) {
    VerifyNoMissingCardMarkVisitor visitor(collector, /*holder*/ obj);
    obj->VisitReferences</*kVisitNativeRoots*/true, kVerifyNone, kWithoutReadBarrier>(
        visitor,
        visitor);
  }
}

void ConcurrentCopying::VerifyNoMissingCardMarks() {
  TimingLogger::ScopedTiming split(__FUNCTION__, GetTimings());
  region_space_->Walk(&VerifyNoMissingCardMarkCallback, this);
  {
    ReaderMutexLock rmu(Thread::Current(), *Locks::heap_bitmap_lock_);
    heap_->GetLiveBitmap()->Walk(&VerifyNoMissingCardMarkCallback, this);
  }
}

// Switch thread roots from from-space refs to to-space refs. Forward/mark the thread roots.
void ConcurrentCopying::FlipThreadRoots() {
  TimingLogger::ScopedTiming split("FlipThreadRoots", GetTimings());
  if (kVerboseMode) {
    LOG(INFO) << "time=" << region_space_->Time();
    region_space_->DumpNonFreeRegions(LOG_STREAM(INFO));
  }
  Thread* self = Thread::Current();
  Locks::mutator_lock_->AssertNotHeld(self);
  gc_barrier_->Init(self, 0);
  ThreadFlipVisitor thread_flip_visitor(this, heap_->use_tlab_);
  FlipCallback flip_callback(this);

  // This is the point where Concurrent-Copying will pause all threads. We report a pause here,
  // if necessary. This is slightly over-reporting, as this includes the time to actually suspend
  // threads.
  {
    GcPauseListener* pause_listener = GetHeap()->GetGcPauseListener();
    if (pause_listener != nullptr) {
      pause_listener->StartPause();
    }
  }

  size_t barrier_count = Runtime::Current()->FlipThreadRoots(
      &thread_flip_visitor, &flip_callback, this);

  {
    GcPauseListener* pause_listener = GetHeap()->GetGcPauseListener();
    if (pause_listener != nullptr) {
      pause_listener->EndPause();
    }
  }

  {
    ScopedThreadStateChange tsc(self, kWaitingForCheckPointsToRun);
    gc_barrier_->Increment(self, barrier_count);
  }
  is_asserting_to_space_invariant_ = true;
  QuasiAtomic::ThreadFenceForConstructor();
  if (kVerboseMode) {
    LOG(INFO) << "time=" << region_space_->Time();
    region_space_->DumpNonFreeRegions(LOG_STREAM(INFO));
    LOG(INFO) << "GC end of FlipThreadRoots";
  }
}

class ConcurrentCopying::GrayImmuneObjectVisitor {
 public:
  explicit GrayImmuneObjectVisitor() {}

  ALWAYS_INLINE void operator()(mirror::Object* obj) const REQUIRES_SHARED(Locks::mutator_lock_) {
    if (kUseBakerReadBarrier) {
      if (kIsDebugBuild) {
        Locks::mutator_lock_->AssertExclusiveHeld(Thread::Current());
      }
      obj->SetReadBarrierState(ReadBarrier::GrayState());
    }
  }

  static void Callback(mirror::Object* obj, void* arg) REQUIRES_SHARED(Locks::mutator_lock_) {
    reinterpret_cast<GrayImmuneObjectVisitor*>(arg)->operator()(obj);
  }
};

void ConcurrentCopying::GrayAllDirtyImmuneObjects() {
  TimingLogger::ScopedTiming split(__FUNCTION__, GetTimings());
  gc::Heap* const heap = Runtime::Current()->GetHeap();
  accounting::CardTable* const card_table = heap->GetCardTable();
  WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
  for (space::ContinuousSpace* space : immune_spaces_.GetSpaces()) {
    DCHECK(space->IsImageSpace() || space->IsZygoteSpace());
    GrayImmuneObjectVisitor visitor;
    accounting::ModUnionTable* table = heap->FindModUnionTableFromSpace(space);
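    // The mod-union table, when present, remembers which cards in this immune space were dirtied
    // since the last GC, so only objects on those cards need to be grayed rather than the whole
    // space.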
    // Mark all the objects on dirty cards since these may point to objects in other spaces.
    // Once these are marked, the GC will eventually clear the cards later.
    // The table is non-null for boot image and zygote spaces. It is only null for application
    // image spaces.
    if (table != nullptr) {
      // TODO: Consider adding precleaning outside the pause.
      table->ProcessCards();
      table->VisitObjects(GrayImmuneObjectVisitor::Callback, &visitor);
      // Since the cards are recorded in the mod-union table and this is paused, we can clear
      // the cards for the space (to madvise).
      TimingLogger::ScopedTiming split2("(Paused)ClearCards", GetTimings());
      card_table->ClearCardRange(space->Begin(),
                                 AlignDown(space->End(), accounting::CardTable::kCardSize));
    } else {
      // TODO: Consider having a mark bitmap for app image spaces and avoid scanning during the
      // pause because app image spaces are all dirty pages anyways.
      card_table->Scan<false>(space->GetMarkBitmap(), space->Begin(), space->End(), visitor);
    }
  }
  // Since all of the objects that may point to other spaces are marked, we can avoid all the read
  // barriers in the immune spaces.
  updated_all_immune_objects_.StoreRelaxed(true);
}

void ConcurrentCopying::SwapStacks() {
  heap_->SwapStacks();
}

void ConcurrentCopying::RecordLiveStackFreezeSize(Thread* self) {
  WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
  live_stack_freeze_size_ = heap_->GetLiveStack()->Size();
}

// Used to visit objects in the immune spaces.
inline void ConcurrentCopying::ScanImmuneObject(mirror::Object* obj) {
  DCHECK(obj != nullptr);
  DCHECK(immune_spaces_.ContainsObject(obj));
  // Update the fields without graying it or pushing it onto the mark stack.
  Scan(obj);
}

class ConcurrentCopying::ImmuneSpaceScanObjVisitor {
 public:
  explicit ImmuneSpaceScanObjVisitor(ConcurrentCopying* cc)
      : collector_(cc) {}

  ALWAYS_INLINE void operator()(mirror::Object* obj) const REQUIRES_SHARED(Locks::mutator_lock_) {
    if (kUseBakerReadBarrier && kGrayDirtyImmuneObjects) {
      if (obj->GetReadBarrierState() == ReadBarrier::GrayState()) {
        collector_->ScanImmuneObject(obj);
        // Done scanning the object, go back to white.
        bool success = obj->AtomicSetReadBarrierState(ReadBarrier::GrayState(),
                                                      ReadBarrier::WhiteState());
        CHECK(success);
      }
    } else {
      collector_->ScanImmuneObject(obj);
    }
  }

  static void Callback(mirror::Object* obj, void* arg) REQUIRES_SHARED(Locks::mutator_lock_) {
    reinterpret_cast<ImmuneSpaceScanObjVisitor*>(arg)->operator()(obj);
  }

 private:
  ConcurrentCopying* const collector_;
};

// Concurrently mark roots that are guarded by read barriers and process the mark stack.
void ConcurrentCopying::MarkingPhase() {
  TimingLogger::ScopedTiming split("MarkingPhase", GetTimings());
  if (kVerboseMode) {
    LOG(INFO) << "GC MarkingPhase";
  }
  Thread* self = Thread::Current();
  if (kIsDebugBuild) {
    MutexLock mu(self, *Locks::thread_list_lock_);
    CHECK(weak_ref_access_enabled_);
  }

  // Scan immune spaces.
  // Update all the fields in the immune spaces first without graying the objects so that we
  // minimize dirty pages in the immune spaces. Note mutators can concurrently access and gray
  // some of the objects.
  if (kUseBakerReadBarrier) {
    gc_grays_immune_objects_ = false;
  }
  {
    TimingLogger::ScopedTiming split2("ScanImmuneSpaces", GetTimings());
    for (auto& space : immune_spaces_.GetSpaces()) {
      DCHECK(space->IsImageSpace() || space->IsZygoteSpace());
      accounting::ContinuousSpaceBitmap* live_bitmap = space->GetLiveBitmap();
      accounting::ModUnionTable* table = heap_->FindModUnionTableFromSpace(space);
      ImmuneSpaceScanObjVisitor visitor(this);
      if (kUseBakerReadBarrier && kGrayDirtyImmuneObjects && table != nullptr) {
        table->VisitObjects(ImmuneSpaceScanObjVisitor::Callback, &visitor);
      } else {
        live_bitmap->VisitMarkedRange(reinterpret_cast<uintptr_t>(space->Begin()),
                                      reinterpret_cast<uintptr_t>(space->Limit()),
                                      visitor);
      }
    }
  }
  if (kUseBakerReadBarrier) {
    // This release fence makes the field updates in the above loop visible before allowing
    // mutators to access immune objects without graying them first.
    updated_all_immune_objects_.StoreRelease(true);
    // Now whiten immune objects concurrently accessed and grayed by mutators. We can't do this in
    // the above loop because we would incorrectly disable the read barrier by whitening an object
    // which may point to an unscanned, white object, breaking the to-space invariant.
    //
    // Make sure no mutators are in the middle of marking an immune object before whitening immune
    // objects.
    IssueEmptyCheckpoint();
    MutexLock mu(Thread::Current(), immune_gray_stack_lock_);
    if (kVerboseMode) {
      LOG(INFO) << "immune gray stack size=" << immune_gray_stack_.size();
    }
    for (mirror::Object* obj : immune_gray_stack_) {
      DCHECK(obj->GetReadBarrierState() == ReadBarrier::GrayState());
      bool success = obj->AtomicSetReadBarrierState(ReadBarrier::GrayState(),
                                                    ReadBarrier::WhiteState());
      DCHECK(success);
    }
    immune_gray_stack_.clear();
  }

  {
    TimingLogger::ScopedTiming split2("VisitConcurrentRoots", GetTimings());
    Runtime::Current()->VisitConcurrentRoots(this, kVisitRootFlagAllRoots);
  }
  {
    // TODO: don't visit the transaction roots if it's not active.
    TimingLogger::ScopedTiming split5("VisitNonThreadRoots", GetTimings());
    Runtime::Current()->VisitNonThreadRoots(this);
  }

  {
    TimingLogger::ScopedTiming split7("ProcessMarkStack", GetTimings());
    // We transition through three mark stack modes (thread-local, shared, GC-exclusive). The
    // primary reasons are that we need to use a checkpoint to process thread-local mark stacks,
    // but after we disable weak ref accesses we can't use a checkpoint due to a deadlock issue
    // (running threads may be blocked at WaitHoldingLocks), and that once we reach the point
    // where we process weak references, we can avoid using a lock when accessing the GC mark
    // stack, which makes mark stack processing more efficient.

    // Process the mark stack once in the thread-local stack mode. This marks most of the live
    // objects, aside from weak ref accesses with read barriers (Reference::GetReferent() and
    // system weaks) that may happen concurrently while we process the mark stack and newly
    // mark/gray objects and push refs on the mark stack.
    ProcessMarkStack();
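    // At this point the mark stack has been drained once in the thread-local mode. The full
    // sequence of mark stack modes over a GC cycle is:
    //   kMarkStackModeOff
    //     -> kMarkStackModeThreadLocal   (set in FlipCallback)
    //     -> kMarkStackModeShared        (SwitchToSharedMarkStackMode() below)
    //     -> kMarkStackModeGcExclusive   (SwitchToGcExclusiveMarkStackMode() below)
    //     -> kMarkStackModeOff           (DisableMarking()).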
    // Switch to the shared mark stack mode. That is, revoke and process thread-local mark stacks
    // for the last time before transitioning to the shared mark stack mode, which would process
    // new refs that may have been concurrently pushed onto the mark stack during the
    // ProcessMarkStack() call above. At the same time, disable weak ref accesses using a
    // per-thread flag. It's important to do these together in a single checkpoint so that we can
    // ensure that mutators won't newly gray objects and push new refs onto the mark stack due to
    // weak ref accesses, and that mutators safely transition to the shared mark stack mode
    // (without leaving unprocessed refs on the thread-local mark stacks), without a race. This is
    // why we use a thread-local weak ref access flag, Thread::tls32_.weak_ref_access_enabled_,
    // instead of the global one.
    SwitchToSharedMarkStackMode();
    CHECK(!self->GetWeakRefAccessEnabled());
    // Now that weak ref accesses are disabled, once we exhaust the shared mark stack again here
    // (which may be non-empty if there were refs found on thread-local mark stacks during the
    // above SwitchToSharedMarkStackMode() call), we won't have new refs to process, that is,
    // mutators (via read barriers) have no way to produce any more refs to process. Marking
    // converges once before we process weak refs below.
    ProcessMarkStack();
    CheckEmptyMarkStack();
    // Switch to the GC-exclusive mark stack mode so that we can process the mark stack without a
    // lock from this point on.
    SwitchToGcExclusiveMarkStackMode();
    CheckEmptyMarkStack();
    if (kVerboseMode) {
      LOG(INFO) << "ProcessReferences";
    }
    // Process weak references. This may produce new refs to process and have them processed via
    // ProcessMarkStack (in the GC-exclusive mark stack mode).
    ProcessReferences(self);
    CheckEmptyMarkStack();
    if (kVerboseMode) {
      LOG(INFO) << "SweepSystemWeaks";
    }
    SweepSystemWeaks(self);
    if (kVerboseMode) {
      LOG(INFO) << "SweepSystemWeaks done";
    }
    // Process the mark stack here one last time because the above SweepSystemWeaks() call may
    // have marked some objects (keeping strings alive) as hash_set::Erase() can call the hash
    // function for arbitrary elements in the weak intern table in
    // InternTable::Table::SweepWeaks().
    ProcessMarkStack();
    CheckEmptyMarkStack();
    // Re-enable weak ref accesses.
    ReenableWeakRefAccess(self);
    // Free data for class loaders that we unloaded.
    Runtime::Current()->GetClassLinker()->CleanupClassLoaders();
    // Marking is done. Disable marking.
    DisableMarking();
    if (kUseBakerReadBarrier) {
      ProcessFalseGrayStack();
    }
    CheckEmptyMarkStack();
  }

  if (kIsDebugBuild) {
    MutexLock mu(self, *Locks::thread_list_lock_);
    CHECK(weak_ref_access_enabled_);
  }
  if (kVerboseMode) {
    LOG(INFO) << "GC end of MarkingPhase";
  }
}

void ConcurrentCopying::ReenableWeakRefAccess(Thread* self) {
  if (kVerboseMode) {
    LOG(INFO) << "ReenableWeakRefAccess";
  }
  // Iterate over all threads (don't need to or can't use a checkpoint) and re-enable weak ref
  // access.
  {
    MutexLock mu(self, *Locks::thread_list_lock_);
    weak_ref_access_enabled_ = true;  // This is for new threads.
    std::list<Thread*> thread_list = Runtime::Current()->GetThreadList()->GetList();
    for (Thread* thread : thread_list) {
      thread->SetWeakRefAccessEnabled(true);
    }
  }
  // Unblock blocking threads.
  GetHeap()->GetReferenceProcessor()->BroadcastForSlowPath(self);
  Runtime::Current()->BroadcastForNewSystemWeaks();
}

class ConcurrentCopying::DisableMarkingCheckpoint : public Closure {
 public:
  explicit DisableMarkingCheckpoint(ConcurrentCopying* concurrent_copying)
      : concurrent_copying_(concurrent_copying) {
  }

  void Run(Thread* thread) OVERRIDE NO_THREAD_SAFETY_ANALYSIS {
    // Note: self is not necessarily equal to thread since thread may be suspended.
    Thread* self = Thread::Current();
    DCHECK(thread == self || thread->IsSuspended() || thread->GetState() == kWaitingPerformingGc)
        << thread->GetState() << " thread " << thread << " self " << self;
    // Disable the thread-local is_gc_marking flag.
    // Note a thread that has just started right before this checkpoint may already have this flag
    // set to false, which is OK.
    thread->SetIsGcMarkingAndUpdateEntrypoints(false);
    // If thread is a running mutator, then act on behalf of the garbage collector.
    // See the code in ThreadList::RunCheckpoint.
    concurrent_copying_->GetBarrier().Pass(self);
  }

 private:
  ConcurrentCopying* const concurrent_copying_;
};

class ConcurrentCopying::DisableMarkingCallback : public Closure {
 public:
  explicit DisableMarkingCallback(ConcurrentCopying* concurrent_copying)
      : concurrent_copying_(concurrent_copying) {
  }

  void Run(Thread* self ATTRIBUTE_UNUSED) OVERRIDE REQUIRES(Locks::thread_list_lock_) {
    // This needs to run under the thread_list_lock_ critical section in
    // ThreadList::RunCheckpoint() to avoid a race with ThreadList::Register().
    CHECK(concurrent_copying_->is_marking_);
    concurrent_copying_->is_marking_ = false;
  }

 private:
  ConcurrentCopying* const concurrent_copying_;
};

void ConcurrentCopying::IssueDisableMarkingCheckpoint() {
  Thread* self = Thread::Current();
  DisableMarkingCheckpoint check_point(this);
  ThreadList* thread_list = Runtime::Current()->GetThreadList();
  gc_barrier_->Init(self, 0);
  DisableMarkingCallback dmc(this);
  size_t barrier_count = thread_list->RunCheckpoint(&check_point, &dmc);
  // If there are no threads to wait for, which implies that all the checkpoint functions have
  // finished, then there is no need to release the mutator lock.
  if (barrier_count == 0) {
    return;
  }
  // Release locks then wait for all mutator threads to pass the barrier.
  Locks::mutator_lock_->SharedUnlock(self);
  {
    ScopedThreadStateChange tsc(self, kWaitingForCheckPointsToRun);
    gc_barrier_->Increment(self, barrier_count);
  }
  Locks::mutator_lock_->SharedLock(self);
}
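
// IssueDisableMarkingCheckpoint() above follows the checkpoint pattern used throughout this file.
// The shape, in pseudocode:
//   gc_barrier_->Init(self, 0);
//   size_t n = thread_list->RunCheckpoint(&closure, &callback);  // n threads run it themselves.
//   if (n != 0) {
//     mutator_lock_->SharedUnlock(self);
//     gc_barrier_->Increment(self, n);  // Waits for n GetBarrier().Pass() calls.
//     mutator_lock_->SharedLock(self);
//   }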
void ConcurrentCopying::DisableMarking() {
  // Use a checkpoint to turn off the global is_marking and the thread-local is_gc_marking flags
  // and to ensure no threads are still in the middle of a read barrier which may have a
  // from-space ref cached in a local variable.
  IssueDisableMarkingCheckpoint();
  if (kUseTableLookupReadBarrier) {
    heap_->rb_table_->ClearAll();
    DCHECK(heap_->rb_table_->IsAllCleared());
  }
  is_mark_stack_push_disallowed_.StoreSequentiallyConsistent(1);
  mark_stack_mode_.StoreSequentiallyConsistent(kMarkStackModeOff);
}

void ConcurrentCopying::PushOntoFalseGrayStack(mirror::Object* ref) {
  CHECK(kUseBakerReadBarrier);
  DCHECK(ref != nullptr);
  MutexLock mu(Thread::Current(), mark_stack_lock_);
  false_gray_stack_.push_back(ref);
}

void ConcurrentCopying::ProcessFalseGrayStack() {
  CHECK(kUseBakerReadBarrier);
  // Change the objects on the false gray stack from gray to white.
  MutexLock mu(Thread::Current(), mark_stack_lock_);
  for (mirror::Object* obj : false_gray_stack_) {
    DCHECK(IsMarked(obj));
    // The object could be white here if a thread got preempted after a success at the
    // AtomicSetReadBarrierState in Mark(), the GC started marking through it (but did not finish,
    // so it was still gray), and the thread then ran to register it onto the false gray stack.
    if (obj->GetReadBarrierState() == ReadBarrier::GrayState()) {
      bool success = obj->AtomicSetReadBarrierState(ReadBarrier::GrayState(),
                                                    ReadBarrier::WhiteState());
      DCHECK(success);
    }
  }
  false_gray_stack_.clear();
}

void ConcurrentCopying::IssueEmptyCheckpoint() {
  Thread* self = Thread::Current();
  ThreadList* thread_list = Runtime::Current()->GetThreadList();
  // Release locks then wait for all mutator threads to pass the barrier.
  Locks::mutator_lock_->SharedUnlock(self);
  thread_list->RunEmptyCheckpoint();
  Locks::mutator_lock_->SharedLock(self);
}

void ConcurrentCopying::ExpandGcMarkStack() {
  DCHECK(gc_mark_stack_->IsFull());
  const size_t new_size = gc_mark_stack_->Capacity() * 2;
  std::vector<StackReference<mirror::Object>> temp(gc_mark_stack_->Begin(),
                                                   gc_mark_stack_->End());
  gc_mark_stack_->Resize(new_size);
  for (auto& ref : temp) {
    gc_mark_stack_->PushBack(ref.AsMirrorPtr());
  }
  DCHECK(!gc_mark_stack_->IsFull());
}

void ConcurrentCopying::PushOntoMarkStack(mirror::Object* to_ref) {
  CHECK_EQ(is_mark_stack_push_disallowed_.LoadRelaxed(), 0)
      << " " << to_ref << " " << mirror::Object::PrettyTypeOf(to_ref);
  Thread* self = Thread::Current();  // TODO: pass self as an argument from call sites?
  CHECK(thread_running_gc_ != nullptr);
  MarkStackMode mark_stack_mode = mark_stack_mode_.LoadRelaxed();
  if (LIKELY(mark_stack_mode == kMarkStackModeThreadLocal)) {
    if (LIKELY(self == thread_running_gc_)) {
      // If GC-running thread, use the GC mark stack instead of a thread-local mark stack.
      CHECK(self->GetThreadLocalMarkStack() == nullptr);
      if (UNLIKELY(gc_mark_stack_->IsFull())) {
        ExpandGcMarkStack();
      }
      gc_mark_stack_->PushBack(to_ref);
    } else {
      // Otherwise, use a thread-local mark stack.
      accounting::AtomicStack<mirror::Object>* tl_mark_stack = self->GetThreadLocalMarkStack();
      if (UNLIKELY(tl_mark_stack == nullptr || tl_mark_stack->IsFull())) {
        MutexLock mu(self, mark_stack_lock_);
        // Get a new thread-local mark stack.
        accounting::AtomicStack<mirror::Object>* new_tl_mark_stack;
        if (!pooled_mark_stacks_.empty()) {
          // Use a pooled mark stack.
          new_tl_mark_stack = pooled_mark_stacks_.back();
          pooled_mark_stacks_.pop_back();
        } else {
          // None pooled. Create a new one.
          new_tl_mark_stack =
              accounting::AtomicStack<mirror::Object>::Create(
                  "thread local mark stack", 4 * KB, 4 * KB);
        }
        DCHECK(new_tl_mark_stack != nullptr);
        DCHECK(new_tl_mark_stack->IsEmpty());
        new_tl_mark_stack->PushBack(to_ref);
        self->SetThreadLocalMarkStack(new_tl_mark_stack);
        if (tl_mark_stack != nullptr) {
          // Store the old full stack into a vector.
          revoked_mark_stacks_.push_back(tl_mark_stack);
        }
      } else {
        tl_mark_stack->PushBack(to_ref);
      }
    }
  } else if (mark_stack_mode == kMarkStackModeShared) {
    // Access the shared GC mark stack with a lock.
    MutexLock mu(self, mark_stack_lock_);
    if (UNLIKELY(gc_mark_stack_->IsFull())) {
      ExpandGcMarkStack();
    }
    gc_mark_stack_->PushBack(to_ref);
  } else {
    CHECK_EQ(static_cast<uint32_t>(mark_stack_mode),
             static_cast<uint32_t>(kMarkStackModeGcExclusive))
        << "ref=" << to_ref
        << " self->gc_marking=" << self->GetIsGcMarking()
        << " cc->is_marking=" << is_marking_;
    CHECK(self == thread_running_gc_)
        << "Only GC-running thread should access the mark stack "
        << "in the GC exclusive mark stack mode";
    // Access the GC mark stack without a lock.
    if (UNLIKELY(gc_mark_stack_->IsFull())) {
      ExpandGcMarkStack();
    }
    gc_mark_stack_->PushBack(to_ref);
  }
}

accounting::ObjectStack* ConcurrentCopying::GetAllocationStack() {
  return heap_->allocation_stack_.get();
}

accounting::ObjectStack* ConcurrentCopying::GetLiveStack() {
  return heap_->live_stack_.get();
}

// The following visitors are used to verify that there are no references to the from-space left
// after marking.
class ConcurrentCopying::VerifyNoFromSpaceRefsVisitor : public SingleRootVisitor {
 public:
  explicit VerifyNoFromSpaceRefsVisitor(ConcurrentCopying* collector)
      : collector_(collector) {}

  void operator()(mirror::Object* ref,
                  MemberOffset offset = MemberOffset(0),
                  mirror::Object* holder = nullptr) const
      REQUIRES_SHARED(Locks::mutator_lock_) ALWAYS_INLINE {
    if (ref == nullptr) {
      // OK.
      return;
    }
    collector_->AssertToSpaceInvariant(holder, offset, ref);
    if (kUseBakerReadBarrier) {
      CHECK_EQ(ref->GetReadBarrierState(), ReadBarrier::WhiteState())
          << "Ref " << ref << " " << ref->PrettyTypeOf()
          << " has non-white rb_state ";
    }
  }

  void VisitRoot(mirror::Object* root, const RootInfo& info ATTRIBUTE_UNUSED)
      OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
    DCHECK(root != nullptr);
    operator()(root);
  }

 private:
  ConcurrentCopying* const collector_;
};

class ConcurrentCopying::VerifyNoFromSpaceRefsFieldVisitor {
 public:
  explicit VerifyNoFromSpaceRefsFieldVisitor(ConcurrentCopying* collector)
      : collector_(collector) {}

  void operator()(ObjPtr<mirror::Object> obj,
                  MemberOffset offset,
                  bool is_static ATTRIBUTE_UNUSED) const
      REQUIRES_SHARED(Locks::mutator_lock_) ALWAYS_INLINE {
    mirror::Object* ref =
        obj->GetFieldObject<mirror::Object, kDefaultVerifyFlags, kWithoutReadBarrier>(offset);
    VerifyNoFromSpaceRefsVisitor visitor(collector_);
    visitor(ref, offset, obj.Ptr());
  }

  void operator()(ObjPtr<mirror::Class> klass,
                  ObjPtr<mirror::Reference> ref) const
      REQUIRES_SHARED(Locks::mutator_lock_) ALWAYS_INLINE {
    CHECK(klass->IsTypeOfReferenceClass());
    this->operator()(ref, mirror::Reference::ReferentOffset(), false);
  }

  void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const
      REQUIRES_SHARED(Locks::mutator_lock_) {
    if (!root->IsNull()) {
      VisitRoot(root);
    }
  }

  void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
      REQUIRES_SHARED(Locks::mutator_lock_) {
    VerifyNoFromSpaceRefsVisitor visitor(collector_);
    visitor(root->AsMirrorPtr());
  }

 private:
  ConcurrentCopying* const collector_;
};

class ConcurrentCopying::VerifyNoFromSpaceRefsObjectVisitor {
 public:
  explicit VerifyNoFromSpaceRefsObjectVisitor(ConcurrentCopying* collector)
      : collector_(collector) {}

  void operator()(mirror::Object* obj) const
      REQUIRES_SHARED(Locks::mutator_lock_) {
    ObjectCallback(obj, collector_);
  }

  static void ObjectCallback(mirror::Object* obj, void *arg)
      REQUIRES_SHARED(Locks::mutator_lock_) {
    CHECK(obj != nullptr);
    ConcurrentCopying* collector = reinterpret_cast<ConcurrentCopying*>(arg);
    space::RegionSpace* region_space = collector->RegionSpace();
    CHECK(!region_space->IsInFromSpace(obj)) << "Scanning object " << obj << " in from space";
    VerifyNoFromSpaceRefsFieldVisitor visitor(collector);
    obj->VisitReferences</*kVisitNativeRoots*/true, kDefaultVerifyFlags, kWithoutReadBarrier>(
        visitor,
        visitor);
    if (kUseBakerReadBarrier) {
      CHECK_EQ(obj->GetReadBarrierState(), ReadBarrier::WhiteState())
          << "obj=" << obj << " non-white rb_state " << obj->GetReadBarrierState();
    }
  }

 private:
  ConcurrentCopying* const collector_;
};
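
// VerifyNoFromSpaceReferences() below runs inside the optional pause in RunPhases() (when
// kEnableNoFromSpaceRefsVerification is set): it re-walks the roots, the to-space, the
// non-moving spaces, and the allocation stack to check that nothing still points into the
// from-space.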
// Verify that there are no from-space references left after the marking phase.
void ConcurrentCopying::VerifyNoFromSpaceReferences() {
  Thread* self = Thread::Current();
  DCHECK(Locks::mutator_lock_->IsExclusiveHeld(self));
  // Verify that all threads have is_gc_marking set to false.
  {
    MutexLock mu(self, *Locks::thread_list_lock_);
    std::list<Thread*> thread_list = Runtime::Current()->GetThreadList()->GetList();
    for (Thread* thread : thread_list) {
      CHECK(!thread->GetIsGcMarking());
    }
  }
  VerifyNoFromSpaceRefsObjectVisitor visitor(this);
  // Roots.
  {
    ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
    VerifyNoFromSpaceRefsVisitor ref_visitor(this);
    Runtime::Current()->VisitRoots(&ref_visitor);
  }
  // The to-space.
  region_space_->WalkToSpace(VerifyNoFromSpaceRefsObjectVisitor::ObjectCallback, this);
  // Non-moving spaces.
  {
    WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
    heap_->GetMarkBitmap()->Visit(visitor);
  }
  // The alloc stack.
  {
    VerifyNoFromSpaceRefsVisitor ref_visitor(this);
    for (auto* it = heap_->allocation_stack_->Begin(), *end = heap_->allocation_stack_->End();
        it < end; ++it) {
      mirror::Object* const obj = it->AsMirrorPtr();
      if (obj != nullptr && obj->GetClass() != nullptr) {
        // TODO: need to call this only if obj is alive?
        ref_visitor(obj);
        visitor(obj);
      }
    }
  }
  // TODO: LOS. But only refs in LOS are classes.
}

// The following visitors are used to assert the to-space invariant.
class ConcurrentCopying::AssertToSpaceInvariantRefsVisitor {
 public:
  explicit AssertToSpaceInvariantRefsVisitor(ConcurrentCopying* collector)
      : collector_(collector) {}

  void operator()(mirror::Object* ref) const
      REQUIRES_SHARED(Locks::mutator_lock_) ALWAYS_INLINE {
    if (ref == nullptr) {
      // OK.
      return;
    }
    collector_->AssertToSpaceInvariant(nullptr, MemberOffset(0), ref);
  }

 private:
  ConcurrentCopying* const collector_;
};

class ConcurrentCopying::AssertToSpaceInvariantFieldVisitor {
 public:
  explicit AssertToSpaceInvariantFieldVisitor(ConcurrentCopying* collector)
      : collector_(collector) {}

  void operator()(ObjPtr<mirror::Object> obj,
                  MemberOffset offset,
                  bool is_static ATTRIBUTE_UNUSED) const
      REQUIRES_SHARED(Locks::mutator_lock_) ALWAYS_INLINE {
    mirror::Object* ref =
        obj->GetFieldObject<mirror::Object, kDefaultVerifyFlags, kWithoutReadBarrier>(offset);
    AssertToSpaceInvariantRefsVisitor visitor(collector_);
    visitor(ref);
  }

  void operator()(ObjPtr<mirror::Class> klass, ObjPtr<mirror::Reference> ref ATTRIBUTE_UNUSED) const
      REQUIRES_SHARED(Locks::mutator_lock_) ALWAYS_INLINE {
    CHECK(klass->IsTypeOfReferenceClass());
  }

  void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const
      REQUIRES_SHARED(Locks::mutator_lock_) {
    if (!root->IsNull()) {
      VisitRoot(root);
    }
  }

  void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
      REQUIRES_SHARED(Locks::mutator_lock_) {
    AssertToSpaceInvariantRefsVisitor visitor(collector_);
    visitor(root->AsMirrorPtr());
  }

 private:
  ConcurrentCopying* const collector_;
};

class ConcurrentCopying::AssertToSpaceInvariantObjectVisitor {
 public:
  explicit AssertToSpaceInvariantObjectVisitor(ConcurrentCopying* collector)
      : collector_(collector) {}

  void operator()(mirror::Object* obj) const
      REQUIRES_SHARED(Locks::mutator_lock_) {
    ObjectCallback(obj, collector_);
  }

  static void ObjectCallback(mirror::Object* obj, void *arg)
      REQUIRES_SHARED(Locks::mutator_lock_) {
    CHECK(obj != nullptr);
    ConcurrentCopying* collector = reinterpret_cast<ConcurrentCopying*>(arg);
    space::RegionSpace* region_space = collector->RegionSpace();
    CHECK(!region_space->IsInFromSpace(obj)) << "Scanning object " << obj << " in from space";
    collector->AssertToSpaceInvariant(nullptr, MemberOffset(0), obj);
    AssertToSpaceInvariantFieldVisitor visitor(collector);
    obj->VisitReferences</*kVisitNativeRoots*/true, kDefaultVerifyFlags, kWithoutReadBarrier>(
        visitor,
        visitor);
  }

 private:
  ConcurrentCopying* const collector_;
};

class ConcurrentCopying::RevokeThreadLocalMarkStackCheckpoint : public Closure {
 public:
  RevokeThreadLocalMarkStackCheckpoint(ConcurrentCopying* concurrent_copying,
                                       bool disable_weak_ref_access)
      : concurrent_copying_(concurrent_copying),
        disable_weak_ref_access_(disable_weak_ref_access) {
  }

  virtual void Run(Thread* thread) OVERRIDE NO_THREAD_SAFETY_ANALYSIS {
    // Note: self is not necessarily equal to thread since thread may be suspended.
    Thread* self = Thread::Current();
    CHECK(thread == self || thread->IsSuspended() || thread->GetState() == kWaitingPerformingGc)
        << thread->GetState() << " thread " << thread << " self " << self;
    // Revoke thread-local mark stacks.
    accounting::AtomicStack<mirror::Object>* tl_mark_stack = thread->GetThreadLocalMarkStack();
    if (tl_mark_stack != nullptr) {
      MutexLock mu(self, concurrent_copying_->mark_stack_lock_);
      concurrent_copying_->revoked_mark_stacks_.push_back(tl_mark_stack);
      thread->SetThreadLocalMarkStack(nullptr);
    }
    // Disable weak ref access.
    if (disable_weak_ref_access_) {
      thread->SetWeakRefAccessEnabled(false);
    }
    // If thread is a running mutator, then act on behalf of the garbage collector.
    // See the code in ThreadList::RunCheckpoint.
    concurrent_copying_->GetBarrier().Pass(self);
  }

 private:
  ConcurrentCopying* const concurrent_copying_;
  const bool disable_weak_ref_access_;
};

void ConcurrentCopying::RevokeThreadLocalMarkStacks(bool disable_weak_ref_access,
                                                    Closure* checkpoint_callback) {
  Thread* self = Thread::Current();
  RevokeThreadLocalMarkStackCheckpoint check_point(this, disable_weak_ref_access);
  ThreadList* thread_list = Runtime::Current()->GetThreadList();
  gc_barrier_->Init(self, 0);
  size_t barrier_count = thread_list->RunCheckpoint(&check_point, checkpoint_callback);
  // If there are no threads to wait for, which implies that all the checkpoint functions have
  // finished, then there is no need to release the mutator lock.
  if (barrier_count == 0) {
    return;
  }
  Locks::mutator_lock_->SharedUnlock(self);
  {
    ScopedThreadStateChange tsc(self, kWaitingForCheckPointsToRun);
    gc_barrier_->Increment(self, barrier_count);
  }
  Locks::mutator_lock_->SharedLock(self);
}

void ConcurrentCopying::RevokeThreadLocalMarkStack(Thread* thread) {
  Thread* self = Thread::Current();
  CHECK_EQ(self, thread);
  accounting::AtomicStack<mirror::Object>* tl_mark_stack = thread->GetThreadLocalMarkStack();
  if (tl_mark_stack != nullptr) {
    CHECK(is_marking_);
    MutexLock mu(self, mark_stack_lock_);
    revoked_mark_stacks_.push_back(tl_mark_stack);
    thread->SetThreadLocalMarkStack(nullptr);
  }
}

void ConcurrentCopying::ProcessMarkStack() {
  if (kVerboseMode) {
    LOG(INFO) << "ProcessMarkStack. ";
  }
  bool empty_prev = false;
  while (true) {
    bool empty = ProcessMarkStackOnce();
    if (empty_prev && empty) {
      // Saw empty mark stack for a second time, done.
      break;
    }
    empty_prev = empty;
  }
}

bool ConcurrentCopying::ProcessMarkStackOnce() {
  Thread* self = Thread::Current();
  CHECK(thread_running_gc_ != nullptr);
  CHECK(self == thread_running_gc_);
  CHECK(self->GetThreadLocalMarkStack() == nullptr);
  size_t count = 0;
  MarkStackMode mark_stack_mode = mark_stack_mode_.LoadRelaxed();
  if (mark_stack_mode == kMarkStackModeThreadLocal) {
    // Process the thread-local mark stacks and the GC mark stack.
    count += ProcessThreadLocalMarkStacks(false, nullptr);
    while (!gc_mark_stack_->IsEmpty()) {
      mirror::Object* to_ref = gc_mark_stack_->PopBack();
      ProcessMarkStackRef(to_ref);
      ++count;
    }
    gc_mark_stack_->Reset();
  } else if (mark_stack_mode == kMarkStackModeShared) {
    // Do an empty checkpoint to avoid a race with a mutator preempted in the middle of a read
    // barrier but before pushing onto the mark stack. b/32508093. Note the weak ref access is
    // disabled at this point.
    IssueEmptyCheckpoint();
    // Process the shared GC mark stack with a lock.
    {
      MutexLock mu(self, mark_stack_lock_);
      CHECK(revoked_mark_stacks_.empty());
    }
    while (true) {
      std::vector<mirror::Object*> refs;
      {
        // Copy refs with lock. Note the number of refs should be small.
        MutexLock mu(self, mark_stack_lock_);
        if (gc_mark_stack_->IsEmpty()) {
          break;
        }
        for (StackReference<mirror::Object>* p = gc_mark_stack_->Begin();
             p != gc_mark_stack_->End(); ++p) {
          refs.push_back(p->AsMirrorPtr());
        }
        gc_mark_stack_->Reset();
      }
      for (mirror::Object* ref : refs) {
        ProcessMarkStackRef(ref);
        ++count;
      }
    }
  } else {
    CHECK_EQ(static_cast<uint32_t>(mark_stack_mode),
             static_cast<uint32_t>(kMarkStackModeGcExclusive));
    {
      MutexLock mu(self, mark_stack_lock_);
      CHECK(revoked_mark_stacks_.empty());
    }
    // Process the GC mark stack in the exclusive mode. No need to take the lock.
    while (!gc_mark_stack_->IsEmpty()) {
      mirror::Object* to_ref = gc_mark_stack_->PopBack();
      ProcessMarkStackRef(to_ref);
      ++count;
    }
    gc_mark_stack_->Reset();
  }

  // Return true if the stack was empty.
  return count == 0;
}

size_t ConcurrentCopying::ProcessThreadLocalMarkStacks(bool disable_weak_ref_access,
                                                       Closure* checkpoint_callback) {
  // Run a checkpoint to collect all thread-local mark stacks and iterate over them all.
  RevokeThreadLocalMarkStacks(disable_weak_ref_access, checkpoint_callback);
  size_t count = 0;
  std::vector<accounting::AtomicStack<mirror::Object>*> mark_stacks;
  {
    MutexLock mu(Thread::Current(), mark_stack_lock_);
    // Make a copy of the mark stack vector.
    mark_stacks = revoked_mark_stacks_;
    revoked_mark_stacks_.clear();
  }
  for (accounting::AtomicStack<mirror::Object>* mark_stack : mark_stacks) {
    for (StackReference<mirror::Object>* p = mark_stack->Begin(); p != mark_stack->End(); ++p) {
      mirror::Object* to_ref = p->AsMirrorPtr();
      ProcessMarkStackRef(to_ref);
      ++count;
    }
    {
      MutexLock mu(Thread::Current(), mark_stack_lock_);
      if (pooled_mark_stacks_.size() >= kMarkStackPoolSize) {
        // The pool has enough. Delete it.
        delete mark_stack;
      } else {
        // Otherwise, put it into the pool for later reuse.
        mark_stack->Reset();
        pooled_mark_stacks_.push_back(mark_stack);
      }
    }
  }
  return count;
}

inline void ConcurrentCopying::ProcessMarkStackRef(mirror::Object* to_ref) {
  DCHECK(!region_space_->IsInFromSpace(to_ref));
  if (kUseBakerReadBarrier) {
    DCHECK(to_ref->GetReadBarrierState() == ReadBarrier::GrayState())
        << " " << to_ref << " " << to_ref->GetReadBarrierState()
        << " is_marked=" << IsMarked(to_ref);
  }
  bool add_to_live_bytes = false;
  if (region_space_->IsInUnevacFromSpace(to_ref)) {
    // Mark the bitmap only in the GC thread here so that we don't need a CAS.
    if (!kUseBakerReadBarrier || !region_space_bitmap_->Set(to_ref)) {
      // It may be already marked if we accidentally pushed the same object twice due to the racy
      // bitmap read in MarkUnevacFromSpaceRegion.
      Scan(to_ref);
      // Only add to the live bytes if the object was not already marked.
1448 add_to_live_bytes = true; 1449 } 1450 } else { 1451 Scan(to_ref); 1452 } 1453 if (kUseBakerReadBarrier) { 1454 DCHECK(to_ref->GetReadBarrierState() == ReadBarrier::GrayState()) 1455 << " " << to_ref << " " << to_ref->GetReadBarrierState() 1456 << " is_marked=" << IsMarked(to_ref); 1457 } 1458#ifdef USE_BAKER_OR_BROOKS_READ_BARRIER 1459 mirror::Object* referent = nullptr; 1460 if (UNLIKELY((to_ref->GetClass<kVerifyNone, kWithoutReadBarrier>()->IsTypeOfReferenceClass() && 1461 (referent = to_ref->AsReference()->GetReferent<kWithoutReadBarrier>()) != nullptr && 1462 !IsInToSpace(referent)))) { 1463 // Leave this reference gray in the queue so that GetReferent() will trigger a read barrier. We 1464 // will change it to white later in ReferenceQueue::DequeuePendingReference(). 1465 DCHECK(to_ref->AsReference()->GetPendingNext() != nullptr) << "Left unenqueued ref gray " << to_ref; 1466 } else { 1467 // We may occasionally leave a reference white in the queue if its referent happens to be 1468 // concurrently marked after the Scan() call above has enqueued the Reference, in which case the 1469 // above IsInToSpace() evaluates to true and we change the color from gray to white here in this 1470 // else block. 1471 if (kUseBakerReadBarrier) { 1472 bool success = to_ref->AtomicSetReadBarrierState</*kCasRelease*/true>( 1473 ReadBarrier::GrayState(), 1474 ReadBarrier::WhiteState()); 1475 DCHECK(success) << "Must succeed as we won the race."; 1476 } 1477 } 1478#else 1479 DCHECK(!kUseBakerReadBarrier); 1480#endif 1481 1482 if (add_to_live_bytes) { 1483 // Add to the live bytes per unevacuated from space. Note this code is always run by the 1484 // GC-running thread (no synchronization required). 1485 DCHECK(region_space_bitmap_->Test(to_ref)); 1486 size_t obj_size = to_ref->SizeOf<kDefaultVerifyFlags>(); 1487 size_t alloc_size = RoundUp(obj_size, space::RegionSpace::kAlignment); 1488 region_space_->AddLiveBytes(to_ref, alloc_size); 1489 } 1490 if (ReadBarrier::kEnableToSpaceInvariantChecks) { 1491 AssertToSpaceInvariantObjectVisitor visitor(this); 1492 visitor(to_ref); 1493 } 1494} 1495 1496class ConcurrentCopying::DisableWeakRefAccessCallback : public Closure { 1497 public: 1498 explicit DisableWeakRefAccessCallback(ConcurrentCopying* concurrent_copying) 1499 : concurrent_copying_(concurrent_copying) { 1500 } 1501 1502 void Run(Thread* self ATTRIBUTE_UNUSED) OVERRIDE REQUIRES(Locks::thread_list_lock_) { 1503 // This needs to run under the thread_list_lock_ critical section in ThreadList::RunCheckpoint() 1504 // to avoid a deadlock b/31500969. 1505 CHECK(concurrent_copying_->weak_ref_access_enabled_); 1506 concurrent_copying_->weak_ref_access_enabled_ = false; 1507 } 1508 1509 private: 1510 ConcurrentCopying* const concurrent_copying_; 1511}; 1512 1513void ConcurrentCopying::SwitchToSharedMarkStackMode() { 1514 Thread* self = Thread::Current(); 1515 CHECK(thread_running_gc_ != nullptr); 1516 CHECK_EQ(self, thread_running_gc_); 1517 CHECK(self->GetThreadLocalMarkStack() == nullptr); 1518 MarkStackMode before_mark_stack_mode = mark_stack_mode_.LoadRelaxed(); 1519 CHECK_EQ(static_cast<uint32_t>(before_mark_stack_mode), 1520 static_cast<uint32_t>(kMarkStackModeThreadLocal)); 1521 mark_stack_mode_.StoreRelaxed(kMarkStackModeShared); 1522 DisableWeakRefAccessCallback dwrac(this); 1523 // Process the thread local mark stacks one last time after switching to the shared mark stack 1524 // mode and disable weak ref accesses. 
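  // The checkpoint below both revokes each thread's local mark stack and runs
  // DisableWeakRefAccessCallback while ThreadList::RunCheckpoint holds thread_list_lock_, so no
  // mutator can push onto a thread-local stack or begin a weak reference access afterwards.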
1525 ProcessThreadLocalMarkStacks(true, &dwrac); 1526 if (kVerboseMode) { 1527 LOG(INFO) << "Switched to shared mark stack mode and disabled weak ref access"; 1528 } 1529} 1530 1531void ConcurrentCopying::SwitchToGcExclusiveMarkStackMode() { 1532 Thread* self = Thread::Current(); 1533 CHECK(thread_running_gc_ != nullptr); 1534 CHECK_EQ(self, thread_running_gc_); 1535 CHECK(self->GetThreadLocalMarkStack() == nullptr); 1536 MarkStackMode before_mark_stack_mode = mark_stack_mode_.LoadRelaxed(); 1537 CHECK_EQ(static_cast<uint32_t>(before_mark_stack_mode), 1538 static_cast<uint32_t>(kMarkStackModeShared)); 1539 mark_stack_mode_.StoreRelaxed(kMarkStackModeGcExclusive); 1540 QuasiAtomic::ThreadFenceForConstructor(); 1541 if (kVerboseMode) { 1542 LOG(INFO) << "Switched to GC exclusive mark stack mode"; 1543 } 1544} 1545 1546void ConcurrentCopying::CheckEmptyMarkStack() { 1547 Thread* self = Thread::Current(); 1548 CHECK(thread_running_gc_ != nullptr); 1549 CHECK_EQ(self, thread_running_gc_); 1550 CHECK(self->GetThreadLocalMarkStack() == nullptr); 1551 MarkStackMode mark_stack_mode = mark_stack_mode_.LoadRelaxed(); 1552 if (mark_stack_mode == kMarkStackModeThreadLocal) { 1553 // Thread-local mark stack mode. 1554 RevokeThreadLocalMarkStacks(false, nullptr); 1555 MutexLock mu(Thread::Current(), mark_stack_lock_); 1556 if (!revoked_mark_stacks_.empty()) { 1557 for (accounting::AtomicStack<mirror::Object>* mark_stack : revoked_mark_stacks_) { 1558 while (!mark_stack->IsEmpty()) { 1559 mirror::Object* obj = mark_stack->PopBack(); 1560 if (kUseBakerReadBarrier) { 1561 uint32_t rb_state = obj->GetReadBarrierState(); 1562 LOG(INFO) << "On mark queue : " << obj << " " << obj->PrettyTypeOf() << " rb_state=" 1563 << rb_state << " is_marked=" << IsMarked(obj); 1564 } else { 1565 LOG(INFO) << "On mark queue : " << obj << " " << obj->PrettyTypeOf() 1566 << " is_marked=" << IsMarked(obj); 1567 } 1568 } 1569 } 1570 LOG(FATAL) << "mark stack is not empty"; 1571 } 1572 } else { 1573 // Shared, GC-exclusive, or off. 1574 MutexLock mu(Thread::Current(), mark_stack_lock_); 1575 CHECK(gc_mark_stack_->IsEmpty()); 1576 CHECK(revoked_mark_stacks_.empty()); 1577 } 1578} 1579 1580void ConcurrentCopying::SweepSystemWeaks(Thread* self) { 1581 TimingLogger::ScopedTiming split("SweepSystemWeaks", GetTimings()); 1582 ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_); 1583 Runtime::Current()->SweepSystemWeaks(this); 1584} 1585 1586void ConcurrentCopying::Sweep(bool swap_bitmaps) { 1587 { 1588 TimingLogger::ScopedTiming t("MarkStackAsLive", GetTimings()); 1589 accounting::ObjectStack* live_stack = heap_->GetLiveStack(); 1590 if (kEnableFromSpaceAccountingCheck) { 1591 CHECK_GE(live_stack_freeze_size_, live_stack->Size()); 1592 } 1593 heap_->MarkAllocStackAsLive(live_stack); 1594 live_stack->Reset(); 1595 } 1596 CheckEmptyMarkStack(); 1597 TimingLogger::ScopedTiming split("Sweep", GetTimings()); 1598 for (const auto& space : GetHeap()->GetContinuousSpaces()) { 1599 if (space->IsContinuousMemMapAllocSpace()) { 1600 space::ContinuousMemMapAllocSpace* alloc_space = space->AsContinuousMemMapAllocSpace(); 1601 if (space == region_space_ || immune_spaces_.ContainsSpace(space)) { 1602 continue; 1603 } 1604 TimingLogger::ScopedTiming split2( 1605 alloc_space->IsZygoteSpace() ? 
"SweepZygoteSpace" : "SweepAllocSpace", GetTimings()); 1606 RecordFree(alloc_space->Sweep(swap_bitmaps)); 1607 } 1608 } 1609 SweepLargeObjects(swap_bitmaps); 1610} 1611 1612void ConcurrentCopying::MarkZygoteLargeObjects() { 1613 TimingLogger::ScopedTiming split(__FUNCTION__, GetTimings()); 1614 Thread* const self = Thread::Current(); 1615 WriterMutexLock rmu(self, *Locks::heap_bitmap_lock_); 1616 space::LargeObjectSpace* const los = heap_->GetLargeObjectsSpace(); 1617 if (los != nullptr) { 1618 // Pick the current live bitmap (mark bitmap if swapped). 1619 accounting::LargeObjectBitmap* const live_bitmap = los->GetLiveBitmap(); 1620 accounting::LargeObjectBitmap* const mark_bitmap = los->GetMarkBitmap(); 1621 // Walk through all of the objects and explicitly mark the zygote ones so they don't get swept. 1622 std::pair<uint8_t*, uint8_t*> range = los->GetBeginEndAtomic(); 1623 live_bitmap->VisitMarkedRange(reinterpret_cast<uintptr_t>(range.first), 1624 reinterpret_cast<uintptr_t>(range.second), 1625 [mark_bitmap, los, self](mirror::Object* obj) 1626 REQUIRES(Locks::heap_bitmap_lock_) 1627 REQUIRES_SHARED(Locks::mutator_lock_) { 1628 if (los->IsZygoteLargeObject(self, obj)) { 1629 mark_bitmap->Set(obj); 1630 } 1631 }); 1632 } 1633} 1634 1635void ConcurrentCopying::SweepLargeObjects(bool swap_bitmaps) { 1636 TimingLogger::ScopedTiming split("SweepLargeObjects", GetTimings()); 1637 if (heap_->GetLargeObjectsSpace() != nullptr) { 1638 RecordFreeLOS(heap_->GetLargeObjectsSpace()->Sweep(swap_bitmaps)); 1639 } 1640} 1641 1642void ConcurrentCopying::ReclaimPhase() { 1643 TimingLogger::ScopedTiming split("ReclaimPhase", GetTimings()); 1644 if (kVerboseMode) { 1645 LOG(INFO) << "GC ReclaimPhase"; 1646 } 1647 Thread* self = Thread::Current(); 1648 1649 { 1650 // Double-check that the mark stack is empty. 1651 // Note: need to set this after VerifyNoFromSpaceRef(). 1652 is_asserting_to_space_invariant_ = false; 1653 QuasiAtomic::ThreadFenceForConstructor(); 1654 if (kVerboseMode) { 1655 LOG(INFO) << "Issue an empty check point. "; 1656 } 1657 IssueEmptyCheckpoint(); 1658 // Disable the check. 1659 is_mark_stack_push_disallowed_.StoreSequentiallyConsistent(0); 1660 if (kUseBakerReadBarrier) { 1661 updated_all_immune_objects_.StoreSequentiallyConsistent(false); 1662 } 1663 CheckEmptyMarkStack(); 1664 } 1665 1666 { 1667 // Record freed objects. 1668 TimingLogger::ScopedTiming split2("RecordFree", GetTimings()); 1669 // Don't include thread-locals that are in the to-space. 
1670 const uint64_t from_bytes = region_space_->GetBytesAllocatedInFromSpace(); 1671 const uint64_t from_objects = region_space_->GetObjectsAllocatedInFromSpace(); 1672 const uint64_t unevac_from_bytes = region_space_->GetBytesAllocatedInUnevacFromSpace(); 1673 const uint64_t unevac_from_objects = region_space_->GetObjectsAllocatedInUnevacFromSpace(); 1674 uint64_t to_bytes = bytes_moved_.LoadSequentiallyConsistent(); 1675 cumulative_bytes_moved_.FetchAndAddRelaxed(to_bytes); 1676 uint64_t to_objects = objects_moved_.LoadSequentiallyConsistent(); 1677 cumulative_objects_moved_.FetchAndAddRelaxed(to_objects); 1678 if (kEnableFromSpaceAccountingCheck) { 1679 CHECK_EQ(from_space_num_objects_at_first_pause_, from_objects + unevac_from_objects); 1680 CHECK_EQ(from_space_num_bytes_at_first_pause_, from_bytes + unevac_from_bytes); 1681 } 1682 CHECK_LE(to_objects, from_objects); 1683 CHECK_LE(to_bytes, from_bytes); 1684 // cleared_bytes and cleared_objects may be greater than the from space equivalents since 1685 // ClearFromSpace may clear empty unevac regions. 1686 uint64_t cleared_bytes; 1687 uint64_t cleared_objects; 1688 { 1689 TimingLogger::ScopedTiming split4("ClearFromSpace", GetTimings()); 1690 region_space_->ClearFromSpace(&cleared_bytes, &cleared_objects); 1691 CHECK_GE(cleared_bytes, from_bytes); 1692 CHECK_GE(cleared_objects, from_objects); 1693 } 1694 int64_t freed_bytes = cleared_bytes - to_bytes; 1695 int64_t freed_objects = cleared_objects - to_objects; 1696 if (kVerboseMode) { 1697 LOG(INFO) << "RecordFree:" 1698 << " from_bytes=" << from_bytes << " from_objects=" << from_objects 1699 << " unevac_from_bytes=" << unevac_from_bytes << " unevac_from_objects=" << unevac_from_objects 1700 << " to_bytes=" << to_bytes << " to_objects=" << to_objects 1701 << " freed_bytes=" << freed_bytes << " freed_objects=" << freed_objects 1702 << " from_space size=" << region_space_->FromSpaceSize() 1703 << " unevac_from_space size=" << region_space_->UnevacFromSpaceSize() 1704 << " to_space size=" << region_space_->ToSpaceSize(); 1705 LOG(INFO) << "(before) num_bytes_allocated=" << heap_->num_bytes_allocated_.LoadSequentiallyConsistent(); 1706 } 1707 RecordFree(ObjectBytePair(freed_objects, freed_bytes)); 1708 if (kVerboseMode) { 1709 LOG(INFO) << "(after) num_bytes_allocated=" << heap_->num_bytes_allocated_.LoadSequentiallyConsistent(); 1710 } 1711 } 1712 1713 { 1714 WriterMutexLock mu(self, *Locks::heap_bitmap_lock_); 1715 Sweep(false); 1716 SwapBitmaps(); 1717 heap_->UnBindBitmaps(); 1718 1719 // The bitmap was cleared at the start of the GC, there is nothing we need to do here. 1720 DCHECK(region_space_bitmap_ != nullptr); 1721 region_space_bitmap_ = nullptr; 1722 } 1723 1724 CheckEmptyMarkStack(); 1725 1726 if (kVerboseMode) { 1727 LOG(INFO) << "GC end of ReclaimPhase"; 1728 } 1729} 1730 1731// Assert the to-space invariant. 1732void ConcurrentCopying::AssertToSpaceInvariant(mirror::Object* obj, 1733 MemberOffset offset, 1734 mirror::Object* ref) { 1735 CHECK_EQ(heap_->collector_type_, kCollectorTypeCC); 1736 if (is_asserting_to_space_invariant_) { 1737 using RegionType = space::RegionSpace::RegionType; 1738 space::RegionSpace::RegionType type = region_space_->GetRegionType(ref); 1739 if (type == RegionType::kRegionTypeToSpace) { 1740 // OK. 1741 return; 1742 } else if (type == RegionType::kRegionTypeUnevacFromSpace) { 1743 CHECK(IsMarkedInUnevacFromSpace(ref)) << ref; 1744 } else if (UNLIKELY(type == RegionType::kRegionTypeFromSpace)) { 1745 // Not OK. Do extra logging. 
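      // Log the holder (if any) and the ref's lock word and type before aborting.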
1746 if (obj != nullptr) { 1747 LogFromSpaceRefHolder(obj, offset); 1748 } 1749 ref->GetLockWord(false).Dump(LOG_STREAM(FATAL_WITHOUT_ABORT)); 1750 CHECK(false) << "Found from-space ref " << ref << " " << ref->PrettyTypeOf(); 1751 } else { 1752 AssertToSpaceInvariantInNonMovingSpace(obj, ref); 1753 } 1754 } 1755} 1756 1757class RootPrinter { 1758 public: 1759 RootPrinter() { } 1760 1761 template <class MirrorType> 1762 ALWAYS_INLINE void VisitRootIfNonNull(mirror::CompressedReference<MirrorType>* root) 1763 REQUIRES_SHARED(Locks::mutator_lock_) { 1764 if (!root->IsNull()) { 1765 VisitRoot(root); 1766 } 1767 } 1768 1769 template <class MirrorType> 1770 void VisitRoot(mirror::Object** root) 1771 REQUIRES_SHARED(Locks::mutator_lock_) { 1772 LOG(FATAL_WITHOUT_ABORT) << "root=" << root << " ref=" << *root; 1773 } 1774 1775 template <class MirrorType> 1776 void VisitRoot(mirror::CompressedReference<MirrorType>* root) 1777 REQUIRES_SHARED(Locks::mutator_lock_) { 1778 LOG(FATAL_WITHOUT_ABORT) << "root=" << root << " ref=" << root->AsMirrorPtr(); 1779 } 1780}; 1781 1782void ConcurrentCopying::AssertToSpaceInvariant(GcRootSource* gc_root_source, 1783 mirror::Object* ref) { 1784 CHECK(heap_->collector_type_ == kCollectorTypeCC) << static_cast<size_t>(heap_->collector_type_); 1785 if (is_asserting_to_space_invariant_) { 1786 if (region_space_->IsInToSpace(ref)) { 1787 // OK. 1788 return; 1789 } else if (region_space_->IsInUnevacFromSpace(ref)) { 1790 CHECK(IsMarkedInUnevacFromSpace(ref)) << ref; 1791 } else if (region_space_->IsInFromSpace(ref)) { 1792 // Not OK. Do extra logging. 1793 if (gc_root_source == nullptr) { 1794 // No info. 1795 } else if (gc_root_source->HasArtField()) { 1796 ArtField* field = gc_root_source->GetArtField(); 1797 LOG(FATAL_WITHOUT_ABORT) << "gc root in field " << field << " " 1798 << ArtField::PrettyField(field); 1799 RootPrinter root_printer; 1800 field->VisitRoots(root_printer); 1801 } else if (gc_root_source->HasArtMethod()) { 1802 ArtMethod* method = gc_root_source->GetArtMethod(); 1803 LOG(FATAL_WITHOUT_ABORT) << "gc root in method " << method << " " 1804 << ArtMethod::PrettyMethod(method); 1805 RootPrinter root_printer; 1806 method->VisitRoots(root_printer, kRuntimePointerSize); 1807 } 1808 ref->GetLockWord(false).Dump(LOG_STREAM(FATAL_WITHOUT_ABORT)); 1809 region_space_->DumpNonFreeRegions(LOG_STREAM(FATAL_WITHOUT_ABORT)); 1810 PrintFileToLog("/proc/self/maps", LogSeverity::FATAL_WITHOUT_ABORT); 1811 MemMap::DumpMaps(LOG_STREAM(FATAL_WITHOUT_ABORT), true); 1812 CHECK(false) << "Found from-space ref " << ref << " " << ref->PrettyTypeOf(); 1813 } else { 1814 AssertToSpaceInvariantInNonMovingSpace(nullptr, ref); 1815 } 1816 } 1817} 1818 1819void ConcurrentCopying::LogFromSpaceRefHolder(mirror::Object* obj, MemberOffset offset) { 1820 if (kUseBakerReadBarrier) { 1821 LOG(INFO) << "holder=" << obj << " " << obj->PrettyTypeOf() 1822 << " holder rb_state=" << obj->GetReadBarrierState(); 1823 } else { 1824 LOG(INFO) << "holder=" << obj << " " << obj->PrettyTypeOf(); 1825 } 1826 if (region_space_->IsInFromSpace(obj)) { 1827 LOG(INFO) << "holder is in the from-space."; 1828 } else if (region_space_->IsInToSpace(obj)) { 1829 LOG(INFO) << "holder is in the to-space."; 1830 } else if (region_space_->IsInUnevacFromSpace(obj)) { 1831 LOG(INFO) << "holder is in the unevac from-space."; 1832 if (IsMarkedInUnevacFromSpace(obj)) { 1833 LOG(INFO) << "holder is marked in the region space bitmap."; 1834 } else { 1835 LOG(INFO) << "holder is not marked in the region space bitmap."; 1836 } 
1837  } else {
1838    // In a non-moving space.
1839    if (immune_spaces_.ContainsObject(obj)) {
1840      LOG(INFO) << "holder is in an immune image or the zygote space.";
1841    } else {
1842      LOG(INFO) << "holder is in a non-immune, non-moving (or main) space.";
1843      accounting::ContinuousSpaceBitmap* mark_bitmap =
1844          heap_mark_bitmap_->GetContinuousSpaceBitmap(obj);
1845      accounting::LargeObjectBitmap* los_bitmap =
1846          heap_mark_bitmap_->GetLargeObjectBitmap(obj);
1847      CHECK(los_bitmap != nullptr) << "LOS bitmap covers the entire address range";
1848      bool is_los = mark_bitmap == nullptr;
1849      if (!is_los && mark_bitmap->Test(obj)) {
1850        LOG(INFO) << "holder is marked in the mark bitmap.";
1851      } else if (is_los && los_bitmap->Test(obj)) {
1852        LOG(INFO) << "holder is marked in the los bitmap.";
1853      } else {
1854        // If ref is on the allocation stack, then it is considered
1855        // marked/alive (but not necessarily on the live stack).
1856        if (IsOnAllocStack(obj)) {
1857          LOG(INFO) << "holder is on the alloc stack.";
1858        } else {
1859          LOG(INFO) << "holder is not marked or on the alloc stack.";
1860        }
1861      }
1862    }
1863  }
1864  LOG(INFO) << "offset=" << offset.SizeValue();
1865}
1866
1867void ConcurrentCopying::AssertToSpaceInvariantInNonMovingSpace(mirror::Object* obj,
1868                                                               mirror::Object* ref) {
1869  // In a non-moving space. Check that the ref is marked.
1870  if (immune_spaces_.ContainsObject(ref)) {
1871    if (kUseBakerReadBarrier) {
1872      // Immune object may not be gray if called from the GC.
1873      if (Thread::Current() == thread_running_gc_ && !gc_grays_immune_objects_) {
1874        return;
1875      }
1876      bool updated_all_immune_objects = updated_all_immune_objects_.LoadSequentiallyConsistent();
1877      CHECK(updated_all_immune_objects || ref->GetReadBarrierState() == ReadBarrier::GrayState())
1878          << "Unmarked immune space ref. obj=" << obj << " rb_state="
1879          << (obj != nullptr ? obj->GetReadBarrierState() : 0U)
1880          << " ref=" << ref << " ref rb_state=" << ref->GetReadBarrierState()
1881          << " updated_all_immune_objects=" << updated_all_immune_objects;
1882    }
1883  } else {
1884    accounting::ContinuousSpaceBitmap* mark_bitmap =
1885        heap_mark_bitmap_->GetContinuousSpaceBitmap(ref);
1886    accounting::LargeObjectBitmap* los_bitmap =
1887        heap_mark_bitmap_->GetLargeObjectBitmap(ref);
1888    bool is_los = mark_bitmap == nullptr;
1889    if ((!is_los && mark_bitmap->Test(ref)) ||
1890        (is_los && los_bitmap->Test(ref))) {
1891      // OK.
1892    } else {
1893      // If ref is on the allocation stack, then it may not be
1894      // marked live, but it is considered marked/alive (but not
1895      // necessarily on the live stack).
1896      CHECK(IsOnAllocStack(ref)) << "Unmarked ref that's not on the allocation stack. "
1897                                 << "obj=" << obj << " ref=" << ref;
1898    }
1899  }
1900}
1901
1902// Used to scan ref fields of an object.
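// Plain reference fields are dispatched to Process(), reference-type referents to
// DelayReferenceReferent(), and native GC roots (visited because Scan() passes
// kVisitNativeRoots) to MarkRoot().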
1903 class ConcurrentCopying::RefFieldsVisitor {
1904  public:
1905   explicit RefFieldsVisitor(ConcurrentCopying* collector)
1906       : collector_(collector) {}
1907
1908   void operator()(mirror::Object* obj, MemberOffset offset, bool /* is_static */)
1909       const ALWAYS_INLINE REQUIRES_SHARED(Locks::mutator_lock_)
1910       REQUIRES_SHARED(Locks::heap_bitmap_lock_) {
1911     collector_->Process(obj, offset);
1912   }
1913
1914   void operator()(ObjPtr<mirror::Class> klass, ObjPtr<mirror::Reference> ref) const
1915       REQUIRES_SHARED(Locks::mutator_lock_) ALWAYS_INLINE {
1916     CHECK(klass->IsTypeOfReferenceClass());
1917     collector_->DelayReferenceReferent(klass, ref);
1918   }
1919
1920   void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const
1921       ALWAYS_INLINE
1922       REQUIRES_SHARED(Locks::mutator_lock_) {
1923     if (!root->IsNull()) {
1924       VisitRoot(root);
1925     }
1926   }
1927
1928   void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
1929       ALWAYS_INLINE
1930       REQUIRES_SHARED(Locks::mutator_lock_) {
1931     collector_->MarkRoot</*kGrayImmuneObject*/false>(root);
1932   }
1933
1934  private:
1935   ConcurrentCopying* const collector_;
1936 };
1937
1938// Scan ref fields of an object.
1939 inline void ConcurrentCopying::Scan(mirror::Object* to_ref) {
1940  if (kDisallowReadBarrierDuringScan && !Runtime::Current()->IsActiveTransaction()) {
1941    // Avoid all read barriers while visiting references, to help performance.
1942    // Don't do this in transaction mode, because we may read the old value of a field, which may
1943    // trigger read barriers.
1944    Thread::Current()->ModifyDebugDisallowReadBarrier(1);
1945  }
1946  DCHECK(!region_space_->IsInFromSpace(to_ref));
1947  DCHECK_EQ(Thread::Current(), thread_running_gc_);
1948  RefFieldsVisitor visitor(this);
1949  // Disable the read barrier for performance reasons.
1950  to_ref->VisitReferences</*kVisitNativeRoots*/true, kDefaultVerifyFlags, kWithoutReadBarrier>(
1951      visitor, visitor);
1952  if (kDisallowReadBarrierDuringScan && !Runtime::Current()->IsActiveTransaction()) {
1953    Thread::Current()->ModifyDebugDisallowReadBarrier(-1);
1954  }
1955}
1956
1957// Process a field.
1958 inline void ConcurrentCopying::Process(mirror::Object* obj, MemberOffset offset) {
1959  DCHECK_EQ(Thread::Current(), thread_running_gc_);
1960  mirror::Object* ref = obj->GetFieldObject<
1961      mirror::Object, kVerifyNone, kWithoutReadBarrier, false>(offset);
1962  mirror::Object* to_ref = Mark</*kGrayImmuneObject*/false, /*kFromGCThread*/true>(
1963      ref,
1964      /*holder*/ obj,
1965      offset);
1966  if (to_ref == ref) {
1967    return;
1968  }
1969  // This may fail if a mutator writes to the field at the same time, but that is OK.
1970  mirror::Object* expected_ref = ref;
1971  mirror::Object* new_ref = to_ref;
1972  do {
1973    if (expected_ref !=
1974        obj->GetFieldObject<mirror::Object, kVerifyNone, kWithoutReadBarrier, false>(offset)) {
1975      // It was updated by the mutator.
1976      break;
1977    }
1978    // Use a release CAS to make sure threads reading the reference see the contents of copied objects.
1979  } while (!obj->CasFieldWeakReleaseObjectWithoutWriteBarrier<false, false, kVerifyNone>(
1980      offset,
1981      expected_ref,
1982      new_ref));
1983}
1984
1985// Process some roots.
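// Each root slot is updated in place with a weak relaxed CAS; if a mutator concurrently
// overwrites the slot, the mutator's value wins and the update is abandoned.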
1986 inline void ConcurrentCopying::VisitRoots(
1987    mirror::Object*** roots, size_t count, const RootInfo& info ATTRIBUTE_UNUSED) {
1988  for (size_t i = 0; i < count; ++i) {
1989    mirror::Object** root = roots[i];
1990    mirror::Object* ref = *root;
1991    mirror::Object* to_ref = Mark(ref);
1992    if (to_ref == ref) {
1993      continue;
1994    }
1995    Atomic<mirror::Object*>* addr = reinterpret_cast<Atomic<mirror::Object*>*>(root);
1996    mirror::Object* expected_ref = ref;
1997    mirror::Object* new_ref = to_ref;
1998    do {
1999      if (expected_ref != addr->LoadRelaxed()) {
2000        // It was updated by the mutator.
2001        break;
2002      }
2003    } while (!addr->CompareExchangeWeakRelaxed(expected_ref, new_ref));
2004  }
2005}
2006
2007template<bool kGrayImmuneObject>
2008 inline void ConcurrentCopying::MarkRoot(mirror::CompressedReference<mirror::Object>* root) {
2009  DCHECK(!root->IsNull());
2010  mirror::Object* const ref = root->AsMirrorPtr();
2011  mirror::Object* to_ref = Mark<kGrayImmuneObject>(ref);
2012  if (to_ref != ref) {
2013    auto* addr = reinterpret_cast<Atomic<mirror::CompressedReference<mirror::Object>>*>(root);
2014    auto expected_ref = mirror::CompressedReference<mirror::Object>::FromMirrorPtr(ref);
2015    auto new_ref = mirror::CompressedReference<mirror::Object>::FromMirrorPtr(to_ref);
2016    // If the CAS fails, then it was updated by the mutator.
2017    do {
2018      if (ref != addr->LoadRelaxed().AsMirrorPtr()) {
2019        // It was updated by the mutator.
2020        break;
2021      }
2022    } while (!addr->CompareExchangeWeakRelaxed(expected_ref, new_ref));
2023  }
2024}
2025
2026 inline void ConcurrentCopying::VisitRoots(
2027    mirror::CompressedReference<mirror::Object>** roots, size_t count,
2028    const RootInfo& info ATTRIBUTE_UNUSED) {
2029  for (size_t i = 0; i < count; ++i) {
2030    mirror::CompressedReference<mirror::Object>* const root = roots[i];
2031    if (!root->IsNull()) {
2032      // kGrayImmuneObject is true because this is used for the thread flip.
2033      MarkRoot</*kGrayImmuneObject*/true>(root);
2034    }
2035  }
2036}
2037
2038// Temporarily set gc_grays_immune_objects_ to true in a scope if the current thread is the GC.
2039class ConcurrentCopying::ScopedGcGraysImmuneObjects {
2040 public:
2041  explicit ScopedGcGraysImmuneObjects(ConcurrentCopying* collector)
2042      : collector_(collector), enabled_(false) {
2043    if (kUseBakerReadBarrier &&
2044        collector_->thread_running_gc_ == Thread::Current() &&
2045        !collector_->gc_grays_immune_objects_) {
2046      collector_->gc_grays_immune_objects_ = true;
2047      enabled_ = true;
2048    }
2049  }
2050
2051  ~ScopedGcGraysImmuneObjects() {
2052    if (kUseBakerReadBarrier &&
2053        collector_->thread_running_gc_ == Thread::Current() &&
2054        enabled_) {
2055      DCHECK(collector_->gc_grays_immune_objects_);
2056      collector_->gc_grays_immune_objects_ = false;
2057    }
2058  }
2059
2060 private:
2061  ConcurrentCopying* const collector_;
2062  bool enabled_;
2063};
2064
2065// Fill the given memory block with a dummy object. Used to fill in a copy of an object that was
2066// lost in a race.
2067void ConcurrentCopying::FillWithDummyObject(mirror::Object* dummy_obj, size_t byte_size) {
2068  // GC doesn't gray immune objects while scanning immune objects. But we need to trigger the read
2069  // barriers here because we need the updated reference to the int array class, etc. Temporarily
2070  // set gc_grays_immune_objects_ to true so that we won't cause a DCHECK failure in MarkImmuneSpace().
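  // The block becomes either a java.lang.Object (when it is exactly java.lang.Object's instance
  // size, too small for an int array header) or an int array whose length makes it exactly fill
  // the block; see the two branches below.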
2071  ScopedGcGraysImmuneObjects scoped_gc_gray_immune_objects(this);
2072  CHECK_ALIGNED(byte_size, kObjectAlignment);
2073  memset(dummy_obj, 0, byte_size);
2074  // Avoid going through the read barrier, since kDisallowReadBarrierDuringScan may be enabled.
2075  // Explicitly mark to make sure to get an object in the to-space.
2076  mirror::Class* int_array_class = down_cast<mirror::Class*>(
2077      Mark(mirror::IntArray::GetArrayClass<kWithoutReadBarrier>()));
2078  CHECK(int_array_class != nullptr);
2079  AssertToSpaceInvariant(nullptr, MemberOffset(0), int_array_class);
2080  size_t component_size = int_array_class->GetComponentSize<kWithoutReadBarrier>();
2081  CHECK_EQ(component_size, sizeof(int32_t));
2082  size_t data_offset = mirror::Array::DataOffset(component_size).SizeValue();
2083  if (data_offset > byte_size) {
2084    // An int array is too big. Use java.lang.Object.
2085    AssertToSpaceInvariant(nullptr, MemberOffset(0), java_lang_Object_);
2086    CHECK_EQ(byte_size, (java_lang_Object_->GetObjectSize<kVerifyNone, kWithoutReadBarrier>()));
2087    dummy_obj->SetClass(java_lang_Object_);
2088    CHECK_EQ(byte_size, (dummy_obj->SizeOf<kVerifyNone>()));
2089  } else {
2090    // Use an int array.
2091    dummy_obj->SetClass(int_array_class);
2092    CHECK((dummy_obj->IsArrayInstance<kVerifyNone, kWithoutReadBarrier>()));
2093    int32_t length = (byte_size - data_offset) / component_size;
2094    mirror::Array* dummy_arr = dummy_obj->AsArray<kVerifyNone, kWithoutReadBarrier>();
2095    dummy_arr->SetLength(length);
2096    CHECK_EQ(dummy_arr->GetLength(), length)
2097        << "byte_size=" << byte_size << " length=" << length
2098        << " component_size=" << component_size << " data_offset=" << data_offset;
2099    CHECK_EQ(byte_size, (dummy_obj->SizeOf<kVerifyNone>()))
2100        << "byte_size=" << byte_size << " length=" << length
2101        << " component_size=" << component_size << " data_offset=" << data_offset;
2102  }
2103}
2104
2105// Reuse memory blocks that were copies of objects lost in races.
2106mirror::Object* ConcurrentCopying::AllocateInSkippedBlock(size_t alloc_size) {
2107  // Try to reuse the blocks that were unused due to CAS failures.
2108  CHECK_ALIGNED(alloc_size, space::RegionSpace::kAlignment);
2109  Thread* self = Thread::Current();
2110  size_t min_object_size = RoundUp(sizeof(mirror::Object), space::RegionSpace::kAlignment);
2111  size_t byte_size;
2112  uint8_t* addr;
2113  {
2114    MutexLock mu(self, skipped_blocks_lock_);
2115    auto it = skipped_blocks_map_.lower_bound(alloc_size);
2116    if (it == skipped_blocks_map_.end()) {
2117      // Not found.
2118      return nullptr;
2119    }
2120    byte_size = it->first;
2121    CHECK_GE(byte_size, alloc_size);
2122    if (byte_size > alloc_size && byte_size - alloc_size < min_object_size) {
2123      // If the remainder would be too small for a dummy object, retry with a larger request size.
2124      it = skipped_blocks_map_.lower_bound(alloc_size + min_object_size);
2125      if (it == skipped_blocks_map_.end()) {
2126        // Not found.
2127        return nullptr;
2128      }
2129      CHECK_ALIGNED(it->first - alloc_size, space::RegionSpace::kAlignment);
2130      CHECK_GE(it->first - alloc_size, min_object_size)
2131          << "byte_size=" << byte_size << " it->first=" << it->first << " alloc_size=" << alloc_size;
2132    }
2133    // Found a block.
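    // skipped_blocks_map_ is sorted by block size, so lower_bound() picked the smallest block
    // that can hold alloc_size (a best-fit policy), possibly after the retry above.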
2134    CHECK(it != skipped_blocks_map_.end());
2135    byte_size = it->first;
2136    addr = it->second;
2137    CHECK_GE(byte_size, alloc_size);
2138    CHECK(region_space_->IsInToSpace(reinterpret_cast<mirror::Object*>(addr)));
2139    CHECK_ALIGNED(byte_size, space::RegionSpace::kAlignment);
2140    if (kVerboseMode) {
2141      LOG(INFO) << "Reusing skipped bytes : " << reinterpret_cast<void*>(addr) << ", " << byte_size;
2142    }
2143    skipped_blocks_map_.erase(it);
2144  }
2145  memset(addr, 0, byte_size);
2146  if (byte_size > alloc_size) {
2147    // Return the remainder to the map.
2148    CHECK_ALIGNED(byte_size - alloc_size, space::RegionSpace::kAlignment);
2149    CHECK_GE(byte_size - alloc_size, min_object_size);
2150    // FillWithDummyObject may mark an object, so avoid holding skipped_blocks_lock_ to prevent a
2151    // lock-order violation and possible deadlock. The deadlock case is recursive:
2152    // FillWithDummyObject -> IntArray::GetArrayClass -> Mark -> Copy -> AllocateInSkippedBlock.
2153    FillWithDummyObject(reinterpret_cast<mirror::Object*>(addr + alloc_size),
2154                        byte_size - alloc_size);
2155    CHECK(region_space_->IsInToSpace(reinterpret_cast<mirror::Object*>(addr + alloc_size)));
2156    {
2157      MutexLock mu(self, skipped_blocks_lock_);
2158      skipped_blocks_map_.insert(std::make_pair(byte_size - alloc_size, addr + alloc_size));
2159    }
2160  }
2161  return reinterpret_cast<mirror::Object*>(addr);
2162}
2163
2164mirror::Object* ConcurrentCopying::Copy(mirror::Object* from_ref,
2165                                        mirror::Object* holder,
2166                                        MemberOffset offset) {
2167  DCHECK(region_space_->IsInFromSpace(from_ref));
2168  // If the class pointer is null, the object is invalid. This could occur for a dangling pointer
2169  // from a previous GC that is either inside or outside the allocated region.
2170  mirror::Class* klass = from_ref->GetClass<kVerifyNone, kWithoutReadBarrier>();
2171  if (UNLIKELY(klass == nullptr)) {
2172    heap_->GetVerification()->LogHeapCorruption(holder, offset, from_ref, /* fatal */ true);
2173  }
2174  // There must not be a read barrier here, to avoid a nested RB that might violate the to-space
2175  // invariant. Note that from_ref is a from-space ref, so the SizeOf() call will access the
2176  // from-space meta objects, but that is OK and necessary.
2177  size_t obj_size = from_ref->SizeOf<kDefaultVerifyFlags>();
2178  size_t region_space_alloc_size = RoundUp(obj_size, space::RegionSpace::kAlignment);
2179  size_t region_space_bytes_allocated = 0U;
2180  size_t non_moving_space_bytes_allocated = 0U;
2181  size_t bytes_allocated = 0U;
2182  size_t dummy;
2183  mirror::Object* to_ref = region_space_->AllocNonvirtual<true>(
2184      region_space_alloc_size, &region_space_bytes_allocated, nullptr, &dummy);
2185  bytes_allocated = region_space_bytes_allocated;
2186  if (to_ref != nullptr) {
2187    DCHECK_EQ(region_space_alloc_size, region_space_bytes_allocated);
2188  }
2189  bool fall_back_to_non_moving = false;
2190  if (UNLIKELY(to_ref == nullptr)) {
2191    // Failed to allocate in the region space. Try the skipped blocks.
2192    to_ref = AllocateInSkippedBlock(region_space_alloc_size);
2193    if (to_ref != nullptr) {
2194      // Succeeded in allocating in a skipped block.
2195      if (heap_->use_tlab_) {
2196        // This is necessary for the tlab case as it's not accounted in the space.
2197        region_space_->RecordAlloc(to_ref);
2198      }
2199      bytes_allocated = region_space_alloc_size;
2200    } else {
2201      // Fall back to the non-moving space.
2202      fall_back_to_non_moving = true;
2203      if (kVerboseMode) {
2204        LOG(INFO) << "Out of memory in the to-space. Fall back to non-moving. skipped_bytes="
2205                  << to_space_bytes_skipped_.LoadSequentiallyConsistent()
2206                  << " skipped_objects=" << to_space_objects_skipped_.LoadSequentiallyConsistent();
2207      }
2209      to_ref = heap_->non_moving_space_->Alloc(Thread::Current(), obj_size,
2210                                               &non_moving_space_bytes_allocated, nullptr, &dummy);
2211      if (UNLIKELY(to_ref == nullptr)) {
2212        LOG(FATAL_WITHOUT_ABORT) << "Fall-back non-moving space allocation failed for a "
2213                                 << obj_size << " byte object in region type "
2214                                 << region_space_->GetRegionType(from_ref);
2215        LOG(FATAL) << "Object address=" << from_ref << " type=" << from_ref->PrettyTypeOf();
2216      }
2217      bytes_allocated = non_moving_space_bytes_allocated;
2218      // Mark it in the mark bitmap.
2219      accounting::ContinuousSpaceBitmap* mark_bitmap =
2220          heap_mark_bitmap_->GetContinuousSpaceBitmap(to_ref);
2221      CHECK(mark_bitmap != nullptr);
2222      CHECK(!mark_bitmap->AtomicTestAndSet(to_ref));
2223    }
2224  }
2225  DCHECK(to_ref != nullptr);
2226
2227  // Copy the object excluding the lock word since that is handled in the loop.
2228  to_ref->SetClass(klass);
2229  const size_t kObjectHeaderSize = sizeof(mirror::Object);
2230  DCHECK_GE(obj_size, kObjectHeaderSize);
2231  static_assert(kObjectHeaderSize == sizeof(mirror::HeapReference<mirror::Class>) +
2232                    sizeof(LockWord),
2233                "Object header size does not match");
2234  // Memcpy can tear words since it may copy byte by byte. It is only safe to do this because the
2235  // object in the from space is immutable other than the lock word. b/31423258
2236  memcpy(reinterpret_cast<uint8_t*>(to_ref) + kObjectHeaderSize,
2237         reinterpret_cast<const uint8_t*>(from_ref) + kObjectHeaderSize,
2238         obj_size - kObjectHeaderSize);
2239
2240  // Attempt to install the forward pointer. This is in a loop as the
2241  // lock word atomic write can fail.
2242  while (true) {
2243    LockWord old_lock_word = from_ref->GetLockWord(false);
2244
2245    if (old_lock_word.GetState() == LockWord::kForwardingAddress) {
2246      // Lost the race. Another thread (either GC or mutator) stored
2247      // the forwarding pointer first. Make the lost copy (to_ref)
2248      // look like a valid but dead (dummy) object and keep it for
2249      // future reuse.
2250      FillWithDummyObject(to_ref, bytes_allocated);
2251      if (!fall_back_to_non_moving) {
2252        DCHECK(region_space_->IsInToSpace(to_ref));
2253        if (bytes_allocated > space::RegionSpace::kRegionSize) {
2254          // Free the large alloc.
2255          region_space_->FreeLarge(to_ref, bytes_allocated);
2256        } else {
2257          // Record the lost copy for later reuse.
2258          heap_->num_bytes_allocated_.FetchAndAddSequentiallyConsistent(bytes_allocated);
2259          to_space_bytes_skipped_.FetchAndAddSequentiallyConsistent(bytes_allocated);
2260          to_space_objects_skipped_.FetchAndAddSequentiallyConsistent(1);
2261          MutexLock mu(Thread::Current(), skipped_blocks_lock_);
2262          skipped_blocks_map_.insert(std::make_pair(bytes_allocated,
2263                                                    reinterpret_cast<uint8_t*>(to_ref)));
2264        }
2265      } else {
2266        DCHECK(heap_->non_moving_space_->HasAddress(to_ref));
2267        DCHECK_EQ(bytes_allocated, non_moving_space_bytes_allocated);
2268        // Free the non-moving-space chunk.
2269        accounting::ContinuousSpaceBitmap* mark_bitmap =
2270            heap_mark_bitmap_->GetContinuousSpaceBitmap(to_ref);
2271        CHECK(mark_bitmap != nullptr);
2272        CHECK(mark_bitmap->Clear(to_ref));
2273        heap_->non_moving_space_->Free(Thread::Current(), to_ref);
2274      }
2275
2276      // Get the winner's forward ptr.
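      // The winner's forwarding address must point either into the to-space or, if the winner
      // also fell back, into the non-moving space; the CHECKs below verify this.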
2277 mirror::Object* lost_fwd_ptr = to_ref; 2278 to_ref = reinterpret_cast<mirror::Object*>(old_lock_word.ForwardingAddress()); 2279 CHECK(to_ref != nullptr); 2280 CHECK_NE(to_ref, lost_fwd_ptr); 2281 CHECK(region_space_->IsInToSpace(to_ref) || heap_->non_moving_space_->HasAddress(to_ref)) 2282 << "to_ref=" << to_ref << " " << heap_->DumpSpaces(); 2283 CHECK_NE(to_ref->GetLockWord(false).GetState(), LockWord::kForwardingAddress); 2284 return to_ref; 2285 } 2286 2287 // Copy the old lock word over since we did not copy it yet. 2288 to_ref->SetLockWord(old_lock_word, false); 2289 // Set the gray ptr. 2290 if (kUseBakerReadBarrier) { 2291 to_ref->SetReadBarrierState(ReadBarrier::GrayState()); 2292 } 2293 2294 // Do a fence to prevent the field CAS in ConcurrentCopying::Process from possibly reordering 2295 // before the object copy. 2296 QuasiAtomic::ThreadFenceRelease(); 2297 2298 LockWord new_lock_word = LockWord::FromForwardingAddress(reinterpret_cast<size_t>(to_ref)); 2299 2300 // Try to atomically write the fwd ptr. 2301 bool success = from_ref->CasLockWordWeakRelaxed(old_lock_word, new_lock_word); 2302 if (LIKELY(success)) { 2303 // The CAS succeeded. 2304 objects_moved_.FetchAndAddRelaxed(1); 2305 bytes_moved_.FetchAndAddRelaxed(region_space_alloc_size); 2306 if (LIKELY(!fall_back_to_non_moving)) { 2307 DCHECK(region_space_->IsInToSpace(to_ref)); 2308 } else { 2309 DCHECK(heap_->non_moving_space_->HasAddress(to_ref)); 2310 DCHECK_EQ(bytes_allocated, non_moving_space_bytes_allocated); 2311 } 2312 if (kUseBakerReadBarrier) { 2313 DCHECK(to_ref->GetReadBarrierState() == ReadBarrier::GrayState()); 2314 } 2315 DCHECK(GetFwdPtr(from_ref) == to_ref); 2316 CHECK_NE(to_ref->GetLockWord(false).GetState(), LockWord::kForwardingAddress); 2317 PushOntoMarkStack(to_ref); 2318 return to_ref; 2319 } else { 2320 // The CAS failed. It may have lost the race or may have failed 2321 // due to monitor/hashcode ops. Either way, retry. 2322 } 2323 } 2324} 2325 2326mirror::Object* ConcurrentCopying::IsMarked(mirror::Object* from_ref) { 2327 DCHECK(from_ref != nullptr); 2328 space::RegionSpace::RegionType rtype = region_space_->GetRegionType(from_ref); 2329 if (rtype == space::RegionSpace::RegionType::kRegionTypeToSpace) { 2330 // It's already marked. 2331 return from_ref; 2332 } 2333 mirror::Object* to_ref; 2334 if (rtype == space::RegionSpace::RegionType::kRegionTypeFromSpace) { 2335 to_ref = GetFwdPtr(from_ref); 2336 DCHECK(to_ref == nullptr || region_space_->IsInToSpace(to_ref) || 2337 heap_->non_moving_space_->HasAddress(to_ref)) 2338 << "from_ref=" << from_ref << " to_ref=" << to_ref; 2339 } else if (rtype == space::RegionSpace::RegionType::kRegionTypeUnevacFromSpace) { 2340 if (IsMarkedInUnevacFromSpace(from_ref)) { 2341 to_ref = from_ref; 2342 } else { 2343 to_ref = nullptr; 2344 } 2345 } else { 2346 // from_ref is in a non-moving space. 2347 if (immune_spaces_.ContainsObject(from_ref)) { 2348 // An immune object is alive. 2349 to_ref = from_ref; 2350 } else { 2351 // Non-immune non-moving space. Use the mark bitmap. 2352 accounting::ContinuousSpaceBitmap* mark_bitmap = 2353 heap_mark_bitmap_->GetContinuousSpaceBitmap(from_ref); 2354 accounting::LargeObjectBitmap* los_bitmap = 2355 heap_mark_bitmap_->GetLargeObjectBitmap(from_ref); 2356 CHECK(los_bitmap != nullptr) << "LOS bitmap covers the entire address range"; 2357 bool is_los = mark_bitmap == nullptr; 2358 if (!is_los && mark_bitmap->Test(from_ref)) { 2359 // Already marked. 
2360 to_ref = from_ref; 2361 } else if (is_los && los_bitmap->Test(from_ref)) { 2362 // Already marked in LOS. 2363 to_ref = from_ref; 2364 } else { 2365 // Not marked. 2366 if (IsOnAllocStack(from_ref)) { 2367 // If on the allocation stack, it's considered marked. 2368 to_ref = from_ref; 2369 } else { 2370 // Not marked. 2371 to_ref = nullptr; 2372 } 2373 } 2374 } 2375 } 2376 return to_ref; 2377} 2378 2379bool ConcurrentCopying::IsOnAllocStack(mirror::Object* ref) { 2380 QuasiAtomic::ThreadFenceAcquire(); 2381 accounting::ObjectStack* alloc_stack = GetAllocationStack(); 2382 return alloc_stack->Contains(ref); 2383} 2384 2385mirror::Object* ConcurrentCopying::MarkNonMoving(mirror::Object* ref, 2386 mirror::Object* holder, 2387 MemberOffset offset) { 2388 // ref is in a non-moving space (from_ref == to_ref). 2389 DCHECK(!region_space_->HasAddress(ref)) << ref; 2390 DCHECK(!immune_spaces_.ContainsObject(ref)); 2391 // Use the mark bitmap. 2392 accounting::ContinuousSpaceBitmap* mark_bitmap = 2393 heap_mark_bitmap_->GetContinuousSpaceBitmap(ref); 2394 accounting::LargeObjectBitmap* los_bitmap = 2395 heap_mark_bitmap_->GetLargeObjectBitmap(ref); 2396 bool is_los = mark_bitmap == nullptr; 2397 if (!is_los && mark_bitmap->Test(ref)) { 2398 // Already marked. 2399 if (kUseBakerReadBarrier) { 2400 DCHECK(ref->GetReadBarrierState() == ReadBarrier::GrayState() || 2401 ref->GetReadBarrierState() == ReadBarrier::WhiteState()); 2402 } 2403 } else if (is_los && los_bitmap->Test(ref)) { 2404 // Already marked in LOS. 2405 if (kUseBakerReadBarrier) { 2406 DCHECK(ref->GetReadBarrierState() == ReadBarrier::GrayState() || 2407 ref->GetReadBarrierState() == ReadBarrier::WhiteState()); 2408 } 2409 } else { 2410 // Not marked. 2411 if (IsOnAllocStack(ref)) { 2412 // If it's on the allocation stack, it's considered marked. Keep it white. 2413 // Objects on the allocation stack need not be marked. 2414 if (!is_los) { 2415 DCHECK(!mark_bitmap->Test(ref)); 2416 } else { 2417 DCHECK(!los_bitmap->Test(ref)); 2418 } 2419 if (kUseBakerReadBarrier) { 2420 DCHECK_EQ(ref->GetReadBarrierState(), ReadBarrier::WhiteState()); 2421 } 2422 } else { 2423 // For the baker-style RB, we need to handle 'false-gray' cases. See the 2424 // kRegionTypeUnevacFromSpace-case comment in Mark(). 2425 if (kUseBakerReadBarrier) { 2426 // Test the bitmap first to reduce the chance of false gray cases. 2427 if ((!is_los && mark_bitmap->Test(ref)) || 2428 (is_los && los_bitmap->Test(ref))) { 2429 return ref; 2430 } 2431 } 2432 if (is_los && !IsAligned<kPageSize>(ref)) { 2433 // Ref is a large object that is not aligned, it must be heap corruption. Dump data before 2434 // AtomicSetReadBarrierState since it will fault if the address is not valid. 2435 heap_->GetVerification()->LogHeapCorruption(holder, offset, ref, /* fatal */ true); 2436 } 2437 // Not marked or on the allocation stack. Try to mark it. 2438 // This may or may not succeed, which is ok. 2439 bool cas_success = false; 2440 if (kUseBakerReadBarrier) { 2441 cas_success = ref->AtomicSetReadBarrierState(ReadBarrier::WhiteState(), 2442 ReadBarrier::GrayState()); 2443 } 2444 if (!is_los && mark_bitmap->AtomicTestAndSet(ref)) { 2445 // Already marked. 2446 if (kUseBakerReadBarrier && cas_success && 2447 ref->GetReadBarrierState() == ReadBarrier::GrayState()) { 2448 PushOntoFalseGrayStack(ref); 2449 } 2450 } else if (is_los && los_bitmap->AtomicTestAndSet(ref)) { 2451 // Already marked in LOS. 
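        // A racing thread marked the object first; if our read barrier state CAS above turned it
        // gray anyway, record it as false-gray so the gray state can be cleared later.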
2452 if (kUseBakerReadBarrier && cas_success && 2453 ref->GetReadBarrierState() == ReadBarrier::GrayState()) { 2454 PushOntoFalseGrayStack(ref); 2455 } 2456 } else { 2457 // Newly marked. 2458 if (kUseBakerReadBarrier) { 2459 DCHECK_EQ(ref->GetReadBarrierState(), ReadBarrier::GrayState()); 2460 } 2461 PushOntoMarkStack(ref); 2462 } 2463 } 2464 } 2465 return ref; 2466} 2467 2468void ConcurrentCopying::FinishPhase() { 2469 Thread* const self = Thread::Current(); 2470 { 2471 MutexLock mu(self, mark_stack_lock_); 2472 CHECK_EQ(pooled_mark_stacks_.size(), kMarkStackPoolSize); 2473 } 2474 // kVerifyNoMissingCardMarks relies on the region space cards not being cleared to avoid false 2475 // positives. 2476 if (!kVerifyNoMissingCardMarks) { 2477 TimingLogger::ScopedTiming split("ClearRegionSpaceCards", GetTimings()); 2478 // We do not currently use the region space cards at all, madvise them away to save ram. 2479 heap_->GetCardTable()->ClearCardRange(region_space_->Begin(), region_space_->Limit()); 2480 } 2481 { 2482 MutexLock mu(self, skipped_blocks_lock_); 2483 skipped_blocks_map_.clear(); 2484 } 2485 { 2486 ReaderMutexLock mu(self, *Locks::mutator_lock_); 2487 { 2488 WriterMutexLock mu2(self, *Locks::heap_bitmap_lock_); 2489 heap_->ClearMarkedObjects(); 2490 } 2491 if (kUseBakerReadBarrier && kFilterModUnionCards) { 2492 TimingLogger::ScopedTiming split("FilterModUnionCards", GetTimings()); 2493 ReaderMutexLock mu2(self, *Locks::heap_bitmap_lock_); 2494 for (space::ContinuousSpace* space : immune_spaces_.GetSpaces()) { 2495 DCHECK(space->IsImageSpace() || space->IsZygoteSpace()); 2496 accounting::ModUnionTable* table = heap_->FindModUnionTableFromSpace(space); 2497 // Filter out cards that don't need to be set. 2498 if (table != nullptr) { 2499 table->FilterCards(); 2500 } 2501 } 2502 } 2503 if (kUseBakerReadBarrier) { 2504 TimingLogger::ScopedTiming split("EmptyRBMarkBitStack", GetTimings()); 2505 DCHECK(rb_mark_bit_stack_ != nullptr); 2506 const auto* limit = rb_mark_bit_stack_->End(); 2507 for (StackReference<mirror::Object>* it = rb_mark_bit_stack_->Begin(); it != limit; ++it) { 2508 CHECK(it->AsMirrorPtr()->AtomicSetMarkBit(1, 0)); 2509 } 2510 rb_mark_bit_stack_->Reset(); 2511 } 2512 } 2513 if (measure_read_barrier_slow_path_) { 2514 MutexLock mu(self, rb_slow_path_histogram_lock_); 2515 rb_slow_path_time_histogram_.AdjustAndAddValue(rb_slow_path_ns_.LoadRelaxed()); 2516 rb_slow_path_count_total_ += rb_slow_path_count_.LoadRelaxed(); 2517 rb_slow_path_count_gc_total_ += rb_slow_path_count_gc_.LoadRelaxed(); 2518 } 2519} 2520 2521bool ConcurrentCopying::IsNullOrMarkedHeapReference(mirror::HeapReference<mirror::Object>* field, 2522 bool do_atomic_update) { 2523 mirror::Object* from_ref = field->AsMirrorPtr(); 2524 if (from_ref == nullptr) { 2525 return true; 2526 } 2527 mirror::Object* to_ref = IsMarked(from_ref); 2528 if (to_ref == nullptr) { 2529 return false; 2530 } 2531 if (from_ref != to_ref) { 2532 if (do_atomic_update) { 2533 do { 2534 if (field->AsMirrorPtr() != from_ref) { 2535 // Concurrently overwritten by a mutator. 
2536 break; 2537 } 2538 } while (!field->CasWeakRelaxed(from_ref, to_ref)); 2539 } else { 2540 QuasiAtomic::ThreadFenceRelease(); 2541 field->Assign(to_ref); 2542 QuasiAtomic::ThreadFenceSequentiallyConsistent(); 2543 } 2544 } 2545 return true; 2546} 2547 2548mirror::Object* ConcurrentCopying::MarkObject(mirror::Object* from_ref) { 2549 return Mark(from_ref); 2550} 2551 2552void ConcurrentCopying::DelayReferenceReferent(ObjPtr<mirror::Class> klass, 2553 ObjPtr<mirror::Reference> reference) { 2554 heap_->GetReferenceProcessor()->DelayReferenceReferent(klass, reference, this); 2555} 2556 2557void ConcurrentCopying::ProcessReferences(Thread* self) { 2558 TimingLogger::ScopedTiming split("ProcessReferences", GetTimings()); 2559 // We don't really need to lock the heap bitmap lock as we use CAS to mark in bitmaps. 2560 WriterMutexLock mu(self, *Locks::heap_bitmap_lock_); 2561 GetHeap()->GetReferenceProcessor()->ProcessReferences( 2562 true /*concurrent*/, GetTimings(), GetCurrentIteration()->GetClearSoftReferences(), this); 2563} 2564 2565void ConcurrentCopying::RevokeAllThreadLocalBuffers() { 2566 TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings()); 2567 region_space_->RevokeAllThreadLocalBuffers(); 2568} 2569 2570mirror::Object* ConcurrentCopying::MarkFromReadBarrierWithMeasurements(mirror::Object* from_ref) { 2571 if (Thread::Current() != thread_running_gc_) { 2572 rb_slow_path_count_.FetchAndAddRelaxed(1u); 2573 } else { 2574 rb_slow_path_count_gc_.FetchAndAddRelaxed(1u); 2575 } 2576 ScopedTrace tr(__FUNCTION__); 2577 const uint64_t start_time = measure_read_barrier_slow_path_ ? NanoTime() : 0u; 2578 mirror::Object* ret = Mark(from_ref); 2579 if (measure_read_barrier_slow_path_) { 2580 rb_slow_path_ns_.FetchAndAddRelaxed(NanoTime() - start_time); 2581 } 2582 return ret; 2583} 2584 2585void ConcurrentCopying::DumpPerformanceInfo(std::ostream& os) { 2586 GarbageCollector::DumpPerformanceInfo(os); 2587 MutexLock mu(Thread::Current(), rb_slow_path_histogram_lock_); 2588 if (rb_slow_path_time_histogram_.SampleSize() > 0) { 2589 Histogram<uint64_t>::CumulativeData cumulative_data; 2590 rb_slow_path_time_histogram_.CreateHistogram(&cumulative_data); 2591 rb_slow_path_time_histogram_.PrintConfidenceIntervals(os, 0.99, cumulative_data); 2592 } 2593 if (rb_slow_path_count_total_ > 0) { 2594 os << "Slow path count " << rb_slow_path_count_total_ << "\n"; 2595 } 2596 if (rb_slow_path_count_gc_total_ > 0) { 2597 os << "GC slow path count " << rb_slow_path_count_gc_total_ << "\n"; 2598 } 2599 os << "Cumulative bytes moved " << cumulative_bytes_moved_.LoadRelaxed() << "\n"; 2600 os << "Cumulative objects moved " << cumulative_objects_moved_.LoadRelaxed() << "\n"; 2601} 2602 2603} // namespace collector 2604} // namespace gc 2605} // namespace art 2606