concurrent_copying.cc revision 46ec520fc2d08e4bb602472406be8fd1ef7ca73c
/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "concurrent_copying.h"

#include "art_field-inl.h"
#include "gc/accounting/heap_bitmap-inl.h"
#include "gc/accounting/space_bitmap-inl.h"
#include "gc/space/image_space.h"
#include "gc/space/space.h"
#include "intern_table.h"
#include "mirror/class-inl.h"
#include "mirror/object-inl.h"
#include "scoped_thread_state_change.h"
#include "thread-inl.h"
#include "thread_list.h"
#include "well_known_classes.h"

namespace art {
namespace gc {
namespace collector {

ConcurrentCopying::ConcurrentCopying(Heap* heap, const std::string& name_prefix)
    : GarbageCollector(heap,
                       name_prefix + (name_prefix.empty() ? "" : " ") +
                       "concurrent copying + mark sweep"),
      region_space_(nullptr), gc_barrier_(new Barrier(0)), mark_queue_(2 * MB),
      is_marking_(false), is_active_(false), is_asserting_to_space_invariant_(false),
      heap_mark_bitmap_(nullptr), live_stack_freeze_size_(0),
      skipped_blocks_lock_("concurrent copying bytes blocks lock", kMarkSweepMarkStackLock),
      rb_table_(heap_->GetReadBarrierTable()),
      force_evacuate_all_(false) {
  static_assert(space::RegionSpace::kRegionSize == accounting::ReadBarrierTable::kRegionSize,
                "The region space size and the read barrier table region size must match");
  cc_heap_bitmap_.reset(new accounting::HeapBitmap(heap));
  {
    Thread* self = Thread::Current();
    ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
    // Cache this so that we won't have to lock heap_bitmap_lock_ in
    // Mark(), which could cause a nested lock on heap_bitmap_lock_
    // when a read barrier is triggered while the GC is running, or a
    // lock order violation (class_linker_lock_ and heap_bitmap_lock_).
    heap_mark_bitmap_ = heap->GetMarkBitmap();
  }
}

ConcurrentCopying::~ConcurrentCopying() {
}

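// Run one GC cycle. InitializePhase, MarkingPhase and ReclaimPhase run under a
// shared mutator lock, concurrently with the mutators, while FlipThreadRoots
// and the optional no-from-space-refs verification below pause the mutators.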
void ConcurrentCopying::RunPhases() {
  CHECK(kUseBakerReadBarrier || kUseTableLookupReadBarrier);
  CHECK(!is_active_);
  is_active_ = true;
  Thread* self = Thread::Current();
  Locks::mutator_lock_->AssertNotHeld(self);
  {
    ReaderMutexLock mu(self, *Locks::mutator_lock_);
    InitializePhase();
  }
  FlipThreadRoots();
  {
    ReaderMutexLock mu(self, *Locks::mutator_lock_);
    MarkingPhase();
  }
  // Verify no from space refs. This causes a pause.
  if (kEnableNoFromSpaceRefsVerification || kIsDebugBuild) {
    TimingLogger::ScopedTiming split("(Paused)VerifyNoFromSpaceReferences", GetTimings());
    ScopedPause pause(this);
    CheckEmptyMarkQueue();
    if (kVerboseMode) {
      LOG(INFO) << "Verifying no from-space refs";
    }
    VerifyNoFromSpaceReferences();
    if (kVerboseMode) {
      LOG(INFO) << "Done verifying no from-space refs";
    }
    CheckEmptyMarkQueue();
  }
  {
    ReaderMutexLock mu(self, *Locks::mutator_lock_);
    ReclaimPhase();
  }
  FinishPhase();
  CHECK(is_active_);
  is_active_ = false;
}

void ConcurrentCopying::BindBitmaps() {
  Thread* self = Thread::Current();
  WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
  // Mark all of the spaces we never collect as immune.
  for (const auto& space : heap_->GetContinuousSpaces()) {
    if (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyNeverCollect
        || space->GetGcRetentionPolicy() == space::kGcRetentionPolicyFullCollect) {
      CHECK(space->IsZygoteSpace() || space->IsImageSpace());
      CHECK(immune_region_.AddContinuousSpace(space)) << "Failed to add space " << *space;
      const char* bitmap_name = space->IsImageSpace() ? "cc image space bitmap" :
          "cc zygote space bitmap";
      // TODO: try avoiding using bitmaps for image/zygote to save space.
      accounting::ContinuousSpaceBitmap* bitmap =
          accounting::ContinuousSpaceBitmap::Create(bitmap_name, space->Begin(), space->Capacity());
      cc_heap_bitmap_->AddContinuousSpaceBitmap(bitmap);
      cc_bitmaps_.push_back(bitmap);
    } else if (space == region_space_) {
      accounting::ContinuousSpaceBitmap* bitmap =
          accounting::ContinuousSpaceBitmap::Create("cc region space bitmap",
                                                    space->Begin(), space->Capacity());
      cc_heap_bitmap_->AddContinuousSpaceBitmap(bitmap);
      cc_bitmaps_.push_back(bitmap);
      region_space_bitmap_ = bitmap;
    }
  }
}

void ConcurrentCopying::InitializePhase() {
  TimingLogger::ScopedTiming split("InitializePhase", GetTimings());
  if (kVerboseMode) {
    LOG(INFO) << "GC InitializePhase";
    LOG(INFO) << "Region-space : " << reinterpret_cast<void*>(region_space_->Begin()) << "-"
              << reinterpret_cast<void*>(region_space_->Limit());
  }
  CHECK(mark_queue_.IsEmpty());
  immune_region_.Reset();
  bytes_moved_.StoreRelaxed(0);
  objects_moved_.StoreRelaxed(0);
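  // Note: force_evacuate_all_ is passed to RegionSpace::SetFromSpace() in the
  // flip callback; when set, presumably all regions are treated as from-space
  // and evacuated, instead of only the ones the region space would normally
  // pick.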
  if (GetCurrentIteration()->GetGcCause() == kGcCauseExplicit ||
      GetCurrentIteration()->GetGcCause() == kGcCauseForNativeAlloc ||
      GetCurrentIteration()->GetClearSoftReferences()) {
    force_evacuate_all_ = true;
  } else {
    force_evacuate_all_ = false;
  }
  BindBitmaps();
  if (kVerboseMode) {
    LOG(INFO) << "force_evacuate_all=" << force_evacuate_all_;
    LOG(INFO) << "Immune region: " << immune_region_.Begin() << "-" << immune_region_.End();
    LOG(INFO) << "GC end of InitializePhase";
  }
}

// Used to switch the thread roots of a thread from from-space refs to to-space refs.
class ThreadFlipVisitor : public Closure {
 public:
  explicit ThreadFlipVisitor(ConcurrentCopying* concurrent_copying, bool use_tlab)
      : concurrent_copying_(concurrent_copying), use_tlab_(use_tlab) {
  }

  virtual void Run(Thread* thread) OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    // Note: self is not necessarily equal to thread since thread may be suspended.
    Thread* self = Thread::Current();
    CHECK(thread == self || thread->IsSuspended() || thread->GetState() == kWaitingPerformingGc)
        << thread->GetState() << " thread " << thread << " self " << self;
    if (use_tlab_ && thread->HasTlab()) {
      if (ConcurrentCopying::kEnableFromSpaceAccountingCheck) {
        // This must come before the revoke.
        size_t thread_local_objects = thread->GetThreadLocalObjectsAllocated();
        concurrent_copying_->region_space_->RevokeThreadLocalBuffers(thread);
        reinterpret_cast<Atomic<size_t>*>(&concurrent_copying_->from_space_num_objects_at_first_pause_)->
            FetchAndAddSequentiallyConsistent(thread_local_objects);
      } else {
        concurrent_copying_->region_space_->RevokeThreadLocalBuffers(thread);
      }
    }
    if (kUseThreadLocalAllocationStack) {
      thread->RevokeThreadLocalAllocationStack();
    }
    ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
    thread->VisitRoots(concurrent_copying_);
    concurrent_copying_->GetBarrier().Pass(self);
  }

 private:
  ConcurrentCopying* const concurrent_copying_;
  const bool use_tlab_;
};

// Called back from Runtime::FlipThreadRoots() during a pause.
class FlipCallback : public Closure {
 public:
  explicit FlipCallback(ConcurrentCopying* concurrent_copying)
      : concurrent_copying_(concurrent_copying) {
  }

  virtual void Run(Thread* thread) OVERRIDE EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_) {
    ConcurrentCopying* cc = concurrent_copying_;
    TimingLogger::ScopedTiming split("(Paused)FlipCallback", cc->GetTimings());
    // Note: self is not necessarily equal to thread since thread may be suspended.
    Thread* self = Thread::Current();
    CHECK(thread == self);
    Locks::mutator_lock_->AssertExclusiveHeld(self);
    cc->region_space_->SetFromSpace(cc->rb_table_, cc->force_evacuate_all_);
    cc->SwapStacks(self);
    if (ConcurrentCopying::kEnableFromSpaceAccountingCheck) {
      cc->RecordLiveStackFreezeSize(self);
      cc->from_space_num_objects_at_first_pause_ = cc->region_space_->GetObjectsAllocated();
      cc->from_space_num_bytes_at_first_pause_ = cc->region_space_->GetBytesAllocated();
    }
    cc->is_marking_ = true;
    if (UNLIKELY(Runtime::Current()->IsActiveTransaction())) {
      CHECK(Runtime::Current()->IsAotCompiler());
      TimingLogger::ScopedTiming split2("(Paused)VisitTransactionRoots", cc->GetTimings());
      Runtime::Current()->VisitTransactionRoots(cc);
    }
  }

 private:
  ConcurrentCopying* const concurrent_copying_;
};

// Switch the threads from using from-space refs to to-space refs. Forward/mark the thread roots.
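// Runtime::FlipThreadRoots() suspends all threads, runs FlipCallback under the
// exclusively held mutator lock, and then has ThreadFlipVisitor run for each
// thread (by the thread itself or on its behalf) to revoke its TLAB and
// forward/mark its roots; gc_barrier_ is used below to wait until every
// visitor has passed the barrier.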
void ConcurrentCopying::FlipThreadRoots() {
  TimingLogger::ScopedTiming split("FlipThreadRoots", GetTimings());
  if (kVerboseMode) {
    LOG(INFO) << "time=" << region_space_->Time();
    region_space_->DumpNonFreeRegions(LOG(INFO));
  }
  Thread* self = Thread::Current();
  Locks::mutator_lock_->AssertNotHeld(self);
  gc_barrier_->Init(self, 0);
  ThreadFlipVisitor thread_flip_visitor(this, heap_->use_tlab_);
  FlipCallback flip_callback(this);
  size_t barrier_count = Runtime::Current()->FlipThreadRoots(
      &thread_flip_visitor, &flip_callback, this);
  {
    ScopedThreadStateChange tsc(self, kWaitingForCheckPointsToRun);
    gc_barrier_->Increment(self, barrier_count);
  }
  is_asserting_to_space_invariant_ = true;
  QuasiAtomic::ThreadFenceForConstructor();
  if (kVerboseMode) {
    LOG(INFO) << "time=" << region_space_->Time();
    region_space_->DumpNonFreeRegions(LOG(INFO));
    LOG(INFO) << "GC end of FlipThreadRoots";
  }
}

void ConcurrentCopying::SwapStacks(Thread* self) {
  heap_->SwapStacks(self);
}

void ConcurrentCopying::RecordLiveStackFreezeSize(Thread* self) {
  WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
  live_stack_freeze_size_ = heap_->GetLiveStack()->Size();
}

// Used to visit objects in the immune spaces.
class ConcurrentCopyingImmuneSpaceObjVisitor {
 public:
  explicit ConcurrentCopyingImmuneSpaceObjVisitor(ConcurrentCopying* cc)
      : collector_(cc) {}

  void operator()(mirror::Object* obj) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
    DCHECK(obj != nullptr);
    DCHECK(collector_->immune_region_.ContainsObject(obj));
    accounting::ContinuousSpaceBitmap* cc_bitmap =
        collector_->cc_heap_bitmap_->GetContinuousSpaceBitmap(obj);
    DCHECK(cc_bitmap != nullptr)
        << "An immune space object must have a bitmap";
    if (kIsDebugBuild) {
      DCHECK(collector_->heap_->GetMarkBitmap()->Test(obj))
          << "Immune space object must be already marked";
    }
    // This may or may not succeed, which is ok.
    if (kUseBakerReadBarrier) {
      obj->AtomicSetReadBarrierPointer(ReadBarrier::WhitePtr(), ReadBarrier::GrayPtr());
    }
    if (cc_bitmap->AtomicTestAndSet(obj)) {
      // Already marked. Do nothing.
    } else {
      // Newly marked. Set the gray bit and push it onto the mark stack.
      CHECK(!kUseBakerReadBarrier || obj->GetReadBarrierPointer() == ReadBarrier::GrayPtr());
      collector_->PushOntoMarkStack<true>(obj);
    }
  }

 private:
  ConcurrentCopying* collector_;
};

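// An empty checkpoint closure: it does no per-thread work and only passes the
// GC barrier. Issuing it forces a rendezvous with every mutator thread, which
// MarkingPhase() uses to make sure in-flight mark-stack pushes from mutator
// read barriers are visible before rechecking whether the mark stack is empty.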
class EmptyCheckpoint : public Closure {
 public:
  explicit EmptyCheckpoint(ConcurrentCopying* concurrent_copying)
      : concurrent_copying_(concurrent_copying) {
  }

  virtual void Run(Thread* thread) OVERRIDE NO_THREAD_SAFETY_ANALYSIS {
    // Note: self is not necessarily equal to thread since thread may be suspended.
    Thread* self = Thread::Current();
    CHECK(thread == self || thread->IsSuspended() || thread->GetState() == kWaitingPerformingGc)
        << thread->GetState() << " thread " << thread << " self " << self;
    // If thread is a running mutator, then act on behalf of the garbage collector.
    // See the code in ThreadList::RunCheckpoint.
    if (thread->GetState() == kRunnable) {
      concurrent_copying_->GetBarrier().Pass(self);
    }
  }

 private:
  ConcurrentCopying* const concurrent_copying_;
};

// Concurrently mark roots that are guarded by read barriers and process the mark stack.
void ConcurrentCopying::MarkingPhase() {
  TimingLogger::ScopedTiming split("MarkingPhase", GetTimings());
  if (kVerboseMode) {
    LOG(INFO) << "GC MarkingPhase";
  }
  {
    // Mark the image root. The WB-based collectors do not need to
    // scan the image objects reachable from roots because they rely on
    // the card table, but it's necessary for the RB to-space invariant
    // to hold.
    TimingLogger::ScopedTiming split1("VisitImageRoots", GetTimings());
    gc::space::ImageSpace* image = heap_->GetImageSpace();
    if (image != nullptr) {
      mirror::ObjectArray<mirror::Object>* image_root = image->GetImageHeader().GetImageRoots();
      mirror::Object* marked_image_root = Mark(image_root);
      CHECK_EQ(image_root, marked_image_root) << "An image object does not move";
      if (ReadBarrier::kEnableToSpaceInvariantChecks) {
        AssertToSpaceInvariant(nullptr, MemberOffset(0), marked_image_root);
      }
    }
  }
  {
    TimingLogger::ScopedTiming split2("VisitConstantRoots", GetTimings());
    Runtime::Current()->VisitConstantRoots(this);
  }
  {
    TimingLogger::ScopedTiming split3("VisitInternTableRoots", GetTimings());
    Runtime::Current()->GetInternTable()->VisitRoots(this, kVisitRootFlagAllRoots);
  }
  {
    TimingLogger::ScopedTiming split4("VisitClassLinkerRoots", GetTimings());
    Runtime::Current()->GetClassLinker()->VisitRoots(this, kVisitRootFlagAllRoots);
  }
  {
    // TODO: don't visit the transaction roots if it's not active.
    TimingLogger::ScopedTiming split5("VisitNonThreadRoots", GetTimings());
    Runtime::Current()->VisitNonThreadRoots(this);
  }

  // Immune spaces.
  for (auto& space : heap_->GetContinuousSpaces()) {
    if (immune_region_.ContainsSpace(space)) {
      DCHECK(space->IsImageSpace() || space->IsZygoteSpace());
      accounting::ContinuousSpaceBitmap* live_bitmap = space->GetLiveBitmap();
      ConcurrentCopyingImmuneSpaceObjVisitor visitor(this);
      live_bitmap->VisitMarkedRange(reinterpret_cast<uintptr_t>(space->Begin()),
                                    reinterpret_cast<uintptr_t>(space->Limit()),
                                    visitor);
    }
  }

  Thread* self = Thread::Current();
  {
    TimingLogger::ScopedTiming split6("ProcessMarkStack", GetTimings());
    // Process the mark stack and issue an empty check point. If the
    // mark stack is still empty after the check point, we're
    // done. Otherwise, repeat.
    ProcessMarkStack();
    size_t count = 0;
    while (!ProcessMarkStack()) {
      ++count;
      if (kVerboseMode) {
        LOG(INFO) << "Issue an empty check point. " << count;
      }
      IssueEmptyCheckpoint();
    }
    // Need to ensure the mark stack is empty before reference
    // processing to get rid of non-reference gray objects.
    CheckEmptyMarkQueue();
    // Enable the GetReference slow path and disallow access to the system weaks.
    GetHeap()->GetReferenceProcessor()->EnableSlowPath();
    Runtime::Current()->DisallowNewSystemWeaks();
    QuasiAtomic::ThreadFenceForConstructor();
    // Lock-unlock the system weak locks so that there's no thread in
    // the middle of accessing system weaks.
    Runtime::Current()->EnsureNewSystemWeaksDisallowed();
    // Note: Do not issue a checkpoint from here to the
    // SweepSystemWeaks call or else a deadlock due to
    // WaitHoldingLocks() would occur.
    if (kVerboseMode) {
      LOG(INFO) << "Enabled the ref proc slow path & disabled access to system weaks.";
      LOG(INFO) << "ProcessReferences";
    }
    ProcessReferences(self, true);
    CheckEmptyMarkQueue();
    if (kVerboseMode) {
      LOG(INFO) << "SweepSystemWeaks";
    }
    SweepSystemWeaks(self);
    if (kVerboseMode) {
      LOG(INFO) << "SweepSystemWeaks done";
    }
    // Because hash_set::Erase() can call the hash function for
    // arbitrary elements in the weak intern table in
    // InternTable::Table::SweepWeaks(), the above SweepSystemWeaks()
    // call may have marked some objects (strings) alive. So process
    // the mark stack here once again.
    ProcessMarkStack();
    CheckEmptyMarkQueue();
    if (kVerboseMode) {
      LOG(INFO) << "AllowNewSystemWeaks";
    }
    Runtime::Current()->AllowNewSystemWeaks();
    IssueEmptyCheckpoint();
    // Disable marking.
    if (kUseTableLookupReadBarrier) {
      heap_->rb_table_->ClearAll();
      DCHECK(heap_->rb_table_->IsAllCleared());
    }
    is_mark_queue_push_disallowed_.StoreSequentiallyConsistent(1);
    is_marking_ = false;
    CheckEmptyMarkQueue();
  }

  if (kVerboseMode) {
    LOG(INFO) << "GC end of MarkingPhase";
  }
}

void ConcurrentCopying::IssueEmptyCheckpoint() {
  Thread* self = Thread::Current();
  EmptyCheckpoint check_point(this);
  ThreadList* thread_list = Runtime::Current()->GetThreadList();
  gc_barrier_->Init(self, 0);
  size_t barrier_count = thread_list->RunCheckpoint(&check_point);
  // If there are no threads to wait for, which implies that all the checkpoint
  // functions have finished, then there is no need to release the mutator lock.
  if (barrier_count == 0) {
    return;
  }
  // Release locks then wait for all mutator threads to pass the barrier.
  Locks::mutator_lock_->SharedUnlock(self);
  {
    ScopedThreadStateChange tsc(self, kWaitingForCheckPointsToRun);
    gc_barrier_->Increment(self, barrier_count);
  }
  Locks::mutator_lock_->SharedLock(self);
}

mirror::Object* ConcurrentCopying::PopOffMarkStack() {
  return mark_queue_.Dequeue();
}

template<bool kThreadSafe>
void ConcurrentCopying::PushOntoMarkStack(mirror::Object* to_ref) {
  CHECK_EQ(is_mark_queue_push_disallowed_.LoadRelaxed(), 0)
      << " " << to_ref << " " << PrettyTypeOf(to_ref);
  if (kThreadSafe) {
    CHECK(mark_queue_.Enqueue(to_ref)) << "Mark queue overflow";
  } else {
    CHECK(mark_queue_.EnqueueThreadUnsafe(to_ref)) << "Mark queue overflow";
  }
}

accounting::ObjectStack* ConcurrentCopying::GetAllocationStack() {
  return heap_->allocation_stack_.get();
}

accounting::ObjectStack* ConcurrentCopying::GetLiveStack() {
  return heap_->live_stack_.get();
}

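// Returns the to-space address of from_ref if it has already been copied, or
// null otherwise. The forwarding address lives in from_ref's lock word (state
// kForwardingAddress), presumably installed by Copy() when the object is
// evacuated.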
inline mirror::Object* ConcurrentCopying::GetFwdPtr(mirror::Object* from_ref) {
  DCHECK(region_space_->IsInFromSpace(from_ref));
  LockWord lw = from_ref->GetLockWord(false);
  if (lw.GetState() == LockWord::kForwardingAddress) {
    mirror::Object* fwd_ptr = reinterpret_cast<mirror::Object*>(lw.ForwardingAddress());
    CHECK(fwd_ptr != nullptr);
    return fwd_ptr;
  } else {
    return nullptr;
  }
}

// The following visitors are used to verify that there are no
// references to the from-space left after marking.
class ConcurrentCopyingVerifyNoFromSpaceRefsVisitor : public SingleRootVisitor {
 public:
  explicit ConcurrentCopyingVerifyNoFromSpaceRefsVisitor(ConcurrentCopying* collector)
      : collector_(collector) {}

  void operator()(mirror::Object* ref) const
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) ALWAYS_INLINE {
    if (ref == nullptr) {
      // OK.
      return;
    }
    collector_->AssertToSpaceInvariant(nullptr, MemberOffset(0), ref);
    if (kUseBakerReadBarrier) {
      if (collector_->RegionSpace()->IsInToSpace(ref)) {
        CHECK(ref->GetReadBarrierPointer() == nullptr)
            << "To-space ref " << ref << " " << PrettyTypeOf(ref)
            << " has non-white rb_ptr " << ref->GetReadBarrierPointer();
      } else {
        CHECK(ref->GetReadBarrierPointer() == ReadBarrier::BlackPtr() ||
              (ref->GetReadBarrierPointer() == ReadBarrier::WhitePtr() &&
               collector_->IsOnAllocStack(ref)))
            << "Non-moving/unevac from space ref " << ref << " " << PrettyTypeOf(ref)
            << " has non-black rb_ptr " << ref->GetReadBarrierPointer()
            << " but isn't on the alloc stack (and has white rb_ptr)."
            << " Is it in the non-moving space="
            << (collector_->GetHeap()->GetNonMovingSpace()->HasAddress(ref));
      }
    }
  }

  void VisitRoot(mirror::Object* root, const RootInfo& info ATTRIBUTE_UNUSED)
      OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    DCHECK(root != nullptr);
    operator()(root);
  }

 private:
  ConcurrentCopying* const collector_;
};

class ConcurrentCopyingVerifyNoFromSpaceRefsFieldVisitor {
 public:
  explicit ConcurrentCopyingVerifyNoFromSpaceRefsFieldVisitor(ConcurrentCopying* collector)
      : collector_(collector) {}

  void operator()(mirror::Object* obj, MemberOffset offset, bool /* is_static */) const
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) ALWAYS_INLINE {
    mirror::Object* ref =
        obj->GetFieldObject<mirror::Object, kDefaultVerifyFlags, kWithoutReadBarrier>(offset);
    ConcurrentCopyingVerifyNoFromSpaceRefsVisitor visitor(collector_);
    visitor(ref);
  }
  void operator()(mirror::Class* klass, mirror::Reference* ref) const
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) ALWAYS_INLINE {
    CHECK(klass->IsTypeOfReferenceClass());
    this->operator()(ref, mirror::Reference::ReferentOffset(), false);
  }

 private:
  ConcurrentCopying* collector_;
};

class ConcurrentCopyingVerifyNoFromSpaceRefsObjectVisitor {
 public:
  explicit ConcurrentCopyingVerifyNoFromSpaceRefsObjectVisitor(ConcurrentCopying* collector)
      : collector_(collector) {}
  void operator()(mirror::Object* obj) const
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    ObjectCallback(obj, collector_);
  }
  static void ObjectCallback(mirror::Object* obj, void *arg)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    CHECK(obj != nullptr);
    ConcurrentCopying* collector = reinterpret_cast<ConcurrentCopying*>(arg);
    space::RegionSpace* region_space = collector->RegionSpace();
    CHECK(!region_space->IsInFromSpace(obj)) << "Scanning object " << obj << " in from space";
    ConcurrentCopyingVerifyNoFromSpaceRefsFieldVisitor visitor(collector);
    obj->VisitReferences<true>(visitor, visitor);
    if (kUseBakerReadBarrier) {
      if (collector->RegionSpace()->IsInToSpace(obj)) {
        CHECK(obj->GetReadBarrierPointer() == nullptr)
            << "obj=" << obj << " non-white rb_ptr " << obj->GetReadBarrierPointer();
      } else {
        CHECK(obj->GetReadBarrierPointer() == ReadBarrier::BlackPtr() ||
              (obj->GetReadBarrierPointer() == ReadBarrier::WhitePtr() &&
               collector->IsOnAllocStack(obj)))
            << "Non-moving space/unevac from space ref " << obj << " " << PrettyTypeOf(obj)
            << " has non-black rb_ptr " << obj->GetReadBarrierPointer()
            << " but isn't on the alloc stack (and has white rb_ptr). Is it in the non-moving space="
            << (collector->GetHeap()->GetNonMovingSpace()->HasAddress(obj));
      }
    }
  }

 private:
  ConcurrentCopying* const collector_;
};

// Verify there are no from-space references left after the marking phase.
void ConcurrentCopying::VerifyNoFromSpaceReferences() {
  Thread* self = Thread::Current();
  DCHECK(Locks::mutator_lock_->IsExclusiveHeld(self));
  ConcurrentCopyingVerifyNoFromSpaceRefsObjectVisitor visitor(this);
  // Roots.
  {
    ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
    ConcurrentCopyingVerifyNoFromSpaceRefsVisitor ref_visitor(this);
    Runtime::Current()->VisitRoots(&ref_visitor);
  }
  // The to-space.
  region_space_->WalkToSpace(ConcurrentCopyingVerifyNoFromSpaceRefsObjectVisitor::ObjectCallback,
                             this);
  // Non-moving spaces.
  {
    WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
    heap_->GetMarkBitmap()->Visit(visitor);
  }
  // The alloc stack.
  {
    ConcurrentCopyingVerifyNoFromSpaceRefsVisitor ref_visitor(this);
    for (auto* it = heap_->allocation_stack_->Begin(), *end = heap_->allocation_stack_->End();
        it < end; ++it) {
      mirror::Object* const obj = it->AsMirrorPtr();
      if (obj != nullptr && obj->GetClass() != nullptr) {
        // TODO: need to call this only if obj is alive?
        ref_visitor(obj);
        visitor(obj);
      }
    }
  }
  // TODO: LOS. But only refs in LOS are classes.
}

// The following visitors are used to assert the to-space invariant.
class ConcurrentCopyingAssertToSpaceInvariantRefsVisitor {
 public:
  explicit ConcurrentCopyingAssertToSpaceInvariantRefsVisitor(ConcurrentCopying* collector)
      : collector_(collector) {}

  void operator()(mirror::Object* ref) const
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) ALWAYS_INLINE {
    if (ref == nullptr) {
      // OK.
      return;
    }
    collector_->AssertToSpaceInvariant(nullptr, MemberOffset(0), ref);
  }
  static void RootCallback(mirror::Object** root, void *arg, const RootInfo& /*root_info*/)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    ConcurrentCopying* collector = reinterpret_cast<ConcurrentCopying*>(arg);
    ConcurrentCopyingAssertToSpaceInvariantRefsVisitor visitor(collector);
    DCHECK(root != nullptr);
    visitor(*root);
  }

 private:
  ConcurrentCopying* collector_;
};

class ConcurrentCopyingAssertToSpaceInvariantFieldVisitor {
 public:
  explicit ConcurrentCopyingAssertToSpaceInvariantFieldVisitor(ConcurrentCopying* collector)
      : collector_(collector) {}

  void operator()(mirror::Object* obj, MemberOffset offset, bool /* is_static */) const
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) ALWAYS_INLINE {
    mirror::Object* ref =
        obj->GetFieldObject<mirror::Object, kDefaultVerifyFlags, kWithoutReadBarrier>(offset);
    ConcurrentCopyingAssertToSpaceInvariantRefsVisitor visitor(collector_);
    visitor(ref);
  }
  void operator()(mirror::Class* klass, mirror::Reference* /* ref */) const
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) ALWAYS_INLINE {
    CHECK(klass->IsTypeOfReferenceClass());
  }

 private:
  ConcurrentCopying* collector_;
};

class ConcurrentCopyingAssertToSpaceInvariantObjectVisitor {
 public:
  explicit ConcurrentCopyingAssertToSpaceInvariantObjectVisitor(ConcurrentCopying* collector)
      : collector_(collector) {}
  void operator()(mirror::Object* obj) const
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    ObjectCallback(obj, collector_);
  }
  static void ObjectCallback(mirror::Object* obj, void *arg)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    CHECK(obj != nullptr);
    ConcurrentCopying* collector = reinterpret_cast<ConcurrentCopying*>(arg);
    space::RegionSpace* region_space = collector->RegionSpace();
    CHECK(!region_space->IsInFromSpace(obj)) << "Scanning object " << obj << " in from space";
    collector->AssertToSpaceInvariant(nullptr, MemberOffset(0), obj);
    ConcurrentCopyingAssertToSpaceInvariantFieldVisitor visitor(collector);
    obj->VisitReferences<true>(visitor, visitor);
  }

 private:
  ConcurrentCopying* collector_;
};

bool ConcurrentCopying::ProcessMarkStack() {
  if (kVerboseMode) {
    LOG(INFO) << "ProcessMarkStack. ";
  }
  size_t count = 0;
  mirror::Object* to_ref;
  while ((to_ref = PopOffMarkStack()) != nullptr) {
    ++count;
    DCHECK(!region_space_->IsInFromSpace(to_ref));
    if (kUseBakerReadBarrier) {
      DCHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr())
          << " " << to_ref << " " << to_ref->GetReadBarrierPointer()
          << " is_marked=" << IsMarked(to_ref);
    }
    // Scan ref fields.
    Scan(to_ref);
    // Mark the gray ref as white or black.
    if (kUseBakerReadBarrier) {
      DCHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr())
          << " " << to_ref << " " << to_ref->GetReadBarrierPointer()
          << " is_marked=" << IsMarked(to_ref);
    }
    if (to_ref->GetClass<kVerifyNone, kWithoutReadBarrier>()->IsTypeOfReferenceClass() &&
        to_ref->AsReference()->GetReferent<kWithoutReadBarrier>() != nullptr &&
        !IsInToSpace(to_ref->AsReference()->GetReferent<kWithoutReadBarrier>())) {
      // Leave References gray so that GetReferent() will trigger RB.
      CHECK(to_ref->AsReference()->IsEnqueued()) << "Left unenqueued ref gray " << to_ref;
    } else {
#ifdef USE_BAKER_OR_BROOKS_READ_BARRIER
      if (kUseBakerReadBarrier) {
        if (region_space_->IsInToSpace(to_ref)) {
          // If to-space, change from gray to white.
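          // (White is safe here: Scan() has already forwarded all of this
          // to-space object's fields, so readers presumably no longer need
          // the read barrier to treat it specially.)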
          bool success = to_ref->AtomicSetReadBarrierPointer(ReadBarrier::GrayPtr(),
                                                             ReadBarrier::WhitePtr());
          CHECK(success) << "Must succeed as we won the race.";
          CHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::WhitePtr());
        } else {
          // If non-moving space/unevac from space, change from gray
          // to black. We can't change gray to white because it's not
          // safe to use CAS if two threads change values in opposite
          // directions (A->B and B->A). So, we change it to black to
          // indicate non-moving objects that have been marked
          // through. Note we'd need to change from black to white
          // later (concurrently).
          bool success = to_ref->AtomicSetReadBarrierPointer(ReadBarrier::GrayPtr(),
                                                             ReadBarrier::BlackPtr());
          CHECK(success) << "Must succeed as we won the race.";
          CHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::BlackPtr());
        }
      }
#else
      DCHECK(!kUseBakerReadBarrier);
#endif
    }
    if (ReadBarrier::kEnableToSpaceInvariantChecks || kIsDebugBuild) {
      ConcurrentCopyingAssertToSpaceInvariantObjectVisitor visitor(this);
      visitor(to_ref);
    }
  }
  // Return true if the stack was empty.
  return count == 0;
}

void ConcurrentCopying::CheckEmptyMarkQueue() {
  if (!mark_queue_.IsEmpty()) {
    while (!mark_queue_.IsEmpty()) {
      mirror::Object* obj = mark_queue_.Dequeue();
      if (kUseBakerReadBarrier) {
        mirror::Object* rb_ptr = obj->GetReadBarrierPointer();
        LOG(INFO) << "On mark queue : " << obj << " " << PrettyTypeOf(obj) << " rb_ptr=" << rb_ptr
                  << " is_marked=" << IsMarked(obj);
      } else {
        LOG(INFO) << "On mark queue : " << obj << " " << PrettyTypeOf(obj)
                  << " is_marked=" << IsMarked(obj);
      }
    }
    LOG(FATAL) << "mark queue is not empty";
  }
}

void ConcurrentCopying::SweepSystemWeaks(Thread* self) {
  TimingLogger::ScopedTiming split("SweepSystemWeaks", GetTimings());
  ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
  Runtime::Current()->SweepSystemWeaks(IsMarkedCallback, this);
}

void ConcurrentCopying::Sweep(bool swap_bitmaps) {
  {
    TimingLogger::ScopedTiming t("MarkStackAsLive", GetTimings());
    accounting::ObjectStack* live_stack = heap_->GetLiveStack();
    if (kEnableFromSpaceAccountingCheck) {
      CHECK_GE(live_stack_freeze_size_, live_stack->Size());
    }
    heap_->MarkAllocStackAsLive(live_stack);
    live_stack->Reset();
  }
  CHECK(mark_queue_.IsEmpty());
  TimingLogger::ScopedTiming split("Sweep", GetTimings());
  for (const auto& space : GetHeap()->GetContinuousSpaces()) {
    if (space->IsContinuousMemMapAllocSpace()) {
      space::ContinuousMemMapAllocSpace* alloc_space = space->AsContinuousMemMapAllocSpace();
      if (space == region_space_ || immune_region_.ContainsSpace(space)) {
        continue;
      }
      TimingLogger::ScopedTiming split2(
          alloc_space->IsZygoteSpace() ? "SweepZygoteSpace" : "SweepAllocSpace", GetTimings());
      RecordFree(alloc_space->Sweep(swap_bitmaps));
    }
  }
  SweepLargeObjects(swap_bitmaps);
}

void ConcurrentCopying::SweepLargeObjects(bool swap_bitmaps) {
  TimingLogger::ScopedTiming split("SweepLargeObjects", GetTimings());
  RecordFreeLOS(heap_->GetLargeObjectsSpace()->Sweep(swap_bitmaps));
}

class ConcurrentCopyingClearBlackPtrsVisitor {
 public:
  explicit ConcurrentCopyingClearBlackPtrsVisitor(ConcurrentCopying* cc)
      : collector_(cc) {}
#ifndef USE_BAKER_OR_BROOKS_READ_BARRIER
  NO_RETURN
#endif
  void operator()(mirror::Object* obj) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
    DCHECK(obj != nullptr);
    DCHECK(collector_->heap_->GetMarkBitmap()->Test(obj)) << obj;
    DCHECK_EQ(obj->GetReadBarrierPointer(), ReadBarrier::BlackPtr()) << obj;
    obj->AtomicSetReadBarrierPointer(ReadBarrier::BlackPtr(), ReadBarrier::WhitePtr());
    DCHECK_EQ(obj->GetReadBarrierPointer(), ReadBarrier::WhitePtr()) << obj;
  }

 private:
  ConcurrentCopying* const collector_;
};

// Clear the black ptrs in non-moving objects back to white.
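// (During marking, non-moving and unevac from-space objects were turned black
// rather than white when marked through; see ProcessMarkStack(). They are
// reset to white here so the next GC cycle starts from a clean state.)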
void ConcurrentCopying::ClearBlackPtrs() {
  CHECK(kUseBakerReadBarrier);
  TimingLogger::ScopedTiming split("ClearBlackPtrs", GetTimings());
  ConcurrentCopyingClearBlackPtrsVisitor visitor(this);
  for (auto& space : heap_->GetContinuousSpaces()) {
    if (space == region_space_) {
      continue;
    }
    accounting::ContinuousSpaceBitmap* mark_bitmap = space->GetMarkBitmap();
    if (kVerboseMode) {
      LOG(INFO) << "ClearBlackPtrs: " << *space << " bitmap: " << *mark_bitmap;
    }
    mark_bitmap->VisitMarkedRange(reinterpret_cast<uintptr_t>(space->Begin()),
                                  reinterpret_cast<uintptr_t>(space->Limit()),
                                  visitor);
  }
  space::LargeObjectSpace* large_object_space = heap_->GetLargeObjectsSpace();
  large_object_space->GetMarkBitmap()->VisitMarkedRange(
      reinterpret_cast<uintptr_t>(large_object_space->Begin()),
      reinterpret_cast<uintptr_t>(large_object_space->End()),
      visitor);
  // Objects on the allocation stack?
  if (ReadBarrier::kEnableReadBarrierInvariantChecks || kIsDebugBuild) {
    size_t count = GetAllocationStack()->Size();
    auto* it = GetAllocationStack()->Begin();
    auto* end = GetAllocationStack()->End();
    for (size_t i = 0; i < count; ++i, ++it) {
      CHECK_LT(it, end);
      mirror::Object* obj = it->AsMirrorPtr();
      if (obj != nullptr) {
        // Must have been cleared above.
        CHECK_EQ(obj->GetReadBarrierPointer(), ReadBarrier::WhitePtr()) << obj;
      }
    }
  }
}

void ConcurrentCopying::ReclaimPhase() {
  TimingLogger::ScopedTiming split("ReclaimPhase", GetTimings());
  if (kVerboseMode) {
    LOG(INFO) << "GC ReclaimPhase";
  }
  Thread* self = Thread::Current();

  {
    // Double-check that the mark stack is empty.
    // Note: need to set this after VerifyNoFromSpaceRef().
    is_asserting_to_space_invariant_ = false;
    QuasiAtomic::ThreadFenceForConstructor();
    if (kVerboseMode) {
      LOG(INFO) << "Issue an empty check point. ";
    }
    IssueEmptyCheckpoint();
    // Disable the check.
    is_mark_queue_push_disallowed_.StoreSequentiallyConsistent(0);
    CheckEmptyMarkQueue();
  }

  {
    // Record freed objects.
    TimingLogger::ScopedTiming split2("RecordFree", GetTimings());
    // Don't include thread-locals that are in the to-space.
    uint64_t from_bytes = region_space_->GetBytesAllocatedInFromSpace();
    uint64_t from_objects = region_space_->GetObjectsAllocatedInFromSpace();
    uint64_t unevac_from_bytes = region_space_->GetBytesAllocatedInUnevacFromSpace();
    uint64_t unevac_from_objects = region_space_->GetObjectsAllocatedInUnevacFromSpace();
    uint64_t to_bytes = bytes_moved_.LoadSequentiallyConsistent();
    uint64_t to_objects = objects_moved_.LoadSequentiallyConsistent();
    if (kEnableFromSpaceAccountingCheck) {
      CHECK_EQ(from_space_num_objects_at_first_pause_, from_objects + unevac_from_objects);
      CHECK_EQ(from_space_num_bytes_at_first_pause_, from_bytes + unevac_from_bytes);
    }
    CHECK_LE(to_objects, from_objects);
    CHECK_LE(to_bytes, from_bytes);
    int64_t freed_bytes = from_bytes - to_bytes;
    int64_t freed_objects = from_objects - to_objects;
    if (kVerboseMode) {
      LOG(INFO) << "RecordFree:"
                << " from_bytes=" << from_bytes << " from_objects=" << from_objects
                << " unevac_from_bytes=" << unevac_from_bytes << " unevac_from_objects=" << unevac_from_objects
                << " to_bytes=" << to_bytes << " to_objects=" << to_objects
                << " freed_bytes=" << freed_bytes << " freed_objects=" << freed_objects
                << " from_space size=" << region_space_->FromSpaceSize()
                << " unevac_from_space size=" << region_space_->UnevacFromSpaceSize()
                << " to_space size=" << region_space_->ToSpaceSize();
      LOG(INFO) << "(before) num_bytes_allocated=" << heap_->num_bytes_allocated_.LoadSequentiallyConsistent();
    }
    RecordFree(ObjectBytePair(freed_objects, freed_bytes));
    if (kVerboseMode) {
      LOG(INFO) << "(after) num_bytes_allocated=" << heap_->num_bytes_allocated_.LoadSequentiallyConsistent();
    }
  }

  {
    TimingLogger::ScopedTiming split3("ComputeUnevacFromSpaceLiveRatio", GetTimings());
    ComputeUnevacFromSpaceLiveRatio();
  }

  {
    TimingLogger::ScopedTiming split4("ClearFromSpace", GetTimings());
    region_space_->ClearFromSpace();
  }

  {
    WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
    if (kUseBakerReadBarrier) {
      ClearBlackPtrs();
    }
    Sweep(false);
    SwapBitmaps();
    heap_->UnBindBitmaps();

    // Remove bitmaps for the immune spaces.
    while (!cc_bitmaps_.empty()) {
      accounting::ContinuousSpaceBitmap* cc_bitmap = cc_bitmaps_.back();
      cc_heap_bitmap_->RemoveContinuousSpaceBitmap(cc_bitmap);
      delete cc_bitmap;
      cc_bitmaps_.pop_back();
    }
    region_space_bitmap_ = nullptr;
  }

  if (kVerboseMode) {
    LOG(INFO) << "GC end of ReclaimPhase";
  }
}

class ConcurrentCopyingComputeUnevacFromSpaceLiveRatioVisitor {
 public:
  explicit ConcurrentCopyingComputeUnevacFromSpaceLiveRatioVisitor(ConcurrentCopying* cc)
      : collector_(cc) {}
  void operator()(mirror::Object* ref) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
    DCHECK(ref != nullptr);
    DCHECK(collector_->region_space_bitmap_->Test(ref)) << ref;
    DCHECK(collector_->region_space_->IsInUnevacFromSpace(ref)) << ref;
    if (kUseBakerReadBarrier) {
      DCHECK_EQ(ref->GetReadBarrierPointer(), ReadBarrier::BlackPtr()) << ref;
      // Clear the black ptr.
      ref->AtomicSetReadBarrierPointer(ReadBarrier::BlackPtr(), ReadBarrier::WhitePtr());
      DCHECK_EQ(ref->GetReadBarrierPointer(), ReadBarrier::WhitePtr()) << ref;
    }
    size_t obj_size = ref->SizeOf();
    size_t alloc_size = RoundUp(obj_size, space::RegionSpace::kAlignment);
    collector_->region_space_->AddLiveBytes(ref, alloc_size);
  }

 private:
  ConcurrentCopying* collector_;
};

// Compute how much live data is left in the unevac from-space regions.
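// (The per-region live byte counts recorded via AddLiveBytes() are presumably
// what the region space uses later to decide which regions are worth
// evacuating versus keeping as unevacuated from-space.)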
void ConcurrentCopying::ComputeUnevacFromSpaceLiveRatio() {
  region_space_->AssertAllRegionLiveBytesZeroOrCleared();
  ConcurrentCopyingComputeUnevacFromSpaceLiveRatioVisitor visitor(this);
  region_space_bitmap_->VisitMarkedRange(reinterpret_cast<uintptr_t>(region_space_->Begin()),
                                         reinterpret_cast<uintptr_t>(region_space_->Limit()),
                                         visitor);
}

// Assert the to-space invariant.
void ConcurrentCopying::AssertToSpaceInvariant(mirror::Object* obj, MemberOffset offset,
                                               mirror::Object* ref) {
  CHECK(heap_->collector_type_ == kCollectorTypeCC) << static_cast<size_t>(heap_->collector_type_);
  if (is_asserting_to_space_invariant_) {
    if (region_space_->IsInToSpace(ref)) {
      // OK.
      return;
    } else if (region_space_->IsInUnevacFromSpace(ref)) {
      CHECK(region_space_bitmap_->Test(ref)) << ref;
    } else if (region_space_->IsInFromSpace(ref)) {
      // Not OK. Do extra logging.
      if (obj != nullptr) {
        LogFromSpaceRefHolder(obj, offset);
      }
      ref->GetLockWord(false).Dump(LOG(INTERNAL_FATAL));
      CHECK(false) << "Found from-space ref " << ref << " " << PrettyTypeOf(ref);
    } else {
      AssertToSpaceInvariantInNonMovingSpace(obj, ref);
    }
  }
}

class RootPrinter {
 public:
  RootPrinter() { }

  template <class MirrorType>
  ALWAYS_INLINE void VisitRootIfNonNull(mirror::CompressedReference<MirrorType>* root)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    if (!root->IsNull()) {
      VisitRoot(root);
    }
  }

  template <class MirrorType>
  void VisitRoot(mirror::Object** root)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    LOG(INTERNAL_FATAL) << "root=" << root << " ref=" << *root;
  }

  template <class MirrorType>
  void VisitRoot(mirror::CompressedReference<MirrorType>* root)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    LOG(INTERNAL_FATAL) << "root=" << root << " ref=" << root->AsMirrorPtr();
  }
};

void ConcurrentCopying::AssertToSpaceInvariant(GcRootSource* gc_root_source,
                                               mirror::Object* ref) {
  CHECK(heap_->collector_type_ == kCollectorTypeCC) << static_cast<size_t>(heap_->collector_type_);
  if (is_asserting_to_space_invariant_) {
    if (region_space_->IsInToSpace(ref)) {
      // OK.
      return;
    } else if (region_space_->IsInUnevacFromSpace(ref)) {
      CHECK(region_space_bitmap_->Test(ref)) << ref;
    } else if (region_space_->IsInFromSpace(ref)) {
      // Not OK. Do extra logging.
      if (gc_root_source == nullptr) {
        // No info.
      } else if (gc_root_source->HasArtField()) {
        ArtField* field = gc_root_source->GetArtField();
        LOG(INTERNAL_FATAL) << "gc root in field " << field << " " << PrettyField(field);
        RootPrinter root_printer;
        field->VisitRoots(root_printer);
      } else if (gc_root_source->HasArtMethod()) {
        ArtMethod* method = gc_root_source->GetArtMethod();
        LOG(INTERNAL_FATAL) << "gc root in method " << method << " " << PrettyMethod(method);
        RootPrinter root_printer;
        method->VisitRoots(root_printer);
      }
      ref->GetLockWord(false).Dump(LOG(INTERNAL_FATAL));
      region_space_->DumpNonFreeRegions(LOG(INTERNAL_FATAL));
      PrintFileToLog("/proc/self/maps", LogSeverity::INTERNAL_FATAL);
      MemMap::DumpMaps(LOG(INTERNAL_FATAL), true);
      CHECK(false) << "Found from-space ref " << ref << " " << PrettyTypeOf(ref);
    } else {
      AssertToSpaceInvariantInNonMovingSpace(nullptr, ref);
    }
  }
}

void ConcurrentCopying::LogFromSpaceRefHolder(mirror::Object* obj, MemberOffset offset) {
  if (kUseBakerReadBarrier) {
    LOG(INFO) << "holder=" << obj << " " << PrettyTypeOf(obj)
              << " holder rb_ptr=" << obj->GetReadBarrierPointer();
  } else {
    LOG(INFO) << "holder=" << obj << " " << PrettyTypeOf(obj);
  }
  if (region_space_->IsInFromSpace(obj)) {
    LOG(INFO) << "holder is in the from-space.";
  } else if (region_space_->IsInToSpace(obj)) {
    LOG(INFO) << "holder is in the to-space.";
  } else if (region_space_->IsInUnevacFromSpace(obj)) {
    LOG(INFO) << "holder is in the unevac from-space.";
    if (region_space_bitmap_->Test(obj)) {
      LOG(INFO) << "holder is marked in the region space bitmap.";
    } else {
      LOG(INFO) << "holder is not marked in the region space bitmap.";
    }
  } else {
    // In a non-moving space.
    if (immune_region_.ContainsObject(obj)) {
      LOG(INFO) << "holder is in the image or the zygote space.";
      accounting::ContinuousSpaceBitmap* cc_bitmap =
          cc_heap_bitmap_->GetContinuousSpaceBitmap(obj);
      CHECK(cc_bitmap != nullptr)
          << "An immune space object must have a bitmap.";
      if (cc_bitmap->Test(obj)) {
        LOG(INFO) << "holder is marked in the bit map.";
      } else {
        LOG(INFO) << "holder is NOT marked in the bit map.";
      }
    } else {
      LOG(INFO) << "holder is in a non-moving (or main) space.";
      accounting::ContinuousSpaceBitmap* mark_bitmap =
          heap_mark_bitmap_->GetContinuousSpaceBitmap(obj);
      accounting::LargeObjectBitmap* los_bitmap =
          heap_mark_bitmap_->GetLargeObjectBitmap(obj);
      CHECK(los_bitmap != nullptr) << "LOS bitmap covers the entire address range";
      bool is_los = mark_bitmap == nullptr;
      if (!is_los && mark_bitmap->Test(obj)) {
        LOG(INFO) << "holder is marked in the mark bit map.";
      } else if (is_los && los_bitmap->Test(obj)) {
        LOG(INFO) << "holder is marked in the los bit map.";
      } else {
        // If ref is on the allocation stack, then it is considered
        // marked/alive (but not necessarily on the live stack).
        if (IsOnAllocStack(obj)) {
          LOG(INFO) << "holder is on the alloc stack.";
        } else {
          LOG(INFO) << "holder is not marked or on the alloc stack.";
        }
      }
    }
  }
  LOG(INFO) << "offset=" << offset.SizeValue();
}

void ConcurrentCopying::AssertToSpaceInvariantInNonMovingSpace(mirror::Object* obj,
                                                               mirror::Object* ref) {
  // In a non-moving space. Check that the ref is marked.
  if (immune_region_.ContainsObject(ref)) {
    accounting::ContinuousSpaceBitmap* cc_bitmap =
        cc_heap_bitmap_->GetContinuousSpaceBitmap(ref);
    CHECK(cc_bitmap != nullptr)
        << "An immune space ref must have a bitmap. " << ref;
    if (kUseBakerReadBarrier) {
      CHECK(cc_bitmap->Test(ref))
          << "Unmarked immune space ref. obj=" << obj << " rb_ptr="
          << obj->GetReadBarrierPointer() << " ref=" << ref;
    } else {
      CHECK(cc_bitmap->Test(ref))
          << "Unmarked immune space ref. obj=" << obj << " ref=" << ref;
    }
  } else {
    accounting::ContinuousSpaceBitmap* mark_bitmap =
        heap_mark_bitmap_->GetContinuousSpaceBitmap(ref);
    accounting::LargeObjectBitmap* los_bitmap =
        heap_mark_bitmap_->GetLargeObjectBitmap(ref);
    CHECK(los_bitmap != nullptr) << "LOS bitmap covers the entire address range";
    bool is_los = mark_bitmap == nullptr;
    if ((!is_los && mark_bitmap->Test(ref)) ||
        (is_los && los_bitmap->Test(ref))) {
      // OK.
    } else {
      // If ref is on the allocation stack, then it may not be
      // marked live, but considered marked/alive (but not
      // necessarily on the live stack).
      CHECK(IsOnAllocStack(ref)) << "Unmarked ref that's not on the allocation stack. "
                                 << "obj=" << obj << " ref=" << ref;
    }
  }
}

// Used to scan ref fields of an object.
class ConcurrentCopyingRefFieldsVisitor {
 public:
  explicit ConcurrentCopyingRefFieldsVisitor(ConcurrentCopying* collector)
      : collector_(collector) {}

  void operator()(mirror::Object* obj, MemberOffset offset, bool /* is_static */)
      const ALWAYS_INLINE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
    collector_->Process(obj, offset);
  }

  void operator()(mirror::Class* klass, mirror::Reference* ref) const
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) ALWAYS_INLINE {
    CHECK(klass->IsTypeOfReferenceClass());
    collector_->DelayReferenceReferent(klass, ref);
  }

 private:
  ConcurrentCopying* const collector_;
};

// Scan ref fields of an object.
void ConcurrentCopying::Scan(mirror::Object* to_ref) {
  DCHECK(!region_space_->IsInFromSpace(to_ref));
  ConcurrentCopyingRefFieldsVisitor visitor(this);
  to_ref->VisitReferences<true>(visitor, visitor);
}

// Process a field.
inline void ConcurrentCopying::Process(mirror::Object* obj, MemberOffset offset) {
  mirror::Object* ref = obj->GetFieldObject<mirror::Object, kVerifyNone, kWithoutReadBarrier, false>(offset);
  if (ref == nullptr || region_space_->IsInToSpace(ref)) {
    return;
  }
  mirror::Object* to_ref = Mark(ref);
  if (to_ref == ref) {
    return;
  }
  // This may fail if the mutator writes to the field at the same time. But it's ok.
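  // (By the to-space invariant, any value the mutator stores into the field is
  // already a to-space reference, so if the mutator wins the race no fix-up by
  // the GC is needed and the CAS below is skipped or allowed to fail.)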
  mirror::Object* expected_ref = ref;
  mirror::Object* new_ref = to_ref;
  do {
    if (expected_ref !=
        obj->GetFieldObject<mirror::Object, kVerifyNone, kWithoutReadBarrier, false>(offset)) {
      // It was updated by the mutator.
      break;
    }
  } while (!obj->CasFieldWeakSequentiallyConsistentObjectWithoutWriteBarrier<false, false, kVerifyNone>(
      offset, expected_ref, new_ref));
}

// Process some roots.
void ConcurrentCopying::VisitRoots(
    mirror::Object*** roots, size_t count, const RootInfo& info ATTRIBUTE_UNUSED) {
  for (size_t i = 0; i < count; ++i) {
    mirror::Object** root = roots[i];
    mirror::Object* ref = *root;
    if (ref == nullptr || region_space_->IsInToSpace(ref)) {
      continue;
    }
    mirror::Object* to_ref = Mark(ref);
    if (to_ref == ref) {
      continue;
    }
    Atomic<mirror::Object*>* addr = reinterpret_cast<Atomic<mirror::Object*>*>(root);
    mirror::Object* expected_ref = ref;
    mirror::Object* new_ref = to_ref;
    do {
      if (expected_ref != addr->LoadRelaxed()) {
        // It was updated by the mutator.
        break;
      }
    } while (!addr->CompareExchangeWeakSequentiallyConsistent(expected_ref, new_ref));
  }
}

void ConcurrentCopying::VisitRoots(
    mirror::CompressedReference<mirror::Object>** roots, size_t count,
    const RootInfo& info ATTRIBUTE_UNUSED) {
  for (size_t i = 0; i < count; ++i) {
    mirror::CompressedReference<mirror::Object>* root = roots[i];
    mirror::Object* ref = root->AsMirrorPtr();
    if (ref == nullptr || region_space_->IsInToSpace(ref)) {
      continue;
    }
    mirror::Object* to_ref = Mark(ref);
    if (to_ref == ref) {
      continue;
    }
    auto* addr = reinterpret_cast<Atomic<mirror::CompressedReference<mirror::Object>>*>(root);
    auto expected_ref = mirror::CompressedReference<mirror::Object>::FromMirrorPtr(ref);
    auto new_ref = mirror::CompressedReference<mirror::Object>::FromMirrorPtr(to_ref);
    do {
      if (ref != addr->LoadRelaxed().AsMirrorPtr()) {
        // It was updated by the mutator.
        break;
      }
    } while (!addr->CompareExchangeWeakSequentiallyConsistent(expected_ref, new_ref));
  }
}

// Fill the given memory block with a dummy object. Used to fill in a
// block that was allocated for a copy of an object but lost in a race.
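// (Filling keeps the to-space parsable as a sequence of valid objects, which
// walks such as RegionSpace::WalkToSpace() presumably rely on.)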
void ConcurrentCopying::FillWithDummyObject(mirror::Object* dummy_obj, size_t byte_size) {
  CHECK(IsAligned<kObjectAlignment>(byte_size));
  memset(dummy_obj, 0, byte_size);
  mirror::Class* int_array_class = mirror::IntArray::GetArrayClass();
  CHECK(int_array_class != nullptr);
  AssertToSpaceInvariant(nullptr, MemberOffset(0), int_array_class);
  size_t component_size = int_array_class->GetComponentSize();
  CHECK_EQ(component_size, sizeof(int32_t));
  size_t data_offset = mirror::Array::DataOffset(component_size).SizeValue();
  if (data_offset > byte_size) {
    // An int array is too big. Use java.lang.Object.
    mirror::Class* java_lang_Object = WellKnownClasses::ToClass(WellKnownClasses::java_lang_Object);
    AssertToSpaceInvariant(nullptr, MemberOffset(0), java_lang_Object);
    CHECK_EQ(byte_size, java_lang_Object->GetObjectSize());
    dummy_obj->SetClass(java_lang_Object);
    CHECK_EQ(byte_size, dummy_obj->SizeOf());
  } else {
    // Use an int array.
    dummy_obj->SetClass(int_array_class);
    CHECK(dummy_obj->IsArrayInstance());
    int32_t length = (byte_size - data_offset) / component_size;
    dummy_obj->AsArray()->SetLength(length);
    CHECK_EQ(dummy_obj->AsArray()->GetLength(), length)
        << "byte_size=" << byte_size << " length=" << length
        << " component_size=" << component_size << " data_offset=" << data_offset;
    CHECK_EQ(byte_size, dummy_obj->SizeOf())
        << "byte_size=" << byte_size << " length=" << length
        << " component_size=" << component_size << " data_offset=" << data_offset;
  }
}

// Reuse the memory blocks that were allocated for copies of objects but were lost in a race.
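// (skipped_blocks_map_ maps block sizes to block addresses; lower_bound() finds
// the smallest recorded block that fits the request, and any unused remainder
// is re-filled with a dummy object and returned to the map.)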
1305mirror::Object* ConcurrentCopying::AllocateInSkippedBlock(size_t alloc_size) {
1306  // Try to reuse the blocks that were unused due to CAS failures.
1307  CHECK(IsAligned<space::RegionSpace::kAlignment>(alloc_size));
1308  Thread* self = Thread::Current();
1309  size_t min_object_size = RoundUp(sizeof(mirror::Object), space::RegionSpace::kAlignment);
1310  MutexLock mu(self, skipped_blocks_lock_);
1311  auto it = skipped_blocks_map_.lower_bound(alloc_size);
1312  if (it == skipped_blocks_map_.end()) {
1313    // Not found.
1314    return nullptr;
1315  }
1316  {
1317    size_t byte_size = it->first;
1318    CHECK_GE(byte_size, alloc_size);
1319    if (byte_size > alloc_size && byte_size - alloc_size < min_object_size) {
1320      // If remainder would be too small for a dummy object, retry with a larger request size.
1321      it = skipped_blocks_map_.lower_bound(alloc_size + min_object_size);
1322      if (it == skipped_blocks_map_.end()) {
1323        // Not found.
1324        return nullptr;
1325      }
1326      CHECK(IsAligned<space::RegionSpace::kAlignment>(it->first - alloc_size));
1327      CHECK_GE(it->first - alloc_size, min_object_size)
1328          << "byte_size=" << byte_size << " it->first=" << it->first << " alloc_size=" << alloc_size;
1329    }
1330  }
1331  // Found a block.
1332  CHECK(it != skipped_blocks_map_.end());
1333  size_t byte_size = it->first;
1334  uint8_t* addr = it->second;
1335  CHECK_GE(byte_size, alloc_size);
1336  CHECK(region_space_->IsInToSpace(reinterpret_cast<mirror::Object*>(addr)));
1337  CHECK(IsAligned<space::RegionSpace::kAlignment>(byte_size));
1338  if (kVerboseMode) {
1339    LOG(INFO) << "Reusing skipped bytes : " << reinterpret_cast<void*>(addr) << ", " << byte_size;
1340  }
1341  skipped_blocks_map_.erase(it);
1342  memset(addr, 0, byte_size);
1343  if (byte_size > alloc_size) {
1344    // Return the remainder to the map.
1345    CHECK(IsAligned<space::RegionSpace::kAlignment>(byte_size - alloc_size));
1346    CHECK_GE(byte_size - alloc_size, min_object_size);
1347    FillWithDummyObject(reinterpret_cast<mirror::Object*>(addr + alloc_size),
1348                        byte_size - alloc_size);
1349    CHECK(region_space_->IsInToSpace(reinterpret_cast<mirror::Object*>(addr + alloc_size)));
1350    skipped_blocks_map_.insert(std::make_pair(byte_size - alloc_size, addr + alloc_size));
1351  }
1352  return reinterpret_cast<mirror::Object*>(addr);
1353}
1354
1355mirror::Object* ConcurrentCopying::Copy(mirror::Object* from_ref) {
1356  DCHECK(region_space_->IsInFromSpace(from_ref));
1357  // No read barrier to avoid a nested RB that might violate the to-space
1358  // invariant. Note that from_ref is a from-space ref, so the SizeOf()
1359  // call will access the from-space meta objects, which is OK and necessary here.
1360  size_t obj_size = from_ref->SizeOf<kDefaultVerifyFlags, kWithoutReadBarrier>();
1361  size_t region_space_alloc_size = RoundUp(obj_size, space::RegionSpace::kAlignment);
1362  size_t region_space_bytes_allocated = 0U;
1363  size_t non_moving_space_bytes_allocated = 0U;
1364  size_t bytes_allocated = 0U;
1365  size_t dummy;
1366  mirror::Object* to_ref = region_space_->AllocNonvirtual<true>(
1367      region_space_alloc_size, &region_space_bytes_allocated, nullptr, &dummy);
1368  bytes_allocated = region_space_bytes_allocated;
1369  if (to_ref != nullptr) {
1370    DCHECK_EQ(region_space_alloc_size, region_space_bytes_allocated);
1371  }
1372  bool fall_back_to_non_moving = false;
1373  if (UNLIKELY(to_ref == nullptr)) {
1374    // Failed to allocate in the region space. Try the skipped blocks.
1375    to_ref = AllocateInSkippedBlock(region_space_alloc_size);
1376    if (to_ref != nullptr) {
1377      // Succeeded in allocating from a skipped block.
1378      if (heap_->use_tlab_) {
1379        // This is necessary in the TLAB case, as the allocation isn't otherwise accounted for in the space.
1380        region_space_->RecordAlloc(to_ref);
1381      }
1382      bytes_allocated = region_space_alloc_size;
1383    } else {
1384      // Fall back to the non-moving space.
1385      fall_back_to_non_moving = true;
1386      if (kVerboseMode) {
1387        LOG(INFO) << "Out of memory in the to-space. Fall back to non-moving. skipped_bytes="
1388                  << to_space_bytes_skipped_.LoadSequentiallyConsistent()
1389                  << " skipped_objects=" << to_space_objects_skipped_.LoadSequentiallyConsistent();
1390      }
1391      fall_back_to_non_moving = true;
1392      to_ref = heap_->non_moving_space_->Alloc(Thread::Current(), obj_size,
1393                                               &non_moving_space_bytes_allocated, nullptr, &dummy);
1394      CHECK(to_ref != nullptr) << "Fall-back non-moving space allocation failed";
1395      bytes_allocated = non_moving_space_bytes_allocated;
1396      // Mark it in the mark bitmap.
1397      accounting::ContinuousSpaceBitmap* mark_bitmap =
1398          heap_mark_bitmap_->GetContinuousSpaceBitmap(to_ref);
1399      CHECK(mark_bitmap != nullptr);
1400      CHECK(!mark_bitmap->AtomicTestAndSet(to_ref));
1401    }
1402  }
1403  DCHECK(to_ref != nullptr);
1404
1405  // Attempt to install the forwarding pointer. This is done in a loop because
1406  // the atomic write of the lock word can fail.
1407  while (true) {
1408    // Copy the object. TODO: copy only the lockword in the second iteration and on?
1409    memcpy(to_ref, from_ref, obj_size);
1410
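    // The memcpy above copied from_ref's lock word into to_ref, so this read
    // observes from_ref's lock word as of the copy; it may already contain a
    // forwarding address installed by a competing thread.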
1411    LockWord old_lock_word = to_ref->GetLockWord(false);
1412
1413    if (old_lock_word.GetState() == LockWord::kForwardingAddress) {
1414      // Lost the race. Another thread (either GC or mutator) stored
1415      // the forwarding pointer first. Make the lost copy (to_ref)
1416      // look like a valid but dead (dummy) object and keep it for
1417      // future reuse.
1418      FillWithDummyObject(to_ref, bytes_allocated);
1419      if (!fall_back_to_non_moving) {
1420        DCHECK(region_space_->IsInToSpace(to_ref));
1421        if (bytes_allocated > space::RegionSpace::kRegionSize) {
1422          // Free the large alloc.
1423          region_space_->FreeLarge(to_ref, bytes_allocated);
1424        } else {
1425          // Record the lost copy for later reuse.
1426          heap_->num_bytes_allocated_.FetchAndAddSequentiallyConsistent(bytes_allocated);
1427          to_space_bytes_skipped_.FetchAndAddSequentiallyConsistent(bytes_allocated);
1428          to_space_objects_skipped_.FetchAndAddSequentiallyConsistent(1);
1429          MutexLock mu(Thread::Current(), skipped_blocks_lock_);
1430          skipped_blocks_map_.insert(std::make_pair(bytes_allocated,
1431                                                    reinterpret_cast<uint8_t*>(to_ref)));
1432        }
1433      } else {
1434        DCHECK(heap_->non_moving_space_->HasAddress(to_ref));
1435        DCHECK_EQ(bytes_allocated, non_moving_space_bytes_allocated);
1436        // Free the non-moving-space chunk.
1437        accounting::ContinuousSpaceBitmap* mark_bitmap =
1438            heap_mark_bitmap_->GetContinuousSpaceBitmap(to_ref);
1439        CHECK(mark_bitmap != nullptr);
1440        CHECK(mark_bitmap->Clear(to_ref));
1441        heap_->non_moving_space_->Free(Thread::Current(), to_ref);
1442      }
1443
1444      // Get the winner's forward ptr.
1445      mirror::Object* lost_fwd_ptr = to_ref;
1446      to_ref = reinterpret_cast<mirror::Object*>(old_lock_word.ForwardingAddress());
1447      CHECK(to_ref != nullptr);
1448      CHECK_NE(to_ref, lost_fwd_ptr);
1449      CHECK(region_space_->IsInToSpace(to_ref) || heap_->non_moving_space_->HasAddress(to_ref));
1450      CHECK_NE(to_ref->GetLockWord(false).GetState(), LockWord::kForwardingAddress);
1451      return to_ref;
1452    }
1453
1454    // Set the gray ptr.
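    // Publish the copy gray so that, under the Baker read barrier, a reader
    // that reaches it before its fields have been scanned still takes the
    // read barrier slow path.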
1455    if (kUseBakerReadBarrier) {
1456      to_ref->SetReadBarrierPointer(ReadBarrier::GrayPtr());
1457    }
1458
1459    LockWord new_lock_word = LockWord::FromForwardingAddress(reinterpret_cast<size_t>(to_ref));
1460
1461    // Try to atomically write the fwd ptr.
1462    bool success = from_ref->CasLockWordWeakSequentiallyConsistent(old_lock_word, new_lock_word);
1463    if (LIKELY(success)) {
1464      // The CAS succeeded.
1465      objects_moved_.FetchAndAddSequentiallyConsistent(1);
1466      bytes_moved_.FetchAndAddSequentiallyConsistent(region_space_alloc_size);
1467      if (LIKELY(!fall_back_to_non_moving)) {
1468        DCHECK(region_space_->IsInToSpace(to_ref));
1469      } else {
1470        DCHECK(heap_->non_moving_space_->HasAddress(to_ref));
1471        DCHECK_EQ(bytes_allocated, non_moving_space_bytes_allocated);
1472      }
1473      if (kUseBakerReadBarrier) {
1474        DCHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr());
1475      }
1476      DCHECK(GetFwdPtr(from_ref) == to_ref);
1477      CHECK_NE(to_ref->GetLockWord(false).GetState(), LockWord::kForwardingAddress);
1478      PushOntoMarkStack<true>(to_ref);
1479      return to_ref;
1480    } else {
1481      // The CAS failed. It may have lost the race or may have failed
1482      // due to monitor/hashcode ops. Either way, retry.
1483    }
1484  }
1485}
1486
1487mirror::Object* ConcurrentCopying::IsMarked(mirror::Object* from_ref) {
1488  DCHECK(from_ref != nullptr);
1489  space::RegionSpace::RegionType rtype = region_space_->GetRegionType(from_ref);
1490  if (rtype == space::RegionSpace::RegionType::kRegionTypeToSpace) {
1491    // It's already marked.
1492    return from_ref;
1493  }
1494  mirror::Object* to_ref;
1495  if (rtype == space::RegionSpace::RegionType::kRegionTypeFromSpace) {
1496    to_ref = GetFwdPtr(from_ref);
1497    DCHECK(to_ref == nullptr || region_space_->IsInToSpace(to_ref) ||
1498           heap_->non_moving_space_->HasAddress(to_ref))
1499        << "from_ref=" << from_ref << " to_ref=" << to_ref;
1500  } else if (rtype == space::RegionSpace::RegionType::kRegionTypeUnevacFromSpace) {
1501    if (region_space_bitmap_->Test(from_ref)) {
1502      to_ref = from_ref;
1503    } else {
1504      to_ref = nullptr;
1505    }
1506  } else {
1507    // from_ref is in a non-moving space.
1508    if (immune_region_.ContainsObject(from_ref)) {
1509      accounting::ContinuousSpaceBitmap* cc_bitmap =
1510          cc_heap_bitmap_->GetContinuousSpaceBitmap(from_ref);
1511      DCHECK(cc_bitmap != nullptr)
1512          << "An immune space object must have a bitmap";
1513      if (kIsDebugBuild) {
1514        DCHECK(heap_mark_bitmap_->GetContinuousSpaceBitmap(from_ref)->Test(from_ref))
1515            << "Immune space object must be already marked";
1516      }
1517      if (cc_bitmap->Test(from_ref)) {
1518        // Already marked.
1519        to_ref = from_ref;
1520      } else {
1521        // Newly marked.
1522        to_ref = nullptr;
1523      }
1524    } else {
1525      // Non-immune non-moving space. Use the mark bitmap.
1526      accounting::ContinuousSpaceBitmap* mark_bitmap =
1527          heap_mark_bitmap_->GetContinuousSpaceBitmap(from_ref);
1528      accounting::LargeObjectBitmap* los_bitmap =
1529          heap_mark_bitmap_->GetLargeObjectBitmap(from_ref);
1530      CHECK(los_bitmap != nullptr) << "LOS bitmap covers the entire address range";
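      // mark_bitmap is null when from_ref is not covered by any continuous
      // space, which here means it is a large object tracked by los_bitmap.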
1531      bool is_los = mark_bitmap == nullptr;
1532      if (!is_los && mark_bitmap->Test(from_ref)) {
1533        // Already marked.
1534        to_ref = from_ref;
1535      } else if (is_los && los_bitmap->Test(from_ref)) {
1536        // Already marked in LOS.
1537        to_ref = from_ref;
1538      } else {
1539        // Not marked.
1540        if (IsOnAllocStack(from_ref)) {
1541          // If on the allocation stack, it's considered marked.
1542          to_ref = from_ref;
1543        } else {
1544          // Not marked.
1545          to_ref = nullptr;
1546        }
1547      }
1548    }
1549  }
1550  return to_ref;
1551}
1552
1553bool ConcurrentCopying::IsOnAllocStack(mirror::Object* ref) {
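  // Acquire fence so that allocation-stack entries published by other threads
  // are visible before the stack is searched below.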
1554  QuasiAtomic::ThreadFenceAcquire();
1555  accounting::ObjectStack* alloc_stack = GetAllocationStack();
1556  return alloc_stack->Contains(ref);
1557}
1558
1559mirror::Object* ConcurrentCopying::Mark(mirror::Object* from_ref) {
1560  if (from_ref == nullptr) {
1561    return nullptr;
1562  }
1563  DCHECK(from_ref != nullptr);
1564  DCHECK(heap_->collector_type_ == kCollectorTypeCC);
1565  if (kUseBakerReadBarrier && !is_active_) {
1566    // In the lock word forwarding address state, the read barrier bits
1567    // in the lock word are part of the stored forwarding address and
1568    // therefore invalid. This is usually OK because from-space copies of
1569    // objects aren't accessed by mutators due to the to-space
1570    // invariant. However, during dex2oat image-writing relocation and
1571    // zygote compaction, objects can be in the forwarding address state
1572    // (to store the forwarding/relocation addresses) while still being
1573    // accessed, so the invalid read barrier bits are consulted. If they
1574    // happen to look gray when they aren't, the read barrier slow path
1575    // can trigger when it shouldn't. To guard against this, return here
1576    // if the CC collector isn't running.
1577    return from_ref;
1578  }
1579  DCHECK(region_space_ != nullptr) << "Read barrier slow path taken when CC isn't running?";
1580  space::RegionSpace::RegionType rtype = region_space_->GetRegionType(from_ref);
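  // Dispatch on where from_ref resides: to-space refs are already marked;
  // from-space refs are forwarded or copied; unevacuated from-space refs are
  // marked in place via the region bitmap; everything else lives in a
  // non-moving or immune space and is marked via the corresponding bitmaps.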
1581  if (rtype == space::RegionSpace::RegionType::kRegionTypeToSpace) {
1582    // It's already marked.
1583    return from_ref;
1584  }
1585  mirror::Object* to_ref;
1586  if (rtype == space::RegionSpace::RegionType::kRegionTypeFromSpace) {
1587    to_ref = GetFwdPtr(from_ref);
1588    if (kUseBakerReadBarrier) {
1589      DCHECK(to_ref != ReadBarrier::GrayPtr()) << "from_ref=" << from_ref << " to_ref=" << to_ref;
1590    }
1591    if (to_ref == nullptr) {
1592      // It isn't marked yet. Mark it by copying it to the to-space.
1593      to_ref = Copy(from_ref);
1594    }
1595    DCHECK(region_space_->IsInToSpace(to_ref) || heap_->non_moving_space_->HasAddress(to_ref))
1596        << "from_ref=" << from_ref << " to_ref=" << to_ref;
1597  } else if (rtype == space::RegionSpace::RegionType::kRegionTypeUnevacFromSpace) {
1598    // This may or may not succeed, which is ok.
1599    if (kUseBakerReadBarrier) {
1600      from_ref->AtomicSetReadBarrierPointer(ReadBarrier::WhitePtr(), ReadBarrier::GrayPtr());
1601    }
1602    if (region_space_bitmap_->AtomicTestAndSet(from_ref)) {
1603      // Already marked.
1604      to_ref = from_ref;
1605    } else {
1606      // Newly marked.
1607      to_ref = from_ref;
1608      if (kUseBakerReadBarrier) {
1609        DCHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr());
1610      }
1611      PushOntoMarkStack<true>(to_ref);
1612    }
1613  } else {
1614    // from_ref is in a non-moving space.
1615    DCHECK(!region_space_->HasAddress(from_ref)) << from_ref;
1616    if (immune_region_.ContainsObject(from_ref)) {
1617      accounting::ContinuousSpaceBitmap* cc_bitmap =
1618          cc_heap_bitmap_->GetContinuousSpaceBitmap(from_ref);
1619      DCHECK(cc_bitmap != nullptr)
1620          << "An immune space object must have a bitmap";
1621      if (kIsDebugBuild) {
1622        DCHECK(heap_mark_bitmap_->GetContinuousSpaceBitmap(from_ref)->Test(from_ref))
1623            << "Immune space object must be already marked";
1624      }
1625      // This may or may not succeed, which is ok.
1626      if (kUseBakerReadBarrier) {
1627        from_ref->AtomicSetReadBarrierPointer(ReadBarrier::WhitePtr(), ReadBarrier::GrayPtr());
1628      }
1629      if (cc_bitmap->AtomicTestAndSet(from_ref)) {
1630        // Already marked.
1631        to_ref = from_ref;
1632      } else {
1633        // Newly marked.
1634        to_ref = from_ref;
1635        if (kUseBakerReadBarrier) {
1636          DCHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr());
1637        }
1638        PushOntoMarkStack<true>(to_ref);
1639      }
1640    } else {
1641      // Use the mark bitmap.
1642      accounting::ContinuousSpaceBitmap* mark_bitmap =
1643          heap_mark_bitmap_->GetContinuousSpaceBitmap(from_ref);
1644      accounting::LargeObjectBitmap* los_bitmap =
1645          heap_mark_bitmap_->GetLargeObjectBitmap(from_ref);
1646      CHECK(los_bitmap != nullptr) << "LOS bitmap covers the entire address range";
1647      bool is_los = mark_bitmap == nullptr;
1648      if (!is_los && mark_bitmap->Test(from_ref)) {
1649        // Already marked.
1650        to_ref = from_ref;
1651        if (kUseBakerReadBarrier) {
1652          DCHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr() ||
1653                 to_ref->GetReadBarrierPointer() == ReadBarrier::BlackPtr());
1654        }
1655      } else if (is_los && los_bitmap->Test(from_ref)) {
1656        // Already marked in LOS.
1657        to_ref = from_ref;
1658        if (kUseBakerReadBarrier) {
1659          DCHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr() ||
1660                 to_ref->GetReadBarrierPointer() == ReadBarrier::BlackPtr());
1661        }
1662      } else {
1663        // Not marked.
1664        if (IsOnAllocStack(from_ref)) {
1665          // If it's on the allocation stack, it's considered marked. Keep it white.
1666          to_ref = from_ref;
1667          // Objects on the allocation stack need not be marked.
1668          if (!is_los) {
1669            DCHECK(!mark_bitmap->Test(to_ref));
1670          } else {
1671            DCHECK(!los_bitmap->Test(to_ref));
1672          }
1673          if (kUseBakerReadBarrier) {
1674            DCHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::WhitePtr());
1675          }
1676        } else {
1677          // Not marked or on the allocation stack. Try to mark it.
1678          // This may or may not succeed, which is ok.
1679          if (kUseBakerReadBarrier) {
1680            from_ref->AtomicSetReadBarrierPointer(ReadBarrier::WhitePtr(), ReadBarrier::GrayPtr());
1681          }
1682          if (!is_los && mark_bitmap->AtomicTestAndSet(from_ref)) {
1683            // Already marked.
1684            to_ref = from_ref;
1685          } else if (is_los && los_bitmap->AtomicTestAndSet(from_ref)) {
1686            // Already marked in LOS.
1687            to_ref = from_ref;
1688          } else {
1689            // Newly marked.
1690            to_ref = from_ref;
1691            if (kUseBakerReadBarrier) {
1692              DCHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr());
1693            }
1694            PushOntoMarkStack<true>(to_ref);
1695          }
1696        }
1697      }
1698    }
1699  }
1700  return to_ref;
1701}
1702
1703void ConcurrentCopying::FinishPhase() {
1704  region_space_ = nullptr;
1705  CHECK(mark_queue_.IsEmpty());
1706  mark_queue_.Clear();
1707  {
1708    MutexLock mu(Thread::Current(), skipped_blocks_lock_);
1709    skipped_blocks_map_.clear();
1710  }
1711  WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
1712  heap_->ClearMarkedObjects();
1713}
1714
1715mirror::Object* ConcurrentCopying::IsMarkedCallback(mirror::Object* from_ref, void* arg) {
1716  return reinterpret_cast<ConcurrentCopying*>(arg)->IsMarked(from_ref);
1717}
1718
1719bool ConcurrentCopying::IsHeapReferenceMarkedCallback(
1720    mirror::HeapReference<mirror::Object>* field, void* arg) {
1721  mirror::Object* from_ref = field->AsMirrorPtr();
1722  mirror::Object* to_ref = reinterpret_cast<ConcurrentCopying*>(arg)->IsMarked(from_ref);
1723  if (to_ref == nullptr) {
1724    return false;
1725  }
1726  if (from_ref != to_ref) {
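    // Release fence so that the contents of the to-space copy are visible to
    // other threads before the updated reference is published by Assign().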
1727    QuasiAtomic::ThreadFenceRelease();
1728    field->Assign(to_ref);
1729    QuasiAtomic::ThreadFenceSequentiallyConsistent();
1730  }
1731  return true;
1732}
1733
1734mirror::Object* ConcurrentCopying::MarkCallback(mirror::Object* from_ref, void* arg) {
1735  return reinterpret_cast<ConcurrentCopying*>(arg)->Mark(from_ref);
1736}
1737
1738void ConcurrentCopying::ProcessMarkStackCallback(void* arg) {
1739  reinterpret_cast<ConcurrentCopying*>(arg)->ProcessMarkStack();
1740}
1741
1742void ConcurrentCopying::DelayReferenceReferent(mirror::Class* klass, mirror::Reference* reference) {
1743  heap_->GetReferenceProcessor()->DelayReferenceReferent(
1744      klass, reference, &IsHeapReferenceMarkedCallback, this);
1745}
1746
1747void ConcurrentCopying::ProcessReferences(Thread* self, bool concurrent) {
1748  TimingLogger::ScopedTiming split("ProcessReferences", GetTimings());
1749  WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
1750  GetHeap()->GetReferenceProcessor()->ProcessReferences(
1751      concurrent, GetTimings(), GetCurrentIteration()->GetClearSoftReferences(),
1752      &IsHeapReferenceMarkedCallback, &MarkCallback, &ProcessMarkStackCallback, this);
1753}
1754
1755void ConcurrentCopying::RevokeAllThreadLocalBuffers() {
1756  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
1757  region_space_->RevokeAllThreadLocalBuffers();
1758}
1759
1760}  // namespace collector
1761}  // namespace gc
1762}  // namespace art
1763