concurrent_copying.cc revision a6b1ead81603513fd40b77fd72f06d8cb1f35276
/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "concurrent_copying.h"

#include "art_field-inl.h"
#include "base/stl_util.h"
#include "debugger.h"
#include "gc/accounting/heap_bitmap-inl.h"
#include "gc/accounting/space_bitmap-inl.h"
#include "gc/reference_processor.h"
#include "gc/space/image_space.h"
#include "gc/space/space.h"
#include "intern_table.h"
#include "mirror/class-inl.h"
#include "mirror/object-inl.h"
#include "scoped_thread_state_change.h"
#include "thread-inl.h"
#include "thread_list.h"
#include "well_known_classes.h"

namespace art {
namespace gc {
namespace collector {

ConcurrentCopying::ConcurrentCopying(Heap* heap, const std::string& name_prefix)
    : GarbageCollector(heap,
                       name_prefix + (name_prefix.empty() ? "" : " ") +
                       "concurrent copying + mark sweep"),
      region_space_(nullptr), gc_barrier_(new Barrier(0)),
      gc_mark_stack_(accounting::ObjectStack::Create("concurrent copying gc mark stack",
                                                     2 * MB, 2 * MB)),
      mark_stack_lock_("concurrent copying mark stack lock", kMarkSweepMarkStackLock),
      thread_running_gc_(nullptr),
      is_marking_(false), is_active_(false), is_asserting_to_space_invariant_(false),
      heap_mark_bitmap_(nullptr), live_stack_freeze_size_(0), mark_stack_mode_(kMarkStackModeOff),
      weak_ref_access_enabled_(true),
      skipped_blocks_lock_("concurrent copying bytes blocks lock", kMarkSweepMarkStackLock),
      rb_table_(heap_->GetReadBarrierTable()),
      force_evacuate_all_(false) {
  static_assert(space::RegionSpace::kRegionSize == accounting::ReadBarrierTable::kRegionSize,
                "The region space size and the read barrier table region size must match");
  cc_heap_bitmap_.reset(new accounting::HeapBitmap(heap));
  Thread* self = Thread::Current();
  {
    ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
    // Cache this so that we won't have to lock heap_bitmap_lock_ in
    // Mark(), which could cause a nested lock on heap_bitmap_lock_
    // if a read barrier occurs while doing GC, or a lock order
    // violation (class_linker_lock_ and heap_bitmap_lock_).
    heap_mark_bitmap_ = heap->GetMarkBitmap();
  }
  {
    MutexLock mu(self, mark_stack_lock_);
    for (size_t i = 0; i < kMarkStackPoolSize; ++i) {
      accounting::AtomicStack<mirror::Object>* mark_stack =
          accounting::AtomicStack<mirror::Object>::Create(
              "thread local mark stack", kMarkStackSize, kMarkStackSize);
      pooled_mark_stacks_.push_back(mark_stack);
    }
  }
}

void ConcurrentCopying::MarkHeapReference(mirror::HeapReference<mirror::Object>* from_ref) {
  // Used for preserving soft references, should be OK to not have a CAS here since there should be
  // no other threads which can trigger read barriers on the same referent during reference
  // processing.
  from_ref->Assign(Mark(from_ref->AsMirrorPtr()));
  DCHECK(!from_ref->IsNull());
}

ConcurrentCopying::~ConcurrentCopying() {
  STLDeleteElements(&pooled_mark_stacks_);
}

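// Top-level driver for a GC cycle, as reflected in the calls below: an initial pause that flips
// thread roots (FlipThreadRoots), a concurrent marking phase, an optional verification pause, a
// reclaim phase, and cleanup in FinishPhase().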
void ConcurrentCopying::RunPhases() {
  CHECK(kUseBakerReadBarrier || kUseTableLookupReadBarrier);
  CHECK(!is_active_);
  is_active_ = true;
  Thread* self = Thread::Current();
  thread_running_gc_ = self;
  Locks::mutator_lock_->AssertNotHeld(self);
  {
    ReaderMutexLock mu(self, *Locks::mutator_lock_);
    InitializePhase();
  }
  FlipThreadRoots();
  {
    ReaderMutexLock mu(self, *Locks::mutator_lock_);
    MarkingPhase();
  }
  // Verify no from space refs. This causes a pause.
  if (kEnableNoFromSpaceRefsVerification || kIsDebugBuild) {
    TimingLogger::ScopedTiming split("(Paused)VerifyNoFromSpaceReferences", GetTimings());
    ScopedPause pause(this);
    CheckEmptyMarkStack();
    if (kVerboseMode) {
      LOG(INFO) << "Verifying no from-space refs";
    }
    VerifyNoFromSpaceReferences();
    if (kVerboseMode) {
      LOG(INFO) << "Done verifying no from-space refs";
    }
    CheckEmptyMarkStack();
  }
  {
    ReaderMutexLock mu(self, *Locks::mutator_lock_);
    ReclaimPhase();
  }
  FinishPhase();
  CHECK(is_active_);
  is_active_ = false;
  thread_running_gc_ = nullptr;
}

void ConcurrentCopying::BindBitmaps() {
  Thread* self = Thread::Current();
  WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
  // Mark all of the spaces we never collect as immune.
  for (const auto& space : heap_->GetContinuousSpaces()) {
    if (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyNeverCollect
        || space->GetGcRetentionPolicy() == space::kGcRetentionPolicyFullCollect) {
      CHECK(space->IsZygoteSpace() || space->IsImageSpace());
      CHECK(immune_region_.AddContinuousSpace(space)) << "Failed to add space " << *space;
      const char* bitmap_name = space->IsImageSpace() ? "cc image space bitmap" :
          "cc zygote space bitmap";
      // TODO: try avoiding using bitmaps for image/zygote to save space.
      accounting::ContinuousSpaceBitmap* bitmap =
          accounting::ContinuousSpaceBitmap::Create(bitmap_name, space->Begin(), space->Capacity());
      cc_heap_bitmap_->AddContinuousSpaceBitmap(bitmap);
      cc_bitmaps_.push_back(bitmap);
    } else if (space == region_space_) {
      accounting::ContinuousSpaceBitmap* bitmap =
          accounting::ContinuousSpaceBitmap::Create("cc region space bitmap",
                                                    space->Begin(), space->Capacity());
      cc_heap_bitmap_->AddContinuousSpaceBitmap(bitmap);
      cc_bitmaps_.push_back(bitmap);
      region_space_bitmap_ = bitmap;
    }
  }
}

void ConcurrentCopying::InitializePhase() {
  TimingLogger::ScopedTiming split("InitializePhase", GetTimings());
  if (kVerboseMode) {
    LOG(INFO) << "GC InitializePhase";
    LOG(INFO) << "Region-space : " << reinterpret_cast<void*>(region_space_->Begin()) << "-"
              << reinterpret_cast<void*>(region_space_->Limit());
  }
  CheckEmptyMarkStack();
  immune_region_.Reset();
  bytes_moved_.StoreRelaxed(0);
  objects_moved_.StoreRelaxed(0);
  if (GetCurrentIteration()->GetGcCause() == kGcCauseExplicit ||
      GetCurrentIteration()->GetGcCause() == kGcCauseForNativeAlloc ||
      GetCurrentIteration()->GetClearSoftReferences()) {
    force_evacuate_all_ = true;
  } else {
    force_evacuate_all_ = false;
  }
  BindBitmaps();
  if (kVerboseMode) {
    LOG(INFO) << "force_evacuate_all=" << force_evacuate_all_;
    LOG(INFO) << "Immune region: " << immune_region_.Begin() << "-" << immune_region_.End();
    LOG(INFO) << "GC end of InitializePhase";
  }
}

// Used to switch the thread roots of a thread from from-space refs to to-space refs.
class ThreadFlipVisitor : public Closure {
 public:
  ThreadFlipVisitor(ConcurrentCopying* concurrent_copying, bool use_tlab)
      : concurrent_copying_(concurrent_copying), use_tlab_(use_tlab) {
  }

  virtual void Run(Thread* thread) OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
    // Note: self is not necessarily equal to thread since thread may be suspended.
    Thread* self = Thread::Current();
    CHECK(thread == self || thread->IsSuspended() || thread->GetState() == kWaitingPerformingGc)
        << thread->GetState() << " thread " << thread << " self " << self;
    thread->SetIsGcMarking(true);
    if (use_tlab_ && thread->HasTlab()) {
      if (ConcurrentCopying::kEnableFromSpaceAccountingCheck) {
        // This must come before the revoke.
        size_t thread_local_objects = thread->GetThreadLocalObjectsAllocated();
        concurrent_copying_->region_space_->RevokeThreadLocalBuffers(thread);
        reinterpret_cast<Atomic<size_t>*>(&concurrent_copying_->from_space_num_objects_at_first_pause_)->
            FetchAndAddSequentiallyConsistent(thread_local_objects);
      } else {
        concurrent_copying_->region_space_->RevokeThreadLocalBuffers(thread);
      }
    }
    if (kUseThreadLocalAllocationStack) {
      thread->RevokeThreadLocalAllocationStack();
    }
    ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
    thread->VisitRoots(concurrent_copying_);
    concurrent_copying_->GetBarrier().Pass(self);
  }

 private:
  ConcurrentCopying* const concurrent_copying_;
  const bool use_tlab_;
};

// Called back from Runtime::FlipThreadRoots() during a pause.
class FlipCallback : public Closure {
 public:
  explicit FlipCallback(ConcurrentCopying* concurrent_copying)
      : concurrent_copying_(concurrent_copying) {
  }

  virtual void Run(Thread* thread) OVERRIDE REQUIRES(Locks::mutator_lock_) {
    ConcurrentCopying* cc = concurrent_copying_;
    TimingLogger::ScopedTiming split("(Paused)FlipCallback", cc->GetTimings());
    // Note: self is not necessarily equal to thread since thread may be suspended.
    Thread* self = Thread::Current();
    CHECK(thread == self);
    Locks::mutator_lock_->AssertExclusiveHeld(self);
    cc->region_space_->SetFromSpace(cc->rb_table_, cc->force_evacuate_all_);
    cc->SwapStacks();
    if (ConcurrentCopying::kEnableFromSpaceAccountingCheck) {
      cc->RecordLiveStackFreezeSize(self);
      cc->from_space_num_objects_at_first_pause_ = cc->region_space_->GetObjectsAllocated();
      cc->from_space_num_bytes_at_first_pause_ = cc->region_space_->GetBytesAllocated();
    }
    cc->is_marking_ = true;
    cc->mark_stack_mode_.StoreRelaxed(ConcurrentCopying::kMarkStackModeThreadLocal);
    if (UNLIKELY(Runtime::Current()->IsActiveTransaction())) {
      CHECK(Runtime::Current()->IsAotCompiler());
      TimingLogger::ScopedTiming split2("(Paused)VisitTransactionRoots", cc->GetTimings());
      Runtime::Current()->VisitTransactionRoots(cc);
    }
  }

 private:
  ConcurrentCopying* const concurrent_copying_;
};

// Switch thread roots from from-space refs to to-space refs. Forward/mark the thread roots.
void ConcurrentCopying::FlipThreadRoots() {
  TimingLogger::ScopedTiming split("FlipThreadRoots", GetTimings());
  if (kVerboseMode) {
    LOG(INFO) << "time=" << region_space_->Time();
    region_space_->DumpNonFreeRegions(LOG(INFO));
  }
  Thread* self = Thread::Current();
  Locks::mutator_lock_->AssertNotHeld(self);
  gc_barrier_->Init(self, 0);
  ThreadFlipVisitor thread_flip_visitor(this, heap_->use_tlab_);
  FlipCallback flip_callback(this);
  heap_->ThreadFlipBegin(self);  // Sync with JNI critical calls.
  size_t barrier_count = Runtime::Current()->FlipThreadRoots(
      &thread_flip_visitor, &flip_callback, this);
  heap_->ThreadFlipEnd(self);
  {
    ScopedThreadStateChange tsc(self, kWaitingForCheckPointsToRun);
    gc_barrier_->Increment(self, barrier_count);
  }
  is_asserting_to_space_invariant_ = true;
  QuasiAtomic::ThreadFenceForConstructor();
  if (kVerboseMode) {
    LOG(INFO) << "time=" << region_space_->Time();
    region_space_->DumpNonFreeRegions(LOG(INFO));
    LOG(INFO) << "GC end of FlipThreadRoots";
  }
}

void ConcurrentCopying::SwapStacks() {
  heap_->SwapStacks();
}

void ConcurrentCopying::RecordLiveStackFreezeSize(Thread* self) {
  WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
  live_stack_freeze_size_ = heap_->GetLiveStack()->Size();
}

// Used to visit objects in the immune spaces.
class ConcurrentCopyingImmuneSpaceObjVisitor {
 public:
  explicit ConcurrentCopyingImmuneSpaceObjVisitor(ConcurrentCopying* cc)
      : collector_(cc) {}

  void operator()(mirror::Object* obj) const SHARED_REQUIRES(Locks::mutator_lock_)
      SHARED_REQUIRES(Locks::heap_bitmap_lock_) {
    DCHECK(obj != nullptr);
    DCHECK(collector_->immune_region_.ContainsObject(obj));
    accounting::ContinuousSpaceBitmap* cc_bitmap =
        collector_->cc_heap_bitmap_->GetContinuousSpaceBitmap(obj);
    DCHECK(cc_bitmap != nullptr)
        << "An immune space object must have a bitmap";
    if (kIsDebugBuild) {
      DCHECK(collector_->heap_->GetMarkBitmap()->Test(obj))
          << "Immune space object must be already marked";
    }
    // This may or may not succeed, which is ok.
    if (kUseBakerReadBarrier) {
      obj->AtomicSetReadBarrierPointer(ReadBarrier::WhitePtr(), ReadBarrier::GrayPtr());
    }
    if (cc_bitmap->AtomicTestAndSet(obj)) {
      // Already marked. Do nothing.
    } else {
      // Newly marked. Set the gray bit and push it onto the mark stack.
      CHECK(!kUseBakerReadBarrier || obj->GetReadBarrierPointer() == ReadBarrier::GrayPtr());
      collector_->PushOntoMarkStack(obj);
    }
  }

 private:
  ConcurrentCopying* const collector_;
};

class EmptyCheckpoint : public Closure {
 public:
  explicit EmptyCheckpoint(ConcurrentCopying* concurrent_copying)
      : concurrent_copying_(concurrent_copying) {
  }

  virtual void Run(Thread* thread) OVERRIDE NO_THREAD_SAFETY_ANALYSIS {
    // Note: self is not necessarily equal to thread since thread may be suspended.
    Thread* self = Thread::Current();
    CHECK(thread == self || thread->IsSuspended() || thread->GetState() == kWaitingPerformingGc)
        << thread->GetState() << " thread " << thread << " self " << self;
    // If thread is a running mutator, then act on behalf of the garbage collector.
    // See the code in ThreadList::RunCheckpoint.
    if (thread->GetState() == kRunnable) {
      concurrent_copying_->GetBarrier().Pass(self);
    }
  }

 private:
  ConcurrentCopying* const concurrent_copying_;
};

// Concurrently mark roots that are guarded by read barriers and process the mark stack.
void ConcurrentCopying::MarkingPhase() {
  TimingLogger::ScopedTiming split("MarkingPhase", GetTimings());
  if (kVerboseMode) {
    LOG(INFO) << "GC MarkingPhase";
  }
  CHECK(weak_ref_access_enabled_);
  {
    // Mark the image root. The WB-based collectors do not need to
    // scan the image objects from roots by relying on the card table,
    // but it's necessary for the RB to-space invariant to hold.
    TimingLogger::ScopedTiming split1("VisitImageRoots", GetTimings());
    gc::space::ImageSpace* image = heap_->GetImageSpace();
    if (image != nullptr) {
      mirror::ObjectArray<mirror::Object>* image_root = image->GetImageHeader().GetImageRoots();
      mirror::Object* marked_image_root = Mark(image_root);
      CHECK_EQ(image_root, marked_image_root) << "An image object does not move";
      if (ReadBarrier::kEnableToSpaceInvariantChecks) {
        AssertToSpaceInvariant(nullptr, MemberOffset(0), marked_image_root);
      }
    }
  }
  // TODO: Other garbage collectors use Runtime::VisitConcurrentRoots(); refactor this part
  // to use the same function.
  {
    TimingLogger::ScopedTiming split2("VisitConstantRoots", GetTimings());
    Runtime::Current()->VisitConstantRoots(this);
  }
  {
    TimingLogger::ScopedTiming split3("VisitInternTableRoots", GetTimings());
    Runtime::Current()->GetInternTable()->VisitRoots(this, kVisitRootFlagAllRoots);
  }
  {
    TimingLogger::ScopedTiming split4("VisitClassLinkerRoots", GetTimings());
    Runtime::Current()->GetClassLinker()->VisitRoots(this, kVisitRootFlagAllRoots);
  }
  {
    // TODO: don't visit the transaction roots if it's not active.
    TimingLogger::ScopedTiming split5("VisitNonThreadRoots", GetTimings());
    Runtime::Current()->VisitNonThreadRoots(this);
  }
  {
    TimingLogger::ScopedTiming split6("Dbg::VisitRoots", GetTimings());
    Dbg::VisitRoots(this);
  }
  Runtime::Current()->GetHeap()->VisitAllocationRecords(this);

  // Immune spaces.
  for (auto& space : heap_->GetContinuousSpaces()) {
    if (immune_region_.ContainsSpace(space)) {
      DCHECK(space->IsImageSpace() || space->IsZygoteSpace());
      accounting::ContinuousSpaceBitmap* live_bitmap = space->GetLiveBitmap();
      ConcurrentCopyingImmuneSpaceObjVisitor visitor(this);
      live_bitmap->VisitMarkedRange(reinterpret_cast<uintptr_t>(space->Begin()),
                                    reinterpret_cast<uintptr_t>(space->Limit()),
                                    visitor);
    }
  }

  Thread* self = Thread::Current();
  {
    TimingLogger::ScopedTiming split7("ProcessMarkStack", GetTimings());
    // We transition through three mark stack modes (thread-local, shared, GC-exclusive). The
    // primary reasons are that we need to use a checkpoint to process thread-local mark stacks,
    // but after we disable weak ref accesses, we can't use a checkpoint due to a deadlock issue
    // (running threads could be blocked in WaitHoldingLocks), and that once we reach the point
    // where we process weak references, we can avoid using a lock when accessing the GC mark
    // stack, which makes mark stack processing more efficient.

    // Process the mark stack once in the thread local stack mode. This marks most of the live
    // objects, aside from weak ref accesses with read barriers (Reference::GetReferent() and
    // system weaks) that may happen concurrently while we are processing the mark stack and that
    // may newly mark/gray objects and push refs onto the mark stack.
    ProcessMarkStack();
    // Switch to the shared mark stack mode. That is, revoke and process thread-local mark stacks
    // for the last time before transitioning to the shared mark stack mode, which would process new
    // refs that may have been concurrently pushed onto the mark stack during the ProcessMarkStack()
    // call above. At the same time, disable weak ref accesses using a per-thread flag. It's
    // important to do these together in a single checkpoint so that we can ensure that mutators
    // won't newly gray objects and push new refs onto the mark stack due to weak ref accesses and
    // mutators safely transition to the shared mark stack mode (without leaving unprocessed refs on
    // the thread-local mark stacks), without a race. This is why we use a thread-local weak ref
    // access flag, Thread::tls32_.weak_ref_access_enabled_, instead of the global ones.
    SwitchToSharedMarkStackMode();
    CHECK(!self->GetWeakRefAccessEnabled());
    // Now that weak ref accesses are disabled, once we exhaust the shared mark stack again here
    // (which may be non-empty if there were refs found on thread-local mark stacks during the above
    // SwitchToSharedMarkStackMode() call), we won't have new refs to process, that is, mutators
    // (via read barriers) have no way to produce any more refs to process. Marking converges
    // before we process weak refs below.
    ProcessMarkStack();
    CheckEmptyMarkStack();
    // Switch to the GC exclusive mark stack mode so that we can process the mark stack without a
    // lock from this point on.
    SwitchToGcExclusiveMarkStackMode();
    CheckEmptyMarkStack();
    if (kVerboseMode) {
      LOG(INFO) << "ProcessReferences";
    }
    // Process weak references. This may produce new refs to process and have them processed via
    // ProcessMarkStack (in the GC exclusive mark stack mode).
    ProcessReferences(self);
    CheckEmptyMarkStack();
    if (kVerboseMode) {
      LOG(INFO) << "SweepSystemWeaks";
    }
    SweepSystemWeaks(self);
    if (kVerboseMode) {
      LOG(INFO) << "SweepSystemWeaks done";
    }
    // Process the mark stack here one last time because the above SweepSystemWeaks() call may have
    // marked some objects (strings) alive, as hash_set::Erase() can call the hash function for
    // arbitrary elements in the weak intern table in InternTable::Table::SweepWeaks().
    ProcessMarkStack();
    CheckEmptyMarkStack();
    // Re-enable weak ref accesses.
    ReenableWeakRefAccess(self);
    // Free data for class loaders that we unloaded.
    Runtime::Current()->GetClassLinker()->CleanupClassLoaders();
    // Marking is done. Disable marking.
    DisableMarking();
    CheckEmptyMarkStack();
  }

  CHECK(weak_ref_access_enabled_);
  if (kVerboseMode) {
    LOG(INFO) << "GC end of MarkingPhase";
  }
}

void ConcurrentCopying::ReenableWeakRefAccess(Thread* self) {
  if (kVerboseMode) {
    LOG(INFO) << "ReenableWeakRefAccess";
  }
  weak_ref_access_enabled_.StoreRelaxed(true);  // This is for new threads.
  QuasiAtomic::ThreadFenceForConstructor();
  // Iterate all threads (don't need to or can't use a checkpoint) and re-enable weak ref access.
  {
    MutexLock mu(self, *Locks::thread_list_lock_);
    std::list<Thread*> thread_list = Runtime::Current()->GetThreadList()->GetList();
    for (Thread* thread : thread_list) {
      thread->SetWeakRefAccessEnabled(true);
    }
  }
  // Unblock blocking threads.
  GetHeap()->GetReferenceProcessor()->BroadcastForSlowPath(self);
  Runtime::Current()->BroadcastForNewSystemWeaks();
}

class DisableMarkingCheckpoint : public Closure {
 public:
  explicit DisableMarkingCheckpoint(ConcurrentCopying* concurrent_copying)
      : concurrent_copying_(concurrent_copying) {
  }

  void Run(Thread* thread) OVERRIDE NO_THREAD_SAFETY_ANALYSIS {
    // Note: self is not necessarily equal to thread since thread may be suspended.
    Thread* self = Thread::Current();
    DCHECK(thread == self || thread->IsSuspended() || thread->GetState() == kWaitingPerformingGc)
        << thread->GetState() << " thread " << thread << " self " << self;
    // Disable the thread-local is_gc_marking flag.
    // Note a thread that has just started right before this checkpoint may already have this flag
    // set to false, which is ok.
    thread->SetIsGcMarking(false);
    // If thread is a running mutator, then act on behalf of the garbage collector.
    // See the code in ThreadList::RunCheckpoint.
    if (thread->GetState() == kRunnable) {
      concurrent_copying_->GetBarrier().Pass(self);
    }
  }

 private:
  ConcurrentCopying* const concurrent_copying_;
};

void ConcurrentCopying::IssueDisableMarkingCheckpoint() {
  Thread* self = Thread::Current();
  DisableMarkingCheckpoint check_point(this);
  ThreadList* thread_list = Runtime::Current()->GetThreadList();
  gc_barrier_->Init(self, 0);
  size_t barrier_count = thread_list->RunCheckpoint(&check_point);
  // If there are no threads to wait for, which implies that all the checkpoint functions have
  // finished, then there is no need to release the mutator lock.
  if (barrier_count == 0) {
    return;
  }
  // Release locks then wait for all mutator threads to pass the barrier.
  Locks::mutator_lock_->SharedUnlock(self);
  {
    ScopedThreadStateChange tsc(self, kWaitingForCheckPointsToRun);
    gc_barrier_->Increment(self, barrier_count);
  }
  Locks::mutator_lock_->SharedLock(self);
}

void ConcurrentCopying::DisableMarking() {
  // Change the global is_marking flag to false. Do a fence before doing a checkpoint to update the
  // thread-local flags so that a new thread starting up will get the correct is_marking flag.
  is_marking_ = false;
  QuasiAtomic::ThreadFenceForConstructor();
  // Use a checkpoint to turn off the thread-local is_gc_marking flags and to ensure no threads are
  // still in the middle of a read barrier which may have a from-space ref cached in a local
  // variable.
  IssueDisableMarkingCheckpoint();
  if (kUseTableLookupReadBarrier) {
    heap_->rb_table_->ClearAll();
    DCHECK(heap_->rb_table_->IsAllCleared());
  }
  is_mark_stack_push_disallowed_.StoreSequentiallyConsistent(1);
  mark_stack_mode_.StoreSequentiallyConsistent(kMarkStackModeOff);
}

void ConcurrentCopying::IssueEmptyCheckpoint() {
  Thread* self = Thread::Current();
  EmptyCheckpoint check_point(this);
  ThreadList* thread_list = Runtime::Current()->GetThreadList();
  gc_barrier_->Init(self, 0);
  size_t barrier_count = thread_list->RunCheckpoint(&check_point);
  // If there are no threads to wait for, which implies that all the checkpoint functions have
  // finished, then there is no need to release the mutator lock.
  if (barrier_count == 0) {
    return;
  }
  // Release locks then wait for all mutator threads to pass the barrier.
  Locks::mutator_lock_->SharedUnlock(self);
  {
    ScopedThreadStateChange tsc(self, kWaitingForCheckPointsToRun);
    gc_barrier_->Increment(self, barrier_count);
  }
  Locks::mutator_lock_->SharedLock(self);
}

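// Push a newly-marked reference onto the mark stack appropriate for the current mark stack mode:
// a thread-local stack (or the GC stack when called on the GC-running thread) in thread-local
// mode, the shared GC stack under mark_stack_lock_ in shared mode, or the GC stack without a lock
// in GC-exclusive mode.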
void ConcurrentCopying::PushOntoMarkStack(mirror::Object* to_ref) {
  CHECK_EQ(is_mark_stack_push_disallowed_.LoadRelaxed(), 0)
      << " " << to_ref << " " << PrettyTypeOf(to_ref);
  Thread* self = Thread::Current();  // TODO: pass self as an argument from call sites?
  CHECK(thread_running_gc_ != nullptr);
  MarkStackMode mark_stack_mode = mark_stack_mode_.LoadRelaxed();
  if (mark_stack_mode == kMarkStackModeThreadLocal) {
    if (self == thread_running_gc_) {
      // If GC-running thread, use the GC mark stack instead of a thread-local mark stack.
      CHECK(self->GetThreadLocalMarkStack() == nullptr);
      CHECK(!gc_mark_stack_->IsFull());
      gc_mark_stack_->PushBack(to_ref);
    } else {
      // Otherwise, use a thread-local mark stack.
      accounting::AtomicStack<mirror::Object>* tl_mark_stack = self->GetThreadLocalMarkStack();
      if (UNLIKELY(tl_mark_stack == nullptr || tl_mark_stack->IsFull())) {
        MutexLock mu(self, mark_stack_lock_);
        // Get a new thread local mark stack.
        accounting::AtomicStack<mirror::Object>* new_tl_mark_stack;
        if (!pooled_mark_stacks_.empty()) {
          // Use a pooled mark stack.
          new_tl_mark_stack = pooled_mark_stacks_.back();
          pooled_mark_stacks_.pop_back();
        } else {
          // None pooled. Create a new one.
          new_tl_mark_stack =
              accounting::AtomicStack<mirror::Object>::Create(
                  "thread local mark stack", 4 * KB, 4 * KB);
        }
        DCHECK(new_tl_mark_stack != nullptr);
        DCHECK(new_tl_mark_stack->IsEmpty());
        new_tl_mark_stack->PushBack(to_ref);
        self->SetThreadLocalMarkStack(new_tl_mark_stack);
        if (tl_mark_stack != nullptr) {
          // Store the old full stack into a vector.
          revoked_mark_stacks_.push_back(tl_mark_stack);
        }
      } else {
        tl_mark_stack->PushBack(to_ref);
      }
    }
  } else if (mark_stack_mode == kMarkStackModeShared) {
    // Access the shared GC mark stack with a lock.
    MutexLock mu(self, mark_stack_lock_);
    CHECK(!gc_mark_stack_->IsFull());
    gc_mark_stack_->PushBack(to_ref);
  } else {
    CHECK_EQ(static_cast<uint32_t>(mark_stack_mode),
             static_cast<uint32_t>(kMarkStackModeGcExclusive))
        << "ref=" << to_ref
        << " self->gc_marking=" << self->GetIsGcMarking()
        << " cc->is_marking=" << is_marking_;
    CHECK(self == thread_running_gc_)
        << "Only GC-running thread should access the mark stack "
        << "in the GC exclusive mark stack mode";
    // Access the GC mark stack without a lock.
    CHECK(!gc_mark_stack_->IsFull());
    gc_mark_stack_->PushBack(to_ref);
  }
}

accounting::ObjectStack* ConcurrentCopying::GetAllocationStack() {
  return heap_->allocation_stack_.get();
}

accounting::ObjectStack* ConcurrentCopying::GetLiveStack() {
  return heap_->live_stack_.get();
}

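// Return the forwarding pointer of a from-space object, stashed in its lock word as a forwarding
// address while it is copied, or null if the object has not been forwarded yet.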
inline mirror::Object* ConcurrentCopying::GetFwdPtr(mirror::Object* from_ref) {
  DCHECK(region_space_->IsInFromSpace(from_ref));
  LockWord lw = from_ref->GetLockWord(false);
  if (lw.GetState() == LockWord::kForwardingAddress) {
    mirror::Object* fwd_ptr = reinterpret_cast<mirror::Object*>(lw.ForwardingAddress());
    CHECK(fwd_ptr != nullptr);
    return fwd_ptr;
  } else {
    return nullptr;
  }
}

// The following visitors are used to verify that there are no references to the from-space
// left after marking.
class ConcurrentCopyingVerifyNoFromSpaceRefsVisitor : public SingleRootVisitor {
 public:
  explicit ConcurrentCopyingVerifyNoFromSpaceRefsVisitor(ConcurrentCopying* collector)
      : collector_(collector) {}

  void operator()(mirror::Object* ref) const
      SHARED_REQUIRES(Locks::mutator_lock_) ALWAYS_INLINE {
    if (ref == nullptr) {
      // OK.
      return;
    }
    collector_->AssertToSpaceInvariant(nullptr, MemberOffset(0), ref);
    if (kUseBakerReadBarrier) {
      if (collector_->RegionSpace()->IsInToSpace(ref)) {
        CHECK(ref->GetReadBarrierPointer() == nullptr)
            << "To-space ref " << ref << " " << PrettyTypeOf(ref)
            << " has non-white rb_ptr " << ref->GetReadBarrierPointer();
      } else {
        CHECK(ref->GetReadBarrierPointer() == ReadBarrier::BlackPtr() ||
              (ref->GetReadBarrierPointer() == ReadBarrier::WhitePtr() &&
               collector_->IsOnAllocStack(ref)))
            << "Non-moving/unevac from space ref " << ref << " " << PrettyTypeOf(ref)
            << " has non-black rb_ptr " << ref->GetReadBarrierPointer()
            << " but isn't on the alloc stack (and has white rb_ptr)."
            << " Is it in the non-moving space="
            << (collector_->GetHeap()->GetNonMovingSpace()->HasAddress(ref));
      }
    }
  }

  void VisitRoot(mirror::Object* root, const RootInfo& info ATTRIBUTE_UNUSED)
      OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
    DCHECK(root != nullptr);
    operator()(root);
  }

 private:
  ConcurrentCopying* const collector_;
};

class ConcurrentCopyingVerifyNoFromSpaceRefsFieldVisitor {
 public:
  explicit ConcurrentCopyingVerifyNoFromSpaceRefsFieldVisitor(ConcurrentCopying* collector)
      : collector_(collector) {}

  void operator()(mirror::Object* obj, MemberOffset offset, bool is_static ATTRIBUTE_UNUSED) const
      SHARED_REQUIRES(Locks::mutator_lock_) ALWAYS_INLINE {
    mirror::Object* ref =
        obj->GetFieldObject<mirror::Object, kDefaultVerifyFlags, kWithoutReadBarrier>(offset);
    ConcurrentCopyingVerifyNoFromSpaceRefsVisitor visitor(collector_);
    visitor(ref);
  }
  void operator()(mirror::Class* klass, mirror::Reference* ref) const
      SHARED_REQUIRES(Locks::mutator_lock_) ALWAYS_INLINE {
    CHECK(klass->IsTypeOfReferenceClass());
    this->operator()(ref, mirror::Reference::ReferentOffset(), false);
  }

  void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const
      SHARED_REQUIRES(Locks::mutator_lock_) {
    if (!root->IsNull()) {
      VisitRoot(root);
    }
  }

  void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
      SHARED_REQUIRES(Locks::mutator_lock_) {
    ConcurrentCopyingVerifyNoFromSpaceRefsVisitor visitor(collector_);
    visitor(root->AsMirrorPtr());
  }

 private:
  ConcurrentCopying* const collector_;
};

class ConcurrentCopyingVerifyNoFromSpaceRefsObjectVisitor {
 public:
  explicit ConcurrentCopyingVerifyNoFromSpaceRefsObjectVisitor(ConcurrentCopying* collector)
      : collector_(collector) {}
  void operator()(mirror::Object* obj) const
      SHARED_REQUIRES(Locks::mutator_lock_) {
    ObjectCallback(obj, collector_);
  }
  static void ObjectCallback(mirror::Object* obj, void *arg)
      SHARED_REQUIRES(Locks::mutator_lock_) {
    CHECK(obj != nullptr);
    ConcurrentCopying* collector = reinterpret_cast<ConcurrentCopying*>(arg);
    space::RegionSpace* region_space = collector->RegionSpace();
    CHECK(!region_space->IsInFromSpace(obj)) << "Scanning object " << obj << " in from space";
    ConcurrentCopyingVerifyNoFromSpaceRefsFieldVisitor visitor(collector);
    obj->VisitReferences(visitor, visitor);
    if (kUseBakerReadBarrier) {
      if (collector->RegionSpace()->IsInToSpace(obj)) {
        CHECK(obj->GetReadBarrierPointer() == nullptr)
            << "obj=" << obj << " non-white rb_ptr " << obj->GetReadBarrierPointer();
      } else {
        CHECK(obj->GetReadBarrierPointer() == ReadBarrier::BlackPtr() ||
              (obj->GetReadBarrierPointer() == ReadBarrier::WhitePtr() &&
               collector->IsOnAllocStack(obj)))
            << "Non-moving space/unevac from space ref " << obj << " " << PrettyTypeOf(obj)
            << " has non-black rb_ptr " << obj->GetReadBarrierPointer()
            << " but isn't on the alloc stack (and has white rb_ptr). Is it in the non-moving space="
            << (collector->GetHeap()->GetNonMovingSpace()->HasAddress(obj));
      }
    }
  }

 private:
  ConcurrentCopying* const collector_;
};

// Verify that there are no from-space references left after the marking phase.
void ConcurrentCopying::VerifyNoFromSpaceReferences() {
  Thread* self = Thread::Current();
  DCHECK(Locks::mutator_lock_->IsExclusiveHeld(self));
  // Verify that all threads have is_gc_marking set to false.
  {
    MutexLock mu(self, *Locks::thread_list_lock_);
    std::list<Thread*> thread_list = Runtime::Current()->GetThreadList()->GetList();
    for (Thread* thread : thread_list) {
      CHECK(!thread->GetIsGcMarking());
    }
  }
  ConcurrentCopyingVerifyNoFromSpaceRefsObjectVisitor visitor(this);
  // Roots.
  {
    ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
    ConcurrentCopyingVerifyNoFromSpaceRefsVisitor ref_visitor(this);
    Runtime::Current()->VisitRoots(&ref_visitor);
  }
  // The to-space.
  region_space_->WalkToSpace(ConcurrentCopyingVerifyNoFromSpaceRefsObjectVisitor::ObjectCallback,
                             this);
  // Non-moving spaces.
  {
    WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
    heap_->GetMarkBitmap()->Visit(visitor);
  }
  // The alloc stack.
  {
    ConcurrentCopyingVerifyNoFromSpaceRefsVisitor ref_visitor(this);
    for (auto* it = heap_->allocation_stack_->Begin(), *end = heap_->allocation_stack_->End();
        it < end; ++it) {
      mirror::Object* const obj = it->AsMirrorPtr();
      if (obj != nullptr && obj->GetClass() != nullptr) {
        // TODO: need to call this only if obj is alive?
        ref_visitor(obj);
        visitor(obj);
      }
    }
  }
  // TODO: LOS. But only refs in LOS are classes.
}

// The following visitors are used to assert the to-space invariant.
class ConcurrentCopyingAssertToSpaceInvariantRefsVisitor {
 public:
  explicit ConcurrentCopyingAssertToSpaceInvariantRefsVisitor(ConcurrentCopying* collector)
      : collector_(collector) {}

  void operator()(mirror::Object* ref) const
      SHARED_REQUIRES(Locks::mutator_lock_) ALWAYS_INLINE {
    if (ref == nullptr) {
      // OK.
      return;
    }
    collector_->AssertToSpaceInvariant(nullptr, MemberOffset(0), ref);
  }

 private:
  ConcurrentCopying* const collector_;
};

class ConcurrentCopyingAssertToSpaceInvariantFieldVisitor {
 public:
  explicit ConcurrentCopyingAssertToSpaceInvariantFieldVisitor(ConcurrentCopying* collector)
      : collector_(collector) {}

  void operator()(mirror::Object* obj, MemberOffset offset, bool is_static ATTRIBUTE_UNUSED) const
      SHARED_REQUIRES(Locks::mutator_lock_) ALWAYS_INLINE {
    mirror::Object* ref =
        obj->GetFieldObject<mirror::Object, kDefaultVerifyFlags, kWithoutReadBarrier>(offset);
    ConcurrentCopyingAssertToSpaceInvariantRefsVisitor visitor(collector_);
    visitor(ref);
  }
  void operator()(mirror::Class* klass, mirror::Reference* ref ATTRIBUTE_UNUSED) const
      SHARED_REQUIRES(Locks::mutator_lock_) ALWAYS_INLINE {
    CHECK(klass->IsTypeOfReferenceClass());
  }

  void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const
      SHARED_REQUIRES(Locks::mutator_lock_) {
    if (!root->IsNull()) {
      VisitRoot(root);
    }
  }

  void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
      SHARED_REQUIRES(Locks::mutator_lock_) {
    ConcurrentCopyingAssertToSpaceInvariantRefsVisitor visitor(collector_);
    visitor(root->AsMirrorPtr());
  }

 private:
  ConcurrentCopying* const collector_;
};

class ConcurrentCopyingAssertToSpaceInvariantObjectVisitor {
 public:
  explicit ConcurrentCopyingAssertToSpaceInvariantObjectVisitor(ConcurrentCopying* collector)
      : collector_(collector) {}
  void operator()(mirror::Object* obj) const
      SHARED_REQUIRES(Locks::mutator_lock_) {
    ObjectCallback(obj, collector_);
  }
  static void ObjectCallback(mirror::Object* obj, void *arg)
      SHARED_REQUIRES(Locks::mutator_lock_) {
    CHECK(obj != nullptr);
    ConcurrentCopying* collector = reinterpret_cast<ConcurrentCopying*>(arg);
    space::RegionSpace* region_space = collector->RegionSpace();
    CHECK(!region_space->IsInFromSpace(obj)) << "Scanning object " << obj << " in from space";
    collector->AssertToSpaceInvariant(nullptr, MemberOffset(0), obj);
    ConcurrentCopyingAssertToSpaceInvariantFieldVisitor visitor(collector);
    obj->VisitReferences(visitor, visitor);
  }

 private:
  ConcurrentCopying* const collector_;
};

class RevokeThreadLocalMarkStackCheckpoint : public Closure {
 public:
  RevokeThreadLocalMarkStackCheckpoint(ConcurrentCopying* concurrent_copying,
                                       bool disable_weak_ref_access)
      : concurrent_copying_(concurrent_copying),
        disable_weak_ref_access_(disable_weak_ref_access) {
  }

  virtual void Run(Thread* thread) OVERRIDE NO_THREAD_SAFETY_ANALYSIS {
    // Note: self is not necessarily equal to thread since thread may be suspended.
    Thread* self = Thread::Current();
    CHECK(thread == self || thread->IsSuspended() || thread->GetState() == kWaitingPerformingGc)
        << thread->GetState() << " thread " << thread << " self " << self;
    // Revoke thread local mark stacks.
    accounting::AtomicStack<mirror::Object>* tl_mark_stack = thread->GetThreadLocalMarkStack();
    if (tl_mark_stack != nullptr) {
      MutexLock mu(self, concurrent_copying_->mark_stack_lock_);
      concurrent_copying_->revoked_mark_stacks_.push_back(tl_mark_stack);
      thread->SetThreadLocalMarkStack(nullptr);
    }
    // Disable weak ref access.
    if (disable_weak_ref_access_) {
      thread->SetWeakRefAccessEnabled(false);
    }
    // If thread is a running mutator, then act on behalf of the garbage collector.
    // See the code in ThreadList::RunCheckpoint.
    if (thread->GetState() == kRunnable) {
      concurrent_copying_->GetBarrier().Pass(self);
    }
  }

 private:
  ConcurrentCopying* const concurrent_copying_;
  const bool disable_weak_ref_access_;
};

void ConcurrentCopying::RevokeThreadLocalMarkStacks(bool disable_weak_ref_access) {
  Thread* self = Thread::Current();
  RevokeThreadLocalMarkStackCheckpoint check_point(this, disable_weak_ref_access);
  ThreadList* thread_list = Runtime::Current()->GetThreadList();
  gc_barrier_->Init(self, 0);
  size_t barrier_count = thread_list->RunCheckpoint(&check_point);
  // If there are no threads to wait for, which implies that all the checkpoint functions have
  // finished, then there is no need to release the mutator lock.
  if (barrier_count == 0) {
    return;
  }
  Locks::mutator_lock_->SharedUnlock(self);
  {
    ScopedThreadStateChange tsc(self, kWaitingForCheckPointsToRun);
    gc_barrier_->Increment(self, barrier_count);
  }
  Locks::mutator_lock_->SharedLock(self);
}

void ConcurrentCopying::RevokeThreadLocalMarkStack(Thread* thread) {
  Thread* self = Thread::Current();
  CHECK_EQ(self, thread);
  accounting::AtomicStack<mirror::Object>* tl_mark_stack = thread->GetThreadLocalMarkStack();
  if (tl_mark_stack != nullptr) {
    CHECK(is_marking_);
    MutexLock mu(self, mark_stack_lock_);
    revoked_mark_stacks_.push_back(tl_mark_stack);
    thread->SetThreadLocalMarkStack(nullptr);
  }
}

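// Repeatedly drain the mark stacks until ProcessMarkStackOnce() reports an empty stack twice in
// a row; a single empty pass is not sufficient because each pass can uncover and push more refs.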
void ConcurrentCopying::ProcessMarkStack() {
  if (kVerboseMode) {
    LOG(INFO) << "ProcessMarkStack. ";
  }
  bool empty_prev = false;
  while (true) {
    bool empty = ProcessMarkStackOnce();
    if (empty_prev && empty) {
      // Saw empty mark stack for a second time, done.
      break;
    }
    empty_prev = empty;
  }
}

bool ConcurrentCopying::ProcessMarkStackOnce() {
  Thread* self = Thread::Current();
  CHECK(thread_running_gc_ != nullptr);
  CHECK(self == thread_running_gc_);
  CHECK(self->GetThreadLocalMarkStack() == nullptr);
  size_t count = 0;
  MarkStackMode mark_stack_mode = mark_stack_mode_.LoadRelaxed();
  if (mark_stack_mode == kMarkStackModeThreadLocal) {
    // Process the thread-local mark stacks and the GC mark stack.
    count += ProcessThreadLocalMarkStacks(false);
    while (!gc_mark_stack_->IsEmpty()) {
      mirror::Object* to_ref = gc_mark_stack_->PopBack();
      ProcessMarkStackRef(to_ref);
      ++count;
    }
    gc_mark_stack_->Reset();
  } else if (mark_stack_mode == kMarkStackModeShared) {
    // Process the shared GC mark stack with a lock.
    {
      MutexLock mu(self, mark_stack_lock_);
      CHECK(revoked_mark_stacks_.empty());
    }
    while (true) {
      std::vector<mirror::Object*> refs;
      {
        // Copy refs with lock. Note the number of refs should be small.
        MutexLock mu(self, mark_stack_lock_);
        if (gc_mark_stack_->IsEmpty()) {
          break;
        }
        for (StackReference<mirror::Object>* p = gc_mark_stack_->Begin();
             p != gc_mark_stack_->End(); ++p) {
          refs.push_back(p->AsMirrorPtr());
        }
        gc_mark_stack_->Reset();
      }
      for (mirror::Object* ref : refs) {
        ProcessMarkStackRef(ref);
        ++count;
      }
    }
  } else {
    CHECK_EQ(static_cast<uint32_t>(mark_stack_mode),
             static_cast<uint32_t>(kMarkStackModeGcExclusive));
    {
      MutexLock mu(self, mark_stack_lock_);
      CHECK(revoked_mark_stacks_.empty());
    }
    // Process the GC mark stack in the exclusive mode. No need to take the lock.
    while (!gc_mark_stack_->IsEmpty()) {
      mirror::Object* to_ref = gc_mark_stack_->PopBack();
      ProcessMarkStackRef(to_ref);
      ++count;
    }
    gc_mark_stack_->Reset();
  }

  // Return true if the stack was empty.
  return count == 0;
}

size_t ConcurrentCopying::ProcessThreadLocalMarkStacks(bool disable_weak_ref_access) {
  // Run a checkpoint to collect all thread local mark stacks and iterate over them all.
  RevokeThreadLocalMarkStacks(disable_weak_ref_access);
  size_t count = 0;
  std::vector<accounting::AtomicStack<mirror::Object>*> mark_stacks;
  {
    MutexLock mu(Thread::Current(), mark_stack_lock_);
    // Make a copy of the mark stack vector.
    mark_stacks = revoked_mark_stacks_;
    revoked_mark_stacks_.clear();
  }
  for (accounting::AtomicStack<mirror::Object>* mark_stack : mark_stacks) {
    for (StackReference<mirror::Object>* p = mark_stack->Begin(); p != mark_stack->End(); ++p) {
      mirror::Object* to_ref = p->AsMirrorPtr();
      ProcessMarkStackRef(to_ref);
      ++count;
    }
    {
      MutexLock mu(Thread::Current(), mark_stack_lock_);
      if (pooled_mark_stacks_.size() >= kMarkStackPoolSize) {
        // The pool has enough. Delete it.
        delete mark_stack;
      } else {
        // Otherwise, put it into the pool for later reuse.
        mark_stack->Reset();
        pooled_mark_stacks_.push_back(mark_stack);
      }
    }
  }
  return count;
}

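// Process one gray ref popped off a mark stack: scan its fields, then (under the Baker read
// barrier) change its color from gray to white (to-space) or black (non-moving/unevac from-space),
// except for a Reference with an unmarked referent, which stays gray until it is dequeued.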
void ConcurrentCopying::ProcessMarkStackRef(mirror::Object* to_ref) {
  DCHECK(!region_space_->IsInFromSpace(to_ref));
  if (kUseBakerReadBarrier) {
    DCHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr())
        << " " << to_ref << " " << to_ref->GetReadBarrierPointer()
        << " is_marked=" << IsMarked(to_ref);
  }
  // Scan ref fields.
  Scan(to_ref);
  // Mark the gray ref as white or black.
  if (kUseBakerReadBarrier) {
    DCHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr())
        << " " << to_ref << " " << to_ref->GetReadBarrierPointer()
        << " is_marked=" << IsMarked(to_ref);
  }
  if (to_ref->GetClass<kVerifyNone, kWithoutReadBarrier>()->IsTypeOfReferenceClass() &&
      to_ref->AsReference()->GetReferent<kWithoutReadBarrier>() != nullptr &&
      !IsInToSpace(to_ref->AsReference()->GetReferent<kWithoutReadBarrier>())) {
    // Leave this Reference gray in the queue so that GetReferent() will trigger a read barrier. We
    // will change it to black or white later in ReferenceQueue::DequeuePendingReference().
    CHECK(to_ref->AsReference()->IsEnqueued()) << "Left unenqueued ref gray " << to_ref;
  } else {
    // We may occasionally leave a Reference black or white in the queue if its referent happens to
    // be concurrently marked after the Scan() call above has enqueued the Reference, in which case
    // the above IsInToSpace() evaluates to true and we change the color from gray to black or white
    // here in this else block.
#ifdef USE_BAKER_OR_BROOKS_READ_BARRIER
    if (kUseBakerReadBarrier) {
      if (region_space_->IsInToSpace(to_ref)) {
        // If to-space, change from gray to white.
        bool success = to_ref->AtomicSetReadBarrierPointer(ReadBarrier::GrayPtr(),
                                                           ReadBarrier::WhitePtr());
        CHECK(success) << "Must succeed as we won the race.";
        CHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::WhitePtr());
      } else {
        // If non-moving space/unevac from space, change from gray
        // to black. We can't change gray to white because it's not
        // safe to use CAS if two threads change values in opposite
        // directions (A->B and B->A). So, we change it to black to
        // indicate non-moving objects that have been marked
        // through. Note we'd need to change from black to white
        // later (concurrently).
        bool success = to_ref->AtomicSetReadBarrierPointer(ReadBarrier::GrayPtr(),
                                                           ReadBarrier::BlackPtr());
        CHECK(success) << "Must succeed as we won the race.";
        CHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::BlackPtr());
      }
    }
#else
    DCHECK(!kUseBakerReadBarrier);
#endif
  }
  if (ReadBarrier::kEnableToSpaceInvariantChecks || kIsDebugBuild) {
    ConcurrentCopyingAssertToSpaceInvariantObjectVisitor visitor(this);
    visitor(to_ref);
  }
}

void ConcurrentCopying::SwitchToSharedMarkStackMode() {
  Thread* self = Thread::Current();
  CHECK(thread_running_gc_ != nullptr);
  CHECK_EQ(self, thread_running_gc_);
  CHECK(self->GetThreadLocalMarkStack() == nullptr);
  MarkStackMode before_mark_stack_mode = mark_stack_mode_.LoadRelaxed();
  CHECK_EQ(static_cast<uint32_t>(before_mark_stack_mode),
           static_cast<uint32_t>(kMarkStackModeThreadLocal));
  mark_stack_mode_.StoreRelaxed(kMarkStackModeShared);
  CHECK(weak_ref_access_enabled_.LoadRelaxed());
  weak_ref_access_enabled_.StoreRelaxed(false);
  QuasiAtomic::ThreadFenceForConstructor();
  // Process the thread local mark stacks one last time after switching to the shared mark stack
  // mode and disabling weak ref accesses.
  ProcessThreadLocalMarkStacks(true);
  if (kVerboseMode) {
    LOG(INFO) << "Switched to shared mark stack mode and disabled weak ref access";
  }
}

void ConcurrentCopying::SwitchToGcExclusiveMarkStackMode() {
  Thread* self = Thread::Current();
  CHECK(thread_running_gc_ != nullptr);
  CHECK_EQ(self, thread_running_gc_);
  CHECK(self->GetThreadLocalMarkStack() == nullptr);
  MarkStackMode before_mark_stack_mode = mark_stack_mode_.LoadRelaxed();
  CHECK_EQ(static_cast<uint32_t>(before_mark_stack_mode),
           static_cast<uint32_t>(kMarkStackModeShared));
  mark_stack_mode_.StoreRelaxed(kMarkStackModeGcExclusive);
  QuasiAtomic::ThreadFenceForConstructor();
  if (kVerboseMode) {
    LOG(INFO) << "Switched to GC exclusive mark stack mode";
  }
}

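// Check that all mark stacks (the revoked thread-local stacks and the GC stack) are empty; dump
// any stragglers and abort if a ref was left unprocessed.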
void ConcurrentCopying::CheckEmptyMarkStack() {
  Thread* self = Thread::Current();
  CHECK(thread_running_gc_ != nullptr);
  CHECK_EQ(self, thread_running_gc_);
  CHECK(self->GetThreadLocalMarkStack() == nullptr);
  MarkStackMode mark_stack_mode = mark_stack_mode_.LoadRelaxed();
  if (mark_stack_mode == kMarkStackModeThreadLocal) {
    // Thread-local mark stack mode.
    RevokeThreadLocalMarkStacks(false);
    MutexLock mu(Thread::Current(), mark_stack_lock_);
    if (!revoked_mark_stacks_.empty()) {
      for (accounting::AtomicStack<mirror::Object>* mark_stack : revoked_mark_stacks_) {
        while (!mark_stack->IsEmpty()) {
          mirror::Object* obj = mark_stack->PopBack();
          if (kUseBakerReadBarrier) {
            mirror::Object* rb_ptr = obj->GetReadBarrierPointer();
            LOG(INFO) << "On mark queue : " << obj << " " << PrettyTypeOf(obj)
                      << " rb_ptr=" << rb_ptr << " is_marked=" << IsMarked(obj);
          } else {
            LOG(INFO) << "On mark queue : " << obj << " " << PrettyTypeOf(obj)
                      << " is_marked=" << IsMarked(obj);
          }
        }
      }
      LOG(FATAL) << "mark stack is not empty";
    }
  } else {
    // Shared, GC-exclusive, or off.
    MutexLock mu(Thread::Current(), mark_stack_lock_);
    CHECK(gc_mark_stack_->IsEmpty());
    CHECK(revoked_mark_stacks_.empty());
  }
}

void ConcurrentCopying::SweepSystemWeaks(Thread* self) {
  TimingLogger::ScopedTiming split("SweepSystemWeaks", GetTimings());
  ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
  Runtime::Current()->SweepSystemWeaks(this);
}

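// Sweep unmarked objects in the non-moving, non-immune continuous spaces and the large object
// space. The region space (from-space) is reclaimed separately via ClearFromSpace() in the
// reclaim phase.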
void ConcurrentCopying::Sweep(bool swap_bitmaps) {
  {
    TimingLogger::ScopedTiming t("MarkStackAsLive", GetTimings());
    accounting::ObjectStack* live_stack = heap_->GetLiveStack();
    if (kEnableFromSpaceAccountingCheck) {
      CHECK_GE(live_stack_freeze_size_, live_stack->Size());
    }
    heap_->MarkAllocStackAsLive(live_stack);
    live_stack->Reset();
  }
  CheckEmptyMarkStack();
  TimingLogger::ScopedTiming split("Sweep", GetTimings());
  for (const auto& space : GetHeap()->GetContinuousSpaces()) {
    if (space->IsContinuousMemMapAllocSpace()) {
      space::ContinuousMemMapAllocSpace* alloc_space = space->AsContinuousMemMapAllocSpace();
      if (space == region_space_ || immune_region_.ContainsSpace(space)) {
        continue;
      }
      TimingLogger::ScopedTiming split2(
          alloc_space->IsZygoteSpace() ? "SweepZygoteSpace" : "SweepAllocSpace", GetTimings());
      RecordFree(alloc_space->Sweep(swap_bitmaps));
    }
  }
  SweepLargeObjects(swap_bitmaps);
}

void ConcurrentCopying::SweepLargeObjects(bool swap_bitmaps) {
  TimingLogger::ScopedTiming split("SweepLargeObjects", GetTimings());
  RecordFreeLOS(heap_->GetLargeObjectsSpace()->Sweep(swap_bitmaps));
}

class ConcurrentCopyingClearBlackPtrsVisitor {
 public:
  explicit ConcurrentCopyingClearBlackPtrsVisitor(ConcurrentCopying* cc)
      : collector_(cc) {}
#ifndef USE_BAKER_OR_BROOKS_READ_BARRIER
  NO_RETURN
#endif
  void operator()(mirror::Object* obj) const SHARED_REQUIRES(Locks::mutator_lock_)
      SHARED_REQUIRES(Locks::heap_bitmap_lock_) {
    DCHECK(obj != nullptr);
    DCHECK(collector_->heap_->GetMarkBitmap()->Test(obj)) << obj;
    DCHECK_EQ(obj->GetReadBarrierPointer(), ReadBarrier::BlackPtr()) << obj;
    obj->AtomicSetReadBarrierPointer(ReadBarrier::BlackPtr(), ReadBarrier::WhitePtr());
    DCHECK_EQ(obj->GetReadBarrierPointer(), ReadBarrier::WhitePtr()) << obj;
  }

 private:
  ConcurrentCopying* const collector_;
};

// Clear the black ptrs in non-moving objects back to white.
void ConcurrentCopying::ClearBlackPtrs() {
  CHECK(kUseBakerReadBarrier);
  TimingLogger::ScopedTiming split("ClearBlackPtrs", GetTimings());
  ConcurrentCopyingClearBlackPtrsVisitor visitor(this);
  for (auto& space : heap_->GetContinuousSpaces()) {
    if (space == region_space_) {
      continue;
    }
    accounting::ContinuousSpaceBitmap* mark_bitmap = space->GetMarkBitmap();
    if (kVerboseMode) {
      LOG(INFO) << "ClearBlackPtrs: " << *space << " bitmap: " << *mark_bitmap;
    }
    mark_bitmap->VisitMarkedRange(reinterpret_cast<uintptr_t>(space->Begin()),
                                  reinterpret_cast<uintptr_t>(space->Limit()),
                                  visitor);
  }
  space::LargeObjectSpace* large_object_space = heap_->GetLargeObjectsSpace();
  large_object_space->GetMarkBitmap()->VisitMarkedRange(
      reinterpret_cast<uintptr_t>(large_object_space->Begin()),
      reinterpret_cast<uintptr_t>(large_object_space->End()),
      visitor);
  // Objects on the allocation stack?
  if (ReadBarrier::kEnableReadBarrierInvariantChecks || kIsDebugBuild) {
    size_t count = GetAllocationStack()->Size();
    auto* it = GetAllocationStack()->Begin();
    auto* end = GetAllocationStack()->End();
    for (size_t i = 0; i < count; ++i, ++it) {
      CHECK_LT(it, end);
      mirror::Object* obj = it->AsMirrorPtr();
      if (obj != nullptr) {
        // Must have been cleared above.
        CHECK_EQ(obj->GetReadBarrierPointer(), ReadBarrier::WhitePtr()) << obj;
      }
    }
  }
}

1290void ConcurrentCopying::ReclaimPhase() {
1291  TimingLogger::ScopedTiming split("ReclaimPhase", GetTimings());
1292  if (kVerboseMode) {
1293    LOG(INFO) << "GC ReclaimPhase";
1294  }
1295  Thread* self = Thread::Current();
1296
1297  {
1298    // Double-check that the mark stack is empty.
1299    // Note: need to set this after VerifyNoFromSpaceRef().
1300    is_asserting_to_space_invariant_ = false;
1301    QuasiAtomic::ThreadFenceForConstructor();
1302    if (kVerboseMode) {
1303      LOG(INFO) << "Issue an empty check point. ";
1304    }
1305    IssueEmptyCheckpoint();
1306    // Disable the check.
1307    is_mark_stack_push_disallowed_.StoreSequentiallyConsistent(0);
1308    CheckEmptyMarkStack();
1309  }
1310
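  // The accounting below follows a simple identity: every from-space byte
  // was either copied into the to-space or is now garbage, so
  //   freed_bytes   = from_bytes   - to_bytes
  //   freed_objects = from_objects - to_objects
  // Unevacuated from-space objects survive in place and are therefore
  // excluded from both sides.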
  {
    // Record freed objects.
    TimingLogger::ScopedTiming split2("RecordFree", GetTimings());
    // Don't include thread-locals that are in the to-space.
    uint64_t from_bytes = region_space_->GetBytesAllocatedInFromSpace();
    uint64_t from_objects = region_space_->GetObjectsAllocatedInFromSpace();
    uint64_t unevac_from_bytes = region_space_->GetBytesAllocatedInUnevacFromSpace();
    uint64_t unevac_from_objects = region_space_->GetObjectsAllocatedInUnevacFromSpace();
    uint64_t to_bytes = bytes_moved_.LoadSequentiallyConsistent();
    uint64_t to_objects = objects_moved_.LoadSequentiallyConsistent();
    if (kEnableFromSpaceAccountingCheck) {
      CHECK_EQ(from_space_num_objects_at_first_pause_, from_objects + unevac_from_objects);
      CHECK_EQ(from_space_num_bytes_at_first_pause_, from_bytes + unevac_from_bytes);
    }
    CHECK_LE(to_objects, from_objects);
    CHECK_LE(to_bytes, from_bytes);
    int64_t freed_bytes = from_bytes - to_bytes;
    int64_t freed_objects = from_objects - to_objects;
    if (kVerboseMode) {
      LOG(INFO) << "RecordFree:"
                << " from_bytes=" << from_bytes << " from_objects=" << from_objects
                << " unevac_from_bytes=" << unevac_from_bytes
                << " unevac_from_objects=" << unevac_from_objects
                << " to_bytes=" << to_bytes << " to_objects=" << to_objects
                << " freed_bytes=" << freed_bytes << " freed_objects=" << freed_objects
                << " from_space size=" << region_space_->FromSpaceSize()
                << " unevac_from_space size=" << region_space_->UnevacFromSpaceSize()
                << " to_space size=" << region_space_->ToSpaceSize();
      LOG(INFO) << "(before) num_bytes_allocated="
                << heap_->num_bytes_allocated_.LoadSequentiallyConsistent();
    }
    RecordFree(ObjectBytePair(freed_objects, freed_bytes));
    if (kVerboseMode) {
      LOG(INFO) << "(after) num_bytes_allocated="
                << heap_->num_bytes_allocated_.LoadSequentiallyConsistent();
    }
  }

  {
    TimingLogger::ScopedTiming split3("ComputeUnevacFromSpaceLiveRatio", GetTimings());
    ComputeUnevacFromSpaceLiveRatio();
  }

  {
    TimingLogger::ScopedTiming split4("ClearFromSpace", GetTimings());
    region_space_->ClearFromSpace();
  }

  {
    WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
    if (kUseBakerReadBarrier) {
      ClearBlackPtrs();
    }
    Sweep(false);
    SwapBitmaps();
    heap_->UnBindBitmaps();

    // Remove bitmaps for the immune spaces.
    while (!cc_bitmaps_.empty()) {
      accounting::ContinuousSpaceBitmap* cc_bitmap = cc_bitmaps_.back();
      cc_heap_bitmap_->RemoveContinuousSpaceBitmap(cc_bitmap);
      delete cc_bitmap;
      cc_bitmaps_.pop_back();
    }
    region_space_bitmap_ = nullptr;
  }

  CheckEmptyMarkStack();

  if (kVerboseMode) {
    LOG(INFO) << "GC end of ReclaimPhase";
  }
}

class ConcurrentCopyingComputeUnevacFromSpaceLiveRatioVisitor {
 public:
  explicit ConcurrentCopyingComputeUnevacFromSpaceLiveRatioVisitor(ConcurrentCopying* cc)
      : collector_(cc) {}
  void operator()(mirror::Object* ref) const SHARED_REQUIRES(Locks::mutator_lock_)
      SHARED_REQUIRES(Locks::heap_bitmap_lock_) {
    DCHECK(ref != nullptr);
    DCHECK(collector_->region_space_bitmap_->Test(ref)) << ref;
    DCHECK(collector_->region_space_->IsInUnevacFromSpace(ref)) << ref;
    if (kUseBakerReadBarrier) {
      DCHECK_EQ(ref->GetReadBarrierPointer(), ReadBarrier::BlackPtr()) << ref;
      // Clear the black ptr.
      ref->AtomicSetReadBarrierPointer(ReadBarrier::BlackPtr(), ReadBarrier::WhitePtr());
      DCHECK_EQ(ref->GetReadBarrierPointer(), ReadBarrier::WhitePtr()) << ref;
    }
    size_t obj_size = ref->SizeOf();
    size_t alloc_size = RoundUp(obj_size, space::RegionSpace::kAlignment);
    collector_->region_space_->AddLiveBytes(ref, alloc_size);
  }

 private:
  ConcurrentCopying* const collector_;
};

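// The per-region live-byte counts accumulated by the visitor above give
// each unevacuated region its live ratio, which the region space can
// consult when choosing evacuation candidates in a later cycle.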
// Compute how many bytes of live objects remain in the unevacuated
// from-space regions.
void ConcurrentCopying::ComputeUnevacFromSpaceLiveRatio() {
  region_space_->AssertAllRegionLiveBytesZeroOrCleared();
  ConcurrentCopyingComputeUnevacFromSpaceLiveRatioVisitor visitor(this);
  region_space_bitmap_->VisitMarkedRange(reinterpret_cast<uintptr_t>(region_space_->Begin()),
                                         reinterpret_cast<uintptr_t>(region_space_->Limit()),
                                         visitor);
}

// Assert the to-space invariant.
void ConcurrentCopying::AssertToSpaceInvariant(mirror::Object* obj, MemberOffset offset,
                                               mirror::Object* ref) {
  CHECK(heap_->collector_type_ == kCollectorTypeCC) << static_cast<size_t>(heap_->collector_type_);
  if (is_asserting_to_space_invariant_) {
    if (region_space_->IsInToSpace(ref)) {
      // OK.
      return;
    } else if (region_space_->IsInUnevacFromSpace(ref)) {
      CHECK(region_space_bitmap_->Test(ref)) << ref;
    } else if (region_space_->IsInFromSpace(ref)) {
      // Not OK. Do extra logging.
      if (obj != nullptr) {
        LogFromSpaceRefHolder(obj, offset);
      }
      ref->GetLockWord(false).Dump(LOG(INTERNAL_FATAL));
      CHECK(false) << "Found from-space ref " << ref << " " << PrettyTypeOf(ref);
    } else {
      AssertToSpaceInvariantInNonMovingSpace(obj, ref);
    }
  }
}

class RootPrinter {
 public:
  RootPrinter() { }

  template <class MirrorType>
  ALWAYS_INLINE void VisitRootIfNonNull(mirror::CompressedReference<MirrorType>* root)
      SHARED_REQUIRES(Locks::mutator_lock_) {
    if (!root->IsNull()) {
      VisitRoot(root);
    }
  }

  template <class MirrorType>
  void VisitRoot(mirror::Object** root)
      SHARED_REQUIRES(Locks::mutator_lock_) {
    LOG(INTERNAL_FATAL) << "root=" << root << " ref=" << *root;
  }

  template <class MirrorType>
  void VisitRoot(mirror::CompressedReference<MirrorType>* root)
      SHARED_REQUIRES(Locks::mutator_lock_) {
    LOG(INTERNAL_FATAL) << "root=" << root << " ref=" << root->AsMirrorPtr();
  }
};

void ConcurrentCopying::AssertToSpaceInvariant(GcRootSource* gc_root_source,
                                               mirror::Object* ref) {
  CHECK(heap_->collector_type_ == kCollectorTypeCC) << static_cast<size_t>(heap_->collector_type_);
  if (is_asserting_to_space_invariant_) {
    if (region_space_->IsInToSpace(ref)) {
      // OK.
      return;
    } else if (region_space_->IsInUnevacFromSpace(ref)) {
      CHECK(region_space_bitmap_->Test(ref)) << ref;
    } else if (region_space_->IsInFromSpace(ref)) {
      // Not OK. Do extra logging.
      if (gc_root_source == nullptr) {
        // No info.
      } else if (gc_root_source->HasArtField()) {
        ArtField* field = gc_root_source->GetArtField();
        LOG(INTERNAL_FATAL) << "gc root in field " << field << " " << PrettyField(field);
        RootPrinter root_printer;
        field->VisitRoots(root_printer);
      } else if (gc_root_source->HasArtMethod()) {
        ArtMethod* method = gc_root_source->GetArtMethod();
        LOG(INTERNAL_FATAL) << "gc root in method " << method << " " << PrettyMethod(method);
        RootPrinter root_printer;
        method->VisitRoots(root_printer, sizeof(void*));
      }
      ref->GetLockWord(false).Dump(LOG(INTERNAL_FATAL));
      region_space_->DumpNonFreeRegions(LOG(INTERNAL_FATAL));
      PrintFileToLog("/proc/self/maps", LogSeverity::INTERNAL_FATAL);
      MemMap::DumpMaps(LOG(INTERNAL_FATAL), true);
      CHECK(false) << "Found from-space ref " << ref << " " << PrettyTypeOf(ref);
    } else {
      AssertToSpaceInvariantInNonMovingSpace(nullptr, ref);
    }
  }
}

void ConcurrentCopying::LogFromSpaceRefHolder(mirror::Object* obj, MemberOffset offset) {
  if (kUseBakerReadBarrier) {
    LOG(INFO) << "holder=" << obj << " " << PrettyTypeOf(obj)
              << " holder rb_ptr=" << obj->GetReadBarrierPointer();
  } else {
    LOG(INFO) << "holder=" << obj << " " << PrettyTypeOf(obj);
  }
  if (region_space_->IsInFromSpace(obj)) {
    LOG(INFO) << "holder is in the from-space.";
  } else if (region_space_->IsInToSpace(obj)) {
    LOG(INFO) << "holder is in the to-space.";
  } else if (region_space_->IsInUnevacFromSpace(obj)) {
    LOG(INFO) << "holder is in the unevac from-space.";
    if (region_space_bitmap_->Test(obj)) {
      LOG(INFO) << "holder is marked in the region space bitmap.";
    } else {
      LOG(INFO) << "holder is not marked in the region space bitmap.";
    }
  } else {
    // In a non-moving space.
    if (immune_region_.ContainsObject(obj)) {
      LOG(INFO) << "holder is in the image or the zygote space.";
      accounting::ContinuousSpaceBitmap* cc_bitmap =
          cc_heap_bitmap_->GetContinuousSpaceBitmap(obj);
      CHECK(cc_bitmap != nullptr)
          << "An immune space object must have a bitmap.";
      if (cc_bitmap->Test(obj)) {
        LOG(INFO) << "holder is marked in the bitmap.";
      } else {
        LOG(INFO) << "holder is NOT marked in the bitmap.";
      }
    } else {
      LOG(INFO) << "holder is in a non-moving (or main) space.";
      accounting::ContinuousSpaceBitmap* mark_bitmap =
          heap_mark_bitmap_->GetContinuousSpaceBitmap(obj);
      accounting::LargeObjectBitmap* los_bitmap =
          heap_mark_bitmap_->GetLargeObjectBitmap(obj);
      CHECK(los_bitmap != nullptr) << "LOS bitmap covers the entire address range";
      bool is_los = mark_bitmap == nullptr;
      if (!is_los && mark_bitmap->Test(obj)) {
        LOG(INFO) << "holder is marked in the mark bitmap.";
      } else if (is_los && los_bitmap->Test(obj)) {
        LOG(INFO) << "holder is marked in the los bitmap.";
      } else {
        // If ref is on the allocation stack, then it is considered
        // marked/alive (but not necessarily on the live stack).
        if (IsOnAllocStack(obj)) {
          LOG(INFO) << "holder is on the alloc stack.";
        } else {
          LOG(INFO) << "holder is not marked or on the alloc stack.";
        }
      }
    }
  }
  LOG(INFO) << "offset=" << offset.SizeValue();
}

void ConcurrentCopying::AssertToSpaceInvariantInNonMovingSpace(mirror::Object* obj,
                                                               mirror::Object* ref) {
  // The ref is in a non-moving space. Check that it is marked. Note that obj
  // (the holder) may be null when this is called for a GC root.
  if (immune_region_.ContainsObject(ref)) {
    accounting::ContinuousSpaceBitmap* cc_bitmap =
        cc_heap_bitmap_->GetContinuousSpaceBitmap(ref);
    CHECK(cc_bitmap != nullptr)
        << "An immune space ref must have a bitmap. " << ref;
    if (kUseBakerReadBarrier) {
      CHECK(cc_bitmap->Test(ref))
          << "Unmarked immune space ref. obj=" << obj << " rb_ptr="
          << (obj != nullptr ? obj->GetReadBarrierPointer() : nullptr) << " ref=" << ref;
    } else {
      CHECK(cc_bitmap->Test(ref))
          << "Unmarked immune space ref. obj=" << obj << " ref=" << ref;
    }
  } else {
    accounting::ContinuousSpaceBitmap* mark_bitmap =
        heap_mark_bitmap_->GetContinuousSpaceBitmap(ref);
    accounting::LargeObjectBitmap* los_bitmap =
        heap_mark_bitmap_->GetLargeObjectBitmap(ref);
    CHECK(los_bitmap != nullptr) << "LOS bitmap covers the entire address range";
    bool is_los = mark_bitmap == nullptr;
    if ((!is_los && mark_bitmap->Test(ref)) ||
        (is_los && los_bitmap->Test(ref))) {
      // OK.
    } else {
      // If ref is on the allocation stack, then it may not be
      // marked live, but it is considered marked/alive (though not
      // necessarily on the live stack).
      CHECK(IsOnAllocStack(ref)) << "Unmarked ref that's not on the allocation stack. "
                                 << "obj=" << obj << " ref=" << ref;
    }
  }
}

// Used to scan ref fields of an object.
class ConcurrentCopyingRefFieldsVisitor {
 public:
  explicit ConcurrentCopyingRefFieldsVisitor(ConcurrentCopying* collector)
      : collector_(collector) {}

  void operator()(mirror::Object* obj, MemberOffset offset, bool /* is_static */)
      const ALWAYS_INLINE SHARED_REQUIRES(Locks::mutator_lock_)
      SHARED_REQUIRES(Locks::heap_bitmap_lock_) {
    collector_->Process(obj, offset);
  }

  void operator()(mirror::Class* klass, mirror::Reference* ref) const
      SHARED_REQUIRES(Locks::mutator_lock_) ALWAYS_INLINE {
    CHECK(klass->IsTypeOfReferenceClass());
    collector_->DelayReferenceReferent(klass, ref);
  }

  void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const
      SHARED_REQUIRES(Locks::mutator_lock_) {
    if (!root->IsNull()) {
      VisitRoot(root);
    }
  }

  void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
      SHARED_REQUIRES(Locks::mutator_lock_) {
    collector_->MarkRoot(root);
  }

 private:
  ConcurrentCopying* const collector_;
};

// Scan ref fields of an object.
void ConcurrentCopying::Scan(mirror::Object* to_ref) {
  DCHECK(!region_space_->IsInFromSpace(to_ref));
  ConcurrentCopyingRefFieldsVisitor visitor(this);
  to_ref->VisitReferences(visitor, visitor);
}

// Process a field.
inline void ConcurrentCopying::Process(mirror::Object* obj, MemberOffset offset) {
  mirror::Object* ref = obj->GetFieldObject<
      mirror::Object, kVerifyNone, kWithoutReadBarrier, false>(offset);
  if (ref == nullptr || region_space_->IsInToSpace(ref)) {
    return;
  }
  mirror::Object* to_ref = Mark(ref);
  if (to_ref == ref) {
    return;
  }
  // This may fail if the mutator writes to the field at the same time, which is OK.
  mirror::Object* expected_ref = ref;
  mirror::Object* new_ref = to_ref;
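  // The update below follows the usual race-tolerant pattern, roughly:
  //   do {
  //     if (field != expected_ref) break;  // A mutator already updated it.
  //   } while (!weak_cas(field, expected_ref, new_ref));
  // A weak CAS may fail spuriously, so the field is re-read on every
  // iteration; if it no longer holds the from-space ref, a mutator has
  // stored a new value and this update must not clobber it.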
  do {
    if (expected_ref !=
        obj->GetFieldObject<mirror::Object, kVerifyNone, kWithoutReadBarrier, false>(offset)) {
      // It was updated by the mutator.
      break;
    }
  } while (!obj->CasFieldWeakSequentiallyConsistentObjectWithoutWriteBarrier<
      false, false, kVerifyNone>(offset, expected_ref, new_ref));
}

// Process roots given as raw object pointers: mark each root and, if the
// referent moved, update the root to point to the to-space copy.
void ConcurrentCopying::VisitRoots(
    mirror::Object*** roots, size_t count, const RootInfo& info ATTRIBUTE_UNUSED) {
  for (size_t i = 0; i < count; ++i) {
    mirror::Object** root = roots[i];
    mirror::Object* ref = *root;
    if (ref == nullptr || region_space_->IsInToSpace(ref)) {
      continue;
    }
    mirror::Object* to_ref = Mark(ref);
    if (to_ref == ref) {
      continue;
    }
    Atomic<mirror::Object*>* addr = reinterpret_cast<Atomic<mirror::Object*>*>(root);
    mirror::Object* expected_ref = ref;
    mirror::Object* new_ref = to_ref;
    do {
      if (expected_ref != addr->LoadRelaxed()) {
        // It was updated by the mutator.
        break;
      }
    } while (!addr->CompareExchangeWeakSequentiallyConsistent(expected_ref, new_ref));
  }
}

void ConcurrentCopying::MarkRoot(mirror::CompressedReference<mirror::Object>* root) {
  DCHECK(!root->IsNull());
  mirror::Object* const ref = root->AsMirrorPtr();
  if (region_space_->IsInToSpace(ref)) {
    return;
  }
  mirror::Object* to_ref = Mark(ref);
  if (to_ref != ref) {
    auto* addr = reinterpret_cast<Atomic<mirror::CompressedReference<mirror::Object>>*>(root);
    auto expected_ref = mirror::CompressedReference<mirror::Object>::FromMirrorPtr(ref);
    auto new_ref = mirror::CompressedReference<mirror::Object>::FromMirrorPtr(to_ref);
    // If the CAS fails, then it was updated by the mutator.
    do {
      if (ref != addr->LoadRelaxed().AsMirrorPtr()) {
        // It was updated by the mutator.
        break;
      }
    } while (!addr->CompareExchangeWeakSequentiallyConsistent(expected_ref, new_ref));
  }
}

void ConcurrentCopying::VisitRoots(
    mirror::CompressedReference<mirror::Object>** roots, size_t count,
    const RootInfo& info ATTRIBUTE_UNUSED) {
  for (size_t i = 0; i < count; ++i) {
    mirror::CompressedReference<mirror::Object>* const root = roots[i];
    if (!root->IsNull()) {
      MarkRoot(root);
    }
  }
}

// Fill the given memory block with a dummy object. Used to fill in
// copies of objects that were lost in the race.
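// The filler must itself be a parseable object so that bitmap and heap
// walkers can skip over it: an int array can represent any sufficiently
// large block via its length field, while java.lang.Object covers the
// minimum-size block that cannot hold an int array header.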
void ConcurrentCopying::FillWithDummyObject(mirror::Object* dummy_obj, size_t byte_size) {
  CHECK_ALIGNED(byte_size, kObjectAlignment);
  memset(dummy_obj, 0, byte_size);
  mirror::Class* int_array_class = mirror::IntArray::GetArrayClass();
  CHECK(int_array_class != nullptr);
  AssertToSpaceInvariant(nullptr, MemberOffset(0), int_array_class);
  size_t component_size = int_array_class->GetComponentSize();
  CHECK_EQ(component_size, sizeof(int32_t));
  size_t data_offset = mirror::Array::DataOffset(component_size).SizeValue();
  if (data_offset > byte_size) {
    // An int array is too big. Use java.lang.Object.
    mirror::Class* java_lang_Object = WellKnownClasses::ToClass(WellKnownClasses::java_lang_Object);
    AssertToSpaceInvariant(nullptr, MemberOffset(0), java_lang_Object);
    CHECK_EQ(byte_size, java_lang_Object->GetObjectSize());
    dummy_obj->SetClass(java_lang_Object);
    CHECK_EQ(byte_size, dummy_obj->SizeOf());
  } else {
    // Use an int array.
    dummy_obj->SetClass(int_array_class);
    CHECK(dummy_obj->IsArrayInstance());
    int32_t length = (byte_size - data_offset) / component_size;
    dummy_obj->AsArray()->SetLength(length);
    CHECK_EQ(dummy_obj->AsArray()->GetLength(), length)
        << "byte_size=" << byte_size << " length=" << length
        << " component_size=" << component_size << " data_offset=" << data_offset;
    CHECK_EQ(byte_size, dummy_obj->SizeOf())
        << "byte_size=" << byte_size << " length=" << length
        << " component_size=" << component_size << " data_offset=" << data_offset;
  }
}

// Reuse the memory blocks that were copies of objects lost in the race.
mirror::Object* ConcurrentCopying::AllocateInSkippedBlock(size_t alloc_size) {
  // Try to reuse the blocks that went unused due to CAS failures.
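  // skipped_blocks_map_ maps block byte size -> block address, so
  // lower_bound(alloc_size) is a best-fit lookup: it finds the smallest
  // recorded block that is at least alloc_size bytes.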
  CHECK_ALIGNED(alloc_size, space::RegionSpace::kAlignment);
  Thread* self = Thread::Current();
  size_t min_object_size = RoundUp(sizeof(mirror::Object), space::RegionSpace::kAlignment);
  MutexLock mu(self, skipped_blocks_lock_);
  auto it = skipped_blocks_map_.lower_bound(alloc_size);
  if (it == skipped_blocks_map_.end()) {
    // Not found.
    return nullptr;
  }
  {
    size_t byte_size = it->first;
    CHECK_GE(byte_size, alloc_size);
    if (byte_size > alloc_size && byte_size - alloc_size < min_object_size) {
      // If remainder would be too small for a dummy object, retry with a larger request size.
      it = skipped_blocks_map_.lower_bound(alloc_size + min_object_size);
      if (it == skipped_blocks_map_.end()) {
        // Not found.
        return nullptr;
      }
      CHECK_ALIGNED(it->first - alloc_size, space::RegionSpace::kAlignment);
      CHECK_GE(it->first - alloc_size, min_object_size)
          << "byte_size=" << byte_size << " it->first=" << it->first
          << " alloc_size=" << alloc_size;
    }
  }
  // Found a block.
  CHECK(it != skipped_blocks_map_.end());
  size_t byte_size = it->first;
  uint8_t* addr = it->second;
  CHECK_GE(byte_size, alloc_size);
  CHECK(region_space_->IsInToSpace(reinterpret_cast<mirror::Object*>(addr)));
  CHECK_ALIGNED(byte_size, space::RegionSpace::kAlignment);
  if (kVerboseMode) {
    LOG(INFO) << "Reusing skipped bytes : " << reinterpret_cast<void*>(addr) << ", " << byte_size;
  }
  skipped_blocks_map_.erase(it);
  memset(addr, 0, byte_size);
  if (byte_size > alloc_size) {
    // Return the remainder to the map.
    CHECK_ALIGNED(byte_size - alloc_size, space::RegionSpace::kAlignment);
    CHECK_GE(byte_size - alloc_size, min_object_size);
    FillWithDummyObject(reinterpret_cast<mirror::Object*>(addr + alloc_size),
                        byte_size - alloc_size);
    CHECK(region_space_->IsInToSpace(reinterpret_cast<mirror::Object*>(addr + alloc_size)));
    skipped_blocks_map_.insert(std::make_pair(byte_size - alloc_size, addr + alloc_size));
  }
  return reinterpret_cast<mirror::Object*>(addr);
}

mirror::Object* ConcurrentCopying::Copy(mirror::Object* from_ref) {
  DCHECK(region_space_->IsInFromSpace(from_ref));
  // No read barrier to avoid nested RB that might violate the to-space
  // invariant. Note that from_ref is a from space ref so the SizeOf()
  // call will access the from-space meta objects, but it's ok and necessary.
  size_t obj_size = from_ref->SizeOf<kDefaultVerifyFlags, kWithoutReadBarrier>();
  size_t region_space_alloc_size = RoundUp(obj_size, space::RegionSpace::kAlignment);
  size_t region_space_bytes_allocated = 0U;
  size_t non_moving_space_bytes_allocated = 0U;
  size_t bytes_allocated = 0U;
  size_t dummy;
  mirror::Object* to_ref = region_space_->AllocNonvirtual<true>(
      region_space_alloc_size, &region_space_bytes_allocated, nullptr, &dummy);
  bytes_allocated = region_space_bytes_allocated;
  if (to_ref != nullptr) {
    DCHECK_EQ(region_space_alloc_size, region_space_bytes_allocated);
  }
  bool fall_back_to_non_moving = false;
  if (UNLIKELY(to_ref == nullptr)) {
    // Failed to allocate in the region space. Try the skipped blocks.
    to_ref = AllocateInSkippedBlock(region_space_alloc_size);
    if (to_ref != nullptr) {
      // Succeeded in allocating from a skipped block.
      if (heap_->use_tlab_) {
        // This is necessary for the tlab case as it's not accounted in the space.
        region_space_->RecordAlloc(to_ref);
      }
      bytes_allocated = region_space_alloc_size;
    } else {
      // Fall back to the non-moving space.
      fall_back_to_non_moving = true;
      if (kVerboseMode) {
        LOG(INFO) << "Out of memory in the to-space. Fall back to non-moving. skipped_bytes="
                  << to_space_bytes_skipped_.LoadSequentiallyConsistent()
                  << " skipped_objects=" << to_space_objects_skipped_.LoadSequentiallyConsistent();
      }
      to_ref = heap_->non_moving_space_->Alloc(Thread::Current(), obj_size,
                                               &non_moving_space_bytes_allocated, nullptr, &dummy);
      CHECK(to_ref != nullptr) << "Fall-back non-moving space allocation failed";
      bytes_allocated = non_moving_space_bytes_allocated;
      // Mark it in the mark bitmap.
      accounting::ContinuousSpaceBitmap* mark_bitmap =
          heap_mark_bitmap_->GetContinuousSpaceBitmap(to_ref);
      CHECK(mark_bitmap != nullptr);
      CHECK(!mark_bitmap->AtomicTestAndSet(to_ref));
    }
  }
  DCHECK(to_ref != nullptr);

  // Attempt to install the forward pointer. This is in a loop as the
  // lock word atomic write can fail.
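  // Multiple threads (the GC and mutators hitting read barriers) may race
  // to copy the same object; the winner is whichever thread CASes the
  // forwarding address into the from-space object's lock word first. A
  // loser discards its copy and adopts the winner's, as handled below.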
  while (true) {
    // Copy the object. TODO: copy only the lockword in the second iteration and on?
    memcpy(to_ref, from_ref, obj_size);

    LockWord old_lock_word = to_ref->GetLockWord(false);

    if (old_lock_word.GetState() == LockWord::kForwardingAddress) {
      // Lost the race. Another thread (either GC or mutator) stored
      // the forwarding pointer first. Make the lost copy (to_ref)
      // look like a valid but dead (dummy) object and keep it for
      // future reuse.
      FillWithDummyObject(to_ref, bytes_allocated);
      if (!fall_back_to_non_moving) {
        DCHECK(region_space_->IsInToSpace(to_ref));
        if (bytes_allocated > space::RegionSpace::kRegionSize) {
          // Free the large alloc.
          region_space_->FreeLarge(to_ref, bytes_allocated);
        } else {
          // Record the lost copy for later reuse.
          heap_->num_bytes_allocated_.FetchAndAddSequentiallyConsistent(bytes_allocated);
          to_space_bytes_skipped_.FetchAndAddSequentiallyConsistent(bytes_allocated);
          to_space_objects_skipped_.FetchAndAddSequentiallyConsistent(1);
          MutexLock mu(Thread::Current(), skipped_blocks_lock_);
          skipped_blocks_map_.insert(std::make_pair(bytes_allocated,
                                                    reinterpret_cast<uint8_t*>(to_ref)));
        }
      } else {
        DCHECK(heap_->non_moving_space_->HasAddress(to_ref));
        DCHECK_EQ(bytes_allocated, non_moving_space_bytes_allocated);
        // Free the non-moving-space chunk.
        accounting::ContinuousSpaceBitmap* mark_bitmap =
            heap_mark_bitmap_->GetContinuousSpaceBitmap(to_ref);
        CHECK(mark_bitmap != nullptr);
        CHECK(mark_bitmap->Clear(to_ref));
        heap_->non_moving_space_->Free(Thread::Current(), to_ref);
      }

      // Get the winner's forward ptr.
      mirror::Object* lost_fwd_ptr = to_ref;
      to_ref = reinterpret_cast<mirror::Object*>(old_lock_word.ForwardingAddress());
      CHECK(to_ref != nullptr);
      CHECK_NE(to_ref, lost_fwd_ptr);
      CHECK(region_space_->IsInToSpace(to_ref) || heap_->non_moving_space_->HasAddress(to_ref));
      CHECK_NE(to_ref->GetLockWord(false).GetState(), LockWord::kForwardingAddress);
      return to_ref;
    }

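    // Publish the copy as gray (marked but not yet scanned) before the
    // forwarding address becomes visible. After the CAS below succeeds,
    // the copy is pushed onto the mark stack so its reference fields get
    // processed.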
    // Set the gray ptr.
    if (kUseBakerReadBarrier) {
      to_ref->SetReadBarrierPointer(ReadBarrier::GrayPtr());
    }

    LockWord new_lock_word = LockWord::FromForwardingAddress(reinterpret_cast<size_t>(to_ref));

    // Try to atomically write the fwd ptr.
    bool success = from_ref->CasLockWordWeakSequentiallyConsistent(old_lock_word, new_lock_word);
    if (LIKELY(success)) {
      // The CAS succeeded.
      objects_moved_.FetchAndAddSequentiallyConsistent(1);
      bytes_moved_.FetchAndAddSequentiallyConsistent(region_space_alloc_size);
      if (LIKELY(!fall_back_to_non_moving)) {
        DCHECK(region_space_->IsInToSpace(to_ref));
      } else {
        DCHECK(heap_->non_moving_space_->HasAddress(to_ref));
        DCHECK_EQ(bytes_allocated, non_moving_space_bytes_allocated);
      }
      if (kUseBakerReadBarrier) {
        DCHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr());
      }
      DCHECK(GetFwdPtr(from_ref) == to_ref);
      CHECK_NE(to_ref->GetLockWord(false).GetState(), LockWord::kForwardingAddress);
      PushOntoMarkStack(to_ref);
      return to_ref;
    } else {
      // The CAS failed. It may have lost the race or may have failed
      // due to monitor/hashcode ops. Either way, retry.
    }
  }
}

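// Query-only counterpart of Mark(): returns the to-space ref if from_ref
// is already marked (possibly from_ref itself) and nullptr otherwise.
// Unlike Mark(), this never copies or marks anything.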
mirror::Object* ConcurrentCopying::IsMarked(mirror::Object* from_ref) {
  DCHECK(from_ref != nullptr);
  space::RegionSpace::RegionType rtype = region_space_->GetRegionType(from_ref);
  if (rtype == space::RegionSpace::RegionType::kRegionTypeToSpace) {
    // It's already marked.
    return from_ref;
  }
  mirror::Object* to_ref;
  if (rtype == space::RegionSpace::RegionType::kRegionTypeFromSpace) {
    to_ref = GetFwdPtr(from_ref);
    DCHECK(to_ref == nullptr || region_space_->IsInToSpace(to_ref) ||
           heap_->non_moving_space_->HasAddress(to_ref))
        << "from_ref=" << from_ref << " to_ref=" << to_ref;
  } else if (rtype == space::RegionSpace::RegionType::kRegionTypeUnevacFromSpace) {
    if (region_space_bitmap_->Test(from_ref)) {
      to_ref = from_ref;
    } else {
      to_ref = nullptr;
    }
  } else {
    // from_ref is in a non-moving space.
    if (immune_region_.ContainsObject(from_ref)) {
      accounting::ContinuousSpaceBitmap* cc_bitmap =
          cc_heap_bitmap_->GetContinuousSpaceBitmap(from_ref);
      DCHECK(cc_bitmap != nullptr)
          << "An immune space object must have a bitmap";
      if (kIsDebugBuild) {
        DCHECK(heap_mark_bitmap_->GetContinuousSpaceBitmap(from_ref)->Test(from_ref))
            << "Immune space object must be already marked";
      }
      if (cc_bitmap->Test(from_ref)) {
        // Already marked.
        to_ref = from_ref;
      } else {
        // Not marked.
        to_ref = nullptr;
      }
    } else {
      // Non-immune non-moving space. Use the mark bitmap.
      accounting::ContinuousSpaceBitmap* mark_bitmap =
          heap_mark_bitmap_->GetContinuousSpaceBitmap(from_ref);
      accounting::LargeObjectBitmap* los_bitmap =
          heap_mark_bitmap_->GetLargeObjectBitmap(from_ref);
      CHECK(los_bitmap != nullptr) << "LOS bitmap covers the entire address range";
      bool is_los = mark_bitmap == nullptr;
      if (!is_los && mark_bitmap->Test(from_ref)) {
        // Already marked.
        to_ref = from_ref;
      } else if (is_los && los_bitmap->Test(from_ref)) {
        // Already marked in LOS.
        to_ref = from_ref;
      } else {
        // Not marked.
        if (IsOnAllocStack(from_ref)) {
          // If on the allocation stack, it's considered marked.
          to_ref = from_ref;
        } else {
          // Not marked.
          to_ref = nullptr;
        }
      }
    }
  }
  return to_ref;
}

bool ConcurrentCopying::IsOnAllocStack(mirror::Object* ref) {
  QuasiAtomic::ThreadFenceAcquire();
  accounting::ObjectStack* alloc_stack = GetAllocationStack();
  return alloc_stack->Contains(ref);
}

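// Read barrier slow path. The Baker states used below: a white object is
// unmarked, a gray object is marked but not yet scanned, and a black
// object is marked and scanned. Mark() advances objects from white to
// gray (pushing them onto the mark stack for scanning), and evacuates
// from-space objects by installing a forwarding address via Copy().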
mirror::Object* ConcurrentCopying::Mark(mirror::Object* from_ref) {
  if (from_ref == nullptr) {
    return nullptr;
  }
  DCHECK(heap_->collector_type_ == kCollectorTypeCC);
  if (kUseBakerReadBarrier && !is_active_) {
    // In the lock word forward address state, the read barrier bits
    // in the lock word are part of the stored forwarding address and
    // invalid. This is usually OK as the from-space copy of objects
    // aren't accessed by mutators due to the to-space
    // invariant. However, during the dex2oat image writing relocation
    // and the zygote compaction, objects can be in the forward
    // address state (to store the forward/relocation addresses) and
    // they can still be accessed and the invalid read barrier bits
    // are consulted. If they look like gray but aren't really, the
    // read barriers slow path can trigger when it shouldn't. To guard
    // against this, return here if the CC collector isn't running.
    return from_ref;
  }
  DCHECK(region_space_ != nullptr) << "Read barrier slow path taken when CC isn't running?";
  space::RegionSpace::RegionType rtype = region_space_->GetRegionType(from_ref);
  if (rtype == space::RegionSpace::RegionType::kRegionTypeToSpace) {
    // It's already marked.
    return from_ref;
  }
  mirror::Object* to_ref;
  if (rtype == space::RegionSpace::RegionType::kRegionTypeFromSpace) {
    to_ref = GetFwdPtr(from_ref);
    if (kUseBakerReadBarrier) {
      DCHECK(to_ref != ReadBarrier::GrayPtr()) << "from_ref=" << from_ref << " to_ref=" << to_ref;
    }
    if (to_ref == nullptr) {
      // It isn't marked yet. Mark it by copying it to the to-space.
      to_ref = Copy(from_ref);
    }
    DCHECK(region_space_->IsInToSpace(to_ref) || heap_->non_moving_space_->HasAddress(to_ref))
        << "from_ref=" << from_ref << " to_ref=" << to_ref;
  } else if (rtype == space::RegionSpace::RegionType::kRegionTypeUnevacFromSpace) {
    // This may or may not succeed, which is ok.
    if (kUseBakerReadBarrier) {
      from_ref->AtomicSetReadBarrierPointer(ReadBarrier::WhitePtr(), ReadBarrier::GrayPtr());
    }
    if (region_space_bitmap_->AtomicTestAndSet(from_ref)) {
      // Already marked.
      to_ref = from_ref;
    } else {
      // Newly marked.
      to_ref = from_ref;
      if (kUseBakerReadBarrier) {
        DCHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr());
      }
      PushOntoMarkStack(to_ref);
    }
  } else {
    // from_ref is in a non-moving space.
    DCHECK(!region_space_->HasAddress(from_ref)) << from_ref;
    if (immune_region_.ContainsObject(from_ref)) {
      accounting::ContinuousSpaceBitmap* cc_bitmap =
          cc_heap_bitmap_->GetContinuousSpaceBitmap(from_ref);
      DCHECK(cc_bitmap != nullptr)
          << "An immune space object must have a bitmap";
      if (kIsDebugBuild) {
        DCHECK(heap_mark_bitmap_->GetContinuousSpaceBitmap(from_ref)->Test(from_ref))
            << "Immune space object must be already marked";
      }
      // This may or may not succeed, which is ok.
      if (kUseBakerReadBarrier) {
        from_ref->AtomicSetReadBarrierPointer(ReadBarrier::WhitePtr(), ReadBarrier::GrayPtr());
      }
      if (cc_bitmap->AtomicTestAndSet(from_ref)) {
        // Already marked.
        to_ref = from_ref;
      } else {
        // Newly marked.
        to_ref = from_ref;
        if (kUseBakerReadBarrier) {
          DCHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr());
        }
        PushOntoMarkStack(to_ref);
      }
    } else {
      // Use the mark bitmap.
      accounting::ContinuousSpaceBitmap* mark_bitmap =
          heap_mark_bitmap_->GetContinuousSpaceBitmap(from_ref);
      accounting::LargeObjectBitmap* los_bitmap =
          heap_mark_bitmap_->GetLargeObjectBitmap(from_ref);
      CHECK(los_bitmap != nullptr) << "LOS bitmap covers the entire address range";
      bool is_los = mark_bitmap == nullptr;
      if (!is_los && mark_bitmap->Test(from_ref)) {
        // Already marked.
        to_ref = from_ref;
        if (kUseBakerReadBarrier) {
          DCHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr() ||
                 to_ref->GetReadBarrierPointer() == ReadBarrier::BlackPtr());
        }
      } else if (is_los && los_bitmap->Test(from_ref)) {
        // Already marked in LOS.
        to_ref = from_ref;
        if (kUseBakerReadBarrier) {
          DCHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr() ||
                 to_ref->GetReadBarrierPointer() == ReadBarrier::BlackPtr());
        }
      } else {
        // Not marked.
        if (IsOnAllocStack(from_ref)) {
          // If it's on the allocation stack, it's considered marked. Keep it white.
          to_ref = from_ref;
          // Objects on the allocation stack need not be marked.
          if (!is_los) {
            DCHECK(!mark_bitmap->Test(to_ref));
          } else {
            DCHECK(!los_bitmap->Test(to_ref));
          }
          if (kUseBakerReadBarrier) {
            DCHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::WhitePtr());
          }
        } else {
          // Not marked or on the allocation stack. Try to mark it.
          // This may or may not succeed, which is ok.
          if (kUseBakerReadBarrier) {
            from_ref->AtomicSetReadBarrierPointer(ReadBarrier::WhitePtr(), ReadBarrier::GrayPtr());
          }
          if (!is_los && mark_bitmap->AtomicTestAndSet(from_ref)) {
            // Already marked.
            to_ref = from_ref;
          } else if (is_los && los_bitmap->AtomicTestAndSet(from_ref)) {
            // Already marked in LOS.
            to_ref = from_ref;
          } else {
            // Newly marked.
            to_ref = from_ref;
            if (kUseBakerReadBarrier) {
              DCHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr());
            }
            PushOntoMarkStack(to_ref);
          }
        }
      }
    }
  }
  return to_ref;
}

void ConcurrentCopying::FinishPhase() {
  {
    MutexLock mu(Thread::Current(), mark_stack_lock_);
    CHECK_EQ(pooled_mark_stacks_.size(), kMarkStackPoolSize);
  }
  region_space_ = nullptr;
  {
    MutexLock mu(Thread::Current(), skipped_blocks_lock_);
    skipped_blocks_map_.clear();
  }
  WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
  heap_->ClearMarkedObjects();
}

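// Note on the fences in IsMarkedHeapReference() below: the release fence
// before the assignment is intended to make the copied object's contents
// visible before the forwarded reference is published to the field.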
bool ConcurrentCopying::IsMarkedHeapReference(mirror::HeapReference<mirror::Object>* field) {
  mirror::Object* from_ref = field->AsMirrorPtr();
  mirror::Object* to_ref = IsMarked(from_ref);
  if (to_ref == nullptr) {
    return false;
  }
  if (from_ref != to_ref) {
    QuasiAtomic::ThreadFenceRelease();
    field->Assign(to_ref);
    QuasiAtomic::ThreadFenceSequentiallyConsistent();
  }
  return true;
}

mirror::Object* ConcurrentCopying::MarkObject(mirror::Object* from_ref) {
  return Mark(from_ref);
}

void ConcurrentCopying::DelayReferenceReferent(mirror::Class* klass, mirror::Reference* reference) {
  heap_->GetReferenceProcessor()->DelayReferenceReferent(klass, reference, this);
}

void ConcurrentCopying::ProcessReferences(Thread* self) {
  TimingLogger::ScopedTiming split("ProcessReferences", GetTimings());
  // We don't really need to lock the heap bitmap lock as we use CAS to mark in bitmaps.
  WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
  GetHeap()->GetReferenceProcessor()->ProcessReferences(
      true /*concurrent*/, GetTimings(), GetCurrentIteration()->GetClearSoftReferences(), this);
}

void ConcurrentCopying::RevokeAllThreadLocalBuffers() {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  region_space_->RevokeAllThreadLocalBuffers();
}

}  // namespace collector
}  // namespace gc
}  // namespace art