mark_sweep.cc revision 3e5cf305db800b2989ad57b7cde8fb3cc9fa1b9e
1/*
2 * Copyright (C) 2011 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 *      http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17#include "mark_sweep.h"
18
19#include <functional>
20#include <numeric>
21#include <climits>
22#include <vector>
23
24#include "base/bounded_fifo.h"
25#include "base/logging.h"
26#include "base/macros.h"
27#include "base/mutex-inl.h"
28#include "base/timing_logger.h"
29#include "gc/accounting/card_table-inl.h"
30#include "gc/accounting/heap_bitmap-inl.h"
31#include "gc/accounting/mod_union_table.h"
32#include "gc/accounting/space_bitmap-inl.h"
33#include "gc/heap.h"
34#include "gc/reference_processor.h"
35#include "gc/space/image_space.h"
36#include "gc/space/large_object_space.h"
37#include "gc/space/space-inl.h"
38#include "mark_sweep-inl.h"
39#include "mirror/art_field-inl.h"
40#include "mirror/object-inl.h"
41#include "runtime.h"
42#include "scoped_thread_state_change.h"
43#include "thread-inl.h"
44#include "thread_list.h"
45
46using ::art::mirror::ArtField;
47using ::art::mirror::Class;
48using ::art::mirror::Object;
49using ::art::mirror::ObjectArray;
50
51namespace art {
52namespace gc {
53namespace collector {
54
55// Performance options.
56static constexpr bool kUseRecursiveMark = false;
57static constexpr bool kUseMarkStackPrefetch = true;
58static constexpr size_t kSweepArrayChunkFreeSize = 1024;
59static constexpr bool kPreCleanCards = true;
60
61// Parallelism options.
62static constexpr bool kParallelCardScan = true;
63static constexpr bool kParallelRecursiveMark = true;
64// Don't attempt to parallelize mark stack processing unless the mark stack has at least
65// kMinimumParallelMarkStackSize elements. This is temporary until we reduce the overhead caused
66// by allocating tasks, etc. Without this limit, ProcessReferences can add overhead since we may
67// end up doing many calls of ProcessMarkStack with very small mark stacks.
68static constexpr size_t kMinimumParallelMarkStackSize = 128;
69static constexpr bool kParallelProcessMarkStack = true;
70
71// Profiling and information flags.
72static constexpr bool kProfileLargeObjects = false;
73static constexpr bool kMeasureOverhead = false;
74static constexpr bool kCountTasks = false;
75static constexpr bool kCountJavaLangRefs = false;
76static constexpr bool kCountMarkedObjects = false;
77
78// Turn off kCheckLocks when profiling the GC since it slows the GC down by up to 40%.
79static constexpr bool kCheckLocks = kDebugLocking;
80static constexpr bool kVerifyRootsMarked = kIsDebugBuild;
81
82// If true, revoke the rosalloc thread-local buffers at the
83// checkpoint, as opposed to during the pause.
84static constexpr bool kRevokeRosAllocThreadLocalBuffersAtCheckpoint = true;
85
86void MarkSweep::BindBitmaps() {
87  timings_.StartSplit("BindBitmaps");
88  WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
89  // Mark all of the spaces we never collect as immune.
90  for (const auto& space : GetHeap()->GetContinuousSpaces()) {
91    if (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyNeverCollect) {
92      CHECK(immune_region_.AddContinuousSpace(space)) << "Failed to add space " << *space;
93    }
94  }
95  timings_.EndSplit();
96}
97
98MarkSweep::MarkSweep(Heap* heap, bool is_concurrent, const std::string& name_prefix)
99    : GarbageCollector(heap,
100                       name_prefix +
101                       (is_concurrent ? "concurrent mark sweep": "mark sweep")),
102      current_space_bitmap_(nullptr), mark_bitmap_(nullptr), mark_stack_(nullptr),
103      gc_barrier_(new Barrier(0)),
104      mark_stack_lock_("mark sweep mark stack lock", kMarkSweepMarkStackLock),
105      is_concurrent_(is_concurrent), live_stack_freeze_size_(0) {
106}
107
108void MarkSweep::InitializePhase() {
109  TimingLogger::ScopedSplit split("InitializePhase", &timings_);
110  mark_stack_ = heap_->GetMarkStack();
111  DCHECK(mark_stack_ != nullptr);
112  immune_region_.Reset();
113  class_count_.StoreRelaxed(0);
114  array_count_.StoreRelaxed(0);
115  other_count_.StoreRelaxed(0);
116  large_object_test_.StoreRelaxed(0);
117  large_object_mark_.StoreRelaxed(0);
118  overhead_time_.StoreRelaxed(0);
119  work_chunks_created_.StoreRelaxed(0);
120  work_chunks_deleted_.StoreRelaxed(0);
121  reference_count_.StoreRelaxed(0);
122  mark_null_count_.StoreRelaxed(0);
123  mark_immune_count_.StoreRelaxed(0);
124  mark_fastpath_count_.StoreRelaxed(0);
125  mark_slowpath_count_.StoreRelaxed(0);
126  {
127    // TODO: I don't think we should need the heap bitmap lock to get the mark bitmap.
128    ReaderMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
129    mark_bitmap_ = heap_->GetMarkBitmap();
130  }
131  if (!clear_soft_references_) {
132    // Always clear soft references for non-sticky collections.
133    clear_soft_references_ = GetGcType() != collector::kGcTypeSticky;
134  }
135}
136
137void MarkSweep::RunPhases() {
138  Thread* self = Thread::Current();
139  InitializePhase();
140  Locks::mutator_lock_->AssertNotHeld(self);
141  if (IsConcurrent()) {
142    GetHeap()->PreGcVerification(this);
143    {
144      ReaderMutexLock mu(self, *Locks::mutator_lock_);
145      MarkingPhase();
146    }
147    ScopedPause pause(this);
148    GetHeap()->PrePauseRosAllocVerification(this);
149    PausePhase();
150    RevokeAllThreadLocalBuffers();
151  } else {
152    ScopedPause pause(this);
153    GetHeap()->PreGcVerificationPaused(this);
154    MarkingPhase();
155    GetHeap()->PrePauseRosAllocVerification(this);
156    PausePhase();
157    RevokeAllThreadLocalBuffers();
158  }
159  {
160    // Sweeping is always done concurrently, even for non-concurrent mark sweep.
161    ReaderMutexLock mu(self, *Locks::mutator_lock_);
162    ReclaimPhase();
163  }
164  GetHeap()->PostGcVerification(this);
165  FinishPhase();
166}
167
168void MarkSweep::ProcessReferences(Thread* self) {
169  TimingLogger::ScopedSplit split("ProcessReferences", &timings_);
170  WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
171  GetHeap()->GetReferenceProcessor()->ProcessReferences(
172      true, &timings_, clear_soft_references_, &IsMarkedCallback, &MarkObjectCallback,
173      &ProcessMarkStackCallback, this);
174}
175
176void MarkSweep::PausePhase() {
177  TimingLogger::ScopedSplit split("(Paused)PausePhase", &timings_);
178  Thread* self = Thread::Current();
179  Locks::mutator_lock_->AssertExclusiveHeld(self);
180  if (IsConcurrent()) {
181    // Handle the dirty objects if we are a concurrent GC.
182    WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
183    // Re-mark root set.
184    ReMarkRoots();
185    // Scan dirty objects; this is only required when running a concurrent GC.
186    RecursiveMarkDirtyObjects(true, accounting::CardTable::kCardDirty);
187  }
188  {
189    TimingLogger::ScopedSplit split("SwapStacks", &timings_);
190    WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
191    heap_->SwapStacks(self);
192    live_stack_freeze_size_ = heap_->GetLiveStack()->Size();
193    // Need to revoke all the thread local allocation stacks since we just swapped the allocation
194    // stacks and don't want anybody to allocate into the live stack.
195    RevokeAllThreadLocalAllocationStacks(self);
196  }
197  timings_.StartSplit("PreSweepingGcVerification");
198  heap_->PreSweepingGcVerification(this);
199  timings_.EndSplit();
200  // Disallow new system weaks to prevent a race which occurs when someone adds a new system
201  // weak before we sweep them. Since this new system weak may not be marked, the GC may
202  // incorrectly sweep it. This also fixes a race where interning may attempt to return a strong
203  // reference to a string that is about to be swept.
204  Runtime::Current()->DisallowNewSystemWeaks();
205  // Enable the reference processing slow path, needs to be done with mutators paused since there
206  // is no lock in the GetReferent fast path.
207  GetHeap()->GetReferenceProcessor()->EnableSlowPath();
208}
209
210void MarkSweep::PreCleanCards() {
211  // Don't do this for non-concurrent GCs since they don't have any dirty cards.
212  if (kPreCleanCards && IsConcurrent()) {
213    Thread* self = Thread::Current();
214    CHECK(!Locks::mutator_lock_->IsExclusiveHeld(self));
215    // Process dirty cards and add them to the mod union tables; this also ages the cards.
216    heap_->ProcessCards(timings_, false);
217    // The checkpoint root marking is required to avoid a race condition which occurs if the
218    // following happens during a reference write:
219    // 1. mutator dirties the card (write barrier)
220    // 2. GC ages the card (the above ProcessCards call)
221    // 3. GC scans the object (the RecursiveMarkDirtyObjects call below)
222    // 4. mutator writes the value (corresponding to the write barrier in 1.)
223    // This causes the GC to age the card but not necessarily mark the reference which the mutator
224    // wrote into the object stored in the card.
225    // Having the checkpoint fixes this issue since it ensures that the card mark and the
226    // reference write are visible to the GC before the card is scanned (this is due to locks being
227    // acquired / released in the checkpoint code).
228    // The other roots are also marked to help reduce the pause.
229    MarkRootsCheckpoint(self, false);
230    MarkNonThreadRoots();
231    MarkConcurrentRoots(
232        static_cast<VisitRootFlags>(kVisitRootFlagClearRootLog | kVisitRootFlagNewRoots));
233    // Process the newly aged cards.
234    RecursiveMarkDirtyObjects(false, accounting::CardTable::kCardDirty - 1);
235    // TODO: Empty allocation stack to reduce the number of objects we need to test / mark as live
236    // in the next GC.
237  }
238}
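
// Note on the minimum_age arguments used here and in PausePhase(): ProcessCards() ages dirty
// cards, and the pre-clean pass above scans everything that is at least kCardDirty - 1, i.e. both
// the freshly aged cards and any cards mutators have re-dirtied since. The later, paused
// RecursiveMarkDirtyObjects(true, kCardDirty) call then only rescans cards that are fully dirty
// again, so the pause only pays for writes that happened after pre-cleaning. (This summarizes the
// calls made in this file; the card value constants themselves live in the card table code.)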
239
240void MarkSweep::RevokeAllThreadLocalAllocationStacks(Thread* self) {
241  if (kUseThreadLocalAllocationStack) {
242    timings_.NewSplit("RevokeAllThreadLocalAllocationStacks");
243    Locks::mutator_lock_->AssertExclusiveHeld(self);
244    heap_->RevokeAllThreadLocalAllocationStacks(self);
245  }
246}
247
248void MarkSweep::MarkingPhase() {
249  TimingLogger::ScopedSplit split("MarkingPhase", &timings_);
250  Thread* self = Thread::Current();
251
252  BindBitmaps();
253  FindDefaultSpaceBitmap();
254
255  // Process dirty cards and add dirty cards to mod union tables.
256  heap_->ProcessCards(timings_, false);
257
258  WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
259  MarkRoots(self);
260  MarkReachableObjects();
261  // Pre-clean dirtied cards to reduce pauses.
262  PreCleanCards();
263}
264
265void MarkSweep::UpdateAndMarkModUnion() {
266  for (const auto& space : heap_->GetContinuousSpaces()) {
267    if (immune_region_.ContainsSpace(space)) {
268      const char* name = space->IsZygoteSpace() ? "UpdateAndMarkZygoteModUnionTable" :
269          "UpdateAndMarkImageModUnionTable";
270      TimingLogger::ScopedSplit split(name, &timings_);
271      accounting::ModUnionTable* mod_union_table = heap_->FindModUnionTableFromSpace(space);
272      CHECK(mod_union_table != nullptr);
273      mod_union_table->UpdateAndMarkReferences(MarkHeapReferenceCallback, this);
274    }
275  }
276}
277
278void MarkSweep::MarkReachableObjects() {
279  UpdateAndMarkModUnion();
280  // Recursively mark all the non-image bits set in the mark bitmap.
281  RecursiveMark();
282}
283
284void MarkSweep::ReclaimPhase() {
285  TimingLogger::ScopedSplit split("ReclaimPhase", &timings_);
286  Thread* self = Thread::Current();
287  // Process the references concurrently.
288  ProcessReferences(self);
289  SweepSystemWeaks(self);
290  Runtime::Current()->AllowNewSystemWeaks();
291  {
292    WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
293
294    // Reclaim unmarked objects.
295    Sweep(false);
296
297    // Swap the live and mark bitmaps for each space which we modified. This is an
298    // optimization that enables us to not clear live bits inside of the sweep. Only swaps unbound
299    // bitmaps.
300    timings_.StartSplit("SwapBitmaps");
301    SwapBitmaps();
302    timings_.EndSplit();
303
304    // Unbind the live and mark bitmaps.
305    TimingLogger::ScopedSplit split("UnBindBitmaps", &timings_);
306    GetHeap()->UnBindBitmaps();
307  }
308}
309
310void MarkSweep::FindDefaultSpaceBitmap() {
311  TimingLogger::ScopedSplit split("FindDefaultMarkBitmap", &timings_);
312  for (const auto& space : GetHeap()->GetContinuousSpaces()) {
313    accounting::ContinuousSpaceBitmap* bitmap = space->GetMarkBitmap();
314    // We want to have the main space rather than the non-moving space if possible.
315    if (bitmap != nullptr &&
316        space->GetGcRetentionPolicy() == space::kGcRetentionPolicyAlwaysCollect) {
317      current_space_bitmap_ = bitmap;
318      // If this is not the non-moving space, exit the loop early since it will be good enough.
319      if (space != heap_->GetNonMovingSpace()) {
320        break;
321      }
322    }
323  }
324  if (current_space_bitmap_ == nullptr) {
325    heap_->DumpSpaces();
326    LOG(FATAL) << "Could not find a default mark bitmap";
327  }
328}
329
330void MarkSweep::ExpandMarkStack() {
331  ResizeMarkStack(mark_stack_->Capacity() * 2);
332}
333
334void MarkSweep::ResizeMarkStack(size_t new_size) {
335  // Rare case, no need to have Thread::Current be a parameter.
336  if (UNLIKELY(mark_stack_->Size() < mark_stack_->Capacity())) {
337    // Someone else acquired the lock and expanded the mark stack before us.
338    return;
339  }
340  std::vector<Object*> temp(mark_stack_->Begin(), mark_stack_->End());
341  CHECK_LE(mark_stack_->Size(), new_size);
342  mark_stack_->Resize(new_size);
343  for (const auto& obj : temp) {
344    mark_stack_->PushBack(obj);
345  }
346}
347
348inline void MarkSweep::MarkObjectNonNullParallel(Object* obj) {
349  DCHECK(obj != nullptr);
350  if (MarkObjectParallel(obj)) {
351    MutexLock mu(Thread::Current(), mark_stack_lock_);
352    if (UNLIKELY(mark_stack_->Size() >= mark_stack_->Capacity())) {
353      ExpandMarkStack();
354    }
355    // The object must be pushed on to the mark stack.
356    mark_stack_->PushBack(obj);
357  }
358}
359
360mirror::Object* MarkSweep::MarkObjectCallback(mirror::Object* obj, void* arg) {
361  MarkSweep* mark_sweep = reinterpret_cast<MarkSweep*>(arg);
362  mark_sweep->MarkObject(obj);
363  return obj;
364}
365
366void MarkSweep::MarkHeapReferenceCallback(mirror::HeapReference<mirror::Object>* ref, void* arg) {
367  reinterpret_cast<MarkSweep*>(arg)->MarkObject(ref->AsMirrorPtr());
368}
369
370class MarkSweepMarkObjectSlowPath {
371 public:
372  explicit MarkSweepMarkObjectSlowPath(MarkSweep* mark_sweep) : mark_sweep_(mark_sweep) {
373  }
374
375  void operator()(const Object* obj) const ALWAYS_INLINE {
376    if (kProfileLargeObjects) {
377      // TODO: Differentiate between marking and testing somehow.
378      ++mark_sweep_->large_object_test_;
379      ++mark_sweep_->large_object_mark_;
380    }
381    space::LargeObjectSpace* large_object_space = mark_sweep_->GetHeap()->GetLargeObjectsSpace();
382    if (UNLIKELY(obj == nullptr || !IsAligned<kPageSize>(obj) ||
383                 (kIsDebugBuild && !large_object_space->Contains(obj)))) {
384      LOG(ERROR) << "Tried to mark " << obj << " not contained by any spaces";
385      LOG(ERROR) << "Attempting see if it's a bad root";
386      mark_sweep_->VerifyRoots();
387      LOG(FATAL) << "Can't mark invalid object";
388    }
389  }
390
391 private:
392  MarkSweep* const mark_sweep_;
393};
394
395inline void MarkSweep::MarkObjectNonNull(Object* obj) {
396  DCHECK(obj != nullptr);
397  if (kUseBakerOrBrooksReadBarrier) {
398    // Verify all the objects have the correct pointer installed.
399    obj->AssertReadBarrierPointer();
400  }
401  if (immune_region_.ContainsObject(obj)) {
402    if (kCountMarkedObjects) {
403      ++mark_immune_count_;
404    }
405    DCHECK(mark_bitmap_->Test(obj));
406  } else if (LIKELY(current_space_bitmap_->HasAddress(obj))) {
407    if (kCountMarkedObjects) {
408      ++mark_fastpath_count_;
409    }
410    if (UNLIKELY(!current_space_bitmap_->Set(obj))) {
411      PushOnMarkStack(obj);  // This object was not previously marked.
412    }
413  } else {
414    if (kCountMarkedObjects) {
415      ++mark_slowpath_count_;
416    }
417    MarkSweepMarkObjectSlowPath visitor(this);
418    // TODO: We already know that the object is not in the current_space_bitmap_ but MarkBitmap::Set
419    // will check again.
420    if (!mark_bitmap_->Set(obj, visitor)) {
421      PushOnMarkStack(obj);  // Was not already marked, push.
422    }
423  }
424}
425
426inline void MarkSweep::PushOnMarkStack(Object* obj) {
427  if (UNLIKELY(mark_stack_->Size() >= mark_stack_->Capacity())) {
428    // Lock is not needed but is here anyway to please annotalysis.
429    MutexLock mu(Thread::Current(), mark_stack_lock_);
430    ExpandMarkStack();
431  }
432  // The object must be pushed on to the mark stack.
433  mark_stack_->PushBack(obj);
434}
435
436inline bool MarkSweep::MarkObjectParallel(const Object* obj) {
437  DCHECK(obj != nullptr);
438  if (kUseBakerOrBrooksReadBarrier) {
439    // Verify all the objects have the correct pointer installed.
440    obj->AssertReadBarrierPointer();
441  }
442  if (immune_region_.ContainsObject(obj)) {
443    DCHECK(IsMarked(obj));
444    return false;
445  }
446  // Try to take advantage of locality of references within a space; failing this, find the space
447  // the hard way.
448  accounting::ContinuousSpaceBitmap* object_bitmap = current_space_bitmap_;
449  if (LIKELY(object_bitmap->HasAddress(obj))) {
450    return !object_bitmap->AtomicTestAndSet(obj);
451  }
452  MarkSweepMarkObjectSlowPath visitor(this);
453  return !mark_bitmap_->AtomicTestAndSet(obj, visitor);
454}
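
// MarkObjectParallel() runs on multiple workers with no heap bitmap lock held; correctness comes
// from AtomicTestAndSet(), which atomically sets the mark bit and reports whether it was already
// set, so exactly one thread wins the race to push any given object. A minimal sketch of that
// idea, using std::atomic and hypothetical WordFor()/MaskFor() helpers (illustrative only, not
// the actual SpaceBitmap implementation):
//
//   std::atomic<uintptr_t>& word = WordFor(obj);  // Bitmap word holding obj's mark bit.
//   const uintptr_t mask = MaskFor(obj);          // Single-bit mask for obj within that word.
//   uintptr_t old_word = word.load(std::memory_order_relaxed);
//   do {
//     if ((old_word & mask) != 0) {
//       return true;   // Bit was already set: another thread marked obj, so it is not pushed again.
//     }
//   } while (!word.compare_exchange_weak(old_word, old_word | mask, std::memory_order_relaxed));
//   return false;      // We set the bit; MarkObjectParallel() returns true and obj is pushed once.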
455
456// Used to mark objects when processing the mark stack. If an object is null, it is not marked.
457inline void MarkSweep::MarkObject(Object* obj) {
458  if (obj != nullptr) {
459    MarkObjectNonNull(obj);
460  } else if (kCountMarkedObjects) {
461    ++mark_null_count_;
462  }
463}
464
465void MarkSweep::MarkRootParallelCallback(Object** root, void* arg, uint32_t /*thread_id*/,
466                                         RootType /*root_type*/) {
467  reinterpret_cast<MarkSweep*>(arg)->MarkObjectNonNullParallel(*root);
468}
469
470void MarkSweep::VerifyRootMarked(Object** root, void* arg, uint32_t /*thread_id*/,
471                                 RootType /*root_type*/) {
472  CHECK(reinterpret_cast<MarkSweep*>(arg)->IsMarked(*root));
473}
474
475void MarkSweep::MarkRootCallback(Object** root, void* arg, uint32_t /*thread_id*/,
476                                 RootType /*root_type*/) {
477  reinterpret_cast<MarkSweep*>(arg)->MarkObjectNonNull(*root);
478}
479
480void MarkSweep::VerifyRootCallback(const Object* root, void* arg, size_t vreg,
481                                   const StackVisitor* visitor, RootType root_type) {
482  reinterpret_cast<MarkSweep*>(arg)->VerifyRoot(root, vreg, visitor, root_type);
483}
484
485void MarkSweep::VerifyRoot(const Object* root, size_t vreg, const StackVisitor* visitor,
486                           RootType root_type) {
487  // See if the root is on any space bitmap.
488  if (heap_->GetLiveBitmap()->GetContinuousSpaceBitmap(root) == nullptr) {
489    space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
490    if (!large_object_space->Contains(root)) {
491      LOG(ERROR) << "Found invalid root: " << root << " with type " << root_type;
492      if (visitor != NULL) {
493        LOG(ERROR) << visitor->DescribeLocation() << " in VReg: " << vreg;
494      }
495    }
496  }
497}
498
499void MarkSweep::VerifyRoots() {
500  Runtime::Current()->GetThreadList()->VerifyRoots(VerifyRootCallback, this);
501}
502
503void MarkSweep::MarkRoots(Thread* self) {
504  if (Locks::mutator_lock_->IsExclusiveHeld(self)) {
505    // If we exclusively hold the mutator lock, all threads must be suspended.
506    timings_.StartSplit("MarkRoots");
507    Runtime::Current()->VisitRoots(MarkRootCallback, this);
508    timings_.EndSplit();
509    RevokeAllThreadLocalAllocationStacks(self);
510  } else {
511    MarkRootsCheckpoint(self, kRevokeRosAllocThreadLocalBuffersAtCheckpoint);
512    // At this point the live stack should no longer have any mutators which push into it.
513    MarkNonThreadRoots();
514    MarkConcurrentRoots(
515        static_cast<VisitRootFlags>(kVisitRootFlagAllRoots | kVisitRootFlagStartLoggingNewRoots));
516  }
517}
518
519void MarkSweep::MarkNonThreadRoots() {
520  timings_.StartSplit("MarkNonThreadRoots");
521  Runtime::Current()->VisitNonThreadRoots(MarkRootCallback, this);
522  timings_.EndSplit();
523}
524
525void MarkSweep::MarkConcurrentRoots(VisitRootFlags flags) {
526  timings_.StartSplit("MarkConcurrentRoots");
527  // Visit all runtime roots and clear dirty flags.
528  Runtime::Current()->VisitConcurrentRoots(MarkRootCallback, this, flags);
529  timings_.EndSplit();
530}
531
532class ScanObjectVisitor {
533 public:
534  explicit ScanObjectVisitor(MarkSweep* const mark_sweep) ALWAYS_INLINE
535      : mark_sweep_(mark_sweep) {}
536
537  void operator()(Object* obj) const ALWAYS_INLINE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
538      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
539    if (kCheckLocks) {
540      Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
541      Locks::heap_bitmap_lock_->AssertExclusiveHeld(Thread::Current());
542    }
543    mark_sweep_->ScanObject(obj);
544  }
545
546 private:
547  MarkSweep* const mark_sweep_;
548};
549
550class DelayReferenceReferentVisitor {
551 public:
552  explicit DelayReferenceReferentVisitor(MarkSweep* collector) : collector_(collector) {
553  }
554
555  void operator()(mirror::Class* klass, mirror::Reference* ref) const
556      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
557      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
558    collector_->DelayReferenceReferent(klass, ref);
559  }
560
561 private:
562  MarkSweep* const collector_;
563};
564
565template <bool kUseFinger = false>
566class MarkStackTask : public Task {
567 public:
568  MarkStackTask(ThreadPool* thread_pool, MarkSweep* mark_sweep, size_t mark_stack_size,
569                Object** mark_stack)
570      : mark_sweep_(mark_sweep),
571        thread_pool_(thread_pool),
572        mark_stack_pos_(mark_stack_size) {
573    // We may have to copy part of an existing mark stack when another mark stack overflows.
574    if (mark_stack_size != 0) {
575      DCHECK(mark_stack != NULL);
576      // TODO: Check performance?
577      std::copy(mark_stack, mark_stack + mark_stack_size, mark_stack_);
578    }
579    if (kCountTasks) {
580      ++mark_sweep_->work_chunks_created_;
581    }
582  }
583
584  static const size_t kMaxSize = 1 * KB;
585
586 protected:
587  class MarkObjectParallelVisitor {
588   public:
589    explicit MarkObjectParallelVisitor(MarkStackTask<kUseFinger>* chunk_task,
590                                       MarkSweep* mark_sweep) ALWAYS_INLINE
591            : chunk_task_(chunk_task), mark_sweep_(mark_sweep) {}
592
593    void operator()(Object* obj, MemberOffset offset, bool /* static */) const ALWAYS_INLINE
594        SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
595      mirror::Object* ref = obj->GetFieldObject<mirror::Object>(offset);
596      if (ref != nullptr && mark_sweep_->MarkObjectParallel(ref)) {
597        if (kUseFinger) {
598          android_memory_barrier();
599          if (reinterpret_cast<uintptr_t>(ref) >=
600              static_cast<uintptr_t>(mark_sweep_->atomic_finger_.LoadRelaxed())) {
601            return;
602          }
603        }
604        chunk_task_->MarkStackPush(ref);
605      }
606    }
607
608   private:
609    MarkStackTask<kUseFinger>* const chunk_task_;
610    MarkSweep* const mark_sweep_;
611  };
612
613  class ScanObjectParallelVisitor {
614   public:
615    explicit ScanObjectParallelVisitor(MarkStackTask<kUseFinger>* chunk_task) ALWAYS_INLINE
616        : chunk_task_(chunk_task) {}
617
618    // No thread safety analysis since multiple threads will use this visitor.
619    void operator()(Object* obj) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
620        EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
621      MarkSweep* const mark_sweep = chunk_task_->mark_sweep_;
622      MarkObjectParallelVisitor mark_visitor(chunk_task_, mark_sweep);
623      DelayReferenceReferentVisitor ref_visitor(mark_sweep);
624      mark_sweep->ScanObjectVisit(obj, mark_visitor, ref_visitor);
625    }
626
627   private:
628    MarkStackTask<kUseFinger>* const chunk_task_;
629  };
630
631  virtual ~MarkStackTask() {
632    // Make sure that we have cleared our mark stack.
633    DCHECK_EQ(mark_stack_pos_, 0U);
634    if (kCountTasks) {
635      ++mark_sweep_->work_chunks_deleted_;
636    }
637  }
638
639  MarkSweep* const mark_sweep_;
640  ThreadPool* const thread_pool_;
641  // Thread local mark stack for this task.
642  Object* mark_stack_[kMaxSize];
643  // Mark stack position.
644  size_t mark_stack_pos_;
645
646  void MarkStackPush(Object* obj) ALWAYS_INLINE {
647    if (UNLIKELY(mark_stack_pos_ == kMaxSize)) {
648      // Mark stack overflow, give 1/2 the stack to the thread pool as a new work task.
649      mark_stack_pos_ /= 2;
650      auto* task = new MarkStackTask(thread_pool_, mark_sweep_, kMaxSize - mark_stack_pos_,
651                                     mark_stack_ + mark_stack_pos_);
652      thread_pool_->AddTask(Thread::Current(), task);
653    }
654    DCHECK(obj != nullptr);
655    DCHECK_LT(mark_stack_pos_, kMaxSize);
656    mark_stack_[mark_stack_pos_++] = obj;
657  }
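
  // Illustrative numbers for the overflow path in MarkStackPush() above, with the current
  // kMaxSize of 1 * KB = 1024 entries: when a push finds the local stack full, mark_stack_pos_
  // drops to 512, entries [512, 1024) are copied into a new MarkStackTask handed to the thread
  // pool, and this task keeps entries [0, 512). This bounds each task at kMaxSize pointers of
  // local stack while continuously feeding work to idle workers.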
658
659  virtual void Finalize() {
660    delete this;
661  }
662
663  // Scans all of the objects.
664  virtual void Run(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
665      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
666    ScanObjectParallelVisitor visitor(this);
667    // TODO: Tune this.
668    static const size_t kFifoSize = 4;
669    BoundedFifoPowerOfTwo<Object*, kFifoSize> prefetch_fifo;
670    for (;;) {
671      Object* obj = nullptr;
672      if (kUseMarkStackPrefetch) {
673        while (mark_stack_pos_ != 0 && prefetch_fifo.size() < kFifoSize) {
674          Object* obj = mark_stack_[--mark_stack_pos_];
675          DCHECK(obj != nullptr);
676          __builtin_prefetch(obj);
677          prefetch_fifo.push_back(obj);
678        }
679        if (UNLIKELY(prefetch_fifo.empty())) {
680          break;
681        }
682        obj = prefetch_fifo.front();
683        prefetch_fifo.pop_front();
684      } else {
685        if (UNLIKELY(mark_stack_pos_ == 0)) {
686          break;
687        }
688        obj = mark_stack_[--mark_stack_pos_];
689      }
690      DCHECK(obj != nullptr);
691      visitor(obj);
692    }
693  }
694};
695
696class CardScanTask : public MarkStackTask<false> {
697 public:
698  CardScanTask(ThreadPool* thread_pool, MarkSweep* mark_sweep,
699               accounting::ContinuousSpaceBitmap* bitmap,
700               byte* begin, byte* end, byte minimum_age, size_t mark_stack_size,
701               Object** mark_stack_obj)
702      : MarkStackTask<false>(thread_pool, mark_sweep, mark_stack_size, mark_stack_obj),
703        bitmap_(bitmap),
704        begin_(begin),
705        end_(end),
706        minimum_age_(minimum_age) {
707  }
708
709 protected:
710  accounting::ContinuousSpaceBitmap* const bitmap_;
711  byte* const begin_;
712  byte* const end_;
713  const byte minimum_age_;
714
715  virtual void Finalize() {
716    delete this;
717  }
718
719  virtual void Run(Thread* self) NO_THREAD_SAFETY_ANALYSIS {
720    ScanObjectParallelVisitor visitor(this);
721    accounting::CardTable* card_table = mark_sweep_->GetHeap()->GetCardTable();
722    size_t cards_scanned = card_table->Scan(bitmap_, begin_, end_, visitor, minimum_age_);
723    VLOG(heap) << "Parallel scanning cards " << reinterpret_cast<void*>(begin_) << " - "
724        << reinterpret_cast<void*>(end_) << " = " << cards_scanned;
725    // Finish by emptying our local mark stack.
726    MarkStackTask::Run(self);
727  }
728};
729
730size_t MarkSweep::GetThreadCount(bool paused) const {
731  if (heap_->GetThreadPool() == nullptr || !heap_->CareAboutPauseTimes()) {
732    return 1;
733  }
734  if (paused) {
735    return heap_->GetParallelGCThreadCount() + 1;
736  } else {
737    return heap_->GetConcGCThreadCount() + 1;
738  }
739}
740
741void MarkSweep::ScanGrayObjects(bool paused, byte minimum_age) {
742  accounting::CardTable* card_table = GetHeap()->GetCardTable();
743  ThreadPool* thread_pool = GetHeap()->GetThreadPool();
744  size_t thread_count = GetThreadCount(paused);
745  // The parallel version with only one thread is faster for card scanning. TODO: fix.
746  if (kParallelCardScan && thread_count > 1) {
747    Thread* self = Thread::Current();
748    // Can't have a different split for each space since multiple spaces can have their cards being
749    // scanned at the same time.
750    timings_.StartSplit(paused ? "(Paused)ScanGrayObjects" : "ScanGrayObjects");
751    // Try to take some of the mark stack since we can pass this off to the worker tasks.
752    Object** mark_stack_begin = mark_stack_->Begin();
753    Object** mark_stack_end = mark_stack_->End();
754    const size_t mark_stack_size = mark_stack_end - mark_stack_begin;
755    // Estimated number of work tasks we will create.
756    const size_t mark_stack_tasks = GetHeap()->GetContinuousSpaces().size() * thread_count;
757    DCHECK_NE(mark_stack_tasks, 0U);
758    const size_t mark_stack_delta = std::min(CardScanTask::kMaxSize / 2,
759                                             mark_stack_size / mark_stack_tasks + 1);
760    for (const auto& space : GetHeap()->GetContinuousSpaces()) {
761      if (space->GetMarkBitmap() == nullptr) {
762        continue;
763      }
764      byte* card_begin = space->Begin();
765      byte* card_end = space->End();
766      // Align up the end address. For example, the image space's end
767      // may not be card-size-aligned.
768      card_end = AlignUp(card_end, accounting::CardTable::kCardSize);
769      DCHECK(IsAligned<accounting::CardTable::kCardSize>(card_begin));
770      DCHECK(IsAligned<accounting::CardTable::kCardSize>(card_end));
771      // Calculate how many bytes of heap we will scan.
772      const size_t address_range = card_end - card_begin;
773      // Calculate how much address range each task gets.
774      const size_t card_delta = RoundUp(address_range / thread_count + 1,
775                                        accounting::CardTable::kCardSize);
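
      // Illustrative numbers for the two deltas (example values only): with thread_count = 4 and
      // three continuous spaces, mark_stack_tasks = 12, so a 6000-entry mark stack gives
      // mark_stack_delta = min(512, 6000 / 12 + 1) = 501 entries seeded into each CardScanTask.
      // For a 64 MiB space, card_delta is RoundUp(64 MiB / 4 + 1, kCardSize), i.e. just over
      // 16 MiB of address range per task, or roughly one chunk per thread per space.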
776      // Create the worker tasks for this space.
777      while (card_begin != card_end) {
778        // Add a range of cards.
779        size_t addr_remaining = card_end - card_begin;
780        size_t card_increment = std::min(card_delta, addr_remaining);
781        // Take from the back of the mark stack.
782        size_t mark_stack_remaining = mark_stack_end - mark_stack_begin;
783        size_t mark_stack_increment = std::min(mark_stack_delta, mark_stack_remaining);
784        mark_stack_end -= mark_stack_increment;
785        mark_stack_->PopBackCount(static_cast<int32_t>(mark_stack_increment));
786        DCHECK_EQ(mark_stack_end, mark_stack_->End());
787        // Add the new task to the thread pool.
788        auto* task = new CardScanTask(thread_pool, this, space->GetMarkBitmap(), card_begin,
789                                      card_begin + card_increment, minimum_age,
790                                      mark_stack_increment, mark_stack_end);
791        thread_pool->AddTask(self, task);
792        card_begin += card_increment;
793      }
794    }
795
796    // Note: the card scan below may dirty new cards (and scan them)
797    // as a side effect when a Reference object is encountered and
798    // queued during the marking. See b/11465268.
799    thread_pool->SetMaxActiveWorkers(thread_count - 1);
800    thread_pool->StartWorkers(self);
801    thread_pool->Wait(self, true, true);
802    thread_pool->StopWorkers(self);
803    timings_.EndSplit();
804  } else {
805    for (const auto& space : GetHeap()->GetContinuousSpaces()) {
806      if (space->GetMarkBitmap() != nullptr) {
807        // Image spaces are handled properly since live == marked for them.
808        switch (space->GetGcRetentionPolicy()) {
809          case space::kGcRetentionPolicyNeverCollect:
810            timings_.StartSplit(paused ? "(Paused)ScanGrayImageSpaceObjects" :
811                "ScanGrayImageSpaceObjects");
812            break;
813          case space::kGcRetentionPolicyFullCollect:
814            timings_.StartSplit(paused ? "(Paused)ScanGrayZygoteSpaceObjects" :
815                "ScanGrayZygoteSpaceObjects");
816            break;
817          case space::kGcRetentionPolicyAlwaysCollect:
818            timings_.StartSplit(paused ? "(Paused)ScanGrayAllocSpaceObjects" :
819                "ScanGrayAllocSpaceObjects");
820            break;
821        }
822        ScanObjectVisitor visitor(this);
823        card_table->Scan(space->GetMarkBitmap(), space->Begin(), space->End(), visitor, minimum_age);
824        timings_.EndSplit();
825      }
826    }
827  }
828}
829
830class RecursiveMarkTask : public MarkStackTask<false> {
831 public:
832  RecursiveMarkTask(ThreadPool* thread_pool, MarkSweep* mark_sweep,
833                    accounting::ContinuousSpaceBitmap* bitmap, uintptr_t begin, uintptr_t end)
834      : MarkStackTask<false>(thread_pool, mark_sweep, 0, NULL),
835        bitmap_(bitmap),
836        begin_(begin),
837        end_(end) {
838  }
839
840 protected:
841  accounting::ContinuousSpaceBitmap* const bitmap_;
842  const uintptr_t begin_;
843  const uintptr_t end_;
844
845  virtual void Finalize() {
846    delete this;
847  }
848
849  // Scans all of the objects.
850  virtual void Run(Thread* self) NO_THREAD_SAFETY_ANALYSIS {
851    ScanObjectParallelVisitor visitor(this);
852    bitmap_->VisitMarkedRange(begin_, end_, visitor);
853    // Finish by emptying our local mark stack.
854    MarkStackTask::Run(self);
855  }
856};
857
858// Populates the mark stack based on the set of marked objects and
859// recursively marks until the mark stack is emptied.
860void MarkSweep::RecursiveMark() {
861  TimingLogger::ScopedSplit split("RecursiveMark", &timings_);
862  // RecursiveMark will build the lists of known instances of the Reference classes. See
863  // DelayReferenceReferent for details.
864  if (kUseRecursiveMark) {
865    const bool partial = GetGcType() == kGcTypePartial;
866    ScanObjectVisitor scan_visitor(this);
867    auto* self = Thread::Current();
868    ThreadPool* thread_pool = heap_->GetThreadPool();
869    size_t thread_count = GetThreadCount(false);
870    const bool parallel = kParallelRecursiveMark && thread_count > 1;
871    mark_stack_->Reset();
872    for (const auto& space : GetHeap()->GetContinuousSpaces()) {
873      if ((space->GetGcRetentionPolicy() == space::kGcRetentionPolicyAlwaysCollect) ||
874          (!partial && space->GetGcRetentionPolicy() == space::kGcRetentionPolicyFullCollect)) {
875        current_space_bitmap_ = space->GetMarkBitmap();
876        if (current_space_bitmap_ == nullptr) {
877          continue;
878        }
879        if (parallel) {
880          // We will use the mark stack in the future.
881          // CHECK(mark_stack_->IsEmpty());
882          // This function does not handle heap end increasing, so we must use the space end.
883          uintptr_t begin = reinterpret_cast<uintptr_t>(space->Begin());
884          uintptr_t end = reinterpret_cast<uintptr_t>(space->End());
885          atomic_finger_.StoreRelaxed(AtomicInteger::MaxValue());
886
887          // Create a few worker tasks.
888          const size_t n = thread_count * 2;
889          while (begin != end) {
890            uintptr_t start = begin;
891            uintptr_t delta = (end - begin) / n;
892            delta = RoundUp(delta, KB);
893            if (delta < 16 * KB) delta = end - begin;
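            // delta is recomputed from the remaining range each iteration, so chunk sizes shrink
            // geometrically: e.g. an 8 MiB space with thread_count = 2 (n = 4) yields tasks of
            // roughly 2 MiB, 1.5 MiB, 1.1 MiB, ... until a chunk would drop below 16 KB, at which
            // point the final task takes whatever is left. The mix of large and small tasks helps
            // even out load across the workers.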
894            begin += delta;
895            auto* task = new RecursiveMarkTask(thread_pool, this, current_space_bitmap_, start,
896                                               begin);
897            thread_pool->AddTask(self, task);
898          }
899          thread_pool->SetMaxActiveWorkers(thread_count - 1);
900          thread_pool->StartWorkers(self);
901          thread_pool->Wait(self, true, true);
902          thread_pool->StopWorkers(self);
903        } else {
904          // This function does not handle heap end increasing, so we must use the space end.
905          uintptr_t begin = reinterpret_cast<uintptr_t>(space->Begin());
906          uintptr_t end = reinterpret_cast<uintptr_t>(space->End());
907          current_space_bitmap_->VisitMarkedRange(begin, end, scan_visitor);
908        }
909      }
910    }
911  }
912  ProcessMarkStack(false);
913}
914
915mirror::Object* MarkSweep::IsMarkedCallback(mirror::Object* object, void* arg) {
916  if (reinterpret_cast<MarkSweep*>(arg)->IsMarked(object)) {
917    return object;
918  }
919  return nullptr;
920}
921
922void MarkSweep::RecursiveMarkDirtyObjects(bool paused, byte minimum_age) {
923  ScanGrayObjects(paused, minimum_age);
924  ProcessMarkStack(paused);
925}
926
927void MarkSweep::ReMarkRoots() {
928  Locks::mutator_lock_->AssertExclusiveHeld(Thread::Current());
929  timings_.StartSplit("(Paused)ReMarkRoots");
930  Runtime::Current()->VisitRoots(
931      MarkRootCallback, this, static_cast<VisitRootFlags>(kVisitRootFlagNewRoots |
932                                                          kVisitRootFlagStopLoggingNewRoots |
933                                                          kVisitRootFlagClearRootLog));
934  timings_.EndSplit();
935  if (kVerifyRootsMarked) {
936    timings_.StartSplit("(Paused)VerifyRoots");
937    Runtime::Current()->VisitRoots(VerifyRootMarked, this);
938    timings_.EndSplit();
939  }
940}
941
942void MarkSweep::SweepSystemWeaks(Thread* self) {
943  WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
944  timings_.StartSplit("SweepSystemWeaks");
945  Runtime::Current()->SweepSystemWeaks(IsMarkedCallback, this);
946  timings_.EndSplit();
947}
948
949mirror::Object* MarkSweep::VerifySystemWeakIsLiveCallback(Object* obj, void* arg) {
950  reinterpret_cast<MarkSweep*>(arg)->VerifyIsLive(obj);
951  // We don't actually want to sweep the object, so let's return "marked".
952  return obj;
953}
954
955void MarkSweep::VerifyIsLive(const Object* obj) {
956  if (!heap_->GetLiveBitmap()->Test(obj)) {
957    if (std::find(heap_->allocation_stack_->Begin(), heap_->allocation_stack_->End(), obj) ==
958        heap_->allocation_stack_->End()) {
959      // Object not found!
960      heap_->DumpSpaces();
961      LOG(FATAL) << "Found dead object " << obj;
962    }
963  }
964}
965
966void MarkSweep::VerifySystemWeaks() {
967  // Verify system weaks, uses a special object visitor which returns the input object.
968  Runtime::Current()->SweepSystemWeaks(VerifySystemWeakIsLiveCallback, this);
969}
970
971class CheckpointMarkThreadRoots : public Closure {
972 public:
973  explicit CheckpointMarkThreadRoots(MarkSweep* mark_sweep,
974                                     bool revoke_ros_alloc_thread_local_buffers_at_checkpoint)
975      : mark_sweep_(mark_sweep),
976        revoke_ros_alloc_thread_local_buffers_at_checkpoint_(
977            revoke_ros_alloc_thread_local_buffers_at_checkpoint) {
978  }
979
980  virtual void Run(Thread* thread) OVERRIDE NO_THREAD_SAFETY_ANALYSIS {
981    ATRACE_BEGIN("Marking thread roots");
982    // Note: self is not necessarily equal to thread since thread may be suspended.
983    Thread* self = Thread::Current();
984    CHECK(thread == self || thread->IsSuspended() || thread->GetState() == kWaitingPerformingGc)
985        << thread->GetState() << " thread " << thread << " self " << self;
986    thread->VisitRoots(MarkSweep::MarkRootParallelCallback, mark_sweep_);
987    ATRACE_END();
988    if (revoke_ros_alloc_thread_local_buffers_at_checkpoint_) {
989      ATRACE_BEGIN("RevokeRosAllocThreadLocalBuffers");
990      mark_sweep_->GetHeap()->RevokeRosAllocThreadLocalBuffers(thread);
991      ATRACE_END();
992    }
993    mark_sweep_->GetBarrier().Pass(self);
994  }
995
996 private:
997  MarkSweep* const mark_sweep_;
998  const bool revoke_ros_alloc_thread_local_buffers_at_checkpoint_;
999};
1000
1001void MarkSweep::MarkRootsCheckpoint(Thread* self,
1002                                    bool revoke_ros_alloc_thread_local_buffers_at_checkpoint) {
1003  CheckpointMarkThreadRoots check_point(this, revoke_ros_alloc_thread_local_buffers_at_checkpoint);
1004  timings_.StartSplit("MarkRootsCheckpoint");
1005  ThreadList* thread_list = Runtime::Current()->GetThreadList();
1006  // Request that the checkpoint be run on all threads, returning a count of the threads that
1007  // must run through the barrier, including self.
1008  size_t barrier_count = thread_list->RunCheckpoint(&check_point);
1009  // Release locks then wait for all mutator threads to pass the barrier.
1010  // TODO: optimize to not release locks when there are no threads to wait for.
1011  Locks::heap_bitmap_lock_->ExclusiveUnlock(self);
1012  Locks::mutator_lock_->SharedUnlock(self);
1013  {
1014    ScopedThreadStateChange tsc(self, kWaitingForCheckPointsToRun);
1015    gc_barrier_->Increment(self, barrier_count);
1016  }
1017  Locks::mutator_lock_->SharedLock(self);
1018  Locks::heap_bitmap_lock_->ExclusiveLock(self);
1019  timings_.EndSplit();
1020}
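
// The checkpoint/barrier handshake above, spelled out (a sketch of the ordering implied by the
// code in this file, assuming Barrier::Increment(self, n) blocks until n Pass() calls have been
// made):
//
//   GC thread                                    each mutator thread
//   ---------                                    -------------------
//   RunCheckpoint(&check_point)                  CheckpointMarkThreadRoots::Run():
//     -> returns barrier_count                     VisitRoots(MarkRootParallelCallback, ...)
//   drop heap_bitmap_lock_ / mutator_lock_         [optional rosalloc TLAB revoke]
//   gc_barrier_->Increment(self, barrier_count)  gc_barrier_->Pass(self)
//     (blocks until every thread has Passed)
//   re-acquire the locks, end the timing split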
1021
1022void MarkSweep::SweepArray(accounting::ObjectStack* allocations, bool swap_bitmaps) {
1023  timings_.StartSplit("SweepArray");
1024  Thread* self = Thread::Current();
1025  mirror::Object* chunk_free_buffer[kSweepArrayChunkFreeSize];
1026  size_t chunk_free_pos = 0;
1027  size_t freed_bytes = 0;
1028  size_t freed_large_object_bytes = 0;
1029  size_t freed_objects = 0;
1030  size_t freed_large_objects = 0;
1031  // How many objects are left in the array, modified after each space is swept.
1032  Object** objects = allocations->Begin();
1033  size_t count = allocations->Size();
1034  // Change the order to ensure that the non-moving space is swept last as an optimization.
1035  std::vector<space::ContinuousSpace*> sweep_spaces;
1036  space::ContinuousSpace* non_moving_space = nullptr;
1037  for (space::ContinuousSpace* space : heap_->GetContinuousSpaces()) {
1038    if (space->IsAllocSpace() && !immune_region_.ContainsSpace(space) &&
1039        space->GetLiveBitmap() != nullptr) {
1040      if (space == heap_->GetNonMovingSpace()) {
1041        non_moving_space = space;
1042      } else {
1043        sweep_spaces.push_back(space);
1044      }
1045    }
1046  }
1047  // Unlikely to sweep a significant number of non-movable objects, so we do these after the
1048  // other alloc spaces as an optimization.
1049  if (non_moving_space != nullptr) {
1050    sweep_spaces.push_back(non_moving_space);
1051  }
1052  // Start by sweeping the continuous spaces.
1053  for (space::ContinuousSpace* space : sweep_spaces) {
1054    space::AllocSpace* alloc_space = space->AsAllocSpace();
1055    accounting::ContinuousSpaceBitmap* live_bitmap = space->GetLiveBitmap();
1056    accounting::ContinuousSpaceBitmap* mark_bitmap = space->GetMarkBitmap();
1057    if (swap_bitmaps) {
1058      std::swap(live_bitmap, mark_bitmap);
1059    }
1060    Object** out = objects;
1061    for (size_t i = 0; i < count; ++i) {
1062      Object* obj = objects[i];
1063      if (kUseThreadLocalAllocationStack && obj == nullptr) {
1064        continue;
1065      }
1066      if (space->HasAddress(obj)) {
1067        // This object is in the space, remove it from the array and add it to the sweep buffer
1068        // if needed.
1069        if (!mark_bitmap->Test(obj)) {
1070          if (chunk_free_pos >= kSweepArrayChunkFreeSize) {
1071            timings_.StartSplit("FreeList");
1072            freed_objects += chunk_free_pos;
1073            freed_bytes += alloc_space->FreeList(self, chunk_free_pos, chunk_free_buffer);
1074            timings_.EndSplit();
1075            chunk_free_pos = 0;
1076          }
1077          chunk_free_buffer[chunk_free_pos++] = obj;
1078        }
1079      } else {
1080        *(out++) = obj;
1081      }
1082    }
1083    if (chunk_free_pos > 0) {
1084      timings_.StartSplit("FreeList");
1085      freed_objects += chunk_free_pos;
1086      freed_bytes += alloc_space->FreeList(self, chunk_free_pos, chunk_free_buffer);
1087      timings_.EndSplit();
1088      chunk_free_pos = 0;
1089    }
1090    // All of the references which the space contained are no longer in the allocation stack;
1091    // update the count.
1092    count = out - objects;
1093  }
1094  // Handle the large object space.
1095  space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
1096  accounting::LargeObjectBitmap* large_live_objects = large_object_space->GetLiveBitmap();
1097  accounting::LargeObjectBitmap* large_mark_objects = large_object_space->GetMarkBitmap();
1098  if (swap_bitmaps) {
1099    std::swap(large_live_objects, large_mark_objects);
1100  }
1101  for (size_t i = 0; i < count; ++i) {
1102    Object* obj = objects[i];
1103    // Handle large objects.
1104    if (kUseThreadLocalAllocationStack && obj == nullptr) {
1105      continue;
1106    }
1107    if (!large_mark_objects->Test(obj)) {
1108      ++freed_large_objects;
1109      freed_large_object_bytes += large_object_space->Free(self, obj);
1110    }
1111  }
1112  timings_.EndSplit();
1113
1114  timings_.StartSplit("RecordFree");
1115  VLOG(heap) << "Freed " << freed_objects << "/" << count << " objects with size "
1116             << PrettySize(freed_bytes);
1117  RecordFree(freed_objects, freed_bytes);
1118  RecordFreeLargeObjects(freed_large_objects, freed_large_object_bytes);
1119  timings_.EndSplit();
1120
1121  timings_.StartSplit("ResetStack");
1122  allocations->Reset();
1123  timings_.EndSplit();
1124}
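
// The chunk_free_buffer above amortizes allocator overhead: unmarked objects are batched and
// released kSweepArrayChunkFreeSize (1024) at a time through FreeList(), so sweeping, say,
// 100,000 dead objects costs on the order of one hundred FreeList() calls plus the final flush
// rather than 100,000 individual frees, and the "FreeList" timing split brackets only the actual
// freeing work.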
1125
1126void MarkSweep::Sweep(bool swap_bitmaps) {
1127  // Ensure that nobody inserted items in the live stack after we swapped the stacks.
1128  CHECK_GE(live_stack_freeze_size_, GetHeap()->GetLiveStack()->Size());
1129  // Mark everything allocated since the last GC as live so that we can sweep concurrently,
1130  // knowing that new allocations won't be marked as live.
1131  timings_.StartSplit("MarkStackAsLive");
1132  accounting::ObjectStack* live_stack = heap_->GetLiveStack();
1133  heap_->MarkAllocStackAsLive(live_stack);
1134  live_stack->Reset();
1135  timings_.EndSplit();
1136
1137  DCHECK(mark_stack_->IsEmpty());
1138  for (const auto& space : GetHeap()->GetContinuousSpaces()) {
1139    if (space->IsContinuousMemMapAllocSpace()) {
1140      space::ContinuousMemMapAllocSpace* alloc_space = space->AsContinuousMemMapAllocSpace();
1141      TimingLogger::ScopedSplit split(
1142          alloc_space->IsZygoteSpace() ? "SweepZygoteSpace" : "SweepMallocSpace", &timings_);
1143      size_t freed_objects = 0;
1144      size_t freed_bytes = 0;
1145      alloc_space->Sweep(swap_bitmaps, &freed_objects, &freed_bytes);
1146      RecordFree(freed_objects, freed_bytes);
1147    }
1148  }
1149  SweepLargeObjects(swap_bitmaps);
1150}
1151
1152void MarkSweep::SweepLargeObjects(bool swap_bitmaps) {
1153  TimingLogger::ScopedSplit split("SweepLargeObjects", &timings_);
1154  size_t freed_objects = 0;
1155  size_t freed_bytes = 0;
1156  heap_->GetLargeObjectsSpace()->Sweep(swap_bitmaps, &freed_objects, &freed_bytes);
1157  RecordFreeLargeObjects(freed_objects, freed_bytes);
1158}
1159
1160// Process the "referent" field in a java.lang.ref.Reference.  If the referent has not yet been
1161// marked, put it on the appropriate list in the heap for later processing.
1162void MarkSweep::DelayReferenceReferent(mirror::Class* klass, mirror::Reference* ref) {
1163  DCHECK(klass != nullptr);
1164  if (kCountJavaLangRefs) {
1165    ++reference_count_;
1166  }
1167  heap_->GetReferenceProcessor()->DelayReferenceReferent(klass, ref, IsMarkedCallback, this);
1168}
1169
1170class MarkObjectVisitor {
1171 public:
1172  explicit MarkObjectVisitor(MarkSweep* const mark_sweep) ALWAYS_INLINE : mark_sweep_(mark_sweep) {
1173  }
1174
1175  void operator()(Object* obj, MemberOffset offset, bool /* is_static */) const
1176      ALWAYS_INLINE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
1177      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
1178    if (kCheckLocks) {
1179      Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
1180      Locks::heap_bitmap_lock_->AssertExclusiveHeld(Thread::Current());
1181    }
1182    mark_sweep_->MarkObject(obj->GetFieldObject<mirror::Object>(offset));
1183  }
1184
1185 private:
1186  MarkSweep* const mark_sweep_;
1187};
1188
1189// Scans an object reference.  Determines the type of the reference
1190// and dispatches to a specialized scanning routine.
1191void MarkSweep::ScanObject(Object* obj) {
1192  MarkObjectVisitor mark_visitor(this);
1193  DelayReferenceReferentVisitor ref_visitor(this);
1194  ScanObjectVisit(obj, mark_visitor, ref_visitor);
1195}
1196
1197void MarkSweep::ProcessMarkStackCallback(void* arg) {
1198  reinterpret_cast<MarkSweep*>(arg)->ProcessMarkStack(false);
1199}
1200
1201void MarkSweep::ProcessMarkStackParallel(size_t thread_count) {
1202  Thread* self = Thread::Current();
1203  ThreadPool* thread_pool = GetHeap()->GetThreadPool();
1204  const size_t chunk_size = std::min(mark_stack_->Size() / thread_count + 1,
1205                                     static_cast<size_t>(MarkStackTask<false>::kMaxSize));
1206  CHECK_GT(chunk_size, 0U);
1207  // Split the current mark stack up into work tasks.
1208  for (mirror::Object **it = mark_stack_->Begin(), **end = mark_stack_->End(); it < end; ) {
1209    const size_t delta = std::min(static_cast<size_t>(end - it), chunk_size);
1210    thread_pool->AddTask(self, new MarkStackTask<false>(thread_pool, this, delta, it));
1211    it += delta;
1212  }
1213  thread_pool->SetMaxActiveWorkers(thread_count - 1);
1214  thread_pool->StartWorkers(self);
1215  thread_pool->Wait(self, true, true);
1216  thread_pool->StopWorkers(self);
1217  mark_stack_->Reset();
1218  CHECK_EQ(work_chunks_created_.LoadSequentiallyConsistent(),
1219           work_chunks_deleted_.LoadSequentiallyConsistent())
1220      << " some of the work chunks were leaked";
1221}
1222
1223// Scan anything that's on the mark stack.
1224void MarkSweep::ProcessMarkStack(bool paused) {
1225  timings_.StartSplit(paused ? "(Paused)ProcessMarkStack" : "ProcessMarkStack");
1226  size_t thread_count = GetThreadCount(paused);
1227  if (kParallelProcessMarkStack && thread_count > 1 &&
1228      mark_stack_->Size() >= kMinimumParallelMarkStackSize) {
1229    ProcessMarkStackParallel(thread_count);
1230  } else {
1231    // TODO: Tune this.
1232    static const size_t kFifoSize = 4;
1233    BoundedFifoPowerOfTwo<Object*, kFifoSize> prefetch_fifo;
1234    for (;;) {
1235      Object* obj = NULL;
1236      if (kUseMarkStackPrefetch) {
1237        while (!mark_stack_->IsEmpty() && prefetch_fifo.size() < kFifoSize) {
1238          Object* obj = mark_stack_->PopBack();
1239          DCHECK(obj != NULL);
1240          __builtin_prefetch(obj);
1241          prefetch_fifo.push_back(obj);
1242        }
1243        if (prefetch_fifo.empty()) {
1244          break;
1245        }
1246        obj = prefetch_fifo.front();
1247        prefetch_fifo.pop_front();
1248      } else {
1249        if (mark_stack_->IsEmpty()) {
1250          break;
1251        }
1252        obj = mark_stack_->PopBack();
1253      }
1254      DCHECK(obj != nullptr);
1255      ScanObject(obj);
1256    }
1257  }
1258  timings_.EndSplit();
1259}
1260
1261inline bool MarkSweep::IsMarked(const Object* object) const
1262    SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
1263  if (immune_region_.ContainsObject(object)) {
1264    return true;
1265  }
1266  if (current_space_bitmap_->HasAddress(object)) {
1267    return current_space_bitmap_->Test(object);
1268  }
1269  return mark_bitmap_->Test(object);
1270}
1271
1272void MarkSweep::FinishPhase() {
1273  TimingLogger::ScopedSplit split("FinishPhase", &timings_);
1274  if (kCountScannedTypes) {
1275    VLOG(gc) << "MarkSweep scanned classes=" << class_count_.LoadRelaxed()
1276        << " arrays=" << array_count_.LoadRelaxed() << " other=" << other_count_.LoadRelaxed();
1277  }
1278  if (kCountTasks) {
1279    VLOG(gc) << "Total number of work chunks allocated: " << work_chunks_created_.LoadRelaxed();
1280  }
1281  if (kMeasureOverhead) {
1282    VLOG(gc) << "Overhead time " << PrettyDuration(overhead_time_.LoadRelaxed());
1283  }
1284  if (kProfileLargeObjects) {
1285    VLOG(gc) << "Large objects tested " << large_object_test_.LoadRelaxed()
1286        << " marked " << large_object_mark_.LoadRelaxed();
1287  }
1288  if (kCountJavaLangRefs) {
1289    VLOG(gc) << "References scanned " << reference_count_.LoadRelaxed();
1290  }
1291  if (kCountMarkedObjects) {
1292    VLOG(gc) << "Marked: null=" << mark_null_count_.LoadRelaxed()
1293        << " immune=" <<  mark_immune_count_.LoadRelaxed()
1294        << " fastpath=" << mark_fastpath_count_.LoadRelaxed()
1295        << " slowpath=" << mark_slowpath_count_.LoadRelaxed();
1296  }
1297  CHECK(mark_stack_->IsEmpty());  // Ensure that the mark stack is empty.
1298  mark_stack_->Reset();
1299  WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
1300  heap_->ClearMarkedObjects();
1301}
1302
1303void MarkSweep::RevokeAllThreadLocalBuffers() {
1304  if (kRevokeRosAllocThreadLocalBuffersAtCheckpoint && IsConcurrent()) {
1305    // If concurrent, rosalloc thread-local buffers are revoked at the
1306    // thread checkpoint. Bump pointer space thread-local buffers must
1307    // not be in use.
1308    GetHeap()->AssertAllBumpPointerSpaceThreadLocalBuffersAreRevoked();
1309  } else {
1310    timings_.StartSplit("(Paused)RevokeAllThreadLocalBuffers");
1311    GetHeap()->RevokeAllThreadLocalBuffers();
1312    timings_.EndSplit();
1313  }
1314}
1315
1316}  // namespace collector
1317}  // namespace gc
1318}  // namespace art
1319