mark_sweep.cc revision 08d1b5f2296c0f51507b8b443f4e39dfc161572c
1/*
2 * Copyright (C) 2011 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 *      http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17#include "mark_sweep.h"
18
19#include <atomic>
20#include <functional>
21#include <numeric>
22#include <climits>
23#include <vector>
24
25#define ATRACE_TAG ATRACE_TAG_DALVIK
26#include "cutils/trace.h"
27
28#include "base/bounded_fifo.h"
29#include "base/logging.h"
30#include "base/macros.h"
31#include "base/mutex-inl.h"
32#include "base/timing_logger.h"
33#include "gc/accounting/card_table-inl.h"
34#include "gc/accounting/heap_bitmap-inl.h"
35#include "gc/accounting/mod_union_table.h"
36#include "gc/accounting/space_bitmap-inl.h"
37#include "gc/heap.h"
38#include "gc/reference_processor.h"
39#include "gc/space/image_space.h"
40#include "gc/space/large_object_space.h"
41#include "gc/space/space-inl.h"
42#include "mark_sweep-inl.h"
43#include "mirror/object-inl.h"
44#include "runtime.h"
45#include "scoped_thread_state_change.h"
46#include "thread-inl.h"
47#include "thread_list.h"
48
49using ::art::mirror::Object;
50
51namespace art {
52namespace gc {
53namespace collector {
54
55// Performance options.
56static constexpr bool kUseRecursiveMark = false;
57static constexpr bool kUseMarkStackPrefetch = true;
58static constexpr size_t kSweepArrayChunkFreeSize = 1024;
59static constexpr bool kPreCleanCards = true;
60
61// Parallelism options.
62static constexpr bool kParallelCardScan = true;
63static constexpr bool kParallelRecursiveMark = true;
64// Don't attempt to parallelize mark stack processing unless the mark stack is at least n
65// elements. This is temporary until we reduce the overhead caused by allocating tasks, etc. Not
66// having this can add overhead in ProcessReferences since we may end up making many calls to
67// ProcessMarkStack with very small mark stacks.
68static constexpr size_t kMinimumParallelMarkStackSize = 128;
69static constexpr bool kParallelProcessMarkStack = true;
70
71// Profiling and information flags.
72static constexpr bool kProfileLargeObjects = false;
73static constexpr bool kMeasureOverhead = false;
74static constexpr bool kCountTasks = false;
75static constexpr bool kCountJavaLangRefs = false;
76static constexpr bool kCountMarkedObjects = false;
77
78// Turn off kCheckLocks when profiling the GC since it slows the GC down by up to 40%.
79static constexpr bool kCheckLocks = kDebugLocking;
80static constexpr bool kVerifyRootsMarked = kIsDebugBuild;
81
82// If true, revoke the rosalloc thread-local buffers at the
83// checkpoint, as opposed to during the pause.
84static constexpr bool kRevokeRosAllocThreadLocalBuffersAtCheckpoint = true;
85
86void MarkSweep::BindBitmaps() {
87  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
88  WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
89  // Mark all of the spaces we never collect as immune.
90  for (const auto& space : GetHeap()->GetContinuousSpaces()) {
91    if (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyNeverCollect) {
92      CHECK(immune_region_.AddContinuousSpace(space)) << "Failed to add space " << *space;
93    }
94  }
95}
96
97MarkSweep::MarkSweep(Heap* heap, bool is_concurrent, const std::string& name_prefix)
98    : GarbageCollector(heap,
99                       name_prefix +
100                       (is_concurrent ? "concurrent mark sweep": "mark sweep")),
101      current_space_bitmap_(nullptr), mark_bitmap_(nullptr), mark_stack_(nullptr),
102      gc_barrier_(new Barrier(0)),
103      mark_stack_lock_("mark sweep mark stack lock", kMarkSweepMarkStackLock),
104      is_concurrent_(is_concurrent), live_stack_freeze_size_(0) {
105  std::string error_msg;
106  MemMap* mem_map = MemMap::MapAnonymous(
107      "mark sweep sweep array free buffer", nullptr,
108      RoundUp(kSweepArrayChunkFreeSize * sizeof(mirror::Object*), kPageSize),
109      PROT_READ | PROT_WRITE, false, false, &error_msg);
110  CHECK(mem_map != nullptr) << "Couldn't allocate sweep array free buffer: " << error_msg;
111  sweep_array_free_buffer_mem_map_.reset(mem_map);
112}
113
114void MarkSweep::InitializePhase() {
115  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
116  mark_stack_ = heap_->GetMarkStack();
117  DCHECK(mark_stack_ != nullptr);
118  immune_region_.Reset();
119  class_count_.StoreRelaxed(0);
120  array_count_.StoreRelaxed(0);
121  other_count_.StoreRelaxed(0);
122  large_object_test_.StoreRelaxed(0);
123  large_object_mark_.StoreRelaxed(0);
124  overhead_time_.StoreRelaxed(0);
125  work_chunks_created_.StoreRelaxed(0);
126  work_chunks_deleted_.StoreRelaxed(0);
127  reference_count_.StoreRelaxed(0);
128  mark_null_count_.StoreRelaxed(0);
129  mark_immune_count_.StoreRelaxed(0);
130  mark_fastpath_count_.StoreRelaxed(0);
131  mark_slowpath_count_.StoreRelaxed(0);
132  {
133    // TODO: I don't think we should need heap bitmap lock to Get the mark bitmap.
134    ReaderMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
135    mark_bitmap_ = heap_->GetMarkBitmap();
136  }
137  if (!GetCurrentIteration()->GetClearSoftReferences()) {
138    // Always clear soft references if this is a non-sticky collection.
139    GetCurrentIteration()->SetClearSoftReferences(GetGcType() != collector::kGcTypeSticky);
140  }
141}
142
143void MarkSweep::RunPhases() {
144  Thread* self = Thread::Current();
145  InitializePhase();
146  Locks::mutator_lock_->AssertNotHeld(self);
147  if (IsConcurrent()) {
148    GetHeap()->PreGcVerification(this);
149    {
150      ReaderMutexLock mu(self, *Locks::mutator_lock_);
151      MarkingPhase();
152    }
153    ScopedPause pause(this);
154    GetHeap()->PrePauseRosAllocVerification(this);
155    PausePhase();
156    RevokeAllThreadLocalBuffers();
157  } else {
158    ScopedPause pause(this);
159    GetHeap()->PreGcVerificationPaused(this);
160    MarkingPhase();
161    GetHeap()->PrePauseRosAllocVerification(this);
162    PausePhase();
163    RevokeAllThreadLocalBuffers();
164  }
165  {
166    // Sweeping is always done concurrently, even for non-concurrent mark sweep.
167    ReaderMutexLock mu(self, *Locks::mutator_lock_);
168    ReclaimPhase();
169  }
170  GetHeap()->PostGcVerification(this);
171  FinishPhase();
172}
173
174void MarkSweep::ProcessReferences(Thread* self) {
175  WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
176  GetHeap()->GetReferenceProcessor()->ProcessReferences(
177      true, GetTimings(), GetCurrentIteration()->GetClearSoftReferences(),
178      &HeapReferenceMarkedCallback, &MarkObjectCallback, &ProcessMarkStackCallback, this);
179}
180
181void MarkSweep::PausePhase() {
182  TimingLogger::ScopedTiming t("(Paused)PausePhase", GetTimings());
183  Thread* self = Thread::Current();
184  Locks::mutator_lock_->AssertExclusiveHeld(self);
185  if (IsConcurrent()) {
186    // Handle the dirty objects if we are a concurrent GC.
187    WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
188    // Re-mark root set.
189    ReMarkRoots();
190    // Scan dirty objects; this is needed since mutators may have dirtied cards during marking.
191    RecursiveMarkDirtyObjects(true, accounting::CardTable::kCardDirty);
192  }
193  {
194    TimingLogger::ScopedTiming t2("SwapStacks", GetTimings());
195    WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
196    heap_->SwapStacks(self);
197    live_stack_freeze_size_ = heap_->GetLiveStack()->Size();
198    // Need to revoke all the thread local allocation stacks since we just swapped the allocation
199    // stacks and don't want anybody to allocate into the live stack.
200    RevokeAllThreadLocalAllocationStacks(self);
201  }
202  heap_->PreSweepingGcVerification(this);
203  // Disallow new system weaks to prevent a race which occurs when someone adds a new system
204  // weak before we sweep them. Since this new system weak may not be marked, the GC may
205  // incorrectly sweep it. This also fixes a race where interning may attempt to return a strong
206  // reference to a string that is about to be swept.
207  Runtime::Current()->DisallowNewSystemWeaks();
208  // Enable the reference processing slow path. This needs to be done with mutators paused since
209  // there is no lock in the GetReferent fast path.
210  GetHeap()->GetReferenceProcessor()->EnableSlowPath();
211}
212
213void MarkSweep::PreCleanCards() {
214  // Don't do this for non-concurrent GCs since they don't have any dirty cards.
215  if (kPreCleanCards && IsConcurrent()) {
216    TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
217    Thread* self = Thread::Current();
218    CHECK(!Locks::mutator_lock_->IsExclusiveHeld(self));
219    // Process dirty cards and add them to the mod-union tables; this also ages the cards.
220    heap_->ProcessCards(GetTimings(), false, true, false);
221    // The checkpoint root marking is required to avoid a race condition which occurs if the
222    // following happens during a reference write:
223    // 1. mutator dirties the card (write barrier)
224    // 2. GC ages the card (the above ProcessCards call)
225    // 3. GC scans the object (the RecursiveMarkDirtyObjects call below)
226    // 4. mutator writes the value (corresponding to the write barrier in 1.)
227    // This causes the GC to age the card but not necessarily mark the reference which the mutator
228    // wrote into the object stored in the card.
229    // Having the checkpoint fixes this issue since it ensures that the card mark and the
230    // reference write are visible to the GC before the card is scanned (this is due to locks being
231    // acquired / released in the checkpoint code).
232    // The other roots are also marked to help reduce the pause.
233    MarkRootsCheckpoint(self, false);
234    MarkNonThreadRoots();
235    MarkConcurrentRoots(
236        static_cast<VisitRootFlags>(kVisitRootFlagClearRootLog | kVisitRootFlagNewRoots));
237    // Process the newly aged cards.
238    RecursiveMarkDirtyObjects(false, accounting::CardTable::kCardDirty - 1);
239    // TODO: Empty allocation stack to reduce the number of objects we need to test / mark as live
240    // in the next GC.
241  }
242}
243
244void MarkSweep::RevokeAllThreadLocalAllocationStacks(Thread* self) {
245  if (kUseThreadLocalAllocationStack) {
246    TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
247    Locks::mutator_lock_->AssertExclusiveHeld(self);
248    heap_->RevokeAllThreadLocalAllocationStacks(self);
249  }
250}
251
252void MarkSweep::MarkingPhase() {
253  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
254  Thread* self = Thread::Current();
255  BindBitmaps();
256  FindDefaultSpaceBitmap();
257  // Process dirty cards and add dirty cards to mod union tables.
258  // If the GC type is non-sticky, then we just clear the cards instead of aging them.
259  heap_->ProcessCards(GetTimings(), false, true, GetGcType() != kGcTypeSticky);
260  WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
261  MarkRoots(self);
262  MarkReachableObjects();
263  // Pre-clean dirtied cards to reduce pauses.
264  PreCleanCards();
265}
266
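// Marks the objects referenced by the mod-union table of each immune (image or zygote) space,
// i.e. the references from the immune space into the rest of the heap.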
267void MarkSweep::UpdateAndMarkModUnion() {
268  for (const auto& space : heap_->GetContinuousSpaces()) {
269    if (immune_region_.ContainsSpace(space)) {
270      const char* name = space->IsZygoteSpace() ? "UpdateAndMarkZygoteModUnionTable" :
271          "UpdateAndMarkImageModUnionTable";
272      TimingLogger::ScopedTiming t(name, GetTimings());
273      accounting::ModUnionTable* mod_union_table = heap_->FindModUnionTableFromSpace(space);
274      CHECK(mod_union_table != nullptr);
275      mod_union_table->UpdateAndMarkReferences(MarkHeapReferenceCallback, this);
276    }
277  }
278}
279
280void MarkSweep::MarkReachableObjects() {
281  UpdateAndMarkModUnion();
282  // Recursively mark all the non-image bits set in the mark bitmap.
283  RecursiveMark();
284}
285
286void MarkSweep::ReclaimPhase() {
287  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
288  Thread* self = Thread::Current();
289  // Process the references concurrently.
290  ProcessReferences(self);
291  SweepSystemWeaks(self);
292  Runtime::Current()->AllowNewSystemWeaks();
293  {
294    WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
295    GetHeap()->RecordFreeRevoke();
296    // Reclaim unmarked objects.
297    Sweep(false);
298    // Swap the live and mark bitmaps for each space that we modified. This is an
299    // optimization that enables us to not clear live bits inside of the sweep. Only swaps unbound
300    // bitmaps.
301    SwapBitmaps();
302    // Unbind the live and mark bitmaps.
303    GetHeap()->UnBindBitmaps();
304  }
305}
306
307void MarkSweep::FindDefaultSpaceBitmap() {
308  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
309  for (const auto& space : GetHeap()->GetContinuousSpaces()) {
310    accounting::ContinuousSpaceBitmap* bitmap = space->GetMarkBitmap();
311    // We want to have the main space rather than the non-moving space if possible.
312    if (bitmap != nullptr &&
313        space->GetGcRetentionPolicy() == space::kGcRetentionPolicyAlwaysCollect) {
314      current_space_bitmap_ = bitmap;
315      // If this is not the non-moving space, exit the loop early since this bitmap is good enough.
316      if (space != heap_->GetNonMovingSpace()) {
317        break;
318      }
319    }
320  }
321  CHECK(current_space_bitmap_ != nullptr) << "Could not find a default mark bitmap\n"
322      << heap_->DumpSpaces();
323}
324
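// Doubles the capacity of the mark stack. The contents are preserved by copying them aside and
// pushing them back after the resize.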
325void MarkSweep::ExpandMarkStack() {
326  ResizeMarkStack(mark_stack_->Capacity() * 2);
327}
328
329void MarkSweep::ResizeMarkStack(size_t new_size) {
330  // Rare case, no need to have Thread::Current be a parameter.
331  if (UNLIKELY(mark_stack_->Size() < mark_stack_->Capacity())) {
332    // Someone else acquired the lock and expanded the mark stack before us.
333    return;
334  }
335  std::vector<StackReference<Object>> temp(mark_stack_->Begin(), mark_stack_->End());
336  CHECK_LE(mark_stack_->Size(), new_size);
337  mark_stack_->Resize(new_size);
338  for (auto& obj : temp) {
339    mark_stack_->PushBack(obj.AsMirrorPtr());
340  }
341}
342
343inline void MarkSweep::MarkObjectNonNullParallel(Object* obj) {
344  DCHECK(obj != nullptr);
345  if (MarkObjectParallel(obj)) {
346    MutexLock mu(Thread::Current(), mark_stack_lock_);
347    if (UNLIKELY(mark_stack_->Size() >= mark_stack_->Capacity())) {
348      ExpandMarkStack();
349    }
350    // The object must be pushed on to the mark stack.
351    mark_stack_->PushBack(obj);
352  }
353}
354
355mirror::Object* MarkSweep::MarkObjectCallback(mirror::Object* obj, void* arg) {
356  MarkSweep* mark_sweep = reinterpret_cast<MarkSweep*>(arg);
357  mark_sweep->MarkObject(obj);
358  return obj;
359}
360
361void MarkSweep::MarkHeapReferenceCallback(mirror::HeapReference<mirror::Object>* ref, void* arg) {
362  reinterpret_cast<MarkSweep*>(arg)->MarkObject(ref->AsMirrorPtr());
363}
364
365bool MarkSweep::HeapReferenceMarkedCallback(mirror::HeapReference<mirror::Object>* ref, void* arg) {
366  return reinterpret_cast<MarkSweep*>(arg)->IsMarked(ref->AsMirrorPtr());
367}
368
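// Slow path for marking an object that is not covered by the current space bitmap. If the object
// turns out not to be contained in any space, dumps diagnostics about the holder object and the
// roots, then aborts.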
369class MarkSweepMarkObjectSlowPath {
370 public:
371  explicit MarkSweepMarkObjectSlowPath(MarkSweep* mark_sweep, Object* holder = nullptr,
372                                       MemberOffset offset = MemberOffset(0))
373      : mark_sweep_(mark_sweep), holder_(holder), offset_(offset) {
374  }
375
376  void operator()(const Object* obj) const ALWAYS_INLINE NO_THREAD_SAFETY_ANALYSIS {
377    if (kProfileLargeObjects) {
378      // TODO: Differentiate between marking and testing somehow.
379      ++mark_sweep_->large_object_test_;
380      ++mark_sweep_->large_object_mark_;
381    }
382    space::LargeObjectSpace* large_object_space = mark_sweep_->GetHeap()->GetLargeObjectsSpace();
383    if (UNLIKELY(obj == nullptr || !IsAligned<kPageSize>(obj) ||
384                 (kIsDebugBuild && large_object_space != nullptr &&
385                     !large_object_space->Contains(obj)))) {
386      LOG(INTERNAL_FATAL) << "Tried to mark " << obj << " not contained by any spaces";
387      if (holder_ != nullptr) {
388        size_t holder_size = holder_->SizeOf();
389        ArtField* field = holder_->FindFieldByOffset(offset_);
390        LOG(INTERNAL_FATAL) << "Field info: "
391                            << " holder=" << holder_
392                            << " holder is "
393                            << (mark_sweep_->GetHeap()->IsLiveObjectLocked(holder_)
394                                ? "alive" : "dead")
395                            << " holder_size=" << holder_size
396                            << " holder_type=" << PrettyTypeOf(holder_)
397                            << " offset=" << offset_.Uint32Value()
398                            << " field=" << (field != nullptr ? field->GetName() : "nullptr")
399                            << " field_type="
400                            << (field != nullptr ? field->GetTypeDescriptor() : "")
401                            << " first_ref_field_offset="
402                            << (holder_->IsClass()
403                                ? holder_->AsClass()->GetFirstReferenceStaticFieldOffset()
404                                : holder_->GetClass()->GetFirstReferenceInstanceFieldOffset())
405                            << " num_of_ref_fields="
406                            << (holder_->IsClass()
407                                ? holder_->AsClass()->NumReferenceStaticFields()
408                                : holder_->GetClass()->NumReferenceInstanceFields())
409                            << "\n";
410        // Print the memory content of the holder.
411        for (size_t i = 0; i < holder_size / sizeof(uint32_t); ++i) {
412          uint32_t* p = reinterpret_cast<uint32_t*>(holder_);
413          LOG(INTERNAL_FATAL) << &p[i] << ": " << "holder+" << (i * sizeof(uint32_t)) << " = "
414                              << std::hex << p[i];
415        }
416      }
417      PrintFileToLog("/proc/self/maps", LogSeverity::INTERNAL_FATAL);
418      MemMap::DumpMaps(LOG(INTERNAL_FATAL), true);
419      {
420        LOG(INTERNAL_FATAL) << "Attempting see if it's a bad root";
421        Thread* self = Thread::Current();
422        if (Locks::mutator_lock_->IsExclusiveHeld(self)) {
423          mark_sweep_->VerifyRoots();
424        } else {
425          const bool heap_bitmap_exclusive_locked =
426              Locks::heap_bitmap_lock_->IsExclusiveHeld(self);
427          if (heap_bitmap_exclusive_locked) {
428            Locks::heap_bitmap_lock_->ExclusiveUnlock(self);
429          }
430          Locks::mutator_lock_->SharedUnlock(self);
431          ThreadList* tl = Runtime::Current()->GetThreadList();
432          tl->SuspendAll(__FUNCTION__);
433          mark_sweep_->VerifyRoots();
434          tl->ResumeAll();
435          Locks::mutator_lock_->SharedLock(self);
436          if (heap_bitmap_exclusive_locked) {
437            Locks::heap_bitmap_lock_->ExclusiveLock(self);
438          }
439        }
440      }
441      LOG(FATAL) << "Can't mark invalid object";
442    }
443  }
444
445 private:
446  MarkSweep* const mark_sweep_;
447  mirror::Object* const holder_;
448  MemberOffset offset_;
449};
450
451inline void MarkSweep::MarkObjectNonNull(Object* obj, Object* holder, MemberOffset offset) {
452  DCHECK(obj != nullptr);
453  if (kUseBakerOrBrooksReadBarrier) {
454    // Verify all the objects have the correct pointer installed.
455    obj->AssertReadBarrierPointer();
456  }
457  if (immune_region_.ContainsObject(obj)) {
458    if (kCountMarkedObjects) {
459      ++mark_immune_count_;
460    }
461    DCHECK(mark_bitmap_->Test(obj));
462  } else if (LIKELY(current_space_bitmap_->HasAddress(obj))) {
463    if (kCountMarkedObjects) {
464      ++mark_fastpath_count_;
465    }
466    if (UNLIKELY(!current_space_bitmap_->Set(obj))) {
467      PushOnMarkStack(obj);  // This object was not previously marked.
468    }
469  } else {
470    if (kCountMarkedObjects) {
471      ++mark_slowpath_count_;
472    }
473    MarkSweepMarkObjectSlowPath visitor(this, holder, offset);
474    // TODO: We already know that the object is not in the current_space_bitmap_ but MarkBitmap::Set
475    // will check again.
476    if (!mark_bitmap_->Set(obj, visitor)) {
477      PushOnMarkStack(obj);  // Was not already marked, push.
478    }
479  }
480}
481
482inline void MarkSweep::PushOnMarkStack(Object* obj) {
483  if (UNLIKELY(mark_stack_->Size() >= mark_stack_->Capacity())) {
484    // Lock is not needed but is here anyway to please annotalysis.
485    MutexLock mu(Thread::Current(), mark_stack_lock_);
486    ExpandMarkStack();
487  }
488  // The object must be pushed on to the mark stack.
489  mark_stack_->PushBack(obj);
490}
491
492inline bool MarkSweep::MarkObjectParallel(const Object* obj) {
493  DCHECK(obj != nullptr);
494  if (kUseBakerOrBrooksReadBarrier) {
495    // Verify all the objects have the correct pointer installed.
496    obj->AssertReadBarrierPointer();
497  }
498  if (immune_region_.ContainsObject(obj)) {
499    DCHECK(IsMarked(obj));
500    return false;
501  }
502  // Try to take advantage of locality of references within a space; failing that, find the space
503  // the hard way.
504  accounting::ContinuousSpaceBitmap* object_bitmap = current_space_bitmap_;
505  if (LIKELY(object_bitmap->HasAddress(obj))) {
506    return !object_bitmap->AtomicTestAndSet(obj);
507  }
508  MarkSweepMarkObjectSlowPath visitor(this);
509  return !mark_bitmap_->AtomicTestAndSet(obj, visitor);
510}
511
512// Used to mark objects when processing the mark stack. If an object is null, it is not marked.
513inline void MarkSweep::MarkObject(Object* obj, Object* holder, MemberOffset offset) {
514  if (obj != nullptr) {
515    MarkObjectNonNull(obj, holder, offset);
516  } else if (kCountMarkedObjects) {
517    ++mark_null_count_;
518  }
519}
520
521class VerifyRootMarkedVisitor : public SingleRootVisitor {
522 public:
523  explicit VerifyRootMarkedVisitor(MarkSweep* collector) : collector_(collector) { }
524
525  void VisitRoot(mirror::Object* root, const RootInfo& info) OVERRIDE
526      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
527    CHECK(collector_->IsMarked(root)) << info.ToString();
528  }
529
530 private:
531  MarkSweep* const collector_;
532};
533
534void MarkSweep::VisitRoots(mirror::Object*** roots, size_t count,
535                           const RootInfo& info ATTRIBUTE_UNUSED) {
536  for (size_t i = 0; i < count; ++i) {
537    MarkObjectNonNull(*roots[i]);
538  }
539}
540
541void MarkSweep::VisitRoots(mirror::CompressedReference<mirror::Object>** roots, size_t count,
542                           const RootInfo& info ATTRIBUTE_UNUSED) {
543  for (size_t i = 0; i < count; ++i) {
544    MarkObjectNonNull(roots[i]->AsMirrorPtr());
545  }
546}
547
548class VerifyRootVisitor : public SingleRootVisitor {
549 public:
550  void VisitRoot(mirror::Object* root, const RootInfo& info) OVERRIDE
551      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
552    // See if the root is on any space bitmap.
553    auto* heap = Runtime::Current()->GetHeap();
554    if (heap->GetLiveBitmap()->GetContinuousSpaceBitmap(root) == nullptr) {
555      space::LargeObjectSpace* large_object_space = heap->GetLargeObjectsSpace();
556      if (large_object_space != nullptr && !large_object_space->Contains(root)) {
557        LOG(INTERNAL_FATAL) << "Found invalid root: " << root << " " << info;
558      }
559    }
560  }
561};
562
563void MarkSweep::VerifyRoots() {
564  VerifyRootVisitor visitor;
565  Runtime::Current()->GetThreadList()->VisitRoots(&visitor);
566}
567
568void MarkSweep::MarkRoots(Thread* self) {
569  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
570  if (Locks::mutator_lock_->IsExclusiveHeld(self)) {
571    // If we exclusively hold the mutator lock, all threads must be suspended.
572    Runtime::Current()->VisitRoots(this);
573    RevokeAllThreadLocalAllocationStacks(self);
574  } else {
575    MarkRootsCheckpoint(self, kRevokeRosAllocThreadLocalBuffersAtCheckpoint);
576    // At this point the live stack should no longer have any mutators which push into it.
577    MarkNonThreadRoots();
578    MarkConcurrentRoots(
579        static_cast<VisitRootFlags>(kVisitRootFlagAllRoots | kVisitRootFlagStartLoggingNewRoots));
580  }
581}
582
583void MarkSweep::MarkNonThreadRoots() {
584  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
585  Runtime::Current()->VisitNonThreadRoots(this);
586}
587
588void MarkSweep::MarkConcurrentRoots(VisitRootFlags flags) {
589  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
590  // Visit all runtime roots and clear dirty flags.
591  Runtime::Current()->VisitConcurrentRoots(this, flags);
592}
593
594class ScanObjectVisitor {
595 public:
596  explicit ScanObjectVisitor(MarkSweep* const mark_sweep) ALWAYS_INLINE
597      : mark_sweep_(mark_sweep) {}
598
599  void operator()(Object* obj) const ALWAYS_INLINE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
600      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
601    if (kCheckLocks) {
602      Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
603      Locks::heap_bitmap_lock_->AssertExclusiveHeld(Thread::Current());
604    }
605    mark_sweep_->ScanObject(obj);
606  }
607
608 private:
609  MarkSweep* const mark_sweep_;
610};
611
612class DelayReferenceReferentVisitor {
613 public:
614  explicit DelayReferenceReferentVisitor(MarkSweep* collector) : collector_(collector) {
615  }
616
617  void operator()(mirror::Class* klass, mirror::Reference* ref) const
618      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
619      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
620    collector_->DelayReferenceReferent(klass, ref);
621  }
622
623 private:
624  MarkSweep* const collector_;
625};
626
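// Task that drains a thread-local chunk of the mark stack. Each task owns a fixed-size local mark
// stack; when it overflows, half of the entries are handed off to the thread pool as a new
// MarkStackTask (see MarkStackPush).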
627template <bool kUseFinger = false>
628class MarkStackTask : public Task {
629 public:
630  MarkStackTask(ThreadPool* thread_pool, MarkSweep* mark_sweep, size_t mark_stack_size,
631                StackReference<Object>* mark_stack)
632      : mark_sweep_(mark_sweep),
633        thread_pool_(thread_pool),
634        mark_stack_pos_(mark_stack_size) {
635    // We may have to copy part of an existing mark stack when another mark stack overflows.
636    if (mark_stack_size != 0) {
637      DCHECK(mark_stack != nullptr);
638      // TODO: Check performance?
639      std::copy(mark_stack, mark_stack + mark_stack_size, mark_stack_);
640    }
641    if (kCountTasks) {
642      ++mark_sweep_->work_chunks_created_;
643    }
644  }
645
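  // Maximum number of entries in each task's thread-local mark stack.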
646  static const size_t kMaxSize = 1 * KB;
647
648 protected:
649  class MarkObjectParallelVisitor {
650   public:
651    explicit MarkObjectParallelVisitor(MarkStackTask<kUseFinger>* chunk_task,
652                                       MarkSweep* mark_sweep) ALWAYS_INLINE
653            : chunk_task_(chunk_task), mark_sweep_(mark_sweep) {}
654
655    void operator()(Object* obj, MemberOffset offset, bool /* static */) const ALWAYS_INLINE
656        SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
657      mirror::Object* ref = obj->GetFieldObject<mirror::Object>(offset);
658      if (ref != nullptr && mark_sweep_->MarkObjectParallel(ref)) {
659        if (kUseFinger) {
660          std::atomic_thread_fence(std::memory_order_seq_cst);
661          if (reinterpret_cast<uintptr_t>(ref) >=
662              static_cast<uintptr_t>(mark_sweep_->atomic_finger_.LoadRelaxed())) {
663            return;
664          }
665        }
666        chunk_task_->MarkStackPush(ref);
667      }
668    }
669
670   private:
671    MarkStackTask<kUseFinger>* const chunk_task_;
672    MarkSweep* const mark_sweep_;
673  };
674
675  class ScanObjectParallelVisitor {
676   public:
677    explicit ScanObjectParallelVisitor(MarkStackTask<kUseFinger>* chunk_task) ALWAYS_INLINE
678        : chunk_task_(chunk_task) {}
679
680    // No thread safety analysis since multiple threads will use this visitor.
681    void operator()(Object* obj) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
682        EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
683      MarkSweep* const mark_sweep = chunk_task_->mark_sweep_;
684      MarkObjectParallelVisitor mark_visitor(chunk_task_, mark_sweep);
685      DelayReferenceReferentVisitor ref_visitor(mark_sweep);
686      mark_sweep->ScanObjectVisit(obj, mark_visitor, ref_visitor);
687    }
688
689   private:
690    MarkStackTask<kUseFinger>* const chunk_task_;
691  };
692
693  virtual ~MarkStackTask() {
694    // Make sure that we have cleared our mark stack.
695    DCHECK_EQ(mark_stack_pos_, 0U);
696    if (kCountTasks) {
697      ++mark_sweep_->work_chunks_deleted_;
698    }
699  }
700
701  MarkSweep* const mark_sweep_;
702  ThreadPool* const thread_pool_;
703  // Thread local mark stack for this task.
704  StackReference<Object> mark_stack_[kMaxSize];
705  // Mark stack position.
706  size_t mark_stack_pos_;
707
708  ALWAYS_INLINE void MarkStackPush(Object* obj) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
709    if (UNLIKELY(mark_stack_pos_ == kMaxSize)) {
710      // Mark stack overflow, give 1/2 the stack to the thread pool as a new work task.
711      mark_stack_pos_ /= 2;
712      auto* task = new MarkStackTask(thread_pool_, mark_sweep_, kMaxSize - mark_stack_pos_,
713                                     mark_stack_ + mark_stack_pos_);
714      thread_pool_->AddTask(Thread::Current(), task);
715    }
716    DCHECK(obj != nullptr);
717    DCHECK_LT(mark_stack_pos_, kMaxSize);
718    mark_stack_[mark_stack_pos_++].Assign(obj);
719  }
720
721  virtual void Finalize() {
722    delete this;
723  }
724
725  // Scans all of the objects in the task's local mark stack.
726  virtual void Run(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
727      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
728    UNUSED(self);
729    ScanObjectParallelVisitor visitor(this);
730    // TODO: Tune this.
731    static const size_t kFifoSize = 4;
732    BoundedFifoPowerOfTwo<Object*, kFifoSize> prefetch_fifo;
733    for (;;) {
734      Object* obj = nullptr;
735      if (kUseMarkStackPrefetch) {
736        while (mark_stack_pos_ != 0 && prefetch_fifo.size() < kFifoSize) {
737          Object* const mark_stack_obj = mark_stack_[--mark_stack_pos_].AsMirrorPtr();
738          DCHECK(mark_stack_obj != nullptr);
739          __builtin_prefetch(mark_stack_obj);
740          prefetch_fifo.push_back(mark_stack_obj);
741        }
742        if (UNLIKELY(prefetch_fifo.empty())) {
743          break;
744        }
745        obj = prefetch_fifo.front();
746        prefetch_fifo.pop_front();
747      } else {
748        if (UNLIKELY(mark_stack_pos_ == 0)) {
749          break;
750        }
751        obj = mark_stack_[--mark_stack_pos_].AsMirrorPtr();
752      }
753      DCHECK(obj != nullptr);
754      visitor(obj);
755    }
756  }
757};
758
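// Task that scans the objects on cards aged at least minimum_age between begin and end of a single
// space, marking the objects they reference, then drains its local mark stack. If clear_card is
// set, the scanned cards are cleared.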
759class CardScanTask : public MarkStackTask<false> {
760 public:
761  CardScanTask(ThreadPool* thread_pool, MarkSweep* mark_sweep,
762               accounting::ContinuousSpaceBitmap* bitmap,
763               uint8_t* begin, uint8_t* end, uint8_t minimum_age, size_t mark_stack_size,
764               StackReference<Object>* mark_stack_obj, bool clear_card)
765      : MarkStackTask<false>(thread_pool, mark_sweep, mark_stack_size, mark_stack_obj),
766        bitmap_(bitmap),
767        begin_(begin),
768        end_(end),
769        minimum_age_(minimum_age), clear_card_(clear_card) {
770  }
771
772 protected:
773  accounting::ContinuousSpaceBitmap* const bitmap_;
774  uint8_t* const begin_;
775  uint8_t* const end_;
776  const uint8_t minimum_age_;
777  const bool clear_card_;
778
779  virtual void Finalize() {
780    delete this;
781  }
782
783  virtual void Run(Thread* self) NO_THREAD_SAFETY_ANALYSIS {
784    ScanObjectParallelVisitor visitor(this);
785    accounting::CardTable* card_table = mark_sweep_->GetHeap()->GetCardTable();
786    size_t cards_scanned = clear_card_ ?
787                           card_table->Scan<true>(bitmap_, begin_, end_, visitor, minimum_age_) :
788                           card_table->Scan<false>(bitmap_, begin_, end_, visitor, minimum_age_);
789    VLOG(heap) << "Parallel scanning cards " << reinterpret_cast<void*>(begin_) << " - "
790        << reinterpret_cast<void*>(end_) << " = " << cards_scanned;
791    // Finish by emptying our local mark stack.
792    MarkStackTask::Run(self);
793  }
794};
795
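// Returns the number of threads used for GC work: 1 if there is no thread pool or if the heap does
// not care about pause times, otherwise the parallel (paused) or concurrent thread count plus one
// for the GC thread itself.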
796size_t MarkSweep::GetThreadCount(bool paused) const {
797  if (heap_->GetThreadPool() == nullptr || !heap_->CareAboutPauseTimes()) {
798    return 1;
799  }
800  return (paused ? heap_->GetParallelGCThreadCount() : heap_->GetConcGCThreadCount()) + 1;
801}
802
803void MarkSweep::ScanGrayObjects(bool paused, uint8_t minimum_age) {
804  accounting::CardTable* card_table = GetHeap()->GetCardTable();
805  ThreadPool* thread_pool = GetHeap()->GetThreadPool();
806  size_t thread_count = GetThreadCount(paused);
807  // The parallel version with only one thread is faster for card scanning. TODO: fix.
808  if (kParallelCardScan && thread_count > 1) {
809    Thread* self = Thread::Current();
810    // Can't have a different split for each space since multiple spaces can have their cards being
811    // scanned at the same time.
812    TimingLogger::ScopedTiming t(paused ? "(Paused)ScanGrayObjects" : __FUNCTION__,
813        GetTimings());
814    // Try to take some of the mark stack since we can pass this off to the worker tasks.
815    StackReference<Object>* mark_stack_begin = mark_stack_->Begin();
816    StackReference<Object>* mark_stack_end = mark_stack_->End();
817    const size_t mark_stack_size = mark_stack_end - mark_stack_begin;
818    // Estimated number of work tasks we will create.
819    const size_t mark_stack_tasks = GetHeap()->GetContinuousSpaces().size() * thread_count;
820    DCHECK_NE(mark_stack_tasks, 0U);
821    const size_t mark_stack_delta = std::min(CardScanTask::kMaxSize / 2,
822                                             mark_stack_size / mark_stack_tasks + 1);
823    for (const auto& space : GetHeap()->GetContinuousSpaces()) {
824      if (space->GetMarkBitmap() == nullptr) {
825        continue;
826      }
827      uint8_t* card_begin = space->Begin();
828      uint8_t* card_end = space->End();
829      // Align up the end address. For example, the image space's end
830      // may not be card-size-aligned.
831      card_end = AlignUp(card_end, accounting::CardTable::kCardSize);
832      DCHECK(IsAligned<accounting::CardTable::kCardSize>(card_begin));
833      DCHECK(IsAligned<accounting::CardTable::kCardSize>(card_end));
834      // Calculate how many bytes of heap we will scan.
835      const size_t address_range = card_end - card_begin;
836      // Calculate how much address range each task gets.
837      const size_t card_delta = RoundUp(address_range / thread_count + 1,
838                                        accounting::CardTable::kCardSize);
839      // If paused and the space is neither a zygote nor an image space, we can clear the dirty
840      // cards to avoid accumulating them, which would increase the card scanning load in the
841      // following GC cycles. We need to keep the dirty cards of image and zygote spaces in order
842      // to track references to the other spaces.
843      bool clear_card = paused && !space->IsZygoteSpace() && !space->IsImageSpace();
844      // Create the worker tasks for this space.
845      while (card_begin != card_end) {
846        // Add a range of cards.
847        size_t addr_remaining = card_end - card_begin;
848        size_t card_increment = std::min(card_delta, addr_remaining);
849        // Take from the back of the mark stack.
850        size_t mark_stack_remaining = mark_stack_end - mark_stack_begin;
851        size_t mark_stack_increment = std::min(mark_stack_delta, mark_stack_remaining);
852        mark_stack_end -= mark_stack_increment;
853        mark_stack_->PopBackCount(static_cast<int32_t>(mark_stack_increment));
854        DCHECK_EQ(mark_stack_end, mark_stack_->End());
855        // Add the new task to the thread pool.
856        auto* task = new CardScanTask(thread_pool, this, space->GetMarkBitmap(), card_begin,
857                                      card_begin + card_increment, minimum_age,
858                                      mark_stack_increment, mark_stack_end, clear_card);
859        thread_pool->AddTask(self, task);
860        card_begin += card_increment;
861      }
862    }
863
864    // Note: the card scan below may dirty new cards (and scan them)
865    // as a side effect when a Reference object is encountered and
866    // queued during the marking. See b/11465268.
867    thread_pool->SetMaxActiveWorkers(thread_count - 1);
868    thread_pool->StartWorkers(self);
869    thread_pool->Wait(self, true, true);
870    thread_pool->StopWorkers(self);
871  } else {
872    for (const auto& space : GetHeap()->GetContinuousSpaces()) {
873      if (space->GetMarkBitmap() != nullptr) {
874        // Image spaces are handled properly since live == marked for them.
875        const char* name = nullptr;
876        switch (space->GetGcRetentionPolicy()) {
877        case space::kGcRetentionPolicyNeverCollect:
878          name = paused ? "(Paused)ScanGrayImageSpaceObjects" : "ScanGrayImageSpaceObjects";
879          break;
880        case space::kGcRetentionPolicyFullCollect:
881          name = paused ? "(Paused)ScanGrayZygoteSpaceObjects" : "ScanGrayZygoteSpaceObjects";
882          break;
883        case space::kGcRetentionPolicyAlwaysCollect:
884          name = paused ? "(Paused)ScanGrayAllocSpaceObjects" : "ScanGrayAllocSpaceObjects";
885          break;
886        default:
887          LOG(FATAL) << "Unreachable";
888          UNREACHABLE();
889        }
890        TimingLogger::ScopedTiming t(name, GetTimings());
891        ScanObjectVisitor visitor(this);
892        bool clear_card = paused && !space->IsZygoteSpace() && !space->IsImageSpace();
893        if (clear_card) {
894          card_table->Scan<true>(space->GetMarkBitmap(), space->Begin(), space->End(), visitor,
895                                 minimum_age);
896        } else {
897          card_table->Scan<false>(space->GetMarkBitmap(), space->Begin(), space->End(), visitor,
898                                  minimum_age);
899        }
900      }
901    }
902  }
903}
904
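// Task that visits all marked objects in the bitmap range [begin, end) and scans them, then drains
// its local mark stack.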
905class RecursiveMarkTask : public MarkStackTask<false> {
906 public:
907  RecursiveMarkTask(ThreadPool* thread_pool, MarkSweep* mark_sweep,
908                    accounting::ContinuousSpaceBitmap* bitmap, uintptr_t begin, uintptr_t end)
909      : MarkStackTask<false>(thread_pool, mark_sweep, 0, nullptr), bitmap_(bitmap), begin_(begin),
910        end_(end) {
911  }
912
913 protected:
914  accounting::ContinuousSpaceBitmap* const bitmap_;
915  const uintptr_t begin_;
916  const uintptr_t end_;
917
918  virtual void Finalize() {
919    delete this;
920  }
921
922  // Scans all of the marked objects in the bitmap range.
923  virtual void Run(Thread* self) NO_THREAD_SAFETY_ANALYSIS {
924    ScanObjectParallelVisitor visitor(this);
925    bitmap_->VisitMarkedRange(begin_, end_, visitor);
926    // Finish by emptying our local mark stack.
927    MarkStackTask::Run(self);
928  }
929};
930
931// Populates the mark stack based on the set of marked objects and
932// recursively marks until the mark stack is emptied.
933void MarkSweep::RecursiveMark() {
934  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
935  // RecursiveMark will build the lists of known instances of the Reference classes. See
936  // DelayReferenceReferent for details.
937  if (kUseRecursiveMark) {
938    const bool partial = GetGcType() == kGcTypePartial;
939    ScanObjectVisitor scan_visitor(this);
940    auto* self = Thread::Current();
941    ThreadPool* thread_pool = heap_->GetThreadPool();
942    size_t thread_count = GetThreadCount(false);
943    const bool parallel = kParallelRecursiveMark && thread_count > 1;
944    mark_stack_->Reset();
945    for (const auto& space : GetHeap()->GetContinuousSpaces()) {
946      if ((space->GetGcRetentionPolicy() == space::kGcRetentionPolicyAlwaysCollect) ||
947          (!partial && space->GetGcRetentionPolicy() == space::kGcRetentionPolicyFullCollect)) {
948        current_space_bitmap_ = space->GetMarkBitmap();
949        if (current_space_bitmap_ == nullptr) {
950          continue;
951        }
952        if (parallel) {
953          // We will use the mark stack in the future.
954          // CHECK(mark_stack_->IsEmpty());
955          // This function does not handle heap end increasing, so we must use the space end.
956          uintptr_t begin = reinterpret_cast<uintptr_t>(space->Begin());
957          uintptr_t end = reinterpret_cast<uintptr_t>(space->End());
958          atomic_finger_.StoreRelaxed(AtomicInteger::MaxValue());
959
960          // Create a few worker tasks.
961          const size_t n = thread_count * 2;
962          while (begin != end) {
963            uintptr_t start = begin;
964            uintptr_t delta = (end - begin) / n;
965            delta = RoundUp(delta, KB);
966            if (delta < 16 * KB) delta = end - begin;
967            begin += delta;
968            auto* task = new RecursiveMarkTask(thread_pool, this, current_space_bitmap_, start,
969                                               begin);
970            thread_pool->AddTask(self, task);
971          }
972          thread_pool->SetMaxActiveWorkers(thread_count - 1);
973          thread_pool->StartWorkers(self);
974          thread_pool->Wait(self, true, true);
975          thread_pool->StopWorkers(self);
976        } else {
977          // This function does not handle heap end increasing, so we must use the space end.
978          uintptr_t begin = reinterpret_cast<uintptr_t>(space->Begin());
979          uintptr_t end = reinterpret_cast<uintptr_t>(space->End());
980          current_space_bitmap_->VisitMarkedRange(begin, end, scan_visitor);
981        }
982      }
983    }
984  }
985  ProcessMarkStack(false);
986}
987
988mirror::Object* MarkSweep::IsMarkedCallback(mirror::Object* object, void* arg) {
989  if (reinterpret_cast<MarkSweep*>(arg)->IsMarked(object)) {
990    return object;
991  }
992  return nullptr;
993}
994
995void MarkSweep::RecursiveMarkDirtyObjects(bool paused, uint8_t minimum_age) {
996  ScanGrayObjects(paused, minimum_age);
997  ProcessMarkStack(paused);
998}
999
1000void MarkSweep::ReMarkRoots() {
1001  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
1002  Locks::mutator_lock_->AssertExclusiveHeld(Thread::Current());
1003  Runtime::Current()->VisitRoots(this, static_cast<VisitRootFlags>(
1004      kVisitRootFlagNewRoots | kVisitRootFlagStopLoggingNewRoots | kVisitRootFlagClearRootLog));
1005  if (kVerifyRootsMarked) {
1006    TimingLogger::ScopedTiming t2("(Paused)VerifyRoots", GetTimings());
1007    VerifyRootMarkedVisitor visitor(this);
1008    Runtime::Current()->VisitRoots(&visitor);
1009  }
1010}
1011
1012void MarkSweep::SweepSystemWeaks(Thread* self) {
1013  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
1014  WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
1015  Runtime::Current()->SweepSystemWeaks(IsMarkedCallback, this);
1016}
1017
1018mirror::Object* MarkSweep::VerifySystemWeakIsLiveCallback(Object* obj, void* arg) {
1019  reinterpret_cast<MarkSweep*>(arg)->VerifyIsLive(obj);
1020  // We don't actually want to sweep the object, so let's return "marked".
1021  return obj;
1022}
1023
1024void MarkSweep::VerifyIsLive(const Object* obj) {
1025  if (!heap_->GetLiveBitmap()->Test(obj)) {
1026    // TODO: Consider live stack? Has this code bitrotted?
1027    CHECK(!heap_->allocation_stack_->Contains(obj))
1028        << "Found dead object " << obj << "\n" << heap_->DumpSpaces();
1029  }
1030}
1031
1032void MarkSweep::VerifySystemWeaks() {
1033  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
1034  // Verify system weaks, uses a special object visitor which returns the input object.
1035  Runtime::Current()->SweepSystemWeaks(VerifySystemWeakIsLiveCallback, this);
1036}
1037
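// Checkpoint closure run on each thread: marks the thread's roots and, if requested, revokes the
// thread's rosalloc thread-local buffers.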
1038class CheckpointMarkThreadRoots : public Closure, public RootVisitor {
1039 public:
1040  explicit CheckpointMarkThreadRoots(MarkSweep* mark_sweep,
1041                                     bool revoke_ros_alloc_thread_local_buffers_at_checkpoint)
1042      : mark_sweep_(mark_sweep),
1043        revoke_ros_alloc_thread_local_buffers_at_checkpoint_(
1044            revoke_ros_alloc_thread_local_buffers_at_checkpoint) {
1045  }
1046
1047  void VisitRoots(mirror::Object*** roots, size_t count, const RootInfo& info ATTRIBUTE_UNUSED)
1048      OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
1049      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
1050    for (size_t i = 0; i < count; ++i) {
1051      mark_sweep_->MarkObjectNonNullParallel(*roots[i]);
1052    }
1053  }
1054
1055  void VisitRoots(mirror::CompressedReference<mirror::Object>** roots, size_t count,
1056                  const RootInfo& info ATTRIBUTE_UNUSED)
1057      OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
1058      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
1059    for (size_t i = 0; i < count; ++i) {
1060      mark_sweep_->MarkObjectNonNullParallel(roots[i]->AsMirrorPtr());
1061    }
1062  }
1063
1064  virtual void Run(Thread* thread) OVERRIDE NO_THREAD_SAFETY_ANALYSIS {
1065    ATRACE_BEGIN("Marking thread roots");
1066    // Note: self is not necessarily equal to thread since thread may be suspended.
1067    Thread* const self = Thread::Current();
1068    CHECK(thread == self || thread->IsSuspended() || thread->GetState() == kWaitingPerformingGc)
1069        << thread->GetState() << " thread " << thread << " self " << self;
1070    thread->VisitRoots(this);
1071    ATRACE_END();
1072    if (revoke_ros_alloc_thread_local_buffers_at_checkpoint_) {
1073      ATRACE_BEGIN("RevokeRosAllocThreadLocalBuffers");
1074      mark_sweep_->GetHeap()->RevokeRosAllocThreadLocalBuffers(thread);
1075      ATRACE_END();
1076    }
1077    // If thread is a running mutator, then act on behalf of the garbage collector.
1078    // See the code in ThreadList::RunCheckpoint.
1079    if (thread->GetState() == kRunnable) {
1080      mark_sweep_->GetBarrier().Pass(self);
1081    }
1082  }
1083
1084 private:
1085  MarkSweep* const mark_sweep_;
1086  const bool revoke_ros_alloc_thread_local_buffers_at_checkpoint_;
1087};
1088
1089void MarkSweep::MarkRootsCheckpoint(Thread* self,
1090                                    bool revoke_ros_alloc_thread_local_buffers_at_checkpoint) {
1091  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
1092  CheckpointMarkThreadRoots check_point(this, revoke_ros_alloc_thread_local_buffers_at_checkpoint);
1093  ThreadList* thread_list = Runtime::Current()->GetThreadList();
1094  // Request that the checkpoint be run on all threads, returning a count of the threads that
1095  // must run through the barrier, including self.
1096  size_t barrier_count = thread_list->RunCheckpoint(&check_point);
1097  // Release locks then wait for all mutator threads to pass the barrier.
1098  // If there are no threads to wait for, which implies that all the checkpoint functions have
1099  // finished, then there is no need to release the locks.
1100  if (barrier_count == 0) {
1101    return;
1102  }
1103  Locks::heap_bitmap_lock_->ExclusiveUnlock(self);
1104  Locks::mutator_lock_->SharedUnlock(self);
1105  {
1106    ScopedThreadStateChange tsc(self, kWaitingForCheckPointsToRun);
1107    gc_barrier_->Increment(self, barrier_count);
1108  }
1109  Locks::mutator_lock_->SharedLock(self);
1110  Locks::heap_bitmap_lock_->ExclusiveLock(self);
1111}
1112
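// Sweeps the objects in the given allocation stack. For each alloc space, unmarked objects that
// belong to the space are freed in chunks of kSweepArrayChunkFreeSize, while objects outside the
// space are kept in the stack for the remaining spaces. The large object space is handled last.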
1113void MarkSweep::SweepArray(accounting::ObjectStack* allocations, bool swap_bitmaps) {
1114  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
1115  Thread* self = Thread::Current();
1116  mirror::Object** chunk_free_buffer = reinterpret_cast<mirror::Object**>(
1117      sweep_array_free_buffer_mem_map_->BaseBegin());
1118  size_t chunk_free_pos = 0;
1119  ObjectBytePair freed;
1120  ObjectBytePair freed_los;
1121  // How many objects are left in the array, modified after each space is swept.
1122  StackReference<Object>* objects = allocations->Begin();
1123  size_t count = allocations->Size();
1124  // Change the order to ensure that the non-moving space is swept last as an optimization.
1125  std::vector<space::ContinuousSpace*> sweep_spaces;
1126  space::ContinuousSpace* non_moving_space = nullptr;
1127  for (space::ContinuousSpace* space : heap_->GetContinuousSpaces()) {
1128    if (space->IsAllocSpace() && !immune_region_.ContainsSpace(space) &&
1129        space->GetLiveBitmap() != nullptr) {
1130      if (space == heap_->GetNonMovingSpace()) {
1131        non_moving_space = space;
1132      } else {
1133        sweep_spaces.push_back(space);
1134      }
1135    }
1136  }
1137  // We are unlikely to sweep a significant number of non-movable objects, so we do these after
1138  // the other alloc spaces as an optimization.
1139  if (non_moving_space != nullptr) {
1140    sweep_spaces.push_back(non_moving_space);
1141  }
1142  // Start by sweeping the continuous spaces.
1143  for (space::ContinuousSpace* space : sweep_spaces) {
1144    space::AllocSpace* alloc_space = space->AsAllocSpace();
1145    accounting::ContinuousSpaceBitmap* live_bitmap = space->GetLiveBitmap();
1146    accounting::ContinuousSpaceBitmap* mark_bitmap = space->GetMarkBitmap();
1147    if (swap_bitmaps) {
1148      std::swap(live_bitmap, mark_bitmap);
1149    }
1150    StackReference<Object>* out = objects;
1151    for (size_t i = 0; i < count; ++i) {
1152      Object* const obj = objects[i].AsMirrorPtr();
1153      if (kUseThreadLocalAllocationStack && obj == nullptr) {
1154        continue;
1155      }
1156      if (space->HasAddress(obj)) {
1157        // This object is in the space, remove it from the array and add it to the sweep buffer
1158        // if needed.
1159        if (!mark_bitmap->Test(obj)) {
1160          if (chunk_free_pos >= kSweepArrayChunkFreeSize) {
1161            TimingLogger::ScopedTiming t2("FreeList", GetTimings());
1162            freed.objects += chunk_free_pos;
1163            freed.bytes += alloc_space->FreeList(self, chunk_free_pos, chunk_free_buffer);
1164            chunk_free_pos = 0;
1165          }
1166          chunk_free_buffer[chunk_free_pos++] = obj;
1167        }
1168      } else {
1169        (out++)->Assign(obj);
1170      }
1171    }
1172    if (chunk_free_pos > 0) {
1173      TimingLogger::ScopedTiming t2("FreeList", GetTimings());
1174      freed.objects += chunk_free_pos;
1175      freed.bytes += alloc_space->FreeList(self, chunk_free_pos, chunk_free_buffer);
1176      chunk_free_pos = 0;
1177    }
1178    // All of the references which the space contained are no longer in the allocation stack;
1179    // update the count.
1180    count = out - objects;
1181  }
1182  // Handle the large object space.
1183  space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
1184  if (large_object_space != nullptr) {
1185    accounting::LargeObjectBitmap* large_live_objects = large_object_space->GetLiveBitmap();
1186    accounting::LargeObjectBitmap* large_mark_objects = large_object_space->GetMarkBitmap();
1187    if (swap_bitmaps) {
1188      std::swap(large_live_objects, large_mark_objects);
1189    }
1190    for (size_t i = 0; i < count; ++i) {
1191      Object* const obj = objects[i].AsMirrorPtr();
1192      // Handle large objects.
1193      if (kUseThreadLocalAllocationStack && obj == nullptr) {
1194        continue;
1195      }
1196      if (!large_mark_objects->Test(obj)) {
1197        ++freed_los.objects;
1198        freed_los.bytes += large_object_space->Free(self, obj);
1199      }
1200    }
1201  }
1202  {
1203    TimingLogger::ScopedTiming t2("RecordFree", GetTimings());
1204    RecordFree(freed);
1205    RecordFreeLOS(freed_los);
1206    t2.NewTiming("ResetStack");
1207    allocations->Reset();
1208  }
1209  sweep_array_free_buffer_mem_map_->MadviseDontNeedAndZero();
1210}
1211
1212void MarkSweep::Sweep(bool swap_bitmaps) {
1213  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
1214  // Ensure that nobody inserted items into the live stack after we swapped the stacks.
1215  CHECK_GE(live_stack_freeze_size_, GetHeap()->GetLiveStack()->Size());
1216  {
1217    TimingLogger::ScopedTiming t2("MarkAllocStackAsLive", GetTimings());
1218    // Mark everything allocated since the last GC as live so that we can sweep concurrently,
1219    // knowing that new allocations won't be marked as live.
1220    accounting::ObjectStack* live_stack = heap_->GetLiveStack();
1221    heap_->MarkAllocStackAsLive(live_stack);
1222    live_stack->Reset();
1223    DCHECK(mark_stack_->IsEmpty());
1224  }
1225  for (const auto& space : GetHeap()->GetContinuousSpaces()) {
1226    if (space->IsContinuousMemMapAllocSpace()) {
1227      space::ContinuousMemMapAllocSpace* alloc_space = space->AsContinuousMemMapAllocSpace();
1228      TimingLogger::ScopedTiming split(
1229          alloc_space->IsZygoteSpace() ? "SweepZygoteSpace" : "SweepMallocSpace", GetTimings());
1230      RecordFree(alloc_space->Sweep(swap_bitmaps));
1231    }
1232  }
1233  SweepLargeObjects(swap_bitmaps);
1234}
1235
1236void MarkSweep::SweepLargeObjects(bool swap_bitmaps) {
1237  space::LargeObjectSpace* los = heap_->GetLargeObjectsSpace();
1238  if (los != nullptr) {
1239    TimingLogger::ScopedTiming split(__FUNCTION__, GetTimings());
1240    RecordFreeLOS(los->Sweep(swap_bitmaps));
1241  }
1242}
1243
1244// Process the "referent" field in a java.lang.ref.Reference.  If the referent has not yet been
1245// marked, put it on the appropriate list in the heap for later processing.
1246void MarkSweep::DelayReferenceReferent(mirror::Class* klass, mirror::Reference* ref) {
1247  if (kCountJavaLangRefs) {
1248    ++reference_count_;
1249  }
1250  heap_->GetReferenceProcessor()->DelayReferenceReferent(klass, ref, &HeapReferenceMarkedCallback,
1251                                                         this);
1252}
1253
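// Visitor that marks the object referenced by each reference field of the scanned object.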
1254class MarkObjectVisitor {
1255 public:
1256  explicit MarkObjectVisitor(MarkSweep* const mark_sweep) ALWAYS_INLINE : mark_sweep_(mark_sweep) {
1257  }
1258
1259  void operator()(Object* obj, MemberOffset offset, bool /* is_static */) const
1260      ALWAYS_INLINE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
1261      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
1262    if (kCheckLocks) {
1263      Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
1264      Locks::heap_bitmap_lock_->AssertExclusiveHeld(Thread::Current());
1265    }
1266    mark_sweep_->MarkObject(obj->GetFieldObject<mirror::Object>(offset), obj, offset);
1267  }
1268
1269 private:
1270  MarkSweep* const mark_sweep_;
1271};
1272
1273// Scans an object reference.  Determines the type of the reference
1274// and dispatches to a specialized scanning routine.
1275void MarkSweep::ScanObject(Object* obj) {
1276  MarkObjectVisitor mark_visitor(this);
1277  DelayReferenceReferentVisitor ref_visitor(this);
1278  ScanObjectVisit(obj, mark_visitor, ref_visitor);
1279}
1280
1281void MarkSweep::ProcessMarkStackCallback(void* arg) {
1282  reinterpret_cast<MarkSweep*>(arg)->ProcessMarkStack(false);
1283}
1284
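// Splits the current mark stack into chunks and processes them in parallel using MarkStackTasks on
// the heap's thread pool.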
1285void MarkSweep::ProcessMarkStackParallel(size_t thread_count) {
1286  Thread* self = Thread::Current();
1287  ThreadPool* thread_pool = GetHeap()->GetThreadPool();
1288  const size_t chunk_size = std::min(mark_stack_->Size() / thread_count + 1,
1289                                     static_cast<size_t>(MarkStackTask<false>::kMaxSize));
1290  CHECK_GT(chunk_size, 0U);
1291  // Split the current mark stack up into work tasks.
1292  for (auto* it = mark_stack_->Begin(), *end = mark_stack_->End(); it < end; ) {
1293    const size_t delta = std::min(static_cast<size_t>(end - it), chunk_size);
1294    thread_pool->AddTask(self, new MarkStackTask<false>(thread_pool, this, delta, it));
1295    it += delta;
1296  }
1297  thread_pool->SetMaxActiveWorkers(thread_count - 1);
1298  thread_pool->StartWorkers(self);
1299  thread_pool->Wait(self, true, true);
1300  thread_pool->StopWorkers(self);
1301  mark_stack_->Reset();
1302  CHECK_EQ(work_chunks_created_.LoadSequentiallyConsistent(),
1303           work_chunks_deleted_.LoadSequentiallyConsistent())
1304      << " some of the work chunks were leaked";
1305}
1306
1307// Scan anything that's on the mark stack.
1308void MarkSweep::ProcessMarkStack(bool paused) {
1309  TimingLogger::ScopedTiming t(paused ? "(Paused)ProcessMarkStack" : __FUNCTION__, GetTimings());
1310  size_t thread_count = GetThreadCount(paused);
1311  if (kParallelProcessMarkStack && thread_count > 1 &&
1312      mark_stack_->Size() >= kMinimumParallelMarkStackSize) {
1313    ProcessMarkStackParallel(thread_count);
1314  } else {
1315    // TODO: Tune this.
1316    static const size_t kFifoSize = 4;
1317    BoundedFifoPowerOfTwo<Object*, kFifoSize> prefetch_fifo;
1318    for (;;) {
1319      Object* obj = nullptr;
1320      if (kUseMarkStackPrefetch) {
1321        while (!mark_stack_->IsEmpty() && prefetch_fifo.size() < kFifoSize) {
1322          Object* mark_stack_obj = mark_stack_->PopBack();
1323          DCHECK(mark_stack_obj != nullptr);
1324          __builtin_prefetch(mark_stack_obj);
1325          prefetch_fifo.push_back(mark_stack_obj);
1326        }
1327        if (prefetch_fifo.empty()) {
1328          break;
1329        }
1330        obj = prefetch_fifo.front();
1331        prefetch_fifo.pop_front();
1332      } else {
1333        if (mark_stack_->IsEmpty()) {
1334          break;
1335        }
1336        obj = mark_stack_->PopBack();
1337      }
1338      DCHECK(obj != nullptr);
1339      ScanObject(obj);
1340    }
1341  }
1342}
1343
1344inline bool MarkSweep::IsMarked(const Object* object) const {
1345  if (immune_region_.ContainsObject(object)) {
1346    return true;
1347  }
1348  if (current_space_bitmap_->HasAddress(object)) {
1349    return current_space_bitmap_->Test(object);
1350  }
1351  return mark_bitmap_->Test(object);
1352}
1353
1354void MarkSweep::FinishPhase() {
1355  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
1356  if (kCountScannedTypes) {
1357    VLOG(gc) << "MarkSweep scanned classes=" << class_count_.LoadRelaxed()
1358        << " arrays=" << array_count_.LoadRelaxed() << " other=" << other_count_.LoadRelaxed();
1359  }
1360  if (kCountTasks) {
1361    VLOG(gc) << "Total number of work chunks allocated: " << work_chunks_created_.LoadRelaxed();
1362  }
1363  if (kMeasureOverhead) {
1364    VLOG(gc) << "Overhead time " << PrettyDuration(overhead_time_.LoadRelaxed());
1365  }
1366  if (kProfileLargeObjects) {
1367    VLOG(gc) << "Large objects tested " << large_object_test_.LoadRelaxed()
1368        << " marked " << large_object_mark_.LoadRelaxed();
1369  }
1370  if (kCountJavaLangRefs) {
1371    VLOG(gc) << "References scanned " << reference_count_.LoadRelaxed();
1372  }
1373  if (kCountMarkedObjects) {
1374    VLOG(gc) << "Marked: null=" << mark_null_count_.LoadRelaxed()
1375        << " immune=" <<  mark_immune_count_.LoadRelaxed()
1376        << " fastpath=" << mark_fastpath_count_.LoadRelaxed()
1377        << " slowpath=" << mark_slowpath_count_.LoadRelaxed();
1378  }
1379  CHECK(mark_stack_->IsEmpty());  // Ensure that the mark stack is empty.
1380  mark_stack_->Reset();
1381  WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
1382  heap_->ClearMarkedObjects();
1383}
1384
1385void MarkSweep::RevokeAllThreadLocalBuffers() {
1386  if (kRevokeRosAllocThreadLocalBuffersAtCheckpoint && IsConcurrent()) {
1387    // If concurrent, rosalloc thread-local buffers are revoked at the
1388    // thread checkpoint. Bump pointer space thread-local buffers must
1389    // not be in use.
1390    GetHeap()->AssertAllBumpPointerSpaceThreadLocalBuffersAreRevoked();
1391  } else {
1392    TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
1393    GetHeap()->RevokeAllThreadLocalBuffers();
1394  }
1395}
1396
1397}  // namespace collector
1398}  // namespace gc
1399}  // namespace art
1400