mark_sweep.cc revision 12f7423a2bb4bfab76700d84eb6d4338d211983a
/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "mark_sweep.h"

#include <functional>
#include <numeric>
#include <climits>
#include <vector>

#include "base/bounded_fifo.h"
#include "base/logging.h"
#include "base/macros.h"
#include "base/mutex-inl.h"
#include "base/timing_logger.h"
#include "gc/accounting/card_table-inl.h"
#include "gc/accounting/heap_bitmap-inl.h"
#include "gc/accounting/mod_union_table.h"
#include "gc/accounting/space_bitmap-inl.h"
#include "gc/heap.h"
#include "gc/reference_processor.h"
#include "gc/space/image_space.h"
#include "gc/space/large_object_space.h"
#include "gc/space/space-inl.h"
#include "mark_sweep-inl.h"
#include "mirror/art_field-inl.h"
#include "mirror/object-inl.h"
#include "runtime.h"
#include "scoped_thread_state_change.h"
#include "thread-inl.h"
#include "thread_list.h"

using ::art::mirror::Object;

namespace art {
namespace gc {
namespace collector {

// Performance options.
static constexpr bool kUseRecursiveMark = false;
static constexpr bool kUseMarkStackPrefetch = true;
static constexpr size_t kSweepArrayChunkFreeSize = 1024;
static constexpr bool kPreCleanCards = true;

// Parallelism options.
static constexpr bool kParallelCardScan = true;
static constexpr bool kParallelRecursiveMark = true;
// Don't attempt to parallelize mark stack processing unless the mark stack contains at least n
// elements. This is temporary until we reduce the overhead caused by allocating tasks, etc.
// Without this minimum, ProcessReferences can add overhead since we may end up making many calls
// to ProcessMarkStack with very small mark stacks.
static constexpr size_t kMinimumParallelMarkStackSize = 128;
static constexpr bool kParallelProcessMarkStack = true;

// Profiling and information flags.
static constexpr bool kProfileLargeObjects = false;
static constexpr bool kMeasureOverhead = false;
static constexpr bool kCountTasks = false;
static constexpr bool kCountJavaLangRefs = false;
static constexpr bool kCountMarkedObjects = false;

// Turn off kCheckLocks when profiling the GC since it slows the GC down by up to 40%.
static constexpr bool kCheckLocks = kDebugLocking;
static constexpr bool kVerifyRootsMarked = kIsDebugBuild;

// If true, revoke the rosalloc thread-local buffers at the
// checkpoint, as opposed to during the pause.
static constexpr bool kRevokeRosAllocThreadLocalBuffersAtCheckpoint = true;

void MarkSweep::BindBitmaps() {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
  // Mark all of the spaces we never collect as immune.
  for (const auto& space : GetHeap()->GetContinuousSpaces()) {
    if (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyNeverCollect) {
      CHECK(immune_region_.AddContinuousSpace(space)) << "Failed to add space " << *space;
    }
  }
}

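// The constructor preallocates a page-aligned scratch buffer that SweepArray uses to batch up to
// kSweepArrayChunkFreeSize object pointers per FreeList call, avoiding the overhead of freeing
// objects one at a time.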
MarkSweep::MarkSweep(Heap* heap, bool is_concurrent, const std::string& name_prefix)
    : GarbageCollector(heap,
                       name_prefix +
                       (is_concurrent ? "concurrent mark sweep" : "mark sweep")),
      current_space_bitmap_(nullptr), mark_bitmap_(nullptr), mark_stack_(nullptr),
      gc_barrier_(new Barrier(0)),
      mark_stack_lock_("mark sweep mark stack lock", kMarkSweepMarkStackLock),
      is_concurrent_(is_concurrent), live_stack_freeze_size_(0) {
  std::string error_msg;
  MemMap* mem_map = MemMap::MapAnonymous(
      "mark sweep sweep array free buffer", nullptr,
      RoundUp(kSweepArrayChunkFreeSize * sizeof(mirror::Object*), kPageSize),
      PROT_READ | PROT_WRITE, false, &error_msg);
  CHECK(mem_map != nullptr) << "Couldn't allocate sweep array free buffer: " << error_msg;
  sweep_array_free_buffer_mem_map_.reset(mem_map);
}

void MarkSweep::InitializePhase() {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  mark_stack_ = heap_->GetMarkStack();
  DCHECK(mark_stack_ != nullptr);
  immune_region_.Reset();
  class_count_.StoreRelaxed(0);
  array_count_.StoreRelaxed(0);
  other_count_.StoreRelaxed(0);
  large_object_test_.StoreRelaxed(0);
  large_object_mark_.StoreRelaxed(0);
  overhead_time_.StoreRelaxed(0);
  work_chunks_created_.StoreRelaxed(0);
  work_chunks_deleted_.StoreRelaxed(0);
  reference_count_.StoreRelaxed(0);
  mark_null_count_.StoreRelaxed(0);
  mark_immune_count_.StoreRelaxed(0);
  mark_fastpath_count_.StoreRelaxed(0);
  mark_slowpath_count_.StoreRelaxed(0);
  {
    // TODO: I don't think we should need the heap bitmap lock to get the mark bitmap.
    ReaderMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
    mark_bitmap_ = heap_->GetMarkBitmap();
  }
  if (!GetCurrentIteration()->GetClearSoftReferences()) {
    // Always clear soft references for non-sticky collections.
    GetCurrentIteration()->SetClearSoftReferences(GetGcType() != collector::kGcTypeSticky);
  }
}

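// Top-level phase driver. For a concurrent collection, marking runs while mutators hold the
// mutator lock in shared mode and only PausePhase stops the world; for a non-concurrent
// collection, marking and the pause phase both run inside a single pause. Sweeping is concurrent
// in both cases.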
void MarkSweep::RunPhases() {
  Thread* self = Thread::Current();
  InitializePhase();
  Locks::mutator_lock_->AssertNotHeld(self);
  if (IsConcurrent()) {
    GetHeap()->PreGcVerification(this);
    {
      ReaderMutexLock mu(self, *Locks::mutator_lock_);
      MarkingPhase();
    }
    ScopedPause pause(this);
    GetHeap()->PrePauseRosAllocVerification(this);
    PausePhase();
    RevokeAllThreadLocalBuffers();
  } else {
    ScopedPause pause(this);
    GetHeap()->PreGcVerificationPaused(this);
    MarkingPhase();
    GetHeap()->PrePauseRosAllocVerification(this);
    PausePhase();
    RevokeAllThreadLocalBuffers();
  }
  {
    // Sweeping is always done concurrently, even for non-concurrent mark sweep.
    ReaderMutexLock mu(self, *Locks::mutator_lock_);
    ReclaimPhase();
  }
  GetHeap()->PostGcVerification(this);
  FinishPhase();
}

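// Delegates soft/weak/finalizer/phantom reference processing to the heap's ReferenceProcessor,
// wiring in this collector's marking and mark stack callbacks.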
void MarkSweep::ProcessReferences(Thread* self) {
  WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
  GetHeap()->GetReferenceProcessor()->ProcessReferences(
      true, GetTimings(), GetCurrentIteration()->GetClearSoftReferences(),
      &HeapReferenceMarkedCallback, &MarkObjectCallback, &ProcessMarkStackCallback, this);
}

void MarkSweep::PausePhase() {
  TimingLogger::ScopedTiming t("(Paused)PausePhase", GetTimings());
  Thread* self = Thread::Current();
  Locks::mutator_lock_->AssertExclusiveHeld(self);
  if (IsConcurrent()) {
    // Handle the dirty objects if we are a concurrent GC.
    WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
    // Re-mark root set.
    ReMarkRoots();
    // Scan dirty objects; this re-scan is only required for concurrent GC, since mutators may
    // have dirtied cards while the concurrent marking phase was running.
    RecursiveMarkDirtyObjects(true, accounting::CardTable::kCardDirty);
  }
  {
    TimingLogger::ScopedTiming t2("SwapStacks", GetTimings());
    WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
    heap_->SwapStacks(self);
    live_stack_freeze_size_ = heap_->GetLiveStack()->Size();
    // Need to revoke all the thread local allocation stacks since we just swapped the allocation
    // stacks and don't want anybody to allocate into the live stack.
    RevokeAllThreadLocalAllocationStacks(self);
  }
  heap_->PreSweepingGcVerification(this);
  // Disallow new system weaks to prevent a race which occurs when someone adds a new system
  // weak before we sweep them. Since this new system weak may not be marked, the GC may
  // incorrectly sweep it. This also fixes a race where interning may attempt to return a strong
  // reference to a string that is about to be swept.
  Runtime::Current()->DisallowNewSystemWeaks();
  // Enable the reference processing slow path, needs to be done with mutators paused since there
  // is no lock in the GetReferent fast path.
  GetHeap()->GetReferenceProcessor()->EnableSlowPath();
}

void MarkSweep::PreCleanCards() {
  // Don't do this for non-concurrent GCs since they don't have any dirty cards.
  if (kPreCleanCards && IsConcurrent()) {
    TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
    Thread* self = Thread::Current();
    CHECK(!Locks::mutator_lock_->IsExclusiveHeld(self));
    // Process dirty cards and add dirty cards to mod union tables, also ages cards.
    heap_->ProcessCards(GetTimings(), false);
    // The checkpoint root marking is required to avoid a race condition which occurs if the
    // following happens during a reference write:
    // 1. mutator dirties the card (write barrier)
    // 2. GC ages the card (the above ProcessCards call)
    // 3. GC scans the object (the RecursiveMarkDirtyObjects call below)
    // 4. mutator writes the value (corresponding to the write barrier in 1.)
    // This causes the GC to age the card but not necessarily mark the reference which the mutator
    // wrote into the object stored in the card.
    // Having the checkpoint fixes this issue since it ensures that the card mark and the
    // reference write are visible to the GC before the card is scanned (this is due to locks being
    // acquired / released in the checkpoint code).
    // The other roots are also marked to help reduce the pause.
    MarkRootsCheckpoint(self, false);
    MarkNonThreadRoots();
    MarkConcurrentRoots(
        static_cast<VisitRootFlags>(kVisitRootFlagClearRootLog | kVisitRootFlagNewRoots));
    // Process the newly aged cards.
    RecursiveMarkDirtyObjects(false, accounting::CardTable::kCardDirty - 1);
    // TODO: Empty allocation stack to reduce the number of objects we need to test / mark as live
    // in the next GC.
  }
}

void MarkSweep::RevokeAllThreadLocalAllocationStacks(Thread* self) {
  if (kUseThreadLocalAllocationStack) {
    TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
    Locks::mutator_lock_->AssertExclusiveHeld(self);
    heap_->RevokeAllThreadLocalAllocationStacks(self);
  }
}

void MarkSweep::MarkingPhase() {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  Thread* self = Thread::Current();
  BindBitmaps();
  FindDefaultSpaceBitmap();
  // Process dirty cards and add dirty cards to mod union tables.
  heap_->ProcessCards(GetTimings(), false);
  WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
  MarkRoots(self);
  MarkReachableObjects();
  // Pre-clean dirtied cards to reduce pauses.
  PreCleanCards();
}

void MarkSweep::UpdateAndMarkModUnion() {
  for (const auto& space : heap_->GetContinuousSpaces()) {
    if (immune_region_.ContainsSpace(space)) {
      const char* name = space->IsZygoteSpace() ? "UpdateAndMarkZygoteModUnionTable" :
          "UpdateAndMarkImageModUnionTable";
      TimingLogger::ScopedTiming t(name, GetTimings());
      accounting::ModUnionTable* mod_union_table = heap_->FindModUnionTableFromSpace(space);
      CHECK(mod_union_table != nullptr);
      mod_union_table->UpdateAndMarkReferences(MarkHeapReferenceCallback, this);
    }
  }
}

void MarkSweep::MarkReachableObjects() {
  UpdateAndMarkModUnion();
  // Recursively mark all the non-image bits set in the mark bitmap.
  RecursiveMark();
}

void MarkSweep::ReclaimPhase() {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  Thread* self = Thread::Current();
  // Process the references concurrently.
  ProcessReferences(self);
  SweepSystemWeaks(self);
  Runtime::Current()->AllowNewSystemWeaks();
  {
    WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
    // Reclaim unmarked objects.
    Sweep(false);
    // Swap the live and mark bitmaps for each space we modified. This is an optimization that
    // enables us to not clear live bits inside of the sweep. Only swaps unbound bitmaps.
    SwapBitmaps();
    // Unbind the live and mark bitmaps.
    GetHeap()->UnBindBitmaps();
  }
}

void MarkSweep::FindDefaultSpaceBitmap() {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  for (const auto& space : GetHeap()->GetContinuousSpaces()) {
    accounting::ContinuousSpaceBitmap* bitmap = space->GetMarkBitmap();
    // We want to have the main space instead of the non-moving space if possible.
    if (bitmap != nullptr &&
        space->GetGcRetentionPolicy() == space::kGcRetentionPolicyAlwaysCollect) {
      current_space_bitmap_ = bitmap;
      // If this is not the non-moving space, exit the loop early since it will be good enough.
      if (space != heap_->GetNonMovingSpace()) {
        break;
      }
    }
  }
  CHECK(current_space_bitmap_ != nullptr) << "Could not find a default mark bitmap\n"
      << heap_->DumpSpaces();
}

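// Mark stack growth: ExpandMarkStack doubles the capacity, and ResizeMarkStack copies the
// contents aside, resizes the backing store, and pushes the entries back. The early return below
// handles the race where another thread expanded the stack while we were waiting on
// mark_stack_lock_.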
void MarkSweep::ExpandMarkStack() {
  ResizeMarkStack(mark_stack_->Capacity() * 2);
}

void MarkSweep::ResizeMarkStack(size_t new_size) {
  // Rare case, so there is no need to make Thread::Current a parameter.
  if (UNLIKELY(mark_stack_->Size() < mark_stack_->Capacity())) {
    // Someone else acquired the lock and expanded the mark stack before us.
    return;
  }
  std::vector<Object*> temp(mark_stack_->Begin(), mark_stack_->End());
  CHECK_LE(mark_stack_->Size(), new_size);
  mark_stack_->Resize(new_size);
  for (const auto& obj : temp) {
    mark_stack_->PushBack(obj);
  }
}

inline void MarkSweep::MarkObjectNonNullParallel(Object* obj) {
  DCHECK(obj != nullptr);
  if (MarkObjectParallel(obj)) {
    MutexLock mu(Thread::Current(), mark_stack_lock_);
    if (UNLIKELY(mark_stack_->Size() >= mark_stack_->Capacity())) {
      ExpandMarkStack();
    }
    // The object must be pushed on to the mark stack.
    mark_stack_->PushBack(obj);
  }
}

mirror::Object* MarkSweep::MarkObjectCallback(mirror::Object* obj, void* arg) {
  MarkSweep* mark_sweep = reinterpret_cast<MarkSweep*>(arg);
  mark_sweep->MarkObject(obj);
  return obj;
}

void MarkSweep::MarkHeapReferenceCallback(mirror::HeapReference<mirror::Object>* ref, void* arg) {
  reinterpret_cast<MarkSweep*>(arg)->MarkObject(ref->AsMirrorPtr());
}

bool MarkSweep::HeapReferenceMarkedCallback(mirror::HeapReference<mirror::Object>* ref, void* arg) {
  return reinterpret_cast<MarkSweep*>(arg)->IsMarked(ref->AsMirrorPtr());
}

class MarkSweepMarkObjectSlowPath {
 public:
  explicit MarkSweepMarkObjectSlowPath(MarkSweep* mark_sweep) : mark_sweep_(mark_sweep) {
  }

  void operator()(const Object* obj) const ALWAYS_INLINE {
    if (kProfileLargeObjects) {
      // TODO: Differentiate between marking and testing somehow.
      ++mark_sweep_->large_object_test_;
      ++mark_sweep_->large_object_mark_;
    }
    space::LargeObjectSpace* large_object_space = mark_sweep_->GetHeap()->GetLargeObjectsSpace();
    if (UNLIKELY(obj == nullptr || !IsAligned<kPageSize>(obj) ||
                 (kIsDebugBuild && !large_object_space->Contains(obj)))) {
      LOG(ERROR) << "Tried to mark " << obj << " not contained by any spaces";
      LOG(ERROR) << "Attempting to see if it's a bad root";
      mark_sweep_->VerifyRoots();
      LOG(FATAL) << "Can't mark invalid object";
    }
  }

 private:
  MarkSweep* const mark_sweep_;
};

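// Marks an object known to be non-null. Immune objects are expected to already be marked; the
// common case sets a bit in the current space's bitmap, and anything outside that bitmap falls
// back to the heap mark bitmap plus a slow path that aborts on invalid pointers. Newly marked
// objects are pushed on the mark stack for later scanning.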
inline void MarkSweep::MarkObjectNonNull(Object* obj) {
  DCHECK(obj != nullptr);
  if (kUseBakerOrBrooksReadBarrier) {
    // Verify all the objects have the correct pointer installed.
    obj->AssertReadBarrierPointer();
  }
  if (immune_region_.ContainsObject(obj)) {
    if (kCountMarkedObjects) {
      ++mark_immune_count_;
    }
    DCHECK(mark_bitmap_->Test(obj));
  } else if (LIKELY(current_space_bitmap_->HasAddress(obj))) {
    if (kCountMarkedObjects) {
      ++mark_fastpath_count_;
    }
    if (UNLIKELY(!current_space_bitmap_->Set(obj))) {
      PushOnMarkStack(obj);  // This object was not previously marked.
    }
  } else {
    if (kCountMarkedObjects) {
      ++mark_slowpath_count_;
    }
    MarkSweepMarkObjectSlowPath visitor(this);
    // TODO: We already know that the object is not in the current_space_bitmap_ but MarkBitmap::Set
    // will check again.
    if (!mark_bitmap_->Set(obj, visitor)) {
      PushOnMarkStack(obj);  // Was not already marked, push.
    }
  }
}

inline void MarkSweep::PushOnMarkStack(Object* obj) {
  if (UNLIKELY(mark_stack_->Size() >= mark_stack_->Capacity())) {
    // Lock is not needed but is here anyway to please annotalysis.
    MutexLock mu(Thread::Current(), mark_stack_lock_);
    ExpandMarkStack();
  }
  // The object must be pushed on to the mark stack.
  mark_stack_->PushBack(obj);
}

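// Thread-safe marking that only sets bitmap bits and never touches the shared mark stack.
// Returns true if this call marked the object (it was unmarked before), in which case the caller
// is responsible for queuing the object for scanning.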
inline bool MarkSweep::MarkObjectParallel(const Object* obj) {
  DCHECK(obj != nullptr);
  if (kUseBakerOrBrooksReadBarrier) {
    // Verify all the objects have the correct pointer installed.
    obj->AssertReadBarrierPointer();
  }
  if (immune_region_.ContainsObject(obj)) {
    DCHECK(IsMarked(obj));
    return false;
  }
  // Try to take advantage of locality of references within a space; failing that, find the space
  // the hard way.
  accounting::ContinuousSpaceBitmap* object_bitmap = current_space_bitmap_;
  if (LIKELY(object_bitmap->HasAddress(obj))) {
    return !object_bitmap->AtomicTestAndSet(obj);
  }
  MarkSweepMarkObjectSlowPath visitor(this);
  return !mark_bitmap_->AtomicTestAndSet(obj, visitor);
}

// Used to mark objects when processing the mark stack. If an object is null, it is not marked.
inline void MarkSweep::MarkObject(Object* obj) {
  if (obj != nullptr) {
    MarkObjectNonNull(obj);
  } else if (kCountMarkedObjects) {
    ++mark_null_count_;
  }
}

void MarkSweep::MarkRootParallelCallback(Object** root, void* arg, const RootInfo& /*root_info*/) {
  reinterpret_cast<MarkSweep*>(arg)->MarkObjectNonNullParallel(*root);
}

void MarkSweep::VerifyRootMarked(Object** root, void* arg, const RootInfo& /*root_info*/) {
  CHECK(reinterpret_cast<MarkSweep*>(arg)->IsMarked(*root));
}

void MarkSweep::MarkRootCallback(Object** root, void* arg, const RootInfo& /*root_info*/) {
  reinterpret_cast<MarkSweep*>(arg)->MarkObjectNonNull(*root);
}

void MarkSweep::VerifyRootCallback(Object** root, void* arg, const RootInfo& root_info) {
  reinterpret_cast<MarkSweep*>(arg)->VerifyRoot(*root, root_info);
}

void MarkSweep::VerifyRoot(const Object* root, const RootInfo& root_info) {
  // See if the root is on any space bitmap.
  if (heap_->GetLiveBitmap()->GetContinuousSpaceBitmap(root) == nullptr) {
    space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
    if (!large_object_space->Contains(root)) {
      LOG(ERROR) << "Found invalid root: " << root << " ";
      root_info.Describe(LOG(ERROR));
    }
  }
}

void MarkSweep::VerifyRoots() {
  Runtime::Current()->GetThreadList()->VisitRoots(VerifyRootCallback, this);
}

void MarkSweep::MarkRoots(Thread* self) {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  if (Locks::mutator_lock_->IsExclusiveHeld(self)) {
    // If we exclusively hold the mutator lock, all threads must be suspended.
    Runtime::Current()->VisitRoots(MarkRootCallback, this);
    RevokeAllThreadLocalAllocationStacks(self);
  } else {
    MarkRootsCheckpoint(self, kRevokeRosAllocThreadLocalBuffersAtCheckpoint);
    // At this point the live stack should no longer have any mutators which push into it.
    MarkNonThreadRoots();
    MarkConcurrentRoots(
        static_cast<VisitRootFlags>(kVisitRootFlagAllRoots | kVisitRootFlagStartLoggingNewRoots));
  }
}

void MarkSweep::MarkNonThreadRoots() {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  Runtime::Current()->VisitNonThreadRoots(MarkRootCallback, this);
}

void MarkSweep::MarkConcurrentRoots(VisitRootFlags flags) {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  // Visit all runtime roots and clear dirty flags.
  Runtime::Current()->VisitConcurrentRoots(MarkRootCallback, this, flags);
}

class ScanObjectVisitor {
 public:
  explicit ScanObjectVisitor(MarkSweep* const mark_sweep) ALWAYS_INLINE
      : mark_sweep_(mark_sweep) {}

  void operator()(Object* obj) const ALWAYS_INLINE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
    if (kCheckLocks) {
      Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
      Locks::heap_bitmap_lock_->AssertExclusiveHeld(Thread::Current());
    }
    mark_sweep_->ScanObject(obj);
  }

 private:
  MarkSweep* const mark_sweep_;
};

class DelayReferenceReferentVisitor {
 public:
  explicit DelayReferenceReferentVisitor(MarkSweep* collector) : collector_(collector) {
  }

  void operator()(mirror::Class* klass, mirror::Reference* ref) const
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
    collector_->DelayReferenceReferent(klass, ref);
  }

 private:
  MarkSweep* const collector_;
};

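// A chunk of parallel marking work. Each task carries its own bounded mark stack of up to
// kMaxSize objects; on overflow, the upper half of the stack is handed to the thread pool as a
// new task. With kUseFinger enabled, references at or above atomic_finger_ are not pushed, on
// the assumption that the ongoing bitmap scan will reach them anyway.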
template <bool kUseFinger = false>
class MarkStackTask : public Task {
 public:
  MarkStackTask(ThreadPool* thread_pool, MarkSweep* mark_sweep, size_t mark_stack_size,
                Object** mark_stack)
      : mark_sweep_(mark_sweep),
        thread_pool_(thread_pool),
        mark_stack_pos_(mark_stack_size) {
    // We may have to copy part of an existing mark stack when another mark stack overflows.
    if (mark_stack_size != 0) {
      DCHECK(mark_stack != nullptr);
      // TODO: Check performance?
      std::copy(mark_stack, mark_stack + mark_stack_size, mark_stack_);
    }
    if (kCountTasks) {
      ++mark_sweep_->work_chunks_created_;
    }
  }

  static const size_t kMaxSize = 1 * KB;

 protected:
  class MarkObjectParallelVisitor {
   public:
    explicit MarkObjectParallelVisitor(MarkStackTask<kUseFinger>* chunk_task,
                                       MarkSweep* mark_sweep) ALWAYS_INLINE
            : chunk_task_(chunk_task), mark_sweep_(mark_sweep) {}

    void operator()(Object* obj, MemberOffset offset, bool /* static */) const ALWAYS_INLINE
        SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
      mirror::Object* ref = obj->GetFieldObject<mirror::Object>(offset);
      if (ref != nullptr && mark_sweep_->MarkObjectParallel(ref)) {
        if (kUseFinger) {
          android_memory_barrier();
          if (reinterpret_cast<uintptr_t>(ref) >=
              static_cast<uintptr_t>(mark_sweep_->atomic_finger_.LoadRelaxed())) {
            return;
          }
        }
        chunk_task_->MarkStackPush(ref);
      }
    }

   private:
    MarkStackTask<kUseFinger>* const chunk_task_;
    MarkSweep* const mark_sweep_;
  };

  class ScanObjectParallelVisitor {
   public:
    explicit ScanObjectParallelVisitor(MarkStackTask<kUseFinger>* chunk_task) ALWAYS_INLINE
        : chunk_task_(chunk_task) {}

    // No thread safety analysis since multiple threads will use this visitor.
    void operator()(Object* obj) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
        EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
      MarkSweep* const mark_sweep = chunk_task_->mark_sweep_;
      MarkObjectParallelVisitor mark_visitor(chunk_task_, mark_sweep);
      DelayReferenceReferentVisitor ref_visitor(mark_sweep);
      mark_sweep->ScanObjectVisit(obj, mark_visitor, ref_visitor);
    }

   private:
    MarkStackTask<kUseFinger>* const chunk_task_;
  };

  virtual ~MarkStackTask() {
    // Make sure that we have cleared our mark stack.
    DCHECK_EQ(mark_stack_pos_, 0U);
    if (kCountTasks) {
      ++mark_sweep_->work_chunks_deleted_;
    }
  }

  MarkSweep* const mark_sweep_;
  ThreadPool* const thread_pool_;
  // Thread local mark stack for this task.
  Object* mark_stack_[kMaxSize];
  // Mark stack position.
  size_t mark_stack_pos_;

  void MarkStackPush(Object* obj) ALWAYS_INLINE {
    if (UNLIKELY(mark_stack_pos_ == kMaxSize)) {
      // Mark stack overflow, give 1/2 the stack to the thread pool as a new work task.
      mark_stack_pos_ /= 2;
      auto* task = new MarkStackTask(thread_pool_, mark_sweep_, kMaxSize - mark_stack_pos_,
                                     mark_stack_ + mark_stack_pos_);
      thread_pool_->AddTask(Thread::Current(), task);
    }
    DCHECK(obj != nullptr);
    DCHECK_LT(mark_stack_pos_, kMaxSize);
    mark_stack_[mark_stack_pos_++] = obj;
  }

  virtual void Finalize() {
    delete this;
  }

  // Scans all of the objects on the task's mark stack.
  virtual void Run(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
    ScanObjectParallelVisitor visitor(this);
    // TODO: Tune this.
    static const size_t kFifoSize = 4;
    BoundedFifoPowerOfTwo<Object*, kFifoSize> prefetch_fifo;
    for (;;) {
      Object* obj = nullptr;
      if (kUseMarkStackPrefetch) {
        while (mark_stack_pos_ != 0 && prefetch_fifo.size() < kFifoSize) {
          Object* obj = mark_stack_[--mark_stack_pos_];
          DCHECK(obj != nullptr);
          __builtin_prefetch(obj);
          prefetch_fifo.push_back(obj);
        }
        if (UNLIKELY(prefetch_fifo.empty())) {
          break;
        }
        obj = prefetch_fifo.front();
        prefetch_fifo.pop_front();
      } else {
        if (UNLIKELY(mark_stack_pos_ == 0)) {
          break;
        }
        obj = mark_stack_[--mark_stack_pos_];
      }
      DCHECK(obj != nullptr);
      visitor(obj);
    }
  }
};

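// Scans the objects on a [begin, end) range of cards whose age is at least minimum_age, then
// drains the mark stack entries the task inherited from MarkStackTask.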
class CardScanTask : public MarkStackTask<false> {
 public:
  CardScanTask(ThreadPool* thread_pool, MarkSweep* mark_sweep,
               accounting::ContinuousSpaceBitmap* bitmap,
               byte* begin, byte* end, byte minimum_age, size_t mark_stack_size,
               Object** mark_stack_obj)
      : MarkStackTask<false>(thread_pool, mark_sweep, mark_stack_size, mark_stack_obj),
        bitmap_(bitmap),
        begin_(begin),
        end_(end),
        minimum_age_(minimum_age) {
  }

 protected:
  accounting::ContinuousSpaceBitmap* const bitmap_;
  byte* const begin_;
  byte* const end_;
  const byte minimum_age_;

  virtual void Finalize() {
    delete this;
  }

  virtual void Run(Thread* self) NO_THREAD_SAFETY_ANALYSIS {
    ScanObjectParallelVisitor visitor(this);
    accounting::CardTable* card_table = mark_sweep_->GetHeap()->GetCardTable();
    size_t cards_scanned = card_table->Scan(bitmap_, begin_, end_, visitor, minimum_age_);
    VLOG(heap) << "Parallel scanning cards " << reinterpret_cast<void*>(begin_) << " - "
        << reinterpret_cast<void*>(end_) << " = " << cards_scanned;
    // Finish by emptying our local mark stack.
    MarkStackTask::Run(self);
  }
};

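// Returns how many threads participate in GC work, including the GC thread itself. A single
// thread is used when there is no thread pool or when pause times are not a concern for the
// current process state.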
size_t MarkSweep::GetThreadCount(bool paused) const {
  if (heap_->GetThreadPool() == nullptr || !heap_->CareAboutPauseTimes()) {
    return 1;
  }
  if (paused) {
    return heap_->GetParallelGCThreadCount() + 1;
  } else {
    return heap_->GetConcGCThreadCount() + 1;
  }
}

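// Scans objects on cards at least as old as minimum_age. In the parallel path, each space's card
// range is split into CardScanTasks, and slices of the global mark stack are handed to the tasks
// so that workers start with non-empty local stacks.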
void MarkSweep::ScanGrayObjects(bool paused, byte minimum_age) {
  accounting::CardTable* card_table = GetHeap()->GetCardTable();
  ThreadPool* thread_pool = GetHeap()->GetThreadPool();
  size_t thread_count = GetThreadCount(paused);
  // TODO: The parallel version with only one thread is faster for card scanning; fix this.
  if (kParallelCardScan && thread_count > 1) {
    Thread* self = Thread::Current();
    // Can't have a different split for each space since multiple spaces can have their cards being
    // scanned at the same time.
    TimingLogger::ScopedTiming t(paused ? "(Paused)ScanGrayObjects" : __FUNCTION__,
        GetTimings());
    // Try to take some of the mark stack since we can pass this off to the worker tasks.
    Object** mark_stack_begin = mark_stack_->Begin();
    Object** mark_stack_end = mark_stack_->End();
    const size_t mark_stack_size = mark_stack_end - mark_stack_begin;
    // Estimated number of work tasks we will create.
    const size_t mark_stack_tasks = GetHeap()->GetContinuousSpaces().size() * thread_count;
    DCHECK_NE(mark_stack_tasks, 0U);
    const size_t mark_stack_delta = std::min(CardScanTask::kMaxSize / 2,
                                             mark_stack_size / mark_stack_tasks + 1);
    for (const auto& space : GetHeap()->GetContinuousSpaces()) {
      if (space->GetMarkBitmap() == nullptr) {
        continue;
      }
      byte* card_begin = space->Begin();
      byte* card_end = space->End();
      // Align up the end address. For example, the image space's end
      // may not be card-size-aligned.
      card_end = AlignUp(card_end, accounting::CardTable::kCardSize);
      DCHECK(IsAligned<accounting::CardTable::kCardSize>(card_begin));
      DCHECK(IsAligned<accounting::CardTable::kCardSize>(card_end));
      // Calculate how many bytes of heap we will scan.
      const size_t address_range = card_end - card_begin;
      // Calculate how much address range each task gets.
      const size_t card_delta = RoundUp(address_range / thread_count + 1,
                                        accounting::CardTable::kCardSize);
      // Create the worker tasks for this space.
      while (card_begin != card_end) {
        // Add a range of cards.
        size_t addr_remaining = card_end - card_begin;
        size_t card_increment = std::min(card_delta, addr_remaining);
        // Take from the back of the mark stack.
        size_t mark_stack_remaining = mark_stack_end - mark_stack_begin;
        size_t mark_stack_increment = std::min(mark_stack_delta, mark_stack_remaining);
        mark_stack_end -= mark_stack_increment;
        mark_stack_->PopBackCount(static_cast<int32_t>(mark_stack_increment));
        DCHECK_EQ(mark_stack_end, mark_stack_->End());
        // Add the new task to the thread pool.
        auto* task = new CardScanTask(thread_pool, this, space->GetMarkBitmap(), card_begin,
                                      card_begin + card_increment, minimum_age,
                                      mark_stack_increment, mark_stack_end);
        thread_pool->AddTask(self, task);
        card_begin += card_increment;
      }
    }

    // Note: the card scan below may dirty new cards (and scan them)
    // as a side effect when a Reference object is encountered and
    // queued during the marking. See b/11465268.
    thread_pool->SetMaxActiveWorkers(thread_count - 1);
    thread_pool->StartWorkers(self);
    thread_pool->Wait(self, true, true);
    thread_pool->StopWorkers(self);
  } else {
    for (const auto& space : GetHeap()->GetContinuousSpaces()) {
      if (space->GetMarkBitmap() != nullptr) {
        // Image spaces are handled properly since live == marked for them.
        const char* name = nullptr;
        switch (space->GetGcRetentionPolicy()) {
        case space::kGcRetentionPolicyNeverCollect:
          name = paused ? "(Paused)ScanGrayImageSpaceObjects" : "ScanGrayImageSpaceObjects";
          break;
        case space::kGcRetentionPolicyFullCollect:
          name = paused ? "(Paused)ScanGrayZygoteSpaceObjects" : "ScanGrayZygoteSpaceObjects";
          break;
        case space::kGcRetentionPolicyAlwaysCollect:
          name = paused ? "(Paused)ScanGrayAllocSpaceObjects" : "ScanGrayAllocSpaceObjects";
          break;
        default:
          LOG(FATAL) << "Unreachable";
        }
        TimingLogger::ScopedTiming t(name, GetTimings());
        ScanObjectVisitor visitor(this);
        card_table->Scan(space->GetMarkBitmap(), space->Begin(), space->End(), visitor,
                         minimum_age);
      }
    }
  }
}

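// Visits every marked object in a bitmap address range, scanning each one and then emptying the
// task's local mark stack.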
class RecursiveMarkTask : public MarkStackTask<false> {
 public:
  RecursiveMarkTask(ThreadPool* thread_pool, MarkSweep* mark_sweep,
                    accounting::ContinuousSpaceBitmap* bitmap, uintptr_t begin, uintptr_t end)
      : MarkStackTask<false>(thread_pool, mark_sweep, 0, nullptr), bitmap_(bitmap), begin_(begin),
        end_(end) {
  }

 protected:
  accounting::ContinuousSpaceBitmap* const bitmap_;
  const uintptr_t begin_;
  const uintptr_t end_;

  virtual void Finalize() {
    delete this;
  }

  // Scans all of the objects in the bitmap range.
  virtual void Run(Thread* self) NO_THREAD_SAFETY_ANALYSIS {
    ScanObjectParallelVisitor visitor(this);
    bitmap_->VisitMarkedRange(begin_, end_, visitor);
    // Finish by emptying our local mark stack.
    MarkStackTask::Run(self);
  }
};

// Populates the mark stack based on the set of marked objects and
// recursively marks until the mark stack is emptied.
void MarkSweep::RecursiveMark() {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  // RecursiveMark will build the lists of known instances of the Reference classes. See
  // DelayReferenceReferent for details.
  if (kUseRecursiveMark) {
    const bool partial = GetGcType() == kGcTypePartial;
    ScanObjectVisitor scan_visitor(this);
    auto* self = Thread::Current();
    ThreadPool* thread_pool = heap_->GetThreadPool();
    size_t thread_count = GetThreadCount(false);
    const bool parallel = kParallelRecursiveMark && thread_count > 1;
    mark_stack_->Reset();
    for (const auto& space : GetHeap()->GetContinuousSpaces()) {
      if ((space->GetGcRetentionPolicy() == space::kGcRetentionPolicyAlwaysCollect) ||
          (!partial && space->GetGcRetentionPolicy() == space::kGcRetentionPolicyFullCollect)) {
        current_space_bitmap_ = space->GetMarkBitmap();
        if (current_space_bitmap_ == nullptr) {
          continue;
        }
        if (parallel) {
          // We will use the mark stack in the future.
          // CHECK(mark_stack_->IsEmpty());
          // This function does not handle heap end increasing, so we must use the space end.
          uintptr_t begin = reinterpret_cast<uintptr_t>(space->Begin());
          uintptr_t end = reinterpret_cast<uintptr_t>(space->End());
          atomic_finger_.StoreRelaxed(AtomicInteger::MaxValue());

          // Create a few worker tasks.
          const size_t n = thread_count * 2;
          while (begin != end) {
            uintptr_t start = begin;
            uintptr_t delta = (end - begin) / n;
            delta = RoundUp(delta, KB);
            if (delta < 16 * KB) delta = end - begin;
            begin += delta;
            auto* task = new RecursiveMarkTask(thread_pool, this, current_space_bitmap_, start,
                                               begin);
            thread_pool->AddTask(self, task);
          }
          thread_pool->SetMaxActiveWorkers(thread_count - 1);
          thread_pool->StartWorkers(self);
          thread_pool->Wait(self, true, true);
          thread_pool->StopWorkers(self);
        } else {
          // This function does not handle heap end increasing, so we must use the space end.
          uintptr_t begin = reinterpret_cast<uintptr_t>(space->Begin());
          uintptr_t end = reinterpret_cast<uintptr_t>(space->End());
          current_space_bitmap_->VisitMarkedRange(begin, end, scan_visitor);
        }
      }
    }
  }
  ProcessMarkStack(false);
}

mirror::Object* MarkSweep::IsMarkedCallback(mirror::Object* object, void* arg) {
  if (reinterpret_cast<MarkSweep*>(arg)->IsMarked(object)) {
    return object;
  }
  return nullptr;
}

void MarkSweep::RecursiveMarkDirtyObjects(bool paused, byte minimum_age) {
  ScanGrayObjects(paused, minimum_age);
  ProcessMarkStack(paused);
}

void MarkSweep::ReMarkRoots() {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  Locks::mutator_lock_->AssertExclusiveHeld(Thread::Current());
  Runtime::Current()->VisitRoots(
      MarkRootCallback, this, static_cast<VisitRootFlags>(kVisitRootFlagNewRoots |
                                                          kVisitRootFlagStopLoggingNewRoots |
                                                          kVisitRootFlagClearRootLog));
  if (kVerifyRootsMarked) {
    TimingLogger::ScopedTiming t("(Paused)VerifyRoots", GetTimings());
    Runtime::Current()->VisitRoots(VerifyRootMarked, this);
  }
}

void MarkSweep::SweepSystemWeaks(Thread* self) {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
  Runtime::Current()->SweepSystemWeaks(IsMarkedCallback, this);
}

mirror::Object* MarkSweep::VerifySystemWeakIsLiveCallback(Object* obj, void* arg) {
  reinterpret_cast<MarkSweep*>(arg)->VerifyIsLive(obj);
  // We don't actually want to sweep the object, so let's return "marked".
  return obj;
}

void MarkSweep::VerifyIsLive(const Object* obj) {
  if (!heap_->GetLiveBitmap()->Test(obj)) {
    accounting::ObjectStack* allocation_stack = heap_->allocation_stack_.get();
    CHECK(std::find(allocation_stack->Begin(), allocation_stack->End(), obj) !=
        allocation_stack->End()) << "Found dead object " << obj << "\n" << heap_->DumpSpaces();
  }
}

void MarkSweep::VerifySystemWeaks() {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  // Verify system weaks, uses a special object visitor which returns the input object.
  Runtime::Current()->SweepSystemWeaks(VerifySystemWeakIsLiveCallback, this);
}

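// Checkpoint closure run on each thread: marks the thread's roots in parallel, optionally
// revokes the thread's rosalloc thread-local buffers, and then passes the GC barrier so the
// collector can continue.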
class CheckpointMarkThreadRoots : public Closure {
 public:
  explicit CheckpointMarkThreadRoots(MarkSweep* mark_sweep,
                                     bool revoke_ros_alloc_thread_local_buffers_at_checkpoint)
      : mark_sweep_(mark_sweep),
        revoke_ros_alloc_thread_local_buffers_at_checkpoint_(
            revoke_ros_alloc_thread_local_buffers_at_checkpoint) {
  }

  virtual void Run(Thread* thread) OVERRIDE NO_THREAD_SAFETY_ANALYSIS {
    ATRACE_BEGIN("Marking thread roots");
    // Note: self is not necessarily equal to thread since thread may be suspended.
    Thread* self = Thread::Current();
    CHECK(thread == self || thread->IsSuspended() || thread->GetState() == kWaitingPerformingGc)
        << thread->GetState() << " thread " << thread << " self " << self;
    thread->VisitRoots(MarkSweep::MarkRootParallelCallback, mark_sweep_);
    ATRACE_END();
    if (revoke_ros_alloc_thread_local_buffers_at_checkpoint_) {
      ATRACE_BEGIN("RevokeRosAllocThreadLocalBuffers");
      mark_sweep_->GetHeap()->RevokeRosAllocThreadLocalBuffers(thread);
      ATRACE_END();
    }
    mark_sweep_->GetBarrier().Pass(self);
  }

 private:
  MarkSweep* const mark_sweep_;
  const bool revoke_ros_alloc_thread_local_buffers_at_checkpoint_;
};

void MarkSweep::MarkRootsCheckpoint(Thread* self,
                                    bool revoke_ros_alloc_thread_local_buffers_at_checkpoint) {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  CheckpointMarkThreadRoots check_point(this, revoke_ros_alloc_thread_local_buffers_at_checkpoint);
  ThreadList* thread_list = Runtime::Current()->GetThreadList();
  // Request that the checkpoint be run on all threads, returning a count of the threads that must
  // run through the barrier, including self.
  size_t barrier_count = thread_list->RunCheckpoint(&check_point);
  // Release locks then wait for all mutator threads to pass the barrier.
  // TODO: optimize to not release locks when there are no threads to wait for.
  Locks::heap_bitmap_lock_->ExclusiveUnlock(self);
  Locks::mutator_lock_->SharedUnlock(self);
  {
    ScopedThreadStateChange tsc(self, kWaitingForCheckPointsToRun);
    gc_barrier_->Increment(self, barrier_count);
  }
  Locks::mutator_lock_->SharedLock(self);
  Locks::heap_bitmap_lock_->ExclusiveLock(self);
}

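// Sweeps the objects recorded in the given allocation stack instead of walking entire live
// bitmaps: every object in the stack that is not marked is freed, with frees batched through the
// preallocated chunk buffer.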
void MarkSweep::SweepArray(accounting::ObjectStack* allocations, bool swap_bitmaps) {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  Thread* self = Thread::Current();
  mirror::Object** chunk_free_buffer = reinterpret_cast<mirror::Object**>(
      sweep_array_free_buffer_mem_map_->BaseBegin());
  size_t chunk_free_pos = 0;
  ObjectBytePair freed;
  ObjectBytePair freed_los;
  // How many objects are left in the array, modified after each space is swept.
  Object** objects = allocations->Begin();
  size_t count = allocations->Size();
  // Change the order to ensure that the non-moving space is swept last as an optimization.
  std::vector<space::ContinuousSpace*> sweep_spaces;
  space::ContinuousSpace* non_moving_space = nullptr;
  for (space::ContinuousSpace* space : heap_->GetContinuousSpaces()) {
    if (space->IsAllocSpace() && !immune_region_.ContainsSpace(space) &&
        space->GetLiveBitmap() != nullptr) {
      if (space == heap_->GetNonMovingSpace()) {
        non_moving_space = space;
      } else {
        sweep_spaces.push_back(space);
      }
    }
  }
  // Unlikely to sweep a significant amount of non-movable objects, so we do these after the
  // other alloc spaces as an optimization.
  if (non_moving_space != nullptr) {
    sweep_spaces.push_back(non_moving_space);
  }
  // Start by sweeping the continuous spaces.
  for (space::ContinuousSpace* space : sweep_spaces) {
    space::AllocSpace* alloc_space = space->AsAllocSpace();
    accounting::ContinuousSpaceBitmap* live_bitmap = space->GetLiveBitmap();
    accounting::ContinuousSpaceBitmap* mark_bitmap = space->GetMarkBitmap();
    if (swap_bitmaps) {
      std::swap(live_bitmap, mark_bitmap);
    }
    Object** out = objects;
    for (size_t i = 0; i < count; ++i) {
      Object* obj = objects[i];
      if (kUseThreadLocalAllocationStack && obj == nullptr) {
        continue;
      }
      if (space->HasAddress(obj)) {
        // This object is in the space, remove it from the array and add it to the sweep buffer
        // if needed.
        if (!mark_bitmap->Test(obj)) {
          if (chunk_free_pos >= kSweepArrayChunkFreeSize) {
            TimingLogger::ScopedTiming t("FreeList", GetTimings());
            freed.objects += chunk_free_pos;
            freed.bytes += alloc_space->FreeList(self, chunk_free_pos, chunk_free_buffer);
            chunk_free_pos = 0;
          }
          chunk_free_buffer[chunk_free_pos++] = obj;
        }
      } else {
        *(out++) = obj;
      }
    }
    if (chunk_free_pos > 0) {
      TimingLogger::ScopedTiming t("FreeList", GetTimings());
      freed.objects += chunk_free_pos;
      freed.bytes += alloc_space->FreeList(self, chunk_free_pos, chunk_free_buffer);
      chunk_free_pos = 0;
    }
    // All of the references which the space contained are no longer in the allocation stack;
    // update the count.
    count = out - objects;
  }
  // Handle the large object space.
  space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
  accounting::LargeObjectBitmap* large_live_objects = large_object_space->GetLiveBitmap();
  accounting::LargeObjectBitmap* large_mark_objects = large_object_space->GetMarkBitmap();
  if (swap_bitmaps) {
    std::swap(large_live_objects, large_mark_objects);
  }
  for (size_t i = 0; i < count; ++i) {
    Object* obj = objects[i];
    // Handle large objects.
    if (kUseThreadLocalAllocationStack && obj == nullptr) {
      continue;
    }
    if (!large_mark_objects->Test(obj)) {
      ++freed_los.objects;
      freed_los.bytes += large_object_space->Free(self, obj);
    }
  }
  {
    TimingLogger::ScopedTiming t("RecordFree", GetTimings());
    RecordFree(freed);
    RecordFreeLOS(freed_los);
    t.NewTiming("ResetStack");
    allocations->Reset();
  }
  sweep_array_free_buffer_mem_map_->MadviseDontNeedAndZero();
}

void MarkSweep::Sweep(bool swap_bitmaps) {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  // Ensure that nobody inserted items in the live stack after we swapped the stacks.
  CHECK_GE(live_stack_freeze_size_, GetHeap()->GetLiveStack()->Size());
  {
    TimingLogger::ScopedTiming t2("MarkAllocStackAsLive", GetTimings());
    // Mark everything allocated since the last GC as live so that we can sweep concurrently,
    // knowing that new allocations won't be marked as live.
    accounting::ObjectStack* live_stack = heap_->GetLiveStack();
    heap_->MarkAllocStackAsLive(live_stack);
    live_stack->Reset();
    DCHECK(mark_stack_->IsEmpty());
  }
  for (const auto& space : GetHeap()->GetContinuousSpaces()) {
    if (space->IsContinuousMemMapAllocSpace()) {
      space::ContinuousMemMapAllocSpace* alloc_space = space->AsContinuousMemMapAllocSpace();
      TimingLogger::ScopedTiming split(
          alloc_space->IsZygoteSpace() ? "SweepZygoteSpace" : "SweepMallocSpace", GetTimings());
      RecordFree(alloc_space->Sweep(swap_bitmaps));
    }
  }
  SweepLargeObjects(swap_bitmaps);
}

void MarkSweep::SweepLargeObjects(bool swap_bitmaps) {
  TimingLogger::ScopedTiming split(__FUNCTION__, GetTimings());
  RecordFreeLOS(heap_->GetLargeObjectsSpace()->Sweep(swap_bitmaps));
}

// Process the "referent" field in a java.lang.ref.Reference.  If the referent has not yet been
// marked, put it on the appropriate list in the heap for later processing.
void MarkSweep::DelayReferenceReferent(mirror::Class* klass, mirror::Reference* ref) {
  if (kCountJavaLangRefs) {
    ++reference_count_;
  }
  heap_->GetReferenceProcessor()->DelayReferenceReferent(klass, ref, &HeapReferenceMarkedCallback,
                                                         this);
}

class MarkObjectVisitor {
 public:
  explicit MarkObjectVisitor(MarkSweep* const mark_sweep) ALWAYS_INLINE : mark_sweep_(mark_sweep) {
  }

  void operator()(Object* obj, MemberOffset offset, bool /* is_static */) const
      ALWAYS_INLINE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
    if (kCheckLocks) {
      Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
      Locks::heap_bitmap_lock_->AssertExclusiveHeld(Thread::Current());
    }
    mark_sweep_->MarkObject(obj->GetFieldObject<mirror::Object>(offset));
  }

 private:
  MarkSweep* const mark_sweep_;
};

// Scans an object reference.  Determines the type of the reference
// and dispatches to a specialized scanning routine.
void MarkSweep::ScanObject(Object* obj) {
  MarkObjectVisitor mark_visitor(this);
  DelayReferenceReferentVisitor ref_visitor(this);
  ScanObjectVisit(obj, mark_visitor, ref_visitor);
}

void MarkSweep::ProcessMarkStackCallback(void* arg) {
  reinterpret_cast<MarkSweep*>(arg)->ProcessMarkStack(false);
}

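// Drains the global mark stack in parallel by slicing it into MarkStackTasks of at most kMaxSize
// objects each; the calling thread also executes tasks while waiting for the pool to finish.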
void MarkSweep::ProcessMarkStackParallel(size_t thread_count) {
  Thread* self = Thread::Current();
  ThreadPool* thread_pool = GetHeap()->GetThreadPool();
  const size_t chunk_size = std::min(mark_stack_->Size() / thread_count + 1,
                                     static_cast<size_t>(MarkStackTask<false>::kMaxSize));
  CHECK_GT(chunk_size, 0U);
  // Split the current mark stack up into work tasks.
  for (mirror::Object **it = mark_stack_->Begin(), **end = mark_stack_->End(); it < end; ) {
    const size_t delta = std::min(static_cast<size_t>(end - it), chunk_size);
    thread_pool->AddTask(self, new MarkStackTask<false>(thread_pool, this, delta, it));
    it += delta;
  }
  thread_pool->SetMaxActiveWorkers(thread_count - 1);
  thread_pool->StartWorkers(self);
  thread_pool->Wait(self, true, true);
  thread_pool->StopWorkers(self);
  mark_stack_->Reset();
  CHECK_EQ(work_chunks_created_.LoadSequentiallyConsistent(),
           work_chunks_deleted_.LoadSequentiallyConsistent())
      << " some of the work chunks were leaked";
}

// Scan anything that's on the mark stack.
void MarkSweep::ProcessMarkStack(bool paused) {
  TimingLogger::ScopedTiming t(paused ? "(Paused)ProcessMarkStack" : __FUNCTION__, GetTimings());
  size_t thread_count = GetThreadCount(paused);
  if (kParallelProcessMarkStack && thread_count > 1 &&
      mark_stack_->Size() >= kMinimumParallelMarkStackSize) {
    ProcessMarkStackParallel(thread_count);
  } else {
    // TODO: Tune this.
    static const size_t kFifoSize = 4;
    BoundedFifoPowerOfTwo<Object*, kFifoSize> prefetch_fifo;
    for (;;) {
      Object* obj = nullptr;
      if (kUseMarkStackPrefetch) {
        while (!mark_stack_->IsEmpty() && prefetch_fifo.size() < kFifoSize) {
          Object* obj = mark_stack_->PopBack();
          DCHECK(obj != nullptr);
          __builtin_prefetch(obj);
          prefetch_fifo.push_back(obj);
        }
        if (prefetch_fifo.empty()) {
          break;
        }
        obj = prefetch_fifo.front();
        prefetch_fifo.pop_front();
      } else {
        if (mark_stack_->IsEmpty()) {
          break;
        }
        obj = mark_stack_->PopBack();
      }
      DCHECK(obj != nullptr);
      ScanObject(obj);
    }
  }
}

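// Marked test used during collection: immune objects always count as marked, the current space
// bitmap is consulted first for locality, and everything else falls back to the heap mark
// bitmap.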
inline bool MarkSweep::IsMarked(const Object* object) const {
  if (immune_region_.ContainsObject(object)) {
    return true;
  }
  if (current_space_bitmap_->HasAddress(object)) {
    return current_space_bitmap_->Test(object);
  }
  return mark_bitmap_->Test(object);
}

void MarkSweep::FinishPhase() {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  if (kCountScannedTypes) {
    VLOG(gc) << "MarkSweep scanned classes=" << class_count_.LoadRelaxed()
        << " arrays=" << array_count_.LoadRelaxed() << " other=" << other_count_.LoadRelaxed();
  }
  if (kCountTasks) {
    VLOG(gc) << "Total number of work chunks allocated: " << work_chunks_created_.LoadRelaxed();
  }
  if (kMeasureOverhead) {
    VLOG(gc) << "Overhead time " << PrettyDuration(overhead_time_.LoadRelaxed());
  }
  if (kProfileLargeObjects) {
    VLOG(gc) << "Large objects tested " << large_object_test_.LoadRelaxed()
        << " marked " << large_object_mark_.LoadRelaxed();
  }
  if (kCountJavaLangRefs) {
    VLOG(gc) << "References scanned " << reference_count_.LoadRelaxed();
  }
  if (kCountMarkedObjects) {
    VLOG(gc) << "Marked: null=" << mark_null_count_.LoadRelaxed()
        << " immune=" << mark_immune_count_.LoadRelaxed()
        << " fastpath=" << mark_fastpath_count_.LoadRelaxed()
        << " slowpath=" << mark_slowpath_count_.LoadRelaxed();
  }
  CHECK(mark_stack_->IsEmpty());  // Ensure that the mark stack is empty.
  mark_stack_->Reset();
  WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
  heap_->ClearMarkedObjects();
}

void MarkSweep::RevokeAllThreadLocalBuffers() {
  if (kRevokeRosAllocThreadLocalBuffersAtCheckpoint && IsConcurrent()) {
    // If concurrent, rosalloc thread-local buffers are revoked at the
    // thread checkpoint. Bump pointer space thread-local buffers must
    // not be in use.
    GetHeap()->AssertAllBumpPointerSpaceThreadLocalBuffersAreRevoked();
  } else {
    TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
    GetHeap()->RevokeAllThreadLocalBuffers();
  }
}

}  // namespace collector
}  // namespace gc
}  // namespace art