mark_sweep.cc revision 815873ecc312b1d231acce71e1a16f42cdaf09f2
/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "mark_sweep.h"

#include <algorithm>
#include <climits>
#include <functional>
#include <numeric>
#include <vector>

#include "base/bounded_fifo.h"
#include "base/logging.h"
#include "base/macros.h"
#include "base/mutex-inl.h"
#include "base/timing_logger.h"
#include "gc/accounting/card_table-inl.h"
#include "gc/accounting/heap_bitmap.h"
#include "gc/accounting/mod_union_table.h"
#include "gc/accounting/space_bitmap-inl.h"
#include "gc/heap.h"
#include "gc/space/image_space.h"
#include "gc/space/large_object_space.h"
#include "gc/space/space-inl.h"
#include "indirect_reference_table.h"
#include "intern_table.h"
#include "jni_internal.h"
#include "mark_sweep-inl.h"
#include "mirror/art_field.h"
#include "mirror/art_field-inl.h"
#include "mirror/class-inl.h"
#include "mirror/class_loader.h"
#include "mirror/dex_cache.h"
#include "mirror/object-inl.h"
#include "mirror/object_array.h"
#include "mirror/object_array-inl.h"
#include "monitor.h"
#include "runtime.h"
#include "thread-inl.h"
#include "thread_list.h"
#include "verifier/method_verifier.h"

using ::art::mirror::ArtField;
using ::art::mirror::Class;
using ::art::mirror::Object;
using ::art::mirror::ObjectArray;

namespace art {
namespace gc {
namespace collector {

// Performance options.
constexpr bool kUseRecursiveMark = false;
constexpr bool kUseMarkStackPrefetch = true;
constexpr size_t kSweepArrayChunkFreeSize = 1024;

// Parallelism options.
constexpr bool kParallelCardScan = true;
constexpr bool kParallelRecursiveMark = true;
// Don't attempt to parallelize mark stack processing unless the mark stack has at least n
// elements. This is temporary until we reduce the overhead caused by allocating tasks, etc. Not
// having this can add overhead in ProcessReferences since we may end up doing many calls of
// ProcessMarkStack with very small mark stacks.
constexpr size_t kMinimumParallelMarkStackSize = 128;
constexpr bool kParallelProcessMarkStack = true;

// Profiling and information flags.
constexpr bool kCountClassesMarked = false;
constexpr bool kProfileLargeObjects = false;
constexpr bool kMeasureOverhead = false;
constexpr bool kCountTasks = false;
constexpr bool kCountJavaLangRefs = false;

// Turn off kCheckLocks when profiling the GC since it slows the GC down by up to 40%.
constexpr bool kCheckLocks = kDebugLocking;

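// Note: the immune region is kept as a single contiguous [immune_begin_, immune_end_) address
// range covering spaces that are never collected (such as the image and zygote spaces), which
// lets IsImmune() treat their objects as always marked with a simple range check.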
void MarkSweep::ImmuneSpace(space::ContinuousSpace* space) {
  // Bind live to mark bitmap if necessary.
  if (space->GetLiveBitmap() != space->GetMarkBitmap()) {
    CHECK(space->IsContinuousMemMapAllocSpace());
    space->AsContinuousMemMapAllocSpace()->BindLiveToMarkBitmap();
  }

  // Add the space to the immune region.
  // TODO: Use space limits instead of current end_ since the end_ can be changed by dlmalloc
  // callbacks.
  if (immune_begin_ == nullptr) {
    DCHECK(immune_end_ == nullptr);
    SetImmuneRange(reinterpret_cast<Object*>(space->Begin()),
                   reinterpret_cast<Object*>(space->End()));
  } else {
    const space::ContinuousSpace* prev_space = nullptr;
    // Find out if the previous space is immune.
    for (const space::ContinuousSpace* cur_space : GetHeap()->GetContinuousSpaces()) {
      if (cur_space == space) {
        break;
      }
      prev_space = cur_space;
    }
    // If the previous space was immune, then extend the immune region. Relies on continuous
    // spaces being sorted by Heap::AddContinuousSpace.
    if (prev_space != nullptr && IsImmuneSpace(prev_space)) {
      immune_begin_ = std::min(reinterpret_cast<Object*>(space->Begin()), immune_begin_);
      immune_end_ = std::max(reinterpret_cast<Object*>(space->End()), immune_end_);
    }
  }
}

bool MarkSweep::IsImmuneSpace(const space::ContinuousSpace* space) const {
  return
      immune_begin_ <= reinterpret_cast<Object*>(space->Begin()) &&
      immune_end_ >= reinterpret_cast<Object*>(space->End());
}

void MarkSweep::BindBitmaps() {
  timings_.StartSplit("BindBitmaps");
  WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
  // Mark all of the spaces we never collect as immune.
  for (const auto& space : GetHeap()->GetContinuousSpaces()) {
    if (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyNeverCollect) {
      ImmuneSpace(space);
    }
  }
  timings_.EndSplit();
}

MarkSweep::MarkSweep(Heap* heap, bool is_concurrent, const std::string& name_prefix)
    : GarbageCollector(heap,
                       name_prefix +
                       (is_concurrent ? "concurrent mark sweep" : "mark sweep")),
      current_mark_bitmap_(nullptr),
      mark_stack_(nullptr),
      immune_begin_(nullptr),
      immune_end_(nullptr),
      live_stack_freeze_size_(0),
      gc_barrier_(new Barrier(0)),
      large_object_lock_("mark sweep large object lock", kMarkSweepLargeObjectLock),
      mark_stack_lock_("mark sweep mark stack lock", kMarkSweepMarkStackLock),
      is_concurrent_(is_concurrent) {
}

void MarkSweep::InitializePhase() {
  timings_.Reset();
  TimingLogger::ScopedSplit split("InitializePhase", &timings_);
  mark_stack_ = heap_->mark_stack_.get();
  DCHECK(mark_stack_ != nullptr);
  SetImmuneRange(nullptr, nullptr);
  class_count_ = 0;
  array_count_ = 0;
  other_count_ = 0;
  large_object_test_ = 0;
  large_object_mark_ = 0;
  classes_marked_ = 0;
  overhead_time_ = 0;
  work_chunks_created_ = 0;
  work_chunks_deleted_ = 0;
  reference_count_ = 0;

  FindDefaultMarkBitmap();

  // Do any pre GC verification.
  timings_.NewSplit("PreGcVerification");
  heap_->PreGcVerification(this);
}

void MarkSweep::ProcessReferences(Thread* self) {
  TimingLogger::ScopedSplit split("ProcessReferences", &timings_);
  WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
  GetHeap()->ProcessReferences(timings_, clear_soft_references_, &IsMarkedCallback,
                               &RecursiveMarkObjectCallback, this);
}

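// Pause-time portion of a concurrent collection: with the mutator lock held exclusively (all
// mutators suspended), re-mark the root set, re-scan any cards dirtied while marking ran
// concurrently, and then process references against the completed mark set.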
bool MarkSweep::HandleDirtyObjectsPhase() {
  TimingLogger::ScopedSplit split("HandleDirtyObjectsPhase", &timings_);
  Thread* self = Thread::Current();
  Locks::mutator_lock_->AssertExclusiveHeld(self);

  {
    WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);

    // Re-mark root set.
    ReMarkRoots();

    // Scan dirty objects; this is only required if we are not doing concurrent GC.
    RecursiveMarkDirtyObjects(true, accounting::CardTable::kCardDirty);
  }

  ProcessReferences(self);

  // Only need to do this if we have the card mark verification on, and only during concurrent GC.
  if (GetHeap()->verify_missing_card_marks_ || GetHeap()->verify_pre_gc_heap_ ||
      GetHeap()->verify_post_gc_heap_) {
    WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
    // This second sweep makes sure that we don't have any objects in the live stack which point to
    // freed objects. These cause problems since their references may be previously freed objects.
    SweepArray(GetHeap()->allocation_stack_.get(), false);
    // Since SweepArray() above resets the (active) allocation stack, we need to revoke the
    // thread-local allocation stacks that point into it.
    GetHeap()->RevokeAllThreadLocalAllocationStacks(self);
  }

  timings_.StartSplit("PreSweepingGcVerification");
  heap_->PreSweepingGcVerification(this);
  timings_.EndSplit();

  // Ensure that nobody inserted items into the live stack after we swapped the stacks.
  ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
  CHECK_GE(live_stack_freeze_size_, GetHeap()->GetLiveStack()->Size());

  // Disallow new system weaks to prevent a race which occurs when someone adds a new system
  // weak before we sweep them. Since this new system weak may not be marked, the GC may
  // incorrectly sweep it. This also fixes a race where interning may attempt to return a strong
  // reference to a string that is about to be swept.
  Runtime::Current()->DisallowNewSystemWeaks();
  return true;
}

bool MarkSweep::IsConcurrent() const {
  return is_concurrent_;
}

void MarkSweep::MarkingPhase() {
  TimingLogger::ScopedSplit split("MarkingPhase", &timings_);
  Thread* self = Thread::Current();

  BindBitmaps();
  FindDefaultMarkBitmap();

  // Process dirty cards and add dirty cards to mod union tables.
  heap_->ProcessCards(timings_);

  // Need to do this before the checkpoint since we don't want any threads to add references to
  // the live stack during the recursive mark.
  timings_.NewSplit("SwapStacks");
  heap_->SwapStacks(self);

  WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
  if (Locks::mutator_lock_->IsExclusiveHeld(self)) {
    // If we exclusively hold the mutator lock, all threads must be suspended.
    MarkRoots();
    if (kUseThreadLocalAllocationStack) {
      heap_->RevokeAllThreadLocalAllocationStacks(self);
    }
  } else {
    MarkThreadRoots(self);
    // At this point the live stack should no longer have any mutators which push into it.
    MarkNonThreadRoots();
  }
  live_stack_freeze_size_ = heap_->GetLiveStack()->Size();
  MarkConcurrentRoots();
  UpdateAndMarkModUnion();
  MarkReachableObjects();
}

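// The mod union tables record which references in the immune (image/zygote) spaces point into
// the collected spaces, so marking through those recorded references lets us treat immune-space
// objects as live without scanning the immune spaces themselves.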
void MarkSweep::UpdateAndMarkModUnion() {
  for (const auto& space : heap_->GetContinuousSpaces()) {
    if (IsImmuneSpace(space)) {
      const char* name = space->IsZygoteSpace() ? "UpdateAndMarkZygoteModUnionTable" :
          "UpdateAndMarkImageModUnionTable";
      TimingLogger::ScopedSplit split(name, &timings_);
      accounting::ModUnionTable* mod_union_table = heap_->FindModUnionTableFromSpace(space);
      CHECK(mod_union_table != nullptr);
      mod_union_table->UpdateAndMarkReferences(MarkObjectCallback, this);
    }
  }
}

void MarkSweep::MarkThreadRoots(Thread* self) {
  MarkRootsCheckpoint(self);
}

void MarkSweep::MarkReachableObjects() {
  // Mark everything allocated since the last GC as live so that we can sweep concurrently,
  // knowing that new allocations won't be marked as live.
  timings_.StartSplit("MarkStackAsLive");
  accounting::ObjectStack* live_stack = heap_->GetLiveStack();
  heap_->MarkAllocStackAsLive(live_stack);
  live_stack->Reset();
  timings_.EndSplit();
  // Recursively mark all the non-image bits set in the mark bitmap.
  RecursiveMark();
}

void MarkSweep::ReclaimPhase() {
  TimingLogger::ScopedSplit split("ReclaimPhase", &timings_);
  Thread* self = Thread::Current();

  if (!IsConcurrent()) {
    ProcessReferences(self);
  }

  {
    WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
    SweepSystemWeaks();
  }

  if (IsConcurrent()) {
    Runtime::Current()->AllowNewSystemWeaks();

    TimingLogger::ScopedSplit split("UnMarkAllocStack", &timings_);
    WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
    accounting::ObjectStack* allocation_stack = GetHeap()->allocation_stack_.get();
    // The allocation stack contains things allocated since the start of the GC. These may have
    // been marked during this GC, meaning they won't be eligible for reclaiming in the next
    // sticky GC. Remove these objects from the mark bitmaps so that they will be eligible for
    // sticky collection.
    // There is a race here which is safely handled. Another thread such as the hprof could
    // have flushed the alloc stack after we resumed the threads. This is safe however, since
    // resetting the allocation stack zeros it out with madvise. This means that we will either
    // read nulls or attempt to unmark a newly allocated object which will not be marked in the
    // first place.
    mirror::Object** end = allocation_stack->End();
    for (mirror::Object** it = allocation_stack->Begin(); it != end; ++it) {
      const Object* obj = *it;
      if (obj != nullptr) {
        UnMarkObjectNonNull(obj);
      }
    }
  }

  {
    WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);

    // Reclaim unmarked objects.
    Sweep(false);

    // Swap the live and mark bitmaps for each space which we modified. This is an optimization
    // that enables us to not clear live bits inside of the sweep. Only swaps unbound bitmaps.
    timings_.StartSplit("SwapBitmaps");
    SwapBitmaps();
    timings_.EndSplit();

    // Unbind the live and mark bitmaps.
    TimingLogger::ScopedSplit split("UnBindBitmaps", &timings_);
    GetHeap()->UnBindBitmaps();
  }
}

void MarkSweep::SetImmuneRange(Object* begin, Object* end) {
  immune_begin_ = begin;
  immune_end_ = end;
}

void MarkSweep::FindDefaultMarkBitmap() {
  TimingLogger::ScopedSplit split("FindDefaultMarkBitmap", &timings_);
  for (const auto& space : GetHeap()->GetContinuousSpaces()) {
    accounting::SpaceBitmap* bitmap = space->GetMarkBitmap();
    if (bitmap != nullptr &&
        space->GetGcRetentionPolicy() == space::kGcRetentionPolicyAlwaysCollect) {
      current_mark_bitmap_ = bitmap;
      CHECK(current_mark_bitmap_ != nullptr);
      return;
    }
  }
  GetHeap()->DumpSpaces();
  LOG(FATAL) << "Could not find a default mark bitmap";
}

void MarkSweep::ExpandMarkStack() {
  ResizeMarkStack(mark_stack_->Capacity() * 2);
}

void MarkSweep::ResizeMarkStack(size_t new_size) {
  // Rare case, no need to have Thread::Current be a parameter.
  if (UNLIKELY(mark_stack_->Size() < mark_stack_->Capacity())) {
    // Someone else acquired the lock and expanded the mark stack before us.
    return;
  }
  std::vector<Object*> temp(mark_stack_->Begin(), mark_stack_->End());
  CHECK_LE(mark_stack_->Size(), new_size);
  mark_stack_->Resize(new_size);
  for (const auto& obj : temp) {
    mark_stack_->PushBack(obj);
  }
}

inline void MarkSweep::MarkObjectNonNullParallel(const Object* obj) {
  DCHECK(obj != nullptr);
  if (MarkObjectParallel(obj)) {
    MutexLock mu(Thread::Current(), mark_stack_lock_);
    if (UNLIKELY(mark_stack_->Size() >= mark_stack_->Capacity())) {
      ExpandMarkStack();
    }
    // The object must be pushed onto the mark stack.
    mark_stack_->PushBack(const_cast<Object*>(obj));
  }
}

mirror::Object* MarkSweep::RecursiveMarkObjectCallback(mirror::Object* obj, void* arg) {
  MarkSweep* mark_sweep = reinterpret_cast<MarkSweep*>(arg);
  mark_sweep->MarkObject(obj);
  mark_sweep->ProcessMarkStack(true);
  return obj;
}

inline void MarkSweep::UnMarkObjectNonNull(const Object* obj) {
  DCHECK(!IsImmune(obj));
  // Try to take advantage of locality of references within a space, failing this find the space
  // the hard way.
  accounting::SpaceBitmap* object_bitmap = current_mark_bitmap_;
  if (UNLIKELY(!object_bitmap->HasAddress(obj))) {
    accounting::SpaceBitmap* new_bitmap = heap_->GetMarkBitmap()->GetContinuousSpaceBitmap(obj);
    if (LIKELY(new_bitmap != nullptr)) {
      object_bitmap = new_bitmap;
    } else {
      MarkLargeObject(obj, false);
      return;
    }
  }

  DCHECK(object_bitmap->HasAddress(obj));
  object_bitmap->Clear(obj);
}

inline void MarkSweep::MarkObjectNonNull(const Object* obj) {
  DCHECK(obj != nullptr);

  if (IsImmune(obj)) {
    DCHECK(IsMarked(obj));
    return;
  }

  // Try to take advantage of locality of references within a space, failing this find the space
  // the hard way.
  accounting::SpaceBitmap* object_bitmap = current_mark_bitmap_;
  if (UNLIKELY(!object_bitmap->HasAddress(obj))) {
    accounting::SpaceBitmap* new_bitmap = heap_->GetMarkBitmap()->GetContinuousSpaceBitmap(obj);
    if (LIKELY(new_bitmap != nullptr)) {
      object_bitmap = new_bitmap;
    } else {
      MarkLargeObject(obj, true);
      return;
    }
  }

  // This object was not previously marked.
  if (!object_bitmap->Test(obj)) {
    object_bitmap->Set(obj);
    if (UNLIKELY(mark_stack_->Size() >= mark_stack_->Capacity())) {
      // Lock is not needed but is here anyway to please annotalysis.
      MutexLock mu(Thread::Current(), mark_stack_lock_);
      ExpandMarkStack();
    }
    // The object must be pushed onto the mark stack.
    mark_stack_->PushBack(const_cast<Object*>(obj));
  }
}

// Rare case, probably not worth inlining since it will increase instruction cache miss rate.
bool MarkSweep::MarkLargeObject(const Object* obj, bool set) {
  // TODO: support >1 discontinuous space.
  space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
  accounting::ObjectSet* large_objects = large_object_space->GetMarkObjects();
  if (kProfileLargeObjects) {
    ++large_object_test_;
  }
  if (UNLIKELY(!large_objects->Test(obj))) {
    if (!large_object_space->Contains(obj)) {
      LOG(ERROR) << "Tried to mark " << obj << " not contained by any spaces";
      LOG(ERROR) << "Attempting to see if it's a bad root";
      VerifyRoots();
      LOG(FATAL) << "Can't mark bad root";
    }
    if (kProfileLargeObjects) {
      ++large_object_mark_;
    }
    if (set) {
      large_objects->Set(obj);
    } else {
      large_objects->Clear(obj);
    }
    return true;
  }
  return false;
}

inline bool MarkSweep::MarkObjectParallel(const Object* obj) {
  DCHECK(obj != nullptr);

  if (IsImmune(obj)) {
    DCHECK(IsMarked(obj));
    return false;
  }

  // Try to take advantage of locality of references within a space, failing this find the space
  // the hard way.
  accounting::SpaceBitmap* object_bitmap = current_mark_bitmap_;
  if (UNLIKELY(!object_bitmap->HasAddress(obj))) {
    accounting::SpaceBitmap* new_bitmap = heap_->GetMarkBitmap()->GetContinuousSpaceBitmap(obj);
    if (new_bitmap != nullptr) {
      object_bitmap = new_bitmap;
    } else {
      // TODO: Remove the Thread::Current here?
      // TODO: Convert this to some kind of atomic marking?
      MutexLock mu(Thread::Current(), large_object_lock_);
      return MarkLargeObject(obj, true);
    }
  }

  // Return true if the object was not previously marked.
  return !object_bitmap->AtomicTestAndSet(obj);
}

// Used to mark objects when recursing.  Recursion is done by moving
// the finger across the bitmaps in address order and marking child
// objects.  Any newly-marked objects whose addresses are lower than
// the finger won't be visited by the bitmap scan, so those objects
// need to be added to the mark stack.
inline void MarkSweep::MarkObject(const Object* obj) {
  if (obj != nullptr) {
    MarkObjectNonNull(obj);
  }
}

void MarkSweep::MarkRoot(const Object* obj) {
  if (obj != nullptr) {
    MarkObjectNonNull(obj);
  }
}

void MarkSweep::MarkRootParallelCallback(mirror::Object** root, void* arg, uint32_t /*thread_id*/,
                                         RootType /*root_type*/) {
  DCHECK(root != nullptr);
  DCHECK(arg != nullptr);
  reinterpret_cast<MarkSweep*>(arg)->MarkObjectNonNullParallel(*root);
}

void MarkSweep::MarkRootCallback(Object** root, void* arg, uint32_t /*thread_id*/,
                                 RootType /*root_type*/) {
  DCHECK(root != nullptr);
  DCHECK(arg != nullptr);
  reinterpret_cast<MarkSweep*>(arg)->MarkObjectNonNull(*root);
}

mirror::Object* MarkSweep::MarkObjectCallback(mirror::Object* object, void* arg) {
  DCHECK(object != nullptr);
  DCHECK(arg != nullptr);
  reinterpret_cast<MarkSweep*>(arg)->MarkObjectNonNull(object);
  return object;
}

void MarkSweep::VerifyRootCallback(const Object* root, void* arg, size_t vreg,
                                   const StackVisitor* visitor) {
  reinterpret_cast<MarkSweep*>(arg)->VerifyRoot(root, vreg, visitor);
}

void MarkSweep::VerifyRoot(const Object* root, size_t vreg, const StackVisitor* visitor) {
  // See if the root is on any space bitmap.
  if (GetHeap()->GetLiveBitmap()->GetContinuousSpaceBitmap(root) == nullptr) {
    space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
    if (!large_object_space->Contains(root)) {
      LOG(ERROR) << "Found invalid root: " << root;
      if (visitor != nullptr) {
        LOG(ERROR) << visitor->DescribeLocation() << " in VReg: " << vreg;
      }
    }
  }
}

void MarkSweep::VerifyRoots() {
  Runtime::Current()->GetThreadList()->VerifyRoots(VerifyRootCallback, this);
}

// Marks all objects in the root set.
void MarkSweep::MarkRoots() {
  timings_.StartSplit("MarkRoots");
  Runtime::Current()->VisitNonConcurrentRoots(MarkRootCallback, this);
  timings_.EndSplit();
}

void MarkSweep::MarkNonThreadRoots() {
  timings_.StartSplit("MarkNonThreadRoots");
  Runtime::Current()->VisitNonThreadRoots(MarkRootCallback, this);
  timings_.EndSplit();
}

void MarkSweep::MarkConcurrentRoots() {
  timings_.StartSplit("MarkConcurrentRoots");
  // Visit all runtime roots and clear dirty flags.
  Runtime::Current()->VisitConcurrentRoots(MarkRootCallback, this, false, true);
  timings_.EndSplit();
}

class ScanObjectVisitor {
 public:
  explicit ScanObjectVisitor(MarkSweep* const mark_sweep) ALWAYS_INLINE
      : mark_sweep_(mark_sweep) {}

  // TODO: Fix this when annotalysis works with visitors.
  void operator()(Object* obj) const ALWAYS_INLINE NO_THREAD_SAFETY_ANALYSIS {
    if (kCheckLocks) {
      Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
      Locks::heap_bitmap_lock_->AssertExclusiveHeld(Thread::Current());
    }
    mark_sweep_->ScanObject(obj);
  }

 private:
  MarkSweep* const mark_sweep_;
};

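// A unit of parallel marking work. Each task owns a fixed-size, task-local mark stack; when that
// stack overflows, half of it is split off into a new MarkStackTask and handed back to the
// thread pool, which spreads the load without taking a shared lock on the marking fast path.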
template <bool kUseFinger = false>
class MarkStackTask : public Task {
 public:
  MarkStackTask(ThreadPool* thread_pool, MarkSweep* mark_sweep, size_t mark_stack_size,
                const Object** mark_stack)
      : mark_sweep_(mark_sweep),
        thread_pool_(thread_pool),
        mark_stack_pos_(mark_stack_size) {
    // We may have to copy part of an existing mark stack when another mark stack overflows.
    if (mark_stack_size != 0) {
      DCHECK(mark_stack != nullptr);
      // TODO: Check performance?
      std::copy(mark_stack, mark_stack + mark_stack_size, mark_stack_);
    }
    if (kCountTasks) {
      ++mark_sweep_->work_chunks_created_;
    }
  }

  static const size_t kMaxSize = 1 * KB;

 protected:
  class ScanObjectParallelVisitor {
   public:
    explicit ScanObjectParallelVisitor(MarkStackTask<kUseFinger>* chunk_task) ALWAYS_INLINE
        : chunk_task_(chunk_task) {}

    void operator()(Object* obj) const {
      MarkSweep* mark_sweep = chunk_task_->mark_sweep_;
      mark_sweep->ScanObjectVisit(obj,
          [mark_sweep, this](Object* /* obj */, Object* ref, const MemberOffset& /* offset */,
              bool /* is_static */) ALWAYS_INLINE_LAMBDA {
        if (ref != nullptr && mark_sweep->MarkObjectParallel(ref)) {
          if (kUseFinger) {
            android_memory_barrier();
            if (reinterpret_cast<uintptr_t>(ref) >=
                static_cast<uintptr_t>(mark_sweep->atomic_finger_)) {
              return;
            }
          }
          chunk_task_->MarkStackPush(ref);
        }
      });
    }

   private:
    MarkStackTask<kUseFinger>* const chunk_task_;
  };

  virtual ~MarkStackTask() {
    // Make sure that we have cleared our mark stack.
    DCHECK_EQ(mark_stack_pos_, 0U);
    if (kCountTasks) {
      ++mark_sweep_->work_chunks_deleted_;
    }
  }

  MarkSweep* const mark_sweep_;
  ThreadPool* const thread_pool_;
  // Thread local mark stack for this task.
  const Object* mark_stack_[kMaxSize];
  // Mark stack position.
  size_t mark_stack_pos_;

  void MarkStackPush(const Object* obj) ALWAYS_INLINE {
    if (UNLIKELY(mark_stack_pos_ == kMaxSize)) {
      // Mark stack overflow, give 1/2 the stack to the thread pool as a new work task.
      mark_stack_pos_ /= 2;
      auto* task = new MarkStackTask(thread_pool_, mark_sweep_, kMaxSize - mark_stack_pos_,
                                     mark_stack_ + mark_stack_pos_);
      thread_pool_->AddTask(Thread::Current(), task);
    }
    DCHECK(obj != nullptr);
    DCHECK(mark_stack_pos_ < kMaxSize);
    mark_stack_[mark_stack_pos_++] = obj;
  }

  virtual void Finalize() {
    delete this;
  }

  // Scans all of the objects on our local mark stack.
  virtual void Run(Thread* self) {
    ScanObjectParallelVisitor visitor(this);
    // TODO: Tune this.
    static const size_t kFifoSize = 4;
    BoundedFifoPowerOfTwo<const Object*, kFifoSize> prefetch_fifo;
    for (;;) {
      const Object* obj = nullptr;
      if (kUseMarkStackPrefetch) {
        while (mark_stack_pos_ != 0 && prefetch_fifo.size() < kFifoSize) {
          const Object* mark_stack_obj = mark_stack_[--mark_stack_pos_];
          DCHECK(mark_stack_obj != nullptr);
          __builtin_prefetch(mark_stack_obj);
          prefetch_fifo.push_back(mark_stack_obj);
        }
        if (UNLIKELY(prefetch_fifo.empty())) {
          break;
        }
        obj = prefetch_fifo.front();
        prefetch_fifo.pop_front();
      } else {
        if (UNLIKELY(mark_stack_pos_ == 0)) {
          break;
        }
        obj = mark_stack_[--mark_stack_pos_];
      }
      DCHECK(obj != nullptr);
      visitor(const_cast<mirror::Object*>(obj));
    }
  }
};

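// Scans the objects reachable from a range of dirty cards, marking what they reference, and then
// drains the task-local mark stack inherited from MarkStackTask.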
class CardScanTask : public MarkStackTask<false> {
 public:
  CardScanTask(ThreadPool* thread_pool, MarkSweep* mark_sweep, accounting::SpaceBitmap* bitmap,
               byte* begin, byte* end, byte minimum_age, size_t mark_stack_size,
               const Object** mark_stack_obj)
      : MarkStackTask<false>(thread_pool, mark_sweep, mark_stack_size, mark_stack_obj),
        bitmap_(bitmap),
        begin_(begin),
        end_(end),
        minimum_age_(minimum_age) {
  }

 protected:
  accounting::SpaceBitmap* const bitmap_;
  byte* const begin_;
  byte* const end_;
  const byte minimum_age_;

  virtual void Finalize() {
    delete this;
  }

  virtual void Run(Thread* self) NO_THREAD_SAFETY_ANALYSIS {
    ScanObjectParallelVisitor visitor(this);
    accounting::CardTable* card_table = mark_sweep_->GetHeap()->GetCardTable();
    size_t cards_scanned = card_table->Scan(bitmap_, begin_, end_, visitor, minimum_age_);
    VLOG(heap) << "Parallel scanning cards " << reinterpret_cast<void*>(begin_) << " - "
        << reinterpret_cast<void*>(end_) << " = " << cards_scanned;
    // Finish by emptying our local mark stack.
    MarkStackTask::Run(self);
  }
};

size_t MarkSweep::GetThreadCount(bool paused) const {
  if (heap_->GetThreadPool() == nullptr || !heap_->CareAboutPauseTimes()) {
    return 0;
  }
  if (paused) {
    return heap_->GetParallelGCThreadCount() + 1;
  } else {
    return heap_->GetConcGCThreadCount() + 1;
  }
}

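// Scan objects on cards at least as dirty as minimum_age. In the parallel path, each space's
// card range is divided evenly among the worker tasks, and slices of the shared mark stack are
// handed out to seed those tasks.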
void MarkSweep::ScanGrayObjects(bool paused, byte minimum_age) {
  accounting::CardTable* card_table = GetHeap()->GetCardTable();
  ThreadPool* thread_pool = GetHeap()->GetThreadPool();
  size_t thread_count = GetThreadCount(paused);
  // The parallel version with only one thread is faster for card scanning, TODO: fix.
  if (kParallelCardScan && thread_count > 0) {
    Thread* self = Thread::Current();
    // Can't have a different split for each space since multiple spaces can have their cards being
    // scanned at the same time.
    timings_.StartSplit(paused ? "(Paused)ScanGrayObjects" : "ScanGrayObjects");
    // Try to take some of the mark stack since we can pass this off to the worker tasks.
    const Object** mark_stack_begin = const_cast<const Object**>(mark_stack_->Begin());
    const Object** mark_stack_end = const_cast<const Object**>(mark_stack_->End());
    const size_t mark_stack_size = mark_stack_end - mark_stack_begin;
    // Estimated number of work tasks we will create.
    const size_t mark_stack_tasks = GetHeap()->GetContinuousSpaces().size() * thread_count;
    DCHECK_NE(mark_stack_tasks, 0U);
    const size_t mark_stack_delta = std::min(CardScanTask::kMaxSize / 2,
                                             mark_stack_size / mark_stack_tasks + 1);
    for (const auto& space : GetHeap()->GetContinuousSpaces()) {
      if (space->GetMarkBitmap() == nullptr) {
        continue;
      }
      byte* card_begin = space->Begin();
      byte* card_end = space->End();
      // Align up the end address. For example, the image space's end
      // may not be card-size-aligned.
      card_end = AlignUp(card_end, accounting::CardTable::kCardSize);
      DCHECK(IsAligned<accounting::CardTable::kCardSize>(card_begin));
      DCHECK(IsAligned<accounting::CardTable::kCardSize>(card_end));
      // Calculate how many bytes of heap we will scan.
      const size_t address_range = card_end - card_begin;
      // Calculate how much address range each task gets.
      const size_t card_delta = RoundUp(address_range / thread_count + 1,
                                        accounting::CardTable::kCardSize);
      // Create the worker tasks for this space.
      while (card_begin != card_end) {
        // Add a range of cards.
        size_t addr_remaining = card_end - card_begin;
        size_t card_increment = std::min(card_delta, addr_remaining);
        // Take from the back of the mark stack.
        size_t mark_stack_remaining = mark_stack_end - mark_stack_begin;
        size_t mark_stack_increment = std::min(mark_stack_delta, mark_stack_remaining);
        mark_stack_end -= mark_stack_increment;
        mark_stack_->PopBackCount(static_cast<int32_t>(mark_stack_increment));
        DCHECK_EQ(mark_stack_end, mark_stack_->End());
        // Add the new task to the thread pool.
        auto* task = new CardScanTask(thread_pool, this, space->GetMarkBitmap(), card_begin,
                                      card_begin + card_increment, minimum_age,
                                      mark_stack_increment, mark_stack_end);
        thread_pool->AddTask(self, task);
        card_begin += card_increment;
      }
    }

    // Note: the card scan below may dirty new cards (and scan them)
    // as a side effect when a Reference object is encountered and
    // queued during the marking. See b/11465268.
    thread_pool->SetMaxActiveWorkers(thread_count - 1);
    thread_pool->StartWorkers(self);
    thread_pool->Wait(self, true, true);
    thread_pool->StopWorkers(self);
    timings_.EndSplit();
  } else {
    for (const auto& space : GetHeap()->GetContinuousSpaces()) {
      if (space->GetMarkBitmap() != nullptr) {
        // Image spaces are handled properly since live == marked for them.
        switch (space->GetGcRetentionPolicy()) {
          case space::kGcRetentionPolicyNeverCollect:
            timings_.StartSplit(paused ? "(Paused)ScanGrayImageSpaceObjects" :
                "ScanGrayImageSpaceObjects");
            break;
          case space::kGcRetentionPolicyFullCollect:
            timings_.StartSplit(paused ? "(Paused)ScanGrayZygoteSpaceObjects" :
                "ScanGrayZygoteSpaceObjects");
            break;
          case space::kGcRetentionPolicyAlwaysCollect:
            timings_.StartSplit(paused ? "(Paused)ScanGrayAllocSpaceObjects" :
                "ScanGrayAllocSpaceObjects");
            break;
        }
        ScanObjectVisitor visitor(this);
        card_table->Scan(space->GetMarkBitmap(), space->Begin(), space->End(), visitor,
                         minimum_age);
        timings_.EndSplit();
      }
    }
  }
}

class RecursiveMarkTask : public MarkStackTask<false> {
 public:
  RecursiveMarkTask(ThreadPool* thread_pool, MarkSweep* mark_sweep,
                    accounting::SpaceBitmap* bitmap, uintptr_t begin, uintptr_t end)
      : MarkStackTask<false>(thread_pool, mark_sweep, 0, nullptr),
        bitmap_(bitmap),
        begin_(begin),
        end_(end) {
  }

 protected:
  accounting::SpaceBitmap* const bitmap_;
  const uintptr_t begin_;
  const uintptr_t end_;

  virtual void Finalize() {
    delete this;
  }

  // Scans all of the objects in the given bitmap range.
  virtual void Run(Thread* self) NO_THREAD_SAFETY_ANALYSIS {
    ScanObjectParallelVisitor visitor(this);
    bitmap_->VisitMarkedRange(begin_, end_, visitor);
    // Finish by emptying our local mark stack.
    MarkStackTask::Run(self);
  }
};

// Populates the mark stack based on the set of marked objects and
// recursively marks until the mark stack is emptied.
void MarkSweep::RecursiveMark() {
  TimingLogger::ScopedSplit split("RecursiveMark", &timings_);
  // RecursiveMark will build the lists of known instances of the Reference classes. See
  // DelayReferenceReferent for details.
  if (kUseRecursiveMark) {
    const bool partial = GetGcType() == kGcTypePartial;
    ScanObjectVisitor scan_visitor(this);
    auto* self = Thread::Current();
    ThreadPool* thread_pool = heap_->GetThreadPool();
    size_t thread_count = GetThreadCount(false);
    const bool parallel = kParallelRecursiveMark && thread_count > 1;
    mark_stack_->Reset();
    for (const auto& space : GetHeap()->GetContinuousSpaces()) {
      if ((space->GetGcRetentionPolicy() == space::kGcRetentionPolicyAlwaysCollect) ||
          (!partial && space->GetGcRetentionPolicy() == space::kGcRetentionPolicyFullCollect)) {
        current_mark_bitmap_ = space->GetMarkBitmap();
        if (current_mark_bitmap_ == nullptr) {
          continue;
        }
        if (parallel) {
          // We will use the mark stack in the future.
          // CHECK(mark_stack_->IsEmpty());
          // This function does not handle heap end increasing, so we must use the space end.
          uintptr_t begin = reinterpret_cast<uintptr_t>(space->Begin());
          uintptr_t end = reinterpret_cast<uintptr_t>(space->End());
          atomic_finger_ = static_cast<int32_t>(0xFFFFFFFF);

          // Create a few worker tasks.
          const size_t n = thread_count * 2;
          while (begin != end) {
            uintptr_t start = begin;
            uintptr_t delta = (end - begin) / n;
            delta = RoundUp(delta, KB);
            if (delta < 16 * KB) delta = end - begin;
            begin += delta;
            auto* task = new RecursiveMarkTask(thread_pool, this, current_mark_bitmap_, start,
                                               begin);
            thread_pool->AddTask(self, task);
          }
          thread_pool->SetMaxActiveWorkers(thread_count - 1);
          thread_pool->StartWorkers(self);
          thread_pool->Wait(self, true, true);
          thread_pool->StopWorkers(self);
        } else {
          // This function does not handle heap end increasing, so we must use the space end.
          uintptr_t begin = reinterpret_cast<uintptr_t>(space->Begin());
          uintptr_t end = reinterpret_cast<uintptr_t>(space->End());
          current_mark_bitmap_->VisitMarkedRange(begin, end, scan_visitor);
        }
      }
    }
  }
  ProcessMarkStack(false);
}

mirror::Object* MarkSweep::IsMarkedCallback(mirror::Object* object, void* arg) {
  if (reinterpret_cast<MarkSweep*>(arg)->IsMarked(object)) {
    return object;
  }
  return nullptr;
}

void MarkSweep::RecursiveMarkDirtyObjects(bool paused, byte minimum_age) {
  ScanGrayObjects(paused, minimum_age);
  ProcessMarkStack(paused);
}

void MarkSweep::ReMarkRoots() {
  timings_.StartSplit("ReMarkRoots");
  Runtime::Current()->VisitRoots(MarkRootCallback, this, true, true);
  timings_.EndSplit();
}

void MarkSweep::SweepSystemWeaks() {
  Runtime* runtime = Runtime::Current();
  timings_.StartSplit("SweepSystemWeaks");
  runtime->SweepSystemWeaks(IsMarkedCallback, this);
  timings_.EndSplit();
}

mirror::Object* MarkSweep::VerifySystemWeakIsLiveCallback(Object* obj, void* arg) {
  reinterpret_cast<MarkSweep*>(arg)->VerifyIsLive(obj);
  // We don't actually want to sweep the object, so let's return "marked".
  return obj;
}

void MarkSweep::VerifyIsLive(const Object* obj) {
  Heap* heap = GetHeap();
  if (!heap->GetLiveBitmap()->Test(obj)) {
    space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
    if (!large_object_space->GetLiveObjects()->Test(obj)) {
      if (std::find(heap->allocation_stack_->Begin(), heap->allocation_stack_->End(), obj) ==
          heap->allocation_stack_->End()) {
        // Object not found!
        heap->DumpSpaces();
        LOG(FATAL) << "Found dead object " << obj;
      }
    }
  }
}

void MarkSweep::VerifySystemWeaks() {
  // Verify system weaks, uses a special object visitor which returns the input object.
  Runtime::Current()->SweepSystemWeaks(VerifySystemWeakIsLiveCallback, this);
}

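// Checkpoint closure run on each mutator thread: the thread visits and marks its own roots at a
// GC-safe point, optionally revokes its thread-local allocation stack, and then passes the GC
// barrier so the collector knows when every thread has finished.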
class CheckpointMarkThreadRoots : public Closure {
 public:
  explicit CheckpointMarkThreadRoots(MarkSweep* mark_sweep) : mark_sweep_(mark_sweep) {}

  virtual void Run(Thread* thread) NO_THREAD_SAFETY_ANALYSIS {
    ATRACE_BEGIN("Marking thread roots");
    // Note: self is not necessarily equal to thread since thread may be suspended.
    Thread* self = Thread::Current();
    CHECK(thread == self || thread->IsSuspended() || thread->GetState() == kWaitingPerformingGc)
        << thread->GetState() << " thread " << thread << " self " << self;
    thread->VisitRoots(MarkSweep::MarkRootParallelCallback, mark_sweep_);
    ATRACE_END();
    if (kUseThreadLocalAllocationStack) {
      thread->RevokeThreadLocalAllocationStack();
    }
    mark_sweep_->GetBarrier().Pass(self);
  }

 private:
  MarkSweep* const mark_sweep_;
};

void MarkSweep::MarkRootsCheckpoint(Thread* self) {
  CheckpointMarkThreadRoots check_point(this);
  timings_.StartSplit("MarkRootsCheckpoint");
  ThreadList* thread_list = Runtime::Current()->GetThreadList();
  // Request that the checkpoint be run on all threads, returning a count of the threads that
  // must run through the barrier, including self.
  size_t barrier_count = thread_list->RunCheckpoint(&check_point);
  // Release locks then wait for all mutator threads to pass the barrier.
  // TODO: optimize to not release locks when there are no threads to wait for.
  Locks::heap_bitmap_lock_->ExclusiveUnlock(self);
  Locks::mutator_lock_->SharedUnlock(self);
  ThreadState old_state = self->SetState(kWaitingForCheckPointsToRun);
  CHECK_EQ(old_state, kWaitingPerformingGc);
  gc_barrier_->Increment(self, barrier_count);
  self->SetState(kWaitingPerformingGc);
  Locks::mutator_lock_->SharedLock(self);
  Locks::heap_bitmap_lock_->ExclusiveLock(self);
  timings_.EndSplit();
}

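// Sweep path used when the live objects are enumerated by the allocation stack (e.g. sticky
// collections) rather than by bitmaps: walk the frozen stack, free anything unmarked, and batch
// frees into kSweepArrayChunkFreeSize chunks to amortize the cost of each FreeList call.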
void MarkSweep::SweepArray(accounting::ObjectStack* allocations, bool swap_bitmaps) {
  timings_.StartSplit("SweepArray");
  Thread* self = Thread::Current();
  mirror::Object* chunk_free_buffer[kSweepArrayChunkFreeSize];
  size_t chunk_free_pos = 0;
  size_t freed_bytes = 0;
  size_t freed_large_object_bytes = 0;
  size_t freed_objects = 0;
  size_t freed_large_objects = 0;
  // How many objects are left in the array, modified after each space is swept.
  Object** objects = const_cast<Object**>(allocations->Begin());
  size_t count = allocations->Size();
  // Change the order to ensure that the non-moving space is swept last as an optimization.
  std::vector<space::ContinuousSpace*> sweep_spaces;
  space::ContinuousSpace* non_moving_space = nullptr;
  for (space::ContinuousSpace* space : heap_->GetContinuousSpaces()) {
    if (space->IsAllocSpace() && !IsImmuneSpace(space) && space->GetLiveBitmap() != nullptr) {
      if (space == heap_->GetNonMovingSpace()) {
        non_moving_space = space;
      } else {
        sweep_spaces.push_back(space);
      }
    }
  }
  // Unlikely to sweep a significant amount of non-movable objects, so we do these after the
  // other alloc spaces as an optimization.
  if (non_moving_space != nullptr) {
    sweep_spaces.push_back(non_moving_space);
  }
  // Start by sweeping the continuous spaces.
  for (space::ContinuousSpace* space : sweep_spaces) {
    space::AllocSpace* alloc_space = space->AsAllocSpace();
    accounting::SpaceBitmap* live_bitmap = space->GetLiveBitmap();
    accounting::SpaceBitmap* mark_bitmap = space->GetMarkBitmap();
    if (swap_bitmaps) {
      std::swap(live_bitmap, mark_bitmap);
    }
    Object** out = objects;
    for (size_t i = 0; i < count; ++i) {
      Object* obj = objects[i];
      if (kUseThreadLocalAllocationStack && obj == nullptr) {
        continue;
      }
      if (space->HasAddress(obj)) {
        // This object is in the space, remove it from the array and add it to the sweep buffer
        // if needed.
        if (!mark_bitmap->Test(obj)) {
          if (chunk_free_pos >= kSweepArrayChunkFreeSize) {
            timings_.StartSplit("FreeList");
            freed_objects += chunk_free_pos;
            freed_bytes += alloc_space->FreeList(self, chunk_free_pos, chunk_free_buffer);
            timings_.EndSplit();
            chunk_free_pos = 0;
          }
          chunk_free_buffer[chunk_free_pos++] = obj;
        }
      } else {
        *(out++) = obj;
      }
    }
    if (chunk_free_pos > 0) {
      timings_.StartSplit("FreeList");
      freed_objects += chunk_free_pos;
      freed_bytes += alloc_space->FreeList(self, chunk_free_pos, chunk_free_buffer);
      timings_.EndSplit();
      chunk_free_pos = 0;
    }
    // All of the references which the space contained are no longer in the allocation stack,
    // update the count.
    count = out - objects;
  }
  // Handle the large object space.
  space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
  accounting::ObjectSet* large_live_objects = large_object_space->GetLiveObjects();
  accounting::ObjectSet* large_mark_objects = large_object_space->GetMarkObjects();
  if (swap_bitmaps) {
    std::swap(large_live_objects, large_mark_objects);
  }
  for (size_t i = 0; i < count; ++i) {
    Object* obj = objects[i];
    // Handle large objects.
    if (kUseThreadLocalAllocationStack && obj == nullptr) {
      continue;
    }
    if (!large_mark_objects->Test(obj)) {
      ++freed_large_objects;
      freed_large_object_bytes += large_object_space->Free(self, obj);
    }
  }
  timings_.EndSplit();

  timings_.StartSplit("RecordFree");
  VLOG(heap) << "Freed " << freed_objects << "/" << count
             << " objects with size " << PrettySize(freed_bytes);
  heap_->RecordFree(freed_objects + freed_large_objects, freed_bytes + freed_large_object_bytes);
  freed_objects_.FetchAndAdd(freed_objects);
  freed_large_objects_.FetchAndAdd(freed_large_objects);
  freed_bytes_.FetchAndAdd(freed_bytes);
  freed_large_object_bytes_.FetchAndAdd(freed_large_object_bytes);
  timings_.EndSplit();

  timings_.StartSplit("ResetStack");
  allocations->Reset();
  timings_.EndSplit();
}

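// Bitmap-based sweep used by full and partial collections: each alloc space frees the objects
// that are set in its live bitmap but clear in its mark bitmap.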
void MarkSweep::Sweep(bool swap_bitmaps) {
  DCHECK(mark_stack_->IsEmpty());
  // Note: the split must be named, otherwise the temporary is destroyed immediately and the
  // "Sweep" timing would cover nothing.
  TimingLogger::ScopedSplit split("Sweep", &timings_);
  for (const auto& space : GetHeap()->GetContinuousSpaces()) {
    if (space->IsContinuousMemMapAllocSpace()) {
      space::ContinuousMemMapAllocSpace* alloc_space = space->AsContinuousMemMapAllocSpace();
      TimingLogger::ScopedSplit split(
          alloc_space->IsZygoteSpace() ? "SweepZygoteSpace" : "SweepMallocSpace", &timings_);
      size_t freed_objects = 0;
      size_t freed_bytes = 0;
      alloc_space->Sweep(swap_bitmaps, &freed_objects, &freed_bytes);
      heap_->RecordFree(freed_objects, freed_bytes);
      freed_objects_.FetchAndAdd(freed_objects);
      freed_bytes_.FetchAndAdd(freed_bytes);
    }
  }
  SweepLargeObjects(swap_bitmaps);
}

void MarkSweep::SweepLargeObjects(bool swap_bitmaps) {
  TimingLogger::ScopedSplit split("SweepLargeObjects", &timings_);
  size_t freed_objects = 0;
  size_t freed_bytes = 0;
  GetHeap()->GetLargeObjectsSpace()->Sweep(swap_bitmaps, &freed_objects, &freed_bytes);
  freed_large_objects_.FetchAndAdd(freed_objects);
  freed_large_object_bytes_.FetchAndAdd(freed_bytes);
  GetHeap()->RecordFree(freed_objects, freed_bytes);
}

// Process the "referent" field in a java.lang.ref.Reference.  If the
// referent has not yet been marked, put it on the appropriate list in
// the heap for later processing.
void MarkSweep::DelayReferenceReferent(mirror::Class* klass, Object* obj) {
  DCHECK(klass != nullptr);
  DCHECK(klass->IsReferenceClass());
  DCHECK(obj != nullptr);
  heap_->DelayReferenceReferent(klass, obj, IsMarkedCallback, this);
}

class MarkObjectVisitor {
 public:
  explicit MarkObjectVisitor(MarkSweep* const mark_sweep) ALWAYS_INLINE : mark_sweep_(mark_sweep) {}

  // TODO: Fix this when annotalysis works with visitors.
  void operator()(const Object* /* obj */, const Object* ref, const MemberOffset& /* offset */,
                  bool /* is_static */) const ALWAYS_INLINE
      NO_THREAD_SAFETY_ANALYSIS {
    if (kCheckLocks) {
      Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
      Locks::heap_bitmap_lock_->AssertExclusiveHeld(Thread::Current());
    }
    mark_sweep_->MarkObject(ref);
  }

 private:
  MarkSweep* const mark_sweep_;
};

// Scans an object reference.  Determines the type of the reference
// and dispatches to a specialized scanning routine.
void MarkSweep::ScanObject(Object* obj) {
  MarkObjectVisitor visitor(this);
  ScanObjectVisit(obj, visitor);
}

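// Drain the shared mark stack in parallel by carving it into chunks of at most
// MarkStackTask::kMaxSize entries and submitting one MarkStackTask per chunk to the thread pool.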
void MarkSweep::ProcessMarkStackParallel(size_t thread_count) {
  Thread* self = Thread::Current();
  ThreadPool* thread_pool = GetHeap()->GetThreadPool();
  const size_t chunk_size = std::min(mark_stack_->Size() / thread_count + 1,
                                     static_cast<size_t>(MarkStackTask<false>::kMaxSize));
  CHECK_GT(chunk_size, 0U);
  // Split the current mark stack up into work tasks.
  for (mirror::Object **it = mark_stack_->Begin(), **end = mark_stack_->End(); it < end; ) {
    const size_t delta = std::min(static_cast<size_t>(end - it), chunk_size);
    thread_pool->AddTask(self, new MarkStackTask<false>(thread_pool, this, delta,
                                                        const_cast<const mirror::Object**>(it)));
    it += delta;
  }
  thread_pool->SetMaxActiveWorkers(thread_count - 1);
  thread_pool->StartWorkers(self);
  thread_pool->Wait(self, true, true);
  thread_pool->StopWorkers(self);
  mark_stack_->Reset();
  CHECK_EQ(work_chunks_created_, work_chunks_deleted_) << " some of the work chunks were leaked";
}

// Scan anything that's on the mark stack.
void MarkSweep::ProcessMarkStack(bool paused) {
  timings_.StartSplit("ProcessMarkStack");
  size_t thread_count = GetThreadCount(paused);
  if (kParallelProcessMarkStack && thread_count > 1 &&
      mark_stack_->Size() >= kMinimumParallelMarkStackSize) {
    ProcessMarkStackParallel(thread_count);
  } else {
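    // Software-pipelined drain: pop a few objects ahead into a small FIFO and prefetch each one,
    // so an object's memory is likely to be in cache by the time it is scanned.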
    // TODO: Tune this.
    static const size_t kFifoSize = 4;
    BoundedFifoPowerOfTwo<Object*, kFifoSize> prefetch_fifo;
    for (;;) {
      Object* obj = nullptr;
      if (kUseMarkStackPrefetch) {
        while (!mark_stack_->IsEmpty() && prefetch_fifo.size() < kFifoSize) {
          Object* mark_stack_obj = mark_stack_->PopBack();
          DCHECK(mark_stack_obj != nullptr);
          __builtin_prefetch(mark_stack_obj);
          prefetch_fifo.push_back(mark_stack_obj);
        }
        if (prefetch_fifo.empty()) {
          break;
        }
        obj = prefetch_fifo.front();
        prefetch_fifo.pop_front();
      } else {
        if (mark_stack_->IsEmpty()) {
          break;
        }
        obj = mark_stack_->PopBack();
      }
      DCHECK(obj != nullptr);
      ScanObject(obj);
    }
  }
  timings_.EndSplit();
}

inline bool MarkSweep::IsMarked(const Object* object) const
    SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
  if (IsImmune(object)) {
    return true;
  }
  DCHECK(current_mark_bitmap_ != nullptr);
  if (current_mark_bitmap_->HasAddress(object)) {
    return current_mark_bitmap_->Test(object);
  }
  return heap_->GetMarkBitmap()->Test(object);
}

void MarkSweep::FinishPhase() {
  TimingLogger::ScopedSplit split("FinishPhase", &timings_);
  // Can't enqueue references if we hold the mutator lock.
  Heap* heap = GetHeap();
  timings_.NewSplit("PostGcVerification");
  heap->PostGcVerification(this);

  timings_.NewSplit("RequestHeapTrim");
  heap->RequestHeapTrim();

  // Update the cumulative statistics.
  total_freed_objects_ += GetFreedObjects() + GetFreedLargeObjects();
  total_freed_bytes_ += GetFreedBytes() + GetFreedLargeObjectBytes();

  // Ensure that the mark stack is empty.
  CHECK(mark_stack_->IsEmpty());

  if (kCountScannedTypes) {
    VLOG(gc) << "MarkSweep scanned classes=" << class_count_ << " arrays=" << array_count_
             << " other=" << other_count_;
  }

  if (kCountTasks) {
    VLOG(gc) << "Total number of work chunks allocated: " << work_chunks_created_;
  }

  if (kMeasureOverhead) {
    VLOG(gc) << "Overhead time " << PrettyDuration(overhead_time_);
  }

  if (kProfileLargeObjects) {
    VLOG(gc) << "Large objects tested " << large_object_test_ << " marked " << large_object_mark_;
  }

  if (kCountClassesMarked) {
    VLOG(gc) << "Classes marked " << classes_marked_;
  }

  if (kCountJavaLangRefs) {
    VLOG(gc) << "References scanned " << reference_count_;
  }

  // Update the cumulative loggers.
  cumulative_timings_.Start();
  cumulative_timings_.AddLogger(timings_);
  cumulative_timings_.End();

  // Clear all of the spaces' mark bitmaps.
  for (const auto& space : GetHeap()->GetContinuousSpaces()) {
    accounting::SpaceBitmap* bitmap = space->GetMarkBitmap();
    if (bitmap != nullptr &&
        space->GetGcRetentionPolicy() != space::kGcRetentionPolicyNeverCollect) {
      bitmap->Clear();
    }
  }
  mark_stack_->Reset();

  // Reset the marked large objects.
  space::LargeObjectSpace* large_objects = GetHeap()->GetLargeObjectsSpace();
  large_objects->GetMarkObjects()->Clear();
}

}  // namespace collector
}  // namespace gc
}  // namespace art