mark_sweep.cc revision 10d2508b105427ef1bcaf0c222873bae7acc66d3
1/*
2 * Copyright (C) 2011 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 *      http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17#include "mark_sweep.h"
18
19#include <atomic>
20#include <functional>
21#include <numeric>
22#include <climits>
23#include <vector>
24
25#define ATRACE_TAG ATRACE_TAG_DALVIK
26#include "cutils/trace.h"
27
28#include "base/bounded_fifo.h"
29#include "base/logging.h"
30#include "base/macros.h"
31#include "base/mutex-inl.h"
32#include "base/time_utils.h"
33#include "base/timing_logger.h"
34#include "gc/accounting/card_table-inl.h"
35#include "gc/accounting/heap_bitmap-inl.h"
36#include "gc/accounting/mod_union_table.h"
37#include "gc/accounting/space_bitmap-inl.h"
38#include "gc/heap.h"
39#include "gc/reference_processor.h"
40#include "gc/space/large_object_space.h"
41#include "gc/space/space-inl.h"
42#include "mark_sweep-inl.h"
43#include "mirror/object-inl.h"
44#include "runtime.h"
45#include "scoped_thread_state_change.h"
46#include "thread-inl.h"
47#include "thread_list.h"
48
49namespace art {
50namespace gc {
51namespace collector {
52
53// Performance options.
54static constexpr bool kUseRecursiveMark = false;
55static constexpr bool kUseMarkStackPrefetch = true;
56static constexpr size_t kSweepArrayChunkFreeSize = 1024;
57static constexpr bool kPreCleanCards = true;
58
59// Parallelism options.
60static constexpr bool kParallelCardScan = true;
61static constexpr bool kParallelRecursiveMark = true;
62// Don't attempt to parallelize mark stack processing unless the mark stack is at least n
63// elements. This is temporary until we reduce the overhead caused by allocating tasks, etc. Not
64// having this can add overhead in ProcessReferences since we may end up doing many calls of
65// ProcessMarkStack with very small mark stacks.
66static constexpr size_t kMinimumParallelMarkStackSize = 128;
67static constexpr bool kParallelProcessMarkStack = true;
68
69// Profiling and information flags.
70static constexpr bool kProfileLargeObjects = false;
71static constexpr bool kMeasureOverhead = false;
72static constexpr bool kCountTasks = false;
73static constexpr bool kCountMarkedObjects = false;
74
75// Turn off kCheckLocks when profiling the GC since it slows the GC down by up to 40%.
76static constexpr bool kCheckLocks = kDebugLocking;
77static constexpr bool kVerifyRootsMarked = kIsDebugBuild;
78
79// If true, revoke the rosalloc thread-local buffers at the
80// checkpoint, as opposed to during the pause.
81static constexpr bool kRevokeRosAllocThreadLocalBuffersAtCheckpoint = true;
82
83void MarkSweep::BindBitmaps() {
84  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
85  WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
86  // Mark all of the spaces we never collect as immune.
87  for (const auto& space : GetHeap()->GetContinuousSpaces()) {
88    if (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyNeverCollect) {
89      CHECK(immune_region_.AddContinuousSpace(space)) << "Failed to add space " << *space;
90    }
91  }
92}
93
94MarkSweep::MarkSweep(Heap* heap, bool is_concurrent, const std::string& name_prefix)
95    : GarbageCollector(heap,
96                       name_prefix +
97                       (is_concurrent ? "concurrent mark sweep": "mark sweep")),
98      current_space_bitmap_(nullptr),
99      mark_bitmap_(nullptr),
100      mark_stack_(nullptr),
101      gc_barrier_(new Barrier(0)),
102      mark_stack_lock_("mark sweep mark stack lock", kMarkSweepMarkStackLock),
103      is_concurrent_(is_concurrent),
104      live_stack_freeze_size_(0) {
105  std::string error_msg;
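  // Reserve a page-aligned buffer of kSweepArrayChunkFreeSize object pointers; SweepArray uses it
  // to batch calls to FreeList.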
106  MemMap* mem_map = MemMap::MapAnonymous(
107      "mark sweep sweep array free buffer", nullptr,
108      RoundUp(kSweepArrayChunkFreeSize * sizeof(mirror::Object*), kPageSize),
109      PROT_READ | PROT_WRITE, false, false, &error_msg);
110  CHECK(mem_map != nullptr) << "Couldn't allocate sweep array free buffer: " << error_msg;
111  sweep_array_free_buffer_mem_map_.reset(mem_map);
112}
113
114void MarkSweep::InitializePhase() {
115  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
116  mark_stack_ = heap_->GetMarkStack();
117  DCHECK(mark_stack_ != nullptr);
118  immune_region_.Reset();
119  no_reference_class_count_.StoreRelaxed(0);
120  normal_count_.StoreRelaxed(0);
121  class_count_.StoreRelaxed(0);
122  object_array_count_.StoreRelaxed(0);
123  other_count_.StoreRelaxed(0);
124  reference_count_.StoreRelaxed(0);
125  large_object_test_.StoreRelaxed(0);
126  large_object_mark_.StoreRelaxed(0);
127  overhead_time_.StoreRelaxed(0);
128  work_chunks_created_.StoreRelaxed(0);
129  work_chunks_deleted_.StoreRelaxed(0);
130  mark_null_count_.StoreRelaxed(0);
131  mark_immune_count_.StoreRelaxed(0);
132  mark_fastpath_count_.StoreRelaxed(0);
133  mark_slowpath_count_.StoreRelaxed(0);
134  {
135    // TODO: I don't think we should need heap bitmap lock to Get the mark bitmap.
136    ReaderMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
137    mark_bitmap_ = heap_->GetMarkBitmap();
138  }
139  if (!GetCurrentIteration()->GetClearSoftReferences()) {
140    // Always clear soft references if a non-sticky collection.
141    GetCurrentIteration()->SetClearSoftReferences(GetGcType() != collector::kGcTypeSticky);
142  }
143}
144
145void MarkSweep::RunPhases() {
146  Thread* self = Thread::Current();
147  InitializePhase();
148  Locks::mutator_lock_->AssertNotHeld(self);
149  if (IsConcurrent()) {
150    GetHeap()->PreGcVerification(this);
151    {
152      ReaderMutexLock mu(self, *Locks::mutator_lock_);
153      MarkingPhase();
154    }
155    ScopedPause pause(this);
156    GetHeap()->PrePauseRosAllocVerification(this);
157    PausePhase();
158    RevokeAllThreadLocalBuffers();
159  } else {
160    ScopedPause pause(this);
161    GetHeap()->PreGcVerificationPaused(this);
162    MarkingPhase();
163    GetHeap()->PrePauseRosAllocVerification(this);
164    PausePhase();
165    RevokeAllThreadLocalBuffers();
166  }
167  {
168    // Sweeping is always done concurrently, even for non-concurrent mark sweep.
169    ReaderMutexLock mu(self, *Locks::mutator_lock_);
170    ReclaimPhase();
171  }
172  GetHeap()->PostGcVerification(this);
173  FinishPhase();
174}
175
176void MarkSweep::ProcessReferences(Thread* self) {
177  WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
178  GetHeap()->GetReferenceProcessor()->ProcessReferences(
179      true,
180      GetTimings(),
181      GetCurrentIteration()->GetClearSoftReferences(),
182      this);
183}
184
185void MarkSweep::PausePhase() {
186  TimingLogger::ScopedTiming t("(Paused)PausePhase", GetTimings());
187  Thread* self = Thread::Current();
188  Locks::mutator_lock_->AssertExclusiveHeld(self);
189  if (IsConcurrent()) {
190    // Handle the dirty objects if we are a concurrent GC.
191    WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
192    // Re-mark root set.
193    ReMarkRoots();
194    // Scan dirty objects; mutators may have dirtied cards during the concurrent marking phase.
195    RecursiveMarkDirtyObjects(true, accounting::CardTable::kCardDirty);
196  }
197  {
198    TimingLogger::ScopedTiming t2("SwapStacks", GetTimings());
199    WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
200    heap_->SwapStacks();
201    live_stack_freeze_size_ = heap_->GetLiveStack()->Size();
202    // Need to revoke all the thread local allocation stacks since we just swapped the allocation
203    // stacks and don't want anybody to allocate into the live stack.
204    RevokeAllThreadLocalAllocationStacks(self);
205  }
206  heap_->PreSweepingGcVerification(this);
207  // Disallow new system weaks to prevent a race which occurs when someone adds a new system
208  // weak before we sweep them. Since this new system weak may not be marked, the GC may
209  // incorrectly sweep it. This also fixes a race where interning may attempt to return a strong
210  // reference to a string that is about to be swept.
211  Runtime::Current()->DisallowNewSystemWeaks();
212  // Enable the reference processing slow path, needs to be done with mutators paused since there
213  // is no lock in the GetReferent fast path.
214  GetHeap()->GetReferenceProcessor()->EnableSlowPath();
215}
216
217void MarkSweep::PreCleanCards() {
218  // Don't do this for non-concurrent GCs since they don't have any dirty cards.
219  if (kPreCleanCards && IsConcurrent()) {
220    TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
221    Thread* self = Thread::Current();
222    CHECK(!Locks::mutator_lock_->IsExclusiveHeld(self));
223    // Process dirty cards and add dirty cards to mod union tables, also ages cards.
224    heap_->ProcessCards(GetTimings(), false, true, false);
225    // The checkpoint root marking is required to avoid a race condition which occurs if the
226    // following happens during a reference write:
227    // 1. mutator dirties the card (write barrier)
228    // 2. GC ages the card (the above ProcessCards call)
229    // 3. GC scans the object (the RecursiveMarkDirtyObjects call below)
230    // 4. mutator writes the value (corresponding to the write barrier in 1.)
231    // This causes the GC to age the card but not necessarily mark the reference which the mutator
232    // wrote into the object stored in the card.
233    // Having the checkpoint fixes this issue since it ensures that the card mark and the
234    // reference write are visible to the GC before the card is scanned (this is due to locks being
235    // acquired / released in the checkpoint code).
236    // The other roots are also marked to help reduce the pause.
237    MarkRootsCheckpoint(self, false);
238    MarkNonThreadRoots();
239    MarkConcurrentRoots(
240        static_cast<VisitRootFlags>(kVisitRootFlagClearRootLog | kVisitRootFlagNewRoots));
241    // Process the newly aged cards.
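    // ProcessCards above aged the dirty cards to kCardDirty - 1, so scanning with this minimum age
    // covers both the aged cards and any cards dirtied again since then.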
242    RecursiveMarkDirtyObjects(false, accounting::CardTable::kCardDirty - 1);
243    // TODO: Empty allocation stack to reduce the number of objects we need to test / mark as live
244    // in the next GC.
245  }
246}
247
248void MarkSweep::RevokeAllThreadLocalAllocationStacks(Thread* self) {
249  if (kUseThreadLocalAllocationStack) {
250    TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
251    Locks::mutator_lock_->AssertExclusiveHeld(self);
252    heap_->RevokeAllThreadLocalAllocationStacks(self);
253  }
254}
255
256void MarkSweep::MarkingPhase() {
257  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
258  Thread* self = Thread::Current();
259  BindBitmaps();
260  FindDefaultSpaceBitmap();
261  // Process dirty cards and add dirty cards to mod union tables.
262  // If the GC type is non-sticky, then we just clear the cards instead of ageing them.
263  heap_->ProcessCards(GetTimings(), false, true, GetGcType() != kGcTypeSticky);
264  WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
265  MarkRoots(self);
266  MarkReachableObjects();
267  // Pre-clean dirtied cards to reduce pauses.
268  PreCleanCards();
269}
270
271void MarkSweep::UpdateAndMarkModUnion() {
272  for (const auto& space : heap_->GetContinuousSpaces()) {
273    if (immune_region_.ContainsSpace(space)) {
274      const char* name = space->IsZygoteSpace()
275          ? "UpdateAndMarkZygoteModUnionTable"
276          : "UpdateAndMarkImageModUnionTable";
277      TimingLogger::ScopedTiming t(name, GetTimings());
278      accounting::ModUnionTable* mod_union_table = heap_->FindModUnionTableFromSpace(space);
279      CHECK(mod_union_table != nullptr);
280      mod_union_table->UpdateAndMarkReferences(this);
281    }
282  }
283}
284
285void MarkSweep::MarkReachableObjects() {
286  UpdateAndMarkModUnion();
287  // Recursively mark all the non-image bits set in the mark bitmap.
288  RecursiveMark();
289}
290
291void MarkSweep::ReclaimPhase() {
292  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
293  Thread* const self = Thread::Current();
294  // Process the references concurrently.
295  ProcessReferences(self);
296  SweepSystemWeaks(self);
297  Runtime* const runtime = Runtime::Current();
298  runtime->AllowNewSystemWeaks();
299  // Clean up class loaders after system weaks are swept since that is how we know if class
300  // unloading occurred.
301  runtime->GetClassLinker()->CleanupClassLoaders();
302  {
303    WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
304    GetHeap()->RecordFreeRevoke();
305    // Reclaim unmarked objects.
306    Sweep(false);
307    // Swap the live and mark bitmaps for each space that we modified. This is an
308    // optimization that enables us to not clear live bits inside of the sweep. Only swaps unbound
309    // bitmaps.
310    SwapBitmaps();
311    // Unbind the live and mark bitmaps.
312    GetHeap()->UnBindBitmaps();
313  }
314}
315
316void MarkSweep::FindDefaultSpaceBitmap() {
317  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
318  for (const auto& space : GetHeap()->GetContinuousSpaces()) {
319    accounting::ContinuousSpaceBitmap* bitmap = space->GetMarkBitmap();
320    // We want to have the main space instead of the non-moving space if possible.
321    if (bitmap != nullptr &&
322        space->GetGcRetentionPolicy() == space::kGcRetentionPolicyAlwaysCollect) {
323      current_space_bitmap_ = bitmap;
324      // If this is not the non-moving space, exit the loop early since it will be good enough.
325      if (space != heap_->GetNonMovingSpace()) {
326        break;
327      }
328    }
329  }
330  CHECK(current_space_bitmap_ != nullptr) << "Could not find a default mark bitmap\n"
331      << heap_->DumpSpaces();
332}
333
334void MarkSweep::ExpandMarkStack() {
335  ResizeMarkStack(mark_stack_->Capacity() * 2);
336}
337
338void MarkSweep::ResizeMarkStack(size_t new_size) {
339  // Rare case, no need to have Thread::Current be a parameter.
340  if (UNLIKELY(mark_stack_->Size() < mark_stack_->Capacity())) {
341    // Someone else acquired the lock and expanded the mark stack before us.
342    return;
343  }
344  std::vector<StackReference<mirror::Object>> temp(mark_stack_->Begin(), mark_stack_->End());
345  CHECK_LE(mark_stack_->Size(), new_size);
346  mark_stack_->Resize(new_size);
347  for (auto& obj : temp) {
348    mark_stack_->PushBack(obj.AsMirrorPtr());
349  }
350}
351
352mirror::Object* MarkSweep::MarkObject(mirror::Object* obj) {
353  MarkObject(obj, nullptr, MemberOffset(0));
354  return obj;
355}
356
357inline void MarkSweep::MarkObjectNonNullParallel(mirror::Object* obj) {
358  DCHECK(obj != nullptr);
359  if (MarkObjectParallel(obj)) {
360    MutexLock mu(Thread::Current(), mark_stack_lock_);
361    if (UNLIKELY(mark_stack_->Size() >= mark_stack_->Capacity())) {
362      ExpandMarkStack();
363    }
364    // The object must be pushed on to the mark stack.
365    mark_stack_->PushBack(obj);
366  }
367}
368
369bool MarkSweep::IsMarkedHeapReference(mirror::HeapReference<mirror::Object>* ref) {
370  return IsMarked(ref->AsMirrorPtr());
371}
372
373class MarkSweepMarkObjectSlowPath {
374 public:
375  explicit MarkSweepMarkObjectSlowPath(MarkSweep* mark_sweep,
376                                       mirror::Object* holder = nullptr,
377                                       MemberOffset offset = MemberOffset(0))
378      : mark_sweep_(mark_sweep), holder_(holder), offset_(offset) {}
379
380  void operator()(const mirror::Object* obj) const NO_THREAD_SAFETY_ANALYSIS {
381    if (kProfileLargeObjects) {
382      // TODO: Differentiate between marking and testing somehow.
383      ++mark_sweep_->large_object_test_;
384      ++mark_sweep_->large_object_mark_;
385    }
386    space::LargeObjectSpace* large_object_space = mark_sweep_->GetHeap()->GetLargeObjectsSpace();
387    if (UNLIKELY(obj == nullptr || !IsAligned<kPageSize>(obj) ||
388                 (kIsDebugBuild && large_object_space != nullptr &&
389                     !large_object_space->Contains(obj)))) {
390      LOG(INTERNAL_FATAL) << "Tried to mark " << obj << " not contained by any spaces";
391      if (holder_ != nullptr) {
392        size_t holder_size = holder_->SizeOf();
393        ArtField* field = holder_->FindFieldByOffset(offset_);
394        LOG(INTERNAL_FATAL) << "Field info: "
395                            << " holder=" << holder_
396                            << " holder is "
397                            << (mark_sweep_->GetHeap()->IsLiveObjectLocked(holder_)
398                                ? "alive" : "dead")
399                            << " holder_size=" << holder_size
400                            << " holder_type=" << PrettyTypeOf(holder_)
401                            << " offset=" << offset_.Uint32Value()
402                            << " field=" << (field != nullptr ? field->GetName() : "nullptr")
403                            << " field_type="
404                            << (field != nullptr ? field->GetTypeDescriptor() : "")
405                            << " first_ref_field_offset="
406                            << (holder_->IsClass()
407                                ? holder_->AsClass()->GetFirstReferenceStaticFieldOffset(
408                                    sizeof(void*))
409                                : holder_->GetClass()->GetFirstReferenceInstanceFieldOffset())
410                            << " num_of_ref_fields="
411                            << (holder_->IsClass()
412                                ? holder_->AsClass()->NumReferenceStaticFields()
413                                : holder_->GetClass()->NumReferenceInstanceFields())
414                            << "\n";
415        // Print the memory content of the holder.
416        for (size_t i = 0; i < holder_size / sizeof(uint32_t); ++i) {
417          uint32_t* p = reinterpret_cast<uint32_t*>(holder_);
418          LOG(INTERNAL_FATAL) << &p[i] << ": " << "holder+" << (i * sizeof(uint32_t)) << " = "
419                              << std::hex << p[i];
420        }
421      }
422      PrintFileToLog("/proc/self/maps", LogSeverity::INTERNAL_FATAL);
423      MemMap::DumpMaps(LOG(INTERNAL_FATAL), true);
424      {
425        LOG(INTERNAL_FATAL) << "Attempting see if it's a bad root";
426        Thread* self = Thread::Current();
427        if (Locks::mutator_lock_->IsExclusiveHeld(self)) {
428          mark_sweep_->VerifyRoots();
429        } else {
430          const bool heap_bitmap_exclusive_locked =
431              Locks::heap_bitmap_lock_->IsExclusiveHeld(self);
432          if (heap_bitmap_exclusive_locked) {
433            Locks::heap_bitmap_lock_->ExclusiveUnlock(self);
434          }
435          {
436            ScopedThreadSuspension sts(self, kSuspended);
437            ScopedSuspendAll ssa(__FUNCTION__);
438            mark_sweep_->VerifyRoots();
439          }
440          if (heap_bitmap_exclusive_locked) {
441            Locks::heap_bitmap_lock_->ExclusiveLock(self);
442          }
443        }
444      }
445      LOG(FATAL) << "Can't mark invalid object";
446    }
447  }
448
449 private:
450  MarkSweep* const mark_sweep_;
451  mirror::Object* const holder_;
452  MemberOffset offset_;
453};
454
455inline void MarkSweep::MarkObjectNonNull(mirror::Object* obj,
456                                         mirror::Object* holder,
457                                         MemberOffset offset) {
458  DCHECK(obj != nullptr);
459  if (kUseBakerOrBrooksReadBarrier) {
460    // Verify all the objects have the correct pointer installed.
461    obj->AssertReadBarrierPointer();
462  }
463  if (immune_region_.ContainsObject(obj)) {
464    if (kCountMarkedObjects) {
465      ++mark_immune_count_;
466    }
467    DCHECK(mark_bitmap_->Test(obj));
468  } else if (LIKELY(current_space_bitmap_->HasAddress(obj))) {
469    if (kCountMarkedObjects) {
470      ++mark_fastpath_count_;
471    }
472    if (UNLIKELY(!current_space_bitmap_->Set(obj))) {
473      PushOnMarkStack(obj);  // This object was not previously marked.
474    }
475  } else {
476    if (kCountMarkedObjects) {
477      ++mark_slowpath_count_;
478    }
479    MarkSweepMarkObjectSlowPath visitor(this, holder, offset);
480    // TODO: We already know that the object is not in the current_space_bitmap_ but MarkBitmap::Set
481    // will check again.
482    if (!mark_bitmap_->Set(obj, visitor)) {
483      PushOnMarkStack(obj);  // Was not already marked, push.
484    }
485  }
486}
487
488inline void MarkSweep::PushOnMarkStack(mirror::Object* obj) {
489  if (UNLIKELY(mark_stack_->Size() >= mark_stack_->Capacity())) {
490    // Lock is not needed but is here anyway to please annotalysis.
491    MutexLock mu(Thread::Current(), mark_stack_lock_);
492    ExpandMarkStack();
493  }
494  // The object must be pushed on to the mark stack.
495  mark_stack_->PushBack(obj);
496}
497
498inline bool MarkSweep::MarkObjectParallel(mirror::Object* obj) {
499  DCHECK(obj != nullptr);
500  if (kUseBakerOrBrooksReadBarrier) {
501    // Verify all the objects have the correct pointer installed.
502    obj->AssertReadBarrierPointer();
503  }
504  if (immune_region_.ContainsObject(obj)) {
505    DCHECK(IsMarked(obj) != nullptr);
506    return false;
507  }
508  // Try to take advantage of locality of references within a space; failing that, find the space
509  // the hard way.
510  accounting::ContinuousSpaceBitmap* object_bitmap = current_space_bitmap_;
511  if (LIKELY(object_bitmap->HasAddress(obj))) {
512    return !object_bitmap->AtomicTestAndSet(obj);
513  }
514  MarkSweepMarkObjectSlowPath visitor(this);
515  return !mark_bitmap_->AtomicTestAndSet(obj, visitor);
516}
517
518void MarkSweep::MarkHeapReference(mirror::HeapReference<mirror::Object>* ref) {
519  MarkObject(ref->AsMirrorPtr(), nullptr, MemberOffset(0));
520}
521
522// Used to mark objects when processing the mark stack. If an object is null, it is not marked.
523inline void MarkSweep::MarkObject(mirror::Object* obj,
524                                  mirror::Object* holder,
525                                  MemberOffset offset) {
526  if (obj != nullptr) {
527    MarkObjectNonNull(obj, holder, offset);
528  } else if (kCountMarkedObjects) {
529    ++mark_null_count_;
530  }
531}
532
533class VerifyRootMarkedVisitor : public SingleRootVisitor {
534 public:
535  explicit VerifyRootMarkedVisitor(MarkSweep* collector) : collector_(collector) { }
536
537  void VisitRoot(mirror::Object* root, const RootInfo& info) OVERRIDE
538      SHARED_REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
539    CHECK(collector_->IsMarked(root) != nullptr) << info.ToString();
540  }
541
542 private:
543  MarkSweep* const collector_;
544};
545
546void MarkSweep::VisitRoots(mirror::Object*** roots,
547                           size_t count,
548                           const RootInfo& info ATTRIBUTE_UNUSED) {
549  for (size_t i = 0; i < count; ++i) {
550    MarkObjectNonNull(*roots[i]);
551  }
552}
553
554void MarkSweep::VisitRoots(mirror::CompressedReference<mirror::Object>** roots,
555                           size_t count,
556                           const RootInfo& info ATTRIBUTE_UNUSED) {
557  for (size_t i = 0; i < count; ++i) {
558    MarkObjectNonNull(roots[i]->AsMirrorPtr());
559  }
560}
561
562class VerifyRootVisitor : public SingleRootVisitor {
563 public:
564  void VisitRoot(mirror::Object* root, const RootInfo& info) OVERRIDE
565      SHARED_REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
566    // See if the root is on any space bitmap.
567    auto* heap = Runtime::Current()->GetHeap();
568    if (heap->GetLiveBitmap()->GetContinuousSpaceBitmap(root) == nullptr) {
569      space::LargeObjectSpace* large_object_space = heap->GetLargeObjectsSpace();
570      if (large_object_space != nullptr && !large_object_space->Contains(root)) {
571        LOG(INTERNAL_FATAL) << "Found invalid root: " << root << " " << info;
572      }
573    }
574  }
575};
576
577void MarkSweep::VerifyRoots() {
578  VerifyRootVisitor visitor;
579  Runtime::Current()->GetThreadList()->VisitRoots(&visitor);
580}
581
582void MarkSweep::MarkRoots(Thread* self) {
583  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
584  if (Locks::mutator_lock_->IsExclusiveHeld(self)) {
585    // If we exclusively hold the mutator lock, all threads must be suspended.
586    Runtime::Current()->VisitRoots(this);
587    RevokeAllThreadLocalAllocationStacks(self);
588  } else {
589    MarkRootsCheckpoint(self, kRevokeRosAllocThreadLocalBuffersAtCheckpoint);
590    // At this point the live stack should no longer have any mutators which push into it.
591    MarkNonThreadRoots();
592    MarkConcurrentRoots(
593        static_cast<VisitRootFlags>(kVisitRootFlagAllRoots | kVisitRootFlagStartLoggingNewRoots));
594  }
595}
596
597void MarkSweep::MarkNonThreadRoots() {
598  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
599  Runtime::Current()->VisitNonThreadRoots(this);
600}
601
602void MarkSweep::MarkConcurrentRoots(VisitRootFlags flags) {
603  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
604  // Visit all runtime roots and clear dirty flags.
605  Runtime::Current()->VisitConcurrentRoots(
606      this, static_cast<VisitRootFlags>(flags | kVisitRootFlagNonMoving));
607}
608
609class ScanObjectVisitor {
610 public:
611  explicit ScanObjectVisitor(MarkSweep* const mark_sweep) ALWAYS_INLINE
612      : mark_sweep_(mark_sweep) {}
613
614  void operator()(mirror::Object* obj) const
615      ALWAYS_INLINE
616      REQUIRES(Locks::heap_bitmap_lock_)
617      SHARED_REQUIRES(Locks::mutator_lock_) {
618    if (kCheckLocks) {
619      Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
620      Locks::heap_bitmap_lock_->AssertExclusiveHeld(Thread::Current());
621    }
622    mark_sweep_->ScanObject(obj);
623  }
624
625 private:
626  MarkSweep* const mark_sweep_;
627};
628
629class DelayReferenceReferentVisitor {
630 public:
631  explicit DelayReferenceReferentVisitor(MarkSweep* collector) : collector_(collector) {}
632
633  void operator()(mirror::Class* klass, mirror::Reference* ref) const
634      REQUIRES(Locks::heap_bitmap_lock_)
635      SHARED_REQUIRES(Locks::mutator_lock_) {
636    collector_->DelayReferenceReferent(klass, ref);
637  }
638
639 private:
640  MarkSweep* const collector_;
641};
642
643template <bool kUseFinger = false>
644class MarkStackTask : public Task {
645 public:
646  MarkStackTask(ThreadPool* thread_pool,
647                MarkSweep* mark_sweep,
648                size_t mark_stack_size,
649                StackReference<mirror::Object>* mark_stack)
650      : mark_sweep_(mark_sweep),
651        thread_pool_(thread_pool),
652        mark_stack_pos_(mark_stack_size) {
653    // We may have to copy part of an existing mark stack when another mark stack overflows.
654    if (mark_stack_size != 0) {
655      DCHECK(mark_stack != nullptr);
656      // TODO: Check performance?
657      std::copy(mark_stack, mark_stack + mark_stack_size, mark_stack_);
658    }
659    if (kCountTasks) {
660      ++mark_sweep_->work_chunks_created_;
661    }
662  }
663
664  static const size_t kMaxSize = 1 * KB;
665
666 protected:
667  class MarkObjectParallelVisitor {
668   public:
669    ALWAYS_INLINE MarkObjectParallelVisitor(MarkStackTask<kUseFinger>* chunk_task,
670                                            MarkSweep* mark_sweep)
671        : chunk_task_(chunk_task), mark_sweep_(mark_sweep) {}
672
673    ALWAYS_INLINE void operator()(mirror::Object* obj,
674                    MemberOffset offset,
675                    bool is_static ATTRIBUTE_UNUSED) const
676        SHARED_REQUIRES(Locks::mutator_lock_) {
677      Mark(obj->GetFieldObject<mirror::Object>(offset));
678    }
679
680    void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const
681        SHARED_REQUIRES(Locks::mutator_lock_) {
682      if (!root->IsNull()) {
683        VisitRoot(root);
684      }
685    }
686
687    void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
688        SHARED_REQUIRES(Locks::mutator_lock_) {
689      if (kCheckLocks) {
690        Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
691        Locks::heap_bitmap_lock_->AssertExclusiveHeld(Thread::Current());
692      }
693      Mark(root->AsMirrorPtr());
694    }
695
696   private:
697    ALWAYS_INLINE void Mark(mirror::Object* ref) const SHARED_REQUIRES(Locks::mutator_lock_) {
698      if (ref != nullptr && mark_sweep_->MarkObjectParallel(ref)) {
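        // Address-ordered marking finger: references at or above the finger will be reached by the
        // in-progress bitmap scan, so they only need their mark bit set, not a mark stack push.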
699        if (kUseFinger) {
700          std::atomic_thread_fence(std::memory_order_seq_cst);
701          if (reinterpret_cast<uintptr_t>(ref) >=
702              static_cast<uintptr_t>(mark_sweep_->atomic_finger_.LoadRelaxed())) {
703            return;
704          }
705        }
706        chunk_task_->MarkStackPush(ref);
707      }
708    }
709
710    MarkStackTask<kUseFinger>* const chunk_task_;
711    MarkSweep* const mark_sweep_;
712  };
713
714  class ScanObjectParallelVisitor {
715   public:
716    ALWAYS_INLINE explicit ScanObjectParallelVisitor(MarkStackTask<kUseFinger>* chunk_task)
717        : chunk_task_(chunk_task) {}
718
719    // No thread safety analysis since multiple threads will use this visitor.
720    void operator()(mirror::Object* obj) const
721        REQUIRES(Locks::heap_bitmap_lock_)
722        SHARED_REQUIRES(Locks::mutator_lock_) {
723      MarkSweep* const mark_sweep = chunk_task_->mark_sweep_;
724      MarkObjectParallelVisitor mark_visitor(chunk_task_, mark_sweep);
725      DelayReferenceReferentVisitor ref_visitor(mark_sweep);
726      mark_sweep->ScanObjectVisit(obj, mark_visitor, ref_visitor);
727    }
728
729   private:
730    MarkStackTask<kUseFinger>* const chunk_task_;
731  };
732
733  virtual ~MarkStackTask() {
734    // Make sure that we have cleared our mark stack.
735    DCHECK_EQ(mark_stack_pos_, 0U);
736    if (kCountTasks) {
737      ++mark_sweep_->work_chunks_deleted_;
738    }
739  }
740
741  MarkSweep* const mark_sweep_;
742  ThreadPool* const thread_pool_;
743  // Thread local mark stack for this task.
744  StackReference<mirror::Object> mark_stack_[kMaxSize];
745  // Mark stack position.
746  size_t mark_stack_pos_;
747
748  ALWAYS_INLINE void MarkStackPush(mirror::Object* obj)
749      SHARED_REQUIRES(Locks::mutator_lock_) {
750    if (UNLIKELY(mark_stack_pos_ == kMaxSize)) {
751      // Mark stack overflow, give 1/2 the stack to the thread pool as a new work task.
752      mark_stack_pos_ /= 2;
753      auto* task = new MarkStackTask(thread_pool_,
754                                     mark_sweep_,
755                                     kMaxSize - mark_stack_pos_,
756                                     mark_stack_ + mark_stack_pos_);
757      thread_pool_->AddTask(Thread::Current(), task);
758    }
759    DCHECK(obj != nullptr);
760    DCHECK_LT(mark_stack_pos_, kMaxSize);
761    mark_stack_[mark_stack_pos_++].Assign(obj);
762  }
763
764  virtual void Finalize() {
765    delete this;
766  }
767
768  // Scans all of the objects
769  virtual void Run(Thread* self ATTRIBUTE_UNUSED)
770      REQUIRES(Locks::heap_bitmap_lock_)
771      SHARED_REQUIRES(Locks::mutator_lock_) {
772    ScanObjectParallelVisitor visitor(this);
773    // TODO: Tune this.
774    static const size_t kFifoSize = 4;
775    BoundedFifoPowerOfTwo<mirror::Object*, kFifoSize> prefetch_fifo;
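    // Drain the local mark stack through a small FIFO, prefetching each object before it is
    // scanned to hide cache misses on the object header and fields.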
776    for (;;) {
777      mirror::Object* obj = nullptr;
778      if (kUseMarkStackPrefetch) {
779        while (mark_stack_pos_ != 0 && prefetch_fifo.size() < kFifoSize) {
780          mirror::Object* const mark_stack_obj = mark_stack_[--mark_stack_pos_].AsMirrorPtr();
781          DCHECK(mark_stack_obj != nullptr);
782          __builtin_prefetch(mark_stack_obj);
783          prefetch_fifo.push_back(mark_stack_obj);
784        }
785        if (UNLIKELY(prefetch_fifo.empty())) {
786          break;
787        }
788        obj = prefetch_fifo.front();
789        prefetch_fifo.pop_front();
790      } else {
791        if (UNLIKELY(mark_stack_pos_ == 0)) {
792          break;
793        }
794        obj = mark_stack_[--mark_stack_pos_].AsMirrorPtr();
795      }
796      DCHECK(obj != nullptr);
797      visitor(obj);
798    }
799  }
800};
801
802class CardScanTask : public MarkStackTask<false> {
803 public:
804  CardScanTask(ThreadPool* thread_pool,
805               MarkSweep* mark_sweep,
806               accounting::ContinuousSpaceBitmap* bitmap,
807               uint8_t* begin,
808               uint8_t* end,
809               uint8_t minimum_age,
810               size_t mark_stack_size,
811               StackReference<mirror::Object>* mark_stack_obj,
812               bool clear_card)
813      : MarkStackTask<false>(thread_pool, mark_sweep, mark_stack_size, mark_stack_obj),
814        bitmap_(bitmap),
815        begin_(begin),
816        end_(end),
817        minimum_age_(minimum_age),
818        clear_card_(clear_card) {}
819
820 protected:
821  accounting::ContinuousSpaceBitmap* const bitmap_;
822  uint8_t* const begin_;
823  uint8_t* const end_;
824  const uint8_t minimum_age_;
825  const bool clear_card_;
826
827  virtual void Finalize() {
828    delete this;
829  }
830
831  virtual void Run(Thread* self) NO_THREAD_SAFETY_ANALYSIS {
832    ScanObjectParallelVisitor visitor(this);
833    accounting::CardTable* card_table = mark_sweep_->GetHeap()->GetCardTable();
834    size_t cards_scanned = clear_card_
835        ? card_table->Scan<true>(bitmap_, begin_, end_, visitor, minimum_age_)
836        : card_table->Scan<false>(bitmap_, begin_, end_, visitor, minimum_age_);
837    VLOG(heap) << "Parallel scanning cards " << reinterpret_cast<void*>(begin_) << " - "
838        << reinterpret_cast<void*>(end_) << " = " << cards_scanned;
839    // Finish by emptying our local mark stack.
840    MarkStackTask::Run(self);
841  }
842};
843
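// Returns the number of threads to use for GC work: one when there is no thread pool or when we do
// not care about pause times, otherwise the configured thread count plus the calling GC thread.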
844size_t MarkSweep::GetThreadCount(bool paused) const {
845  if (heap_->GetThreadPool() == nullptr || !heap_->CareAboutPauseTimes()) {
846    return 1;
847  }
848  return (paused ? heap_->GetParallelGCThreadCount() : heap_->GetConcGCThreadCount()) + 1;
849}
850
851void MarkSweep::ScanGrayObjects(bool paused, uint8_t minimum_age) {
852  accounting::CardTable* card_table = GetHeap()->GetCardTable();
853  ThreadPool* thread_pool = GetHeap()->GetThreadPool();
854  size_t thread_count = GetThreadCount(paused);
855  // The parallel version with only one thread is faster for card scanning. TODO: fix.
856  if (kParallelCardScan && thread_count > 1) {
857    Thread* self = Thread::Current();
858    // Can't have a different split for each space since multiple spaces can have their cards being
859    // scanned at the same time.
860    TimingLogger::ScopedTiming t(paused ? "(Paused)ScanGrayObjects" : __FUNCTION__,
861        GetTimings());
862    // Try to take some of the mark stack since we can pass this off to the worker tasks.
863    StackReference<mirror::Object>* mark_stack_begin = mark_stack_->Begin();
864    StackReference<mirror::Object>* mark_stack_end = mark_stack_->End();
865    const size_t mark_stack_size = mark_stack_end - mark_stack_begin;
866    // Estimated number of work tasks we will create.
867    const size_t mark_stack_tasks = GetHeap()->GetContinuousSpaces().size() * thread_count;
868    DCHECK_NE(mark_stack_tasks, 0U);
869    const size_t mark_stack_delta = std::min(CardScanTask::kMaxSize / 2,
870                                             mark_stack_size / mark_stack_tasks + 1);
871    for (const auto& space : GetHeap()->GetContinuousSpaces()) {
872      if (space->GetMarkBitmap() == nullptr) {
873        continue;
874      }
875      uint8_t* card_begin = space->Begin();
876      uint8_t* card_end = space->End();
877      // Align up the end address. For example, the image space's end
878      // may not be card-size-aligned.
879      card_end = AlignUp(card_end, accounting::CardTable::kCardSize);
880      DCHECK_ALIGNED(card_begin, accounting::CardTable::kCardSize);
881      DCHECK_ALIGNED(card_end, accounting::CardTable::kCardSize);
882      // Calculate how many bytes of heap we will scan.
883      const size_t address_range = card_end - card_begin;
884      // Calculate how much address range each task gets.
885      const size_t card_delta = RoundUp(address_range / thread_count + 1,
886                                        accounting::CardTable::kCardSize);
887      // If paused and the space is neither zygote nor image space, we can clear the dirty cards
888      // to avoid accumulating them, which would increase card scanning load in the following GC
889      // cycles. We need to keep dirty cards of image space and zygote space in order to track
890      // references to the other spaces.
891      bool clear_card = paused && !space->IsZygoteSpace() && !space->IsImageSpace();
892      // Create the worker tasks for this space.
893      while (card_begin != card_end) {
894        // Add a range of cards.
895        size_t addr_remaining = card_end - card_begin;
896        size_t card_increment = std::min(card_delta, addr_remaining);
897        // Take from the back of the mark stack.
898        size_t mark_stack_remaining = mark_stack_end - mark_stack_begin;
899        size_t mark_stack_increment = std::min(mark_stack_delta, mark_stack_remaining);
900        mark_stack_end -= mark_stack_increment;
901        mark_stack_->PopBackCount(static_cast<int32_t>(mark_stack_increment));
902        DCHECK_EQ(mark_stack_end, mark_stack_->End());
903        // Add the new task to the thread pool.
904        auto* task = new CardScanTask(thread_pool,
905                                      this,
906                                      space->GetMarkBitmap(),
907                                      card_begin,
908                                      card_begin + card_increment,
909                                      minimum_age,
910                                      mark_stack_increment,
911                                      mark_stack_end,
912                                      clear_card);
913        thread_pool->AddTask(self, task);
914        card_begin += card_increment;
915      }
916    }
917
918    // Note: the card scan below may dirty new cards (and scan them)
919    // as a side effect when a Reference object is encountered and
920    // queued during the marking. See b/11465268.
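    // The calling thread also executes tasks while in Wait(), so thread_count - 1 workers plus
    // this thread uses thread_count threads in total.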
921    thread_pool->SetMaxActiveWorkers(thread_count - 1);
922    thread_pool->StartWorkers(self);
923    thread_pool->Wait(self, true, true);
924    thread_pool->StopWorkers(self);
925  } else {
926    for (const auto& space : GetHeap()->GetContinuousSpaces()) {
927      if (space->GetMarkBitmap() != nullptr) {
928        // Image spaces are handled properly since live == marked for them.
929        const char* name = nullptr;
930        switch (space->GetGcRetentionPolicy()) {
931        case space::kGcRetentionPolicyNeverCollect:
932          name = paused ? "(Paused)ScanGrayImageSpaceObjects" : "ScanGrayImageSpaceObjects";
933          break;
934        case space::kGcRetentionPolicyFullCollect:
935          name = paused ? "(Paused)ScanGrayZygoteSpaceObjects" : "ScanGrayZygoteSpaceObjects";
936          break;
937        case space::kGcRetentionPolicyAlwaysCollect:
938          name = paused ? "(Paused)ScanGrayAllocSpaceObjects" : "ScanGrayAllocSpaceObjects";
939          break;
940        default:
941          LOG(FATAL) << "Unreachable";
942          UNREACHABLE();
943        }
944        TimingLogger::ScopedTiming t(name, GetTimings());
945        ScanObjectVisitor visitor(this);
946        bool clear_card = paused && !space->IsZygoteSpace() && !space->IsImageSpace();
947        if (clear_card) {
948          card_table->Scan<true>(space->GetMarkBitmap(),
949                                 space->Begin(),
950                                 space->End(),
951                                 visitor,
952                                 minimum_age);
953        } else {
954          card_table->Scan<false>(space->GetMarkBitmap(),
955                                  space->Begin(),
956                                  space->End(),
957                                  visitor,
958                                  minimum_age);
959        }
960      }
961    }
962  }
963}
964
965class RecursiveMarkTask : public MarkStackTask<false> {
966 public:
967  RecursiveMarkTask(ThreadPool* thread_pool,
968                    MarkSweep* mark_sweep,
969                    accounting::ContinuousSpaceBitmap* bitmap,
970                    uintptr_t begin,
971                    uintptr_t end)
972      : MarkStackTask<false>(thread_pool, mark_sweep, 0, nullptr),
973        bitmap_(bitmap),
974        begin_(begin),
975        end_(end) {}
976
977 protected:
978  accounting::ContinuousSpaceBitmap* const bitmap_;
979  const uintptr_t begin_;
980  const uintptr_t end_;
981
982  virtual void Finalize() {
983    delete this;
984  }
985
986  // Scans all of the objects
987  virtual void Run(Thread* self) NO_THREAD_SAFETY_ANALYSIS {
988    ScanObjectParallelVisitor visitor(this);
989    bitmap_->VisitMarkedRange(begin_, end_, visitor);
990    // Finish by emptying our local mark stack.
991    MarkStackTask::Run(self);
992  }
993};
994
995// Populates the mark stack based on the set of marked objects and
996// recursively marks until the mark stack is emptied.
997void MarkSweep::RecursiveMark() {
998  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
999  // RecursiveMark will build the lists of known instances of the Reference classes. See
1000  // DelayReferenceReferent for details.
1001  if (kUseRecursiveMark) {
1002    const bool partial = GetGcType() == kGcTypePartial;
1003    ScanObjectVisitor scan_visitor(this);
1004    auto* self = Thread::Current();
1005    ThreadPool* thread_pool = heap_->GetThreadPool();
1006    size_t thread_count = GetThreadCount(false);
1007    const bool parallel = kParallelRecursiveMark && thread_count > 1;
1008    mark_stack_->Reset();
1009    for (const auto& space : GetHeap()->GetContinuousSpaces()) {
1010      if ((space->GetGcRetentionPolicy() == space::kGcRetentionPolicyAlwaysCollect) ||
1011          (!partial && space->GetGcRetentionPolicy() == space::kGcRetentionPolicyFullCollect)) {
1012        current_space_bitmap_ = space->GetMarkBitmap();
1013        if (current_space_bitmap_ == nullptr) {
1014          continue;
1015        }
1016        if (parallel) {
1017          // We will use the mark stack in the future.
1018          // CHECK(mark_stack_->IsEmpty());
1019          // This function does not handle heap end increasing, so we must use the space end.
1020          uintptr_t begin = reinterpret_cast<uintptr_t>(space->Begin());
1021          uintptr_t end = reinterpret_cast<uintptr_t>(space->End());
1022          atomic_finger_.StoreRelaxed(AtomicInteger::MaxValue());
1023
1024          // Create a few worker tasks.
1025          const size_t n = thread_count * 2;
1026          while (begin != end) {
1027            uintptr_t start = begin;
1028            uintptr_t delta = (end - begin) / n;
1029            delta = RoundUp(delta, KB);
1030            if (delta < 16 * KB) delta = end - begin;
1031            begin += delta;
1032            auto* task = new RecursiveMarkTask(thread_pool,
1033                                               this,
1034                                               current_space_bitmap_,
1035                                               start,
1036                                               begin);
1037            thread_pool->AddTask(self, task);
1038          }
1039          thread_pool->SetMaxActiveWorkers(thread_count - 1);
1040          thread_pool->StartWorkers(self);
1041          thread_pool->Wait(self, true, true);
1042          thread_pool->StopWorkers(self);
1043        } else {
1044          // This function does not handle heap end increasing, so we must use the space end.
1045          uintptr_t begin = reinterpret_cast<uintptr_t>(space->Begin());
1046          uintptr_t end = reinterpret_cast<uintptr_t>(space->End());
1047          current_space_bitmap_->VisitMarkedRange(begin, end, scan_visitor);
1048        }
1049      }
1050    }
1051  }
1052  ProcessMarkStack(false);
1053}
1054
1055void MarkSweep::RecursiveMarkDirtyObjects(bool paused, uint8_t minimum_age) {
1056  ScanGrayObjects(paused, minimum_age);
1057  ProcessMarkStack(paused);
1058}
1059
1060void MarkSweep::ReMarkRoots() {
1061  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
1062  Locks::mutator_lock_->AssertExclusiveHeld(Thread::Current());
1063  Runtime::Current()->VisitRoots(this, static_cast<VisitRootFlags>(
1064      kVisitRootFlagNewRoots | kVisitRootFlagStopLoggingNewRoots | kVisitRootFlagClearRootLog));
1065  if (kVerifyRootsMarked) {
1066    TimingLogger::ScopedTiming t2("(Paused)VerifyRoots", GetTimings());
1067    VerifyRootMarkedVisitor visitor(this);
1068    Runtime::Current()->VisitRoots(&visitor);
1069  }
1070}
1071
1072void MarkSweep::SweepSystemWeaks(Thread* self) {
1073  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
1074  ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
1075  Runtime::Current()->SweepSystemWeaks(this);
1076}
1077
1078class VerifySystemWeakVisitor : public IsMarkedVisitor {
1079 public:
1080  explicit VerifySystemWeakVisitor(MarkSweep* mark_sweep) : mark_sweep_(mark_sweep) {}
1081
1082  virtual mirror::Object* IsMarked(mirror::Object* obj)
1083      OVERRIDE
1084      SHARED_REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
1085    mark_sweep_->VerifyIsLive(obj);
1086    return obj;
1087  }
1088
1089  MarkSweep* const mark_sweep_;
1090};
1091
1092void MarkSweep::VerifyIsLive(const mirror::Object* obj) {
1093  if (!heap_->GetLiveBitmap()->Test(obj)) {
1094    // TODO: Consider live stack? Has this code bitrotted?
1095    CHECK(!heap_->allocation_stack_->Contains(obj))
1096        << "Found dead object " << obj << "\n" << heap_->DumpSpaces();
1097  }
1098}
1099
1100void MarkSweep::VerifySystemWeaks() {
1101  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
1102  // Verify system weaks, uses a special object visitor which returns the input object.
1103  VerifySystemWeakVisitor visitor(this);
1104  Runtime::Current()->SweepSystemWeaks(&visitor);
1105}
1106
1107class CheckpointMarkThreadRoots : public Closure, public RootVisitor {
1108 public:
1109  CheckpointMarkThreadRoots(MarkSweep* mark_sweep,
1110                            bool revoke_ros_alloc_thread_local_buffers_at_checkpoint)
1111      : mark_sweep_(mark_sweep),
1112        revoke_ros_alloc_thread_local_buffers_at_checkpoint_(
1113            revoke_ros_alloc_thread_local_buffers_at_checkpoint) {
1114  }
1115
1116  void VisitRoots(mirror::Object*** roots, size_t count, const RootInfo& info ATTRIBUTE_UNUSED)
1117      OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_)
1118      REQUIRES(Locks::heap_bitmap_lock_) {
1119    for (size_t i = 0; i < count; ++i) {
1120      mark_sweep_->MarkObjectNonNullParallel(*roots[i]);
1121    }
1122  }
1123
1124  void VisitRoots(mirror::CompressedReference<mirror::Object>** roots,
1125                  size_t count,
1126                  const RootInfo& info ATTRIBUTE_UNUSED)
1127      OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_)
1128      REQUIRES(Locks::heap_bitmap_lock_) {
1129    for (size_t i = 0; i < count; ++i) {
1130      mark_sweep_->MarkObjectNonNullParallel(roots[i]->AsMirrorPtr());
1131    }
1132  }
1133
1134  virtual void Run(Thread* thread) OVERRIDE NO_THREAD_SAFETY_ANALYSIS {
1135    ATRACE_BEGIN("Marking thread roots");
1136    // Note: self is not necessarily equal to thread since thread may be suspended.
1137    Thread* const self = Thread::Current();
1138    CHECK(thread == self || thread->IsSuspended() || thread->GetState() == kWaitingPerformingGc)
1139        << thread->GetState() << " thread " << thread << " self " << self;
1140    thread->VisitRoots(this);
1141    ATRACE_END();
1142    if (revoke_ros_alloc_thread_local_buffers_at_checkpoint_) {
1143      ATRACE_BEGIN("RevokeRosAllocThreadLocalBuffers");
1144      mark_sweep_->GetHeap()->RevokeRosAllocThreadLocalBuffers(thread);
1145      ATRACE_END();
1146    }
1147    // If thread is a running mutator, then act on behalf of the garbage collector.
1148    // See the code in ThreadList::RunCheckpoint.
1149    mark_sweep_->GetBarrier().Pass(self);
1150  }
1151
1152 private:
1153  MarkSweep* const mark_sweep_;
1154  const bool revoke_ros_alloc_thread_local_buffers_at_checkpoint_;
1155};
1156
1157void MarkSweep::MarkRootsCheckpoint(Thread* self,
1158                                    bool revoke_ros_alloc_thread_local_buffers_at_checkpoint) {
1159  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
1160  CheckpointMarkThreadRoots check_point(this, revoke_ros_alloc_thread_local_buffers_at_checkpoint);
1161  ThreadList* thread_list = Runtime::Current()->GetThreadList();
1162  // Request that the checkpoint be run on all threads, returning a count of the threads that
1163  // must run through the barrier, including self.
1164  size_t barrier_count = thread_list->RunCheckpoint(&check_point);
1165  // Release locks, then wait for all mutator threads to pass the barrier.
1166  // If there are no threads to wait for, which implies that all the checkpoint functions have
1167  // finished, then there is no need to release locks.
1168  if (barrier_count == 0) {
1169    return;
1170  }
1171  Locks::heap_bitmap_lock_->ExclusiveUnlock(self);
1172  Locks::mutator_lock_->SharedUnlock(self);
1173  {
1174    ScopedThreadStateChange tsc(self, kWaitingForCheckPointsToRun);
1175    gc_barrier_->Increment(self, barrier_count);
1176  }
1177  Locks::mutator_lock_->SharedLock(self);
1178  Locks::heap_bitmap_lock_->ExclusiveLock(self);
1179}
1180
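// Sweep the objects recorded in the allocation stack: unmarked objects in swept spaces are freed
// in chunks, entries belonging to other spaces are compacted and kept for later passes, and the
// large object space is handled last.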
1181void MarkSweep::SweepArray(accounting::ObjectStack* allocations, bool swap_bitmaps) {
1182  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
1183  Thread* self = Thread::Current();
1184  mirror::Object** chunk_free_buffer = reinterpret_cast<mirror::Object**>(
1185      sweep_array_free_buffer_mem_map_->BaseBegin());
1186  size_t chunk_free_pos = 0;
1187  ObjectBytePair freed;
1188  ObjectBytePair freed_los;
1189  // How many objects are left in the array, modified after each space is swept.
1190  StackReference<mirror::Object>* objects = allocations->Begin();
1191  size_t count = allocations->Size();
1192  // Change the order to ensure that the non-moving space is swept last as an optimization.
1193  std::vector<space::ContinuousSpace*> sweep_spaces;
1194  space::ContinuousSpace* non_moving_space = nullptr;
1195  for (space::ContinuousSpace* space : heap_->GetContinuousSpaces()) {
1196    if (space->IsAllocSpace() && !immune_region_.ContainsSpace(space) &&
1197        space->GetLiveBitmap() != nullptr) {
1198      if (space == heap_->GetNonMovingSpace()) {
1199        non_moving_space = space;
1200      } else {
1201        sweep_spaces.push_back(space);
1202      }
1203    }
1204  }
1205  // Unlikely to sweep a significant amount of non-movable objects, so we do these after the
1206  // other alloc spaces as an optimization.
1207  if (non_moving_space != nullptr) {
1208    sweep_spaces.push_back(non_moving_space);
1209  }
1210  // Start by sweeping the continuous spaces.
1211  for (space::ContinuousSpace* space : sweep_spaces) {
1212    space::AllocSpace* alloc_space = space->AsAllocSpace();
1213    accounting::ContinuousSpaceBitmap* live_bitmap = space->GetLiveBitmap();
1214    accounting::ContinuousSpaceBitmap* mark_bitmap = space->GetMarkBitmap();
1215    if (swap_bitmaps) {
1216      std::swap(live_bitmap, mark_bitmap);
1217    }
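    // Compact the allocation stack in place: entries outside this space are copied toward the
    // front through 'out' so later spaces only need to examine the remaining entries.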
1218    StackReference<mirror::Object>* out = objects;
1219    for (size_t i = 0; i < count; ++i) {
1220      mirror::Object* const obj = objects[i].AsMirrorPtr();
1221      if (kUseThreadLocalAllocationStack && obj == nullptr) {
1222        continue;
1223      }
1224      if (space->HasAddress(obj)) {
1225        // This object is in the space, remove it from the array and add it to the sweep buffer
1226        // if needed.
1227        if (!mark_bitmap->Test(obj)) {
1228          if (chunk_free_pos >= kSweepArrayChunkFreeSize) {
1229            TimingLogger::ScopedTiming t2("FreeList", GetTimings());
1230            freed.objects += chunk_free_pos;
1231            freed.bytes += alloc_space->FreeList(self, chunk_free_pos, chunk_free_buffer);
1232            chunk_free_pos = 0;
1233          }
1234          chunk_free_buffer[chunk_free_pos++] = obj;
1235        }
1236      } else {
1237        (out++)->Assign(obj);
1238      }
1239    }
1240    if (chunk_free_pos > 0) {
1241      TimingLogger::ScopedTiming t2("FreeList", GetTimings());
1242      freed.objects += chunk_free_pos;
1243      freed.bytes += alloc_space->FreeList(self, chunk_free_pos, chunk_free_buffer);
1244      chunk_free_pos = 0;
1245    }
1246    // All of the references which the space contained are no longer in the allocation stack;
1247    // update the count.
1248    count = out - objects;
1249  }
1250  // Handle the large object space.
1251  space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
1252  if (large_object_space != nullptr) {
1253    accounting::LargeObjectBitmap* large_live_objects = large_object_space->GetLiveBitmap();
1254    accounting::LargeObjectBitmap* large_mark_objects = large_object_space->GetMarkBitmap();
1255    if (swap_bitmaps) {
1256      std::swap(large_live_objects, large_mark_objects);
1257    }
1258    for (size_t i = 0; i < count; ++i) {
1259      mirror::Object* const obj = objects[i].AsMirrorPtr();
1260      // Handle large objects.
1261      if (kUseThreadLocalAllocationStack && obj == nullptr) {
1262        continue;
1263      }
1264      if (!large_mark_objects->Test(obj)) {
1265        ++freed_los.objects;
1266        freed_los.bytes += large_object_space->Free(self, obj);
1267      }
1268    }
1269  }
1270  {
1271    TimingLogger::ScopedTiming t2("RecordFree", GetTimings());
1272    RecordFree(freed);
1273    RecordFreeLOS(freed_los);
1274    t2.NewTiming("ResetStack");
1275    allocations->Reset();
1276  }
1277  sweep_array_free_buffer_mem_map_->MadviseDontNeedAndZero();
1278}
1279
1280void MarkSweep::Sweep(bool swap_bitmaps) {
1281  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
1282  // Ensure that nobody inserted items in the live stack after we swapped the stacks.
1283  CHECK_GE(live_stack_freeze_size_, GetHeap()->GetLiveStack()->Size());
1284  {
1285    TimingLogger::ScopedTiming t2("MarkAllocStackAsLive", GetTimings());
1286    // Mark everything allocated since the last GC as live so that we can sweep concurrently,
1287    // knowing that new allocations won't be marked as live.
1288    accounting::ObjectStack* live_stack = heap_->GetLiveStack();
1289    heap_->MarkAllocStackAsLive(live_stack);
1290    live_stack->Reset();
1291    DCHECK(mark_stack_->IsEmpty());
1292  }
1293  for (const auto& space : GetHeap()->GetContinuousSpaces()) {
1294    if (space->IsContinuousMemMapAllocSpace()) {
1295      space::ContinuousMemMapAllocSpace* alloc_space = space->AsContinuousMemMapAllocSpace();
1296      TimingLogger::ScopedTiming split(
1297          alloc_space->IsZygoteSpace() ? "SweepZygoteSpace" : "SweepMallocSpace",
1298          GetTimings());
1299      RecordFree(alloc_space->Sweep(swap_bitmaps));
1300    }
1301  }
1302  SweepLargeObjects(swap_bitmaps);
1303}
1304
1305void MarkSweep::SweepLargeObjects(bool swap_bitmaps) {
1306  space::LargeObjectSpace* los = heap_->GetLargeObjectsSpace();
1307  if (los != nullptr) {
1308    TimingLogger::ScopedTiming split(__FUNCTION__, GetTimings());
1309    RecordFreeLOS(los->Sweep(swap_bitmaps));
1310  }
1311}
1312
1313// Process the "referent" field in a java.lang.ref.Reference.  If the referent has not yet been
1314// marked, put it on the appropriate list in the heap for later processing.
1315void MarkSweep::DelayReferenceReferent(mirror::Class* klass, mirror::Reference* ref) {
1316  heap_->GetReferenceProcessor()->DelayReferenceReferent(klass, ref, this);
1317}
1318
1319class MarkVisitor {
1320 public:
1321  ALWAYS_INLINE explicit MarkVisitor(MarkSweep* const mark_sweep) : mark_sweep_(mark_sweep) {}
1322
1323  ALWAYS_INLINE void operator()(mirror::Object* obj,
1324                                MemberOffset offset,
1325                                bool is_static ATTRIBUTE_UNUSED) const
1326      REQUIRES(Locks::heap_bitmap_lock_)
1327      SHARED_REQUIRES(Locks::mutator_lock_) {
1328    if (kCheckLocks) {
1329      Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
1330      Locks::heap_bitmap_lock_->AssertExclusiveHeld(Thread::Current());
1331    }
1332    mark_sweep_->MarkObject(obj->GetFieldObject<mirror::Object>(offset), obj, offset);
1333  }
1334
1335  void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const
1336      REQUIRES(Locks::heap_bitmap_lock_)
1337      SHARED_REQUIRES(Locks::mutator_lock_) {
1338    if (!root->IsNull()) {
1339      VisitRoot(root);
1340    }
1341  }
1342
1343  void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
1344      REQUIRES(Locks::heap_bitmap_lock_)
1345      SHARED_REQUIRES(Locks::mutator_lock_) {
1346    if (kCheckLocks) {
1347      Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
1348      Locks::heap_bitmap_lock_->AssertExclusiveHeld(Thread::Current());
1349    }
1350    mark_sweep_->MarkObject(root->AsMirrorPtr());
1351  }
1352
1353 private:
1354  MarkSweep* const mark_sweep_;
1355};
1356
1357// Scans an object reference.  Determines the type of the reference
1358// and dispatches to a specialized scanning routine.
1359void MarkSweep::ScanObject(mirror::Object* obj) {
1360  MarkVisitor mark_visitor(this);
1361  DelayReferenceReferentVisitor ref_visitor(this);
1362  ScanObjectVisit(obj, mark_visitor, ref_visitor);
1363}
1364
1365void MarkSweep::ProcessMarkStackParallel(size_t thread_count) {
1366  Thread* self = Thread::Current();
1367  ThreadPool* thread_pool = GetHeap()->GetThreadPool();
1368  const size_t chunk_size = std::min(mark_stack_->Size() / thread_count + 1,
1369                                     static_cast<size_t>(MarkStackTask<false>::kMaxSize));
1370  CHECK_GT(chunk_size, 0U);
1371  // Split the current mark stack up into work tasks.
1372  for (auto* it = mark_stack_->Begin(), *end = mark_stack_->End(); it < end; ) {
1373    const size_t delta = std::min(static_cast<size_t>(end - it), chunk_size);
1374    thread_pool->AddTask(self, new MarkStackTask<false>(thread_pool, this, delta, it));
1375    it += delta;
1376  }
1377  thread_pool->SetMaxActiveWorkers(thread_count - 1);
1378  thread_pool->StartWorkers(self);
1379  thread_pool->Wait(self, true, true);
1380  thread_pool->StopWorkers(self);
1381  mark_stack_->Reset();
1382  CHECK_EQ(work_chunks_created_.LoadSequentiallyConsistent(),
1383           work_chunks_deleted_.LoadSequentiallyConsistent())
1384      << " some of the work chunks were leaked";
1385}
1386
1387// Scan anything that's on the mark stack.
1388void MarkSweep::ProcessMarkStack(bool paused) {
1389  TimingLogger::ScopedTiming t(paused ? "(Paused)ProcessMarkStack" : __FUNCTION__, GetTimings());
1390  size_t thread_count = GetThreadCount(paused);
1391  if (kParallelProcessMarkStack && thread_count > 1 &&
1392      mark_stack_->Size() >= kMinimumParallelMarkStackSize) {
1393    ProcessMarkStackParallel(thread_count);
1394  } else {
1395    // TODO: Tune this.
1396    static const size_t kFifoSize = 4;
1397    BoundedFifoPowerOfTwo<mirror::Object*, kFifoSize> prefetch_fifo;
1398    for (;;) {
1399      mirror::Object* obj = nullptr;
1400      if (kUseMarkStackPrefetch) {
1401        while (!mark_stack_->IsEmpty() && prefetch_fifo.size() < kFifoSize) {
1402          mirror::Object* mark_stack_obj = mark_stack_->PopBack();
1403          DCHECK(mark_stack_obj != nullptr);
1404          __builtin_prefetch(mark_stack_obj);
1405          prefetch_fifo.push_back(mark_stack_obj);
1406        }
1407        if (prefetch_fifo.empty()) {
1408          break;
1409        }
1410        obj = prefetch_fifo.front();
1411        prefetch_fifo.pop_front();
1412      } else {
1413        if (mark_stack_->IsEmpty()) {
1414          break;
1415        }
1416        obj = mark_stack_->PopBack();
1417      }
1418      DCHECK(obj != nullptr);
1419      ScanObject(obj);
1420    }
1421  }
1422}
1423
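// Returns the object if it is marked (immune, or its bit is set in the space or heap mark bitmap),
// or null if it is unmarked.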
1424inline mirror::Object* MarkSweep::IsMarked(mirror::Object* object) {
1425  if (immune_region_.ContainsObject(object)) {
1426    return object;
1427  }
1428  if (current_space_bitmap_->HasAddress(object)) {
1429    return current_space_bitmap_->Test(object) ? object : nullptr;
1430  }
1431  return mark_bitmap_->Test(object) ? object : nullptr;
1432}
1433
1434void MarkSweep::FinishPhase() {
1435  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
1436  if (kCountScannedTypes) {
1437    VLOG(gc)
1438        << "MarkSweep scanned"
1439        << " no reference objects=" << no_reference_class_count_.LoadRelaxed()
1440        << " normal objects=" << normal_count_.LoadRelaxed()
1441        << " classes=" << class_count_.LoadRelaxed()
1442        << " object arrays=" << object_array_count_.LoadRelaxed()
1443        << " references=" << reference_count_.LoadRelaxed()
1444        << " other=" << other_count_.LoadRelaxed();
1445  }
1446  if (kCountTasks) {
1447    VLOG(gc) << "Total number of work chunks allocated: " << work_chunks_created_.LoadRelaxed();
1448  }
1449  if (kMeasureOverhead) {
1450    VLOG(gc) << "Overhead time " << PrettyDuration(overhead_time_.LoadRelaxed());
1451  }
1452  if (kProfileLargeObjects) {
1453    VLOG(gc) << "Large objects tested " << large_object_test_.LoadRelaxed()
1454        << " marked " << large_object_mark_.LoadRelaxed();
1455  }
1456  if (kCountMarkedObjects) {
1457    VLOG(gc) << "Marked: null=" << mark_null_count_.LoadRelaxed()
1458        << " immune=" <<  mark_immune_count_.LoadRelaxed()
1459        << " fastpath=" << mark_fastpath_count_.LoadRelaxed()
1460        << " slowpath=" << mark_slowpath_count_.LoadRelaxed();
1461  }
1462  CHECK(mark_stack_->IsEmpty());  // Ensure that the mark stack is empty.
1463  mark_stack_->Reset();
1464  WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
1465  heap_->ClearMarkedObjects();
1466}
1467
1468void MarkSweep::RevokeAllThreadLocalBuffers() {
1469  if (kRevokeRosAllocThreadLocalBuffersAtCheckpoint && IsConcurrent()) {
1470    // If concurrent, rosalloc thread-local buffers are revoked at the
1471    // thread checkpoint. Bump pointer space thread-local buffers must
1472    // not be in use.
1473    GetHeap()->AssertAllBumpPointerSpaceThreadLocalBuffersAreRevoked();
1474  } else {
1475    TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
1476    GetHeap()->RevokeAllThreadLocalBuffers();
1477  }
1478}
1479
1480}  // namespace collector
1481}  // namespace gc
1482}  // namespace art
1483