mark_sweep.cc revision a9a50926963b5093fb851ed966d201f3e95f72d3
1/*
2 * Copyright (C) 2011 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 *      http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17#include "mark_sweep.h"
18
19#include <functional>
20#include <numeric>
21#include <climits>
22#include <vector>
23
24#include "base/logging.h"
25#include "base/macros.h"
26#include "base/mutex-inl.h"
27#include "base/timing_logger.h"
28#include "gc/accounting/card_table-inl.h"
29#include "gc/accounting/heap_bitmap.h"
30#include "gc/accounting/space_bitmap-inl.h"
31#include "gc/heap.h"
32#include "gc/space/image_space.h"
33#include "gc/space/large_object_space.h"
34#include "gc/space/space-inl.h"
35#include "indirect_reference_table.h"
36#include "intern_table.h"
37#include "jni_internal.h"
38#include "monitor.h"
39#include "mark_sweep-inl.h"
40#include "mirror/class-inl.h"
41#include "mirror/class_loader.h"
42#include "mirror/dex_cache.h"
43#include "mirror/field.h"
44#include "mirror/field-inl.h"
45#include "mirror/object-inl.h"
46#include "mirror/object_array.h"
47#include "mirror/object_array-inl.h"
48#include "runtime.h"
49#include "thread-inl.h"
50#include "thread_list.h"
51#include "verifier/method_verifier.h"
52
53using ::art::mirror::Class;
54using ::art::mirror::Field;
55using ::art::mirror::Object;
56using ::art::mirror::ObjectArray;
57
58namespace art {
59namespace gc {
60namespace collector {
61
62// Performance options.
63static const bool kParallelMarkStack = true;
64static const bool kDisableFinger = true;  // TODO: Fix, bit rotten.
65static const bool kUseMarkStackPrefetch = true;
66
67// Profiling and information flags.
68static const bool kCountClassesMarked = false;
69static const bool kProfileLargeObjects = false;
70static const bool kMeasureOverhead = false;
71static const bool kCountTasks = false;
72static const bool kCountJavaLangRefs = false;
73
74void MarkSweep::ImmuneSpace(space::ContinuousSpace* space) {
75  // Bind live to mark bitmap if necessary.
76  if (space->GetLiveBitmap() != space->GetMarkBitmap()) {
77    BindLiveToMarkBitmap(space);
78  }
79
80  // Add the space to the immune region.
81  if (immune_begin_ == NULL) {
82    DCHECK(immune_end_ == NULL);
83    SetImmuneRange(reinterpret_cast<Object*>(space->Begin()),
84                   reinterpret_cast<Object*>(space->End()));
85  } else {
86    const std::vector<space::ContinuousSpace*>& spaces = GetHeap()->GetContinuousSpaces();
87    const space::ContinuousSpace* prev_space = NULL;
88    // Find out if the previous space is immune.
89    // TODO: C++0x
90    typedef std::vector<space::ContinuousSpace*>::const_iterator It;
91    for (It it = spaces.begin(), end = spaces.end(); it != end; ++it) {
92      if (*it == space) {
93        break;
94      }
95      prev_space = *it;
96    }
97
98    // If previous space was immune, then extend the immune region. Relies on continuous spaces
99    // being sorted by Heap::AddContinuousSpace.
100    if (prev_space != NULL &&
101        immune_begin_ <= reinterpret_cast<Object*>(prev_space->Begin()) &&
102        immune_end_ >= reinterpret_cast<Object*>(prev_space->End())) {
103      immune_begin_ = std::min(reinterpret_cast<Object*>(space->Begin()), immune_begin_);
104      immune_end_ = std::max(reinterpret_cast<Object*>(space->End()), immune_end_);
105    }
106  }
107}
108
109void MarkSweep::BindBitmaps() {
110  timings_.StartSplit("BindBitmaps");
111  const std::vector<space::ContinuousSpace*>& spaces = GetHeap()->GetContinuousSpaces();
112  WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
113
114  // Mark all of the spaces we never collect as immune.
115  typedef std::vector<space::ContinuousSpace*>::const_iterator It;
116  for (It it = spaces.begin(), end = spaces.end(); it != end; ++it) {
117    space::ContinuousSpace* space = *it;
118    if (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyNeverCollect) {
119      ImmuneSpace(space);
120    }
121  }
122  timings_.EndSplit();
123}
124
125MarkSweep::MarkSweep(Heap* heap, bool is_concurrent, const std::string& name_prefix)
126    : GarbageCollector(heap,
127                       name_prefix + (name_prefix.empty() ? "" : " ") +
128                       (is_concurrent ? "concurrent mark sweep": "mark sweep")),
129      current_mark_bitmap_(NULL),
130      java_lang_Class_(NULL),
131      mark_stack_(NULL),
132      immune_begin_(NULL),
133      immune_end_(NULL),
134      soft_reference_list_(NULL),
135      weak_reference_list_(NULL),
136      finalizer_reference_list_(NULL),
137      phantom_reference_list_(NULL),
138      cleared_reference_list_(NULL),
139      gc_barrier_(new Barrier(0)),
140      large_object_lock_("mark sweep large object lock", kMarkSweepLargeObjectLock),
141      mark_stack_expand_lock_("mark sweep mark stack expand lock"),
142      is_concurrent_(is_concurrent),
143      clear_soft_references_(false) {
144}
145
146void MarkSweep::InitializePhase() {
147  timings_.Reset();
148  timings_.StartSplit("InitializePhase");
149  mark_stack_ = GetHeap()->mark_stack_.get();
150  DCHECK(mark_stack_ != NULL);
151  SetImmuneRange(NULL, NULL);
152  soft_reference_list_ = NULL;
153  weak_reference_list_ = NULL;
154  finalizer_reference_list_ = NULL;
155  phantom_reference_list_ = NULL;
156  cleared_reference_list_ = NULL;
157  freed_bytes_ = 0;
158  freed_objects_ = 0;
159  class_count_ = 0;
160  array_count_ = 0;
161  other_count_ = 0;
162  large_object_test_ = 0;
163  large_object_mark_ = 0;
164  classes_marked_ = 0;
165  overhead_time_ = 0;
166  work_chunks_created_ = 0;
167  work_chunks_deleted_ = 0;
168  reference_count_ = 0;
169  java_lang_Class_ = Class::GetJavaLangClass();
170  CHECK(java_lang_Class_ != NULL);
171  timings_.EndSplit();
172
173  FindDefaultMarkBitmap();
174
175  // Do any pre GC verification.
176  timings_.StartSplit("PreGcVerification");
177  heap_->PreGcVerification(this);
178  timings_.EndSplit();
179}
180
181void MarkSweep::ProcessReferences(Thread* self) {
182  WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
183  ProcessReferences(&soft_reference_list_, clear_soft_references_, &weak_reference_list_,
184                    &finalizer_reference_list_, &phantom_reference_list_);
185}
186
187bool MarkSweep::HandleDirtyObjectsPhase() {
188  Thread* self = Thread::Current();
189  accounting::ObjectStack* allocation_stack = GetHeap()->allocation_stack_.get();
190  Locks::mutator_lock_->AssertExclusiveHeld(self);
191
192  {
193    WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
194
195    // Re-mark root set.
196    ReMarkRoots();
197
198    // Scan dirty objects, this is only required if we are not doing concurrent GC.
199    RecursiveMarkDirtyObjects(accounting::CardTable::kCardDirty);
200  }
201
202  ProcessReferences(self);
203
204  // Only need to do this if we have the card mark verification on, and only during concurrent GC.
205  if (GetHeap()->verify_missing_card_marks_) {
206    WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
207    // This second sweep makes sure that we don't have any objects in the live stack which point to
208    // freed objects. These cause problems since their references may be previously freed objects.
209    SweepArray(allocation_stack, false);
210  }
211  return true;
212}
213
214bool MarkSweep::IsConcurrent() const {
215  return is_concurrent_;
216}
217
218void MarkSweep::MarkingPhase() {
219  Heap* heap = GetHeap();
220  Thread* self = Thread::Current();
221
222  BindBitmaps();
223  FindDefaultMarkBitmap();
224
225  // Process dirty cards and add dirty cards to mod union tables.
226  heap->ProcessCards(timings_);
227
228  // Need to do this before the checkpoint since we don't want any threads to add references to
229  // the live stack during the recursive mark.
230  timings_.StartSplit("SwapStacks");
231  heap->SwapStacks();
232  timings_.EndSplit();
233
234  WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
235  if (Locks::mutator_lock_->IsExclusiveHeld(self)) {
236    // If we exclusively hold the mutator lock, all threads must be suspended.
237    MarkRoots();
238  } else {
239    MarkRootsCheckpoint(self);
240    MarkNonThreadRoots();
241  }
242  MarkConcurrentRoots();
243
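  // Mark objects referenced from the immune (image/zygote) spaces using the mod-union tables,
  // which record the cards dirtied in those spaces (see ProcessCards above).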
244  heap->UpdateAndMarkModUnion(this, timings_, GetGcType());
245  MarkReachableObjects();
246}
247
248void MarkSweep::MarkReachableObjects() {
249  // Mark everything allocated since the last GC as live so that we can sweep concurrently,
250  // knowing that new allocations won't be marked as live.
251  timings_.StartSplit("MarkStackAsLive");
252  accounting::ObjectStack* live_stack = heap_->GetLiveStack();
253  heap_->MarkAllocStack(heap_->alloc_space_->GetLiveBitmap(),
254                       heap_->large_object_space_->GetLiveObjects(),
255                       live_stack);
256  live_stack->Reset();
257  timings_.EndSplit();
258  // Recursively mark all the non-image bits set in the mark bitmap.
259  RecursiveMark();
260}
261
262void MarkSweep::ReclaimPhase() {
263  Thread* self = Thread::Current();
264
265  if (!IsConcurrent()) {
266    base::TimingLogger::ScopedSplit split("ProcessReferences", &timings_);
267    ProcessReferences(self);
268  } else {
269    base::TimingLogger::ScopedSplit split("UnMarkAllocStack", &timings_);
270    accounting::ObjectStack* allocation_stack = GetHeap()->allocation_stack_.get();
271    WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
272    // The allocation stack contains things allocated since the start of the GC. These may have been
273    // marked during this GC meaning they won't be eligible for reclaiming in the next sticky GC.
274    // Remove these objects from the mark bitmaps so that they will be eligible for sticky
275    // collection.
276    // There is a race here which is safely handled. Another thread such as the hprof could
277    // have flushed the alloc stack after we resumed the threads. This is safe however, since
278    // resetting the allocation stack zeros it out with madvise. This means that we will either
279    // read NULLs or attempt to unmark a newly allocated object which will not be marked in the
280    // first place.
281    mirror::Object** end = allocation_stack->End();
282    for (mirror::Object** it = allocation_stack->Begin(); it != end; ++it) {
283      Object* obj = *it;
284      if (obj != NULL) {
285        UnMarkObjectNonNull(obj);
286      }
287    }
288  }
289
290  // Before freeing anything, lets verify the heap.
291  if (kIsDebugBuild) {
292    ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
293    VerifyImageRoots();
294  }
295  timings_.StartSplit("PreSweepingGcVerification");
296  heap_->PreSweepingGcVerification(this);
297  timings_.EndSplit();
298
299  {
300    WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
301
302    // Reclaim unmarked objects.
303    Sweep(false);
304
305    // Swap the live and mark bitmaps for each space which we modified. This is an
306    // optimization that enables us to not clear live bits inside of the sweep. Only swaps unbound
307    // bitmaps.
308    timings_.StartSplit("SwapBitmaps");
309    SwapBitmaps();
310    timings_.EndSplit();
311
312    // Unbind the live and mark bitmaps.
313    UnBindBitmaps();
314  }
315}
316
317void MarkSweep::SetImmuneRange(Object* begin, Object* end) {
318  immune_begin_ = begin;
319  immune_end_ = end;
320}
321
322void MarkSweep::FindDefaultMarkBitmap() {
323  timings_.StartSplit("FindDefaultMarkBitmap");
324  const std::vector<space::ContinuousSpace*>& spaces = GetHeap()->GetContinuousSpaces();
325  // TODO: C++0x
326  typedef std::vector<space::ContinuousSpace*>::const_iterator It;
327  for (It it = spaces.begin(), end = spaces.end(); it != end; ++it) {
328    space::ContinuousSpace* space = *it;
329    if (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyAlwaysCollect) {
330      current_mark_bitmap_ = (*it)->GetMarkBitmap();
331      CHECK(current_mark_bitmap_ != NULL);
332      timings_.EndSplit();
333      return;
334    }
335  }
336  GetHeap()->DumpSpaces();
337  LOG(FATAL) << "Could not find a default mark bitmap";
338}
339
340void MarkSweep::ExpandMarkStack() {
341  // Rare case, no need to have Thread::Current be a parameter.
342  MutexLock mu(Thread::Current(), mark_stack_expand_lock_);
343  if (UNLIKELY(mark_stack_->Size() < mark_stack_->Capacity())) {
344    // Someone else acquired the lock and expanded the mark stack before us.
345    return;
346  }
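  // Resize() is assumed not to preserve the existing contents (it remaps the backing storage),
  // so save the entries first and push them back onto the resized stack below.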
347  std::vector<Object*> temp;
348  temp.insert(temp.begin(), mark_stack_->Begin(), mark_stack_->End());
349  mark_stack_->Resize(mark_stack_->Capacity() * 2);
350  for (size_t i = 0; i < temp.size(); ++i) {
351    mark_stack_->PushBack(temp[i]);
352  }
353}
354
355inline void MarkSweep::MarkObjectNonNullParallel(const Object* obj) {
356  DCHECK(obj != NULL);
357  if (MarkObjectParallel(obj)) {
358    while (UNLIKELY(!mark_stack_->AtomicPushBack(const_cast<Object*>(obj)))) {
359      // The only reason a push can fail is that the mark stack is full.
360      ExpandMarkStack();
361    }
362  }
363}
364
365inline void MarkSweep::UnMarkObjectNonNull(const Object* obj) {
366  DCHECK(!IsImmune(obj));
367  // Try to take advantage of locality of references within a space, failing this find the space
368  // the hard way.
369  accounting::SpaceBitmap* object_bitmap = current_mark_bitmap_;
370  if (UNLIKELY(!object_bitmap->HasAddress(obj))) {
371    accounting::SpaceBitmap* new_bitmap = heap_->GetMarkBitmap()->GetContinuousSpaceBitmap(obj);
372    if (LIKELY(new_bitmap != NULL)) {
373      object_bitmap = new_bitmap;
374    } else {
375      MarkLargeObject(obj, false);
376      return;
377    }
378  }
379
380  DCHECK(object_bitmap->HasAddress(obj));
381  object_bitmap->Clear(obj);
382}
383
384inline void MarkSweep::MarkObjectNonNull(const Object* obj) {
385  DCHECK(obj != NULL);
386
387  if (IsImmune(obj)) {
388    DCHECK(IsMarked(obj));
389    return;
390  }
391
392  // Try to take advantage of locality of references within a space, failing this find the space
393  // the hard way.
394  accounting::SpaceBitmap* object_bitmap = current_mark_bitmap_;
395  if (UNLIKELY(!object_bitmap->HasAddress(obj))) {
396    accounting::SpaceBitmap* new_bitmap = heap_->GetMarkBitmap()->GetContinuousSpaceBitmap(obj);
397    if (LIKELY(new_bitmap != NULL)) {
398      object_bitmap = new_bitmap;
399    } else {
400      MarkLargeObject(obj, true);
401      return;
402    }
403  }
404
405  // This object was not previously marked.
406  if (!object_bitmap->Test(obj)) {
407    object_bitmap->Set(obj);
408    // Do we need to expand the mark stack?
409    if (UNLIKELY(mark_stack_->Size() >= mark_stack_->Capacity())) {
410      ExpandMarkStack();
411    }
412    // The object must be pushed on to the mark stack.
413    mark_stack_->PushBack(const_cast<Object*>(obj));
414  }
415}
416
417// Rare case, probably not worth inlining since it will increase the instruction cache miss rate.
418bool MarkSweep::MarkLargeObject(const Object* obj, bool set) {
419  // TODO: support >1 discontinuous space.
420  space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
421  accounting::SpaceSetMap* large_objects = large_object_space->GetMarkObjects();
422  if (kProfileLargeObjects) {
423    ++large_object_test_;
424  }
425  if (UNLIKELY(!large_objects->Test(obj))) {
426    if (!large_object_space->Contains(obj)) {
427      LOG(ERROR) << "Tried to mark " << obj << " not contained by any spaces";
428      LOG(ERROR) << "Attempting see if it's a bad root";
429      VerifyRoots();
430      LOG(FATAL) << "Can't mark bad root";
431    }
432    if (kProfileLargeObjects) {
433      ++large_object_mark_;
434    }
435    if (set) {
436      large_objects->Set(obj);
437    } else {
438      large_objects->Clear(obj);
439    }
440    return true;
441  }
442  return false;
443}
444
445inline bool MarkSweep::MarkObjectParallel(const Object* obj) {
446  DCHECK(obj != NULL);
447
448  if (IsImmune(obj)) {
449    DCHECK(IsMarked(obj));
450    return false;
451  }
452
453  // Try to take advantage of locality of references within a space, failing this find the space
454  // the hard way.
455  accounting::SpaceBitmap* object_bitmap = current_mark_bitmap_;
456  if (UNLIKELY(!object_bitmap->HasAddress(obj))) {
457    accounting::SpaceBitmap* new_bitmap = heap_->GetMarkBitmap()->GetContinuousSpaceBitmap(obj);
458    if (new_bitmap != NULL) {
459      object_bitmap = new_bitmap;
460    } else {
461      // TODO: Remove the Thread::Current here?
462      // TODO: Convert this to some kind of atomic marking?
463      MutexLock mu(Thread::Current(), large_object_lock_);
464      return MarkLargeObject(obj, true);
465    }
466  }
467
468  // Return true if the object was not previously marked.
469  return !object_bitmap->AtomicTestAndSet(obj);
470}
471
472// Used to mark objects when recursing.  Recursion is done by moving
473// the finger across the bitmaps in address order and marking child
474// objects.  Any newly-marked objects whose addresses are lower than
475// the finger won't be visited by the bitmap scan, so those objects
476// need to be added to the mark stack.
477void MarkSweep::MarkObject(const Object* obj) {
478  if (obj != NULL) {
479    MarkObjectNonNull(obj);
480  }
481}
482
483void MarkSweep::MarkRoot(const Object* obj) {
484  if (obj != NULL) {
485    MarkObjectNonNull(obj);
486  }
487}
488
489void MarkSweep::MarkRootParallelCallback(const Object* root, void* arg) {
490  DCHECK(root != NULL);
491  DCHECK(arg != NULL);
492  MarkSweep* mark_sweep = reinterpret_cast<MarkSweep*>(arg);
493  mark_sweep->MarkObjectNonNullParallel(root);
494}
495
496void MarkSweep::MarkObjectCallback(const Object* root, void* arg) {
497  DCHECK(root != NULL);
498  DCHECK(arg != NULL);
499  MarkSweep* mark_sweep = reinterpret_cast<MarkSweep*>(arg);
500  mark_sweep->MarkObjectNonNull(root);
501}
502
503void MarkSweep::ReMarkObjectVisitor(const Object* root, void* arg) {
504  DCHECK(root != NULL);
505  DCHECK(arg != NULL);
506  MarkSweep* mark_sweep = reinterpret_cast<MarkSweep*>(arg);
507  mark_sweep->MarkObjectNonNull(root);
508}
509
510void MarkSweep::VerifyRootCallback(const Object* root, void* arg, size_t vreg,
511                                   const StackVisitor* visitor) {
512  reinterpret_cast<MarkSweep*>(arg)->VerifyRoot(root, vreg, visitor);
513}
514
515void MarkSweep::VerifyRoot(const Object* root, size_t vreg, const StackVisitor* visitor) {
516  // See if the root is on any space bitmap.
517  if (GetHeap()->GetLiveBitmap()->GetContinuousSpaceBitmap(root) == NULL) {
518    space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
519    if (!large_object_space->Contains(root)) {
520      LOG(ERROR) << "Found invalid root: " << root;
521      if (visitor != NULL) {
522        LOG(ERROR) << visitor->DescribeLocation() << " in VReg: " << vreg;
523      }
524    }
525  }
526}
527
528void MarkSweep::VerifyRoots() {
529  Runtime::Current()->GetThreadList()->VerifyRoots(VerifyRootCallback, this);
530}
531
532// Marks all objects in the root set.
533void MarkSweep::MarkRoots() {
534  timings_.StartSplit("MarkRoots");
535  Runtime::Current()->VisitNonConcurrentRoots(MarkObjectCallback, this);
536  timings_.EndSplit();
537}
538
539void MarkSweep::MarkNonThreadRoots() {
540  timings_.StartSplit("MarkNonThreadRoots");
541  Runtime::Current()->VisitNonThreadRoots(MarkObjectCallback, this);
542  timings_.EndSplit();
543}
544
545void MarkSweep::MarkConcurrentRoots() {
546  timings_.StartSplit("MarkConcurrentRoots");
547  // Visit all runtime roots and clear dirty flags.
548  Runtime::Current()->VisitConcurrentRoots(MarkObjectCallback, this, false, true);
549  timings_.EndSplit();
550}
551
552class CheckObjectVisitor {
553 public:
554  explicit CheckObjectVisitor(MarkSweep* const mark_sweep) : mark_sweep_(mark_sweep) {}
555
556  void operator()(const Object* obj, const Object* ref, MemberOffset offset, bool is_static) const
557      NO_THREAD_SAFETY_ANALYSIS {
558    if (kDebugLocking) {
559      Locks::heap_bitmap_lock_->AssertSharedHeld(Thread::Current());
560    }
561    mark_sweep_->CheckReference(obj, ref, offset, is_static);
562  }
563
564 private:
565  MarkSweep* const mark_sweep_;
566};
567
568void MarkSweep::CheckObject(const Object* obj) {
569  DCHECK(obj != NULL);
570  CheckObjectVisitor visitor(this);
571  VisitObjectReferences(obj, visitor);
572}
573
574void MarkSweep::VerifyImageRootVisitor(Object* root, void* arg) {
575  DCHECK(root != NULL);
576  DCHECK(arg != NULL);
577  MarkSweep* mark_sweep = reinterpret_cast<MarkSweep*>(arg);
578  DCHECK(mark_sweep->heap_->GetMarkBitmap()->Test(root));
579  mark_sweep->CheckObject(root);
580}
581
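// Binding makes the space's live bitmap double as its mark bitmap, so every object currently live
// is treated as marked; the original mark bitmap is stashed in temp_bitmap_ and restored later by
// UnBindBitmaps().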
582void MarkSweep::BindLiveToMarkBitmap(space::ContinuousSpace* space) {
583  CHECK(space->IsDlMallocSpace());
584  space::DlMallocSpace* alloc_space = space->AsDlMallocSpace();
585  accounting::SpaceBitmap* live_bitmap = space->GetLiveBitmap();
586  accounting::SpaceBitmap* mark_bitmap = alloc_space->mark_bitmap_.release();
587  GetHeap()->GetMarkBitmap()->ReplaceBitmap(mark_bitmap, live_bitmap);
588  alloc_space->temp_bitmap_.reset(mark_bitmap);
589  alloc_space->mark_bitmap_.reset(live_bitmap);
590}
591
592class ScanObjectVisitor {
593 public:
594  explicit ScanObjectVisitor(MarkSweep* const mark_sweep) : mark_sweep_(mark_sweep) {}
595
596  // TODO: Fix this when annotalysis works with visitors.
597  void operator()(const Object* obj) const NO_THREAD_SAFETY_ANALYSIS {
598    if (kDebugLocking) {
599      Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
600      Locks::heap_bitmap_lock_->AssertExclusiveHeld(Thread::Current());
601    }
602    mark_sweep_->ScanObject(obj);
603  }
604
605 private:
606  MarkSweep* const mark_sweep_;
607};
608
609void MarkSweep::ScanGrayObjects(byte minimum_age) {
610  accounting::CardTable* card_table = GetHeap()->GetCardTable();
611  const std::vector<space::ContinuousSpace*>& spaces = GetHeap()->GetContinuousSpaces();
612  ScanObjectVisitor visitor(this);
613  // TODO: C++0x
614  typedef std::vector<space::ContinuousSpace*>::const_iterator It;
615  for (It it = spaces.begin(), space_end = spaces.end(); it != space_end; ++it) {
616    space::ContinuousSpace* space = *it;
617    switch (space->GetGcRetentionPolicy()) {
618      case space::kGcRetentionPolicyNeverCollect:
619        timings_.StartSplit("ScanGrayImageSpaceObjects");
620        break;
621      case space::kGcRetentionPolicyFullCollect:
622        timings_.StartSplit("ScanGrayZygoteSpaceObjects");
623        break;
624      case space::kGcRetentionPolicyAlwaysCollect:
625        timings_.StartSplit("ScanGrayAllocSpaceObjects");
626        break;
627    }
628    byte* begin = space->Begin();
629    byte* end = space->End();
630    // Image spaces are handled properly since live == marked for them.
631    accounting::SpaceBitmap* mark_bitmap = space->GetMarkBitmap();
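    // Re-scan marked objects on cards whose value is at least minimum_age, e.g. kCardDirty
    // when called from RecursiveMarkDirtyObjects during the remark pause.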
632    card_table->Scan(mark_bitmap, begin, end, visitor, minimum_age);
633    timings_.EndSplit();
634  }
635}
636
637class CheckBitmapVisitor {
638 public:
639  explicit CheckBitmapVisitor(MarkSweep* mark_sweep) : mark_sweep_(mark_sweep) {}
640
641  void operator()(const Object* obj) const NO_THREAD_SAFETY_ANALYSIS {
642    if (kDebugLocking) {
643      Locks::heap_bitmap_lock_->AssertSharedHeld(Thread::Current());
644    }
645    DCHECK(obj != NULL);
646    mark_sweep_->CheckObject(obj);
647  }
648
649 private:
650  MarkSweep* mark_sweep_;
651};
652
653void MarkSweep::VerifyImageRoots() {
654  // Verify roots: ensure that all the references inside the image space point to
655  // objects which are either in the image space or marked objects in the alloc
656  // space.
657  timings_.StartSplit("VerifyImageRoots");
658  CheckBitmapVisitor visitor(this);
659  const std::vector<space::ContinuousSpace*>& spaces = GetHeap()->GetContinuousSpaces();
660  // TODO: C++0x
661  typedef std::vector<space::ContinuousSpace*>::const_iterator It;
662  for (It it = spaces.begin(), end = spaces.end(); it != end; ++it) {
663    if ((*it)->IsImageSpace()) {
664      space::ImageSpace* space = (*it)->AsImageSpace();
665      uintptr_t begin = reinterpret_cast<uintptr_t>(space->Begin());
666      uintptr_t end = reinterpret_cast<uintptr_t>(space->End());
667      accounting::SpaceBitmap* live_bitmap = space->GetLiveBitmap();
668      DCHECK(live_bitmap != NULL);
669      live_bitmap->VisitMarkedRange(begin, end, visitor);
670    }
671  }
672  timings_.EndSplit();
673}
674
675// Populates the mark stack based on the set of marked objects and
676// recursively marks until the mark stack is emptied.
677void MarkSweep::RecursiveMark() {
678  base::TimingLogger::ScopedSplit split("RecursiveMark", &timings_);
679  // RecursiveMark will build the lists of known instances of the Reference classes.
680  // See DelayReferenceReferent for details.
681  CHECK(soft_reference_list_ == NULL);
682  CHECK(weak_reference_list_ == NULL);
683  CHECK(finalizer_reference_list_ == NULL);
684  CHECK(phantom_reference_list_ == NULL);
685  CHECK(cleared_reference_list_ == NULL);
686
687  const bool partial = GetGcType() == kGcTypePartial;
688  ScanObjectVisitor scan_visitor(this);
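  // With kDisableFinger set (the current default above), the bitmap walk below is skipped and
  // everything is marked transitively from the mark stack in ProcessMarkStack().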
689  if (!kDisableFinger) {
690    const std::vector<space::ContinuousSpace*>& spaces = GetHeap()->GetContinuousSpaces();
691    // TODO: C++0x
692    typedef std::vector<space::ContinuousSpace*>::const_iterator It;
693    for (It it = spaces.begin(), end = spaces.end(); it != end; ++it) {
694      space::ContinuousSpace* space = *it;
695      if ((space->GetGcRetentionPolicy() == space::kGcRetentionPolicyAlwaysCollect) ||
696          (!partial && space->GetGcRetentionPolicy() == space::kGcRetentionPolicyFullCollect)) {
697        current_mark_bitmap_ = space->GetMarkBitmap();
698        if (current_mark_bitmap_ == NULL) {
699          GetHeap()->DumpSpaces();
700          LOG(FATAL) << "invalid bitmap";
701        }
702        // This function does not handle heap end increasing, so we must use the space end.
703        uintptr_t begin = reinterpret_cast<uintptr_t>(space->Begin());
704        uintptr_t end = reinterpret_cast<uintptr_t>(space->End());
705        current_mark_bitmap_->VisitMarkedRange(begin, end, scan_visitor);
706      }
707    }
708  }
709  ProcessMarkStack();
710}
711
712bool MarkSweep::IsMarkedCallback(const Object* object, void* arg) {
713  return
714      reinterpret_cast<MarkSweep*>(arg)->IsMarked(object) ||
715      !reinterpret_cast<MarkSweep*>(arg)->GetHeap()->GetLiveBitmap()->Test(object);
716}
717
718void MarkSweep::RecursiveMarkDirtyObjects(byte minimum_age) {
719  ScanGrayObjects(minimum_age);
720  ProcessMarkStack();
721}
722
723void MarkSweep::ReMarkRoots() {
724  timings_.StartSplit("ReMarkRoots");
725  Runtime::Current()->VisitRoots(ReMarkObjectVisitor, this, true, true);
726  timings_.EndSplit();
727}
728
729void MarkSweep::SweepJniWeakGlobals(IsMarkedTester is_marked, void* arg) {
730  JavaVMExt* vm = Runtime::Current()->GetJavaVM();
731  MutexLock mu(Thread::Current(), vm->weak_globals_lock);
732  IndirectReferenceTable* table = &vm->weak_globals;
733  typedef IndirectReferenceTable::iterator It;  // TODO: C++0x auto
734  for (It it = table->begin(), end = table->end(); it != end; ++it) {
735    const Object** entry = *it;
736    if (!is_marked(*entry, arg)) {
737      *entry = kClearedJniWeakGlobal;
738    }
739  }
740}
741
742struct ArrayMarkedCheck {
743  accounting::ObjectStack* live_stack;
744  MarkSweep* mark_sweep;
745};
746
747// Either marked or not live.
748bool MarkSweep::IsMarkedArrayCallback(const Object* object, void* arg) {
749  ArrayMarkedCheck* array_check = reinterpret_cast<ArrayMarkedCheck*>(arg);
750  if (array_check->mark_sweep->IsMarked(object)) {
751    return true;
752  }
753  accounting::ObjectStack* live_stack = array_check->live_stack;
754  return std::find(live_stack->Begin(), live_stack->End(), object) == live_stack->End();
755}
756
757void MarkSweep::SweepSystemWeaksArray(accounting::ObjectStack* allocations) {
758  Runtime* runtime = Runtime::Current();
759  // The callbacks check !is_marked, where is_marked is the callback, but what we
760  // actually want is !IsMarked && IsLive.
761  // So compute !(!IsMarked && IsLive), which is equal to
762  // (IsMarked || !IsLive).
763  // Or, for swapped bitmaps, (IsLive || !IsMarked).
764
765  timings_.StartSplit("SweepSystemWeaksArray");
766  ArrayMarkedCheck visitor;
767  visitor.live_stack = allocations;
768  visitor.mark_sweep = this;
769  runtime->GetInternTable()->SweepInternTableWeaks(IsMarkedArrayCallback, &visitor);
770  runtime->GetMonitorList()->SweepMonitorList(IsMarkedArrayCallback, &visitor);
771  SweepJniWeakGlobals(IsMarkedArrayCallback, &visitor);
772  timings_.EndSplit();
773}
774
775void MarkSweep::SweepSystemWeaks() {
776  Runtime* runtime = Runtime::Current();
777  // The callbacks check !is_marked, where is_marked is the callback, but what we
778  // actually want is !IsMarked && IsLive.
779  // So compute !(!IsMarked && IsLive), which is equal to
780  // (IsMarked || !IsLive).
781  // Or, for swapped bitmaps, (IsLive || !IsMarked).
782  timings_.StartSplit("SweepSystemWeaks");
783  runtime->GetInternTable()->SweepInternTableWeaks(IsMarkedCallback, this);
784  runtime->GetMonitorList()->SweepMonitorList(IsMarkedCallback, this);
785  SweepJniWeakGlobals(IsMarkedCallback, this);
786  timings_.EndSplit();
787}
788
789bool MarkSweep::VerifyIsLiveCallback(const Object* obj, void* arg) {
790  reinterpret_cast<MarkSweep*>(arg)->VerifyIsLive(obj);
791  // We don't actually want to sweep the object, so let's return "marked".
792  return true;
793}
794
795void MarkSweep::VerifyIsLive(const Object* obj) {
796  Heap* heap = GetHeap();
797  if (!heap->GetLiveBitmap()->Test(obj)) {
798    space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
799    if (!large_object_space->GetLiveObjects()->Test(obj)) {
800      if (std::find(heap->allocation_stack_->Begin(), heap->allocation_stack_->End(), obj) ==
801          heap->allocation_stack_->End()) {
802        // Object not found!
803        heap->DumpSpaces();
804        LOG(FATAL) << "Found dead object " << obj;
805      }
806    }
807  }
808}
809
810void MarkSweep::VerifySystemWeaks() {
811  Runtime* runtime = Runtime::Current();
812  // Verify system weaks, uses a special IsMarked callback which always returns true.
813  runtime->GetInternTable()->SweepInternTableWeaks(VerifyIsLiveCallback, this);
814  runtime->GetMonitorList()->SweepMonitorList(VerifyIsLiveCallback, this);
815
816  JavaVMExt* vm = runtime->GetJavaVM();
817  MutexLock mu(Thread::Current(), vm->weak_globals_lock);
818  IndirectReferenceTable* table = &vm->weak_globals;
819  typedef IndirectReferenceTable::iterator It;  // TODO: C++0x auto
820  for (It it = table->begin(), end = table->end(); it != end; ++it) {
821    const Object** entry = *it;
822    VerifyIsLive(*entry);
823  }
824}
825
826struct SweepCallbackContext {
827  MarkSweep* mark_sweep;
828  space::AllocSpace* space;
829  Thread* self;
830};
831
832class CheckpointMarkThreadRoots : public Closure {
833 public:
834  explicit CheckpointMarkThreadRoots(MarkSweep* mark_sweep) : mark_sweep_(mark_sweep) {}
835
836  virtual void Run(Thread* thread) NO_THREAD_SAFETY_ANALYSIS {
837    // Note: self is not necessarily equal to thread since thread may be suspended.
838    Thread* self = Thread::Current();
839    CHECK(thread == self || thread->IsSuspended() || thread->GetState() == kWaitingPerformingGc)
840        << thread->GetState() << " thread " << thread << " self " << self;
841    thread->VisitRoots(MarkSweep::MarkRootParallelCallback, mark_sweep_);
842    mark_sweep_->GetBarrier().Pass(self);
843  }
844
845 private:
846  MarkSweep* mark_sweep_;
847};
848
849void MarkSweep::MarkRootsCheckpoint(Thread* self) {
850  CheckpointMarkThreadRoots check_point(this);
851  timings_.StartSplit("MarkRootsCheckpoint");
852  ThreadList* thread_list = Runtime::Current()->GetThreadList();
853  // Request that the checkpoint be run on all threads, returning a count of the threads that
854  // must run through the barrier, including self.
855  size_t barrier_count = thread_list->RunCheckpoint(&check_point);
856  // Release locks then wait for all mutator threads to pass the barrier.
857  // TODO: optimize to not release locks when there are no threads to wait for.
858  Locks::heap_bitmap_lock_->ExclusiveUnlock(self);
859  Locks::mutator_lock_->SharedUnlock(self);
860  ThreadState old_state = self->SetState(kWaitingForCheckPointsToRun);
861  CHECK_EQ(old_state, kWaitingPerformingGc);
862  gc_barrier_->Increment(self, barrier_count);
863  self->SetState(kWaitingPerformingGc);
864  Locks::mutator_lock_->SharedLock(self);
865  Locks::heap_bitmap_lock_->ExclusiveLock(self);
866  timings_.EndSplit();
867}
868
869void MarkSweep::SweepCallback(size_t num_ptrs, Object** ptrs, void* arg) {
870  SweepCallbackContext* context = static_cast<SweepCallbackContext*>(arg);
871  MarkSweep* mark_sweep = context->mark_sweep;
872  Heap* heap = mark_sweep->GetHeap();
873  space::AllocSpace* space = context->space;
874  Thread* self = context->self;
875  Locks::heap_bitmap_lock_->AssertExclusiveHeld(self);
876  // Use a bulk free that merges consecutive objects before freeing, or free per object?
877  // Documentation suggests better free performance with merging, but this may be at the expense
878  // of allocation.
879  size_t freed_objects = num_ptrs;
880  // AllocSpace::FreeList clears the value in ptrs, so perform after clearing the live bit
881  size_t freed_bytes = space->FreeList(self, num_ptrs, ptrs);
882  heap->RecordFree(freed_objects, freed_bytes);
883  mark_sweep->freed_objects_.fetch_add(freed_objects);
884  mark_sweep->freed_bytes_.fetch_add(freed_bytes);
885}
886
887void MarkSweep::ZygoteSweepCallback(size_t num_ptrs, Object** ptrs, void* arg) {
888  SweepCallbackContext* context = static_cast<SweepCallbackContext*>(arg);
889  Locks::heap_bitmap_lock_->AssertExclusiveHeld(context->self);
890  Heap* heap = context->mark_sweep->GetHeap();
891  // We don't free any actual memory to avoid dirtying the shared zygote pages.
892  for (size_t i = 0; i < num_ptrs; ++i) {
893    Object* obj = static_cast<Object*>(ptrs[i]);
894    heap->GetLiveBitmap()->Clear(obj);
895    heap->GetCardTable()->MarkCard(obj);
896  }
897}
898
899void MarkSweep::SweepArray(accounting::ObjectStack* allocations, bool swap_bitmaps) {
900  size_t freed_bytes = 0;
901  space::DlMallocSpace* space = heap_->GetAllocSpace();
902
903  // If we don't swap bitmaps then newly allocated Weaks go into the live bitmap but not the
904  // mark bitmap, resulting in occasional frees of Weaks which are still in use.
905  SweepSystemWeaksArray(allocations);
906
907  timings_.StartSplit("Process allocation stack");
908  // Newly allocated objects MUST be in the alloc space and those are the only objects which we are
909  // going to free.
910  accounting::SpaceBitmap* live_bitmap = space->GetLiveBitmap();
911  accounting::SpaceBitmap* mark_bitmap = space->GetMarkBitmap();
912  space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
913  accounting::SpaceSetMap* large_live_objects = large_object_space->GetLiveObjects();
914  accounting::SpaceSetMap* large_mark_objects = large_object_space->GetMarkObjects();
915  if (swap_bitmaps) {
916    std::swap(live_bitmap, mark_bitmap);
917    std::swap(large_live_objects, large_mark_objects);
918  }
919
920  size_t freed_large_objects = 0;
921  size_t count = allocations->Size();
922  Object** objects = const_cast<Object**>(allocations->Begin());
923  Object** out = objects;
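  // Unmarked alloc-space objects are compacted towards the front of the stack via 'out' so that
  // they can be released with a single bulk FreeList call below.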
924
925  // Empty the allocation stack.
926  Thread* self = Thread::Current();
927  for (size_t i = 0; i < count; ++i) {
928    Object* obj = objects[i];
929    // The allocation stack should only contain objects in the AllocSpace/LargeObjectSpace.
930    if (LIKELY(mark_bitmap->HasAddress(obj))) {
931      if (!mark_bitmap->Test(obj)) {
932        // Don't bother un-marking since we clear the mark bitmap anyway.
933        *(out++) = obj;
934      }
935    } else if (!large_mark_objects->Test(obj)) {
936      ++freed_large_objects;
937      freed_bytes += large_object_space->Free(self, obj);
938    }
939  }
940  CHECK_EQ(count, allocations->Size());
941  timings_.EndSplit();
942
943  timings_.StartSplit("FreeList");
944  size_t freed_objects = out - objects;
945  freed_bytes += space->FreeList(self, freed_objects, objects);
946  VLOG(heap) << "Freed " << freed_objects << "/" << count
947             << " objects with size " << PrettySize(freed_bytes);
948  heap_->RecordFree(freed_objects + freed_large_objects, freed_bytes);
949  freed_objects_.fetch_add(freed_objects);
950  freed_bytes_.fetch_add(freed_bytes);
951  timings_.EndSplit();
952
953  timings_.StartSplit("ResetStack");
954  allocations->Reset();
955  timings_.EndSplit();
956}
957
958void MarkSweep::Sweep(bool swap_bitmaps) {
959  DCHECK(mark_stack_->IsEmpty());
960
961  // If we don't swap bitmaps then newly allocated Weaks go into the live bitmap but not the
962  // mark bitmap, resulting in occasional frees of Weaks which are still in use.
963  SweepSystemWeaks();
964
965  const bool partial = (GetGcType() == kGcTypePartial);
966  SweepCallbackContext scc;
967  scc.mark_sweep = this;
968  scc.self = Thread::Current();
969  const std::vector<space::ContinuousSpace*>& spaces = GetHeap()->GetContinuousSpaces();
970  // TODO: C++0x
971  typedef std::vector<space::ContinuousSpace*>::const_iterator It;
972  for (It it = spaces.begin(), end = spaces.end(); it != end; ++it) {
973    space::ContinuousSpace* space = *it;
974    // We always sweep always-collect spaces.
975    bool sweep_space = (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyAlwaysCollect);
976    if (!partial && !sweep_space) {
977      // We sweep full-collect spaces when the GC isn't a partial GC (i.e. it's a full GC).
978      sweep_space = (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyFullCollect);
979    }
980    if (sweep_space) {
981      uintptr_t begin = reinterpret_cast<uintptr_t>(space->Begin());
982      uintptr_t end = reinterpret_cast<uintptr_t>(space->End());
983      scc.space = space->AsDlMallocSpace();
984      accounting::SpaceBitmap* live_bitmap = space->GetLiveBitmap();
985      accounting::SpaceBitmap* mark_bitmap = space->GetMarkBitmap();
986      if (swap_bitmaps) {
987        std::swap(live_bitmap, mark_bitmap);
988      }
989      if (!space->IsZygoteSpace()) {
990        timings_.StartSplit("SweepAllocSpace");
991        // Bitmaps are pre-swapped for optimization which enables sweeping with the heap unlocked.
992        accounting::SpaceBitmap::SweepWalk(*live_bitmap, *mark_bitmap, begin, end,
993                                           &SweepCallback, reinterpret_cast<void*>(&scc));
994        timings_.EndSplit();
995      } else {
996        timings_.StartSplit("SweepZygote");
997        // Zygote sweep takes care of dirtying cards and clearing live bits, does not free actual
998        // memory.
999        accounting::SpaceBitmap::SweepWalk(*live_bitmap, *mark_bitmap, begin, end,
1000                                           &ZygoteSweepCallback, reinterpret_cast<void*>(&scc));
1001        timings_.EndSplit();
1002      }
1003    }
1004  }
1005
1006  timings_.StartSplit("SweepLargeObjects");
1007  SweepLargeObjects(swap_bitmaps);
1008  timings_.EndSplit();
1009}
1010
1011void MarkSweep::SweepLargeObjects(bool swap_bitmaps) {
1012  // Sweep large objects
1013  space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
1014  accounting::SpaceSetMap* large_live_objects = large_object_space->GetLiveObjects();
1015  accounting::SpaceSetMap* large_mark_objects = large_object_space->GetMarkObjects();
1016  if (swap_bitmaps) {
1017    std::swap(large_live_objects, large_mark_objects);
1018  }
1019  accounting::SpaceSetMap::Objects& live_objects = large_live_objects->GetObjects();
1020  // O(n*log(n)) but hopefully there are not too many large objects.
1021  size_t freed_objects = 0;
1022  size_t freed_bytes = 0;
1023  Thread* self = Thread::Current();
1024  // TODO: C++0x
1025  typedef accounting::SpaceSetMap::Objects::iterator It;
1026  for (It it = live_objects.begin(), end = live_objects.end(); it != end; ++it) {
1027    if (!large_mark_objects->Test(*it)) {
1028      freed_bytes += large_object_space->Free(self, const_cast<Object*>(*it));
1029      ++freed_objects;
1030    }
1031  }
1032  freed_objects_.fetch_add(freed_objects);
1033  freed_bytes_.fetch_add(freed_bytes);
1034  GetHeap()->RecordFree(freed_objects, freed_bytes);
1035}
1036
1037void MarkSweep::CheckReference(const Object* obj, const Object* ref, MemberOffset offset, bool is_static) {
1038  const std::vector<space::ContinuousSpace*>& spaces = GetHeap()->GetContinuousSpaces();
1039  // TODO: C++0x
1040  typedef std::vector<space::ContinuousSpace*>::const_iterator It;
1041  for (It it = spaces.begin(), end = spaces.end(); it != end; ++it) {
1042    space::ContinuousSpace* space = *it;
1043    if (space->IsDlMallocSpace() && space->Contains(ref)) {
1044      DCHECK(IsMarked(obj));
1045
1046      bool is_marked = IsMarked(ref);
1047      if (!is_marked) {
1048        LOG(INFO) << *space;
1049        LOG(WARNING) << (is_static ? "Static ref'" : "Instance ref'") << PrettyTypeOf(ref)
1050                     << "' (" << reinterpret_cast<const void*>(ref) << ") in '" << PrettyTypeOf(obj)
1051                     << "' (" << reinterpret_cast<const void*>(obj) << ") at offset "
1052                     << reinterpret_cast<void*>(offset.Int32Value()) << " wasn't marked";
1053
1054        const Class* klass = is_static ? obj->AsClass() : obj->GetClass();
1055        DCHECK(klass != NULL);
1056        const ObjectArray<Field>* fields = is_static ? klass->GetSFields() : klass->GetIFields();
1057        DCHECK(fields != NULL);
1058        bool found = false;
1059        for (int32_t i = 0; i < fields->GetLength(); ++i) {
1060          const Field* cur = fields->Get(i);
1061          if (cur->GetOffset().Int32Value() == offset.Int32Value()) {
1062            LOG(WARNING) << "Field referencing the alloc space was " << PrettyField(cur);
1063            found = true;
1064            break;
1065          }
1066        }
1067        if (!found) {
1068          LOG(WARNING) << "Could not find field in object alloc space with offset " << offset.Int32Value();
1069        }
1070
1071        bool obj_marked = heap_->GetCardTable()->IsDirty(obj);
1072        if (!obj_marked) {
1073          LOG(WARNING) << "Object '" << PrettyTypeOf(obj) << "' "
1074                       << "(" << reinterpret_cast<const void*>(obj) << ") contains references to "
1075                       << "the alloc space, but wasn't card marked";
1076        }
1077      }
1078    }
1079    break;
1080  }
1081}
1082
1083// Process the "referent" field in a java.lang.ref.Reference.  If the
1084// referent has not yet been marked, put it on the appropriate list in
1085// the gcHeap for later processing.
1086void MarkSweep::DelayReferenceReferent(Object* obj) {
1087  DCHECK(obj != NULL);
1088  Class* klass = obj->GetClass();
1089  DCHECK(klass != NULL);
1090  DCHECK(klass->IsReferenceClass());
1091  Object* pending = obj->GetFieldObject<Object*>(heap_->GetReferencePendingNextOffset(), false);
1092  Object* referent = heap_->GetReferenceReferent(obj);
1093  if (kCountJavaLangRefs) {
1094    ++reference_count_;
1095  }
1096  if (pending == NULL && referent != NULL && !IsMarked(referent)) {
1097    Object** list = NULL;
1098    if (klass->IsSoftReferenceClass()) {
1099      list = &soft_reference_list_;
1100    } else if (klass->IsWeakReferenceClass()) {
1101      list = &weak_reference_list_;
1102    } else if (klass->IsFinalizerReferenceClass()) {
1103      list = &finalizer_reference_list_;
1104    } else if (klass->IsPhantomReferenceClass()) {
1105      list = &phantom_reference_list_;
1106    }
1107    DCHECK(list != NULL) << PrettyClass(klass) << " " << std::hex << klass->GetAccessFlags();
1108    // TODO: One lock per list?
1109    heap_->EnqueuePendingReference(obj, list);
1110  }
1111}
1112
1113void MarkSweep::ScanRoot(const Object* obj) {
1114  ScanObject(obj);
1115}
1116
1117class MarkObjectVisitor {
1118 public:
1119  explicit MarkObjectVisitor(MarkSweep* const mark_sweep) : mark_sweep_(mark_sweep) {}
1120
1121  // TODO: Fix this when annotalysis works with visitors.
1122  void operator()(const Object* /* obj */, const Object* ref, const MemberOffset& /* offset */,
1123                  bool /* is_static */) const
1124      NO_THREAD_SAFETY_ANALYSIS {
1125    if (kDebugLocking) {
1126      Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
1127      Locks::heap_bitmap_lock_->AssertExclusiveHeld(Thread::Current());
1128    }
1129    mark_sweep_->MarkObject(ref);
1130  }
1131
1132 private:
1133  MarkSweep* const mark_sweep_;
1134};
1135
1136// Scans an object reference.  Determines the type of the reference
1137// and dispatches to a specialized scanning routine.
1138void MarkSweep::ScanObject(const Object* obj) {
1139  MarkObjectVisitor visitor(this);
1140  ScanObjectVisit(obj, visitor);
1141}
1142
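// A chunk of parallel marking work: Run() scans the objects in data_, and newly marked references
// are accumulated into an output chunk which is enqueued as a new task for the thread pool.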
1143class MarkStackChunk : public Task {
1144 public:
1145  MarkStackChunk(ThreadPool* thread_pool, MarkSweep* mark_sweep, Object** begin, Object** end)
1146      : mark_sweep_(mark_sweep),
1147        thread_pool_(thread_pool),
1148        index_(0),
1149        length_(0),
1150        output_(NULL) {
1151    length_ = end - begin;
1152    if (begin != end) {
1153      // Cost not significant since we only do this for the initial set of mark stack chunks.
1154      memcpy(data_, begin, length_ * sizeof(*begin));
1155    }
1156    if (kCountTasks) {
1157      ++mark_sweep_->work_chunks_created_;
1158    }
1159  }
1160
1161  ~MarkStackChunk() {
1162    DCHECK(output_ == NULL || output_->length_ == 0);
1163    DCHECK_GE(index_, length_);
1164    delete output_;
1165    if (kCountTasks) {
1166      ++mark_sweep_->work_chunks_deleted_;
1167    }
1168  }
1169
1170  MarkSweep* const mark_sweep_;
1171  ThreadPool* const thread_pool_;
1172  static const size_t max_size = 1 * KB;
1173  // Index of which object we are scanning. Only needs to be atomic if we are doing work stealing.
1174  size_t index_;
1175  // Input / output mark stack. We add newly marked references to data_ until length reaches
1176  // max_size. This is an optimization so that fewer tasks are created.
1177  // TODO: Investigate using a bounded buffer FIFO.
1178  Object* data_[max_size];
1179  // How many elements in data_ we need to scan.
1180  size_t length_;
1181  // Output block: newly marked references get added to the output block so that another thread can
1182  // scan them.
1183  MarkStackChunk* output_;
1184
1185  class MarkObjectParallelVisitor {
1186   public:
1187    explicit MarkObjectParallelVisitor(MarkStackChunk* chunk_task) : chunk_task_(chunk_task) {}
1188
1189    void operator()(const Object* /* obj */, const Object* ref,
1190                    const MemberOffset& /* offset */, bool /* is_static */) const {
1191      if (ref != NULL && chunk_task_->mark_sweep_->MarkObjectParallel(ref)) {
1192        chunk_task_->MarkStackPush(ref);
1193      }
1194    }
1195
1196   private:
1197    MarkStackChunk* const chunk_task_;
1198  };
1199
1200  // Push an object into the block.
1201  // No need to use an atomic ++ since only one thread writes to an output block at any
1202  // given time.
1203  void Push(Object* obj) {
1204    CHECK(obj != NULL);
1205    data_[length_++] = obj;
1206  }
1207
1208  void MarkStackPush(const Object* obj) {
1209    if (static_cast<size_t>(length_) < max_size) {
1210      Push(const_cast<Object*>(obj));
1211    } else {
1212      // Internal (thread-local) buffer is full, push to a new buffer instead.
1213      if (UNLIKELY(output_ == NULL)) {
1214        AllocateOutputChunk();
1215      } else if (UNLIKELY(static_cast<size_t>(output_->length_) == max_size)) {
1216        // Output block is full, queue it up for processing and obtain a new block.
1217        EnqueueOutput();
1218        AllocateOutputChunk();
1219      }
1220      output_->Push(const_cast<Object*>(obj));
1221    }
1222  }
1223
1224  void ScanObject(Object* obj) {
1225    mark_sweep_->ScanObjectVisit(obj, MarkObjectParallelVisitor(this));
1226  }
1227
1228  void EnqueueOutput() {
1229    if (output_ != NULL) {
1230      uint64_t start = 0;
1231      if (kMeasureOverhead) {
1232        start = NanoTime();
1233      }
1234      thread_pool_->AddTask(Thread::Current(), output_);
1235      output_ = NULL;
1236      if (kMeasureOverhead) {
1237        mark_sweep_->overhead_time_.fetch_add(NanoTime() - start);
1238      }
1239    }
1240  }
1241
1242  void AllocateOutputChunk() {
1243    uint64_t start = 0;
1244    if (kMeasureOverhead) {
1245      start = NanoTime();
1246    }
1247    output_ = new MarkStackChunk(thread_pool_, mark_sweep_, NULL, NULL);
1248    if (kMeasureOverhead) {
1249      mark_sweep_->overhead_time_.fetch_add(NanoTime() - start);
1250    }
1251  }
1252
1253  void Finalize() {
1254    EnqueueOutput();
1255    delete this;
1256  }
1257
1258  // Scans all of the objects
1259  virtual void Run(Thread* self) {
1260    size_t index;
1261    while ((index = index_++) < length_) {
1262      if (kUseMarkStackPrefetch) {
1263        static const size_t prefetch_look_ahead = 1;
1264        __builtin_prefetch(data_[std::min(index + prefetch_look_ahead, length_ - 1)]);
1265      }
1266      Object* obj = data_[index];
1267      DCHECK(obj != NULL);
1268      ScanObject(obj);
1269    }
1270  }
1271};
1272
1273void MarkSweep::ProcessMarkStackParallel() {
1274  CHECK(kDisableFinger) << "parallel mark stack processing cannot work when finger is enabled";
1275  Thread* self = Thread::Current();
1276  ThreadPool* thread_pool = GetHeap()->GetThreadPool();
1277  // Split the current mark stack up into work tasks.
1278  const size_t num_threads = thread_pool->GetThreadCount();
1279  const size_t stack_size = mark_stack_->Size();
1280  const size_t chunk_size =
1281      std::min((stack_size + num_threads - 1) / num_threads,
1282               static_cast<size_t>(MarkStackChunk::max_size));
1283  size_t index = 0;
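  // Create at least one task per worker thread; if the stack holds more than
  // num_threads * chunk_size objects, keep creating tasks until the whole stack is covered.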
1284  for (size_t i = 0; i < num_threads || index < stack_size; ++i) {
1285    Object** begin = &mark_stack_->Begin()[std::min(stack_size, index)];
1286    Object** end = &mark_stack_->Begin()[std::min(stack_size, index + chunk_size)];
1287    index += chunk_size;
1288    thread_pool->AddTask(self, new MarkStackChunk(thread_pool, this, begin, end));
1289  }
1290  thread_pool->StartWorkers(self);
1291  thread_pool->Wait(self, true, true);
1292  mark_stack_->Reset();
1293  // LOG(INFO) << "Idle wait time " << PrettyDuration(thread_pool->GetWaitTime());
1294  CHECK_EQ(work_chunks_created_, work_chunks_deleted_) << " some of the work chunks were leaked";
1295}
1296
1297// Scan anything that's on the mark stack.
1298void MarkSweep::ProcessMarkStack() {
1299  ThreadPool* thread_pool = GetHeap()->GetThreadPool();
1300  timings_.StartSplit("ProcessMarkStack");
1301  if (kParallelMarkStack && thread_pool != NULL && thread_pool->GetThreadCount() > 0) {
1302    ProcessMarkStackParallel();
1303    timings_.EndSplit();
1304    return;
1305  }
1306
1307  if (kUseMarkStackPrefetch) {
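    // Pop objects into a small ring buffer and prefetch them, so that each object has been
    // prefetched a few iterations before it is actually scanned.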
1308    const size_t fifo_size = 4;
1309    const size_t fifo_mask = fifo_size - 1;
1310    const Object* fifo[fifo_size];
1311    for (size_t i = 0; i < fifo_size; ++i) {
1312      fifo[i] = NULL;
1313    }
1314    size_t fifo_pos = 0;
1315    size_t fifo_count = 0;
1316    for (;;) {
1317      const Object* obj = fifo[fifo_pos & fifo_mask];
1318      if (obj != NULL) {
1319        ScanObject(obj);
1320        fifo[fifo_pos & fifo_mask] = NULL;
1321        --fifo_count;
1322      }
1323
1324      if (!mark_stack_->IsEmpty()) {
1325        const Object* obj = mark_stack_->PopBack();
1326        DCHECK(obj != NULL);
1327        fifo[fifo_pos & fifo_mask] = obj;
1328        __builtin_prefetch(obj);
1329        fifo_count++;
1330      }
1331      fifo_pos++;
1332
1333      if (!fifo_count) {
1334        CHECK(mark_stack_->IsEmpty()) << mark_stack_->Size();
1335        break;
1336      }
1337    }
1338  } else {
1339    while (!mark_stack_->IsEmpty()) {
1340      const Object* obj = mark_stack_->PopBack();
1341      DCHECK(obj != NULL);
1342      ScanObject(obj);
1343    }
1344  }
1345  timings_.EndSplit();
1346}
1347
1348// Walks the reference list marking any references subject to the
1349// reference clearing policy.  References with a black referent are
1350// removed from the list.  References with white referents biased
1351// toward saving are blackened and also removed from the list.
1352void MarkSweep::PreserveSomeSoftReferences(Object** list) {
1353  DCHECK(list != NULL);
1354  Object* clear = NULL;
1355  size_t counter = 0;
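  // Roughly every other white soft referent is preserved (marked) via 'counter'; the remainder
  // are re-queued on 'clear' and will be cleared by ClearWhiteReferences later.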
1356
1357  DCHECK(mark_stack_->IsEmpty());
1358
1359  timings_.StartSplit("PreserveSomeSoftReferences");
1360  while (*list != NULL) {
1361    Object* ref = heap_->DequeuePendingReference(list);
1362    Object* referent = heap_->GetReferenceReferent(ref);
1363    if (referent == NULL) {
1364      // Referent was cleared by the user during marking.
1365      continue;
1366    }
1367    bool is_marked = IsMarked(referent);
1368    if (!is_marked && ((++counter) & 1)) {
1369      // Referent is white and biased toward saving, mark it.
1370      MarkObject(referent);
1371      is_marked = true;
1372    }
1373    if (!is_marked) {
1374      // Referent is white, queue it for clearing.
1375      heap_->EnqueuePendingReference(ref, &clear);
1376    }
1377  }
1378  *list = clear;
1379  timings_.EndSplit();
1380
1381  // Restart the mark with the newly black references added to the
1382  // root set.
1383  ProcessMarkStack();
1384}
1385
1386inline bool MarkSweep::IsMarked(const Object* object) const
1387    SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
1388  if (IsImmune(object)) {
1389    return true;
1390  }
1391  DCHECK(current_mark_bitmap_ != NULL);
1392  if (current_mark_bitmap_->HasAddress(object)) {
1393    return current_mark_bitmap_->Test(object);
1394  }
1395  return heap_->GetMarkBitmap()->Test(object);
1396}
1397
1398
1399// Unlink the reference list, clearing reference objects with white
1400// referents.  Cleared references registered to a reference queue are
1401// scheduled for appending by the heap worker thread.
1402void MarkSweep::ClearWhiteReferences(Object** list) {
1403  DCHECK(list != NULL);
1404  while (*list != NULL) {
1405    Object* ref = heap_->DequeuePendingReference(list);
1406    Object* referent = heap_->GetReferenceReferent(ref);
1407    if (referent != NULL && !IsMarked(referent)) {
1408      // Referent is white, clear it.
1409      heap_->ClearReferenceReferent(ref);
1410      if (heap_->IsEnqueuable(ref)) {
1411        heap_->EnqueueReference(ref, &cleared_reference_list_);
1412      }
1413    }
1414  }
1415  DCHECK(*list == NULL);
1416}
1417
1418// Enqueues finalizer references with white referents.  White
1419// referents are blackened, moved to the zombie field, and the
1420// referent field is cleared.
1421void MarkSweep::EnqueueFinalizerReferences(Object** list) {
1422  DCHECK(list != NULL);
1423  timings_.StartSplit("EnqueueFinalizerReferences");
1424  MemberOffset zombie_offset = heap_->GetFinalizerReferenceZombieOffset();
1425  bool has_enqueued = false;
1426  while (*list != NULL) {
1427    Object* ref = heap_->DequeuePendingReference(list);
1428    Object* referent = heap_->GetReferenceReferent(ref);
1429    if (referent != NULL && !IsMarked(referent)) {
1430      MarkObject(referent);
1431      // If the referent is non-null the reference must be enqueuable.
1432      DCHECK(heap_->IsEnqueuable(ref));
1433      ref->SetFieldObject(zombie_offset, referent, false);
1434      heap_->ClearReferenceReferent(ref);
1435      heap_->EnqueueReference(ref, &cleared_reference_list_);
1436      has_enqueued = true;
1437    }
1438  }
1439  timings_.EndSplit();
1440  if (has_enqueued) {
1441    ProcessMarkStack();
1442  }
1443  DCHECK(*list == NULL);
1444}
1445
1446// Process reference class instances and schedule finalizations.
1447void MarkSweep::ProcessReferences(Object** soft_references, bool clear_soft,
1448                                  Object** weak_references,
1449                                  Object** finalizer_references,
1450                                  Object** phantom_references) {
1451  DCHECK(soft_references != NULL);
1452  DCHECK(weak_references != NULL);
1453  DCHECK(finalizer_references != NULL);
1454  DCHECK(phantom_references != NULL);
1455
1456  // Unless we are in the zygote or required to clear soft references, preserve
1457  // some soft references with white referents.
1458  if (!clear_soft && !Runtime::Current()->IsZygote()) {
1459    PreserveSomeSoftReferences(soft_references);
1460  }
1461
1462  timings_.StartSplit("ProcessReferences");
1463  // Clear all remaining soft and weak references with white
1464  // referents.
1465  ClearWhiteReferences(soft_references);
1466  ClearWhiteReferences(weak_references);
1467  timings_.EndSplit();
1468
1469  // Preserve all white objects with finalize methods and schedule
1470  // them for finalization.
1471  EnqueueFinalizerReferences(finalizer_references);
1472
1473  timings_.StartSplit("ProcessReferences");
1474  // Clear all f-reachable soft and weak references with white
1475  // referents.
1476  ClearWhiteReferences(soft_references);
1477  ClearWhiteReferences(weak_references);
1478
1479  // Clear all phantom references with white referents.
1480  ClearWhiteReferences(phantom_references);
1481
1482  // At this point all reference lists should be empty.
1483  DCHECK(*soft_references == NULL);
1484  DCHECK(*weak_references == NULL);
1485  DCHECK(*finalizer_references == NULL);
1486  DCHECK(*phantom_references == NULL);
1487  timings_.EndSplit();
1488}
1489
1490void MarkSweep::UnBindBitmaps() {
1491  timings_.StartSplit("UnBindBitmaps");
1492  const std::vector<space::ContinuousSpace*>& spaces = GetHeap()->GetContinuousSpaces();
1493  // TODO: C++0x
1494  typedef std::vector<space::ContinuousSpace*>::const_iterator It;
1495  for (It it = spaces.begin(), end = spaces.end(); it != end; ++it) {
1496    space::ContinuousSpace* space = *it;
1497    if (space->IsDlMallocSpace()) {
1498      space::DlMallocSpace* alloc_space = space->AsDlMallocSpace();
1499      if (alloc_space->temp_bitmap_.get() != NULL) {
1500        // At this point, the temp_bitmap holds our old mark bitmap.
1501        accounting::SpaceBitmap* new_bitmap = alloc_space->temp_bitmap_.release();
1502        GetHeap()->GetMarkBitmap()->ReplaceBitmap(alloc_space->mark_bitmap_.get(), new_bitmap);
1503        CHECK_EQ(alloc_space->mark_bitmap_.release(), alloc_space->live_bitmap_.get());
1504        alloc_space->mark_bitmap_.reset(new_bitmap);
1505        DCHECK(alloc_space->temp_bitmap_.get() == NULL);
1506      }
1507    }
1508  }
1509  timings_.EndSplit();
1510}
1511
1512void MarkSweep::FinishPhase() {
1513  // Can't enqueue references if we hold the mutator lock.
1514  Object* cleared_references = GetClearedReferences();
1515  Heap* heap = GetHeap();
1516  heap->EnqueueClearedReferences(&cleared_references);
1517
1518  heap->PostGcVerification(this);
1519
1520  timings_.StartSplit("GrowForUtilization");
1521  heap->GrowForUtilization(GetGcType(), GetDurationNs());
1522  timings_.EndSplit();
1523
1524  timings_.StartSplit("RequestHeapTrim");
1525  heap->RequestHeapTrim();
1526  timings_.EndSplit();
1527
1528  // Update the cumulative statistics
1529  total_time_ns_ += GetDurationNs();
1530  total_paused_time_ns_ += std::accumulate(GetPauseTimes().begin(), GetPauseTimes().end(), 0,
1531                                           std::plus<uint64_t>());
1532  total_freed_objects_ += GetFreedObjects();
1533  total_freed_bytes_ += GetFreedBytes();
1534
1535  // Ensure that the mark stack is empty.
1536  CHECK(mark_stack_->IsEmpty());
1537
1538  if (kCountScannedTypes) {
1539    VLOG(gc) << "MarkSweep scanned classes=" << class_count_ << " arrays=" << array_count_
1540             << " other=" << other_count_;
1541  }
1542
1543  if (kCountTasks) {
1544    VLOG(gc) << "Total number of work chunks allocated: " << work_chunks_created_;
1545  }
1546
1547  if (kMeasureOverhead) {
1548    VLOG(gc) << "Overhead time " << PrettyDuration(overhead_time_);
1549  }
1550
1551  if (kProfileLargeObjects) {
1552    VLOG(gc) << "Large objects tested " << large_object_test_ << " marked " << large_object_mark_;
1553  }
1554
1555  if (kCountClassesMarked) {
1556    VLOG(gc) << "Classes marked " << classes_marked_;
1557  }
1558
1559  if (kCountJavaLangRefs) {
1560    VLOG(gc) << "References scanned " << reference_count_;
1561  }
1562
1563  // Update the cumulative loggers.
1564  cumulative_timings_.Start();
1565  cumulative_timings_.AddLogger(timings_);
1566  cumulative_timings_.End();
1567
1568  // Clear all of the spaces' mark bitmaps.
1569  const std::vector<space::ContinuousSpace*>& spaces = GetHeap()->GetContinuousSpaces();
1570  // TODO: C++0x
1571  typedef std::vector<space::ContinuousSpace*>::const_iterator It;
1572  for (It it = spaces.begin(), end = spaces.end(); it != end; ++it) {
1573    space::ContinuousSpace* space = *it;
1574    if (space->GetGcRetentionPolicy() != space::kGcRetentionPolicyNeverCollect) {
1575      space->GetMarkBitmap()->Clear();
1576    }
1577  }
1578  mark_stack_->Reset();
1579
1580  // Reset the marked large objects.
1581  space::LargeObjectSpace* large_objects = GetHeap()->GetLargeObjectsSpace();
1582  large_objects->GetMarkObjects()->Clear();
1583}
1584
1585}  // namespace collector
1586}  // namespace gc
1587}  // namespace art
1588