// garbage_collector.cc revision b2f9936cab87a187f078187c22d9b29d4a188a62
1/*
2 * Copyright (C) 2012 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 *      http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17#define ATRACE_TAG ATRACE_TAG_DALVIK
18
19#include <stdio.h>
20#include <cutils/trace.h>
21
22#include "garbage_collector.h"
23
24#include "base/histogram-inl.h"
25#include "base/logging.h"
26#include "base/mutex-inl.h"
27#include "gc/accounting/heap_bitmap.h"
28#include "gc/space/large_object_space.h"
29#include "gc/space/space-inl.h"
30#include "thread-inl.h"
31#include "thread_list.h"
32
33namespace art {
34namespace gc {
35namespace collector {
36
// Constructs a collector bound to |heap|. |name| labels this collector's
// timing logger, cumulative timings, and pause histogram. All cumulative
// counters start zeroed via ResetCumulativeStatistics().
GarbageCollector::GarbageCollector(Heap* heap, const std::string& name)
    : heap_(heap),
      name_(name),
      clear_soft_references_(false),
      verbose_(VLOG_IS_ON(heap)),  // Verbose phase timing follows the "heap" vlog flag.
      duration_ns_(0),
      timings_(name_.c_str(), true, verbose_),
      // Pause durations are recorded in microseconds (see Run()), bucketed here.
      pause_histogram_((name_ + " paused").c_str(), kPauseBucketSize, kPauseBucketCount),
      cumulative_timings_(name) {
  ResetCumulativeStatistics();
}
48
// Default dirty-object pass for concurrent collectors. Returning true tells
// Run()'s pause loop that no further suspended passes are needed; overrides
// may return false to request another iteration. Must only be called on
// concurrent collectors (enforced by the DCHECK).
bool GarbageCollector::HandleDirtyObjectsPhase() {
  DCHECK(IsConcurrent());
  return true;
}
53
54void GarbageCollector::RegisterPause(uint64_t nano_length) {
55  pause_times_.push_back(nano_length);
56}
57
// Zeroes all statistics accumulated across GC runs: the cumulative phase
// timings, the pause-time histogram, and the total time / freed-object /
// freed-byte counters. Per-run stats are reset separately in Run().
void GarbageCollector::ResetCumulativeStatistics() {
  cumulative_timings_.Reset();
  pause_histogram_.Reset();
  total_time_ns_ = 0;
  total_freed_objects_ = 0;
  total_freed_bytes_ = 0;
}
65
// Drives one complete garbage collection: resets per-run stats, runs the
// collector's phases (with the appropriate thread-suspension protocol for
// concurrent vs. non-concurrent collectors), then records duration and pause
// times into the cumulative statistics.
void GarbageCollector::Run(bool clear_soft_references) {
  ThreadList* thread_list = Runtime::Current()->GetThreadList();
  uint64_t start_time = NanoTime();
  pause_times_.clear();
  duration_ns_ = 0;
  clear_soft_references_ = clear_soft_references;

  // Reset stats.
  freed_bytes_ = 0;
  freed_large_object_bytes_ = 0;
  freed_objects_ = 0;
  freed_large_objects_ = 0;

  InitializePhase();

  if (!IsConcurrent()) {
    // Pause is the entire length of the GC.
    uint64_t pause_start = NanoTime();
    ATRACE_BEGIN("Application threads suspended");
    thread_list->SuspendAll();
    MarkingPhase();
    ReclaimPhase();
    // Revoke thread-local buffers before mutators resume so the freed memory
    // is accounted for while the world is still stopped.
    GetHeap()->RevokeAllThreadLocalBuffers();
    thread_list->ResumeAll();
    ATRACE_END();
    RegisterPause(NanoTime() - pause_start);
  } else {
    Thread* self = Thread::Current();
    {
      // Concurrent marking runs with mutators live; holding the mutator lock
      // shared keeps a suspend-all from starting mid-phase.
      ReaderMutexLock mu(self, *Locks::mutator_lock_);
      MarkingPhase();
    }
    bool done = false;
    // Repeatedly stop the world to process objects dirtied during concurrent
    // marking, until the subclass reports the phase complete.
    while (!done) {
      uint64_t pause_start = NanoTime();
      ATRACE_BEGIN("Suspending mutator threads");
      thread_list->SuspendAll();
      ATRACE_END();
      ATRACE_BEGIN("All mutator threads suspended");
      done = HandleDirtyObjectsPhase();
      if (done) {
        // Only revoke on the final pass, while threads are still suspended.
        GetHeap()->RevokeAllThreadLocalBuffers();
      }
      ATRACE_END();
      // Sample the pause end before ResumeAll so resume overhead is excluded.
      uint64_t pause_end = NanoTime();
      ATRACE_BEGIN("Resuming mutator threads");
      thread_list->ResumeAll();
      ATRACE_END();
      RegisterPause(pause_end - pause_start);
    }
    {
      // Reclamation also runs concurrently with mutators.
      ReaderMutexLock mu(self, *Locks::mutator_lock_);
      ReclaimPhase();
    }
  }
  FinishPhase();
  uint64_t end_time = NanoTime();
  duration_ns_ = end_time - start_time;
  total_time_ns_ += GetDurationNs();
  // Histogram buckets are in microseconds, hence the /1000 of nanosecond pauses.
  for (uint64_t pause_time : pause_times_) {
    pause_histogram_.AddValue(pause_time / 1000);
  }
}
129
130void GarbageCollector::SwapBitmaps() {
131  // Swap the live and mark bitmaps for each alloc space. This is needed since sweep re-swaps
132  // these bitmaps. The bitmap swapping is an optimization so that we do not need to clear the live
133  // bits of dead objects in the live bitmap.
134  const GcType gc_type = GetGcType();
135  for (const auto& space : GetHeap()->GetContinuousSpaces()) {
136    // We never allocate into zygote spaces.
137    if (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyAlwaysCollect ||
138        (gc_type == kGcTypeFull &&
139         space->GetGcRetentionPolicy() == space::kGcRetentionPolicyFullCollect)) {
140      accounting::SpaceBitmap* live_bitmap = space->GetLiveBitmap();
141      accounting::SpaceBitmap* mark_bitmap = space->GetMarkBitmap();
142      if (live_bitmap != mark_bitmap) {
143        heap_->GetLiveBitmap()->ReplaceBitmap(live_bitmap, mark_bitmap);
144        heap_->GetMarkBitmap()->ReplaceBitmap(mark_bitmap, live_bitmap);
145        space->AsMallocSpace()->SwapBitmaps();
146      }
147    }
148  }
149  for (const auto& disc_space : GetHeap()->GetDiscontinuousSpaces()) {
150    space::LargeObjectSpace* space = down_cast<space::LargeObjectSpace*>(disc_space);
151    accounting::SpaceSetMap* live_set = space->GetLiveObjects();
152    accounting::SpaceSetMap* mark_set = space->GetMarkObjects();
153    heap_->GetLiveBitmap()->ReplaceObjectSet(live_set, mark_set);
154    heap_->GetMarkBitmap()->ReplaceObjectSet(mark_set, live_set);
155    down_cast<space::LargeObjectSpace*>(space)->SwapBitmaps();
156  }
157}
158
159}  // namespace collector
160}  // namespace gc
161}  // namespace art
162