garbage_collector.cc revision d5307ec41c8344be0c32273ec4f574064036187d
/*
 * Copyright (C) 2012 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
16
#define ATRACE_TAG ATRACE_TAG_DALVIK

#include <stdio.h>
#include <cutils/trace.h>

#include "garbage_collector.h"

#include "base/histogram-inl.h"
#include "base/logging.h"
#include "base/mutex-inl.h"
#include "gc/accounting/heap_bitmap.h"
#include "gc/space/large_object_space.h"
#include "gc/space/space-inl.h"
#include "thread-inl.h"
#include "thread_list.h"
32
namespace art {
namespace gc {
namespace collector {

// Constructs a collector bound to |heap|. All cumulative statistics are
// zeroed via ResetCumulativeStatistics(); per-GC state (gc_cause_,
// clear_soft_references_) gets placeholder values that Run() overwrites at
// the start of every collection.
GarbageCollector::GarbageCollector(Heap* heap, const std::string& name)
    : heap_(heap),
      name_(name),
      gc_cause_(kGcCauseForAlloc),
      clear_soft_references_(false),
      verbose_(VLOG_IS_ON(heap)),  // Verbose output follows the "heap" VLOG flag.
      duration_ns_(0),
      timings_(name_.c_str(), true, verbose_),
      // Pause durations are added in microseconds (see Run()), bucketed into
      // kPauseBucketCount buckets of kPauseBucketSize each.
      pause_histogram_((name_ + " paused").c_str(), kPauseBucketSize, kPauseBucketCount),
      cumulative_timings_(name) {
  ResetCumulativeStatistics();
}
49
// Default implementation aborts: only collectors that take a separate pause
// to process dirty objects (the kCollectorTypeCMS path in Run()) need to
// override this.
void GarbageCollector::HandleDirtyObjectsPhase() {
  LOG(FATAL) << "Unreachable";
}
53
// Records one mutator pause of |nano_length| nanoseconds for the current GC
// cycle. The accumulated pauses are folded into pause_histogram_ at the end
// of Run().
void GarbageCollector::RegisterPause(uint64_t nano_length) {
  pause_times_.push_back(nano_length);
}
57
58void GarbageCollector::ResetCumulativeStatistics() {
59  cumulative_timings_.Reset();
60  pause_histogram_.Reset();
61  total_time_ns_ = 0;
62  total_freed_objects_ = 0;
63  total_freed_bytes_ = 0;
64}
65
// Runs one complete garbage collection. Resets the per-GC statistics,
// dispatches to the phase sequence appropriate for the concrete collector
// type, then accumulates the total duration and pause times.
//
// Non-concurrent collectors (MS/SS/GSS) suspend all mutators for the whole
// collection; CMS marks and reclaims with mutators running and only pauses
// around HandleDirtyObjectsPhase().
void GarbageCollector::Run(GcCause gc_cause, bool clear_soft_references) {
  ThreadList* thread_list = Runtime::Current()->GetThreadList();
  Thread* self = Thread::Current();
  uint64_t start_time = NanoTime();
  pause_times_.clear();
  duration_ns_ = 0;
  clear_soft_references_ = clear_soft_references;
  gc_cause_ = gc_cause;

  // Reset stats.
  freed_bytes_ = 0;
  freed_large_object_bytes_ = 0;
  freed_objects_ = 0;
  freed_large_objects_ = 0;

  CollectorType collector_type = GetCollectorType();
  switch (collector_type) {
    case kCollectorTypeMS:      // Fall through.
    case kCollectorTypeSS:      // Fall through.
    case kCollectorTypeGSS: {
      InitializePhase();
      // Pause is the entire length of the GC.
      uint64_t pause_start = NanoTime();
      ATRACE_BEGIN("Application threads suspended");
      // Mutator lock may be already exclusively held when we do garbage collections for changing the
      // current collector / allocator during process state updates.
      if (Locks::mutator_lock_->IsExclusiveHeld(self)) {
        // PreGcRosAllocVerification() is called in Heap::TransitionCollector().
        RevokeAllThreadLocalBuffers();
        MarkingPhase();
        ReclaimPhase();
        // PostGcRosAllocVerification() is called in Heap::TransitionCollector().
      } else {
        // We own the suspension here: stop the world, collect, resume.
        ATRACE_BEGIN("Suspending mutator threads");
        thread_list->SuspendAll();
        ATRACE_END();
        GetHeap()->PreGcRosAllocVerification(&timings_);
        RevokeAllThreadLocalBuffers();
        MarkingPhase();
        ReclaimPhase();
        GetHeap()->PostGcRosAllocVerification(&timings_);
        ATRACE_BEGIN("Resuming mutator threads");
        thread_list->ResumeAll();
        ATRACE_END();
      }
      ATRACE_END();
      RegisterPause(NanoTime() - pause_start);
      FinishPhase();
      break;
    }
    case kCollectorTypeCMS: {
      InitializePhase();
      // Concurrent marking requires mutators to be runnable; the mutator lock
      // must not already be exclusively held here.
      CHECK(!Locks::mutator_lock_->IsExclusiveHeld(self));
      {
        // Mark concurrently, holding the mutator lock shared.
        ReaderMutexLock mu(self, *Locks::mutator_lock_);
        MarkingPhase();
      }
      // Stop-the-world window: re-scan dirty objects, then resume mutators.
      uint64_t pause_start = NanoTime();
      ATRACE_BEGIN("Suspending mutator threads");
      thread_list->SuspendAll();
      ATRACE_END();
      ATRACE_BEGIN("All mutator threads suspended");
      GetHeap()->PreGcRosAllocVerification(&timings_);
      HandleDirtyObjectsPhase();
      RevokeAllThreadLocalBuffers();
      GetHeap()->PostGcRosAllocVerification(&timings_);
      ATRACE_END();
      uint64_t pause_end = NanoTime();
      ATRACE_BEGIN("Resuming mutator threads");
      thread_list->ResumeAll();
      ATRACE_END();
      RegisterPause(pause_end - pause_start);
      {
        // Reclaim runs concurrently with mutators again.
        ReaderMutexLock mu(self, *Locks::mutator_lock_);
        ReclaimPhase();
      }
      FinishPhase();
      break;
    }
    case kCollectorTypeCC: {
      // To be implemented.
      break;
    }
    default: {
      LOG(FATAL) << "Unreachable collector type=" << static_cast<size_t>(collector_type);
      break;
    }
  }

  uint64_t end_time = NanoTime();
  duration_ns_ = end_time - start_time;
  total_time_ns_ += GetDurationNs();
  // Pause times were recorded in nanoseconds; the histogram stores microseconds.
  for (uint64_t pause_time : pause_times_) {
    pause_histogram_.AddValue(pause_time / 1000);
  }
}
162
163void GarbageCollector::SwapBitmaps() {
164  // Swap the live and mark bitmaps for each alloc space. This is needed since sweep re-swaps
165  // these bitmaps. The bitmap swapping is an optimization so that we do not need to clear the live
166  // bits of dead objects in the live bitmap.
167  const GcType gc_type = GetGcType();
168  for (const auto& space : GetHeap()->GetContinuousSpaces()) {
169    // We never allocate into zygote spaces.
170    if (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyAlwaysCollect ||
171        (gc_type == kGcTypeFull &&
172         space->GetGcRetentionPolicy() == space::kGcRetentionPolicyFullCollect)) {
173      accounting::SpaceBitmap* live_bitmap = space->GetLiveBitmap();
174      accounting::SpaceBitmap* mark_bitmap = space->GetMarkBitmap();
175      if (live_bitmap != nullptr && live_bitmap != mark_bitmap) {
176        heap_->GetLiveBitmap()->ReplaceBitmap(live_bitmap, mark_bitmap);
177        heap_->GetMarkBitmap()->ReplaceBitmap(mark_bitmap, live_bitmap);
178        CHECK(space->IsContinuousMemMapAllocSpace());
179        space->AsContinuousMemMapAllocSpace()->SwapBitmaps();
180      }
181    }
182  }
183  for (const auto& disc_space : GetHeap()->GetDiscontinuousSpaces()) {
184    space::LargeObjectSpace* space = down_cast<space::LargeObjectSpace*>(disc_space);
185    accounting::ObjectSet* live_set = space->GetLiveObjects();
186    accounting::ObjectSet* mark_set = space->GetMarkObjects();
187    heap_->GetLiveBitmap()->ReplaceObjectSet(live_set, mark_set);
188    heap_->GetMarkBitmap()->ReplaceObjectSet(mark_set, live_set);
189    down_cast<space::LargeObjectSpace*>(space)->SwapBitmaps();
190  }
191}
192
193}  // namespace collector
194}  // namespace gc
195}  // namespace art
196