garbage_collector.cc revision bbd695c71e0bf518f582e84524e1cdeb3de3896c
/*
 * Copyright (C) 2012 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#define ATRACE_TAG ATRACE_TAG_DALVIK

#include <stdio.h>
#include <cutils/trace.h>

#include "garbage_collector.h"

#include "base/histogram-inl.h"
#include "base/logging.h"
#include "base/mutex-inl.h"
#include "gc/accounting/heap_bitmap.h"
#include "gc/space/large_object_space.h"
#include "gc/space/space-inl.h"
#include "thread-inl.h"
#include "thread_list.h"

namespace art {
namespace gc {
namespace collector {

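// The timing logger, pause histogram, and cumulative timings are all labeled with the collector's
// name, so per-collector statistics can be reported separately.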
GarbageCollector::GarbageCollector(Heap* heap, const std::string& name)
    : heap_(heap),
      name_(name),
      gc_cause_(kGcCauseForAlloc),
      clear_soft_references_(false),
      duration_ns_(0),
      timings_(name_.c_str(), true, VLOG_IS_ON(heap)),
      pause_histogram_((name_ + " paused").c_str(), kPauseBucketSize, kPauseBucketCount),
      cumulative_timings_(name) {
  ResetCumulativeStatistics();
}

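// No-op by default; concrete collectors override this to do work that must run while mutator
// threads are suspended.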
void GarbageCollector::PausePhase() {
}

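// Records the length of a single pause; the recorded values are added to pause_histogram_ at the
// end of Run().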
void GarbageCollector::RegisterPause(uint64_t nano_length) {
  pause_times_.push_back(nano_length);
}

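// Clears the cumulative timings, the pause histogram, and the lifetime freed/time counters.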
void GarbageCollector::ResetCumulativeStatistics() {
  cumulative_timings_.Reset();
  pause_histogram_.Reset();
  total_time_ns_ = 0;
  total_freed_objects_ = 0;
  total_freed_bytes_ = 0;
}

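// Runs a single garbage collection. Which phases execute with mutator threads suspended depends on
// the collector type: mark-sweep, semi-space, and generational semi-space suspend the mutators for
// the entire collection, while concurrent mark-sweep suspends them only around PausePhase().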
void GarbageCollector::Run(GcCause gc_cause, bool clear_soft_references) {
  ThreadList* thread_list = Runtime::Current()->GetThreadList();
  Thread* self = Thread::Current();
  uint64_t start_time = NanoTime();
  pause_times_.clear();
  duration_ns_ = 0;
  clear_soft_references_ = clear_soft_references;
  gc_cause_ = gc_cause;

  // Reset stats.
  freed_bytes_ = 0;
  freed_large_object_bytes_ = 0;
  freed_objects_ = 0;
  freed_large_objects_ = 0;

  CollectorType collector_type = GetCollectorType();
  switch (collector_type) {
    case kCollectorTypeMS:      // Fall through.
    case kCollectorTypeSS:      // Fall through.
    case kCollectorTypeGSS: {
      InitializePhase();
      // Pause is the entire length of the GC.
      uint64_t pause_start = NanoTime();
      ATRACE_BEGIN("Application threads suspended");
      // The mutator lock may already be exclusively held when we do garbage collections for
      // changing the current collector / allocator during process state updates.
      if (Locks::mutator_lock_->IsExclusiveHeld(self)) {
        // PreGcRosAllocVerification() is called in Heap::TransitionCollector().
        RevokeAllThreadLocalBuffers();
        MarkingPhase();
        PausePhase();
        ReclaimPhase();
        // PostGcRosAllocVerification() is called in Heap::TransitionCollector().
      } else {
        ATRACE_BEGIN("Suspending mutator threads");
        thread_list->SuspendAll();
        ATRACE_END();
        GetHeap()->PreGcRosAllocVerification(&timings_);
        RevokeAllThreadLocalBuffers();
        MarkingPhase();
        PausePhase();
        ReclaimPhase();
        GetHeap()->PostGcRosAllocVerification(&timings_);
        ATRACE_BEGIN("Resuming mutator threads");
        thread_list->ResumeAll();
        ATRACE_END();
      }
      ATRACE_END();
      RegisterPause(NanoTime() - pause_start);
      FinishPhase();
      break;
    }
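    // Concurrent mark-sweep: marking and reclamation run while mutator threads hold the mutator
    // lock for reading; only PausePhase() and thread-local buffer revocation happen with all
    // threads suspended.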
    case kCollectorTypeCMS: {
      InitializePhase();
      CHECK(!Locks::mutator_lock_->IsExclusiveHeld(self));
      {
        ReaderMutexLock mu(self, *Locks::mutator_lock_);
        MarkingPhase();
      }
      uint64_t pause_start = NanoTime();
      ATRACE_BEGIN("Suspending mutator threads");
      thread_list->SuspendAll();
      ATRACE_END();
      ATRACE_BEGIN("All mutator threads suspended");
      GetHeap()->PreGcRosAllocVerification(&timings_);
      PausePhase();
      RevokeAllThreadLocalBuffers();
      GetHeap()->PostGcRosAllocVerification(&timings_);
      ATRACE_END();
      uint64_t pause_end = NanoTime();
      ATRACE_BEGIN("Resuming mutator threads");
      thread_list->ResumeAll();
      ATRACE_END();
      RegisterPause(pause_end - pause_start);
      {
        ReaderMutexLock mu(self, *Locks::mutator_lock_);
        ReclaimPhase();
      }
      FinishPhase();
      break;
    }
    case kCollectorTypeCC: {
      // To be implemented.
      break;
    }
    default: {
      LOG(FATAL) << "Unreachable collector type=" << static_cast<size_t>(collector_type);
      break;
    }
  }
  // Add the current timings to the cumulative timings.
  cumulative_timings_.AddLogger(timings_);
  // Update cumulative statistics with how many objects and bytes the GC iteration freed.
  total_freed_objects_ += GetFreedObjects() + GetFreedLargeObjects();
  total_freed_bytes_ += GetFreedBytes() + GetFreedLargeObjectBytes();
  uint64_t end_time = NanoTime();
  duration_ns_ = end_time - start_time;
  total_time_ns_ += GetDurationNs();
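  // Pause times were recorded in nanoseconds; divide by 1000 so the histogram holds microseconds.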
  for (uint64_t pause_time : pause_times_) {
    pause_histogram_.AddValue(pause_time / 1000);
  }
}

void GarbageCollector::SwapBitmaps() {
  // Swap the live and mark bitmaps for each alloc space. This is needed since sweep re-swaps
  // these bitmaps. The bitmap swapping is an optimization so that we do not need to clear the live
  // bits of dead objects in the live bitmap.
  const GcType gc_type = GetGcType();
  for (const auto& space : GetHeap()->GetContinuousSpaces()) {
    // We never allocate into zygote spaces.
    if (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyAlwaysCollect ||
        (gc_type == kGcTypeFull &&
         space->GetGcRetentionPolicy() == space::kGcRetentionPolicyFullCollect)) {
      accounting::ContinuousSpaceBitmap* live_bitmap = space->GetLiveBitmap();
      accounting::ContinuousSpaceBitmap* mark_bitmap = space->GetMarkBitmap();
      if (live_bitmap != nullptr && live_bitmap != mark_bitmap) {
        heap_->GetLiveBitmap()->ReplaceBitmap(live_bitmap, mark_bitmap);
        heap_->GetMarkBitmap()->ReplaceBitmap(mark_bitmap, live_bitmap);
        CHECK(space->IsContinuousMemMapAllocSpace());
        space->AsContinuousMemMapAllocSpace()->SwapBitmaps();
      }
    }
  }
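  // Discontinuous (large object) spaces always swap their live and mark sets, regardless of the
  // GC type.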
  for (const auto& disc_space : GetHeap()->GetDiscontinuousSpaces()) {
    space::LargeObjectSpace* space = disc_space->AsLargeObjectSpace();
    accounting::LargeObjectBitmap* live_set = space->GetLiveBitmap();
    accounting::LargeObjectBitmap* mark_set = space->GetMarkBitmap();
    heap_->GetLiveBitmap()->ReplaceLargeObjectBitmap(live_set, mark_set);
    heap_->GetMarkBitmap()->ReplaceLargeObjectBitmap(mark_set, live_set);
    space->SwapBitmaps();
  }
}

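// Estimated lifetime throughput in bytes freed per second, computed over all collections this
// collector has run.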
uint64_t GarbageCollector::GetEstimatedMeanThroughput() const {
  // Add 1ms to prevent possible division by 0.
  return (total_freed_bytes_ * 1000) / (NsToMs(GetCumulativeTimings().GetTotalNs()) + 1);
}

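// Estimated throughput of the most recent collection only, in bytes freed per second.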
uint64_t GarbageCollector::GetEstimatedLastIterationThroughput() const {
  // Add 1ms to prevent possible division by 0.
  return (static_cast<uint64_t>(freed_bytes_) * 1000) / (NsToMs(GetDurationNs()) + 1);
}

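// Clears all accumulated measurements; currently identical in effect to
// ResetCumulativeStatistics().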
void GarbageCollector::ResetMeasurements() {
  cumulative_timings_.Reset();
  pause_histogram_.Reset();
  total_time_ns_ = 0;
  total_freed_objects_ = 0;
  total_freed_bytes_ = 0;
}

}  // namespace collector
}  // namespace gc
}  // namespace art