garbage_collector.cc revision 62ab87bb3ff4830def25a1716f6785256c7eebca
/*
 * Copyright (C) 2012 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

// Tag all ATRACE_* events emitted from this file under the Dalvik/ART trace category.
#define ATRACE_TAG ATRACE_TAG_DALVIK

#include <stdio.h>
#include <cutils/trace.h>

#include "garbage_collector.h"

#include "base/histogram-inl.h"
#include "base/logging.h"
#include "base/mutex-inl.h"
#include "gc/accounting/heap_bitmap.h"
#include "gc/space/large_object_space.h"
#include "gc/space/space-inl.h"
#include "thread-inl.h"
#include "thread_list.h"

namespace art {
namespace gc {
namespace collector {

// Base-class construction: stores the owning heap and collector name, then zeroes
// all cumulative statistics (timings, pause histogram, freed counters) via
// ResetCumulativeStatistics(). Per-iteration fields (freed_bytes_ etc.) are reset
// at the start of each Run().
GarbageCollector::GarbageCollector(Heap* heap, const std::string& name)
    : heap_(heap),
      name_(name),
      gc_cause_(kGcCauseForAlloc),
      clear_soft_references_(false),
      duration_ns_(0),
      timings_(name_.c_str(), true, VLOG_IS_ON(heap)),
      // Histogram of pause durations, labeled "<collector name> paused".
      pause_histogram_((name_ + " paused").c_str(), kPauseBucketSize, kPauseBucketCount),
      cumulative_timings_(name) {
  ResetCumulativeStatistics();
}

// Work performed while mutator threads are suspended. No-op here; concrete
// collectors presumably override this with their pause-phase work — confirm
// against the declaration in garbage_collector.h.
void GarbageCollector::PausePhase() {
}

// Records one pause duration (in nanoseconds). The recorded values are folded
// into pause_histogram_ at the end of Run().
void GarbageCollector::RegisterPause(uint64_t nano_length) {
  pause_times_.push_back(nano_length);
}

// Clears all statistics accumulated across GC iterations.
void GarbageCollector::ResetCumulativeStatistics() {
  cumulative_timings_.Reset();
  pause_histogram_.Reset();
  total_time_ns_ = 0;
  total_freed_objects_ = 0;
  total_freed_bytes_ = 0;
}

// Performs one full garbage collection. Dispatches on the collector type:
//  - MS/SS/GSS: fully stop-the-world — the pause covers the entire collection.
//  - CMS: marking and reclaiming run with the mutator lock held shared
//    (concurrently with mutators); only PausePhase() and thread-local-buffer
//    revocation happen with all mutator threads suspended.
//  - CC: not yet implemented.
// Afterwards, folds this iteration's timings, freed counts, duration and pause
// times into the cumulative statistics.
void GarbageCollector::Run(GcCause gc_cause, bool clear_soft_references) {
  ThreadList* thread_list = Runtime::Current()->GetThreadList();
  Thread* self = Thread::Current();
  uint64_t start_time = NanoTime();
  // Per-iteration state: restart timings and pause bookkeeping for this run.
  timings_.Reset();
  pause_times_.clear();
  duration_ns_ = 0;
  clear_soft_references_ = clear_soft_references;
  gc_cause_ = gc_cause;

  // Reset stats.
  freed_bytes_ = 0;
  freed_large_object_bytes_ = 0;
  freed_objects_ = 0;
  freed_large_objects_ = 0;

  CollectorType collector_type = GetCollectorType();
  switch (collector_type) {
    case kCollectorTypeMS:  // Fall through.
    case kCollectorTypeSS:  // Fall through.
    case kCollectorTypeGSS: {
      InitializePhase();
      // Pause is the entire length of the GC.
      uint64_t pause_start = NanoTime();
      ATRACE_BEGIN("Application threads suspended");
      // Mutator lock may be already exclusively held when we do garbage collections for changing
      // the current collector / allocator during process state updates.
      if (Locks::mutator_lock_->IsExclusiveHeld(self)) {
        // Caller already suspended the world, so do not suspend/resume here.
        // PreGcRosAllocVerification() is called in Heap::TransitionCollector().
        RevokeAllThreadLocalBuffers();
        MarkingPhase();
        PausePhase();
        ReclaimPhase();
        // PostGcRosAllocVerification() is called in Heap::TransitionCollector().
      } else {
        ATRACE_BEGIN("Suspending mutator threads");
        thread_list->SuspendAll();
        ATRACE_END();
        GetHeap()->PreGcRosAllocVerification(&timings_);
        RevokeAllThreadLocalBuffers();
        MarkingPhase();
        PausePhase();
        ReclaimPhase();
        GetHeap()->PostGcRosAllocVerification(&timings_);
        ATRACE_BEGIN("Resuming mutator threads");
        thread_list->ResumeAll();
        ATRACE_END();
      }
      ATRACE_END();  // "Application threads suspended"
      RegisterPause(NanoTime() - pause_start);
      FinishPhase();
      break;
    }
    case kCollectorTypeCMS: {
      InitializePhase();
      // CMS must not be invoked with the world already stopped.
      CHECK(!Locks::mutator_lock_->IsExclusiveHeld(self));
      {
        // Marking runs concurrently with mutators, holding the mutator lock shared.
        ReaderMutexLock mu(self, *Locks::mutator_lock_);
        MarkingPhase();
      }
      uint64_t pause_start = NanoTime();
      ATRACE_BEGIN("Suspending mutator threads");
      thread_list->SuspendAll();
      ATRACE_END();
      ATRACE_BEGIN("All mutator threads suspended");
      GetHeap()->PreGcRosAllocVerification(&timings_);
      PausePhase();
      RevokeAllThreadLocalBuffers();
      GetHeap()->PostGcRosAllocVerification(&timings_);
      ATRACE_END();
      uint64_t pause_end = NanoTime();
      ATRACE_BEGIN("Resuming mutator threads");
      thread_list->ResumeAll();
      ATRACE_END();
      RegisterPause(pause_end - pause_start);
      {
        // Reclaiming also runs concurrently with mutators.
        ReaderMutexLock mu(self, *Locks::mutator_lock_);
        ReclaimPhase();
      }
      FinishPhase();
      break;
    }
    case kCollectorTypeCC: {
      // To be implemented.
      break;
    }
    default: {
      LOG(FATAL) << "Unreachable collector type=" << static_cast<size_t>(collector_type);
      break;
    }
  }
  // Add the current timings to the cumulative timings.
  cumulative_timings_.AddLogger(timings_);
  // Update cumulative statistics with how many bytes the GC iteration freed.
  total_freed_objects_ += GetFreedObjects() + GetFreedLargeObjects();
  total_freed_bytes_ += GetFreedBytes() + GetFreedLargeObjectBytes();
  uint64_t end_time = NanoTime();
  duration_ns_ = end_time - start_time;
  total_time_ns_ += GetDurationNs();
  for (uint64_t pause_time : pause_times_) {
    // Histogram buckets are in microseconds; pause_times_ entries are nanoseconds.
    pause_histogram_.AddValue(pause_time / 1000);
  }
}

// Exchanges the live and mark bitmaps of every space this GC type collects.
void GarbageCollector::SwapBitmaps() {
  // Swap the live and mark bitmaps for each alloc space. This is needed since sweep re-swaps
  // these bitmaps. The bitmap swapping is an optimization so that we do not need to clear the live
  // bits of dead objects in the live bitmap.
  const GcType gc_type = GetGcType();
  for (const auto& space : GetHeap()->GetContinuousSpaces()) {
    // We never allocate into zygote spaces.
    // Swap only spaces this collection actually collects: always-collect spaces,
    // plus full-collect spaces when this is a full GC.
    if (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyAlwaysCollect ||
        (gc_type == kGcTypeFull &&
         space->GetGcRetentionPolicy() == space::kGcRetentionPolicyFullCollect)) {
      accounting::ContinuousSpaceBitmap* live_bitmap = space->GetLiveBitmap();
      accounting::ContinuousSpaceBitmap* mark_bitmap = space->GetMarkBitmap();
      // Skip spaces without bitmaps, and spaces where live and mark share one
      // bitmap (nothing to swap).
      if (live_bitmap != nullptr && live_bitmap != mark_bitmap) {
        // Swap both in the heap's aggregate bitmap sets and in the space itself.
        heap_->GetLiveBitmap()->ReplaceBitmap(live_bitmap, mark_bitmap);
        heap_->GetMarkBitmap()->ReplaceBitmap(mark_bitmap, live_bitmap);
        CHECK(space->IsContinuousMemMapAllocSpace());
        space->AsContinuousMemMapAllocSpace()->SwapBitmaps();
      }
    }
  }
  // Discontinuous (large object) spaces are swapped unconditionally.
  for (const auto& disc_space : GetHeap()->GetDiscontinuousSpaces()) {
    space::LargeObjectSpace* space = disc_space->AsLargeObjectSpace();
    accounting::LargeObjectBitmap* live_set = space->GetLiveBitmap();
    accounting::LargeObjectBitmap* mark_set = space->GetMarkBitmap();
    heap_->GetLiveBitmap()->ReplaceLargeObjectBitmap(live_set, mark_set);
    heap_->GetMarkBitmap()->ReplaceLargeObjectBitmap(mark_set, live_set);
    space->SwapBitmaps();
  }
}

// Mean throughput over all iterations, in bytes freed per second
// (bytes * 1000 / milliseconds).
uint64_t GarbageCollector::GetEstimatedMeanThroughput() const {
  // Add 1ms to prevent possible division by 0.
  return (total_freed_bytes_ * 1000) / (NsToMs(GetCumulativeTimings().GetTotalNs()) + 1);
}

// Throughput of the most recent iteration only, in bytes freed per second.
uint64_t GarbageCollector::GetEstimatedLastIterationThroughput() const {
  // Add 1ms to prevent possible division by 0.
  return (static_cast<uint64_t>(freed_bytes_) * 1000) / (NsToMs(GetDurationNs()) + 1);
}

// NOTE(review): body is currently identical to ResetCumulativeStatistics();
// consider having one delegate to the other (requires checking both
// declarations in garbage_collector.h before consolidating).
void GarbageCollector::ResetMeasurements() {
  cumulative_timings_.Reset();
  pause_histogram_.Reset();
  total_time_ns_ = 0;
  total_freed_objects_ = 0;
  total_freed_bytes_ = 0;
}

}  // namespace collector
}  // namespace gc
}  // namespace art