garbage_collector.cc revision e6da9af8dfe0a3e3fbc2be700554f6478380e7b9
/*
 * Copyright (C) 2012 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#define ATRACE_TAG ATRACE_TAG_DALVIK

#include <stdio.h>
#include <cutils/trace.h>

#include "garbage_collector.h"

#include "base/histogram-inl.h"
#include "base/logging.h"
#include "base/mutex-inl.h"
#include "gc/accounting/heap_bitmap.h"
#include "gc/space/large_object_space.h"
#include "gc/space/space-inl.h"
#include "thread-inl.h"
#include "thread_list.h"

namespace art {
namespace gc {
namespace collector {

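// A collector instance is constructed once per collector type and reused
// across GC cycles; cumulative statistics are accumulated by Run() and
// cleared via ResetCumulativeStatistics().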
GarbageCollector::GarbageCollector(Heap* heap, const std::string& name)
    : heap_(heap),
      name_(name),
      clear_soft_references_(false),
      verbose_(VLOG_IS_ON(heap)),
      duration_ns_(0),
      timings_(name_.c_str(), true, verbose_),
      pause_histogram_((name_ + " paused").c_str(), kPauseBucketSize, kPauseBucketCount),
      cumulative_timings_(name) {
  ResetCumulativeStatistics();
}

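// Called with all mutator threads suspended during a concurrent collection.
// Returns true once the dirty-object phase is complete; this base
// implementation finishes in a single pass, so Run() suspends the mutators
// only once. Subclasses that need additional passes can override this and
// return false until the phase is done.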
bool GarbageCollector::HandleDirtyObjectsPhase() {
  DCHECK(IsConcurrent());
  return true;
}

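// Records the length in nanoseconds of one stop-the-world pause. The recorded
// pauses are folded into pause_histogram_ at the end of Run().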
void GarbageCollector::RegisterPause(uint64_t nano_length) {
  pause_times_.push_back(nano_length);
}

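// Clears the statistics accumulated over all collections run by this
// collector: the cumulative timings, the pause histogram, and the total
// time/objects-freed/bytes-freed counters.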
void GarbageCollector::ResetCumulativeStatistics() {
  cumulative_timings_.Reset();
  pause_histogram_.Reset();
  total_time_ns_ = 0;
  total_freed_objects_ = 0;
  total_freed_bytes_ = 0;
}

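// Runs one full collection cycle. Non-concurrent collectors execute the
// marking and reclaim phases inside a single pause with all mutator threads
// suspended; concurrent collectors mark and reclaim while holding the mutator
// lock for reading, and only pause the mutators for the dirty-object phase.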
void GarbageCollector::Run(bool clear_soft_references) {
  ThreadList* thread_list = Runtime::Current()->GetThreadList();
  Thread* self = Thread::Current();
  uint64_t start_time = NanoTime();
  pause_times_.clear();
  duration_ns_ = 0;
  clear_soft_references_ = clear_soft_references;

  // Reset stats.
  freed_bytes_ = 0;
  freed_large_object_bytes_ = 0;
  freed_objects_ = 0;
  freed_large_objects_ = 0;

  InitializePhase();

  if (!IsConcurrent()) {
    // Pause is the entire length of the GC.
    uint64_t pause_start = NanoTime();
    ATRACE_BEGIN("Application threads suspended");
    // The mutator lock may already be exclusively held when we do garbage collections for changing
    // the current collector / allocator during process state updates.
    if (Locks::mutator_lock_->IsExclusiveHeld(self)) {
      GetHeap()->RevokeAllThreadLocalBuffers();
      MarkingPhase();
      ReclaimPhase();
    } else {
      thread_list->SuspendAll();
      GetHeap()->RevokeAllThreadLocalBuffers();
      MarkingPhase();
      ReclaimPhase();
      thread_list->ResumeAll();
    }
    ATRACE_END();
    RegisterPause(NanoTime() - pause_start);
  } else {
    CHECK(!Locks::mutator_lock_->IsExclusiveHeld(self));
    {
      // Mark concurrently while the mutator threads keep running.
      ReaderMutexLock mu(self, *Locks::mutator_lock_);
      MarkingPhase();
    }
    bool done = false;
    while (!done) {
      uint64_t pause_start = NanoTime();
      ATRACE_BEGIN("Suspending mutator threads");
      thread_list->SuspendAll();
      ATRACE_END();
      ATRACE_BEGIN("All mutator threads suspended");
      done = HandleDirtyObjectsPhase();
      if (done) {
        GetHeap()->RevokeAllThreadLocalBuffers();
      }
      ATRACE_END();
      uint64_t pause_end = NanoTime();
      ATRACE_BEGIN("Resuming mutator threads");
      thread_list->ResumeAll();
      ATRACE_END();
      RegisterPause(pause_end - pause_start);
    }
    {
      ReaderMutexLock mu(self, *Locks::mutator_lock_);
      ReclaimPhase();
    }
  }
  FinishPhase();
  uint64_t end_time = NanoTime();
  duration_ns_ = end_time - start_time;
  total_time_ns_ += GetDurationNs();
  for (uint64_t pause_time : pause_times_) {
    // Convert nanoseconds to microseconds for the histogram.
    pause_histogram_.AddValue(pause_time / 1000);
  }
}

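// Swaps the live and mark bitmaps of every space this collection can reclaim
// from. Called by collectors once marking is complete, so that the mark
// bitmap (which then holds the live set) becomes the new live bitmap.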
void GarbageCollector::SwapBitmaps() {
  // Swap the live and mark bitmaps for each alloc space. This is needed since sweep re-swaps
  // these bitmaps. The bitmap swapping is an optimization so that we do not need to clear the live
  // bits of dead objects in the live bitmap.
  const GcType gc_type = GetGcType();
  for (const auto& space : GetHeap()->GetContinuousSpaces()) {
    // We never allocate into zygote spaces.
    if (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyAlwaysCollect ||
        (gc_type == kGcTypeFull &&
         space->GetGcRetentionPolicy() == space::kGcRetentionPolicyFullCollect)) {
      accounting::SpaceBitmap* live_bitmap = space->GetLiveBitmap();
      accounting::SpaceBitmap* mark_bitmap = space->GetMarkBitmap();
      if (live_bitmap != mark_bitmap) {
        heap_->GetLiveBitmap()->ReplaceBitmap(live_bitmap, mark_bitmap);
        heap_->GetMarkBitmap()->ReplaceBitmap(mark_bitmap, live_bitmap);
        space->AsMallocSpace()->SwapBitmaps();
      }
    }
  }
  // Large object spaces track objects with object sets rather than bitmaps; swap those the
  // same way.
  for (const auto& disc_space : GetHeap()->GetDiscontinuousSpaces()) {
    space::LargeObjectSpace* space = down_cast<space::LargeObjectSpace*>(disc_space);
    accounting::SpaceSetMap* live_set = space->GetLiveObjects();
    accounting::SpaceSetMap* mark_set = space->GetMarkObjects();
    heap_->GetLiveBitmap()->ReplaceObjectSet(live_set, mark_set);
    heap_->GetMarkBitmap()->ReplaceObjectSet(mark_set, live_set);
    space->SwapBitmaps();
  }
}

}  // namespace collector
}  // namespace gc
}  // namespace art