mark_sweep.h revision 375a29a824207fb2187bc3d31bfaae2d8283b3a2
/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_GC_COLLECTOR_MARK_SWEEP_H_
#define ART_RUNTIME_GC_COLLECTOR_MARK_SWEEP_H_

#include <memory>

#include "atomic.h"
#include "barrier.h"
#include "base/macros.h"
#include "base/mutex.h"
#include "garbage_collector.h"
#include "gc_root.h"
#include "gc/accounting/heap_bitmap.h"
#include "immune_region.h"
#include "object_callbacks.h"
#include "offsets.h"

namespace art {

namespace mirror {
  class Class;
  class Object;
  class Reference;
}  // namespace mirror

class Thread;
enum VisitRootFlags : uint8_t;

namespace gc {

class Heap;

namespace accounting {
  template<typename T> class AtomicStack;
  typedef AtomicStack<mirror::Object> ObjectStack;
}  // namespace accounting

namespace collector {

class MarkSweep : public GarbageCollector {
 public:
  explicit MarkSweep(Heap* heap, bool is_concurrent, const std::string& name_prefix = "");

  ~MarkSweep() {}

  virtual void RunPhases() OVERRIDE NO_THREAD_SAFETY_ANALYSIS;
  void InitializePhase();
  void MarkingPhase() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  void PausePhase() EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
  void ReclaimPhase() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  void FinishPhase() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  virtual void MarkReachableObjects()
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);

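  // Illustrative sketch (not a verbatim copy of RunPhases()): the lock
  // annotations above imply roughly this phase ordering, with marking and
  // reclamation running concurrently with mutators and only PausePhase
  // stopping the world:
  //
  //   InitializePhase();
  //   MarkingPhase();   // mutator_lock_ held shared: mutators keep running.
  //   PausePhase();     // mutator_lock_ held exclusively: stop-the-world.
  //   ReclaimPhase();   // mutator_lock_ held shared again.
  //   FinishPhase();
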
  bool IsConcurrent() const {
    return is_concurrent_;
  }

  virtual GcType GetGcType() const OVERRIDE {
    return kGcTypeFull;
  }

  virtual CollectorType GetCollectorType() const OVERRIDE {
    return is_concurrent_ ? kCollectorTypeCMS : kCollectorTypeMS;
  }

  // Initializes internal structures.
  void Init();

  // Find the default mark bitmap.
  void FindDefaultSpaceBitmap();

  // Marks all objects in the root set at the start of a garbage collection.
  void MarkRoots(Thread* self)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  void MarkNonThreadRoots()
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  void MarkConcurrentRoots(VisitRootFlags flags)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  void MarkRootsCheckpoint(Thread* self, bool revoke_ros_alloc_thread_local_buffers_at_checkpoint)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Builds a mark stack and recursively marks until it empties.
  void RecursiveMark()
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Binds the live bits to the mark bits of bitmaps for spaces that are never collected, i.e.
  // the image space. Marks that portion of the heap as immune.
  virtual void BindBitmaps() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Builds a mark stack with objects on dirty cards and recursively marks until it empties.
  void RecursiveMarkDirtyObjects(bool paused, uint8_t minimum_age)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

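  // Background note on RecursiveMarkDirtyObjects (a sketch of the assumed
  // card-table scheme, not verbatim from this file): the write barrier dirties
  // the card covering an object whenever one of its reference fields is stored
  // to, and re-scanning visits only cards whose value is at least minimum_age.
  // A hypothetical call that scans every card dirtied since the last scan:
  //
  //   RecursiveMarkDirtyObjects(/*paused=*/false, accounting::CardTable::kCardDirty);
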
  // Remarks the root set after completing the concurrent mark.
  void ReMarkRoots()
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  void ProcessReferences(Thread* self)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Update and mark references from immune spaces.
  void UpdateAndMarkModUnion()
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Pre-cleans cards to reduce the amount of work needed during the pause.
  void PreCleanCards()
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Sweeps unmarked objects to complete the garbage collection. Virtual because, by default, it
  // sweeps all allocation spaces; partial and sticky GCs only want to sweep a subset of the heap.
  virtual void Sweep(bool swap_bitmaps) EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Sweeps unmarked objects to complete the garbage collection.
  void SweepLargeObjects(bool swap_bitmaps) EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);

  // Sweep only pointers within an array. WARNING: Trashes objects.
  void SweepArray(accounting::ObjectStack* allocation_stack_, bool swap_bitmaps)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);

  // Blackens an object.
  void ScanObject(mirror::Object* obj)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // No thread safety analysis due to lambdas.
  template<typename MarkVisitor, typename ReferenceVisitor>
  void ScanObjectVisit(mirror::Object* obj, const MarkVisitor& visitor,
                       const ReferenceVisitor& ref_visitor)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);

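  // Illustrative sketch of a ScanObjectVisit call site (hypothetical; the
  // exact functor signatures are dictated by mirror::Object::VisitReferences):
  //
  //   ScanObjectVisit(obj,
  //       [this](mirror::Object* o, MemberOffset offset, bool /*is_static*/) {
  //         // Mark each reference field, passing holder and offset through.
  //         MarkObject(o->GetFieldObject<mirror::Object>(offset), o, offset);
  //       },
  //       [this](mirror::Class* klass, mirror::Reference* ref) {
  //         DelayReferenceReferent(klass, ref);
  //       });
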
  void SweepSystemWeaks(Thread* self)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) LOCKS_EXCLUDED(Locks::heap_bitmap_lock_);

  static mirror::Object* VerifySystemWeakIsLiveCallback(mirror::Object* obj, void* arg)
      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);

  void VerifySystemWeaks()
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_);

  // Verify that an object is live, either in a live bitmap or in the allocation stack.
  void VerifyIsLive(const mirror::Object* obj)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);

  static mirror::Object* MarkObjectCallback(mirror::Object* obj, void* arg)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);

  static void MarkHeapReferenceCallback(mirror::HeapReference<mirror::Object>* ref, void* arg)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);

  static bool HeapReferenceMarkedCallback(mirror::HeapReference<mirror::Object>* ref, void* arg)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);

  virtual void VisitRoots(mirror::Object*** roots, size_t count, const RootInfo& info) OVERRIDE
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);

  virtual void VisitRoots(mirror::CompressedReference<mirror::Object>** roots, size_t count,
                          const RootInfo& info) OVERRIDE
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);

  static void ProcessMarkStackCallback(void* arg)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Marks an object. If provided, holder and offset identify the field through which obj was
  // reached.
  void MarkObject(mirror::Object* obj, mirror::Object* holder = nullptr,
                  MemberOffset offset = MemberOffset(0))
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);

  Barrier& GetBarrier() {
    return *gc_barrier_;
  }

  // Schedules an unmarked object for reference processing.
  void DelayReferenceReferent(mirror::Class* klass, mirror::Reference* reference)
      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
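  // (Background, assumed rather than verbatim from this file: references
  // queued here during marking are drained by ProcessReferences(), which marks
  // referents that must be kept alive and clears the rest, enqueueing those
  // references for java.lang.ref processing.)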

 protected:
  // Returns true if the object has its bit set in the mark bitmap.
  bool IsMarked(const mirror::Object* object) const
      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);

  static mirror::Object* IsMarkedCallback(mirror::Object* object, void* arg)
      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);

  static void VerifyImageRootVisitor(mirror::Object* root, void* arg)
      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);

  void MarkObjectNonNull(mirror::Object* obj, mirror::Object* holder = nullptr,
                         MemberOffset offset = MemberOffset(0))
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);

  // Marks an object atomically, safe to use from multiple threads.
  void MarkObjectNonNullParallel(mirror::Object* obj)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Returns true if we need to add obj to a mark stack.
  bool MarkObjectParallel(const mirror::Object* obj) NO_THREAD_SAFETY_ANALYSIS;

  // Verify the roots of the heap and print out information related to any invalid roots.
  // Called in MarkObject, so we may not hold the mutator lock.
  void VerifyRoots()
      NO_THREAD_SAFETY_ANALYSIS;

  // Expand mark stack to 2x its current size.
  void ExpandMarkStack() EXCLUSIVE_LOCKS_REQUIRED(mark_stack_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  void ResizeMarkStack(size_t new_size) EXCLUSIVE_LOCKS_REQUIRED(mark_stack_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

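  // A minimal sketch of the expected relationship between the two methods
  // above (assumed from the comment; AtomicStack::Capacity() is the assumed
  // accessor):
  //
  //   void MarkSweep::ExpandMarkStack() {
  //     ResizeMarkStack(mark_stack_->Capacity() * 2);  // Double the capacity.
  //   }
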
  // Returns how many threads we should use for the current GC phase, based on whether we are
  // paused and whether or not we care about pauses.
  size_t GetThreadCount(bool paused) const;

  // Push a single reference on a mark stack.
  void PushOnMarkStack(mirror::Object* obj) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Blackens objects grayed during a garbage collection.
  void ScanGrayObjects(bool paused, uint8_t minimum_age)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Recursively blackens objects on the mark stack.
  void ProcessMarkStack(bool paused)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  void ProcessMarkStackParallel(size_t thread_count)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Used to get around thread safety annotations. The call is from MarkingPhase and is guarded by
  // an IsExclusiveHeld check.
  void RevokeAllThreadLocalAllocationStacks(Thread* self) NO_THREAD_SAFETY_ANALYSIS;

  // Revoke all the thread-local buffers.
  void RevokeAllThreadLocalBuffers();

  // Whether or not we count how many of each type of object were scanned.
  static const bool kCountScannedTypes = false;

  // Current space; we check this space first to avoid searching for the appropriate space for an
  // object.
  accounting::ContinuousSpaceBitmap* current_space_bitmap_;
  // Cache the heap's mark bitmap to prevent having to do 2 loads during slow path marking.
  accounting::HeapBitmap* mark_bitmap_;

  accounting::ObjectStack* mark_stack_;

  // Immune region; every object inside the immune range is assumed to be marked.
  ImmuneRegion immune_region_;

  // Parallel finger.
  AtomicInteger atomic_finger_;
  // Number of classes scanned, if kCountScannedTypes.
  AtomicInteger class_count_;
  // Number of arrays scanned, if kCountScannedTypes.
  AtomicInteger array_count_;
  // Number of non-class/arrays scanned, if kCountScannedTypes.
  AtomicInteger other_count_;
  AtomicInteger large_object_test_;
  AtomicInteger large_object_mark_;
  AtomicInteger overhead_time_;
  AtomicInteger work_chunks_created_;
  AtomicInteger work_chunks_deleted_;
  AtomicInteger reference_count_;
  AtomicInteger mark_null_count_;
  AtomicInteger mark_immune_count_;
  AtomicInteger mark_fastpath_count_;
  AtomicInteger mark_slowpath_count_;

  std::unique_ptr<Barrier> gc_barrier_;
  Mutex mark_stack_lock_ ACQUIRED_AFTER(Locks::classlinker_classes_lock_);

  const bool is_concurrent_;

  // Verification.
  size_t live_stack_freeze_size_;

  std::unique_ptr<MemMap> sweep_array_free_buffer_mem_map_;

 private:
  friend class AddIfReachesAllocSpaceVisitor;  // Used by mod-union table.
  friend class CardScanTask;
  friend class CheckBitmapVisitor;
  friend class CheckReferenceVisitor;
  friend class CheckpointMarkThreadRoots;
  friend class art::gc::Heap;
  friend class FifoMarkStackChunk;
  friend class MarkObjectVisitor;
  template<bool kUseFinger> friend class MarkStackTask;
  friend class MarkSweepMarkObjectSlowPath;
  friend class ModUnionCheckReferences;
  friend class ModUnionClearCardVisitor;
  friend class ModUnionReferenceVisitor;
  friend class ModUnionScanImageRootVisitor;
  friend class ModUnionTableBitmap;
  friend class ModUnionTableReferenceCache;
  friend class ModUnionVisitor;
  friend class VerifyRootMarkedVisitor;
  friend class VerifyRootVisitor;

  DISALLOW_IMPLICIT_CONSTRUCTORS(MarkSweep);
};

}  // namespace collector
}  // namespace gc
}  // namespace art

#endif  // ART_RUNTIME_GC_COLLECTOR_MARK_SWEEP_H_