concurrent_copying.h revision 988136bf7a731710c1c0979d6f2deec6abe4574f
/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_GC_COLLECTOR_CONCURRENT_COPYING_H_
#define ART_RUNTIME_GC_COLLECTOR_CONCURRENT_COPYING_H_

#include "barrier.h"
#include "garbage_collector.h"
#include "immune_spaces.h"
#include "jni.h"
#include "object_callbacks.h"
#include "offsets.h"
#include "gc/accounting/space_bitmap.h"
#include "mirror/object.h"
#include "mirror/object_reference.h"
#include "safe_map.h"

#include <unordered_map>
#include <vector>

namespace art {
class Closure;
class RootInfo;

namespace gc {

namespace accounting {
  template<typename T> class AtomicStack;
  typedef AtomicStack<mirror::Object> ObjectStack;
  typedef SpaceBitmap<kObjectAlignment> ContinuousSpaceBitmap;
  class HeapBitmap;
  class ReadBarrierTable;
}  // namespace accounting

namespace space {
  class RegionSpace;
}  // namespace space

namespace collector {

class ConcurrentCopying : public GarbageCollector {
 public:
  // Enable the no-from-space-refs verification at the pause.
  static constexpr bool kEnableNoFromSpaceRefsVerification = kIsDebugBuild;
  // Enable the from-space bytes/objects check.
  static constexpr bool kEnableFromSpaceAccountingCheck = kIsDebugBuild;
  // Enable verbose mode.
  static constexpr bool kVerboseMode = false;
  // If kGrayDirtyImmuneObjects is true, we gray only the dirty immune-space objects during
  // the GC pause, which avoids dirtying additional (clean) immune-space pages.
  static constexpr bool kGrayDirtyImmuneObjects = true;
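
  // Illustrative sketch only, not the actual implementation (that is
  // GrayAllDirtyImmuneObjects() in the .cc file): conceptually, the pause walks the card
  // table of each immune space and grays the objects that sit on dirty cards. The
  // CardTable::Scan call and AtomicSetReadBarrierState() below are assumed names, not
  // guaranteed signatures:
  //
  //   accounting::CardTable* card_table = heap_->GetCardTable();
  //   for (auto& space : immune_spaces_.GetSpaces()) {
  //     card_table->Scan(space->GetMarkBitmap(), space->Begin(), space->Limit(),
  //                      [](mirror::Object* obj) {
  //                        obj->AtomicSetReadBarrierState(ReadBarrier::WhiteState(),
  //                                                       ReadBarrier::GrayState());
  //                      },
  //                      accounting::CardTable::kCardDirty);
  //   }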

  explicit ConcurrentCopying(Heap* heap,
                             const std::string& name_prefix = "",
                             bool measure_read_barrier_slow_path = false);
  ~ConcurrentCopying();

  virtual void RunPhases() OVERRIDE
      REQUIRES(!immune_gray_stack_lock_,
               !mark_stack_lock_,
               !rb_slow_path_histogram_lock_,
               !skipped_blocks_lock_);
  void InitializePhase() REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_, !immune_gray_stack_lock_);
  void MarkingPhase() REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_);
  void ReclaimPhase() REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!mark_stack_lock_);
  void FinishPhase() REQUIRES(!mark_stack_lock_,
                              !rb_slow_path_histogram_lock_,
                              !skipped_blocks_lock_);

  void BindBitmaps() REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!Locks::heap_bitmap_lock_);
  virtual GcType GetGcType() const OVERRIDE {
    return kGcTypePartial;
  }
  virtual CollectorType GetCollectorType() const OVERRIDE {
    return kCollectorTypeCC;
  }
  virtual void RevokeAllThreadLocalBuffers() OVERRIDE;
  void SetRegionSpace(space::RegionSpace* region_space) {
    DCHECK(region_space != nullptr);
    region_space_ = region_space;
  }
  space::RegionSpace* RegionSpace() {
    return region_space_;
  }
  void AssertToSpaceInvariant(mirror::Object* obj, MemberOffset offset, mirror::Object* ref)
      REQUIRES_SHARED(Locks::mutator_lock_);
  void AssertToSpaceInvariant(GcRootSource* gc_root_source, mirror::Object* ref)
      REQUIRES_SHARED(Locks::mutator_lock_);
  bool IsInToSpace(mirror::Object* ref) REQUIRES_SHARED(Locks::mutator_lock_) {
    DCHECK(ref != nullptr);
    return IsMarked(ref) == ref;
  }
  template<bool kGrayImmuneObject = true, bool kFromGCThread = false>
  ALWAYS_INLINE mirror::Object* Mark(mirror::Object* from_ref,
                                     mirror::Object* holder = nullptr,
                                     MemberOffset offset = MemberOffset(0))
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_);
  ALWAYS_INLINE mirror::Object* MarkFromReadBarrier(mirror::Object* from_ref)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_);
  bool IsMarking() const {
    return is_marking_;
  }
  bool IsActive() const {
    return is_active_;
  }
  Barrier& GetBarrier() {
    return *gc_barrier_;
  }
  bool IsWeakRefAccessEnabled() REQUIRES(Locks::thread_list_lock_) {
    return weak_ref_access_enabled_;
  }
  void RevokeThreadLocalMarkStack(Thread* thread) REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_);

 private:
  void PushOntoMarkStack(mirror::Object* obj) REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_);
  mirror::Object* Copy(mirror::Object* from_ref) REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_);
  void Scan(mirror::Object* to_ref) REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_);
  void Process(mirror::Object* obj, MemberOffset offset)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_);
  virtual void VisitRoots(mirror::Object*** roots, size_t count, const RootInfo& info)
      OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_);
  template<bool kGrayImmuneObject>
  void MarkRoot(mirror::CompressedReference<mirror::Object>* root)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_);
  virtual void VisitRoots(mirror::CompressedReference<mirror::Object>** roots, size_t count,
                          const RootInfo& info)
      OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_);
  void VerifyNoFromSpaceReferences() REQUIRES(Locks::mutator_lock_);
  accounting::ObjectStack* GetAllocationStack();
  accounting::ObjectStack* GetLiveStack();
  virtual void ProcessMarkStack() OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_);
  bool ProcessMarkStackOnce() REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!mark_stack_lock_);
  void ProcessMarkStackRef(mirror::Object* to_ref) REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_);
  void GrayAllDirtyImmuneObjects()
      REQUIRES(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_);
  void VerifyGrayImmuneObjects()
      REQUIRES(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_);
  static void VerifyNoMissingCardMarkCallback(mirror::Object* obj, void* arg)
      REQUIRES(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_);
  void VerifyNoMissingCardMarks()
      REQUIRES(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_);
  size_t ProcessThreadLocalMarkStacks(bool disable_weak_ref_access, Closure* checkpoint_callback)
      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!mark_stack_lock_);
  void RevokeThreadLocalMarkStacks(bool disable_weak_ref_access, Closure* checkpoint_callback)
      REQUIRES_SHARED(Locks::mutator_lock_);
  void SwitchToSharedMarkStackMode() REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_);
  void SwitchToGcExclusiveMarkStackMode() REQUIRES_SHARED(Locks::mutator_lock_);
  virtual void DelayReferenceReferent(ObjPtr<mirror::Class> klass,
                                      ObjPtr<mirror::Reference> reference) OVERRIDE
      REQUIRES_SHARED(Locks::mutator_lock_);
  void ProcessReferences(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_);
  virtual mirror::Object* MarkObject(mirror::Object* from_ref) OVERRIDE
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_);
  virtual void MarkHeapReference(mirror::HeapReference<mirror::Object>* from_ref,
                                 bool do_atomic_update) OVERRIDE
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_);
  virtual mirror::Object* IsMarked(mirror::Object* from_ref) OVERRIDE
      REQUIRES_SHARED(Locks::mutator_lock_);
  bool IsMarkedInUnevacFromSpace(mirror::Object* from_ref)
      REQUIRES_SHARED(Locks::mutator_lock_);
  virtual bool IsNullOrMarkedHeapReference(mirror::HeapReference<mirror::Object>* field,
                                           bool do_atomic_update) OVERRIDE
      REQUIRES_SHARED(Locks::mutator_lock_);
  void SweepSystemWeaks(Thread* self)
      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Locks::heap_bitmap_lock_);
  void Sweep(bool swap_bitmaps)
      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_, !mark_stack_lock_);
  void SweepLargeObjects(bool swap_bitmaps)
      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_);
  void MarkZygoteLargeObjects()
      REQUIRES_SHARED(Locks::mutator_lock_);
  void FillWithDummyObject(mirror::Object* dummy_obj, size_t byte_size)
      REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);
  mirror::Object* AllocateInSkippedBlock(size_t alloc_size)
      REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);
  void CheckEmptyMarkStack() REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!mark_stack_lock_);
  void IssueEmptyCheckpoint() REQUIRES_SHARED(Locks::mutator_lock_);
  bool IsOnAllocStack(mirror::Object* ref) REQUIRES_SHARED(Locks::mutator_lock_);
  mirror::Object* GetFwdPtr(mirror::Object* from_ref)
      REQUIRES_SHARED(Locks::mutator_lock_);
  void FlipThreadRoots() REQUIRES(!Locks::mutator_lock_);
  void SwapStacks() REQUIRES_SHARED(Locks::mutator_lock_);
  void RecordLiveStackFreezeSize(Thread* self);
  void ComputeUnevacFromSpaceLiveRatio();
  void LogFromSpaceRefHolder(mirror::Object* obj, MemberOffset offset)
      REQUIRES_SHARED(Locks::mutator_lock_);
  void AssertToSpaceInvariantInNonMovingSpace(mirror::Object* obj, mirror::Object* ref)
      REQUIRES_SHARED(Locks::mutator_lock_);
  void ReenableWeakRefAccess(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_);
  void DisableMarking() REQUIRES_SHARED(Locks::mutator_lock_);
  void IssueDisableMarkingCheckpoint() REQUIRES_SHARED(Locks::mutator_lock_);
  void ExpandGcMarkStack() REQUIRES_SHARED(Locks::mutator_lock_);
  mirror::Object* MarkNonMoving(mirror::Object* from_ref,
                                mirror::Object* holder = nullptr,
                                MemberOffset offset = MemberOffset(0))
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_);
  ALWAYS_INLINE mirror::Object* MarkUnevacFromSpaceRegion(mirror::Object* from_ref,
      accounting::SpaceBitmap<kObjectAlignment>* bitmap)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_);
  template<bool kGrayImmuneObject>
  ALWAYS_INLINE mirror::Object* MarkImmuneSpace(mirror::Object* from_ref)
      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!immune_gray_stack_lock_);
  void PushOntoFalseGrayStack(mirror::Object* obj) REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_);
  void ProcessFalseGrayStack() REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_);
  void ScanImmuneObject(mirror::Object* obj)
      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!mark_stack_lock_);
  mirror::Object* MarkFromReadBarrierWithMeasurements(mirror::Object* from_ref)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_);
  void DumpPerformanceInfo(std::ostream& os) OVERRIDE REQUIRES(!rb_slow_path_histogram_lock_);

  space::RegionSpace* region_space_;      // The underlying region space.
  std::unique_ptr<Barrier> gc_barrier_;
  std::unique_ptr<accounting::ObjectStack> gc_mark_stack_;
  std::unique_ptr<accounting::ObjectStack> rb_mark_bit_stack_;
  bool rb_mark_bit_stack_full_;
  std::vector<mirror::Object*> false_gray_stack_ GUARDED_BY(mark_stack_lock_);
  Mutex mark_stack_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
  std::vector<accounting::ObjectStack*> revoked_mark_stacks_
      GUARDED_BY(mark_stack_lock_);
  static constexpr size_t kMarkStackSize = kPageSize;
  static constexpr size_t kMarkStackPoolSize = 256;
  std::vector<accounting::ObjectStack*> pooled_mark_stacks_
      GUARDED_BY(mark_stack_lock_);
  Thread* thread_running_gc_;
  bool is_marking_;                       // True while marking is ongoing.
  bool is_active_;                        // True while the collection is ongoing.
  bool is_asserting_to_space_invariant_;  // True while asserting the to-space invariant.
  ImmuneSpaces immune_spaces_;
  accounting::SpaceBitmap<kObjectAlignment>* region_space_bitmap_;
  // A cache of Heap::GetMarkBitmap().
  accounting::HeapBitmap* heap_mark_bitmap_;
  size_t live_stack_freeze_size_;
  size_t from_space_num_objects_at_first_pause_;
  size_t from_space_num_bytes_at_first_pause_;
  Atomic<int> is_mark_stack_push_disallowed_;
  enum MarkStackMode {
    kMarkStackModeOff = 0,      // Mark stack is off.
    kMarkStackModeThreadLocal,  // All threads except for the GC-running thread push refs onto
                                // thread-local mark stacks. The GC-running thread pushes onto and
                                // pops off the GC mark stack without a lock.
    kMarkStackModeShared,       // All threads share the GC mark stack with a lock.
    kMarkStackModeGcExclusive   // The GC-running thread pushes onto and pops from the GC mark stack
                                // without a lock. Other threads won't access the mark stack.
  };
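  // Illustrative sketch only (see PushOntoMarkStack() in the .cc file): how a reference is
  // pushed depends on the current mark stack mode, roughly:
  //
  //   MarkStackMode mode = mark_stack_mode_.LoadRelaxed();
  //   if (mode == kMarkStackModeThreadLocal) {
  //     // GC-running thread: push onto gc_mark_stack_ without a lock.
  //     // Mutator thread: push onto its thread-local mark stack, grabbing one from
  //     // pooled_mark_stacks_ (under mark_stack_lock_) if it does not have one yet.
  //   } else if (mode == kMarkStackModeShared) {
  //     MutexLock mu(self, mark_stack_lock_);  // All threads share gc_mark_stack_.
  //     gc_mark_stack_->PushBack(to_ref);
  //   } else {
  //     // kMarkStackModeGcExclusive: only the GC-running thread touches gc_mark_stack_,
  //     // so no lock is needed. (kMarkStackModeOff would be a usage error here.)
  //     gc_mark_stack_->PushBack(to_ref);
  //   }
  //
  // LoadRelaxed() and PushBack() are assumed names; the real code also handles full
  // stacks (see ExpandGcMarkStack()).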
  Atomic<MarkStackMode> mark_stack_mode_;
  bool weak_ref_access_enabled_ GUARDED_BY(Locks::thread_list_lock_);

  // How many objects and bytes we moved. Used for accounting.
  Atomic<size_t> bytes_moved_;
  Atomic<size_t> objects_moved_;
  Atomic<uint64_t> cumulative_bytes_moved_;
  Atomic<uint64_t> cumulative_objects_moved_;

  // The skipped blocks are memory blocks (chunks) that were allocated as copy destinations
  // for objects but went unused because a race (a CAS failure) was lost while installing
  // the forwarding pointer. They are kept here so they can be reused.
  Mutex skipped_blocks_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
  std::multimap<size_t, uint8_t*> skipped_blocks_map_ GUARDED_BY(skipped_blocks_lock_);
  Atomic<size_t> to_space_bytes_skipped_;
  Atomic<size_t> to_space_objects_skipped_;
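
  // Illustrative sketch only (see AllocateInSkippedBlock() in the .cc file): reuse is a
  // best-fit lookup in skipped_blocks_map_, and any leftover tail of the block is turned
  // back into a dummy object so the heap stays parsable. Roughly:
  //
  //   MutexLock mu(self, skipped_blocks_lock_);
  //   auto it = skipped_blocks_map_.lower_bound(alloc_size);
  //   if (it == skipped_blocks_map_.end()) {
  //     return nullptr;  // No skipped block is large enough; fall back to a fresh alloc.
  //   }
  //   uint8_t* addr = it->second;
  //   size_t byte_size = it->first;
  //   skipped_blocks_map_.erase(it);
  //   if (byte_size > alloc_size) {
  //     FillWithDummyObject(reinterpret_cast<mirror::Object*>(addr + alloc_size),
  //                         byte_size - alloc_size);
  //   }
  //   return reinterpret_cast<mirror::Object*>(addr);
  //
  // The real code is more careful about alignment and the remainder; this is only the
  // general shape.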

  // If measure_read_barrier_slow_path_ is true, we measure (and log) the time spent in
  // MarkFromReadBarrier.
  bool measure_read_barrier_slow_path_;
  // mark_from_read_barrier_measurements_ is true if systrace is enabled or
  // measure_read_barrier_slow_path_ is true.
  bool mark_from_read_barrier_measurements_;
  Atomic<uint64_t> rb_slow_path_ns_;
  Atomic<uint64_t> rb_slow_path_count_;
  Atomic<uint64_t> rb_slow_path_count_gc_;
  mutable Mutex rb_slow_path_histogram_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
  Histogram<uint64_t> rb_slow_path_time_histogram_ GUARDED_BY(rb_slow_path_histogram_lock_);
  uint64_t rb_slow_path_count_total_ GUARDED_BY(rb_slow_path_histogram_lock_);
  uint64_t rb_slow_path_count_gc_total_ GUARDED_BY(rb_slow_path_histogram_lock_);
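
  // Illustrative sketch only (see MarkFromReadBarrierWithMeasurements() in the .cc file):
  // the measured slow path simply wraps Mark() with a timer and counters; NanoTime() and
  // FetchAndAddRelaxed() are assumed helpers here. Roughly:
  //
  //   rb_slow_path_count_.FetchAndAddRelaxed(1u);
  //   uint64_t start = measure_read_barrier_slow_path_ ? NanoTime() : 0u;
  //   mirror::Object* ret = Mark(from_ref);
  //   if (measure_read_barrier_slow_path_) {
  //     rb_slow_path_ns_.FetchAndAddRelaxed(NanoTime() - start);
  //   }
  //   return ret;
  //
  // The accumulated values are later folded into rb_slow_path_time_histogram_ and the
  // *_total_ counters under rb_slow_path_histogram_lock_; see the
  // REQUIRES(!rb_slow_path_histogram_lock_) annotations on FinishPhase() and
  // DumpPerformanceInfo().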

  accounting::ReadBarrierTable* rb_table_;
  bool force_evacuate_all_;  // True if all regions are evacuated.
  Atomic<bool> updated_all_immune_objects_;
  bool gc_grays_immune_objects_;
  Mutex immune_gray_stack_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
  std::vector<mirror::Object*> immune_gray_stack_ GUARDED_BY(immune_gray_stack_lock_);

  // Class of java.lang.Object. Filled in from WellKnownClasses in FlipCallback. It must be
  // filled in before the thread roots are flipped so that FillWithDummyObject can run. Not
  // an ObjPtr since the GC may transition between suspended and runnable between phases.
  mirror::Class* java_lang_Object_;
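
  // Illustrative sketch only (see FillWithDummyObject() in the .cc file): a reclaimed gap
  // of byte_size bytes is kept heap-parsable by writing a dummy object header into it,
  // roughly:
  //
  //   CHECK_ALIGNED(byte_size, kObjectAlignment);
  //   if (byte_size == java_lang_Object_->GetObjectSize()) {
  //     // The gap is exactly one plain object; a bare java.lang.Object covers it.
  //     dummy_obj->SetClass(java_lang_Object_);
  //   } else {
  //     // Otherwise a primitive array class is used, with the array length chosen so
  //     // that the dummy array spans exactly byte_size bytes.
  //   }
  //
  // GetObjectSize() and SetClass() are assumed helpers; the real code has more checks.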

  class AssertToSpaceInvariantFieldVisitor;
  class AssertToSpaceInvariantObjectVisitor;
  class AssertToSpaceInvariantRefsVisitor;
  class ClearBlackPtrsVisitor;
  class ComputeUnevacFromSpaceLiveRatioVisitor;
  class DisableMarkingCallback;
  class DisableMarkingCheckpoint;
  class DisableWeakRefAccessCallback;
  class FlipCallback;
  class GrayImmuneObjectVisitor;
  class ImmuneSpaceScanObjVisitor;
  class LostCopyVisitor;
  class RefFieldsVisitor;
  class RevokeThreadLocalMarkStackCheckpoint;
  class ScopedGcGraysImmuneObjects;
  class ThreadFlipVisitor;
  class VerifyGrayImmuneObjectsVisitor;
  class VerifyNoFromSpaceRefsFieldVisitor;
  class VerifyNoFromSpaceRefsObjectVisitor;
  class VerifyNoFromSpaceRefsVisitor;
  class VerifyNoMissingCardMarkVisitor;

  DISALLOW_IMPLICIT_CONSTRUCTORS(ConcurrentCopying);
};

}  // namespace collector
}  // namespace gc
}  // namespace art

#endif  // ART_RUNTIME_GC_COLLECTOR_CONCURRENT_COPYING_H_