concurrent_copying.h revision ddeb172eeedb58ab96e074a55a0d1578b5df4110
/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_GC_COLLECTOR_CONCURRENT_COPYING_H_
#define ART_RUNTIME_GC_COLLECTOR_CONCURRENT_COPYING_H_

#include "barrier.h"
#include "garbage_collector.h"
#include "immune_spaces.h"
#include "jni.h"
#include "object_callbacks.h"
#include "offsets.h"
#include "gc/accounting/atomic_stack.h"
#include "gc/accounting/read_barrier_table.h"
#include "gc/accounting/space_bitmap.h"
#include "mirror/object.h"
#include "mirror/object_reference.h"
#include "safe_map.h"

#include <unordered_map>
#include <vector>

namespace art {
class RootInfo;

namespace gc {

namespace accounting {
  typedef SpaceBitmap<kObjectAlignment> ContinuousSpaceBitmap;
  class HeapBitmap;
}  // namespace accounting

namespace space {
  class RegionSpace;
}  // namespace space

namespace collector {

class ConcurrentCopying : public GarbageCollector {
 public:
  // Enable the no-from-space-refs verification at the pause.
  static constexpr bool kEnableNoFromSpaceRefsVerification = kIsDebugBuild;
  // Enable the from-space bytes/objects check.
  static constexpr bool kEnableFromSpaceAccountingCheck = kIsDebugBuild;
  // Enable verbose mode.
  static constexpr bool kVerboseMode = false;

  ConcurrentCopying(Heap* heap, const std::string& name_prefix = "");
  ~ConcurrentCopying();

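  // Collector entry point (overrides GarbageCollector); one collection cycle consists of the
  // phases declared below.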
  virtual void RunPhases() OVERRIDE REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_);
  void InitializePhase() SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!mark_stack_lock_);
  void MarkingPhase() SHARED_REQUIRES(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_);
  void ReclaimPhase() SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!mark_stack_lock_);
  void FinishPhase() REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_);

  void BindBitmaps() SHARED_REQUIRES(Locks::mutator_lock_)
      REQUIRES(!Locks::heap_bitmap_lock_);
  virtual GcType GetGcType() const OVERRIDE {
    return kGcTypePartial;
  }
  virtual CollectorType GetCollectorType() const OVERRIDE {
    return kCollectorTypeCC;
  }
  virtual void RevokeAllThreadLocalBuffers() OVERRIDE;
  void SetRegionSpace(space::RegionSpace* region_space) {
    DCHECK(region_space != nullptr);
    region_space_ = region_space;
  }
  space::RegionSpace* RegionSpace() {
    return region_space_;
  }
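  // Assert the to-space invariant for 'ref', reached either through field 'offset' of 'obj' or
  // through the given GC root.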
  void AssertToSpaceInvariant(mirror::Object* obj, MemberOffset offset, mirror::Object* ref)
      SHARED_REQUIRES(Locks::mutator_lock_);
  void AssertToSpaceInvariant(GcRootSource* gc_root_source, mirror::Object* ref)
      SHARED_REQUIRES(Locks::mutator_lock_);
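  // A reference is in the to-space iff IsMarked() maps it to itself.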
  bool IsInToSpace(mirror::Object* ref) SHARED_REQUIRES(Locks::mutator_lock_) {
    DCHECK(ref != nullptr);
    return IsMarked(ref) == ref;
  }
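  // Map 'from_ref' to its to-space counterpart, marking it (and copying it, for evacuated
  // objects) first if it has not been visited yet.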
  ALWAYS_INLINE mirror::Object* Mark(mirror::Object* from_ref) SHARED_REQUIRES(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_);
  bool IsMarking() const {
    return is_marking_;
  }
  bool IsActive() const {
    return is_active_;
  }
  Barrier& GetBarrier() {
    return *gc_barrier_;
  }
  bool IsWeakRefAccessEnabled() {
    return weak_ref_access_enabled_.LoadRelaxed();
  }
  void RevokeThreadLocalMarkStack(Thread* thread) SHARED_REQUIRES(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_);
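
  // A minimal usage sketch (assuming the Heap owns the collector, as it does for the other ART
  // collectors; GetRegionSpace() is an illustrative accessor name, not necessarily the real
  // Heap API):
  //
  //   ConcurrentCopying* cc = new ConcurrentCopying(heap);
  //   cc->SetRegionSpace(heap->GetRegionSpace());
  //   // Run() is inherited from GarbageCollector and ends up calling RunPhases().
  //   cc->Run(kGcCauseBackground, /* clear_soft_references */ false);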

 private:
  void PushOntoMarkStack(mirror::Object* obj) SHARED_REQUIRES(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_);
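  // Copy 'from_ref' into the to-space and return the copy; the forwarding pointer is installed
  // with a CAS, and on a lost race the winner's copy is returned while the unused block is
  // recorded in skipped_blocks_map_ for reuse (see the comment on that member below).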
  mirror::Object* Copy(mirror::Object* from_ref) SHARED_REQUIRES(Locks::mutator_lock_)
      REQUIRES(!skipped_blocks_lock_, !mark_stack_lock_);
  void Scan(mirror::Object* to_ref) SHARED_REQUIRES(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_);
  void Process(mirror::Object* obj, MemberOffset offset)
      SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_);
  virtual void VisitRoots(mirror::Object*** roots, size_t count, const RootInfo& info)
      OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_);
  void MarkRoot(mirror::CompressedReference<mirror::Object>* root)
      SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_);
  virtual void VisitRoots(mirror::CompressedReference<mirror::Object>** roots, size_t count,
                          const RootInfo& info)
      OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_);
  void VerifyNoFromSpaceReferences() REQUIRES(Locks::mutator_lock_);
  accounting::ObjectStack* GetAllocationStack();
  accounting::ObjectStack* GetLiveStack();
  virtual void ProcessMarkStack() OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_);
  bool ProcessMarkStackOnce() SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!mark_stack_lock_);
  void ProcessMarkStackRef(mirror::Object* to_ref) SHARED_REQUIRES(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_);
  size_t ProcessThreadLocalMarkStacks(bool disable_weak_ref_access)
      SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!mark_stack_lock_);
  void RevokeThreadLocalMarkStacks(bool disable_weak_ref_access)
      SHARED_REQUIRES(Locks::mutator_lock_);
  void SwitchToSharedMarkStackMode() SHARED_REQUIRES(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_);
  void SwitchToGcExclusiveMarkStackMode() SHARED_REQUIRES(Locks::mutator_lock_);
  virtual void DelayReferenceReferent(mirror::Class* klass, mirror::Reference* reference) OVERRIDE
      SHARED_REQUIRES(Locks::mutator_lock_);
  void ProcessReferences(Thread* self) SHARED_REQUIRES(Locks::mutator_lock_);
  virtual mirror::Object* MarkObject(mirror::Object* from_ref) OVERRIDE
      SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_);
  virtual void MarkHeapReference(mirror::HeapReference<mirror::Object>* from_ref) OVERRIDE
      SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_);
  virtual mirror::Object* IsMarked(mirror::Object* from_ref) OVERRIDE
      SHARED_REQUIRES(Locks::mutator_lock_);
  virtual bool IsMarkedHeapReference(mirror::HeapReference<mirror::Object>* field) OVERRIDE
      SHARED_REQUIRES(Locks::mutator_lock_);
  void SweepSystemWeaks(Thread* self)
      SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Locks::heap_bitmap_lock_);
  void Sweep(bool swap_bitmaps)
      SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_, !mark_stack_lock_);
  void SweepLargeObjects(bool swap_bitmaps)
      SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_);
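  // Fill the block at 'dummy_obj' with a dummy object of 'byte_size' bytes so that the space
  // stays walkable after blocks are skipped or only partially reused.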
  void FillWithDummyObject(mirror::Object* dummy_obj, size_t byte_size)
      SHARED_REQUIRES(Locks::mutator_lock_);
  mirror::Object* AllocateInSkippedBlock(size_t alloc_size)
      SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!skipped_blocks_lock_);
  void CheckEmptyMarkStack() SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!mark_stack_lock_);
  void IssueEmptyCheckpoint() SHARED_REQUIRES(Locks::mutator_lock_);
  bool IsOnAllocStack(mirror::Object* ref) SHARED_REQUIRES(Locks::mutator_lock_);
  mirror::Object* GetFwdPtr(mirror::Object* from_ref)
      SHARED_REQUIRES(Locks::mutator_lock_);
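  // The thread-flip pause: flip each thread's roots over to the to-space view of the heap
  // (see ThreadFlipVisitor and FlipCallback declared at the bottom of this class).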
  void FlipThreadRoots() REQUIRES(!Locks::mutator_lock_);
  void SwapStacks() SHARED_REQUIRES(Locks::mutator_lock_);
  void RecordLiveStackFreezeSize(Thread* self);
  void ComputeUnevacFromSpaceLiveRatio();
  void LogFromSpaceRefHolder(mirror::Object* obj, MemberOffset offset)
      SHARED_REQUIRES(Locks::mutator_lock_);
  void AssertToSpaceInvariantInNonMovingSpace(mirror::Object* obj, mirror::Object* ref)
      SHARED_REQUIRES(Locks::mutator_lock_);
  void ReenableWeakRefAccess(Thread* self) SHARED_REQUIRES(Locks::mutator_lock_);
  void DisableMarking() SHARED_REQUIRES(Locks::mutator_lock_);
  void IssueDisableMarkingCheckpoint() SHARED_REQUIRES(Locks::mutator_lock_);
  void ExpandGcMarkStack() SHARED_REQUIRES(Locks::mutator_lock_);
  mirror::Object* MarkNonMoving(mirror::Object* from_ref) SHARED_REQUIRES(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_);
  ALWAYS_INLINE mirror::Object* MarkUnevacFromSpaceRegionOrImmuneSpace(mirror::Object* from_ref,
      accounting::SpaceBitmap<kObjectAlignment>* bitmap)
      SHARED_REQUIRES(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_);
  void PushOntoFalseGrayStack(mirror::Object* obj) SHARED_REQUIRES(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_);
  void ProcessFalseGrayStack() SHARED_REQUIRES(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_);

  space::RegionSpace* region_space_;      // The underlying region space.
  std::unique_ptr<Barrier> gc_barrier_;
  std::unique_ptr<accounting::ObjectStack> gc_mark_stack_;
  std::vector<mirror::Object*> false_gray_stack_ GUARDED_BY(mark_stack_lock_);
  Mutex mark_stack_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
  std::vector<accounting::ObjectStack*> revoked_mark_stacks_
      GUARDED_BY(mark_stack_lock_);
  static constexpr size_t kMarkStackSize = kPageSize;
  static constexpr size_t kMarkStackPoolSize = 256;
  std::vector<accounting::ObjectStack*> pooled_mark_stacks_
      GUARDED_BY(mark_stack_lock_);
  Thread* thread_running_gc_;
  bool is_marking_;                       // True while marking is ongoing.
  bool is_active_;                        // True while the collection is ongoing.
  bool is_asserting_to_space_invariant_;  // True while asserting the to-space invariant.
  ImmuneSpaces immune_spaces_;
  std::unique_ptr<accounting::HeapBitmap> cc_heap_bitmap_;
  std::vector<accounting::SpaceBitmap<kObjectAlignment>*> cc_bitmaps_;
  accounting::SpaceBitmap<kObjectAlignment>* region_space_bitmap_;
  // A cache of Heap::GetMarkBitmap().
  accounting::HeapBitmap* heap_mark_bitmap_;
  size_t live_stack_freeze_size_;
  size_t from_space_num_objects_at_first_pause_;
  size_t from_space_num_bytes_at_first_pause_;
  Atomic<int> is_mark_stack_push_disallowed_;
  enum MarkStackMode {
    kMarkStackModeOff = 0,      // Mark stack is off.
    kMarkStackModeThreadLocal,  // All threads except for the GC-running thread push refs onto
                                // thread-local mark stacks. The GC-running thread pushes onto and
                                // pops off the GC mark stack without a lock.
    kMarkStackModeShared,       // All threads share the GC mark stack with a lock.
    kMarkStackModeGcExclusive   // The GC-running thread pushes onto and pops from the GC mark stack
                                // without a lock. Other threads won't access the mark stack.
  };
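  // Over a collection cycle the mode is expected to progress from thread-local to shared to
  // GC-exclusive and finally to off (see SwitchToSharedMarkStackMode() and
  // SwitchToGcExclusiveMarkStackMode() above); the current mode is stored atomically below.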
  Atomic<MarkStackMode> mark_stack_mode_;
  Atomic<bool> weak_ref_access_enabled_;

  // How many objects and bytes we moved. Used for accounting.
  Atomic<size_t> bytes_moved_;
  Atomic<size_t> objects_moved_;

  // The skipped blocks are memory blocks (chunks) that held copies of objects but went unused
  // because of lost races (CAS failures) when installing the forwarding pointer during object
  // copying. They are kept here for reuse.
  Mutex skipped_blocks_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
  std::multimap<size_t, uint8_t*> skipped_blocks_map_ GUARDED_BY(skipped_blocks_lock_);
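  // A minimal sketch of the intended reuse in AllocateInSkippedBlock() (assuming a best-fit
  // lookup keyed by block size; the actual policy lives in the .cc file):
  //   MutexLock mu(Thread::Current(), skipped_blocks_lock_);
  //   auto it = skipped_blocks_map_.lower_bound(alloc_size);
  //   if (it != skipped_blocks_map_.end()) { /* reuse it->second, then erase(it) */ }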
  Atomic<size_t> to_space_bytes_skipped_;
  Atomic<size_t> to_space_objects_skipped_;

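  // The heap's read barrier table (presumably cached from the Heap at construction time).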
  accounting::ReadBarrierTable* rb_table_;
  bool force_evacuate_all_;  // True if all regions are evacuated.

  class AssertToSpaceInvariantFieldVisitor;
  class AssertToSpaceInvariantObjectVisitor;
  class AssertToSpaceInvariantRefsVisitor;
  class ClearBlackPtrsVisitor;
  class ComputeUnevacFromSpaceLiveRatioVisitor;
  class DisableMarkingCheckpoint;
  class FlipCallback;
  class ImmuneSpaceObjVisitor;
  class LostCopyVisitor;
  class RefFieldsVisitor;
  class RevokeThreadLocalMarkStackCheckpoint;
  class VerifyNoFromSpaceRefsFieldVisitor;
  class VerifyNoFromSpaceRefsObjectVisitor;
  class VerifyNoFromSpaceRefsVisitor;
  class ThreadFlipVisitor;

  DISALLOW_IMPLICIT_CONSTRUCTORS(ConcurrentCopying);
};

}  // namespace collector
}  // namespace gc
}  // namespace art

#endif  // ART_RUNTIME_GC_COLLECTOR_CONCURRENT_COPYING_H_