// concurrent_copying.h revision da7c650022a974be10e2f00fa07d5109e3d8826f
1/* 2 * Copyright (C) 2014 The Android Open Source Project 3 * 4 * Licensed under the Apache License, Version 2.0 (the "License"); 5 * you may not use this file except in compliance with the License. 6 * You may obtain a copy of the License at 7 * 8 * http://www.apache.org/licenses/LICENSE-2.0 9 * 10 * Unless required by applicable law or agreed to in writing, software 11 * distributed under the License is distributed on an "AS IS" BASIS, 12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 * See the License for the specific language governing permissions and 14 * limitations under the License. 15 */ 16 17#ifndef ART_RUNTIME_GC_COLLECTOR_CONCURRENT_COPYING_H_ 18#define ART_RUNTIME_GC_COLLECTOR_CONCURRENT_COPYING_H_ 19 20#include "barrier.h" 21#include "garbage_collector.h" 22#include "immune_region.h" 23#include "jni.h" 24#include "object_callbacks.h" 25#include "offsets.h" 26#include "gc/accounting/atomic_stack.h" 27#include "gc/accounting/read_barrier_table.h" 28#include "gc/accounting/space_bitmap.h" 29#include "mirror/object.h" 30#include "mirror/object_reference.h" 31#include "safe_map.h" 32 33#include <unordered_map> 34#include <vector> 35 36namespace art { 37class RootInfo; 38 39namespace gc { 40 41namespace accounting { 42 typedef SpaceBitmap<kObjectAlignment> ContinuousSpaceBitmap; 43 class HeapBitmap; 44} // namespace accounting 45 46namespace space { 47 class RegionSpace; 48} // namespace space 49 50namespace collector { 51 52class ConcurrentCopying : public GarbageCollector { 53 public: 54 // TODO: disable thse flags for production use. 55 // Enable the no-from-space-refs verification at the pause. 56 static constexpr bool kEnableNoFromSpaceRefsVerification = true; 57 // Enable the from-space bytes/objects check. 58 static constexpr bool kEnableFromSpaceAccountingCheck = true; 59 // Enable verbose mode. 
60 static constexpr bool kVerboseMode = true; 61 62 ConcurrentCopying(Heap* heap, const std::string& name_prefix = ""); 63 ~ConcurrentCopying(); 64 65 virtual void RunPhases() OVERRIDE REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_); 66 void InitializePhase() SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!mark_stack_lock_); 67 void MarkingPhase() SHARED_REQUIRES(Locks::mutator_lock_) 68 REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_); 69 void ReclaimPhase() SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!mark_stack_lock_); 70 void FinishPhase() REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_); 71 72 void BindBitmaps() SHARED_REQUIRES(Locks::mutator_lock_) 73 REQUIRES(!Locks::heap_bitmap_lock_); 74 virtual GcType GetGcType() const OVERRIDE { 75 return kGcTypePartial; 76 } 77 virtual CollectorType GetCollectorType() const OVERRIDE { 78 return kCollectorTypeCC; 79 } 80 virtual void RevokeAllThreadLocalBuffers() OVERRIDE; 81 void SetRegionSpace(space::RegionSpace* region_space) { 82 DCHECK(region_space != nullptr); 83 region_space_ = region_space; 84 } 85 space::RegionSpace* RegionSpace() { 86 return region_space_; 87 } 88 void AssertToSpaceInvariant(mirror::Object* obj, MemberOffset offset, mirror::Object* ref) 89 SHARED_REQUIRES(Locks::mutator_lock_); 90 void AssertToSpaceInvariant(GcRootSource* gc_root_source, mirror::Object* ref) 91 SHARED_REQUIRES(Locks::mutator_lock_); 92 bool IsInToSpace(mirror::Object* ref) SHARED_REQUIRES(Locks::mutator_lock_) { 93 DCHECK(ref != nullptr); 94 return IsMarked(ref) == ref; 95 } 96 mirror::Object* Mark(mirror::Object* from_ref) SHARED_REQUIRES(Locks::mutator_lock_) 97 REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_); 98 bool IsMarking() const { 99 return is_marking_; 100 } 101 bool IsActive() const { 102 return is_active_; 103 } 104 Barrier& GetBarrier() { 105 return *gc_barrier_; 106 } 107 bool IsWeakRefAccessEnabled() { 108 return weak_ref_access_enabled_.LoadRelaxed(); 109 } 110 void 
RevokeThreadLocalMarkStack(Thread* thread) SHARED_REQUIRES(Locks::mutator_lock_) 111 REQUIRES(!mark_stack_lock_); 112 113 private: 114 void PushOntoMarkStack(mirror::Object* obj) SHARED_REQUIRES(Locks::mutator_lock_) 115 REQUIRES(!mark_stack_lock_); 116 mirror::Object* Copy(mirror::Object* from_ref) SHARED_REQUIRES(Locks::mutator_lock_) 117 REQUIRES(!skipped_blocks_lock_, !mark_stack_lock_); 118 void Scan(mirror::Object* to_ref) SHARED_REQUIRES(Locks::mutator_lock_) 119 REQUIRES(!mark_stack_lock_); 120 void Process(mirror::Object* obj, MemberOffset offset) 121 SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!mark_stack_lock_ , !skipped_blocks_lock_); 122 virtual void VisitRoots(mirror::Object*** roots, size_t count, const RootInfo& info) 123 OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) 124 REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_); 125 void MarkRoot(mirror::CompressedReference<mirror::Object>* root) 126 SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_); 127 virtual void VisitRoots(mirror::CompressedReference<mirror::Object>** roots, size_t count, 128 const RootInfo& info) 129 OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) 130 REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_); 131 void VerifyNoFromSpaceReferences() REQUIRES(Locks::mutator_lock_); 132 accounting::ObjectStack* GetAllocationStack(); 133 accounting::ObjectStack* GetLiveStack(); 134 virtual void ProcessMarkStack() OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) 135 REQUIRES(!mark_stack_lock_); 136 bool ProcessMarkStackOnce() SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!mark_stack_lock_); 137 void ProcessMarkStackRef(mirror::Object* to_ref) SHARED_REQUIRES(Locks::mutator_lock_) 138 REQUIRES(!mark_stack_lock_); 139 size_t ProcessThreadLocalMarkStacks(bool disable_weak_ref_access) 140 SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!mark_stack_lock_); 141 void RevokeThreadLocalMarkStacks(bool disable_weak_ref_access) 142 
SHARED_REQUIRES(Locks::mutator_lock_); 143 void SwitchToSharedMarkStackMode() SHARED_REQUIRES(Locks::mutator_lock_) 144 REQUIRES(!mark_stack_lock_); 145 void SwitchToGcExclusiveMarkStackMode() SHARED_REQUIRES(Locks::mutator_lock_); 146 virtual void DelayReferenceReferent(mirror::Class* klass, mirror::Reference* reference) OVERRIDE 147 SHARED_REQUIRES(Locks::mutator_lock_); 148 void ProcessReferences(Thread* self) SHARED_REQUIRES(Locks::mutator_lock_); 149 virtual mirror::Object* MarkObject(mirror::Object* from_ref) OVERRIDE 150 SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_); 151 virtual void MarkHeapReference(mirror::HeapReference<mirror::Object>* from_ref) OVERRIDE 152 SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_); 153 virtual mirror::Object* IsMarked(mirror::Object* from_ref) OVERRIDE 154 SHARED_REQUIRES(Locks::mutator_lock_); 155 virtual bool IsMarkedHeapReference(mirror::HeapReference<mirror::Object>* field) OVERRIDE 156 SHARED_REQUIRES(Locks::mutator_lock_); 157 void SweepSystemWeaks(Thread* self) 158 SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Locks::heap_bitmap_lock_); 159 void Sweep(bool swap_bitmaps) 160 SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_, !mark_stack_lock_); 161 void SweepLargeObjects(bool swap_bitmaps) 162 SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_); 163 void ClearBlackPtrs() 164 SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_); 165 void FillWithDummyObject(mirror::Object* dummy_obj, size_t byte_size) 166 SHARED_REQUIRES(Locks::mutator_lock_); 167 mirror::Object* AllocateInSkippedBlock(size_t alloc_size) 168 SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!skipped_blocks_lock_); 169 void CheckEmptyMarkStack() SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!mark_stack_lock_); 170 void IssueEmptyCheckpoint() SHARED_REQUIRES(Locks::mutator_lock_); 171 bool 
IsOnAllocStack(mirror::Object* ref) SHARED_REQUIRES(Locks::mutator_lock_); 172 mirror::Object* GetFwdPtr(mirror::Object* from_ref) 173 SHARED_REQUIRES(Locks::mutator_lock_); 174 void FlipThreadRoots() REQUIRES(!Locks::mutator_lock_); 175 void SwapStacks(Thread* self) SHARED_REQUIRES(Locks::mutator_lock_); 176 void RecordLiveStackFreezeSize(Thread* self); 177 void ComputeUnevacFromSpaceLiveRatio(); 178 void LogFromSpaceRefHolder(mirror::Object* obj, MemberOffset offset) 179 SHARED_REQUIRES(Locks::mutator_lock_); 180 void AssertToSpaceInvariantInNonMovingSpace(mirror::Object* obj, mirror::Object* ref) 181 SHARED_REQUIRES(Locks::mutator_lock_); 182 void ReenableWeakRefAccess(Thread* self) SHARED_REQUIRES(Locks::mutator_lock_); 183 184 space::RegionSpace* region_space_; // The underlying region space. 185 std::unique_ptr<Barrier> gc_barrier_; 186 std::unique_ptr<accounting::ObjectStack> gc_mark_stack_; 187 Mutex mark_stack_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER; 188 std::vector<accounting::ObjectStack*> revoked_mark_stacks_ 189 GUARDED_BY(mark_stack_lock_); 190 static constexpr size_t kMarkStackSize = kPageSize; 191 static constexpr size_t kMarkStackPoolSize = 256; 192 std::vector<accounting::ObjectStack*> pooled_mark_stacks_ 193 GUARDED_BY(mark_stack_lock_); 194 Thread* thread_running_gc_; 195 bool is_marking_; // True while marking is ongoing. 196 bool is_active_; // True while the collection is ongoing. 197 bool is_asserting_to_space_invariant_; // True while asserting the to-space invariant. 198 ImmuneRegion immune_region_; 199 std::unique_ptr<accounting::HeapBitmap> cc_heap_bitmap_; 200 std::vector<accounting::SpaceBitmap<kObjectAlignment>*> cc_bitmaps_; 201 accounting::SpaceBitmap<kObjectAlignment>* region_space_bitmap_; 202 // A cache of Heap::GetMarkBitmap(). 
203 accounting::HeapBitmap* heap_mark_bitmap_; 204 size_t live_stack_freeze_size_; 205 size_t from_space_num_objects_at_first_pause_; 206 size_t from_space_num_bytes_at_first_pause_; 207 Atomic<int> is_mark_stack_push_disallowed_; 208 enum MarkStackMode { 209 kMarkStackModeOff = 0, // Mark stack is off. 210 kMarkStackModeThreadLocal, // All threads except for the GC-running thread push refs onto 211 // thread-local mark stacks. The GC-running thread pushes onto and 212 // pops off the GC mark stack without a lock. 213 kMarkStackModeShared, // All threads share the GC mark stack with a lock. 214 kMarkStackModeGcExclusive // The GC-running thread pushes onto and pops from the GC mark stack 215 // without a lock. Other threads won't access the mark stack. 216 }; 217 Atomic<MarkStackMode> mark_stack_mode_; 218 Atomic<bool> weak_ref_access_enabled_; 219 220 // How many objects and bytes we moved. Used for accounting. 221 Atomic<size_t> bytes_moved_; 222 Atomic<size_t> objects_moved_; 223 224 // The skipped blocks are memory blocks/chucks that were copies of 225 // objects that were unused due to lost races (cas failures) at 226 // object copy/forward pointer install. They are reused. 227 Mutex skipped_blocks_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER; 228 std::multimap<size_t, uint8_t*> skipped_blocks_map_ GUARDED_BY(skipped_blocks_lock_); 229 Atomic<size_t> to_space_bytes_skipped_; 230 Atomic<size_t> to_space_objects_skipped_; 231 232 accounting::ReadBarrierTable* rb_table_; 233 bool force_evacuate_all_; // True if all regions are evacuated. 
234 235 friend class ConcurrentCopyingRefFieldsVisitor; 236 friend class ConcurrentCopyingImmuneSpaceObjVisitor; 237 friend class ConcurrentCopyingVerifyNoFromSpaceRefsVisitor; 238 friend class ConcurrentCopyingVerifyNoFromSpaceRefsObjectVisitor; 239 friend class ConcurrentCopyingClearBlackPtrsVisitor; 240 friend class ConcurrentCopyingLostCopyVisitor; 241 friend class ThreadFlipVisitor; 242 friend class FlipCallback; 243 friend class ConcurrentCopyingComputeUnevacFromSpaceLiveRatioVisitor; 244 friend class RevokeThreadLocalMarkStackCheckpoint; 245 246 DISALLOW_IMPLICIT_CONSTRUCTORS(ConcurrentCopying); 247}; 248 249} // namespace collector 250} // namespace gc 251} // namespace art 252 253#endif // ART_RUNTIME_GC_COLLECTOR_CONCURRENT_COPYING_H_ 254