// mark_sweep.h, revision a8e8f9c0a8e259a807d7b99a148d14104c24209d

/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_GC_COLLECTOR_MARK_SWEEP_H_
#define ART_RUNTIME_GC_COLLECTOR_MARK_SWEEP_H_

#include "atomic.h"
#include "barrier.h"
#include "base/macros.h"
#include "base/mutex.h"
#include "garbage_collector.h"
#include "gc/accounting/space_bitmap.h"
#include "immune_region.h"
#include "object_callbacks.h"
#include "offsets.h"
#include "UniquePtr.h"

namespace art {

namespace mirror {
  class Class;
  class Object;
  class Reference;
}  // namespace mirror

class Thread;
enum VisitRootFlags : uint8_t;

namespace gc {

class Heap;

namespace accounting {
  template<typename T> class AtomicStack;
  typedef AtomicStack<mirror::Object*> ObjectStack;
}  // namespace accounting

namespace collector {

class MarkSweep : public GarbageCollector {
 public:
  explicit MarkSweep(Heap* heap, bool is_concurrent, const std::string& name_prefix = "");

  ~MarkSweep() {}

  virtual void InitializePhase() OVERRIDE;
  virtual void MarkingPhase() OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  virtual void PausePhase() OVERRIDE EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
  virtual void ReclaimPhase() OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  virtual void FinishPhase() OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  virtual void MarkReachableObjects()
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);

  bool IsConcurrent() const {
    return is_concurrent_;
  }

  virtual GcType GetGcType() const OVERRIDE {
    return kGcTypeFull;
  }

  virtual CollectorType GetCollectorType() const OVERRIDE {
    return is_concurrent_ ? kCollectorTypeCMS : kCollectorTypeMS;
  }

  // Initializes internal structures.
  void Init();

  // Find the default mark bitmap.
  void FindDefaultSpaceBitmap();

  // Marks all objects in the root set at the start of a garbage collection.
  void MarkRoots(Thread* self)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  void MarkNonThreadRoots()
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  void MarkConcurrentRoots(VisitRootFlags flags)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  void MarkRootsCheckpoint(Thread* self, bool revoke_ros_alloc_thread_local_buffers_at_checkpoint)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Builds a mark stack and recursively marks until it empties.
  void RecursiveMark()
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
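
  // Note: a full mark pass first seeds the mark stack from the roots
  // (MarkRoots, MarkNonThreadRoots, MarkConcurrentRoots), then RecursiveMark
  // drains the stack, blackening each popped object and pushing its unmarked
  // references until the stack empties.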

  // Binds the live bits to the mark bits of bitmaps for spaces that are never collected, i.e.
  // the image. Marks that portion of the heap as immune.
  virtual void BindBitmaps() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Builds a mark stack with objects on dirty cards and recursively marks until it empties.
  void RecursiveMarkDirtyObjects(bool paused, byte minimum_age)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Remarks the root set after completing the concurrent mark.
  void ReMarkRoots()
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  void ProcessReferences(Thread* self)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  void PreProcessReferences()
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Updates and marks references from immune spaces.
  void UpdateAndMarkModUnion()
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Pre-cleans cards to reduce how much work is needed in the pause.
  void PreCleanCards()
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Sweeps unmarked objects to complete the garbage collection. Virtual as by default it sweeps
  // all allocation spaces; partial and sticky GCs want to sweep only a subset of the heap.
  virtual void Sweep(bool swap_bitmaps) EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);

  // Sweeps unmarked large objects to complete the garbage collection.
  void SweepLargeObjects(bool swap_bitmaps) EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);

  // Sweep only pointers within an array. WARNING: Trashes objects.
  void SweepArray(accounting::ObjectStack* allocation_stack_, bool swap_bitmaps)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);

  // Blackens an object.
  void ScanObject(mirror::Object* obj)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // No thread safety analysis due to lambdas.
  template<typename MarkVisitor, typename ReferenceVisitor>
  void ScanObjectVisit(mirror::Object* obj, const MarkVisitor& visitor,
                       const ReferenceVisitor& ref_visitor)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);

  void SweepSystemWeaks(Thread* self)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) LOCKS_EXCLUDED(Locks::heap_bitmap_lock_);

  static mirror::Object* VerifySystemWeakIsLiveCallback(mirror::Object* obj, void* arg)
      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);

  void VerifySystemWeaks()
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
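
  // Note: system weaks are weak pointers held by the runtime itself, the
  // intern table for example. SweepSystemWeaks clears entries whose referents
  // were not marked, and VerifySystemWeaks checks that every remaining
  // referent is still live.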

  // Verify that an object is live, either in a live bitmap or in the allocation stack.
  void VerifyIsLive(const mirror::Object* obj)
      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);

  static mirror::Object* MarkObjectCallback(mirror::Object* obj, void* arg)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);

  static void MarkHeapReferenceCallback(mirror::HeapReference<mirror::Object>* ref, void* arg)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);

  static void MarkRootCallback(mirror::Object** root, void* arg, uint32_t thread_id,
                               RootType root_type)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);

  static void VerifyRootMarked(mirror::Object** root, void* arg, uint32_t /*thread_id*/,
                               RootType /*root_type*/)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);

  static void ProcessMarkStackPausedCallback(void* arg)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);

  static void MarkRootParallelCallback(mirror::Object** root, void* arg, uint32_t thread_id,
                                       RootType root_type)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Marks an object.
  void MarkObject(mirror::Object* obj)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);

  Barrier& GetBarrier() {
    return *gc_barrier_;
  }

  // Schedules an unmarked object for reference processing.
  void DelayReferenceReferent(mirror::Class* klass, mirror::Reference* reference)
      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);

 protected:
  // Returns true if the object has its bit set in the mark bitmap.
  bool IsMarked(const mirror::Object* object) const;

  static mirror::Object* IsMarkedCallback(mirror::Object* object, void* arg)
      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);

  static void VerifyImageRootVisitor(mirror::Object* root, void* arg)
      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);

  void MarkObjectNonNull(mirror::Object* obj)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);

  // Marks an object atomically, safe to use from multiple threads.
  void MarkObjectNonNullParallel(mirror::Object* obj);

  // Marks or unmarks a large object based on whether or not set is true. If set is true, then we
  // mark, otherwise we unmark.
  bool MarkLargeObject(const mirror::Object* obj, bool set)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) LOCKS_EXCLUDED(large_object_lock_);

  // Returns true if we need to add obj to a mark stack.
  bool MarkObjectParallel(const mirror::Object* obj) NO_THREAD_SAFETY_ANALYSIS;

  // Verifies the roots of the heap and prints out information related to any invalid roots.
  // Called in MarkObject, so we may not hold the mutator lock.
  void VerifyRoots()
      NO_THREAD_SAFETY_ANALYSIS;

  // Expands the mark stack to 2x its current size.
  void ExpandMarkStack() EXCLUSIVE_LOCKS_REQUIRED(mark_stack_lock_);
  void ResizeMarkStack(size_t new_size) EXCLUSIVE_LOCKS_REQUIRED(mark_stack_lock_);
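
  // Note: both resizes require mark_stack_lock_ because parallel marking
  // threads may be pushing onto the mark stack concurrently; ExpandMarkStack
  // is simply ResizeMarkStack with twice the current size.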

  // Returns how many threads we should use for the current GC phase, based on whether we are
  // paused and thus how much we care about pause times.
  size_t GetThreadCount(bool paused) const;

  static void VerifyRootCallback(const mirror::Object* root, void* arg, size_t vreg,
                                 const StackVisitor* visitor, RootType root_type);

  void VerifyRoot(const mirror::Object* root, size_t vreg, const StackVisitor* visitor,
                  RootType root_type) NO_THREAD_SAFETY_ANALYSIS;

  // Pushes a single reference on a mark stack.
  void PushOnMarkStack(mirror::Object* obj);

  // Blackens objects grayed during a garbage collection.
  void ScanGrayObjects(bool paused, byte minimum_age)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Recursively blackens objects on the mark stack.
  void ProcessMarkStack(bool paused)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  void ProcessMarkStackParallel(size_t thread_count)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Used to get around thread safety annotations. The call is from MarkingPhase and is guarded by
  // IsExclusiveHeld.
  void RevokeAllThreadLocalAllocationStacks(Thread* self) NO_THREAD_SAFETY_ANALYSIS;

  // Revokes all the thread-local buffers.
  void RevokeAllThreadLocalBuffers();

  // Whether or not we count how many of each type of object were scanned.
  static const bool kCountScannedTypes = false;

  // Current space; we check this space first to avoid searching for the appropriate space for an
  // object.
  accounting::ContinuousSpaceBitmap* current_space_bitmap_;
  // Cache the heap's mark bitmap to prevent having to do 2 loads during slow path marking.
  accounting::HeapBitmap* mark_bitmap_;

  accounting::ObjectStack* mark_stack_;

  // Immune region; every object inside the immune range is assumed to be marked.
  ImmuneRegion immune_region_;

  // Parallel finger.
  AtomicInteger atomic_finger_;
  // Number of classes scanned, if kCountScannedTypes.
  AtomicInteger class_count_;
  // Number of arrays scanned, if kCountScannedTypes.
  AtomicInteger array_count_;
  // Number of non-class/array objects scanned, if kCountScannedTypes.
  AtomicInteger other_count_;
  AtomicInteger large_object_test_;
  AtomicInteger large_object_mark_;
  AtomicInteger overhead_time_;
  AtomicInteger work_chunks_created_;
  AtomicInteger work_chunks_deleted_;
  AtomicInteger reference_count_;
  AtomicInteger mark_null_count_;
  AtomicInteger mark_immune_count_;
  AtomicInteger mark_fastpath_count_;
  AtomicInteger mark_slowpath_count_;

  // Verification.
  size_t live_stack_freeze_size_;

  UniquePtr<Barrier> gc_barrier_;
  Mutex large_object_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
  Mutex mark_stack_lock_ ACQUIRED_AFTER(Locks::classlinker_classes_lock_);

  const bool is_concurrent_;

 private:
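  // The visitor, task, and mod-union table classes below reach directly into
  // MarkSweep internals (mark stack, bitmaps, counters), hence the friend
  // declarations.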
  friend class AddIfReachesAllocSpaceVisitor;  // Used by mod-union table.
  friend class CardScanTask;
  friend class CheckBitmapVisitor;
  friend class CheckReferenceVisitor;
  friend class art::gc::Heap;
  friend class InternTableEntryIsUnmarked;
  friend class MarkIfReachesAllocspaceVisitor;
  friend class MarkObjectVisitor;
  friend class ModUnionCheckReferences;
  friend class ModUnionClearCardVisitor;
  friend class ModUnionReferenceVisitor;
  friend class ModUnionVisitor;
  friend class ModUnionTableBitmap;
  friend class ModUnionTableReferenceCache;
  friend class ModUnionScanImageRootVisitor;
  friend class ScanBitmapVisitor;
  friend class ScanImageRootVisitor;
  template<bool kUseFinger> friend class MarkStackTask;
  friend class FifoMarkStackChunk;

  DISALLOW_COPY_AND_ASSIGN(MarkSweep);
};

}  // namespace collector
}  // namespace gc
}  // namespace art

#endif  // ART_RUNTIME_GC_COLLECTOR_MARK_SWEEP_H_