mark_sweep.h revision 4aeec176eaf11fe03f342aadcbb79142230270ed
/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_GC_COLLECTOR_MARK_SWEEP_H_
#define ART_RUNTIME_GC_COLLECTOR_MARK_SWEEP_H_

#include "atomic.h"
#include "barrier.h"
#include "base/macros.h"
#include "base/mutex.h"
#include "garbage_collector.h"
#include "immune_region.h"
#include "object_callbacks.h"
#include "offsets.h"
#include "UniquePtr.h"

namespace art {

namespace mirror {
  class Class;
  class Object;
  class Reference;
}  // namespace mirror

class Thread;
enum VisitRootFlags : uint8_t;

namespace gc {

class Heap;

namespace accounting {
  template<typename T> class AtomicStack;
  typedef AtomicStack<mirror::Object*> ObjectStack;
  class SpaceBitmap;
}  // namespace accounting

namespace collector {

class MarkSweep : public GarbageCollector {
 public:
  explicit MarkSweep(Heap* heap, bool is_concurrent, const std::string& name_prefix = "");

  ~MarkSweep() {}

  virtual void InitializePhase() OVERRIDE;
  virtual void MarkingPhase() OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  virtual void HandleDirtyObjectsPhase() OVERRIDE EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
  virtual void ReclaimPhase() OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  virtual void FinishPhase() OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  virtual void MarkReachableObjects()
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
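
  // Illustrative only, not part of this header's contract: the phase methods above are driven
  // by the GarbageCollector run loop, and for a concurrent collection the expected shape is
  // roughly the following (pauses and exact sequencing are owned by the caller, not this class).
  //
  //   InitializePhase();          // Reset state, find the mark bitmap, bind immune bitmaps.
  //   MarkingPhase();             // Mostly concurrent: mark roots, then trace from them.
  //   HandleDirtyObjectsPhase();  // Paused: re-mark roots and objects on dirty cards.
  //   ReclaimPhase();             // Sweep unmarked objects and swap mark/live bitmaps.
  //   FinishPhase();              // Publish statistics and reset for the next collection.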

  bool IsConcurrent() const {
    return is_concurrent_;
  }

  virtual GcType GetGcType() const OVERRIDE {
    return kGcTypeFull;
  }

  virtual CollectorType GetCollectorType() const OVERRIDE {
    return is_concurrent_ ? kCollectorTypeCMS : kCollectorTypeMS;
  }

  // Initializes internal structures.
  void Init();

  // Find the default mark bitmap.
  void FindDefaultSpaceBitmap();

  // Marks all objects in the root set at the start of a garbage collection.
  void MarkRoots(Thread* self)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  void MarkNonThreadRoots()
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  void MarkConcurrentRoots(VisitRootFlags flags)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  void MarkRootsCheckpoint(Thread* self)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Builds a mark stack and recursively marks until it empties.
  void RecursiveMark()
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Bind the live bits to the mark bits of bitmaps for spaces that are never collected, i.e.
  // the image. Mark that portion of the heap as immune.
  virtual void BindBitmaps() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Builds a mark stack with objects on dirty cards and recursively marks until it empties.
  void RecursiveMarkDirtyObjects(bool paused, byte minimum_age)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Remarks the root set after completing the concurrent mark.
  void ReMarkRoots()
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  void ProcessReferences(Thread* self)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  void PreProcessReferences()
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Update and mark references from immune spaces.
  void UpdateAndMarkModUnion()
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Pre-clean cards to reduce how much work is needed in the pause.
  void PreCleanCards()
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Sweeps unmarked objects to complete the garbage collection. Virtual because by default it
  // sweeps all allocation spaces; partial and sticky GCs want to sweep only a subset of the heap.
  virtual void Sweep(bool swap_bitmaps) EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);

  // Sweeps unmarked large objects to complete the garbage collection.
  void SweepLargeObjects(bool swap_bitmaps) EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);

  // Sweep only pointers within an array. WARNING: Trashes objects.
  void SweepArray(accounting::ObjectStack* allocation_stack_, bool swap_bitmaps)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);

  // Blackens an object.
  void ScanObject(mirror::Object* obj)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // No thread safety analysis due to lambdas.
  template<typename MarkVisitor, typename ReferenceVisitor>
  void ScanObjectVisit(mirror::Object* obj, const MarkVisitor& visitor,
                       const ReferenceVisitor& ref_visitor)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);

  void SweepSystemWeaks()
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_);

  static mirror::Object* VerifySystemWeakIsLiveCallback(mirror::Object* obj, void* arg)
      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);

  void VerifySystemWeaks()
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
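
  // Illustrative only: the system-weak machinery above is callback based. A holder of weak
  // references can decide whether to clear an entry by asking whether the referent survived,
  // along the lines of the hypothetical loop below (weak_table and WeakEntry are invented for
  // illustration). IsMarkedCallback, declared further down in this header, returns the object
  // itself if it is marked, or nullptr if it is garbage (mark-sweep does not move objects).
  //
  //   for (WeakEntry& entry : weak_table) {
  //     mirror::Object* survivor = IsMarkedCallback(entry.object, mark_sweep);
  //     if (survivor == nullptr) {
  //       entry.Clear();  // The referent is unreachable; drop the weak reference.
  //     }
  //   }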

  // Verify that an object is live, either in a live bitmap or in the allocation stack.
  void VerifyIsLive(const mirror::Object* obj)
      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);

  static mirror::Object* MarkObjectCallback(mirror::Object* obj, void* arg)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);

  static void MarkHeapReferenceCallback(mirror::HeapReference<mirror::Object>* ref, void* arg)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);

  static void MarkRootCallback(mirror::Object** root, void* arg, uint32_t thread_id,
                               RootType root_type)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);

  static void VerifyRootMarked(mirror::Object** root, void* arg, uint32_t /*thread_id*/,
                               RootType /*root_type*/)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);

  static void ProcessMarkStackPausedCallback(void* arg)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);

  static void MarkRootParallelCallback(mirror::Object** root, void* arg, uint32_t thread_id,
                                       RootType root_type)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Marks an object.
  void MarkObject(mirror::Object* obj)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);

  Barrier& GetBarrier() {
    return *gc_barrier_;
  }

  // Schedules an unmarked object for reference processing.
  void DelayReferenceReferent(mirror::Class* klass, mirror::Reference* reference)
      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);

 protected:
  // Returns true if the object has its bit set in the mark bitmap.
  bool IsMarked(const mirror::Object* object) const;

  static mirror::Object* IsMarkedCallback(mirror::Object* object, void* arg)
      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);

  static void VerifyImageRootVisitor(mirror::Object* root, void* arg)
      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);

  void MarkObjectNonNull(mirror::Object* obj)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);

  // Unmarks an object by clearing the bit inside of the corresponding bitmap, or, if it is in a
  // space set, removing the object from the set.
  void UnMarkObjectNonNull(const mirror::Object* obj)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);

  // Mark the VM thread roots.
  void MarkThreadRoots(Thread* self)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Marks an object atomically, safe to use from multiple threads.
  void MarkObjectNonNullParallel(mirror::Object* obj);

  // Marks or unmarks a large object: if set is true we mark it, otherwise we unmark it.
  bool MarkLargeObject(const mirror::Object* obj, bool set)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) LOCKS_EXCLUDED(large_object_lock_);

  // Returns true if we need to add obj to a mark stack.
  bool MarkObjectParallel(const mirror::Object* obj) NO_THREAD_SAFETY_ANALYSIS;
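
  // Illustrative only, assuming an atomic test-and-set operation on the mark bitmap: the
  // parallel marking path above avoids duplicate work by letting exactly one worker win the
  // race to mark each object, roughly:
  //
  //   if (!bitmap->AtomicTestAndSet(obj)) {  // False means the mark bit was previously clear.
  //     PushOnMarkStack(obj);                // We won the race; this worker scans obj later.
  //   }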

  // Verify the roots of the heap and print out information related to any invalid roots.
  // Called in MarkObject, so we may not hold the mutator lock.
  void VerifyRoots()
      NO_THREAD_SAFETY_ANALYSIS;

  // Expand the mark stack to 2x its current size.
  void ExpandMarkStack() EXCLUSIVE_LOCKS_REQUIRED(mark_stack_lock_);
  void ResizeMarkStack(size_t new_size) EXCLUSIVE_LOCKS_REQUIRED(mark_stack_lock_);
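
  // Illustrative only, assuming the mark stack exposes its capacity: per the comment above,
  // ExpandMarkStack grows the stack to twice its current size, which would look like:
  //
  //   void MarkSweep::ExpandMarkStack() {
  //     ResizeMarkStack(mark_stack_->Capacity() * 2);
  //   }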

  // Returns how many threads we should use for the current GC phase based on whether we are
  // paused and whether or not we care about pauses.
  size_t GetThreadCount(bool paused) const;

  static void VerifyRootCallback(const mirror::Object* root, void* arg, size_t vreg,
                                 const StackVisitor* visitor);

  void VerifyRoot(const mirror::Object* root, size_t vreg, const StackVisitor* visitor)
      NO_THREAD_SAFETY_ANALYSIS;

  // Push a single reference on a mark stack.
  void PushOnMarkStack(mirror::Object* obj);

  // Blackens objects grayed during a garbage collection.
  void ScanGrayObjects(bool paused, byte minimum_age)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Recursively blackens objects on the mark stack.
  void ProcessMarkStack(bool paused)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  void ProcessMarkStackParallel(size_t thread_count)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Used to get around thread safety annotations. The call is from MarkingPhase and is guarded by
  // IsExclusiveHeld.
  void RevokeAllThreadLocalAllocationStacks(Thread* self) NO_THREAD_SAFETY_ANALYSIS;

  // Revoke all the thread-local buffers.
  void RevokeAllThreadLocalBuffers();

  // Whether or not we count how many of each type of object were scanned.
  static const bool kCountScannedTypes = false;

  // Current space; we check this space first to avoid searching for the appropriate space for an
  // object.
  accounting::SpaceBitmap* current_space_bitmap_;
  // Cache the heap's mark bitmap to prevent having to do 2 loads during slow path marking.
  accounting::HeapBitmap* mark_bitmap_;

  accounting::ObjectStack* mark_stack_;

  // Immune region; every object inside the immune range is assumed to be marked.
  ImmuneRegion immune_region_;

  // Parallel finger.
  AtomicInteger atomic_finger_;
  // Number of classes scanned, if kCountScannedTypes.
  AtomicInteger class_count_;
  // Number of arrays scanned, if kCountScannedTypes.
  AtomicInteger array_count_;
  // Number of non-class/array objects scanned, if kCountScannedTypes.
  AtomicInteger other_count_;
  // Profiling and diagnostic counters.
  AtomicInteger large_object_test_;
  AtomicInteger large_object_mark_;
  AtomicInteger overhead_time_;
  AtomicInteger work_chunks_created_;
  AtomicInteger work_chunks_deleted_;
  AtomicInteger reference_count_;
  AtomicInteger mark_null_count_;
  AtomicInteger mark_immune_count_;
  AtomicInteger mark_fastpath_count_;
  AtomicInteger mark_slowpath_count_;

  // Verification.
  size_t live_stack_freeze_size_;

  UniquePtr<Barrier> gc_barrier_;
  Mutex large_object_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
  Mutex mark_stack_lock_ ACQUIRED_AFTER(Locks::classlinker_classes_lock_);

  const bool is_concurrent_;

 private:
  friend class AddIfReachesAllocSpaceVisitor;  // Used by mod-union table.
  friend class CardScanTask;
  friend class CheckBitmapVisitor;
  friend class CheckReferenceVisitor;
  friend class art::gc::Heap;
  friend class InternTableEntryIsUnmarked;
  friend class MarkIfReachesAllocspaceVisitor;
  friend class MarkObjectVisitor;
  friend class ModUnionCheckReferences;
  friend class ModUnionClearCardVisitor;
  friend class ModUnionReferenceVisitor;
  friend class ModUnionVisitor;
  friend class ModUnionTableBitmap;
  friend class ModUnionTableReferenceCache;
  friend class ModUnionScanImageRootVisitor;
  friend class ScanBitmapVisitor;
  friend class ScanImageRootVisitor;
  template<bool kUseFinger> friend class MarkStackTask;
  friend class FifoMarkStackChunk;

  DISALLOW_COPY_AND_ASSIGN(MarkSweep);
};

}  // namespace collector
}  // namespace gc
}  // namespace art

#endif  // ART_RUNTIME_GC_COLLECTOR_MARK_SWEEP_H_