mark_sweep.h revision c93c530efc175954160c3834c93961a1a946a35a
/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_GC_COLLECTOR_MARK_SWEEP_H_
#define ART_RUNTIME_GC_COLLECTOR_MARK_SWEEP_H_

#include "atomic.h"
#include "barrier.h"
#include "base/macros.h"
#include "base/mutex.h"
#include "garbage_collector.h"
#include "immune_region.h"
#include "object_callbacks.h"
#include "offsets.h"
#include "UniquePtr.h"

namespace art {

namespace mirror {
  class Class;
  class Object;
  template<class T> class ObjectArray;
}  // namespace mirror

class StackVisitor;
class Thread;
enum VisitRootFlags : uint8_t;

namespace gc {

namespace accounting {
  template <typename T> class AtomicStack;
  class MarkIfReachesAllocspaceVisitor;
  class ModUnionClearCardVisitor;
  class ModUnionVisitor;
  class ModUnionTableBitmap;
  class MarkStackChunk;
  typedef AtomicStack<mirror::Object*> ObjectStack;
  class SpaceBitmap;
}  // namespace accounting

namespace space {
  class ContinuousSpace;
}  // namespace space

class Heap;

namespace collector {

class MarkSweep : public GarbageCollector {
 public:
  explicit MarkSweep(Heap* heap, bool is_concurrent, const std::string& name_prefix = "");

  ~MarkSweep() {}

  virtual void InitializePhase() OVERRIDE;
  virtual void MarkingPhase() OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  virtual void HandleDirtyObjectsPhase() OVERRIDE EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
  virtual void ReclaimPhase() OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  virtual void FinishPhase() OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  virtual void MarkReachableObjects()
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);

  bool IsConcurrent() const {
    return is_concurrent_;
  }

  virtual GcType GetGcType() const OVERRIDE {
    return kGcTypeFull;
  }

  virtual CollectorType GetCollectorType() const OVERRIDE {
    return is_concurrent_ ? kCollectorTypeCMS : kCollectorTypeMS;
  }

  // Initializes internal structures.
  void Init();

  // Find the default mark bitmap.
  void FindDefaultMarkBitmap();

  // Marks all objects in the root set at the start of a garbage collection.
  void MarkRoots(Thread* self)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  void MarkNonThreadRoots()
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  void MarkConcurrentRoots(VisitRootFlags flags)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  void MarkRootsCheckpoint(Thread* self)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
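  // Note on the root-marking entry points above, inferred from their names
  // and lock annotations (mark_sweep.cc is the authoritative reference):
  // MarkRootsCheckpoint() scans each thread's stack via a checkpoint run at
  // that thread's next suspend point, MarkConcurrentRoots() handles roots
  // that can safely be scanned while mutators run, and MarkNonThreadRoots()
  // covers the remaining runtime-wide roots.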
  // Builds a mark stack and recursively marks until it empties.
  void RecursiveMark()
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Bind the live bits to the mark bits of bitmaps for spaces that are never collected, i.e.
  // the image. Mark that portion of the heap as immune.
  virtual void BindBitmaps() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Builds a mark stack with objects on dirty cards and recursively marks until it empties.
  void RecursiveMarkDirtyObjects(bool paused, byte minimum_age)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Remarks the root set after completing the concurrent mark.
  void ReMarkRoots()
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  void ProcessReferences(Thread* self)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  void PreProcessReferences()
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Update and mark references from immune spaces. Virtual as overridden by StickyMarkSweep.
  virtual void UpdateAndMarkModUnion()
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Pre-clean cards to reduce how much work is needed in the pause.
  void PreCleanCards()
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Sweeps unmarked objects to complete the garbage collection. Virtual as by default it sweeps
  // all allocation spaces; partial and sticky GCs only want to sweep a subset of the heap.
  virtual void Sweep(bool swap_bitmaps) EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);

  // Sweeps unmarked large objects to complete the garbage collection.
  void SweepLargeObjects(bool swap_bitmaps) EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);

  // Sweep only pointers within an array. WARNING: Trashes objects.
  void SweepArray(accounting::ObjectStack* allocation_stack_, bool swap_bitmaps)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);

  // Blackens an object.
  void ScanObject(mirror::Object* obj)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // TODO: enable thread safety analysis when in use by multiple worker threads.
  template <typename MarkVisitor>
  void ScanObjectVisit(mirror::Object* obj, const MarkVisitor& visitor)
      NO_THREAD_SAFETY_ANALYSIS;

  void SweepSystemWeaks()
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_);

  static mirror::Object* VerifySystemWeakIsLiveCallback(mirror::Object* obj, void* arg)
      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);

  void VerifySystemWeaks()
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
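  // A rough usage sketch for ScanObjectVisit() above. The visitor is invoked
  // once per reference held by obj; the exact functor signature the template
  // expects lives in mark_sweep-inl.h, so treat the parameter list below as
  // an assumption, not a contract:
  //
  //   struct CountReferencesVisitor {
  //     void operator()(mirror::Object* obj, mirror::Object* ref,
  //                     const MemberOffset& offset, bool is_static) const {
  //       // Inspect or record the reference; ref may be NULL.
  //     }
  //   };
  //
  //   CountReferencesVisitor visitor;
  //   mark_sweep->ScanObjectVisit(obj, visitor);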
  // Verify that an object is live, either in a live bitmap or in the allocation stack.
  void VerifyIsLive(const mirror::Object* obj)
      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);

  template <typename Visitor>
  static void VisitObjectReferences(mirror::Object* obj, const Visitor& visitor, bool visit_class)
      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);

  static mirror::Object* MarkObjectCallback(mirror::Object* obj, void* arg)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);

  static void MarkRootCallback(mirror::Object** root, void* arg, uint32_t thread_id,
                               RootType root_type)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);

  static void VerifyRootMarked(mirror::Object** root, void* arg, uint32_t /*thread_id*/,
                               RootType /*root_type*/)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);

  static void ProcessMarkStackPausedCallback(void* arg)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);

  static void MarkRootParallelCallback(mirror::Object** root, void* arg, uint32_t thread_id,
                                       RootType root_type)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Marks an object.
  void MarkObject(const mirror::Object* obj)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);

  Barrier& GetBarrier() {
    return *gc_barrier_;
  }

 protected:
  // Returns true if the object has its bit set in the mark bitmap.
  bool IsMarked(const mirror::Object* object) const;

  static mirror::Object* IsMarkedCallback(mirror::Object* object, void* arg)
      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);

  static void VerifyImageRootVisitor(mirror::Object* root, void* arg)
      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);

  void MarkObjectNonNull(const mirror::Object* obj)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);

  // Unmarks an object by clearing the bit inside the corresponding bitmap or, if it is in a
  // space set, by removing the object from the set.
  void UnMarkObjectNonNull(const mirror::Object* obj)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);

  // Mark the VM thread roots.
  void MarkThreadRoots(Thread* self)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Marks an object atomically, safe to use from multiple threads.
  void MarkObjectNonNullParallel(const mirror::Object* obj);

  // Marks or unmarks a large object: marks it if set is true, otherwise unmarks it.
  bool MarkLargeObject(const mirror::Object* obj, bool set)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);

  // Returns true if we need to add obj to a mark stack.
  bool MarkObjectParallel(const mirror::Object* obj) NO_THREAD_SAFETY_ANALYSIS;

  // Verify the roots of the heap and print out information related to any invalid roots.
  // Called in MarkObject, so we may not hold the mutator lock.
  void VerifyRoots()
      NO_THREAD_SAFETY_ANALYSIS;
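  // Note on the two mark-stack resize helpers below: growth is geometric
  // (ExpandMarkStack doubles the capacity), which keeps pushes amortized
  // O(1). A minimal sketch of how the two relate, assuming AtomicStack
  // exposes a Capacity() accessor (see atomic_stack.h for the real
  // interface):
  //
  //   void MarkSweep::ExpandMarkStack() {
  //     ResizeMarkStack(mark_stack_->Capacity() * 2);
  //   }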
  // Expand mark stack to 2x its current size.
  void ExpandMarkStack() EXCLUSIVE_LOCKS_REQUIRED(mark_stack_lock_);
  void ResizeMarkStack(size_t new_size) EXCLUSIVE_LOCKS_REQUIRED(mark_stack_lock_);

  // Returns how many threads we should use for the current GC phase, based on whether we are
  // paused and whether or not we care about pauses.
  size_t GetThreadCount(bool paused) const;

  static void VerifyRootCallback(const mirror::Object* root, void* arg, size_t vreg,
                                 const StackVisitor* visitor);

  void VerifyRoot(const mirror::Object* root, size_t vreg, const StackVisitor* visitor)
      NO_THREAD_SAFETY_ANALYSIS;

  template <typename Visitor>
  static void VisitInstanceFieldsReferences(mirror::Class* klass, mirror::Object* obj,
                                            const Visitor& visitor)
      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);

  // Visit the header, static field references, and interface pointers of a class object.
  template <typename Visitor>
  static void VisitClassReferences(mirror::Class* klass, mirror::Object* obj,
                                   const Visitor& visitor)
      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);

  template <typename Visitor>
  static void VisitStaticFieldsReferences(mirror::Class* klass, const Visitor& visitor)
      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);

  template <typename Visitor>
  static void VisitFieldsReferences(mirror::Object* obj, uint32_t ref_offsets, bool is_static,
                                    const Visitor& visitor)
      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);

  // Visit all of the references in an object array.
  template <typename Visitor>
  static void VisitObjectArrayReferences(mirror::ObjectArray<mirror::Object>* array,
                                         const Visitor& visitor)
      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);

  // Visits the header and field references of a data object.
  template <typename Visitor>
  static void VisitOtherReferences(mirror::Class* klass, mirror::Object* obj,
                                   const Visitor& visitor)
      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_) {
    return VisitInstanceFieldsReferences(klass, obj, visitor);
  }

  // Blackens objects grayed during a garbage collection.
  void ScanGrayObjects(bool paused, byte minimum_age)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Schedules an unmarked object for reference processing.
  void DelayReferenceReferent(mirror::Class* klass, mirror::Object* reference)
      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);

  // Recursively blackens objects on the mark stack.
  void ProcessMarkStack(bool paused)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  void ProcessMarkStackParallel(size_t thread_count)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  void EnqueueFinalizerReferences(mirror::Object** ref)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  void PreserveSomeSoftReferences(mirror::Object** ref)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  void ClearWhiteReferences(mirror::Object** list)
      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
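  // Reference-processing note: the helpers above give java.lang.ref objects
  // their usual treatment. At a high level (the definitions in mark_sweep.cc
  // are the authoritative flow): DelayReferenceReferent() queues a reference
  // whose referent is still white, PreserveSomeSoftReferences() may mark soft
  // referents to keep them alive when memory is not tight,
  // EnqueueFinalizerReferences() marks referents that still need their
  // finalizers run, and ClearWhiteReferences() clears whatever remains
  // unreachable afterwards.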
  // Used to get around thread safety annotations. The call is from MarkingPhase and is guarded by
  // IsExclusiveHeld.
  void RevokeAllThreadLocalAllocationStacks(Thread* self) NO_THREAD_SAFETY_ANALYSIS;

  // Revoke all the thread-local buffers.
  void RevokeAllThreadLocalBuffers();

  // Whether or not we count how many of each type of object were scanned.
  static const bool kCountScannedTypes = false;

  // Current mark bitmap; checked first to avoid searching for the appropriate space for an
  // object.
  accounting::SpaceBitmap* current_mark_bitmap_;

  accounting::ObjectStack* mark_stack_;

  // Immune region; every object inside the immune region is assumed to be marked.
  ImmuneRegion immune_region_;

  // Parallel finger.
  AtomicInteger atomic_finger_;
  // Number of classes scanned, if kCountScannedTypes.
  AtomicInteger class_count_;
  // Number of arrays scanned, if kCountScannedTypes.
  AtomicInteger array_count_;
  // Number of non-class/arrays scanned, if kCountScannedTypes.
  AtomicInteger other_count_;
  AtomicInteger large_object_test_;
  AtomicInteger large_object_mark_;
  AtomicInteger classes_marked_;
  AtomicInteger overhead_time_;
  AtomicInteger work_chunks_created_;
  AtomicInteger work_chunks_deleted_;
  AtomicInteger reference_count_;

  // Verification.
  size_t live_stack_freeze_size_;

  UniquePtr<Barrier> gc_barrier_;
  Mutex large_object_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
  Mutex mark_stack_lock_ ACQUIRED_AFTER(Locks::classlinker_classes_lock_);

  const bool is_concurrent_;

 private:
  friend class AddIfReachesAllocSpaceVisitor;  // Used by mod-union table.
  friend class CardScanTask;
  friend class CheckBitmapVisitor;
  friend class CheckReferenceVisitor;
  friend class art::gc::Heap;
  friend class InternTableEntryIsUnmarked;
  friend class MarkIfReachesAllocspaceVisitor;
  friend class ModUnionCheckReferences;
  friend class ModUnionClearCardVisitor;
  friend class ModUnionReferenceVisitor;
  friend class ModUnionVisitor;
  friend class ModUnionTableBitmap;
  friend class ModUnionTableReferenceCache;
  friend class ModUnionScanImageRootVisitor;
  friend class ScanBitmapVisitor;
  friend class ScanImageRootVisitor;
  template<bool kUseFinger> friend class MarkStackTask;
  friend class FifoMarkStackChunk;

  DISALLOW_COPY_AND_ASSIGN(MarkSweep);
};

}  // namespace collector
}  // namespace gc
}  // namespace art

#endif  // ART_RUNTIME_GC_COLLECTOR_MARK_SWEEP_H_