mark_sweep.h revision 11409ae81a3eaf84d7fd2b3c85b8b06d2bae27f0
/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_GC_COLLECTOR_MARK_SWEEP_H_
#define ART_RUNTIME_GC_COLLECTOR_MARK_SWEEP_H_

#include "atomic_integer.h"
#include "barrier.h"
#include "base/macros.h"
#include "base/mutex.h"
#include "garbage_collector.h"
#include "offsets.h"
#include "root_visitor.h"
#include "UniquePtr.h"

namespace art {

namespace mirror {
  class Class;
  class Object;
  template<class T> class ObjectArray;
}  // namespace mirror

class StackVisitor;
class Thread;

namespace gc {

namespace accounting {
  template <typename T> class AtomicStack;
  class MarkIfReachesAllocspaceVisitor;
  class ModUnionClearCardVisitor;
  class ModUnionVisitor;
  class ModUnionTableBitmap;
  class MarkStackChunk;
  typedef AtomicStack<mirror::Object*> ObjectStack;
  class SpaceBitmap;
}  // namespace accounting

namespace space {
  class ContinuousSpace;
}  // namespace space

class Heap;

namespace collector {

class MarkSweep : public GarbageCollector {
 public:
  explicit MarkSweep(Heap* heap, bool is_concurrent, const std::string& name_prefix = "");

  ~MarkSweep() {}

  virtual void InitializePhase();
  virtual bool IsConcurrent() const;
  virtual bool HandleDirtyObjectsPhase() EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
  virtual void MarkingPhase() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  virtual void ReclaimPhase() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  virtual void FinishPhase();
  virtual void MarkReachableObjects()
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
  virtual GcType GetGcType() const {
    return kGcTypeFull;
  }

  // Initializes internal structures.
  void Init();

  // Find the default mark bitmap.
  void FindDefaultMarkBitmap();

  // Marks the root set at the start of a garbage collection.
  void MarkRoots()
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  void MarkNonThreadRoots()
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);

  void MarkConcurrentRoots()
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);

  void MarkRootsCheckpoint(Thread* self)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Verify that image roots point to only marked objects within the alloc space.
  void VerifyImageRoots()
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Builds a mark stack and recursively marks until it empties.
  void RecursiveMark()
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
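
  // Editor's sketch (not in the original header): for a concurrent collection, the
  // driver in GarbageCollector::Run() is believed to invoke the phase methods above
  // roughly in this order; a simplified, assumed view, not the actual driver code.
  //
  //   InitializePhase();          // Reset counters, find the default mark bitmap.
  //   MarkingPhase();             // Mark roots, then recursively mark concurrently.
  //   HandleDirtyObjectsPhase();  // Paused: re-mark roots and objects on dirty cards.
  //   ReclaimPhase();             // Sweep unmarked objects, process references.
  //   FinishPhase();              // Fold statistics into cumulative totals.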

  // Make a space immune. Immune spaces have all live objects marked - that is, the
  // mark and live bitmaps are bound together.
  void ImmuneSpace(space::ContinuousSpace* space)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  bool IsImmuneSpace(const space::ContinuousSpace* space)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Bind the live bits to the mark bits of bitmaps for spaces that are never collected,
  // i.e. the image. Mark that portion of the heap as immune.
  virtual void BindBitmaps() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  void BindLiveToMarkBitmap(space::ContinuousSpace* space)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);

  void UnBindBitmaps()
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);

  // Builds a mark stack with objects on dirty cards and recursively marks until it empties.
  void RecursiveMarkDirtyObjects(bool paused, byte minimum_age)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Remarks the root set after completing the concurrent mark.
  void ReMarkRoots()
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  void ProcessReferences(Thread* self)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  virtual void UpdateAndMarkModUnion()
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Sweeps unmarked objects to complete the garbage collection.
  virtual void Sweep(bool swap_bitmaps) EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);

  // Sweeps unmarked large objects to complete the garbage collection.
  void SweepLargeObjects(bool swap_bitmaps) EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);

  // Sweep only pointers within an array. WARNING: Trashes objects.
  void SweepArray(accounting::ObjectStack* allocation_stack_, bool swap_bitmaps)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);

  mirror::Object* GetClearedReferences() {
    return cleared_reference_list_;
  }

  // Proxy for external access to ScanObject.
  void ScanRoot(const mirror::Object* obj)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Blackens an object.
  void ScanObject(const mirror::Object* obj)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // TODO: enable thread safety analysis when in use by multiple worker threads.
  template <typename MarkVisitor>
  void ScanObjectVisit(mirror::Object* obj, const MarkVisitor& visitor)
      NO_THREAD_SAFETY_ANALYSIS;

  size_t GetFreedBytes() const {
    return freed_bytes_;
  }

  size_t GetFreedLargeObjectBytes() const {
    return freed_large_object_bytes_;
  }

  size_t GetFreedObjects() const {
    return freed_objects_;
  }

  size_t GetFreedLargeObjects() const {
    return freed_large_objects_;
  }

  uint64_t GetTotalTimeNs() const {
    return total_time_ns_;
  }

  uint64_t GetTotalPausedTimeNs() const {
    return total_paused_time_ns_;
  }

  uint64_t GetTotalFreedObjects() const {
    return total_freed_objects_;
  }

  uint64_t GetTotalFreedBytes() const {
    return total_freed_bytes_;
  }
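
  // Editor's sketch (not in the original header): ScanObjectVisit above accepts any
  // functor called once per outgoing reference of obj. The exact parameter list is
  // fixed by the visitors in mark_sweep-inl.h; the shape below matches CheckReference
  // in this header but is an assumption for illustration only.
  //
  //   struct CountReferencesVisitor {  // hypothetical example visitor
  //     void operator()(const mirror::Object* obj, const mirror::Object* ref,
  //                     const MemberOffset& offset, bool is_static) const {
  //       ++count;  // Tally every reference field ScanObjectVisit reports.
  //     }
  //     mutable size_t count;
  //   };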

  // Everything inside the immune range is assumed to be marked.
  void SetImmuneRange(mirror::Object* begin, mirror::Object* end);

  void SweepSystemWeaks()
      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);

  static mirror::Object* VerifySystemWeakIsLiveCallback(mirror::Object* obj, void* arg)
      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);

  void VerifySystemWeaks()
      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);

  // Verify that an object is live, either in a live bitmap or in the allocation stack.
  void VerifyIsLive(const mirror::Object* obj)
      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);

  template <typename Visitor>
  static void VisitObjectReferences(mirror::Object* obj, const Visitor& visitor,
                                    bool visit_class = false)
      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_,
                            Locks::mutator_lock_);

  static mirror::Object* MarkRootCallback(mirror::Object* root, void* arg)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);

  static mirror::Object* MarkRootParallelCallback(mirror::Object* root, void* arg);

  // Marks an object.
  void MarkObject(const mirror::Object* obj)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);

  void MarkRoot(const mirror::Object* obj)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);

  Barrier& GetBarrier() {
    return *gc_barrier_;
  }

 protected:
  // Returns true if the object has its bit set in the mark bitmap.
  bool IsMarked(const mirror::Object* object) const;

  static mirror::Object* SystemWeakIsMarkedCallback(mirror::Object* object, void* arg)
      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);

  static mirror::Object* SystemWeakIsMarkedArrayCallback(mirror::Object* object, void* arg)
      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);

  static void VerifyImageRootVisitor(mirror::Object* root, void* arg)
      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_,
                            Locks::mutator_lock_);

  void MarkObjectNonNull(const mirror::Object* obj)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);

  // Unmarks an object by clearing the bit inside of the corresponding bitmap, or, if it
  // is in a space set, removing the object from the set.
  void UnMarkObjectNonNull(const mirror::Object* obj)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);

  // Marks the VM thread roots.
  virtual void MarkThreadRoots(Thread* self)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Marks an object atomically, safe to use from multiple threads.
  void MarkObjectNonNullParallel(const mirror::Object* obj);

  // Marks a large object if set is true, otherwise unmarks it.
  bool MarkLargeObject(const mirror::Object* obj, bool set)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);

  // Returns true if we need to add obj to a mark stack.
  bool MarkObjectParallel(const mirror::Object* obj) NO_THREAD_SAFETY_ANALYSIS;

  static void SweepCallback(size_t num_ptrs, mirror::Object** ptrs, void* arg)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
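
  // Editor's note (not in the original header): SweepCallback appears to follow the
  // batch sweep-walk callback shape used by the accounting bitmaps - ptrs holds
  // num_ptrs objects found live-but-unmarked, and arg carries the sweep context.
  // Treat this description as an assumption; the authoritative use is in mark_sweep.cc.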

  // Special sweep for zygote that just marks objects / dirties cards.
  static void ZygoteSweepCallback(size_t num_ptrs, mirror::Object** ptrs, void* arg)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);

  void CheckReference(const mirror::Object* obj, const mirror::Object* ref, MemberOffset offset,
                      bool is_static)
      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);

  void CheckObject(const mirror::Object* obj)
      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);

  // Verify the roots of the heap and print out information related to any invalid roots.
  // Called in MarkObject, so we may not hold the mutator lock.
  void VerifyRoots()
      NO_THREAD_SAFETY_ANALYSIS;

  // Expand mark stack to 2x its current size.
  void ExpandMarkStack() EXCLUSIVE_LOCKS_REQUIRED(mark_stack_lock_);
  void ResizeMarkStack(size_t new_size) EXCLUSIVE_LOCKS_REQUIRED(mark_stack_lock_);

  // Returns how many threads we should use for the current GC phase based on whether we
  // are paused and whether we care about pauses.
  size_t GetThreadCount(bool paused) const;

  // Returns true if an object is inside of the immune region (assumed to be marked).
  bool IsImmune(const mirror::Object* obj) const ALWAYS_INLINE {
    return obj >= immune_begin_ && obj < immune_end_;
  }

  static void VerifyRootCallback(const mirror::Object* root, void* arg, size_t vreg,
                                 const StackVisitor* visitor);

  void VerifyRoot(const mirror::Object* root, size_t vreg, const StackVisitor* visitor)
      NO_THREAD_SAFETY_ANALYSIS;

  template <typename Visitor>
  static void VisitInstanceFieldsReferences(mirror::Class* klass, mirror::Object* obj,
                                            const Visitor& visitor)
      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);

  // Visit the header, static field references, and interface pointers of a class object.
  template <typename Visitor>
  static void VisitClassReferences(mirror::Class* klass, mirror::Object* obj,
                                   const Visitor& visitor)
      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);

  template <typename Visitor>
  static void VisitStaticFieldsReferences(mirror::Class* klass, const Visitor& visitor)
      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);

  template <typename Visitor>
  static void VisitFieldsReferences(mirror::Object* obj, uint32_t ref_offsets, bool is_static,
                                    const Visitor& visitor)
      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);

  // Visit all of the references in an object array.
  template <typename Visitor>
  static void VisitObjectArrayReferences(mirror::ObjectArray<mirror::Object>* array,
                                         const Visitor& visitor)
      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);

  // Visits the header and field references of a data object.
  template <typename Visitor>
  static void VisitOtherReferences(mirror::Class* klass, mirror::Object* obj,
                                   const Visitor& visitor)
      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_) {
    return VisitInstanceFieldsReferences(klass, obj, visitor);
  }

  // Blackens objects grayed during a garbage collection.
  void ScanGrayObjects(bool paused, byte minimum_age)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
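
  // Editor's note (not in the original header): RecursiveMarkDirtyObjects() above is
  // understood to pair ScanGrayObjects() with ProcessMarkStack() below - objects grayed
  // on cards of at least minimum_age are re-scanned, and anything they newly reveal is
  // drained from the mark stack.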

  // Schedules an unmarked object for reference processing.
  void DelayReferenceReferent(mirror::Class* klass, mirror::Object* reference)
      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);

  // Recursively blackens objects on the mark stack.
  void ProcessMarkStack(bool paused)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  void ProcessMarkStackParallel(size_t thread_count)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  void EnqueueFinalizerReferences(mirror::Object** ref)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  void PreserveSomeSoftReferences(mirror::Object** ref)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  void ClearWhiteReferences(mirror::Object** list)
      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);

  void ProcessReferences(mirror::Object** soft_references, bool clear_soft_references,
                         mirror::Object** weak_references,
                         mirror::Object** finalizer_references,
                         mirror::Object** phantom_references)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Whether or not we count how many of each type of object were scanned.
  static const bool kCountScannedTypes = false;

  // Current space; we check this space first to avoid searching for the appropriate
  // space for an object.
  accounting::SpaceBitmap* current_mark_bitmap_;

  // Cache java.lang.Class for optimization.
  mirror::Class* java_lang_Class_;

  accounting::ObjectStack* mark_stack_;

  // Immune range; every object inside it is assumed to be marked.
  mirror::Object* immune_begin_;
  mirror::Object* immune_end_;

  mirror::Object* soft_reference_list_;
  mirror::Object* weak_reference_list_;
  mirror::Object* finalizer_reference_list_;
  mirror::Object* phantom_reference_list_;
  mirror::Object* cleared_reference_list_;

  // Parallel finger.
  AtomicInteger atomic_finger_;
  // Number of non large object bytes freed in this collection.
  AtomicInteger freed_bytes_;
  // Number of large object bytes freed.
  AtomicInteger freed_large_object_bytes_;
  // Number of objects freed in this collection.
  AtomicInteger freed_objects_;
  // Number of freed large objects.
  AtomicInteger freed_large_objects_;
  // Number of classes scanned, if kCountScannedTypes.
  AtomicInteger class_count_;
  // Number of arrays scanned, if kCountScannedTypes.
  AtomicInteger array_count_;
  // Number of non-class/arrays scanned, if kCountScannedTypes.
  AtomicInteger other_count_;
  // Debugging and instrumentation counters.
  AtomicInteger large_object_test_;
  AtomicInteger large_object_mark_;
  AtomicInteger classes_marked_;
  AtomicInteger overhead_time_;
  AtomicInteger work_chunks_created_;
  AtomicInteger work_chunks_deleted_;
  AtomicInteger reference_count_;
  AtomicInteger cards_scanned_;

  // Verification.
  size_t live_stack_freeze_size_;

  UniquePtr<Barrier> gc_barrier_;
  Mutex large_object_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
  Mutex mark_stack_lock_ ACQUIRED_AFTER(Locks::classlinker_classes_lock_);

  const bool is_concurrent_;

  bool clear_soft_references_;

 private:
  friend class AddIfReachesAllocSpaceVisitor;  // Used by mod-union table.
  friend class CardScanTask;
  friend class CheckBitmapVisitor;
  friend class CheckReferenceVisitor;
  friend class art::gc::Heap;
  friend class InternTableEntryIsUnmarked;
  friend class MarkIfReachesAllocspaceVisitor;
  friend class ModUnionCheckReferences;
  friend class ModUnionClearCardVisitor;
  friend class ModUnionReferenceVisitor;
  friend class ModUnionVisitor;
  friend class ModUnionTableBitmap;
  friend class ModUnionTableReferenceCache;
  friend class ModUnionScanImageRootVisitor;
  friend class ScanBitmapVisitor;
  friend class ScanImageRootVisitor;
  template<bool kUseFinger> friend class MarkStackTask;
  friend class FifoMarkStackChunk;

  DISALLOW_COPY_AND_ASSIGN(MarkSweep);
};

}  // namespace collector
}  // namespace gc
}  // namespace art

#endif  // ART_RUNTIME_GC_COLLECTOR_MARK_SWEEP_H_