mark_sweep.h revision 0f72e4136aecaf6976fdb55916bbd7b6d5c9c77b
/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_GC_COLLECTOR_MARK_SWEEP_H_
#define ART_RUNTIME_GC_COLLECTOR_MARK_SWEEP_H_

#include "atomic_integer.h"
#include "barrier.h"
#include "base/macros.h"
#include "base/mutex.h"
#include "garbage_collector.h"
#include "offsets.h"
#include "root_visitor.h"
#include "UniquePtr.h"

namespace art {

namespace mirror {
  class Class;
  class Object;
  template<class T> class ObjectArray;
}  // namespace mirror

class StackVisitor;
class Thread;

namespace gc {

namespace accounting {
  template <typename T> class AtomicStack;
  class MarkIfReachesAllocspaceVisitor;
  class ModUnionClearCardVisitor;
  class ModUnionVisitor;
  class ModUnionTableBitmap;
  class MarkStackChunk;
  typedef AtomicStack<mirror::Object*> ObjectStack;
  class SpaceBitmap;
}  // namespace accounting

namespace space {
  class ContinuousSpace;
}  // namespace space

class Heap;

namespace collector {

class MarkSweep : public GarbageCollector {
 public:
  explicit MarkSweep(Heap* heap, bool is_concurrent, const std::string& name_prefix = "");

  ~MarkSweep() {}

  virtual void InitializePhase();
  virtual bool IsConcurrent() const;
  virtual bool HandleDirtyObjectsPhase() EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
  virtual void MarkingPhase() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  virtual void ReclaimPhase() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  virtual void FinishPhase();
  virtual void MarkReachableObjects()
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
  virtual GcType GetGcType() const {
    return kGcTypeFull;
  }

  // Initializes internal structures.
  void Init();

  // Find the default mark bitmap.
  void FindDefaultMarkBitmap();

  // Marks the root set at the start of a garbage collection.
  void MarkRoots()
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  void MarkNonThreadRoots()
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);

  void MarkConcurrentRoots()
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);

  void MarkRootsCheckpoint(Thread* self)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Verify that image roots point only to marked objects within the alloc space.
  void VerifyImageRoots()
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Builds a mark stack and recursively marks until it empties.
  void RecursiveMark()
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
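  // In the usual tri-color terminology that the comments in this file use (white = unmarked,
  // gray = marked but not yet scanned, black = marked with all references scanned), MarkRoots()
  // grays the root set and RecursiveMark() blackens objects until no gray objects remain.
  //
  // A rough sketch of how the phase methods above fit together, inferred from this header; the
  // authoritative sequencing lives in GarbageCollector::Run() and mark_sweep.cc:
  //
  //   InitializePhase();
  //   MarkingPhase();             // MarkRoots(), MarkConcurrentRoots(), RecursiveMark(), ...
  //   HandleDirtyObjectsPhase();  // concurrent GC only: re-scan dirty cards in a pause
  //   ReclaimPhase();             // Sweep() / SweepLargeObjects()
  //   FinishPhase();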
  // Make a space immune. Immune spaces have all live objects marked - that is, the mark and
  // live bitmaps are bound together.
  void ImmuneSpace(space::ContinuousSpace* space)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Bind the live bits to the mark bits of bitmaps for spaces that are never collected, i.e.
  // the image. Mark that portion of the heap as immune.
  virtual void BindBitmaps() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  void BindLiveToMarkBitmap(space::ContinuousSpace* space)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);

  void UnBindBitmaps()
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);

  // Builds a mark stack with objects on dirty cards and recursively marks until it empties.
  void RecursiveMarkDirtyObjects(bool paused, byte minimum_age)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Remarks the root set after completing the concurrent mark.
  void ReMarkRoots()
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  void ProcessReferences(Thread* self)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Sweeps unmarked objects to complete the garbage collection.
  virtual void Sweep(bool swap_bitmaps) EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);

  // Sweeps unmarked large objects to complete the garbage collection.
  void SweepLargeObjects(bool swap_bitmaps) EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);

  // Sweep only pointers within an array. WARNING: Trashes objects.
  void SweepArray(accounting::ObjectStack* allocation_stack_, bool swap_bitmaps)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);

  mirror::Object* GetClearedReferences() {
    return cleared_reference_list_;
  }

  // Proxy for external access to ScanObject.
  void ScanRoot(const mirror::Object* obj)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Blackens an object.
  void ScanObject(const mirror::Object* obj)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // TODO: enable thread safety analysis when in use by multiple worker threads.
  template <typename MarkVisitor>
  void ScanObjectVisit(const mirror::Object* obj, const MarkVisitor& visitor)
      NO_THREAD_SAFETY_ANALYSIS;

  size_t GetFreedBytes() const {
    return freed_bytes_;
  }

  size_t GetFreedLargeObjectBytes() const {
    return freed_large_object_bytes_;
  }

  size_t GetFreedObjects() const {
    return freed_objects_;
  }

  size_t GetFreedLargeObjects() const {
    return freed_large_objects_;
  }

  uint64_t GetTotalTimeNs() const {
    return total_time_ns_;
  }

  uint64_t GetTotalPausedTimeNs() const {
    return total_paused_time_ns_;
  }

  uint64_t GetTotalFreedObjects() const {
    return total_freed_objects_;
  }

  uint64_t GetTotalFreedBytes() const {
    return total_freed_bytes_;
  }

  // Everything inside the immune range is assumed to be marked.
  void SetImmuneRange(mirror::Object* begin, mirror::Object* end);

  void SweepSystemWeaks()
      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
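  // "System weaks" are weak references held by the runtime itself, e.g. JNI weak globals (see
  // SweepJniWeakGlobals() below) and interned strings; sweeping them clears entries whose
  // referents are no longer marked. This summary is inferred from the declarations in this
  // header rather than from a documented contract.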
  // Only sweep the weaks which are inside of an allocation stack.
  void SweepSystemWeaksArray(accounting::ObjectStack* allocations)
      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);

  static bool VerifyIsLiveCallback(const mirror::Object* obj, void* arg)
      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);

  void VerifySystemWeaks()
      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);

  // Verify that an object is live, either in a live bitmap or in the allocation stack.
  void VerifyIsLive(const mirror::Object* obj)
      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);

  template <typename Visitor>
  static void VisitObjectReferences(const mirror::Object* obj, const Visitor& visitor)
      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_,
                            Locks::mutator_lock_);

  static void MarkObjectCallback(const mirror::Object* root, void* arg)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);

  static void MarkRootParallelCallback(const mirror::Object* root, void* arg);

  // Marks an object.
  void MarkObject(const mirror::Object* obj)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);

  void MarkRoot(const mirror::Object* obj)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);

  Barrier& GetBarrier() {
    return *gc_barrier_;
  }

 protected:
  // Returns true if the object has its bit set in the mark bitmap.
  bool IsMarked(const mirror::Object* object) const;

  static bool IsMarkedCallback(const mirror::Object* object, void* arg)
      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);

  static bool IsMarkedArrayCallback(const mirror::Object* object, void* arg)
      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);

  static void ReMarkObjectVisitor(const mirror::Object* root, void* arg)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);

  static void VerifyImageRootVisitor(mirror::Object* root, void* arg)
      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_,
                            Locks::mutator_lock_);

  void MarkObjectNonNull(const mirror::Object* obj)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);

  // Unmarks an object by clearing the bit inside of the corresponding bitmap, or, if it is in a
  // space set, removing the object from the set.
  void UnMarkObjectNonNull(const mirror::Object* obj)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);

  // Mark the VM thread roots.
  virtual void MarkThreadRoots(Thread* self)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Marks an object atomically, safe to use from multiple threads.
  void MarkObjectNonNullParallel(const mirror::Object* obj);

  // Marks or unmarks a large object based on set: if set is true we mark, otherwise we unmark.
  bool MarkLargeObject(const mirror::Object* obj, bool set)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
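  // A note on the parallel marking path, inferred from MarkObjectParallel() below and the
  // atomic_finger_ member later in this class (an assumption, not a documented contract):
  // marking is an atomic test-and-set in the mark bitmap, and a newly marked object only needs
  // a mark stack push when it lies below the parallel finger; objects above the finger are
  // still ahead of the in-progress address-ordered bitmap scan and will be visited by it.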
  // Returns true if we need to add obj to a mark stack.
  bool MarkObjectParallel(const mirror::Object* obj) NO_THREAD_SAFETY_ANALYSIS;

  static void SweepCallback(size_t num_ptrs, mirror::Object** ptrs, void* arg)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);

  // Special sweep for zygote that just marks objects / dirties cards.
  static void ZygoteSweepCallback(size_t num_ptrs, mirror::Object** ptrs, void* arg)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);

  void CheckReference(const mirror::Object* obj, const mirror::Object* ref, MemberOffset offset,
                      bool is_static)
      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);

  void CheckObject(const mirror::Object* obj)
      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);

  // Verify the roots of the heap and print out information related to any invalid roots.
  // Called in MarkObject, so we may not hold the mutator lock.
  void VerifyRoots()
      NO_THREAD_SAFETY_ANALYSIS;

  // Expand mark stack to 2x its current size.
  void ExpandMarkStack() EXCLUSIVE_LOCKS_REQUIRED(mark_stack_lock_);
  void ResizeMarkStack(size_t new_size) EXCLUSIVE_LOCKS_REQUIRED(mark_stack_lock_);

  // Returns how many threads we should use for the current GC phase, based on whether we are
  // paused and whether we care about pauses.
  size_t GetThreadCount(bool paused) const;

  // Returns true if an object is inside of the immune region (assumed to be marked).
  bool IsImmune(const mirror::Object* obj) const {
    return obj >= immune_begin_ && obj < immune_end_;
  }

  static void VerifyRootCallback(const mirror::Object* root, void* arg, size_t vreg,
                                 const StackVisitor* visitor);

  void VerifyRoot(const mirror::Object* root, size_t vreg, const StackVisitor* visitor)
      NO_THREAD_SAFETY_ANALYSIS;

  template <typename Visitor>
  static void VisitInstanceFieldsReferences(const mirror::Class* klass, const mirror::Object* obj,
                                            const Visitor& visitor)
      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);

  // Visit the header, static field references, and interface pointers of a class object.
  template <typename Visitor>
  static void VisitClassReferences(const mirror::Class* klass, const mirror::Object* obj,
                                   const Visitor& visitor)
      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);

  template <typename Visitor>
  static void VisitStaticFieldsReferences(const mirror::Class* klass, const Visitor& visitor)
      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);

  template <typename Visitor>
  static void VisitFieldsReferences(const mirror::Object* obj, uint32_t ref_offsets, bool is_static,
                                    const Visitor& visitor)
      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);

  // Visit all of the references in an object array.
  template <typename Visitor>
  static void VisitObjectArrayReferences(const mirror::ObjectArray<mirror::Object>* array,
                                         const Visitor& visitor)
      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
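  // An illustrative visitor for the Visit*References helpers above. The operator() signature
  // shown is an assumption inferred from CheckReference() earlier in this section; the real
  // contract is whatever the call sites in mark_sweep-inl.h / mark_sweep.cc expect:
  //
  //   struct ExampleRefVisitor {
  //     void operator()(const mirror::Object* obj, const mirror::Object* ref,
  //                     MemberOffset offset, bool is_static) const {
  //       // Invoked once per reference: obj holds ref at the given field offset.
  //     }
  //   };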
  // Visits the header and field references of a data object.
  template <typename Visitor>
  static void VisitOtherReferences(const mirror::Class* klass, const mirror::Object* obj,
                                   const Visitor& visitor)
      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_) {
    return VisitInstanceFieldsReferences(klass, obj, visitor);
  }

  // Blackens objects grayed during a garbage collection.
  void ScanGrayObjects(bool paused, byte minimum_age)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Schedules an unmarked object for reference processing.
  void DelayReferenceReferent(mirror::Class* klass, mirror::Object* reference)
      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);

  // Recursively blackens objects on the mark stack.
  void ProcessMarkStack(bool paused)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  void ProcessMarkStackParallel(size_t thread_count)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  void EnqueueFinalizerReferences(mirror::Object** ref)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  void PreserveSomeSoftReferences(mirror::Object** ref)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  void ClearWhiteReferences(mirror::Object** list)
      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);

  void ProcessReferences(mirror::Object** soft_references, bool clear_soft_references,
                         mirror::Object** weak_references,
                         mirror::Object** finalizer_references,
                         mirror::Object** phantom_references)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  void SweepJniWeakGlobals(IsMarkedTester is_marked, void* arg)
      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);

  // Whether or not we count how many of each type of object were scanned.
  static const bool kCountScannedTypes = false;

  // Current space; we check this space first to avoid searching for the appropriate space for an
  // object.
  accounting::SpaceBitmap* current_mark_bitmap_;

  // Cache java.lang.Class for optimization.
  mirror::Class* java_lang_Class_;

  accounting::ObjectStack* mark_stack_;

  // Immune range; every object inside the immune range is assumed to be marked.
  mirror::Object* immune_begin_;
  mirror::Object* immune_end_;

  mirror::Object* soft_reference_list_;
  mirror::Object* weak_reference_list_;
  mirror::Object* finalizer_reference_list_;
  mirror::Object* phantom_reference_list_;
  mirror::Object* cleared_reference_list_;

  // Parallel finger.
  AtomicInteger atomic_finger_;
  // Number of non large object bytes freed in this collection.
  AtomicInteger freed_bytes_;
  // Number of large object bytes freed.
  AtomicInteger freed_large_object_bytes_;
  // Number of objects freed in this collection.
  AtomicInteger freed_objects_;
  // Number of freed large objects.
  AtomicInteger freed_large_objects_;
  // Number of classes scanned, if kCountScannedTypes.
  AtomicInteger class_count_;
  // Number of arrays scanned, if kCountScannedTypes.
  AtomicInteger array_count_;
  // Number of non-class/arrays scanned, if kCountScannedTypes.
  AtomicInteger other_count_;
  AtomicInteger large_object_test_;
  AtomicInteger large_object_mark_;
  AtomicInteger classes_marked_;
  AtomicInteger overhead_time_;
  AtomicInteger work_chunks_created_;
  AtomicInteger work_chunks_deleted_;
  AtomicInteger reference_count_;
  AtomicInteger cards_scanned_;

  // Verification.
  size_t live_stack_freeze_size_;

  UniquePtr<Barrier> gc_barrier_;
  Mutex large_object_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
  Mutex mark_stack_lock_ ACQUIRED_AFTER(Locks::classlinker_classes_lock_);

  const bool is_concurrent_;

  bool clear_soft_references_;

 private:
  friend class AddIfReachesAllocSpaceVisitor;  // Used by mod-union table.
  friend class CardScanTask;
  friend class CheckBitmapVisitor;
  friend class CheckReferenceVisitor;
  friend class art::gc::Heap;
  friend class InternTableEntryIsUnmarked;
  friend class MarkIfReachesAllocspaceVisitor;
  friend class ModUnionCheckReferences;
  friend class ModUnionClearCardVisitor;
  friend class ModUnionReferenceVisitor;
  friend class ModUnionVisitor;
  friend class ModUnionTableBitmap;
  friend class ModUnionTableReferenceCache;
  friend class ModUnionScanImageRootVisitor;
  friend class ScanBitmapVisitor;
  friend class ScanImageRootVisitor;
  template<bool kUseFinger> friend class MarkStackTask;
  friend class FifoMarkStackChunk;

  DISALLOW_COPY_AND_ASSIGN(MarkSweep);
};

}  // namespace collector
}  // namespace gc
}  // namespace art

#endif  // ART_RUNTIME_GC_COLLECTOR_MARK_SWEEP_H_