heap.h revision fc5b528fc46ccff655cfb3578847350f74064e8d
/*
 * Copyright (C) 2008 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_GC_HEAP_H_
#define ART_RUNTIME_GC_HEAP_H_

#include <iosfwd>
#include <string>
#include <vector>

#include "atomic_integer.h"
#include "base/timing_logger.h"
#include "gc/accounting/atomic_stack.h"
#include "gc/accounting/card_table.h"
#include "gc/collector/gc_type.h"
#include "gc/collector_type.h"
#include "globals.h"
#include "gtest/gtest.h"
#include "jni.h"
#include "locks.h"
#include "offsets.h"
#include "reference_queue.h"
#include "root_visitor.h"
#include "safe_map.h"
#include "thread_pool.h"

namespace art {

class ConditionVariable;
class Mutex;
class StackVisitor;
class Thread;
class TimingLogger;

namespace mirror {
  class Class;
  class Object;
}  // namespace mirror

namespace gc {
namespace accounting {
  class HeapBitmap;
  class ModUnionTable;
  class SpaceSetMap;
}  // namespace accounting

namespace collector {
  class GarbageCollector;
  class MarkSweep;
  class SemiSpace;
}  // namespace collector

namespace space {
  class AllocSpace;
  class BumpPointerSpace;
  class DiscontinuousSpace;
  class DlMallocSpace;
  class ImageSpace;
  class LargeObjectSpace;
  class MallocSpace;
  class RosAllocSpace;
  class Space;
  class SpaceTest;
  class ContinuousMemMapAllocSpace;
}  // namespace space

class AgeCardVisitor {
 public:
  byte operator()(byte card) const {
    if (card == accounting::CardTable::kCardDirty) {
      return card - 1;
    } else {
      return 0;
    }
  }
};

// Different types of allocators.
enum AllocatorType {
  kAllocatorTypeBumpPointer,  // Use BumpPointer allocator, has entrypoints.
  kAllocatorTypeTLAB,  // Use TLAB allocator, has entrypoints.
  kAllocatorTypeRosAlloc,  // Use RosAlloc allocator, has entrypoints.
  kAllocatorTypeDlMalloc,  // Use dlmalloc allocator, has entrypoints.
  kAllocatorTypeNonMoving,  // Special allocator for non moving objects, doesn't have entrypoints.
  kAllocatorTypeLOS,  // Large object space, also doesn't have entrypoints.
};

// What caused the GC?
enum GcCause {
  // GC triggered by a failed allocation. Thread doing allocation is blocked waiting for GC before
  // retrying allocation.
  kGcCauseForAlloc,
  // A background GC trying to ensure there is free memory ahead of allocations.
  kGcCauseBackground,
  // An explicit System.gc() call.
  kGcCauseExplicit,
};
std::ostream& operator<<(std::ostream& os, const GcCause& policy);

// How we want to sanity check the heap's correctness.
enum HeapVerificationMode {
  kHeapVerificationNotPermitted,  // Too early in runtime start-up for heap to be verified.
  kNoHeapVerification,  // Production default.
  kVerifyAllFast,  // Sanity check all heap accesses with quick(er) tests.
  kVerifyAll  // Sanity check all heap accesses.
};
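// Illustrative note (an inference from the declarations in this header, not original text):
// kDesiredHeapVerification below is the build-time ceiling for verification. At runtime,
// Heap::EnableObjectValidation() raises verify_object_mode_ to that ceiling once start-up is
// far enough along, and Heap::DisableObjectValidation() drops it back to
// kHeapVerificationNotPermitted, e.g. while the image writer runs.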
static constexpr HeapVerificationMode kDesiredHeapVerification = kNoHeapVerification;

// If true, use rosalloc/RosAllocSpace instead of dlmalloc/DlMallocSpace.
static constexpr bool kUseRosAlloc = true;

// The process state passed in from the activity manager, used to determine when to do trimming
// and compaction.
enum ProcessState {
  kProcessStateJankPerceptible = 0,
  kProcessStateJankImperceptible = 1,
};
std::ostream& operator<<(std::ostream& os, const ProcessState& process_state);

class Heap {
 public:
  // If true, measure the total allocation time.
  static constexpr bool kMeasureAllocationTime = false;
  // Primitive arrays larger than this size are put in the large object space.
  static constexpr size_t kLargeObjectThreshold = 3 * kPageSize;

  static constexpr size_t kDefaultInitialSize = 2 * MB;
  static constexpr size_t kDefaultMaximumSize = 32 * MB;
  static constexpr size_t kDefaultMaxFree = 2 * MB;
  static constexpr size_t kDefaultMinFree = kDefaultMaxFree / 4;
  static constexpr size_t kDefaultLongPauseLogThreshold = MsToNs(5);
  static constexpr size_t kDefaultLongGCLogThreshold = MsToNs(100);
  static constexpr size_t kDefaultTLABSize = 256 * KB;

  // Default target utilization.
  static constexpr double kDefaultTargetUtilization = 0.5;

  // Used so that we don't overflow the allocation time atomic integer.
  static constexpr size_t kTimeAdjust = 1024;

  // Create a heap with the requested sizes. The possibly empty original_image_file_name
  // specifies the image Space to load based on ImageWriter output.
  explicit Heap(size_t initial_size, size_t growth_limit, size_t min_free,
                size_t max_free, double target_utilization, size_t capacity,
                const std::string& original_image_file_name,
                CollectorType post_zygote_collector_type, CollectorType background_collector_type,
                size_t parallel_gc_threads, size_t conc_gc_threads, bool low_memory_mode,
                size_t long_pause_threshold, size_t long_gc_threshold,
                bool ignore_max_footprint, bool use_tlab);

  ~Heap();

  // Allocates and initializes storage for an object instance.
  template <bool kInstrumented>
  mirror::Object* AllocObject(Thread* self, mirror::Class* klass, size_t num_bytes)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    return AllocObjectWithAllocator<kInstrumented, true>(self, klass, num_bytes,
                                                         GetCurrentAllocator());
  }
  template <bool kInstrumented>
  mirror::Object* AllocNonMovableObject(Thread* self, mirror::Class* klass, size_t num_bytes)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    return AllocObjectWithAllocator<kInstrumented, true>(self, klass, num_bytes,
                                                         GetCurrentNonMovingAllocator());
  }
  template <bool kInstrumented, bool kCheckLargeObject, typename PreFenceVisitor = VoidFunctor>
  ALWAYS_INLINE mirror::Object* AllocObjectWithAllocator(
      Thread* self, mirror::Class* klass, size_t byte_count, AllocatorType allocator,
      const PreFenceVisitor& pre_fence_visitor = VoidFunctor())
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  AllocatorType GetCurrentAllocator() const {
    return current_allocator_;
  }

  AllocatorType GetCurrentNonMovingAllocator() const {
    return current_non_moving_allocator_;
  }
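  // Illustrative usage sketch (not part of this header): allocating through the current
  // allocator from runtime code, assuming a valid Thread* self and a resolved
  // mirror::Class* klass whose instance size is known.
  //
  //   gc::Heap* heap = Runtime::Current()->GetHeap();
  //   mirror::Object* obj =
  //       heap->AllocObject</* kInstrumented */ true>(self, klass, klass->GetObjectSize());
  //   if (obj == nullptr) {
  //     // The slow path already ran GC and threw OutOfMemoryError; just propagate.
  //   }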
  // Visit all of the live objects in the heap.
  void VisitObjects(ObjectVisitorCallback callback, void* arg)
      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);

  void SwapSemiSpaces() EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);

  void DebugCheckPreconditionsForAllocObject(mirror::Class* c, size_t byte_count)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  void ThrowOutOfMemoryError(size_t byte_count, bool large_object_allocation);

  void RegisterNativeAllocation(JNIEnv* env, int bytes);
  void RegisterNativeFree(JNIEnv* env, int bytes);

  // Change the allocator, updates entrypoints.
  void ChangeAllocator(AllocatorType allocator);

  // Transition the garbage collector during runtime, may copy objects from one space to another.
  void TransitionCollector(CollectorType collector_type);

  // Change the collector to be one of the possible options (MS, CMS, SS).
  void ChangeCollector(CollectorType collector_type);

  // The given reference is believed to point to an object in the Java heap; check its soundness.
  void VerifyObjectImpl(const mirror::Object* o);
  void VerifyObject(const mirror::Object* o) {
    if (o != nullptr && this != nullptr && verify_object_mode_ > kNoHeapVerification) {
      VerifyObjectImpl(o);
    }
  }

  // Check sanity of all live references.
  void VerifyHeap() LOCKS_EXCLUDED(Locks::heap_bitmap_lock_);
  bool VerifyHeapReferences()
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  bool VerifyMissingCardMarks()
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // A weaker test than IsLiveObject or VerifyObject that doesn't require the heap lock and
  // doesn't abort on error, allowing the caller to report more meaningful diagnostics.
  bool IsValidObjectAddress(const mirror::Object* obj) const;

  // Returns true if the address passed in is a heap address; it doesn't need to be aligned.
  bool IsHeapAddress(const mirror::Object* obj) const;

  // Returns true if 'obj' is a live heap object, false otherwise (including for invalid
  // addresses). Requires the heap lock to be held.
  bool IsLiveObjectLocked(const mirror::Object* obj, bool search_allocation_stack = true,
                          bool search_live_stack = true, bool sorted = false)
      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);

  // Returns true if there is any chance that the object (obj) will move.
  bool IsMovableObject(const mirror::Object* obj) const;

  // Returns true if an object is in the temp space; if this happens it's usually indicative of
  // compaction related errors.
  bool IsInTempSpace(const mirror::Object* obj) const;

  // Enables us to prevent GC until objects are released.
  void IncrementDisableGC(Thread* self);
  void DecrementDisableGC(Thread* self);

  // Initiates an explicit garbage collection.
  void CollectGarbage(bool clear_soft_references) LOCKS_EXCLUDED(Locks::mutator_lock_);

  // Does a concurrent GC, should only be called by the GC daemon thread through runtime.
  void ConcurrentGC(Thread* self) LOCKS_EXCLUDED(Locks::runtime_shutdown_lock_);
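  // Illustrative usage sketch (not part of this header): pairing the GC disable calls so that
  // objects cannot move while raw pointers into the heap are held. A RAII wrapper like this
  // does not exist here; it is only an assumption about how callers might use the
  // declarations above.
  //
  //   heap->IncrementDisableGC(self);
  //   ...  // safely use object addresses that must not move
  //   heap->DecrementDisableGC(self);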
  // Implements VMDebug.countInstancesOfClass and JDWP VM_InstanceCount. The boolean decides
  // whether to use IsAssignableFrom or == when comparing classes.
  void CountInstances(const std::vector<mirror::Class*>& classes, bool use_is_assignable_from,
                      uint64_t* counts)
      LOCKS_EXCLUDED(Locks::heap_bitmap_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  // Implements JDWP RT_Instances.
  void GetInstances(mirror::Class* c, int32_t max_count, std::vector<mirror::Object*>& instances)
      LOCKS_EXCLUDED(Locks::heap_bitmap_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  // Implements JDWP OR_ReferringObjects.
  void GetReferringObjects(mirror::Object* o, int32_t max_count,
                           std::vector<mirror::Object*>& referring_objects)
      LOCKS_EXCLUDED(Locks::heap_bitmap_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Removes the growth limit on the alloc space so it may grow to its maximum capacity. Used to
  // implement dalvik.system.VMRuntime.clearGrowthLimit.
  void ClearGrowthLimit();

  // Target ideal heap utilization ratio, implements
  // dalvik.system.VMRuntime.getTargetHeapUtilization.
  double GetTargetHeapUtilization() const {
    return target_utilization_;
  }

  // Data structure memory usage tracking.
  void RegisterGCAllocation(size_t bytes);
  void RegisterGCDeAllocation(size_t bytes);

  // Set target ideal heap utilization ratio, implements
  // dalvik.system.VMRuntime.setTargetHeapUtilization.
  void SetTargetHeapUtilization(float target);

  // For the alloc space, sets the maximum number of bytes that the heap is allowed to allocate
  // from the system. Doesn't allow the space to exceed its growth limit.
  void SetIdealFootprint(size_t max_allowed_footprint);

  // Blocks the caller until the garbage collector becomes idle and returns the type of GC we
  // waited for.
  collector::GcType WaitForGcToComplete(Thread* self) LOCKS_EXCLUDED(gc_complete_lock_);

  // Update the heap's process state to a new value, may cause compaction to occur.
  void UpdateProcessState(ProcessState process_state);

  const std::vector<space::ContinuousSpace*>& GetContinuousSpaces() const {
    return continuous_spaces_;
  }

  const std::vector<space::DiscontinuousSpace*>& GetDiscontinuousSpaces() const {
    return discontinuous_spaces_;
  }

  void SetReferenceOffsets(MemberOffset reference_referent_offset,
                           MemberOffset reference_queue_offset,
                           MemberOffset reference_queueNext_offset,
                           MemberOffset reference_pendingNext_offset,
                           MemberOffset finalizer_reference_zombie_offset);
  MemberOffset GetReferenceReferentOffset() const {
    return reference_referent_offset_;
  }
  MemberOffset GetReferenceQueueOffset() const {
    return reference_queue_offset_;
  }
  MemberOffset GetReferenceQueueNextOffset() const {
    return reference_queueNext_offset_;
  }
  MemberOffset GetReferencePendingNextOffset() const {
    return reference_pendingNext_offset_;
  }
  MemberOffset GetFinalizerReferenceZombieOffset() const {
    return finalizer_reference_zombie_offset_;
  }
  static mirror::Object* PreserveSoftReferenceCallback(mirror::Object* obj, void* arg);
  void ProcessReferences(TimingLogger& timings, bool clear_soft, RootVisitor* is_marked_callback,
                         RootVisitor* recursive_mark_object_callback, void* arg)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
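  // Illustrative usage sketch (not part of this header): counting instances the way the
  // VMDebug/JDWP glue above might, assuming string_class is a resolved mirror::Class*.
  //
  //   std::vector<mirror::Class*> classes;
  //   classes.push_back(string_class);
  //   uint64_t counts[1] = { 0 };
  //   heap->CountInstances(classes, /* use_is_assignable_from */ false, counts);
  //   // counts[0] now holds the number of exact instances of string_class.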
  // Enable verification of object references when the runtime is sufficiently initialized.
  void EnableObjectValidation() {
    verify_object_mode_ = kDesiredHeapVerification;
    if (verify_object_mode_ > kNoHeapVerification) {
      VerifyHeap();
    }
  }

  // Disable object reference verification for image writing.
  void DisableObjectValidation() {
    verify_object_mode_ = kHeapVerificationNotPermitted;
  }

  // Other checks may be performed if we know the heap should be in a sane state.
  bool IsObjectValidationEnabled() const {
    return kDesiredHeapVerification > kNoHeapVerification &&
        verify_object_mode_ > kHeapVerificationNotPermitted;
  }

  // Returns true if low memory mode is enabled.
  bool IsLowMemoryMode() const {
    return low_memory_mode_;
  }

  // Freed bytes can be negative in cases where we copy objects from a compacted space to a
  // free-list backed space.
  void RecordFree(int64_t freed_objects, int64_t freed_bytes);

  // Must be called if a field of an Object in the heap changes, and before any GC safe-point.
  // The call is not needed if NULL is stored in the field.
  void WriteBarrierField(const mirror::Object* dst, MemberOffset /*offset*/,
                         const mirror::Object* /*new_value*/) {
    card_table_->MarkCard(dst);
  }

  // Write barrier for array operations that update many field positions.
  void WriteBarrierArray(const mirror::Object* dst, int /*start_offset*/,
                         size_t /*length TODO: element_count or byte_count?*/) {
    card_table_->MarkCard(dst);
  }

  void WriteBarrierEveryFieldOf(const mirror::Object* obj) {
    card_table_->MarkCard(obj);
  }

  accounting::CardTable* GetCardTable() const {
    return card_table_.get();
  }

  void AddFinalizerReference(Thread* self, mirror::Object* object);

  // Returns the number of bytes currently allocated.
  size_t GetBytesAllocated() const {
    return num_bytes_allocated_;
  }

  // Returns the number of objects currently allocated.
  size_t GetObjectsAllocated() const LOCKS_EXCLUDED(Locks::heap_bitmap_lock_);

  // Returns the total number of objects allocated since the heap was created.
  size_t GetObjectsAllocatedEver() const;

  // Returns the total number of bytes allocated since the heap was created.
  size_t GetBytesAllocatedEver() const;

  // Returns the total number of objects freed since the heap was created.
  size_t GetObjectsFreedEver() const {
    return total_objects_freed_ever_;
  }

  // Returns the total number of bytes freed since the heap was created.
  size_t GetBytesFreedEver() const {
    return total_bytes_freed_ever_;
  }

  // Implements java.lang.Runtime.maxMemory, returning the maximum amount of memory a program can
  // consume. For a regular VM this would relate to the -Xmx option and would return -1 if no Xmx
  // were specified. Android apps start with a growth limit (small heap size) which is
  // cleared/extended for large apps.
  int64_t GetMaxMemory() const {
    return growth_limit_;
  }

  // Implements java.lang.Runtime.totalMemory, returning the amount of memory consumed by an
  // application.
  int64_t GetTotalMemory() const;

  // Implements java.lang.Runtime.freeMemory.
  int64_t GetFreeMemory() const {
    return GetTotalMemory() - num_bytes_allocated_;
  }
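  // Worked example (illustrative numbers only, assuming GetTotalMemory() reports a 16 MB
  // footprint): with growth_limit_ at 32 MB and num_bytes_allocated_ at 10 MB, the accessors
  // above give Runtime.maxMemory() == 32 MB, Runtime.totalMemory() == 16 MB and
  // Runtime.freeMemory() == 16 MB - 10 MB == 6 MB.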
  // Get the space that corresponds to an object's address. The current implementation searches
  // all spaces in turn. If fail_ok is false then failing to find a space will cause an abort.
  // TODO: consider using a faster data structure like a binary tree.
  space::ContinuousSpace* FindContinuousSpaceFromObject(const mirror::Object*, bool fail_ok) const;
  space::DiscontinuousSpace* FindDiscontinuousSpaceFromObject(const mirror::Object*,
                                                              bool fail_ok) const;
  space::Space* FindSpaceFromObject(const mirror::Object*, bool fail_ok) const;

  void DumpForSigQuit(std::ostream& os);

  // Trim the managed and native heaps by releasing unused memory back to the OS.
  void Trim();

  void RevokeThreadLocalBuffers(Thread* thread);
  void RevokeAllThreadLocalBuffers();

  accounting::HeapBitmap* GetLiveBitmap() SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
    return live_bitmap_.get();
  }

  accounting::HeapBitmap* GetMarkBitmap() SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
    return mark_bitmap_.get();
  }

  accounting::ObjectStack* GetLiveStack() SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
    return live_stack_.get();
  }

  void PreZygoteFork() NO_THREAD_SAFETY_ANALYSIS;

  // Mark and empty stack.
  void FlushAllocStack()
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);

  // Mark all the objects in the allocation stack in the specified bitmap.
  void MarkAllocStack(accounting::SpaceBitmap* bitmap1, accounting::SpaceBitmap* bitmap2,
                      accounting::SpaceSetMap* large_objects, accounting::ObjectStack* stack)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);

  // Mark the specified allocation stack as live.
  void MarkAllocStackAsLive(accounting::ObjectStack* stack)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);

  // DEPRECATED: Should remove in "near" future when support for multiple image spaces is added.
  // Assumes there is only one image space.
  space::ImageSpace* GetImageSpace() const;

  space::DlMallocSpace* GetDlMallocSpace() const {
    return dlmalloc_space_;
  }

  space::RosAllocSpace* GetRosAllocSpace() const {
    return rosalloc_space_;
  }

  space::MallocSpace* GetNonMovingSpace() const {
    return non_moving_space_;
  }

  space::LargeObjectSpace* GetLargeObjectsSpace() const {
    return large_object_space_;
  }

  void DumpSpaces(std::ostream& stream = LOG(INFO));

  // GC performance measuring.
  void DumpGcPerformanceInfo(std::ostream& os);

  // Returns true if we currently care about pause times.
  bool CareAboutPauseTimes() const {
    return process_state_ == kProcessStateJankPerceptible;
  }
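  // Illustrative usage sketch (not part of this header): capturing the GC performance dump,
  // e.g. from SIGQUIT handling code.
  //
  //   std::ostringstream oss;
  //   heap->DumpGcPerformanceInfo(oss);
  //   LOG(INFO) << oss.str();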
  // Thread pool.
  void CreateThreadPool();
  void DeleteThreadPool();
  ThreadPool* GetThreadPool() {
    return thread_pool_.get();
  }
  size_t GetParallelGCThreadCount() const {
    return parallel_gc_threads_;
  }
  size_t GetConcGCThreadCount() const {
    return conc_gc_threads_;
  }
  accounting::ModUnionTable* FindModUnionTableFromSpace(space::Space* space);
  void AddModUnionTable(accounting::ModUnionTable* mod_union_table);

  bool IsCompilingBoot() const;
  bool HasImageSpace() const;

 private:
  void Compact(space::ContinuousMemMapAllocSpace* target_space,
               space::ContinuousMemMapAllocSpace* source_space);

  bool StartGC(Thread* self) LOCKS_EXCLUDED(gc_complete_lock_);
  void FinishGC(Thread* self, collector::GcType gc_type) LOCKS_EXCLUDED(gc_complete_lock_);

  static ALWAYS_INLINE bool AllocatorHasAllocationStack(AllocatorType allocator_type) {
    return allocator_type != kAllocatorTypeBumpPointer &&
           allocator_type != kAllocatorTypeTLAB;
  }
  static ALWAYS_INLINE bool AllocatorMayHaveConcurrentGC(AllocatorType allocator_type) {
    return AllocatorHasAllocationStack(allocator_type);
  }
  bool ShouldAllocLargeObject(mirror::Class* c, size_t byte_count) const;
  ALWAYS_INLINE void CheckConcurrentGC(Thread* self, size_t new_num_bytes_allocated,
                                       mirror::Object* obj);

  // We don't force this to be inlined since it is a slow path.
  template <bool kInstrumented, typename PreFenceVisitor>
  mirror::Object* AllocLargeObject(Thread* self, mirror::Class* klass, size_t byte_count,
                                   const PreFenceVisitor& pre_fence_visitor)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Handles Allocate()'s slow allocation path with GC involved after an initial allocation
  // attempt failed.
  mirror::Object* AllocateInternalWithGc(Thread* self, AllocatorType allocator, size_t num_bytes,
                                         size_t* bytes_allocated, mirror::Class** klass)
      LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Allocate into a specific space.
  mirror::Object* AllocateInto(Thread* self, space::AllocSpace* space, mirror::Class* c,
                               size_t bytes)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Try to allocate a number of bytes, this function never does any GCs. Needs to be inlined so
  // that the switch statement is constant optimized in the entrypoints.
  template <const bool kInstrumented, const bool kGrow>
  ALWAYS_INLINE mirror::Object* TryToAllocate(Thread* self, AllocatorType allocator_type,
                                              size_t alloc_size, size_t* bytes_allocated)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  void ThrowOutOfMemoryError(Thread* self, size_t byte_count, bool large_object_allocation)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  template <bool kGrow>
  bool IsOutOfMemoryOnAllocation(AllocatorType allocator_type, size_t alloc_size);

  void SetReferenceReferent(mirror::Object* reference, mirror::Object* referent)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  mirror::Object* GetReferenceReferent(mirror::Object* reference)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  void ClearReferenceReferent(mirror::Object* reference)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    SetReferenceReferent(reference, nullptr);
  }
  // Pushes a list of cleared references out to the managed heap.
  void EnqueueClearedReferences();
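  // Illustrative sketch (not part of this header) of how the fast/slow split above is
  // typically wired together by callers such as AllocObjectWithAllocator:
  //
  //   size_t bytes_allocated;
  //   mirror::Object* obj =
  //       TryToAllocate<kInstrumented, false>(self, allocator, byte_count, &bytes_allocated);
  //   if (UNLIKELY(obj == nullptr)) {
  //     // Slow path: runs GCs from gc_plan_ and retries, throwing OOME on final failure.
  //     obj = AllocateInternalWithGc(self, allocator, byte_count, &bytes_allocated, &klass);
  //   }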
  // Returns true if the reference object has not yet been enqueued.
  bool IsEnqueuable(const mirror::Object* ref) const;
  bool IsEnqueued(mirror::Object* ref) const;
  void DelayReferenceReferent(mirror::Class* klass, mirror::Object* obj, RootVisitor mark_visitor,
                              void* arg) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Run the finalizers.
  void RunFinalization(JNIEnv* env);

  // Blocks the caller until the garbage collector becomes idle and returns the type of GC we
  // waited for.
  collector::GcType WaitForGcToCompleteLocked(Thread* self)
      EXCLUSIVE_LOCKS_REQUIRED(gc_complete_lock_);

  void RequestHeapTrim() LOCKS_EXCLUDED(Locks::runtime_shutdown_lock_);
  void RequestConcurrentGC(Thread* self) LOCKS_EXCLUDED(Locks::runtime_shutdown_lock_);
  bool IsGCRequestPending() const;

  size_t RecordAllocationInstrumented(size_t size, mirror::Object* object)
      LOCKS_EXCLUDED(GlobalSynchronization::heap_bitmap_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  size_t RecordAllocationUninstrumented(size_t size, mirror::Object* object)
      LOCKS_EXCLUDED(GlobalSynchronization::heap_bitmap_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Sometimes CollectGarbageInternal decides to run a different GC than you requested. Returns
  // which type of GC was actually run.
  collector::GcType CollectGarbageInternal(collector::GcType gc_plan, GcCause gc_cause,
                                           bool clear_soft_references)
      LOCKS_EXCLUDED(gc_complete_lock_,
                     Locks::heap_bitmap_lock_,
                     Locks::thread_suspend_count_lock_);

  void PreGcVerification(collector::GarbageCollector* gc);
  void PreSweepingGcVerification(collector::GarbageCollector* gc)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
  void PostGcVerification(collector::GarbageCollector* gc)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Update the watermark for the native allocated bytes based on the current number of native
  // bytes allocated and the target utilization ratio.
  void UpdateMaxNativeFootprint();

  // Given the current contents of the alloc space, increase the allowed heap footprint to match
  // the target utilization ratio. This should only be called immediately after a full garbage
  // collection.
  void GrowForUtilization(collector::GcType gc_type, uint64_t gc_duration);

  size_t GetPercentFree();

  void AddSpace(space::Space* space, bool set_as_default = true)
      LOCKS_EXCLUDED(Locks::heap_bitmap_lock_);
  void RemoveSpace(space::Space* space) LOCKS_EXCLUDED(Locks::heap_bitmap_lock_);

  // No thread safety analysis since we call this everywhere and it is impossible to find a proper
  // lock ordering for it.
  void VerifyObjectBody(const mirror::Object* obj) NO_THREAD_SAFETY_ANALYSIS;

  static void VerificationCallback(mirror::Object* obj, void* arg)
      SHARED_LOCKS_REQUIRED(GlobalSynchronization::heap_bitmap_lock_);

  // Swap the allocation stack with the live stack.
  void SwapStacks();

  // Clear cards and update the mod union table.
  void ProcessCards(TimingLogger& timings);

  // All-known continuous spaces, where objects lie within fixed bounds.
  std::vector<space::ContinuousSpace*> continuous_spaces_;

  // All-known discontinuous spaces, where objects may be placed throughout virtual memory.
  std::vector<space::DiscontinuousSpace*> discontinuous_spaces_;
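  // Worked example (illustrative numbers only): with target_utilization_ = 0.5 and 20 MB of
  // live data after a full GC, growing for utilization aims the footprint at roughly
  // 20 MB / 0.5 == 40 MB, with the resulting free headroom clamped to stay between min_free_
  // and max_free_ (see the field comments below).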
  // All-known alloc spaces, where objects may be or have been allocated.
  std::vector<space::AllocSpace*> alloc_spaces_;

  // A space where non-movable objects are allocated; when compaction is enabled it contains
  // Classes, ArtMethods, ArtFields, and non-moving objects.
  space::MallocSpace* non_moving_space_;

  // Space which we use for the kAllocatorTypeRosAlloc.
  space::RosAllocSpace* rosalloc_space_;

  // Space which we use for the kAllocatorTypeDlMalloc.
  space::DlMallocSpace* dlmalloc_space_;

  // The main space is the space which the GC copies to and from on process state updates. This
  // space is typically either the dlmalloc_space_ or the rosalloc_space_.
  space::MallocSpace* main_space_;

  // The large object space we are currently allocating into.
  space::LargeObjectSpace* large_object_space_;

  // The card table, dirtied by the write barrier.
  UniquePtr<accounting::CardTable> card_table_;

  // A mod-union table remembers all of the references from its space to other spaces.
  SafeMap<space::Space*, accounting::ModUnionTable*> mod_union_tables_;

  // Keep the free list allocator mem map lying around when we transition to background so that we
  // don't have to worry about virtual address space fragmentation.
  UniquePtr<MemMap> allocator_mem_map_;

  // What kind of concurrency behavior is the runtime after? Currently true for concurrent mark
  // sweep GC, false for other GC types.
  bool concurrent_gc_;

  // The current collector type.
  CollectorType collector_type_;
  // Which collector we will switch to after zygote fork.
  CollectorType post_zygote_collector_type_;
  // Which collector we will use when the app is notified of a transition to background.
  CollectorType background_collector_type_;

  // How many GC threads we may use for paused parts of garbage collection.
  const size_t parallel_gc_threads_;

  // How many GC threads we may use for unpaused parts of garbage collection.
  const size_t conc_gc_threads_;

  // Boolean for if we are in low memory mode.
  const bool low_memory_mode_;

  // If we get a pause longer than long pause log threshold, then we print out the GC after it
  // finishes.
  const size_t long_pause_log_threshold_;

  // If we get a GC longer than long GC log threshold, then we print out the GC after it finishes.
  const size_t long_gc_log_threshold_;

  // If we ignore the max footprint it lets the heap grow until it hits the heap capacity; this is
  // useful for benchmarking since it reduces time spent in GC to a low %.
  const bool ignore_max_footprint_;

  // If we have a zygote space.
  bool have_zygote_space_;

  // Number of pinned primitive arrays in the movable space.
  // Block all GC until this hits zero, or we hit the timeout!
  size_t number_gc_blockers_;
  static constexpr size_t KGCBlockTimeout = 30000;

  // Guards access to the state of GC; the associated condition variable is used to signal when a
  // GC completes.
  Mutex* gc_complete_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
  UniquePtr<ConditionVariable> gc_complete_cond_ GUARDED_BY(gc_complete_lock_);

  // Reference queues.
  ReferenceQueue soft_reference_queue_;
  ReferenceQueue weak_reference_queue_;
  ReferenceQueue finalizer_reference_queue_;
  ReferenceQueue phantom_reference_queue_;
  ReferenceQueue cleared_references_;
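  // Illustrative note (summarizing standard java.lang.ref semantics, not text from this
  // header): during ProcessReferences the queues above are drained in soft, weak, finalizer,
  // phantom order, and references whose referents were cleared end up on cleared_references_
  // for EnqueueClearedReferences to hand back to the managed ReferenceQueue.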
  // True while the garbage collector is running.
  volatile bool is_gc_running_ GUARDED_BY(gc_complete_lock_);

  // Last GC type we ran. Used by WaitForConcurrentGc to know which GC was waited on.
  volatile collector::GcType last_gc_type_ GUARDED_BY(gc_complete_lock_);
  collector::GcType next_gc_type_;

  // Maximum size that the heap can reach.
  const size_t capacity_;

  // The size the heap is limited to. This is initially smaller than capacity, but for largeHeap
  // programs it is "cleared", making it the same as capacity.
  size_t growth_limit_;

  // When the number of bytes allocated exceeds the footprint TryAllocate returns NULL indicating
  // that a GC should be triggered.
  size_t max_allowed_footprint_;

  // The watermark at which a concurrent GC is requested by registerNativeAllocation.
  size_t native_footprint_gc_watermark_;

  // The watermark at which a GC is performed inside of registerNativeAllocation.
  size_t native_footprint_limit_;

  // Whether or not we need to run finalizers in the next native allocation.
  bool native_need_to_run_finalization_;

  // Whether or not we currently care about pause times.
  ProcessState process_state_;

  // When num_bytes_allocated_ exceeds this amount then a concurrent GC should be requested so
  // that it completes ahead of an allocation failing.
  size_t concurrent_start_bytes_;

  // Since the heap was created, how many bytes have been freed.
  size_t total_bytes_freed_ever_;

  // Since the heap was created, how many objects have been freed.
  size_t total_objects_freed_ever_;

  // Number of bytes allocated. Adjusted after each allocation and free.
  AtomicInteger num_bytes_allocated_;

  // Bytes which are allocated and managed by native code but still need to be accounted for.
  AtomicInteger native_bytes_allocated_;

  // Data structure GC overhead.
  AtomicInteger gc_memory_overhead_;

  // Heap verification flags.
  const bool verify_missing_card_marks_;
  const bool verify_system_weaks_;
  const bool verify_pre_gc_heap_;
  const bool verify_post_gc_heap_;
  const bool verify_mod_union_table_;

  // Parallel GC data structures.
  UniquePtr<ThreadPool> thread_pool_;

  // Sticky mark bits GC has some overhead, so if we have less than a few megabytes of AllocSpace
  // then it's probably better to just do a partial GC.
  const size_t min_alloc_space_size_for_sticky_gc_;

  // Minimum remaining size for sticky GC. Since sticky GC doesn't free up as much memory as a
  // normal GC, it is important to not use it when we are almost out of memory.
  const size_t min_remaining_space_for_sticky_gc_;

  // The last time a heap trim occurred.
  uint64_t last_trim_time_ms_;

  // The nanosecond time at which the last GC ended.
  uint64_t last_gc_time_ns_;

  // How many bytes were allocated at the end of the last GC.
  uint64_t last_gc_size_;

  // Estimated allocation rate (bytes / second). Computed between the time of the last GC cycle
  // and the start of the current one.
  uint64_t allocation_rate_;

  // For a GC cycle, bitmaps that are set corresponding to the live and marked objects.
  UniquePtr<accounting::HeapBitmap> live_bitmap_ GUARDED_BY(Locks::heap_bitmap_lock_);
  UniquePtr<accounting::HeapBitmap> mark_bitmap_ GUARDED_BY(Locks::heap_bitmap_lock_);
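  // Worked example (illustrative numbers only): if roughly 8 MB were allocated in the 500 ms
  // between the end of the last GC and the start of the current one, allocation_rate_ would
  // come out around 16 MB/s. A plausible consumer (an assumption, not stated in this header)
  // is the logic that decides how far below the footprint to place concurrent_start_bytes_.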
  // Mark stack that we reuse to avoid re-allocating the mark stack.
  UniquePtr<accounting::ObjectStack> mark_stack_;

  // Allocation stack, new allocations go here so that we can do sticky mark bits. This enables
  // us to use the live bitmap as the old mark bitmap.
  const size_t max_allocation_stack_size_;
  UniquePtr<accounting::ObjectStack> allocation_stack_;

  // Second allocation stack so that we can process allocation with the heap unlocked.
  UniquePtr<accounting::ObjectStack> live_stack_;

  // Allocator type.
  AllocatorType current_allocator_;
  const AllocatorType current_non_moving_allocator_;

  // Which GCs we run in order when an allocation fails.
  std::vector<collector::GcType> gc_plan_;

  // Bump pointer spaces.
  space::BumpPointerSpace* bump_pointer_space_;
  // Temp space is the space which the semispace collector copies to.
  space::BumpPointerSpace* temp_space_;

  // Offset of java.lang.ref.Reference.referent.
  MemberOffset reference_referent_offset_;
  // Offset of java.lang.ref.Reference.queue.
  MemberOffset reference_queue_offset_;
  // Offset of java.lang.ref.Reference.queueNext.
  MemberOffset reference_queueNext_offset_;
  // Offset of java.lang.ref.Reference.pendingNext.
  MemberOffset reference_pendingNext_offset_;
  // Offset of java.lang.ref.FinalizerReference.zombie.
  MemberOffset finalizer_reference_zombie_offset_;

  // Minimum free guarantees that you always have at least min_free_ free bytes after growing for
  // utilization, regardless of target utilization ratio.
  size_t min_free_;

  // The ideal maximum free size, when we grow the heap for utilization.
  size_t max_free_;

  // Target ideal heap utilization ratio.
  double target_utilization_;

  // Total time which mutators are paused or waiting for GC to complete.
  uint64_t total_wait_time_;

  // Total time spent allocating objects, in microseconds.
  AtomicInteger total_allocation_time_;

  // The current state of heap verification, may be enabled or disabled.
  HeapVerificationMode verify_object_mode_;

  // GC disable count, error on GC if > 0.
  size_t gc_disable_count_ GUARDED_BY(gc_complete_lock_);

  std::vector<collector::GarbageCollector*> garbage_collectors_;
  collector::SemiSpace* semi_space_collector_;

  const bool running_on_valgrind_;
  const bool use_tlab_;

  friend class collector::MarkSweep;
  friend class collector::SemiSpace;
  friend class ReferenceQueue;
  friend class VerifyReferenceCardVisitor;
  friend class VerifyReferenceVisitor;
  friend class VerifyObjectVisitor;
  friend class ScopedHeapLock;
  friend class space::SpaceTest;

  class AllocationTimer {
   private:
    Heap* heap_;
    mirror::Object** allocated_obj_ptr_;
    uint64_t allocation_start_time_;
   public:
    AllocationTimer(Heap* heap, mirror::Object** allocated_obj_ptr);
    ~AllocationTimer();
  };

  DISALLOW_IMPLICIT_CONSTRUCTORS(Heap);
};

}  // namespace gc
}  // namespace art

#endif  // ART_RUNTIME_GC_HEAP_H_