heap.h revision 446f9ee5031cf89b8964e29eba2c9f10a4d4aaf1
/*
 * Copyright (C) 2008 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_GC_HEAP_H_
#define ART_RUNTIME_GC_HEAP_H_

#include <iosfwd>
#include <string>
#include <vector>

#include "allocator_type.h"
#include "arch/instruction_set.h"
#include "atomic.h"
#include "base/timing_logger.h"
#include "gc/accounting/atomic_stack.h"
#include "gc/accounting/card_table.h"
#include "gc/gc_cause.h"
#include "gc/collector/garbage_collector.h"
#include "gc/collector/gc_type.h"
#include "gc/collector_type.h"
#include "gc/space/large_object_space.h"
#include "globals.h"
#include "jni.h"
#include "object_callbacks.h"
#include "offsets.h"
#include "reference_processor.h"
#include "safe_map.h"
#include "thread_pool.h"
#include "verify_object.h"

namespace art {

class ConditionVariable;
class Mutex;
class StackVisitor;
class Thread;
class TimingLogger;

namespace mirror {
  class Class;
  class Object;
}  // namespace mirror

namespace gc {

class ReferenceProcessor;

namespace accounting {
  class HeapBitmap;
  class ModUnionTable;
  class RememberedSet;
}  // namespace accounting

namespace collector {
  class ConcurrentCopying;
  class GarbageCollector;
  class MarkCompact;
  class MarkSweep;
  class SemiSpace;
}  // namespace collector

namespace allocator {
  class RosAlloc;
}  // namespace allocator

namespace space {
  class AllocSpace;
  class BumpPointerSpace;
  class ContinuousMemMapAllocSpace;
  class DiscontinuousSpace;
  class DlMallocSpace;
  class ImageSpace;
  class LargeObjectSpace;
  class MallocSpace;
  class RosAllocSpace;
  class Space;
  class SpaceTest;
  class ZygoteSpace;
}  // namespace space

class AgeCardVisitor {
 public:
  uint8_t operator()(uint8_t card) const {
    if (card == accounting::CardTable::kCardDirty) {
      return card - 1;
    } else {
      return 0;
    }
  }
};

enum HomogeneousSpaceCompactResult {
  // Success.
  kSuccess,
  // Reject due to disabled moving GC.
  kErrorReject,
  // System is shutting down.
  kErrorVMShuttingDown,
};

// If true, use rosalloc/RosAllocSpace instead of dlmalloc/DlMallocSpace.
static constexpr bool kUseRosAlloc = true;

// If true, use thread-local allocation stack.
static constexpr bool kUseThreadLocalAllocationStack = true;

// The process state passed in from the activity manager, used to determine when to do trimming
// and compaction.
enum ProcessState {
  kProcessStateJankPerceptible = 0,
  kProcessStateJankImperceptible = 1,
};
std::ostream& operator<<(std::ostream& os, const ProcessState& process_state);

class Heap {
 public:
  // If true, measure the total allocation time.
  static constexpr bool kMeasureAllocationTime = false;
  static constexpr size_t kDefaultStartingSize = kPageSize;
  static constexpr size_t kDefaultInitialSize = 2 * MB;
  static constexpr size_t kDefaultMaximumSize = 256 * MB;
  static constexpr size_t kDefaultNonMovingSpaceCapacity = 64 * MB;
  static constexpr size_t kDefaultMaxFree = 2 * MB;
  static constexpr size_t kDefaultMinFree = kDefaultMaxFree / 4;
  static constexpr size_t kDefaultLongPauseLogThreshold = MsToNs(5);
  static constexpr size_t kDefaultLongGCLogThreshold = MsToNs(100);
  static constexpr size_t kDefaultTLABSize = 256 * KB;
  static constexpr double kDefaultTargetUtilization = 0.5;
  static constexpr double kDefaultHeapGrowthMultiplier = 2.0;
  // Primitive arrays larger than this size are put in the large object space.
  static constexpr size_t kDefaultLargeObjectThreshold = 3 * kPageSize;
  // Whether or not we use the free list large object space. Only use it if USE_ART_LOW_4G_ALLOCATOR
  // since this means that we have to use the slow msync loop in MemMap::MapAnonymous.
#if USE_ART_LOW_4G_ALLOCATOR
  static constexpr space::LargeObjectSpaceType kDefaultLargeObjectSpaceType =
      space::kLargeObjectSpaceTypeFreeList;
#else
  static constexpr space::LargeObjectSpaceType kDefaultLargeObjectSpaceType =
      space::kLargeObjectSpaceTypeMap;
#endif
  // Used so that we don't overflow the allocation time atomic integer.
  static constexpr size_t kTimeAdjust = 1024;

  // How often we allow heap trimming to happen (nanoseconds).
  static constexpr uint64_t kHeapTrimWait = MsToNs(5000);
  // How long we wait after a transition request to perform a collector transition (nanoseconds).
  static constexpr uint64_t kCollectorTransitionWait = MsToNs(5000);

  // Create a heap with the requested sizes. The possibly empty image file name specifies the
  // Space to load based on ImageWriter output.
  explicit Heap(size_t initial_size, size_t growth_limit, size_t min_free,
                size_t max_free, double target_utilization,
                double foreground_heap_growth_multiplier, size_t capacity,
                size_t non_moving_space_capacity,
                const std::string& original_image_file_name,
                InstructionSet image_instruction_set,
                CollectorType foreground_collector_type, CollectorType background_collector_type,
                space::LargeObjectSpaceType large_object_space_type, size_t large_object_threshold,
                size_t parallel_gc_threads, size_t conc_gc_threads, bool low_memory_mode,
                size_t long_pause_threshold, size_t long_gc_threshold,
                bool ignore_max_footprint, bool use_tlab,
                bool verify_pre_gc_heap, bool verify_pre_sweeping_heap, bool verify_post_gc_heap,
                bool verify_pre_gc_rosalloc, bool verify_pre_sweeping_rosalloc,
                bool verify_post_gc_rosalloc, bool use_homogeneous_space_compaction,
                uint64_t min_interval_homogeneous_space_compaction_by_oom);

  ~Heap();
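
  // Note on the defaults above (illustrative arithmetic, not normative): with the common 4 KiB
  // page size, kDefaultLargeObjectThreshold works out to 3 * 4 KiB = 12 KiB, so a primitive array
  // carrying roughly 16 KiB of data would go to the large object space while one carrying about
  // 4 KiB would stay in the main space. The concrete page size and array header overhead are
  // platform details and are only assumed here for the example.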
  // Allocates and initializes storage for an object instance.
  template <bool kInstrumented, typename PreFenceVisitor>
  mirror::Object* AllocObject(Thread* self, mirror::Class* klass, size_t num_bytes,
                              const PreFenceVisitor& pre_fence_visitor)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    return AllocObjectWithAllocator<kInstrumented, true>(self, klass, num_bytes,
                                                         GetCurrentAllocator(),
                                                         pre_fence_visitor);
  }

  template <bool kInstrumented, typename PreFenceVisitor>
  mirror::Object* AllocNonMovableObject(Thread* self, mirror::Class* klass, size_t num_bytes,
                                        const PreFenceVisitor& pre_fence_visitor)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    return AllocObjectWithAllocator<kInstrumented, true>(self, klass, num_bytes,
                                                         GetCurrentNonMovingAllocator(),
                                                         pre_fence_visitor);
  }

  template <bool kInstrumented, bool kCheckLargeObject, typename PreFenceVisitor>
  ALWAYS_INLINE mirror::Object* AllocObjectWithAllocator(
      Thread* self, mirror::Class* klass, size_t byte_count, AllocatorType allocator,
      const PreFenceVisitor& pre_fence_visitor)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  AllocatorType GetCurrentAllocator() const {
    return current_allocator_;
  }

  AllocatorType GetCurrentNonMovingAllocator() const {
    return current_non_moving_allocator_;
  }

  // Visit all of the live objects in the heap.
  void VisitObjects(ObjectCallback callback, void* arg)
      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);

  void CheckPreconditionsForAllocObject(mirror::Class* c, size_t byte_count)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  void RegisterNativeAllocation(JNIEnv* env, size_t bytes);
  void RegisterNativeFree(JNIEnv* env, size_t bytes);

  // Change the allocator, updates entrypoints.
  void ChangeAllocator(AllocatorType allocator)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_)
      LOCKS_EXCLUDED(Locks::runtime_shutdown_lock_);

  // Transition the garbage collector during runtime, may copy objects from one space to another.
  void TransitionCollector(CollectorType collector_type);

  // Change the collector to be one of the possible options (MS, CMS, SS).
  void ChangeCollector(CollectorType collector_type)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);

  // The given reference is believed to point to an object in the Java heap; check its soundness.
  // TODO: NO_THREAD_SAFETY_ANALYSIS since we call this everywhere and it is impossible to find a
  // proper lock ordering for it.
  void VerifyObjectBody(mirror::Object* o) NO_THREAD_SAFETY_ANALYSIS;

  // Check sanity of all live references.
  void VerifyHeap() LOCKS_EXCLUDED(Locks::heap_bitmap_lock_);
  // Returns how many failures occurred.
  size_t VerifyHeapReferences(bool verify_referents = true)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
  bool VerifyMissingCardMarks()
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);

  // A weaker test than IsLiveObject or VerifyObject that doesn't require the heap lock,
  // and doesn't abort on error, allowing the caller to report more
  // meaningful diagnostics.
  bool IsValidObjectAddress(const mirror::Object* obj) const
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
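
  // Illustrative sketch (not a prescribed API contract) of how the allocation entry points above
  // are typically driven: a caller that holds the mutator lock picks the instrumented variant at
  // compile time and supplies a pre-fence visitor that runs before the publishing fence, e.g.
  //
  //   mirror::Object* obj =
  //       heap->AllocObject<true>(self, klass, klass->GetObjectSize(), VoidFunctor());
  //
  // VoidFunctor here only stands in for any PreFenceVisitor; the real call sites are the
  // runtime's allocation entrypoints rather than user code.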
  // Faster alternative to IsHeapAddress since finding if an object is in the large object space is
  // very slow.
  bool IsNonDiscontinuousSpaceHeapAddress(const mirror::Object* obj) const
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Returns true if 'obj' is a live heap object, false otherwise (including for invalid addresses).
  // Requires the heap lock to be held.
  bool IsLiveObjectLocked(mirror::Object* obj, bool search_allocation_stack = true,
                          bool search_live_stack = true, bool sorted = false)
      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);

  // Returns true if there is any chance that the object (obj) will move.
  bool IsMovableObject(const mirror::Object* obj) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Enables us to prevent a compacting GC from running until the objects in use are released.
  void IncrementDisableMovingGC(Thread* self);
  void DecrementDisableMovingGC(Thread* self);

  // Clear all of the mark bits, doesn't clear bitmaps which have the same live bits as mark bits.
  void ClearMarkedObjects() EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);

  // Initiates an explicit garbage collection.
  void CollectGarbage(bool clear_soft_references);

  // Does a concurrent GC, should only be called by the GC daemon thread
  // through runtime.
  void ConcurrentGC(Thread* self) LOCKS_EXCLUDED(Locks::runtime_shutdown_lock_);

  // Implements VMDebug.countInstancesOfClass and JDWP VM_InstanceCount.
  // The boolean decides whether to use IsAssignableFrom or == when comparing classes.
  void CountInstances(const std::vector<mirror::Class*>& classes, bool use_is_assignable_from,
                      uint64_t* counts)
      LOCKS_EXCLUDED(Locks::heap_bitmap_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  // Implements JDWP RT_Instances.
  void GetInstances(mirror::Class* c, int32_t max_count, std::vector<mirror::Object*>& instances)
      LOCKS_EXCLUDED(Locks::heap_bitmap_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  // Implements JDWP OR_ReferringObjects.
  void GetReferringObjects(mirror::Object* o, int32_t max_count,
                           std::vector<mirror::Object*>& referring_objects)
      LOCKS_EXCLUDED(Locks::heap_bitmap_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Removes the growth limit on the alloc space so it may grow to its maximum capacity. Used to
  // implement dalvik.system.VMRuntime.clearGrowthLimit.
  void ClearGrowthLimit();

  // Target ideal heap utilization ratio, implements
  // dalvik.system.VMRuntime.getTargetHeapUtilization.
  double GetTargetHeapUtilization() const {
    return target_utilization_;
  }

  // Data structure memory usage tracking.
  void RegisterGCAllocation(size_t bytes);
  void RegisterGCDeAllocation(size_t bytes);

  // Set the heap's private space pointers to be the same as the space based on its type. Public
  // due to usage by tests.
  void SetSpaceAsDefault(space::ContinuousSpace* continuous_space)
      LOCKS_EXCLUDED(Locks::heap_bitmap_lock_);
  void AddSpace(space::Space* space) LOCKS_EXCLUDED(Locks::heap_bitmap_lock_);
  void RemoveSpace(space::Space* space) LOCKS_EXCLUDED(Locks::heap_bitmap_lock_);

  // Set target ideal heap utilization ratio, implements
  // dalvik.system.VMRuntime.setTargetHeapUtilization.
  void SetTargetHeapUtilization(float target);
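
  // Worked example for the utilization knob above (illustrative numbers only): with a target
  // utilization of 0.5 and 20 MB of live data after a full GC, the raw target footprint is
  // 20 MB / 0.5 = 40 MB; the extra headroom is then clamped to the [min_free_, max_free_] range,
  // so with the default 2 MB max_free the heap would end up near 22 MB before any foreground
  // growth multiplier is applied. See GrowForUtilization below for the actual computation.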
  // For the alloc space, sets the maximum number of bytes that the heap is allowed to allocate
  // from the system. Doesn't allow the space to exceed its growth limit.
  void SetIdealFootprint(size_t max_allowed_footprint);

  // Blocks the caller until the garbage collector becomes idle and returns the type of GC we
  // waited for.
  collector::GcType WaitForGcToComplete(GcCause cause, Thread* self)
      LOCKS_EXCLUDED(gc_complete_lock_);

  // Update the heap's process state to a new value, may cause compaction to occur.
  void UpdateProcessState(ProcessState process_state);

  const std::vector<space::ContinuousSpace*>& GetContinuousSpaces() const {
    return continuous_spaces_;
  }

  const std::vector<space::DiscontinuousSpace*>& GetDiscontinuousSpaces() const {
    return discontinuous_spaces_;
  }

  const collector::Iteration* GetCurrentGcIteration() const {
    return &current_gc_iteration_;
  }
  collector::Iteration* GetCurrentGcIteration() {
    return &current_gc_iteration_;
  }

  // Enable verification of object references when the runtime is sufficiently initialized.
  void EnableObjectValidation() {
    verify_object_mode_ = kVerifyObjectSupport;
    if (verify_object_mode_ > kVerifyObjectModeDisabled) {
      VerifyHeap();
    }
  }

  // Disable object reference verification for image writing.
  void DisableObjectValidation() {
    verify_object_mode_ = kVerifyObjectModeDisabled;
  }

  // Other checks may be performed if we know the heap should be in a sane state.
  bool IsObjectValidationEnabled() const {
    return verify_object_mode_ > kVerifyObjectModeDisabled;
  }

  // Returns true if low memory mode is enabled.
  bool IsLowMemoryMode() const {
    return low_memory_mode_;
  }

  // Returns the heap growth multiplier, this affects how much we grow the heap after a GC.
  // Scales heap growth, min free, and max free.
  double HeapGrowthMultiplier() const;

  // Freed bytes can be negative in cases where we copy objects from a compacted space to a
  // free-list backed space.
  void RecordFree(uint64_t freed_objects, int64_t freed_bytes);

  // Must be called if a field of an Object in the heap changes, and before any GC safe-point.
  // The call is not needed if NULL is stored in the field.
  ALWAYS_INLINE void WriteBarrierField(const mirror::Object* dst, MemberOffset /*offset*/,
                                       const mirror::Object* /*new_value*/) {
    card_table_->MarkCard(dst);
  }

  // Write barrier for array operations that update many field positions.
  ALWAYS_INLINE void WriteBarrierArray(const mirror::Object* dst, int /*start_offset*/,
                                       size_t /*length TODO: element_count or byte_count?*/) {
    card_table_->MarkCard(dst);
  }

  ALWAYS_INLINE void WriteBarrierEveryFieldOf(const mirror::Object* obj) {
    card_table_->MarkCard(obj);
  }

  accounting::CardTable* GetCardTable() const {
    return card_table_.get();
  }

  void AddFinalizerReference(Thread* self, mirror::Object** object);

  // Returns the number of bytes currently allocated.
  size_t GetBytesAllocated() const {
    return num_bytes_allocated_.LoadSequentiallyConsistent();
  }

  // Returns the number of objects currently allocated.
  size_t GetObjectsAllocated() const LOCKS_EXCLUDED(Locks::heap_bitmap_lock_);

  // Returns the total number of objects allocated since the heap was created.
  uint64_t GetObjectsAllocatedEver() const;

  // Returns the total number of bytes allocated since the heap was created.
  uint64_t GetBytesAllocatedEver() const;
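
  // Illustrative sketch of the write barrier contract above: any code that stores a non-null
  // reference into a heap object is expected to dirty the card covering that object afterwards,
  // so a concurrent or generational collector can find the updated field, e.g.
  //
  //   dst->SetFieldObject(field_offset, new_value);           // reference store (hypothetical call)
  //   heap->WriteBarrierField(dst, field_offset, new_value);  // dirty the card covering dst
  //
  // In practice the store and the barrier are typically fused inside mirror::Object's setters;
  // the explicit call is shown here only to make the required ordering visible.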
  // Returns the total number of objects freed since the heap was created.
  uint64_t GetObjectsFreedEver() const {
    return total_objects_freed_ever_;
  }

  // Returns the total number of bytes freed since the heap was created.
  uint64_t GetBytesFreedEver() const {
    return total_bytes_freed_ever_;
  }

  // Implements java.lang.Runtime.maxMemory, returning the maximum amount of memory a program can
  // consume. For a regular VM this would relate to the -Xmx option and would return -1 if no Xmx
  // were specified. Android apps start with a growth limit (small heap size) which is
  // cleared/extended for large apps.
  size_t GetMaxMemory() const {
    // There are some race conditions in the allocation code that can cause bytes allocated to
    // become larger than growth_limit_ in rare cases.
    return std::max(GetBytesAllocated(), growth_limit_);
  }

  // Implements java.lang.Runtime.totalMemory, returning the approximate amount of memory currently
  // consumed by an application.
  size_t GetTotalMemory() const;

  // Returns approximately how much free memory we have until the next GC happens.
  size_t GetFreeMemoryUntilGC() const {
    return max_allowed_footprint_ - GetBytesAllocated();
  }

  // Returns approximately how much free memory we have until the next OOME happens.
  size_t GetFreeMemoryUntilOOME() const {
    return growth_limit_ - GetBytesAllocated();
  }

  // Returns how much free memory we have until we need to grow the heap to perform an allocation.
  // Similar to GetFreeMemoryUntilGC. Implements java.lang.Runtime.freeMemory.
  size_t GetFreeMemory() const {
    size_t byte_allocated = num_bytes_allocated_.LoadSequentiallyConsistent();
    size_t total_memory = GetTotalMemory();
    // Make sure we don't get a negative number.
    return total_memory - std::min(total_memory, byte_allocated);
  }

  // Get the space that corresponds to an object's address. Current implementation searches all
  // spaces in turn. If fail_ok is false then failing to find a space will cause an abort.
  // TODO: consider using faster data structure like binary tree.
  space::ContinuousSpace* FindContinuousSpaceFromObject(const mirror::Object*, bool fail_ok) const;
  space::DiscontinuousSpace* FindDiscontinuousSpaceFromObject(const mirror::Object*,
                                                              bool fail_ok) const;
  space::Space* FindSpaceFromObject(const mirror::Object*, bool fail_ok) const;

  void DumpForSigQuit(std::ostream& os);

  // Do a pending heap transition or trim.
  void DoPendingTransitionOrTrim() LOCKS_EXCLUDED(heap_trim_request_lock_);
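
  // How the java.lang.Runtime-backed queries above relate to each other (illustrative numbers
  // only): with a growth limit of 96 MB, a current footprint reported by GetTotalMemory of 30 MB
  // and 18 MB allocated, maxMemory() reports 96 MB, totalMemory() reports 30 MB, freeMemory()
  // reports 30 - 18 = 12 MB, and GetFreeMemoryUntilOOME reports 96 - 18 = 78 MB.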
  // Trim the managed and native heaps by releasing unused memory back to the OS.
  void Trim() LOCKS_EXCLUDED(heap_trim_request_lock_);

  void RevokeThreadLocalBuffers(Thread* thread);
  void RevokeRosAllocThreadLocalBuffers(Thread* thread);
  void RevokeAllThreadLocalBuffers();
  void AssertThreadLocalBuffersAreRevoked(Thread* thread);
  void AssertAllBumpPointerSpaceThreadLocalBuffersAreRevoked();
  void RosAllocVerification(TimingLogger* timings, const char* name)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);

  accounting::HeapBitmap* GetLiveBitmap() SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
    return live_bitmap_.get();
  }

  accounting::HeapBitmap* GetMarkBitmap() SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
    return mark_bitmap_.get();
  }

  accounting::ObjectStack* GetLiveStack() SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
    return live_stack_.get();
  }

  void PreZygoteFork() NO_THREAD_SAFETY_ANALYSIS;

  // Mark and empty stack.
  void FlushAllocStack()
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);

  // Revoke all the thread-local allocation stacks.
  void RevokeAllThreadLocalAllocationStacks(Thread* self)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_)
      LOCKS_EXCLUDED(Locks::runtime_shutdown_lock_, Locks::thread_list_lock_);

  // Mark all the objects in the allocation stack in the specified bitmap.
  // TODO: Refactor?
  void MarkAllocStack(accounting::SpaceBitmap<kObjectAlignment>* bitmap1,
                      accounting::SpaceBitmap<kObjectAlignment>* bitmap2,
                      accounting::SpaceBitmap<kLargeObjectAlignment>* large_objects,
                      accounting::ObjectStack* stack)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);

  // Mark the specified allocation stack as live.
  void MarkAllocStackAsLive(accounting::ObjectStack* stack)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);

  // Unbind any bound bitmaps.
  void UnBindBitmaps() EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);

  // DEPRECATED: Should remove in "near" future when support for multiple image spaces is added.
  // Assumes there is only one image space.
  space::ImageSpace* GetImageSpace() const;

  // Permanently disable moving garbage collection.
  void DisableMovingGc();

  space::DlMallocSpace* GetDlMallocSpace() const {
    return dlmalloc_space_;
  }

  space::RosAllocSpace* GetRosAllocSpace() const {
    return rosalloc_space_;
  }

  // Return the corresponding rosalloc space.
  space::RosAllocSpace* GetRosAllocSpace(gc::allocator::RosAlloc* rosalloc) const;

  space::MallocSpace* GetNonMovingSpace() const {
    return non_moving_space_;
  }

  space::LargeObjectSpace* GetLargeObjectsSpace() const {
    return large_object_space_;
  }

  // Returns the free list space that may contain movable objects (the
  // one that's not the non-moving space), either rosalloc_space_ or
  // dlmalloc_space_.
  space::MallocSpace* GetPrimaryFreeListSpace() {
    if (kUseRosAlloc) {
      DCHECK(rosalloc_space_ != nullptr);
      // reinterpret_cast is necessary as the space class hierarchy
      // isn't known (#included) yet here.
      return reinterpret_cast<space::MallocSpace*>(rosalloc_space_);
    } else {
      DCHECK(dlmalloc_space_ != nullptr);
      return reinterpret_cast<space::MallocSpace*>(dlmalloc_space_);
    }
  }

  std::string DumpSpaces() const WARN_UNUSED;
  void DumpSpaces(std::ostream& stream) const;

  // Dump object should only be used by the signal handler.
  void DumpObject(std::ostream& stream, mirror::Object* obj) NO_THREAD_SAFETY_ANALYSIS;
  // Safe versions of the class-descriptor and pretty-type helpers which check that objects are
  // valid heap addresses.
  std::string SafeGetClassDescriptor(mirror::Class* klass) NO_THREAD_SAFETY_ANALYSIS;
  std::string SafePrettyTypeOf(mirror::Object* obj) NO_THREAD_SAFETY_ANALYSIS;

  // GC performance measuring.
  void DumpGcPerformanceInfo(std::ostream& os);

  // Returns true if we currently care about pause times.
  bool CareAboutPauseTimes() const {
    return process_state_ == kProcessStateJankPerceptible;
  }

  // Thread pool.
  void CreateThreadPool();
  void DeleteThreadPool();
  ThreadPool* GetThreadPool() {
    return thread_pool_.get();
  }
  size_t GetParallelGCThreadCount() const {
    return parallel_gc_threads_;
  }
  size_t GetConcGCThreadCount() const {
    return conc_gc_threads_;
  }
  accounting::ModUnionTable* FindModUnionTableFromSpace(space::Space* space);
  void AddModUnionTable(accounting::ModUnionTable* mod_union_table);

  accounting::RememberedSet* FindRememberedSetFromSpace(space::Space* space);
  void AddRememberedSet(accounting::RememberedSet* remembered_set);
  // Also deletes the remembered set.
  void RemoveRememberedSet(space::Space* space);

  bool IsCompilingBoot() const;
  bool HasImageSpace() const;

  ReferenceProcessor* GetReferenceProcessor() {
    return &reference_processor_;
  }

  bool HasZygoteSpace() const {
    return zygote_space_ != nullptr;
  }

 private:
  // Compact source space to target space.
  void Compact(space::ContinuousMemMapAllocSpace* target_space,
               space::ContinuousMemMapAllocSpace* source_space,
               GcCause gc_cause)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);

  void FinishGC(Thread* self, collector::GcType gc_type) LOCKS_EXCLUDED(gc_complete_lock_);

  // Create a mem map with a preferred base address.
  static MemMap* MapAnonymousPreferredAddress(const char* name, uint8_t* request_begin,
                                              size_t capacity, std::string* out_error_str);

  bool SupportHSpaceCompaction() const {
    // Returns true if we can do hspace compaction.
    return main_space_backup_ != nullptr;
  }

  static ALWAYS_INLINE bool AllocatorHasAllocationStack(AllocatorType allocator_type) {
    return
        allocator_type != kAllocatorTypeBumpPointer &&
        allocator_type != kAllocatorTypeTLAB;
  }
  static ALWAYS_INLINE bool AllocatorMayHaveConcurrentGC(AllocatorType allocator_type) {
    return AllocatorHasAllocationStack(allocator_type);
  }
  static bool IsMovingGc(CollectorType collector_type) {
    return collector_type == kCollectorTypeSS || collector_type == kCollectorTypeGSS ||
        collector_type == kCollectorTypeCC || collector_type == kCollectorTypeMC ||
        collector_type == kCollectorTypeHomogeneousSpaceCompact;
  }
  bool ShouldAllocLargeObject(mirror::Class* c, size_t byte_count) const
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  ALWAYS_INLINE void CheckConcurrentGC(Thread* self, size_t new_num_bytes_allocated,
                                       mirror::Object** obj)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  accounting::ObjectStack* GetMarkStack() {
    return mark_stack_.get();
  }

  // We don't force this to be inlined since it is a slow path.
  template <bool kInstrumented, typename PreFenceVisitor>
  mirror::Object* AllocLargeObject(Thread* self, mirror::Class** klass, size_t byte_count,
                                   const PreFenceVisitor& pre_fence_visitor)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Handles Allocate()'s slow allocation path with GC involved after
  // an initial allocation attempt failed.
  mirror::Object* AllocateInternalWithGc(Thread* self, AllocatorType allocator, size_t num_bytes,
                                         size_t* bytes_allocated, size_t* usable_size,
                                         mirror::Class** klass)
      LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Allocate into a specific space.
  mirror::Object* AllocateInto(Thread* self, space::AllocSpace* space, mirror::Class* c,
                               size_t bytes)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Need to do this with mutators paused so that somebody doesn't accidentally allocate into the
  // wrong space.
  void SwapSemiSpaces() EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Try to allocate a number of bytes, this function never does any GCs. Needs to be inlined so
  // that the switch statement is constant optimized in the entrypoints.
  template <const bool kInstrumented, const bool kGrow>
  ALWAYS_INLINE mirror::Object* TryToAllocate(Thread* self, AllocatorType allocator_type,
                                              size_t alloc_size, size_t* bytes_allocated,
                                              size_t* usable_size)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  void ThrowOutOfMemoryError(Thread* self, size_t byte_count, AllocatorType allocator_type)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  template <bool kGrow>
  ALWAYS_INLINE bool IsOutOfMemoryOnAllocation(AllocatorType allocator_type, size_t alloc_size);

  // Returns true if the address passed in is within the address range of a continuous space.
  bool IsValidContinuousSpaceObjectAddress(const mirror::Object* obj) const
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Run the finalizers.
  void RunFinalization(JNIEnv* env);

  // Blocks the caller until the garbage collector becomes idle and returns the type of GC we
  // waited for.
  collector::GcType WaitForGcToCompleteLocked(GcCause cause, Thread* self)
      EXCLUSIVE_LOCKS_REQUIRED(gc_complete_lock_);

  void RequestCollectorTransition(CollectorType desired_collector_type, uint64_t delta_time)
      LOCKS_EXCLUDED(heap_trim_request_lock_);
  void RequestHeapTrim() LOCKS_EXCLUDED(Locks::runtime_shutdown_lock_);
  void RequestConcurrentGCAndSaveObject(Thread* self, mirror::Object** obj)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  void RequestConcurrentGC(Thread* self)
      LOCKS_EXCLUDED(Locks::runtime_shutdown_lock_);
  bool IsGCRequestPending() const;
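
  // Rough shape of the allocation slow path tied to the private helpers above (a sketch of the
  // control flow, not the exact code in heap-inl.h / heap.cc):
  //
  //   obj = TryToAllocate<kInstrumented, false>(...);           // fast attempt, never GCs
  //   if (obj == nullptr) {
  //     obj = AllocateInternalWithGc(...);                      // runs GCs of increasing severity,
  //   }                                                         // may ThrowOutOfMemoryError
  //   CheckConcurrentGC(self, new_num_bytes_allocated, &obj);   // may RequestConcurrentGC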
  // Sometimes CollectGarbageInternal decides to run a different Gc than you requested. Returns
  // which type of Gc was actually run.
  collector::GcType CollectGarbageInternal(collector::GcType gc_plan, GcCause gc_cause,
                                           bool clear_soft_references)
      LOCKS_EXCLUDED(gc_complete_lock_,
                     Locks::heap_bitmap_lock_,
                     Locks::thread_suspend_count_lock_);

  void PreGcVerification(collector::GarbageCollector* gc)
      LOCKS_EXCLUDED(Locks::mutator_lock_);
  void PreGcVerificationPaused(collector::GarbageCollector* gc)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
  void PrePauseRosAllocVerification(collector::GarbageCollector* gc)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
  void PreSweepingGcVerification(collector::GarbageCollector* gc)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
  void PostGcVerification(collector::GarbageCollector* gc)
      LOCKS_EXCLUDED(Locks::mutator_lock_);
  void PostGcVerificationPaused(collector::GarbageCollector* gc)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Update the watermark for the native allocated bytes based on the current number of native
  // bytes allocated and the target utilization ratio.
  void UpdateMaxNativeFootprint();

  // Find a collector based on GC type.
  collector::GarbageCollector* FindCollectorByGcType(collector::GcType gc_type);

  // Create a new alloc space and compact default alloc space to it.
  HomogeneousSpaceCompactResult PerformHomogeneousSpaceCompact();

  // Create the main free list malloc space, either a RosAlloc space or DlMalloc space.
  void CreateMainMallocSpace(MemMap* mem_map, size_t initial_size, size_t growth_limit,
                             size_t capacity);

  // Create a malloc space based on a mem map. Does not set the space as default.
  space::MallocSpace* CreateMallocSpaceFromMemMap(MemMap* mem_map, size_t initial_size,
                                                  size_t growth_limit, size_t capacity,
                                                  const char* name, bool can_move_objects);

  // Given the current contents of the alloc space, increase the allowed heap footprint to match
  // the target utilization ratio. This should only be called immediately after a full garbage
  // collection.
  void GrowForUtilization(collector::GarbageCollector* collector_ran);

  size_t GetPercentFree();

  static void VerificationCallback(mirror::Object* obj, void* arg)
      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);

  // Swap the allocation stack with the live stack.
  void SwapStacks(Thread* self);

  // Clear cards and update the mod union table.
  void ProcessCards(TimingLogger* timings, bool use_rem_sets);

  // Signal the heap trim daemon that there is something to do, either a heap transition or heap
  // trim.
  void SignalHeapTrimDaemon(Thread* self);

  // Push an object onto the allocation stack.
  void PushOnAllocationStack(Thread* self, mirror::Object** obj)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  void PushOnAllocationStackWithInternalGC(Thread* self, mirror::Object** obj)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  void PushOnThreadLocalAllocationStackWithInternalGC(Thread* thread, mirror::Object** obj)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // What kind of concurrency behavior is the runtime after? Currently true for concurrent mark
  // sweep GC, false for other GC types.
  bool IsGcConcurrent() const ALWAYS_INLINE {
    return collector_type_ == kCollectorTypeCMS || collector_type_ == kCollectorTypeCC;
  }

  // All-known continuous spaces, where objects lie within fixed bounds.
  std::vector<space::ContinuousSpace*> continuous_spaces_;

  // All-known discontinuous spaces, where objects may be placed throughout virtual memory.
  std::vector<space::DiscontinuousSpace*> discontinuous_spaces_;

  // All-known alloc spaces, where objects may be or have been allocated.
  std::vector<space::AllocSpace*> alloc_spaces_;

  // A space where non-movable objects are allocated; when compaction is enabled it contains
  // Classes, ArtMethods, ArtFields, and non-moving objects.
  space::MallocSpace* non_moving_space_;

  // Space which we use for the kAllocatorTypeROSAlloc.
  space::RosAllocSpace* rosalloc_space_;

  // Space which we use for the kAllocatorTypeDlMalloc.
  space::DlMallocSpace* dlmalloc_space_;

  // The main space is the space which the GC copies to and from on process state updates. This
  // space is typically either the dlmalloc_space_ or the rosalloc_space_.
  space::MallocSpace* main_space_;

  // The large object space we are currently allocating into.
  space::LargeObjectSpace* large_object_space_;

  // The card table, dirtied by the write barrier.
  std::unique_ptr<accounting::CardTable> card_table_;

  // A mod-union table remembers all of the references from its space to other spaces.
  AllocationTrackingSafeMap<space::Space*, accounting::ModUnionTable*, kAllocatorTagHeap>
      mod_union_tables_;

  // A remembered set remembers all of the references from its space to the target space.
  AllocationTrackingSafeMap<space::Space*, accounting::RememberedSet*, kAllocatorTagHeap>
      remembered_sets_;

  // The current collector type.
  CollectorType collector_type_;
  // Which collector we use when the app is in the foreground.
  CollectorType foreground_collector_type_;
  // Which collector we will use when the app is notified of a transition to background.
  CollectorType background_collector_type_;
  // Desired collector type, heap trimming daemon transitions the heap if it is != collector_type_.
  CollectorType desired_collector_type_;

  // Lock which guards heap trim requests.
  Mutex* heap_trim_request_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
  // When we want to perform the next heap trim (nanoseconds).
  uint64_t last_trim_time_ GUARDED_BY(heap_trim_request_lock_);
  // When we want to perform the next heap transition (nanoseconds) or heap trim.
  uint64_t heap_transition_or_trim_target_time_ GUARDED_BY(heap_trim_request_lock_);
  // If we have a heap trim request pending.
  bool heap_trim_request_pending_ GUARDED_BY(heap_trim_request_lock_);

  // How many GC threads we may use for paused parts of garbage collection.
  const size_t parallel_gc_threads_;

  // How many GC threads we may use for unpaused parts of garbage collection.
  const size_t conc_gc_threads_;

  // Boolean for if we are in low memory mode.
  const bool low_memory_mode_;

  // If we get a pause longer than long pause log threshold, then we print out the GC after it
  // finishes.
  const size_t long_pause_log_threshold_;

  // If we get a GC longer than long GC log threshold, then we print out the GC after it finishes.
  const size_t long_gc_log_threshold_;

  // If we ignore the max footprint it lets the heap grow until it hits the heap capacity, this is
  // useful for benchmarking since it reduces time spent in GC to a low %.
  const bool ignore_max_footprint_;

  // Lock which guards zygote space creation.
  Mutex zygote_creation_lock_;

  // Non-null iff we have a zygote space. Doesn't contain the large objects allocated before
  // zygote space creation.
  space::ZygoteSpace* zygote_space_;

  // Minimum allocation size of large object.
  size_t large_object_threshold_;

  // Guards access to the state of GC; the associated condition variable is used to signal when a
  // GC completes.
  Mutex* gc_complete_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
  std::unique_ptr<ConditionVariable> gc_complete_cond_ GUARDED_BY(gc_complete_lock_);

  // Reference processor.
  ReferenceProcessor reference_processor_;

  // True while the garbage collector is running.
  volatile CollectorType collector_type_running_ GUARDED_BY(gc_complete_lock_);

  // Last Gc type we ran. Used by WaitForConcurrentGc to know which Gc was waited on.
  volatile collector::GcType last_gc_type_ GUARDED_BY(gc_complete_lock_);
  collector::GcType next_gc_type_;

  // Maximum size that the heap can reach.
  const size_t capacity_;

  // The size the heap is limited to. This is initially smaller than capacity, but for largeHeap
  // programs it is "cleared" making it the same as capacity.
  size_t growth_limit_;

  // When the number of bytes allocated exceeds the footprint, TryAllocate returns NULL indicating
  // a GC should be triggered.
  size_t max_allowed_footprint_;

  // The watermark at which a concurrent GC is requested by registerNativeAllocation.
  size_t native_footprint_gc_watermark_;

  // Whether or not we need to run finalizers in the next native allocation.
  bool native_need_to_run_finalization_;

  // Whether or not we currently care about pause times.
  ProcessState process_state_;

  // When num_bytes_allocated_ exceeds this amount then a concurrent GC should be requested so that
  // it completes ahead of an allocation failing.
  size_t concurrent_start_bytes_;

  // Since the heap was created, how many bytes have been freed.
  uint64_t total_bytes_freed_ever_;

  // Since the heap was created, how many objects have been freed.
  uint64_t total_objects_freed_ever_;

  // Number of bytes allocated. Adjusted after each allocation and free.
  Atomic<size_t> num_bytes_allocated_;

  // Bytes which are allocated and managed by native code but still need to be accounted for.
  Atomic<size_t> native_bytes_allocated_;

  // Info related to the current or previous GC iteration.
  collector::Iteration current_gc_iteration_;

  // Heap verification flags.
  const bool verify_missing_card_marks_;
  const bool verify_system_weaks_;
  const bool verify_pre_gc_heap_;
  const bool verify_pre_sweeping_heap_;
  const bool verify_post_gc_heap_;
  const bool verify_mod_union_table_;
  bool verify_pre_gc_rosalloc_;
  bool verify_pre_sweeping_rosalloc_;
  bool verify_post_gc_rosalloc_;

  // RAII that temporarily disables the rosalloc verification during
  // the zygote fork.
  class ScopedDisableRosAllocVerification {
   private:
    Heap* const heap_;
    const bool orig_verify_pre_gc_;
    const bool orig_verify_pre_sweeping_;
    const bool orig_verify_post_gc_;

   public:
    explicit ScopedDisableRosAllocVerification(Heap* heap)
        : heap_(heap),
          orig_verify_pre_gc_(heap_->verify_pre_gc_rosalloc_),
          orig_verify_pre_sweeping_(heap_->verify_pre_sweeping_rosalloc_),
          orig_verify_post_gc_(heap_->verify_post_gc_rosalloc_) {
      heap_->verify_pre_gc_rosalloc_ = false;
      heap_->verify_pre_sweeping_rosalloc_ = false;
      heap_->verify_post_gc_rosalloc_ = false;
    }
    ~ScopedDisableRosAllocVerification() {
      heap_->verify_pre_gc_rosalloc_ = orig_verify_pre_gc_;
      heap_->verify_pre_sweeping_rosalloc_ = orig_verify_pre_sweeping_;
      heap_->verify_post_gc_rosalloc_ = orig_verify_post_gc_;
    }
  };

  // Parallel GC data structures.
  std::unique_ptr<ThreadPool> thread_pool_;

  // The nanosecond time at which the last GC ended.
  uint64_t last_gc_time_ns_;

  // How many bytes were allocated at the end of the last GC.
  uint64_t last_gc_size_;

  // Estimated allocation rate (bytes / second). Computed between the time of the last GC cycle
  // and the start of the current one.
  uint64_t allocation_rate_;

  // For a GC cycle, bitmaps that are set corresponding to the live objects (live_bitmap_) and to
  // the objects marked so far by the collector (mark_bitmap_).
  std::unique_ptr<accounting::HeapBitmap> live_bitmap_ GUARDED_BY(Locks::heap_bitmap_lock_);
  std::unique_ptr<accounting::HeapBitmap> mark_bitmap_ GUARDED_BY(Locks::heap_bitmap_lock_);

  // Mark stack that we reuse to avoid re-allocating the mark stack.
  std::unique_ptr<accounting::ObjectStack> mark_stack_;

  // Allocation stack, new allocations go here so that we can do sticky mark bits. This enables us
  // to use the live bitmap as the old mark bitmap.
  const size_t max_allocation_stack_size_;
  std::unique_ptr<accounting::ObjectStack> allocation_stack_;

  // Second allocation stack so that we can process allocation with the heap unlocked.
  std::unique_ptr<accounting::ObjectStack> live_stack_;

  // Allocator type.
  AllocatorType current_allocator_;
  const AllocatorType current_non_moving_allocator_;

  // Which GCs we run in order when an allocation fails.
  std::vector<collector::GcType> gc_plan_;

  // Bump pointer spaces.
  space::BumpPointerSpace* bump_pointer_space_;
  // Temp space is the space which the semispace collector copies to.
  space::BumpPointerSpace* temp_space_;

  // Minimum free guarantees that you always have at least min_free_ free bytes after growing for
  // utilization, regardless of target utilization ratio.
  size_t min_free_;

  // The ideal maximum free size, when we grow the heap for utilization.
  size_t max_free_;

  // Target ideal heap utilization ratio.
  double target_utilization_;

  // How much more we grow the heap when we are a foreground app instead of background.
  double foreground_heap_growth_multiplier_;

  // Total time which mutators are paused or waiting for GC to complete.
  uint64_t total_wait_time_;

  // Total allocation time in microseconds, only meaningful when kMeasureAllocationTime is true.
  AtomicInteger total_allocation_time_;

  // The current state of heap verification, may be enabled or disabled.
  VerifyObjectMode verify_object_mode_;
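
  // Usage sketch for ScopedDisableRosAllocVerification above (illustrative; the intended caller
  // is the zygote-fork path in heap.cc):
  //
  //   {
  //     ScopedDisableRosAllocVerification disable_rosalloc_verif(this);
  //     // ... create the zygote space while rosalloc verification is off ...
  //   }  // Destructor restores the three verify_*_rosalloc_ flags.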
  // Compacting GC disable count, prevents compacting GC from running iff > 0.
  size_t disable_moving_gc_count_ GUARDED_BY(gc_complete_lock_);

  std::vector<collector::GarbageCollector*> garbage_collectors_;
  collector::SemiSpace* semi_space_collector_;
  collector::MarkCompact* mark_compact_collector_;
  collector::ConcurrentCopying* concurrent_copying_collector_;

  const bool running_on_valgrind_;
  const bool use_tlab_;

  // Pointer to the space which becomes the new main space when we do homogeneous space compaction.
  // Use unique_ptr since the space is only added during the homogeneous compaction phase.
  std::unique_ptr<space::MallocSpace> main_space_backup_;

  // Minimal interval allowed between two homogeneous space compactions caused by OOM.
  uint64_t min_interval_homogeneous_space_compaction_by_oom_;

  // Time of the last homogeneous space compaction caused by OOM.
  uint64_t last_time_homogeneous_space_compaction_by_oom_;

  // OOMs avoided by homogeneous space compaction.
  Atomic<size_t> count_delayed_oom_;

  // Count of requested homogeneous space compactions.
  Atomic<size_t> count_requested_homogeneous_space_compaction_;

  // Count of ignored homogeneous space compactions.
  Atomic<size_t> count_ignored_homogeneous_space_compaction_;

  // Count of performed homogeneous space compactions.
  Atomic<size_t> count_performed_homogeneous_space_compaction_;

  // Whether or not we use homogeneous space compaction to avoid OOM errors.
  bool use_homogeneous_space_compaction_for_oom_;

  friend class collector::GarbageCollector;
  friend class collector::MarkCompact;
  friend class collector::MarkSweep;
  friend class collector::SemiSpace;
  friend class ReferenceQueue;
  friend class VerifyReferenceCardVisitor;
  friend class VerifyReferenceVisitor;
  friend class VerifyObjectVisitor;
  friend class ScopedHeapFill;
  friend class ScopedHeapLock;
  friend class space::SpaceTest;

  class AllocationTimer {
   private:
    Heap* heap_;
    mirror::Object** allocated_obj_ptr_;
    uint64_t allocation_start_time_;
   public:
    AllocationTimer(Heap* heap, mirror::Object** allocated_obj_ptr);
    ~AllocationTimer();
  };

  DISALLOW_IMPLICIT_CONSTRUCTORS(Heap);
};

// ScopedHeapFill changes the bytes allocated counter to be equal to the growth limit. This
// causes the next allocation to perform a GC and possibly an OOM. It can be used to ensure that a
// GC happens in specific methods such as ThrowIllegalMonitorStateExceptionF in Monitor::Wait.
class ScopedHeapFill {
 public:
  explicit ScopedHeapFill(Heap* heap)
      : heap_(heap),
        delta_(heap_->GetMaxMemory() - heap_->GetBytesAllocated()) {
    heap_->num_bytes_allocated_.FetchAndAddSequentiallyConsistent(delta_);
  }
  ~ScopedHeapFill() {
    heap_->num_bytes_allocated_.FetchAndSubSequentiallyConsistent(delta_);
  }

 private:
  Heap* const heap_;
  const int64_t delta_;
};

}  // namespace gc
}  // namespace art

#endif  // ART_RUNTIME_GC_HEAP_H_