heap.h revision 4c7fc5950853b0c368e2148db77ced7c4d3c303c
/*
 * Copyright (C) 2008 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_GC_HEAP_H_
#define ART_RUNTIME_GC_HEAP_H_

#include <iosfwd>
#include <string>
#include <vector>

#include "allocator_type.h"
#include "atomic.h"
#include "base/timing_logger.h"
#include "gc/accounting/atomic_stack.h"
#include "gc/accounting/card_table.h"
#include "gc/gc_cause.h"
#include "gc/collector/garbage_collector.h"
#include "gc/collector/gc_type.h"
#include "gc/collector_type.h"
#include "globals.h"
#include "gtest/gtest.h"
#include "instruction_set.h"
#include "jni.h"
#include "object_callbacks.h"
#include "offsets.h"
#include "reference_processor.h"
#include "safe_map.h"
#include "thread_pool.h"
#include "verify_object.h"

namespace art {

class ConditionVariable;
class Mutex;
class StackVisitor;
class Thread;
class TimingLogger;

namespace mirror {
  class Class;
  class Object;
}  // namespace mirror

namespace gc {

class ReferenceProcessor;

namespace accounting {
  class HeapBitmap;
  class ModUnionTable;
  class RememberedSet;
}  // namespace accounting

namespace collector {
  class ConcurrentCopying;
  class GarbageCollector;
  class MarkCompact;
  class MarkSweep;
  class SemiSpace;
}  // namespace collector

namespace allocator {
  class RosAlloc;
}  // namespace allocator

namespace space {
  class AllocSpace;
  class BumpPointerSpace;
  class DiscontinuousSpace;
  class DlMallocSpace;
  class ImageSpace;
  class LargeObjectSpace;
  class MallocSpace;
  class RosAllocSpace;
  class Space;
  class SpaceTest;
  class ContinuousMemMapAllocSpace;
}  // namespace space

class AgeCardVisitor {
 public:
  byte operator()(byte card) const {
    if (card == accounting::CardTable::kCardDirty) {
      return card - 1;
    } else {
      return 0;
    }
  }
};

enum HomogeneousSpaceCompactResult {
  // Success.
  kSuccess,
  // Reject due to disabled moving GC.
  kErrorReject,
  // System is shutting down.
  kErrorVMShuttingDown,
};

// If true, use rosalloc/RosAllocSpace instead of dlmalloc/DlMallocSpace.
static constexpr bool kUseRosAlloc = true;

// If true, use thread-local allocation stack.
static constexpr bool kUseThreadLocalAllocationStack = true;

// The process state passed in from the activity manager, used to determine when to do trimming
// and compaction.
enum ProcessState {
  kProcessStateJankPerceptible = 0,
  kProcessStateJankImperceptible = 1,
};
std::ostream& operator<<(std::ostream& os, const ProcessState& process_state);

class Heap {
 public:
  // If true, measure the total allocation time.
  static constexpr bool kMeasureAllocationTime = false;
  // Primitive arrays larger than this size are put in the large object space.
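  // For example, assuming 4 KiB pages this threshold works out to 12 KiB, so a primitive array
  // whose total size exceeds roughly three pages (on the order of an int[3000]) would be
  // allocated in the large object space. (Illustrative arithmetic only; kPageSize is
  // platform-dependent.)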
  static constexpr size_t kDefaultLargeObjectThreshold = 3 * kPageSize;

  static constexpr size_t kDefaultStartingSize = kPageSize;
  static constexpr size_t kDefaultInitialSize = 2 * MB;
  static constexpr size_t kDefaultMaximumSize = 256 * MB;
  static constexpr size_t kDefaultNonMovingSpaceCapacity = 64 * MB;
  static constexpr size_t kDefaultMaxFree = 2 * MB;
  static constexpr size_t kDefaultMinFree = kDefaultMaxFree / 4;
  static constexpr size_t kDefaultLongPauseLogThreshold = MsToNs(5);
  static constexpr size_t kDefaultLongGCLogThreshold = MsToNs(100);
  static constexpr size_t kDefaultTLABSize = 256 * KB;
  static constexpr double kDefaultTargetUtilization = 0.5;
  static constexpr double kDefaultHeapGrowthMultiplier = 2.0;

  // Used so that we don't overflow the allocation time atomic integer.
  static constexpr size_t kTimeAdjust = 1024;

  // How often we allow heap trimming to happen (nanoseconds).
  static constexpr uint64_t kHeapTrimWait = MsToNs(5000);
  // How long we wait after a transition request to perform a collector transition (nanoseconds).
  static constexpr uint64_t kCollectorTransitionWait = MsToNs(5000);

  // Create a heap with the requested sizes. The possibly empty original_image_file_name names an
  // image Space to load based on ImageWriter output.
  explicit Heap(size_t initial_size, size_t growth_limit, size_t min_free,
                size_t max_free, double target_utilization,
                double foreground_heap_growth_multiplier, size_t capacity,
                size_t non_moving_space_capacity,
                const std::string& original_image_file_name,
                InstructionSet image_instruction_set,
                CollectorType foreground_collector_type, CollectorType background_collector_type,
                size_t parallel_gc_threads, size_t conc_gc_threads, bool low_memory_mode,
                size_t long_pause_threshold, size_t long_gc_threshold,
                bool ignore_max_footprint, bool use_tlab,
                bool verify_pre_gc_heap, bool verify_pre_sweeping_heap, bool verify_post_gc_heap,
                bool verify_pre_gc_rosalloc, bool verify_pre_sweeping_rosalloc,
                bool verify_post_gc_rosalloc, bool use_homogeneous_space_compaction,
                uint64_t min_interval_homogeneous_space_compaction_by_oom);

  ~Heap();
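
  // Illustrative use of the allocation entry points declared below (hypothetical caller;
  // VoidFunctor is assumed here as a no-op pre-fence visitor and is not defined in this header):
  //
  //   mirror::Object* obj = heap->AllocObject</*kInstrumented=*/true>(
  //       self, klass, klass->GetObjectSize(), VoidFunctor());
  //
  // The pre-fence visitor runs on the new object before it is published, letting callers
  // initialize fields (such as an array length) before other threads can observe the object.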

  // Allocates and initializes storage for an object instance.
  template <bool kInstrumented, typename PreFenceVisitor>
  mirror::Object* AllocObject(Thread* self, mirror::Class* klass, size_t num_bytes,
                              const PreFenceVisitor& pre_fence_visitor)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    return AllocObjectWithAllocator<kInstrumented, true>(self, klass, num_bytes,
                                                         GetCurrentAllocator(),
                                                         pre_fence_visitor);
  }

  template <bool kInstrumented, typename PreFenceVisitor>
  mirror::Object* AllocNonMovableObject(Thread* self, mirror::Class* klass, size_t num_bytes,
                                        const PreFenceVisitor& pre_fence_visitor)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    return AllocObjectWithAllocator<kInstrumented, true>(self, klass, num_bytes,
                                                         GetCurrentNonMovingAllocator(),
                                                         pre_fence_visitor);
  }

  template <bool kInstrumented, bool kCheckLargeObject, typename PreFenceVisitor>
  ALWAYS_INLINE mirror::Object* AllocObjectWithAllocator(
      Thread* self, mirror::Class* klass, size_t byte_count, AllocatorType allocator,
      const PreFenceVisitor& pre_fence_visitor)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  AllocatorType GetCurrentAllocator() const {
    return current_allocator_;
  }

  AllocatorType GetCurrentNonMovingAllocator() const {
    return current_non_moving_allocator_;
  }

  // Visit all of the live objects in the heap.
  void VisitObjects(ObjectCallback callback, void* arg)
      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);

  void CheckPreconditionsForAllocObject(mirror::Class* c, size_t byte_count)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  void RegisterNativeAllocation(JNIEnv* env, size_t bytes);
  void RegisterNativeFree(JNIEnv* env, size_t bytes);

  // Change the allocator; updates entrypoints.
  void ChangeAllocator(AllocatorType allocator)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_)
      LOCKS_EXCLUDED(Locks::runtime_shutdown_lock_);

  // Transition the garbage collector during runtime; may copy objects from one space to another.
  void TransitionCollector(CollectorType collector_type);

  // Change the collector to be one of the possible options (MS, CMS, SS).
  void ChangeCollector(CollectorType collector_type)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);

  // The given reference is believed to point to an object in the Java heap; check its soundness.
  // TODO: NO_THREAD_SAFETY_ANALYSIS since we call this everywhere and it is impossible to find a
  // proper lock ordering for it.
  void VerifyObjectBody(mirror::Object* o) NO_THREAD_SAFETY_ANALYSIS;

  // Check sanity of all live references.
  void VerifyHeap() LOCKS_EXCLUDED(Locks::heap_bitmap_lock_);
  // Returns how many failures occurred.
  size_t VerifyHeapReferences(bool verify_referents = true)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
  bool VerifyMissingCardMarks()
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);

  // A weaker test than IsLiveObject or VerifyObject that doesn't require the heap lock
  // and doesn't abort on error, allowing the caller to report more
  // meaningful diagnostics.
  bool IsValidObjectAddress(const mirror::Object* obj) const
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Faster alternative to IsHeapAddress since finding if an object is in the large object space is
  // very slow.
  bool IsNonDiscontinuousSpaceHeapAddress(const mirror::Object* obj) const
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Returns true if 'obj' is a live heap object, false otherwise (including for invalid addresses).
  // Requires the heap lock to be held.
  bool IsLiveObjectLocked(mirror::Object* obj, bool search_allocation_stack = true,
                          bool search_live_stack = true, bool sorted = false)
      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);

  // Returns true if there is any chance that the object (obj) will move.
  bool IsMovableObject(const mirror::Object* obj) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Temporarily disable compacting GC until the matching Decrement call; used while an object's
  // address must not change.
  void IncrementDisableMovingGC(Thread* self);
  void DecrementDisableMovingGC(Thread* self);

  // Clear all of the mark bits; doesn't clear bitmaps which have the same live bits as mark bits.
  void ClearMarkedObjects() EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);

  // Initiates an explicit garbage collection.
  void CollectGarbage(bool clear_soft_references);

  // Does a concurrent GC; should only be called by the GC daemon thread
  // through runtime.
  void ConcurrentGC(Thread* self) LOCKS_EXCLUDED(Locks::runtime_shutdown_lock_);

  // Implements VMDebug.countInstancesOfClass and JDWP VM_InstanceCount.
  // The boolean decides whether to use IsAssignableFrom or == when comparing classes.
  void CountInstances(const std::vector<mirror::Class*>& classes, bool use_is_assignable_from,
                      uint64_t* counts)
      LOCKS_EXCLUDED(Locks::heap_bitmap_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  // Implements JDWP RT_Instances.
  void GetInstances(mirror::Class* c, int32_t max_count, std::vector<mirror::Object*>& instances)
      LOCKS_EXCLUDED(Locks::heap_bitmap_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  // Implements JDWP OR_ReferringObjects.
  void GetReferringObjects(mirror::Object* o, int32_t max_count,
                           std::vector<mirror::Object*>& referring_objects)
      LOCKS_EXCLUDED(Locks::heap_bitmap_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Removes the growth limit on the alloc space so it may grow to its maximum capacity. Used to
  // implement dalvik.system.VMRuntime.clearGrowthLimit.
  void ClearGrowthLimit();

  // Target ideal heap utilization ratio; implements
  // dalvik.system.VMRuntime.getTargetHeapUtilization.
  double GetTargetHeapUtilization() const {
    return target_utilization_;
  }

  // Data structure memory usage tracking.
  void RegisterGCAllocation(size_t bytes);
  void RegisterGCDeAllocation(size_t bytes);

  // Set the heap's private space pointers to be the same as the space based on its type. Public
  // due to usage by tests.
  void SetSpaceAsDefault(space::ContinuousSpace* continuous_space)
      LOCKS_EXCLUDED(Locks::heap_bitmap_lock_);
  void AddSpace(space::Space* space) LOCKS_EXCLUDED(Locks::heap_bitmap_lock_);
  void RemoveSpace(space::Space* space) LOCKS_EXCLUDED(Locks::heap_bitmap_lock_);

  // Set target ideal heap utilization ratio; implements
  // dalvik.system.VMRuntime.setTargetHeapUtilization.
  void SetTargetHeapUtilization(float target);
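
  // For example (illustrative figures only): with a target utilization of 0.5 and 20 MB of live
  // data after a full GC, GrowForUtilization() aims for a footprint of about 40 MB, clamped so
  // that at least min_free_ and at most max_free_ bytes remain free.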

  // For the alloc space, sets the maximum number of bytes that the heap is allowed to allocate
  // from the system. Doesn't allow the space to exceed its growth limit.
  void SetIdealFootprint(size_t max_allowed_footprint);

  // Blocks the caller until the garbage collector becomes idle and returns the type of GC we
  // waited for.
  collector::GcType WaitForGcToComplete(GcCause cause, Thread* self)
      LOCKS_EXCLUDED(gc_complete_lock_);

  // Update the heap's process state to a new value; may cause compaction to occur.
  void UpdateProcessState(ProcessState process_state);

  const std::vector<space::ContinuousSpace*>& GetContinuousSpaces() const {
    return continuous_spaces_;
  }

  const std::vector<space::DiscontinuousSpace*>& GetDiscontinuousSpaces() const {
    return discontinuous_spaces_;
  }

  const collector::Iteration* GetCurrentGcIteration() const {
    return &current_gc_iteration_;
  }
  collector::Iteration* GetCurrentGcIteration() {
    return &current_gc_iteration_;
  }

  // Enable verification of object references when the runtime is sufficiently initialized.
  void EnableObjectValidation() {
    verify_object_mode_ = kVerifyObjectSupport;
    if (verify_object_mode_ > kVerifyObjectModeDisabled) {
      VerifyHeap();
    }
  }

  // Disable object reference verification for image writing.
  void DisableObjectValidation() {
    verify_object_mode_ = kVerifyObjectModeDisabled;
  }

  // Other checks may be performed if we know the heap should be in a sane state.
  bool IsObjectValidationEnabled() const {
    return verify_object_mode_ > kVerifyObjectModeDisabled;
  }

  // Returns true if low memory mode is enabled.
  bool IsLowMemoryMode() const {
    return low_memory_mode_;
  }

  // Returns the heap growth multiplier; this affects how much we grow the heap after a GC.
  // Scales heap growth, min free, and max free.
  double HeapGrowthMultiplier() const;

  // Freed bytes can be negative in cases where we copy objects from a compacted space to a
  // free-list backed space.
  void RecordFree(uint64_t freed_objects, int64_t freed_bytes);

  // Must be called if a field of an Object in the heap changes, and before any GC safe-point.
  // The call is not needed if NULL is stored in the field. (See the usage sketch below.)
  void WriteBarrierField(const mirror::Object* dst, MemberOffset /*offset*/,
                         const mirror::Object* /*new_value*/) {
    card_table_->MarkCard(dst);
  }

  // Write barrier for array operations that update many field positions.
  void WriteBarrierArray(const mirror::Object* dst, int /*start_offset*/,
                         size_t /*length TODO: element_count or byte_count?*/) {
    card_table_->MarkCard(dst);
  }

  void WriteBarrierEveryFieldOf(const mirror::Object* obj) {
    card_table_->MarkCard(obj);
  }

  accounting::CardTable* GetCardTable() const {
    return card_table_.get();
  }

  void AddFinalizerReference(Thread* self, mirror::Object** object);

  // Returns the number of bytes currently allocated.
  size_t GetBytesAllocated() const {
    return num_bytes_allocated_.LoadSequentiallyConsistent();
  }

  // Returns the number of objects currently allocated.
  size_t GetObjectsAllocated() const LOCKS_EXCLUDED(Locks::heap_bitmap_lock_);

  // Returns the total number of objects allocated since the heap was created.
  uint64_t GetObjectsAllocatedEver() const;

  // Returns the total number of bytes allocated since the heap was created.
  uint64_t GetBytesAllocatedEver() const;
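
  // Usage sketch for the write barriers above (hypothetical mutator code; SetFieldObject is
  // assumed for illustration and is not declared in this header):
  //
  //   dst->SetFieldObject(field_offset, new_value);  // reference store
  //   if (new_value != nullptr) {
  //     heap->WriteBarrierField(dst, field_offset, new_value);  // dirties dst's card
  //   }
  //
  // The collector later scans dirty cards to find cross-space references instead of scanning
  // every object.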

  // Returns the total number of objects freed since the heap was created.
  uint64_t GetObjectsFreedEver() const {
    return total_objects_freed_ever_;
  }

  // Returns the total number of bytes freed since the heap was created.
  uint64_t GetBytesFreedEver() const {
    return total_bytes_freed_ever_;
  }

  // Implements java.lang.Runtime.maxMemory, returning the maximum amount of memory a program can
  // consume. For a regular VM this would relate to the -Xmx option and would return -1 if no Xmx
  // were specified. Android apps start with a growth limit (small heap size) which is
  // cleared/extended for large apps.
  size_t GetMaxMemory() const {
    // There are race conditions in the allocation code that can cause bytes allocated to
    // become larger than growth_limit_ in rare cases.
    return std::max(GetBytesAllocated(), growth_limit_);
  }

  // Implements java.lang.Runtime.totalMemory, returning the approximate amount of memory
  // currently consumed by an application.
  size_t GetTotalMemory() const;

  // Returns approximately how much free memory we have until the next GC happens.
  size_t GetFreeMemoryUntilGC() const {
    return max_allowed_footprint_ - GetBytesAllocated();
  }

  // Returns approximately how much free memory we have until the next OOME happens.
  size_t GetFreeMemoryUntilOOME() const {
    return growth_limit_ - GetBytesAllocated();
  }

  // Returns how much free memory we have until we need to grow the heap to perform an allocation.
  // Similar to GetFreeMemoryUntilGC. Implements java.lang.Runtime.freeMemory.
  size_t GetFreeMemory() const {
    size_t byte_allocated = num_bytes_allocated_.LoadSequentiallyConsistent();
    size_t total_memory = GetTotalMemory();
    // Make sure we don't get a negative number.
    return total_memory - std::min(total_memory, byte_allocated);
  }

  // Get the space that corresponds to an object's address. The current implementation searches
  // all spaces in turn. If fail_ok is false then failing to find a space will cause an abort.
  // TODO: consider using a faster data structure like a binary tree.
  space::ContinuousSpace* FindContinuousSpaceFromObject(const mirror::Object*, bool fail_ok) const;
  space::DiscontinuousSpace* FindDiscontinuousSpaceFromObject(const mirror::Object*,
                                                              bool fail_ok) const;
  space::Space* FindSpaceFromObject(const mirror::Object*, bool fail_ok) const;

  void DumpForSigQuit(std::ostream& os) EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Do a pending heap transition or trim.
  void DoPendingTransitionOrTrim() LOCKS_EXCLUDED(heap_trim_request_lock_);
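
  // Taken together, the memory accessors above map onto java.lang.Runtime roughly as follows
  // (illustrative sketch):
  //
  //   size_t max   = heap->GetMaxMemory();    // Runtime.maxMemory()
  //   size_t total = heap->GetTotalMemory();  // Runtime.totalMemory()
  //   size_t free  = heap->GetFreeMemory();   // Runtime.freeMemory()
  //
  // Note that GetFreeMemory() computes total - min(total, allocated), so a racy allocation
  // counter can never make it underflow.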

  // Trim the managed and native heaps by releasing unused memory back to the OS.
  void Trim() LOCKS_EXCLUDED(heap_trim_request_lock_);

  void RevokeThreadLocalBuffers(Thread* thread);
  void RevokeRosAllocThreadLocalBuffers(Thread* thread);
  void RevokeAllThreadLocalBuffers();
  void AssertAllBumpPointerSpaceThreadLocalBuffersAreRevoked();
  void RosAllocVerification(TimingLogger* timings, const char* name)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);

  accounting::HeapBitmap* GetLiveBitmap() SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
    return live_bitmap_.get();
  }

  accounting::HeapBitmap* GetMarkBitmap() SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
    return mark_bitmap_.get();
  }

  accounting::ObjectStack* GetLiveStack() SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
    return live_stack_.get();
  }

  void PreZygoteFork() NO_THREAD_SAFETY_ANALYSIS;

  // Mark and empty stack.
  void FlushAllocStack()
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);

  // Revoke all the thread-local allocation stacks.
  void RevokeAllThreadLocalAllocationStacks(Thread* self)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_)
      LOCKS_EXCLUDED(Locks::runtime_shutdown_lock_, Locks::thread_list_lock_);

  // Mark all the objects in the allocation stack in the specified bitmap.
  // TODO: Refactor?
  void MarkAllocStack(accounting::SpaceBitmap<kObjectAlignment>* bitmap1,
                      accounting::SpaceBitmap<kObjectAlignment>* bitmap2,
                      accounting::SpaceBitmap<kLargeObjectAlignment>* large_objects,
                      accounting::ObjectStack* stack)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);

  // Mark the specified allocation stack as live.
  void MarkAllocStackAsLive(accounting::ObjectStack* stack)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);

  // Unbind any bound bitmaps.
  void UnBindBitmaps() EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);

  // DEPRECATED: Should remove in "near" future when support for multiple image spaces is added.
  // Assumes there is only one image space.
  space::ImageSpace* GetImageSpace() const;

  // Permanently disable moving garbage collection.
  void DisableMovingGc();

  space::DlMallocSpace* GetDlMallocSpace() const {
    return dlmalloc_space_;
  }

  space::RosAllocSpace* GetRosAllocSpace() const {
    return rosalloc_space_;
  }

  // Return the corresponding rosalloc space.
  space::RosAllocSpace* GetRosAllocSpace(gc::allocator::RosAlloc* rosalloc) const;

  space::MallocSpace* GetNonMovingSpace() const {
    return non_moving_space_;
  }

  space::LargeObjectSpace* GetLargeObjectsSpace() const {
    return large_object_space_;
  }

  // Returns the free list space that may contain movable objects (the
  // one that's not the non-moving space), either rosalloc_space_ or
  // dlmalloc_space_.
  space::MallocSpace* GetPrimaryFreeListSpace() {
    if (kUseRosAlloc) {
      DCHECK(rosalloc_space_ != nullptr);
      // reinterpret_cast is necessary as the space class hierarchy
      // isn't known (#included) here yet.
      return reinterpret_cast<space::MallocSpace*>(rosalloc_space_);
    } else {
      DCHECK(dlmalloc_space_ != nullptr);
      return reinterpret_cast<space::MallocSpace*>(dlmalloc_space_);
    }
  }

  std::string DumpSpaces() const WARN_UNUSED;
  void DumpSpaces(std::ostream& stream) const;

  // DumpObject should only be used by the signal handler.
  void DumpObject(std::ostream& stream, mirror::Object* obj) NO_THREAD_SAFETY_ANALYSIS;
  // Safe versions of class-descriptor lookup and PrettyTypeOf that check that their arguments
  // are valid heap addresses.
  std::string SafeGetClassDescriptor(mirror::Class* klass) NO_THREAD_SAFETY_ANALYSIS;
  std::string SafePrettyTypeOf(mirror::Object* obj) NO_THREAD_SAFETY_ANALYSIS;

  // GC performance measuring.
  void DumpGcPerformanceInfo(std::ostream& os);

  // Returns true if we currently care about pause times.
  bool CareAboutPauseTimes() const {
    return process_state_ == kProcessStateJankPerceptible;
  }

  // Thread pool.
  void CreateThreadPool();
  void DeleteThreadPool();
  ThreadPool* GetThreadPool() {
    return thread_pool_.get();
  }
  size_t GetParallelGCThreadCount() const {
    return parallel_gc_threads_;
  }
  size_t GetConcGCThreadCount() const {
    return conc_gc_threads_;
  }
  accounting::ModUnionTable* FindModUnionTableFromSpace(space::Space* space);
  void AddModUnionTable(accounting::ModUnionTable* mod_union_table);

  accounting::RememberedSet* FindRememberedSetFromSpace(space::Space* space);
  void AddRememberedSet(accounting::RememberedSet* remembered_set);
  // Also deletes the remembered set.
  void RemoveRememberedSet(space::Space* space);

  bool IsCompilingBoot() const;
  bool RunningOnValgrind() const {
    return running_on_valgrind_;
  }
  bool HasImageSpace() const;

  ReferenceProcessor* GetReferenceProcessor() {
    return &reference_processor_;
  }

 private:
  // Compact source space to target space.
  void Compact(space::ContinuousMemMapAllocSpace* target_space,
               space::ContinuousMemMapAllocSpace* source_space,
               GcCause gc_cause)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);

  void FinishGC(Thread* self, collector::GcType gc_type) LOCKS_EXCLUDED(gc_complete_lock_);

  // Create a mem map with a preferred base address.
  static MemMap* MapAnonymousPreferredAddress(const char* name, byte* request_begin,
                                              size_t capacity, int prot_flags,
                                              std::string* out_error_str);

  bool SupportHSpaceCompaction() const {
    // Returns true if we can do hspace compaction.
    return main_space_backup_ != nullptr;
  }

  static ALWAYS_INLINE bool AllocatorHasAllocationStack(AllocatorType allocator_type) {
    return
        allocator_type != kAllocatorTypeBumpPointer &&
        allocator_type != kAllocatorTypeTLAB;
  }
  static ALWAYS_INLINE bool AllocatorMayHaveConcurrentGC(AllocatorType allocator_type) {
    return AllocatorHasAllocationStack(allocator_type);
  }
  static bool IsMovingGc(CollectorType collector_type) {
    return collector_type == kCollectorTypeSS || collector_type == kCollectorTypeGSS ||
        collector_type == kCollectorTypeCC || collector_type == kCollectorTypeMC ||
        collector_type == kCollectorTypeHomogeneousSpaceCompact;
  }
  bool ShouldAllocLargeObject(mirror::Class* c, size_t byte_count) const
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  ALWAYS_INLINE void CheckConcurrentGC(Thread* self, size_t new_num_bytes_allocated,
                                       mirror::Object** obj)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  accounting::ObjectStack* GetMarkStack() {
    return mark_stack_.get();
  }

  // We don't force this to be inlined since it is a slow path.
  template <bool kInstrumented, typename PreFenceVisitor>
  mirror::Object* AllocLargeObject(Thread* self, mirror::Class* klass, size_t byte_count,
                                   const PreFenceVisitor& pre_fence_visitor)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Handles Allocate()'s slow allocation path with GC involved after
  // an initial allocation attempt failed.
  mirror::Object* AllocateInternalWithGc(Thread* self, AllocatorType allocator, size_t num_bytes,
                                         size_t* bytes_allocated, size_t* usable_size,
                                         mirror::Class** klass)
      LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Allocate into a specific space.
  mirror::Object* AllocateInto(Thread* self, space::AllocSpace* space, mirror::Class* c,
                               size_t bytes)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Need to do this with mutators paused so that somebody doesn't accidentally allocate into the
  // wrong space.
  void SwapSemiSpaces() EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Try to allocate a number of bytes; this function never does any GCs. Needs to be inlined so
  // that the switch statement is constant optimized in the entrypoints.
  template <const bool kInstrumented, const bool kGrow>
  ALWAYS_INLINE mirror::Object* TryToAllocate(Thread* self, AllocatorType allocator_type,
                                              size_t alloc_size, size_t* bytes_allocated,
                                              size_t* usable_size)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  void ThrowOutOfMemoryError(Thread* self, size_t byte_count, AllocatorType allocator_type)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  template <bool kGrow>
  bool IsOutOfMemoryOnAllocation(AllocatorType allocator_type, size_t alloc_size);

  // Returns true if the address passed in is within the address range of a continuous space.
  bool IsValidContinuousSpaceObjectAddress(const mirror::Object* obj) const
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Run the finalizers.
  void RunFinalization(JNIEnv* env);

  // Blocks the caller until the garbage collector becomes idle and returns the type of GC we
  // waited for.
  collector::GcType WaitForGcToCompleteLocked(GcCause cause, Thread* self)
      EXCLUSIVE_LOCKS_REQUIRED(gc_complete_lock_);

  void RequestCollectorTransition(CollectorType desired_collector_type, uint64_t delta_time)
      LOCKS_EXCLUDED(heap_trim_request_lock_);
  void RequestHeapTrim() LOCKS_EXCLUDED(Locks::runtime_shutdown_lock_);
  void RequestConcurrentGCAndSaveObject(Thread* self, mirror::Object** obj)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  void RequestConcurrentGC(Thread* self)
      LOCKS_EXCLUDED(Locks::runtime_shutdown_lock_);
  bool IsGCRequestPending() const;
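
  // Rough shape of the slow path (pseudocode sketch, not the actual implementation):
  // AllocateInternalWithGc tries each GC type in gc_plan_ in order, retrying the allocation
  // after each collection, before resorting to a collection that clears soft references and,
  // failing that, throwing OutOfMemoryError:
  //
  //   for (collector::GcType gc_type : gc_plan_) {
  //     CollectGarbageInternal(gc_type, kGcCauseForAlloc, false);
  //     obj = TryToAllocate<true, false>(self, allocator, num_bytes, ...);
  //     if (obj != nullptr) return obj;
  //   }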

  // Sometimes CollectGarbageInternal decides to run a different GC than you requested. Returns
  // which type of GC was actually run.
  collector::GcType CollectGarbageInternal(collector::GcType gc_plan, GcCause gc_cause,
                                           bool clear_soft_references)
      LOCKS_EXCLUDED(gc_complete_lock_,
                     Locks::heap_bitmap_lock_,
                     Locks::thread_suspend_count_lock_);

  void PreGcVerification(collector::GarbageCollector* gc)
      LOCKS_EXCLUDED(Locks::mutator_lock_);
  void PreGcVerificationPaused(collector::GarbageCollector* gc)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
  void PrePauseRosAllocVerification(collector::GarbageCollector* gc)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
  void PreSweepingGcVerification(collector::GarbageCollector* gc)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
  void PostGcVerification(collector::GarbageCollector* gc)
      LOCKS_EXCLUDED(Locks::mutator_lock_);
  void PostGcVerificationPaused(collector::GarbageCollector* gc)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Update the watermark for the native allocated bytes based on the current number of native
  // bytes allocated and the target utilization ratio.
  void UpdateMaxNativeFootprint();

  // Find a collector based on GC type.
  collector::GarbageCollector* FindCollectorByGcType(collector::GcType gc_type);

  // Create a new alloc space and compact the default alloc space to it.
  HomogeneousSpaceCompactResult PerformHomogeneousSpaceCompact();

  // Create the main free list malloc space, either a RosAlloc space or DlMalloc space.
  void CreateMainMallocSpace(MemMap* mem_map, size_t initial_size, size_t growth_limit,
                             size_t capacity);

  // Create a malloc space based on a mem map. Does not set the space as default.
  space::MallocSpace* CreateMallocSpaceFromMemMap(MemMap* mem_map, size_t initial_size,
                                                  size_t growth_limit, size_t capacity,
                                                  const char* name, bool can_move_objects);

  // Given the current contents of the alloc space, increase the allowed heap footprint to match
  // the target utilization ratio. This should only be called immediately after a full garbage
  // collection.
  void GrowForUtilization(collector::GarbageCollector* collector_ran);

  size_t GetPercentFree();

  static void VerificationCallback(mirror::Object* obj, void* arg)
      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);

  // Swap the allocation stack with the live stack.
  void SwapStacks(Thread* self);

  // Clear cards and update the mod union table.
  void ProcessCards(TimingLogger* timings, bool use_rem_sets);

  // Signal the heap trim daemon that there is something to do, either a heap transition or heap
  // trim.
  void SignalHeapTrimDaemon(Thread* self);

  // Push an object onto the allocation stack.
  void PushOnAllocationStack(Thread* self, mirror::Object** obj)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  void PushOnAllocationStackWithInternalGC(Thread* self, mirror::Object** obj)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  void PushOnThreadLocalAllocationStackWithInternalGC(Thread* thread, mirror::Object** obj)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Whether the garbage collection runs concurrently with mutators: currently true for the
  // concurrent mark sweep and concurrent copying collectors, false for other GC types.
  bool IsGcConcurrent() const ALWAYS_INLINE {
    return collector_type_ == kCollectorTypeCMS || collector_type_ == kCollectorTypeCC;
  }
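
  // Sketch of how the concurrent-GC trigger fits together (illustrative, simplified): after a
  // successful allocation, CheckConcurrentGC compares the new total against
  // concurrent_start_bytes_ and requests a background collection early enough that it can
  // finish before the heap is exhausted:
  //
  //   if (AllocatorMayHaveConcurrentGC(allocator_type) && IsGcConcurrent() &&
  //       new_num_bytes_allocated >= concurrent_start_bytes_) {
  //     RequestConcurrentGCAndSaveObject(self, obj);
  //   }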

  // All-known continuous spaces, where objects lie within fixed bounds.
  std::vector<space::ContinuousSpace*> continuous_spaces_;

  // All-known discontinuous spaces, where objects may be placed throughout virtual memory.
  std::vector<space::DiscontinuousSpace*> discontinuous_spaces_;

  // All-known alloc spaces, where objects may be or have been allocated.
  std::vector<space::AllocSpace*> alloc_spaces_;

  // A space where non-movable objects are allocated; when compaction is enabled it contains
  // Classes, ArtMethods, ArtFields, and non-moving objects.
  space::MallocSpace* non_moving_space_;

  // Space which we use for kAllocatorTypeROSAlloc.
  space::RosAllocSpace* rosalloc_space_;

  // Space which we use for kAllocatorTypeDlMalloc.
  space::DlMallocSpace* dlmalloc_space_;

  // The main space is the space which the GC copies to and from on process state updates. This
  // space is typically either the dlmalloc_space_ or the rosalloc_space_.
  space::MallocSpace* main_space_;

  // The large object space we are currently allocating into.
  space::LargeObjectSpace* large_object_space_;

  // The card table, dirtied by the write barrier.
  std::unique_ptr<accounting::CardTable> card_table_;

  // A mod-union table remembers all of the references from its space to other spaces.
  AllocationTrackingSafeMap<space::Space*, accounting::ModUnionTable*, kAllocatorTagHeap>
      mod_union_tables_;

  // A remembered set remembers all of the references from its space to the target space.
  AllocationTrackingSafeMap<space::Space*, accounting::RememberedSet*, kAllocatorTagHeap>
      remembered_sets_;

  // The current collector type.
  CollectorType collector_type_;
  // Which collector we use when the app is in the foreground.
  CollectorType foreground_collector_type_;
  // Which collector we will use when the app is notified of a transition to background.
  CollectorType background_collector_type_;
  // Desired collector type; the heap trimming daemon transitions the heap if it is !=
  // collector_type_.
  CollectorType desired_collector_type_;

  // Lock which guards heap trim requests.
  Mutex* heap_trim_request_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
  // When we want to perform the next heap trim (nanoseconds).
  uint64_t last_trim_time_ GUARDED_BY(heap_trim_request_lock_);
  // When we want to perform the next heap transition (nanoseconds) or heap trim.
  uint64_t heap_transition_or_trim_target_time_ GUARDED_BY(heap_trim_request_lock_);
  // If we have a heap trim request pending.
  bool heap_trim_request_pending_ GUARDED_BY(heap_trim_request_lock_);

  // How many GC threads we may use for paused parts of garbage collection.
  const size_t parallel_gc_threads_;

  // How many GC threads we may use for unpaused parts of garbage collection.
  const size_t conc_gc_threads_;

  // Boolean for if we are in low memory mode.
  const bool low_memory_mode_;

  // If we get a pause longer than the long pause log threshold, then we print out the GC after
  // it finishes.
  const size_t long_pause_log_threshold_;

  // If we get a GC longer than the long GC log threshold, then we print out the GC after it
  // finishes.
  const size_t long_gc_log_threshold_;

  // If we ignore the max footprint it lets the heap grow until it hits the heap capacity; this is
  // useful for benchmarking since it reduces time spent in GC to a low %.
  const bool ignore_max_footprint_;

  // Lock which guards zygote space creation.
  Mutex zygote_creation_lock_;

  // If we have a zygote space.
  bool have_zygote_space_;

  // Minimum allocation size of large object.
  size_t large_object_threshold_;

  // Guards access to the state of GC; the associated condition variable is used to signal when a
  // GC completes.
  Mutex* gc_complete_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
  std::unique_ptr<ConditionVariable> gc_complete_cond_ GUARDED_BY(gc_complete_lock_);

  // Reference processor.
  ReferenceProcessor reference_processor_;

  // True while the garbage collector is running.
  volatile CollectorType collector_type_running_ GUARDED_BY(gc_complete_lock_);

  // Last GC type we ran. Used by WaitForConcurrentGc to know which GC was waited on.
  volatile collector::GcType last_gc_type_ GUARDED_BY(gc_complete_lock_);
  collector::GcType next_gc_type_;

  // Maximum size that the heap can reach.
  const size_t capacity_;

  // The size the heap is limited to. This is initially smaller than capacity, but for largeHeap
  // programs it is "cleared", making it the same as capacity.
  size_t growth_limit_;

  // When the number of bytes allocated exceeds this footprint, TryToAllocate returns NULL,
  // indicating that a GC should be triggered.
  size_t max_allowed_footprint_;

  // The watermark at which a concurrent GC is requested by registerNativeAllocation.
  size_t native_footprint_gc_watermark_;

  // Whether or not we need to run finalizers in the next native allocation.
  bool native_need_to_run_finalization_;

  // Whether or not we currently care about pause times.
  ProcessState process_state_;

  // When num_bytes_allocated_ exceeds this amount then a concurrent GC should be requested so
  // that it completes ahead of an allocation failing.
  size_t concurrent_start_bytes_;

  // Since the heap was created, how many bytes have been freed.
  uint64_t total_bytes_freed_ever_;

  // Since the heap was created, how many objects have been freed.
  uint64_t total_objects_freed_ever_;

  // Number of bytes allocated. Adjusted after each allocation and free.
  Atomic<size_t> num_bytes_allocated_;

  // Bytes which are allocated and managed by native code but still need to be accounted for.
  Atomic<size_t> native_bytes_allocated_;

  // Info related to the current or previous GC iteration.
  collector::Iteration current_gc_iteration_;

  // Heap verification flags.
  const bool verify_missing_card_marks_;
  const bool verify_system_weaks_;
  const bool verify_pre_gc_heap_;
  const bool verify_pre_sweeping_heap_;
  const bool verify_post_gc_heap_;
  const bool verify_mod_union_table_;
  bool verify_pre_gc_rosalloc_;
  bool verify_pre_sweeping_rosalloc_;
  bool verify_post_gc_rosalloc_;

  // RAII that temporarily disables rosalloc verification during
  // the zygote fork.
  class ScopedDisableRosAllocVerification {
   private:
    Heap* const heap_;
    const bool orig_verify_pre_gc_;
    const bool orig_verify_pre_sweeping_;
    const bool orig_verify_post_gc_;

   public:
    explicit ScopedDisableRosAllocVerification(Heap* heap)
        : heap_(heap),
          orig_verify_pre_gc_(heap_->verify_pre_gc_rosalloc_),
          orig_verify_pre_sweeping_(heap_->verify_pre_sweeping_rosalloc_),
          orig_verify_post_gc_(heap_->verify_post_gc_rosalloc_) {
      heap_->verify_pre_gc_rosalloc_ = false;
      heap_->verify_pre_sweeping_rosalloc_ = false;
      heap_->verify_post_gc_rosalloc_ = false;
    }
    ~ScopedDisableRosAllocVerification() {
      heap_->verify_pre_gc_rosalloc_ = orig_verify_pre_gc_;
      heap_->verify_pre_sweeping_rosalloc_ = orig_verify_pre_sweeping_;
      heap_->verify_post_gc_rosalloc_ = orig_verify_post_gc_;
    }
  };
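
  // Usage sketch (hypothetical; the kind of scope PreZygoteFork might establish):
  //
  //   {
  //     ScopedDisableRosAllocVerification wrapper(this);
  //     // ... fork work during which rosalloc verification must stay off ...
  //   }  // destructor restores the original verification flags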

  // Parallel GC data structures.
  std::unique_ptr<ThreadPool> thread_pool_;

  // The nanosecond time at which the last GC ended.
  uint64_t last_gc_time_ns_;

  // How many bytes were allocated at the end of the last GC.
  uint64_t last_gc_size_;

  // Estimated allocation rate (bytes / second). Computed between the time of the last GC cycle
  // and the start of the current one.
  uint64_t allocation_rate_;

  // For a GC cycle, bitmaps that are set corresponding to the live and marked objects in each
  // space.
  std::unique_ptr<accounting::HeapBitmap> live_bitmap_ GUARDED_BY(Locks::heap_bitmap_lock_);
  std::unique_ptr<accounting::HeapBitmap> mark_bitmap_ GUARDED_BY(Locks::heap_bitmap_lock_);

  // Mark stack that we reuse to avoid re-allocating the mark stack.
  std::unique_ptr<accounting::ObjectStack> mark_stack_;

  // Allocation stack, new allocations go here so that we can do sticky mark bits. This enables us
  // to use the live bitmap as the old mark bitmap.
  const size_t max_allocation_stack_size_;
  std::unique_ptr<accounting::ObjectStack> allocation_stack_;

  // Second allocation stack so that we can process allocations with the heap unlocked.
  std::unique_ptr<accounting::ObjectStack> live_stack_;

  // Allocator type.
  AllocatorType current_allocator_;
  const AllocatorType current_non_moving_allocator_;

  // Which GCs we run, in order, when an allocation fails.
  std::vector<collector::GcType> gc_plan_;

  // Bump pointer spaces.
  space::BumpPointerSpace* bump_pointer_space_;
  // Temp space is the space which the semispace collector copies to.
  space::BumpPointerSpace* temp_space_;

  // Minimum free guarantees that you always have at least min_free_ free bytes after growing for
  // utilization, regardless of target utilization ratio.
  size_t min_free_;

  // The ideal maximum free size, when we grow the heap for utilization.
  size_t max_free_;

  // Target ideal heap utilization ratio.
  double target_utilization_;

  // How much more we grow the heap when we are a foreground app instead of background.
  double foreground_heap_growth_multiplier_;

  // Total time which mutators are paused or waiting for GC to complete.
  uint64_t total_wait_time_;

  // Total time spent in allocation, in microseconds.
  AtomicInteger total_allocation_time_;

  // The current state of heap verification, may be enabled or disabled.
  VerifyObjectMode verify_object_mode_;

  // Compacting GC disable count; compacting GC is prevented from running while this is non-zero.
  size_t disable_moving_gc_count_ GUARDED_BY(gc_complete_lock_);

  std::vector<collector::GarbageCollector*> garbage_collectors_;
  collector::SemiSpace* semi_space_collector_;
  collector::MarkCompact* mark_compact_collector_;
  collector::ConcurrentCopying* concurrent_copying_collector_;

  const bool running_on_valgrind_;
  const bool use_tlab_;

  // Pointer to the space which becomes the new main space when we do homogeneous space compaction.
  // Use unique_ptr since the space is only added during the homogeneous compaction phase.
  std::unique_ptr<space::MallocSpace> main_space_backup_;

  // Minimal interval allowed between two homogeneous space compactions caused by OOM.
  uint64_t min_interval_homogeneous_space_compaction_by_oom_;

  // Time of the last homogeneous space compaction caused by OOM.
  uint64_t last_time_homogeneous_space_compaction_by_oom_;

  // OOMs avoided by homogeneous space compaction.
  Atomic<size_t> count_delayed_oom_;

  // Count of requested homogeneous space compactions.
  Atomic<size_t> count_requested_homogeneous_space_compaction_;

  // Count of ignored homogeneous space compactions.
  Atomic<size_t> count_ignored_homogeneous_space_compaction_;

  // Count of performed homogeneous space compactions.
  Atomic<size_t> count_performed_homogeneous_space_compaction_;

  // Whether or not we use homogeneous space compaction to avoid OOM errors.
  bool use_homogeneous_space_compaction_for_oom_;

  friend class collector::GarbageCollector;
  friend class collector::MarkCompact;
  friend class collector::MarkSweep;
  friend class collector::SemiSpace;
  friend class ReferenceQueue;
  friend class VerifyReferenceCardVisitor;
  friend class VerifyReferenceVisitor;
  friend class VerifyObjectVisitor;
  friend class ScopedHeapFill;
  friend class ScopedHeapLock;
  friend class space::SpaceTest;

  class AllocationTimer {
   private:
    Heap* heap_;
    mirror::Object** allocated_obj_ptr_;
    uint64_t allocation_start_time_;

   public:
    AllocationTimer(Heap* heap, mirror::Object** allocated_obj_ptr);
    ~AllocationTimer();
  };

  DISALLOW_IMPLICIT_CONSTRUCTORS(Heap);
};

// ScopedHeapFill changes the bytes allocated counter to be equal to the growth limit. This
// causes the next allocation to perform a GC and possibly an OOM. It can be used to ensure that a
// GC happens in specific methods such as ThrowIllegalMonitorStateExceptionF in Monitor::Wait.
class ScopedHeapFill {
 public:
  explicit ScopedHeapFill(Heap* heap)
      : heap_(heap),
        delta_(heap_->GetMaxMemory() - heap_->GetBytesAllocated()) {
    heap_->num_bytes_allocated_.FetchAndAddSequentiallyConsistent(delta_);
  }
  ~ScopedHeapFill() {
    heap_->num_bytes_allocated_.FetchAndSubSequentiallyConsistent(delta_);
  }

 private:
  Heap* const heap_;
  const int64_t delta_;
};

}  // namespace gc
}  // namespace art

#endif  // ART_RUNTIME_GC_HEAP_H_