/*
 * Copyright (C) 2008 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_GC_HEAP_H_
#define ART_RUNTIME_GC_HEAP_H_

#include <iosfwd>
#include <string>
#include <unordered_set>
#include <vector>

#include "allocator_type.h"
#include "arch/instruction_set.h"
#include "atomic.h"
#include "base/time_utils.h"
#include "gc/accounting/atomic_stack.h"
#include "gc/accounting/card_table.h"
#include "gc/accounting/read_barrier_table.h"
#include "gc/collector/gc_type.h"
#include "gc/collector_type.h"
#include "gc/gc_cause.h"
#include "gc/space/large_object_space.h"
#include "globals.h"
#include "handle.h"
#include "obj_ptr.h"
#include "object_callbacks.h"
#include "offsets.h"
#include "process_state.h"
#include "safe_map.h"
#include "verify_object.h"

namespace art {

class ConditionVariable;
class Mutex;
class StackVisitor;
class Thread;
class ThreadPool;
class TimingLogger;
class VariableSizedHandleScope;

namespace mirror {
  class Class;
  class Object;
}  // namespace mirror

namespace gc {

class AllocationListener;
class AllocRecordObjectMap;
class GcPauseListener;
class ReferenceProcessor;
class TaskProcessor;
class Verification;

namespace accounting {
  class HeapBitmap;
  class ModUnionTable;
  class RememberedSet;
}  // namespace accounting

namespace collector {
  class ConcurrentCopying;
  class GarbageCollector;
  class MarkCompact;
  class MarkSweep;
  class SemiSpace;
}  // namespace collector

namespace allocator {
  class RosAlloc;
}  // namespace allocator

namespace space {
  class AllocSpace;
  class BumpPointerSpace;
  class ContinuousMemMapAllocSpace;
  class DiscontinuousSpace;
  class DlMallocSpace;
  class ImageSpace;
  class LargeObjectSpace;
  class MallocSpace;
  class RegionSpace;
  class RosAllocSpace;
  class Space;
  class ZygoteSpace;
}  // namespace space

class AgeCardVisitor {
 public:
  uint8_t operator()(uint8_t card) const {
    return (card == accounting::CardTable::kCardDirty) ? card - 1 : 0;
  }
};

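// A minimal sketch of how the visitor above ages cards across collections (the concrete
// constants live in accounting::CardTable): a card dirtied by the write barrier is aged to
// kCardDirty - 1 on the first GC that processes it and cleared to 0 on the next, so a card
// survives exactly one collection after its last dirtying write.
//
//   AgeCardVisitor age;
//   uint8_t card = accounting::CardTable::kCardDirty;
//   card = age(card);  // kCardDirty - 1: "aged", written to since the previous GC.
//   card = age(card);  // 0: clean, no writes for a full GC cycle.
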
enum HomogeneousSpaceCompactResult {
  // Success.
  kSuccess,
  // Reject due to disabled moving GC.
  kErrorReject,
  // Unsupported due to the current configuration.
  kErrorUnsupported,
  // System is shutting down.
  kErrorVMShuttingDown,
};

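// A minimal caller-side sketch (hypothetical call site; PerformHomogeneousSpaceCompact() is
// declared on Heap below) of how a result might be handled:
//
//   switch (heap->PerformHomogeneousSpaceCompact()) {
//     case HomogeneousSpaceCompactResult::kSuccess:
//       break;  // Retry the failed allocation.
//     case HomogeneousSpaceCompactResult::kErrorReject:
//     case HomogeneousSpaceCompactResult::kErrorUnsupported:
//     case HomogeneousSpaceCompactResult::kErrorVMShuttingDown:
//       break;  // Fall back to throwing OutOfMemoryError.
//   }
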
// If true, use rosalloc/RosAllocSpace instead of dlmalloc/DlMallocSpace
static constexpr bool kUseRosAlloc = true;

// If true, use thread-local allocation stack.
static constexpr bool kUseThreadLocalAllocationStack = true;

class Heap {
 public:
  static constexpr size_t kDefaultStartingSize = kPageSize;
  static constexpr size_t kDefaultInitialSize = 2 * MB;
  static constexpr size_t kDefaultMaximumSize = 256 * MB;
  static constexpr size_t kDefaultNonMovingSpaceCapacity = 64 * MB;
  static constexpr size_t kDefaultMaxFree = 2 * MB;
  static constexpr size_t kDefaultMinFree = kDefaultMaxFree / 4;
  static constexpr size_t kDefaultLongPauseLogThreshold = MsToNs(5);
  static constexpr size_t kDefaultLongGCLogThreshold = MsToNs(100);
  static constexpr size_t kDefaultTLABSize = 32 * KB;
  static constexpr double kDefaultTargetUtilization = 0.5;
  static constexpr double kDefaultHeapGrowthMultiplier = 2.0;
  // Primitive arrays larger than this size are put in the large object space.
  static constexpr size_t kMinLargeObjectThreshold = 3 * kPageSize;
  static constexpr size_t kDefaultLargeObjectThreshold = kMinLargeObjectThreshold;
  // Whether or not parallel GC is enabled. If not, then we never create the thread pool.
  static constexpr bool kDefaultEnableParallelGC = false;

  // Whether or not we use the free list large object space. We only use it when
  // USE_ART_LOW_4G_ALLOCATOR is set, since that configuration forces the slow msync loop in
  // MemMap::MapAnonymous for the map-based large object space.
  static constexpr space::LargeObjectSpaceType kDefaultLargeObjectSpaceType =
      USE_ART_LOW_4G_ALLOCATOR ?
          space::LargeObjectSpaceType::kFreeList
        : space::LargeObjectSpaceType::kMap;

  // Used so that we don't overflow the allocation time atomic integer.
  static constexpr size_t kTimeAdjust = 1024;

  // How often we allow heap trimming to happen (nanoseconds).
  static constexpr uint64_t kHeapTrimWait = MsToNs(5000);
  // How long we wait after a transition request to perform a collector transition (nanoseconds).
  static constexpr uint64_t kCollectorTransitionWait = MsToNs(5000);

  // Create a heap with the requested sizes. The possibly empty image_file_names specify
  // Spaces to load based on ImageWriter output.
  Heap(size_t initial_size,
       size_t growth_limit,
       size_t min_free,
       size_t max_free,
       double target_utilization,
       double foreground_heap_growth_multiplier,
       size_t capacity,
       size_t non_moving_space_capacity,
       const std::string& original_image_file_name,
       InstructionSet image_instruction_set,
       CollectorType foreground_collector_type,
       CollectorType background_collector_type,
       space::LargeObjectSpaceType large_object_space_type,
       size_t large_object_threshold,
       size_t parallel_gc_threads,
       size_t conc_gc_threads,
       bool low_memory_mode,
       size_t long_pause_threshold,
       size_t long_gc_threshold,
       bool ignore_max_footprint,
       bool use_tlab,
       bool verify_pre_gc_heap,
       bool verify_pre_sweeping_heap,
       bool verify_post_gc_heap,
       bool verify_pre_gc_rosalloc,
       bool verify_pre_sweeping_rosalloc,
       bool verify_post_gc_rosalloc,
       bool gc_stress_mode,
       bool measure_gc_performance,
       bool use_homogeneous_space_compaction,
       uint64_t min_interval_homogeneous_space_compaction_by_oom);

  ~Heap();

  // Allocates and initializes storage for an object instance.
  template <bool kInstrumented, typename PreFenceVisitor>
  mirror::Object* AllocObject(Thread* self,
                              ObjPtr<mirror::Class> klass,
                              size_t num_bytes,
                              const PreFenceVisitor& pre_fence_visitor)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!*gc_complete_lock_,
               !*pending_task_lock_,
               !*backtrace_lock_,
               !Roles::uninterruptible_) {
    return AllocObjectWithAllocator<kInstrumented, true>(self,
                                                         klass,
                                                         num_bytes,
                                                         GetCurrentAllocator(),
                                                         pre_fence_visitor);
  }

  template <bool kInstrumented, typename PreFenceVisitor>
  mirror::Object* AllocNonMovableObject(Thread* self,
                                        ObjPtr<mirror::Class> klass,
                                        size_t num_bytes,
                                        const PreFenceVisitor& pre_fence_visitor)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!*gc_complete_lock_,
               !*pending_task_lock_,
               !*backtrace_lock_,
               !Roles::uninterruptible_) {
    return AllocObjectWithAllocator<kInstrumented, true>(self,
                                                         klass,
                                                         num_bytes,
                                                         GetCurrentNonMovingAllocator(),
                                                         pre_fence_visitor);
  }

  template <bool kInstrumented, bool kCheckLargeObject, typename PreFenceVisitor>
  ALWAYS_INLINE mirror::Object* AllocObjectWithAllocator(Thread* self,
                                                         ObjPtr<mirror::Class> klass,
                                                         size_t byte_count,
                                                         AllocatorType allocator,
                                                         const PreFenceVisitor& pre_fence_visitor)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!*gc_complete_lock_,
               !*pending_task_lock_,
               !*backtrace_lock_,
               !Roles::uninterruptible_);

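  // A minimal usage sketch (hypothetical call site, not part of this class): the pre-fence
  // visitor runs on the new object before the allocation fence publishes it, which is where
  // callers typically set a length or similar field. The visitor signature shown is an
  // assumption based on how these visitors are invoked.
  //
  //   struct EmptyPreFenceVisitor {
  //     void operator()(ObjPtr<mirror::Object> obj ATTRIBUTE_UNUSED,
  //                     size_t usable_size ATTRIBUTE_UNUSED) const {}
  //   };
  //   mirror::Object* obj =
  //       heap->AllocObject</*kInstrumented=*/true>(self, klass, byte_count,
  //                                                 EmptyPreFenceVisitor());
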
  AllocatorType GetCurrentAllocator() const {
    return current_allocator_;
  }

  AllocatorType GetCurrentNonMovingAllocator() const {
    return current_non_moving_allocator_;
  }

  // Visit all of the live objects in the heap.
  void VisitObjects(ObjectCallback callback, void* arg)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!Locks::heap_bitmap_lock_, !*gc_complete_lock_);
  void VisitObjectsPaused(ObjectCallback callback, void* arg)
      REQUIRES(Locks::mutator_lock_, !Locks::heap_bitmap_lock_, !*gc_complete_lock_);

  void CheckPreconditionsForAllocObject(ObjPtr<mirror::Class> c, size_t byte_count)
      REQUIRES_SHARED(Locks::mutator_lock_);

  void RegisterNativeAllocation(JNIEnv* env, size_t bytes)
      REQUIRES(!*gc_complete_lock_, !*pending_task_lock_, !*native_blocking_gc_lock_);
  void RegisterNativeFree(JNIEnv* env, size_t bytes);

  // Change the allocator, updates entrypoints.
  void ChangeAllocator(AllocatorType allocator)
      REQUIRES(Locks::mutator_lock_, !Locks::runtime_shutdown_lock_);

  // Transition the garbage collector during runtime, may copy objects from one space to another.
  void TransitionCollector(CollectorType collector_type) REQUIRES(!*gc_complete_lock_);

  // Change the collector to be one of the possible options (MS, CMS, SS).
  void ChangeCollector(CollectorType collector_type)
      REQUIRES(Locks::mutator_lock_);

  // The given reference is believed to point to an object in the Java heap; check its soundness.
  // TODO: NO_THREAD_SAFETY_ANALYSIS since we call this everywhere and it is impossible to find a
  // proper lock ordering for it.
  void VerifyObjectBody(ObjPtr<mirror::Object> o) NO_THREAD_SAFETY_ANALYSIS;

  // Check sanity of all live references.
  void VerifyHeap() REQUIRES(!Locks::heap_bitmap_lock_);
  // Returns how many failures occurred.
  size_t VerifyHeapReferences(bool verify_referents = true)
      REQUIRES(Locks::mutator_lock_, !*gc_complete_lock_);
  bool VerifyMissingCardMarks()
      REQUIRES(Locks::heap_bitmap_lock_, Locks::mutator_lock_);

  // A weaker test than IsLiveObject or VerifyObject that doesn't require the heap lock,
  // and doesn't abort on error, allowing the caller to report more
  // meaningful diagnostics.
  bool IsValidObjectAddress(const void* obj) const REQUIRES_SHARED(Locks::mutator_lock_);

  // Faster alternative to IsHeapAddress since finding if an object is in the large object space is
  // very slow.
  bool IsNonDiscontinuousSpaceHeapAddress(const void* addr) const
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Returns true if 'obj' is a live heap object, false otherwise (including for invalid addresses).
  // Requires the heap lock to be held.
  bool IsLiveObjectLocked(ObjPtr<mirror::Object> obj,
                          bool search_allocation_stack = true,
                          bool search_live_stack = true,
                          bool sorted = false)
      REQUIRES_SHARED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);

  // Returns true if there is any chance that the object (obj) will move.
  bool IsMovableObject(ObjPtr<mirror::Object> obj) const REQUIRES_SHARED(Locks::mutator_lock_);

  // Temporarily disable the moving (compacting) GC until the matching Decrement call is made.
  void IncrementDisableMovingGC(Thread* self) REQUIRES(!*gc_complete_lock_);
  void DecrementDisableMovingGC(Thread* self) REQUIRES(!*gc_complete_lock_);

  // Temporarily disable thread flip for JNI critical calls.
  void IncrementDisableThreadFlip(Thread* self) REQUIRES(!*thread_flip_lock_);
  void DecrementDisableThreadFlip(Thread* self) REQUIRES(!*thread_flip_lock_);
  void ThreadFlipBegin(Thread* self) REQUIRES(!*thread_flip_lock_);
  void ThreadFlipEnd(Thread* self) REQUIRES(!*thread_flip_lock_);

  // Clear all of the mark bits, doesn't clear bitmaps which have the same live bits as mark bits.
  // Mutator lock is required for GetContinuousSpaces.
  void ClearMarkedObjects()
      REQUIRES(Locks::heap_bitmap_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Initiates an explicit garbage collection.
  void CollectGarbage(bool clear_soft_references)
      REQUIRES(!*gc_complete_lock_, !*pending_task_lock_);

  // Does a concurrent GC, should only be called by the GC daemon thread
  // through runtime.
  void ConcurrentGC(Thread* self, GcCause cause, bool force_full)
      REQUIRES(!Locks::runtime_shutdown_lock_, !*gc_complete_lock_, !*pending_task_lock_);

  // Implements VMDebug.countInstancesOfClass and JDWP VM_InstanceCount.
  // The boolean decides whether to use IsAssignableFrom or == when comparing classes.
  void CountInstances(const std::vector<Handle<mirror::Class>>& classes,
                      bool use_is_assignable_from,
                      uint64_t* counts)
      REQUIRES(!Locks::heap_bitmap_lock_, !*gc_complete_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Implements JDWP RT_Instances.
  void GetInstances(VariableSizedHandleScope& scope,
                    Handle<mirror::Class> c,
                    int32_t max_count,
                    std::vector<Handle<mirror::Object>>& instances)
      REQUIRES(!Locks::heap_bitmap_lock_, !*gc_complete_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Implements JDWP OR_ReferringObjects.
  void GetReferringObjects(VariableSizedHandleScope& scope,
                           Handle<mirror::Object> o,
                           int32_t max_count,
                           std::vector<Handle<mirror::Object>>& referring_objects)
      REQUIRES(!Locks::heap_bitmap_lock_, !*gc_complete_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Removes the growth limit on the alloc space so it may grow to its maximum capacity. Used to
  // implement dalvik.system.VMRuntime.clearGrowthLimit.
  void ClearGrowthLimit();

  // Make the current growth limit the new maximum capacity, unmaps pages at the end of spaces
  // which will never be used. Used to implement dalvik.system.VMRuntime.clampGrowthLimit.
  void ClampGrowthLimit() REQUIRES(!Locks::heap_bitmap_lock_);

  // Target ideal heap utilization ratio, implements
  // dalvik.system.VMRuntime.getTargetHeapUtilization.
  double GetTargetHeapUtilization() const {
    return target_utilization_;
  }

  // Data structure memory usage tracking.
  void RegisterGCAllocation(size_t bytes);
  void RegisterGCDeAllocation(size_t bytes);

  // Set the heap's private space pointers to be the same as the space based on its type. Public
  // due to usage by tests.
  void SetSpaceAsDefault(space::ContinuousSpace* continuous_space)
      REQUIRES(!Locks::heap_bitmap_lock_);
  void AddSpace(space::Space* space)
      REQUIRES(!Locks::heap_bitmap_lock_)
      REQUIRES(Locks::mutator_lock_);
  void RemoveSpace(space::Space* space)
      REQUIRES(!Locks::heap_bitmap_lock_)
      REQUIRES(Locks::mutator_lock_);

  // Set target ideal heap utilization ratio, implements
  // dalvik.system.VMRuntime.setTargetHeapUtilization.
  void SetTargetHeapUtilization(float target);

  // For the alloc space, sets the maximum number of bytes that the heap is allowed to allocate
  // from the system. Doesn't allow the space to exceed its growth limit.
  void SetIdealFootprint(size_t max_allowed_footprint);

  // Blocks the caller until the garbage collector becomes idle and returns the type of GC we
  // waited for.
  collector::GcType WaitForGcToComplete(GcCause cause, Thread* self) REQUIRES(!*gc_complete_lock_);

  // Update the heap's process state to a new value, may cause compaction to occur.
  void UpdateProcessState(ProcessState old_process_state, ProcessState new_process_state)
      REQUIRES(!*pending_task_lock_, !*gc_complete_lock_);

  bool HaveContinuousSpaces() const NO_THREAD_SAFETY_ANALYSIS {
    // No lock needed: checking whether the vector is empty is safe without synchronization here.
    return !continuous_spaces_.empty();
  }

  const std::vector<space::ContinuousSpace*>& GetContinuousSpaces() const
      REQUIRES_SHARED(Locks::mutator_lock_) {
    return continuous_spaces_;
  }

  const std::vector<space::DiscontinuousSpace*>& GetDiscontinuousSpaces() const {
    return discontinuous_spaces_;
  }

  const collector::Iteration* GetCurrentGcIteration() const {
    return &current_gc_iteration_;
  }
  collector::Iteration* GetCurrentGcIteration() {
    return &current_gc_iteration_;
  }

  // Enable verification of object references when the runtime is sufficiently initialized.
  void EnableObjectValidation() {
    verify_object_mode_ = kVerifyObjectSupport;
    if (verify_object_mode_ > kVerifyObjectModeDisabled) {
      VerifyHeap();
    }
  }

  // Disable object reference verification for image writing.
  void DisableObjectValidation() {
    verify_object_mode_ = kVerifyObjectModeDisabled;
  }

  // Other checks may be performed if we know the heap should be in a sane state.
  bool IsObjectValidationEnabled() const {
    return verify_object_mode_ > kVerifyObjectModeDisabled;
  }

  // Returns true if low memory mode is enabled.
  bool IsLowMemoryMode() const {
    return low_memory_mode_;
  }

  // Returns the heap growth multiplier, this affects how much we grow the heap after a GC.
  // Scales heap growth, min free, and max free.
  double HeapGrowthMultiplier() const;

  // Freed bytes can be negative in cases where we copy objects from a compacted space to a
  // free-list backed space.
  void RecordFree(uint64_t freed_objects, int64_t freed_bytes);

  // Record the bytes freed by thread-local buffer revoke.
  void RecordFreeRevoke();

  // Must be called if a field of an Object in the heap changes, and before any GC safe-point.
  // The call is not needed if null is stored in the field.
  ALWAYS_INLINE void WriteBarrierField(ObjPtr<mirror::Object> dst,
                                       MemberOffset offset,
                                       ObjPtr<mirror::Object> new_value)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Write barrier for array operations that update many field positions
  ALWAYS_INLINE void WriteBarrierArray(ObjPtr<mirror::Object> dst,
                                       int start_offset,
                                       // TODO: element_count or byte_count?
                                       size_t length)
      REQUIRES_SHARED(Locks::mutator_lock_);

  ALWAYS_INLINE void WriteBarrierEveryFieldOf(ObjPtr<mirror::Object> obj)
      REQUIRES_SHARED(Locks::mutator_lock_);

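  // A minimal sketch of the intended call pattern (hypothetical call site): the barrier dirties
  // the card covering dst so a concurrent or generational collector re-scans dst's fields. Per
  // the comment above, it is needed after the reference store but may be skipped for null stores.
  //
  //   dst->SetFieldObject</*kTransactionActive=*/false>(offset, new_value);
  //   if (new_value != nullptr) {
  //     heap->WriteBarrierField(dst, offset, new_value);
  //   }
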
  accounting::CardTable* GetCardTable() const {
    return card_table_.get();
  }

  accounting::ReadBarrierTable* GetReadBarrierTable() const {
    return rb_table_.get();
  }

  void AddFinalizerReference(Thread* self, ObjPtr<mirror::Object>* object);

  // Returns the number of bytes currently allocated.
  size_t GetBytesAllocated() const {
    return num_bytes_allocated_.LoadSequentiallyConsistent();
  }

  // Returns the number of objects currently allocated.
  size_t GetObjectsAllocated() const
      REQUIRES(!Locks::heap_bitmap_lock_);

  // Returns the total number of objects allocated since the heap was created.
  uint64_t GetObjectsAllocatedEver() const;

  // Returns the total number of bytes allocated since the heap was created.
  uint64_t GetBytesAllocatedEver() const;

  // Returns the total number of objects freed since the heap was created.
  uint64_t GetObjectsFreedEver() const {
    return total_objects_freed_ever_;
  }

  // Returns the total number of bytes freed since the heap was created.
  uint64_t GetBytesFreedEver() const {
    return total_bytes_freed_ever_;
  }

  // Implements java.lang.Runtime.maxMemory, returning the maximum amount of memory a program can
  // consume. For a regular VM this would relate to the -Xmx option and would return -1 if no Xmx
  // were specified. Android apps start with a growth limit (small heap size) which is
  // cleared/extended for large apps.
  size_t GetMaxMemory() const {
    // There are some race conditions in the allocation code that can cause bytes allocated to
    // become larger than growth_limit_ in rare cases.
    return std::max(GetBytesAllocated(), growth_limit_);
  }

  // Implements java.lang.Runtime.totalMemory, returning approximate amount of memory currently
  // consumed by an application.
  size_t GetTotalMemory() const;

  // Returns approximately how much free memory we have until the next GC happens.
  size_t GetFreeMemoryUntilGC() const {
    return max_allowed_footprint_ - GetBytesAllocated();
  }

  // Returns approximately how much free memory we have until the next OOME happens.
  size_t GetFreeMemoryUntilOOME() const {
    return growth_limit_ - GetBytesAllocated();
  }

  // Returns how much free memory we have until we need to grow the heap to perform an allocation.
  // Similar to GetFreeMemoryUntilGC. Implements java.lang.Runtime.freeMemory.
  size_t GetFreeMemory() const {
    size_t byte_allocated = num_bytes_allocated_.LoadSequentiallyConsistent();
    size_t total_memory = GetTotalMemory();
    // Make sure we don't get a negative number.
    return total_memory - std::min(total_memory, byte_allocated);
  }

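  // A worked example of how these queries relate (illustrative numbers, and assuming
  // GetTotalMemory() tracks the current footprint): with growth_limit_ = 256 MB,
  // max_allowed_footprint_ = 96 MB, GetTotalMemory() = 96 MB, and 64 MB currently allocated:
  //
  //   GetFreeMemoryUntilGC()   == 96 MB - 64 MB  == 32 MB   (headroom before a GC is due)
  //   GetFreeMemoryUntilOOME() == 256 MB - 64 MB == 192 MB  (headroom before OOME)
  //   GetFreeMemory()          == 96 MB - 64 MB  == 32 MB   (java.lang.Runtime.freeMemory)
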
  // Get the space that corresponds to an object's address. Current implementation searches all
  // spaces in turn. If fail_ok is false then failing to find a space will cause an abort.
  // TODO: consider using a faster data structure such as a binary tree.
  space::ContinuousSpace* FindContinuousSpaceFromObject(ObjPtr<mirror::Object>, bool fail_ok) const
      REQUIRES_SHARED(Locks::mutator_lock_);

  space::ContinuousSpace* FindContinuousSpaceFromAddress(const mirror::Object* addr) const
      REQUIRES_SHARED(Locks::mutator_lock_);

  space::DiscontinuousSpace* FindDiscontinuousSpaceFromObject(ObjPtr<mirror::Object>,
                                                              bool fail_ok) const
      REQUIRES_SHARED(Locks::mutator_lock_);

  space::Space* FindSpaceFromObject(ObjPtr<mirror::Object> obj, bool fail_ok) const
      REQUIRES_SHARED(Locks::mutator_lock_);

  space::Space* FindSpaceFromAddress(const void* ptr) const
      REQUIRES_SHARED(Locks::mutator_lock_);

  void DumpForSigQuit(std::ostream& os) REQUIRES(!*gc_complete_lock_);

  // Do a pending collector transition.
  void DoPendingCollectorTransition() REQUIRES(!*gc_complete_lock_, !*pending_task_lock_);

  // Deflate monitors, ... and trim the spaces.
  void Trim(Thread* self) REQUIRES(!*gc_complete_lock_);

  void RevokeThreadLocalBuffers(Thread* thread);
  void RevokeRosAllocThreadLocalBuffers(Thread* thread);
  void RevokeAllThreadLocalBuffers();
  void AssertThreadLocalBuffersAreRevoked(Thread* thread);
  void AssertAllBumpPointerSpaceThreadLocalBuffersAreRevoked();
  void RosAllocVerification(TimingLogger* timings, const char* name)
      REQUIRES(Locks::mutator_lock_);

  accounting::HeapBitmap* GetLiveBitmap() REQUIRES_SHARED(Locks::heap_bitmap_lock_) {
    return live_bitmap_.get();
  }

  accounting::HeapBitmap* GetMarkBitmap() REQUIRES_SHARED(Locks::heap_bitmap_lock_) {
    return mark_bitmap_.get();
  }

  accounting::ObjectStack* GetLiveStack() REQUIRES_SHARED(Locks::heap_bitmap_lock_) {
    return live_stack_.get();
  }

  void PreZygoteFork() NO_THREAD_SAFETY_ANALYSIS;

  // Mark and empty stack.
  void FlushAllocStack()
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(Locks::heap_bitmap_lock_);

  // Revoke all the thread-local allocation stacks.
  void RevokeAllThreadLocalAllocationStacks(Thread* self)
      REQUIRES(Locks::mutator_lock_, !Locks::runtime_shutdown_lock_, !Locks::thread_list_lock_);

  // Mark all the objects in the allocation stack in the specified bitmap.
  // TODO: Refactor?
  void MarkAllocStack(accounting::SpaceBitmap<kObjectAlignment>* bitmap1,
                      accounting::SpaceBitmap<kObjectAlignment>* bitmap2,
                      accounting::SpaceBitmap<kLargeObjectAlignment>* large_objects,
                      accounting::ObjectStack* stack)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(Locks::heap_bitmap_lock_);

  // Mark the specified allocation stack as live.
  void MarkAllocStackAsLive(accounting::ObjectStack* stack)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(Locks::heap_bitmap_lock_);

  // Unbind any bound bitmaps.
  void UnBindBitmaps()
      REQUIRES(Locks::heap_bitmap_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Returns the boot image spaces. There may be multiple boot image spaces.
  const std::vector<space::ImageSpace*>& GetBootImageSpaces() const {
    return boot_image_spaces_;
  }

  bool ObjectIsInBootImageSpace(ObjPtr<mirror::Object> obj) const
      REQUIRES_SHARED(Locks::mutator_lock_);

  bool IsInBootImageOatFile(const void* p) const
      REQUIRES_SHARED(Locks::mutator_lock_);

  void GetBootImagesSize(uint32_t* boot_image_begin,
                         uint32_t* boot_image_end,
                         uint32_t* boot_oat_begin,
                         uint32_t* boot_oat_end);

  // Permanently disable moving garbage collection.
  void DisableMovingGc() REQUIRES(!*gc_complete_lock_);

  space::DlMallocSpace* GetDlMallocSpace() const {
    return dlmalloc_space_;
  }

  space::RosAllocSpace* GetRosAllocSpace() const {
    return rosalloc_space_;
  }

  // Return the corresponding rosalloc space.
  space::RosAllocSpace* GetRosAllocSpace(gc::allocator::RosAlloc* rosalloc) const
      REQUIRES_SHARED(Locks::mutator_lock_);

  space::MallocSpace* GetNonMovingSpace() const {
    return non_moving_space_;
  }

  space::LargeObjectSpace* GetLargeObjectsSpace() const {
    return large_object_space_;
  }

  // Returns the free list space that may contain movable objects (the
  // one that's not the non-moving space), either rosalloc_space_ or
  // dlmalloc_space_.
  space::MallocSpace* GetPrimaryFreeListSpace() {
    if (kUseRosAlloc) {
      DCHECK(rosalloc_space_ != nullptr);
      // reinterpret_cast is necessary as the space class hierarchy
      // isn't known (#included) yet here.
      return reinterpret_cast<space::MallocSpace*>(rosalloc_space_);
    } else {
      DCHECK(dlmalloc_space_ != nullptr);
      return reinterpret_cast<space::MallocSpace*>(dlmalloc_space_);
    }
  }

  void DumpSpaces(std::ostream& stream) const REQUIRES_SHARED(Locks::mutator_lock_);
  std::string DumpSpaces() const REQUIRES_SHARED(Locks::mutator_lock_);

  // GC performance measuring
  void DumpGcPerformanceInfo(std::ostream& os)
      REQUIRES(!*gc_complete_lock_);
  void ResetGcPerformanceInfo() REQUIRES(!*gc_complete_lock_);

  // Thread pool.
  void CreateThreadPool();
  void DeleteThreadPool();
  ThreadPool* GetThreadPool() {
    return thread_pool_.get();
  }
  size_t GetParallelGCThreadCount() const {
    return parallel_gc_threads_;
  }
  size_t GetConcGCThreadCount() const {
    return conc_gc_threads_;
  }
  accounting::ModUnionTable* FindModUnionTableFromSpace(space::Space* space);
  void AddModUnionTable(accounting::ModUnionTable* mod_union_table);

  accounting::RememberedSet* FindRememberedSetFromSpace(space::Space* space);
  void AddRememberedSet(accounting::RememberedSet* remembered_set);
  // Also deletes the remembered set.
  void RemoveRememberedSet(space::Space* space);

  bool IsCompilingBoot() const;
  bool HasBootImageSpace() const {
    return !boot_image_spaces_.empty();
  }

  ReferenceProcessor* GetReferenceProcessor() {
    return reference_processor_.get();
  }
  TaskProcessor* GetTaskProcessor() {
    return task_processor_.get();
  }

  bool HasZygoteSpace() const {
    return zygote_space_ != nullptr;
  }

  collector::ConcurrentCopying* ConcurrentCopyingCollector() {
    return concurrent_copying_collector_;
  }

  CollectorType CurrentCollectorType() {
    return collector_type_;
  }

  bool IsGcConcurrentAndMoving() const {
    if (IsGcConcurrent() && IsMovingGc(collector_type_)) {
      // Assume no transition when a concurrent moving collector is used.
      DCHECK_EQ(collector_type_, foreground_collector_type_);
      return true;
    }
    return false;
  }

  bool IsMovingGCDisabled(Thread* self) REQUIRES(!*gc_complete_lock_) {
    MutexLock mu(self, *gc_complete_lock_);
    return disable_moving_gc_count_ > 0;
  }

  // Request an asynchronous trim.
  void RequestTrim(Thread* self) REQUIRES(!*pending_task_lock_);

  // Request asynchronous GC.
  void RequestConcurrentGC(Thread* self, GcCause cause, bool force_full)
      REQUIRES(!*pending_task_lock_);

  // Whether or not we may use a garbage collector, used so that we only create collectors we need.
  bool MayUseCollector(CollectorType type) const;

  // Used by tests to reduce timing-dependent flakiness in OOME behavior.
  void SetMinIntervalHomogeneousSpaceCompactionByOom(uint64_t interval) {
    min_interval_homogeneous_space_compaction_by_oom_ = interval;
  }

  // Helpers for android.os.Debug.getRuntimeStat().
  uint64_t GetGcCount() const;
  uint64_t GetGcTime() const;
  uint64_t GetBlockingGcCount() const;
  uint64_t GetBlockingGcTime() const;
  void DumpGcCountRateHistogram(std::ostream& os) const REQUIRES(!*gc_complete_lock_);
  void DumpBlockingGcCountRateHistogram(std::ostream& os) const REQUIRES(!*gc_complete_lock_);

  // Allocation tracking support
  // Callers to this function use double-checked locking to ensure safety on allocation_records_
  bool IsAllocTrackingEnabled() const {
    return alloc_tracking_enabled_.LoadRelaxed();
  }

  void SetAllocTrackingEnabled(bool enabled) REQUIRES(Locks::alloc_tracker_lock_) {
    alloc_tracking_enabled_.StoreRelaxed(enabled);
  }

  AllocRecordObjectMap* GetAllocationRecords() const
      REQUIRES(Locks::alloc_tracker_lock_) {
    return allocation_records_.get();
  }

  void SetAllocationRecords(AllocRecordObjectMap* records)
      REQUIRES(Locks::alloc_tracker_lock_);

  void VisitAllocationRecords(RootVisitor* visitor) const
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!Locks::alloc_tracker_lock_);

  void SweepAllocationRecords(IsMarkedVisitor* visitor) const
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!Locks::alloc_tracker_lock_);

  void DisallowNewAllocationRecords() const
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!Locks::alloc_tracker_lock_);

  void AllowNewAllocationRecords() const
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!Locks::alloc_tracker_lock_);

  void BroadcastForNewAllocationRecords() const
      REQUIRES(!Locks::alloc_tracker_lock_);

  void DisableGCForShutdown() REQUIRES(!*gc_complete_lock_);

  // Create a new alloc space and compact default alloc space to it.
  HomogeneousSpaceCompactResult PerformHomogeneousSpaceCompact() REQUIRES(!*gc_complete_lock_);
  bool SupportHomogeneousSpaceCompactAndCollectorTransitions() const;

  // Install an allocation listener.
  void SetAllocationListener(AllocationListener* l);
  // Remove an allocation listener. Note: the listener must not be deleted, as for performance
  // reasons, we assume it stays valid when we read it (so that we don't require a lock).
  void RemoveAllocationListener();

  // Install a gc pause listener.
  void SetGcPauseListener(GcPauseListener* l);
  // Get the currently installed gc pause listener, or null.
  GcPauseListener* GetGcPauseListener() {
    return gc_pause_listener_.LoadAcquire();
  }
  // Remove a gc pause listener. Note: the listener must not be deleted, as for performance
  // reasons, we assume it stays valid when we read it (so that we don't require a lock).
  void RemoveGcPauseListener();

  const Verification* GetVerification() const;

 private:
  class ConcurrentGCTask;
  class CollectorTransitionTask;
  class HeapTrimTask;

  // Compact source space to target space. Returns the collector used.
  collector::GarbageCollector* Compact(space::ContinuousMemMapAllocSpace* target_space,
                                       space::ContinuousMemMapAllocSpace* source_space,
                                       GcCause gc_cause)
      REQUIRES(Locks::mutator_lock_);

  void LogGC(GcCause gc_cause, collector::GarbageCollector* collector);
  void StartGC(Thread* self, GcCause cause, CollectorType collector_type)
      REQUIRES(!*gc_complete_lock_);
  void FinishGC(Thread* self, collector::GcType gc_type) REQUIRES(!*gc_complete_lock_);

  // Create a mem map with a preferred base address.
  static MemMap* MapAnonymousPreferredAddress(const char* name, uint8_t* request_begin,
                                              size_t capacity, std::string* out_error_str);

  // Returns true if we can do hspace compaction.
  bool SupportHSpaceCompaction() const {
    return main_space_backup_ != nullptr;
  }

  static ALWAYS_INLINE bool AllocatorHasAllocationStack(AllocatorType allocator_type) {
    return
        allocator_type != kAllocatorTypeBumpPointer &&
        allocator_type != kAllocatorTypeTLAB &&
        allocator_type != kAllocatorTypeRegion &&
        allocator_type != kAllocatorTypeRegionTLAB;
  }
  static ALWAYS_INLINE bool AllocatorMayHaveConcurrentGC(AllocatorType allocator_type) {
    if (kUseReadBarrier) {
      // Read barrier may have the TLAB allocator but is always concurrent. TODO: clean this up.
      return true;
    }
    return
        allocator_type != kAllocatorTypeBumpPointer &&
        allocator_type != kAllocatorTypeTLAB;
  }
  static bool IsMovingGc(CollectorType collector_type) {
    return
        collector_type == kCollectorTypeSS ||
        collector_type == kCollectorTypeGSS ||
        collector_type == kCollectorTypeCC ||
        collector_type == kCollectorTypeCCBackground ||
        collector_type == kCollectorTypeMC ||
        collector_type == kCollectorTypeHomogeneousSpaceCompact;
  }
  bool ShouldAllocLargeObject(ObjPtr<mirror::Class> c, size_t byte_count) const
      REQUIRES_SHARED(Locks::mutator_lock_);
  ALWAYS_INLINE void CheckConcurrentGC(Thread* self,
                                       size_t new_num_bytes_allocated,
                                       ObjPtr<mirror::Object>* obj)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!*pending_task_lock_, !*gc_complete_lock_);

  accounting::ObjectStack* GetMarkStack() {
    return mark_stack_.get();
  }

  // We don't force this to be inlined since it is a slow path.
  template <bool kInstrumented, typename PreFenceVisitor>
  mirror::Object* AllocLargeObject(Thread* self,
                                   ObjPtr<mirror::Class>* klass,
                                   size_t byte_count,
                                   const PreFenceVisitor& pre_fence_visitor)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!*gc_complete_lock_, !*pending_task_lock_, !*backtrace_lock_);

  // Handles Allocate()'s slow allocation path with GC involved after
  // an initial allocation attempt failed.
  mirror::Object* AllocateInternalWithGc(Thread* self,
                                         AllocatorType allocator,
                                         bool instrumented,
                                         size_t num_bytes,
                                         size_t* bytes_allocated,
                                         size_t* usable_size,
                                         size_t* bytes_tl_bulk_allocated,
                                         ObjPtr<mirror::Class>* klass)
      REQUIRES(!Locks::thread_suspend_count_lock_, !*gc_complete_lock_, !*pending_task_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Allocate into a specific space.
  mirror::Object* AllocateInto(Thread* self,
                               space::AllocSpace* space,
                               ObjPtr<mirror::Class> c,
                               size_t bytes)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Need to do this with mutators paused so that somebody doesn't accidentally allocate into the
  // wrong space.
  void SwapSemiSpaces() REQUIRES(Locks::mutator_lock_);

  // Try to allocate a number of bytes, this function never does any GCs. Needs to be inlined so
  // that the switch statement is constant optimized in the entrypoints.
  template <const bool kInstrumented, const bool kGrow>
  ALWAYS_INLINE mirror::Object* TryToAllocate(Thread* self,
                                              AllocatorType allocator_type,
                                              size_t alloc_size,
                                              size_t* bytes_allocated,
                                              size_t* usable_size,
                                              size_t* bytes_tl_bulk_allocated)
      REQUIRES_SHARED(Locks::mutator_lock_);

  mirror::Object* AllocWithNewTLAB(Thread* self,
                                   size_t alloc_size,
                                   bool grow,
                                   size_t* bytes_allocated,
                                   size_t* usable_size,
                                   size_t* bytes_tl_bulk_allocated)
      REQUIRES_SHARED(Locks::mutator_lock_);

  void ThrowOutOfMemoryError(Thread* self, size_t byte_count, AllocatorType allocator_type)
      REQUIRES_SHARED(Locks::mutator_lock_);

  ALWAYS_INLINE bool IsOutOfMemoryOnAllocation(AllocatorType allocator_type,
                                               size_t alloc_size,
                                               bool grow);

  // Run the finalizers. If timeout is non-zero, then we use the VMRuntime version.
  void RunFinalization(JNIEnv* env, uint64_t timeout);

  // Blocks the caller until the garbage collector becomes idle and returns the type of GC we
  // waited for.
  collector::GcType WaitForGcToCompleteLocked(GcCause cause, Thread* self)
      REQUIRES(gc_complete_lock_);

  void RequestCollectorTransition(CollectorType desired_collector_type, uint64_t delta_time)
      REQUIRES(!*pending_task_lock_);

  void RequestConcurrentGCAndSaveObject(Thread* self, bool force_full, ObjPtr<mirror::Object>* obj)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!*pending_task_lock_);
  bool IsGCRequestPending() const;

  // Sometimes CollectGarbageInternal decides to run a different GC than you requested. Returns
  // which type of GC was actually run.
  collector::GcType CollectGarbageInternal(collector::GcType gc_plan,
                                           GcCause gc_cause,
                                           bool clear_soft_references)
      REQUIRES(!*gc_complete_lock_, !Locks::heap_bitmap_lock_, !Locks::thread_suspend_count_lock_,
               !*pending_task_lock_);

  void PreGcVerification(collector::GarbageCollector* gc)
      REQUIRES(!Locks::mutator_lock_, !*gc_complete_lock_);
  void PreGcVerificationPaused(collector::GarbageCollector* gc)
      REQUIRES(Locks::mutator_lock_, !*gc_complete_lock_);
  void PrePauseRosAllocVerification(collector::GarbageCollector* gc)
      REQUIRES(Locks::mutator_lock_);
  void PreSweepingGcVerification(collector::GarbageCollector* gc)
      REQUIRES(Locks::mutator_lock_, !Locks::heap_bitmap_lock_, !*gc_complete_lock_);
  void PostGcVerification(collector::GarbageCollector* gc)
      REQUIRES(!Locks::mutator_lock_, !*gc_complete_lock_);
  void PostGcVerificationPaused(collector::GarbageCollector* gc)
      REQUIRES(Locks::mutator_lock_, !*gc_complete_lock_);

  // Find a collector based on GC type.
  collector::GarbageCollector* FindCollectorByGcType(collector::GcType gc_type);

  // Create the main free list malloc space, either a RosAlloc space or DlMalloc space.
  void CreateMainMallocSpace(MemMap* mem_map,
                             size_t initial_size,
                             size_t growth_limit,
                             size_t capacity);

  // Create a malloc space based on a mem map. Does not set the space as default.
  space::MallocSpace* CreateMallocSpaceFromMemMap(MemMap* mem_map,
                                                  size_t initial_size,
                                                  size_t growth_limit,
                                                  size_t capacity,
                                                  const char* name,
                                                  bool can_move_objects);

  // Given the current contents of the alloc space, increase the allowed heap footprint to match
  // the target utilization ratio. This should only be called immediately after a full garbage
  // collection. bytes_allocated_before_gc is used to measure bytes / second for the period during
  // which the GC ran.
  void GrowForUtilization(collector::GarbageCollector* collector_ran,
                          uint64_t bytes_allocated_before_gc = 0);

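  // A worked example of the intended sizing policy (a sketch; the exact clamping lives in the
  // implementation, not this header): with target_utilization_ = 0.5, min_free_ = 512 KB, and
  // max_free_ = 2 MB (the defaults above), a full GC that leaves 20 MB live would aim for a
  // footprint of 20 MB / 0.5 = 40 MB, which the min/max-free bounds clamp to the range
  // [20 MB + 512 KB, 20 MB + 2 MB], giving a new target footprint of 22 MB.
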
  size_t GetPercentFree();

  static void VerificationCallback(mirror::Object* obj, void* arg)
      REQUIRES_SHARED(Locks::heap_bitmap_lock_);

  // Swap the allocation stack with the live stack.
  void SwapStacks() REQUIRES_SHARED(Locks::mutator_lock_);

  // Clear cards and update the mod union table. If process_alloc_space_cards is false, the alloc
  // space is not processed at all. Otherwise, its cards are cleared when clear_alloc_space_cards
  // is true and aged when it is false.
  void ProcessCards(TimingLogger* timings,
                    bool use_rem_sets,
                    bool process_alloc_space_cards,
                    bool clear_alloc_space_cards)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Push an object onto the allocation stack.
  void PushOnAllocationStack(Thread* self, ObjPtr<mirror::Object>* obj)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!*gc_complete_lock_, !*pending_task_lock_);
  void PushOnAllocationStackWithInternalGC(Thread* self, ObjPtr<mirror::Object>* obj)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!*gc_complete_lock_, !*pending_task_lock_);
  void PushOnThreadLocalAllocationStackWithInternalGC(Thread* thread, ObjPtr<mirror::Object>* obj)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!*gc_complete_lock_, !*pending_task_lock_);

  void ClearConcurrentGCRequest();
  void ClearPendingTrim(Thread* self) REQUIRES(!*pending_task_lock_);
  void ClearPendingCollectorTransition(Thread* self) REQUIRES(!*pending_task_lock_);

  // Returns whether the current collector runs concurrently with mutators: true for the
  // concurrent mark sweep and concurrent copying collectors, false for the other GC types.
  bool IsGcConcurrent() const ALWAYS_INLINE {
    return collector_type_ == kCollectorTypeCMS ||
        collector_type_ == kCollectorTypeCC ||
        collector_type_ == kCollectorTypeCCBackground;
  }

  // Trim the managed and native spaces by releasing unused memory back to the OS.
  void TrimSpaces(Thread* self) REQUIRES(!*gc_complete_lock_);

  // Trim unused (zero) pages at the end of reference tables.
  void TrimIndirectReferenceTables(Thread* self);

  void VisitObjectsInternal(ObjectCallback callback, void* arg)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!Locks::heap_bitmap_lock_, !*gc_complete_lock_);
  void VisitObjectsInternalRegionSpace(ObjectCallback callback, void* arg)
      REQUIRES(Locks::mutator_lock_, !Locks::heap_bitmap_lock_, !*gc_complete_lock_);

  void UpdateGcCountRateHistograms() REQUIRES(gc_complete_lock_);

  // GC stress mode attempts to do one GC per unique backtrace.
  void CheckGcStressMode(Thread* self, ObjPtr<mirror::Object>* obj)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!*gc_complete_lock_, !*pending_task_lock_, !*backtrace_lock_);

  collector::GcType NonStickyGcType() const {
    return HasZygoteSpace() ? collector::kGcTypePartial : collector::kGcTypeFull;
  }

  // How large new_native_bytes_allocated_ can grow before we trigger a new
  // GC.
  ALWAYS_INLINE size_t NativeAllocationGcWatermark() const {
    // Reuse max_free_ for the native allocation gc watermark, so that the
    // native heap is treated in the same way as the Java heap in the case
    // where the gc watermark update would exceed max_free_. Using max_free_
    // instead of the target utilization means the watermark doesn't depend on
    // the current number of registered native allocations.
    return max_free_;
  }

  // How large new_native_bytes_allocated_ can grow while GC is in progress
  // before we block the allocating thread to allow GC to catch up.
  ALWAYS_INLINE size_t NativeAllocationBlockingGcWatermark() const {
    // Historically the native allocations were bounded by growth_limit_. This
    // uses that same value, dividing growth_limit_ by 2 to account for
    // the fact that now the bound is relative to the number of retained
    // registered native allocations rather than absolute.
    return growth_limit_ / 2;
  }

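  // A worked example of these watermarks (a sketch using the default constants above; actual
  // values come from the runtime options): with max_free_ = kDefaultMaxFree = 2 MB and
  // growth_limit_ = 256 MB, RegisterNativeAllocation requests a GC once
  // new_native_bytes_allocated_ exceeds 2 MB, and blocks the registering thread once it exceeds
  // 256 MB / 2 = 128 MB while that GC is still in progress.
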
  // All-known continuous spaces, where objects lie within fixed bounds.
  std::vector<space::ContinuousSpace*> continuous_spaces_ GUARDED_BY(Locks::mutator_lock_);

  // All-known discontinuous spaces, where objects may be placed throughout virtual memory.
  std::vector<space::DiscontinuousSpace*> discontinuous_spaces_ GUARDED_BY(Locks::mutator_lock_);

  // All-known alloc spaces, where objects may be or have been allocated.
  std::vector<space::AllocSpace*> alloc_spaces_;

  // A space where non-movable objects are allocated, when compaction is enabled it contains
  // Classes, ArtMethods, ArtFields, and non moving objects.
  space::MallocSpace* non_moving_space_;

  // Space which we use for the kAllocatorTypeROSAlloc.
  space::RosAllocSpace* rosalloc_space_;

  // Space which we use for the kAllocatorTypeDlMalloc.
  space::DlMallocSpace* dlmalloc_space_;

  // The main space is the space which the GC copies to and from on process state updates. This
  // space is typically either the dlmalloc_space_ or the rosalloc_space_.
  space::MallocSpace* main_space_;

  // The large object space we are currently allocating into.
  space::LargeObjectSpace* large_object_space_;

  // The card table, dirtied by the write barrier.
  std::unique_ptr<accounting::CardTable> card_table_;

  std::unique_ptr<accounting::ReadBarrierTable> rb_table_;

  // A mod-union table remembers all of the references from its space to other spaces.
  AllocationTrackingSafeMap<space::Space*, accounting::ModUnionTable*, kAllocatorTagHeap>
      mod_union_tables_;

  // A remembered set remembers all of the references from its space to the target space.
  AllocationTrackingSafeMap<space::Space*, accounting::RememberedSet*, kAllocatorTagHeap>
      remembered_sets_;

  // The current collector type.
  CollectorType collector_type_;
  // Which collector we use when the app is in the foreground.
  CollectorType foreground_collector_type_;
  // Which collector we will use when the app is notified of a transition to background.
  CollectorType background_collector_type_;
  // Desired collector type, heap trimming daemon transitions the heap if it is != collector_type_.
  CollectorType desired_collector_type_;

  // Lock which guards pending tasks.
  Mutex* pending_task_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;

  // How many GC threads we may use for paused parts of garbage collection.
  const size_t parallel_gc_threads_;

  // How many GC threads we may use for unpaused parts of garbage collection.
  const size_t conc_gc_threads_;

  // Boolean for if we are in low memory mode.
  const bool low_memory_mode_;

  // If we get a pause longer than long pause log threshold, then we print out the GC after it
  // finishes.
  const size_t long_pause_log_threshold_;

  // If we get a GC longer than long GC log threshold, then we print out the GC after it finishes.
  const size_t long_gc_log_threshold_;

  // If we ignore the max footprint it lets the heap grow until it hits the heap capacity, this is
  // useful for benchmarking since it reduces time spent in GC to a low %.
  const bool ignore_max_footprint_;

  // Lock which guards zygote space creation.
  Mutex zygote_creation_lock_;

  // Non-null iff we have a zygote space. Doesn't contain the large objects allocated before
  // zygote space creation.
  space::ZygoteSpace* zygote_space_;

  // Minimum allocation size of large object.
  size_t large_object_threshold_;

  // Guards access to the state of GC, associated conditional variable is used to signal when a GC
  // completes.
  Mutex* gc_complete_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
  std::unique_ptr<ConditionVariable> gc_complete_cond_ GUARDED_BY(gc_complete_lock_);

  // Used to synchronize between JNI critical calls and the thread flip of the CC collector.
  Mutex* thread_flip_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
  std::unique_ptr<ConditionVariable> thread_flip_cond_ GUARDED_BY(thread_flip_lock_);
  // This counter keeps track of how many threads are currently in a JNI critical section. This is
  // incremented once per thread even with nested enters.
  size_t disable_thread_flip_count_ GUARDED_BY(thread_flip_lock_);
  bool thread_flip_running_ GUARDED_BY(thread_flip_lock_);

  // Reference processor.
  std::unique_ptr<ReferenceProcessor> reference_processor_;

  // Task processor, proxies heap trim requests to the daemon threads.
  std::unique_ptr<TaskProcessor> task_processor_;

  // True while the garbage collector is running.
  volatile CollectorType collector_type_running_ GUARDED_BY(gc_complete_lock_);

  // The thread currently running the GC.
  volatile Thread* thread_running_gc_ GUARDED_BY(gc_complete_lock_);

  // Last GC type we ran. Used by WaitForConcurrentGc to know which GC was waited on.
  volatile collector::GcType last_gc_type_ GUARDED_BY(gc_complete_lock_);
  collector::GcType next_gc_type_;

  // Maximum size that the heap can reach.
  size_t capacity_;

  // The size the heap is limited to. This is initially smaller than capacity, but for largeHeap
  // programs it is "cleared" making it the same as capacity.
  size_t growth_limit_;

  // When the number of bytes allocated exceeds the footprint TryAllocate returns null indicating
  // a GC should be triggered.
  size_t max_allowed_footprint_;

  // When num_bytes_allocated_ exceeds this amount then a concurrent GC should be requested so that
  // it completes ahead of an allocation failing.
  size_t concurrent_start_bytes_;

  // Since the heap was created, how many bytes have been freed.
  uint64_t total_bytes_freed_ever_;

  // Since the heap was created, how many objects have been freed.
  uint64_t total_objects_freed_ever_;

  // Number of bytes allocated. Adjusted after each allocation and free.
  Atomic<size_t> num_bytes_allocated_;

  // Number of registered native bytes allocated since the last time GC was
  // triggered. Adjusted after each RegisterNativeAllocation and
  // RegisterNativeFree. Used to determine when to trigger GC for native
  // allocations.
  // See the REDESIGN section of go/understanding-register-native-allocation.
  Atomic<size_t> new_native_bytes_allocated_;

  // Number of registered native bytes allocated prior to the last time GC was
  // triggered, for debugging purposes. The current number of registered
  // native bytes is determined by taking the sum of
  // old_native_bytes_allocated_ and new_native_bytes_allocated_.
  Atomic<size_t> old_native_bytes_allocated_;

  // Used for synchronization of blocking GCs triggered by
  // RegisterNativeAllocation.
  Mutex* native_blocking_gc_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
  std::unique_ptr<ConditionVariable> native_blocking_gc_cond_ GUARDED_BY(native_blocking_gc_lock_);
  bool native_blocking_gc_in_progress_ GUARDED_BY(native_blocking_gc_lock_);
  uint32_t native_blocking_gcs_finished_ GUARDED_BY(native_blocking_gc_lock_);

1247  // Number of bytes freed by thread local buffer revokes. This will
1248  // cancel out the ahead-of-time bulk counting of bytes allocated in
1249  // rosalloc thread-local buffers.  It is temporarily accumulated
1250  // here to be subtracted from num_bytes_allocated_ later at the next
1251  // GC.
1252  Atomic<size_t> num_bytes_freed_revoke_;

  // Info related to the current or previous GC iteration.
  collector::Iteration current_gc_iteration_;

  // Heap verification flags.
  const bool verify_missing_card_marks_;
  const bool verify_system_weaks_;
  const bool verify_pre_gc_heap_;
  const bool verify_pre_sweeping_heap_;
  const bool verify_post_gc_heap_;
  const bool verify_mod_union_table_;
  bool verify_pre_gc_rosalloc_;
  bool verify_pre_sweeping_rosalloc_;
  bool verify_post_gc_rosalloc_;
  const bool gc_stress_mode_;

  // RAII that temporarily disables the rosalloc verification during
  // the zygote fork.
  class ScopedDisableRosAllocVerification {
   private:
    Heap* const heap_;
    const bool orig_verify_pre_gc_;
    const bool orig_verify_pre_sweeping_;
    const bool orig_verify_post_gc_;

   public:
    explicit ScopedDisableRosAllocVerification(Heap* heap)
        : heap_(heap),
          orig_verify_pre_gc_(heap_->verify_pre_gc_rosalloc_),
          orig_verify_pre_sweeping_(heap_->verify_pre_sweeping_rosalloc_),
          orig_verify_post_gc_(heap_->verify_post_gc_rosalloc_) {
      heap_->verify_pre_gc_rosalloc_ = false;
      heap_->verify_pre_sweeping_rosalloc_ = false;
      heap_->verify_post_gc_rosalloc_ = false;
    }
    ~ScopedDisableRosAllocVerification() {
      heap_->verify_pre_gc_rosalloc_ = orig_verify_pre_gc_;
      heap_->verify_pre_sweeping_rosalloc_ = orig_verify_pre_sweeping_;
      heap_->verify_post_gc_rosalloc_ = orig_verify_post_gc_;
    }
  };
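
  // Intended usage during the zygote fork (illustrative):
  //
  //   {
  //     ScopedDisableRosAllocVerification disable_verification(this);
  //     // ... perform the fork with rosalloc verification suppressed ...
  //   }  // Destructor restores the original flags.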

  // Parallel GC data structures.
  std::unique_ptr<ThreadPool> thread_pool_;

  // Estimated allocation rate (bytes / second). Computed between the time of the last GC cycle
  // and the start of the current one.
  uint64_t allocation_rate_;
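
  // Conceptually (hypothetical names, not the exact ART computation):
  //
  //   uint64_t elapsed_ns = NanoTime() - last_gc_end_ns;
  //   allocation_rate_ = bytes_allocated_since_last_gc * UINT64_C(1000000000) / elapsed_ns;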

  // For a GC cycle, bitmaps tracking which objects are live and which have been marked,
  // guarded by the heap bitmap lock.
  std::unique_ptr<accounting::HeapBitmap> live_bitmap_ GUARDED_BY(Locks::heap_bitmap_lock_);
  std::unique_ptr<accounting::HeapBitmap> mark_bitmap_ GUARDED_BY(Locks::heap_bitmap_lock_);

  // Mark stack that we reuse to avoid re-allocating the mark stack.
  std::unique_ptr<accounting::ObjectStack> mark_stack_;

  // Allocation stack, new allocations go here so that we can do sticky mark bits. This enables us
  // to use the live bitmap as the old mark bitmap.
  const size_t max_allocation_stack_size_;
  std::unique_ptr<accounting::ObjectStack> allocation_stack_;

  // Second allocation stack so that we can process allocations with the heap unlocked.
  std::unique_ptr<accounting::ObjectStack> live_stack_;
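
  // The two stacks let mutators keep allocating while the collector drains the
  // previous batch; conceptually the collector just swaps them (sketch):
  //
  //   allocation_stack_.swap(live_stack_);  // New allocations now fill the other stack.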

  // Allocator type.
  AllocatorType current_allocator_;
  const AllocatorType current_non_moving_allocator_;

  // Which GCs we run, in order, when an allocation fails.
  std::vector<collector::GcType> gc_plan_;

  // Bump pointer spaces.
  space::BumpPointerSpace* bump_pointer_space_;
  // Temp space is the space into which the semi-space collector copies.
  space::BumpPointerSpace* temp_space_;

  space::RegionSpace* region_space_;

  // Minimum free guarantees that you always have at least min_free_ free bytes after growing for
  // utilization, regardless of the target utilization ratio.
  size_t min_free_;

  // The ideal maximum free size, when we grow the heap for utilization.
  size_t max_free_;

  // Target ideal heap utilization ratio.
  double target_utilization_;

  // How much more we grow the heap when the app is in the foreground instead of the background.
  double foreground_heap_growth_multiplier_;
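
  // A hedged sketch of how these knobs might combine when growing the heap
  // after a GC (simplified; the real logic also depends on the collector type):
  //
  //   size_t target = static_cast<size_t>(bytes_allocated / target_utilization_);
  //   target = std::min(std::max(target, bytes_allocated + min_free_),
  //                     bytes_allocated + max_free_);
  //   if (foreground) {
  //     target = bytes_allocated + static_cast<size_t>(
  //         (target - bytes_allocated) * foreground_heap_growth_multiplier_);
  //   }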

  // Total time for which mutators are paused or waiting for GC to complete.
  uint64_t total_wait_time_;

  // The current state of heap verification; may be enabled or disabled.
  VerifyObjectMode verify_object_mode_;

  // Compacting GC disable count, prevents compacting GC from running iff > 0.
  size_t disable_moving_gc_count_ GUARDED_BY(gc_complete_lock_);

  std::vector<collector::GarbageCollector*> garbage_collectors_;
  collector::SemiSpace* semi_space_collector_;
  collector::MarkCompact* mark_compact_collector_;
  collector::ConcurrentCopying* concurrent_copying_collector_;

  const bool is_running_on_memory_tool_;
  const bool use_tlab_;

  // Pointer to the space which becomes the new main space when we do homogeneous space compaction.
  // Use unique_ptr since the space is only added during the homogeneous compaction phase.
  std::unique_ptr<space::MallocSpace> main_space_backup_;

  // Minimum interval allowed between two homogeneous space compactions caused by OOM.
  uint64_t min_interval_homogeneous_space_compaction_by_oom_;

  // Time of the last homogeneous space compaction caused by OOM.
  uint64_t last_time_homogeneous_space_compaction_by_oom_;

  // Number of OOMs avoided by homogeneous space compaction.
  Atomic<size_t> count_delayed_oom_;

  // Count of requested homogeneous space compactions.
  Atomic<size_t> count_requested_homogeneous_space_compaction_;

  // Count of ignored homogeneous space compactions.
  Atomic<size_t> count_ignored_homogeneous_space_compaction_;

  // Count of performed homogeneous space compactions.
  Atomic<size_t> count_performed_homogeneous_space_compaction_;

  // Whether or not a concurrent GC is pending.
  Atomic<bool> concurrent_gc_pending_;

  // Active tasks which we can modify (change target time, desired collector type, etc.).
  CollectorTransitionTask* pending_collector_transition_ GUARDED_BY(pending_task_lock_);
  HeapTrimTask* pending_heap_trim_ GUARDED_BY(pending_task_lock_);

  // Whether or not we use homogeneous space compaction to avoid OOM errors.
  bool use_homogeneous_space_compaction_for_oom_;

  // True if the currently running collection has made some thread wait.
  bool running_collection_is_blocking_ GUARDED_BY(gc_complete_lock_);
  // The number of blocking GC runs.
  uint64_t blocking_gc_count_;
  // The total duration of blocking GC runs.
  uint64_t blocking_gc_time_;
  // The duration of the window for the GC count rate histograms.
  static constexpr uint64_t kGcCountRateHistogramWindowDuration = MsToNs(10 * 1000);  // 10s.
  // The last time the GC count rate histograms were updated.
  // This is rounded to a multiple of kGcCountRateHistogramWindowDuration (i.e. of 10s).
  uint64_t last_update_time_gc_count_rate_histograms_;
  // The running count of GC runs in the last window.
  uint64_t gc_count_last_window_;
  // The running count of blocking GC runs in the last window.
  uint64_t blocking_gc_count_last_window_;
  // The maximum number of buckets in the GC count rate histograms.
  static constexpr size_t kGcCountRateMaxBucketCount = 200;
  // The histogram of the number of GC invocations per window duration.
  Histogram<uint64_t> gc_count_rate_histogram_ GUARDED_BY(gc_complete_lock_);
  // The histogram of the number of blocking GC invocations per window duration.
  Histogram<uint64_t> blocking_gc_count_rate_histogram_ GUARDED_BY(gc_complete_lock_);
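
  // Sketch of the windowed update these fields support (illustrative, not the
  // exact bookkeeping):
  //
  //   uint64_t now = NanoTime();
  //   if (now - last_update_time_gc_count_rate_histograms_ >=
  //           kGcCountRateHistogramWindowDuration) {
  //     gc_count_rate_histogram_.AddValue(gc_count_last_window_);
  //     blocking_gc_count_rate_histogram_.AddValue(blocking_gc_count_last_window_);
  //     gc_count_last_window_ = 0;
  //     blocking_gc_count_last_window_ = 0;
  //     // Advance the window start, rounded to a multiple of the duration.
  //   }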

  // Allocation tracking support.
  Atomic<bool> alloc_tracking_enabled_;
  std::unique_ptr<AllocRecordObjectMap> allocation_records_;

  // GC stress related data structures.
  Mutex* backtrace_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
  // Debugging variables, seen backtraces vs unique backtraces.
  Atomic<uint64_t> seen_backtrace_count_;
  Atomic<uint64_t> unique_backtrace_count_;
  // Stack trace hashes that we already saw.
  std::unordered_set<uint64_t> seen_backtraces_ GUARDED_BY(backtrace_lock_);
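
  // Conceptual use in GC stress mode (ComputeBacktraceHash is a hypothetical
  // helper; the rest is a sketch, not the exact ART code):
  //
  //   uint64_t hash = ComputeBacktraceHash(self);
  //   MutexLock mu(self, *backtrace_lock_);
  //   if (seen_backtraces_.insert(hash).second) {
  //     unique_backtrace_count_.FetchAndAddSequentiallyConsistent(1);
  //   } else {
  //     seen_backtrace_count_.FetchAndAddSequentiallyConsistent(1);
  //   }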

  // We disable GC when we are shutting down the runtime in case there are daemon threads still
  // allocating.
  bool gc_disabled_for_shutdown_ GUARDED_BY(gc_complete_lock_);

  // Boot image spaces.
  std::vector<space::ImageSpace*> boot_image_spaces_;

  // An installed allocation listener.
  Atomic<AllocationListener*> alloc_listener_;
  // An installed GC pause listener.
  Atomic<GcPauseListener*> gc_pause_listener_;

  std::unique_ptr<Verification> verification_;

  friend class CollectorTransitionTask;
  friend class collector::GarbageCollector;
  friend class collector::MarkCompact;
  friend class collector::ConcurrentCopying;
  friend class collector::MarkSweep;
  friend class collector::SemiSpace;
  friend class ReferenceQueue;
  friend class ScopedGCCriticalSection;
  friend class VerifyReferenceCardVisitor;
  friend class VerifyReferenceVisitor;
  friend class VerifyObjectVisitor;

  DISALLOW_IMPLICIT_CONSTRUCTORS(Heap);
};

}  // namespace gc
}  // namespace art

#endif  // ART_RUNTIME_GC_HEAP_H_