heap.h revision 5189e24fb6d42c04c48169ab2f15de56ecf3c828
1/*
2 * Copyright (C) 2008 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 *      http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17#ifndef ART_RUNTIME_GC_HEAP_H_
18#define ART_RUNTIME_GC_HEAP_H_
19
20#include <iosfwd>
21#include <string>
22#include <vector>
23
24#include "allocator_type.h"
25#include "atomic.h"
26#include "base/timing_logger.h"
27#include "gc/accounting/atomic_stack.h"
28#include "gc/accounting/card_table.h"
29#include "gc/gc_cause.h"
30#include "gc/collector/garbage_collector.h"
31#include "gc/collector/gc_type.h"
32#include "gc/collector_type.h"
33#include "globals.h"
34#include "gtest/gtest.h"
35#include "instruction_set.h"
36#include "jni.h"
37#include "object_callbacks.h"
38#include "offsets.h"
39#include "reference_processor.h"
40#include "safe_map.h"
41#include "thread_pool.h"
42#include "verify_object.h"
43
44namespace art {
45
46class ConditionVariable;
47class Mutex;
48class StackVisitor;
49class Thread;
50class TimingLogger;
51
52namespace mirror {
53  class Class;
54  class Object;
55}  // namespace mirror
56
57namespace gc {
58
59class ReferenceProcessor;
60
61namespace accounting {
62  class HeapBitmap;
63  class ModUnionTable;
64  class RememberedSet;
65}  // namespace accounting
66
67namespace collector {
68  class ConcurrentCopying;
69  class GarbageCollector;
70  class MarkCompact;
71  class MarkSweep;
72  class SemiSpace;
73}  // namespace collector
74
75namespace allocator {
76  class RosAlloc;
77}  // namespace allocator
78
79namespace space {
80  class AllocSpace;
81  class BumpPointerSpace;
82  class DiscontinuousSpace;
83  class DlMallocSpace;
84  class ImageSpace;
85  class LargeObjectSpace;
86  class MallocSpace;
87  class RosAllocSpace;
88  class Space;
89  class SpaceTest;
90  class ContinuousMemMapAllocSpace;
91}  // namespace space
92
93class AgeCardVisitor {
94 public:
95  byte operator()(byte card) const {
96    if (card == accounting::CardTable::kCardDirty) {
97      return card - 1;
98    } else {
99      return 0;
100    }
101  }
102};
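// Illustrative note (editorial, not part of the original header): AgeCardVisitor is meant to be
// applied across the card table between collections (see Heap::ProcessCards below, e.g. via
// CardTable::ModifyCardsAtomically). A dirty card ages to kCardDirty - 1 so references dirtied
// since the last GC can still be found once more; any other value drops to 0 (clean). Sketch:
//
//   AgeCardVisitor age_visitor;
//   byte aged  = age_visitor(accounting::CardTable::kCardDirty);      // kCardDirty - 1
//   byte clean = age_visitor(accounting::CardTable::kCardDirty - 1);  // 0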
103
104enum HomogeneousSpaceCompactResult {
105  // Success.
106  kSuccess,
107  // Reject due to disabled moving GC.
108  kErrorReject,
109  // System is shutting down.
110  kErrorVMShuttingDown,
111};
112
113// If true, use rosalloc/RosAllocSpace instead of dlmalloc/DlMallocSpace
114static constexpr bool kUseRosAlloc = true;
115
116// If true, use thread-local allocation stack.
117static constexpr bool kUseThreadLocalAllocationStack = true;
118
119// The process state passed in from the activity manager, used to determine when to do trimming
120// and compaction.
121enum ProcessState {
122  kProcessStateJankPerceptible = 0,
123  kProcessStateJankImperceptible = 1,
124};
125std::ostream& operator<<(std::ostream& os, const ProcessState& process_state);
126
127class Heap {
128 public:
129  // If true, measure the total allocation time.
130  static constexpr bool kMeasureAllocationTime = false;
131  // Primitive arrays larger than this size are put in the large object space.
132  static constexpr size_t kDefaultLargeObjectThreshold = 3 * kPageSize;
133
134  static constexpr size_t kDefaultStartingSize = kPageSize;
135  static constexpr size_t kDefaultInitialSize = 2 * MB;
136  static constexpr size_t kDefaultMaximumSize = 256 * MB;
137  static constexpr size_t kDefaultMaxFree = 2 * MB;
138  static constexpr size_t kDefaultMinFree = kDefaultMaxFree / 4;
139  static constexpr size_t kDefaultLongPauseLogThreshold = MsToNs(5);
140  static constexpr size_t kDefaultLongGCLogThreshold = MsToNs(100);
141  static constexpr size_t kDefaultTLABSize = 256 * KB;
142  static constexpr double kDefaultTargetUtilization = 0.5;
143  static constexpr double kDefaultHeapGrowthMultiplier = 2.0;
144
145  // Used so that we don't overflow the allocation time atomic integer.
146  static constexpr size_t kTimeAdjust = 1024;
147
148  // How often we allow heap trimming to happen (nanoseconds).
149  static constexpr uint64_t kHeapTrimWait = MsToNs(5000);
150  // How long we wait after a transition request to perform a collector transition (nanoseconds).
151  static constexpr uint64_t kCollectorTransitionWait = MsToNs(5000);
152
153  // Create a heap with the requested sizes. The possibly empty
154  // image file name specifies a Space to load based on
155  // ImageWriter output.
156  explicit Heap(size_t initial_size, size_t growth_limit, size_t min_free,
157                size_t max_free, double target_utilization,
158                double foreground_heap_growth_multiplier, size_t capacity,
159                const std::string& original_image_file_name,
160                InstructionSet image_instruction_set,
161                CollectorType foreground_collector_type, CollectorType background_collector_type,
162                size_t parallel_gc_threads, size_t conc_gc_threads, bool low_memory_mode,
163                size_t long_pause_threshold, size_t long_gc_threshold,
164                bool ignore_max_footprint, bool use_tlab,
165                bool verify_pre_gc_heap, bool verify_pre_sweeping_heap, bool verify_post_gc_heap,
166                bool verify_pre_gc_rosalloc, bool verify_pre_sweeping_rosalloc,
167                bool verify_post_gc_rosalloc, bool use_homogeneous_space_compaction,
168                uint64_t min_interval_homogeneous_space_compaction_by_oom);
169
170  ~Heap();
171
172  // Allocates and initializes storage for an object instance.
173  template <bool kInstrumented, typename PreFenceVisitor>
174  mirror::Object* AllocObject(Thread* self, mirror::Class* klass, size_t num_bytes,
175                              const PreFenceVisitor& pre_fence_visitor)
176      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
177    return AllocObjectWithAllocator<kInstrumented, true>(self, klass, num_bytes,
178                                                         GetCurrentAllocator(),
179                                                         pre_fence_visitor);
180  }
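  // Illustrative sketch (editorial, not part of the original header): a typical call site obtains
  // the heap from the runtime and passes a pre-fence visitor, which runs before the object is
  // published and lets callers such as array allocation initialize fields like the length first.
  // 'self', 'klass', 'byte_count' and 'visitor' are assumed to exist at the call site:
  //
  //   gc::Heap* heap = Runtime::Current()->GetHeap();
  //   mirror::Object* obj = heap->AllocObject<true>(self, klass, byte_count, visitor);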
181
182  template <bool kInstrumented, typename PreFenceVisitor>
183  mirror::Object* AllocNonMovableObject(Thread* self, mirror::Class* klass, size_t num_bytes,
184                                        const PreFenceVisitor& pre_fence_visitor)
185      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
186    return AllocObjectWithAllocator<kInstrumented, true>(self, klass, num_bytes,
187                                                         GetCurrentNonMovingAllocator(),
188                                                         pre_fence_visitor);
189  }
190
191  template <bool kInstrumented, bool kCheckLargeObject, typename PreFenceVisitor>
192  ALWAYS_INLINE mirror::Object* AllocObjectWithAllocator(
193      Thread* self, mirror::Class* klass, size_t byte_count, AllocatorType allocator,
194      const PreFenceVisitor& pre_fence_visitor)
195      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
196
197  AllocatorType GetCurrentAllocator() const {
198    return current_allocator_;
199  }
200
201  AllocatorType GetCurrentNonMovingAllocator() const {
202    return current_non_moving_allocator_;
203  }
204
205  // Visit all of the live objects in the heap.
206  void VisitObjects(ObjectCallback callback, void* arg)
207      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
208
209  void CheckPreconditionsForAllocObject(mirror::Class* c, size_t byte_count)
210      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
211
212  void RegisterNativeAllocation(JNIEnv* env, int bytes);
213  void RegisterNativeFree(JNIEnv* env, int bytes);
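  // Illustrative note (editorial, not part of the original header): these calls back the
  // dalvik.system.VMRuntime.registerNativeAllocation / registerNativeFree APIs, so managed code
  // can tell the GC about native memory retained on behalf of a Java object. A hypothetical
  // native peer might do roughly the following ('env' and 'kPeerBytes' are assumptions):
  //
  //   void* peer = malloc(kPeerBytes);
  //   Runtime::Current()->GetHeap()->RegisterNativeAllocation(env, kPeerBytes);
  //   ...
  //   free(peer);
  //   Runtime::Current()->GetHeap()->RegisterNativeFree(env, kPeerBytes);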
214
215  // Change the allocator; this also updates the entrypoints.
216  void ChangeAllocator(AllocatorType allocator)
217      EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_)
218      LOCKS_EXCLUDED(Locks::runtime_shutdown_lock_);
219
220  // Transition the garbage collector during runtime; this may copy objects from one space to another.
221  void TransitionCollector(CollectorType collector_type);
222
223  // Change the collector to be one of the possible options (MS, CMS, SS).
224  void ChangeCollector(CollectorType collector_type)
225      EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
226
227  // The given reference is believed to point to an object in the Java heap; check its soundness.
228  // TODO: NO_THREAD_SAFETY_ANALYSIS since we call this everywhere and it is impossible to find a
229  // proper lock ordering for it.
230  void VerifyObjectBody(mirror::Object* o) NO_THREAD_SAFETY_ANALYSIS;
231
232  // Check sanity of all live references.
233  void VerifyHeap() LOCKS_EXCLUDED(Locks::heap_bitmap_lock_);
234  // Returns how many failures occurred.
235  size_t VerifyHeapReferences(bool verify_referents = true)
236      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
237  bool VerifyMissingCardMarks()
238      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
239
240  // A weaker test than IsLiveObject or VerifyObject that doesn't require the heap lock,
241  // and doesn't abort on error, allowing the caller to report more
242  // meaningful diagnostics.
243  bool IsValidObjectAddress(const mirror::Object* obj) const
244      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
245
246  // Faster alternative to IsHeapAddress since finding if an object is in the large object space is
247  // very slow.
248  bool IsNonDiscontinuousSpaceHeapAddress(const mirror::Object* obj) const
249      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
250
251  // Returns true if 'obj' is a live heap object, false otherwise (including for invalid addresses).
252  // Requires the heap lock to be held.
253  bool IsLiveObjectLocked(mirror::Object* obj, bool search_allocation_stack = true,
254                          bool search_live_stack = true, bool sorted = false)
255      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
256
257  // Returns true if there is any chance that the object (obj) will move.
258  bool IsMovableObject(const mirror::Object* obj) const;
259
260  // Enables us to prevent a compacting GC until objects are released.
261  void IncrementDisableMovingGC(Thread* self);
262  void DecrementDisableMovingGC(Thread* self);
263
264  // Clear all of the mark bits; doesn't clear bitmaps which share their live and mark bits.
265  void ClearMarkedObjects() EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
266
267  // Initiates an explicit garbage collection.
268  void CollectGarbage(bool clear_soft_references);
269
270  // Does a concurrent GC; should only be called by the GC daemon thread
271  // through the runtime.
272  void ConcurrentGC(Thread* self) LOCKS_EXCLUDED(Locks::runtime_shutdown_lock_);
273
274  // Implements VMDebug.countInstancesOfClass and JDWP VM_InstanceCount.
275  // The boolean decides whether to use IsAssignableFrom or == when comparing classes.
276  void CountInstances(const std::vector<mirror::Class*>& classes, bool use_is_assignable_from,
277                      uint64_t* counts)
278      LOCKS_EXCLUDED(Locks::heap_bitmap_lock_)
279      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
280  // Implements JDWP RT_Instances.
281  void GetInstances(mirror::Class* c, int32_t max_count, std::vector<mirror::Object*>& instances)
282      LOCKS_EXCLUDED(Locks::heap_bitmap_lock_)
283      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
284  // Implements JDWP OR_ReferringObjects.
285  void GetReferringObjects(mirror::Object* o, int32_t max_count, std::vector<mirror::Object*>& referring_objects)
286      LOCKS_EXCLUDED(Locks::heap_bitmap_lock_)
287      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
288
289  // Removes the growth limit on the alloc space so it may grow to its maximum capacity. Used to
290  // implement dalvik.system.VMRuntime.clearGrowthLimit.
291  void ClearGrowthLimit();
292
293  // Target ideal heap utilization ratio, implements
294  // dalvik.system.VMRuntime.getTargetHeapUtilization.
295  double GetTargetHeapUtilization() const {
296    return target_utilization_;
297  }
298
299  // Data structure memory usage tracking.
300  void RegisterGCAllocation(size_t bytes);
301  void RegisterGCDeAllocation(size_t bytes);
302
303  // Set the heap's private space pointers to be the same as the space based on its type. Public
304  // due to usage by tests.
305  void SetSpaceAsDefault(space::ContinuousSpace* continuous_space)
306      LOCKS_EXCLUDED(Locks::heap_bitmap_lock_);
307  void AddSpace(space::Space* space) LOCKS_EXCLUDED(Locks::heap_bitmap_lock_);
308  void RemoveSpace(space::Space* space) LOCKS_EXCLUDED(Locks::heap_bitmap_lock_);
309
310  // Set target ideal heap utilization ratio, implements
311  // dalvik.system.VMRuntime.setTargetHeapUtilization.
312  void SetTargetHeapUtilization(float target);
313
314  // For the alloc space, sets the maximum number of bytes that the heap is allowed to allocate
315  // from the system. Doesn't allow the space to exceed its growth limit.
316  void SetIdealFootprint(size_t max_allowed_footprint);
317
318  // Blocks the caller until the garbage collector becomes idle and returns the type of GC we
319  // waited for.
320  collector::GcType WaitForGcToComplete(GcCause cause, Thread* self)
321      LOCKS_EXCLUDED(gc_complete_lock_);
322
323  // Update the heap's process state to a new value; this may cause compaction to occur.
324  void UpdateProcessState(ProcessState process_state);
325
326  const std::vector<space::ContinuousSpace*>& GetContinuousSpaces() const {
327    return continuous_spaces_;
328  }
329
330  const std::vector<space::DiscontinuousSpace*>& GetDiscontinuousSpaces() const {
331    return discontinuous_spaces_;
332  }
333
334  const collector::Iteration* GetCurrentGcIteration() const {
335    return &current_gc_iteration_;
336  }
337  collector::Iteration* GetCurrentGcIteration() {
338    return &current_gc_iteration_;
339  }
340
341  // Enable verification of object references when the runtime is sufficiently initialized.
342  void EnableObjectValidation() {
343    verify_object_mode_ = kVerifyObjectSupport;
344    if (verify_object_mode_ > kVerifyObjectModeDisabled) {
345      VerifyHeap();
346    }
347  }
348
349  // Disable object reference verification for image writing.
350  void DisableObjectValidation() {
351    verify_object_mode_ = kVerifyObjectModeDisabled;
352  }
353
354  // Other checks may be performed if we know the heap should be in a sane state.
355  bool IsObjectValidationEnabled() const {
356    return verify_object_mode_ > kVerifyObjectModeDisabled;
357  }
358
359  // Returns true if low memory mode is enabled.
360  bool IsLowMemoryMode() const {
361    return low_memory_mode_;
362  }
363
364  // Returns the heap growth multiplier; this affects how much we grow the heap after a GC.
365  // Scales heap growth, min free, and max free.
366  double HeapGrowthMultiplier() const;
367
368  // Freed bytes can be negative in cases where we copy objects from a compacted space to a
369  // free-list backed space.
370  void RecordFree(uint64_t freed_objects, int64_t freed_bytes);
371
372  // Must be called if a field of an Object in the heap changes, and before any GC safe-point.
373  // The call is not needed if NULL is stored in the field.
374  void WriteBarrierField(const mirror::Object* dst, MemberOffset /*offset*/,
375                         const mirror::Object* /*new_value*/) {
376    card_table_->MarkCard(dst);
377  }
378
379  // Write barrier for array operations that update many field positions.
380  void WriteBarrierArray(const mirror::Object* dst, int /*start_offset*/,
381                         size_t /*length TODO: element_count or byte_count?*/) {
382    card_table_->MarkCard(dst);
383  }
384
385  void WriteBarrierEveryFieldOf(const mirror::Object* obj) {
386    card_table_->MarkCard(obj);
387  }
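  // Illustrative sketch of the write-barrier contract (editorial, not part of the original
  // header): after a non-null reference has been stored into a field of 'dst', the card covering
  // 'dst' must be dirtied so concurrent and generational collections re-scan it. Field setters in
  // mirror::Object normally do this already; hand-rolled runtime code would look roughly like
  // ('dst', 'field_offset' and 'new_ref' are assumed to exist at the call site):
  //
  //   if (new_ref != nullptr) {
  //     Runtime::Current()->GetHeap()->WriteBarrierField(dst, field_offset, new_ref);
  //   }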
388
389  accounting::CardTable* GetCardTable() const {
390    return card_table_.get();
391  }
392
393  void AddFinalizerReference(Thread* self, mirror::Object** object);
394
395  // Returns the number of bytes currently allocated.
396  size_t GetBytesAllocated() const {
397    return num_bytes_allocated_.LoadSequentiallyConsistent();
398  }
399
400  // Returns the number of objects currently allocated.
401  size_t GetObjectsAllocated() const LOCKS_EXCLUDED(Locks::heap_bitmap_lock_);
402
403  // Returns the total number of objects allocated since the heap was created.
404  size_t GetObjectsAllocatedEver() const;
405
406  // Returns the total number of bytes allocated since the heap was created.
407  size_t GetBytesAllocatedEver() const;
408
409  // Returns the total number of objects freed since the heap was created.
410  size_t GetObjectsFreedEver() const {
411    return total_objects_freed_ever_;
412  }
413
414  // Returns the total number of bytes freed since the heap was created.
415  size_t GetBytesFreedEver() const {
416    return total_bytes_freed_ever_;
417  }
418
419  // Implements java.lang.Runtime.maxMemory, returning the maximum amount of memory a program can
420  // consume. For a regular VM this would relate to the -Xmx option and would return -1 if no Xmx
421  // were specified. Android apps start with a growth limit (small heap size) which is
422  // cleared/extended for large apps.
423  size_t GetMaxMemory() const {
424    return growth_limit_;
425  }
426
427  // Implements java.lang.Runtime.totalMemory, returning the amount of memory consumed by an
428  // application.
429  size_t GetTotalMemory() const;
430
431  // Implements java.lang.Runtime.freeMemory.
432  size_t GetFreeMemory() const {
433    size_t byte_allocated = num_bytes_allocated_.LoadSequentiallyConsistent();
434    // Make sure we don't get a negative number since the max allowed footprint is only updated
435    // after the GC. But we can still allocate even if bytes_allocated > max_allowed_footprint_.
436    return std::max(max_allowed_footprint_, byte_allocated) - byte_allocated;
437  }
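  // Worked example for the expression above (illustrative numbers only): with a 64 MB
  // max_allowed_footprint_ and 24 MB allocated, free memory is 64 - 24 = 40 MB. If allocations
  // race ahead of a footprint update and 70 MB are allocated, std::max(64, 70) - 70 == 0, so the
  // result can never underflow.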
438
439  // Get the space that corresponds to an object's address. The current implementation searches all
440  // spaces in turn. If fail_ok is false then failing to find a space will cause an abort.
441  // TODO: consider using faster data structure like binary tree.
442  space::ContinuousSpace* FindContinuousSpaceFromObject(const mirror::Object*, bool fail_ok) const;
443  space::DiscontinuousSpace* FindDiscontinuousSpaceFromObject(const mirror::Object*,
444                                                              bool fail_ok) const;
445  space::Space* FindSpaceFromObject(const mirror::Object*, bool fail_ok) const;
446
447  void DumpForSigQuit(std::ostream& os) EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
448
449  // Do a pending heap transition or trim.
450  void DoPendingTransitionOrTrim() LOCKS_EXCLUDED(heap_trim_request_lock_);
451
452  // Trim the managed and native heaps by releasing unused memory back to the OS.
453  void Trim() LOCKS_EXCLUDED(heap_trim_request_lock_);
454
455  void RevokeThreadLocalBuffers(Thread* thread);
456  void RevokeRosAllocThreadLocalBuffers(Thread* thread);
457  void RevokeAllThreadLocalBuffers();
458  void AssertAllBumpPointerSpaceThreadLocalBuffersAreRevoked();
459  void RosAllocVerification(TimingLogger* timings, const char* name)
460      EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
461
462  accounting::HeapBitmap* GetLiveBitmap() SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
463    return live_bitmap_.get();
464  }
465
466  accounting::HeapBitmap* GetMarkBitmap() SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
467    return mark_bitmap_.get();
468  }
469
470  accounting::ObjectStack* GetLiveStack() SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
471    return live_stack_.get();
472  }
473
474  void PreZygoteFork() NO_THREAD_SAFETY_ANALYSIS;
475
476  // Mark and empty stack.
477  void FlushAllocStack()
478      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
479
480  // Revoke all the thread-local allocation stacks.
481  void RevokeAllThreadLocalAllocationStacks(Thread* self)
482      EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_)
483      LOCKS_EXCLUDED(Locks::runtime_shutdown_lock_, Locks::thread_list_lock_);
484
485  // Mark all the objects in the allocation stack in the specified bitmap.
486  // TODO: Refactor?
487  void MarkAllocStack(accounting::SpaceBitmap<kObjectAlignment>* bitmap1,
488                      accounting::SpaceBitmap<kObjectAlignment>* bitmap2,
489                      accounting::SpaceBitmap<kLargeObjectAlignment>* large_objects,
490                      accounting::ObjectStack* stack)
491      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
492
493  // Mark the specified allocation stack as live.
494  void MarkAllocStackAsLive(accounting::ObjectStack* stack)
495      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
496
497  // Unbind any bound bitmaps.
498  void UnBindBitmaps() EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
499
500  // DEPRECATED: Should remove in "near" future when support for multiple image spaces is added.
501  // Assumes there is only one image space.
502  space::ImageSpace* GetImageSpace() const;
503
504  // Permanently disable compaction.
505  void DisableCompaction();
506
507  space::DlMallocSpace* GetDlMallocSpace() const {
508    return dlmalloc_space_;
509  }
510
511  space::RosAllocSpace* GetRosAllocSpace() const {
512    return rosalloc_space_;
513  }
514
515  // Return the corresponding rosalloc space.
516  space::RosAllocSpace* GetRosAllocSpace(gc::allocator::RosAlloc* rosalloc) const;
517
518  space::MallocSpace* GetNonMovingSpace() const {
519    return non_moving_space_;
520  }
521
522  space::LargeObjectSpace* GetLargeObjectsSpace() const {
523    return large_object_space_;
524  }
525
526  // Returns the free list space that may contain movable objects (the
527  // one that's not the non-moving space), either rosalloc_space_ or
528  // dlmalloc_space_.
529  space::MallocSpace* GetPrimaryFreeListSpace() {
530    if (kUseRosAlloc) {
531      DCHECK(rosalloc_space_ != nullptr);
532      // reinterpret_cast is necessary as the space class hierarchy
533      // isn't known (#included) yet here.
534      return reinterpret_cast<space::MallocSpace*>(rosalloc_space_);
535    } else {
536      DCHECK(dlmalloc_space_ != nullptr);
537      return reinterpret_cast<space::MallocSpace*>(dlmalloc_space_);
538    }
539  }
540
541  std::string DumpSpaces() const WARN_UNUSED;
542  void DumpSpaces(std::ostream& stream) const;
543
544  // DumpObject should only be used by the signal handler.
545  void DumpObject(std::ostream& stream, mirror::Object* obj) NO_THREAD_SAFETY_ANALYSIS;
546  // Safe helpers which check that objects are valid heap addresses before describing them.
547  std::string SafeGetClassDescriptor(mirror::Class* klass) NO_THREAD_SAFETY_ANALYSIS;
548  std::string SafePrettyTypeOf(mirror::Object* obj) NO_THREAD_SAFETY_ANALYSIS;
549
550  // GC performance measuring
551  void DumpGcPerformanceInfo(std::ostream& os);
552
553  // Returns true if we currently care about pause times.
554  bool CareAboutPauseTimes() const {
555    return process_state_ == kProcessStateJankPerceptible;
556  }
557
558  // Thread pool.
559  void CreateThreadPool();
560  void DeleteThreadPool();
561  ThreadPool* GetThreadPool() {
562    return thread_pool_.get();
563  }
564  size_t GetParallelGCThreadCount() const {
565    return parallel_gc_threads_;
566  }
567  size_t GetConcGCThreadCount() const {
568    return conc_gc_threads_;
569  }
570  accounting::ModUnionTable* FindModUnionTableFromSpace(space::Space* space);
571  void AddModUnionTable(accounting::ModUnionTable* mod_union_table);
572
573  accounting::RememberedSet* FindRememberedSetFromSpace(space::Space* space);
574  void AddRememberedSet(accounting::RememberedSet* remembered_set);
575  // Also deletes the remembered set.
576  void RemoveRememberedSet(space::Space* space);
577
578  bool IsCompilingBoot() const;
579  bool RunningOnValgrind() const {
580    return running_on_valgrind_;
581  }
582  bool HasImageSpace() const;
583
584  ReferenceProcessor* GetReferenceProcessor() {
585    return &reference_processor_;
586  }
587
588 private:
589  // Compact source space to target space.
590  void Compact(space::ContinuousMemMapAllocSpace* target_space,
591               space::ContinuousMemMapAllocSpace* source_space,
592               GcCause gc_cause)
593      EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
594
595  void FinishGC(Thread* self, collector::GcType gc_type) LOCKS_EXCLUDED(gc_complete_lock_);
596
597  // Create a mem map with a preferred base address.
598  static MemMap* MapAnonymousPreferredAddress(const char* name, byte* request_begin,
599                                              size_t capacity, int prot_flags,
600                                              std::string* out_error_str);
601
602  bool SupportHSpaceCompaction() const {
603    // Returns true if we can do hspace compaction
604    return main_space_backup_ != nullptr;
605  }
606
607  static ALWAYS_INLINE bool AllocatorHasAllocationStack(AllocatorType allocator_type) {
608    return
609        allocator_type != kAllocatorTypeBumpPointer &&
610        allocator_type != kAllocatorTypeTLAB;
611  }
612  static ALWAYS_INLINE bool AllocatorMayHaveConcurrentGC(AllocatorType allocator_type) {
613    return AllocatorHasAllocationStack(allocator_type);
614  }
615  static bool IsMovingGc(CollectorType collector_type) {
616    return collector_type == kCollectorTypeSS || collector_type == kCollectorTypeGSS ||
617        collector_type == kCollectorTypeCC || collector_type == kCollectorTypeMC ||
618        collector_type == kCollectorTypeHomogeneousSpaceCompact;
619  }
620  bool ShouldAllocLargeObject(mirror::Class* c, size_t byte_count) const
621      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
622  ALWAYS_INLINE void CheckConcurrentGC(Thread* self, size_t new_num_bytes_allocated,
623                                       mirror::Object** obj)
624      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
625
626  accounting::ObjectStack* GetMarkStack() {
627    return mark_stack_.get();
628  }
629
630  // We don't force this to be inlined since it is a slow path.
631  template <bool kInstrumented, typename PreFenceVisitor>
632  mirror::Object* AllocLargeObject(Thread* self, mirror::Class* klass, size_t byte_count,
633                                   const PreFenceVisitor& pre_fence_visitor)
634      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
635
636  // Handles Allocate()'s slow allocation path with GC involved after
637  // an initial allocation attempt failed.
638  mirror::Object* AllocateInternalWithGc(Thread* self, AllocatorType allocator, size_t num_bytes,
639                                         size_t* bytes_allocated, size_t* usable_size,
640                                         mirror::Class** klass)
641      LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
642      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
643
644  // Allocate into a specific space.
645  mirror::Object* AllocateInto(Thread* self, space::AllocSpace* space, mirror::Class* c,
646                               size_t bytes)
647      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
648
649  // Need to do this with mutators paused so that somebody doesn't accidentally allocate into the
650  // wrong space.
651  void SwapSemiSpaces() EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
652
653  // Try to allocate a number of bytes; this function never does any GCs. Needs to be inlined so
654  // that the switch statement is constant optimized in the entrypoints.
655  template <const bool kInstrumented, const bool kGrow>
656  ALWAYS_INLINE mirror::Object* TryToAllocate(Thread* self, AllocatorType allocator_type,
657                                              size_t alloc_size, size_t* bytes_allocated,
658                                              size_t* usable_size)
659      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
660
661  void ThrowOutOfMemoryError(Thread* self, size_t byte_count, AllocatorType allocator_type)
662      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
663
664  template <bool kGrow>
665  bool IsOutOfMemoryOnAllocation(AllocatorType allocator_type, size_t alloc_size);
666
667  // Returns true if the address passed in is within the address range of a continuous space.
668  bool IsValidContinuousSpaceObjectAddress(const mirror::Object* obj) const
669      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
670
671  // Run the finalizers.
672  void RunFinalization(JNIEnv* env);
673
674  // Blocks the caller until the garbage collector becomes idle and returns the type of GC we
675  // waited for.
676  collector::GcType WaitForGcToCompleteLocked(GcCause cause, Thread* self)
677      EXCLUSIVE_LOCKS_REQUIRED(gc_complete_lock_);
678
679  void RequestCollectorTransition(CollectorType desired_collector_type, uint64_t delta_time)
680      LOCKS_EXCLUDED(heap_trim_request_lock_);
681  void RequestHeapTrim() LOCKS_EXCLUDED(Locks::runtime_shutdown_lock_);
682  void RequestConcurrentGCAndSaveObject(Thread* self, mirror::Object** obj)
683      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
684  void RequestConcurrentGC(Thread* self)
685      LOCKS_EXCLUDED(Locks::runtime_shutdown_lock_);
686  bool IsGCRequestPending() const;
687
688  // Sometimes CollectGarbageInternal decides to run a different Gc than you requested. Returns
689  // which type of Gc was actually run.
690  collector::GcType CollectGarbageInternal(collector::GcType gc_plan, GcCause gc_cause,
691                                           bool clear_soft_references)
692      LOCKS_EXCLUDED(gc_complete_lock_,
693                     Locks::heap_bitmap_lock_,
694                     Locks::thread_suspend_count_lock_);
695
696  void PreGcVerification(collector::GarbageCollector* gc)
697      LOCKS_EXCLUDED(Locks::mutator_lock_);
698  void PreGcVerificationPaused(collector::GarbageCollector* gc)
699      EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
700  void PrePauseRosAllocVerification(collector::GarbageCollector* gc)
701      EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
702  void PreSweepingGcVerification(collector::GarbageCollector* gc)
703      EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
704  void PostGcVerification(collector::GarbageCollector* gc)
705      LOCKS_EXCLUDED(Locks::mutator_lock_);
706  void PostGcVerificationPaused(collector::GarbageCollector* gc)
707      EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
708
709  // Update the watermark for the native allocated bytes based on the current number of native
710  // bytes allocated and the target utilization ratio.
711  void UpdateMaxNativeFootprint();
712
713  // Find a collector based on GC type.
714  collector::GarbageCollector* FindCollectorByGcType(collector::GcType gc_type);
715
716  // Create a new alloc space and compact default alloc space to it.
717  HomogeneousSpaceCompactResult PerformHomogeneousSpaceCompact();
718
719  // Create the main free list malloc space, either a RosAlloc space or DlMalloc space.
720  void CreateMainMallocSpace(MemMap* mem_map, size_t initial_size, size_t growth_limit,
721                             size_t capacity);
722
723  // Create a malloc space based on a mem map. Does not set the space as default.
724  space::MallocSpace* CreateMallocSpaceFromMemMap(MemMap* mem_map, size_t initial_size,
725                                                  size_t growth_limit, size_t capacity,
726                                                  const char* name, bool can_move_objects);
727
728  // Given the current contents of the alloc space, increase the allowed heap footprint to match
729  // the target utilization ratio.  This should only be called immediately after a full garbage
730  // collection.
731  void GrowForUtilization(collector::GarbageCollector* collector_ran);
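  // Editorial sketch of the sizing math, assuming the usual ART policy (an approximation, not a
  // contract): the new footprint is roughly bytes_allocated / target_utilization_, clamped so
  // that between min_free_ and max_free_ bytes of headroom remain (scaled by the foreground heap
  // growth multiplier when jank-perceptible). With the defaults (utilization 0.5, max free 2 MB),
  // 10 MB live after a full GC suggests 20 MB, which is clamped down to 10 MB + 2 MB = 12 MB.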
732
733  size_t GetPercentFree();
734
735  static void VerificationCallback(mirror::Object* obj, void* arg)
736      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
737
738  // Swap the allocation stack with the live stack.
739  void SwapStacks(Thread* self);
740
741  // Clear cards and update the mod union table.
742  void ProcessCards(TimingLogger* timings, bool use_rem_sets);
743
744  // Signal the heap trim daemon that there is something to do, either a heap transition or heap
745  // trim.
746  void SignalHeapTrimDaemon(Thread* self);
747
748  // Push an object onto the allocation stack.
749  void PushOnAllocationStack(Thread* self, mirror::Object** obj)
750      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
751  void PushOnAllocationStackWithInternalGC(Thread* self, mirror::Object** obj)
752      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
753  void PushOnThreadLocalAllocationStackWithInternalGC(Thread* thread, mirror::Object** obj)
754      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
755
756  // Whether or not the GC runs concurrently with the mutators. Currently true for the concurrent
757  // mark sweep and concurrent copying collectors, false for other GC types.
758  bool IsGcConcurrent() const ALWAYS_INLINE {
759    return collector_type_ == kCollectorTypeCMS || collector_type_ == kCollectorTypeCC;
760  }
761
762  // All-known continuous spaces, where objects lie within fixed bounds.
763  std::vector<space::ContinuousSpace*> continuous_spaces_;
764
765  // All-known discontinuous spaces, where objects may be placed throughout virtual memory.
766  std::vector<space::DiscontinuousSpace*> discontinuous_spaces_;
767
768  // All-known alloc spaces, where objects may be or have been allocated.
769  std::vector<space::AllocSpace*> alloc_spaces_;
770
771  // A space where non-movable objects are allocated; when compaction is enabled it contains
772  // Classes, ArtMethods, ArtFields, and other non-moving objects.
773  space::MallocSpace* non_moving_space_;
774
775  // Space which we use for the kAllocatorTypeROSAlloc.
776  space::RosAllocSpace* rosalloc_space_;
777
778  // Space which we use for the kAllocatorTypeDlMalloc.
779  space::DlMallocSpace* dlmalloc_space_;
780
781  // The main space is the space which the GC copies to and from on process state updates. This
782  // space is typically either the dlmalloc_space_ or the rosalloc_space_.
783  space::MallocSpace* main_space_;
784
785  // The large object space we are currently allocating into.
786  space::LargeObjectSpace* large_object_space_;
787
788  // The card table, dirtied by the write barrier.
789  std::unique_ptr<accounting::CardTable> card_table_;
790
791  // A mod-union table remembers all of the references from its space to other spaces.
792  SafeMap<space::Space*, accounting::ModUnionTable*> mod_union_tables_;
793
794  // A remembered set remembers all of the references from its space to the target space.
795  SafeMap<space::Space*, accounting::RememberedSet*> remembered_sets_;
796
797  // The current collector type.
798  CollectorType collector_type_;
799  // Which collector we use when the app is in the foreground.
800  CollectorType foreground_collector_type_;
801  // Which collector we will use when the app is notified of a transition to background.
802  CollectorType background_collector_type_;
803  // Desired collector type; the heap trimming daemon transitions the heap if it is != collector_type_.
804  CollectorType desired_collector_type_;
805
806  // Lock which guards heap trim requests.
807  Mutex* heap_trim_request_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
808  // When we want to perform the next heap trim (nanoseconds).
809  uint64_t last_trim_time_ GUARDED_BY(heap_trim_request_lock_);
810  // When we want to perform the next heap transition (nanoseconds) or heap trim.
811  uint64_t heap_transition_or_trim_target_time_ GUARDED_BY(heap_trim_request_lock_);
812  // If we have a heap trim request pending.
813  bool heap_trim_request_pending_ GUARDED_BY(heap_trim_request_lock_);
814
815  // How many GC threads we may use for paused parts of garbage collection.
816  const size_t parallel_gc_threads_;
817
818  // How many GC threads we may use for unpaused parts of garbage collection.
819  const size_t conc_gc_threads_;
820
821  // Boolean for if we are in low memory mode.
822  const bool low_memory_mode_;
823
824  // If we get a pause longer than long pause log threshold, then we print out the GC after it
825  // finishes.
826  const size_t long_pause_log_threshold_;
827
828  // If we get a GC longer than long GC log threshold, then we print out the GC after it finishes.
829  const size_t long_gc_log_threshold_;
830
831  // If we ignore the max footprint, the heap is allowed to grow until it hits its capacity; this
832  // is useful for benchmarking since it reduces the time spent in GC to a low percentage.
833  const bool ignore_max_footprint_;
834
835  // Lock which guards zygote space creation.
836  Mutex zygote_creation_lock_;
837
838  // If we have a zygote space.
839  bool have_zygote_space_;
840
841  // Minimum allocation size of large object.
842  size_t large_object_threshold_;
843
844  // Guards access to the state of GC; the associated condition variable is used to signal when a
845  // GC completes.
846  Mutex* gc_complete_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
847  std::unique_ptr<ConditionVariable> gc_complete_cond_ GUARDED_BY(gc_complete_lock_);
848
849  // Reference processor.
850  ReferenceProcessor reference_processor_;
851
852  // True while the garbage collector is running.
853  volatile CollectorType collector_type_running_ GUARDED_BY(gc_complete_lock_);
854
855  // Last Gc type we ran. Used by WaitForConcurrentGc to know which Gc was waited on.
856  volatile collector::GcType last_gc_type_ GUARDED_BY(gc_complete_lock_);
857  collector::GcType next_gc_type_;
858
859  // Maximum size that the heap can reach.
860  const size_t capacity_;
861
862  // The size the heap is limited to. This is initially smaller than capacity, but for largeHeap
863  // programs it is "cleared" making it the same as capacity.
864  size_t growth_limit_;
865
866  // When the number of bytes allocated exceeds the footprint, TryAllocate returns NULL,
867  // indicating that a GC should be triggered.
868  size_t max_allowed_footprint_;
869
870  // The watermark at which a concurrent GC is requested by registerNativeAllocation.
871  size_t native_footprint_gc_watermark_;
872
873  // The watermark at which a GC is performed inside of registerNativeAllocation.
874  size_t native_footprint_limit_;
875
876  // Whether or not we need to run finalizers in the next native allocation.
877  bool native_need_to_run_finalization_;
878
879  // Whether or not we currently care about pause times.
880  ProcessState process_state_;
881
882  // When num_bytes_allocated_ exceeds this amount, a concurrent GC should be requested so that
883  // it completes ahead of an allocation failing.
884  size_t concurrent_start_bytes_;
885
886  // Since the heap was created, how many bytes have been freed.
887  size_t total_bytes_freed_ever_;
888
889  // Since the heap was created, how many objects have been freed.
890  size_t total_objects_freed_ever_;
891
892  // Number of bytes allocated.  Adjusted after each allocation and free.
893  Atomic<size_t> num_bytes_allocated_;
894
895  // Bytes which are allocated and managed by native code but still need to be accounted for.
896  Atomic<size_t> native_bytes_allocated_;
897
898  // Data structure GC overhead.
899  Atomic<size_t> gc_memory_overhead_;
900
901  // Info related to the current or previous GC iteration.
902  collector::Iteration current_gc_iteration_;
903
904  // Heap verification flags.
905  const bool verify_missing_card_marks_;
906  const bool verify_system_weaks_;
907  const bool verify_pre_gc_heap_;
908  const bool verify_pre_sweeping_heap_;
909  const bool verify_post_gc_heap_;
910  const bool verify_mod_union_table_;
911  bool verify_pre_gc_rosalloc_;
912  bool verify_pre_sweeping_rosalloc_;
913  bool verify_post_gc_rosalloc_;
914
915  // RAII that temporarily disables the rosalloc verification during
916  // the zygote fork.
917  class ScopedDisableRosAllocVerification {
918   private:
919    Heap* const heap_;
920    const bool orig_verify_pre_gc_;
921    const bool orig_verify_pre_sweeping_;
922    const bool orig_verify_post_gc_;
923
924   public:
925    explicit ScopedDisableRosAllocVerification(Heap* heap)
926        : heap_(heap),
927          orig_verify_pre_gc_(heap_->verify_pre_gc_rosalloc_),
928          orig_verify_pre_sweeping_(heap_->verify_pre_sweeping_rosalloc_),
929          orig_verify_post_gc_(heap_->verify_post_gc_rosalloc_) {
930      heap_->verify_pre_gc_rosalloc_ = false;
931      heap_->verify_pre_sweeping_rosalloc_ = false;
932      heap_->verify_post_gc_rosalloc_ = false;
933    }
934    ~ScopedDisableRosAllocVerification() {
935      heap_->verify_pre_gc_rosalloc_ = orig_verify_pre_gc_;
936      heap_->verify_pre_sweeping_rosalloc_ = orig_verify_pre_sweeping_;
937      heap_->verify_post_gc_rosalloc_ = orig_verify_post_gc_;
938    }
939  };
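  // Illustrative usage sketch (editorial, not part of the original header): the scope saves and
  // restores the three rosalloc verification flags around code that must run with them off, e.g.
  // during the zygote fork:
  //
  //   {
  //     ScopedDisableRosAllocVerification disable_rosalloc_verif(this);
  //     // ... create the zygote space / fork with rosalloc verification disabled ...
  //   }  // The destructor restores the original flag values here.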
940
941  // Parallel GC data structures.
942  std::unique_ptr<ThreadPool> thread_pool_;
943
944  // The nanosecond time at which the last GC ended.
945  uint64_t last_gc_time_ns_;
946
947  // How many bytes were allocated at the end of the last GC.
948  uint64_t last_gc_size_;
949
950  // Estimated allocation rate (bytes / second). Computed between the time of the last GC cycle
951  // and the start of the current one.
952  uint64_t allocation_rate_;
953
954  // For a GC cycle, bitmaps tracking which objects are live and which have been marked.
955  std::unique_ptr<accounting::HeapBitmap> live_bitmap_ GUARDED_BY(Locks::heap_bitmap_lock_);
956  std::unique_ptr<accounting::HeapBitmap> mark_bitmap_ GUARDED_BY(Locks::heap_bitmap_lock_);
957
958  // Mark stack that we reuse to avoid re-allocating the mark stack.
959  std::unique_ptr<accounting::ObjectStack> mark_stack_;
960
961  // Allocation stack; new allocations go here so that we can do sticky mark bits. This enables us
962  // to use the live bitmap as the old mark bitmap.
963  const size_t max_allocation_stack_size_;
964  std::unique_ptr<accounting::ObjectStack> allocation_stack_;
965
966  // Second allocation stack so that we can process allocation with the heap unlocked.
967  std::unique_ptr<accounting::ObjectStack> live_stack_;
968
969  // Allocator type.
970  AllocatorType current_allocator_;
971  const AllocatorType current_non_moving_allocator_;
972
973  // Which GCs we run, in order, when an allocation fails.
974  std::vector<collector::GcType> gc_plan_;
975
976  // Bump pointer spaces.
977  space::BumpPointerSpace* bump_pointer_space_;
978  // Temp space is the space which the semispace collector copies to.
979  space::BumpPointerSpace* temp_space_;
980
981  // Minimum free guarantees that you always have at least min_free_ free bytes after growing for
982  // utilization, regardless of target utilization ratio.
983  size_t min_free_;
984
985  // The ideal maximum free size, when we grow the heap for utilization.
986  size_t max_free_;
987
988  // Target ideal heap utilization ratio
989  double target_utilization_;
990
991  // How much more we grow the heap when we are a foreground app instead of background.
992  double foreground_heap_growth_multiplier_;
993
994  // Total time for which mutators are paused or waiting for GC to complete.
995  uint64_t total_wait_time_;
996
997  // Cumulative time spent allocating objects, in microseconds.
998  AtomicInteger total_allocation_time_;
999
1000  // The current state of heap verification, may be enabled or disabled.
1001  VerifyObjectMode verify_object_mode_;
1002
1003  // Compacting GC disable count; compacting GC is prevented from running iff this is > 0.
1004  size_t disable_moving_gc_count_ GUARDED_BY(gc_complete_lock_);
1005
1006  std::vector<collector::GarbageCollector*> garbage_collectors_;
1007  collector::SemiSpace* semi_space_collector_;
1008  collector::MarkCompact* mark_compact_collector_;
1009  collector::ConcurrentCopying* concurrent_copying_collector_;
1010
1011  const bool running_on_valgrind_;
1012  const bool use_tlab_;
1013
1014  // Pointer to the space which becomes the new main space when we do homogeneous space compaction.
1015  // Use unique_ptr since the space is only added during the homogeneous compaction phase.
1016  std::unique_ptr<space::MallocSpace> main_space_backup_;
1017
1018  // Minimum interval allowed between two homogeneous space compactions caused by OOM.
1019  uint64_t min_interval_homogeneous_space_compaction_by_oom_;
1020
1021  // Time of the last homogeneous space compaction caused by OOM.
1022  uint64_t last_time_homogeneous_space_compaction_by_oom_;
1023
1024  // Saved OOMs by homogeneous space compaction.
1025  Atomic<size_t> count_delayed_oom_;
1026
1027  // Count for requested homogeneous space compaction.
1028  Atomic<size_t> count_requested_homogeneous_space_compaction_;
1029
1030  // Count for ignored homogeneous space compaction.
1031  Atomic<size_t> count_ignored_homogeneous_space_compaction_;
1032
1033  // Count for performed homogeneous space compaction.
1034  Atomic<size_t> count_performed_homogeneous_space_compaction_;
1035
1036  // Whether or not we use homogeneous space compaction to avoid OOM errors.
1037  bool use_homogeneous_space_compaction_for_oom_;
1038
1039  friend class collector::GarbageCollector;
1040  friend class collector::MarkCompact;
1041  friend class collector::MarkSweep;
1042  friend class collector::SemiSpace;
1043  friend class ReferenceQueue;
1044  friend class VerifyReferenceCardVisitor;
1045  friend class VerifyReferenceVisitor;
1046  friend class VerifyObjectVisitor;
1047  friend class ScopedHeapFill;
1048  friend class ScopedHeapLock;
1049  friend class space::SpaceTest;
1050
1051  class AllocationTimer {
1052   private:
1053    Heap* heap_;
1054    mirror::Object** allocated_obj_ptr_;
1055    uint64_t allocation_start_time_;
1056   public:
1057    AllocationTimer(Heap* heap, mirror::Object** allocated_obj_ptr);
1058    ~AllocationTimer();
1059  };
1060
1061  DISALLOW_IMPLICIT_CONSTRUCTORS(Heap);
1062};
1063
1064// ScopedHeapFill changes the bytes allocated counter to be equal to the growth limit. This
1065// causes the next allocation to perform a GC and possibly an OOM. It can be used to ensure that a
1066// GC happens in specific methods such as ThrowIllegalMonitorStateExceptionF in Monitor::Wait.
1067class ScopedHeapFill {
1068 public:
1069  explicit ScopedHeapFill(Heap* heap)
1070      : heap_(heap),
1071        delta_(heap_->GetMaxMemory() - heap_->GetBytesAllocated()) {
1072    heap_->num_bytes_allocated_.FetchAndAddSequentiallyConsistent(delta_);
1073  }
1074  ~ScopedHeapFill() {
1075    heap_->num_bytes_allocated_.FetchAndSubSequentiallyConsistent(delta_);
1076  }
1077
1078 private:
1079  Heap* const heap_;
1080  const int64_t delta_;
1081};
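// Illustrative usage sketch (editorial, not part of the original header): filling the heap for
// the dynamic scope of the object forces the next allocation down the GC/OOM slow path, which is
// useful for provoking a collection at a precise point:
//
//   {
//     gc::ScopedHeapFill heap_fill(Runtime::Current()->GetHeap());
//     // The next allocation in this scope will trigger a GC and possibly an OOME.
//   }  // The bytes-allocated counter is restored here.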
1082
1083}  // namespace gc
1084}  // namespace art
1085
1086#endif  // ART_RUNTIME_GC_HEAP_H_
1087