heap.cc revision a1b730c90691321edeb67bd11baea261da59128e
1/*
2 * Copyright (C) 2011 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 *      http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17#include "heap.h"
18
19#define ATRACE_TAG ATRACE_TAG_DALVIK
20#include <cutils/trace.h>
21
22#include <limits>
23#include <memory>
24#include <vector>
25
26#include "base/histogram-inl.h"
27#include "base/stl_util.h"
28#include "common_throws.h"
29#include "cutils/sched_policy.h"
30#include "debugger.h"
31#include "gc/accounting/atomic_stack.h"
32#include "gc/accounting/card_table-inl.h"
33#include "gc/accounting/heap_bitmap-inl.h"
34#include "gc/accounting/mod_union_table.h"
35#include "gc/accounting/mod_union_table-inl.h"
36#include "gc/accounting/remembered_set.h"
37#include "gc/accounting/space_bitmap-inl.h"
38#include "gc/collector/concurrent_copying.h"
39#include "gc/collector/mark_compact.h"
40#include "gc/collector/mark_sweep-inl.h"
41#include "gc/collector/partial_mark_sweep.h"
42#include "gc/collector/semi_space.h"
43#include "gc/collector/sticky_mark_sweep.h"
44#include "gc/reference_processor.h"
45#include "gc/space/bump_pointer_space.h"
46#include "gc/space/dlmalloc_space-inl.h"
47#include "gc/space/image_space.h"
48#include "gc/space/large_object_space.h"
49#include "gc/space/rosalloc_space-inl.h"
50#include "gc/space/space-inl.h"
51#include "gc/space/zygote_space.h"
52#include "entrypoints/quick/quick_alloc_entrypoints.h"
53#include "heap-inl.h"
54#include "image.h"
55#include "mirror/art_field-inl.h"
56#include "mirror/class-inl.h"
57#include "mirror/object.h"
58#include "mirror/object-inl.h"
59#include "mirror/object_array-inl.h"
60#include "mirror/reference-inl.h"
61#include "object_utils.h"
62#include "os.h"
63#include "reflection.h"
64#include "runtime.h"
65#include "ScopedLocalRef.h"
66#include "scoped_thread_state_change.h"
67#include "handle_scope-inl.h"
68#include "thread_list.h"
69#include "well_known_classes.h"
70
71namespace art {
72
73namespace gc {
74
75static constexpr size_t kCollectorTransitionStressIterations = 0;
76static constexpr size_t kCollectorTransitionStressWait = 10 * 1000;  // Microseconds
77static constexpr bool kGCALotMode = false;
78static constexpr size_t kGcAlotInterval = KB;
79// Minimum amount of remaining bytes before a concurrent GC is triggered.
80static constexpr size_t kMinConcurrentRemainingBytes = 128 * KB;
81static constexpr size_t kMaxConcurrentRemainingBytes = 512 * KB;
82// Sticky GC throughput adjustment, divided by 4. Increasing this causes sticky GC to occur more
83// relative to partial/full GC. This may be desirable since sticky GCs interfere less with mutator
84// threads (lower pauses, use less memory bandwidth).
85static constexpr double kStickyGcThroughputAdjustment = 1.0;
86// Whether or not we use the free list large object space.
87static constexpr bool kUseFreeListSpaceForLOS = false;
88// Whether or not we compact the zygote in PreZygoteFork.
89static constexpr bool kCompactZygote = kMovingCollector;
90static constexpr size_t kNonMovingSpaceCapacity = 64 * MB;
91// How many reserve entries are at the end of the allocation stack; these are only needed if the
92// allocation stack overflows.
93static constexpr size_t kAllocationStackReserveSize = 1024;
94// Default mark stack size in bytes.
95static const size_t kDefaultMarkStackSize = 64 * KB;
96// Define space names.
97static const char* kDlMallocSpaceName[2] = {"main dlmalloc space", "main dlmalloc space 1"};
98static const char* kRosAllocSpaceName[2] = {"main rosalloc space", "main rosalloc space 1"};
99static const char* kMemMapSpaceName[2] = {"main space", "main space 1"};
100static constexpr size_t kGSSBumpPointerSpaceCapacity = 32 * MB;
101
102Heap::Heap(size_t initial_size, size_t growth_limit, size_t min_free, size_t max_free,
103           double target_utilization, double foreground_heap_growth_multiplier, size_t capacity,
104           const std::string& image_file_name, const InstructionSet image_instruction_set,
105           CollectorType foreground_collector_type, CollectorType background_collector_type,
106           size_t parallel_gc_threads, size_t conc_gc_threads, bool low_memory_mode,
107           size_t long_pause_log_threshold, size_t long_gc_log_threshold,
108           bool ignore_max_footprint, bool use_tlab,
109           bool verify_pre_gc_heap, bool verify_pre_sweeping_heap, bool verify_post_gc_heap,
110           bool verify_pre_gc_rosalloc, bool verify_pre_sweeping_rosalloc,
111           bool verify_post_gc_rosalloc, bool use_homogeneous_space_compaction_for_oom,
112           uint64_t min_interval_homogeneous_space_compaction_by_oom)
113    : non_moving_space_(nullptr),
114      rosalloc_space_(nullptr),
115      dlmalloc_space_(nullptr),
116      main_space_(nullptr),
117      collector_type_(kCollectorTypeNone),
118      foreground_collector_type_(foreground_collector_type),
119      background_collector_type_(background_collector_type),
120      desired_collector_type_(foreground_collector_type_),
121      heap_trim_request_lock_(nullptr),
122      last_trim_time_(0),
123      heap_transition_or_trim_target_time_(0),
124      heap_trim_request_pending_(false),
125      parallel_gc_threads_(parallel_gc_threads),
126      conc_gc_threads_(conc_gc_threads),
127      low_memory_mode_(low_memory_mode),
128      long_pause_log_threshold_(long_pause_log_threshold),
129      long_gc_log_threshold_(long_gc_log_threshold),
130      ignore_max_footprint_(ignore_max_footprint),
131      zygote_creation_lock_("zygote creation lock", kZygoteCreationLock),
132      have_zygote_space_(false),
133      large_object_threshold_(std::numeric_limits<size_t>::max()),  // Starts out disabled.
134      collector_type_running_(kCollectorTypeNone),
135      last_gc_type_(collector::kGcTypeNone),
136      next_gc_type_(collector::kGcTypePartial),
137      capacity_(capacity),
138      growth_limit_(growth_limit),
139      max_allowed_footprint_(initial_size),
140      native_footprint_gc_watermark_(initial_size),
141      native_footprint_limit_(2 * initial_size),
142      native_need_to_run_finalization_(false),
143      // Initially assume we perceive jank in case the process state is never updated.
144      process_state_(kProcessStateJankPerceptible),
145      concurrent_start_bytes_(std::numeric_limits<size_t>::max()),
146      total_bytes_freed_ever_(0),
147      total_objects_freed_ever_(0),
148      num_bytes_allocated_(0),
149      native_bytes_allocated_(0),
150      gc_memory_overhead_(0),
151      verify_missing_card_marks_(false),
152      verify_system_weaks_(false),
153      verify_pre_gc_heap_(verify_pre_gc_heap),
154      verify_pre_sweeping_heap_(verify_pre_sweeping_heap),
155      verify_post_gc_heap_(verify_post_gc_heap),
156      verify_mod_union_table_(false),
157      verify_pre_gc_rosalloc_(verify_pre_gc_rosalloc),
158      verify_pre_sweeping_rosalloc_(verify_pre_sweeping_rosalloc),
159      verify_post_gc_rosalloc_(verify_post_gc_rosalloc),
160      last_gc_time_ns_(NanoTime()),
161      allocation_rate_(0),
162      /* For GC a lot mode, we limit the allocation stacks to be kGcAlotInterval allocations. This
163       * causes a lot of GC since we do a GC for alloc whenever the stack is full. When heap
164       * verification is enabled, we limit the size of allocation stacks to speed up their
165       * searching.
166       */
167      max_allocation_stack_size_(kGCALotMode ? kGcAlotInterval
168          : (kVerifyObjectSupport > kVerifyObjectModeFast) ? KB : MB),
169      current_allocator_(kAllocatorTypeDlMalloc),
170      current_non_moving_allocator_(kAllocatorTypeNonMoving),
171      bump_pointer_space_(nullptr),
172      temp_space_(nullptr),
173      min_free_(min_free),
174      max_free_(max_free),
175      target_utilization_(target_utilization),
176      foreground_heap_growth_multiplier_(foreground_heap_growth_multiplier),
177      total_wait_time_(0),
178      total_allocation_time_(0),
179      verify_object_mode_(kVerifyObjectModeDisabled),
180      disable_moving_gc_count_(0),
181      running_on_valgrind_(Runtime::Current()->RunningOnValgrind()),
182      use_tlab_(use_tlab),
183      main_space_backup_(nullptr),
184      min_interval_homogeneous_space_compaction_by_oom_(
185          min_interval_homogeneous_space_compaction_by_oom),
186      last_time_homogeneous_space_compaction_by_oom_(NanoTime()),
187      use_homogeneous_space_compaction_for_oom_(use_homogeneous_space_compaction_for_oom) {
188  if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
189    LOG(INFO) << "Heap() entering";
190  }
191  // If we aren't the zygote, switch to the default non zygote allocator. This may update the
192  // entrypoints.
193  if (!Runtime::Current()->IsZygote()) {
194    large_object_threshold_ = kDefaultLargeObjectThreshold;
195    // Background compaction is currently not supported for command line runs.
196    if (background_collector_type_ != foreground_collector_type_) {
197      VLOG(heap) << "Disabling background compaction for non zygote";
198      background_collector_type_ = foreground_collector_type_;
199    }
200  }
201  ChangeCollector(desired_collector_type_);
202  live_bitmap_.reset(new accounting::HeapBitmap(this));
203  mark_bitmap_.reset(new accounting::HeapBitmap(this));
204  // Requested begin for the alloc space, to follow the mapped image and oat files
205  byte* requested_alloc_space_begin = nullptr;
206  if (!image_file_name.empty()) {
207    space::ImageSpace* image_space = space::ImageSpace::Create(image_file_name.c_str(),
208                                                               image_instruction_set);
209    CHECK(image_space != nullptr) << "Failed to create space for " << image_file_name;
210    AddSpace(image_space);
211    // Oat files referenced by image files immediately follow them in memory; ensure the alloc
212    // space isn't going to get in the middle.
213    byte* oat_file_end_addr = image_space->GetImageHeader().GetOatFileEnd();
214    CHECK_GT(oat_file_end_addr, image_space->End());
215    requested_alloc_space_begin = AlignUp(oat_file_end_addr, kPageSize);
216  }
217  /*
218  requested_alloc_space_begin ->     +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-
219                                     +-  nonmoving space (kNonMovingSpaceCapacity) +-
220                                     +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-
221                                     +-main alloc space / bump space 1 (capacity_) +-
222                                     +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-
223                                     +-????????????????????????????????????????????+-
224                                     +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-
225                                     +-main alloc space2 / bump space 2 (capacity_)+-
226                                     +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-
227  */
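  // Note: this layout only applies when a separate non moving space is reserved below; otherwise
  // the main space created from main_mem_map_1 also serves as the non moving space.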
228  bool support_homogeneous_space_compaction =
229      background_collector_type == gc::kCollectorTypeHomogeneousSpaceCompact ||
230      use_homogeneous_space_compaction_for_oom;
231  // We may use the same space as the main space for the non moving space if we don't need to
232  // compact from the main space.
233  // This is not the case if we support homogeneous compaction or have a moving background
234  // collector type.
235  const bool is_zygote = Runtime::Current()->IsZygote();
236  bool separate_non_moving_space = is_zygote ||
237      support_homogeneous_space_compaction || IsMovingGc(foreground_collector_type_) ||
238      IsMovingGc(background_collector_type_);
239  if (foreground_collector_type == kCollectorTypeGSS) {
240    separate_non_moving_space = false;
241  }
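  // For the GSS collector the main malloc space created below also serves as the non moving
  // space, and two bump pointer spaces of kGSSBumpPointerSpaceCapacity are created instead of a
  // backup space, so no separate non moving reservation is needed.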
242  std::unique_ptr<MemMap> main_mem_map_1;
243  std::unique_ptr<MemMap> main_mem_map_2;
244  byte* request_begin = requested_alloc_space_begin;
245  if (request_begin != nullptr && separate_non_moving_space) {
246    request_begin += kNonMovingSpaceCapacity;
247  }
248  std::string error_str;
249  std::unique_ptr<MemMap> non_moving_space_mem_map;
250  if (separate_non_moving_space) {
251    // Reserve the non moving mem map before the other two since it needs to be at a specific
252    // address.
253    non_moving_space_mem_map.reset(
254        MemMap::MapAnonymous("non moving space", requested_alloc_space_begin,
255                             kNonMovingSpaceCapacity, PROT_READ | PROT_WRITE, true, &error_str));
256    CHECK(non_moving_space_mem_map != nullptr) << error_str;
257  }
258  // Attempt to create 2 mem maps at or after the requested begin.
259  main_mem_map_1.reset(MapAnonymousPreferredAddress(kMemMapSpaceName[0], request_begin, capacity_,
260                                                    PROT_READ | PROT_WRITE, &error_str));
261  CHECK(main_mem_map_1.get() != nullptr) << error_str;
262  if (support_homogeneous_space_compaction ||
263      background_collector_type_ == kCollectorTypeSS ||
264      foreground_collector_type_ == kCollectorTypeSS) {
265    main_mem_map_2.reset(MapAnonymousPreferredAddress(kMemMapSpaceName[1], main_mem_map_1->End(),
266                                                      capacity_, PROT_READ | PROT_WRITE,
267                                                      &error_str));
268    CHECK(main_mem_map_2.get() != nullptr) << error_str;
269  }
270  // Create the non moving space first so that bitmaps don't take up the address range.
271  if (separate_non_moving_space) {
272    // Non moving space is always dlmalloc since we currently don't have support for multiple
273    // active rosalloc spaces.
274    const size_t size = non_moving_space_mem_map->Size();
275    non_moving_space_ = space::DlMallocSpace::CreateFromMemMap(
276        non_moving_space_mem_map.release(), "zygote / non moving space", initial_size,
277        initial_size, size, size, false);
278    non_moving_space_->SetFootprintLimit(non_moving_space_->Capacity());
279    CHECK(non_moving_space_ != nullptr) << "Failed creating non moving space "
280        << requested_alloc_space_begin;
281    AddSpace(non_moving_space_);
282  }
283  // Create other spaces based on whether or not we have a moving GC.
284  if (IsMovingGc(foreground_collector_type_) && foreground_collector_type_ != kCollectorTypeGSS) {
285    // Create bump pointer spaces.
286    // We only create the bump pointer spaces if the foreground collector is a compacting GC.
287    // TODO: Place bump-pointer spaces somewhere to minimize size of card table.
288    bump_pointer_space_ = space::BumpPointerSpace::CreateFromMemMap("Bump pointer space 1",
289                                                                    main_mem_map_1.release());
290    CHECK(bump_pointer_space_ != nullptr) << "Failed to create bump pointer space";
291    AddSpace(bump_pointer_space_);
292    temp_space_ = space::BumpPointerSpace::CreateFromMemMap("Bump pointer space 2",
293                                                            main_mem_map_2.release());
294    CHECK(temp_space_ != nullptr) << "Failed to create bump pointer space";
295    AddSpace(temp_space_);
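    // Both main mem maps were consumed by the moving spaces above, so the non moving space must
    // have been reserved separately.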
296    CHECK(separate_non_moving_space);
297  } else {
298    CreateMainMallocSpace(main_mem_map_1.release(), initial_size, growth_limit_, capacity_);
299    CHECK(main_space_ != nullptr);
300    AddSpace(main_space_);
301    if (!separate_non_moving_space) {
302      non_moving_space_ = main_space_;
303      CHECK(!non_moving_space_->CanMoveObjects());
304    }
305    if (foreground_collector_type_ == kCollectorTypeGSS) {
306      CHECK_EQ(foreground_collector_type_, background_collector_type_);
307      // Create bump pointer spaces instead of a backup space.
308      main_mem_map_2.release();
309      bump_pointer_space_ = space::BumpPointerSpace::Create("Bump pointer space 1",
310                                                            kGSSBumpPointerSpaceCapacity, nullptr);
311      CHECK(bump_pointer_space_ != nullptr);
312      AddSpace(bump_pointer_space_);
313      temp_space_ = space::BumpPointerSpace::Create("Bump pointer space 2",
314                                                    kGSSBumpPointerSpaceCapacity, nullptr);
315      CHECK(temp_space_ != nullptr);
316      AddSpace(temp_space_);
317    } else if (main_mem_map_2.get() != nullptr) {
318      const char* name = kUseRosAlloc ? kRosAllocSpaceName[1] : kDlMallocSpaceName[1];
319      main_space_backup_.reset(CreateMallocSpaceFromMemMap(main_mem_map_2.release(), initial_size,
320                                                           growth_limit_, capacity_, name, true));
321      CHECK(main_space_backup_.get() != nullptr);
322      // Add the space so it's accounted for in the heap_begin and heap_end.
323      AddSpace(main_space_backup_.get());
324    }
325  }
326  CHECK(non_moving_space_ != nullptr);
327  CHECK(!non_moving_space_->CanMoveObjects());
328  // Allocate the large object space.
329  if (kUseFreeListSpaceForLOS) {
330    large_object_space_ = space::FreeListSpace::Create("large object space", nullptr, capacity_);
331  } else {
332    large_object_space_ = space::LargeObjectMapSpace::Create("large object space");
333  }
334  CHECK(large_object_space_ != nullptr) << "Failed to create large object space";
335  AddSpace(large_object_space_);
336  // Compute heap capacity. Continuous spaces are sorted in order of Begin().
337  CHECK(!continuous_spaces_.empty());
338  // Relies on the spaces being sorted.
339  byte* heap_begin = continuous_spaces_.front()->Begin();
340  byte* heap_end = continuous_spaces_.back()->Limit();
341  size_t heap_capacity = heap_end - heap_begin;
342  // Remove the main backup space since it slows down the GC to have unused extra spaces.
343  if (main_space_backup_.get() != nullptr) {
344    RemoveSpace(main_space_backup_.get());
345  }
346  // Allocate the card table.
347  card_table_.reset(accounting::CardTable::Create(heap_begin, heap_capacity));
348  CHECK(card_table_.get() != NULL) << "Failed to create card table";
349  // Card cache for now since it makes it easier for us to update the references to the copying
350  // spaces.
351  accounting::ModUnionTable* mod_union_table =
352      new accounting::ModUnionTableToZygoteAllocspace("Image mod-union table", this,
353                                                      GetImageSpace());
354  CHECK(mod_union_table != nullptr) << "Failed to create image mod-union table";
355  AddModUnionTable(mod_union_table);
356  if (collector::SemiSpace::kUseRememberedSet && non_moving_space_ != main_space_) {
357    accounting::RememberedSet* non_moving_space_rem_set =
358        new accounting::RememberedSet("Non-moving space remembered set", this, non_moving_space_);
359    CHECK(non_moving_space_rem_set != nullptr) << "Failed to create non-moving space remembered set";
360    AddRememberedSet(non_moving_space_rem_set);
361  }
362  // TODO: Count objects in the image space here?
363  num_bytes_allocated_.StoreRelaxed(0);
364  mark_stack_.reset(accounting::ObjectStack::Create("mark stack", kDefaultMarkStackSize,
365                                                    kDefaultMarkStackSize));
366  const size_t alloc_stack_capacity = max_allocation_stack_size_ + kAllocationStackReserveSize;
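  // The extra reserve entries presumably give the allocation stack head room so that pushes which
  // race with the GC triggered at max_allocation_stack_size_ don't overflow the backing storage.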
367  allocation_stack_.reset(accounting::ObjectStack::Create(
368      "allocation stack", max_allocation_stack_size_, alloc_stack_capacity));
369  live_stack_.reset(accounting::ObjectStack::Create(
370      "live stack", max_allocation_stack_size_, alloc_stack_capacity));
371  // It's still too early to take a lock because there are no threads yet, but we can create locks
372  // now. We don't create them earlier to make it clear that you can't use locks during heap
373  // initialization.
374  gc_complete_lock_ = new Mutex("GC complete lock");
375  gc_complete_cond_.reset(new ConditionVariable("GC complete condition variable",
376                                                *gc_complete_lock_));
377  heap_trim_request_lock_ = new Mutex("Heap trim request lock");
378  last_gc_size_ = GetBytesAllocated();
379  if (ignore_max_footprint_) {
380    SetIdealFootprint(std::numeric_limits<size_t>::max());
381    concurrent_start_bytes_ = std::numeric_limits<size_t>::max();
382  }
383  CHECK_NE(max_allowed_footprint_, 0U);
384  // Create our garbage collectors.
385  for (size_t i = 0; i < 2; ++i) {
386    const bool concurrent = i != 0;
387    garbage_collectors_.push_back(new collector::MarkSweep(this, concurrent));
388    garbage_collectors_.push_back(new collector::PartialMarkSweep(this, concurrent));
389    garbage_collectors_.push_back(new collector::StickyMarkSweep(this, concurrent));
390  }
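  // The loop above registers a non-concurrent (i == 0) and a concurrent (i == 1) variant of each
  // of the three mark-sweep collectors, six collectors in total.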
391  if (kMovingCollector) {
392    // TODO: Clean this up.
393    const bool generational = foreground_collector_type_ == kCollectorTypeGSS;
394    semi_space_collector_ = new collector::SemiSpace(this, generational,
395                                                     generational ? "generational" : "");
396    garbage_collectors_.push_back(semi_space_collector_);
397    concurrent_copying_collector_ = new collector::ConcurrentCopying(this);
398    garbage_collectors_.push_back(concurrent_copying_collector_);
399    mark_compact_collector_ = new collector::MarkCompact(this);
400    garbage_collectors_.push_back(mark_compact_collector_);
401  }
402  if (GetImageSpace() != nullptr && non_moving_space_ != nullptr) {
403    // Check that there's no gap between the image space and the non moving space so that the
404    // immune region won't break (e.g. due to a large object allocated in the gap).
405    bool no_gap = MemMap::CheckNoGaps(GetImageSpace()->GetMemMap(),
406                                      non_moving_space_->GetMemMap());
407    if (!no_gap) {
408      MemMap::DumpMaps(LOG(ERROR));
409      LOG(FATAL) << "There's a gap between the image space and the main space";
410    }
411  }
412  if (running_on_valgrind_) {
413    Runtime::Current()->GetInstrumentation()->InstrumentQuickAllocEntryPoints();
414  }
415  if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
416    LOG(INFO) << "Heap() exiting";
417  }
418}
419
420MemMap* Heap::MapAnonymousPreferredAddress(const char* name, byte* request_begin, size_t capacity,
421                                           int prot_flags, std::string* out_error_str) {
422  while (true) {
423    MemMap* map = MemMap::MapAnonymous(kMemMapSpaceName[0], request_begin, capacity,
424                                       PROT_READ | PROT_WRITE, true, out_error_str);
425    if (map != nullptr || request_begin == nullptr) {
426      return map;
427    }
428    // Retry a second time with no specified request begin.
429    request_begin = nullptr;
430  }
431  return nullptr;
432}
433
434space::MallocSpace* Heap::CreateMallocSpaceFromMemMap(MemMap* mem_map, size_t initial_size,
435                                                      size_t growth_limit, size_t capacity,
436                                                      const char* name, bool can_move_objects) {
437  space::MallocSpace* malloc_space = nullptr;
438  if (kUseRosAlloc) {
439    // Create rosalloc space.
440    malloc_space = space::RosAllocSpace::CreateFromMemMap(mem_map, name, kDefaultStartingSize,
441                                                          initial_size, growth_limit, capacity,
442                                                          low_memory_mode_, can_move_objects);
443  } else {
444    malloc_space = space::DlMallocSpace::CreateFromMemMap(mem_map, name, kDefaultStartingSize,
445                                                          initial_size, growth_limit, capacity,
446                                                          can_move_objects);
447  }
448  if (collector::SemiSpace::kUseRememberedSet) {
449    accounting::RememberedSet* rem_set =
450        new accounting::RememberedSet(std::string(name) + " remembered set", this, malloc_space);
451    CHECK(rem_set != nullptr) << "Failed to create main space remembered set";
452    AddRememberedSet(rem_set);
453  }
454  CHECK(malloc_space != nullptr) << "Failed to create " << name;
455  malloc_space->SetFootprintLimit(malloc_space->Capacity());
456  return malloc_space;
457}
458
459void Heap::CreateMainMallocSpace(MemMap* mem_map, size_t initial_size, size_t growth_limit,
460                                 size_t capacity) {
461  // Is background compaction enabled?
462  bool can_move_objects = IsMovingGc(background_collector_type_) !=
463      IsMovingGc(foreground_collector_type_) || use_homogeneous_space_compaction_for_oom_;
464  // If we are the zygote and don't yet have a zygote space, it means that the zygote fork will
465  // happen in the future. If this happens and we have kCompactZygote enabled, we wish to compact
466  // from the main space to the zygote space. If background compaction is enabled, always pass in
467  // that we can move objects.
468  if (kCompactZygote && Runtime::Current()->IsZygote() && !can_move_objects) {
469    // After the zygote we want this to be false if we don't have background compaction enabled so
470    // that getting primitive array elements is faster.
471    // We never have homogeneous compaction with GSS and don't need a space with movable objects.
472    can_move_objects = !have_zygote_space_ && foreground_collector_type_ != kCollectorTypeGSS;
473  }
474  if (collector::SemiSpace::kUseRememberedSet && main_space_ != nullptr) {
475    RemoveRememberedSet(main_space_);
476  }
477  const char* name = kUseRosAlloc ? kRosAllocSpaceName[0] : kDlMallocSpaceName[0];
478  main_space_ = CreateMallocSpaceFromMemMap(mem_map, initial_size, growth_limit, capacity, name,
479                                            can_move_objects);
480  SetSpaceAsDefault(main_space_);
481  VLOG(heap) << "Created main space " << main_space_;
482}
483
484void Heap::ChangeAllocator(AllocatorType allocator) {
485  if (current_allocator_ != allocator) {
486    // These two allocators are only used internally and don't have any entrypoints.
487    CHECK_NE(allocator, kAllocatorTypeLOS);
488    CHECK_NE(allocator, kAllocatorTypeNonMoving);
489    current_allocator_ = allocator;
490    MutexLock mu(nullptr, *Locks::runtime_shutdown_lock_);
491    SetQuickAllocEntryPointsAllocator(current_allocator_);
492    Runtime::Current()->GetInstrumentation()->ResetQuickAllocEntryPoints();
493  }
494}
495
496void Heap::DisableCompaction() {
497  if (IsMovingGc(foreground_collector_type_)) {
498    foreground_collector_type_ = kCollectorTypeCMS;
499  }
500  if (IsMovingGc(background_collector_type_)) {
501    background_collector_type_ = foreground_collector_type_;
502  }
503  TransitionCollector(foreground_collector_type_);
504}
505
506std::string Heap::SafeGetClassDescriptor(mirror::Class* klass) {
507  if (!IsValidContinuousSpaceObjectAddress(klass)) {
508    return StringPrintf("<non heap address klass %p>", klass);
509  }
510  mirror::Class* component_type = klass->GetComponentType<kVerifyNone>();
511  if (IsValidContinuousSpaceObjectAddress(component_type) && klass->IsArrayClass<kVerifyNone>()) {
512    std::string result("[");
513    result += SafeGetClassDescriptor(component_type);
514    return result;
515  } else if (UNLIKELY(klass->IsPrimitive<kVerifyNone>())) {
516    return Primitive::Descriptor(klass->GetPrimitiveType<kVerifyNone>());
517  } else if (UNLIKELY(klass->IsProxyClass<kVerifyNone>())) {
518    return Runtime::Current()->GetClassLinker()->GetDescriptorForProxy(klass);
519  } else {
520    mirror::DexCache* dex_cache = klass->GetDexCache<kVerifyNone>();
521    if (!IsValidContinuousSpaceObjectAddress(dex_cache)) {
522      return StringPrintf("<non heap address dex_cache %p>", dex_cache);
523    }
524    const DexFile* dex_file = dex_cache->GetDexFile();
525    uint16_t class_def_idx = klass->GetDexClassDefIndex();
526    if (class_def_idx == DexFile::kDexNoIndex16) {
527      return "<class def not found>";
528    }
529    const DexFile::ClassDef& class_def = dex_file->GetClassDef(class_def_idx);
530    const DexFile::TypeId& type_id = dex_file->GetTypeId(class_def.class_idx_);
531    return dex_file->GetTypeDescriptor(type_id);
532  }
533}
534
535std::string Heap::SafePrettyTypeOf(mirror::Object* obj) {
536  if (obj == nullptr) {
537    return "null";
538  }
539  mirror::Class* klass = obj->GetClass<kVerifyNone>();
540  if (klass == nullptr) {
541    return "(class=null)";
542  }
543  std::string result(SafeGetClassDescriptor(klass));
544  if (obj->IsClass()) {
545    result += "<" + SafeGetClassDescriptor(obj->AsClass<kVerifyNone>()) + ">";
546  }
547  return result;
548}
549
550void Heap::DumpObject(std::ostream& stream, mirror::Object* obj) {
551  if (obj == nullptr) {
552    stream << "(obj=null)";
553    return;
554  }
555  if (IsAligned<kObjectAlignment>(obj)) {
556    space::Space* space = nullptr;
557    // Don't use find space since it only finds spaces which actually contain objects instead of
558    // spaces which may contain objects (e.g. cleared bump pointer spaces).
559    for (const auto& cur_space : continuous_spaces_) {
560      if (cur_space->HasAddress(obj)) {
561        space = cur_space;
562        break;
563      }
564    }
565    // Unprotect all the spaces.
566    for (const auto& space : continuous_spaces_) {
567      mprotect(space->Begin(), space->Capacity(), PROT_READ | PROT_WRITE);
568    }
569    stream << "Object " << obj;
570    if (space != nullptr) {
571      stream << " in space " << *space;
572    }
573    mirror::Class* klass = obj->GetClass<kVerifyNone>();
574    stream << "\nclass=" << klass;
575    if (klass != nullptr) {
576      stream << " type= " << SafePrettyTypeOf(obj);
577    }
578    // Re-protect the address we faulted on.
579    mprotect(AlignDown(obj, kPageSize), kPageSize, PROT_NONE);
580  }
581}
582
583bool Heap::IsCompilingBoot() const {
584  for (const auto& space : continuous_spaces_) {
585    if (space->IsImageSpace() || space->IsZygoteSpace()) {
586      return false;
587    }
588  }
589  return true;
590}
591
592bool Heap::HasImageSpace() const {
593  for (const auto& space : continuous_spaces_) {
594    if (space->IsImageSpace()) {
595      return true;
596    }
597  }
598  return false;
599}
600
601void Heap::IncrementDisableMovingGC(Thread* self) {
602  // Need to do this holding the lock to prevent races where the GC is about to run / running when
603  // we attempt to disable it.
604  ScopedThreadStateChange tsc(self, kWaitingForGcToComplete);
605  MutexLock mu(self, *gc_complete_lock_);
606  ++disable_moving_gc_count_;
607  if (IsMovingGc(collector_type_running_)) {
608    WaitForGcToCompleteLocked(kGcCauseDisableMovingGc, self);
609  }
610}
611
612void Heap::DecrementDisableMovingGC(Thread* self) {
613  MutexLock mu(self, *gc_complete_lock_);
614  CHECK_GE(disable_moving_gc_count_, 0U);
615  --disable_moving_gc_count_;
616}
617
618void Heap::UpdateProcessState(ProcessState process_state) {
619  if (process_state_ != process_state) {
620    process_state_ = process_state;
621    for (size_t i = 1; i <= kCollectorTransitionStressIterations; ++i) {
622      // Start at index 1 to avoid "is always false" warning.
623      // Have iteration 1 always transition the collector.
624      TransitionCollector((((i & 1) == 1) == (process_state_ == kProcessStateJankPerceptible))
625                          ? foreground_collector_type_ : background_collector_type_);
626      usleep(kCollectorTransitionStressWait);
627    }
628    if (process_state_ == kProcessStateJankPerceptible) {
629      // Transition back to foreground right away to prevent jank.
630      RequestCollectorTransition(foreground_collector_type_, 0);
631    } else {
632      // Don't delay for debug builds since we may want to stress test the GC.
633      // If background_collector_type_ is kCollectorTypeHomogeneousSpaceCompact then we have
634      // special handling which does a homogenous space compaction once but then doesn't transition
635      // the collector.
636      RequestCollectorTransition(background_collector_type_,
637                                 kIsDebugBuild ? 0 : kCollectorTransitionWait);
638    }
639  }
640}
641
642void Heap::CreateThreadPool() {
643  const size_t num_threads = std::max(parallel_gc_threads_, conc_gc_threads_);
644  if (num_threads != 0) {
645    thread_pool_.reset(new ThreadPool("Heap thread pool", num_threads));
646  }
647}
648
649void Heap::VisitObjects(ObjectCallback callback, void* arg) {
650  Thread* self = Thread::Current();
651  // GCs can move objects, so don't allow this.
652  const char* old_cause = self->StartAssertNoThreadSuspension("Visiting objects");
653  if (bump_pointer_space_ != nullptr) {
654    // Visit objects in bump pointer space.
655    bump_pointer_space_->Walk(callback, arg);
656  }
657  // TODO: Switch to standard begin and end to use a range-based loop.
658  for (mirror::Object** it = allocation_stack_->Begin(), **end = allocation_stack_->End();
659      it < end; ++it) {
660    mirror::Object* obj = *it;
661    if (obj != nullptr && obj->GetClass() != nullptr) {
662      // Avoid the race condition caused by the object not yet being written into the allocation
663      // stack or the class not yet being written in the object. Or, if kUseThreadLocalAllocationStack,
664      // there can be nulls on the allocation stack.
665      callback(obj, arg);
666    }
667  }
668  GetLiveBitmap()->Walk(callback, arg);
669  self->EndAssertNoThreadSuspension(old_cause);
670}
671
672void Heap::MarkAllocStackAsLive(accounting::ObjectStack* stack) {
673  space::ContinuousSpace* space1 = main_space_ != nullptr ? main_space_ : non_moving_space_;
674  space::ContinuousSpace* space2 = non_moving_space_;
675  // TODO: Generalize this to n bitmaps?
676  CHECK(space1 != nullptr);
677  CHECK(space2 != nullptr);
678  MarkAllocStack(space1->GetLiveBitmap(), space2->GetLiveBitmap(),
679                 large_object_space_->GetLiveBitmap(), stack);
680}
681
682void Heap::DeleteThreadPool() {
683  thread_pool_.reset(nullptr);
684}
685
686void Heap::AddSpace(space::Space* space) {
687  CHECK(space != nullptr);
688  WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
689  if (space->IsContinuousSpace()) {
690    DCHECK(!space->IsDiscontinuousSpace());
691    space::ContinuousSpace* continuous_space = space->AsContinuousSpace();
692    // Continuous spaces don't necessarily have bitmaps.
693    accounting::ContinuousSpaceBitmap* live_bitmap = continuous_space->GetLiveBitmap();
694    accounting::ContinuousSpaceBitmap* mark_bitmap = continuous_space->GetMarkBitmap();
695    if (live_bitmap != nullptr) {
696      DCHECK(mark_bitmap != nullptr);
697      live_bitmap_->AddContinuousSpaceBitmap(live_bitmap);
698      mark_bitmap_->AddContinuousSpaceBitmap(mark_bitmap);
699    }
700    continuous_spaces_.push_back(continuous_space);
701    // Ensure that spaces remain sorted in increasing order of start address.
702    std::sort(continuous_spaces_.begin(), continuous_spaces_.end(),
703              [](const space::ContinuousSpace* a, const space::ContinuousSpace* b) {
704      return a->Begin() < b->Begin();
705    });
706  } else {
707    DCHECK(space->IsDiscontinuousSpace());
708    space::DiscontinuousSpace* discontinuous_space = space->AsDiscontinuousSpace();
709    live_bitmap_->AddLargeObjectBitmap(discontinuous_space->GetLiveBitmap());
710    mark_bitmap_->AddLargeObjectBitmap(discontinuous_space->GetMarkBitmap());
711    discontinuous_spaces_.push_back(discontinuous_space);
712  }
713  if (space->IsAllocSpace()) {
714    alloc_spaces_.push_back(space->AsAllocSpace());
715  }
716}
717
718void Heap::SetSpaceAsDefault(space::ContinuousSpace* continuous_space) {
719  WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
720  if (continuous_space->IsDlMallocSpace()) {
721    dlmalloc_space_ = continuous_space->AsDlMallocSpace();
722  } else if (continuous_space->IsRosAllocSpace()) {
723    rosalloc_space_ = continuous_space->AsRosAllocSpace();
724  }
725}
726
727void Heap::RemoveSpace(space::Space* space) {
728  DCHECK(space != nullptr);
729  WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
730  if (space->IsContinuousSpace()) {
731    DCHECK(!space->IsDiscontinuousSpace());
732    space::ContinuousSpace* continuous_space = space->AsContinuousSpace();
733    // Continuous spaces don't necessarily have bitmaps.
734    accounting::ContinuousSpaceBitmap* live_bitmap = continuous_space->GetLiveBitmap();
735    accounting::ContinuousSpaceBitmap* mark_bitmap = continuous_space->GetMarkBitmap();
736    if (live_bitmap != nullptr) {
737      DCHECK(mark_bitmap != nullptr);
738      live_bitmap_->RemoveContinuousSpaceBitmap(live_bitmap);
739      mark_bitmap_->RemoveContinuousSpaceBitmap(mark_bitmap);
740    }
741    auto it = std::find(continuous_spaces_.begin(), continuous_spaces_.end(), continuous_space);
742    DCHECK(it != continuous_spaces_.end());
743    continuous_spaces_.erase(it);
744  } else {
745    DCHECK(space->IsDiscontinuousSpace());
746    space::DiscontinuousSpace* discontinuous_space = space->AsDiscontinuousSpace();
747    live_bitmap_->RemoveLargeObjectBitmap(discontinuous_space->GetLiveBitmap());
748    mark_bitmap_->RemoveLargeObjectBitmap(discontinuous_space->GetMarkBitmap());
749    auto it = std::find(discontinuous_spaces_.begin(), discontinuous_spaces_.end(),
750                        discontinuous_space);
751    DCHECK(it != discontinuous_spaces_.end());
752    discontinuous_spaces_.erase(it);
753  }
754  if (space->IsAllocSpace()) {
755    auto it = std::find(alloc_spaces_.begin(), alloc_spaces_.end(), space->AsAllocSpace());
756    DCHECK(it != alloc_spaces_.end());
757    alloc_spaces_.erase(it);
758  }
759}
760
761void Heap::RegisterGCAllocation(size_t bytes) {
762  if (this != nullptr) {
763    gc_memory_overhead_.FetchAndAddSequentiallyConsistent(bytes);
764  }
765}
766
767void Heap::RegisterGCDeAllocation(size_t bytes) {
768  if (this != nullptr) {
769    gc_memory_overhead_.FetchAndSubSequentiallyConsistent(bytes);
770  }
771}
772
773void Heap::DumpGcPerformanceInfo(std::ostream& os) {
774  // Dump cumulative timings.
775  os << "Dumping cumulative Gc timings\n";
776  uint64_t total_duration = 0;
777  // Dump cumulative loggers for each GC type.
778  uint64_t total_paused_time = 0;
779  for (auto& collector : garbage_collectors_) {
780    const CumulativeLogger& logger = collector->GetCumulativeTimings();
781    const size_t iterations = logger.GetIterations();
782    const Histogram<uint64_t>& pause_histogram = collector->GetPauseHistogram();
783    if (iterations != 0 && pause_histogram.SampleSize() != 0) {
784      os << ConstDumpable<CumulativeLogger>(logger);
785      const uint64_t total_ns = logger.GetTotalNs();
786      const uint64_t total_pause_ns = collector->GetTotalPausedTimeNs();
787      double seconds = NsToMs(logger.GetTotalNs()) / 1000.0;
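      // NsToMs() yields milliseconds, so dividing by 1000.0 converts the total GC time to seconds
      // for the throughput figures below.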
788      const uint64_t freed_bytes = collector->GetTotalFreedBytes();
789      const uint64_t freed_objects = collector->GetTotalFreedObjects();
790      Histogram<uint64_t>::CumulativeData cumulative_data;
791      pause_histogram.CreateHistogram(&cumulative_data);
792      pause_histogram.PrintConfidenceIntervals(os, 0.99, cumulative_data);
793      os << collector->GetName() << " total time: " << PrettyDuration(total_ns)
794         << " mean time: " << PrettyDuration(total_ns / iterations) << "\n"
795         << collector->GetName() << " freed: " << freed_objects
796         << " objects with total size " << PrettySize(freed_bytes) << "\n"
797         << collector->GetName() << " throughput: " << freed_objects / seconds << "/s / "
798         << PrettySize(freed_bytes / seconds) << "/s\n";
799      total_duration += total_ns;
800      total_paused_time += total_pause_ns;
801    }
802    collector->ResetMeasurements();
803  }
804  uint64_t allocation_time =
805      static_cast<uint64_t>(total_allocation_time_.LoadRelaxed()) * kTimeAdjust;
806  if (total_duration != 0) {
807    const double total_seconds = static_cast<double>(total_duration / 1000) / 1000000.0;
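    // total_duration is in nanoseconds: dividing by 1000 gives microseconds, and dividing by a
    // further 1000000.0 gives seconds.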
808    os << "Total time spent in GC: " << PrettyDuration(total_duration) << "\n";
809    os << "Mean GC size throughput: "
810       << PrettySize(GetBytesFreedEver() / total_seconds) << "/s\n";
811    os << "Mean GC object throughput: "
812       << (GetObjectsFreedEver() / total_seconds) << " objects/s\n";
813  }
814  size_t total_objects_allocated = GetObjectsAllocatedEver();
815  os << "Total number of allocations: " << total_objects_allocated << "\n";
816  size_t total_bytes_allocated = GetBytesAllocatedEver();
817  os << "Total bytes allocated " << PrettySize(total_bytes_allocated) << "\n";
818  if (kMeasureAllocationTime) {
819    os << "Total time spent allocating: " << PrettyDuration(allocation_time) << "\n";
820    os << "Mean allocation time: " << PrettyDuration(allocation_time / total_objects_allocated)
821       << "\n";
822  }
823  os << "Total mutator paused time: " << PrettyDuration(total_paused_time) << "\n";
824  os << "Total time waiting for GC to complete: " << PrettyDuration(total_wait_time_) << "\n";
825  os << "Approximate GC data structures memory overhead: " << gc_memory_overhead_.LoadRelaxed();
826  BaseMutex::DumpAll(os);
827}
828
829Heap::~Heap() {
830  VLOG(heap) << "Starting ~Heap()";
831  STLDeleteElements(&garbage_collectors_);
832  // If we don't reset then the mark stack complains in its destructor.
833  allocation_stack_->Reset();
834  live_stack_->Reset();
835  STLDeleteValues(&mod_union_tables_);
836  STLDeleteValues(&remembered_sets_);
837  STLDeleteElements(&continuous_spaces_);
838  STLDeleteElements(&discontinuous_spaces_);
839  delete gc_complete_lock_;
840  delete heap_trim_request_lock_;
841  VLOG(heap) << "Finished ~Heap()";
842}
843
844space::ContinuousSpace* Heap::FindContinuousSpaceFromObject(const mirror::Object* obj,
845                                                            bool fail_ok) const {
846  for (const auto& space : continuous_spaces_) {
847    if (space->Contains(obj)) {
848      return space;
849    }
850  }
851  if (!fail_ok) {
852    LOG(FATAL) << "object " << reinterpret_cast<const void*>(obj) << " not inside any spaces!";
853  }
854  return NULL;
855}
856
857space::DiscontinuousSpace* Heap::FindDiscontinuousSpaceFromObject(const mirror::Object* obj,
858                                                                  bool fail_ok) const {
859  for (const auto& space : discontinuous_spaces_) {
860    if (space->Contains(obj)) {
861      return space;
862    }
863  }
864  if (!fail_ok) {
865    LOG(FATAL) << "object " << reinterpret_cast<const void*>(obj) << " not inside any spaces!";
866  }
867  return NULL;
868}
869
870space::Space* Heap::FindSpaceFromObject(const mirror::Object* obj, bool fail_ok) const {
871  space::Space* result = FindContinuousSpaceFromObject(obj, true);
872  if (result != NULL) {
873    return result;
874  }
875  return FindDiscontinuousSpaceFromObject(obj, true);
876}
877
878space::ImageSpace* Heap::GetImageSpace() const {
879  for (const auto& space : continuous_spaces_) {
880    if (space->IsImageSpace()) {
881      return space->AsImageSpace();
882    }
883  }
884  return NULL;
885}
886
887void Heap::ThrowOutOfMemoryError(Thread* self, size_t byte_count, AllocatorType allocator_type) {
888  std::ostringstream oss;
889  size_t total_bytes_free = GetFreeMemory();
890  oss << "Failed to allocate a " << byte_count << " byte allocation with " << total_bytes_free
891      << " free bytes";
892  // If the allocation failed due to fragmentation, print out the largest continuous allocation.
893  if (total_bytes_free >= byte_count) {
894    space::AllocSpace* space = nullptr;
895    if (allocator_type == kAllocatorTypeNonMoving) {
896      space = non_moving_space_;
897    } else if (allocator_type == kAllocatorTypeRosAlloc ||
898               allocator_type == kAllocatorTypeDlMalloc) {
899      space = main_space_;
900    } else if (allocator_type == kAllocatorTypeBumpPointer ||
901               allocator_type == kAllocatorTypeTLAB) {
902      space = bump_pointer_space_;
903    }
904    if (space != nullptr) {
905      space->LogFragmentationAllocFailure(oss, byte_count);
906    }
907  }
908  self->ThrowOutOfMemoryError(oss.str().c_str());
909}
910
911void Heap::DoPendingTransitionOrTrim() {
912  Thread* self = Thread::Current();
913  CollectorType desired_collector_type;
914  // Wait until we reach the desired transition time.
915  while (true) {
916    uint64_t wait_time;
917    {
918      MutexLock mu(self, *heap_trim_request_lock_);
919      desired_collector_type = desired_collector_type_;
920      uint64_t current_time = NanoTime();
921      if (current_time >= heap_transition_or_trim_target_time_) {
922        break;
923      }
924      wait_time = heap_transition_or_trim_target_time_ - current_time;
925    }
926    ScopedThreadStateChange tsc(self, kSleeping);
927    usleep(wait_time / 1000);  // Usleep takes microseconds.
928  }
929  // Launch homogeneous space compaction if it is desired.
930  if (desired_collector_type == kCollectorTypeHomogeneousSpaceCompact) {
931    if (!CareAboutPauseTimes()) {
932      PerformHomogeneousSpaceCompact();
933    }
934    // No need to Trim(). Homogeneous space compaction may free more virtual and physical memory.
935    desired_collector_type = collector_type_;
936    return;
937  }
938  // Transition the collector if the desired collector type is not the same as the current
939  // collector type.
940  TransitionCollector(desired_collector_type);
941  if (!CareAboutPauseTimes()) {
942    // Deflate the monitors; this can cause a pause but shouldn't matter since we don't care
943    // about pauses.
944    Runtime* runtime = Runtime::Current();
945    runtime->GetThreadList()->SuspendAll();
946    uint64_t start_time = NanoTime();
947    size_t count = runtime->GetMonitorList()->DeflateMonitors();
948    VLOG(heap) << "Deflating " << count << " monitors took "
949        << PrettyDuration(NanoTime() - start_time);
950    runtime->GetThreadList()->ResumeAll();
951  }
952  // Do a heap trim if it is needed.
953  Trim();
954}
955
956void Heap::Trim() {
957  Thread* self = Thread::Current();
958  {
959    MutexLock mu(self, *heap_trim_request_lock_);
960    if (!heap_trim_request_pending_ || last_trim_time_ + kHeapTrimWait >= NanoTime()) {
961      return;
962    }
963    last_trim_time_ = NanoTime();
964    heap_trim_request_pending_ = false;
965  }
966  {
967    // Need to do this before acquiring the locks since we don't want to get suspended while
968    // holding any locks.
969    ScopedThreadStateChange tsc(self, kWaitingForGcToComplete);
970    // Pretend we are doing a GC to prevent background compaction from deleting the space we are
971    // trimming.
972    MutexLock mu(self, *gc_complete_lock_);
973    // Ensure there is only one GC at a time.
974    WaitForGcToCompleteLocked(kGcCauseTrim, self);
975    collector_type_running_ = kCollectorTypeHeapTrim;
976  }
977  uint64_t start_ns = NanoTime();
978  // Trim the managed spaces.
979  uint64_t total_alloc_space_allocated = 0;
980  uint64_t total_alloc_space_size = 0;
981  uint64_t managed_reclaimed = 0;
982  for (const auto& space : continuous_spaces_) {
983    if (space->IsMallocSpace()) {
984      gc::space::MallocSpace* malloc_space = space->AsMallocSpace();
985      if (malloc_space->IsRosAllocSpace() || !CareAboutPauseTimes()) {
986        // Don't trim dlmalloc spaces if we care about pauses since this can hold the space lock
987        // for a long period of time.
988        managed_reclaimed += malloc_space->Trim();
989      }
990      total_alloc_space_size += malloc_space->Size();
991    }
992  }
993  total_alloc_space_allocated = GetBytesAllocated() - large_object_space_->GetBytesAllocated();
994  if (bump_pointer_space_ != nullptr) {
995    total_alloc_space_allocated -= bump_pointer_space_->Size();
996  }
997  const float managed_utilization = static_cast<float>(total_alloc_space_allocated) /
998      static_cast<float>(total_alloc_space_size);
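  // Fraction of the malloc space footprint occupied by allocated bytes; reported as a percentage
  // in the VLOG below.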
999  uint64_t gc_heap_end_ns = NanoTime();
1000  // We never move things in the native heap, so we can finish the GC at this point.
1001  FinishGC(self, collector::kGcTypeNone);
1002  size_t native_reclaimed = 0;
1003  // Only trim the native heap if we don't care about pauses.
1004  if (!CareAboutPauseTimes()) {
1005#if defined(USE_DLMALLOC)
1006    // Trim the native heap.
1007    dlmalloc_trim(0);
1008    dlmalloc_inspect_all(DlmallocMadviseCallback, &native_reclaimed);
1009#elif defined(USE_JEMALLOC)
1010    // Jemalloc does its own internal trimming.
1011#else
1012    UNIMPLEMENTED(WARNING) << "Add trimming support";
1013#endif
1014  }
1015  uint64_t end_ns = NanoTime();
1016  VLOG(heap) << "Heap trim of managed (duration=" << PrettyDuration(gc_heap_end_ns - start_ns)
1017      << ", advised=" << PrettySize(managed_reclaimed) << ") and native (duration="
1018      << PrettyDuration(end_ns - gc_heap_end_ns) << ", advised=" << PrettySize(native_reclaimed)
1019      << ") heaps. Managed heap utilization of " << static_cast<int>(100 * managed_utilization)
1020      << "%.";
1021}
1022
1023bool Heap::IsValidObjectAddress(const mirror::Object* obj) const {
1024  // Note: we deliberately don't take the lock here, and mustn't test anything that would require
1025  // taking the lock.
1026  if (obj == nullptr) {
1027    return true;
1028  }
1029  return IsAligned<kObjectAlignment>(obj) && FindSpaceFromObject(obj, true) != nullptr;
1030}
1031
1032bool Heap::IsNonDiscontinuousSpaceHeapAddress(const mirror::Object* obj) const {
1033  return FindContinuousSpaceFromObject(obj, true) != nullptr;
1034}
1035
1036bool Heap::IsValidContinuousSpaceObjectAddress(const mirror::Object* obj) const {
1037  if (obj == nullptr || !IsAligned<kObjectAlignment>(obj)) {
1038    return false;
1039  }
1040  for (const auto& space : continuous_spaces_) {
1041    if (space->HasAddress(obj)) {
1042      return true;
1043    }
1044  }
1045  return false;
1046}
1047
1048bool Heap::IsLiveObjectLocked(mirror::Object* obj, bool search_allocation_stack,
1049                              bool search_live_stack, bool sorted) {
1050  if (UNLIKELY(!IsAligned<kObjectAlignment>(obj))) {
1051    return false;
1052  }
1053  if (bump_pointer_space_ != nullptr && bump_pointer_space_->HasAddress(obj)) {
1054    mirror::Class* klass = obj->GetClass<kVerifyNone>();
1055    if (obj == klass) {
1056      // This case happens for java.lang.Class.
1057      return true;
1058    }
1059    return VerifyClassClass(klass) && IsLiveObjectLocked(klass);
1060  } else if (temp_space_ != nullptr && temp_space_->HasAddress(obj)) {
1061    // If we are in the allocated region of the temp space, then we are probably live (e.g. during
1062    // a GC). When a GC isn't running End() - Begin() is 0 which means no objects are contained.
1063    return temp_space_->Contains(obj);
1064  }
1065  space::ContinuousSpace* c_space = FindContinuousSpaceFromObject(obj, true);
1066  space::DiscontinuousSpace* d_space = nullptr;
1067  if (c_space != nullptr) {
1068    if (c_space->GetLiveBitmap()->Test(obj)) {
1069      return true;
1070    }
1071  } else {
1072    d_space = FindDiscontinuousSpaceFromObject(obj, true);
1073    if (d_space != nullptr) {
1074      if (d_space->GetLiveBitmap()->Test(obj)) {
1075        return true;
1076      }
1077    }
1078  }
1079  // This is covering the allocation/live stack swapping that is done without mutators suspended.
1080  for (size_t i = 0; i < (sorted ? 1 : 5); ++i) {
1081    if (i > 0) {
1082      NanoSleep(MsToNs(10));
1083    }
1084    if (search_allocation_stack) {
1085      if (sorted) {
1086        if (allocation_stack_->ContainsSorted(obj)) {
1087          return true;
1088        }
1089      } else if (allocation_stack_->Contains(obj)) {
1090        return true;
1091      }
1092    }
1093
1094    if (search_live_stack) {
1095      if (sorted) {
1096        if (live_stack_->ContainsSorted(obj)) {
1097          return true;
1098        }
1099      } else if (live_stack_->Contains(obj)) {
1100        return true;
1101      }
1102    }
1103  }
1104  // We need to check the bitmaps again since there is a race where we mark something as live and
1105  // then clear the stack containing it.
1106  if (c_space != nullptr) {
1107    if (c_space->GetLiveBitmap()->Test(obj)) {
1108      return true;
1109    }
1110  } else {
1111    d_space = FindDiscontinuousSpaceFromObject(obj, true);
1112    if (d_space != nullptr && d_space->GetLiveBitmap()->Test(obj)) {
1113      return true;
1114    }
1115  }
1116  return false;
1117}
1118
1119std::string Heap::DumpSpaces() const {
1120  std::ostringstream oss;
1121  DumpSpaces(oss);
1122  return oss.str();
1123}
1124
1125void Heap::DumpSpaces(std::ostream& stream) const {
1126  for (const auto& space : continuous_spaces_) {
1127    accounting::ContinuousSpaceBitmap* live_bitmap = space->GetLiveBitmap();
1128    accounting::ContinuousSpaceBitmap* mark_bitmap = space->GetMarkBitmap();
1129    stream << space << " " << *space << "\n";
1130    if (live_bitmap != nullptr) {
1131      stream << live_bitmap << " " << *live_bitmap << "\n";
1132    }
1133    if (mark_bitmap != nullptr) {
1134      stream << mark_bitmap << " " << *mark_bitmap << "\n";
1135    }
1136  }
1137  for (const auto& space : discontinuous_spaces_) {
1138    stream << space << " " << *space << "\n";
1139  }
1140}
1141
1142void Heap::VerifyObjectBody(mirror::Object* obj) {
1143  if (this == nullptr && verify_object_mode_ == kVerifyObjectModeDisabled) {
1144    return;
1145  }
1146  // Ignore early dawn of the universe verifications.
1147  if (UNLIKELY(static_cast<size_t>(num_bytes_allocated_.LoadRelaxed()) < 10 * KB)) {
1148    return;
1149  }
1150  CHECK(IsAligned<kObjectAlignment>(obj)) << "Object isn't aligned: " << obj;
1151  mirror::Class* c = obj->GetFieldObject<mirror::Class, kVerifyNone>(mirror::Object::ClassOffset());
1152  CHECK(c != nullptr) << "Null class in object " << obj;
1153  CHECK(IsAligned<kObjectAlignment>(c)) << "Class " << c << " not aligned in object " << obj;
1154  CHECK(VerifyClassClass(c));
1155
1156  if (verify_object_mode_ > kVerifyObjectModeFast) {
1157    // Note: the bitmap tests below are racy since we don't hold the heap bitmap lock.
1158    CHECK(IsLiveObjectLocked(obj)) << "Object is dead " << obj << "\n" << DumpSpaces();
1159  }
1160}
1161
1162void Heap::VerificationCallback(mirror::Object* obj, void* arg) {
1163  reinterpret_cast<Heap*>(arg)->VerifyObjectBody(obj);
1164}
1165
1166void Heap::VerifyHeap() {
1167  ReaderMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
1168  GetLiveBitmap()->Walk(Heap::VerificationCallback, this);
1169}
1170
1171void Heap::RecordFree(uint64_t freed_objects, int64_t freed_bytes) {
1172  // Use signed comparison since freed bytes can be negative when a background compaction to
1173  // foreground transition occurs. This is caused by moving objects from a bump pointer space to a
1174  // free list backed space, which typically increases memory footprint due to padding and binning.
1175  DCHECK_LE(freed_bytes, static_cast<int64_t>(num_bytes_allocated_.LoadRelaxed()));
1176  // Note: This relies on 2s complement for handling negative freed_bytes.
1177  num_bytes_allocated_.FetchAndSubSequentiallyConsistent(static_cast<ssize_t>(freed_bytes));
1178  if (Runtime::Current()->HasStatsEnabled()) {
1179    RuntimeStats* thread_stats = Thread::Current()->GetStats();
1180    thread_stats->freed_objects += freed_objects;
1181    thread_stats->freed_bytes += freed_bytes;
1182    // TODO: Do this concurrently.
1183    RuntimeStats* global_stats = Runtime::Current()->GetStats();
1184    global_stats->freed_objects += freed_objects;
1185    global_stats->freed_bytes += freed_bytes;
1186  }
1187}
1188
1189space::RosAllocSpace* Heap::GetRosAllocSpace(gc::allocator::RosAlloc* rosalloc) const {
1190  for (const auto& space : continuous_spaces_) {
1191    if (space->AsContinuousSpace()->IsRosAllocSpace()) {
1192      if (space->AsContinuousSpace()->AsRosAllocSpace()->GetRosAlloc() == rosalloc) {
1193        return space->AsContinuousSpace()->AsRosAllocSpace();
1194      }
1195    }
1196  }
1197  return nullptr;
1198}
1199
1200mirror::Object* Heap::AllocateInternalWithGc(Thread* self, AllocatorType allocator,
1201                                             size_t alloc_size, size_t* bytes_allocated,
1202                                             size_t* usable_size,
1203                                             mirror::Class** klass) {
1204  bool was_default_allocator = allocator == GetCurrentAllocator();
1205  DCHECK(klass != nullptr);
1206  StackHandleScope<1> hs(self);
1207  HandleWrapper<mirror::Class> h(hs.NewHandleWrapper(klass));
1208  klass = nullptr;  // Invalidate for safety.
1209  // The allocation failed. If the GC is running, block until it completes, and then retry the
1210  // allocation.
1211  collector::GcType last_gc = WaitForGcToComplete(kGcCauseForAlloc, self);
1212  if (last_gc != collector::kGcTypeNone) {
1213    // If we were the default allocator but the allocator changed while we were suspended,
1214    // abort the allocation.
1215    if (was_default_allocator && allocator != GetCurrentAllocator()) {
1216      return nullptr;
1217    }
1218    // A GC was in progress and we blocked, retry allocation now that memory has been freed.
1219    mirror::Object* ptr = TryToAllocate<true, false>(self, allocator, alloc_size, bytes_allocated,
1220                                                     usable_size);
1221    if (ptr != nullptr) {
1222      return ptr;
1223    }
1224  }
1225
1226  collector::GcType tried_type = next_gc_type_;
1227  const bool gc_ran =
1228      CollectGarbageInternal(tried_type, kGcCauseForAlloc, false) != collector::kGcTypeNone;
1229  if (was_default_allocator && allocator != GetCurrentAllocator()) {
1230    return nullptr;
1231  }
1232  if (gc_ran) {
1233    mirror::Object* ptr = TryToAllocate<true, false>(self, allocator, alloc_size, bytes_allocated,
1234                                                     usable_size);
1235    if (ptr != nullptr) {
1236      return ptr;
1237    }
1238  }
1239
1240  // Loop through our different GC types and try to GC until we get enough free memory.
1241  for (collector::GcType gc_type : gc_plan_) {
1242    if (gc_type == tried_type) {
1243      continue;
1244    }
1245    // Attempt to run the collector, if we succeed, re-try the allocation.
1246    const bool gc_ran =
1247        CollectGarbageInternal(gc_type, kGcCauseForAlloc, false) != collector::kGcTypeNone;
1248    if (was_default_allocator && allocator != GetCurrentAllocator()) {
1249      return nullptr;
1250    }
1251    if (gc_ran) {
1252      // Did we free sufficient memory for the allocation to succeed?
1253      mirror::Object* ptr = TryToAllocate<true, false>(self, allocator, alloc_size, bytes_allocated,
1254                                                       usable_size);
1255      if (ptr != nullptr) {
1256        return ptr;
1257      }
1258    }
1259  }
1260  // Allocations have failed after GCs;  this is an exceptional state.
1261  // Try harder, growing the heap if necessary.
1262  mirror::Object* ptr = TryToAllocate<true, true>(self, allocator, alloc_size, bytes_allocated,
1263                                                  usable_size);
1264  if (ptr != nullptr) {
1265    return ptr;
1266  }
1267  // Most allocations should have succeeded by now, so the heap is really full, really fragmented,
1268  // or the requested size is really big. Do another GC, collecting SoftReferences this time. The
1269  // VM spec requires that all SoftReferences have been collected and cleared before throwing
1270  // OOME.
1271  VLOG(gc) << "Forcing collection of SoftReferences for " << PrettySize(alloc_size)
1272           << " allocation";
1273  // TODO: Run finalization, but this may cause more allocations to occur.
1274  // We don't need a WaitForGcToComplete here either.
1275  DCHECK(!gc_plan_.empty());
1276  CollectGarbageInternal(gc_plan_.back(), kGcCauseForAlloc, true);
1277  if (was_default_allocator && allocator != GetCurrentAllocator()) {
1278    return nullptr;
1279  }
1280  ptr = TryToAllocate<true, true>(self, allocator, alloc_size, bytes_allocated, usable_size);
1281  if (ptr == nullptr && use_homogeneous_space_compaction_for_oom_) {
1282    const uint64_t current_time = NanoTime();
1283    if ((allocator == kAllocatorTypeRosAlloc || allocator == kAllocatorTypeDlMalloc) &&
1284        current_time - last_time_homogeneous_space_compaction_by_oom_ >
1285        min_interval_homogeneous_space_compaction_by_oom_) {
1286      last_time_homogeneous_space_compaction_by_oom_ = current_time;
1287      HomogeneousSpaceCompactResult result = PerformHomogeneousSpaceCompact();
1288      switch (result) {
1289        case HomogeneousSpaceCompactResult::kSuccess:
1290          // If the allocation succeeded, we delayed an OOM.
1291          ptr = TryToAllocate<true, true>(self, allocator, alloc_size, bytes_allocated, usable_size);
1292          if (ptr != nullptr) {
1293            count_delayed_oom_++;
1294          }
1295          break;
1296        case HomogeneousSpaceCompactResult::kErrorReject:
1297          // Reject due to disabled moving GC.
1298          break;
1299        case HomogeneousSpaceCompactResult::kErrorVMShuttingDown:
1300          // Throw OOM by default.
1301          break;
1302        default: {
1303          LOG(FATAL) << "Unimplemented homogeneous space compaction result " << static_cast<size_t>(result);
1304        }
1305      }
1306      // Always print that we ran homogeneous space compaction since this can cause jank.
1307      VLOG(heap) << "Ran heap homogeneous space compaction, "
1308                << " requested defragmentation "
1309                << count_requested_homogeneous_space_compaction_.LoadSequentiallyConsistent()
1310                << " performed defragmentation "
1311                << count_performed_homogeneous_space_compaction_.LoadSequentiallyConsistent()
1312                << " ignored homogeneous space compaction "
1313                << count_ignored_homogeneous_space_compaction_.LoadSequentiallyConsistent()
1314                << " delayed count = "
1315                << count_delayed_oom_.LoadSequentiallyConsistent();
1316    }
1317  }
1318  // If the allocation hasn't succeeded by this point, throw an OOM error.
1319  if (ptr == nullptr) {
1320    ThrowOutOfMemoryError(self, alloc_size, allocator);
1321  }
1322  return ptr;
1323}
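// For orientation, an editorial summary of the escalation ladder implemented above:
//   1. Wait for any in-progress GC to finish, then retry the allocation without growing the heap.
//   2. Run the next planned GC type (next_gc_type_) and retry.
//   3. Run every remaining GC type in gc_plan_, retrying after each one.
//   4. Retry once more, this time allowing the heap to grow.
//   5. Run the most thorough GC in gc_plan_, clearing SoftReferences, and retry with growth.
//   6. If enabled, attempt homogeneous space compaction and retry.
//   7. If all of the above fail, throw OutOfMemoryError.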
1324
1325void Heap::SetTargetHeapUtilization(float target) {
1326  DCHECK_GT(target, 0.0f);  // asserted in Java code
1327  DCHECK_LT(target, 1.0f);
1328  target_utilization_ = target;
1329}
1330
1331size_t Heap::GetObjectsAllocated() const {
1332  size_t total = 0;
1333  for (space::AllocSpace* space : alloc_spaces_) {
1334    total += space->GetObjectsAllocated();
1335  }
1336  return total;
1337}
1338
1339size_t Heap::GetObjectsAllocatedEver() const {
1340  return GetObjectsFreedEver() + GetObjectsAllocated();
1341}
1342
1343size_t Heap::GetBytesAllocatedEver() const {
1344  return GetBytesFreedEver() + GetBytesAllocated();
1345}
1346
1347class InstanceCounter {
1348 public:
1349  InstanceCounter(const std::vector<mirror::Class*>& classes, bool use_is_assignable_from, uint64_t* counts)
1350      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
1351      : classes_(classes), use_is_assignable_from_(use_is_assignable_from), counts_(counts) {
1352  }
1353  static void Callback(mirror::Object* obj, void* arg)
1354      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
1355    InstanceCounter* instance_counter = reinterpret_cast<InstanceCounter*>(arg);
1356    mirror::Class* instance_class = obj->GetClass();
1357    CHECK(instance_class != nullptr);
1358    for (size_t i = 0; i < instance_counter->classes_.size(); ++i) {
1359      if (instance_counter->use_is_assignable_from_) {
1360        if (instance_counter->classes_[i]->IsAssignableFrom(instance_class)) {
1361          ++instance_counter->counts_[i];
1362        }
1363      } else if (instance_class == instance_counter->classes_[i]) {
1364        ++instance_counter->counts_[i];
1365      }
1366    }
1367  }
1368
1369 private:
1370  const std::vector<mirror::Class*>& classes_;
1371  bool use_is_assignable_from_;
1372  uint64_t* const counts_;
1373  DISALLOW_COPY_AND_ASSIGN(InstanceCounter);
1374};
1375
1376void Heap::CountInstances(const std::vector<mirror::Class*>& classes, bool use_is_assignable_from,
1377                          uint64_t* counts) {
1378  // Can't do any GC in this function since this may move classes.
1379  Thread* self = Thread::Current();
1380  auto* old_cause = self->StartAssertNoThreadSuspension("CountInstances");
1381  InstanceCounter counter(classes, use_is_assignable_from, counts);
1382  WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
1383  VisitObjects(InstanceCounter::Callback, &counter);
1384  self->EndAssertNoThreadSuspension(old_cause);
1385}
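// A hypothetical usage sketch (editorial): counts is indexed in parallel with classes, and
// use_is_assignable_from selects subtype counting instead of exact class matches.
//
//   std::vector<mirror::Class*> classes = {string_class, number_class};  // hypothetical classes
//   uint64_t counts[2] = {0, 0};
//   heap->CountInstances(classes, /*use_is_assignable_from=*/true, counts);
//   // counts[0]: instances whose class is exactly or derives from java.lang.String;
//   // counts[1]: instances assignable to Number (e.g. Integer, Long), since IsAssignableFrom
//   // walks the class hierarchy.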
1386
1387class InstanceCollector {
1388 public:
1389  InstanceCollector(mirror::Class* c, int32_t max_count, std::vector<mirror::Object*>& instances)
1390      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
1391      : class_(c), max_count_(max_count), instances_(instances) {
1392  }
1393  static void Callback(mirror::Object* obj, void* arg)
1394      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
1395    DCHECK(arg != nullptr);
1396    InstanceCollector* instance_collector = reinterpret_cast<InstanceCollector*>(arg);
1397    mirror::Class* instance_class = obj->GetClass();
1398    if (instance_class == instance_collector->class_) {
1399      if (instance_collector->max_count_ == 0 ||
1400          instance_collector->instances_.size() < instance_collector->max_count_) {
1401        instance_collector->instances_.push_back(obj);
1402      }
1403    }
1404  }
1405
1406 private:
1407  mirror::Class* class_;
1408  uint32_t max_count_;
1409  std::vector<mirror::Object*>& instances_;
1410  DISALLOW_COPY_AND_ASSIGN(InstanceCollector);
1411};
1412
1413void Heap::GetInstances(mirror::Class* c, int32_t max_count,
1414                        std::vector<mirror::Object*>& instances) {
1415  // Can't do any GC in this function since this may move classes.
1416  Thread* self = Thread::Current();
1417  auto* old_cause = self->StartAssertNoThreadSuspension("GetInstances");
1418  InstanceCollector collector(c, max_count, instances);
1419  WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
1420  VisitObjects(&InstanceCollector::Callback, &collector);
1421  self->EndAssertNoThreadSuspension(old_cause);
1422}
1423
1424class ReferringObjectsFinder {
1425 public:
1426  ReferringObjectsFinder(mirror::Object* object, int32_t max_count,
1427                         std::vector<mirror::Object*>& referring_objects)
1428      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
1429      : object_(object), max_count_(max_count), referring_objects_(referring_objects) {
1430  }
1431
1432  static void Callback(mirror::Object* obj, void* arg)
1433      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
1434    reinterpret_cast<ReferringObjectsFinder*>(arg)->operator()(obj);
1435  }
1436
1437  // For bitmap Visit.
1438  // TODO: Fix lock analysis to not use NO_THREAD_SAFETY_ANALYSIS, requires support for
1439  // annotalysis on visitors.
1440  void operator()(mirror::Object* o) const NO_THREAD_SAFETY_ANALYSIS {
1441    o->VisitReferences<true>(*this, VoidFunctor());
1442  }
1443
1444  // For Object::VisitReferences.
1445  void operator()(mirror::Object* obj, MemberOffset offset, bool /* is_static */) const
1446      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1447    mirror::Object* ref = obj->GetFieldObject<mirror::Object>(offset);
1448    if (ref == object_ && (max_count_ == 0 || referring_objects_.size() < max_count_)) {
1449      referring_objects_.push_back(obj);
1450    }
1451  }
1452
1453 private:
1454  mirror::Object* object_;
1455  uint32_t max_count_;
1456  std::vector<mirror::Object*>& referring_objects_;
1457  DISALLOW_COPY_AND_ASSIGN(ReferringObjectsFinder);
1458};
1459
1460void Heap::GetReferringObjects(mirror::Object* o, int32_t max_count,
1461                               std::vector<mirror::Object*>& referring_objects) {
1462  // Can't do any GC in this function since this may move the object o.
1463  Thread* self = Thread::Current();
1464  auto* old_cause = self->StartAssertNoThreadSuspension("GetReferringObjects");
1465  ReferringObjectsFinder finder(o, max_count, referring_objects);
1466  WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
1467  VisitObjects(&ReferringObjectsFinder::Callback, &finder);
1468  self->EndAssertNoThreadSuspension(old_cause);
1469}
1470
1471void Heap::CollectGarbage(bool clear_soft_references) {
1472  // Even if we waited for a GC we still need to do another GC since weaks allocated during the
1473  // last GC will not have necessarily been cleared.
1474  CollectGarbageInternal(gc_plan_.back(), kGcCauseExplicit, clear_soft_references);
1475}
1476
1477HomogeneousSpaceCompactResult Heap::PerformHomogeneousSpaceCompact() {
1478  Thread* self = Thread::Current();
1479  // Inc requested homogeneous space compaction.
1480  count_requested_homogeneous_space_compaction_++;
1481  // Store performed homogeneous space compaction at a new request arrival.
1482  ThreadList* tl = Runtime::Current()->GetThreadList();
1483  ScopedThreadStateChange tsc(self, kWaitingPerformingGc);
1484  Locks::mutator_lock_->AssertNotHeld(self);
1485  {
1486    ScopedThreadStateChange tsc(self, kWaitingForGcToComplete);
1487    MutexLock mu(self, *gc_complete_lock_);
1488    // Ensure there is only one GC at a time.
1489    WaitForGcToCompleteLocked(kGcCauseHomogeneousSpaceCompact, self);
1490    // Homogeneous space compaction is a copying transition, so we can't run it if the moving GC
1491    // disable count is non-zero.
1492    // If the collector type changed to something which doesn't benefit from homogeneous space
1493    // compaction, exit.
1494    if (disable_moving_gc_count_ != 0 || IsMovingGc(collector_type_)) {
1495      return HomogeneousSpaceCompactResult::kErrorReject;
1496    }
1497    collector_type_running_ = kCollectorTypeHomogeneousSpaceCompact;
1498  }
1499  if (Runtime::Current()->IsShuttingDown(self)) {
1500    // Don't allow heap transitions to happen if the runtime is shutting down since these can
1501    // cause objects to get finalized.
1502    FinishGC(self, collector::kGcTypeNone);
1503    return HomogeneousSpaceCompactResult::kErrorVMShuttingDown;
1504  }
1505  // Suspend all threads.
1506  tl->SuspendAll();
1507  uint64_t start_time = NanoTime();
1508  // Launch compaction.
1509  space::MallocSpace* to_space = main_space_backup_.release();
1510  space::MallocSpace* from_space = main_space_;
1511  to_space->GetMemMap()->Protect(PROT_READ | PROT_WRITE);
1512  const uint64_t space_size_before_compaction = from_space->Size();
1513  AddSpace(to_space);
1514  Compact(to_space, from_space, kGcCauseHomogeneousSpaceCompact);
1515  // Leave as prot read so that we can still run ROSAlloc verification on this space.
1516  from_space->GetMemMap()->Protect(PROT_READ);
1517  const uint64_t space_size_after_compaction = to_space->Size();
1518  main_space_ = to_space;
1519  main_space_backup_.reset(from_space);
1520  RemoveSpace(from_space);
1521  SetSpaceAsDefault(main_space_);  // Set as default to reset the proper dlmalloc space.
1522  // Update performed homogeneous space compaction count.
1523  count_performed_homogeneous_space_compaction_++;
1524  // Print the statistics log and resume all threads.
1525  uint64_t duration = NanoTime() - start_time;
1526  LOG(INFO) << "Heap homogeneous space compaction took " << PrettyDuration(duration) << " size: "
1527            << PrettySize(space_size_before_compaction) << " -> "
1528            << PrettySize(space_size_after_compaction) << " compact-ratio: "
1529            << std::fixed << static_cast<double>(space_size_after_compaction) /
1530            static_cast<double>(space_size_before_compaction);
1531  tl->ResumeAll();
1532  // Finish GC.
1533  reference_processor_.EnqueueClearedReferences(self);
1534  GrowForUtilization(semi_space_collector_);
1535  FinishGC(self, collector::kGcTypeFull);
1536  return HomogeneousSpaceCompactResult::kSuccess;
1537}
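// A worked example with hypothetical sizes for the compact-ratio logged above: if the main space
// occupied 48 MB before compaction and 32 MB afterwards,
//
//   double compact_ratio = static_cast<double>(32 * MB) / static_cast<double>(48 * MB);
//   // compact_ratio == 32.0 / 48.0, i.e. the space shrank to roughly two thirds of its old size.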
1538
1539
1540void Heap::TransitionCollector(CollectorType collector_type) {
1541  if (collector_type == collector_type_) {
1542    return;
1543  }
1544  VLOG(heap) << "TransitionCollector: " << static_cast<int>(collector_type_)
1545             << " -> " << static_cast<int>(collector_type);
1546  uint64_t start_time = NanoTime();
1547  uint32_t before_allocated = num_bytes_allocated_.LoadSequentiallyConsistent();
1548  Runtime* const runtime = Runtime::Current();
1549  ThreadList* const tl = runtime->GetThreadList();
1550  Thread* const self = Thread::Current();
1551  ScopedThreadStateChange tsc(self, kWaitingPerformingGc);
1552  Locks::mutator_lock_->AssertNotHeld(self);
1553  const bool copying_transition =
1554      IsMovingGc(background_collector_type_) || IsMovingGc(foreground_collector_type_);
1555  // Busy wait until we can GC (StartGC can fail if we have a non-zero
1556  // compacting_gc_disable_count_; this should rarely occur).
1557  for (;;) {
1558    {
1559      ScopedThreadStateChange tsc(self, kWaitingForGcToComplete);
1560      MutexLock mu(self, *gc_complete_lock_);
1561      // Ensure there is only one GC at a time.
1562      WaitForGcToCompleteLocked(kGcCauseCollectorTransition, self);
1563      // If someone else beat us to it and changed the collector before we could, exit.
1564      // This is safe to do before the suspend all since we set the collector_type_running_ before
1565      // we exit the loop. If another thread attempts to do the heap transition before we exit,
1566      // then it would get blocked on WaitForGcToCompleteLocked.
1567      if (collector_type == collector_type_) {
1568        return;
1569      }
1570      // GC can be disabled if someone has used GetPrimitiveArrayCritical but not yet released it.
1571      if (!copying_transition || disable_moving_gc_count_ == 0) {
1572        // TODO: Not hard code in semi-space collector?
1573        collector_type_running_ = copying_transition ? kCollectorTypeSS : collector_type;
1574        break;
1575      }
1576    }
1577    usleep(1000);
1578  }
1579  if (runtime->IsShuttingDown(self)) {
1580    // Don't allow heap transitions to happen if the runtime is shutting down since these can
1581    // cause objects to get finalized.
1582    FinishGC(self, collector::kGcTypeNone);
1583    return;
1584  }
1585  tl->SuspendAll();
1586  switch (collector_type) {
1587    case kCollectorTypeSS: {
1588      if (!IsMovingGc(collector_type_)) {
1589        // Create the bump pointer space from the backup space.
1590        CHECK(main_space_backup_ != nullptr);
1591        std::unique_ptr<MemMap> mem_map(main_space_backup_->ReleaseMemMap());
1592        // We are transitioning from a non-moving GC to a moving GC. Since we copied from the bump
1593        // pointer space during the last transition, it will be protected.
1594        CHECK(mem_map != nullptr);
1595        mem_map->Protect(PROT_READ | PROT_WRITE);
1596        bump_pointer_space_ = space::BumpPointerSpace::CreateFromMemMap("Bump pointer space",
1597                                                                        mem_map.release());
1598        AddSpace(bump_pointer_space_);
1599        Compact(bump_pointer_space_, main_space_, kGcCauseCollectorTransition);
1600        // Use the now empty main space mem map for the bump pointer temp space.
1601        mem_map.reset(main_space_->ReleaseMemMap());
1602        // Remove the main space so that we don't try to trim it; this doesn't work for debug
1603        // builds since RosAlloc attempts to read the magic number from a protected page.
1604        RemoveSpace(main_space_);
1605        // Unset the pointers just in case.
1606        if (dlmalloc_space_ == main_space_) {
1607          dlmalloc_space_ = nullptr;
1608        } else if (rosalloc_space_ == main_space_) {
1609          rosalloc_space_ = nullptr;
1610        }
1611        RemoveRememberedSet(main_space_);
1612        RemoveRememberedSet(main_space_backup_.get());
1613        main_space_backup_.reset(nullptr);
1614        main_space_ = nullptr;
1615        temp_space_ = space::BumpPointerSpace::CreateFromMemMap("Bump pointer space 2",
1616                                                                mem_map.release());
1617        AddSpace(temp_space_);
1618      }
1619      break;
1620    }
1621    case kCollectorTypeMS:
1622      // Fall through.
1623    case kCollectorTypeCMS: {
1624      if (IsMovingGc(collector_type_)) {
1625        CHECK(temp_space_ != nullptr);
1626        std::unique_ptr<MemMap> mem_map(temp_space_->ReleaseMemMap());
1627        RemoveSpace(temp_space_);
1628        temp_space_ = nullptr;
1629        CreateMainMallocSpace(mem_map.get(), kDefaultInitialSize, mem_map->Size(),
1630                              mem_map->Size());
1631        mem_map.release();
1632        // Compact to the main space from the bump pointer space; no need to swap semi-spaces.
1633        AddSpace(main_space_);
1634        main_space_->GetMemMap()->Protect(PROT_READ | PROT_WRITE);
1635        Compact(main_space_, bump_pointer_space_, kGcCauseCollectorTransition);
1636        mem_map.reset(bump_pointer_space_->ReleaseMemMap());
1637        RemoveSpace(bump_pointer_space_);
1638        bump_pointer_space_ = nullptr;
1639        const char* name = kUseRosAlloc ? kRosAllocSpaceName[1] : kDlMallocSpaceName[1];
1640        main_space_backup_.reset(CreateMallocSpaceFromMemMap(mem_map.get(), kDefaultInitialSize,
1641                                                             mem_map->Size(), mem_map->Size(),
1642                                                             name, true));
1643        mem_map.release();
1644      }
1645      break;
1646    }
1647    default: {
1648      LOG(FATAL) << "Attempted to transition to invalid collector type "
1649                 << static_cast<size_t>(collector_type);
1650      break;
1651    }
1652  }
1653  ChangeCollector(collector_type);
1654  tl->ResumeAll();
1655  // Can't call into Java code with all threads suspended.
1656  reference_processor_.EnqueueClearedReferences(self);
1657  uint64_t duration = NanoTime() - start_time;
1658  GrowForUtilization(semi_space_collector_);
1659  FinishGC(self, collector::kGcTypeFull);
1660  int32_t after_allocated = num_bytes_allocated_.LoadSequentiallyConsistent();
1661  int32_t delta_allocated = before_allocated - after_allocated;
1662  std::string saved_str;
1663  if (delta_allocated >= 0) {
1664    saved_str = " saved at least " + PrettySize(delta_allocated);
1665  } else {
1666    saved_str = " expanded " + PrettySize(-delta_allocated);
1667  }
1668  LOG(INFO) << "Heap transition to " << process_state_ << " took "
1669      << PrettyDuration(duration) << saved_str;
1670}
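// A note with hypothetical numbers: the unsigned counters above are differenced into a signed
// delta, so a transition that increases the footprint reports "expanded" instead of "saved":
//
//   uint32_t before_allocated = 10 * MB;
//   uint32_t after_allocated = 12 * MB;  // e.g. copying into a binned space grew the usage
//   int32_t delta_allocated = before_allocated - after_allocated;
//   // delta_allocated is negative (-2 MB in practice), so the log reads " expanded 2MB".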
1671
1672void Heap::ChangeCollector(CollectorType collector_type) {
1673  // TODO: Only do this with all mutators suspended to avoid races.
1674  if (collector_type != collector_type_) {
1675    if (collector_type == kCollectorTypeMC) {
1676      // Don't allow mark compact unless support is compiled in.
1677      CHECK(kMarkCompactSupport);
1678    }
1679    collector_type_ = collector_type;
1680    gc_plan_.clear();
1681    switch (collector_type_) {
1682      case kCollectorTypeCC:  // Fall-through.
1683      case kCollectorTypeMC:  // Fall-through.
1684      case kCollectorTypeSS:  // Fall-through.
1685      case kCollectorTypeGSS: {
1686        gc_plan_.push_back(collector::kGcTypeFull);
1687        if (use_tlab_) {
1688          ChangeAllocator(kAllocatorTypeTLAB);
1689        } else {
1690          ChangeAllocator(kAllocatorTypeBumpPointer);
1691        }
1692        break;
1693      }
1694      case kCollectorTypeMS: {
1695        gc_plan_.push_back(collector::kGcTypeSticky);
1696        gc_plan_.push_back(collector::kGcTypePartial);
1697        gc_plan_.push_back(collector::kGcTypeFull);
1698        ChangeAllocator(kUseRosAlloc ? kAllocatorTypeRosAlloc : kAllocatorTypeDlMalloc);
1699        break;
1700      }
1701      case kCollectorTypeCMS: {
1702        gc_plan_.push_back(collector::kGcTypeSticky);
1703        gc_plan_.push_back(collector::kGcTypePartial);
1704        gc_plan_.push_back(collector::kGcTypeFull);
1705        ChangeAllocator(kUseRosAlloc ? kAllocatorTypeRosAlloc : kAllocatorTypeDlMalloc);
1706        break;
1707      }
1708      default: {
1709        LOG(FATAL) << "Unimplemented";
1710      }
1711    }
1712    if (IsGcConcurrent()) {
1713      concurrent_start_bytes_ =
1714          std::max(max_allowed_footprint_, kMinConcurrentRemainingBytes) - kMinConcurrentRemainingBytes;
1715    } else {
1716      concurrent_start_bytes_ = std::numeric_limits<size_t>::max();
1717    }
1718  }
1719}
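// A worked example (hypothetical footprint) for the concurrent start threshold computed above:
//
//   size_t max_allowed_footprint = 64 * MB;
//   size_t concurrent_start_bytes =
//       std::max(max_allowed_footprint, kMinConcurrentRemainingBytes) - kMinConcurrentRemainingBytes;
//   // With kMinConcurrentRemainingBytes == 128 KB this yields 64 MB - 128 KB: a concurrent GC is
//   // requested once allocations come within 128 KB of the footprint limit. The std::max guards
//   // against underflow when the footprint is smaller than the reserve.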
1720
1721// Special compacting collector which uses sub-optimal bin packing to reduce zygote space size.
1722class ZygoteCompactingCollector FINAL : public collector::SemiSpace {
1723 public:
1724  explicit ZygoteCompactingCollector(gc::Heap* heap) : SemiSpace(heap, false, "zygote collector"),
1725      bin_live_bitmap_(nullptr), bin_mark_bitmap_(nullptr) {
1726  }
1727
1728  void BuildBins(space::ContinuousSpace* space) {
1729    bin_live_bitmap_ = space->GetLiveBitmap();
1730    bin_mark_bitmap_ = space->GetMarkBitmap();
1731    BinContext context;
1732    context.prev_ = reinterpret_cast<uintptr_t>(space->Begin());
1733    context.collector_ = this;
1734    WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
1735    // Note: This requires traversing the space in increasing order of object addresses.
1736    bin_live_bitmap_->Walk(Callback, reinterpret_cast<void*>(&context));
1737    // Add the last bin which spans after the last object to the end of the space.
1738    AddBin(reinterpret_cast<uintptr_t>(space->End()) - context.prev_, context.prev_);
1739  }
1740
1741 private:
1742  struct BinContext {
1743    uintptr_t prev_;  // The end of the previous object.
1744    ZygoteCompactingCollector* collector_;
1745  };
1746  // Maps from bin sizes to locations.
1747  std::multimap<size_t, uintptr_t> bins_;
1748  // Live bitmap of the space which contains the bins.
1749  accounting::ContinuousSpaceBitmap* bin_live_bitmap_;
1750  // Mark bitmap of the space which contains the bins.
1751  accounting::ContinuousSpaceBitmap* bin_mark_bitmap_;
1752
1753  static void Callback(mirror::Object* obj, void* arg)
1754      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1755    DCHECK(arg != nullptr);
1756    BinContext* context = reinterpret_cast<BinContext*>(arg);
1757    ZygoteCompactingCollector* collector = context->collector_;
1758    uintptr_t object_addr = reinterpret_cast<uintptr_t>(obj);
1759    size_t bin_size = object_addr - context->prev_;
1760    // Add the bin spanning from the end of the previous object to the start of the current object.
1761    collector->AddBin(bin_size, context->prev_);
1762    context->prev_ = object_addr + RoundUp(obj->SizeOf(), kObjectAlignment);
1763  }
1764
1765  void AddBin(size_t size, uintptr_t position) {
1766    if (size != 0) {
1767      bins_.insert(std::make_pair(size, position));
1768    }
1769  }
1770
1771  virtual bool ShouldSweepSpace(space::ContinuousSpace* space) const {
1772    // Don't sweep any spaces since we probably blasted the internal accounting of the free list
1773    // allocator.
1774    return false;
1775  }
1776
1777  virtual mirror::Object* MarkNonForwardedObject(mirror::Object* obj)
1778      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_) {
1779    size_t object_size = RoundUp(obj->SizeOf(), kObjectAlignment);
1780    mirror::Object* forward_address;
1781    // Find the smallest bin which we can move obj in.
1782    auto it = bins_.lower_bound(object_size);
1783    if (it == bins_.end()) {
1784      // No available space in the bins, place it in the target space instead (grows the zygote
1785      // space).
1786      size_t bytes_allocated;
1787      forward_address = to_space_->Alloc(self_, object_size, &bytes_allocated, nullptr);
1788      if (to_space_live_bitmap_ != nullptr) {
1789        to_space_live_bitmap_->Set(forward_address);
1790      } else {
1791        GetHeap()->GetNonMovingSpace()->GetLiveBitmap()->Set(forward_address);
1792        GetHeap()->GetNonMovingSpace()->GetMarkBitmap()->Set(forward_address);
1793      }
1794    } else {
1795      size_t size = it->first;
1796      uintptr_t pos = it->second;
1797      bins_.erase(it);  // Erase the old bin which we replace with the new smaller bin.
1798      forward_address = reinterpret_cast<mirror::Object*>(pos);
1799      // Set the live and mark bits so that sweeping system weaks works properly.
1800      bin_live_bitmap_->Set(forward_address);
1801      bin_mark_bitmap_->Set(forward_address);
1802      DCHECK_GE(size, object_size);
1803      AddBin(size - object_size, pos + object_size);  // Add a new bin with the remaining space.
1804    }
1805    // Copy the object over to its new location.
1806    memcpy(reinterpret_cast<void*>(forward_address), obj, object_size);
1807    if (kUseBakerOrBrooksReadBarrier) {
1808      obj->AssertReadBarrierPointer();
1809      if (kUseBrooksReadBarrier) {
1810        DCHECK_EQ(forward_address->GetReadBarrierPointer(), obj);
1811        forward_address->SetReadBarrierPointer(forward_address);
1812      }
1813      forward_address->AssertReadBarrierPointer();
1814    }
1815    return forward_address;
1816  }
1817};
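// A sketch (standard library only, hypothetical values) of the best-fit bin lookup the collector
// above performs with bins_: lower_bound finds the smallest free gap that fits, the gap is
// consumed, and any remainder is re-inserted as a smaller bin.
//
//   std::multimap<size_t, uintptr_t> bins;    // bin size -> start address of the free gap
//   bins.emplace(24, 0x1000);                 // a 24-byte hole at 0x1000
//   size_t object_size = 16;
//   auto it = bins.lower_bound(object_size);  // smallest bin with size >= 16
//   if (it != bins.end()) {
//     uintptr_t pos = it->second;
//     size_t leftover = it->first - object_size;
//     bins.erase(it);                         // consume the bin...
//     if (leftover != 0) {
//       bins.emplace(leftover, pos + object_size);  // ...and keep the 8-byte remainder at 0x1010
//     }
//   }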
1818
1819void Heap::UnBindBitmaps() {
1820  TimingLogger::ScopedTiming t("UnBindBitmaps", GetCurrentGcIteration()->GetTimings());
1821  for (const auto& space : GetContinuousSpaces()) {
1822    if (space->IsContinuousMemMapAllocSpace()) {
1823      space::ContinuousMemMapAllocSpace* alloc_space = space->AsContinuousMemMapAllocSpace();
1824      if (alloc_space->HasBoundBitmaps()) {
1825        alloc_space->UnBindBitmaps();
1826      }
1827    }
1828  }
1829}
1830
1831void Heap::PreZygoteFork() {
1832  CollectGarbageInternal(collector::kGcTypeFull, kGcCauseBackground, false);
1833  Thread* self = Thread::Current();
1834  MutexLock mu(self, zygote_creation_lock_);
1835  // Check whether we already have a zygote space.
1836  if (have_zygote_space_) {
1837    return;
1838  }
1839  VLOG(heap) << "Starting PreZygoteFork";
1840  // Trim the pages at the end of the non-moving space.
1841  non_moving_space_->Trim();
1842  // The end of the non-moving space may be protected, unprotect it so that we can copy the zygote
1843  // there.
1844  non_moving_space_->GetMemMap()->Protect(PROT_READ | PROT_WRITE);
1845  // Change the collector to the post zygote one.
1846  bool same_space = non_moving_space_ == main_space_;
1847  if (kCompactZygote) {
1848    DCHECK(semi_space_collector_ != nullptr);
1849    // Temporarily disable rosalloc verification because the zygote
1850    // compaction will mess up the rosalloc internal metadata.
1851    ScopedDisableRosAllocVerification disable_rosalloc_verif(this);
1852    ZygoteCompactingCollector zygote_collector(this);
1853    zygote_collector.BuildBins(non_moving_space_);
1854    // Create a new bump pointer space which we will compact into.
1855    space::BumpPointerSpace target_space("zygote bump space", non_moving_space_->End(),
1856                                         non_moving_space_->Limit());
1857    // Compact the bump pointer space to a new zygote bump pointer space.
1858    bool reset_main_space = false;
1859    if (IsMovingGc(collector_type_)) {
1860      zygote_collector.SetFromSpace(bump_pointer_space_);
1861    } else {
1862      CHECK(main_space_ != nullptr);
1863      // Copy from the main space.
1864      zygote_collector.SetFromSpace(main_space_);
1865      reset_main_space = true;
1866    }
1867    zygote_collector.SetToSpace(&target_space);
1868    zygote_collector.SetSwapSemiSpaces(false);
1869    zygote_collector.Run(kGcCauseCollectorTransition, false);
1870    if (reset_main_space) {
1871      main_space_->GetMemMap()->Protect(PROT_READ | PROT_WRITE);
1872      madvise(main_space_->Begin(), main_space_->Capacity(), MADV_DONTNEED);
1873      MemMap* mem_map = main_space_->ReleaseMemMap();
1874      RemoveSpace(main_space_);
1875      space::Space* old_main_space = main_space_;
1876      CreateMainMallocSpace(mem_map, kDefaultInitialSize, mem_map->Size(), mem_map->Size());
1877      delete old_main_space;
1878      AddSpace(main_space_);
1879    } else {
1880      bump_pointer_space_->GetMemMap()->Protect(PROT_READ | PROT_WRITE);
1881    }
1882    if (temp_space_ != nullptr) {
1883      CHECK(temp_space_->IsEmpty());
1884    }
1885    total_objects_freed_ever_ += GetCurrentGcIteration()->GetFreedObjects();
1886    total_bytes_freed_ever_ += GetCurrentGcIteration()->GetFreedBytes();
1887    // Update the end and write out image.
1888    non_moving_space_->SetEnd(target_space.End());
1889    non_moving_space_->SetLimit(target_space.Limit());
1890    VLOG(heap) << "Zygote space size " << non_moving_space_->Size() << " bytes";
1891  }
1892  ChangeCollector(foreground_collector_type_);
1893  // Save the old space so that we can remove it after we complete creating the zygote space.
1894  space::MallocSpace* old_alloc_space = non_moving_space_;
1895  // Turn the current alloc space into a zygote space and obtain the new alloc space composed of
1896  // the remaining available space.
1897  // Remove the old space before creating the zygote space since creating the zygote space sets
1898  // the old alloc space's bitmaps to nullptr.
1899  RemoveSpace(old_alloc_space);
1900  if (collector::SemiSpace::kUseRememberedSet) {
1901    // Sanity bound check.
1902    FindRememberedSetFromSpace(old_alloc_space)->AssertAllDirtyCardsAreWithinSpace();
1903    // Remove the remembered set for the now zygote space (the old
1904    // non-moving space). Note now that we have compacted objects into
1905    // the zygote space, the data in the remembered set is no longer
1906    // needed. The zygote space will instead have a mod-union table
1907    // from this point on.
1908    RemoveRememberedSet(old_alloc_space);
1909  }
1910  space::ZygoteSpace* zygote_space = old_alloc_space->CreateZygoteSpace("alloc space",
1911                                                                        low_memory_mode_,
1912                                                                        &non_moving_space_);
1913  CHECK(!non_moving_space_->CanMoveObjects());
1914  if (same_space) {
1915    main_space_ = non_moving_space_;
1916    SetSpaceAsDefault(main_space_);
1917  }
1918  delete old_alloc_space;
1919  CHECK(zygote_space != nullptr) << "Failed creating zygote space";
1920  AddSpace(zygote_space);
1921  non_moving_space_->SetFootprintLimit(non_moving_space_->Capacity());
1922  AddSpace(non_moving_space_);
1923  have_zygote_space_ = true;
1924  // Enable large object space allocations.
1925  large_object_threshold_ = kDefaultLargeObjectThreshold;
1926  // Create the zygote space mod union table.
1927  accounting::ModUnionTable* mod_union_table =
1928      new accounting::ModUnionTableCardCache("zygote space mod-union table", this, zygote_space);
1929  CHECK(mod_union_table != nullptr) << "Failed to create zygote space mod-union table";
1930  AddModUnionTable(mod_union_table);
1931  if (collector::SemiSpace::kUseRememberedSet) {
1932    // Add a new remembered set for the post-zygote non-moving space.
1933    accounting::RememberedSet* post_zygote_non_moving_space_rem_set =
1934        new accounting::RememberedSet("Post-zygote non-moving space remembered set", this,
1935                                      non_moving_space_);
1936    CHECK(post_zygote_non_moving_space_rem_set != nullptr)
1937        << "Failed to create post-zygote non-moving space remembered set";
1938    AddRememberedSet(post_zygote_non_moving_space_rem_set);
1939  }
1940}
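// For orientation, a summary of the steps above:
//   1. Run a full GC, take zygote_creation_lock_, and bail out if a zygote space already exists.
//   2. If kCompactZygote, bin-pack the live objects into a bump pointer space carved out of the
//      tail of the non-moving space, using the ZygoteCompactingCollector defined above.
//   3. Turn the old alloc space into a zygote space and build a new alloc space from the remaining
//      capacity; from this point on large object space allocations are also enabled.
//   4. Replace the zygote space's remembered set with a mod-union table and, if remembered sets
//      are in use, add one for the new post-zygote non-moving space.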
1941
1942void Heap::FlushAllocStack() {
1943  MarkAllocStackAsLive(allocation_stack_.get());
1944  allocation_stack_->Reset();
1945}
1946
1947void Heap::MarkAllocStack(accounting::ContinuousSpaceBitmap* bitmap1,
1948                          accounting::ContinuousSpaceBitmap* bitmap2,
1949                          accounting::LargeObjectBitmap* large_objects,
1950                          accounting::ObjectStack* stack) {
1951  DCHECK(bitmap1 != nullptr);
1952  DCHECK(bitmap2 != nullptr);
1953  mirror::Object** limit = stack->End();
1954  for (mirror::Object** it = stack->Begin(); it != limit; ++it) {
1955    const mirror::Object* obj = *it;
1956    if (!kUseThreadLocalAllocationStack || obj != nullptr) {
1957      if (bitmap1->HasAddress(obj)) {
1958        bitmap1->Set(obj);
1959      } else if (bitmap2->HasAddress(obj)) {
1960        bitmap2->Set(obj);
1961      } else {
1962        large_objects->Set(obj);
1963      }
1964    }
1965  }
1966}
1967
1968void Heap::SwapSemiSpaces() {
1969  CHECK(bump_pointer_space_ != nullptr);
1970  CHECK(temp_space_ != nullptr);
1971  std::swap(bump_pointer_space_, temp_space_);
1972}
1973
1974void Heap::Compact(space::ContinuousMemMapAllocSpace* target_space,
1975                   space::ContinuousMemMapAllocSpace* source_space,
1976                   GcCause gc_cause) {
1977  CHECK(kMovingCollector);
1978  if (target_space != source_space) {
1979    // Don't swap spaces since this isn't a typical semi space collection.
1980    semi_space_collector_->SetSwapSemiSpaces(false);
1981    semi_space_collector_->SetFromSpace(source_space);
1982    semi_space_collector_->SetToSpace(target_space);
1983    semi_space_collector_->Run(gc_cause, false);
1984  } else {
1985    CHECK(target_space->IsBumpPointerSpace())
1986        << "In-place compaction is only supported for bump pointer spaces";
1987    mark_compact_collector_->SetSpace(target_space->AsBumpPointerSpace());
1988    mark_compact_collector_->Run(kGcCauseCollectorTransition, false);
1989  }
1990}
1991
1992collector::GcType Heap::CollectGarbageInternal(collector::GcType gc_type, GcCause gc_cause,
1993                                               bool clear_soft_references) {
1994  Thread* self = Thread::Current();
1995  Runtime* runtime = Runtime::Current();
1996  // If the heap can't run the GC, silently fail and return that no GC was run.
1997  switch (gc_type) {
1998    case collector::kGcTypePartial: {
1999      if (!have_zygote_space_) {
2000        return collector::kGcTypeNone;
2001      }
2002      break;
2003    }
2004    default: {
2005      // Other GC types don't have any special cases which make them unrunnable. The main case
2006      // here is full GC.
2007    }
2008  }
2009  ScopedThreadStateChange tsc(self, kWaitingPerformingGc);
2010  Locks::mutator_lock_->AssertNotHeld(self);
2011  if (self->IsHandlingStackOverflow()) {
2012    LOG(WARNING) << "Performing GC on a thread that is handling a stack overflow.";
2013  }
2014  bool compacting_gc;
2015  {
2016    gc_complete_lock_->AssertNotHeld(self);
2017    ScopedThreadStateChange tsc(self, kWaitingForGcToComplete);
2018    MutexLock mu(self, *gc_complete_lock_);
2019    // Ensure there is only one GC at a time.
2020    WaitForGcToCompleteLocked(gc_cause, self);
2021    compacting_gc = IsMovingGc(collector_type_);
2022    // GC can be disabled if someone has used GetPrimitiveArrayCritical.
2023    if (compacting_gc && disable_moving_gc_count_ != 0) {
2024      LOG(WARNING) << "Skipping GC due to disable moving GC count " << disable_moving_gc_count_;
2025      return collector::kGcTypeNone;
2026    }
2027    collector_type_running_ = collector_type_;
2028  }
2029
2030  if (gc_cause == kGcCauseForAlloc && runtime->HasStatsEnabled()) {
2031    ++runtime->GetStats()->gc_for_alloc_count;
2032    ++self->GetStats()->gc_for_alloc_count;
2033  }
2034  uint64_t gc_start_time_ns = NanoTime();
2035  uint64_t gc_start_size = GetBytesAllocated();
2036  // Approximate allocation rate in bytes / second.
2037  uint64_t ms_delta = NsToMs(gc_start_time_ns - last_gc_time_ns_);
2038  // Back to back GCs can cause 0 ms of wait time in between GC invocations.
2039  if (LIKELY(ms_delta != 0)) {
2040    allocation_rate_ = ((gc_start_size - last_gc_size_) * 1000) / ms_delta;
2041    VLOG(heap) << "Allocation rate: " << PrettySize(allocation_rate_) << "/s";
2042  }
2043
2044  DCHECK_LT(gc_type, collector::kGcTypeMax);
2045  DCHECK_NE(gc_type, collector::kGcTypeNone);
2046
2047  collector::GarbageCollector* collector = nullptr;
2048  // TODO: Clean this up.
2049  if (compacting_gc) {
2050    DCHECK(current_allocator_ == kAllocatorTypeBumpPointer ||
2051           current_allocator_ == kAllocatorTypeTLAB);
2052    switch (collector_type_) {
2053      case kCollectorTypeSS:
2054        // Fall-through.
2055      case kCollectorTypeGSS:
2056        semi_space_collector_->SetFromSpace(bump_pointer_space_);
2057        semi_space_collector_->SetToSpace(temp_space_);
2058        semi_space_collector_->SetSwapSemiSpaces(true);
2059        collector = semi_space_collector_;
2060        break;
2061      case kCollectorTypeCC:
2062        collector = concurrent_copying_collector_;
2063        break;
2064      case kCollectorTypeMC:
2065        mark_compact_collector_->SetSpace(bump_pointer_space_);
2066        collector = mark_compact_collector_;
2067        break;
2068      default:
2069        LOG(FATAL) << "Invalid collector type " << static_cast<size_t>(collector_type_);
2070    }
2071    if (collector != mark_compact_collector_) {
2072      temp_space_->GetMemMap()->Protect(PROT_READ | PROT_WRITE);
2073      CHECK(temp_space_->IsEmpty());
2074    }
2075    gc_type = collector::kGcTypeFull;  // TODO: Not hard code this in.
2076  } else if (current_allocator_ == kAllocatorTypeRosAlloc ||
2077      current_allocator_ == kAllocatorTypeDlMalloc) {
2078    collector = FindCollectorByGcType(gc_type);
2079  } else {
2080    LOG(FATAL) << "Invalid current allocator " << current_allocator_;
2081  }
2082  CHECK(collector != nullptr)
2083      << "Could not find garbage collector with collector_type="
2084      << static_cast<size_t>(collector_type_) << " and gc_type=" << gc_type;
2085  collector->Run(gc_cause, clear_soft_references || runtime->IsZygote());
2086  total_objects_freed_ever_ += GetCurrentGcIteration()->GetFreedObjects();
2087  total_bytes_freed_ever_ += GetCurrentGcIteration()->GetFreedBytes();
2088  RequestHeapTrim();
2089  // Enqueue cleared references.
2090  reference_processor_.EnqueueClearedReferences(self);
2091  // Grow the heap so that we know when to perform the next GC.
2092  GrowForUtilization(collector);
2093  const size_t duration = GetCurrentGcIteration()->GetDurationNs();
2094  const std::vector<uint64_t>& pause_times = GetCurrentGcIteration()->GetPauseTimes();
2095  // Print the GC if it is an explicit GC (e.g. Runtime.gc()) or a slow GC
2096  // (mutator time blocked >= long_pause_log_threshold_).
2097  bool log_gc = gc_cause == kGcCauseExplicit;
2098  if (!log_gc && CareAboutPauseTimes()) {
2099    // GC for alloc pauses the allocating thread, so consider it as a pause.
2100    log_gc = duration > long_gc_log_threshold_ ||
2101        (gc_cause == kGcCauseForAlloc && duration > long_pause_log_threshold_);
2102    for (uint64_t pause : pause_times) {
2103      log_gc = log_gc || pause >= long_pause_log_threshold_;
2104    }
2105  }
2106  if (log_gc) {
2107    const size_t percent_free = GetPercentFree();
2108    const size_t current_heap_size = GetBytesAllocated();
2109    const size_t total_memory = GetTotalMemory();
2110    std::ostringstream pause_string;
2111    for (size_t i = 0; i < pause_times.size(); ++i) {
2112      pause_string << PrettyDuration((pause_times[i] / 1000) * 1000)
2113                   << ((i != pause_times.size() - 1) ? "," : "");
2114    }
2115    LOG(INFO) << gc_cause << " " << collector->GetName()
2116              << " GC freed "  << current_gc_iteration_.GetFreedObjects() << "("
2117              << PrettySize(current_gc_iteration_.GetFreedBytes()) << ") AllocSpace objects, "
2118              << current_gc_iteration_.GetFreedLargeObjects() << "("
2119              << PrettySize(current_gc_iteration_.GetFreedLargeObjectBytes()) << ") LOS objects, "
2120              << percent_free << "% free, " << PrettySize(current_heap_size) << "/"
2121              << PrettySize(total_memory) << ", " << "paused " << pause_string.str()
2122              << " total " << PrettyDuration((duration / 1000) * 1000);
2123    VLOG(heap) << ConstDumpable<TimingLogger>(*current_gc_iteration_.GetTimings());
2124  }
2125  FinishGC(self, gc_type);
2126  // Inform DDMS that a GC completed.
2127  Dbg::GcDidFinish();
2128  return gc_type;
2129}
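// A worked example (hypothetical numbers) of the allocation rate computed above: if 4 MB were
// allocated since the previous GC finished 500 ms earlier,
//
//   uint64_t ms_delta = 500;
//   uint64_t allocation_rate = ((4 * MB) * 1000) / ms_delta;  // 8 MB per second
//   // VLOG(heap) would then report "Allocation rate: 8MB/s".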
2130
2131void Heap::FinishGC(Thread* self, collector::GcType gc_type) {
2132  MutexLock mu(self, *gc_complete_lock_);
2133  collector_type_running_ = kCollectorTypeNone;
2134  if (gc_type != collector::kGcTypeNone) {
2135    last_gc_type_ = gc_type;
2136  }
2137  // Wake anyone who may have been waiting for the GC to complete.
2138  gc_complete_cond_->Broadcast(self);
2139}
2140
2141static void RootMatchesObjectVisitor(mirror::Object** root, void* arg, uint32_t /*thread_id*/,
2142                                     RootType /*root_type*/) {
2143  mirror::Object* obj = reinterpret_cast<mirror::Object*>(arg);
2144  if (*root == obj) {
2145    LOG(INFO) << "Object " << obj << " is a root";
2146  }
2147}
2148
2149class ScanVisitor {
2150 public:
2151  void operator()(const mirror::Object* obj) const {
2152    LOG(ERROR) << "Would have rescanned object " << obj;
2153  }
2154};
2155
2156// Verify a reference from an object.
2157class VerifyReferenceVisitor {
2158 public:
2159  explicit VerifyReferenceVisitor(Heap* heap, Atomic<size_t>* fail_count, bool verify_referent)
2160      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_)
2161      : heap_(heap), fail_count_(fail_count), verify_referent_(verify_referent) {}
2162
2163  size_t GetFailureCount() const {
2164    return fail_count_->LoadSequentiallyConsistent();
2165  }
2166
2167  void operator()(mirror::Class* klass, mirror::Reference* ref) const
2168      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
2169    if (verify_referent_) {
2170      VerifyReference(ref, ref->GetReferent(), mirror::Reference::ReferentOffset());
2171    }
2172  }
2173
2174  void operator()(mirror::Object* obj, MemberOffset offset, bool /*is_static*/) const
2175      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
2176    VerifyReference(obj, obj->GetFieldObject<mirror::Object>(offset), offset);
2177  }
2178
2179  bool IsLive(mirror::Object* obj) const NO_THREAD_SAFETY_ANALYSIS {
2180    return heap_->IsLiveObjectLocked(obj, true, false, true);
2181  }
2182
2183  static void VerifyRootCallback(mirror::Object** root, void* arg, uint32_t thread_id,
2184                                 RootType root_type) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
2185    VerifyReferenceVisitor* visitor = reinterpret_cast<VerifyReferenceVisitor*>(arg);
2186    if (!visitor->VerifyReference(nullptr, *root, MemberOffset(0))) {
2187      LOG(ERROR) << "Root " << *root << " is dead with type " << PrettyTypeOf(*root)
2188          << " thread_id= " << thread_id << " root_type= " << root_type;
2189    }
2190  }
2191
2192 private:
2193  // TODO: Fix the no thread safety analysis.
2194  // Returns false on failure.
2195  bool VerifyReference(mirror::Object* obj, mirror::Object* ref, MemberOffset offset) const
2196      NO_THREAD_SAFETY_ANALYSIS {
2197    if (ref == nullptr || IsLive(ref)) {
2198      // Verify that the reference is live.
2199      return true;
2200    }
2201    if (fail_count_->FetchAndAddSequentiallyConsistent(1) == 0) {
2202      // Print the message only on the first failure to prevent spam.
2203      LOG(ERROR) << "!!!!!!!!!!!!!!Heap corruption detected!!!!!!!!!!!!!!!!!!!";
2204    }
2205    if (obj != nullptr) {
2206      // Only do this part for non roots.
2207      accounting::CardTable* card_table = heap_->GetCardTable();
2208      accounting::ObjectStack* alloc_stack = heap_->allocation_stack_.get();
2209      accounting::ObjectStack* live_stack = heap_->live_stack_.get();
2210      byte* card_addr = card_table->CardFromAddr(obj);
2211      LOG(ERROR) << "Object " << obj << " references dead object " << ref << " at offset "
2212                 << offset << "\n card value = " << static_cast<int>(*card_addr);
2213      if (heap_->IsValidObjectAddress(obj->GetClass())) {
2214        LOG(ERROR) << "Obj type " << PrettyTypeOf(obj);
2215      } else {
2216        LOG(ERROR) << "Object " << obj << " class(" << obj->GetClass() << ") not a heap address";
2217      }
2218
2219      // Attempt to find the class inside of the recently freed objects.
2220      space::ContinuousSpace* ref_space = heap_->FindContinuousSpaceFromObject(ref, true);
2221      if (ref_space != nullptr && ref_space->IsMallocSpace()) {
2222        space::MallocSpace* space = ref_space->AsMallocSpace();
2223        mirror::Class* ref_class = space->FindRecentFreedObject(ref);
2224        if (ref_class != nullptr) {
2225          LOG(ERROR) << "Reference " << ref << " found as a recently freed object with class "
2226                     << PrettyClass(ref_class);
2227        } else {
2228          LOG(ERROR) << "Reference " << ref << " not found as a recently freed object";
2229        }
2230      }
2231
2232      if (ref->GetClass() != nullptr && heap_->IsValidObjectAddress(ref->GetClass()) &&
2233          ref->GetClass()->IsClass()) {
2234        LOG(ERROR) << "Ref type " << PrettyTypeOf(ref);
2235      } else {
2236        LOG(ERROR) << "Ref " << ref << " class(" << ref->GetClass()
2237                   << ") is not a valid heap address";
2238      }
2239
2240      card_table->CheckAddrIsInCardTable(reinterpret_cast<const byte*>(obj));
2241      void* cover_begin = card_table->AddrFromCard(card_addr);
2242      void* cover_end = reinterpret_cast<void*>(reinterpret_cast<size_t>(cover_begin) +
2243          accounting::CardTable::kCardSize);
2244      LOG(ERROR) << "Card " << reinterpret_cast<void*>(card_addr) << " covers " << cover_begin
2245          << "-" << cover_end;
2246      accounting::ContinuousSpaceBitmap* bitmap =
2247          heap_->GetLiveBitmap()->GetContinuousSpaceBitmap(obj);
2248
2249      if (bitmap == nullptr) {
2250        LOG(ERROR) << "Object " << obj << " has no bitmap";
2251        if (!VerifyClassClass(obj->GetClass())) {
2252          LOG(ERROR) << "Object " << obj << " failed class verification!";
2253        }
2254      } else {
2255        // Print out how the object is live.
2256        if (bitmap->Test(obj)) {
2257          LOG(ERROR) << "Object " << obj << " found in live bitmap";
2258        }
2259        if (alloc_stack->Contains(const_cast<mirror::Object*>(obj))) {
2260          LOG(ERROR) << "Object " << obj << " found in allocation stack";
2261        }
2262        if (live_stack->Contains(const_cast<mirror::Object*>(obj))) {
2263          LOG(ERROR) << "Object " << obj << " found in live stack";
2264        }
2265        if (alloc_stack->Contains(const_cast<mirror::Object*>(ref))) {
2266          LOG(ERROR) << "Ref " << ref << " found in allocation stack";
2267        }
2268        if (live_stack->Contains(const_cast<mirror::Object*>(ref))) {
2269          LOG(ERROR) << "Ref " << ref << " found in live stack";
2270        }
2271        // Attempt to see if the card table missed the reference.
2272        ScanVisitor scan_visitor;
2273        byte* byte_cover_begin = reinterpret_cast<byte*>(card_table->AddrFromCard(card_addr));
2274        card_table->Scan(bitmap, byte_cover_begin,
2275                         byte_cover_begin + accounting::CardTable::kCardSize, scan_visitor);
2276      }
2277
2278      // Search to see if any of the roots reference our object.
2279      void* arg = const_cast<void*>(reinterpret_cast<const void*>(obj));
2280      Runtime::Current()->VisitRoots(&RootMatchesObjectVisitor, arg);
2281
2282      // Search to see if any of the roots reference our reference.
2283      arg = const_cast<void*>(reinterpret_cast<const void*>(ref));
2284      Runtime::Current()->VisitRoots(&RootMatchesObjectVisitor, arg);
2285    }
2286    return false;
2287  }
2288
2289  Heap* const heap_;
2290  Atomic<size_t>* const fail_count_;
2291  const bool verify_referent_;
2292};
2293
2294// Verify all references within an object, for use with HeapBitmap::Visit.
2295class VerifyObjectVisitor {
2296 public:
2297  explicit VerifyObjectVisitor(Heap* heap, Atomic<size_t>* fail_count, bool verify_referent)
2298      : heap_(heap), fail_count_(fail_count), verify_referent_(verify_referent) {
2299  }
2300
2301  void operator()(mirror::Object* obj) const
2302      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
2303    // Note: we are verifying the references in obj but not obj itself; obj must be live or else
2304    // we would not have found it in the live bitmap.
2305    VerifyReferenceVisitor visitor(heap_, fail_count_, verify_referent_);
2306    // The class doesn't count as a reference but we should verify it anyway.
2307    obj->VisitReferences<true>(visitor, visitor);
2308  }
2309
2310  static void VisitCallback(mirror::Object* obj, void* arg)
2311      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
2312    VerifyObjectVisitor* visitor = reinterpret_cast<VerifyObjectVisitor*>(arg);
2313    visitor->operator()(obj);
2314  }
2315
2316  size_t GetFailureCount() const {
2317    return fail_count_->LoadSequentiallyConsistent();
2318  }
2319
2320 private:
2321  Heap* const heap_;
2322  Atomic<size_t>* const fail_count_;
2323  const bool verify_referent_;
2324};
2325
2326void Heap::PushOnAllocationStackWithInternalGC(Thread* self, mirror::Object** obj) {
2327  // Slow path, the allocation stack push back must have already failed.
2328  DCHECK(!allocation_stack_->AtomicPushBack(*obj));
2329  do {
2330    // TODO: Add handle VerifyObject.
2331    StackHandleScope<1> hs(self);
2332    HandleWrapper<mirror::Object> wrapper(hs.NewHandleWrapper(obj));
2333    // Push our object into the reserve region of the allocation stack. This is only required due
2334    // to heap verification requiring that roots are live (either in the live bitmap or in the
2335    // allocation stack).
2336    CHECK(allocation_stack_->AtomicPushBackIgnoreGrowthLimit(*obj));
2337    CollectGarbageInternal(collector::kGcTypeSticky, kGcCauseForAlloc, false);
2338  } while (!allocation_stack_->AtomicPushBack(*obj));
2339}
2340
2341void Heap::PushOnThreadLocalAllocationStackWithInternalGC(Thread* self, mirror::Object** obj) {
2342  // Slow path, the allocation stack push back must have already failed.
2343  DCHECK(!self->PushOnThreadLocalAllocationStack(*obj));
2344  mirror::Object** start_address;
2345  mirror::Object** end_address;
2346  while (!allocation_stack_->AtomicBumpBack(kThreadLocalAllocationStackSize, &start_address,
2347                                            &end_address)) {
2348    // TODO: Add handle VerifyObject.
2349    StackHandleScope<1> hs(self);
2350    HandleWrapper<mirror::Object> wrapper(hs.NewHandleWrapper(obj));
2351    // Push our object into the reserve region of the allocation stack. This is only required due
2352    // to heap verification requiring that roots are live (either in the live bitmap or in the
2353    // allocation stack).
2354    CHECK(allocation_stack_->AtomicPushBackIgnoreGrowthLimit(*obj));
2355    // Push into the reserve allocation stack.
2356    CollectGarbageInternal(collector::kGcTypeSticky, kGcCauseForAlloc, false);
2357  }
2358  self->SetThreadLocalAllocationStack(start_address, end_address);
2359  // Retry on the new thread-local allocation stack.
2360  CHECK(self->PushOnThreadLocalAllocationStack(*obj));  // Must succeed.
2361}
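// A sketch (hypothetical, mirroring the loop above): AtomicBumpBack reserves a contiguous
// [start, end) window of the shared allocation stack which then becomes this thread's local
// stack, so subsequent pushes on this thread need no atomic operations until the window fills up.
//
//   mirror::Object** start;
//   mirror::Object** end;
//   if (allocation_stack_->AtomicBumpBack(kThreadLocalAllocationStackSize, &start, &end)) {
//     self->SetThreadLocalAllocationStack(start, end);
//     self->PushOnThreadLocalAllocationStack(obj);  // now a plain, thread-local push
//   }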
2362
2363// Must do this with mutators suspended since we are directly accessing the allocation stacks.
2364size_t Heap::VerifyHeapReferences(bool verify_referents) {
2365  Thread* self = Thread::Current();
2366  Locks::mutator_lock_->AssertExclusiveHeld(self);
2367  // Let's sort our allocation stacks so that we can efficiently binary search them.
2368  allocation_stack_->Sort();
2369  live_stack_->Sort();
2370  // Since we sorted the allocation stack content, we need to revoke all
2371  // thread-local allocation stacks.
2372  RevokeAllThreadLocalAllocationStacks(self);
2373  Atomic<size_t> fail_count_(0);
2374  VerifyObjectVisitor visitor(this, &fail_count_, verify_referents);
2375  // Verify objects in the allocation stack since these will be objects which were:
2376  // 1. Allocated prior to the GC (pre GC verification).
2377  // 2. Allocated during the GC (pre sweep GC verification).
2378  // We don't want to verify the objects in the live stack since they themselves may be
2379  // pointing to dead objects if they are not reachable.
2380  VisitObjects(VerifyObjectVisitor::VisitCallback, &visitor);
2381  // Verify the roots:
2382  Runtime::Current()->VisitRoots(VerifyReferenceVisitor::VerifyRootCallback, &visitor);
2383  if (visitor.GetFailureCount() > 0) {
2384    // Dump mod-union tables.
2385    for (const auto& table_pair : mod_union_tables_) {
2386      accounting::ModUnionTable* mod_union_table = table_pair.second;
2387      mod_union_table->Dump(LOG(ERROR) << mod_union_table->GetName() << ": ");
2388    }
2389    // Dump remembered sets.
2390    for (const auto& table_pair : remembered_sets_) {
2391      accounting::RememberedSet* remembered_set = table_pair.second;
2392      remembered_set->Dump(LOG(ERROR) << remembered_set->GetName() << ": ");
2393    }
2394    DumpSpaces(LOG(ERROR));
2395  }
2396  return visitor.GetFailureCount();
2397}
2398
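// Visitor used by VerifyLiveStackReferences: for every reference field it checks that an object
// which points at something in the live stack (other than a class reference) is covered by a dirty
// card, and records a failure otherwise.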
2399class VerifyReferenceCardVisitor {
2400 public:
2401  VerifyReferenceCardVisitor(Heap* heap, bool* failed)
2402      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_,
2403                            Locks::heap_bitmap_lock_)
2404      : heap_(heap), failed_(failed) {
2405  }
2406
2407  // TODO: Fix lock analysis to not use NO_THREAD_SAFETY_ANALYSIS, requires support for
2408  // annotalysis on visitors.
2409  void operator()(mirror::Object* obj, MemberOffset offset, bool is_static) const
2410      NO_THREAD_SAFETY_ANALYSIS {
2411    mirror::Object* ref = obj->GetFieldObject<mirror::Object>(offset);
2412    // Filter out class references since changing an object's class does not mark the card as dirty.
2413    // Also handles large objects, since the only reference they hold is a class reference.
2414    if (ref != nullptr && !ref->IsClass()) {
2415      accounting::CardTable* card_table = heap_->GetCardTable();
2416      // If the object is not dirty and it is referencing something in the live stack other than
2417      // class, then it must be on a dirty card.
2418      if (!card_table->AddrIsInCardTable(obj)) {
2419        LOG(ERROR) << "Object " << obj << " is not in the address range of the card table";
2420        *failed_ = true;
2421      } else if (!card_table->IsDirty(obj)) {
2422        // TODO: Check mod-union tables.
2423        // Card should be either kCardDirty if it got re-dirtied after we aged it, or
2424        // kCardDirty - 1 if it didn't get touched since we aged it.
2425        accounting::ObjectStack* live_stack = heap_->live_stack_.get();
2426        if (live_stack->ContainsSorted(ref)) {
2427          if (live_stack->ContainsSorted(obj)) {
2428            LOG(ERROR) << "Object " << obj << " found in live stack";
2429          }
2430          if (heap_->GetLiveBitmap()->Test(obj)) {
2431            LOG(ERROR) << "Object " << obj << " found in live bitmap";
2432          }
2433          LOG(ERROR) << "Object " << obj << " " << PrettyTypeOf(obj)
2434                    << " references " << ref << " " << PrettyTypeOf(ref) << " in live stack";
2435
2436          // Print which field of the object is dead.
2437          if (!obj->IsObjectArray()) {
2438            mirror::Class* klass = is_static ? obj->AsClass() : obj->GetClass();
2439            CHECK(klass != NULL);
2440            mirror::ObjectArray<mirror::ArtField>* fields = is_static ? klass->GetSFields()
2441                                                                      : klass->GetIFields();
2442            CHECK(fields != NULL);
2443            for (int32_t i = 0; i < fields->GetLength(); ++i) {
2444              mirror::ArtField* cur = fields->Get(i);
2445              if (cur->GetOffset().Int32Value() == offset.Int32Value()) {
2446                LOG(ERROR) << (is_static ? "Static " : "") << "field in the live stack is "
2447                          << PrettyField(cur);
2448                break;
2449              }
2450            }
2451          } else {
2452            mirror::ObjectArray<mirror::Object>* object_array =
2453                obj->AsObjectArray<mirror::Object>();
2454            for (int32_t i = 0; i < object_array->GetLength(); ++i) {
2455              if (object_array->Get(i) == ref) {
2456                LOG(ERROR) << (is_static ? "Static " : "") << "obj[" << i << "] = ref";
2457              }
2458            }
2459          }
2460
2461          *failed_ = true;
2462        }
2463      }
2464    }
2465  }
2466
2467 private:
2468  Heap* const heap_;
2469  bool* const failed_;
2470};
2471
2472class VerifyLiveStackReferences {
2473 public:
2474  explicit VerifyLiveStackReferences(Heap* heap)
2475      : heap_(heap),
2476        failed_(false) {}
2477
2478  void operator()(mirror::Object* obj) const
2479      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
2480    VerifyReferenceCardVisitor visitor(heap_, const_cast<bool*>(&failed_));
2481    obj->VisitReferences<true>(visitor, VoidFunctor());
2482  }
2483
2484  bool Failed() const {
2485    return failed_;
2486  }
2487
2488 private:
2489  Heap* const heap_;
2490  bool failed_;
2491};
2492
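// Walks the live bitmap and the live stack with VerifyLiveStackReferences; returns true only if no
// missing card marks were found.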
2493bool Heap::VerifyMissingCardMarks() {
2494  Thread* self = Thread::Current();
2495  Locks::mutator_lock_->AssertExclusiveHeld(self);
2496
2497  // We need to sort the live stack since we binary search it.
2498  live_stack_->Sort();
2499  // Since we sorted the live stack content, we need to revoke all
2500  // thread-local allocation stacks.
2501  RevokeAllThreadLocalAllocationStacks(self);
2502  VerifyLiveStackReferences visitor(this);
2503  GetLiveBitmap()->Visit(visitor);
2504
2505  // We can verify objects in the live stack since none of these should reference dead objects.
2506  for (mirror::Object** it = live_stack_->Begin(); it != live_stack_->End(); ++it) {
2507    if (!kUseThreadLocalAllocationStack || *it != nullptr) {
2508      visitor(*it);
2509    }
2510  }
2511  return !visitor.Failed();
2512}
2513
2514void Heap::SwapStacks(Thread* self) {
2515  if (kUseThreadLocalAllocationStack) {
2516    live_stack_->AssertAllZero();
2517  }
2518  allocation_stack_.swap(live_stack_);
2519}
2520
2521void Heap::RevokeAllThreadLocalAllocationStacks(Thread* self) {
2522  // This must be called only during the pause.
2523  CHECK(Locks::mutator_lock_->IsExclusiveHeld(self));
2524  MutexLock mu(self, *Locks::runtime_shutdown_lock_);
2525  MutexLock mu2(self, *Locks::thread_list_lock_);
2526  std::list<Thread*> thread_list = Runtime::Current()->GetThreadList()->GetList();
2527  for (Thread* t : thread_list) {
2528    t->RevokeThreadLocalAllocationStack();
2529  }
2530}
2531
2532void Heap::AssertAllBumpPointerSpaceThreadLocalBuffersAreRevoked() {
2533  if (kIsDebugBuild) {
2534    if (bump_pointer_space_ != nullptr) {
2535      bump_pointer_space_->AssertAllThreadLocalBuffersAreRevoked();
2536    }
2537  }
2538}
2539
2540accounting::ModUnionTable* Heap::FindModUnionTableFromSpace(space::Space* space) {
2541  auto it = mod_union_tables_.find(space);
2542  if (it == mod_union_tables_.end()) {
2543    return nullptr;
2544  }
2545  return it->second;
2546}
2547
2548accounting::RememberedSet* Heap::FindRememberedSetFromSpace(space::Space* space) {
2549  auto it = remembered_sets_.find(space);
2550  if (it == remembered_sets_.end()) {
2551    return nullptr;
2552  }
2553  return it->second;
2554}
2555
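// For each continuous space, cards are either recorded and cleared through the space's mod-union
// table or remembered set, or aged in place so the collector can later tell which cards were dirty
// before the collection started.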
2556void Heap::ProcessCards(TimingLogger* timings, bool use_rem_sets) {
2557  TimingLogger::ScopedTiming t(__FUNCTION__, timings);
2558  // Clear cards and keep track of cards cleared in the mod-union table.
2559  for (const auto& space : continuous_spaces_) {
2560    accounting::ModUnionTable* table = FindModUnionTableFromSpace(space);
2561    accounting::RememberedSet* rem_set = FindRememberedSetFromSpace(space);
2562    if (table != nullptr) {
2563      const char* name = space->IsZygoteSpace() ? "ZygoteModUnionClearCards" :
2564          "ImageModUnionClearCards";
2565      TimingLogger::ScopedTiming t(name, timings);
2566      table->ClearCards();
2567    } else if (use_rem_sets && rem_set != nullptr) {
2568      DCHECK(collector::SemiSpace::kUseRememberedSet && collector_type_ == kCollectorTypeGSS)
2569          << static_cast<int>(collector_type_);
2570      TimingLogger::ScopedTiming t("AllocSpaceRemSetClearCards", timings);
2571      rem_set->ClearCards();
2572    } else if (space->GetType() != space::kSpaceTypeBumpPointerSpace) {
2573      TimingLogger::ScopedTiming t("AllocSpaceClearCards", timings);
2574      // No mod union table for the AllocSpace. Age the cards so that the GC knows that these cards
2575      // were dirty before the GC started.
2576      // TODO: Need to use atomic for the case where aged(cleaning thread) -> dirty(other thread)
2577      // -> clean(cleaning thread).
2578      // The possible races mean we end up with either an aged card or an unaged card. Since we
2579      // checkpoint the roots and then scan / update the mod union tables afterwards, we will
2580      // always scan one of the two. If we end up with the non-aged card, we scan it in the pause.
2581      card_table_->ModifyCardsAtomic(space->Begin(), space->End(), AgeCardVisitor(),
2582                                     VoidFunctor());
2583    }
2584  }
2585}
2586
2587static void IdentityMarkHeapReferenceCallback(mirror::HeapReference<mirror::Object>*, void*) {
2588}
2589
2590void Heap::PreGcVerificationPaused(collector::GarbageCollector* gc) {
2591  Thread* const self = Thread::Current();
2592  TimingLogger* const timings = current_gc_iteration_.GetTimings();
2593  TimingLogger::ScopedTiming t(__FUNCTION__, timings);
2594  if (verify_pre_gc_heap_) {
2595    TimingLogger::ScopedTiming t("(Paused)PreGcVerifyHeapReferences", timings);
2596    ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
2597    size_t failures = VerifyHeapReferences();
2598    if (failures > 0) {
2599      LOG(FATAL) << "Pre " << gc->GetName() << " heap verification failed with " << failures
2600          << " failures";
2601    }
2602  }
2603  // Check that all objects which reference things in the live stack are on dirty cards.
2604  if (verify_missing_card_marks_) {
2605    TimingLogger::ScopedTiming t("(Paused)PreGcVerifyMissingCardMarks", timings);
2606    ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
2607    SwapStacks(self);
2608    // Sort the live stack so that we can quickly binary search it later.
2609    CHECK(VerifyMissingCardMarks()) << "Pre " << gc->GetName()
2610                                    << " missing card mark verification failed\n" << DumpSpaces();
2611    SwapStacks(self);
2612  }
2613  if (verify_mod_union_table_) {
2614    TimingLogger::ScopedTiming t("(Paused)PreGcVerifyModUnionTables", timings);
2615    ReaderMutexLock reader_lock(self, *Locks::heap_bitmap_lock_);
2616    for (const auto& table_pair : mod_union_tables_) {
2617      accounting::ModUnionTable* mod_union_table = table_pair.second;
2618      mod_union_table->UpdateAndMarkReferences(IdentityMarkHeapReferenceCallback, nullptr);
2619      mod_union_table->Verify();
2620    }
2621  }
2622}
2623
2624void Heap::PreGcVerification(collector::GarbageCollector* gc) {
2625  if (verify_pre_gc_heap_ || verify_missing_card_marks_ || verify_mod_union_table_) {
2626    collector::GarbageCollector::ScopedPause pause(gc);
2627    PreGcVerificationPaused(gc);
2628  }
2629}
2630
2631void Heap::PrePauseRosAllocVerification(collector::GarbageCollector* gc) {
2632  // TODO: Add a new runtime option for this?
2633  if (verify_pre_gc_rosalloc_) {
2634    RosAllocVerification(current_gc_iteration_.GetTimings(), "PreGcRosAllocVerification");
2635  }
2636}
2637
2638void Heap::PreSweepingGcVerification(collector::GarbageCollector* gc) {
2639  Thread* const self = Thread::Current();
2640  TimingLogger* const timings = current_gc_iteration_.GetTimings();
2641  TimingLogger::ScopedTiming t(__FUNCTION__, timings);
2642  // Called before sweeping occurs since we want to make sure we are not going to reclaim any
2643  // reachable objects.
2644  if (verify_pre_sweeping_heap_) {
2645    TimingLogger::ScopedTiming t("(Paused)PreSweepingVerifyHeapReferences", timings);
2646    CHECK_NE(self->GetState(), kRunnable);
2647    WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
2648    // Swapping bound bitmaps does nothing.
2649    gc->SwapBitmaps();
2650    // Pass in false since concurrent reference processing can mean that the reference referents
2651    // may point to dead objects at the point which PreSweepingGcVerification is called.
2652    size_t failures = VerifyHeapReferences(false);
2653    if (failures > 0) {
2654      LOG(FATAL) << "Pre sweeping " << gc->GetName() << " GC verification failed with " << failures
2655          << " failures";
2656    }
2657    gc->SwapBitmaps();
2658  }
2659  if (verify_pre_sweeping_rosalloc_) {
2660    RosAllocVerification(timings, "PreSweepingRosAllocVerification");
2661  }
2662}
2663
2664void Heap::PostGcVerificationPaused(collector::GarbageCollector* gc) {
2665  // Only pause if we have to do some verification.
2666  Thread* const self = Thread::Current();
2667  TimingLogger* const timings = GetCurrentGcIteration()->GetTimings();
2668  TimingLogger::ScopedTiming t(__FUNCTION__, timings);
2669  if (verify_system_weaks_) {
2670    ReaderMutexLock mu2(self, *Locks::heap_bitmap_lock_);
2671    collector::MarkSweep* mark_sweep = down_cast<collector::MarkSweep*>(gc);
2672    mark_sweep->VerifySystemWeaks();
2673  }
2674  if (verify_post_gc_rosalloc_) {
2675    RosAllocVerification(timings, "(Paused)PostGcRosAllocVerification");
2676  }
2677  if (verify_post_gc_heap_) {
2678    TimingLogger::ScopedTiming t("(Paused)PostGcVerifyHeapReferences", timings);
2679    ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
2680    size_t failures = VerifyHeapReferences();
2681    if (failures > 0) {
2682      LOG(FATAL) << "Post " << gc->GetName() << " heap verification failed with " << failures
2683          << " failures";
2684    }
2685  }
2686}
2687
2688void Heap::PostGcVerification(collector::GarbageCollector* gc) {
2689  if (verify_system_weaks_ || verify_post_gc_rosalloc_ || verify_post_gc_heap_) {
2690    collector::GarbageCollector::ScopedPause pause(gc);
2691    PostGcVerificationPaused(gc);
2692  }
2693}
2694
2695void Heap::RosAllocVerification(TimingLogger* timings, const char* name) {
2696  TimingLogger::ScopedTiming t(name, timings);
2697  for (const auto& space : continuous_spaces_) {
2698    if (space->IsRosAllocSpace()) {
2699      VLOG(heap) << name << " : " << space->GetName();
2700      space->AsRosAllocSpace()->Verify();
2701    }
2702  }
2703}
2704
2705collector::GcType Heap::WaitForGcToComplete(GcCause cause, Thread* self) {
2706  ScopedThreadStateChange tsc(self, kWaitingForGcToComplete);
2707  MutexLock mu(self, *gc_complete_lock_);
2708  return WaitForGcToCompleteLocked(cause, self);
2709}
2710
2711collector::GcType Heap::WaitForGcToCompleteLocked(GcCause cause, Thread* self) {
2712  collector::GcType last_gc_type = collector::kGcTypeNone;
2713  uint64_t wait_start = NanoTime();
2714  while (collector_type_running_ != kCollectorTypeNone) {
2715    ATRACE_BEGIN("GC: Wait For Completion");
2716    // We must wait, change thread state then sleep on gc_complete_cond_;
2717    gc_complete_cond_->Wait(self);
2718    last_gc_type = last_gc_type_;
2719    ATRACE_END();
2720  }
2721  uint64_t wait_time = NanoTime() - wait_start;
2722  total_wait_time_ += wait_time;
2723  if (wait_time > long_pause_log_threshold_) {
2724    LOG(INFO) << "WaitForGcToComplete blocked for " << PrettyDuration(wait_time)
2725        << " for cause " << cause;
2726  }
2727  return last_gc_type;
2728}
2729
2730void Heap::DumpForSigQuit(std::ostream& os) {
2731  os << "Heap: " << GetPercentFree() << "% free, " << PrettySize(GetBytesAllocated()) << "/"
2732     << PrettySize(GetTotalMemory()) << "; " << GetObjectsAllocated() << " objects\n";
2733  DumpGcPerformanceInfo(os);
2734}
2735
2736size_t Heap::GetPercentFree() {
2737  return static_cast<size_t>(100.0f * static_cast<float>(GetFreeMemory()) / max_allowed_footprint_);
2738}
2739
2740void Heap::SetIdealFootprint(size_t max_allowed_footprint) {
2741  if (max_allowed_footprint > GetMaxMemory()) {
2742    VLOG(gc) << "Clamp target GC heap from " << PrettySize(max_allowed_footprint) << " to "
2743             << PrettySize(GetMaxMemory());
2744    max_allowed_footprint = GetMaxMemory();
2745  }
2746  max_allowed_footprint_ = max_allowed_footprint;
2747}
2748
2749bool Heap::IsMovableObject(const mirror::Object* obj) const {
2750  if (kMovingCollector) {
2751    space::Space* space = FindContinuousSpaceFromObject(obj, true);
2752    if (space != nullptr) {
2753      // TODO: Check large object?
2754      return space->CanMoveObjects();
2755    }
2756  }
2757  return false;
2758}
2759
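// Recomputes the native allocation watermarks from native_bytes_allocated_ and the target heap
// utilization: native_footprint_gc_watermark_ is the level at which a GC is triggered, and
// native_footprint_limit_ (2 * target_size - native_size) is the level at which registering native
// allocations waits for GC and finalization. Illustrative example (assumed values, not necessarily
// the configured defaults): with 10 MB allocated, a 0.5 utilization target and a 2 MB max_free_,
// target_size is clamped from 20 MB down to 12 MB, giving a GC watermark of 12 MB and a hard limit
// of 14 MB.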
2760void Heap::UpdateMaxNativeFootprint() {
2761  size_t native_size = native_bytes_allocated_.LoadRelaxed();
2762  // TODO: Tune the native heap utilization to be a value other than the java heap utilization.
2763  size_t target_size = native_size / GetTargetHeapUtilization();
2764  if (target_size > native_size + max_free_) {
2765    target_size = native_size + max_free_;
2766  } else if (target_size < native_size + min_free_) {
2767    target_size = native_size + min_free_;
2768  }
2769  native_footprint_gc_watermark_ = target_size;
2770  native_footprint_limit_ = 2 * target_size - native_size;
2771}
2772
2773collector::GarbageCollector* Heap::FindCollectorByGcType(collector::GcType gc_type) {
2774  for (const auto& collector : garbage_collectors_) {
2775    if (collector->GetCollectorType() == collector_type_ &&
2776        collector->GetGcType() == gc_type) {
2777      return collector;
2778    }
2779  }
2780  return nullptr;
2781}
2782
2783double Heap::HeapGrowthMultiplier() const {
2784  // Return 1.0 if we don't care about pause times (we are in the background) or are in low memory mode.
2785  if (!CareAboutPauseTimes() || IsLowMemoryMode()) {
2786    return 1.0;
2787  }
2788  return foreground_heap_growth_multiplier_;
2789}
2790
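// After a collection, recomputes the ideal footprint from the surviving bytes and the target
// utilization (scaled by HeapGrowthMultiplier() for non-sticky GCs), chooses the next GC type by
// comparing sticky GC throughput with the non-sticky collector's mean throughput, and, for
// concurrent collectors, sets concurrent_start_bytes_ far enough below the footprint that a GC
// started there should finish before the footprint is exhausted at the measured allocation rate.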
2791void Heap::GrowForUtilization(collector::GarbageCollector* collector_ran) {
2792  // We know what our utilization is at this moment.
2793  // This doesn't actually resize any memory. It just lets the heap grow more when necessary.
2794  const uint64_t bytes_allocated = GetBytesAllocated();
2795  last_gc_size_ = bytes_allocated;
2796  last_gc_time_ns_ = NanoTime();
2797  uint64_t target_size;
2798  collector::GcType gc_type = collector_ran->GetGcType();
2799  if (gc_type != collector::kGcTypeSticky) {
2800    // Grow the heap for non sticky GC.
2801    const float multiplier = HeapGrowthMultiplier();  // Use the multiplier to grow more for
2802    // foreground.
2803    intptr_t delta = bytes_allocated / GetTargetHeapUtilization() - bytes_allocated;
2804    CHECK_GE(delta, 0);
2805    target_size = bytes_allocated + delta * multiplier;
2806    target_size = std::min(target_size,
2807                           bytes_allocated + static_cast<uint64_t>(max_free_ * multiplier));
2808    target_size = std::max(target_size,
2809                           bytes_allocated + static_cast<uint64_t>(min_free_ * multiplier));
2810    native_need_to_run_finalization_ = true;
2811    next_gc_type_ = collector::kGcTypeSticky;
2812  } else {
2813    collector::GcType non_sticky_gc_type =
2814        have_zygote_space_ ? collector::kGcTypePartial : collector::kGcTypeFull;
2815    // Find what the next non sticky collector will be.
2816    collector::GarbageCollector* non_sticky_collector = FindCollectorByGcType(non_sticky_gc_type);
2817    // If the throughput of the current sticky GC >= throughput of the non sticky collector, then
2818    // do another sticky collection next.
2819    // We also check that the bytes allocated aren't over the footprint limit in order to prevent a
2820    // pathological case where dead objects which aren't reclaimed by sticky could get accumulated
2821    // if the sticky GC throughput always remained >= the full/partial throughput.
2822    if (current_gc_iteration_.GetEstimatedThroughput() * kStickyGcThroughputAdjustment >=
2823        non_sticky_collector->GetEstimatedMeanThroughput() &&
2824        non_sticky_collector->NumberOfIterations() > 0 &&
2825        bytes_allocated <= max_allowed_footprint_) {
2826      next_gc_type_ = collector::kGcTypeSticky;
2827    } else {
2828      next_gc_type_ = non_sticky_gc_type;
2829    }
2830    // If we have freed enough memory, shrink the heap back down.
2831    if (bytes_allocated + max_free_ < max_allowed_footprint_) {
2832      target_size = bytes_allocated + max_free_;
2833    } else {
2834      target_size = std::max(bytes_allocated, static_cast<uint64_t>(max_allowed_footprint_));
2835    }
2836  }
2837  if (!ignore_max_footprint_) {
2838    SetIdealFootprint(target_size);
2839    if (IsGcConcurrent()) {
2840      // Calculate when to perform the next ConcurrentGC.
2841      // Calculate the estimated GC duration.
2842      const double gc_duration_seconds = NsToMs(current_gc_iteration_.GetDurationNs()) / 1000.0;
2843      // Estimate how many remaining bytes we will have when we need to start the next GC.
2844      size_t remaining_bytes = allocation_rate_ * gc_duration_seconds;
2845      remaining_bytes = std::min(remaining_bytes, kMaxConcurrentRemainingBytes);
2846      remaining_bytes = std::max(remaining_bytes, kMinConcurrentRemainingBytes);
2847      if (UNLIKELY(remaining_bytes > max_allowed_footprint_)) {
2848        // An unlikely situation where the estimated allocation rate implies we would exceed the
2849        // application's entire footprint before the next GC. Schedule another GC nearly straight
2850        // away.
2851        remaining_bytes = kMinConcurrentRemainingBytes;
2852      }
2853      DCHECK_LE(remaining_bytes, max_allowed_footprint_);
2854      DCHECK_LE(max_allowed_footprint_, growth_limit_);
2855      // Start a concurrent GC when we get close to the estimated remaining bytes. When the
2856      // allocation rate is very high, remaining_bytes could tell us that we should start a GC
2857      // right away.
2858      concurrent_start_bytes_ = std::max(max_allowed_footprint_ - remaining_bytes,
2859                                         static_cast<size_t>(bytes_allocated));
2860    }
2861  }
2862}
2863
2864void Heap::ClearGrowthLimit() {
2865  growth_limit_ = capacity_;
2866  non_moving_space_->ClearGrowthLimit();
2867}
2868
2869void Heap::AddFinalizerReference(Thread* self, mirror::Object** object) {
2870  ScopedObjectAccess soa(self);
2871  ScopedLocalRef<jobject> arg(self->GetJniEnv(), soa.AddLocalReference<jobject>(*object));
2872  jvalue args[1];
2873  args[0].l = arg.get();
2874  InvokeWithJValues(soa, nullptr, WellKnownClasses::java_lang_ref_FinalizerReference_add, args);
2875  // Restore object in case it gets moved.
2876  *object = soa.Decode<mirror::Object*>(arg.get());
2877}
2878
2879void Heap::RequestConcurrentGCAndSaveObject(Thread* self, mirror::Object** obj) {
2880  StackHandleScope<1> hs(self);
2881  HandleWrapper<mirror::Object> wrapper(hs.NewHandleWrapper(obj));
2882  RequestConcurrentGC(self);
2883}
2884
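// Asks the Java Daemons thread to run a background GC. concurrent_start_bytes_ is reset to the
// maximum so that further allocations do not re-request a GC before GrowForUtilization recomputes
// it after the collection.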
2885void Heap::RequestConcurrentGC(Thread* self) {
2886  // Make sure that we can do a concurrent GC.
2887  Runtime* runtime = Runtime::Current();
2888  if (runtime == nullptr || !runtime->IsFinishedStarting() || runtime->IsShuttingDown(self) ||
2889      self->IsHandlingStackOverflow()) {
2890    return;
2891  }
2892  // We already have a request pending, no reason to start more until we update
2893  // concurrent_start_bytes_.
2894  concurrent_start_bytes_ = std::numeric_limits<size_t>::max();
2895  JNIEnv* env = self->GetJniEnv();
2896  DCHECK(WellKnownClasses::java_lang_Daemons != nullptr);
2897  DCHECK(WellKnownClasses::java_lang_Daemons_requestGC != nullptr);
2898  env->CallStaticVoidMethod(WellKnownClasses::java_lang_Daemons,
2899                            WellKnownClasses::java_lang_Daemons_requestGC);
2900  CHECK(!env->ExceptionCheck());
2901}
2902
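// Runs the background collection requested above: waits for any in-progress GC and, if none ran in
// the meantime, attempts the planned next_gc_type_, falling back to the more thorough types in
// gc_plan_ if the planned one cannot run.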
2903void Heap::ConcurrentGC(Thread* self) {
2904  if (Runtime::Current()->IsShuttingDown(self)) {
2905    return;
2906  }
2907  // Wait for any GCs currently running to finish.
2908  if (WaitForGcToComplete(kGcCauseBackground, self) == collector::kGcTypeNone) {
2909    // If we can't run the GC type we wanted to run, find the next appropriate one and try that
2910    // instead. E.g. can't do partial, so do full instead.
2911    if (CollectGarbageInternal(next_gc_type_, kGcCauseBackground, false) ==
2912        collector::kGcTypeNone) {
2913      for (collector::GcType gc_type : gc_plan_) {
2914        // Attempt to run the collector, if we succeed, we are done.
2915        if (gc_type > next_gc_type_ &&
2916            CollectGarbageInternal(gc_type, kGcCauseBackground, false) != collector::kGcTypeNone) {
2917          break;
2918        }
2919      }
2920    }
2921  }
2922}
2923
2924void Heap::RequestCollectorTransition(CollectorType desired_collector_type, uint64_t delta_time) {
2925  Thread* self = Thread::Current();
2926  {
2927    MutexLock mu(self, *heap_trim_request_lock_);
2928    if (desired_collector_type_ == desired_collector_type) {
2929      return;
2930    }
2931    heap_transition_or_trim_target_time_ =
2932        std::max(heap_transition_or_trim_target_time_, NanoTime() + delta_time);
2933    desired_collector_type_ = desired_collector_type;
2934  }
2935  SignalHeapTrimDaemon(self);
2936}
2937
2938void Heap::RequestHeapTrim() {
2939  // GC completed and now we must decide whether to request a heap trim (advising pages back to the
2940  // kernel) or not. Issuing a request will also cause trimming of the libc heap. As a trim scans
2941  // a space it will hold its lock and can become a cause of jank.
2942  // Note that the large object space self trims and the Zygote space was trimmed at fork time and
2943  // is unchanging afterwards.
2944
2945  // We don't have a good measure of how worthwhile a trim might be. We can't use the live bitmap
2946  // because that only marks object heads, so a large array looks like lots of empty space. We
2947  // don't just call dlmalloc all the time, because the cost of an _attempted_ trim is proportional
2948  // to utilization (which is probably inversely proportional to how much benefit we can expect).
2949  // We could try mincore(2) but that's only a measure of how many pages we haven't given away,
2950  // not how much use we're making of those pages.
2951
2952  Thread* self = Thread::Current();
2953  Runtime* runtime = Runtime::Current();
2954  if (runtime == nullptr || !runtime->IsFinishedStarting() || runtime->IsShuttingDown(self)) {
2955    // Heap trimming isn't supported without a Java runtime or Daemons (such as at dex2oat time).
2956    // Also: we do not wish to start a heap trim if the runtime is shutting down (a racy check
2957    // as we don't hold the lock while requesting the trim).
2958    return;
2959  }
2960  {
2961    MutexLock mu(self, *heap_trim_request_lock_);
2962    if (last_trim_time_ + kHeapTrimWait >= NanoTime()) {
2963      // We have done a heap trim in the last kHeapTrimWait nanosecs, don't request another one
2964      // just yet.
2965      return;
2966    }
2967    heap_trim_request_pending_ = true;
2968    uint64_t current_time = NanoTime();
2969    if (heap_transition_or_trim_target_time_ < current_time) {
2970      heap_transition_or_trim_target_time_ = current_time + kHeapTrimWait;
2971    }
2972  }
2973  // Notify the daemon thread which will actually do the heap trim.
2974  SignalHeapTrimDaemon(self);
2975}
2976
2977void Heap::SignalHeapTrimDaemon(Thread* self) {
2978  JNIEnv* env = self->GetJniEnv();
2979  DCHECK(WellKnownClasses::java_lang_Daemons != nullptr);
2980  DCHECK(WellKnownClasses::java_lang_Daemons_requestHeapTrim != nullptr);
2981  env->CallStaticVoidMethod(WellKnownClasses::java_lang_Daemons,
2982                            WellKnownClasses::java_lang_Daemons_requestHeapTrim);
2983  CHECK(!env->ExceptionCheck());
2984}
2985
2986void Heap::RevokeThreadLocalBuffers(Thread* thread) {
2987  if (rosalloc_space_ != nullptr) {
2988    rosalloc_space_->RevokeThreadLocalBuffers(thread);
2989  }
2990  if (bump_pointer_space_ != nullptr) {
2991    bump_pointer_space_->RevokeThreadLocalBuffers(thread);
2992  }
2993}
2994
2995void Heap::RevokeRosAllocThreadLocalBuffers(Thread* thread) {
2996  if (rosalloc_space_ != nullptr) {
2997    rosalloc_space_->RevokeThreadLocalBuffers(thread);
2998  }
2999}
3000
3001void Heap::RevokeAllThreadLocalBuffers() {
3002  if (rosalloc_space_ != nullptr) {
3003    rosalloc_space_->RevokeAllThreadLocalBuffers();
3004  }
3005  if (bump_pointer_space_ != nullptr) {
3006    bump_pointer_space_->RevokeAllThreadLocalBuffers();
3007  }
3008}
3009
3010bool Heap::IsGCRequestPending() const {
3011  return concurrent_start_bytes_ != std::numeric_limits<size_t>::max();
3012}
3013
3014void Heap::RunFinalization(JNIEnv* env) {
3015  // Can't do this in WellKnownClasses::Init since System is not properly set up at that point.
3016  if (WellKnownClasses::java_lang_System_runFinalization == nullptr) {
3017    CHECK(WellKnownClasses::java_lang_System != nullptr);
3018    WellKnownClasses::java_lang_System_runFinalization =
3019        CacheMethod(env, WellKnownClasses::java_lang_System, true, "runFinalization", "()V");
3020    CHECK(WellKnownClasses::java_lang_System_runFinalization != nullptr);
3021  }
3022  env->CallStaticVoidMethod(WellKnownClasses::java_lang_System,
3023                            WellKnownClasses::java_lang_System_runFinalization);
3024}
3025
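// Accounts for 'bytes' of native allocation. Crossing native_footprint_gc_watermark_ requests a
// (possibly concurrent) GC; crossing native_footprint_limit_ additionally waits for the GC and runs
// finalizers before returning, then refreshes the watermarks.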
3026void Heap::RegisterNativeAllocation(JNIEnv* env, int bytes) {
3027  Thread* self = ThreadForEnv(env);
3028  if (native_need_to_run_finalization_) {
3029    RunFinalization(env);
3030    UpdateMaxNativeFootprint();
3031    native_need_to_run_finalization_ = false;
3032  }
3033  // Total number of native bytes allocated; FetchAndAdd returns the pre-add value, so add bytes below.
3034  size_t new_native_bytes_allocated = native_bytes_allocated_.FetchAndAddSequentiallyConsistent(bytes);
3035  new_native_bytes_allocated += bytes;
3036  if (new_native_bytes_allocated > native_footprint_gc_watermark_) {
3037    collector::GcType gc_type = have_zygote_space_ ? collector::kGcTypePartial :
3038        collector::kGcTypeFull;
3039
3040    // The second watermark is higher than the gc watermark. If you hit this it means you are
3041    // allocating native objects faster than the GC can keep up with.
3042    if (new_native_bytes_allocated > native_footprint_limit_) {
3043      if (WaitForGcToComplete(kGcCauseForNativeAlloc, self) != collector::kGcTypeNone) {
3044        // Just finished a GC, attempt to run finalizers.
3045        RunFinalization(env);
3046        CHECK(!env->ExceptionCheck());
3047      }
3048      // If we still are over the watermark, attempt a GC for alloc and run finalizers.
3049      if (new_native_bytes_allocated > native_footprint_limit_) {
3050        CollectGarbageInternal(gc_type, kGcCauseForNativeAlloc, false);
3051        RunFinalization(env);
3052        native_need_to_run_finalization_ = false;
3053        CHECK(!env->ExceptionCheck());
3054      }
3055      // We have just run finalizers, update the native watermark since it is very likely that
3056      // finalizers released native managed allocations.
3057      UpdateMaxNativeFootprint();
3058    } else if (!IsGCRequestPending()) {
3059      if (IsGcConcurrent()) {
3060        RequestConcurrentGC(self);
3061      } else {
3062        CollectGarbageInternal(gc_type, kGcCauseForNativeAlloc, false);
3063      }
3064    }
3065  }
3066}
3067
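// Balances RegisterNativeAllocation: decrements native_bytes_allocated_ with a compare-and-swap
// loop and throws a RuntimeException if more bytes are freed than were registered as allocated.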
3068void Heap::RegisterNativeFree(JNIEnv* env, int bytes) {
3069  int expected_size, new_size;
3070  do {
3071    expected_size = native_bytes_allocated_.LoadRelaxed();
3072    new_size = expected_size - bytes;
3073    if (UNLIKELY(new_size < 0)) {
3074      ScopedObjectAccess soa(env);
3075      env->ThrowNew(WellKnownClasses::java_lang_RuntimeException,
3076                    StringPrintf("Attempted to free %d native bytes with only %d native bytes "
3077                                 "registered as allocated", bytes, expected_size).c_str());
3078      break;
3079    }
3080  } while (!native_bytes_allocated_.CompareExchangeWeakRelaxed(expected_size, new_size));
3081}
3082
3083size_t Heap::GetTotalMemory() const {
3084  size_t ret = 0;
3085  for (const auto& space : continuous_spaces_) {
3086    // Currently don't include the image space.
3087    if (!space->IsImageSpace()) {
3088      ret += space->Size();
3089    }
3090  }
3091  for (const auto& space : discontinuous_spaces_) {
3092    if (space->IsLargeObjectSpace()) {
3093      ret += space->AsLargeObjectSpace()->GetBytesAllocated();
3094    }
3095  }
3096  return ret;
3097}
3098
3099void Heap::AddModUnionTable(accounting::ModUnionTable* mod_union_table) {
3100  DCHECK(mod_union_table != nullptr);
3101  mod_union_tables_.Put(mod_union_table->GetSpace(), mod_union_table);
3102}
3103
3104void Heap::CheckPreconditionsForAllocObject(mirror::Class* c, size_t byte_count) {
3105  CHECK(c == NULL || (c->IsClassClass() && byte_count >= sizeof(mirror::Class)) ||
3106        (c->IsVariableSize() || c->GetObjectSize() == byte_count) ||
3107        c->GetDescriptor().empty());
3108  CHECK_GE(byte_count, sizeof(mirror::Object));
3109}
3110
3111void Heap::AddRememberedSet(accounting::RememberedSet* remembered_set) {
3112  CHECK(remembered_set != nullptr);
3113  space::Space* space = remembered_set->GetSpace();
3114  CHECK(space != nullptr);
3115  CHECK(remembered_sets_.find(space) == remembered_sets_.end()) << space;
3116  remembered_sets_.Put(space, remembered_set);
3117  CHECK(remembered_sets_.find(space) != remembered_sets_.end()) << space;
3118}
3119
3120void Heap::RemoveRememberedSet(space::Space* space) {
3121  CHECK(space != nullptr);
3122  auto it = remembered_sets_.find(space);
3123  CHECK(it != remembered_sets_.end());
3124  remembered_sets_.erase(it);
3125  CHECK(remembered_sets_.find(space) == remembered_sets_.end());
3126}
3127
3128void Heap::ClearMarkedObjects() {
3129  // Clear all of the spaces' mark bitmaps.
3130  for (const auto& space : GetContinuousSpaces()) {
3131    accounting::ContinuousSpaceBitmap* mark_bitmap = space->GetMarkBitmap();
3132    if (space->GetLiveBitmap() != mark_bitmap) {
3133      mark_bitmap->Clear();
3134    }
3135  }
3136  // Clear the marked objects in the discontinuous space object sets.
3137  for (const auto& space : GetDiscontinuousSpaces()) {
3138    space->GetMarkBitmap()->Clear();
3139  }
3140}
3141
3142}  // namespace gc
3143}  // namespace art
3144