heap.cc revision a4adbfd44032d70e166e6f18096bbbed05a990ba
/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "heap.h"

#define ATRACE_TAG ATRACE_TAG_DALVIK
#include <cutils/trace.h>

#include <limits>
#include <vector>
#include <valgrind.h>

#include "base/histogram-inl.h"
#include "base/stl_util.h"
#include "common_throws.h"
#include "cutils/sched_policy.h"
#include "debugger.h"
#include "gc/accounting/atomic_stack.h"
#include "gc/accounting/card_table-inl.h"
#include "gc/accounting/heap_bitmap-inl.h"
#include "gc/accounting/mod_union_table.h"
#include "gc/accounting/mod_union_table-inl.h"
#include "gc/accounting/space_bitmap-inl.h"
#include "gc/collector/mark_sweep-inl.h"
#include "gc/collector/partial_mark_sweep.h"
#include "gc/collector/semi_space.h"
#include "gc/collector/sticky_mark_sweep.h"
#include "gc/space/bump_pointer_space.h"
#include "gc/space/dlmalloc_space-inl.h"
#include "gc/space/image_space.h"
#include "gc/space/large_object_space.h"
#include "gc/space/rosalloc_space-inl.h"
#include "gc/space/space-inl.h"
#include "gc/space/zygote_space.h"
#include "heap-inl.h"
#include "image.h"
#include "invoke_arg_array_builder.h"
#include "mirror/art_field-inl.h"
#include "mirror/class-inl.h"
#include "mirror/object.h"
#include "mirror/object-inl.h"
#include "mirror/object_array-inl.h"
#include "object_utils.h"
#include "os.h"
#include "runtime.h"
#include "ScopedLocalRef.h"
#include "scoped_thread_state_change.h"
#include "sirt_ref.h"
#include "thread_list.h"
#include "UniquePtr.h"
#include "well_known_classes.h"

namespace art {

extern void SetQuickAllocEntryPointsAllocator(gc::AllocatorType allocator);

namespace gc {

static constexpr bool kGCALotMode = false;
static constexpr size_t kGcAlotInterval = KB;
// Minimum amount of remaining bytes before a concurrent GC is triggered.
static constexpr size_t kMinConcurrentRemainingBytes = 128 * KB;
static constexpr size_t kMaxConcurrentRemainingBytes = 512 * KB;

Heap::Heap(size_t initial_size, size_t growth_limit, size_t min_free, size_t max_free,
           double target_utilization, size_t capacity, const std::string& image_file_name,
           CollectorType post_zygote_collector_type, CollectorType background_collector_type,
           size_t parallel_gc_threads, size_t conc_gc_threads, bool low_memory_mode,
           size_t long_pause_log_threshold, size_t long_gc_log_threshold,
           bool ignore_max_footprint, bool use_tlab, bool verify_pre_gc_heap,
           bool verify_post_gc_heap, bool verify_pre_gc_rosalloc,
           bool verify_post_gc_rosalloc)
    : non_moving_space_(nullptr),
      rosalloc_space_(nullptr),
      dlmalloc_space_(nullptr),
      main_space_(nullptr),
      concurrent_gc_(false),
      collector_type_(kCollectorTypeNone),
      post_zygote_collector_type_(post_zygote_collector_type),
      background_collector_type_(background_collector_type),
      parallel_gc_threads_(parallel_gc_threads),
      conc_gc_threads_(conc_gc_threads),
      low_memory_mode_(low_memory_mode),
      long_pause_log_threshold_(long_pause_log_threshold),
      long_gc_log_threshold_(long_gc_log_threshold),
      ignore_max_footprint_(ignore_max_footprint),
      have_zygote_space_(false),
      soft_reference_queue_(this),
      weak_reference_queue_(this),
      finalizer_reference_queue_(this),
      phantom_reference_queue_(this),
      cleared_references_(this),
      collector_type_running_(kCollectorTypeNone),
      last_gc_type_(collector::kGcTypeNone),
      next_gc_type_(collector::kGcTypePartial),
      capacity_(capacity),
      growth_limit_(growth_limit),
      max_allowed_footprint_(initial_size),
      native_footprint_gc_watermark_(initial_size),
      native_footprint_limit_(2 * initial_size),
      native_need_to_run_finalization_(false),
      // Initially assume we perceive jank in case the process state is never updated.
      process_state_(kProcessStateJankPerceptible),
      concurrent_start_bytes_(std::numeric_limits<size_t>::max()),
      total_bytes_freed_ever_(0),
      total_objects_freed_ever_(0),
      num_bytes_allocated_(0),
      native_bytes_allocated_(0),
      gc_memory_overhead_(0),
      verify_missing_card_marks_(false),
      verify_system_weaks_(false),
      verify_pre_gc_heap_(verify_pre_gc_heap),
      verify_post_gc_heap_(verify_post_gc_heap),
      verify_mod_union_table_(false),
      verify_pre_gc_rosalloc_(verify_pre_gc_rosalloc),
      verify_post_gc_rosalloc_(verify_post_gc_rosalloc),
      last_trim_time_ms_(0),
      allocation_rate_(0),
      /* For GC a lot mode, we limit the allocation stacks to be kGcAlotInterval allocations. This
       * causes a lot of GC since we do a GC for alloc whenever the stack is full. When heap
       * verification is enabled, we limit the size of allocation stacks to speed up their
       * searching.
       */
      max_allocation_stack_size_(kGCALotMode ? kGcAlotInterval
          : (kDesiredHeapVerification > kVerifyAllFast) ? KB : MB),
      current_allocator_(kAllocatorTypeDlMalloc),
      current_non_moving_allocator_(kAllocatorTypeNonMoving),
      bump_pointer_space_(nullptr),
      temp_space_(nullptr),
      reference_referent_offset_(0),
      reference_queue_offset_(0),
      reference_queueNext_offset_(0),
      reference_pendingNext_offset_(0),
      finalizer_reference_zombie_offset_(0),
      min_free_(min_free),
      max_free_(max_free),
      target_utilization_(target_utilization),
      total_wait_time_(0),
      total_allocation_time_(0),
      verify_object_mode_(kHeapVerificationNotPermitted),
      disable_moving_gc_count_(0),
      running_on_valgrind_(RUNNING_ON_VALGRIND),
      use_tlab_(use_tlab) {
  if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
    LOG(INFO) << "Heap() entering";
  }
  // If we aren't the zygote, switch to the default non-zygote allocator. This may update the
  // entrypoints.
  if (!Runtime::Current()->IsZygote() || !kMovingCollector) {
    ChangeCollector(post_zygote_collector_type_);
  } else {
    // We are the zygote, use bump pointer allocation + semi-space collector.
    ChangeCollector(kCollectorTypeSS);
  }

  live_bitmap_.reset(new accounting::HeapBitmap(this));
  mark_bitmap_.reset(new accounting::HeapBitmap(this));
  // Requested begin for the alloc space, to follow the mapped image and oat files.
  byte* requested_alloc_space_begin = nullptr;
  if (!image_file_name.empty()) {
    space::ImageSpace* image_space = space::ImageSpace::Create(image_file_name.c_str());
    CHECK(image_space != nullptr) << "Failed to create space for " << image_file_name;
    AddSpace(image_space);
    // Oat files referenced by image files immediately follow them in memory, so ensure the alloc
    // space doesn't end up in the middle.
    byte* oat_file_end_addr = image_space->GetImageHeader().GetOatFileEnd();
    CHECK_GT(oat_file_end_addr, image_space->End());
    if (oat_file_end_addr > requested_alloc_space_begin) {
      requested_alloc_space_begin = AlignUp(oat_file_end_addr, kPageSize);
    }
  }
  const char* name = Runtime::Current()->IsZygote() ? "zygote space" : "alloc space";
  space::MallocSpace* malloc_space;
  if (kUseRosAlloc) {
    malloc_space = space::RosAllocSpace::Create(name, initial_size, growth_limit, capacity,
                                                requested_alloc_space_begin, low_memory_mode_);
    CHECK(malloc_space != nullptr) << "Failed to create rosalloc space";
  } else {
    malloc_space = space::DlMallocSpace::Create(name, initial_size, growth_limit, capacity,
                                                requested_alloc_space_begin);
    CHECK(malloc_space != nullptr) << "Failed to create dlmalloc space";
  }
  VLOG(heap) << "malloc_space : " << malloc_space;
  if (kMovingCollector) {
    // TODO: Place bump-pointer spaces somewhere to minimize size of card table.
    // TODO: Having 3+ spaces as big as the large heap size can cause virtual memory fragmentation
    // issues.
    const size_t bump_pointer_space_size = std::min(malloc_space->Capacity(), 128 * MB);
    bump_pointer_space_ = space::BumpPointerSpace::Create("Bump pointer space",
                                                          bump_pointer_space_size, nullptr);
    CHECK(bump_pointer_space_ != nullptr) << "Failed to create bump pointer space";
    AddSpace(bump_pointer_space_);
    temp_space_ = space::BumpPointerSpace::Create("Bump pointer space 2", bump_pointer_space_size,
                                                  nullptr);
    CHECK(temp_space_ != nullptr) << "Failed to create bump pointer space";
    AddSpace(temp_space_);
    VLOG(heap) << "bump_pointer_space : " << bump_pointer_space_;
    VLOG(heap) << "temp_space : " << temp_space_;
  }
  non_moving_space_ = malloc_space;
  malloc_space->SetFootprintLimit(malloc_space->Capacity());
  AddSpace(malloc_space);

  // Allocate the large object space.
  constexpr bool kUseFreeListSpaceForLOS = false;
  if (kUseFreeListSpaceForLOS) {
    large_object_space_ = space::FreeListSpace::Create("large object space", nullptr, capacity);
  } else {
    large_object_space_ = space::LargeObjectMapSpace::Create("large object space");
  }
  CHECK(large_object_space_ != nullptr) << "Failed to create large object space";
  AddSpace(large_object_space_);

  // Compute heap capacity. Continuous spaces are sorted in order of Begin().
  CHECK(!continuous_spaces_.empty());

  // Relies on the spaces being sorted.
  byte* heap_begin = continuous_spaces_.front()->Begin();
  byte* heap_end = continuous_spaces_.back()->Limit();
  if (Runtime::Current()->IsZygote()) {
    std::string error_str;
    post_zygote_non_moving_space_mem_map_.reset(
        MemMap::MapAnonymous("post zygote non-moving space", nullptr, 64 * MB,
                             PROT_READ | PROT_WRITE, true, &error_str));
    CHECK(post_zygote_non_moving_space_mem_map_.get() != nullptr) << error_str;
    heap_begin = std::min(post_zygote_non_moving_space_mem_map_->Begin(), heap_begin);
    heap_end = std::max(post_zygote_non_moving_space_mem_map_->End(), heap_end);
  }
  size_t heap_capacity = heap_end - heap_begin;

  // Allocate the card table.
  card_table_.reset(accounting::CardTable::Create(heap_begin, heap_capacity));
  CHECK(card_table_.get() != NULL) << "Failed to create card table";

  // Card cache for now since it makes it easier for us to update the references to the copying
  // spaces.
  accounting::ModUnionTable* mod_union_table =
      new accounting::ModUnionTableCardCache("Image mod-union table", this, GetImageSpace());
  CHECK(mod_union_table != nullptr) << "Failed to create image mod-union table";
  AddModUnionTable(mod_union_table);

  // TODO: Count objects in the image space here.
  num_bytes_allocated_ = 0;

  // Default mark stack size in bytes.
  static const size_t default_mark_stack_size = 64 * KB;
  mark_stack_.reset(accounting::ObjectStack::Create("mark stack", default_mark_stack_size));
  allocation_stack_.reset(accounting::ObjectStack::Create("allocation stack",
                                                          max_allocation_stack_size_));
  live_stack_.reset(accounting::ObjectStack::Create("live stack",
                                                    max_allocation_stack_size_));

  // It's still too early to take a lock because there are no threads yet, but we can create locks
  // now. We don't create them earlier to make it clear that you can't use locks during heap
  // initialization.
  gc_complete_lock_ = new Mutex("GC complete lock");
  gc_complete_cond_.reset(new ConditionVariable("GC complete condition variable",
                                                *gc_complete_lock_));
  last_gc_time_ns_ = NanoTime();
  last_gc_size_ = GetBytesAllocated();

  if (ignore_max_footprint_) {
    SetIdealFootprint(std::numeric_limits<size_t>::max());
    concurrent_start_bytes_ = std::numeric_limits<size_t>::max();
  }
  CHECK_NE(max_allowed_footprint_, 0U);

  // Create our garbage collectors.
  for (size_t i = 0; i < 2; ++i) {
    const bool concurrent = i != 0;
    garbage_collectors_.push_back(new collector::MarkSweep(this, concurrent));
    garbage_collectors_.push_back(new collector::PartialMarkSweep(this, concurrent));
    garbage_collectors_.push_back(new collector::StickyMarkSweep(this, concurrent));
  }
  if (kMovingCollector) {
    // TODO: Clean this up.
    bool generational = post_zygote_collector_type_ == kCollectorTypeGSS;
    semi_space_collector_ = new collector::SemiSpace(this, generational);
    garbage_collectors_.push_back(semi_space_collector_);
  }

  if (running_on_valgrind_) {
    Runtime::Current()->GetInstrumentation()->InstrumentQuickAllocEntryPoints();
  }

  if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
    LOG(INFO) << "Heap() exiting";
  }
}

void Heap::ChangeAllocator(AllocatorType allocator) {
  // These two allocators are only used internally and don't have any entrypoints.
  DCHECK_NE(allocator, kAllocatorTypeLOS);
  DCHECK_NE(allocator, kAllocatorTypeNonMoving);
  if (current_allocator_ != allocator) {
    current_allocator_ = allocator;
    SetQuickAllocEntryPointsAllocator(current_allocator_);
    Runtime::Current()->GetInstrumentation()->ResetQuickAllocEntryPoints();
  }
}

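// We are compiling the boot image if neither an image space nor a zygote space has been mapped.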
bool Heap::IsCompilingBoot() const {
  for (const auto& space : continuous_spaces_) {
    if (space->IsImageSpace()) {
      return false;
    } else if (space->IsZygoteSpace()) {
      return false;
    }
  }
  return true;
}

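// Returns true if any of the continuous spaces is an image space.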
bool Heap::HasImageSpace() const {
  for (const auto& space : continuous_spaces_) {
    if (space->IsImageSpace()) {
      return true;
    }
  }
  return false;
}

void Heap::IncrementDisableMovingGC(Thread* self) {
  // Need to do this holding the lock to prevent races where the GC is about to run / running when
  // we attempt to disable it.
  ScopedThreadStateChange tsc(self, kWaitingForGcToComplete);
  MutexLock mu(self, *gc_complete_lock_);
  ++disable_moving_gc_count_;
  if (IsCompactingGC(collector_type_running_)) {
    WaitForGcToCompleteLocked(self);
  }
}

void Heap::DecrementDisableMovingGC(Thread* self) {
  MutexLock mu(self, *gc_complete_lock_);
  CHECK_GE(disable_moving_gc_count_, 0U);
  --disable_moving_gc_count_;
}

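// Called when the framework changes our process state; switch between the foreground
// (post-zygote) and background collector types accordingly.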
void Heap::UpdateProcessState(ProcessState process_state) {
  if (process_state_ != process_state) {
    process_state_ = process_state;
    if (process_state_ == kProcessStateJankPerceptible) {
      TransitionCollector(post_zygote_collector_type_);
    } else {
      TransitionCollector(background_collector_type_);
    }
  } else {
    CollectGarbageInternal(collector::kGcTypeFull, kGcCauseBackground, false);
  }
}

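// Create the GC thread pool, sized to the larger of the parallel and concurrent GC thread counts.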
void Heap::CreateThreadPool() {
  const size_t num_threads = std::max(parallel_gc_threads_, conc_gc_threads_);
  if (num_threads != 0) {
    thread_pool_.reset(new ThreadPool("Heap thread pool", num_threads));
  }
}

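// Visit every object in the heap: the bump pointer space, objects still sitting on the
// allocation stack, and everything tracked by the live bitmap.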
void Heap::VisitObjects(ObjectVisitorCallback callback, void* arg) {
  Thread* self = Thread::Current();
  // GCs can move objects, so don't allow thread suspension while we visit them.
  const char* old_cause = self->StartAssertNoThreadSuspension("Visiting objects");
  if (bump_pointer_space_ != nullptr) {
    // Visit objects in bump pointer space.
    bump_pointer_space_->Walk(callback, arg);
  }
  // TODO: Switch to standard begin and end to use a range-based loop.
  for (mirror::Object** it = allocation_stack_->Begin(), **end = allocation_stack_->End();
      it < end; ++it) {
    mirror::Object* obj = *it;
    callback(obj, arg);
  }
  GetLiveBitmap()->Walk(callback, arg);
  self->EndAssertNoThreadSuspension(old_cause);
}

void Heap::MarkAllocStackAsLive(accounting::ObjectStack* stack) {
  space::ContinuousSpace* space1 = rosalloc_space_ != nullptr ? rosalloc_space_ : non_moving_space_;
  space::ContinuousSpace* space2 = dlmalloc_space_ != nullptr ? dlmalloc_space_ : non_moving_space_;
  // This handles the case where we don't have a rosalloc or a dlmalloc space.
  // TODO: Generalize this to n bitmaps?
  if (space1 == nullptr) {
    DCHECK(space2 != nullptr);
    space1 = space2;
  }
  if (space2 == nullptr) {
    DCHECK(space1 != nullptr);
    space2 = space1;
  }
  MarkAllocStack(space1->GetLiveBitmap(), space2->GetLiveBitmap(),
                 large_object_space_->GetLiveObjects(), stack);
}

void Heap::DeleteThreadPool() {
  thread_pool_.reset(nullptr);
}

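// Register a space with the heap: hook its live/mark bitmaps (or object sets) into the heap
// bitmaps and keep continuous_spaces_ sorted by start address.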
void Heap::AddSpace(space::Space* space, bool set_as_default) {
  DCHECK(space != nullptr);
  WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
  if (space->IsContinuousSpace()) {
    DCHECK(!space->IsDiscontinuousSpace());
    space::ContinuousSpace* continuous_space = space->AsContinuousSpace();
    // Continuous spaces don't necessarily have bitmaps.
    accounting::SpaceBitmap* live_bitmap = continuous_space->GetLiveBitmap();
    accounting::SpaceBitmap* mark_bitmap = continuous_space->GetMarkBitmap();
    if (live_bitmap != nullptr) {
      DCHECK(mark_bitmap != nullptr);
      live_bitmap_->AddContinuousSpaceBitmap(live_bitmap);
      mark_bitmap_->AddContinuousSpaceBitmap(mark_bitmap);
    }
    continuous_spaces_.push_back(continuous_space);
    if (set_as_default) {
      if (continuous_space->IsDlMallocSpace()) {
        dlmalloc_space_ = continuous_space->AsDlMallocSpace();
      } else if (continuous_space->IsRosAllocSpace()) {
        rosalloc_space_ = continuous_space->AsRosAllocSpace();
      }
    }
    // Ensure that spaces remain sorted in increasing order of start address.
    std::sort(continuous_spaces_.begin(), continuous_spaces_.end(),
              [](const space::ContinuousSpace* a, const space::ContinuousSpace* b) {
      return a->Begin() < b->Begin();
    });
  } else {
    DCHECK(space->IsDiscontinuousSpace());
    space::DiscontinuousSpace* discontinuous_space = space->AsDiscontinuousSpace();
    DCHECK(discontinuous_space->GetLiveObjects() != nullptr);
    live_bitmap_->AddDiscontinuousObjectSet(discontinuous_space->GetLiveObjects());
    DCHECK(discontinuous_space->GetMarkObjects() != nullptr);
    mark_bitmap_->AddDiscontinuousObjectSet(discontinuous_space->GetMarkObjects());
    discontinuous_spaces_.push_back(discontinuous_space);
  }
  if (space->IsAllocSpace()) {
    alloc_spaces_.push_back(space->AsAllocSpace());
  }
}

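// Unregister a space from the heap. The space is not deleted; the caller retains ownership.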
void Heap::RemoveSpace(space::Space* space) {
  DCHECK(space != nullptr);
  WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
  if (space->IsContinuousSpace()) {
    DCHECK(!space->IsDiscontinuousSpace());
    space::ContinuousSpace* continuous_space = space->AsContinuousSpace();
    // Continuous spaces don't necessarily have bitmaps.
    accounting::SpaceBitmap* live_bitmap = continuous_space->GetLiveBitmap();
    accounting::SpaceBitmap* mark_bitmap = continuous_space->GetMarkBitmap();
    if (live_bitmap != nullptr) {
      DCHECK(mark_bitmap != nullptr);
      live_bitmap_->RemoveContinuousSpaceBitmap(live_bitmap);
      mark_bitmap_->RemoveContinuousSpaceBitmap(mark_bitmap);
    }
    auto it = std::find(continuous_spaces_.begin(), continuous_spaces_.end(), continuous_space);
    DCHECK(it != continuous_spaces_.end());
    continuous_spaces_.erase(it);
    if (continuous_space == dlmalloc_space_) {
      dlmalloc_space_ = nullptr;
    } else if (continuous_space == rosalloc_space_) {
      rosalloc_space_ = nullptr;
    }
    if (continuous_space == main_space_) {
      main_space_ = nullptr;
    }
  } else {
    DCHECK(space->IsDiscontinuousSpace());
    space::DiscontinuousSpace* discontinuous_space = space->AsDiscontinuousSpace();
    DCHECK(discontinuous_space->GetLiveObjects() != nullptr);
    live_bitmap_->RemoveDiscontinuousObjectSet(discontinuous_space->GetLiveObjects());
    DCHECK(discontinuous_space->GetMarkObjects() != nullptr);
    mark_bitmap_->RemoveDiscontinuousObjectSet(discontinuous_space->GetMarkObjects());
    auto it = std::find(discontinuous_spaces_.begin(), discontinuous_spaces_.end(),
                        discontinuous_space);
    DCHECK(it != discontinuous_spaces_.end());
    discontinuous_spaces_.erase(it);
  }
  if (space->IsAllocSpace()) {
    auto it = std::find(alloc_spaces_.begin(), alloc_spaces_.end(), space->AsAllocSpace());
    DCHECK(it != alloc_spaces_.end());
    alloc_spaces_.erase(it);
  }
}

void Heap::RegisterGCAllocation(size_t bytes) {
  if (this != nullptr) {
    gc_memory_overhead_.FetchAndAdd(bytes);
  }
}

void Heap::RegisterGCDeAllocation(size_t bytes) {
  if (this != nullptr) {
    gc_memory_overhead_.FetchAndSub(bytes);
  }
}

void Heap::DumpGcPerformanceInfo(std::ostream& os) {
  // Dump cumulative timings.
  os << "Dumping cumulative Gc timings\n";
  uint64_t total_duration = 0;

  // Dump cumulative loggers for each GC type.
  uint64_t total_paused_time = 0;
  for (const auto& collector : garbage_collectors_) {
    CumulativeLogger& logger = collector->GetCumulativeTimings();
    if (logger.GetTotalNs() != 0) {
      os << Dumpable<CumulativeLogger>(logger);
      const uint64_t total_ns = logger.GetTotalNs();
      const uint64_t total_pause_ns = collector->GetTotalPausedTimeNs();
      double seconds = NsToMs(logger.GetTotalNs()) / 1000.0;
      const uint64_t freed_bytes = collector->GetTotalFreedBytes();
      const uint64_t freed_objects = collector->GetTotalFreedObjects();
      Histogram<uint64_t>::CumulativeData cumulative_data;
      collector->GetPauseHistogram().CreateHistogram(&cumulative_data);
      collector->GetPauseHistogram().PrintConfidenceIntervals(os, 0.99, cumulative_data);
      os << collector->GetName() << " total time: " << PrettyDuration(total_ns) << "\n"
         << collector->GetName() << " freed: " << freed_objects
         << " objects with total size " << PrettySize(freed_bytes) << "\n"
         << collector->GetName() << " throughput: " << freed_objects / seconds << "/s / "
         << PrettySize(freed_bytes / seconds) << "/s\n";
      total_duration += total_ns;
      total_paused_time += total_pause_ns;
    }
  }
  uint64_t allocation_time = static_cast<uint64_t>(total_allocation_time_) * kTimeAdjust;
  if (total_duration != 0) {
    const double total_seconds = static_cast<double>(total_duration / 1000) / 1000000.0;
    os << "Total time spent in GC: " << PrettyDuration(total_duration) << "\n";
    os << "Mean GC size throughput: "
       << PrettySize(GetBytesFreedEver() / total_seconds) << "/s\n";
    os << "Mean GC object throughput: "
       << (GetObjectsFreedEver() / total_seconds) << " objects/s\n";
  }
  size_t total_objects_allocated = GetObjectsAllocatedEver();
  os << "Total number of allocations: " << total_objects_allocated << "\n";
  size_t total_bytes_allocated = GetBytesAllocatedEver();
  os << "Total bytes allocated " << PrettySize(total_bytes_allocated) << "\n";
  if (kMeasureAllocationTime) {
    os << "Total time spent allocating: " << PrettyDuration(allocation_time) << "\n";
    os << "Mean allocation time: " << PrettyDuration(allocation_time / total_objects_allocated)
       << "\n";
  }
  os << "Total mutator paused time: " << PrettyDuration(total_paused_time) << "\n";
  os << "Total time waiting for GC to complete: " << PrettyDuration(total_wait_time_) << "\n";
  os << "Approximate GC data structures memory overhead: " << gc_memory_overhead_;
}

Heap::~Heap() {
  VLOG(heap) << "Starting ~Heap()";
  STLDeleteElements(&garbage_collectors_);
  // If we don't reset then the mark stack complains in its destructor.
  allocation_stack_->Reset();
  live_stack_->Reset();
  STLDeleteValues(&mod_union_tables_);
  STLDeleteElements(&continuous_spaces_);
  STLDeleteElements(&discontinuous_spaces_);
  delete gc_complete_lock_;
  VLOG(heap) << "Finished ~Heap()";
}

space::ContinuousSpace* Heap::FindContinuousSpaceFromObject(const mirror::Object* obj,
                                                            bool fail_ok) const {
  for (const auto& space : continuous_spaces_) {
    if (space->Contains(obj)) {
      return space;
    }
  }
  if (!fail_ok) {
    LOG(FATAL) << "object " << reinterpret_cast<const void*>(obj) << " not inside any spaces!";
  }
  return NULL;
}

space::DiscontinuousSpace* Heap::FindDiscontinuousSpaceFromObject(const mirror::Object* obj,
                                                                  bool fail_ok) const {
  for (const auto& space : discontinuous_spaces_) {
    if (space->Contains(obj)) {
      return space;
    }
  }
  if (!fail_ok) {
    LOG(FATAL) << "object " << reinterpret_cast<const void*>(obj) << " not inside any spaces!";
  }
  return NULL;
}

space::Space* Heap::FindSpaceFromObject(const mirror::Object* obj, bool fail_ok) const {
  space::Space* result = FindContinuousSpaceFromObject(obj, true);
  if (result != NULL) {
    return result;
  }
  return FindDiscontinuousSpaceFromObject(obj, true);
}

struct SoftReferenceArgs {
  RootVisitor* is_marked_callback_;
  RootVisitor* recursive_mark_callback_;
  void* arg_;
};

mirror::Object* Heap::PreserveSoftReferenceCallback(mirror::Object* obj, void* arg) {
  SoftReferenceArgs* args = reinterpret_cast<SoftReferenceArgs*>(arg);
  // TODO: Don't preserve all soft references.
  return args->recursive_mark_callback_(obj, args->arg_);
}

// Process reference class instances and schedule finalizations.
void Heap::ProcessReferences(TimingLogger& timings, bool clear_soft,
                             RootVisitor* is_marked_callback,
                             RootVisitor* recursive_mark_object_callback, void* arg) {
  // Unless we are in the zygote or required to clear soft references with white referents,
  // preserve some white referents.
  if (!clear_soft && !Runtime::Current()->IsZygote()) {
    SoftReferenceArgs soft_reference_args;
    soft_reference_args.is_marked_callback_ = is_marked_callback;
    soft_reference_args.recursive_mark_callback_ = recursive_mark_object_callback;
    soft_reference_args.arg_ = arg;
    soft_reference_queue_.PreserveSomeSoftReferences(&PreserveSoftReferenceCallback,
                                                     &soft_reference_args);
  }
  timings.StartSplit("ProcessReferences");
  // Clear all remaining soft and weak references with white referents.
  soft_reference_queue_.ClearWhiteReferences(cleared_references_, is_marked_callback, arg);
  weak_reference_queue_.ClearWhiteReferences(cleared_references_, is_marked_callback, arg);
  timings.EndSplit();
  // Preserve all white objects with finalize methods and schedule them for finalization.
  timings.StartSplit("EnqueueFinalizerReferences");
  finalizer_reference_queue_.EnqueueFinalizerReferences(cleared_references_, is_marked_callback,
                                                        recursive_mark_object_callback, arg);
  timings.EndSplit();
  timings.StartSplit("ProcessReferences");
  // Clear all f-reachable soft and weak references with white referents.
  soft_reference_queue_.ClearWhiteReferences(cleared_references_, is_marked_callback, arg);
  weak_reference_queue_.ClearWhiteReferences(cleared_references_, is_marked_callback, arg);
  // Clear all phantom references with white referents.
  phantom_reference_queue_.ClearWhiteReferences(cleared_references_, is_marked_callback, arg);
  // At this point all reference queues other than the cleared references should be empty.
  DCHECK(soft_reference_queue_.IsEmpty());
  DCHECK(weak_reference_queue_.IsEmpty());
  DCHECK(finalizer_reference_queue_.IsEmpty());
  DCHECK(phantom_reference_queue_.IsEmpty());
  timings.EndSplit();
}

bool Heap::IsEnqueued(mirror::Object* ref) const {
  // Since the references are stored in cyclic lists, once a reference is enqueued its pending
  // next field will always be non-null.
  return ref->GetFieldObject<mirror::Object>(GetReferencePendingNextOffset(), false) != nullptr;
}

bool Heap::IsEnqueuable(mirror::Object* ref) const {
  DCHECK(ref != nullptr);
  const mirror::Object* queue =
      ref->GetFieldObject<mirror::Object>(GetReferenceQueueOffset(), false);
  const mirror::Object* queue_next =
      ref->GetFieldObject<mirror::Object>(GetReferenceQueueNextOffset(), false);
  return queue != nullptr && queue_next == nullptr;
}

// Process the "referent" field in a java.lang.ref.Reference.  If the referent has not yet been
// marked, put it on the appropriate list in the heap for later processing.
void Heap::DelayReferenceReferent(mirror::Class* klass, mirror::Object* obj,
                                  RootVisitor mark_visitor, void* arg) {
  DCHECK(klass != nullptr);
  DCHECK(klass->IsReferenceClass());
  DCHECK(obj != nullptr);
  mirror::Object* referent = GetReferenceReferent(obj);
  if (referent != nullptr) {
    mirror::Object* forward_address = mark_visitor(referent, arg);
    // Null means that the object is not currently marked.
    if (forward_address == nullptr) {
      Thread* self = Thread::Current();
      // TODO: Remove these locks, and use atomic stacks for storing references?
      // We need to check that the references haven't already been enqueued since we can end up
      // scanning the same reference multiple times due to dirty cards.
      if (klass->IsSoftReferenceClass()) {
        soft_reference_queue_.AtomicEnqueueIfNotEnqueued(self, obj);
      } else if (klass->IsWeakReferenceClass()) {
        weak_reference_queue_.AtomicEnqueueIfNotEnqueued(self, obj);
      } else if (klass->IsFinalizerReferenceClass()) {
        finalizer_reference_queue_.AtomicEnqueueIfNotEnqueued(self, obj);
      } else if (klass->IsPhantomReferenceClass()) {
        phantom_reference_queue_.AtomicEnqueueIfNotEnqueued(self, obj);
      } else {
        LOG(FATAL) << "Invalid reference type " << PrettyClass(klass) << " " << std::hex
                   << klass->GetAccessFlags();
      }
    } else if (referent != forward_address) {
      // Referent is already marked and we need to update it.
      SetReferenceReferent(obj, forward_address);
    }
  }
}

space::ImageSpace* Heap::GetImageSpace() const {
  for (const auto& space : continuous_spaces_) {
    if (space->IsImageSpace()) {
      return space->AsImageSpace();
    }
  }
  return NULL;
}

static void MSpaceChunkCallback(void* start, void* end, size_t used_bytes, void* arg) {
  size_t chunk_size = reinterpret_cast<uint8_t*>(end) - reinterpret_cast<uint8_t*>(start);
  if (used_bytes < chunk_size) {
    size_t chunk_free_bytes = chunk_size - used_bytes;
    size_t& max_contiguous_allocation = *reinterpret_cast<size_t*>(arg);
    max_contiguous_allocation = std::max(max_contiguous_allocation, chunk_free_bytes);
  }
}

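// Throw an OutOfMemoryError describing the failed allocation, including the largest contiguous
// free chunk when the failure looks like fragmentation.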
void Heap::ThrowOutOfMemoryError(Thread* self, size_t byte_count, bool large_object_allocation) {
  std::ostringstream oss;
  size_t total_bytes_free = GetFreeMemory();
  oss << "Failed to allocate a " << byte_count << " byte allocation with " << total_bytes_free
      << " free bytes";
  // If the allocation failed due to fragmentation, print out the largest contiguous allocation.
  if (!large_object_allocation && total_bytes_free >= byte_count) {
    size_t max_contiguous_allocation = 0;
    for (const auto& space : continuous_spaces_) {
      if (space->IsMallocSpace()) {
        // To allow the Walk/InspectAll() to exclusively-lock the mutator
        // lock, temporarily release the shared access to the mutator
        // lock here by transitioning to the suspended state.
        Locks::mutator_lock_->AssertSharedHeld(self);
        self->TransitionFromRunnableToSuspended(kSuspended);
        space->AsMallocSpace()->Walk(MSpaceChunkCallback, &max_contiguous_allocation);
        self->TransitionFromSuspendedToRunnable();
        Locks::mutator_lock_->AssertSharedHeld(self);
      }
    }
    oss << "; failed due to fragmentation (largest possible contiguous allocation "
        << max_contiguous_allocation << " bytes)";
  }
  self->ThrowOutOfMemoryError(oss.str().c_str());
}

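// Trim the managed malloc spaces and the native dlmalloc heap, returning unused pages to the
// system, and log how much was reclaimed.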
void Heap::Trim() {
  Thread* self = Thread::Current();
  {
    // Need to do this before acquiring the locks since we don't want to get suspended while
    // holding any locks.
    ScopedThreadStateChange tsc(self, kWaitingForGcToComplete);
    // Pretend we are doing a GC to prevent background compaction from deleting the space we are
    // trimming.
    MutexLock mu(self, *gc_complete_lock_);
    // Ensure there is only one GC at a time.
    WaitForGcToCompleteLocked(self);
    collector_type_running_ = kCollectorTypeHeapTrim;
  }
  uint64_t start_ns = NanoTime();
  // Trim the managed spaces.
  uint64_t total_alloc_space_allocated = 0;
  uint64_t total_alloc_space_size = 0;
  uint64_t managed_reclaimed = 0;
  for (const auto& space : continuous_spaces_) {
    if (space->IsMallocSpace()) {
      gc::space::MallocSpace* alloc_space = space->AsMallocSpace();
      total_alloc_space_size += alloc_space->Size();
      managed_reclaimed += alloc_space->Trim();
    }
  }
  total_alloc_space_allocated = GetBytesAllocated() - large_object_space_->GetBytesAllocated() -
      bump_pointer_space_->Size();
  const float managed_utilization = static_cast<float>(total_alloc_space_allocated) /
      static_cast<float>(total_alloc_space_size);
  uint64_t gc_heap_end_ns = NanoTime();
  // We never move things in the native heap, so we can finish the GC at this point.
  FinishGC(self, collector::kGcTypeNone);
  // Trim the native heap.
  dlmalloc_trim(0);
  size_t native_reclaimed = 0;
  dlmalloc_inspect_all(DlmallocMadviseCallback, &native_reclaimed);
  uint64_t end_ns = NanoTime();
  VLOG(heap) << "Heap trim of managed (duration=" << PrettyDuration(gc_heap_end_ns - start_ns)
      << ", advised=" << PrettySize(managed_reclaimed) << ") and native (duration="
      << PrettyDuration(end_ns - gc_heap_end_ns) << ", advised=" << PrettySize(native_reclaimed)
      << ") heaps. Managed heap utilization of " << static_cast<int>(100 * managed_utilization)
      << "%.";
}

bool Heap::IsValidObjectAddress(const mirror::Object* obj) const {
  // Note: we deliberately don't take the lock here, and mustn't test anything that would require
  // taking the lock.
  if (obj == nullptr) {
    return true;
  }
  return IsAligned<kObjectAlignment>(obj) && IsHeapAddress(obj);
}

bool Heap::IsHeapAddress(const mirror::Object* obj) const {
  if (kMovingCollector && bump_pointer_space_ && bump_pointer_space_->HasAddress(obj)) {
    return true;
  }
  // TODO: This probably doesn't work for large objects.
  return FindSpaceFromObject(obj, true) != nullptr;
}

bool Heap::IsLiveObjectLocked(mirror::Object* obj, bool search_allocation_stack,
                              bool search_live_stack, bool sorted) {
  if (UNLIKELY(!IsAligned<kObjectAlignment>(obj))) {
    return false;
  }
  if (bump_pointer_space_ != nullptr && bump_pointer_space_->HasAddress(obj)) {
    mirror::Class* klass = obj->GetClass();
    if (obj == klass) {
      // This case happens for java.lang.Class.
      return true;
    }
    return VerifyClassClass(klass) && IsLiveObjectLocked(klass);
  } else if (temp_space_ != nullptr && temp_space_->HasAddress(obj)) {
    return false;
  }
  space::ContinuousSpace* c_space = FindContinuousSpaceFromObject(obj, true);
  space::DiscontinuousSpace* d_space = NULL;
  if (c_space != nullptr) {
    if (c_space->GetLiveBitmap()->Test(obj)) {
      return true;
    }
  } else {
    d_space = FindDiscontinuousSpaceFromObject(obj, true);
    if (d_space != nullptr) {
      if (d_space->GetLiveObjects()->Test(obj)) {
        return true;
      }
    }
  }
  // This covers the allocation/live stack swapping that is done without suspending mutators.
  for (size_t i = 0; i < (sorted ? 1 : 5); ++i) {
    if (i > 0) {
      NanoSleep(MsToNs(10));
    }
    if (search_allocation_stack) {
      if (sorted) {
        if (allocation_stack_->ContainsSorted(const_cast<mirror::Object*>(obj))) {
          return true;
        }
      } else if (allocation_stack_->Contains(const_cast<mirror::Object*>(obj))) {
        return true;
      }
    }

    if (search_live_stack) {
      if (sorted) {
        if (live_stack_->ContainsSorted(const_cast<mirror::Object*>(obj))) {
          return true;
        }
      } else if (live_stack_->Contains(const_cast<mirror::Object*>(obj))) {
        return true;
      }
    }
  }
  // We need to check the bitmaps again since there is a race where we mark something as live and
  // then clear the stack containing it.
  if (c_space != nullptr) {
    if (c_space->GetLiveBitmap()->Test(obj)) {
      return true;
    }
  } else {
    d_space = FindDiscontinuousSpaceFromObject(obj, true);
    if (d_space != nullptr && d_space->GetLiveObjects()->Test(obj)) {
      return true;
    }
  }
  return false;
}

void Heap::VerifyObjectImpl(mirror::Object* obj) {
  if (Thread::Current() == NULL ||
      Runtime::Current()->GetThreadList()->GetLockOwner() == Thread::Current()->GetTid()) {
    return;
  }
  VerifyObjectBody(obj);
}

bool Heap::VerifyClassClass(const mirror::Class* c) const {
  // Note: we don't use the accessors here as they have internal sanity checks that we don't want
  // to run.
  const byte* raw_addr =
      reinterpret_cast<const byte*>(c) + mirror::Object::ClassOffset().Int32Value();
  mirror::Class* c_c = reinterpret_cast<mirror::HeapReference<mirror::Class> const *>(raw_addr)->AsMirrorPtr();
  raw_addr = reinterpret_cast<const byte*>(c_c) + mirror::Object::ClassOffset().Int32Value();
  mirror::Class* c_c_c = reinterpret_cast<mirror::HeapReference<mirror::Class> const *>(raw_addr)->AsMirrorPtr();
  return c_c == c_c_c;
}

void Heap::DumpSpaces(std::ostream& stream) {
  for (const auto& space : continuous_spaces_) {
    accounting::SpaceBitmap* live_bitmap = space->GetLiveBitmap();
    accounting::SpaceBitmap* mark_bitmap = space->GetMarkBitmap();
    stream << space << " " << *space << "\n";
    if (live_bitmap != nullptr) {
      stream << live_bitmap << " " << *live_bitmap << "\n";
    }
    if (mark_bitmap != nullptr) {
      stream << mark_bitmap << " " << *mark_bitmap << "\n";
    }
  }
  for (const auto& space : discontinuous_spaces_) {
    stream << space << " " << *space << "\n";
  }
}

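// Sanity check an object: verify its alignment, its class pointer, and (in stricter
// verification modes) that both the object and its class are live.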
void Heap::VerifyObjectBody(mirror::Object* obj) {
  CHECK(IsAligned<kObjectAlignment>(obj)) << "Object isn't aligned: " << obj;
  // Ignore early dawn of the universe verifications.
  if (UNLIKELY(static_cast<size_t>(num_bytes_allocated_.Load()) < 10 * KB)) {
    return;
  }
  const byte* raw_addr = reinterpret_cast<const byte*>(obj) +
      mirror::Object::ClassOffset().Int32Value();
  mirror::Class* c = reinterpret_cast<mirror::HeapReference<mirror::Class> const *>(raw_addr)->AsMirrorPtr();
  if (UNLIKELY(c == NULL)) {
    LOG(FATAL) << "Null class in object: " << obj;
  } else if (UNLIKELY(!IsAligned<kObjectAlignment>(c))) {
    LOG(FATAL) << "Class isn't aligned: " << c << " in object: " << obj;
  }
  CHECK(VerifyClassClass(c));

  if (verify_object_mode_ > kVerifyAllFast) {
    // TODO: the bitmap tests below are racy if VerifyObjectBody is called without the
    //       heap_bitmap_lock_.
    if (!IsLiveObjectLocked(obj)) {
      DumpSpaces();
      LOG(FATAL) << "Object is dead: " << obj;
    }
    if (!IsLiveObjectLocked(c)) {
      LOG(FATAL) << "Class of object is dead: " << c << " in object: " << obj;
    }
  }
}

void Heap::VerificationCallback(mirror::Object* obj, void* arg) {
  DCHECK(obj != NULL);
  reinterpret_cast<Heap*>(arg)->VerifyObjectBody(obj);
}

void Heap::VerifyHeap() {
  ReaderMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
  GetLiveBitmap()->Walk(Heap::VerificationCallback, this);
}

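// Account for freed objects and bytes, updating both the per-thread and global runtime stats
// when stats collection is enabled.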
void Heap::RecordFree(size_t freed_objects, size_t freed_bytes) {
  DCHECK_LE(freed_bytes, num_bytes_allocated_.Load());
  num_bytes_allocated_.FetchAndSub(freed_bytes);
  if (Runtime::Current()->HasStatsEnabled()) {
    RuntimeStats* thread_stats = Thread::Current()->GetStats();
    thread_stats->freed_objects += freed_objects;
    thread_stats->freed_bytes += freed_bytes;
    // TODO: Do this concurrently.
    RuntimeStats* global_stats = Runtime::Current()->GetStats();
    global_stats->freed_objects += freed_objects;
    global_stats->freed_bytes += freed_bytes;
  }
}

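// Allocation slow path: wait for any in-progress GC, then run progressively more aggressive
// collections (ending with one that clears SoftReferences and allows heap growth) until the
// allocation succeeds or an OutOfMemoryError is thrown.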
mirror::Object* Heap::AllocateInternalWithGc(Thread* self, AllocatorType allocator,
                                             size_t alloc_size, size_t* bytes_allocated,
                                             mirror::Class** klass) {
  mirror::Object* ptr = nullptr;
  bool was_default_allocator = allocator == GetCurrentAllocator();
  DCHECK(klass != nullptr);
  SirtRef<mirror::Class> sirt_klass(self, *klass);
  // The allocation failed. If the GC is running, block until it completes, and then retry the
  // allocation.
  collector::GcType last_gc = WaitForGcToComplete(self);
  if (last_gc != collector::kGcTypeNone) {
    // If we were the default allocator but the allocator changed while we were suspended,
    // abort the allocation.
    if (was_default_allocator && allocator != GetCurrentAllocator()) {
      *klass = sirt_klass.get();
      return nullptr;
    }
    // A GC was in progress and we blocked, retry allocation now that memory has been freed.
    ptr = TryToAllocate<true, false>(self, allocator, alloc_size, bytes_allocated);
  }

  // Loop through our different GC types and try to GC until we get enough free memory.
  for (collector::GcType gc_type : gc_plan_) {
    if (ptr != nullptr) {
      break;
    }
    // Attempt to run the collector, if we succeed, re-try the allocation.
    bool gc_ran =
        CollectGarbageInternal(gc_type, kGcCauseForAlloc, false) != collector::kGcTypeNone;
    if (was_default_allocator && allocator != GetCurrentAllocator()) {
      *klass = sirt_klass.get();
      return nullptr;
    }
    if (gc_ran) {
      // Did we free sufficient memory for the allocation to succeed?
      ptr = TryToAllocate<true, false>(self, allocator, alloc_size, bytes_allocated);
    }
  }
  // Allocations have failed after GCs; this is an exceptional state.
  if (ptr == nullptr) {
    // Try harder, growing the heap if necessary.
    ptr = TryToAllocate<true, true>(self, allocator, alloc_size, bytes_allocated);
  }
  if (ptr == nullptr) {
    // Most allocations should have succeeded by now, so the heap is really full, really fragmented,
    // or the requested size is really big. Do another GC, collecting SoftReferences this time. The
    // VM spec requires that all SoftReferences have been collected and cleared before throwing
    // OOME.
    VLOG(gc) << "Forcing collection of SoftReferences for " << PrettySize(alloc_size)
             << " allocation";
    // TODO: Run finalization, but this may cause more allocations to occur.
    // We don't need a WaitForGcToComplete here either.
    DCHECK(!gc_plan_.empty());
    CollectGarbageInternal(gc_plan_.back(), kGcCauseForAlloc, true);
    if (was_default_allocator && allocator != GetCurrentAllocator()) {
      *klass = sirt_klass.get();
      return nullptr;
    }
    ptr = TryToAllocate<true, true>(self, allocator, alloc_size, bytes_allocated);
    if (ptr == nullptr) {
      ThrowOutOfMemoryError(self, alloc_size, false);
    }
  }
  *klass = sirt_klass.get();
  return ptr;
}

void Heap::SetTargetHeapUtilization(float target) {
  DCHECK_GT(target, 0.0f);  // asserted in Java code
  DCHECK_LT(target, 1.0f);
  target_utilization_ = target;
}

size_t Heap::GetObjectsAllocated() const {
  size_t total = 0;
  for (space::AllocSpace* space : alloc_spaces_) {
    total += space->GetObjectsAllocated();
  }
  return total;
}

size_t Heap::GetObjectsAllocatedEver() const {
  return GetObjectsFreedEver() + GetObjectsAllocated();
}

size_t Heap::GetBytesAllocatedEver() const {
  return GetBytesFreedEver() + GetBytesAllocated();
}

class InstanceCounter {
 public:
  InstanceCounter(const std::vector<mirror::Class*>& classes, bool use_is_assignable_from, uint64_t* counts)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      : classes_(classes), use_is_assignable_from_(use_is_assignable_from), counts_(counts) {
  }

  void operator()(mirror::Object* o) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    for (size_t i = 0; i < classes_.size(); ++i) {
      mirror::Class* instance_class = o->GetClass();
      if (use_is_assignable_from_) {
        if (instance_class != NULL && classes_[i]->IsAssignableFrom(instance_class)) {
          ++counts_[i];
        }
      } else {
        if (instance_class == classes_[i]) {
          ++counts_[i];
        }
      }
    }
  }

 private:
  const std::vector<mirror::Class*>& classes_;
  bool use_is_assignable_from_;
  uint64_t* const counts_;

  DISALLOW_COPY_AND_ASSIGN(InstanceCounter);
};

void Heap::CountInstances(const std::vector<mirror::Class*>& classes, bool use_is_assignable_from,
                          uint64_t* counts) {
  // We only want reachable instances, so do a GC. This also ensures that the alloc stack
  // is empty, so the live bitmap is the only place we need to look.
  Thread* self = Thread::Current();
  self->TransitionFromRunnableToSuspended(kNative);
  CollectGarbage(false);
  self->TransitionFromSuspendedToRunnable();

  InstanceCounter counter(classes, use_is_assignable_from, counts);
  ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
  GetLiveBitmap()->Visit(counter);
}

class InstanceCollector {
 public:
  InstanceCollector(mirror::Class* c, int32_t max_count, std::vector<mirror::Object*>& instances)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      : class_(c), max_count_(max_count), instances_(instances) {
  }

  void operator()(mirror::Object* o) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    mirror::Class* instance_class = o->GetClass();
    if (instance_class == class_) {
      if (max_count_ == 0 || instances_.size() < max_count_) {
        instances_.push_back(o);
      }
    }
  }

 private:
  mirror::Class* class_;
  uint32_t max_count_;
  std::vector<mirror::Object*>& instances_;

  DISALLOW_COPY_AND_ASSIGN(InstanceCollector);
};

void Heap::GetInstances(mirror::Class* c, int32_t max_count,
                        std::vector<mirror::Object*>& instances) {
  // We only want reachable instances, so do a GC. This also ensures that the alloc stack
  // is empty, so the live bitmap is the only place we need to look.
  Thread* self = Thread::Current();
  self->TransitionFromRunnableToSuspended(kNative);
  CollectGarbage(false);
  self->TransitionFromSuspendedToRunnable();

  InstanceCollector collector(c, max_count, instances);
  ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
  GetLiveBitmap()->Visit(collector);
}

class ReferringObjectsFinder {
 public:
  ReferringObjectsFinder(mirror::Object* object, int32_t max_count,
                         std::vector<mirror::Object*>& referring_objects)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      : object_(object), max_count_(max_count), referring_objects_(referring_objects) {
  }

  // For bitmap Visit.
  // TODO: Fix lock analysis to not use NO_THREAD_SAFETY_ANALYSIS, requires support for
  // annotalysis on visitors.
  void operator()(const mirror::Object* o) const NO_THREAD_SAFETY_ANALYSIS {
    collector::MarkSweep::VisitObjectReferences(const_cast<mirror::Object*>(o), *this, true);
  }

  // For MarkSweep::VisitObjectReferences.
  void operator()(mirror::Object* referrer, mirror::Object* object,
                  const MemberOffset&, bool) const {
    if (object == object_ && (max_count_ == 0 || referring_objects_.size() < max_count_)) {
      referring_objects_.push_back(referrer);
    }
  }

 private:
  mirror::Object* object_;
  uint32_t max_count_;
  std::vector<mirror::Object*>& referring_objects_;

  DISALLOW_COPY_AND_ASSIGN(ReferringObjectsFinder);
};

void Heap::GetReferringObjects(mirror::Object* o, int32_t max_count,
                               std::vector<mirror::Object*>& referring_objects) {
  // We only want reachable instances, so do a GC. This also ensures that the alloc stack
  // is empty, so the live bitmap is the only place we need to look.
  Thread* self = Thread::Current();
  self->TransitionFromRunnableToSuspended(kNative);
  CollectGarbage(false);
  self->TransitionFromSuspendedToRunnable();

  ReferringObjectsFinder finder(o, max_count, referring_objects);
  ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
  GetLiveBitmap()->Visit(finder);
}

void Heap::CollectGarbage(bool clear_soft_references) {
  // Even if we waited for a GC we still need to do another GC since weaks allocated during the
  // last GC will not necessarily have been cleared.
  CollectGarbageInternal(gc_plan_.back(), kGcCauseExplicit, clear_soft_references);
}

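// Transition to a different collector type, compacting between the bump pointer space and the
// main malloc space as needed. Suspends all threads while the spaces are rearranged.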
1191void Heap::TransitionCollector(CollectorType collector_type) {
1192  if (collector_type == collector_type_) {
1193    return;
1194  }
1195  VLOG(heap) << "TransitionCollector: " << static_cast<int>(collector_type_)
1196             << " -> " << static_cast<int>(collector_type);
1197  uint64_t start_time = NanoTime();
1198  uint32_t before_size  = GetTotalMemory();
1199  uint32_t before_allocated = num_bytes_allocated_.Load();
1200  ThreadList* tl = Runtime::Current()->GetThreadList();
1201  Thread* self = Thread::Current();
1202  ScopedThreadStateChange tsc(self, kWaitingPerformingGc);
1203  Locks::mutator_lock_->AssertNotHeld(self);
1204  const bool copying_transition =
1205      IsCompactingGC(background_collector_type_) || IsCompactingGC(post_zygote_collector_type_);
1206  // Busy wait until we can GC (StartGC can fail if we have a non-zero
1207  // compacting_gc_disable_count_, this should rarely occurs).
1208  for (;;) {
1209    {
1210      ScopedThreadStateChange tsc(self, kWaitingForGcToComplete);
1211      MutexLock mu(self, *gc_complete_lock_);
1212      // Ensure there is only one GC at a time.
1213      WaitForGcToCompleteLocked(self);
1214      // GC can be disabled if someone has a used GetPrimitiveArrayCritical but not yet released.
1215      if (!copying_transition || disable_moving_gc_count_ == 0) {
1216        // TODO: Not hard code in semi-space collector?
1217        collector_type_running_ = copying_transition ? kCollectorTypeSS : collector_type;
1218        break;
1219      }
1220    }
1221    usleep(1000);
1222  }
1223  tl->SuspendAll();
1224  PreGcRosAllocVerification(&semi_space_collector_->GetTimings());
1225  switch (collector_type) {
1226    case kCollectorTypeSS:
1227      // Fall-through.
1228    case kCollectorTypeGSS: {
1229      mprotect(temp_space_->Begin(), temp_space_->Capacity(), PROT_READ | PROT_WRITE);
1230      CHECK(main_space_ != nullptr);
1231      Compact(temp_space_, main_space_);
1232      DCHECK(allocator_mem_map_.get() == nullptr);
1233      allocator_mem_map_.reset(main_space_->ReleaseMemMap());
1234      madvise(main_space_->Begin(), main_space_->Size(), MADV_DONTNEED);
1235      // RemoveSpace does not delete the removed space.
1236      space::Space* old_space = main_space_;
1237      RemoveSpace(old_space);
1238      delete old_space;
1239      break;
1240    }
1241    case kCollectorTypeMS:
1242      // Fall through.
1243    case kCollectorTypeCMS: {
1244      if (IsCompactingGC(collector_type_)) {
1245        // TODO: Use mem-map from temp space?
1246        MemMap* mem_map = allocator_mem_map_.release();
1247        CHECK(mem_map != nullptr);
1248        size_t initial_size = kDefaultInitialSize;
1249        mprotect(mem_map->Begin(), initial_size, PROT_READ | PROT_WRITE);
1250        CHECK(main_space_ == nullptr);
1251        if (kUseRosAlloc) {
1252          main_space_ =
1253              space::RosAllocSpace::CreateFromMemMap(mem_map, "alloc space", kPageSize,
1254                                                     initial_size, mem_map->Size(),
1255                                                     mem_map->Size(), low_memory_mode_);
1256        } else {
1257          main_space_ =
1258              space::DlMallocSpace::CreateFromMemMap(mem_map, "alloc space", kPageSize,
1259                                                     initial_size, mem_map->Size(),
1260                                                     mem_map->Size());
1261        }
1262        main_space_->SetFootprintLimit(main_space_->Capacity());
1263        AddSpace(main_space_);
1264        Compact(main_space_, bump_pointer_space_);
1265      }
1266      break;
1267    }
1268    default: {
1269      LOG(FATAL) << "Attempted to transition to invalid collector type";
1270      break;
1271    }
1272  }
1273  ChangeCollector(collector_type);
1274  PostGcRosAllocVerification(&semi_space_collector_->GetTimings());
1275  tl->ResumeAll();
1276  // Can't call into java code with all threads suspended.
1277  EnqueueClearedReferences();
1278  uint64_t duration = NanoTime() - start_time;
1279  GrowForUtilization(collector::kGcTypeFull, duration);
1280  FinishGC(self, collector::kGcTypeFull);
1281  int32_t after_size = GetTotalMemory();
1282  int32_t delta_size = before_size - after_size;
1283  int32_t after_allocated = num_bytes_allocated_.Load();
1284  int32_t delta_allocated = before_allocated - after_allocated;
1285  const std::string saved_bytes_str =
1286      delta_size < 0 ? "-" + PrettySize(-delta_size) : PrettySize(delta_size);
1287  LOG(INFO) << "Heap transition to " << process_state_ << " took "
1288      << PrettyDuration(duration) << " " << PrettySize(before_size) << "->"
1289      << PrettySize(after_size) << " (" << PrettySize(delta_allocated) << " fewer bytes"
1290      << " allocated, " << saved_bytes_str << " saved)";
1291}
1292
1293void Heap::ChangeCollector(CollectorType collector_type) {
1294  // TODO: Only do this with all mutators suspended to avoid races.
1295  if (collector_type != collector_type_) {
1296    collector_type_ = collector_type;
1297    gc_plan_.clear();
1298    switch (collector_type_) {
1299      case kCollectorTypeSS:
1300        // Fall-through.
1301      case kCollectorTypeGSS: {
1302        concurrent_gc_ = false;
1303        gc_plan_.push_back(collector::kGcTypeFull);
1304        if (use_tlab_) {
1305          ChangeAllocator(kAllocatorTypeTLAB);
1306        } else {
1307          ChangeAllocator(kAllocatorTypeBumpPointer);
1308        }
1309        break;
1310      }
1311      case kCollectorTypeMS: {
1312        concurrent_gc_ = false;
1313        gc_plan_.push_back(collector::kGcTypeSticky);
1314        gc_plan_.push_back(collector::kGcTypePartial);
1315        gc_plan_.push_back(collector::kGcTypeFull);
1316        ChangeAllocator(kUseRosAlloc ? kAllocatorTypeRosAlloc : kAllocatorTypeDlMalloc);
1317        break;
1318      }
1319      case kCollectorTypeCMS: {
1320        concurrent_gc_ = true;
1321        gc_plan_.push_back(collector::kGcTypeSticky);
1322        gc_plan_.push_back(collector::kGcTypePartial);
1323        gc_plan_.push_back(collector::kGcTypeFull);
1324        ChangeAllocator(kUseRosAlloc ? kAllocatorTypeRosAlloc : kAllocatorTypeDlMalloc);
1325        break;
1326      }
1327      default: {
1328        LOG(FATAL) << "Unimplemented";
1329      }
1330    }
1331    if (concurrent_gc_) {
1332      concurrent_start_bytes_ =
1333          std::max(max_allowed_footprint_, kMinConcurrentRemainingBytes) - kMinConcurrentRemainingBytes;
1334    } else {
1335      concurrent_start_bytes_ = std::numeric_limits<size_t>::max();
1336    }
1337  }
1338}
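// Editor's note -- illustrative arithmetic only, not part of heap.cc. For the concurrent
// collectors the trigger computed above is simply the current footprint minus a small safety
// margin. Assuming a hypothetical max_allowed_footprint_ of 32 * MB together with the 128 KB
// kMinConcurrentRemainingBytes defined at the top of this file:
//
//   size_t footprint = 32 * MB;
//   size_t start_bytes =
//       std::max(footprint, kMinConcurrentRemainingBytes) - kMinConcurrentRemainingBytes;
//   // start_bytes == 32 MB - 128 KB; a concurrent GC is requested once num_bytes_allocated_
//   // crosses it, leaving roughly 128 KB of headroom for mutators while the GC runs.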
1339
1340// Special compacting collector which uses sub-optimal bin packing to reduce zygote space size.
1341class ZygoteCompactingCollector : public collector::SemiSpace {
1342 public:
1343  explicit ZygoteCompactingCollector(gc::Heap* heap) : SemiSpace(heap, "zygote collector") {
1344  }
1345
1346  void BuildBins(space::ContinuousSpace* space) {
1347    bin_live_bitmap_ = space->GetLiveBitmap();
1348    bin_mark_bitmap_ = space->GetMarkBitmap();
1349    BinContext context;
1350    context.prev_ = reinterpret_cast<uintptr_t>(space->Begin());
1351    context.collector_ = this;
1352    WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
1353    // Note: This requires traversing the space in increasing order of object addresses.
1354    bin_live_bitmap_->Walk(Callback, reinterpret_cast<void*>(&context));
1355    // Add the last bin, which spans from the end of the last object to the end of the space.
1356    AddBin(reinterpret_cast<uintptr_t>(space->End()) - context.prev_, context.prev_);
1357  }
1358
1359 private:
1360  struct BinContext {
1361    uintptr_t prev_;  // The end of the previous object.
1362    ZygoteCompactingCollector* collector_;
1363  };
1364  // Maps from bin sizes to locations.
1365  std::multimap<size_t, uintptr_t> bins_;
1366  // Live bitmap of the space which contains the bins.
1367  accounting::SpaceBitmap* bin_live_bitmap_;
1368  // Mark bitmap of the space which contains the bins.
1369  accounting::SpaceBitmap* bin_mark_bitmap_;
1370
1371  static void Callback(mirror::Object* obj, void* arg)
1372      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1373    DCHECK(arg != nullptr);
1374    BinContext* context = reinterpret_cast<BinContext*>(arg);
1375    ZygoteCompactingCollector* collector = context->collector_;
1376    uintptr_t object_addr = reinterpret_cast<uintptr_t>(obj);
1377    size_t bin_size = object_addr - context->prev_;
1378    // Add the bin spanning from the end of the previous object to the start of the current object.
1379    collector->AddBin(bin_size, context->prev_);
1380    context->prev_ = object_addr + RoundUp(obj->SizeOf(), kObjectAlignment);
1381  }
1382
1383  void AddBin(size_t size, uintptr_t position) {
1384    if (size != 0) {
1385      bins_.insert(std::make_pair(size, position));
1386    }
1387  }
1388
1389  virtual bool ShouldSweepSpace(space::ContinuousSpace* space) const {
1390    // Don't sweep any spaces since we probably blasted the internal accounting of the free list
1391    // allocator.
1392    return false;
1393  }
1394
1395  virtual mirror::Object* MarkNonForwardedObject(mirror::Object* obj)
1396      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_) {
1397    size_t object_size = RoundUp(obj->SizeOf(), kObjectAlignment);
1398    mirror::Object* forward_address;
1399    // Find the smallest bin into which we can move obj.
1400    auto it = bins_.lower_bound(object_size);
1401    if (it == bins_.end()) {
1402      // No available space in the bins, place it in the target space instead (grows the zygote
1403      // space).
1404      size_t bytes_allocated;
1405      forward_address = to_space_->Alloc(self_, object_size, &bytes_allocated);
1406      if (to_space_live_bitmap_ != nullptr) {
1407        to_space_live_bitmap_->Set(forward_address);
1408      } else {
1409        GetHeap()->GetNonMovingSpace()->GetLiveBitmap()->Set(forward_address);
1410        GetHeap()->GetNonMovingSpace()->GetMarkBitmap()->Set(forward_address);
1411      }
1412    } else {
1413      size_t size = it->first;
1414      uintptr_t pos = it->second;
1415      bins_.erase(it);  // Erase the old bin which we replace with the new smaller bin.
1416      forward_address = reinterpret_cast<mirror::Object*>(pos);
1417      // Set the live and mark bits so that sweeping system weaks works properly.
1418      bin_live_bitmap_->Set(forward_address);
1419      bin_mark_bitmap_->Set(forward_address);
1420      DCHECK_GE(size, object_size);
1421      AddBin(size - object_size, pos + object_size);  // Add a new bin with the remaining space.
1422    }
1423    // Copy the object over to its new location.
1424    memcpy(reinterpret_cast<void*>(forward_address), obj, object_size);
1425    return forward_address;
1426  }
1427};
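// Editor's note -- illustrative sketch, not part of heap.cc. The collector above performs a
// best-fit search over a std::multimap keyed by hole ("bin") size. A minimal standalone
// version of the same lookup, using hypothetical names, looks like this:
//
//   #include <cstdint>
//   #include <map>
//
//   std::multimap<size_t, uintptr_t> bins;  // Maps hole size -> hole start address.
//
//   // Returns the address at which the object was placed, or 0 if no hole is large enough.
//   uintptr_t PlaceObject(size_t object_size) {
//     auto it = bins.lower_bound(object_size);  // Smallest hole >= object_size.
//     if (it == bins.end()) {
//       return 0;  // Caller falls back to allocating in the target space.
//     }
//     size_t hole_size = it->first;
//     uintptr_t hole_pos = it->second;
//     bins.erase(it);  // Consume the hole...
//     if (hole_size > object_size) {
//       // ...and give back the unused tail as a new, smaller hole.
//       bins.insert(std::make_pair(hole_size - object_size, hole_pos + object_size));
//     }
//     return hole_pos;
//   }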
1428
1429void Heap::UnBindBitmaps() {
1430  for (const auto& space : GetContinuousSpaces()) {
1431    if (space->IsContinuousMemMapAllocSpace()) {
1432      space::ContinuousMemMapAllocSpace* alloc_space = space->AsContinuousMemMapAllocSpace();
1433      if (alloc_space->HasBoundBitmaps()) {
1434        alloc_space->UnBindBitmaps();
1435      }
1436    }
1437  }
1438}
1439
1440void Heap::PreZygoteFork() {
1441  CollectGarbageInternal(collector::kGcTypeFull, kGcCauseBackground, false);
1442  static Mutex zygote_creation_lock_("zygote creation lock", kZygoteCreationLock);
1443  Thread* self = Thread::Current();
1444  MutexLock mu(self, zygote_creation_lock_);
1445  // Check whether we already have a zygote space.
1446  if (have_zygote_space_) {
1447    return;
1448  }
1449  VLOG(heap) << "Starting PreZygoteFork";
1450  // Trim the pages at the end of the non moving space.
1451  non_moving_space_->Trim();
1452  non_moving_space_->GetMemMap()->Protect(PROT_READ | PROT_WRITE);
1453  // Change the collector to the post zygote one.
1454  ChangeCollector(post_zygote_collector_type_);
1455  // TODO: Delete bump_pointer_space_ and temp_pointer_space_?
1456  if (semi_space_collector_ != nullptr) {
1457    // Temporarily disable rosalloc verification because the zygote
1458    // compaction will mess up the rosalloc internal metadata.
1459    ScopedDisableRosAllocVerification disable_rosalloc_verif(this);
1460    ZygoteCompactingCollector zygote_collector(this);
1461    zygote_collector.BuildBins(non_moving_space_);
1462    // Create a new bump pointer space which we will compact into.
1463    space::BumpPointerSpace target_space("zygote bump space", non_moving_space_->End(),
1464                                         non_moving_space_->Limit());
1465    // Compact the bump pointer space to a new zygote bump pointer space.
1466    temp_space_->GetMemMap()->Protect(PROT_READ | PROT_WRITE);
1467    zygote_collector.SetFromSpace(bump_pointer_space_);
1468    zygote_collector.SetToSpace(&target_space);
1469    zygote_collector.Run(kGcCauseCollectorTransition, false);
1470    CHECK(temp_space_->IsEmpty());
1471    total_objects_freed_ever_ += semi_space_collector_->GetFreedObjects();
1472    total_bytes_freed_ever_ += semi_space_collector_->GetFreedBytes();
1473    // Update the end and write out image.
1474    non_moving_space_->SetEnd(target_space.End());
1475    non_moving_space_->SetLimit(target_space.Limit());
1476    VLOG(heap) << "Zygote size " << non_moving_space_->Size() << " bytes";
1477  }
1478  // Save the old space so that we can remove it after we complete creating the zygote space.
1479  space::MallocSpace* old_alloc_space = non_moving_space_;
1480  // Turn the current alloc space into a zygote space and obtain the new alloc space composed of
1481  // the remaining available space.
1482  // Remove the old space before creating the zygote space since creating the zygote space sets
1483  // the old alloc space's bitmaps to nullptr.
1484  RemoveSpace(old_alloc_space);
1485  space::ZygoteSpace* zygote_space = old_alloc_space->CreateZygoteSpace("alloc space",
1486                                                                        low_memory_mode_,
1487                                                                        &main_space_);
1488  delete old_alloc_space;
1489  CHECK(zygote_space != nullptr) << "Failed creating zygote space";
1490  AddSpace(zygote_space, false);
1491  CHECK(main_space_ != nullptr);
1492  if (main_space_->IsRosAllocSpace()) {
1493    rosalloc_space_ = main_space_->AsRosAllocSpace();
1494  } else if (main_space_->IsDlMallocSpace()) {
1495    dlmalloc_space_ = main_space_->AsDlMallocSpace();
1496  }
1497  main_space_->SetFootprintLimit(main_space_->Capacity());
1498  AddSpace(main_space_);
1499  have_zygote_space_ = true;
1500  // Create the zygote space mod union table.
1501  accounting::ModUnionTable* mod_union_table =
1502      new accounting::ModUnionTableCardCache("zygote space mod-union table", this, zygote_space);
1503  CHECK(mod_union_table != nullptr) << "Failed to create zygote space mod-union table";
1504  AddModUnionTable(mod_union_table);
1505  // Reset the cumulative loggers since we now have a few additional timing phases.
1506  for (const auto& collector : garbage_collectors_) {
1507    collector->ResetCumulativeStatistics();
1508  }
1509  // Can't use RosAlloc for non moving space due to thread local buffers.
1510  // TODO: Non limited space for non-movable objects?
1511  MemMap* mem_map = post_zygote_non_moving_space_mem_map_.release();
1512  space::MallocSpace* new_non_moving_space =
1513      space::DlMallocSpace::CreateFromMemMap(mem_map, "Non moving dlmalloc space", kPageSize,
1514                                             2 * MB, mem_map->Size(), mem_map->Size());
1515  AddSpace(new_non_moving_space, false);
1516  CHECK(new_non_moving_space != nullptr) << "Failed to create new non-moving space";
1517  new_non_moving_space->SetFootprintLimit(new_non_moving_space->Capacity());
1518  non_moving_space_ = new_non_moving_space;
1519}
1520
1521void Heap::FlushAllocStack() {
1522  MarkAllocStackAsLive(allocation_stack_.get());
1523  allocation_stack_->Reset();
1524}
1525
1526void Heap::MarkAllocStack(accounting::SpaceBitmap* bitmap1,
1527                          accounting::SpaceBitmap* bitmap2,
1528                          accounting::ObjectSet* large_objects,
1529                          accounting::ObjectStack* stack) {
1530  DCHECK(bitmap1 != nullptr);
1531  DCHECK(bitmap2 != nullptr);
1532  mirror::Object** limit = stack->End();
1533  for (mirror::Object** it = stack->Begin(); it != limit; ++it) {
1534    const mirror::Object* obj = *it;
1535    DCHECK(obj != nullptr);
1536    if (bitmap1->HasAddress(obj)) {
1537      bitmap1->Set(obj);
1538    } else if (bitmap2->HasAddress(obj)) {
1539      bitmap2->Set(obj);
1540    } else {
1541      large_objects->Set(obj);
1542    }
1543  }
1544}
1545
1546void Heap::SwapSemiSpaces() {
1547  // Swap the spaces so we allocate into the space which we just evacuated.
1548  std::swap(bump_pointer_space_, temp_space_);
1549}
1550
1551void Heap::Compact(space::ContinuousMemMapAllocSpace* target_space,
1552                   space::ContinuousMemMapAllocSpace* source_space) {
1553  CHECK(kMovingCollector);
1554  CHECK_NE(target_space, source_space) << "In-place compaction currently unsupported";
1555  if (target_space != source_space) {
1556    semi_space_collector_->SetFromSpace(source_space);
1557    semi_space_collector_->SetToSpace(target_space);
1558    semi_space_collector_->Run(kGcCauseCollectorTransition, false);
1559  }
1560}
1561
1562collector::GcType Heap::CollectGarbageInternal(collector::GcType gc_type, GcCause gc_cause,
1563                                               bool clear_soft_references) {
1564  Thread* self = Thread::Current();
1565  Runtime* runtime = Runtime::Current();
1566  // If the heap can't run the GC, silently fail and return that no GC was run.
1567  switch (gc_type) {
1568    case collector::kGcTypePartial: {
1569      if (!have_zygote_space_) {
1570        return collector::kGcTypeNone;
1571      }
1572      break;
1573    }
1574    default: {
1575      // Other GC types don't have any special cases which make them not runnable. The main case
1576      // here is the full GC.
1577    }
1578  }
1579  ScopedThreadStateChange tsc(self, kWaitingPerformingGc);
1580  Locks::mutator_lock_->AssertNotHeld(self);
1581  if (self->IsHandlingStackOverflow()) {
1582    LOG(WARNING) << "Performing GC on a thread that is handling a stack overflow.";
1583  }
1584  bool compacting_gc;
1585  {
1586    gc_complete_lock_->AssertNotHeld(self);
1587    ScopedThreadStateChange tsc(self, kWaitingForGcToComplete);
1588    MutexLock mu(self, *gc_complete_lock_);
1589    // Ensure there is only one GC at a time.
1590    WaitForGcToCompleteLocked(self);
1591    compacting_gc = IsCompactingGC(collector_type_);
1592    // GC can be disabled if someone has used GetPrimitiveArrayCritical.
1593    if (compacting_gc && disable_moving_gc_count_ != 0) {
1594      LOG(WARNING) << "Skipping GC due to disable moving GC count " << disable_moving_gc_count_;
1595      return collector::kGcTypeNone;
1596    }
1597    collector_type_running_ = collector_type_;
1598  }
1599
1600  if (gc_cause == kGcCauseForAlloc && runtime->HasStatsEnabled()) {
1601    ++runtime->GetStats()->gc_for_alloc_count;
1602    ++self->GetStats()->gc_for_alloc_count;
1603  }
1604  uint64_t gc_start_time_ns = NanoTime();
1605  uint64_t gc_start_size = GetBytesAllocated();
1606  // Approximate allocation rate in bytes / second.
1607  uint64_t ms_delta = NsToMs(gc_start_time_ns - last_gc_time_ns_);
1608  // Back-to-back GCs can cause 0 ms of wait time in between GC invocations.
1609  if (LIKELY(ms_delta != 0)) {
1610    allocation_rate_ = ((gc_start_size - last_gc_size_) * 1000) / ms_delta;
1611    VLOG(heap) << "Allocation rate: " << PrettySize(allocation_rate_) << "/s";
1612  }
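  // Editor's note -- illustrative arithmetic only, not part of heap.cc: with hypothetical
  // numbers, if 4 MB were allocated since a GC that finished 500 ms ago, the computation above
  // yields allocation_rate_ = (4 MB * 1000) / 500 = 8 MB per second.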
1613
1614  DCHECK_LT(gc_type, collector::kGcTypeMax);
1615  DCHECK_NE(gc_type, collector::kGcTypeNone);
1616
1617  collector::GarbageCollector* collector = nullptr;
1618  // TODO: Clean this up.
1619  if (compacting_gc) {
1620    DCHECK(current_allocator_ == kAllocatorTypeBumpPointer ||
1621           current_allocator_ == kAllocatorTypeTLAB);
1622    gc_type = semi_space_collector_->GetGcType();
1623    CHECK(temp_space_->IsEmpty());
1624    semi_space_collector_->SetFromSpace(bump_pointer_space_);
1625    semi_space_collector_->SetToSpace(temp_space_);
1626    mprotect(temp_space_->Begin(), temp_space_->Capacity(), PROT_READ | PROT_WRITE);
1627    collector = semi_space_collector_;
1628    gc_type = collector::kGcTypeFull;
1629  } else if (current_allocator_ == kAllocatorTypeRosAlloc ||
1630      current_allocator_ == kAllocatorTypeDlMalloc) {
1631    for (const auto& cur_collector : garbage_collectors_) {
1632      if (cur_collector->IsConcurrent() == concurrent_gc_ &&
1633          cur_collector->GetGcType() == gc_type) {
1634        collector = cur_collector;
1635        break;
1636      }
1637    }
1638  } else {
1639    LOG(FATAL) << "Invalid current allocator " << current_allocator_;
1640  }
1641  CHECK(collector != nullptr)
1642      << "Could not find garbage collector with concurrent=" << concurrent_gc_
1643      << " and type=" << gc_type;
1644  ATRACE_BEGIN(StringPrintf("%s %s GC", PrettyCause(gc_cause), collector->GetName()).c_str());
1645  collector->Run(gc_cause, clear_soft_references);
1646  total_objects_freed_ever_ += collector->GetFreedObjects();
1647  total_bytes_freed_ever_ += collector->GetFreedBytes();
1648  // Enqueue cleared references.
1649  EnqueueClearedReferences();
1650  // Grow the heap so that we know when to perform the next GC.
1651  GrowForUtilization(gc_type, collector->GetDurationNs());
1652  if (CareAboutPauseTimes()) {
1653    const size_t duration = collector->GetDurationNs();
1654    std::vector<uint64_t> pauses = collector->GetPauseTimes();
1655    // GC for alloc pauses the allocating thread, so consider it as a pause.
1656    bool was_slow = duration > long_gc_log_threshold_ ||
1657        (gc_cause == kGcCauseForAlloc && duration > long_pause_log_threshold_);
1658    if (!was_slow) {
1659      for (uint64_t pause : pauses) {
1660        was_slow = was_slow || pause > long_pause_log_threshold_;
1661      }
1662    }
1663    if (was_slow) {
1664      const size_t percent_free = GetPercentFree();
1665      const size_t current_heap_size = GetBytesAllocated();
1666      const size_t total_memory = GetTotalMemory();
1667      std::ostringstream pause_string;
1668      for (size_t i = 0; i < pauses.size(); ++i) {
1669        pause_string << PrettyDuration((pauses[i] / 1000) * 1000)
1670                     << ((i != pauses.size() - 1) ? ", " : "");
1671      }
1672      LOG(INFO) << gc_cause << " " << collector->GetName()
1673                << " GC freed " << collector->GetFreedObjects() << "("
1674                << PrettySize(collector->GetFreedBytes()) << ") AllocSpace objects, "
1675                << collector->GetFreedLargeObjects() << "("
1676                << PrettySize(collector->GetFreedLargeObjectBytes()) << ") LOS objects, "
1677                << percent_free << "% free, " << PrettySize(current_heap_size) << "/"
1678                << PrettySize(total_memory) << ", paused " << pause_string.str()
1679                << " total " << PrettyDuration((duration / 1000) * 1000);
1680      if (VLOG_IS_ON(heap)) {
1681        LOG(INFO) << Dumpable<TimingLogger>(collector->GetTimings());
1682      }
1683    }
1684  }
1685  FinishGC(self, gc_type);
1686  ATRACE_END();
1687
1688  // Inform DDMS that a GC completed.
1689  Dbg::GcDidFinish();
1690  return gc_type;
1691}
1692
1693void Heap::FinishGC(Thread* self, collector::GcType gc_type) {
1694  MutexLock mu(self, *gc_complete_lock_);
1695  collector_type_running_ = kCollectorTypeNone;
1696  if (gc_type != collector::kGcTypeNone) {
1697    last_gc_type_ = gc_type;
1698  }
1699  // Wake anyone who may have been waiting for the GC to complete.
1700  gc_complete_cond_->Broadcast(self);
1701}
1702
1703static mirror::Object* RootMatchesObjectVisitor(mirror::Object* root, void* arg) {
1704  mirror::Object* obj = reinterpret_cast<mirror::Object*>(arg);
1705  if (root == obj) {
1706    LOG(INFO) << "Object " << obj << " is a root";
1707  }
1708  return root;
1709}
1710
1711class ScanVisitor {
1712 public:
1713  void operator()(const mirror::Object* obj) const {
1714    LOG(ERROR) << "Would have rescanned object " << obj;
1715  }
1716};
1717
1718// Verify a reference from an object.
1719class VerifyReferenceVisitor {
1720 public:
1721  explicit VerifyReferenceVisitor(Heap* heap)
1722      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_)
1723      : heap_(heap), failed_(false) {}
1724
1725  bool Failed() const {
1726    return failed_;
1727  }
1728
1729  // TODO: Fix lock analysis to not use NO_THREAD_SAFETY_ANALYSIS, requires support for smarter
1730  // analysis on visitors.
1731  void operator()(mirror::Object* obj, mirror::Object* ref,
1732                  const MemberOffset& offset, bool /* is_static */) const
1733      NO_THREAD_SAFETY_ANALYSIS {
1734    if (ref == nullptr || IsLive(ref)) {
1735      // Verify that the reference is live.
1736      return;
1737    }
1738    if (!failed_) {
1739      // Print the message only on the first failure to prevent spam.
1740      LOG(ERROR) << "!!!!!!!!!!!!!!Heap corruption detected!!!!!!!!!!!!!!!!!!!";
1741      failed_ = true;
1742    }
1743    if (obj != nullptr) {
1744      accounting::CardTable* card_table = heap_->GetCardTable();
1745      accounting::ObjectStack* alloc_stack = heap_->allocation_stack_.get();
1746      accounting::ObjectStack* live_stack = heap_->live_stack_.get();
1747      byte* card_addr = card_table->CardFromAddr(obj);
1748      LOG(ERROR) << "Object " << obj << " references dead object " << ref << " at offset "
1749                 << offset << "\n card value = " << static_cast<int>(*card_addr);
1750      if (heap_->IsValidObjectAddress(obj->GetClass())) {
1751        LOG(ERROR) << "Obj type " << PrettyTypeOf(obj);
1752      } else {
1753        LOG(ERROR) << "Object " << obj << " class(" << obj->GetClass() << ") not a heap address";
1754      }
1755
1756      // Attempt to find the class inside the recently freed objects.
1757      space::ContinuousSpace* ref_space = heap_->FindContinuousSpaceFromObject(ref, true);
1758      if (ref_space != nullptr && ref_space->IsMallocSpace()) {
1759        space::MallocSpace* space = ref_space->AsMallocSpace();
1760        mirror::Class* ref_class = space->FindRecentFreedObject(ref);
1761        if (ref_class != nullptr) {
1762          LOG(ERROR) << "Reference " << ref << " found as a recently freed object with class "
1763                     << PrettyClass(ref_class);
1764        } else {
1765          LOG(ERROR) << "Reference " << ref << " not found as a recently freed object";
1766        }
1767      }
1768
1769      if (ref->GetClass() != nullptr && heap_->IsValidObjectAddress(ref->GetClass()) &&
1770          ref->GetClass()->IsClass()) {
1771        LOG(ERROR) << "Ref type " << PrettyTypeOf(ref);
1772      } else {
1773        LOG(ERROR) << "Ref " << ref << " class(" << ref->GetClass()
1774                   << ") is not a valid heap address";
1775      }
1776
1777      card_table->CheckAddrIsInCardTable(reinterpret_cast<const byte*>(obj));
1778      void* cover_begin = card_table->AddrFromCard(card_addr);
1779      void* cover_end = reinterpret_cast<void*>(reinterpret_cast<size_t>(cover_begin) +
1780          accounting::CardTable::kCardSize);
1781      LOG(ERROR) << "Card " << reinterpret_cast<void*>(card_addr) << " covers " << cover_begin
1782          << "-" << cover_end;
1783      accounting::SpaceBitmap* bitmap = heap_->GetLiveBitmap()->GetContinuousSpaceBitmap(obj);
1784
1785      if (bitmap == nullptr) {
1786        LOG(ERROR) << "Object " << obj << " has no bitmap";
1787        if (!heap_->VerifyClassClass(obj->GetClass())) {
1788          LOG(ERROR) << "Object " << obj << " failed class verification!";
1789        }
1790      } else {
1791        // Print out how the object is live.
1792        if (bitmap->Test(obj)) {
1793          LOG(ERROR) << "Object " << obj << " found in live bitmap";
1794        }
1795        if (alloc_stack->Contains(const_cast<mirror::Object*>(obj))) {
1796          LOG(ERROR) << "Object " << obj << " found in allocation stack";
1797        }
1798        if (live_stack->Contains(const_cast<mirror::Object*>(obj))) {
1799          LOG(ERROR) << "Object " << obj << " found in live stack";
1800        }
1801        if (alloc_stack->Contains(const_cast<mirror::Object*>(ref))) {
1802          LOG(ERROR) << "Ref " << ref << " found in allocation stack";
1803        }
1804        if (live_stack->Contains(const_cast<mirror::Object*>(ref))) {
1805          LOG(ERROR) << "Ref " << ref << " found in live stack";
1806        }
1807        // Attempt to see if the card table missed the reference.
1808        ScanVisitor scan_visitor;
1809        byte* byte_cover_begin = reinterpret_cast<byte*>(card_table->AddrFromCard(card_addr));
1810        card_table->Scan(bitmap, byte_cover_begin,
1811                         byte_cover_begin + accounting::CardTable::kCardSize, scan_visitor);
1812      }
1813
1814      // Search to see if any of the roots reference our object.
1815      void* arg = const_cast<void*>(reinterpret_cast<const void*>(obj));
1816      Runtime::Current()->VisitRoots(&RootMatchesObjectVisitor, arg, false, false);
1817
1818      // Search to see if any of the roots reference our reference.
1819      arg = const_cast<void*>(reinterpret_cast<const void*>(ref));
1820      Runtime::Current()->VisitRoots(&RootMatchesObjectVisitor, arg, false, false);
1821    } else {
1822      LOG(ERROR) << "Root " << ref << " is dead with type " << PrettyTypeOf(ref);
1823    }
1824  }
1825
1826  bool IsLive(mirror::Object* obj) const NO_THREAD_SAFETY_ANALYSIS {
1827    return heap_->IsLiveObjectLocked(obj, true, false, true);
1828  }
1829
1830  static mirror::Object* VerifyRoots(mirror::Object* root, void* arg) {
1831    VerifyReferenceVisitor* visitor = reinterpret_cast<VerifyReferenceVisitor*>(arg);
1832    (*visitor)(nullptr, root, MemberOffset(0), true);
1833    return root;
1834  }
1835
1836 private:
1837  Heap* const heap_;
1838  mutable bool failed_;
1839};
1840
1841// Verify all references within an object, for use with HeapBitmap::Visit.
1842class VerifyObjectVisitor {
1843 public:
1844  explicit VerifyObjectVisitor(Heap* heap) : heap_(heap), failed_(false) {}
1845
1846  void operator()(mirror::Object* obj) const
1847      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
1848    // Note: we verify the references in obj but not obj itself, because obj must be live for
1849    // us to have found it in the live bitmap.
1850    VerifyReferenceVisitor visitor(heap_);
1851    // The class doesn't count as a reference but we should verify it anyway.
1852    collector::MarkSweep::VisitObjectReferences(obj, visitor, true);
1853    if (obj->GetClass()->IsReferenceClass()) {
1854      visitor(obj, heap_->GetReferenceReferent(obj), MemberOffset(0), false);
1855    }
1856    failed_ = failed_ || visitor.Failed();
1857  }
1858
1859  static void VisitCallback(mirror::Object* obj, void* arg)
1860      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
1861    VerifyObjectVisitor* visitor = reinterpret_cast<VerifyObjectVisitor*>(arg);
1862    visitor->operator()(obj);
1863  }
1864
1865  bool Failed() const {
1866    return failed_;
1867  }
1868
1869 private:
1870  Heap* const heap_;
1871  mutable bool failed_;
1872};
1873
1874// Must do this with mutators suspended since we are directly accessing the allocation stacks.
1875bool Heap::VerifyHeapReferences() {
1876  Locks::mutator_lock_->AssertExclusiveHeld(Thread::Current());
1877  // Let's sort our allocation stacks so that we can efficiently binary search them.
1878  allocation_stack_->Sort();
1879  live_stack_->Sort();
1880  VerifyObjectVisitor visitor(this);
1881  // Verify objects in the allocation stack since these will be objects which were:
1882  // 1. Allocated prior to the GC (pre GC verification).
1883  // 2. Allocated during the GC (pre sweep GC verification).
1884  // We don't want to verify the objects in the live stack since they themselves may be
1885  // pointing to dead objects if they are not reachable.
1886  VisitObjects(VerifyObjectVisitor::VisitCallback, &visitor);
1887  // Verify the roots:
1888  Runtime::Current()->VisitRoots(VerifyReferenceVisitor::VerifyRoots, &visitor, false, false);
1889  if (visitor.Failed()) {
1890    // Dump mod-union tables.
1891    for (const auto& table_pair : mod_union_tables_) {
1892      accounting::ModUnionTable* mod_union_table = table_pair.second;
1893      mod_union_table->Dump(LOG(ERROR) << mod_union_table->GetName() << ": ");
1894    }
1895    DumpSpaces();
1896    return false;
1897  }
1898  return true;
1899}
1900
1901class VerifyReferenceCardVisitor {
1902 public:
1903  VerifyReferenceCardVisitor(Heap* heap, bool* failed)
1904      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_,
1905                            Locks::heap_bitmap_lock_)
1906      : heap_(heap), failed_(failed) {
1907  }
1908
1909  // TODO: Fix lock analysis to not use NO_THREAD_SAFETY_ANALYSIS, requires support for
1910  // annotalysis on visitors.
1911  void operator()(mirror::Object* obj, mirror::Object* ref, const MemberOffset& offset,
1912                  bool is_static) const NO_THREAD_SAFETY_ANALYSIS {
1913    // Filter out class references since changing an object's class does not mark the card as dirty.
1914    // Also handles large objects, since the only reference they hold is a class reference.
1915    if (ref != NULL && !ref->IsClass()) {
1916      accounting::CardTable* card_table = heap_->GetCardTable();
1917      // If the object is not dirty and it is referencing something in the live stack other than
1918      // class, then it must be on a dirty card.
1919      if (!card_table->AddrIsInCardTable(obj)) {
1920        LOG(ERROR) << "Object " << obj << " is not in the address range of the card table";
1921        *failed_ = true;
1922      } else if (!card_table->IsDirty(obj)) {
1923        // TODO: Check mod-union tables.
1924        // Card should be either kCardDirty if it got re-dirtied after we aged it, or
1925        // kCardDirty - 1 if it didn't get touched since we aged it.
1926        accounting::ObjectStack* live_stack = heap_->live_stack_.get();
1927        if (live_stack->ContainsSorted(const_cast<mirror::Object*>(ref))) {
1928          if (live_stack->ContainsSorted(const_cast<mirror::Object*>(obj))) {
1929            LOG(ERROR) << "Object " << obj << " found in live stack";
1930          }
1931          if (heap_->GetLiveBitmap()->Test(obj)) {
1932            LOG(ERROR) << "Object " << obj << " found in live bitmap";
1933          }
1934          LOG(ERROR) << "Object " << obj << " " << PrettyTypeOf(obj)
1935                    << " references " << ref << " " << PrettyTypeOf(ref) << " in live stack";
1936
1937          // Print which field of the object is dead.
1938          if (!obj->IsObjectArray()) {
1939            mirror::Class* klass = is_static ? obj->AsClass() : obj->GetClass();
1940            CHECK(klass != NULL);
1941            mirror::ObjectArray<mirror::ArtField>* fields = is_static ? klass->GetSFields()
1942                                                                      : klass->GetIFields();
1943            CHECK(fields != NULL);
1944            for (int32_t i = 0; i < fields->GetLength(); ++i) {
1945              mirror::ArtField* cur = fields->Get(i);
1946              if (cur->GetOffset().Int32Value() == offset.Int32Value()) {
1947                LOG(ERROR) << (is_static ? "Static " : "") << "field in the live stack is "
1948                          << PrettyField(cur);
1949                break;
1950              }
1951            }
1952          } else {
1953            mirror::ObjectArray<mirror::Object>* object_array =
1954                obj->AsObjectArray<mirror::Object>();
1955            for (int32_t i = 0; i < object_array->GetLength(); ++i) {
1956              if (object_array->Get(i) == ref) {
1957                LOG(ERROR) << (is_static ? "Static " : "") << "obj[" << i << "] = ref";
1958              }
1959            }
1960          }
1961
1962          *failed_ = true;
1963        }
1964      }
1965    }
1966  }
1967
1968 private:
1969  Heap* const heap_;
1970  bool* const failed_;
1971};
1972
1973class VerifyLiveStackReferences {
1974 public:
1975  explicit VerifyLiveStackReferences(Heap* heap)
1976      : heap_(heap),
1977        failed_(false) {}
1978
1979  void operator()(mirror::Object* obj) const
1980      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
1981    VerifyReferenceCardVisitor visitor(heap_, const_cast<bool*>(&failed_));
1982    collector::MarkSweep::VisitObjectReferences(const_cast<mirror::Object*>(obj), visitor, true);
1983  }
1984
1985  bool Failed() const {
1986    return failed_;
1987  }
1988
1989 private:
1990  Heap* const heap_;
1991  bool failed_;
1992};
1993
1994bool Heap::VerifyMissingCardMarks() {
1995  Locks::mutator_lock_->AssertExclusiveHeld(Thread::Current());
1996
1997  // We need to sort the live stack since we binary search it.
1998  live_stack_->Sort();
1999  VerifyLiveStackReferences visitor(this);
2000  GetLiveBitmap()->Visit(visitor);
2001
2002  // We can verify objects in the live stack since none of these should reference dead objects.
2003  for (mirror::Object** it = live_stack_->Begin(); it != live_stack_->End(); ++it) {
2004    visitor(*it);
2005  }
2006
2007  if (visitor.Failed()) {
2008    DumpSpaces();
2009    return false;
2010  }
2011  return true;
2012}
2013
2014void Heap::SwapStacks() {
2015  allocation_stack_.swap(live_stack_);
2016}
2017
2018accounting::ModUnionTable* Heap::FindModUnionTableFromSpace(space::Space* space) {
2019  auto it = mod_union_tables_.find(space);
2020  if (it == mod_union_tables_.end()) {
2021    return nullptr;
2022  }
2023  return it->second;
2024}
2025
2026void Heap::ProcessCards(TimingLogger& timings) {
2027  // Clear cards and keep track of cards cleared in the mod-union table.
2028  for (const auto& space : continuous_spaces_) {
2029    accounting::ModUnionTable* table = FindModUnionTableFromSpace(space);
2030    if (table != nullptr) {
2031      const char* name = space->IsZygoteSpace() ? "ZygoteModUnionClearCards" :
2032          "ImageModUnionClearCards";
2033      TimingLogger::ScopedSplit split(name, &timings);
2034      table->ClearCards();
2035    } else if (space->GetType() != space::kSpaceTypeBumpPointerSpace) {
2036      TimingLogger::ScopedSplit split("AllocSpaceClearCards", &timings);
2037      // No mod union table for the AllocSpace. Age the cards so that the GC knows that these cards
2038      // were dirty before the GC started.
2039      // TODO: Don't need to use atomic.
2040      // The race leaves us with either an aged or an unaged card. Since we checkpoint the
2041      // roots and then scan / update the mod union tables afterwards, we will scan the card
2042      // either way. If we end up with the unaged card, we scan it in the pause.
2043      card_table_->ModifyCardsAtomic(space->Begin(), space->End(), AgeCardVisitor(), VoidFunctor());
2044    }
2045  }
2046}
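// Editor's note -- illustrative only, not part of heap.cc. "Aging" a card keeps writes that
// happened before this GC distinguishable from writes made while it runs, matching the
// kCardDirty / kCardDirty - 1 cases checked by VerifyReferenceCardVisitor above:
//
//   kCardDirty    -> kCardDirty - 1  // Dirtied before the GC started; still needs scanning.
//   anything else -> clean           // No interesting writes recorded on this card.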
2047
2048static mirror::Object* IdentityCallback(mirror::Object* obj, void*) {
2049  return obj;
2050}
2051
2052void Heap::PreGcVerification(collector::GarbageCollector* gc) {
2053  ThreadList* thread_list = Runtime::Current()->GetThreadList();
2054  Thread* self = Thread::Current();
2055
2056  if (verify_pre_gc_heap_) {
2057    thread_list->SuspendAll();
2058    {
2059      ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
2060      if (!VerifyHeapReferences()) {
2061        LOG(FATAL) << "Pre " << gc->GetName() << " heap verification failed";
2062      }
2063    }
2064    thread_list->ResumeAll();
2065  }
2066
2067  // Check that all objects which reference things in the live stack are on dirty cards.
2068  if (verify_missing_card_marks_) {
2069    thread_list->SuspendAll();
2070    {
2071      ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
2072      SwapStacks();
2073      // Sort the live stack so that we can quickly binary search it later.
2074      if (!VerifyMissingCardMarks()) {
2075        LOG(FATAL) << "Pre " << gc->GetName() << " missing card mark verification failed";
2076      }
2077      SwapStacks();
2078    }
2079    thread_list->ResumeAll();
2080  }
2081
2082  if (verify_mod_union_table_) {
2083    thread_list->SuspendAll();
2084    ReaderMutexLock reader_lock(self, *Locks::heap_bitmap_lock_);
2085    for (const auto& table_pair : mod_union_tables_) {
2086      accounting::ModUnionTable* mod_union_table = table_pair.second;
2087      mod_union_table->UpdateAndMarkReferences(IdentityCallback, nullptr);
2088      mod_union_table->Verify();
2089    }
2090    thread_list->ResumeAll();
2091  }
2092}
2093
2094void Heap::PreSweepingGcVerification(collector::GarbageCollector* gc) {
2095  // Called before sweeping occurs since we want to make sure we are not going to reclaim any
2096  // reachable objects.
2097  if (verify_post_gc_heap_) {
2098    Thread* self = Thread::Current();
2099    CHECK_NE(self->GetState(), kRunnable);
2100    {
2101      WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
2102      // Swapping bound bitmaps does nothing.
2103      gc->SwapBitmaps();
2104      if (!VerifyHeapReferences()) {
2105        LOG(FATAL) << "Pre sweeping " << gc->GetName() << " GC verification failed";
2106      }
2107      gc->SwapBitmaps();
2108    }
2109  }
2110}
2111
2112void Heap::PostGcVerification(collector::GarbageCollector* gc) {
2113  if (verify_system_weaks_) {
2114    Thread* self = Thread::Current();
2115    ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
2116    collector::MarkSweep* mark_sweep = down_cast<collector::MarkSweep*>(gc);
2117    mark_sweep->VerifySystemWeaks();
2118  }
2119}
2120
2121void Heap::PreGcRosAllocVerification(TimingLogger* timings) {
2122  if (verify_pre_gc_rosalloc_) {
2123    TimingLogger::ScopedSplit split("PreGcRosAllocVerification", timings);
2124    for (const auto& space : continuous_spaces_) {
2125      if (space->IsRosAllocSpace()) {
2126        VLOG(heap) << "PreGcRosAllocVerification : " << space->GetName();
2127        space::RosAllocSpace* rosalloc_space = space->AsRosAllocSpace();
2128        rosalloc_space->Verify();
2129      }
2130    }
2131  }
2132}
2133
2134void Heap::PostGcRosAllocVerification(TimingLogger* timings) {
2135  if (verify_post_gc_rosalloc_) {
2136    TimingLogger::ScopedSplit split("PostGcRosAllocVerification", timings);
2137    for (const auto& space : continuous_spaces_) {
2138      if (space->IsRosAllocSpace()) {
2139        VLOG(heap) << "PostGcRosAllocVerification : " << space->GetName();
2140        space::RosAllocSpace* rosalloc_space = space->AsRosAllocSpace();
2141        rosalloc_space->Verify();
2142      }
2143    }
2144  }
2145}
2146
2147collector::GcType Heap::WaitForGcToComplete(Thread* self) {
2148  ScopedThreadStateChange tsc(self, kWaitingForGcToComplete);
2149  MutexLock mu(self, *gc_complete_lock_);
2150  return WaitForGcToCompleteLocked(self);
2151}
2152
2153collector::GcType Heap::WaitForGcToCompleteLocked(Thread* self) {
2154  collector::GcType last_gc_type = collector::kGcTypeNone;
2155  uint64_t wait_start = NanoTime();
2156  while (collector_type_running_ != kCollectorTypeNone) {
2157    ATRACE_BEGIN("GC: Wait For Completion");
2158    // We must wait, so change the thread state and then sleep on gc_complete_cond_.
2159    gc_complete_cond_->Wait(self);
2160    last_gc_type = last_gc_type_;
2161    ATRACE_END();
2162  }
2163  uint64_t wait_time = NanoTime() - wait_start;
2164  total_wait_time_ += wait_time;
2165  if (wait_time > long_pause_log_threshold_) {
2166    LOG(INFO) << "WaitForGcToComplete blocked for " << PrettyDuration(wait_time);
2167  }
2168  return last_gc_type;
2169}
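// Editor's note -- illustrative sketch, not part of heap.cc. FinishGC and
// WaitForGcToCompleteLocked form the classic monitor pattern; with the standard library and
// hypothetical names the same protocol reduces to:
//
//   #include <condition_variable>
//   #include <mutex>
//
//   std::mutex gc_mutex;
//   std::condition_variable gc_done;
//   bool gc_running = false;
//
//   void WaitForGc() {
//     std::unique_lock<std::mutex> lock(gc_mutex);
//     // Re-check the predicate on every wakeup to tolerate spurious wakeups, just as the
//     // while loop above re-checks collector_type_running_.
//     gc_done.wait(lock, [] { return !gc_running; });
//   }
//
//   void FinishGc() {
//     {
//       std::lock_guard<std::mutex> lock(gc_mutex);
//       gc_running = false;
//     }
//     gc_done.notify_all();  // Wake every waiter, mirroring gc_complete_cond_->Broadcast().
//   }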
2170
2171void Heap::DumpForSigQuit(std::ostream& os) {
2172  os << "Heap: " << GetPercentFree() << "% free, " << PrettySize(GetBytesAllocated()) << "/"
2173     << PrettySize(GetTotalMemory()) << "; " << GetObjectsAllocated() << " objects\n";
2174  DumpGcPerformanceInfo(os);
2175}
2176
2177size_t Heap::GetPercentFree() {
2178  return static_cast<size_t>(100.0f * static_cast<float>(GetFreeMemory()) / GetTotalMemory());
2179}
2180
2181void Heap::SetIdealFootprint(size_t max_allowed_footprint) {
2182  if (max_allowed_footprint > GetMaxMemory()) {
2183    VLOG(gc) << "Clamp target GC heap from " << PrettySize(max_allowed_footprint) << " to "
2184             << PrettySize(GetMaxMemory());
2185    max_allowed_footprint = GetMaxMemory();
2186  }
2187  max_allowed_footprint_ = max_allowed_footprint;
2188}
2189
2190bool Heap::IsMovableObject(const mirror::Object* obj) const {
2191  if (kMovingCollector) {
2192    DCHECK(!IsInTempSpace(obj));
2193    if (bump_pointer_space_->HasAddress(obj)) {
2194      return true;
2195    }
2196    // TODO: Refactor this logic into the space itself?
2197    // Objects in the main space are only copied during background -> foreground transitions or
2198    // vice versa.
2199    if (main_space_ != nullptr && main_space_->HasAddress(obj) &&
2200        (IsCompactingGC(background_collector_type_) ||
2201            IsCompactingGC(post_zygote_collector_type_))) {
2202      return true;
2203    }
2204  }
2205  return false;
2206}
2207
2208bool Heap::IsInTempSpace(const mirror::Object* obj) const {
2209  if (temp_space_->HasAddress(obj) && !temp_space_->Contains(obj)) {
2210    return true;
2211  }
2212  return false;
2213}
2214
2215void Heap::UpdateMaxNativeFootprint() {
2216  size_t native_size = native_bytes_allocated_;
2217  // TODO: Tune the native heap utilization to be a value other than the java heap utilization.
2218  size_t target_size = native_size / GetTargetHeapUtilization();
2219  if (target_size > native_size + max_free_) {
2220    target_size = native_size + max_free_;
2221  } else if (target_size < native_size + min_free_) {
2222    target_size = native_size + min_free_;
2223  }
2224  native_footprint_gc_watermark_ = target_size;
2225  native_footprint_limit_ = 2 * target_size - native_size;
2226}
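// Editor's note -- worked example only, not part of heap.cc. With a hypothetical target heap
// utilization of 0.5, min_free_ = 512 KB, max_free_ = 2 MB and 3 MB of registered native
// allocations, the function above computes target_size = 3 MB / 0.5 = 6 MB, clamps it to
// native_size + max_free_ = 5 MB, and so sets native_footprint_gc_watermark_ = 5 MB and
// native_footprint_limit_ = 2 * 5 MB - 3 MB = 7 MB.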
2227
2228void Heap::GrowForUtilization(collector::GcType gc_type, uint64_t gc_duration) {
2229  // We know what our utilization is at this moment.
2230  // This doesn't actually resize any memory. It just lets the heap grow more when necessary.
2231  const size_t bytes_allocated = GetBytesAllocated();
2232  last_gc_size_ = bytes_allocated;
2233  last_gc_time_ns_ = NanoTime();
2234  size_t target_size;
2235  if (gc_type != collector::kGcTypeSticky) {
2236    // Grow the heap for non sticky GC.
2237    target_size = bytes_allocated / GetTargetHeapUtilization();
2238    if (target_size > bytes_allocated + max_free_) {
2239      target_size = bytes_allocated + max_free_;
2240    } else if (target_size < bytes_allocated + min_free_) {
2241      target_size = bytes_allocated + min_free_;
2242    }
2243    native_need_to_run_finalization_ = true;
2244    next_gc_type_ = collector::kGcTypeSticky;
2245  } else {
2246    // Based on how close the current heap size is to the target size, decide
2247    // whether or not to do a partial or sticky GC next.
2248    if (bytes_allocated + min_free_ <= max_allowed_footprint_) {
2249      next_gc_type_ = collector::kGcTypeSticky;
2250    } else {
2251      next_gc_type_ = have_zygote_space_ ? collector::kGcTypePartial : collector::kGcTypeFull;
2252    }
2253    // If we have freed enough memory, shrink the heap back down.
2254    if (bytes_allocated + max_free_ < max_allowed_footprint_) {
2255      target_size = bytes_allocated + max_free_;
2256    } else {
2257      target_size = std::max(bytes_allocated, max_allowed_footprint_);
2258    }
2259  }
2260  if (!ignore_max_footprint_) {
2261    SetIdealFootprint(target_size);
2262    if (concurrent_gc_) {
2263      // Calculate when to perform the next ConcurrentGC.
2264      // Calculate the estimated GC duration.
2265      const double gc_duration_seconds = NsToMs(gc_duration) / 1000.0;
2266      // Estimate how many remaining bytes we will have when we need to start the next GC.
2267      size_t remaining_bytes = allocation_rate_ * gc_duration_seconds;
2268      remaining_bytes = std::min(remaining_bytes, kMaxConcurrentRemainingBytes);
2269      remaining_bytes = std::max(remaining_bytes, kMinConcurrentRemainingBytes);
2270      if (UNLIKELY(remaining_bytes > max_allowed_footprint_)) {
2271        // This should not happen in practice: it would mean that at the estimated allocation rate
2272        // the application allocates more than its entire footprint during a single GC. Schedule
2273        // another GC nearly straight away.
2274        remaining_bytes = kMinConcurrentRemainingBytes;
2275      }
2276      DCHECK_LE(remaining_bytes, max_allowed_footprint_);
2277      DCHECK_LE(max_allowed_footprint_, growth_limit_);
2278      // Start a concurrent GC when we get close to the estimated remaining bytes. When the
2279      // allocation rate is very high, remaining_bytes could tell us that we should start a GC
2280      // right away.
2281      concurrent_start_bytes_ = std::max(max_allowed_footprint_ - remaining_bytes, bytes_allocated);
2282    }
2283  }
2284}
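// Editor's note -- worked example only, not part of heap.cc. For the non-sticky case above,
// the target is bytes_allocated / GetTargetHeapUtilization() clamped into the interval
// [bytes_allocated + min_free_, bytes_allocated + max_free_]. With hypothetical values
// bytes_allocated = 10 MB, utilization = 0.75, min_free_ = 512 KB and max_free_ = 2 MB:
//
//   target_size = 10 MB / 0.75 ~= 13.3 MB  -> clamped to 10 MB + 2 MB = 12 MB.
//
// For a concurrent collector the next GC is then scheduled remaining_bytes before that
// footprint is reached, where remaining_bytes = allocation_rate_ * estimated GC duration,
// clamped to [kMinConcurrentRemainingBytes, kMaxConcurrentRemainingBytes].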
2285
2286void Heap::ClearGrowthLimit() {
2287  growth_limit_ = capacity_;
2288  non_moving_space_->ClearGrowthLimit();
2289}
2290
2291void Heap::SetReferenceOffsets(MemberOffset reference_referent_offset,
2292                               MemberOffset reference_queue_offset,
2293                               MemberOffset reference_queueNext_offset,
2294                               MemberOffset reference_pendingNext_offset,
2295                               MemberOffset finalizer_reference_zombie_offset) {
2296  reference_referent_offset_ = reference_referent_offset;
2297  reference_queue_offset_ = reference_queue_offset;
2298  reference_queueNext_offset_ = reference_queueNext_offset;
2299  reference_pendingNext_offset_ = reference_pendingNext_offset;
2300  finalizer_reference_zombie_offset_ = finalizer_reference_zombie_offset;
2301  CHECK_NE(reference_referent_offset_.Uint32Value(), 0U);
2302  CHECK_NE(reference_queue_offset_.Uint32Value(), 0U);
2303  CHECK_NE(reference_queueNext_offset_.Uint32Value(), 0U);
2304  CHECK_NE(reference_pendingNext_offset_.Uint32Value(), 0U);
2305  CHECK_NE(finalizer_reference_zombie_offset_.Uint32Value(), 0U);
2306}
2307
2308void Heap::SetReferenceReferent(mirror::Object* reference, mirror::Object* referent) {
2309  DCHECK(reference != NULL);
2310  DCHECK_NE(reference_referent_offset_.Uint32Value(), 0U);
2311  reference->SetFieldObject(reference_referent_offset_, referent, true);
2312}
2313
2314mirror::Object* Heap::GetReferenceReferent(mirror::Object* reference) {
2315  DCHECK(reference != NULL);
2316  DCHECK_NE(reference_referent_offset_.Uint32Value(), 0U);
2317  return reference->GetFieldObject<mirror::Object>(reference_referent_offset_, true);
2318}
2319
2320void Heap::AddFinalizerReference(Thread* self, mirror::Object* object) {
2321  ScopedObjectAccess soa(self);
2322  JValue result;
2323  ArgArray arg_array(NULL, 0);
2324  arg_array.Append(object);
2325  soa.DecodeMethod(WellKnownClasses::java_lang_ref_FinalizerReference_add)->Invoke(self,
2326      arg_array.GetArray(), arg_array.GetNumBytes(), &result, 'V');
2327}
2328
2329void Heap::EnqueueClearedReferences() {
2330  Thread* self = Thread::Current();
2331  Locks::mutator_lock_->AssertNotHeld(self);
2332  if (!cleared_references_.IsEmpty()) {
2333    // When the runtime isn't started there are no reference queues to care about, so ignore them.
2334    if (LIKELY(Runtime::Current()->IsStarted())) {
2335      ScopedObjectAccess soa(self);
2336      JValue result;
2337      ArgArray arg_array(NULL, 0);
2338      arg_array.Append(cleared_references_.GetList());
2339      soa.DecodeMethod(WellKnownClasses::java_lang_ref_ReferenceQueue_add)->Invoke(soa.Self(),
2340          arg_array.GetArray(), arg_array.GetNumBytes(), &result, 'V');
2341    }
2342    cleared_references_.Clear();
2343  }
2344}
2345
2346void Heap::RequestConcurrentGC(Thread* self) {
2347  // Make sure that we can do a concurrent GC.
2348  Runtime* runtime = Runtime::Current();
2349  if (runtime == NULL || !runtime->IsFinishedStarting() || runtime->IsShuttingDown(self) ||
2350      self->IsHandlingStackOverflow()) {
2351    return;
2352  }
2353  // We already have a request pending, no reason to start more until we update
2354  // concurrent_start_bytes_.
2355  concurrent_start_bytes_ = std::numeric_limits<size_t>::max();
2356  JNIEnv* env = self->GetJniEnv();
2357  DCHECK(WellKnownClasses::java_lang_Daemons != nullptr);
2358  DCHECK(WellKnownClasses::java_lang_Daemons_requestGC != nullptr);
2359  env->CallStaticVoidMethod(WellKnownClasses::java_lang_Daemons,
2360                            WellKnownClasses::java_lang_Daemons_requestGC);
2361  CHECK(!env->ExceptionCheck());
2362}
2363
2364void Heap::ConcurrentGC(Thread* self) {
2365  if (Runtime::Current()->IsShuttingDown(self)) {
2366    return;
2367  }
2368  // Wait for any GCs currently running to finish.
2369  if (WaitForGcToComplete(self) == collector::kGcTypeNone) {
2370    // If we can't run the GC type we wanted to run, find the next appropriate one and try that
2371    // instead. E.g. can't do partial, so do full instead.
2372    if (CollectGarbageInternal(next_gc_type_, kGcCauseBackground, false) ==
2373        collector::kGcTypeNone) {
2374      for (collector::GcType gc_type : gc_plan_) {
2375        // Attempt to run the collector, if we succeed, we are done.
2376        if (gc_type > next_gc_type_ &&
2377            CollectGarbageInternal(gc_type, kGcCauseBackground, false) != collector::kGcTypeNone) {
2378          break;
2379        }
2380      }
2381    }
2382  }
2383}
2384
2385void Heap::RequestHeapTrim() {
2386  // GC completed and now we must decide whether to request a heap trim (advising pages back to the
2387  // kernel) or not. Issuing a request will also cause trimming of the libc heap. As a trim scans
2388  // a space it will hold its lock and can become a cause of jank.
2389  // Note: the large object space trims itself, and the Zygote space was trimmed at fork time and
2390  // has been unchanging since then.
2391
2392  // We don't have a good measure of how worthwhile a trim might be. We can't use the live bitmap
2393  // because that only marks object heads, so a large array looks like lots of empty space. We
2394  // don't just call dlmalloc all the time, because the cost of an _attempted_ trim is proportional
2395  // to utilization (which is probably inversely proportional to how much benefit we can expect).
2396  // We could try mincore(2) but that's only a measure of how many pages we haven't given away,
2397  // not how much use we're making of those pages.
2398  uint64_t ms_time = MilliTime();
2399  // Don't bother trimming the alloc space if a heap trim occurred in the last two seconds.
2400  if (ms_time - last_trim_time_ms_ < 2 * 1000) {
2401    return;
2402  }
2403
2404  Thread* self = Thread::Current();
2405  Runtime* runtime = Runtime::Current();
2406  if (runtime == nullptr || !runtime->IsFinishedStarting() || runtime->IsShuttingDown(self)) {
2407    // Heap trimming isn't supported without a Java runtime or Daemons (such as at dex2oat time).
2408    // Also: we do not wish to start a heap trim if the runtime is shutting down (a racy check
2409    // as we don't hold the lock while requesting the trim).
2410    return;
2411  }
2412
2413  last_trim_time_ms_ = ms_time;
2414
2415  // Trim only if we do not currently care about pause times.
2416  if (!CareAboutPauseTimes()) {
2417    JNIEnv* env = self->GetJniEnv();
2418    DCHECK(WellKnownClasses::java_lang_Daemons != NULL);
2419    DCHECK(WellKnownClasses::java_lang_Daemons_requestHeapTrim != NULL);
2420    env->CallStaticVoidMethod(WellKnownClasses::java_lang_Daemons,
2421                              WellKnownClasses::java_lang_Daemons_requestHeapTrim);
2422    CHECK(!env->ExceptionCheck());
2423  }
2424}
2425
2426void Heap::RevokeThreadLocalBuffers(Thread* thread) {
2427  if (rosalloc_space_ != nullptr) {
2428    rosalloc_space_->RevokeThreadLocalBuffers(thread);
2429  }
2430  if (bump_pointer_space_ != nullptr) {
2431    bump_pointer_space_->RevokeThreadLocalBuffers(thread);
2432  }
2433}
2434
2435void Heap::RevokeAllThreadLocalBuffers() {
2436  if (rosalloc_space_ != nullptr) {
2437    rosalloc_space_->RevokeAllThreadLocalBuffers();
2438  }
2439  if (bump_pointer_space_ != nullptr) {
2440    bump_pointer_space_->RevokeAllThreadLocalBuffers();
2441  }
2442}
2443
2444bool Heap::IsGCRequestPending() const {
2445  return concurrent_start_bytes_ != std::numeric_limits<size_t>::max();
2446}
2447
2448void Heap::RunFinalization(JNIEnv* env) {
2449  // Can't do this in WellKnownClasses::Init since System is not properly set up at that point.
2450  if (WellKnownClasses::java_lang_System_runFinalization == nullptr) {
2451    CHECK(WellKnownClasses::java_lang_System != nullptr);
2452    WellKnownClasses::java_lang_System_runFinalization =
2453        CacheMethod(env, WellKnownClasses::java_lang_System, true, "runFinalization", "()V");
2454    CHECK(WellKnownClasses::java_lang_System_runFinalization != nullptr);
2455  }
2456  env->CallStaticVoidMethod(WellKnownClasses::java_lang_System,
2457                            WellKnownClasses::java_lang_System_runFinalization);
2458}
2459
2460void Heap::RegisterNativeAllocation(JNIEnv* env, int bytes) {
2461  Thread* self = ThreadForEnv(env);
2462  if (native_need_to_run_finalization_) {
2463    RunFinalization(env);
2464    UpdateMaxNativeFootprint();
2465    native_need_to_run_finalization_ = false;
2466  }
2467  // Total number of native bytes allocated.
2468  native_bytes_allocated_.FetchAndAdd(bytes);
2469  if (static_cast<size_t>(native_bytes_allocated_) > native_footprint_gc_watermark_) {
2470    collector::GcType gc_type = have_zygote_space_ ? collector::kGcTypePartial :
2471        collector::kGcTypeFull;
2472
2473    // The second watermark is higher than the gc watermark. If you hit this it means you are
2474    // allocating native objects faster than the GC can keep up with.
2475    if (static_cast<size_t>(native_bytes_allocated_) > native_footprint_limit_) {
2476      if (WaitForGcToComplete(self) != collector::kGcTypeNone) {
2477        // Just finished a GC, attempt to run finalizers.
2478        RunFinalization(env);
2479        CHECK(!env->ExceptionCheck());
2480      }
2481      // If we still are over the watermark, attempt a GC for alloc and run finalizers.
2482      if (static_cast<size_t>(native_bytes_allocated_) > native_footprint_limit_) {
2483        CollectGarbageInternal(gc_type, kGcCauseForNativeAlloc, false);
2484        RunFinalization(env);
2485        native_need_to_run_finalization_ = false;
2486        CHECK(!env->ExceptionCheck());
2487      }
2488      // We have just run finalizers, update the native watermark since it is very likely that
2489      // finalizers released native managed allocations.
2490      UpdateMaxNativeFootprint();
2491    } else if (!IsGCRequestPending()) {
2492      if (concurrent_gc_) {
2493        RequestConcurrentGC(self);
2494      } else {
2495        CollectGarbageInternal(gc_type, kGcCauseForAlloc, false);
2496      }
2497    }
2498  }
2499}
2500
2501void Heap::RegisterNativeFree(JNIEnv* env, int bytes) {
2502  int expected_size, new_size;
2503  do {
2504    expected_size = native_bytes_allocated_.Load();
2505    new_size = expected_size - bytes;
2506    if (UNLIKELY(new_size < 0)) {
2507      ScopedObjectAccess soa(env);
2508      env->ThrowNew(WellKnownClasses::java_lang_RuntimeException,
2509                    StringPrintf("Attempted to free %d native bytes with only %d native bytes "
2510                                 "registered as allocated", bytes, expected_size).c_str());
2511      break;
2512    }
2513  } while (!native_bytes_allocated_.CompareAndSwap(expected_size, new_size));
2514}
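// Editor's note -- illustrative sketch, not part of heap.cc. RegisterNativeFree above is a
// standard compare-and-swap retry loop. The same pattern written against std::atomic, with
// hypothetical names, looks like this:
//
//   #include <atomic>
//
//   std::atomic<int> native_bytes(0);
//
//   // Returns false if the caller tries to free more bytes than were registered.
//   bool UnregisterBytes(int bytes) {
//     int expected = native_bytes.load();
//     do {
//       if (expected - bytes < 0) {
//         return false;  // Underflow: report an error instead of wrapping around.
//       }
//       // On failure compare_exchange_weak reloads the current value into 'expected',
//       // so the next iteration retries with fresh data.
//     } while (!native_bytes.compare_exchange_weak(expected, expected - bytes));
//     return true;
//   }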
2515
2516size_t Heap::GetTotalMemory() const {
2517  size_t ret = 0;
2518  for (const auto& space : continuous_spaces_) {
2519    // Currently don't include the image space.
2520    if (!space->IsImageSpace()) {
2521      ret += space->Size();
2522    }
2523  }
2524  for (const auto& space : discontinuous_spaces_) {
2525    if (space->IsLargeObjectSpace()) {
2526      ret += space->AsLargeObjectSpace()->GetBytesAllocated();
2527    }
2528  }
2529  return ret;
2530}
2531
2532void Heap::AddModUnionTable(accounting::ModUnionTable* mod_union_table) {
2533  DCHECK(mod_union_table != nullptr);
2534  mod_union_tables_.Put(mod_union_table->GetSpace(), mod_union_table);
2535}
2536
2537}  // namespace gc
2538}  // namespace art
2539