heap.cc revision ff3b24aa929a9db79daeef7c0b0522da099700a9
1/*
2 * Copyright (C) 2011 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 *      http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17#include "heap.h"
18
19#define ATRACE_TAG ATRACE_TAG_DALVIK
20#include <cutils/trace.h>
21
22#include <limits>
23#include <vector>
24#include <valgrind.h>
25
26#include "base/histogram-inl.h"
27#include "base/stl_util.h"
28#include "common_throws.h"
29#include "cutils/sched_policy.h"
30#include "debugger.h"
31#include "gc/accounting/atomic_stack.h"
32#include "gc/accounting/card_table-inl.h"
33#include "gc/accounting/heap_bitmap-inl.h"
34#include "gc/accounting/mod_union_table.h"
35#include "gc/accounting/mod_union_table-inl.h"
36#include "gc/accounting/space_bitmap-inl.h"
37#include "gc/collector/mark_sweep-inl.h"
38#include "gc/collector/partial_mark_sweep.h"
39#include "gc/collector/semi_space.h"
40#include "gc/collector/sticky_mark_sweep.h"
41#include "gc/space/bump_pointer_space.h"
42#include "gc/space/dlmalloc_space-inl.h"
43#include "gc/space/image_space.h"
44#include "gc/space/large_object_space.h"
45#include "gc/space/rosalloc_space-inl.h"
46#include "gc/space/space-inl.h"
47#include "heap-inl.h"
48#include "image.h"
49#include "invoke_arg_array_builder.h"
50#include "mirror/art_field-inl.h"
51#include "mirror/class-inl.h"
52#include "mirror/object.h"
53#include "mirror/object-inl.h"
54#include "mirror/object_array-inl.h"
55#include "object_utils.h"
56#include "os.h"
57#include "ScopedLocalRef.h"
58#include "scoped_thread_state_change.h"
59#include "sirt_ref.h"
60#include "thread_list.h"
61#include "UniquePtr.h"
62#include "well_known_classes.h"
63
64namespace art {
65
66extern void SetQuickAllocEntryPointsAllocator(gc::AllocatorType allocator);
67
68namespace gc {
69
70static constexpr bool kGCALotMode = false;
71static constexpr size_t kGcAlotInterval = KB;
72// Minimum amount of remaining bytes before a concurrent GC is triggered.
73static constexpr size_t kMinConcurrentRemainingBytes = 128 * KB;
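// Default allocators used outside of the zygote: the constructor switches to
// kDefaultPreZygoteAllocator for non-zygote processes, and PreZygoteFork switches to
// kDefaultPostZygoteAllocator. Both currently select the free-list allocator (the dlmalloc- or
// rosalloc-backed non-moving space, depending on kUseRosAlloc).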
74static constexpr AllocatorType kDefaultPreZygoteAllocator = kAllocatorTypeFreeList;
75static constexpr AllocatorType kDefaultPostZygoteAllocator = kAllocatorTypeFreeList;
76
77Heap::Heap(size_t initial_size, size_t growth_limit, size_t min_free, size_t max_free,
78           double target_utilization, size_t capacity, const std::string& image_file_name,
79           bool concurrent_gc, size_t parallel_gc_threads, size_t conc_gc_threads,
80           bool low_memory_mode, size_t long_pause_log_threshold, size_t long_gc_log_threshold,
81           bool dump_gc_performance_on_shutdown, bool ignore_max_footprint)
82    : non_moving_space_(nullptr),
83      concurrent_gc_(concurrent_gc),
84      parallel_gc_threads_(parallel_gc_threads),
85      conc_gc_threads_(conc_gc_threads),
86      low_memory_mode_(low_memory_mode),
87      long_pause_log_threshold_(long_pause_log_threshold),
88      long_gc_log_threshold_(long_gc_log_threshold),
89      dump_gc_performance_on_shutdown_(dump_gc_performance_on_shutdown),
90      ignore_max_footprint_(ignore_max_footprint),
91      have_zygote_space_(false),
92      soft_reference_queue_(this),
93      weak_reference_queue_(this),
94      finalizer_reference_queue_(this),
95      phantom_reference_queue_(this),
96      cleared_references_(this),
97      is_gc_running_(false),
98      last_gc_type_(collector::kGcTypeNone),
99      next_gc_type_(collector::kGcTypePartial),
100      capacity_(capacity),
101      growth_limit_(growth_limit),
102      max_allowed_footprint_(initial_size),
103      native_footprint_gc_watermark_(initial_size),
104      native_footprint_limit_(2 * initial_size),
105      native_need_to_run_finalization_(false),
106      activity_thread_class_(NULL),
107      application_thread_class_(NULL),
108      activity_thread_(NULL),
109      application_thread_(NULL),
110      last_process_state_id_(NULL),
111      // Initially assume we care about pauses, in case we never get notified of process states
112      // or if the JNI code becomes broken.
113      care_about_pause_times_(true),
114      concurrent_start_bytes_(concurrent_gc_ ? initial_size - kMinConcurrentRemainingBytes
115          :  std::numeric_limits<size_t>::max()),
116      total_bytes_freed_ever_(0),
117      total_objects_freed_ever_(0),
118      num_bytes_allocated_(0),
119      native_bytes_allocated_(0),
120      gc_memory_overhead_(0),
121      verify_missing_card_marks_(false),
122      verify_system_weaks_(false),
123      verify_pre_gc_heap_(false),
124      verify_post_gc_heap_(false),
125      verify_mod_union_table_(false),
126      min_alloc_space_size_for_sticky_gc_(2 * MB),
127      min_remaining_space_for_sticky_gc_(1 * MB),
128      last_trim_time_ms_(0),
129      allocation_rate_(0),
130      /* For GC a lot mode, we limit the allocation stacks to kGcAlotInterval allocations. This
131       * causes a lot of GC since we do a GC for alloc whenever the stack is full. When heap
132       * verification is enabled, we limit the size of allocation stacks to speed up their
133       * searching.
134       */
135      max_allocation_stack_size_(kGCALotMode ? kGcAlotInterval
136          : (kDesiredHeapVerification > kVerifyAllFast) ? KB : MB),
137      current_allocator_(kMovingCollector ? kAllocatorTypeBumpPointer : kAllocatorTypeFreeList),
138      current_non_moving_allocator_(kAllocatorTypeFreeList),
139      bump_pointer_space_(nullptr),
140      temp_space_(nullptr),
141      reference_referent_offset_(0),
142      reference_queue_offset_(0),
143      reference_queueNext_offset_(0),
144      reference_pendingNext_offset_(0),
145      finalizer_reference_zombie_offset_(0),
146      min_free_(min_free),
147      max_free_(max_free),
148      target_utilization_(target_utilization),
149      total_wait_time_(0),
150      total_allocation_time_(0),
151      verify_object_mode_(kHeapVerificationNotPermitted),
152      gc_disable_count_(0),
153      running_on_valgrind_(RUNNING_ON_VALGRIND) {
154  if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
155    LOG(INFO) << "Heap() entering";
156  }
157  // If we aren't the zygote, switch to the default non-zygote allocator. This may update the
158  // entrypoints.
159  if (!Runtime::Current()->IsZygote()) {
160    ChangeAllocator(kDefaultPreZygoteAllocator);
161  }
162  live_bitmap_.reset(new accounting::HeapBitmap(this));
163  mark_bitmap_.reset(new accounting::HeapBitmap(this));
164  // Requested begin for the alloc space, to follow the mapped image and oat files
165  byte* requested_alloc_space_begin = nullptr;
166  if (!image_file_name.empty()) {
167    space::ImageSpace* image_space = space::ImageSpace::Create(image_file_name.c_str());
168    CHECK(image_space != nullptr) << "Failed to create space for " << image_file_name;
169    AddSpace(image_space);
170    // Oat files referenced by image files immediately follow them in memory; ensure the alloc
171    // space isn't going to end up in the middle.
172    byte* oat_file_end_addr = image_space->GetImageHeader().GetOatFileEnd();
173    CHECK_GT(oat_file_end_addr, image_space->End());
174    if (oat_file_end_addr > requested_alloc_space_begin) {
175      requested_alloc_space_begin = AlignUp(oat_file_end_addr, kPageSize);
176    }
177  }
178
179  const char* name = Runtime::Current()->IsZygote() ? "zygote space" : "alloc space";
180  if (!kUseRosAlloc) {
181    non_moving_space_ = space::DlMallocSpace::Create(name, initial_size, growth_limit, capacity,
182                                                     requested_alloc_space_begin);
183  } else {
184    non_moving_space_ = space::RosAllocSpace::Create(name, initial_size, growth_limit, capacity,
185                                                     requested_alloc_space_begin);
186  }
187  if (kMovingCollector) {
188    // TODO: Place bump-pointer spaces somewhere to minimize size of card table.
189    // TODO: Having 3+ spaces as big as the large heap size can cause virtual memory fragmentation
190    // issues.
191    const size_t bump_pointer_space_size = std::min(non_moving_space_->Capacity(), 128 * MB);
192    bump_pointer_space_ = space::BumpPointerSpace::Create("Bump pointer space",
193                                                          bump_pointer_space_size, nullptr);
194    CHECK(bump_pointer_space_ != nullptr) << "Failed to create bump pointer space";
195    AddSpace(bump_pointer_space_);
196    temp_space_ = space::BumpPointerSpace::Create("Bump pointer space 2", bump_pointer_space_size,
197                                                  nullptr);
198    CHECK(temp_space_ != nullptr) << "Failed to create bump pointer space";
199    AddSpace(temp_space_);
200  }
201
202  CHECK(non_moving_space_ != NULL) << "Failed to create non-moving space";
203  non_moving_space_->SetFootprintLimit(non_moving_space_->Capacity());
204  AddSpace(non_moving_space_);
205
206  // Allocate the large object space.
207  const bool kUseFreeListSpaceForLOS = false;
208  if (kUseFreeListSpaceForLOS) {
209    large_object_space_ = space::FreeListSpace::Create("large object space", NULL, capacity);
210  } else {
211    large_object_space_ = space::LargeObjectMapSpace::Create("large object space");
212  }
213  CHECK(large_object_space_ != NULL) << "Failed to create large object space";
214  AddSpace(large_object_space_);
215
216  // Compute heap capacity. Continuous spaces are sorted in order of Begin().
217  CHECK(!continuous_spaces_.empty());
218  // Relies on the spaces being sorted.
219  byte* heap_begin = continuous_spaces_.front()->Begin();
220  byte* heap_end = continuous_spaces_.back()->Limit();
221  size_t heap_capacity = heap_end - heap_begin;
222
223  // Allocate the card table.
224  card_table_.reset(accounting::CardTable::Create(heap_begin, heap_capacity));
225  CHECK(card_table_.get() != NULL) << "Failed to create card table";
226
227  // Use a card-cache mod-union table for now since it makes it easier for us to update the
228  // references to the copying spaces.
229  accounting::ModUnionTable* mod_union_table =
230      new accounting::ModUnionTableCardCache("Image mod-union table", this, GetImageSpace());
231  CHECK(mod_union_table != nullptr) << "Failed to create image mod-union table";
232  AddModUnionTable(mod_union_table);
233
234  // TODO: Count objects in the image space here.
235  num_bytes_allocated_ = 0;
236
237  // Default mark stack size in bytes.
238  static const size_t default_mark_stack_size = 64 * KB;
239  mark_stack_.reset(accounting::ObjectStack::Create("mark stack", default_mark_stack_size));
240  allocation_stack_.reset(accounting::ObjectStack::Create("allocation stack",
241                                                          max_allocation_stack_size_));
242  live_stack_.reset(accounting::ObjectStack::Create("live stack",
243                                                    max_allocation_stack_size_));
244
245  // It's still too early to take a lock because there are no threads yet, but we can create locks
246  // now. We don't create them earlier to make it clear that you can't use locks during heap
247  // initialization.
248  gc_complete_lock_ = new Mutex("GC complete lock");
249  gc_complete_cond_.reset(new ConditionVariable("GC complete condition variable",
250                                                *gc_complete_lock_));
251  last_gc_time_ns_ = NanoTime();
252  last_gc_size_ = GetBytesAllocated();
253
254  if (ignore_max_footprint_) {
255    SetIdealFootprint(std::numeric_limits<size_t>::max());
256    concurrent_start_bytes_ = std::numeric_limits<size_t>::max();
257  }
258  CHECK_NE(max_allowed_footprint_, 0U);
259
260  // Create our garbage collectors.
261  for (size_t i = 0; i < 2; ++i) {
262    const bool concurrent = i != 0;
263    garbage_collectors_.push_back(new collector::MarkSweep(this, concurrent));
264    garbage_collectors_.push_back(new collector::PartialMarkSweep(this, concurrent));
265    garbage_collectors_.push_back(new collector::StickyMarkSweep(this, concurrent));
266  }
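  // The GC plan is ordered from cheapest (sticky) to most expensive (full); the allocation slow
  // path in AllocateInternalWithGc tries these types in order until enough memory has been freed.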
267  gc_plan_.push_back(collector::kGcTypeSticky);
268  gc_plan_.push_back(collector::kGcTypePartial);
269  gc_plan_.push_back(collector::kGcTypeFull);
270  if (kMovingCollector) {
271    // TODO: Clean this up.
272    semi_space_collector_ = new collector::SemiSpace(this);
273    garbage_collectors_.push_back(semi_space_collector_);
274  }
275
276  if (running_on_valgrind_) {
277    Runtime::Current()->GetInstrumentation()->InstrumentQuickAllocEntryPoints();
278  }
279
280  if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
281    LOG(INFO) << "Heap() exiting";
282  }
283}
284
285void Heap::ChangeAllocator(AllocatorType allocator) {
286  DCHECK_NE(allocator, kAllocatorTypeLOS);
287  if (current_allocator_ != allocator) {
288    current_allocator_ = allocator;
289    SetQuickAllocEntryPointsAllocator(current_allocator_);
290    Runtime::Current()->GetInstrumentation()->ResetQuickAllocEntryPoints();
291  }
292}
293
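// We are compiling the boot image if and only if the heap has neither an image space nor a
// zygote space.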
294bool Heap::IsCompilingBoot() const {
295  for (const auto& space : continuous_spaces_) {
296    if (space->IsImageSpace()) {
297      return false;
298    } else if (space->IsZygoteSpace()) {
299      return false;
300    }
301  }
302  return true;
303}
304
305bool Heap::HasImageSpace() const {
306  for (const auto& space : continuous_spaces_) {
307    if (space->IsImageSpace()) {
308      return true;
309    }
310  }
311  return false;
312}
313
314void Heap::IncrementDisableGC(Thread* self) {
315  // Need to do this holding the lock to prevent races where the GC is about to run / running when
316  // we attempt to disable it.
317  ScopedThreadStateChange tsc(self, kWaitingForGcToComplete);
318  MutexLock mu(self, *gc_complete_lock_);
319  WaitForGcToCompleteLocked(self);
320  ++gc_disable_count_;
321}
322
323void Heap::DecrementDisableGC(Thread* self) {
324  MutexLock mu(self, *gc_complete_lock_);
325  CHECK_GE(gc_disable_count_, 0U);
326  --gc_disable_count_;
327}
328
329void Heap::CreateThreadPool() {
330  const size_t num_threads = std::max(parallel_gc_threads_, conc_gc_threads_);
331  if (num_threads != 0) {
332    thread_pool_.reset(new ThreadPool("Heap thread pool", num_threads));
333  }
334}
335
336void Heap::VisitObjects(ObjectVisitorCallback callback, void* arg) {
337  // Visit objects in bump pointer space.
338  Thread* self = Thread::Current();
339  // TODO: Use reference block.
340  std::vector<SirtRef<mirror::Object>*> saved_refs;
341  if (bump_pointer_space_ != nullptr) {
342    // Need to put all these in sirts since the callback may trigger a GC. TODO: Use a better data
343    // structure.
344    mirror::Object* obj = reinterpret_cast<mirror::Object*>(bump_pointer_space_->Begin());
345    const mirror::Object* end = reinterpret_cast<const mirror::Object*>(
346        bump_pointer_space_->End());
347    while (obj < end) {
348      saved_refs.push_back(new SirtRef<mirror::Object>(self, obj));
349      obj = space::BumpPointerSpace::GetNextObject(obj);
350    }
351  }
352  // TODO: Switch to standard begin and end to use a range-based loop.
353  for (mirror::Object** it = allocation_stack_->Begin(), **end = allocation_stack_->End();
354      it < end; ++it) {
355    mirror::Object* obj = *it;
356    // Objects in the allocation stack might be in a movable space.
357    saved_refs.push_back(new SirtRef<mirror::Object>(self, obj));
358  }
359  GetLiveBitmap()->Walk(callback, arg);
360  for (const auto& ref : saved_refs) {
361    callback(ref->get(), arg);
362  }
363  // Need to free the sirts in the reverse order in which they were allocated.
364  for (size_t i = saved_refs.size(); i != 0; --i) {
365    delete saved_refs[i - 1];
366  }
367}
368
369void Heap::MarkAllocStackAsLive(accounting::ObjectStack* stack) {
370  MarkAllocStack(non_moving_space_->GetLiveBitmap(), large_object_space_->GetLiveObjects(), stack);
371}
372
373void Heap::DeleteThreadPool() {
374  thread_pool_.reset(nullptr);
375}
376
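// Reads the static int field |name| of class |clz| into |out_value|. Returns false and clears
// any pending exception if the field cannot be found.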
377static bool ReadStaticInt(JNIEnvExt* env, jclass clz, const char* name, int* out_value) {
378  DCHECK(out_value != NULL);
379  jfieldID field = env->GetStaticFieldID(clz, name, "I");
380  if (field == NULL) {
381    env->ExceptionClear();
382    return false;
383  }
384  *out_value = env->GetStaticIntField(clz, field);
385  return true;
386}
387
388void Heap::ListenForProcessStateChange() {
389  VLOG(heap) << "Heap notified of process state change";
390
391  Thread* self = Thread::Current();
392  JNIEnvExt* env = self->GetJniEnv();
393
394  if (!have_zygote_space_) {
395    return;
396  }
397
398  if (activity_thread_class_ == NULL) {
399    jclass clz = env->FindClass("android/app/ActivityThread");
400    if (clz == NULL) {
401      env->ExceptionClear();
402      LOG(WARNING) << "Could not find activity thread class in process state change";
403      return;
404    }
405    activity_thread_class_ = reinterpret_cast<jclass>(env->NewGlobalRef(clz));
406  }
407
408  if (activity_thread_class_ != NULL && activity_thread_ == NULL) {
409    jmethodID current_activity_method = env->GetStaticMethodID(activity_thread_class_,
410                                                               "currentActivityThread",
411                                                               "()Landroid/app/ActivityThread;");
412    if (current_activity_method == NULL) {
413      env->ExceptionClear();
414      LOG(WARNING) << "Could not get method for currentActivityThread";
415      return;
416    }
417
418    jobject obj = env->CallStaticObjectMethod(activity_thread_class_, current_activity_method);
419    if (obj == NULL) {
420      env->ExceptionClear();
421      LOG(WARNING) << "Could not get current activity";
422      return;
423    }
424    activity_thread_ = env->NewGlobalRef(obj);
425  }
426
427  if (process_state_cares_about_pause_time_.empty()) {
428    // Just attempt to do this the first time.
429    jclass clz = env->FindClass("android/app/ActivityManager");
430    if (clz == NULL) {
431      LOG(WARNING) << "Activity manager class is null";
432      return;
433    }
434    ScopedLocalRef<jclass> activity_manager(env, clz);
435    std::vector<const char*> care_about_pauses;
436    care_about_pauses.push_back("PROCESS_STATE_TOP");
437    care_about_pauses.push_back("PROCESS_STATE_IMPORTANT_BACKGROUND");
438    // Attempt to read the constants and classify them by whether or not we care about pause times.
439    for (size_t i = 0; i < care_about_pauses.size(); ++i) {
440      int process_state = 0;
441      if (ReadStaticInt(env, activity_manager.get(), care_about_pauses[i], &process_state)) {
442        process_state_cares_about_pause_time_.insert(process_state);
443        VLOG(heap) << "Adding process state " << process_state
444                   << " to set of states which care about pause time";
445      }
446    }
447  }
448
449  if (application_thread_class_ == NULL) {
450    jclass clz = env->FindClass("android/app/ActivityThread$ApplicationThread");
451    if (clz == NULL) {
452      env->ExceptionClear();
453      LOG(WARNING) << "Could not get application thread class";
454      return;
455    }
456    application_thread_class_ = reinterpret_cast<jclass>(env->NewGlobalRef(clz));
457    last_process_state_id_ = env->GetFieldID(application_thread_class_, "mLastProcessState", "I");
458    if (last_process_state_id_ == NULL) {
459      env->ExceptionClear();
460      LOG(WARNING) << "Could not get last process state member";
461      return;
462    }
463  }
464
465  if (application_thread_class_ != NULL && application_thread_ == NULL) {
466    jmethodID get_application_thread =
467        env->GetMethodID(activity_thread_class_, "getApplicationThread",
468                         "()Landroid/app/ActivityThread$ApplicationThread;");
469    if (get_application_thread == NULL) {
470      LOG(WARNING) << "Could not get method ID for get application thread";
471      return;
472    }
473
474    jobject obj = env->CallObjectMethod(activity_thread_, get_application_thread);
475    if (obj == NULL) {
476      LOG(WARNING) << "Could not get application thread";
477      return;
478    }
479
480    application_thread_ = env->NewGlobalRef(obj);
481  }
482
483  if (application_thread_ != NULL && last_process_state_id_ != NULL) {
484    int process_state = env->GetIntField(application_thread_, last_process_state_id_);
485    env->ExceptionClear();
486
487    care_about_pause_times_ = process_state_cares_about_pause_time_.find(process_state) !=
488        process_state_cares_about_pause_time_.end();
489
490    VLOG(heap) << "New process state " << process_state
491               << " care about pauses " << care_about_pause_times_;
492  }
493}
494
495void Heap::AddSpace(space::Space* space) {
496  DCHECK(space != NULL);
497  WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
498  if (space->IsContinuousSpace()) {
499    DCHECK(!space->IsDiscontinuousSpace());
500    space::ContinuousSpace* continuous_space = space->AsContinuousSpace();
501    // Continuous spaces don't necessarily have bitmaps.
502    accounting::SpaceBitmap* live_bitmap = continuous_space->GetLiveBitmap();
503    accounting::SpaceBitmap* mark_bitmap = continuous_space->GetMarkBitmap();
504    if (live_bitmap != nullptr) {
505      DCHECK(mark_bitmap != nullptr);
506      live_bitmap_->AddContinuousSpaceBitmap(live_bitmap);
507      mark_bitmap_->AddContinuousSpaceBitmap(mark_bitmap);
508    }
509
510    continuous_spaces_.push_back(continuous_space);
511    if (continuous_space->IsMallocSpace()) {
512      non_moving_space_ = continuous_space->AsMallocSpace();
513    }
514
515    // Ensure that spaces remain sorted in increasing order of start address.
516    std::sort(continuous_spaces_.begin(), continuous_spaces_.end(),
517              [](const space::ContinuousSpace* a, const space::ContinuousSpace* b) {
518      return a->Begin() < b->Begin();
519    });
520    // Ensure that ImageSpaces < ZygoteSpaces < AllocSpaces so that we can do address-based checks to
521    // avoid redundant marking.
522    bool seen_zygote = false, seen_alloc = false;
523    for (const auto& space : continuous_spaces_) {
524      if (space->IsImageSpace()) {
525        CHECK(!seen_zygote);
526        CHECK(!seen_alloc);
527      } else if (space->IsZygoteSpace()) {
528        CHECK(!seen_alloc);
529        seen_zygote = true;
530      } else if (space->IsMallocSpace()) {
531        seen_alloc = true;
532      }
533    }
534  } else {
535    DCHECK(space->IsDiscontinuousSpace());
536    space::DiscontinuousSpace* discontinuous_space = space->AsDiscontinuousSpace();
537    DCHECK(discontinuous_space->GetLiveObjects() != nullptr);
538    live_bitmap_->AddDiscontinuousObjectSet(discontinuous_space->GetLiveObjects());
539    DCHECK(discontinuous_space->GetMarkObjects() != nullptr);
540    mark_bitmap_->AddDiscontinuousObjectSet(discontinuous_space->GetMarkObjects());
541    discontinuous_spaces_.push_back(discontinuous_space);
542  }
543  if (space->IsAllocSpace()) {
544    alloc_spaces_.push_back(space->AsAllocSpace());
545  }
546}
547
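// Note: the |this| null checks below suggest these accounting helpers can be reached through a
// null Heap*; relying on a null |this| inside a member function is technically undefined behavior
// in C++.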
548void Heap::RegisterGCAllocation(size_t bytes) {
549  if (this != nullptr) {
550    gc_memory_overhead_.fetch_add(bytes);
551  }
552}
553
554void Heap::RegisterGCDeAllocation(size_t bytes) {
555  if (this != nullptr) {
556    gc_memory_overhead_.fetch_sub(bytes);
557  }
558}
559
560void Heap::DumpGcPerformanceInfo(std::ostream& os) {
561  // Dump cumulative timings.
562  os << "Dumping cumulative Gc timings\n";
563  uint64_t total_duration = 0;
564
565  // Dump cumulative loggers for each GC type.
566  uint64_t total_paused_time = 0;
567  for (const auto& collector : garbage_collectors_) {
568    CumulativeLogger& logger = collector->GetCumulativeTimings();
569    if (logger.GetTotalNs() != 0) {
570      os << Dumpable<CumulativeLogger>(logger);
571      const uint64_t total_ns = logger.GetTotalNs();
572      const uint64_t total_pause_ns = collector->GetTotalPausedTimeNs();
573      double seconds = NsToMs(logger.GetTotalNs()) / 1000.0;
574      const uint64_t freed_bytes = collector->GetTotalFreedBytes();
575      const uint64_t freed_objects = collector->GetTotalFreedObjects();
576      Histogram<uint64_t>::CumulativeData cumulative_data;
577      collector->GetPauseHistogram().CreateHistogram(&cumulative_data);
578      collector->GetPauseHistogram().PrintConfidenceIntervals(os, 0.99, cumulative_data);
579      os << collector->GetName() << " total time: " << PrettyDuration(total_ns) << "\n"
580         << collector->GetName() << " freed: " << freed_objects
581         << " objects with total size " << PrettySize(freed_bytes) << "\n"
582         << collector->GetName() << " throughput: " << freed_objects / seconds << "/s / "
583         << PrettySize(freed_bytes / seconds) << "/s\n";
584      total_duration += total_ns;
585      total_paused_time += total_pause_ns;
586    }
587  }
588  uint64_t allocation_time = static_cast<uint64_t>(total_allocation_time_) * kTimeAdjust;
589  size_t total_objects_allocated = GetObjectsAllocatedEver();
590  size_t total_bytes_allocated = GetBytesAllocatedEver();
591  if (total_duration != 0) {
592    const double total_seconds = static_cast<double>(total_duration / 1000) / 1000000.0;
593    os << "Total time spent in GC: " << PrettyDuration(total_duration) << "\n";
594    os << "Mean GC size throughput: "
595       << PrettySize(GetBytesFreedEver() / total_seconds) << "/s\n";
596    os << "Mean GC object throughput: "
597       << (GetObjectsFreedEver() / total_seconds) << " objects/s\n";
598  }
599  os << "Total number of allocations: " << total_objects_allocated << "\n";
600  os << "Total bytes allocated " << PrettySize(total_bytes_allocated) << "\n";
601  if (kMeasureAllocationTime) {
602    os << "Total time spent allocating: " << PrettyDuration(allocation_time) << "\n";
603    os << "Mean allocation time: " << PrettyDuration(allocation_time / total_objects_allocated)
604       << "\n";
605  }
606  os << "Total mutator paused time: " << PrettyDuration(total_paused_time) << "\n";
607  os << "Total time waiting for GC to complete: " << PrettyDuration(total_wait_time_) << "\n";
608  os << "Approximate GC data structures memory overhead: " << gc_memory_overhead_;
609}
610
611Heap::~Heap() {
612  VLOG(heap) << "Starting ~Heap()";
613  if (dump_gc_performance_on_shutdown_) {
614    DumpGcPerformanceInfo(LOG(INFO));
615  }
616  STLDeleteElements(&garbage_collectors_);
617  // If we don't reset then the mark stack complains in its destructor.
618  allocation_stack_->Reset();
619  live_stack_->Reset();
620  STLDeleteValues(&mod_union_tables_);
621  STLDeleteElements(&continuous_spaces_);
622  STLDeleteElements(&discontinuous_spaces_);
623  delete gc_complete_lock_;
624  VLOG(heap) << "Finished ~Heap()";
625}
626
627space::ContinuousSpace* Heap::FindContinuousSpaceFromObject(const mirror::Object* obj,
628                                                            bool fail_ok) const {
629  for (const auto& space : continuous_spaces_) {
630    if (space->Contains(obj)) {
631      return space;
632    }
633  }
634  if (!fail_ok) {
635    LOG(FATAL) << "object " << reinterpret_cast<const void*>(obj) << " not inside any spaces!";
636  }
637  return NULL;
638}
639
640space::DiscontinuousSpace* Heap::FindDiscontinuousSpaceFromObject(const mirror::Object* obj,
641                                                                  bool fail_ok) const {
642  for (const auto& space : discontinuous_spaces_) {
643    if (space->Contains(obj)) {
644      return space;
645    }
646  }
647  if (!fail_ok) {
648    LOG(FATAL) << "object " << reinterpret_cast<const void*>(obj) << " not inside any spaces!";
649  }
650  return NULL;
651}
652
653space::Space* Heap::FindSpaceFromObject(const mirror::Object* obj, bool fail_ok) const {
654  space::Space* result = FindContinuousSpaceFromObject(obj, true);
655  if (result != NULL) {
656    return result;
657  }
658  return FindDiscontinuousSpaceFromObject(obj, true);
659}
660
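// Bundles the callbacks passed to ProcessReferences so they can be forwarded through a single
// void* argument.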
661struct SoftReferenceArgs {
662  RootVisitor* is_marked_callback_;
663  RootVisitor* recursive_mark_callback_;
664  void* arg_;
665};
666
667mirror::Object* Heap::PreserveSoftReferenceCallback(mirror::Object* obj, void* arg) {
668  SoftReferenceArgs* args  = reinterpret_cast<SoftReferenceArgs*>(arg);
669  // TODO: Don't preserve all soft references.
670  return args->recursive_mark_callback_(obj, args->arg_);
671}
672
673// Process reference class instances and schedule finalizations.
674void Heap::ProcessReferences(TimingLogger& timings, bool clear_soft,
675                             RootVisitor* is_marked_callback,
676                             RootVisitor* recursive_mark_object_callback, void* arg) {
677  // Unless we are in the zygote or required to clear soft references with white references,
678  // preserve some white referents.
679  if (!clear_soft && !Runtime::Current()->IsZygote()) {
680    SoftReferenceArgs soft_reference_args;
681    soft_reference_args.is_marked_callback_ = is_marked_callback;
682    soft_reference_args.recursive_mark_callback_ = recursive_mark_object_callback;
683    soft_reference_args.arg_ = arg;
684    soft_reference_queue_.PreserveSomeSoftReferences(&PreserveSoftReferenceCallback,
685                                                     &soft_reference_args);
686  }
687  timings.StartSplit("ProcessReferences");
688  // Clear all remaining soft and weak references with white referents.
689  soft_reference_queue_.ClearWhiteReferences(cleared_references_, is_marked_callback, arg);
690  weak_reference_queue_.ClearWhiteReferences(cleared_references_, is_marked_callback, arg);
691  timings.EndSplit();
692  // Preserve all white objects with finalize methods and schedule them for finalization.
693  timings.StartSplit("EnqueueFinalizerReferences");
694  finalizer_reference_queue_.EnqueueFinalizerReferences(cleared_references_, is_marked_callback,
695                                                        recursive_mark_object_callback, arg);
696  timings.EndSplit();
697  timings.StartSplit("ProcessReferences");
698  // Clear all f-reachable soft and weak references with white referents.
699  soft_reference_queue_.ClearWhiteReferences(cleared_references_, is_marked_callback, arg);
700  weak_reference_queue_.ClearWhiteReferences(cleared_references_, is_marked_callback, arg);
701  // Clear all phantom references with white referents.
702  phantom_reference_queue_.ClearWhiteReferences(cleared_references_, is_marked_callback, arg);
703  // At this point all reference queues other than the cleared references should be empty.
704  DCHECK(soft_reference_queue_.IsEmpty());
705  DCHECK(weak_reference_queue_.IsEmpty());
706  DCHECK(finalizer_reference_queue_.IsEmpty());
707  DCHECK(phantom_reference_queue_.IsEmpty());
708  timings.EndSplit();
709}
710
711bool Heap::IsEnqueued(mirror::Object* ref) const {
712  // Since the references are stored in cyclic lists, once a reference has been enqueued its
713  // pendingNext field will always be non-null.
714  return ref->GetFieldObject<mirror::Object*>(GetReferencePendingNextOffset(), false) != nullptr;
715}
716
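// A reference is enqueuable if it has a queue (non-null |queue| field) but has not yet been added
// to one (null |queueNext| field).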
717bool Heap::IsEnqueuable(const mirror::Object* ref) const {
718  DCHECK(ref != nullptr);
719  const mirror::Object* queue =
720      ref->GetFieldObject<mirror::Object*>(GetReferenceQueueOffset(), false);
721  const mirror::Object* queue_next =
722      ref->GetFieldObject<mirror::Object*>(GetReferenceQueueNextOffset(), false);
723  return queue != nullptr && queue_next == nullptr;
724}
725
726// Process the "referent" field in a java.lang.ref.Reference.  If the referent has not yet been
727// marked, put it on the appropriate list in the heap for later processing.
728void Heap::DelayReferenceReferent(mirror::Class* klass, mirror::Object* obj,
729                                  RootVisitor mark_visitor, void* arg) {
730  DCHECK(klass != nullptr);
731  DCHECK(klass->IsReferenceClass());
732  DCHECK(obj != nullptr);
733  mirror::Object* referent = GetReferenceReferent(obj);
734  if (referent != nullptr) {
735    mirror::Object* forward_address = mark_visitor(referent, arg);
736    // Null means that the object is not currently marked.
737    if (forward_address == nullptr) {
738      Thread* self = Thread::Current();
739      // TODO: Remove these locks, and use atomic stacks for storing references?
740      // We need to check that the references haven't already been enqueued since we can end up
741      // scanning the same reference multiple times due to dirty cards.
742      if (klass->IsSoftReferenceClass()) {
743        soft_reference_queue_.AtomicEnqueueIfNotEnqueued(self, obj);
744      } else if (klass->IsWeakReferenceClass()) {
745        weak_reference_queue_.AtomicEnqueueIfNotEnqueued(self, obj);
746      } else if (klass->IsFinalizerReferenceClass()) {
747        finalizer_reference_queue_.AtomicEnqueueIfNotEnqueued(self, obj);
748      } else if (klass->IsPhantomReferenceClass()) {
749        phantom_reference_queue_.AtomicEnqueueIfNotEnqueued(self, obj);
750      } else {
751        LOG(FATAL) << "Invalid reference type " << PrettyClass(klass) << " " << std::hex
752                   << klass->GetAccessFlags();
753      }
754    } else if (referent != forward_address) {
755      // Referent is already marked and we need to update it.
756      SetReferenceReferent(obj, forward_address);
757    }
758  }
759}
760
761space::ImageSpace* Heap::GetImageSpace() const {
762  for (const auto& space : continuous_spaces_) {
763    if (space->IsImageSpace()) {
764      return space->AsImageSpace();
765    }
766  }
767  return NULL;
768}
769
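// Callback for MallocSpace::Walk which tracks the largest free contiguous chunk seen; used below
// to report fragmentation in the OutOfMemoryError message.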
770static void MSpaceChunkCallback(void* start, void* end, size_t used_bytes, void* arg) {
771  size_t chunk_size = reinterpret_cast<uint8_t*>(end) - reinterpret_cast<uint8_t*>(start);
772  if (used_bytes < chunk_size) {
773    size_t chunk_free_bytes = chunk_size - used_bytes;
774    size_t& max_contiguous_allocation = *reinterpret_cast<size_t*>(arg);
775    max_contiguous_allocation = std::max(max_contiguous_allocation, chunk_free_bytes);
776  }
777}
778
779void Heap::ThrowOutOfMemoryError(Thread* self, size_t byte_count, bool large_object_allocation) {
780  std::ostringstream oss;
781  int64_t total_bytes_free = GetFreeMemory();
782  oss << "Failed to allocate a " << byte_count << " byte allocation with " << total_bytes_free
783      << " free bytes";
784  // If the allocation failed due to fragmentation, print out the largest continuous allocation.
785  if (!large_object_allocation && total_bytes_free >= byte_count) {
786    size_t max_contiguous_allocation = 0;
787    for (const auto& space : continuous_spaces_) {
788      if (space->IsMallocSpace()) {
789        // To allow the Walk/InspectAll() to exclusively-lock the mutator
790        // lock, temporarily release the shared access to the mutator
791        // lock here by transitioning to the suspended state.
792        Locks::mutator_lock_->AssertSharedHeld(self);
793        self->TransitionFromRunnableToSuspended(kSuspended);
794        space->AsMallocSpace()->Walk(MSpaceChunkCallback, &max_contiguous_allocation);
795        self->TransitionFromSuspendedToRunnable();
796        Locks::mutator_lock_->AssertSharedHeld(self);
797      }
798    }
799    oss << "; failed due to fragmentation (largest possible contiguous allocation "
800        <<  max_contiguous_allocation << " bytes)";
801  }
802  self->ThrowOutOfMemoryError(oss.str().c_str());
803}
804
805void Heap::Trim() {
806  uint64_t start_ns = NanoTime();
807  // Trim the managed spaces.
808  uint64_t total_alloc_space_allocated = 0;
809  uint64_t total_alloc_space_size = 0;
810  uint64_t managed_reclaimed = 0;
811  for (const auto& space : continuous_spaces_) {
812    if (space->IsMallocSpace() && !space->IsZygoteSpace()) {
813      gc::space::MallocSpace* alloc_space = space->AsMallocSpace();
814      total_alloc_space_size += alloc_space->Size();
815      managed_reclaimed += alloc_space->Trim();
816    }
817  }
818  total_alloc_space_allocated = GetBytesAllocated() - large_object_space_->GetBytesAllocated() -
819      bump_pointer_space_->GetBytesAllocated();
820  const float managed_utilization = static_cast<float>(total_alloc_space_allocated) /
821      static_cast<float>(total_alloc_space_size);
822  uint64_t gc_heap_end_ns = NanoTime();
823  // Trim the native heap.
824  dlmalloc_trim(0);
825  size_t native_reclaimed = 0;
826  dlmalloc_inspect_all(DlmallocMadviseCallback, &native_reclaimed);
827  uint64_t end_ns = NanoTime();
828  VLOG(heap) << "Heap trim of managed (duration=" << PrettyDuration(gc_heap_end_ns - start_ns)
829      << ", advised=" << PrettySize(managed_reclaimed) << ") and native (duration="
830      << PrettyDuration(end_ns - gc_heap_end_ns) << ", advised=" << PrettySize(native_reclaimed)
831      << ") heaps. Managed heap utilization of " << static_cast<int>(100 * managed_utilization)
832      << "%.";
833}
834
835bool Heap::IsValidObjectAddress(const mirror::Object* obj) const {
836  // Note: we deliberately don't take the lock here, and mustn't test anything that would require
837  // taking the lock.
838  if (obj == nullptr) {
839    return true;
840  }
841  return IsAligned<kObjectAlignment>(obj) && IsHeapAddress(obj);
842}
843
844bool Heap::IsHeapAddress(const mirror::Object* obj) const {
845  if (kMovingCollector && bump_pointer_space_->HasAddress(obj)) {
846    return true;
847  }
848  // TODO: This probably doesn't work for large objects.
849  return FindSpaceFromObject(obj, true) != nullptr;
850}
851
852bool Heap::IsLiveObjectLocked(const mirror::Object* obj, bool search_allocation_stack,
853                              bool search_live_stack, bool sorted) {
854  // Locks::heap_bitmap_lock_->AssertReaderHeld(Thread::Current());
855  if (obj == nullptr || UNLIKELY(!IsAligned<kObjectAlignment>(obj))) {
856    return false;
857  }
858  space::ContinuousSpace* c_space = FindContinuousSpaceFromObject(obj, true);
859  space::DiscontinuousSpace* d_space = NULL;
860  if (c_space != NULL) {
861    if (c_space->GetLiveBitmap()->Test(obj)) {
862      return true;
863    }
864  } else if (bump_pointer_space_->Contains(obj) || temp_space_->Contains(obj)) {
865    return true;
866  } else {
867    d_space = FindDiscontinuousSpaceFromObject(obj, true);
868    if (d_space != NULL) {
869      if (d_space->GetLiveObjects()->Test(obj)) {
870        return true;
871      }
872    }
873  }
874  // This covers the allocation/live stack swapping that is done without mutators suspended.
875  for (size_t i = 0; i < (sorted ? 1 : 5); ++i) {
876    if (i > 0) {
877      NanoSleep(MsToNs(10));
878    }
879    if (search_allocation_stack) {
880      if (sorted) {
881        if (allocation_stack_->ContainsSorted(const_cast<mirror::Object*>(obj))) {
882          return true;
883        }
884      } else if (allocation_stack_->Contains(const_cast<mirror::Object*>(obj))) {
885        return true;
886      }
887    }
888
889    if (search_live_stack) {
890      if (sorted) {
891        if (live_stack_->ContainsSorted(const_cast<mirror::Object*>(obj))) {
892          return true;
893        }
894      } else if (live_stack_->Contains(const_cast<mirror::Object*>(obj))) {
895        return true;
896      }
897    }
898  }
899  // We need to check the bitmaps again since there is a race where we mark something as live and
900  // then clear the stack containing it.
901  if (c_space != NULL) {
902    if (c_space->GetLiveBitmap()->Test(obj)) {
903      return true;
904    }
905  } else {
906    d_space = FindDiscontinuousSpaceFromObject(obj, true);
907    if (d_space != NULL && d_space->GetLiveObjects()->Test(obj)) {
908      return true;
909    }
910  }
911  return false;
912}
913
914void Heap::VerifyObjectImpl(const mirror::Object* obj) {
915  if (Thread::Current() == NULL ||
916      Runtime::Current()->GetThreadList()->GetLockOwner() == Thread::Current()->GetTid()) {
917    return;
918  }
919  VerifyObjectBody(obj);
920}
921
922void Heap::DumpSpaces(std::ostream& stream) {
923  for (const auto& space : continuous_spaces_) {
924    accounting::SpaceBitmap* live_bitmap = space->GetLiveBitmap();
925    accounting::SpaceBitmap* mark_bitmap = space->GetMarkBitmap();
926    stream << space << " " << *space << "\n";
927    if (live_bitmap != nullptr) {
928      stream << live_bitmap << " " << *live_bitmap << "\n";
929    }
930    if (mark_bitmap != nullptr) {
931      stream << mark_bitmap << " " << *mark_bitmap << "\n";
932    }
933  }
934  for (const auto& space : discontinuous_spaces_) {
935    stream << space << " " << *space << "\n";
936  }
937}
938
939void Heap::VerifyObjectBody(const mirror::Object* obj) {
940  CHECK(IsAligned<kObjectAlignment>(obj)) << "Object isn't aligned: " << obj;
941  // Ignore early dawn of the universe verifications.
942  if (UNLIKELY(static_cast<size_t>(num_bytes_allocated_.load()) < 10 * KB)) {
943    return;
944  }
945  const byte* raw_addr = reinterpret_cast<const byte*>(obj) +
946      mirror::Object::ClassOffset().Int32Value();
947  const mirror::Class* c = *reinterpret_cast<mirror::Class* const *>(raw_addr);
948  if (UNLIKELY(c == NULL)) {
949    LOG(FATAL) << "Null class in object: " << obj;
950  } else if (UNLIKELY(!IsAligned<kObjectAlignment>(c))) {
951    LOG(FATAL) << "Class isn't aligned: " << c << " in object: " << obj;
952  }
953  // Check obj.getClass().getClass() == obj.getClass().getClass().getClass()
954  // Note: we don't use the accessors here as they have internal sanity checks
955  // that we don't want to run
956  raw_addr = reinterpret_cast<const byte*>(c) + mirror::Object::ClassOffset().Int32Value();
957  const mirror::Class* c_c = *reinterpret_cast<mirror::Class* const *>(raw_addr);
958  raw_addr = reinterpret_cast<const byte*>(c_c) + mirror::Object::ClassOffset().Int32Value();
959  const mirror::Class* c_c_c = *reinterpret_cast<mirror::Class* const *>(raw_addr);
960  CHECK_EQ(c_c, c_c_c);
961
962  if (verify_object_mode_ > kVerifyAllFast) {
963    // TODO: the bitmap tests below are racy if VerifyObjectBody is called without the
964    //       heap_bitmap_lock_.
965    if (!IsLiveObjectLocked(obj)) {
966      DumpSpaces();
967      LOG(FATAL) << "Object is dead: " << obj;
968    }
969    if (!IsLiveObjectLocked(c)) {
970      LOG(FATAL) << "Class of object is dead: " << c << " in object: " << obj;
971    }
972  }
973}
974
975void Heap::VerificationCallback(mirror::Object* obj, void* arg) {
976  DCHECK(obj != NULL);
977  reinterpret_cast<Heap*>(arg)->VerifyObjectBody(obj);
978}
979
980void Heap::VerifyHeap() {
981  ReaderMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
982  GetLiveBitmap()->Walk(Heap::VerificationCallback, this);
983}
984
985void Heap::RecordFree(size_t freed_objects, size_t freed_bytes) {
986  DCHECK_LE(freed_bytes, static_cast<size_t>(num_bytes_allocated_));
987  num_bytes_allocated_.fetch_sub(freed_bytes);
988
989  if (Runtime::Current()->HasStatsEnabled()) {
990    RuntimeStats* thread_stats = Thread::Current()->GetStats();
991    thread_stats->freed_objects += freed_objects;
992    thread_stats->freed_bytes += freed_bytes;
993
994    // TODO: Do this concurrently.
995    RuntimeStats* global_stats = Runtime::Current()->GetStats();
996    global_stats->freed_objects += freed_objects;
997    global_stats->freed_bytes += freed_bytes;
998  }
999}
1000
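// Allocation slow path: wait for any in-progress GC, then run the collectors in gc_plan_ order,
// growing the heap and finally clearing SoftReferences before giving up and throwing an
// OutOfMemoryError.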
1001mirror::Object* Heap::AllocateInternalWithGc(Thread* self, AllocatorType allocator,
1002                                             size_t alloc_size, size_t* bytes_allocated) {
1003  mirror::Object* ptr = nullptr;
1004  // The allocation failed. If the GC is running, block until it completes, and then retry the
1005  // allocation.
1006  collector::GcType last_gc = WaitForGcToComplete(self);
1007  if (last_gc != collector::kGcTypeNone) {
1008    // A GC was in progress and we blocked, retry allocation now that memory has been freed.
1009    ptr = TryToAllocate<true>(self, allocator, alloc_size, false, bytes_allocated);
1010  }
1011
1012  // Loop through our different Gc types and try to Gc until we get enough free memory.
1013  for (collector::GcType gc_type : gc_plan_) {
1014    if (ptr != nullptr) {
1015      break;
1016    }
1017    // Attempt to run the collector, if we succeed, re-try the allocation.
1018    if (CollectGarbageInternal(gc_type, kGcCauseForAlloc, false) != collector::kGcTypeNone) {
1019      // Did we free sufficient memory for the allocation to succeed?
1020      ptr = TryToAllocate<true>(self, allocator, alloc_size, false, bytes_allocated);
1021    }
1022  }
1023  // Allocations have failed after GCs;  this is an exceptional state.
1024  if (ptr == nullptr) {
1025    // Try harder, growing the heap if necessary.
1026    ptr = TryToAllocate<true>(self, allocator, alloc_size, true, bytes_allocated);
1027  }
1028  if (ptr == nullptr) {
1029    // Most allocations should have succeeded by now, so the heap is really full, really fragmented,
1030    // or the requested size is really big. Do another GC, collecting SoftReferences this time. The
1031    // VM spec requires that all SoftReferences have been collected and cleared before throwing
1032    // OOME.
1033    VLOG(gc) << "Forcing collection of SoftReferences for " << PrettySize(alloc_size)
1034             << " allocation";
1035    // TODO: Run finalization, but this may cause more allocations to occur.
1036    // We don't need a WaitForGcToComplete here either.
1037    DCHECK(!gc_plan_.empty());
1038    CollectGarbageInternal(gc_plan_.back(), kGcCauseForAlloc, true);
1039    ptr = TryToAllocate<true>(self, allocator, alloc_size, true, bytes_allocated);
1040    if (ptr == nullptr) {
1041      ThrowOutOfMemoryError(self, alloc_size, false);
1042    }
1043  }
1044  return ptr;
1045}
1046
1047void Heap::SetTargetHeapUtilization(float target) {
1048  DCHECK_GT(target, 0.0f);  // asserted in Java code
1049  DCHECK_LT(target, 1.0f);
1050  target_utilization_ = target;
1051}
1052
1053size_t Heap::GetObjectsAllocated() const {
1054  size_t total = 0;
1055  for (space::AllocSpace* space : alloc_spaces_) {
1056    total += space->GetObjectsAllocated();
1057  }
1058  return total;
1059}
1060
1061size_t Heap::GetObjectsAllocatedEver() const {
1062  size_t total = 0;
1063  for (space::AllocSpace* space : alloc_spaces_) {
1064    total += space->GetTotalObjectsAllocated();
1065  }
1066  return total;
1067}
1068
1069size_t Heap::GetBytesAllocatedEver() const {
1070  size_t total = 0;
1071  for (space::AllocSpace* space : alloc_spaces_) {
1072    total += space->GetTotalBytesAllocated();
1073  }
1074  return total;
1075}
1076
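// Visitor that counts how many visited objects are instances of each class in |classes|, either
// by exact class match or via IsAssignableFrom.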
1077class InstanceCounter {
1078 public:
1079  InstanceCounter(const std::vector<mirror::Class*>& classes, bool use_is_assignable_from, uint64_t* counts)
1080      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
1081      : classes_(classes), use_is_assignable_from_(use_is_assignable_from), counts_(counts) {
1082  }
1083
1084  void operator()(const mirror::Object* o) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1085    for (size_t i = 0; i < classes_.size(); ++i) {
1086      const mirror::Class* instance_class = o->GetClass();
1087      if (use_is_assignable_from_) {
1088        if (instance_class != NULL && classes_[i]->IsAssignableFrom(instance_class)) {
1089          ++counts_[i];
1090        }
1091      } else {
1092        if (instance_class == classes_[i]) {
1093          ++counts_[i];
1094        }
1095      }
1096    }
1097  }
1098
1099 private:
1100  const std::vector<mirror::Class*>& classes_;
1101  bool use_is_assignable_from_;
1102  uint64_t* const counts_;
1103
1104  DISALLOW_COPY_AND_ASSIGN(InstanceCounter);
1105};
1106
1107void Heap::CountInstances(const std::vector<mirror::Class*>& classes, bool use_is_assignable_from,
1108                          uint64_t* counts) {
1109  // We only want reachable instances, so do a GC. This also ensures that the alloc stack
1110  // is empty, so the live bitmap is the only place we need to look.
1111  Thread* self = Thread::Current();
1112  self->TransitionFromRunnableToSuspended(kNative);
1113  CollectGarbage(false);
1114  self->TransitionFromSuspendedToRunnable();
1115
1116  InstanceCounter counter(classes, use_is_assignable_from, counts);
1117  ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
1118  GetLiveBitmap()->Visit(counter);
1119}
1120
1121class InstanceCollector {
1122 public:
1123  InstanceCollector(mirror::Class* c, int32_t max_count, std::vector<mirror::Object*>& instances)
1124      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
1125      : class_(c), max_count_(max_count), instances_(instances) {
1126  }
1127
1128  void operator()(const mirror::Object* o) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1129    const mirror::Class* instance_class = o->GetClass();
1130    if (instance_class == class_) {
1131      if (max_count_ == 0 || instances_.size() < max_count_) {
1132        instances_.push_back(const_cast<mirror::Object*>(o));
1133      }
1134    }
1135  }
1136
1137 private:
1138  mirror::Class* class_;
1139  uint32_t max_count_;
1140  std::vector<mirror::Object*>& instances_;
1141
1142  DISALLOW_COPY_AND_ASSIGN(InstanceCollector);
1143};
1144
1145void Heap::GetInstances(mirror::Class* c, int32_t max_count,
1146                        std::vector<mirror::Object*>& instances) {
1147  // We only want reachable instances, so do a GC. This also ensures that the alloc stack
1148  // is empty, so the live bitmap is the only place we need to look.
1149  Thread* self = Thread::Current();
1150  self->TransitionFromRunnableToSuspended(kNative);
1151  CollectGarbage(false);
1152  self->TransitionFromSuspendedToRunnable();
1153
1154  InstanceCollector collector(c, max_count, instances);
1155  ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
1156  GetLiveBitmap()->Visit(collector);
1157}
1158
1159class ReferringObjectsFinder {
1160 public:
1161  ReferringObjectsFinder(mirror::Object* object, int32_t max_count,
1162                         std::vector<mirror::Object*>& referring_objects)
1163      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
1164      : object_(object), max_count_(max_count), referring_objects_(referring_objects) {
1165  }
1166
1167  // For bitmap Visit.
1168  // TODO: Fix lock analysis to not use NO_THREAD_SAFETY_ANALYSIS, requires support for
1169  // annotalysis on visitors.
1170  void operator()(const mirror::Object* o) const NO_THREAD_SAFETY_ANALYSIS {
1171    collector::MarkSweep::VisitObjectReferences(const_cast<mirror::Object*>(o), *this, true);
1172  }
1173
1174  // For MarkSweep::VisitObjectReferences.
1175  void operator()(mirror::Object* referrer, mirror::Object* object,
1176                  const MemberOffset&, bool) const {
1177    if (object == object_ && (max_count_ == 0 || referring_objects_.size() < max_count_)) {
1178      referring_objects_.push_back(referrer);
1179    }
1180  }
1181
1182 private:
1183  mirror::Object* object_;
1184  uint32_t max_count_;
1185  std::vector<mirror::Object*>& referring_objects_;
1186
1187  DISALLOW_COPY_AND_ASSIGN(ReferringObjectsFinder);
1188};
1189
1190void Heap::GetReferringObjects(mirror::Object* o, int32_t max_count,
1191                               std::vector<mirror::Object*>& referring_objects) {
1192  // We only want reachable instances, so do a GC. This also ensures that the alloc stack
1193  // is empty, so the live bitmap is the only place we need to look.
1194  Thread* self = Thread::Current();
1195  self->TransitionFromRunnableToSuspended(kNative);
1196  CollectGarbage(false);
1197  self->TransitionFromSuspendedToRunnable();
1198
1199  ReferringObjectsFinder finder(o, max_count, referring_objects);
1200  ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
1201  GetLiveBitmap()->Visit(finder);
1202}
1203
1204void Heap::CollectGarbage(bool clear_soft_references) {
1205  // Even if we waited for a GC we still need to do another GC since weaks allocated during the
1206  // last GC will not necessarily have been cleared.
1207  CollectGarbageInternal(collector::kGcTypeFull, kGcCauseExplicit, clear_soft_references);
1208}
1209
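// Called in the zygote before forking: turns the current alloc space into a zygote space that is
// only collected by full GCs, and creates a fresh alloc space for post-fork allocations. With a
// moving collector, live bump pointer space objects are first compacted onto the end of the
// non-moving space.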
1210void Heap::PreZygoteFork() {
1211  static Mutex zygote_creation_lock_("zygote creation lock", kZygoteCreationLock);
1212  Thread* self = Thread::Current();
1213  MutexLock mu(self, zygote_creation_lock_);
1214  // Check whether we already have a zygote space.
1215  if (have_zygote_space_) {
1216    return;
1217  }
1218  VLOG(heap) << "Starting PreZygoteFork";
1219  // Do this before acquiring the zygote creation lock so that we don't get lock order violations.
1220  CollectGarbageInternal(collector::kGcTypeFull, kGcCauseBackground, false);
1221  // Trim the pages at the end of the non-moving space.
1222  non_moving_space_->Trim();
1223  non_moving_space_->GetMemMap()->Protect(PROT_READ | PROT_WRITE);
1224  // Change the allocator to the post zygote one.
1225  ChangeAllocator(kDefaultPostZygoteAllocator);
1226  // TODO: Delete bump_pointer_space_ and temp_pointer_space_?
1227  if (semi_space_collector_ != nullptr) {
1228    // Create a new bump pointer space which we will compact into.
1229    space::BumpPointerSpace target_space("zygote bump space", non_moving_space_->End(),
1230                                         non_moving_space_->Limit());
1231    // Compact the bump pointer space to a new zygote bump pointer space.
1232    temp_space_->GetMemMap()->Protect(PROT_READ | PROT_WRITE);
1233    Compact(&target_space, bump_pointer_space_);
1234    CHECK_EQ(temp_space_->GetBytesAllocated(), 0U);
1235    total_objects_freed_ever_ += semi_space_collector_->GetFreedObjects();
1236    total_bytes_freed_ever_ += semi_space_collector_->GetFreedBytes();
1237    // Update the end and write out image.
1238    non_moving_space_->SetEnd(target_space.End());
1239    non_moving_space_->SetLimit(target_space.Limit());
1240    accounting::SpaceBitmap* bitmap = non_moving_space_->GetLiveBitmap();
1241    // Record the allocations in the bitmap.
1242    VLOG(heap) << "Recording zygote allocations";
1243    mirror::Object* obj = reinterpret_cast<mirror::Object*>(target_space.Begin());
1244    const mirror::Object* end = reinterpret_cast<const mirror::Object*>(target_space.End());
1245    while (obj < end) {
1246      bitmap->Set(obj);
1247      obj = space::BumpPointerSpace::GetNextObject(obj);
1248    }
1249  }
1250  // Turn the current alloc space into a zygote space and obtain the new alloc space composed of
1251  // the remaining available heap memory.
1252  space::MallocSpace* zygote_space = non_moving_space_;
1253  non_moving_space_ = zygote_space->CreateZygoteSpace("alloc space");
1254  non_moving_space_->SetFootprintLimit(non_moving_space_->Capacity());
1255  // Change the GC retention policy of the zygote space to only collect when full.
1256  zygote_space->SetGcRetentionPolicy(space::kGcRetentionPolicyFullCollect);
1257  AddSpace(non_moving_space_);
1258  have_zygote_space_ = true;
1259  zygote_space->InvalidateAllocator();
1260  // Create the zygote space mod union table.
1261  accounting::ModUnionTable* mod_union_table =
1262      new accounting::ModUnionTableCardCache("zygote space mod-union table", this, zygote_space);
1263  CHECK(mod_union_table != nullptr) << "Failed to create zygote space mod-union table";
1264  AddModUnionTable(mod_union_table);
1265  // Reset the cumulative loggers since we now have a few additional timing phases.
1266  for (const auto& collector : garbage_collectors_) {
1267    collector->ResetCumulativeStatistics();
1268  }
1269}
1270
1271void Heap::FlushAllocStack() {
1272  MarkAllocStack(non_moving_space_->GetLiveBitmap(), large_object_space_->GetLiveObjects(),
1273                 allocation_stack_.get());
1274  allocation_stack_->Reset();
1275}
1276
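// Marks every object in |stack| as live in either |bitmap| or |large_objects|, depending on
// which one covers the object's address.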
1277void Heap::MarkAllocStack(accounting::SpaceBitmap* bitmap, accounting::SpaceSetMap* large_objects,
1278                          accounting::ObjectStack* stack) {
1279  mirror::Object** limit = stack->End();
1280  for (mirror::Object** it = stack->Begin(); it != limit; ++it) {
1281    const mirror::Object* obj = *it;
1282    DCHECK(obj != NULL);
1283    if (LIKELY(bitmap->HasAddress(obj))) {
1284      bitmap->Set(obj);
1285    } else {
1286      large_objects->Set(obj);
1287    }
1288  }
1289}
1290
1291const char* PrettyCause(GcCause cause) {
1292  switch (cause) {
1293    case kGcCauseForAlloc: return "Alloc";
1294    case kGcCauseBackground: return "Background";
1295    case kGcCauseExplicit: return "Explicit";
1296    default:
1297      LOG(FATAL) << "Unreachable";
1298  }
1299  return "";
1300}
1301
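// Illustrative sketch (hypothetical call site, not how the runtime invokes these helpers): a
// moving collection of the bump pointer space would pair the two functions below roughly as
//   Compact(temp_space_, bump_pointer_space_);  // evacuate live objects into temp_space_
//   SwapSemiSpaces();                           // flip the roles of the two spaces afterwards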
1302void Heap::SwapSemiSpaces() {
1303  // Swap the spaces so we allocate into the space which we just evacuated.
1304  std::swap(bump_pointer_space_, temp_space_);
1305}
1306
1307void Heap::Compact(space::ContinuousMemMapAllocSpace* target_space,
1308                   space::ContinuousMemMapAllocSpace* source_space) {
1309  CHECK(kMovingCollector);
1310  CHECK_NE(target_space, source_space) << "In-place compaction currently unsupported";
1311  if (target_space != source_space) {
1312    semi_space_collector_->SetFromSpace(source_space);
1313    semi_space_collector_->SetToSpace(target_space);
1314    semi_space_collector_->Run(false);
1315  }
1316}
1317
1318collector::GcType Heap::CollectGarbageInternal(collector::GcType gc_type, GcCause gc_cause,
1319                                               bool clear_soft_references) {
1320  Thread* self = Thread::Current();
1321  Runtime* runtime = Runtime::Current();
1322  // If the heap can't run the GC, silently fail and return that no GC was run.
1323  switch (gc_type) {
1324    case collector::kGcTypeSticky: {
1325      const size_t alloc_space_size = non_moving_space_->Size();
1326      if (alloc_space_size < min_alloc_space_size_for_sticky_gc_ ||
1327          non_moving_space_->Capacity() - alloc_space_size < min_remaining_space_for_sticky_gc_) {
1328        return collector::kGcTypeNone;
1329      }
1330      break;
1331    }
1332    case collector::kGcTypePartial: {
1333      if (!have_zygote_space_) {
1334        return collector::kGcTypeNone;
1335      }
1336      break;
1337    }
1338    default: {
1339      // Other GC types don't have any special cases which make them not runnable. The main
1340      // case here is full GC.
1341    }
1342  }
1343  ScopedThreadStateChange tsc(self, kWaitingPerformingGc);
1344  Locks::mutator_lock_->AssertNotHeld(self);
1345  if (self->IsHandlingStackOverflow()) {
1346    LOG(WARNING) << "Performing GC on a thread that is handling a stack overflow.";
1347  }
1348  {
1349    gc_complete_lock_->AssertNotHeld(self);
1350    MutexLock mu(self, *gc_complete_lock_);
1351    // Ensure there is only one GC at a time.
1352    WaitForGcToCompleteLocked(self);
1353    // TODO: if another thread beat this one to do the GC, perhaps we should just return here?
1354    //       Not doing so at the moment, to ensure soft references are cleared.
1355    // GC can be disabled if someone has an active GetPrimitiveArrayCritical call.
1356    if (gc_disable_count_ != 0) {
1357      LOG(WARNING) << "Skipping GC due to disable count " << gc_disable_count_;
1358      return collector::kGcTypeNone;
1359    }
1360    is_gc_running_ = true;
1361  }
1362  if (gc_cause == kGcCauseForAlloc && runtime->HasStatsEnabled()) {
1363    ++runtime->GetStats()->gc_for_alloc_count;
1364    ++self->GetStats()->gc_for_alloc_count;
1365  }
1366  uint64_t gc_start_time_ns = NanoTime();
1367  uint64_t gc_start_size = GetBytesAllocated();
1368  // Approximate allocation rate in bytes / second.
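  // Illustrative numbers: if 6 MB were allocated since a GC that ended 1500 ms ago, the estimated
  // rate is (6 MB * 1000) / 1500 = 4 MB/s.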
1369  uint64_t ms_delta = NsToMs(gc_start_time_ns - last_gc_time_ns_);
1370  // Back-to-back GCs can cause 0 ms of wait time between GC invocations.
1371  if (LIKELY(ms_delta != 0)) {
1372    allocation_rate_ = ((gc_start_size - last_gc_size_) * 1000) / ms_delta;
1373    VLOG(heap) << "Allocation rate: " << PrettySize(allocation_rate_) << "/s";
1374  }
1375
1376  DCHECK_LT(gc_type, collector::kGcTypeMax);
1377  DCHECK_NE(gc_type, collector::kGcTypeNone);
1378
1379  collector::GarbageCollector* collector = nullptr;
1380  // TODO: Clean this up.
1381  if (current_allocator_ == kAllocatorTypeBumpPointer) {
1382    gc_type = semi_space_collector_->GetGcType();
1383    CHECK_EQ(temp_space_->GetObjectsAllocated(), 0U);
1384    semi_space_collector_->SetFromSpace(bump_pointer_space_);
1385    semi_space_collector_->SetToSpace(temp_space_);
1386    mprotect(temp_space_->Begin(), temp_space_->Capacity(), PROT_READ | PROT_WRITE);
1387    collector = semi_space_collector_;
1388    gc_type = collector::kGcTypeFull;
1389  } else if (current_allocator_ == kAllocatorTypeFreeList) {
1390    for (const auto& cur_collector : garbage_collectors_) {
1391      if (cur_collector->IsConcurrent() == concurrent_gc_ &&
1392          cur_collector->GetGcType() == gc_type) {
1393        collector = cur_collector;
1394        break;
1395      }
1396    }
1397  } else {
1398    LOG(FATAL) << "Invalid current allocator " << current_allocator_;
1399  }
1400  CHECK(collector != NULL)
1401      << "Could not find garbage collector with concurrent=" << concurrent_gc_
1402      << " and type=" << gc_type;
1403
1404  ATRACE_BEGIN(StringPrintf("%s %s GC", PrettyCause(gc_cause), collector->GetName()).c_str());
1405
1406  collector->Run(clear_soft_references);
1407  total_objects_freed_ever_ += collector->GetFreedObjects();
1408  total_bytes_freed_ever_ += collector->GetFreedBytes();
1409
1410  // Enqueue cleared references.
1411  EnqueueClearedReferences();
1412
1413  // Grow the heap so that we know when to perform the next GC.
1414  GrowForUtilization(gc_type, collector->GetDurationNs());
1415
1416  if (care_about_pause_times_) {
1417    const size_t duration = collector->GetDurationNs();
1418    std::vector<uint64_t> pauses = collector->GetPauseTimes();
1419    // GC for alloc pauses the allocating thread, so consider it as a pause.
1420    bool was_slow = duration > long_gc_log_threshold_ ||
1421            (gc_cause == kGcCauseForAlloc && duration > long_pause_log_threshold_);
1422    if (!was_slow) {
1423      for (uint64_t pause : pauses) {
1424        was_slow = was_slow || pause > long_pause_log_threshold_;
1425      }
1426    }
1427    if (was_slow) {
1428      const size_t percent_free = GetPercentFree();
1429      const size_t current_heap_size = GetBytesAllocated();
1430      const size_t total_memory = GetTotalMemory();
1431      std::ostringstream pause_string;
1432      for (size_t i = 0; i < pauses.size(); ++i) {
1433        pause_string << PrettyDuration((pauses[i] / 1000) * 1000)
1434                     << ((i != pauses.size() - 1) ? ", " : "");
1435      }
1436      LOG(INFO) << gc_cause << " " << collector->GetName()
1437                << " GC freed " << collector->GetFreedObjects() << "("
1438                << PrettySize(collector->GetFreedBytes()) << ") AllocSpace objects, "
1439                << collector->GetFreedLargeObjects() << "("
1440                << PrettySize(collector->GetFreedLargeObjectBytes()) << ") LOS objects, "
1441                << percent_free << "% free, " << PrettySize(current_heap_size) << "/"
1442                << PrettySize(total_memory) << ", " << "paused " << pause_string.str()
1443                << " total " << PrettyDuration((duration / 1000) * 1000);
1444      if (VLOG_IS_ON(heap)) {
1445        LOG(INFO) << Dumpable<TimingLogger>(collector->GetTimings());
1446      }
1447    }
1448  }
1449
1450  {
1451    MutexLock mu(self, *gc_complete_lock_);
1452    is_gc_running_ = false;
1453    last_gc_type_ = gc_type;
1454    // Wake anyone who may have been waiting for the GC to complete.
1455    gc_complete_cond_->Broadcast(self);
1456  }
1457
1458  ATRACE_END();
1459
1460  // Inform DDMS that a GC completed.
1461  Dbg::GcDidFinish();
1462  return gc_type;
1463}
1464
1465static mirror::Object* RootMatchesObjectVisitor(mirror::Object* root, void* arg) {
1466  mirror::Object* obj = reinterpret_cast<mirror::Object*>(arg);
1467  if (root == obj) {
1468    LOG(INFO) << "Object " << obj << " is a root";
1469  }
1470  return root;
1471}
1472
1473class ScanVisitor {
1474 public:
1475  void operator()(const mirror::Object* obj) const {
1476    LOG(ERROR) << "Would have rescanned object " << obj;
1477  }
1478};
1479
1480// Verify a reference from an object.
1481class VerifyReferenceVisitor {
1482 public:
1483  explicit VerifyReferenceVisitor(Heap* heap)
1484      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_)
1485      : heap_(heap), failed_(false) {}
1486
1487  bool Failed() const {
1488    return failed_;
1489  }
1490
1491  // TODO: Fix lock analysis to not use NO_THREAD_SAFETY_ANALYSIS, requires support for smarter
1492  // analysis on visitors.
1493  void operator()(const mirror::Object* obj, const mirror::Object* ref,
1494                  const MemberOffset& offset, bool /* is_static */) const
1495      NO_THREAD_SAFETY_ANALYSIS {
1496    // Verify that the reference is live.
1497    if (UNLIKELY(ref != NULL && !IsLive(ref))) {
1498      accounting::CardTable* card_table = heap_->GetCardTable();
1499      accounting::ObjectStack* alloc_stack = heap_->allocation_stack_.get();
1500      accounting::ObjectStack* live_stack = heap_->live_stack_.get();
1501      if (!failed_) {
1502        // Print the message only on the first failure to prevent spam.
1503        LOG(ERROR) << "!!!!!!!!!!!!!!Heap corruption detected!!!!!!!!!!!!!!!!!!!";
1504        failed_ = true;
1505      }
1506      if (obj != nullptr) {
1507        byte* card_addr = card_table->CardFromAddr(obj);
1508        LOG(ERROR) << "Object " << obj << " references dead object " << ref << " at offset "
1509                   << offset << "\n card value = " << static_cast<int>(*card_addr);
1510        if (heap_->IsValidObjectAddress(obj->GetClass())) {
1511          LOG(ERROR) << "Obj type " << PrettyTypeOf(obj);
1512        } else {
1513          LOG(ERROR) << "Object " << obj << " class(" << obj->GetClass() << ") not a heap address";
1514        }
1515
1516        // Attempt to find the class among the recently freed objects.
1517        space::ContinuousSpace* ref_space = heap_->FindContinuousSpaceFromObject(ref, true);
1518        if (ref_space != nullptr && ref_space->IsMallocSpace()) {
1519          space::MallocSpace* space = ref_space->AsMallocSpace();
1520          mirror::Class* ref_class = space->FindRecentFreedObject(ref);
1521          if (ref_class != nullptr) {
1522            LOG(ERROR) << "Reference " << ref << " found as a recently freed object with class "
1523                       << PrettyClass(ref_class);
1524          } else {
1525            LOG(ERROR) << "Reference " << ref << " not found as a recently freed object";
1526          }
1527        }
1528
1529        if (ref->GetClass() != nullptr && heap_->IsValidObjectAddress(ref->GetClass()) &&
1530            ref->GetClass()->IsClass()) {
1531          LOG(ERROR) << "Ref type " << PrettyTypeOf(ref);
1532        } else {
1533          LOG(ERROR) << "Ref " << ref << " class(" << ref->GetClass()
1534                     << ") is not a valid heap address";
1535        }
1536
1537        card_table->CheckAddrIsInCardTable(reinterpret_cast<const byte*>(obj));
1538        void* cover_begin = card_table->AddrFromCard(card_addr);
1539        void* cover_end = reinterpret_cast<void*>(reinterpret_cast<size_t>(cover_begin) +
1540            accounting::CardTable::kCardSize);
1541        LOG(ERROR) << "Card " << reinterpret_cast<void*>(card_addr) << " covers " << cover_begin
1542            << "-" << cover_end;
1543        accounting::SpaceBitmap* bitmap = heap_->GetLiveBitmap()->GetContinuousSpaceBitmap(obj);
1544
1545        // Print out how the object is live.
1546        if (bitmap != NULL && bitmap->Test(obj)) {
1547          LOG(ERROR) << "Object " << obj << " found in live bitmap";
1548        }
1549        if (alloc_stack->Contains(const_cast<mirror::Object*>(obj))) {
1550          LOG(ERROR) << "Object " << obj << " found in allocation stack";
1551        }
1552        if (live_stack->Contains(const_cast<mirror::Object*>(obj))) {
1553          LOG(ERROR) << "Object " << obj << " found in live stack";
1554        }
1555        if (alloc_stack->Contains(const_cast<mirror::Object*>(ref))) {
1556          LOG(ERROR) << "Ref " << ref << " found in allocation stack";
1557        }
1558        if (live_stack->Contains(const_cast<mirror::Object*>(ref))) {
1559          LOG(ERROR) << "Ref " << ref << " found in live stack";
1560        }
1561        // Attempt to see if the card table missed the reference.
1562        ScanVisitor scan_visitor;
1563        byte* byte_cover_begin = reinterpret_cast<byte*>(card_table->AddrFromCard(card_addr));
1564        card_table->Scan(bitmap, byte_cover_begin,
1565                         byte_cover_begin + accounting::CardTable::kCardSize, scan_visitor);
1566
1567        // Search to see if any of the roots reference our object.
1568        void* arg = const_cast<void*>(reinterpret_cast<const void*>(obj));
1569        Runtime::Current()->VisitRoots(&RootMatchesObjectVisitor, arg, false, false);
1570
1571        // Search to see if any of the roots reference our reference.
1572        arg = const_cast<void*>(reinterpret_cast<const void*>(ref));
1573        Runtime::Current()->VisitRoots(&RootMatchesObjectVisitor, arg, false, false);
1574      } else {
1575        LOG(ERROR) << "Root references dead object " << ref << "\nRef type " << PrettyTypeOf(ref);
1576      }
1577    }
1578  }
1579
1580  bool IsLive(const mirror::Object* obj) const NO_THREAD_SAFETY_ANALYSIS {
1581    return heap_->IsLiveObjectLocked(obj, true, false, true);
1582  }
1583
1584  static mirror::Object* VerifyRoots(mirror::Object* root, void* arg) {
1585    VerifyReferenceVisitor* visitor = reinterpret_cast<VerifyReferenceVisitor*>(arg);
1586    (*visitor)(nullptr, root, MemberOffset(0), true);
1587    return root;
1588  }
1589
1590 private:
1591  Heap* const heap_;
1592  mutable bool failed_;
1593};
1594
1595// Verify all references within an object, for use with HeapBitmap::Visit.
1596class VerifyObjectVisitor {
1597 public:
1598  explicit VerifyObjectVisitor(Heap* heap) : heap_(heap), failed_(false) {}
1599
1600  void operator()(mirror::Object* obj) const
1601      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
1602    // Note: we are verifying the references in obj but not obj itself, since obj must be live
1603    // or else we would not have found it in the live bitmap.
1604    VerifyReferenceVisitor visitor(heap_);
1605    // The class doesn't count as a reference but we should verify it anyway.
1606    collector::MarkSweep::VisitObjectReferences(obj, visitor, true);
1607    if (obj->GetClass()->IsReferenceClass()) {
1608      visitor(obj, heap_->GetReferenceReferent(obj), MemberOffset(0), false);
1609    }
1610    failed_ = failed_ || visitor.Failed();
1611  }
1612
1613  static void VisitCallback(mirror::Object* obj, void* arg)
1614      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
1615    VerifyObjectVisitor* visitor = reinterpret_cast<VerifyObjectVisitor*>(arg);
1616    visitor->operator()(obj);
1617  }
1618
1619  bool Failed() const {
1620    return failed_;
1621  }
1622
1623 private:
1624  Heap* const heap_;
1625  mutable bool failed_;
1626};
1627
1628// Must do this with mutators suspended since we are directly accessing the allocation stacks.
1629bool Heap::VerifyHeapReferences() {
1630  Locks::mutator_lock_->AssertExclusiveHeld(Thread::Current());
1631  // Let's sort the allocation stacks so that we can efficiently binary search them.
1632  allocation_stack_->Sort();
1633  live_stack_->Sort();
1634  VerifyObjectVisitor visitor(this);
1635  // Verify objects in the allocation stack since these will be objects which were:
1636  // 1. Allocated prior to the GC (pre GC verification).
1637  // 2. Allocated during the GC (pre sweep GC verification).
1638  // We don't want to verify the objects in the live stack since they themselves may be
1639  // pointing to dead objects if they are not reachable.
1640  VisitObjects(VerifyObjectVisitor::VisitCallback, &visitor);
1641  // Verify the roots:
1642  Runtime::Current()->VisitRoots(VerifyReferenceVisitor::VerifyRoots, &visitor, false, false);
1643  if (visitor.Failed()) {
1644    // Dump mod-union tables.
1645    for (const auto& table_pair : mod_union_tables_) {
1646      accounting::ModUnionTable* mod_union_table = table_pair.second;
1647      mod_union_table->Dump(LOG(ERROR) << mod_union_table->GetName() << ": ");
1648    }
1649    DumpSpaces();
1650    return false;
1651  }
1652  return true;
1653}
1654
1655class VerifyReferenceCardVisitor {
1656 public:
1657  VerifyReferenceCardVisitor(Heap* heap, bool* failed)
1658      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_,
1659                            Locks::heap_bitmap_lock_)
1660      : heap_(heap), failed_(failed) {
1661  }
1662
1663  // TODO: Fix lock analysis to not use NO_THREAD_SAFETY_ANALYSIS, requires support for
1664  // annotalysis on visitors.
1665  void operator()(const mirror::Object* obj, const mirror::Object* ref, const MemberOffset& offset,
1666                  bool is_static) const NO_THREAD_SAFETY_ANALYSIS {
1667    // Filter out class references since changing an object's class does not mark the card as dirty.
1668    // Also handles large objects, since the only reference they hold is a class reference.
1669    if (ref != NULL && !ref->IsClass()) {
1670      accounting::CardTable* card_table = heap_->GetCardTable();
1671      // If the object is not dirty and it is referencing something in the live stack other than
1672      // a class, then it must be on a dirty card.
1673      if (!card_table->AddrIsInCardTable(obj)) {
1674        LOG(ERROR) << "Object " << obj << " is not in the address range of the card table";
1675        *failed_ = true;
1676      } else if (!card_table->IsDirty(obj)) {
1677        // Card should be either kCardDirty if it got re-dirtied after we aged it, or
1678        // kCardDirty - 1 if it didn't get touched since we aged it.
1679        accounting::ObjectStack* live_stack = heap_->live_stack_.get();
1680        if (live_stack->ContainsSorted(const_cast<mirror::Object*>(ref))) {
1681          if (live_stack->ContainsSorted(const_cast<mirror::Object*>(obj))) {
1682            LOG(ERROR) << "Object " << obj << " found in live stack";
1683          }
1684          if (heap_->GetLiveBitmap()->Test(obj)) {
1685            LOG(ERROR) << "Object " << obj << " found in live bitmap";
1686          }
1687          LOG(ERROR) << "Object " << obj << " " << PrettyTypeOf(obj)
1688                    << " references " << ref << " " << PrettyTypeOf(ref) << " in live stack";
1689
1690          // Print which field of the object holds the reference.
1691          if (!obj->IsObjectArray()) {
1692            const mirror::Class* klass = is_static ? obj->AsClass() : obj->GetClass();
1693            CHECK(klass != NULL);
1694            const mirror::ObjectArray<mirror::ArtField>* fields = is_static ? klass->GetSFields()
1695                                                                            : klass->GetIFields();
1696            CHECK(fields != NULL);
1697            for (int32_t i = 0; i < fields->GetLength(); ++i) {
1698              const mirror::ArtField* cur = fields->Get(i);
1699              if (cur->GetOffset().Int32Value() == offset.Int32Value()) {
1700                LOG(ERROR) << (is_static ? "Static " : "") << "field in the live stack is "
1701                          << PrettyField(cur);
1702                break;
1703              }
1704            }
1705          } else {
1706            const mirror::ObjectArray<mirror::Object>* object_array =
1707                obj->AsObjectArray<mirror::Object>();
1708            for (int32_t i = 0; i < object_array->GetLength(); ++i) {
1709              if (object_array->Get(i) == ref) {
1710                LOG(ERROR) << (is_static ? "Static " : "") << "obj[" << i << "] = ref";
1711              }
1712            }
1713          }
1714
1715          *failed_ = true;
1716        }
1717      }
1718    }
1719  }
1720
1721 private:
1722  Heap* const heap_;
1723  bool* const failed_;
1724};
1725
1726class VerifyLiveStackReferences {
1727 public:
1728  explicit VerifyLiveStackReferences(Heap* heap)
1729      : heap_(heap),
1730        failed_(false) {}
1731
1732  void operator()(mirror::Object* obj) const
1733      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
1734    VerifyReferenceCardVisitor visitor(heap_, const_cast<bool*>(&failed_));
1735    collector::MarkSweep::VisitObjectReferences(const_cast<mirror::Object*>(obj), visitor, true);
1736  }
1737
1738  bool Failed() const {
1739    return failed_;
1740  }
1741
1742 private:
1743  Heap* const heap_;
1744  bool failed_;
1745};
1746
1747bool Heap::VerifyMissingCardMarks() {
1748  Locks::mutator_lock_->AssertExclusiveHeld(Thread::Current());
1749
1750  // We need to sort the live stack since we binary search it.
1751  live_stack_->Sort();
1752  VerifyLiveStackReferences visitor(this);
1753  GetLiveBitmap()->Visit(visitor);
1754
1755  // We can verify objects in the live stack since none of these should reference dead objects.
1756  for (mirror::Object** it = live_stack_->Begin(); it != live_stack_->End(); ++it) {
1757    visitor(*it);
1758  }
1759
1760  if (visitor.Failed()) {
1761    DumpSpaces();
1762    return false;
1763  }
1764  return true;
1765}
1766
1767void Heap::SwapStacks() {
1768  allocation_stack_.swap(live_stack_);
1769}
1770
1771accounting::ModUnionTable* Heap::FindModUnionTableFromSpace(space::Space* space) {
1772  auto it = mod_union_tables_.find(space);
1773  if (it == mod_union_tables_.end()) {
1774    return nullptr;
1775  }
1776  return it->second;
1777}
1778
1779void Heap::ProcessCards(TimingLogger& timings) {
1780  // Clear cards and keep track of cards cleared in the mod-union table.
1781  for (const auto& space : continuous_spaces_) {
1782    accounting::ModUnionTable* table = FindModUnionTableFromSpace(space);
1783    if (table != nullptr) {
1784      const char* name = space->IsZygoteSpace() ? "ZygoteModUnionClearCards" :
1785          "ImageModUnionClearCards";
1786      TimingLogger::ScopedSplit split(name, &timings);
1787      table->ClearCards();
1788    } else if (space->GetType() != space::kSpaceTypeBumpPointerSpace) {
1789      TimingLogger::ScopedSplit split("AllocSpaceClearCards", &timings);
1790      // No mod union table for the AllocSpace. Age the cards so that the GC knows that these cards
1791      // were dirty before the GC started.
1792      // TODO: Don't need to use atomic.
1793      // The race is that we either end up with an aged card or an unaged card. Since we
1794      // checkpoint the roots and then scan / update the mod-union tables afterwards, we will
1795      // always scan one of the two. If we end up with the unaged card, we scan it in the pause.
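      // Illustrative effect, assuming the usual ART card values: AgeCardVisitor maps a dirty card
      // (kCardDirty) to the aged value kCardDirty - 1 and any other non-clean value back to clean,
      // so cards dirtied before the GC remain distinguishable from cards dirtied during it.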
1796      card_table_->ModifyCardsAtomic(space->Begin(), space->End(), AgeCardVisitor(), VoidFunctor());
1797    }
1798  }
1799}
1800
1801static mirror::Object* IdentityCallback(mirror::Object* obj, void*) {
1802  return obj;
1803}
1804
1805void Heap::PreGcVerification(collector::GarbageCollector* gc) {
1806  ThreadList* thread_list = Runtime::Current()->GetThreadList();
1807  Thread* self = Thread::Current();
1808
1809  if (verify_pre_gc_heap_) {
1810    thread_list->SuspendAll();
1811    {
1812      ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
1813      if (!VerifyHeapReferences()) {
1814        LOG(FATAL) << "Pre " << gc->GetName() << " heap verification failed";
1815      }
1816    }
1817    thread_list->ResumeAll();
1818  }
1819
1820  // Check that all objects which reference things in the live stack are on dirty cards.
1821  if (verify_missing_card_marks_) {
1822    thread_list->SuspendAll();
1823    {
1824      ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
1825      SwapStacks();
1826      // Sort the live stack so that we can quickly binary search it later.
1827      if (!VerifyMissingCardMarks()) {
1828        LOG(FATAL) << "Pre " << gc->GetName() << " missing card mark verification failed";
1829      }
1830      SwapStacks();
1831    }
1832    thread_list->ResumeAll();
1833  }
1834
1835  if (verify_mod_union_table_) {
1836    thread_list->SuspendAll();
1837    ReaderMutexLock reader_lock(self, *Locks::heap_bitmap_lock_);
1838    for (const auto& table_pair : mod_union_tables_) {
1839      accounting::ModUnionTable* mod_union_table = table_pair.second;
1840      mod_union_table->UpdateAndMarkReferences(IdentityCallback, nullptr);
1841      mod_union_table->Verify();
1842    }
1843    thread_list->ResumeAll();
1844  }
1845}
1846
1847void Heap::PreSweepingGcVerification(collector::GarbageCollector* gc) {
1848  // Called before sweeping occurs since we want to make sure we are not going to reclaim any
1849  // reachable objects.
1850  if (verify_post_gc_heap_) {
1851    Thread* self = Thread::Current();
1852    CHECK_NE(self->GetState(), kRunnable);
1853    {
1854      WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
1855      // Swapping bound bitmaps does nothing.
1856      gc->SwapBitmaps();
1857      if (!VerifyHeapReferences()) {
1858        LOG(FATAL) << "Pre sweeping " << gc->GetName() << " GC verification failed";
1859      }
1860      gc->SwapBitmaps();
1861    }
1862  }
1863}
1864
1865void Heap::PostGcVerification(collector::GarbageCollector* gc) {
1866  if (verify_system_weaks_) {
1867    Thread* self = Thread::Current();
1868    ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
1869    collector::MarkSweep* mark_sweep = down_cast<collector::MarkSweep*>(gc);
1870    mark_sweep->VerifySystemWeaks();
1871  }
1872}
1873
1874collector::GcType Heap::WaitForGcToComplete(Thread* self) {
1875  ScopedThreadStateChange tsc(self, kWaitingForGcToComplete);
1876  MutexLock mu(self, *gc_complete_lock_);
1877  return WaitForGcToCompleteLocked(self);
1878}
1879
1880collector::GcType Heap::WaitForGcToCompleteLocked(Thread* self) {
1881  collector::GcType last_gc_type = collector::kGcTypeNone;
1882  uint64_t wait_start = NanoTime();
1883  while (is_gc_running_) {
1884    ATRACE_BEGIN("GC: Wait For Completion");
1885    // We must wait, change the thread state and then sleep on gc_complete_cond_.
1886    gc_complete_cond_->Wait(self);
1887    last_gc_type = last_gc_type_;
1888    ATRACE_END();
1889  }
1890  uint64_t wait_time = NanoTime() - wait_start;
1891  total_wait_time_ += wait_time;
1892  if (wait_time > long_pause_log_threshold_) {
1893    LOG(INFO) << "WaitForGcToComplete blocked for " << PrettyDuration(wait_time);
1894  }
1895  return last_gc_type;
1896}
1897
1898void Heap::DumpForSigQuit(std::ostream& os) {
1899  os << "Heap: " << GetPercentFree() << "% free, " << PrettySize(GetBytesAllocated()) << "/"
1900     << PrettySize(GetTotalMemory()) << "; " << GetObjectsAllocated() << " objects\n";
1901  DumpGcPerformanceInfo(os);
1902}
1903
1904size_t Heap::GetPercentFree() {
1905  return static_cast<size_t>(100.0f * static_cast<float>(GetFreeMemory()) / GetTotalMemory());
1906}
1907
1908void Heap::SetIdealFootprint(size_t max_allowed_footprint) {
1909  if (max_allowed_footprint > GetMaxMemory()) {
1910    VLOG(gc) << "Clamp target GC heap from " << PrettySize(max_allowed_footprint) << " to "
1911             << PrettySize(GetMaxMemory());
1912    max_allowed_footprint = GetMaxMemory();
1913  }
1914  max_allowed_footprint_ = max_allowed_footprint;
1915}
1916
1917bool Heap::IsMovableObject(const mirror::Object* obj) const {
1918  if (kMovingCollector) {
1919    DCHECK(!IsInTempSpace(obj));
1920    if (bump_pointer_space_->HasAddress(obj)) {
1921      return true;
1922    }
1923  }
1924  return false;
1925}
1926
1927bool Heap::IsInTempSpace(const mirror::Object* obj) const {
1928  if (temp_space_->HasAddress(obj) && !temp_space_->Contains(obj)) {
1929    return true;
1930  }
1931  return false;
1932}
1933
1934void Heap::UpdateMaxNativeFootprint() {
1935  size_t native_size = native_bytes_allocated_;
1936  // TODO: Tune the native heap utilization to be a value other than the java heap utilization.
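  // Worked example with illustrative numbers: with native_size = 16 MB, a target utilization of
  // 0.5, min_free = 2 MB and max_free = 8 MB, the raw target of 32 MB is clamped to 24 MB by
  // max_free; the GC watermark then becomes 24 MB and the blocking limit 2 * 24 - 16 = 32 MB.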
1937  size_t target_size = native_size / GetTargetHeapUtilization();
1938  if (target_size > native_size + max_free_) {
1939    target_size = native_size + max_free_;
1940  } else if (target_size < native_size + min_free_) {
1941    target_size = native_size + min_free_;
1942  }
1943  native_footprint_gc_watermark_ = target_size;
1944  native_footprint_limit_ = 2 * target_size - native_size;
1945}
1946
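// Illustrative example of the non-sticky growth heuristic below: with a target utilization of
// 0.5, bytes_allocated = 20 MB, min_free = 2 MB and max_free = 8 MB, the raw target of 40 MB is
// clamped to bytes_allocated + max_free = 28 MB.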
1947void Heap::GrowForUtilization(collector::GcType gc_type, uint64_t gc_duration) {
1948  // We know what our utilization is at this moment.
1949  // This doesn't actually resize any memory. It just lets the heap grow more when necessary.
1950  const size_t bytes_allocated = GetBytesAllocated();
1951  last_gc_size_ = bytes_allocated;
1952  last_gc_time_ns_ = NanoTime();
1953  size_t target_size;
1954  if (gc_type != collector::kGcTypeSticky) {
1955    // Grow the heap for non sticky GC.
1956    target_size = bytes_allocated / GetTargetHeapUtilization();
1957    if (target_size > bytes_allocated + max_free_) {
1958      target_size = bytes_allocated + max_free_;
1959    } else if (target_size < bytes_allocated + min_free_) {
1960      target_size = bytes_allocated + min_free_;
1961    }
1962    native_need_to_run_finalization_ = true;
1963    next_gc_type_ = collector::kGcTypeSticky;
1964  } else {
1965    // Based on how close the current heap size is to the target size, decide
1966    // whether or not to do a partial or sticky GC next.
1967    if (bytes_allocated + min_free_ <= max_allowed_footprint_) {
1968      next_gc_type_ = collector::kGcTypeSticky;
1969    } else {
1970      next_gc_type_ = collector::kGcTypePartial;
1971    }
1972    // If we have freed enough memory, shrink the heap back down.
1973    if (bytes_allocated + max_free_ < max_allowed_footprint_) {
1974      target_size = bytes_allocated + max_free_;
1975    } else {
1976      target_size = std::max(bytes_allocated, max_allowed_footprint_);
1977    }
1978  }
1979  if (!ignore_max_footprint_) {
1980    SetIdealFootprint(target_size);
1981    if (concurrent_gc_ && AllocatorHasConcurrentGC(current_allocator_)) {
1982      // Calculate when to perform the next ConcurrentGC.
1983      // Calculate the estimated GC duration.
1984      double gc_duration_seconds = NsToMs(gc_duration) / 1000.0;
1985      // Estimate how many remaining bytes we will have when we need to start the next GC.
1986      size_t remaining_bytes = allocation_rate_ * gc_duration_seconds;
1987      remaining_bytes = std::max(remaining_bytes, kMinConcurrentRemainingBytes);
1988      if (UNLIKELY(remaining_bytes > max_allowed_footprint_)) {
1989        // A highly unlikely situation where the estimated allocation rate implies that we would
1990        // exceed the application's entire footprint before the GC could finish. Schedule another
1991        // GC straight away.
1992        concurrent_start_bytes_ = bytes_allocated;
1993      } else {
1994        // Start a concurrent GC when we get close to the estimated remaining bytes. When the
1995        // allocation rate is very high, remaining_bytes could tell us that we should start a GC
1996        // right away.
1997        concurrent_start_bytes_ = std::max(max_allowed_footprint_ - remaining_bytes,
1998                                           bytes_allocated);
1999      }
2000      DCHECK_LE(concurrent_start_bytes_, max_allowed_footprint_);
2001      DCHECK_LE(max_allowed_footprint_, growth_limit_);
2002    }
2003  }
2004}
2005
2006void Heap::ClearGrowthLimit() {
2007  growth_limit_ = capacity_;
2008  non_moving_space_->ClearGrowthLimit();
2009}
2010
2011void Heap::SetReferenceOffsets(MemberOffset reference_referent_offset,
2012                               MemberOffset reference_queue_offset,
2013                               MemberOffset reference_queueNext_offset,
2014                               MemberOffset reference_pendingNext_offset,
2015                               MemberOffset finalizer_reference_zombie_offset) {
2016  reference_referent_offset_ = reference_referent_offset;
2017  reference_queue_offset_ = reference_queue_offset;
2018  reference_queueNext_offset_ = reference_queueNext_offset;
2019  reference_pendingNext_offset_ = reference_pendingNext_offset;
2020  finalizer_reference_zombie_offset_ = finalizer_reference_zombie_offset;
2021  CHECK_NE(reference_referent_offset_.Uint32Value(), 0U);
2022  CHECK_NE(reference_queue_offset_.Uint32Value(), 0U);
2023  CHECK_NE(reference_queueNext_offset_.Uint32Value(), 0U);
2024  CHECK_NE(reference_pendingNext_offset_.Uint32Value(), 0U);
2025  CHECK_NE(finalizer_reference_zombie_offset_.Uint32Value(), 0U);
2026}
2027
2028void Heap::SetReferenceReferent(mirror::Object* reference, mirror::Object* referent) {
2029  DCHECK(reference != NULL);
2030  DCHECK_NE(reference_referent_offset_.Uint32Value(), 0U);
2031  reference->SetFieldObject(reference_referent_offset_, referent, true);
2032}
2033
2034mirror::Object* Heap::GetReferenceReferent(mirror::Object* reference) {
2035  DCHECK(reference != NULL);
2036  DCHECK_NE(reference_referent_offset_.Uint32Value(), 0U);
2037  return reference->GetFieldObject<mirror::Object*>(reference_referent_offset_, true);
2038}
2039
2040void Heap::AddFinalizerReference(Thread* self, mirror::Object* object) {
2041  ScopedObjectAccess soa(self);
2042  JValue result;
2043  ArgArray arg_array(NULL, 0);
2044  arg_array.Append(reinterpret_cast<uint32_t>(object));
2045  soa.DecodeMethod(WellKnownClasses::java_lang_ref_FinalizerReference_add)->Invoke(self,
2046      arg_array.GetArray(), arg_array.GetNumBytes(), &result, 'V');
2047}
2048
2049void Heap::EnqueueClearedReferences() {
2050  if (!cleared_references_.IsEmpty()) {
2051    // When a runtime isn't started, there are no reference queues to care about, so ignore.
2052    if (LIKELY(Runtime::Current()->IsStarted())) {
2053      ScopedObjectAccess soa(Thread::Current());
2054      JValue result;
2055      ArgArray arg_array(NULL, 0);
2056      arg_array.Append(reinterpret_cast<uint32_t>(cleared_references_.GetList()));
2057      soa.DecodeMethod(WellKnownClasses::java_lang_ref_ReferenceQueue_add)->Invoke(soa.Self(),
2058          arg_array.GetArray(), arg_array.GetNumBytes(), &result, 'V');
2059    }
2060    cleared_references_.Clear();
2061  }
2062}
2063
2064void Heap::RequestConcurrentGC(Thread* self) {
2065  // Make sure that we can do a concurrent GC.
2066  Runtime* runtime = Runtime::Current();
2067  DCHECK(concurrent_gc_);
2068  if (runtime == NULL || !runtime->IsFinishedStarting() || runtime->IsShuttingDown(self) ||
2069      self->IsHandlingStackOverflow()) {
2070    return;
2071  }
2072  // We already have a request pending, no reason to start more until we update
2073  // concurrent_start_bytes_.
2074  concurrent_start_bytes_ = std::numeric_limits<size_t>::max();
2075  JNIEnv* env = self->GetJniEnv();
2076  DCHECK(WellKnownClasses::java_lang_Daemons != nullptr);
2077  DCHECK(WellKnownClasses::java_lang_Daemons_requestGC != nullptr);
2078  env->CallStaticVoidMethod(WellKnownClasses::java_lang_Daemons,
2079                            WellKnownClasses::java_lang_Daemons_requestGC);
2080  CHECK(!env->ExceptionCheck());
2081}
2082
2083void Heap::ConcurrentGC(Thread* self) {
2084  if (Runtime::Current()->IsShuttingDown(self)) {
2085    return;
2086  }
2087  // Wait for any GCs currently running to finish.
2088  if (WaitForGcToComplete(self) == collector::kGcTypeNone) {
2089    // If we can't run the GC type we wanted to run, find the next appropriate one and try
2090    // that instead. E.g. if we can't do a partial GC, do a full GC instead.
2091    if (CollectGarbageInternal(next_gc_type_, kGcCauseBackground, false) ==
2092        collector::kGcTypeNone) {
2093      for (collector::GcType gc_type : gc_plan_) {
2094        // Attempt to run the collector, if we succeed, we are done.
2095        if (gc_type > next_gc_type_ &&
2096            CollectGarbageInternal(gc_type, kGcCauseBackground, false) != collector::kGcTypeNone) {
2097          break;
2098        }
2099      }
2100    }
2101  }
2102}
2103
2104void Heap::RequestHeapTrim() {
2105  // GC completed and now we must decide whether to request a heap trim (advising pages back to
2106  // the kernel) or not. Issuing a request will also cause trimming of the libc heap. Since a
2107  // trim scans a space while holding its lock, it can become a source of jank.
2108  // Note that the large object space trims itself, and that the zygote space was trimmed at fork
2109  // time and has been unchanging since.
2110
2111  // We don't have a good measure of how worthwhile a trim might be. We can't use the live bitmap
2112  // because that only marks object heads, so a large array looks like lots of empty space. We
2113  // don't just ask dlmalloc to trim all the time, because the cost of an _attempted_ trim is proportional
2114  // to utilization (which is probably inversely proportional to how much benefit we can expect).
2115  // We could try mincore(2) but that's only a measure of how many pages we haven't given away,
2116  // not how much use we're making of those pages.
2117  uint64_t ms_time = MilliTime();
2118  // Don't bother trimming the alloc space if a heap trim occurred in the last two seconds.
2119  if (ms_time - last_trim_time_ms_ < 2 * 1000) {
2120    return;
2121  }
2122
2123  Thread* self = Thread::Current();
2124  Runtime* runtime = Runtime::Current();
2125  if (runtime == nullptr || !runtime->IsFinishedStarting() || runtime->IsShuttingDown(self)) {
2126    // Heap trimming isn't supported without a Java runtime or Daemons (such as at dex2oat time).
2127    // Also: we do not wish to start a heap trim if the runtime is shutting down (a racy check
2128    // as we don't hold the lock while requesting the trim).
2129    return;
2130  }
2131
2132  last_trim_time_ms_ = ms_time;
2133  ListenForProcessStateChange();
2134
2135  // Trim only if we do not currently care about pause times.
2136  if (!care_about_pause_times_) {
2137    JNIEnv* env = self->GetJniEnv();
2138    DCHECK(WellKnownClasses::java_lang_Daemons != NULL);
2139    DCHECK(WellKnownClasses::java_lang_Daemons_requestHeapTrim != NULL);
2140    env->CallStaticVoidMethod(WellKnownClasses::java_lang_Daemons,
2141                              WellKnownClasses::java_lang_Daemons_requestHeapTrim);
2142    CHECK(!env->ExceptionCheck());
2143  }
2144}
2145
2146void Heap::RevokeThreadLocalBuffers(Thread* thread) {
2147  non_moving_space_->RevokeThreadLocalBuffers(thread);
2148}
2149
2150void Heap::RevokeAllThreadLocalBuffers() {
2151  non_moving_space_->RevokeAllThreadLocalBuffers();
2152}
2153
2154bool Heap::IsGCRequestPending() const {
2155  return concurrent_start_bytes_ != std::numeric_limits<size_t>::max();
2156}
2157
2158void Heap::RunFinalization(JNIEnv* env) {
2159  // Can't do this in WellKnownClasses::Init since System is not properly set up at that point.
2160  if (WellKnownClasses::java_lang_System_runFinalization == nullptr) {
2161    CHECK(WellKnownClasses::java_lang_System != nullptr);
2162    WellKnownClasses::java_lang_System_runFinalization =
2163        CacheMethod(env, WellKnownClasses::java_lang_System, true, "runFinalization", "()V");
2164    CHECK(WellKnownClasses::java_lang_System_runFinalization != nullptr);
2165  }
2166  env->CallStaticVoidMethod(WellKnownClasses::java_lang_System,
2167                            WellKnownClasses::java_lang_System_runFinalization);
2168}
2169
2170void Heap::RegisterNativeAllocation(JNIEnv* env, int bytes) {
2171  Thread* self = ThreadForEnv(env);
2172  if (native_need_to_run_finalization_) {
2173    RunFinalization(env);
2174    UpdateMaxNativeFootprint();
2175    native_need_to_run_finalization_ = false;
2176  }
2177  // Total number of native bytes allocated.
2178  native_bytes_allocated_.fetch_add(bytes);
2179  if (static_cast<size_t>(native_bytes_allocated_) > native_footprint_gc_watermark_) {
2180    collector::GcType gc_type = have_zygote_space_ ? collector::kGcTypePartial :
2181        collector::kGcTypeFull;
2182
2183    // The second watermark is higher than the gc watermark. If you hit this, it means you are
2184    // allocating native objects faster than the GC can keep up with.
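    // E.g. (illustrative): with a GC watermark of 24 MB and a limit of 32 MB, crossing the
    // watermark triggers a (possibly concurrent) GC, while crossing the limit additionally blocks
    // this thread on GC plus finalization below.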
2185    if (static_cast<size_t>(native_bytes_allocated_) > native_footprint_limit_) {
2186      if (WaitForGcToComplete(self) != collector::kGcTypeNone) {
2187        // Just finished a GC, attempt to run finalizers.
2188        RunFinalization(env);
2189        CHECK(!env->ExceptionCheck());
2190      }
2191      // If we still are over the watermark, attempt a GC for alloc and run finalizers.
2192      if (static_cast<size_t>(native_bytes_allocated_) > native_footprint_limit_) {
2193        CollectGarbageInternal(gc_type, kGcCauseForAlloc, false);
2194        RunFinalization(env);
2195        native_need_to_run_finalization_ = false;
2196        CHECK(!env->ExceptionCheck());
2197      }
2198      // We have just run finalizers; update the native watermark since it is very likely that
2199      // finalizers released native managed allocations.
2200      UpdateMaxNativeFootprint();
2201    } else if (!IsGCRequestPending()) {
2202      if (concurrent_gc_ && AllocatorHasConcurrentGC(current_allocator_)) {
2203        RequestConcurrentGC(self);
2204      } else {
2205        CollectGarbageInternal(gc_type, kGcCauseForAlloc, false);
2206      }
2207    }
2208  }
2209}
2210
2211void Heap::RegisterNativeFree(JNIEnv* env, int bytes) {
2212  int expected_size, new_size;
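  // Lock-free decrement: reload the counter, recompute, and retry if another thread raced us;
  // compare_and_swap only succeeds when native_bytes_allocated_ still equals expected_size.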
2213  do {
2214    expected_size = native_bytes_allocated_.load();
2215    new_size = expected_size - bytes;
2216    if (UNLIKELY(new_size < 0)) {
2217      ScopedObjectAccess soa(env);
2218      env->ThrowNew(WellKnownClasses::java_lang_RuntimeException,
2219                    StringPrintf("Attempted to free %d native bytes with only %d native bytes "
2220                                 "registered as allocated", bytes, expected_size).c_str());
2221      break;
2222    }
2223  } while (!native_bytes_allocated_.compare_and_swap(expected_size, new_size));
2224}
2225
2226int64_t Heap::GetTotalMemory() const {
2227  int64_t ret = 0;
2228  for (const auto& space : continuous_spaces_) {
2229    // Currently don't include the image space.
2230    if (!space->IsImageSpace()) {
2231      ret += space->Size();
2232    }
2233  }
2234  for (const auto& space : discontinuous_spaces_) {
2235    if (space->IsLargeObjectSpace()) {
2236      ret += space->AsLargeObjectSpace()->GetBytesAllocated();
2237    }
2238  }
2239  return ret;
2240}
2241
2242void Heap::AddModUnionTable(accounting::ModUnionTable* mod_union_table) {
2243  DCHECK(mod_union_table != nullptr);
2244  mod_union_tables_.Put(mod_union_table->GetSpace(), mod_union_table);
2245}
2246
2247}  // namespace gc
2248}  // namespace art
2249