heap.cc revision ebdf3f320d71563cf0236c31d35d633be9576d8c
/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "heap.h"

#define ATRACE_TAG ATRACE_TAG_DALVIK
#include <cutils/trace.h>

#include <limits>
#include <vector>
#include <valgrind.h>

#include "base/histogram-inl.h"
#include "base/stl_util.h"
#include "common_throws.h"
#include "cutils/sched_policy.h"
#include "debugger.h"
#include "gc/accounting/atomic_stack.h"
#include "gc/accounting/card_table-inl.h"
#include "gc/accounting/heap_bitmap-inl.h"
#include "gc/accounting/mod_union_table.h"
#include "gc/accounting/mod_union_table-inl.h"
#include "gc/accounting/space_bitmap-inl.h"
#include "gc/collector/mark_sweep-inl.h"
#include "gc/collector/partial_mark_sweep.h"
#include "gc/collector/semi_space.h"
#include "gc/collector/sticky_mark_sweep.h"
#include "gc/space/bump_pointer_space.h"
#include "gc/space/dlmalloc_space-inl.h"
#include "gc/space/image_space.h"
#include "gc/space/large_object_space.h"
#include "gc/space/rosalloc_space-inl.h"
#include "gc/space/space-inl.h"
#include "gc/space/zygote_space.h"
#include "heap-inl.h"
#include "image.h"
#include "invoke_arg_array_builder.h"
#include "mirror/art_field-inl.h"
#include "mirror/class-inl.h"
#include "mirror/object.h"
#include "mirror/object-inl.h"
#include "mirror/object_array-inl.h"
#include "object_utils.h"
#include "os.h"
#include "runtime.h"
#include "ScopedLocalRef.h"
#include "scoped_thread_state_change.h"
#include "sirt_ref.h"
#include "thread_list.h"
#include "UniquePtr.h"
#include "well_known_classes.h"

namespace art {

extern void SetQuickAllocEntryPointsAllocator(gc::AllocatorType allocator);

namespace gc {

static constexpr bool kGCALotMode = false;
static constexpr size_t kGcAlotInterval = KB;
// Minimum amount of remaining bytes before a concurrent GC is triggered.
static constexpr size_t kMinConcurrentRemainingBytes = 128 * KB;
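// Presumed counterpart to the minimum above: an upper bound on the headroom kept before a
// concurrent GC is triggered (the code that consumes this constant is outside this excerpt).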
static constexpr size_t kMaxConcurrentRemainingBytes = 512 * KB;

Heap::Heap(size_t initial_size, size_t growth_limit, size_t min_free, size_t max_free,
           double target_utilization, size_t capacity, const std::string& image_file_name,
           CollectorType post_zygote_collector_type, CollectorType background_collector_type,
           size_t parallel_gc_threads, size_t conc_gc_threads, bool low_memory_mode,
           size_t long_pause_log_threshold, size_t long_gc_log_threshold,
           bool ignore_max_footprint, bool use_tlab, bool verify_pre_gc_heap,
           bool verify_post_gc_heap, bool verify_pre_gc_rosalloc,
           bool verify_post_gc_rosalloc)
    : non_moving_space_(nullptr),
      rosalloc_space_(nullptr),
      dlmalloc_space_(nullptr),
      main_space_(nullptr),
      concurrent_gc_(false),
      collector_type_(kCollectorTypeNone),
      post_zygote_collector_type_(post_zygote_collector_type),
      background_collector_type_(background_collector_type),
      parallel_gc_threads_(parallel_gc_threads),
      conc_gc_threads_(conc_gc_threads),
      low_memory_mode_(low_memory_mode),
      long_pause_log_threshold_(long_pause_log_threshold),
      long_gc_log_threshold_(long_gc_log_threshold),
      ignore_max_footprint_(ignore_max_footprint),
      have_zygote_space_(false),
      soft_reference_queue_(this),
      weak_reference_queue_(this),
      finalizer_reference_queue_(this),
      phantom_reference_queue_(this),
      cleared_references_(this),
      collector_type_running_(kCollectorTypeNone),
      last_gc_type_(collector::kGcTypeNone),
      next_gc_type_(collector::kGcTypePartial),
      capacity_(capacity),
      growth_limit_(growth_limit),
      max_allowed_footprint_(initial_size),
      native_footprint_gc_watermark_(initial_size),
      native_footprint_limit_(2 * initial_size),
      native_need_to_run_finalization_(false),
      // Initially assume we perceive jank in case the process state is never updated.
      process_state_(kProcessStateJankPerceptible),
      concurrent_start_bytes_(std::numeric_limits<size_t>::max()),
      total_bytes_freed_ever_(0),
      total_objects_freed_ever_(0),
      num_bytes_allocated_(0),
      native_bytes_allocated_(0),
      gc_memory_overhead_(0),
      verify_missing_card_marks_(false),
      verify_system_weaks_(false),
      verify_pre_gc_heap_(verify_pre_gc_heap),
      verify_post_gc_heap_(verify_post_gc_heap),
      verify_mod_union_table_(false),
      verify_pre_gc_rosalloc_(verify_pre_gc_rosalloc),
      verify_post_gc_rosalloc_(verify_post_gc_rosalloc),
      last_trim_time_ms_(0),
      allocation_rate_(0),
      /* For GC a lot mode, we limit the allocation stacks to kGcAlotInterval allocations. This
       * causes a lot of GC since we do a GC for alloc whenever the stack is full. When heap
       * verification is enabled, we limit the size of allocation stacks to speed up their
       * searching.
       */
      max_allocation_stack_size_(kGCALotMode ? kGcAlotInterval
          : (kDesiredHeapVerification > kVerifyAllFast) ? KB : MB),
      current_allocator_(kAllocatorTypeDlMalloc),
      current_non_moving_allocator_(kAllocatorTypeNonMoving),
      bump_pointer_space_(nullptr),
      temp_space_(nullptr),
      reference_referent_offset_(0),
      reference_queue_offset_(0),
      reference_queueNext_offset_(0),
      reference_pendingNext_offset_(0),
      finalizer_reference_zombie_offset_(0),
      min_free_(min_free),
      max_free_(max_free),
      target_utilization_(target_utilization),
      total_wait_time_(0),
      total_allocation_time_(0),
      verify_object_mode_(kHeapVerificationNotPermitted),
      disable_moving_gc_count_(0),
      running_on_valgrind_(RUNNING_ON_VALGRIND),
      use_tlab_(use_tlab) {
  if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
    LOG(INFO) << "Heap() entering";
  }
  // If we aren't the zygote, switch to the default non-zygote allocator. This may update the
  // entrypoints.
  if (!Runtime::Current()->IsZygote() || !kMovingCollector) {
    ChangeCollector(post_zygote_collector_type_);
  } else {
    // We are the zygote, use bump pointer allocation + semi space collector.
    ChangeCollector(kCollectorTypeSS);
  }

  live_bitmap_.reset(new accounting::HeapBitmap(this));
  mark_bitmap_.reset(new accounting::HeapBitmap(this));
  // Requested begin for the alloc space, to follow the mapped image and oat files
  byte* requested_alloc_space_begin = nullptr;
  if (!image_file_name.empty()) {
    space::ImageSpace* image_space = space::ImageSpace::Create(image_file_name.c_str());
    CHECK(image_space != nullptr) << "Failed to create space for " << image_file_name;
    AddSpace(image_space);
    // Oat files referenced by image files immediately follow them in memory; ensure the alloc
    // space isn't going to end up in the middle.
    byte* oat_file_end_addr = image_space->GetImageHeader().GetOatFileEnd();
    CHECK_GT(oat_file_end_addr, image_space->End());
    if (oat_file_end_addr > requested_alloc_space_begin) {
      requested_alloc_space_begin = AlignUp(oat_file_end_addr, kPageSize);
    }
  }
  const char* name = Runtime::Current()->IsZygote() ? "zygote space" : "alloc space";
  space::MallocSpace* malloc_space;
  if (kUseRosAlloc) {
    malloc_space = space::RosAllocSpace::Create(name, initial_size, growth_limit, capacity,
                                                requested_alloc_space_begin, low_memory_mode_);
    CHECK(malloc_space != nullptr) << "Failed to create rosalloc space";
  } else {
    malloc_space = space::DlMallocSpace::Create(name, initial_size, growth_limit, capacity,
                                                requested_alloc_space_begin);
    CHECK(malloc_space != nullptr) << "Failed to create dlmalloc space";
  }
  VLOG(heap) << "malloc_space : " << malloc_space;
  if (kMovingCollector) {
    // TODO: Place bump-pointer spaces somewhere to minimize size of card table.
    // TODO: Having 3+ spaces as big as the large heap size can cause virtual memory fragmentation
    // issues.
    const size_t bump_pointer_space_size = std::min(malloc_space->Capacity(), 128 * MB);
    bump_pointer_space_ = space::BumpPointerSpace::Create("Bump pointer space",
                                                          bump_pointer_space_size, nullptr);
    CHECK(bump_pointer_space_ != nullptr) << "Failed to create bump pointer space";
    AddSpace(bump_pointer_space_);
    temp_space_ = space::BumpPointerSpace::Create("Bump pointer space 2", bump_pointer_space_size,
                                                  nullptr);
    CHECK(temp_space_ != nullptr) << "Failed to create bump pointer space";
    AddSpace(temp_space_);
    VLOG(heap) << "bump_pointer_space : " << bump_pointer_space_;
    VLOG(heap) << "temp_space : " << temp_space_;
  }
  non_moving_space_ = malloc_space;
  malloc_space->SetFootprintLimit(malloc_space->Capacity());
  AddSpace(malloc_space);

  // Allocate the large object space.
  constexpr bool kUseFreeListSpaceForLOS = false;
  if (kUseFreeListSpaceForLOS) {
    large_object_space_ = space::FreeListSpace::Create("large object space", nullptr, capacity);
  } else {
    large_object_space_ = space::LargeObjectMapSpace::Create("large object space");
  }
  CHECK(large_object_space_ != nullptr) << "Failed to create large object space";
  AddSpace(large_object_space_);

  // Compute heap capacity. Continuous spaces are sorted in order of Begin().
  CHECK(!continuous_spaces_.empty());

  // Relies on the spaces being sorted.
  byte* heap_begin = continuous_spaces_.front()->Begin();
  byte* heap_end = continuous_spaces_.back()->Limit();
  if (Runtime::Current()->IsZygote()) {
    std::string error_str;
    post_zygote_non_moving_space_mem_map_.reset(
        MemMap::MapAnonymous("post zygote non-moving space", nullptr, 64 * MB,
                             PROT_READ | PROT_WRITE, true, &error_str));
    CHECK(post_zygote_non_moving_space_mem_map_.get() != nullptr) << error_str;
    heap_begin = std::min(post_zygote_non_moving_space_mem_map_->Begin(), heap_begin);
    heap_end = std::max(post_zygote_non_moving_space_mem_map_->End(), heap_end);
  }
  size_t heap_capacity = heap_end - heap_begin;

  // Allocate the card table.
  card_table_.reset(accounting::CardTable::Create(heap_begin, heap_capacity));
  CHECK(card_table_.get() != NULL) << "Failed to create card table";

  // Card cache for now since it makes it easier for us to update the references to the copying
  // spaces.
  accounting::ModUnionTable* mod_union_table =
      new accounting::ModUnionTableCardCache("Image mod-union table", this, GetImageSpace());
  CHECK(mod_union_table != nullptr) << "Failed to create image mod-union table";
  AddModUnionTable(mod_union_table);

  // TODO: Count objects in the image space here.
  num_bytes_allocated_ = 0;

  // Default mark stack size in bytes.
  static const size_t default_mark_stack_size = 64 * KB;
  mark_stack_.reset(accounting::ObjectStack::Create("mark stack", default_mark_stack_size));
  allocation_stack_.reset(accounting::ObjectStack::Create("allocation stack",
                                                          max_allocation_stack_size_));
  live_stack_.reset(accounting::ObjectStack::Create("live stack",
                                                    max_allocation_stack_size_));

  // It's still too early to take a lock because there are no threads yet, but we can create locks
  // now. We don't create them earlier to make it clear that you can't use locks during heap
  // initialization.
  gc_complete_lock_ = new Mutex("GC complete lock");
  gc_complete_cond_.reset(new ConditionVariable("GC complete condition variable",
                                                *gc_complete_lock_));
  last_gc_time_ns_ = NanoTime();
  last_gc_size_ = GetBytesAllocated();

  if (ignore_max_footprint_) {
    SetIdealFootprint(std::numeric_limits<size_t>::max());
    concurrent_start_bytes_ = std::numeric_limits<size_t>::max();
  }
  CHECK_NE(max_allowed_footprint_, 0U);

  // Create our garbage collectors.
  for (size_t i = 0; i < 2; ++i) {
    const bool concurrent = i != 0;
    garbage_collectors_.push_back(new collector::MarkSweep(this, concurrent));
    garbage_collectors_.push_back(new collector::PartialMarkSweep(this, concurrent));
    garbage_collectors_.push_back(new collector::StickyMarkSweep(this, concurrent));
  }
  if (kMovingCollector) {
    // TODO: Clean this up.
    bool generational = post_zygote_collector_type_ == kCollectorTypeGSS;
    semi_space_collector_ = new collector::SemiSpace(this, generational);
    garbage_collectors_.push_back(semi_space_collector_);
  }

  if (running_on_valgrind_) {
    Runtime::Current()->GetInstrumentation()->InstrumentQuickAllocEntryPoints();
  }

  if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
    LOG(INFO) << "Heap() exiting";
  }
}

void Heap::ChangeAllocator(AllocatorType allocator) {
  // These two allocators are only used internally and don't have any entrypoints.
  DCHECK_NE(allocator, kAllocatorTypeLOS);
  DCHECK_NE(allocator, kAllocatorTypeNonMoving);
  if (current_allocator_ != allocator) {
    current_allocator_ = allocator;
    SetQuickAllocEntryPointsAllocator(current_allocator_);
    Runtime::Current()->GetInstrumentation()->ResetQuickAllocEntryPoints();
  }
}

bool Heap::IsCompilingBoot() const {
  for (const auto& space : continuous_spaces_) {
    if (space->IsImageSpace()) {
      return false;
    } else if (space->IsZygoteSpace()) {
      return false;
    }
  }
  return true;
}

bool Heap::HasImageSpace() const {
  for (const auto& space : continuous_spaces_) {
    if (space->IsImageSpace()) {
      return true;
    }
  }
  return false;
}

void Heap::IncrementDisableMovingGC(Thread* self) {
  // Need to do this holding the lock to prevent races where the GC is about to run / running when
  // we attempt to disable it.
  ScopedThreadStateChange tsc(self, kWaitingForGcToComplete);
  MutexLock mu(self, *gc_complete_lock_);
  ++disable_moving_gc_count_;
  if (IsCompactingGC(collector_type_running_)) {
    WaitForGcToCompleteLocked(self);
  }
}

void Heap::DecrementDisableMovingGC(Thread* self) {
  MutexLock mu(self, *gc_complete_lock_);
  CHECK_GE(disable_moving_gc_count_, 0U);
  --disable_moving_gc_count_;
}

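// Called when the runtime's process state changes (roughly, jank-perceptible foreground vs.
// background). On a change this transitions to the matching collector type; if the state is
// unchanged it instead kicks off a full background collection.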
void Heap::UpdateProcessState(ProcessState process_state) {
  if (process_state_ != process_state) {
    process_state_ = process_state;
    if (process_state_ == kProcessStateJankPerceptible) {
      TransitionCollector(post_zygote_collector_type_);
    } else {
      TransitionCollector(background_collector_type_);
    }
  } else {
    CollectGarbageInternal(collector::kGcTypeFull, kGcCauseBackground, false);
  }
}

void Heap::CreateThreadPool() {
  const size_t num_threads = std::max(parallel_gc_threads_, conc_gc_threads_);
  if (num_threads != 0) {
    thread_pool_.reset(new ThreadPool("Heap thread pool", num_threads));
  }
}

void Heap::VisitObjects(ObjectCallback callback, void* arg) {
  Thread* self = Thread::Current();
  // GCs can move objects, so don't allow this.
  const char* old_cause = self->StartAssertNoThreadSuspension("Visiting objects");
  if (bump_pointer_space_ != nullptr) {
    // Visit objects in bump pointer space.
    bump_pointer_space_->Walk(callback, arg);
  }
  // TODO: Switch to standard begin and end to use a range-based loop.
  for (mirror::Object** it = allocation_stack_->Begin(), **end = allocation_stack_->End();
      it < end; ++it) {
    mirror::Object* obj = *it;
    if (obj != nullptr && obj->GetClass() != nullptr) {
      // Avoid the race condition caused by the object not yet being written into the allocation
      // stack or the class not yet being written in the object.
      callback(obj, arg);
    }
  }
  GetLiveBitmap()->Walk(callback, arg);
  self->EndAssertNoThreadSuspension(old_cause);
}

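// Mark every object on the given allocation stack as live in the malloc space live bitmaps and
// the large object live set, so recently allocated objects are treated as reachable.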
void Heap::MarkAllocStackAsLive(accounting::ObjectStack* stack) {
  space::ContinuousSpace* space1 = rosalloc_space_ != nullptr ? rosalloc_space_ : non_moving_space_;
  space::ContinuousSpace* space2 = dlmalloc_space_ != nullptr ? dlmalloc_space_ : non_moving_space_;
  // This just handles the case where either the rosalloc space or the dlmalloc space is absent.
  // TODO: Generalize this to n bitmaps?
  if (space1 == nullptr) {
    DCHECK(space2 != nullptr);
    space1 = space2;
  }
  if (space2 == nullptr) {
    DCHECK(space1 != nullptr);
    space2 = space1;
  }
  MarkAllocStack(space1->GetLiveBitmap(), space2->GetLiveBitmap(),
                 large_object_space_->GetLiveObjects(), stack);
}

void Heap::DeleteThreadPool() {
  thread_pool_.reset(nullptr);
}

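// Register a space with the heap: hook its live/mark bitmaps (or discontinuous object sets) into
// the heap bitmaps, keep continuous_spaces_ sorted by start address, optionally record it as the
// default dlmalloc/rosalloc space, and track it in alloc_spaces_ if it is an alloc space.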
void Heap::AddSpace(space::Space* space, bool set_as_default) {
  DCHECK(space != nullptr);
  WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
  if (space->IsContinuousSpace()) {
    DCHECK(!space->IsDiscontinuousSpace());
    space::ContinuousSpace* continuous_space = space->AsContinuousSpace();
    // Continuous spaces don't necessarily have bitmaps.
    accounting::SpaceBitmap* live_bitmap = continuous_space->GetLiveBitmap();
    accounting::SpaceBitmap* mark_bitmap = continuous_space->GetMarkBitmap();
    if (live_bitmap != nullptr) {
      DCHECK(mark_bitmap != nullptr);
      live_bitmap_->AddContinuousSpaceBitmap(live_bitmap);
      mark_bitmap_->AddContinuousSpaceBitmap(mark_bitmap);
    }
    continuous_spaces_.push_back(continuous_space);
    if (set_as_default) {
      if (continuous_space->IsDlMallocSpace()) {
        dlmalloc_space_ = continuous_space->AsDlMallocSpace();
      } else if (continuous_space->IsRosAllocSpace()) {
        rosalloc_space_ = continuous_space->AsRosAllocSpace();
      }
    }
    // Ensure that spaces remain sorted in increasing order of start address.
    std::sort(continuous_spaces_.begin(), continuous_spaces_.end(),
              [](const space::ContinuousSpace* a, const space::ContinuousSpace* b) {
      return a->Begin() < b->Begin();
    });
  } else {
    DCHECK(space->IsDiscontinuousSpace());
    space::DiscontinuousSpace* discontinuous_space = space->AsDiscontinuousSpace();
    DCHECK(discontinuous_space->GetLiveObjects() != nullptr);
    live_bitmap_->AddDiscontinuousObjectSet(discontinuous_space->GetLiveObjects());
    DCHECK(discontinuous_space->GetMarkObjects() != nullptr);
    mark_bitmap_->AddDiscontinuousObjectSet(discontinuous_space->GetMarkObjects());
    discontinuous_spaces_.push_back(discontinuous_space);
  }
  if (space->IsAllocSpace()) {
    alloc_spaces_.push_back(space->AsAllocSpace());
  }
}

void Heap::RemoveSpace(space::Space* space) {
  DCHECK(space != nullptr);
  WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
  if (space->IsContinuousSpace()) {
    DCHECK(!space->IsDiscontinuousSpace());
    space::ContinuousSpace* continuous_space = space->AsContinuousSpace();
    // Continuous spaces don't necessarily have bitmaps.
    accounting::SpaceBitmap* live_bitmap = continuous_space->GetLiveBitmap();
    accounting::SpaceBitmap* mark_bitmap = continuous_space->GetMarkBitmap();
    if (live_bitmap != nullptr) {
      DCHECK(mark_bitmap != nullptr);
      live_bitmap_->RemoveContinuousSpaceBitmap(live_bitmap);
      mark_bitmap_->RemoveContinuousSpaceBitmap(mark_bitmap);
    }
    auto it = std::find(continuous_spaces_.begin(), continuous_spaces_.end(), continuous_space);
    DCHECK(it != continuous_spaces_.end());
    continuous_spaces_.erase(it);
    if (continuous_space == dlmalloc_space_) {
      dlmalloc_space_ = nullptr;
    } else if (continuous_space == rosalloc_space_) {
      rosalloc_space_ = nullptr;
    }
    if (continuous_space == main_space_) {
      main_space_ = nullptr;
    }
  } else {
    DCHECK(space->IsDiscontinuousSpace());
    space::DiscontinuousSpace* discontinuous_space = space->AsDiscontinuousSpace();
    DCHECK(discontinuous_space->GetLiveObjects() != nullptr);
    live_bitmap_->RemoveDiscontinuousObjectSet(discontinuous_space->GetLiveObjects());
    DCHECK(discontinuous_space->GetMarkObjects() != nullptr);
    mark_bitmap_->RemoveDiscontinuousObjectSet(discontinuous_space->GetMarkObjects());
    auto it = std::find(discontinuous_spaces_.begin(), discontinuous_spaces_.end(),
                        discontinuous_space);
    DCHECK(it != discontinuous_spaces_.end());
    discontinuous_spaces_.erase(it);
  }
  if (space->IsAllocSpace()) {
    auto it = std::find(alloc_spaces_.begin(), alloc_spaces_.end(), space->AsAllocSpace());
    DCHECK(it != alloc_spaces_.end());
    alloc_spaces_.erase(it);
  }
}

void Heap::RegisterGCAllocation(size_t bytes) {
  if (this != nullptr) {
    gc_memory_overhead_.FetchAndAdd(bytes);
  }
}

void Heap::RegisterGCDeAllocation(size_t bytes) {
  if (this != nullptr) {
    gc_memory_overhead_.FetchAndSub(bytes);
  }
}

void Heap::DumpGcPerformanceInfo(std::ostream& os) {
  // Dump cumulative timings.
  os << "Dumping cumulative Gc timings\n";
  uint64_t total_duration = 0;

  // Dump cumulative loggers for each GC type.
  uint64_t total_paused_time = 0;
  for (const auto& collector : garbage_collectors_) {
    CumulativeLogger& logger = collector->GetCumulativeTimings();
    if (logger.GetTotalNs() != 0) {
      os << Dumpable<CumulativeLogger>(logger);
      const uint64_t total_ns = logger.GetTotalNs();
      const uint64_t total_pause_ns = collector->GetTotalPausedTimeNs();
      double seconds = NsToMs(logger.GetTotalNs()) / 1000.0;
      const uint64_t freed_bytes = collector->GetTotalFreedBytes();
      const uint64_t freed_objects = collector->GetTotalFreedObjects();
      Histogram<uint64_t>::CumulativeData cumulative_data;
      collector->GetPauseHistogram().CreateHistogram(&cumulative_data);
      collector->GetPauseHistogram().PrintConfidenceIntervals(os, 0.99, cumulative_data);
      os << collector->GetName() << " total time: " << PrettyDuration(total_ns) << "\n"
         << collector->GetName() << " freed: " << freed_objects
         << " objects with total size " << PrettySize(freed_bytes) << "\n"
         << collector->GetName() << " throughput: " << freed_objects / seconds << "/s / "
         << PrettySize(freed_bytes / seconds) << "/s\n";
      total_duration += total_ns;
      total_paused_time += total_pause_ns;
    }
  }
  uint64_t allocation_time = static_cast<uint64_t>(total_allocation_time_) * kTimeAdjust;
  if (total_duration != 0) {
    const double total_seconds = static_cast<double>(total_duration / 1000) / 1000000.0;
    os << "Total time spent in GC: " << PrettyDuration(total_duration) << "\n";
    os << "Mean GC size throughput: "
       << PrettySize(GetBytesFreedEver() / total_seconds) << "/s\n";
    os << "Mean GC object throughput: "
       << (GetObjectsFreedEver() / total_seconds) << " objects/s\n";
  }
  size_t total_objects_allocated = GetObjectsAllocatedEver();
  os << "Total number of allocations: " << total_objects_allocated << "\n";
  size_t total_bytes_allocated = GetBytesAllocatedEver();
  os << "Total bytes allocated " << PrettySize(total_bytes_allocated) << "\n";
  if (kMeasureAllocationTime) {
    os << "Total time spent allocating: " << PrettyDuration(allocation_time) << "\n";
    os << "Mean allocation time: " << PrettyDuration(allocation_time / total_objects_allocated)
       << "\n";
  }
  os << "Total mutator paused time: " << PrettyDuration(total_paused_time) << "\n";
  os << "Total time waiting for GC to complete: " << PrettyDuration(total_wait_time_) << "\n";
  os << "Approximate GC data structures memory overhead: " << gc_memory_overhead_;
}

Heap::~Heap() {
  VLOG(heap) << "Starting ~Heap()";
  STLDeleteElements(&garbage_collectors_);
  // If we don't reset then the mark stack complains in its destructor.
  allocation_stack_->Reset();
  live_stack_->Reset();
  STLDeleteValues(&mod_union_tables_);
  STLDeleteElements(&continuous_spaces_);
  STLDeleteElements(&discontinuous_spaces_);
  delete gc_complete_lock_;
  VLOG(heap) << "Finished ~Heap()";
}

space::ContinuousSpace* Heap::FindContinuousSpaceFromObject(const mirror::Object* obj,
                                                            bool fail_ok) const {
  for (const auto& space : continuous_spaces_) {
    if (space->Contains(obj)) {
      return space;
    }
  }
  if (!fail_ok) {
    LOG(FATAL) << "object " << reinterpret_cast<const void*>(obj) << " not inside any spaces!";
  }
  return NULL;
}

space::DiscontinuousSpace* Heap::FindDiscontinuousSpaceFromObject(const mirror::Object* obj,
                                                                  bool fail_ok) const {
  for (const auto& space : discontinuous_spaces_) {
    if (space->Contains(obj)) {
      return space;
    }
  }
  if (!fail_ok) {
    LOG(FATAL) << "object " << reinterpret_cast<const void*>(obj) << " not inside any spaces!";
  }
  return NULL;
}

space::Space* Heap::FindSpaceFromObject(const mirror::Object* obj, bool fail_ok) const {
  space::Space* result = FindContinuousSpaceFromObject(obj, true);
  if (result != NULL) {
    return result;
  }
  return FindDiscontinuousSpaceFromObject(obj, true);
}

struct SoftReferenceArgs {
  IsMarkedCallback* is_marked_callback_;
  MarkObjectCallback* recursive_mark_callback_;
  void* arg_;
};

mirror::Object* Heap::PreserveSoftReferenceCallback(mirror::Object* obj, void* arg) {
  SoftReferenceArgs* args = reinterpret_cast<SoftReferenceArgs*>(arg);
  // TODO: Not preserve all soft references.
  return args->recursive_mark_callback_(obj, args->arg_);
}

// Process reference class instances and schedule finalizations.
void Heap::ProcessReferences(TimingLogger& timings, bool clear_soft,
                             IsMarkedCallback* is_marked_callback,
                             MarkObjectCallback* recursive_mark_object_callback, void* arg) {
  // Unless we are in the zygote or required to clear soft references with white references,
  // preserve some white referents.
  if (!clear_soft && !Runtime::Current()->IsZygote()) {
    SoftReferenceArgs soft_reference_args;
    soft_reference_args.is_marked_callback_ = is_marked_callback;
    soft_reference_args.recursive_mark_callback_ = recursive_mark_object_callback;
    soft_reference_args.arg_ = arg;
    soft_reference_queue_.PreserveSomeSoftReferences(&PreserveSoftReferenceCallback,
                                                     &soft_reference_args);
  }
  timings.StartSplit("ProcessReferences");
  // Clear all remaining soft and weak references with white referents.
  soft_reference_queue_.ClearWhiteReferences(cleared_references_, is_marked_callback, arg);
  weak_reference_queue_.ClearWhiteReferences(cleared_references_, is_marked_callback, arg);
  timings.EndSplit();
  // Preserve all white objects with finalize methods and schedule them for finalization.
  timings.StartSplit("EnqueueFinalizerReferences");
  finalizer_reference_queue_.EnqueueFinalizerReferences(cleared_references_, is_marked_callback,
                                                        recursive_mark_object_callback, arg);
  timings.EndSplit();
  timings.StartSplit("ProcessReferences");
  // Clear all f-reachable soft and weak references with white referents.
  soft_reference_queue_.ClearWhiteReferences(cleared_references_, is_marked_callback, arg);
  weak_reference_queue_.ClearWhiteReferences(cleared_references_, is_marked_callback, arg);
  // Clear all phantom references with white referents.
  phantom_reference_queue_.ClearWhiteReferences(cleared_references_, is_marked_callback, arg);
  // At this point all reference queues other than the cleared references should be empty.
  DCHECK(soft_reference_queue_.IsEmpty());
  DCHECK(weak_reference_queue_.IsEmpty());
  DCHECK(finalizer_reference_queue_.IsEmpty());
  DCHECK(phantom_reference_queue_.IsEmpty());
  timings.EndSplit();
}

bool Heap::IsEnqueued(mirror::Object* ref) const {
  // Since the references are stored in cyclic lists, once a reference is enqueued its pending-next
  // field will always be non-null.
  return ref->GetFieldObject<mirror::Object>(GetReferencePendingNextOffset(), false) != nullptr;
}

bool Heap::IsEnqueuable(mirror::Object* ref) const {
  DCHECK(ref != nullptr);
  const mirror::Object* queue =
      ref->GetFieldObject<mirror::Object>(GetReferenceQueueOffset(), false);
  const mirror::Object* queue_next =
      ref->GetFieldObject<mirror::Object>(GetReferenceQueueNextOffset(), false);
  return queue != nullptr && queue_next == nullptr;
}

// Process the "referent" field in a java.lang.ref.Reference.  If the referent has not yet been
// marked, put it on the appropriate list in the heap for later processing.
void Heap::DelayReferenceReferent(mirror::Class* klass, mirror::Object* obj,
                                  IsMarkedCallback is_marked_callback, void* arg) {
  DCHECK(klass != nullptr);
  DCHECK(klass->IsReferenceClass());
  DCHECK(obj != nullptr);
  mirror::Object* referent = GetReferenceReferent(obj);
  if (referent != nullptr) {
    mirror::Object* forward_address = is_marked_callback(referent, arg);
    // Null means that the object is not currently marked.
    if (forward_address == nullptr) {
      Thread* self = Thread::Current();
      // TODO: Remove these locks, and use atomic stacks for storing references?
      // We need to check that the references haven't already been enqueued since we can end up
      // scanning the same reference multiple times due to dirty cards.
      if (klass->IsSoftReferenceClass()) {
        soft_reference_queue_.AtomicEnqueueIfNotEnqueued(self, obj);
      } else if (klass->IsWeakReferenceClass()) {
        weak_reference_queue_.AtomicEnqueueIfNotEnqueued(self, obj);
      } else if (klass->IsFinalizerReferenceClass()) {
        finalizer_reference_queue_.AtomicEnqueueIfNotEnqueued(self, obj);
      } else if (klass->IsPhantomReferenceClass()) {
        phantom_reference_queue_.AtomicEnqueueIfNotEnqueued(self, obj);
      } else {
        LOG(FATAL) << "Invalid reference type " << PrettyClass(klass) << " " << std::hex
                   << klass->GetAccessFlags();
      }
    } else if (referent != forward_address) {
      // Referent is already marked and we need to update it.
      SetReferenceReferent(obj, forward_address);
    }
  }
}

space::ImageSpace* Heap::GetImageSpace() const {
  for (const auto& space : continuous_spaces_) {
    if (space->IsImageSpace()) {
      return space->AsImageSpace();
    }
  }
  return NULL;
}

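// Callback handed to MallocSpace::Walk below: tracks, through |arg|, the largest free contiguous
// chunk seen, which feeds the fragmentation message in ThrowOutOfMemoryError.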
static void MSpaceChunkCallback(void* start, void* end, size_t used_bytes, void* arg) {
  size_t chunk_size = reinterpret_cast<uint8_t*>(end) - reinterpret_cast<uint8_t*>(start);
  if (used_bytes < chunk_size) {
    size_t chunk_free_bytes = chunk_size - used_bytes;
    size_t& max_contiguous_allocation = *reinterpret_cast<size_t*>(arg);
    max_contiguous_allocation = std::max(max_contiguous_allocation, chunk_free_bytes);
  }
}

void Heap::ThrowOutOfMemoryError(Thread* self, size_t byte_count, bool large_object_allocation) {
  std::ostringstream oss;
  size_t total_bytes_free = GetFreeMemory();
  oss << "Failed to allocate a " << byte_count << " byte allocation with " << total_bytes_free
      << " free bytes";
  // If the allocation failed due to fragmentation, print out the largest contiguous allocation.
  if (!large_object_allocation && total_bytes_free >= byte_count) {
    size_t max_contiguous_allocation = 0;
    for (const auto& space : continuous_spaces_) {
      if (space->IsMallocSpace()) {
        // To allow the Walk/InspectAll() to exclusively-lock the mutator
        // lock, temporarily release the shared access to the mutator
        // lock here by transitioning to the suspended state.
        Locks::mutator_lock_->AssertSharedHeld(self);
        self->TransitionFromRunnableToSuspended(kSuspended);
        space->AsMallocSpace()->Walk(MSpaceChunkCallback, &max_contiguous_allocation);
        self->TransitionFromSuspendedToRunnable();
        Locks::mutator_lock_->AssertSharedHeld(self);
      }
    }
    oss << "; failed due to fragmentation (largest possible contiguous allocation "
        <<  max_contiguous_allocation << " bytes)";
  }
  self->ThrowOutOfMemoryError(oss.str().c_str());
}

void Heap::Trim() {
  Thread* self = Thread::Current();
  {
    // Need to do this before acquiring the locks since we don't want to get suspended while
    // holding any locks.
    ScopedThreadStateChange tsc(self, kWaitingForGcToComplete);
    // Pretend we are doing a GC to prevent background compaction from deleting the space we are
    // trimming.
    MutexLock mu(self, *gc_complete_lock_);
    // Ensure there is only one GC at a time.
    WaitForGcToCompleteLocked(self);
    collector_type_running_ = kCollectorTypeHeapTrim;
  }
  uint64_t start_ns = NanoTime();
  // Trim the managed spaces.
  uint64_t total_alloc_space_allocated = 0;
  uint64_t total_alloc_space_size = 0;
  uint64_t managed_reclaimed = 0;
  for (const auto& space : continuous_spaces_) {
    if (space->IsMallocSpace()) {
      gc::space::MallocSpace* alloc_space = space->AsMallocSpace();
      total_alloc_space_size += alloc_space->Size();
      managed_reclaimed += alloc_space->Trim();
    }
  }
  total_alloc_space_allocated = GetBytesAllocated() - large_object_space_->GetBytesAllocated() -
      bump_pointer_space_->Size();
  const float managed_utilization = static_cast<float>(total_alloc_space_allocated) /
      static_cast<float>(total_alloc_space_size);
  uint64_t gc_heap_end_ns = NanoTime();
  // We never move things in the native heap, so we can finish the GC at this point.
  FinishGC(self, collector::kGcTypeNone);
  // Trim the native heap.
  dlmalloc_trim(0);
  size_t native_reclaimed = 0;
  dlmalloc_inspect_all(DlmallocMadviseCallback, &native_reclaimed);
  uint64_t end_ns = NanoTime();
  VLOG(heap) << "Heap trim of managed (duration=" << PrettyDuration(gc_heap_end_ns - start_ns)
      << ", advised=" << PrettySize(managed_reclaimed) << ") and native (duration="
      << PrettyDuration(end_ns - gc_heap_end_ns) << ", advised=" << PrettySize(native_reclaimed)
      << ") heaps. Managed heap utilization of " << static_cast<int>(100 * managed_utilization)
      << "%.";
}

bool Heap::IsValidObjectAddress(const mirror::Object* obj) const {
  // Note: we deliberately don't take the lock here, and mustn't test anything that would require
  // taking the lock.
  if (obj == nullptr) {
    return true;
  }
  return IsAligned<kObjectAlignment>(obj) && IsHeapAddress(obj);
}

bool Heap::IsNonDiscontinuousSpaceHeapAddress(const mirror::Object* obj) const {
  return FindContinuousSpaceFromObject(obj, true) != nullptr;
}

bool Heap::IsHeapAddress(const mirror::Object* obj) const {
  // TODO: This might not work for large objects.
  return FindSpaceFromObject(obj, true) != nullptr;
}

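// Returns whether |obj| is a live object. Checks the space live bitmaps first, then (optionally)
// the allocation and live stacks, retrying a few times because the stacks can be swapped while
// mutators run, and finally re-checks the bitmaps to close that race.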
bool Heap::IsLiveObjectLocked(mirror::Object* obj, bool search_allocation_stack,
                              bool search_live_stack, bool sorted) {
  if (UNLIKELY(!IsAligned<kObjectAlignment>(obj))) {
    return false;
  }
  if (bump_pointer_space_ != nullptr && bump_pointer_space_->HasAddress(obj)) {
    mirror::Class* klass = obj->GetClass();
    if (obj == klass) {
      // This case happens for java.lang.Class.
      return true;
    }
    return VerifyClassClass(klass) && IsLiveObjectLocked(klass);
  } else if (temp_space_ != nullptr && temp_space_->HasAddress(obj)) {
    return false;
  }
  space::ContinuousSpace* c_space = FindContinuousSpaceFromObject(obj, true);
  space::DiscontinuousSpace* d_space = NULL;
  if (c_space != nullptr) {
    if (c_space->GetLiveBitmap()->Test(obj)) {
      return true;
    }
  } else {
    d_space = FindDiscontinuousSpaceFromObject(obj, true);
    if (d_space != nullptr) {
      if (d_space->GetLiveObjects()->Test(obj)) {
        return true;
      }
    }
  }
  // This is covering the allocation/live stack swapping that is done without mutators suspended.
  for (size_t i = 0; i < (sorted ? 1 : 5); ++i) {
    if (i > 0) {
      NanoSleep(MsToNs(10));
    }
    if (search_allocation_stack) {
      if (sorted) {
        if (allocation_stack_->ContainsSorted(const_cast<mirror::Object*>(obj))) {
          return true;
        }
      } else if (allocation_stack_->Contains(const_cast<mirror::Object*>(obj))) {
        return true;
      }
    }

    if (search_live_stack) {
      if (sorted) {
        if (live_stack_->ContainsSorted(const_cast<mirror::Object*>(obj))) {
          return true;
        }
      } else if (live_stack_->Contains(const_cast<mirror::Object*>(obj))) {
        return true;
      }
    }
  }
  // We need to check the bitmaps again since there is a race where we mark something as live and
  // then clear the stack containing it.
  if (c_space != nullptr) {
    if (c_space->GetLiveBitmap()->Test(obj)) {
      return true;
    }
  } else {
    d_space = FindDiscontinuousSpaceFromObject(obj, true);
    if (d_space != nullptr && d_space->GetLiveObjects()->Test(obj)) {
      return true;
    }
  }
  return false;
}

void Heap::VerifyObjectImpl(mirror::Object* obj) {
  if (Thread::Current() == NULL ||
      Runtime::Current()->GetThreadList()->GetLockOwner() == Thread::Current()->GetTid()) {
    return;
  }
  VerifyObjectBody(obj);
}

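// Sanity check on a Class pointer: a well-formed Class's class is java.lang.Class, whose class is
// itself, so following the raw class field twice from |c| must reach a fixed point.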
bool Heap::VerifyClassClass(const mirror::Class* c) const {
  // Note: we don't use the accessors here as they have internal sanity checks that we don't want
  // to run.
  const byte* raw_addr =
      reinterpret_cast<const byte*>(c) + mirror::Object::ClassOffset().Int32Value();
  mirror::Class* c_c = reinterpret_cast<mirror::HeapReference<mirror::Class> const *>(raw_addr)->AsMirrorPtr();
  raw_addr = reinterpret_cast<const byte*>(c_c) + mirror::Object::ClassOffset().Int32Value();
  mirror::Class* c_c_c = reinterpret_cast<mirror::HeapReference<mirror::Class> const *>(raw_addr)->AsMirrorPtr();
  return c_c == c_c_c;
}

void Heap::DumpSpaces(std::ostream& stream) {
  for (const auto& space : continuous_spaces_) {
    accounting::SpaceBitmap* live_bitmap = space->GetLiveBitmap();
    accounting::SpaceBitmap* mark_bitmap = space->GetMarkBitmap();
    stream << space << " " << *space << "\n";
    if (live_bitmap != nullptr) {
      stream << live_bitmap << " " << *live_bitmap << "\n";
    }
    if (mark_bitmap != nullptr) {
      stream << mark_bitmap << " " << *mark_bitmap << "\n";
    }
  }
  for (const auto& space : discontinuous_spaces_) {
    stream << space << " " << *space << "\n";
  }
}

void Heap::VerifyObjectBody(mirror::Object* obj) {
  CHECK(IsAligned<kObjectAlignment>(obj)) << "Object isn't aligned: " << obj;
  // Ignore early dawn of the universe verifications.
  if (UNLIKELY(static_cast<size_t>(num_bytes_allocated_.Load()) < 10 * KB)) {
    return;
  }
  const byte* raw_addr = reinterpret_cast<const byte*>(obj) +
      mirror::Object::ClassOffset().Int32Value();
  mirror::Class* c = reinterpret_cast<mirror::HeapReference<mirror::Class> const *>(raw_addr)->AsMirrorPtr();
  if (UNLIKELY(c == NULL)) {
    LOG(FATAL) << "Null class in object: " << obj;
  } else if (UNLIKELY(!IsAligned<kObjectAlignment>(c))) {
    LOG(FATAL) << "Class isn't aligned: " << c << " in object: " << obj;
  }
  CHECK(VerifyClassClass(c));

  if (verify_object_mode_ > kVerifyAllFast) {
    // TODO: the bitmap tests below are racy if VerifyObjectBody is called without the
    //       heap_bitmap_lock_.
    if (!IsLiveObjectLocked(obj)) {
      DumpSpaces();
      LOG(FATAL) << "Object is dead: " << obj;
    }
    if (!IsLiveObjectLocked(c)) {
      LOG(FATAL) << "Class of object is dead: " << c << " in object: " << obj;
    }
  }
}

void Heap::VerificationCallback(mirror::Object* obj, void* arg) {
  DCHECK(obj != NULL);
  reinterpret_cast<Heap*>(arg)->VerifyObjectBody(obj);
}

void Heap::VerifyHeap() {
  ReaderMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
  GetLiveBitmap()->Walk(Heap::VerificationCallback, this);
}

void Heap::RecordFree(size_t freed_objects, size_t freed_bytes) {
  DCHECK_LE(freed_bytes, num_bytes_allocated_.Load());
  num_bytes_allocated_.FetchAndSub(freed_bytes);
  if (Runtime::Current()->HasStatsEnabled()) {
    RuntimeStats* thread_stats = Thread::Current()->GetStats();
    thread_stats->freed_objects += freed_objects;
    thread_stats->freed_bytes += freed_bytes;
    // TODO: Do this concurrently.
    RuntimeStats* global_stats = Runtime::Current()->GetStats();
    global_stats->freed_objects += freed_objects;
    global_stats->freed_bytes += freed_bytes;
  }
}

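// Allocation slow path, entered after the fast path fails: wait for any in-progress GC and retry,
// then work through the collection types in gc_plan_ (retrying the allocation after each), then
// retry with heap growth allowed, and finally force a collection that clears SoftReferences
// before throwing OutOfMemoryError. |klass| is passed by pointer so it can be updated if a moving
// GC relocates the Class while this thread is suspended.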
mirror::Object* Heap::AllocateInternalWithGc(Thread* self, AllocatorType allocator,
                                             size_t alloc_size, size_t* bytes_allocated,
                                             mirror::Class** klass) {
  mirror::Object* ptr = nullptr;
  bool was_default_allocator = allocator == GetCurrentAllocator();
  DCHECK(klass != nullptr);
  SirtRef<mirror::Class> sirt_klass(self, *klass);
  // The allocation failed. If the GC is running, block until it completes, and then retry the
  // allocation.
  collector::GcType last_gc = WaitForGcToComplete(self);
  if (last_gc != collector::kGcTypeNone) {
    // If we were the default allocator but the allocator changed while we were suspended,
    // abort the allocation.
    if (was_default_allocator && allocator != GetCurrentAllocator()) {
      *klass = sirt_klass.get();
      return nullptr;
    }
    // A GC was in progress and we blocked, retry allocation now that memory has been freed.
    ptr = TryToAllocate<true, false>(self, allocator, alloc_size, bytes_allocated);
  }

  // Loop through our different GC types and try to GC until we get enough free memory.
  for (collector::GcType gc_type : gc_plan_) {
    if (ptr != nullptr) {
      break;
    }
    // Attempt to run the collector, if we succeed, re-try the allocation.
    bool gc_ran =
        CollectGarbageInternal(gc_type, kGcCauseForAlloc, false) != collector::kGcTypeNone;
    if (was_default_allocator && allocator != GetCurrentAllocator()) {
      *klass = sirt_klass.get();
      return nullptr;
    }
    if (gc_ran) {
      // Did we free sufficient memory for the allocation to succeed?
      ptr = TryToAllocate<true, false>(self, allocator, alloc_size, bytes_allocated);
    }
  }
  // Allocations have failed after GCs; this is an exceptional state.
  if (ptr == nullptr) {
    // Try harder, growing the heap if necessary.
    ptr = TryToAllocate<true, true>(self, allocator, alloc_size, bytes_allocated);
  }
  if (ptr == nullptr) {
    // Most allocations should have succeeded by now, so the heap is really full, really fragmented,
    // or the requested size is really big. Do another GC, collecting SoftReferences this time. The
    // VM spec requires that all SoftReferences have been collected and cleared before throwing
    // OOME.
    VLOG(gc) << "Forcing collection of SoftReferences for " << PrettySize(alloc_size)
             << " allocation";
    // TODO: Run finalization, but this may cause more allocations to occur.
    // We don't need a WaitForGcToComplete here either.
    DCHECK(!gc_plan_.empty());
    CollectGarbageInternal(gc_plan_.back(), kGcCauseForAlloc, true);
    if (was_default_allocator && allocator != GetCurrentAllocator()) {
      *klass = sirt_klass.get();
      return nullptr;
    }
    ptr = TryToAllocate<true, true>(self, allocator, alloc_size, bytes_allocated);
    if (ptr == nullptr) {
      ThrowOutOfMemoryError(self, alloc_size, false);
    }
  }
  *klass = sirt_klass.get();
  return ptr;
}

void Heap::SetTargetHeapUtilization(float target) {
  DCHECK_GT(target, 0.0f);  // asserted in Java code
  DCHECK_LT(target, 1.0f);
  target_utilization_ = target;
}

size_t Heap::GetObjectsAllocated() const {
  size_t total = 0;
  for (space::AllocSpace* space : alloc_spaces_) {
    total += space->GetObjectsAllocated();
  }
  return total;
}

size_t Heap::GetObjectsAllocatedEver() const {
  return GetObjectsFreedEver() + GetObjectsAllocated();
}

size_t Heap::GetBytesAllocatedEver() const {
  return GetBytesFreedEver() + GetBytesAllocated();
}

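// Visitor used by Heap::CountInstances: for each class in |classes|, counts how many visited
// objects are instances of it, either exactly or via IsAssignableFrom depending on the flag.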
class InstanceCounter {
 public:
  InstanceCounter(const std::vector<mirror::Class*>& classes, bool use_is_assignable_from, uint64_t* counts)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      : classes_(classes), use_is_assignable_from_(use_is_assignable_from), counts_(counts) {
  }
  static void Callback(mirror::Object* obj, void* arg)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
    InstanceCounter* instance_counter = reinterpret_cast<InstanceCounter*>(arg);
    mirror::Class* instance_class = obj->GetClass();
    CHECK(instance_class != nullptr);
    for (size_t i = 0; i < instance_counter->classes_.size(); ++i) {
      if (instance_counter->use_is_assignable_from_) {
        if (instance_counter->classes_[i]->IsAssignableFrom(instance_class)) {
          ++instance_counter->counts_[i];
        }
      } else if (instance_class == instance_counter->classes_[i]) {
        ++instance_counter->counts_[i];
      }
    }
  }

 private:
  const std::vector<mirror::Class*>& classes_;
  bool use_is_assignable_from_;
  uint64_t* const counts_;
  DISALLOW_COPY_AND_ASSIGN(InstanceCounter);
};

void Heap::CountInstances(const std::vector<mirror::Class*>& classes, bool use_is_assignable_from,
                          uint64_t* counts) {
  // Can't do any GC in this function since this may move classes.
  Thread* self = Thread::Current();
  auto* old_cause = self->StartAssertNoThreadSuspension("CountInstances");
  InstanceCounter counter(classes, use_is_assignable_from, counts);
  WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
  VisitObjects(InstanceCounter::Callback, &counter);
  self->EndAssertNoThreadSuspension(old_cause);
}

class InstanceCollector {
 public:
  InstanceCollector(mirror::Class* c, int32_t max_count, std::vector<mirror::Object*>& instances)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      : class_(c), max_count_(max_count), instances_(instances) {
  }
  static void Callback(mirror::Object* obj, void* arg)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
    DCHECK(arg != nullptr);
    InstanceCollector* instance_collector = reinterpret_cast<InstanceCollector*>(arg);
    mirror::Class* instance_class = obj->GetClass();
    if (instance_class == instance_collector->class_) {
      if (instance_collector->max_count_ == 0 ||
          instance_collector->instances_.size() < instance_collector->max_count_) {
        instance_collector->instances_.push_back(obj);
      }
    }
  }

 private:
  mirror::Class* class_;
  uint32_t max_count_;
  std::vector<mirror::Object*>& instances_;
  DISALLOW_COPY_AND_ASSIGN(InstanceCollector);
};

void Heap::GetInstances(mirror::Class* c, int32_t max_count,
                        std::vector<mirror::Object*>& instances) {
  // Can't do any GC in this function since this may move classes.
  Thread* self = Thread::Current();
  auto* old_cause = self->StartAssertNoThreadSuspension("GetInstances");
  InstanceCollector collector(c, max_count, instances);
  WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
  VisitObjects(&InstanceCollector::Callback, &collector);
  self->EndAssertNoThreadSuspension(old_cause);
}

class ReferringObjectsFinder {
 public:
  ReferringObjectsFinder(mirror::Object* object, int32_t max_count,
                         std::vector<mirror::Object*>& referring_objects)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      : object_(object), max_count_(max_count), referring_objects_(referring_objects) {
  }

  static void Callback(mirror::Object* obj, void* arg)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
    reinterpret_cast<ReferringObjectsFinder*>(arg)->operator()(obj);
  }

  // For bitmap Visit.
  // TODO: Fix lock analysis to not use NO_THREAD_SAFETY_ANALYSIS, requires support for
  // annotalysis on visitors.
  void operator()(const mirror::Object* o) const NO_THREAD_SAFETY_ANALYSIS {
    collector::MarkSweep::VisitObjectReferences(const_cast<mirror::Object*>(o), *this, true);
  }

  // For MarkSweep::VisitObjectReferences.
  void operator()(mirror::Object* referrer, mirror::Object* object,
                  const MemberOffset&, bool) const {
    if (object == object_ && (max_count_ == 0 || referring_objects_.size() < max_count_)) {
      referring_objects_.push_back(referrer);
    }
  }

 private:
  mirror::Object* object_;
  uint32_t max_count_;
  std::vector<mirror::Object*>& referring_objects_;
  DISALLOW_COPY_AND_ASSIGN(ReferringObjectsFinder);
};

void Heap::GetReferringObjects(mirror::Object* o, int32_t max_count,
                               std::vector<mirror::Object*>& referring_objects) {
  // Can't do any GC in this function since this may move the object o.
  Thread* self = Thread::Current();
  auto* old_cause = self->StartAssertNoThreadSuspension("GetReferringObjects");
  ReferringObjectsFinder finder(o, max_count, referring_objects);
  WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
  VisitObjects(&ReferringObjectsFinder::Callback, &finder);
  self->EndAssertNoThreadSuspension(old_cause);
}

void Heap::CollectGarbage(bool clear_soft_references) {
  // Even if we waited for a GC we still need to do another GC since weaks allocated during the
  // last GC will not have necessarily been cleared.
  CollectGarbageInternal(gc_plan_.back(), kGcCauseExplicit, clear_soft_references);
}

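// Switch the heap to a different collector configuration at runtime. Waits until no GC is running
// (and, for copying transitions, until moving GC is not disabled), suspends all threads, compacts
// between the main space and the bump pointer space as needed, then installs the new collector
// type and logs how long the transition took and how much memory it saved.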
void Heap::TransitionCollector(CollectorType collector_type) {
  if (collector_type == collector_type_) {
    return;
  }
  VLOG(heap) << "TransitionCollector: " << static_cast<int>(collector_type_)
             << " -> " << static_cast<int>(collector_type);
  uint64_t start_time = NanoTime();
  uint32_t before_size  = GetTotalMemory();
  uint32_t before_allocated = num_bytes_allocated_.Load();
  ThreadList* tl = Runtime::Current()->GetThreadList();
  Thread* self = Thread::Current();
  ScopedThreadStateChange tsc(self, kWaitingPerformingGc);
  Locks::mutator_lock_->AssertNotHeld(self);
  const bool copying_transition =
      IsCompactingGC(background_collector_type_) || IsCompactingGC(post_zygote_collector_type_);
  // Busy wait until we can GC (StartGC can fail if we have a non-zero
  // disable_moving_gc_count_; this should rarely occur).
  for (;;) {
    {
      ScopedThreadStateChange tsc(self, kWaitingForGcToComplete);
      MutexLock mu(self, *gc_complete_lock_);
      // Ensure there is only one GC at a time.
      WaitForGcToCompleteLocked(self);
      // GC can be disabled if someone has used GetPrimitiveArrayCritical but not yet released it.
1216      if (!copying_transition || disable_moving_gc_count_ == 0) {
1217        // TODO: Not hard code in semi-space collector?
1218        collector_type_running_ = copying_transition ? kCollectorTypeSS : collector_type;
1219        break;
1220      }
1221    }
1222    usleep(1000);
1223  }
1224  tl->SuspendAll();
1225  PreGcRosAllocVerification(&semi_space_collector_->GetTimings());
1226  switch (collector_type) {
1227    case kCollectorTypeSS:
1228      // Fall-through.
1229    case kCollectorTypeGSS: {
1230      mprotect(temp_space_->Begin(), temp_space_->Capacity(), PROT_READ | PROT_WRITE);
1231      CHECK(main_space_ != nullptr);
1232      Compact(temp_space_, main_space_);
1233      DCHECK(allocator_mem_map_.get() == nullptr);
1234      allocator_mem_map_.reset(main_space_->ReleaseMemMap());
1235      madvise(main_space_->Begin(), main_space_->Size(), MADV_DONTNEED);
1236      // RemoveSpace does not delete the removed space.
1237      space::Space* old_space = main_space_;
1238      RemoveSpace(old_space);
1239      delete old_space;
1240      break;
1241    }
1242    case kCollectorTypeMS:
1243      // Fall through.
1244    case kCollectorTypeCMS: {
1245      if (IsCompactingGC(collector_type_)) {
1246        // TODO: Use mem-map from temp space?
1247        MemMap* mem_map = allocator_mem_map_.release();
1248        CHECK(mem_map != nullptr);
1249        size_t initial_size = kDefaultInitialSize;
1250        mprotect(mem_map->Begin(), initial_size, PROT_READ | PROT_WRITE);
1251        CHECK(main_space_ == nullptr);
1252        if (kUseRosAlloc) {
1253          main_space_ =
1254              space::RosAllocSpace::CreateFromMemMap(mem_map, "alloc space", kPageSize,
1255                                                     initial_size, mem_map->Size(),
1256                                                     mem_map->Size(), low_memory_mode_);
1257        } else {
1258          main_space_ =
1259              space::DlMallocSpace::CreateFromMemMap(mem_map, "alloc space", kPageSize,
1260                                                     initial_size, mem_map->Size(),
1261                                                     mem_map->Size());
1262        }
1263        main_space_->SetFootprintLimit(main_space_->Capacity());
1264        AddSpace(main_space_);
1265        Compact(main_space_, bump_pointer_space_);
1266      }
1267      break;
1268    }
1269    default: {
1270      LOG(FATAL) << "Attempted to transition to invalid collector type";
1271      break;
1272    }
1273  }
1274  ChangeCollector(collector_type);
1275  PostGcRosAllocVerification(&semi_space_collector_->GetTimings());
1276  tl->ResumeAll();
1277  // Can't call into java code with all threads suspended.
1278  EnqueueClearedReferences();
1279  uint64_t duration = NanoTime() - start_time;
1280  GrowForUtilization(collector::kGcTypeFull, duration);
1281  FinishGC(self, collector::kGcTypeFull);
1282  int32_t after_size = GetTotalMemory();
1283  int32_t delta_size = before_size - after_size;
1284  int32_t after_allocated = num_bytes_allocated_.Load();
1285  int32_t delta_allocated = before_allocated - after_allocated;
1286  const std::string saved_bytes_str =
1287      delta_size < 0 ? "-" + PrettySize(-delta_size) : PrettySize(delta_size);
1288  LOG(INFO) << "Heap transition to " << process_state_ << " took "
1289      << PrettyDuration(duration) << " " << PrettySize(before_size) << "->"
1290      << PrettySize(after_size) << " from " << PrettySize(delta_allocated) << " to "
1291      << PrettySize(delta_size) << " saved";
1292}
1293
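// ChangeCollector reconfigures the GC plan and the default allocator. Roughly, as encoded in the
// switch below:
//   SS / GSS -> non-concurrent, gc_plan_ = {Full},                  TLAB or bump pointer allocator
//   MS       -> non-concurrent, gc_plan_ = {Sticky, Partial, Full}, RosAlloc or DlMalloc allocator
//   CMS      -> concurrent,     gc_plan_ = {Sticky, Partial, Full}, RosAlloc or DlMalloc allocator
// For concurrent collectors, concurrent_start_bytes_ is lowered below the footprint limit so that
// a background GC starts before the heap is exhausted; otherwise it is effectively disabled.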
1294void Heap::ChangeCollector(CollectorType collector_type) {
1295  // TODO: Only do this with all mutators suspended to avoid races.
1296  if (collector_type != collector_type_) {
1297    collector_type_ = collector_type;
1298    gc_plan_.clear();
1299    switch (collector_type_) {
1300      case kCollectorTypeSS:
1301        // Fall-through.
1302      case kCollectorTypeGSS: {
1303        concurrent_gc_ = false;
1304        gc_plan_.push_back(collector::kGcTypeFull);
1305        if (use_tlab_) {
1306          ChangeAllocator(kAllocatorTypeTLAB);
1307        } else {
1308          ChangeAllocator(kAllocatorTypeBumpPointer);
1309        }
1310        break;
1311      }
1312      case kCollectorTypeMS: {
1313        concurrent_gc_ = false;
1314        gc_plan_.push_back(collector::kGcTypeSticky);
1315        gc_plan_.push_back(collector::kGcTypePartial);
1316        gc_plan_.push_back(collector::kGcTypeFull);
1317        ChangeAllocator(kUseRosAlloc ? kAllocatorTypeRosAlloc : kAllocatorTypeDlMalloc);
1318        break;
1319      }
1320      case kCollectorTypeCMS: {
1321        concurrent_gc_ = true;
1322        gc_plan_.push_back(collector::kGcTypeSticky);
1323        gc_plan_.push_back(collector::kGcTypePartial);
1324        gc_plan_.push_back(collector::kGcTypeFull);
1325        ChangeAllocator(kUseRosAlloc ? kAllocatorTypeRosAlloc : kAllocatorTypeDlMalloc);
1326        break;
1327      }
1328      default: {
1329        LOG(FATAL) << "Unimplemented";
1330      }
1331    }
1332    if (concurrent_gc_) {
1333      concurrent_start_bytes_ =
1334          std::max(max_allowed_footprint_, kMinConcurrentRemainingBytes) - kMinConcurrentRemainingBytes;
1335    } else {
1336      concurrent_start_bytes_ = std::numeric_limits<size_t>::max();
1337    }
1338  }
1339}
1340
1341// Special compacting collector which uses sub-optimal bin packing to reduce zygote space size.
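// Roughly how the bin packing works (illustrative): BuildBins walks the live bitmap of the space
// in address order and records every gap between consecutive live objects as a "bin"
// (size -> address) in a multimap. When the semi-space collector later forwards an object,
// MarkNonForwardedObject looks up the smallest bin that fits (lower_bound on the size), copies the
// object there, and re-inserts the leftover tail as a new, smaller bin. For example, forwarding a
// 24-byte object into a 40-byte bin at address p leaves a 16-byte bin at p + 24. Objects that fit
// in no bin are allocated at the end of the target space, growing the zygote space.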
1342class ZygoteCompactingCollector : public collector::SemiSpace {
1343 public:
1344  explicit ZygoteCompactingCollector(gc::Heap* heap) : SemiSpace(heap, "zygote collector") {
1345  }
1346
1347  void BuildBins(space::ContinuousSpace* space) {
1348    bin_live_bitmap_ = space->GetLiveBitmap();
1349    bin_mark_bitmap_ = space->GetMarkBitmap();
1350    BinContext context;
1351    context.prev_ = reinterpret_cast<uintptr_t>(space->Begin());
1352    context.collector_ = this;
1353    WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
1354    // Note: This requires traversing the space in increasing order of object addresses.
1355    bin_live_bitmap_->Walk(Callback, reinterpret_cast<void*>(&context));
1356    // Add the last bin which spans after the last object to the end of the space.
1357    AddBin(reinterpret_cast<uintptr_t>(space->End()) - context.prev_, context.prev_);
1358  }
1359
1360 private:
1361  struct BinContext {
1362    uintptr_t prev_;  // The end of the previous object.
1363    ZygoteCompactingCollector* collector_;
1364  };
1365  // Maps from bin sizes to locations.
1366  std::multimap<size_t, uintptr_t> bins_;
1367  // Live bitmap of the space which contains the bins.
1368  accounting::SpaceBitmap* bin_live_bitmap_;
1369  // Mark bitmap of the space which contains the bins.
1370  accounting::SpaceBitmap* bin_mark_bitmap_;
1371
1372  static void Callback(mirror::Object* obj, void* arg)
1373      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1374    DCHECK(arg != nullptr);
1375    BinContext* context = reinterpret_cast<BinContext*>(arg);
1376    ZygoteCompactingCollector* collector = context->collector_;
1377    uintptr_t object_addr = reinterpret_cast<uintptr_t>(obj);
1378    size_t bin_size = object_addr - context->prev_;
1379    // Add the bin spanning from the end of the previous object to the start of the current object.
1380    collector->AddBin(bin_size, context->prev_);
1381    context->prev_ = object_addr + RoundUp(obj->SizeOf(), kObjectAlignment);
1382  }
1383
1384  void AddBin(size_t size, uintptr_t position) {
1385    if (size != 0) {
1386      bins_.insert(std::make_pair(size, position));
1387    }
1388  }
1389
1390  virtual bool ShouldSweepSpace(space::ContinuousSpace* space) const {
1391    // Don't sweep any spaces, since the bin packing placed objects without going through the free
1392    // list allocator and its internal accounting is therefore no longer valid.
1393    return false;
1394  }
1395
1396  virtual mirror::Object* MarkNonForwardedObject(mirror::Object* obj)
1397      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_) {
1398    size_t object_size = RoundUp(obj->SizeOf(), kObjectAlignment);
1399    mirror::Object* forward_address;
1400    // Find the smallest bin that can hold obj (best fit).
1401    auto it = bins_.lower_bound(object_size);
1402    if (it == bins_.end()) {
1403      // No available space in the bins, place it in the target space instead (grows the zygote
1404      // space).
1405      size_t bytes_allocated;
1406      forward_address = to_space_->Alloc(self_, object_size, &bytes_allocated);
1407      if (to_space_live_bitmap_ != nullptr) {
1408        to_space_live_bitmap_->Set(forward_address);
1409      } else {
1410        GetHeap()->GetNonMovingSpace()->GetLiveBitmap()->Set(forward_address);
1411        GetHeap()->GetNonMovingSpace()->GetMarkBitmap()->Set(forward_address);
1412      }
1413    } else {
1414      size_t size = it->first;
1415      uintptr_t pos = it->second;
1416      bins_.erase(it);  // Erase the old bin which we replace with the new smaller bin.
1417      forward_address = reinterpret_cast<mirror::Object*>(pos);
1418      // Set the live and mark bits so that sweeping system weaks works properly.
1419      bin_live_bitmap_->Set(forward_address);
1420      bin_mark_bitmap_->Set(forward_address);
1421      DCHECK_GE(size, object_size);
1422      AddBin(size - object_size, pos + object_size);  // Add a new bin with the remaining space.
1423    }
1424    // Copy the object over to its new location.
1425    memcpy(reinterpret_cast<void*>(forward_address), obj, object_size);
1426    return forward_address;
1427  }
1428};
1429
1430void Heap::UnBindBitmaps() {
1431  for (const auto& space : GetContinuousSpaces()) {
1432    if (space->IsContinuousMemMapAllocSpace()) {
1433      space::ContinuousMemMapAllocSpace* alloc_space = space->AsContinuousMemMapAllocSpace();
1434      if (alloc_space->HasBoundBitmaps()) {
1435        alloc_space->UnBindBitmaps();
1436      }
1437    }
1438  }
1439}
1440
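// Rough outline of PreZygoteFork (illustrative): after a full GC, the bump pointer space is
// compacted onto the tail of the non-moving space using the bin-packing collector above, the old
// alloc space is converted into an immutable ZygoteSpace (with a card-cache mod-union table), the
// remainder becomes the new main_space_, and a fresh dlmalloc-backed non-moving space is created
// from post_zygote_non_moving_space_mem_map_. The intent is to keep the pages shared with forked
// children as dense and stable as possible.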
1441void Heap::PreZygoteFork() {
1442  CollectGarbageInternal(collector::kGcTypeFull, kGcCauseBackground, false);
1443  static Mutex zygote_creation_lock_("zygote creation lock", kZygoteCreationLock);
1444  Thread* self = Thread::Current();
1445  MutexLock mu(self, zygote_creation_lock_);
1446  // Check whether a zygote space has already been created.
1447  if (have_zygote_space_) {
1448    return;
1449  }
1450  VLOG(heap) << "Starting PreZygoteFork";
1451  // Trim the pages at the end of the non moving space.
1452  non_moving_space_->Trim();
1453  non_moving_space_->GetMemMap()->Protect(PROT_READ | PROT_WRITE);
1454  // Change the collector to the post zygote one.
1455  ChangeCollector(post_zygote_collector_type_);
1456  // TODO: Delete bump_pointer_space_ and temp_pointer_space_?
1457  if (semi_space_collector_ != nullptr) {
1458    // Temporarily disable rosalloc verification because the zygote
1459    // compaction will mess up the rosalloc internal metadata.
1460    ScopedDisableRosAllocVerification disable_rosalloc_verif(this);
1461    ZygoteCompactingCollector zygote_collector(this);
1462    zygote_collector.BuildBins(non_moving_space_);
1463    // Create a new bump pointer space which we will compact into.
1464    space::BumpPointerSpace target_space("zygote bump space", non_moving_space_->End(),
1465                                         non_moving_space_->Limit());
1466    // Compact the bump pointer space to a new zygote bump pointer space.
1467    temp_space_->GetMemMap()->Protect(PROT_READ | PROT_WRITE);
1468    zygote_collector.SetFromSpace(bump_pointer_space_);
1469    zygote_collector.SetToSpace(&target_space);
1470    zygote_collector.Run(kGcCauseCollectorTransition, false);
1471    CHECK(temp_space_->IsEmpty());
1472    total_objects_freed_ever_ += semi_space_collector_->GetFreedObjects();
1473    total_bytes_freed_ever_ += semi_space_collector_->GetFreedBytes();
1474    // Update the end and write out image.
1475    non_moving_space_->SetEnd(target_space.End());
1476    non_moving_space_->SetLimit(target_space.Limit());
1477    VLOG(heap) << "Zygote size " << non_moving_space_->Size() << " bytes";
1478  }
1479  // Save the old space so that we can remove it after we complete creating the zygote space.
1480  space::MallocSpace* old_alloc_space = non_moving_space_;
1481  // Turn the current alloc space into a zygote space and obtain the new alloc space composed of
1482  // the remaining available space.
1483  // Remove the old space before creating the zygote space since creating the zygote space sets
1484  // the old alloc space's bitmaps to nullptr.
1485  RemoveSpace(old_alloc_space);
1486  space::ZygoteSpace* zygote_space = old_alloc_space->CreateZygoteSpace("alloc space",
1487                                                                        low_memory_mode_,
1488                                                                        &main_space_);
1489  delete old_alloc_space;
1490  CHECK(zygote_space != nullptr) << "Failed creating zygote space";
1491  AddSpace(zygote_space, false);
1492  CHECK(main_space_ != nullptr);
1493  if (main_space_->IsRosAllocSpace()) {
1494    rosalloc_space_ = main_space_->AsRosAllocSpace();
1495  } else if (main_space_->IsDlMallocSpace()) {
1496    dlmalloc_space_ = main_space_->AsDlMallocSpace();
1497  }
1498  main_space_->SetFootprintLimit(main_space_->Capacity());
1499  AddSpace(main_space_);
1500  have_zygote_space_ = true;
1501  // Create the zygote space mod union table.
1502  accounting::ModUnionTable* mod_union_table =
1503      new accounting::ModUnionTableCardCache("zygote space mod-union table", this, zygote_space);
1504  CHECK(mod_union_table != nullptr) << "Failed to create zygote space mod-union table";
1505  AddModUnionTable(mod_union_table);
1506  // Reset the cumulative loggers since we now have a few additional timing phases.
1507  for (const auto& collector : garbage_collectors_) {
1508    collector->ResetCumulativeStatistics();
1509  }
1510  // Can't use RosAlloc for non moving space due to thread local buffers.
1511  // TODO: Non limited space for non-movable objects?
1512  MemMap* mem_map = post_zygote_non_moving_space_mem_map_.release();
1513  space::MallocSpace* new_non_moving_space =
1514      space::DlMallocSpace::CreateFromMemMap(mem_map, "Non moving dlmalloc space", kPageSize,
1515                                             2 * MB, mem_map->Size(), mem_map->Size());
1516  AddSpace(new_non_moving_space, false);
1517  CHECK(new_non_moving_space != nullptr) << "Failed to create new non-moving space";
1518  new_non_moving_space->SetFootprintLimit(new_non_moving_space->Capacity());
1519  non_moving_space_ = new_non_moving_space;
1520}
1521
1522void Heap::FlushAllocStack() {
1523  MarkAllocStackAsLive(allocation_stack_.get());
1524  allocation_stack_->Reset();
1525}
1526
1527void Heap::MarkAllocStack(accounting::SpaceBitmap* bitmap1,
1528                          accounting::SpaceBitmap* bitmap2,
1529                          accounting::ObjectSet* large_objects,
1530                          accounting::ObjectStack* stack) {
1531  DCHECK(bitmap1 != nullptr);
1532  DCHECK(bitmap2 != nullptr);
1533  mirror::Object** limit = stack->End();
1534  for (mirror::Object** it = stack->Begin(); it != limit; ++it) {
1535    const mirror::Object* obj = *it;
1536    DCHECK(obj != nullptr);
1537    if (bitmap1->HasAddress(obj)) {
1538      bitmap1->Set(obj);
1539    } else if (bitmap2->HasAddress(obj)) {
1540      bitmap2->Set(obj);
1541    } else {
1542      large_objects->Set(obj);
1543    }
1544  }
1545}
1546
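// After a semi-space GC the roles of bump_pointer_space_ and temp_space_ are exchanged so that new
// allocations go into the space that was just evacuated. Compact() below is the helper used by the
// collector transitions in TransitionCollector above.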
1547void Heap::SwapSemiSpaces() {
1548  // Swap the spaces so we allocate into the space which we just evacuated.
1549  std::swap(bump_pointer_space_, temp_space_);
1550}
1551
1552void Heap::Compact(space::ContinuousMemMapAllocSpace* target_space,
1553                   space::ContinuousMemMapAllocSpace* source_space) {
1554  CHECK(kMovingCollector);
1555  CHECK_NE(target_space, source_space) << "In-place compaction currently unsupported";
1556  if (target_space != source_space) {
1557    semi_space_collector_->SetFromSpace(source_space);
1558    semi_space_collector_->SetToSpace(target_space);
1559    semi_space_collector_->Run(kGcCauseCollectorTransition, false);
1560  }
1561}
1562
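// Collector selection in CollectGarbageInternal (rough summary): if the configured collector is a
// compacting one (the allocator is then a bump pointer or TLAB allocator), the semi-space collector
// runs and the GC is treated as a full GC; otherwise the mark-sweep variant whose concurrency
// matches concurrent_gc_ and whose type matches the requested gc_type (sticky, partial or full) is
// picked from garbage_collectors_.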
1563collector::GcType Heap::CollectGarbageInternal(collector::GcType gc_type, GcCause gc_cause,
1564                                               bool clear_soft_references) {
1565  Thread* self = Thread::Current();
1566  Runtime* runtime = Runtime::Current();
1567  // If the heap can't run the GC, silently fail and return that no GC was run.
1568  switch (gc_type) {
1569    case collector::kGcTypePartial: {
1570      if (!have_zygote_space_) {
1571        return collector::kGcTypeNone;
1572      }
1573      break;
1574    }
1575    default: {
1576      // Other GC types have no special preconditions that would prevent them from running. The
1577      // main case here is the full GC.
1578    }
1579  }
1580  ScopedThreadStateChange tsc(self, kWaitingPerformingGc);
1581  Locks::mutator_lock_->AssertNotHeld(self);
1582  if (self->IsHandlingStackOverflow()) {
1583    LOG(WARNING) << "Performing GC on a thread that is handling a stack overflow.";
1584  }
1585  bool compacting_gc;
1586  {
1587    gc_complete_lock_->AssertNotHeld(self);
1588    ScopedThreadStateChange tsc(self, kWaitingForGcToComplete);
1589    MutexLock mu(self, *gc_complete_lock_);
1590    // Ensure there is only one GC at a time.
1591    WaitForGcToCompleteLocked(self);
1592    compacting_gc = IsCompactingGC(collector_type_);
1593    // Moving GC can be disabled by an outstanding GetPrimitiveArrayCritical.
1594    if (compacting_gc && disable_moving_gc_count_ != 0) {
1595      LOG(WARNING) << "Skipping GC due to disable moving GC count " << disable_moving_gc_count_;
1596      return collector::kGcTypeNone;
1597    }
1598    collector_type_running_ = collector_type_;
1599  }
1600
1601  if (gc_cause == kGcCauseForAlloc && runtime->HasStatsEnabled()) {
1602    ++runtime->GetStats()->gc_for_alloc_count;
1603    ++self->GetStats()->gc_for_alloc_count;
1604  }
1605  uint64_t gc_start_time_ns = NanoTime();
1606  uint64_t gc_start_size = GetBytesAllocated();
1607  // Approximate allocation rate in bytes / second.
1608  uint64_t ms_delta = NsToMs(gc_start_time_ns - last_gc_time_ns_);
1609  // Back to back GCs can cause 0 ms of wait time in between GC invocations.
1610  if (LIKELY(ms_delta != 0)) {
1611    allocation_rate_ = ((gc_start_size - last_gc_size_) * 1000) / ms_delta;
1612    VLOG(heap) << "Allocation rate: " << PrettySize(allocation_rate_) << "/s";
1613  }
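  // Example (hypothetical numbers): if 10 MB were allocated since the last GC and 500 ms have
  // passed, allocation_rate_ becomes (10 MB * 1000) / 500 = 20 MB/s. GrowForUtilization() later
  // uses this rate to decide how early the next concurrent GC must start.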
1614
1615  DCHECK_LT(gc_type, collector::kGcTypeMax);
1616  DCHECK_NE(gc_type, collector::kGcTypeNone);
1617
1618  collector::GarbageCollector* collector = nullptr;
1619  // TODO: Clean this up.
1620  if (compacting_gc) {
1621    DCHECK(current_allocator_ == kAllocatorTypeBumpPointer ||
1622           current_allocator_ == kAllocatorTypeTLAB);
1623    gc_type = semi_space_collector_->GetGcType();
1624    CHECK(temp_space_->IsEmpty());
1625    semi_space_collector_->SetFromSpace(bump_pointer_space_);
1626    semi_space_collector_->SetToSpace(temp_space_);
1627    mprotect(temp_space_->Begin(), temp_space_->Capacity(), PROT_READ | PROT_WRITE);
1628    collector = semi_space_collector_;
1629    gc_type = collector::kGcTypeFull;
1630  } else if (current_allocator_ == kAllocatorTypeRosAlloc ||
1631      current_allocator_ == kAllocatorTypeDlMalloc) {
1632    for (const auto& cur_collector : garbage_collectors_) {
1633      if (cur_collector->IsConcurrent() == concurrent_gc_ &&
1634          cur_collector->GetGcType() == gc_type) {
1635        collector = cur_collector;
1636        break;
1637      }
1638    }
1639  } else {
1640    LOG(FATAL) << "Invalid current allocator " << current_allocator_;
1641  }
1642  CHECK(collector != nullptr)
1643      << "Could not find garbage collector with concurrent=" << concurrent_gc_
1644      << " and type=" << gc_type;
1645  ATRACE_BEGIN(StringPrintf("%s %s GC", PrettyCause(gc_cause), collector->GetName()).c_str());
1646  collector->Run(gc_cause, clear_soft_references);
1647  total_objects_freed_ever_ += collector->GetFreedObjects();
1648  total_bytes_freed_ever_ += collector->GetFreedBytes();
1649  // Enqueue cleared references.
1650  EnqueueClearedReferences();
1651  // Grow the heap so that we know when to perform the next GC.
1652  GrowForUtilization(gc_type, collector->GetDurationNs());
1653  if (CareAboutPauseTimes()) {
1654    const size_t duration = collector->GetDurationNs();
1655    std::vector<uint64_t> pauses = collector->GetPauseTimes();
1656    // GC for alloc pauses the allocating thread, so consider it as a pause.
1657    bool was_slow = duration > long_gc_log_threshold_ ||
1658        (gc_cause == kGcCauseForAlloc && duration > long_pause_log_threshold_);
1659    if (!was_slow) {
1660      for (uint64_t pause : pauses) {
1661        was_slow = was_slow || pause > long_pause_log_threshold_;
1662      }
1663    }
1664    if (was_slow) {
1665      const size_t percent_free = GetPercentFree();
1666      const size_t current_heap_size = GetBytesAllocated();
1667      const size_t total_memory = GetTotalMemory();
1668      std::ostringstream pause_string;
1669      for (size_t i = 0; i < pauses.size(); ++i) {
1670        pause_string << PrettyDuration((pauses[i] / 1000) * 1000)
1671                     << ((i != pauses.size() - 1) ? ", " : "");
1672      }
1673      LOG(INFO) << gc_cause << " " << collector->GetName()
1674                << " GC freed " << collector->GetFreedObjects() << "("
1675                << PrettySize(collector->GetFreedBytes()) << ") AllocSpace objects, "
1676                << collector->GetFreedLargeObjects() << "("
1677                << PrettySize(collector->GetFreedLargeObjectBytes()) << ") LOS objects, "
1678                << percent_free << "% free, " << PrettySize(current_heap_size) << "/"
1679                << PrettySize(total_memory) << ", paused " << pause_string.str()
1680                << " total " << PrettyDuration((duration / 1000) * 1000);
1681      if (VLOG_IS_ON(heap)) {
1682        LOG(INFO) << Dumpable<TimingLogger>(collector->GetTimings());
1683      }
1684    }
1685  }
1686  FinishGC(self, gc_type);
1687  ATRACE_END();
1688
1689  // Inform DDMS that a GC completed.
1690  Dbg::GcDidFinish();
1691  return gc_type;
1692}
1693
1694void Heap::FinishGC(Thread* self, collector::GcType gc_type) {
1695  MutexLock mu(self, *gc_complete_lock_);
1696  collector_type_running_ = kCollectorTypeNone;
1697  if (gc_type != collector::kGcTypeNone) {
1698    last_gc_type_ = gc_type;
1699  }
1700  // Wake anyone who may have been waiting for the GC to complete.
1701  gc_complete_cond_->Broadcast(self);
1702}
1703
1704static mirror::Object* RootMatchesObjectVisitor(mirror::Object* root, void* arg,
1705                                                uint32_t /*thread_id*/, RootType /*root_type*/) {
1706  mirror::Object* obj = reinterpret_cast<mirror::Object*>(arg);
1707  if (root == obj) {
1708    LOG(INFO) << "Object " << obj << " is a root";
1709  }
1710  return root;
1711}
1712
1713class ScanVisitor {
1714 public:
1715  void operator()(const mirror::Object* obj) const {
1716    LOG(ERROR) << "Would have rescanned object " << obj;
1717  }
1718};
1719
1720// Verify a reference from an object.
1721class VerifyReferenceVisitor {
1722 public:
1723  explicit VerifyReferenceVisitor(Heap* heap)
1724      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_)
1725      : heap_(heap), failed_(false) {}
1726
1727  bool Failed() const {
1728    return failed_;
1729  }
1730
1731  // TODO: Fix lock analysis to not use NO_THREAD_SAFETY_ANALYSIS, requires support for smarter
1732  // analysis on visitors.
1733  void operator()(mirror::Object* obj, mirror::Object* ref,
1734                  const MemberOffset& offset, bool /* is_static */) const
1735      NO_THREAD_SAFETY_ANALYSIS {
1736    if (ref == nullptr || IsLive(ref)) {
1737      // Verify that the reference is live.
1738      return;
1739    }
1740    if (!failed_) {
1741      // Print the message only on the first failure to prevent spam.
1742      LOG(ERROR) << "!!!!!!!!!!!!!!Heap corruption detected!!!!!!!!!!!!!!!!!!!";
1743      failed_ = true;
1744    }
1745    if (obj != nullptr) {
1746      accounting::CardTable* card_table = heap_->GetCardTable();
1747      accounting::ObjectStack* alloc_stack = heap_->allocation_stack_.get();
1748      accounting::ObjectStack* live_stack = heap_->live_stack_.get();
1749      byte* card_addr = card_table->CardFromAddr(obj);
1750      LOG(ERROR) << "Object " << obj << " references dead object " << ref << " at offset "
1751                 << offset << "\n card value = " << static_cast<int>(*card_addr);
1752      if (heap_->IsValidObjectAddress(obj->GetClass())) {
1753        LOG(ERROR) << "Obj type " << PrettyTypeOf(obj);
1754      } else {
1755        LOG(ERROR) << "Object " << obj << " class(" << obj->GetClass() << ") not a heap address";
1756      }
1757
1758      // Attempt to find the class among the recently freed objects.
1759      space::ContinuousSpace* ref_space = heap_->FindContinuousSpaceFromObject(ref, true);
1760      if (ref_space != nullptr && ref_space->IsMallocSpace()) {
1761        space::MallocSpace* space = ref_space->AsMallocSpace();
1762        mirror::Class* ref_class = space->FindRecentFreedObject(ref);
1763        if (ref_class != nullptr) {
1764          LOG(ERROR) << "Reference " << ref << " found as a recently freed object with class "
1765                     << PrettyClass(ref_class);
1766        } else {
1767          LOG(ERROR) << "Reference " << ref << " not found as a recently freed object";
1768        }
1769      }
1770
1771      if (ref->GetClass() != nullptr && heap_->IsValidObjectAddress(ref->GetClass()) &&
1772          ref->GetClass()->IsClass()) {
1773        LOG(ERROR) << "Ref type " << PrettyTypeOf(ref);
1774      } else {
1775        LOG(ERROR) << "Ref " << ref << " class(" << ref->GetClass()
1776                   << ") is not a valid heap address";
1777      }
1778
1779      card_table->CheckAddrIsInCardTable(reinterpret_cast<const byte*>(obj));
1780      void* cover_begin = card_table->AddrFromCard(card_addr);
1781      void* cover_end = reinterpret_cast<void*>(reinterpret_cast<size_t>(cover_begin) +
1782          accounting::CardTable::kCardSize);
1783      LOG(ERROR) << "Card " << reinterpret_cast<void*>(card_addr) << " covers " << cover_begin
1784          << "-" << cover_end;
1785      accounting::SpaceBitmap* bitmap = heap_->GetLiveBitmap()->GetContinuousSpaceBitmap(obj);
1786
1787      if (bitmap == nullptr) {
1788        LOG(ERROR) << "Object " << obj << " has no bitmap";
1789        if (!heap_->VerifyClassClass(obj->GetClass())) {
1790          LOG(ERROR) << "Object " << obj << " failed class verification!";
1791        }
1792      } else {
1793        // Print out how the object is live.
1794        if (bitmap->Test(obj)) {
1795          LOG(ERROR) << "Object " << obj << " found in live bitmap";
1796        }
1797        if (alloc_stack->Contains(const_cast<mirror::Object*>(obj))) {
1798          LOG(ERROR) << "Object " << obj << " found in allocation stack";
1799        }
1800        if (live_stack->Contains(const_cast<mirror::Object*>(obj))) {
1801          LOG(ERROR) << "Object " << obj << " found in live stack";
1802        }
1803        if (alloc_stack->Contains(const_cast<mirror::Object*>(ref))) {
1804          LOG(ERROR) << "Ref " << ref << " found in allocation stack";
1805        }
1806        if (live_stack->Contains(const_cast<mirror::Object*>(ref))) {
1807          LOG(ERROR) << "Ref " << ref << " found in live stack";
1808        }
1809        // Attempt to see if the card table missed the reference.
1810        ScanVisitor scan_visitor;
1811        byte* byte_cover_begin = reinterpret_cast<byte*>(card_table->AddrFromCard(card_addr));
1812        card_table->Scan(bitmap, byte_cover_begin,
1813                         byte_cover_begin + accounting::CardTable::kCardSize, scan_visitor);
1814      }
1815
1816      // Search to see if any of the roots reference our object.
1817      void* arg = const_cast<void*>(reinterpret_cast<const void*>(obj));
1818      Runtime::Current()->VisitRoots(&RootMatchesObjectVisitor, arg, false, false);
1819
1820      // Search to see if any of the roots reference our reference.
1821      arg = const_cast<void*>(reinterpret_cast<const void*>(ref));
1822      Runtime::Current()->VisitRoots(&RootMatchesObjectVisitor, arg, false, false);
1823    } else {
1824      LOG(ERROR) << "Root " << ref << " is dead with type " << PrettyTypeOf(ref);
1825    }
1826  }
1827
1828  bool IsLive(mirror::Object* obj) const NO_THREAD_SAFETY_ANALYSIS {
1829    return heap_->IsLiveObjectLocked(obj, true, false, true);
1830  }
1831
1832  static mirror::Object* VerifyRoots(mirror::Object* root, void* arg, uint32_t /*thread_id*/,
1833                                     RootType /*root_type*/) {
1834    VerifyReferenceVisitor* visitor = reinterpret_cast<VerifyReferenceVisitor*>(arg);
1835    (*visitor)(nullptr, root, MemberOffset(0), true);
1836    return root;
1837  }
1838
1839 private:
1840  Heap* const heap_;
1841  mutable bool failed_;
1842};
1843
1844// Verify all references within an object, for use with HeapBitmap::Visit.
1845class VerifyObjectVisitor {
1846 public:
1847  explicit VerifyObjectVisitor(Heap* heap) : heap_(heap), failed_(false) {}
1848
1849  void operator()(mirror::Object* obj) const
1850      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
1851    // Note: we only verify the references held by obj, not obj itself; obj must be live, or we
1852    // would not have found it in the live bitmap.
1853    VerifyReferenceVisitor visitor(heap_);
1854    // The class doesn't count as a reference, but we should verify it anyway.
1855    collector::MarkSweep::VisitObjectReferences(obj, visitor, true);
1856    if (obj->GetClass()->IsReferenceClass()) {
1857      visitor(obj, heap_->GetReferenceReferent(obj), MemberOffset(0), false);
1858    }
1859    failed_ = failed_ || visitor.Failed();
1860  }
1861
1862  static void VisitCallback(mirror::Object* obj, void* arg)
1863      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
1864    VerifyObjectVisitor* visitor = reinterpret_cast<VerifyObjectVisitor*>(arg);
1865    visitor->operator()(obj);
1866  }
1867
1868  bool Failed() const {
1869    return failed_;
1870  }
1871
1872 private:
1873  Heap* const heap_;
1874  mutable bool failed_;
1875};
1876
1877// Must do this with mutators suspended since we are directly accessing the allocation stacks.
1878bool Heap::VerifyHeapReferences() {
1879  Locks::mutator_lock_->AssertExclusiveHeld(Thread::Current());
1880  // Sort the allocation stacks so that we can binary search them efficiently.
1881  allocation_stack_->Sort();
1882  live_stack_->Sort();
1883  VerifyObjectVisitor visitor(this);
1884  // Verify objects in the allocation stack since these will be objects which were:
1885  // 1. Allocated prior to the GC (pre GC verification).
1886  // 2. Allocated during the GC (pre sweep GC verification).
1887  // We don't want to verify the objects in the live stack since they themselves may be
1888  // pointing to dead objects if they are not reachable.
1889  VisitObjects(VerifyObjectVisitor::VisitCallback, &visitor);
1890  // Verify the roots:
1891  Runtime::Current()->VisitRoots(VerifyReferenceVisitor::VerifyRoots, &visitor, false, false);
1892  if (visitor.Failed()) {
1893    // Dump mod-union tables.
1894    for (const auto& table_pair : mod_union_tables_) {
1895      accounting::ModUnionTable* mod_union_table = table_pair.second;
1896      mod_union_table->Dump(LOG(ERROR) << mod_union_table->GetName() << ": ");
1897    }
1898    DumpSpaces();
1899    return false;
1900  }
1901  return true;
1902}
1903
1904class VerifyReferenceCardVisitor {
1905 public:
1906  VerifyReferenceCardVisitor(Heap* heap, bool* failed)
1907      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_,
1908                            Locks::heap_bitmap_lock_)
1909      : heap_(heap), failed_(failed) {
1910  }
1911
1912  // TODO: Fix lock analysis to not use NO_THREAD_SAFETY_ANALYSIS, requires support for
1913  // annotalysis on visitors.
1914  void operator()(mirror::Object* obj, mirror::Object* ref, const MemberOffset& offset,
1915                  bool is_static) const NO_THREAD_SAFETY_ANALYSIS {
1916    // Filter out class references since changing an object's class does not mark the card as dirty.
1917    // Also handles large objects, since the only reference they hold is a class reference.
1918    if (ref != NULL && !ref->IsClass()) {
1919      accounting::CardTable* card_table = heap_->GetCardTable();
1920      // If the object references something in the live stack (other than its class), then its
1921      // card must be dirty.
1922      if (!card_table->AddrIsInCardTable(obj)) {
1923        LOG(ERROR) << "Object " << obj << " is not in the address range of the card table";
1924        *failed_ = true;
1925      } else if (!card_table->IsDirty(obj)) {
1926        // TODO: Check mod-union tables.
1927        // The card should be either kCardDirty if it got re-dirtied after we aged it, or
1928        // kCardDirty - 1 if it didn't get touched since we aged it.
1929        accounting::ObjectStack* live_stack = heap_->live_stack_.get();
1930        if (live_stack->ContainsSorted(const_cast<mirror::Object*>(ref))) {
1931          if (live_stack->ContainsSorted(const_cast<mirror::Object*>(obj))) {
1932            LOG(ERROR) << "Object " << obj << " found in live stack";
1933          }
1934          if (heap_->GetLiveBitmap()->Test(obj)) {
1935            LOG(ERROR) << "Object " << obj << " found in live bitmap";
1936          }
1937          LOG(ERROR) << "Object " << obj << " " << PrettyTypeOf(obj)
1938                    << " references " << ref << " " << PrettyTypeOf(ref) << " in live stack";
1939
1940          // Print which field of the object holds the offending reference.
1941          if (!obj->IsObjectArray()) {
1942            mirror::Class* klass = is_static ? obj->AsClass() : obj->GetClass();
1943            CHECK(klass != NULL);
1944            mirror::ObjectArray<mirror::ArtField>* fields = is_static ? klass->GetSFields()
1945                                                                      : klass->GetIFields();
1946            CHECK(fields != NULL);
1947            for (int32_t i = 0; i < fields->GetLength(); ++i) {
1948              mirror::ArtField* cur = fields->Get(i);
1949              if (cur->GetOffset().Int32Value() == offset.Int32Value()) {
1950                LOG(ERROR) << (is_static ? "Static " : "") << "field in the live stack is "
1951                          << PrettyField(cur);
1952                break;
1953              }
1954            }
1955          } else {
1956            mirror::ObjectArray<mirror::Object>* object_array =
1957                obj->AsObjectArray<mirror::Object>();
1958            for (int32_t i = 0; i < object_array->GetLength(); ++i) {
1959              if (object_array->Get(i) == ref) {
1960                LOG(ERROR) << (is_static ? "Static " : "") << "obj[" << i << "] = ref";
1961              }
1962            }
1963          }
1964
1965          *failed_ = true;
1966        }
1967      }
1968    }
1969  }
1970
1971 private:
1972  Heap* const heap_;
1973  bool* const failed_;
1974};
1975
1976class VerifyLiveStackReferences {
1977 public:
1978  explicit VerifyLiveStackReferences(Heap* heap)
1979      : heap_(heap),
1980        failed_(false) {}
1981
1982  void operator()(mirror::Object* obj) const
1983      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
1984    VerifyReferenceCardVisitor visitor(heap_, const_cast<bool*>(&failed_));
1985    collector::MarkSweep::VisitObjectReferences(const_cast<mirror::Object*>(obj), visitor, true);
1986  }
1987
1988  bool Failed() const {
1989    return failed_;
1990  }
1991
1992 private:
1993  Heap* const heap_;
1994  bool failed_;
1995};
1996
1997bool Heap::VerifyMissingCardMarks() {
1998  Locks::mutator_lock_->AssertExclusiveHeld(Thread::Current());
1999
2000  // We need to sort the live stack since we binary search it.
2001  live_stack_->Sort();
2002  VerifyLiveStackReferences visitor(this);
2003  GetLiveBitmap()->Visit(visitor);
2004
2005  // We can verify objects in the live stack since none of these should reference dead objects.
2006  for (mirror::Object** it = live_stack_->Begin(); it != live_stack_->End(); ++it) {
2007    visitor(*it);
2008  }
2009
2010  if (visitor.Failed()) {
2011    DumpSpaces();
2012    return false;
2013  }
2014  return true;
2015}
2016
2017void Heap::SwapStacks() {
2018  allocation_stack_.swap(live_stack_);
2019}
2020
2021accounting::ModUnionTable* Heap::FindModUnionTableFromSpace(space::Space* space) {
2022  auto it = mod_union_tables_.find(space);
2023  if (it == mod_union_tables_.end()) {
2024    return nullptr;
2025  }
2026  return it->second;
2027}
2028
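// Card handling before a GC (rough summary): spaces covered by a mod-union table (image and zygote
// spaces) have their dirty cards recorded in the table and cleared, while ordinary alloc spaces
// have their cards "aged" in place so the collector can tell pre-GC dirty cards apart from cards
// dirtied during the GC. Bump pointer spaces are skipped here.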
2029void Heap::ProcessCards(TimingLogger& timings) {
2030  // Clear cards and keep track of cards cleared in the mod-union table.
2031  for (const auto& space : continuous_spaces_) {
2032    accounting::ModUnionTable* table = FindModUnionTableFromSpace(space);
2033    if (table != nullptr) {
2034      const char* name = space->IsZygoteSpace() ? "ZygoteModUnionClearCards" :
2035          "ImageModUnionClearCards";
2036      TimingLogger::ScopedSplit split(name, &timings);
2037      table->ClearCards();
2038    } else if (space->GetType() != space::kSpaceTypeBumpPointerSpace) {
2039      TimingLogger::ScopedSplit split("AllocSpaceClearCards", &timings);
2040      // No mod-union table for the AllocSpace. Age the cards so that the GC knows that these
2041      // cards were dirty before the GC started.
2042      // TODO: We don't need to use an atomic operation here.
2043      // The race only leaves a card either aged or unaged. Since we checkpoint the roots first
2044      // and scan / update the mod-union tables afterwards, we will always scan one of the two
2045      // states; if we end up with the unaged card, we scan it during the pause.
2046      card_table_->ModifyCardsAtomic(space->Begin(), space->End(), AgeCardVisitor(), VoidFunctor());
2047    }
2048  }
2049}
2050
2051static mirror::Object* IdentityRootCallback(mirror::Object* obj, void*, uint32_t, RootType) {
2052  return obj;
2053}
2054
2055void Heap::PreGcVerification(collector::GarbageCollector* gc) {
2056  ThreadList* thread_list = Runtime::Current()->GetThreadList();
2057  Thread* self = Thread::Current();
2058
2059  if (verify_pre_gc_heap_) {
2060    thread_list->SuspendAll();
2061    {
2062      ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
2063      if (!VerifyHeapReferences()) {
2064        LOG(FATAL) << "Pre " << gc->GetName() << " heap verification failed";
2065      }
2066    }
2067    thread_list->ResumeAll();
2068  }
2069
2070  // Check that all objects which reference things in the live stack are on dirty cards.
2071  if (verify_missing_card_marks_) {
2072    thread_list->SuspendAll();
2073    {
2074      ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
2075      SwapStacks();
2076      // Sort the live stack so that we can quickly binary search it later.
2077      if (!VerifyMissingCardMarks()) {
2078        LOG(FATAL) << "Pre " << gc->GetName() << " missing card mark verification failed";
2079      }
2080      SwapStacks();
2081    }
2082    thread_list->ResumeAll();
2083  }
2084
2085  if (verify_mod_union_table_) {
2086    thread_list->SuspendAll();
2087    ReaderMutexLock reader_lock(self, *Locks::heap_bitmap_lock_);
2088    for (const auto& table_pair : mod_union_tables_) {
2089      accounting::ModUnionTable* mod_union_table = table_pair.second;
2090      mod_union_table->UpdateAndMarkReferences(IdentityRootCallback, nullptr);
2091      mod_union_table->Verify();
2092    }
2093    thread_list->ResumeAll();
2094  }
2095}
2096
2097void Heap::PreSweepingGcVerification(collector::GarbageCollector* gc) {
2098  // Called before sweeping occurs since we want to make sure we are not going to reclaim any
2099  // reachable objects.
2100  if (verify_post_gc_heap_) {
2101    Thread* self = Thread::Current();
2102    CHECK_NE(self->GetState(), kRunnable);
2103    {
2104      WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
2105      // Swapping bound bitmaps does nothing.
2106      gc->SwapBitmaps();
2107      if (!VerifyHeapReferences()) {
2108        LOG(FATAL) << "Pre sweeping " << gc->GetName() << " GC verification failed";
2109      }
2110      gc->SwapBitmaps();
2111    }
2112  }
2113}
2114
2115void Heap::PostGcVerification(collector::GarbageCollector* gc) {
2116  if (verify_system_weaks_) {
2117    Thread* self = Thread::Current();
2118    ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
2119    collector::MarkSweep* mark_sweep = down_cast<collector::MarkSweep*>(gc);
2120    mark_sweep->VerifySystemWeaks();
2121  }
2122}
2123
2124void Heap::PreGcRosAllocVerification(TimingLogger* timings) {
2125  if (verify_pre_gc_rosalloc_) {
2126    TimingLogger::ScopedSplit split("PreGcRosAllocVerification", timings);
2127    for (const auto& space : continuous_spaces_) {
2128      if (space->IsRosAllocSpace()) {
2129        VLOG(heap) << "PreGcRosAllocVerification : " << space->GetName();
2130        space::RosAllocSpace* rosalloc_space = space->AsRosAllocSpace();
2131        rosalloc_space->Verify();
2132      }
2133    }
2134  }
2135}
2136
2137void Heap::PostGcRosAllocVerification(TimingLogger* timings) {
2138  if (verify_post_gc_rosalloc_) {
2139    TimingLogger::ScopedSplit split("PostGcRosAllocVerification", timings);
2140    for (const auto& space : continuous_spaces_) {
2141      if (space->IsRosAllocSpace()) {
2142        VLOG(heap) << "PostGcRosAllocVerification : " << space->GetName();
2143        space::RosAllocSpace* rosalloc_space = space->AsRosAllocSpace();
2144        rosalloc_space->Verify();
2145      }
2146    }
2147  }
2148}
2149
2150collector::GcType Heap::WaitForGcToComplete(Thread* self) {
2151  ScopedThreadStateChange tsc(self, kWaitingForGcToComplete);
2152  MutexLock mu(self, *gc_complete_lock_);
2153  return WaitForGcToCompleteLocked(self);
2154}
2155
2156collector::GcType Heap::WaitForGcToCompleteLocked(Thread* self) {
2157  collector::GcType last_gc_type = collector::kGcTypeNone;
2158  uint64_t wait_start = NanoTime();
2159  while (collector_type_running_ != kCollectorTypeNone) {
2160    ATRACE_BEGIN("GC: Wait For Completion");
2161    // We must wait, change thread state then sleep on gc_complete_cond_;
2162    gc_complete_cond_->Wait(self);
2163    last_gc_type = last_gc_type_;
2164    ATRACE_END();
2165  }
2166  uint64_t wait_time = NanoTime() - wait_start;
2167  total_wait_time_ += wait_time;
2168  if (wait_time > long_pause_log_threshold_) {
2169    LOG(INFO) << "WaitForGcToComplete blocked for " << PrettyDuration(wait_time);
2170  }
2171  return last_gc_type;
2172}
2173
2174void Heap::DumpForSigQuit(std::ostream& os) {
2175  os << "Heap: " << GetPercentFree() << "% free, " << PrettySize(GetBytesAllocated()) << "/"
2176     << PrettySize(GetTotalMemory()) << "; " << GetObjectsAllocated() << " objects\n";
2177  DumpGcPerformanceInfo(os);
2178}
2179
2180size_t Heap::GetPercentFree() {
2181  return static_cast<size_t>(100.0f * static_cast<float>(GetFreeMemory()) / GetTotalMemory());
2182}
2183
2184void Heap::SetIdealFootprint(size_t max_allowed_footprint) {
2185  if (max_allowed_footprint > GetMaxMemory()) {
2186    VLOG(gc) << "Clamp target GC heap from " << PrettySize(max_allowed_footprint) << " to "
2187             << PrettySize(GetMaxMemory());
2188    max_allowed_footprint = GetMaxMemory();
2189  }
2190  max_allowed_footprint_ = max_allowed_footprint;
2191}
2192
2193bool Heap::IsMovableObject(const mirror::Object* obj) const {
2194  if (kMovingCollector) {
2195    DCHECK(!IsInTempSpace(obj));
2196    if (bump_pointer_space_->HasAddress(obj)) {
2197      return true;
2198    }
2199    // TODO: Refactor this logic into the space itself?
2200    // Objects in the main space are only copied during background -> foreground transitions or
2201    // vice versa.
2202    if (main_space_ != nullptr && main_space_->HasAddress(obj) &&
2203        (IsCompactingGC(background_collector_type_) ||
2204            IsCompactingGC(post_zygote_collector_type_))) {
2205      return true;
2206    }
2207  }
2208  return false;
2209}
2210
2211bool Heap::IsInTempSpace(const mirror::Object* obj) const {
2212  if (temp_space_->HasAddress(obj) && !temp_space_->Contains(obj)) {
2213    return true;
2214  }
2215  return false;
2216}
2217
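// The native footprint mirrors the managed heap sizing: the GC watermark is the registered native
// bytes divided by the target heap utilization, clamped to within [native_size + min_free_,
// native_size + max_free_], and the hard limit is twice the watermark minus the current native
// size. For example (hypothetical numbers), 8 MB of registered native allocations with a target
// utilization of 0.5 and max_free = 2 MB gives a watermark of 10 MB and a limit of 12 MB.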
2218void Heap::UpdateMaxNativeFootprint() {
2219  size_t native_size = native_bytes_allocated_;
2220  // TODO: Tune the native heap utilization to be a value other than the java heap utilization.
2221  size_t target_size = native_size / GetTargetHeapUtilization();
2222  if (target_size > native_size + max_free_) {
2223    target_size = native_size + max_free_;
2224  } else if (target_size < native_size + min_free_) {
2225    target_size = native_size + min_free_;
2226  }
2227  native_footprint_gc_watermark_ = target_size;
2228  native_footprint_limit_ = 2 * target_size - native_size;
2229}
2230
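// Worked example for the sizing logic below (hypothetical values, not the defaults): with a target
// utilization of 0.5, min_free = 512 KB and max_free = 2 MB, a non-sticky GC that leaves 10 MB
// allocated first computes target_size = 10 MB / 0.5 = 20 MB, which is then clamped to
// bytes_allocated + max_free = 12 MB. For a concurrent collector, concurrent_start_bytes_ is then
// set to max_allowed_footprint_ minus the bytes we expect to allocate during one GC
// (allocation_rate_ * estimated GC duration, clamped between kMinConcurrentRemainingBytes and
// kMaxConcurrentRemainingBytes), but never below the current bytes_allocated.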
2231void Heap::GrowForUtilization(collector::GcType gc_type, uint64_t gc_duration) {
2232  // We know what our utilization is at this moment.
2233  // This doesn't actually resize any memory. It just lets the heap grow more when necessary.
2234  const size_t bytes_allocated = GetBytesAllocated();
2235  last_gc_size_ = bytes_allocated;
2236  last_gc_time_ns_ = NanoTime();
2237  size_t target_size;
2238  if (gc_type != collector::kGcTypeSticky) {
2239    // Grow the heap for non sticky GC.
2240    target_size = bytes_allocated / GetTargetHeapUtilization();
2241    if (target_size > bytes_allocated + max_free_) {
2242      target_size = bytes_allocated + max_free_;
2243    } else if (target_size < bytes_allocated + min_free_) {
2244      target_size = bytes_allocated + min_free_;
2245    }
2246    native_need_to_run_finalization_ = true;
2247    next_gc_type_ = collector::kGcTypeSticky;
2248  } else {
2249    // Based on how close the current heap size is to the target size, decide
2250    // whether or not to do a partial or sticky GC next.
2251    if (bytes_allocated + min_free_ <= max_allowed_footprint_) {
2252      next_gc_type_ = collector::kGcTypeSticky;
2253    } else {
2254      next_gc_type_ = have_zygote_space_ ? collector::kGcTypePartial : collector::kGcTypeFull;
2255    }
2256    // If we have freed enough memory, shrink the heap back down.
2257    if (bytes_allocated + max_free_ < max_allowed_footprint_) {
2258      target_size = bytes_allocated + max_free_;
2259    } else {
2260      target_size = std::max(bytes_allocated, max_allowed_footprint_);
2261    }
2262  }
2263  if (!ignore_max_footprint_) {
2264    SetIdealFootprint(target_size);
2265    if (concurrent_gc_) {
2266      // Calculate when to perform the next ConcurrentGC.
2267      // Calculate the estimated GC duration.
2268      const double gc_duration_seconds = NsToMs(gc_duration) / 1000.0;
2269      // Estimate how many remaining bytes we will have when we need to start the next GC.
2270      size_t remaining_bytes = allocation_rate_ * gc_duration_seconds;
2271      remaining_bytes = std::min(remaining_bytes, kMaxConcurrentRemainingBytes);
2272      remaining_bytes = std::max(remaining_bytes, kMinConcurrentRemainingBytes);
2273      if (UNLIKELY(remaining_bytes > max_allowed_footprint_)) {
2274        // A situation that should never happen: the estimated allocation rate implies that we
2275        // would exceed the application's entire footprint before the next GC could complete.
2276        // Schedule another GC nearly straight away.
2277        remaining_bytes = kMinConcurrentRemainingBytes;
2278      }
2279      DCHECK_LE(remaining_bytes, max_allowed_footprint_);
2280      DCHECK_LE(max_allowed_footprint_, growth_limit_);
2281      // Start a concurrent GC when we get close to the estimated remaining bytes. When the
2282      // allocation rate is very high, remaining_bytes could tell us that we should start a GC
2283      // right away.
2284      concurrent_start_bytes_ = std::max(max_allowed_footprint_ - remaining_bytes, bytes_allocated);
2285    }
2286  }
2287}
2288
2289void Heap::ClearGrowthLimit() {
2290  growth_limit_ = capacity_;
2291  non_moving_space_->ClearGrowthLimit();
2292}
2293
2294void Heap::SetReferenceOffsets(MemberOffset reference_referent_offset,
2295                               MemberOffset reference_queue_offset,
2296                               MemberOffset reference_queueNext_offset,
2297                               MemberOffset reference_pendingNext_offset,
2298                               MemberOffset finalizer_reference_zombie_offset) {
2299  reference_referent_offset_ = reference_referent_offset;
2300  reference_queue_offset_ = reference_queue_offset;
2301  reference_queueNext_offset_ = reference_queueNext_offset;
2302  reference_pendingNext_offset_ = reference_pendingNext_offset;
2303  finalizer_reference_zombie_offset_ = finalizer_reference_zombie_offset;
2304  CHECK_NE(reference_referent_offset_.Uint32Value(), 0U);
2305  CHECK_NE(reference_queue_offset_.Uint32Value(), 0U);
2306  CHECK_NE(reference_queueNext_offset_.Uint32Value(), 0U);
2307  CHECK_NE(reference_pendingNext_offset_.Uint32Value(), 0U);
2308  CHECK_NE(finalizer_reference_zombie_offset_.Uint32Value(), 0U);
2309}
2310
2311void Heap::SetReferenceReferent(mirror::Object* reference, mirror::Object* referent) {
2312  DCHECK(reference != NULL);
2313  DCHECK_NE(reference_referent_offset_.Uint32Value(), 0U);
2314  reference->SetFieldObject(reference_referent_offset_, referent, true);
2315}
2316
2317mirror::Object* Heap::GetReferenceReferent(mirror::Object* reference) {
2318  DCHECK(reference != NULL);
2319  DCHECK_NE(reference_referent_offset_.Uint32Value(), 0U);
2320  return reference->GetFieldObject<mirror::Object>(reference_referent_offset_, true);
2321}
2322
2323void Heap::AddFinalizerReference(Thread* self, mirror::Object* object) {
2324  ScopedObjectAccess soa(self);
2325  JValue result;
2326  ArgArray arg_array("VL", 2);
2327  arg_array.Append(object);
2328  soa.DecodeMethod(WellKnownClasses::java_lang_ref_FinalizerReference_add)->Invoke(self,
2329      arg_array.GetArray(), arg_array.GetNumBytes(), &result, "VL");
2330}
2331
2332void Heap::EnqueueClearedReferences() {
2333  Thread* self = Thread::Current();
2334  Locks::mutator_lock_->AssertNotHeld(self);
2335  if (!cleared_references_.IsEmpty()) {
2336    // When a runtime isn't started there are no reference queues to care about so ignore.
2337    if (LIKELY(Runtime::Current()->IsStarted())) {
2338      ScopedObjectAccess soa(self);
2339      JValue result;
2340      ArgArray arg_array("VL", 2);
2341      arg_array.Append(cleared_references_.GetList());
2342      soa.DecodeMethod(WellKnownClasses::java_lang_ref_ReferenceQueue_add)->Invoke(soa.Self(),
2343          arg_array.GetArray(), arg_array.GetNumBytes(), &result, "VL");
2344    }
2345    cleared_references_.Clear();
2346  }
2347}
2348
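// Concurrent GCs are not run inline by the allocating thread: RequestConcurrentGC posts a request
// to the Java Daemons thread (via Daemons.requestGC) and resets concurrent_start_bytes_ so that
// only one request is outstanding at a time; ConcurrentGC() below is the entry point that request
// eventually reaches.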
2349void Heap::RequestConcurrentGC(Thread* self) {
2350  // Make sure that we can do a concurrent GC.
2351  Runtime* runtime = Runtime::Current();
2352  if (runtime == NULL || !runtime->IsFinishedStarting() || runtime->IsShuttingDown(self) ||
2353      self->IsHandlingStackOverflow()) {
2354    return;
2355  }
2356  // We already have a request pending, no reason to start more until we update
2357  // concurrent_start_bytes_.
2358  concurrent_start_bytes_ = std::numeric_limits<size_t>::max();
2359  JNIEnv* env = self->GetJniEnv();
2360  DCHECK(WellKnownClasses::java_lang_Daemons != nullptr);
2361  DCHECK(WellKnownClasses::java_lang_Daemons_requestGC != nullptr);
2362  env->CallStaticVoidMethod(WellKnownClasses::java_lang_Daemons,
2363                            WellKnownClasses::java_lang_Daemons_requestGC);
2364  CHECK(!env->ExceptionCheck());
2365}
2366
2367void Heap::ConcurrentGC(Thread* self) {
2368  if (Runtime::Current()->IsShuttingDown(self)) {
2369    return;
2370  }
2371  // Wait for any GCs currently running to finish.
2372  if (WaitForGcToComplete(self) == collector::kGcTypeNone) {
2373    // If we can't run the GC type we wanted to run, find the next appropriate one and try that
2374    // instead. E.g. can't do partial, so do full instead.
2375    if (CollectGarbageInternal(next_gc_type_, kGcCauseBackground, false) ==
2376        collector::kGcTypeNone) {
2377      for (collector::GcType gc_type : gc_plan_) {
2378        // Attempt to run the collector, if we succeed, we are done.
2379        if (gc_type > next_gc_type_ &&
2380            CollectGarbageInternal(gc_type, kGcCauseBackground, false) != collector::kGcTypeNone) {
2381          break;
2382        }
2383      }
2384    }
2385  }
2386}
2387
2388void Heap::RequestHeapTrim() {
2389  // GC completed and now we must decide whether to request a heap trim (advising pages back to the
2390  // kernel) or not. Issuing a request will also cause trimming of the libc heap. As a trim scans
2391  // a space it will hold its lock and can become a cause of jank.
2392  // Note: the large object space trims itself, and the zygote space was trimmed at fork time and
2393  // is unchanging afterwards.
2394
2395  // We don't have a good measure of how worthwhile a trim might be. We can't use the live bitmap
2396  // because that only marks object heads, so a large array looks like lots of empty space. We
2397  // don't just call dlmalloc all the time, because the cost of an _attempted_ trim is proportional
2398  // to utilization (which is probably inversely proportional to how much benefit we can expect).
2399  // We could try mincore(2) but that's only a measure of how many pages we haven't given away,
2400  // not how much use we're making of those pages.
2401  uint64_t ms_time = MilliTime();
2402  // Don't bother trimming the alloc space if a heap trim occurred in the last two seconds.
2403  if (ms_time - last_trim_time_ms_ < 2 * 1000) {
2404    return;
2405  }
2406
2407  Thread* self = Thread::Current();
2408  Runtime* runtime = Runtime::Current();
2409  if (runtime == nullptr || !runtime->IsFinishedStarting() || runtime->IsShuttingDown(self)) {
2410    // Heap trimming isn't supported without a Java runtime or Daemons (such as at dex2oat time)
2411    // Also: we do not wish to start a heap trim if the runtime is shutting down (a racy check
2412    // as we don't hold the lock while requesting the trim).
2413    return;
2414  }
2415
2416  last_trim_time_ms_ = ms_time;
2417
2418  // Trim only if we do not currently care about pause times.
2419  if (!CareAboutPauseTimes()) {
2420    JNIEnv* env = self->GetJniEnv();
2421    DCHECK(WellKnownClasses::java_lang_Daemons != NULL);
2422    DCHECK(WellKnownClasses::java_lang_Daemons_requestHeapTrim != NULL);
2423    env->CallStaticVoidMethod(WellKnownClasses::java_lang_Daemons,
2424                              WellKnownClasses::java_lang_Daemons_requestHeapTrim);
2425    CHECK(!env->ExceptionCheck());
2426  }
2427}
2428
2429void Heap::RevokeThreadLocalBuffers(Thread* thread) {
2430  if (rosalloc_space_ != nullptr) {
2431    rosalloc_space_->RevokeThreadLocalBuffers(thread);
2432  }
2433  if (bump_pointer_space_ != nullptr) {
2434    bump_pointer_space_->RevokeThreadLocalBuffers(thread);
2435  }
2436}
2437
2438void Heap::RevokeAllThreadLocalBuffers() {
2439  if (rosalloc_space_ != nullptr) {
2440    rosalloc_space_->RevokeAllThreadLocalBuffers();
2441  }
2442  if (bump_pointer_space_ != nullptr) {
2443    bump_pointer_space_->RevokeAllThreadLocalBuffers();
2444  }
2445}
2446
2447bool Heap::IsGCRequestPending() const {
2448  return concurrent_start_bytes_ != std::numeric_limits<size_t>::max();
2449}
2450
2451void Heap::RunFinalization(JNIEnv* env) {
2452  // Can't do this in WellKnownClasses::Init since System is not properly set up at that point.
2453  if (WellKnownClasses::java_lang_System_runFinalization == nullptr) {
2454    CHECK(WellKnownClasses::java_lang_System != nullptr);
2455    WellKnownClasses::java_lang_System_runFinalization =
2456        CacheMethod(env, WellKnownClasses::java_lang_System, true, "runFinalization", "()V");
2457    CHECK(WellKnownClasses::java_lang_System_runFinalization != nullptr);
2458  }
2459  env->CallStaticVoidMethod(WellKnownClasses::java_lang_System,
2460                            WellKnownClasses::java_lang_System_runFinalization);
2461}
2462
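// Native allocations registered through JNI are throttled with two watermarks (see
// UpdateMaxNativeFootprint above): crossing native_footprint_gc_watermark_ requests or runs a GC,
// while crossing the higher native_footprint_limit_ forces finalization and, if still over the
// limit, a blocking GC for the native allocation. This is best-effort back-pressure, not an exact
// accounting of native memory.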
2463void Heap::RegisterNativeAllocation(JNIEnv* env, int bytes) {
2464  Thread* self = ThreadForEnv(env);
2465  if (native_need_to_run_finalization_) {
2466    RunFinalization(env);
2467    UpdateMaxNativeFootprint();
2468    native_need_to_run_finalization_ = false;
2469  }
2470  // Total number of native bytes allocated.
2471  native_bytes_allocated_.FetchAndAdd(bytes);
2472  if (static_cast<size_t>(native_bytes_allocated_) > native_footprint_gc_watermark_) {
2473    collector::GcType gc_type = have_zygote_space_ ? collector::kGcTypePartial :
2474        collector::kGcTypeFull;
2475
2476    // The second watermark is higher than the gc watermark. If you hit this, it means you are
2477    // allocating native objects faster than the GC can keep up with them.
2478    if (static_cast<size_t>(native_bytes_allocated_) > native_footprint_limit_) {
2479      if (WaitForGcToComplete(self) != collector::kGcTypeNone) {
2480        // Just finished a GC, attempt to run finalizers.
2481        RunFinalization(env);
2482        CHECK(!env->ExceptionCheck());
2483      }
2484      // If we still are over the watermark, attempt a GC for alloc and run finalizers.
2485      if (static_cast<size_t>(native_bytes_allocated_) > native_footprint_limit_) {
2486        CollectGarbageInternal(gc_type, kGcCauseForNativeAlloc, false);
2487        RunFinalization(env);
2488        native_need_to_run_finalization_ = false;
2489        CHECK(!env->ExceptionCheck());
2490      }
2491      // We have just run finalizers, update the native watermark since it is very likely that
2492      // finalizers released native managed allocations.
2493      UpdateMaxNativeFootprint();
2494    } else if (!IsGCRequestPending()) {
2495      if (concurrent_gc_) {
2496        RequestConcurrentGC(self);
2497      } else {
2498        CollectGarbageInternal(gc_type, kGcCauseForAlloc, false);
2499      }
2500    }
2501  }
2502}
2503
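// RegisterNativeFree uses a compare-and-swap retry loop so that concurrent registrations and frees
// do not lose updates; if the caller tries to free more native bytes than were registered, a
// RuntimeException is thrown instead of letting the counter go negative.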
2504void Heap::RegisterNativeFree(JNIEnv* env, int bytes) {
2505  int expected_size, new_size;
2506  do {
2507    expected_size = native_bytes_allocated_.Load();
2508    new_size = expected_size - bytes;
2509    if (UNLIKELY(new_size < 0)) {
2510      ScopedObjectAccess soa(env);
2511      env->ThrowNew(WellKnownClasses::java_lang_RuntimeException,
2512                    StringPrintf("Attempted to free %d native bytes with only %d native bytes "
2513                                 "registered as allocated", bytes, expected_size).c_str());
2514      break;
2515    }
2516  } while (!native_bytes_allocated_.CompareAndSwap(expected_size, new_size));
2517}
2518
2519size_t Heap::GetTotalMemory() const {
2520  size_t ret = 0;
2521  for (const auto& space : continuous_spaces_) {
2522    // Currently don't include the image space.
2523    if (!space->IsImageSpace()) {
2524      ret += space->Size();
2525    }
2526  }
2527  for (const auto& space : discontinuous_spaces_) {
2528    if (space->IsLargeObjectSpace()) {
2529      ret += space->AsLargeObjectSpace()->GetBytesAllocated();
2530    }
2531  }
2532  return ret;
2533}
2534
2535void Heap::AddModUnionTable(accounting::ModUnionTable* mod_union_table) {
2536  DCHECK(mod_union_table != nullptr);
2537  mod_union_tables_.Put(mod_union_table->GetSpace(), mod_union_table);
2538}
2539
2540}  // namespace gc
2541}  // namespace art
2542