heap.cc revision ca2a24da53869a04e1947aa46d06ccce5247d6f4
/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "heap.h"

#define ATRACE_TAG ATRACE_TAG_DALVIK
#include <cutils/trace.h>

#include <limits>
#include <vector>
#include <valgrind.h>

#include "base/histogram-inl.h"
#include "base/stl_util.h"
#include "common_throws.h"
#include "cutils/sched_policy.h"
#include "debugger.h"
#include "gc/accounting/atomic_stack.h"
#include "gc/accounting/card_table-inl.h"
#include "gc/accounting/heap_bitmap-inl.h"
#include "gc/accounting/mod_union_table.h"
#include "gc/accounting/mod_union_table-inl.h"
#include "gc/accounting/space_bitmap-inl.h"
#include "gc/collector/mark_sweep-inl.h"
#include "gc/collector/partial_mark_sweep.h"
#include "gc/collector/semi_space.h"
#include "gc/collector/sticky_mark_sweep.h"
#include "gc/space/bump_pointer_space.h"
#include "gc/space/dlmalloc_space-inl.h"
#include "gc/space/image_space.h"
#include "gc/space/large_object_space.h"
#include "gc/space/rosalloc_space-inl.h"
#include "gc/space/space-inl.h"
#include "heap-inl.h"
#include "image.h"
#include "invoke_arg_array_builder.h"
#include "mirror/art_field-inl.h"
#include "mirror/class-inl.h"
#include "mirror/object.h"
#include "mirror/object-inl.h"
#include "mirror/object_array-inl.h"
#include "object_utils.h"
#include "os.h"
#include "runtime.h"
#include "ScopedLocalRef.h"
#include "scoped_thread_state_change.h"
#include "sirt_ref.h"
#include "thread_list.h"
#include "UniquePtr.h"
#include "well_known_classes.h"

namespace art {

extern void SetQuickAllocEntryPointsAllocator(gc::AllocatorType allocator);

namespace gc {

static constexpr bool kGCALotMode = false;
static constexpr size_t kGcAlotInterval = KB;
// Minimum amount of remaining bytes before a concurrent GC is triggered.
static constexpr size_t kMinConcurrentRemainingBytes = 128 * KB;
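
// Worked example (illustrative numbers, not from this file): with a 16 MB
// initial heap and the CMS collector, the constructor below sets
//   concurrent_start_bytes_ = initial_size - kMinConcurrentRemainingBytes
//                           = 16 * MB - 128 * KB,
// so a concurrent GC is requested once allocations cross that watermark,
// leaving roughly 128 KB of headroom for mutators while the collection runs.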

Heap::Heap(size_t initial_size, size_t growth_limit, size_t min_free, size_t max_free,
           double target_utilization, size_t capacity, const std::string& image_file_name,
           CollectorType collector_type, size_t parallel_gc_threads, size_t conc_gc_threads,
           bool low_memory_mode, size_t long_pause_log_threshold, size_t long_gc_log_threshold,
           bool ignore_max_footprint)
    : non_moving_space_(nullptr),
      concurrent_gc_(collector_type == gc::kCollectorTypeCMS),
      collector_type_(collector_type),
      parallel_gc_threads_(parallel_gc_threads),
      conc_gc_threads_(conc_gc_threads),
      low_memory_mode_(low_memory_mode),
      long_pause_log_threshold_(long_pause_log_threshold),
      long_gc_log_threshold_(long_gc_log_threshold),
      ignore_max_footprint_(ignore_max_footprint),
      have_zygote_space_(false),
      soft_reference_queue_(this),
      weak_reference_queue_(this),
      finalizer_reference_queue_(this),
      phantom_reference_queue_(this),
      cleared_references_(this),
      is_gc_running_(false),
      last_gc_type_(collector::kGcTypeNone),
      next_gc_type_(collector::kGcTypePartial),
      capacity_(capacity),
      growth_limit_(growth_limit),
      max_allowed_footprint_(initial_size),
      native_footprint_gc_watermark_(initial_size),
      native_footprint_limit_(2 * initial_size),
      native_need_to_run_finalization_(false),
      activity_thread_class_(NULL),
      application_thread_class_(NULL),
      activity_thread_(NULL),
      application_thread_(NULL),
      last_process_state_id_(NULL),
      // Initially assume we perceive jank in case the process state is never updated.
      process_state_(kProcessStateJankPerceptible),
      concurrent_start_bytes_(concurrent_gc_ ? initial_size - kMinConcurrentRemainingBytes
                                             : std::numeric_limits<size_t>::max()),
      total_bytes_freed_ever_(0),
      total_objects_freed_ever_(0),
      num_bytes_allocated_(0),
      native_bytes_allocated_(0),
      gc_memory_overhead_(0),
      verify_missing_card_marks_(false),
      verify_system_weaks_(false),
      verify_pre_gc_heap_(false),
      verify_post_gc_heap_(false),
      verify_mod_union_table_(false),
      min_alloc_space_size_for_sticky_gc_(2 * MB),
      min_remaining_space_for_sticky_gc_(1 * MB),
      last_trim_time_ms_(0),
      allocation_rate_(0),
      /* For GC a lot mode, we limit the allocation stacks to be kGcAlotInterval allocations. This
       * causes a lot of GC since we do a GC for alloc whenever the stack is full. When heap
       * verification is enabled, we limit the size of allocation stacks to speed up their
       * searching.
       */
      max_allocation_stack_size_(kGCALotMode ? kGcAlotInterval
          : (kDesiredHeapVerification > kVerifyAllFast) ? KB : MB),
      current_allocator_(kMovingCollector ? kAllocatorTypeBumpPointer
                                          : kAllocatorTypeFreeList),
      current_non_moving_allocator_(kAllocatorTypeFreeList),
      bump_pointer_space_(nullptr),
      temp_space_(nullptr),
      reference_referent_offset_(0),
      reference_queue_offset_(0),
      reference_queueNext_offset_(0),
      reference_pendingNext_offset_(0),
      finalizer_reference_zombie_offset_(0),
      min_free_(min_free),
      max_free_(max_free),
      target_utilization_(target_utilization),
      total_wait_time_(0),
      total_allocation_time_(0),
      verify_object_mode_(kHeapVerificationNotPermitted),
      gc_disable_count_(0),
      running_on_valgrind_(RUNNING_ON_VALGRIND) {
  if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
    LOG(INFO) << "Heap() entering";
  }
  // If we aren't the zygote, switch to the default non zygote allocator. This may update the
  // entrypoints.
  if (!Runtime::Current()->IsZygote()) {
    ChangeCollector(collector_type_);
  }
  live_bitmap_.reset(new accounting::HeapBitmap(this));
  mark_bitmap_.reset(new accounting::HeapBitmap(this));
  // Requested begin for the alloc space, to follow the mapped image and oat files.
  byte* requested_alloc_space_begin = nullptr;
  if (!image_file_name.empty()) {
    space::ImageSpace* image_space = space::ImageSpace::Create(image_file_name.c_str());
    CHECK(image_space != nullptr) << "Failed to create space for " << image_file_name;
    AddSpace(image_space);
    // Oat files referenced by image files immediately follow them in memory, ensure alloc space
    // isn't going to get in the middle.
    byte* oat_file_end_addr = image_space->GetImageHeader().GetOatFileEnd();
    CHECK_GT(oat_file_end_addr, image_space->End());
    if (oat_file_end_addr > requested_alloc_space_begin) {
      requested_alloc_space_begin = AlignUp(oat_file_end_addr, kPageSize);
    }
  }
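
  // Worked example (hypothetical addresses): if the oat file mapped after the
  // image ends at 0x60001234 and kPageSize is 4096, AlignUp() above requests
  // the alloc space at 0x60002000, the next page boundary past the oat file.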

  const char* name = Runtime::Current()->IsZygote() ? "zygote space" : "alloc space";
  if (!kUseRosAlloc) {
    non_moving_space_ = space::DlMallocSpace::Create(name, initial_size, growth_limit, capacity,
                                                     requested_alloc_space_begin);
  } else {
    non_moving_space_ = space::RosAllocSpace::Create(name, initial_size, growth_limit, capacity,
                                                     requested_alloc_space_begin);
  }
  if (kMovingCollector) {
    // TODO: Place bump-pointer spaces somewhere to minimize size of card table.
    // TODO: Having 3+ spaces as big as the large heap size can cause virtual memory fragmentation
    // issues.
    const size_t bump_pointer_space_size = std::min(non_moving_space_->Capacity(), 128 * MB);
    bump_pointer_space_ = space::BumpPointerSpace::Create("Bump pointer space",
                                                          bump_pointer_space_size, nullptr);
    CHECK(bump_pointer_space_ != nullptr) << "Failed to create bump pointer space";
    AddSpace(bump_pointer_space_);
    temp_space_ = space::BumpPointerSpace::Create("Bump pointer space 2", bump_pointer_space_size,
                                                  nullptr);
    CHECK(temp_space_ != nullptr) << "Failed to create bump pointer space";
    AddSpace(temp_space_);
  }

  CHECK(non_moving_space_ != NULL) << "Failed to create non-moving space";
  non_moving_space_->SetFootprintLimit(non_moving_space_->Capacity());
  AddSpace(non_moving_space_);

  // Allocate the large object space.
  const bool kUseFreeListSpaceForLOS = false;
  if (kUseFreeListSpaceForLOS) {
    large_object_space_ = space::FreeListSpace::Create("large object space", NULL, capacity);
  } else {
    large_object_space_ = space::LargeObjectMapSpace::Create("large object space");
  }
  CHECK(large_object_space_ != NULL) << "Failed to create large object space";
  AddSpace(large_object_space_);

  // Compute heap capacity. Continuous spaces are sorted in order of Begin().
  CHECK(!continuous_spaces_.empty());
  // Relies on the spaces being sorted.
  byte* heap_begin = continuous_spaces_.front()->Begin();
  byte* heap_end = continuous_spaces_.back()->Limit();
  size_t heap_capacity = heap_end - heap_begin;

  // Allocate the card table.
  card_table_.reset(accounting::CardTable::Create(heap_begin, heap_capacity));
  CHECK(card_table_.get() != NULL) << "Failed to create card table";

  // Card cache for now since it makes it easier for us to update the references to the copying
  // spaces.
  accounting::ModUnionTable* mod_union_table =
      new accounting::ModUnionTableCardCache("Image mod-union table", this, GetImageSpace());
  CHECK(mod_union_table != nullptr) << "Failed to create image mod-union table";
  AddModUnionTable(mod_union_table);

  // TODO: Count objects in the image space here.
  num_bytes_allocated_ = 0;

  // Default mark stack size in bytes.
  static const size_t default_mark_stack_size = 64 * KB;
  mark_stack_.reset(accounting::ObjectStack::Create("mark stack", default_mark_stack_size));
  allocation_stack_.reset(accounting::ObjectStack::Create("allocation stack",
                                                          max_allocation_stack_size_));
  live_stack_.reset(accounting::ObjectStack::Create("live stack",
                                                    max_allocation_stack_size_));

  // It's still too early to take a lock because there are no threads yet, but we can create locks
  // now. We don't create it earlier to make it clear that you can't use locks during heap
  // initialization.
  gc_complete_lock_ = new Mutex("GC complete lock");
  gc_complete_cond_.reset(new ConditionVariable("GC complete condition variable",
                                                *gc_complete_lock_));
  last_gc_time_ns_ = NanoTime();
  last_gc_size_ = GetBytesAllocated();

  if (ignore_max_footprint_) {
    SetIdealFootprint(std::numeric_limits<size_t>::max());
    concurrent_start_bytes_ = std::numeric_limits<size_t>::max();
  }
  CHECK_NE(max_allowed_footprint_, 0U);

  // Create our garbage collectors.
  for (size_t i = 0; i < 2; ++i) {
    const bool concurrent = i != 0;
    garbage_collectors_.push_back(new collector::MarkSweep(this, concurrent));
    garbage_collectors_.push_back(new collector::PartialMarkSweep(this, concurrent));
    garbage_collectors_.push_back(new collector::StickyMarkSweep(this, concurrent));
  }
  gc_plan_.push_back(collector::kGcTypeSticky);
  gc_plan_.push_back(collector::kGcTypePartial);
  gc_plan_.push_back(collector::kGcTypeFull);
  if (kMovingCollector) {
    // TODO: Clean this up.
    semi_space_collector_ = new collector::SemiSpace(this);
    garbage_collectors_.push_back(semi_space_collector_);
  }

  if (running_on_valgrind_) {
    Runtime::Current()->GetInstrumentation()->InstrumentQuickAllocEntryPoints();
  }

  if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
    LOG(INFO) << "Heap() exiting";
  }
}

void Heap::ChangeAllocator(AllocatorType allocator) {
  DCHECK_NE(allocator, kAllocatorTypeLOS);
  if (current_allocator_ != allocator) {
    current_allocator_ = allocator;
    SetQuickAllocEntryPointsAllocator(current_allocator_);
    Runtime::Current()->GetInstrumentation()->ResetQuickAllocEntryPoints();
  }
}

bool Heap::IsCompilingBoot() const {
  for (const auto& space : continuous_spaces_) {
    if (space->IsImageSpace()) {
      return false;
    } else if (space->IsZygoteSpace()) {
      return false;
    }
  }
  return true;
}

bool Heap::HasImageSpace() const {
  for (const auto& space : continuous_spaces_) {
    if (space->IsImageSpace()) {
      return true;
    }
  }
  return false;
}

void Heap::IncrementDisableGC(Thread* self) {
  // Need to do this holding the lock to prevent races where the GC is about to run / running when
  // we attempt to disable it.
  ScopedThreadStateChange tsc(self, kWaitingForGcToComplete);
  MutexLock mu(self, *gc_complete_lock_);
  WaitForGcToCompleteLocked(self);
  ++gc_disable_count_;
}

void Heap::DecrementDisableGC(Thread* self) {
  MutexLock mu(self, *gc_complete_lock_);
  CHECK_GE(gc_disable_count_, 0U);
  --gc_disable_count_;
}

void Heap::UpdateProcessState(ProcessState process_state) {
  process_state_ = process_state;
}

void Heap::CreateThreadPool() {
  const size_t num_threads = std::max(parallel_gc_threads_, conc_gc_threads_);
  if (num_threads != 0) {
    thread_pool_.reset(new ThreadPool("Heap thread pool", num_threads));
  }
}

void Heap::VisitObjects(ObjectVisitorCallback callback, void* arg) {
  // Visit objects in bump pointer space.
  Thread* self = Thread::Current();
  // TODO: Use reference block.
  std::vector<SirtRef<mirror::Object>*> saved_refs;
  if (bump_pointer_space_ != nullptr) {
    // Need to put all these in sirts since the callback may trigger a GC. TODO: Use a better data
    // structure.
    mirror::Object* obj = reinterpret_cast<mirror::Object*>(bump_pointer_space_->Begin());
    const mirror::Object* end = reinterpret_cast<const mirror::Object*>(
        bump_pointer_space_->End());
    while (obj < end) {
      saved_refs.push_back(new SirtRef<mirror::Object>(self, obj));
      obj = space::BumpPointerSpace::GetNextObject(obj);
    }
  }
  // TODO: Switch to standard begin and end to use a range-based loop.
  for (mirror::Object** it = allocation_stack_->Begin(), **end = allocation_stack_->End();
      it < end; ++it) {
    mirror::Object* obj = *it;
    // Objects in the allocation stack might be in a movable space.
    saved_refs.push_back(new SirtRef<mirror::Object>(self, obj));
  }
  GetLiveBitmap()->Walk(callback, arg);
  for (const auto& ref : saved_refs) {
    callback(ref->get(), arg);
  }
  // Need to free the sirts in the reverse order they were allocated.
  for (size_t i = saved_refs.size(); i != 0; --i) {
    delete saved_refs[i - 1];
  }
}
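
// Example (hypothetical caller): counting every reachable object with
// VisitObjects(). The callback has the same shape as
// VerifyObjectVisitor::VisitCallback later in this file.
//
//   static void CountObjectCallback(mirror::Object* obj, void* arg) {
//     ++*reinterpret_cast<size_t*>(arg);
//   }
//   ...
//   size_t count = 0;
//   Runtime::Current()->GetHeap()->VisitObjects(CountObjectCallback, &count);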

void Heap::MarkAllocStackAsLive(accounting::ObjectStack* stack) {
  MarkAllocStack(non_moving_space_->GetLiveBitmap(), large_object_space_->GetLiveObjects(), stack);
}

void Heap::DeleteThreadPool() {
  thread_pool_.reset(nullptr);
}

void Heap::AddSpace(space::Space* space) {
  DCHECK(space != NULL);
  WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
  if (space->IsContinuousSpace()) {
    DCHECK(!space->IsDiscontinuousSpace());
    space::ContinuousSpace* continuous_space = space->AsContinuousSpace();
    // Continuous spaces don't necessarily have bitmaps.
    accounting::SpaceBitmap* live_bitmap = continuous_space->GetLiveBitmap();
    accounting::SpaceBitmap* mark_bitmap = continuous_space->GetMarkBitmap();
    if (live_bitmap != nullptr) {
      DCHECK(mark_bitmap != nullptr);
      live_bitmap_->AddContinuousSpaceBitmap(live_bitmap);
      mark_bitmap_->AddContinuousSpaceBitmap(mark_bitmap);
    }

    continuous_spaces_.push_back(continuous_space);
    if (continuous_space->IsMallocSpace()) {
      non_moving_space_ = continuous_space->AsMallocSpace();
    }

    // Ensure that spaces remain sorted in increasing order of start address.
    std::sort(continuous_spaces_.begin(), continuous_spaces_.end(),
              [](const space::ContinuousSpace* a, const space::ContinuousSpace* b) {
                return a->Begin() < b->Begin();
              });
    // Ensure that ImageSpaces < ZygoteSpaces < AllocSpaces so that we can do address based checks
    // to avoid redundant marking.
    bool seen_zygote = false, seen_alloc = false;
    for (const auto& space : continuous_spaces_) {
      if (space->IsImageSpace()) {
        CHECK(!seen_zygote);
        CHECK(!seen_alloc);
      } else if (space->IsZygoteSpace()) {
        CHECK(!seen_alloc);
        seen_zygote = true;
      } else if (space->IsMallocSpace()) {
        seen_alloc = true;
      }
    }
  } else {
    DCHECK(space->IsDiscontinuousSpace());
    space::DiscontinuousSpace* discontinuous_space = space->AsDiscontinuousSpace();
    DCHECK(discontinuous_space->GetLiveObjects() != nullptr);
    live_bitmap_->AddDiscontinuousObjectSet(discontinuous_space->GetLiveObjects());
    DCHECK(discontinuous_space->GetMarkObjects() != nullptr);
    mark_bitmap_->AddDiscontinuousObjectSet(discontinuous_space->GetMarkObjects());
    discontinuous_spaces_.push_back(discontinuous_space);
  }
  if (space->IsAllocSpace()) {
    alloc_spaces_.push_back(space->AsAllocSpace());
  }
}

// Note: both functions below are written to tolerate being invoked through a null Heap*, hence
// the explicit 'this' checks (which rely on technically undefined behavior).
void Heap::RegisterGCAllocation(size_t bytes) {
  if (this != nullptr) {
    gc_memory_overhead_.fetch_add(bytes);
  }
}

void Heap::RegisterGCDeAllocation(size_t bytes) {
  if (this != nullptr) {
    gc_memory_overhead_.fetch_sub(bytes);
  }
}

void Heap::DumpGcPerformanceInfo(std::ostream& os) {
  // Dump cumulative timings.
  os << "Dumping cumulative Gc timings\n";
  uint64_t total_duration = 0;

  // Dump cumulative loggers for each GC type.
  uint64_t total_paused_time = 0;
  for (const auto& collector : garbage_collectors_) {
    CumulativeLogger& logger = collector->GetCumulativeTimings();
    if (logger.GetTotalNs() != 0) {
      os << Dumpable<CumulativeLogger>(logger);
      const uint64_t total_ns = logger.GetTotalNs();
      const uint64_t total_pause_ns = collector->GetTotalPausedTimeNs();
      double seconds = NsToMs(logger.GetTotalNs()) / 1000.0;
      const uint64_t freed_bytes = collector->GetTotalFreedBytes();
      const uint64_t freed_objects = collector->GetTotalFreedObjects();
      Histogram<uint64_t>::CumulativeData cumulative_data;
      collector->GetPauseHistogram().CreateHistogram(&cumulative_data);
      collector->GetPauseHistogram().PrintConfidenceIntervals(os, 0.99, cumulative_data);
      os << collector->GetName() << " total time: " << PrettyDuration(total_ns) << "\n"
         << collector->GetName() << " freed: " << freed_objects
         << " objects with total size " << PrettySize(freed_bytes) << "\n"
         << collector->GetName() << " throughput: " << freed_objects / seconds << "/s / "
         << PrettySize(freed_bytes / seconds) << "/s\n";
      total_duration += total_ns;
      total_paused_time += total_pause_ns;
    }
  }
  uint64_t allocation_time = static_cast<uint64_t>(total_allocation_time_) * kTimeAdjust;
  size_t total_objects_allocated = GetObjectsAllocatedEver();
  size_t total_bytes_allocated = GetBytesAllocatedEver();
  if (total_duration != 0) {
    const double total_seconds = static_cast<double>(total_duration / 1000) / 1000000.0;
    os << "Total time spent in GC: " << PrettyDuration(total_duration) << "\n";
    os << "Mean GC size throughput: "
       << PrettySize(GetBytesFreedEver() / total_seconds) << "/s\n";
    os << "Mean GC object throughput: "
       << (GetObjectsFreedEver() / total_seconds) << " objects/s\n";
  }
  os << "Total number of allocations: " << total_objects_allocated << "\n";
  os << "Total bytes allocated " << PrettySize(total_bytes_allocated) << "\n";
  if (kMeasureAllocationTime) {
    os << "Total time spent allocating: " << PrettyDuration(allocation_time) << "\n";
    os << "Mean allocation time: " << PrettyDuration(allocation_time / total_objects_allocated)
       << "\n";
  }
  os << "Total mutator paused time: " << PrettyDuration(total_paused_time) << "\n";
  os << "Total time waiting for GC to complete: " << PrettyDuration(total_wait_time_) << "\n";
  os << "Approximate GC data structures memory overhead: " << gc_memory_overhead_;
}
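
// Worked example (illustrative numbers): a collector with total_ns of two
// seconds and freed_bytes of 64 MB gets seconds = NsToMs(total_ns) / 1000.0
// = 2.0 above, so its reported size throughput is PrettySize(64 * MB / 2.0),
// i.e. 32 MB/s.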

Heap::~Heap() {
  VLOG(heap) << "Starting ~Heap()";
  STLDeleteElements(&garbage_collectors_);
  // If we don't reset then the mark stack complains in its destructor.
  allocation_stack_->Reset();
  live_stack_->Reset();
  STLDeleteValues(&mod_union_tables_);
  STLDeleteElements(&continuous_spaces_);
  STLDeleteElements(&discontinuous_spaces_);
  delete gc_complete_lock_;
  VLOG(heap) << "Finished ~Heap()";
}

space::ContinuousSpace* Heap::FindContinuousSpaceFromObject(const mirror::Object* obj,
                                                            bool fail_ok) const {
  for (const auto& space : continuous_spaces_) {
    if (space->Contains(obj)) {
      return space;
    }
  }
  if (!fail_ok) {
    LOG(FATAL) << "object " << reinterpret_cast<const void*>(obj) << " not inside any spaces!";
  }
  return NULL;
}

space::DiscontinuousSpace* Heap::FindDiscontinuousSpaceFromObject(const mirror::Object* obj,
                                                                  bool fail_ok) const {
  for (const auto& space : discontinuous_spaces_) {
    if (space->Contains(obj)) {
      return space;
    }
  }
  if (!fail_ok) {
    LOG(FATAL) << "object " << reinterpret_cast<const void*>(obj) << " not inside any spaces!";
  }
  return NULL;
}

space::Space* Heap::FindSpaceFromObject(const mirror::Object* obj, bool fail_ok) const {
  space::Space* result = FindContinuousSpaceFromObject(obj, true);
  if (result != NULL) {
    return result;
  }
  return FindDiscontinuousSpaceFromObject(obj, true);
}

struct SoftReferenceArgs {
  RootVisitor* is_marked_callback_;
  RootVisitor* recursive_mark_callback_;
  void* arg_;
};

mirror::Object* Heap::PreserveSoftReferenceCallback(mirror::Object* obj, void* arg) {
  SoftReferenceArgs* args = reinterpret_cast<SoftReferenceArgs*>(arg);
  // TODO: Not preserve all soft references.
  return args->recursive_mark_callback_(obj, args->arg_);
}

// Process reference class instances and schedule finalizations.
void Heap::ProcessReferences(TimingLogger& timings, bool clear_soft,
                             RootVisitor* is_marked_callback,
                             RootVisitor* recursive_mark_object_callback, void* arg) {
  // Unless we are in the zygote or required to clear soft references, preserve some white
  // referents.
  if (!clear_soft && !Runtime::Current()->IsZygote()) {
    SoftReferenceArgs soft_reference_args;
    soft_reference_args.is_marked_callback_ = is_marked_callback;
    soft_reference_args.recursive_mark_callback_ = recursive_mark_object_callback;
    soft_reference_args.arg_ = arg;
    soft_reference_queue_.PreserveSomeSoftReferences(&PreserveSoftReferenceCallback,
                                                     &soft_reference_args);
  }
  timings.StartSplit("ProcessReferences");
  // Clear all remaining soft and weak references with white referents.
  soft_reference_queue_.ClearWhiteReferences(cleared_references_, is_marked_callback, arg);
  weak_reference_queue_.ClearWhiteReferences(cleared_references_, is_marked_callback, arg);
  timings.EndSplit();
  // Preserve all white objects with finalize methods and schedule them for finalization.
  timings.StartSplit("EnqueueFinalizerReferences");
  finalizer_reference_queue_.EnqueueFinalizerReferences(cleared_references_, is_marked_callback,
                                                        recursive_mark_object_callback, arg);
  timings.EndSplit();
  timings.StartSplit("ProcessReferences");
  // Clear all f-reachable soft and weak references with white referents.
  soft_reference_queue_.ClearWhiteReferences(cleared_references_, is_marked_callback, arg);
  weak_reference_queue_.ClearWhiteReferences(cleared_references_, is_marked_callback, arg);
  // Clear all phantom references with white referents.
  phantom_reference_queue_.ClearWhiteReferences(cleared_references_, is_marked_callback, arg);
  // At this point all reference queues other than the cleared references should be empty.
  DCHECK(soft_reference_queue_.IsEmpty());
  DCHECK(weak_reference_queue_.IsEmpty());
  DCHECK(finalizer_reference_queue_.IsEmpty());
  DCHECK(phantom_reference_queue_.IsEmpty());
  timings.EndSplit();
}
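
// Summary of the phases above, since the ordering is load-bearing: soft
// references may first be preserved (marked through), then soft and weak
// references whose referents are still white are cleared; finalizable objects
// are enqueued and everything newly reachable from them is marked; finally
// the soft, weak, and phantom queues are cleared of any remaining white
// referents, leaving only cleared_references_ non-empty.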

bool Heap::IsEnqueued(mirror::Object* ref) const {
  // Since the references are stored as cyclic lists it means that once enqueued, the pending next
  // will always be non-null.
  return ref->GetFieldObject<mirror::Object*>(GetReferencePendingNextOffset(), false) != nullptr;
}

bool Heap::IsEnqueuable(const mirror::Object* ref) const {
  DCHECK(ref != nullptr);
  const mirror::Object* queue =
      ref->GetFieldObject<mirror::Object*>(GetReferenceQueueOffset(), false);
  const mirror::Object* queue_next =
      ref->GetFieldObject<mirror::Object*>(GetReferenceQueueNextOffset(), false);
  return queue != nullptr && queue_next == nullptr;
}

// Process the "referent" field in a java.lang.ref.Reference. If the referent has not yet been
// marked, put it on the appropriate list in the heap for later processing.
void Heap::DelayReferenceReferent(mirror::Class* klass, mirror::Object* obj,
                                  RootVisitor mark_visitor, void* arg) {
  DCHECK(klass != nullptr);
  DCHECK(klass->IsReferenceClass());
  DCHECK(obj != nullptr);
  mirror::Object* referent = GetReferenceReferent(obj);
  if (referent != nullptr) {
    mirror::Object* forward_address = mark_visitor(referent, arg);
    // Null means that the object is not currently marked.
    if (forward_address == nullptr) {
      Thread* self = Thread::Current();
      // TODO: Remove these locks, and use atomic stacks for storing references?
      // We need to check that the references haven't already been enqueued since we can end up
      // scanning the same reference multiple times due to dirty cards.
      if (klass->IsSoftReferenceClass()) {
        soft_reference_queue_.AtomicEnqueueIfNotEnqueued(self, obj);
      } else if (klass->IsWeakReferenceClass()) {
        weak_reference_queue_.AtomicEnqueueIfNotEnqueued(self, obj);
      } else if (klass->IsFinalizerReferenceClass()) {
        finalizer_reference_queue_.AtomicEnqueueIfNotEnqueued(self, obj);
      } else if (klass->IsPhantomReferenceClass()) {
        phantom_reference_queue_.AtomicEnqueueIfNotEnqueued(self, obj);
      } else {
        LOG(FATAL) << "Invalid reference type " << PrettyClass(klass) << " " << std::hex
                   << klass->GetAccessFlags();
      }
    } else if (referent != forward_address) {
      // Referent is already marked and we need to update it.
      SetReferenceReferent(obj, forward_address);
    }
  }
}
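
// Example: if mark_visitor returns null for a SoftReference's referent, the
// Reference object itself is parked on soft_reference_queue_ for
// ProcessReferences() to resolve later; if a moving collector returns the
// referent's forwarding address instead, the Reference is fixed up in place
// via SetReferenceReferent().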

space::ImageSpace* Heap::GetImageSpace() const {
  for (const auto& space : continuous_spaces_) {
    if (space->IsImageSpace()) {
      return space->AsImageSpace();
    }
  }
  return NULL;
}

static void MSpaceChunkCallback(void* start, void* end, size_t used_bytes, void* arg) {
  size_t chunk_size = reinterpret_cast<uint8_t*>(end) - reinterpret_cast<uint8_t*>(start);
  if (used_bytes < chunk_size) {
    size_t chunk_free_bytes = chunk_size - used_bytes;
    size_t& max_contiguous_allocation = *reinterpret_cast<size_t*>(arg);
    max_contiguous_allocation = std::max(max_contiguous_allocation, chunk_free_bytes);
  }
}

void Heap::ThrowOutOfMemoryError(Thread* self, size_t byte_count, bool large_object_allocation) {
  std::ostringstream oss;
  int64_t total_bytes_free = GetFreeMemory();
  oss << "Failed to allocate a " << byte_count << " byte allocation with " << total_bytes_free
      << " free bytes";
  // If the allocation failed due to fragmentation, print out the largest continuous allocation.
  if (!large_object_allocation && total_bytes_free >= byte_count) {
    size_t max_contiguous_allocation = 0;
    for (const auto& space : continuous_spaces_) {
      if (space->IsMallocSpace()) {
        // To allow the Walk/InspectAll() to exclusively-lock the mutator
        // lock, temporarily release the shared access to the mutator
        // lock here by transitioning to the suspended state.
        Locks::mutator_lock_->AssertSharedHeld(self);
        self->TransitionFromRunnableToSuspended(kSuspended);
        space->AsMallocSpace()->Walk(MSpaceChunkCallback, &max_contiguous_allocation);
        self->TransitionFromSuspendedToRunnable();
        Locks::mutator_lock_->AssertSharedHeld(self);
      }
    }
    oss << "; failed due to fragmentation (largest possible contiguous allocation "
        << max_contiguous_allocation << " bytes)";
  }
  self->ThrowOutOfMemoryError(oss.str().c_str());
}

void Heap::Trim() {
  uint64_t start_ns = NanoTime();
  // Trim the managed spaces.
  uint64_t total_alloc_space_allocated = 0;
  uint64_t total_alloc_space_size = 0;
  uint64_t managed_reclaimed = 0;
  for (const auto& space : continuous_spaces_) {
    if (space->IsMallocSpace() && !space->IsZygoteSpace()) {
      gc::space::MallocSpace* alloc_space = space->AsMallocSpace();
      total_alloc_space_size += alloc_space->Size();
      managed_reclaimed += alloc_space->Trim();
    }
  }
  total_alloc_space_allocated = GetBytesAllocated() - large_object_space_->GetBytesAllocated() -
      bump_pointer_space_->GetBytesAllocated();
  const float managed_utilization = static_cast<float>(total_alloc_space_allocated) /
      static_cast<float>(total_alloc_space_size);
  uint64_t gc_heap_end_ns = NanoTime();
  // Trim the native heap.
  dlmalloc_trim(0);
  size_t native_reclaimed = 0;
  dlmalloc_inspect_all(DlmallocMadviseCallback, &native_reclaimed);
  uint64_t end_ns = NanoTime();
  VLOG(heap) << "Heap trim of managed (duration=" << PrettyDuration(gc_heap_end_ns - start_ns)
      << ", advised=" << PrettySize(managed_reclaimed) << ") and native (duration="
      << PrettyDuration(end_ns - gc_heap_end_ns) << ", advised=" << PrettySize(native_reclaimed)
      << ") heaps. Managed heap utilization of " << static_cast<int>(100 * managed_utilization)
      << "%.";
}
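
// Worked example (illustrative numbers): with 24 MB allocated across 32 MB of
// malloc space footprint, managed_utilization is 0.75 and the VLOG line above
// reports "Managed heap utilization of 75%".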

bool Heap::IsValidObjectAddress(const mirror::Object* obj) const {
  // Note: we deliberately don't take the lock here, and mustn't test anything that would require
  // taking the lock.
  if (obj == nullptr) {
    return true;
  }
  return IsAligned<kObjectAlignment>(obj) && IsHeapAddress(obj);
}

bool Heap::IsHeapAddress(const mirror::Object* obj) const {
  if (kMovingCollector && bump_pointer_space_->HasAddress(obj)) {
    return true;
  }
  // TODO: This probably doesn't work for large objects.
  return FindSpaceFromObject(obj, true) != nullptr;
}

bool Heap::IsLiveObjectLocked(const mirror::Object* obj, bool search_allocation_stack,
                              bool search_live_stack, bool sorted) {
  // Locks::heap_bitmap_lock_->AssertReaderHeld(Thread::Current());
  if (obj == nullptr || UNLIKELY(!IsAligned<kObjectAlignment>(obj))) {
    return false;
  }
  space::ContinuousSpace* c_space = FindContinuousSpaceFromObject(obj, true);
  space::DiscontinuousSpace* d_space = NULL;
  if (c_space != NULL) {
    if (c_space->GetLiveBitmap()->Test(obj)) {
      return true;
    }
  } else if (bump_pointer_space_->Contains(obj) || temp_space_->Contains(obj)) {
    return true;
  } else {
    d_space = FindDiscontinuousSpaceFromObject(obj, true);
    if (d_space != NULL) {
      if (d_space->GetLiveObjects()->Test(obj)) {
        return true;
      }
    }
  }
  // This is covering the allocation/live stack swapping that is done without mutators suspended.
  for (size_t i = 0; i < (sorted ? 1 : 5); ++i) {
    if (i > 0) {
      NanoSleep(MsToNs(10));
    }
    if (search_allocation_stack) {
      if (sorted) {
        if (allocation_stack_->ContainsSorted(const_cast<mirror::Object*>(obj))) {
          return true;
        }
      } else if (allocation_stack_->Contains(const_cast<mirror::Object*>(obj))) {
        return true;
      }
    }

    if (search_live_stack) {
      if (sorted) {
        if (live_stack_->ContainsSorted(const_cast<mirror::Object*>(obj))) {
          return true;
        }
      } else if (live_stack_->Contains(const_cast<mirror::Object*>(obj))) {
        return true;
      }
    }
  }
  // We need to check the bitmaps again since there is a race where we mark something as live and
  // then clear the stack containing it.
  if (c_space != NULL) {
    if (c_space->GetLiveBitmap()->Test(obj)) {
      return true;
    }
  } else {
    d_space = FindDiscontinuousSpaceFromObject(obj, true);
    if (d_space != NULL && d_space->GetLiveObjects()->Test(obj)) {
      return true;
    }
  }
  return false;
}

void Heap::VerifyObjectImpl(const mirror::Object* obj) {
  if (Thread::Current() == NULL ||
      Runtime::Current()->GetThreadList()->GetLockOwner() == Thread::Current()->GetTid()) {
    return;
  }
  VerifyObjectBody(obj);
}

void Heap::DumpSpaces(std::ostream& stream) {
  for (const auto& space : continuous_spaces_) {
    accounting::SpaceBitmap* live_bitmap = space->GetLiveBitmap();
    accounting::SpaceBitmap* mark_bitmap = space->GetMarkBitmap();
    stream << space << " " << *space << "\n";
    if (live_bitmap != nullptr) {
      stream << live_bitmap << " " << *live_bitmap << "\n";
    }
    if (mark_bitmap != nullptr) {
      stream << mark_bitmap << " " << *mark_bitmap << "\n";
    }
  }
  for (const auto& space : discontinuous_spaces_) {
    stream << space << " " << *space << "\n";
  }
}

void Heap::VerifyObjectBody(const mirror::Object* obj) {
  CHECK(IsAligned<kObjectAlignment>(obj)) << "Object isn't aligned: " << obj;
  // Ignore early dawn of the universe verifications.
  if (UNLIKELY(static_cast<size_t>(num_bytes_allocated_.load()) < 10 * KB)) {
    return;
  }
  const byte* raw_addr = reinterpret_cast<const byte*>(obj) +
      mirror::Object::ClassOffset().Int32Value();
  const mirror::Class* c = *reinterpret_cast<mirror::Class* const *>(raw_addr);
  if (UNLIKELY(c == NULL)) {
    LOG(FATAL) << "Null class in object: " << obj;
  } else if (UNLIKELY(!IsAligned<kObjectAlignment>(c))) {
    LOG(FATAL) << "Class isn't aligned: " << c << " in object: " << obj;
  }
  // Check obj.getClass().getClass() == obj.getClass().getClass().getClass().
  // Note: we don't use the accessors here as they have internal sanity checks
  // that we don't want to run.
  raw_addr = reinterpret_cast<const byte*>(c) + mirror::Object::ClassOffset().Int32Value();
  const mirror::Class* c_c = *reinterpret_cast<mirror::Class* const *>(raw_addr);
  raw_addr = reinterpret_cast<const byte*>(c_c) + mirror::Object::ClassOffset().Int32Value();
  const mirror::Class* c_c_c = *reinterpret_cast<mirror::Class* const *>(raw_addr);
  CHECK_EQ(c_c, c_c_c);

  if (verify_object_mode_ > kVerifyAllFast) {
    // TODO: the bitmap tests below are racy if VerifyObjectBody is called without the
    // heap_bitmap_lock_.
    if (!IsLiveObjectLocked(obj)) {
      DumpSpaces();
      LOG(FATAL) << "Object is dead: " << obj;
    }
    if (!IsLiveObjectLocked(c)) {
      LOG(FATAL) << "Class of object is dead: " << c << " in object: " << obj;
    }
  }
}

void Heap::VerificationCallback(mirror::Object* obj, void* arg) {
  DCHECK(obj != NULL);
  reinterpret_cast<Heap*>(arg)->VerifyObjectBody(obj);
}

void Heap::VerifyHeap() {
  ReaderMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
  GetLiveBitmap()->Walk(Heap::VerificationCallback, this);
}

void Heap::RecordFree(size_t freed_objects, size_t freed_bytes) {
  DCHECK_LE(freed_bytes, static_cast<size_t>(num_bytes_allocated_));
  num_bytes_allocated_.fetch_sub(freed_bytes);

  if (Runtime::Current()->HasStatsEnabled()) {
    RuntimeStats* thread_stats = Thread::Current()->GetStats();
    thread_stats->freed_objects += freed_objects;
    thread_stats->freed_bytes += freed_bytes;

    // TODO: Do this concurrently.
    RuntimeStats* global_stats = Runtime::Current()->GetStats();
    global_stats->freed_objects += freed_objects;
    global_stats->freed_bytes += freed_bytes;
  }
}

mirror::Object* Heap::AllocateInternalWithGc(Thread* self, AllocatorType allocator,
                                             size_t alloc_size, size_t* bytes_allocated) {
  mirror::Object* ptr = nullptr;
  // The allocation failed. If the GC is running, block until it completes, and then retry the
  // allocation.
  collector::GcType last_gc = WaitForGcToComplete(self);
  if (last_gc != collector::kGcTypeNone) {
    // A GC was in progress and we blocked, retry allocation now that memory has been freed.
    ptr = TryToAllocate<true>(self, allocator, alloc_size, false, bytes_allocated);
  }

  // Loop through our different GC types and try to GC until we get enough free memory.
  for (collector::GcType gc_type : gc_plan_) {
    if (ptr != nullptr) {
      break;
    }
    // Attempt to run the collector, if we succeed, re-try the allocation.
    if (CollectGarbageInternal(gc_type, kGcCauseForAlloc, false) != collector::kGcTypeNone) {
      // Did we free sufficient memory for the allocation to succeed?
      ptr = TryToAllocate<true>(self, allocator, alloc_size, false, bytes_allocated);
    }
  }
  // Allocations have failed after GCs; this is an exceptional state.
  if (ptr == nullptr) {
    // Try harder, growing the heap if necessary.
    ptr = TryToAllocate<true>(self, allocator, alloc_size, true, bytes_allocated);
  }
  if (ptr == nullptr) {
    // Most allocations should have succeeded by now, so the heap is really full, really
    // fragmented, or the requested size is really big. Do another GC, collecting SoftReferences
    // this time. The VM spec requires that all SoftReferences have been collected and cleared
    // before throwing OOME.
    VLOG(gc) << "Forcing collection of SoftReferences for " << PrettySize(alloc_size)
             << " allocation";
    // TODO: Run finalization, but this may cause more allocations to occur.
    // We don't need a WaitForGcToComplete here either.
    DCHECK(!gc_plan_.empty());
    CollectGarbageInternal(gc_plan_.back(), kGcCauseForAlloc, true);
    ptr = TryToAllocate<true>(self, allocator, alloc_size, true, bytes_allocated);
    if (ptr == nullptr) {
      ThrowOutOfMemoryError(self, alloc_size, false);
    }
  }
  return ptr;
}
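
// Example escalation, assuming the mark-sweep gc_plan_ built in the
// constructor (sticky -> partial -> full): a failed allocation first waits
// for any in-flight GC and retries, then walks the plan running successively
// heavier collections, then retries once more with heap growth allowed, and
// only then runs a final full GC that clears SoftReferences before throwing
// OutOfMemoryError, as the VM spec requires.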

void Heap::SetTargetHeapUtilization(float target) {
  DCHECK_GT(target, 0.0f);  // asserted in Java code
  DCHECK_LT(target, 1.0f);
  target_utilization_ = target;
}

size_t Heap::GetObjectsAllocated() const {
  size_t total = 0;
  for (space::AllocSpace* space : alloc_spaces_) {
    total += space->GetObjectsAllocated();
  }
  return total;
}

size_t Heap::GetObjectsAllocatedEver() const {
  size_t total = 0;
  for (space::AllocSpace* space : alloc_spaces_) {
    total += space->GetTotalObjectsAllocated();
  }
  return total;
}

size_t Heap::GetBytesAllocatedEver() const {
  size_t total = 0;
  for (space::AllocSpace* space : alloc_spaces_) {
    total += space->GetTotalBytesAllocated();
  }
  return total;
}

class InstanceCounter {
 public:
  InstanceCounter(const std::vector<mirror::Class*>& classes, bool use_is_assignable_from,
                  uint64_t* counts)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      : classes_(classes), use_is_assignable_from_(use_is_assignable_from), counts_(counts) {
  }

  void operator()(const mirror::Object* o) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    for (size_t i = 0; i < classes_.size(); ++i) {
      const mirror::Class* instance_class = o->GetClass();
      if (use_is_assignable_from_) {
        if (instance_class != NULL && classes_[i]->IsAssignableFrom(instance_class)) {
          ++counts_[i];
        }
      } else {
        if (instance_class == classes_[i]) {
          ++counts_[i];
        }
      }
    }
  }

 private:
  const std::vector<mirror::Class*>& classes_;
  bool use_is_assignable_from_;
  uint64_t* const counts_;

  DISALLOW_COPY_AND_ASSIGN(InstanceCounter);
};

void Heap::CountInstances(const std::vector<mirror::Class*>& classes, bool use_is_assignable_from,
                          uint64_t* counts) {
  // We only want reachable instances, so do a GC. This also ensures that the alloc stack
  // is empty, so the live bitmap is the only place we need to look.
  Thread* self = Thread::Current();
  self->TransitionFromRunnableToSuspended(kNative);
  CollectGarbage(false);
  self->TransitionFromSuspendedToRunnable();

  InstanceCounter counter(classes, use_is_assignable_from, counts);
  ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
  GetLiveBitmap()->Visit(counter);
}
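
// Example (hypothetical caller and classes): counting exact and assignable
// instances in one pass.
//
//   std::vector<mirror::Class*> classes;
//   classes.push_back(string_class);  // hypothetical mirror::Class*
//   classes.push_back(object_class);  // hypothetical mirror::Class*
//   uint64_t counts[2] = {0, 0};
//   heap->CountInstances(classes, true, counts);
//
// With use_is_assignable_from == true, counts[1] ends up counting every live
// object, since everything is assignable to java.lang.Object.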

class InstanceCollector {
 public:
  InstanceCollector(mirror::Class* c, int32_t max_count, std::vector<mirror::Object*>& instances)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      : class_(c), max_count_(max_count), instances_(instances) {
  }

  void operator()(const mirror::Object* o) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    const mirror::Class* instance_class = o->GetClass();
    if (instance_class == class_) {
      if (max_count_ == 0 || instances_.size() < max_count_) {
        instances_.push_back(const_cast<mirror::Object*>(o));
      }
    }
  }

 private:
  mirror::Class* class_;
  uint32_t max_count_;
  std::vector<mirror::Object*>& instances_;

  DISALLOW_COPY_AND_ASSIGN(InstanceCollector);
};

void Heap::GetInstances(mirror::Class* c, int32_t max_count,
                        std::vector<mirror::Object*>& instances) {
  // We only want reachable instances, so do a GC. This also ensures that the alloc stack
  // is empty, so the live bitmap is the only place we need to look.
  Thread* self = Thread::Current();
  self->TransitionFromRunnableToSuspended(kNative);
  CollectGarbage(false);
  self->TransitionFromSuspendedToRunnable();

  InstanceCollector collector(c, max_count, instances);
  ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
  GetLiveBitmap()->Visit(collector);
}

class ReferringObjectsFinder {
 public:
  ReferringObjectsFinder(mirror::Object* object, int32_t max_count,
                         std::vector<mirror::Object*>& referring_objects)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      : object_(object), max_count_(max_count), referring_objects_(referring_objects) {
  }

  // For bitmap Visit.
  // TODO: Fix lock analysis to not use NO_THREAD_SAFETY_ANALYSIS, requires support for
  // annotalysis on visitors.
  void operator()(const mirror::Object* o) const NO_THREAD_SAFETY_ANALYSIS {
    collector::MarkSweep::VisitObjectReferences(const_cast<mirror::Object*>(o), *this, true);
  }

  // For MarkSweep::VisitObjectReferences.
  void operator()(mirror::Object* referrer, mirror::Object* object,
                  const MemberOffset&, bool) const {
    if (object == object_ && (max_count_ == 0 || referring_objects_.size() < max_count_)) {
      referring_objects_.push_back(referrer);
    }
  }

 private:
  mirror::Object* object_;
  uint32_t max_count_;
  std::vector<mirror::Object*>& referring_objects_;

  DISALLOW_COPY_AND_ASSIGN(ReferringObjectsFinder);
};

void Heap::GetReferringObjects(mirror::Object* o, int32_t max_count,
                               std::vector<mirror::Object*>& referring_objects) {
  // We only want reachable instances, so do a GC. This also ensures that the alloc stack
  // is empty, so the live bitmap is the only place we need to look.
  Thread* self = Thread::Current();
  self->TransitionFromRunnableToSuspended(kNative);
  CollectGarbage(false);
  self->TransitionFromSuspendedToRunnable();

  ReferringObjectsFinder finder(o, max_count, referring_objects);
  ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
  GetLiveBitmap()->Visit(finder);
}
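
// Example (hypothetical caller): collecting up to 16 objects that reference
// 'target', much as a debugger's referring-objects query would.
//
//   std::vector<mirror::Object*> referrers;
//   heap->GetReferringObjects(target, 16, referrers);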

void Heap::CollectGarbage(bool clear_soft_references) {
  // Even if we waited for a GC we still need to do another GC since weaks allocated during the
  // last GC will not have necessarily been cleared.
  CollectGarbageInternal(collector::kGcTypeFull, kGcCauseExplicit, clear_soft_references);
}

void Heap::ChangeCollector(CollectorType collector_type) {
  switch (collector_type) {
    case kCollectorTypeSS: {
      ChangeAllocator(kAllocatorTypeBumpPointer);
      break;
    }
    case kCollectorTypeMS:
      // Fall-through.
    case kCollectorTypeCMS: {
      ChangeAllocator(kAllocatorTypeFreeList);
      break;
    }
    default: {
      LOG(FATAL) << "Unimplemented";
    }
  }
}

void Heap::PreZygoteFork() {
  static Mutex zygote_creation_lock_("zygote creation lock", kZygoteCreationLock);
  Thread* self = Thread::Current();
  MutexLock mu(self, zygote_creation_lock_);
  // Try to see if we have any Zygote spaces.
  if (have_zygote_space_) {
    return;
  }
  VLOG(heap) << "Starting PreZygoteFork";
  // Do this before acquiring the zygote creation lock so that we don't get lock order violations.
  CollectGarbageInternal(collector::kGcTypeFull, kGcCauseBackground, false);
  // Trim the pages at the end of the non moving space.
  non_moving_space_->Trim();
  non_moving_space_->GetMemMap()->Protect(PROT_READ | PROT_WRITE);
  // Change the allocator to the post zygote one.
  ChangeCollector(collector_type_);
  // TODO: Delete bump_pointer_space_ and temp_pointer_space_?
  if (semi_space_collector_ != nullptr) {
    // Create a new bump pointer space which we will compact into.
    space::BumpPointerSpace target_space("zygote bump space", non_moving_space_->End(),
                                         non_moving_space_->Limit());
    // Compact the bump pointer space to a new zygote bump pointer space.
    temp_space_->GetMemMap()->Protect(PROT_READ | PROT_WRITE);
    Compact(&target_space, bump_pointer_space_);
    CHECK_EQ(temp_space_->GetBytesAllocated(), 0U);
    total_objects_freed_ever_ += semi_space_collector_->GetFreedObjects();
    total_bytes_freed_ever_ += semi_space_collector_->GetFreedBytes();
    // Update the end and write out image.
    non_moving_space_->SetEnd(target_space.End());
    non_moving_space_->SetLimit(target_space.Limit());
    accounting::SpaceBitmap* bitmap = non_moving_space_->GetLiveBitmap();
    // Record the allocations in the bitmap.
    VLOG(heap) << "Recording zygote allocations";
    mirror::Object* obj = reinterpret_cast<mirror::Object*>(target_space.Begin());
    const mirror::Object* end = reinterpret_cast<const mirror::Object*>(target_space.End());
    while (obj < end) {
      bitmap->Set(obj);
      obj = space::BumpPointerSpace::GetNextObject(obj);
    }
  }
  // Turn the current alloc space into a zygote space and obtain the new alloc space composed of
  // the remaining available heap memory.
  space::MallocSpace* zygote_space = non_moving_space_;
  non_moving_space_ = zygote_space->CreateZygoteSpace("alloc space");
  non_moving_space_->SetFootprintLimit(non_moving_space_->Capacity());
  // Change the GC retention policy of the zygote space to only collect when full.
  zygote_space->SetGcRetentionPolicy(space::kGcRetentionPolicyFullCollect);
  AddSpace(non_moving_space_);
  have_zygote_space_ = true;
  zygote_space->InvalidateAllocator();
  // Create the zygote space mod union table.
  accounting::ModUnionTable* mod_union_table =
      new accounting::ModUnionTableCardCache("zygote space mod-union table", this, zygote_space);
  CHECK(mod_union_table != nullptr) << "Failed to create zygote space mod-union table";
  AddModUnionTable(mod_union_table);
  // Reset the cumulative loggers since we now have a few additional timing phases.
  for (const auto& collector : garbage_collectors_) {
    collector->ResetCumulativeStatistics();
  }
}

void Heap::FlushAllocStack() {
  MarkAllocStack(non_moving_space_->GetLiveBitmap(), large_object_space_->GetLiveObjects(),
                 allocation_stack_.get());
  allocation_stack_->Reset();
}

void Heap::MarkAllocStack(accounting::SpaceBitmap* bitmap, accounting::SpaceSetMap* large_objects,
                          accounting::ObjectStack* stack) {
  mirror::Object** limit = stack->End();
  for (mirror::Object** it = stack->Begin(); it != limit; ++it) {
    const mirror::Object* obj = *it;
    DCHECK(obj != NULL);
    if (LIKELY(bitmap->HasAddress(obj))) {
      bitmap->Set(obj);
    } else {
      large_objects->Set(obj);
    }
  }
}

const char* PrettyCause(GcCause cause) {
  switch (cause) {
    case kGcCauseForAlloc: return "Alloc";
    case kGcCauseBackground: return "Background";
    case kGcCauseExplicit: return "Explicit";
    default:
      LOG(FATAL) << "Unreachable";
  }
  return "";
}

void Heap::SwapSemiSpaces() {
  // Swap the spaces so we allocate into the space which we just evacuated.
  std::swap(bump_pointer_space_, temp_space_);
}

void Heap::Compact(space::ContinuousMemMapAllocSpace* target_space,
                   space::ContinuousMemMapAllocSpace* source_space) {
  CHECK(kMovingCollector);
  CHECK_NE(target_space, source_space) << "In-place compaction currently unsupported";
  if (target_space != source_space) {
    semi_space_collector_->SetFromSpace(source_space);
    semi_space_collector_->SetToSpace(target_space);
    semi_space_collector_->Run(false);
  }
}
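
// Sketch of the semi-space round trip (names from this file): allocation goes
// into bump_pointer_space_; a moving GC evacuates it by running
//   semi_space_collector_->SetFromSpace(bump_pointer_space_);
//   semi_space_collector_->SetToSpace(temp_space_);
//   semi_space_collector_->Run(false);
// after which SwapSemiSpaces() exchanges the two pointers so that new
// allocations land in the region that was just emptied.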

collector::GcType Heap::CollectGarbageInternal(collector::GcType gc_type, GcCause gc_cause,
                                               bool clear_soft_references) {
  Thread* self = Thread::Current();
  Runtime* runtime = Runtime::Current();
  // If the heap can't run the GC, silently fail and return that no GC was run.
  switch (gc_type) {
    case collector::kGcTypeSticky: {
      const size_t alloc_space_size = non_moving_space_->Size();
      if (alloc_space_size < min_alloc_space_size_for_sticky_gc_ ||
          non_moving_space_->Capacity() - alloc_space_size < min_remaining_space_for_sticky_gc_) {
        return collector::kGcTypeNone;
      }
      break;
    }
    case collector::kGcTypePartial: {
      if (!have_zygote_space_) {
        return collector::kGcTypeNone;
      }
      break;
    }
    default: {
      // Other GC types don't have any special cases which makes them not runnable. The main case
      // here is full GC.
    }
  }
  ScopedThreadStateChange tsc(self, kWaitingPerformingGc);
  Locks::mutator_lock_->AssertNotHeld(self);
  if (self->IsHandlingStackOverflow()) {
    LOG(WARNING) << "Performing GC on a thread that is handling a stack overflow.";
  }
  {
    gc_complete_lock_->AssertNotHeld(self);
    MutexLock mu(self, *gc_complete_lock_);
    // Ensure there is only one GC at a time.
    WaitForGcToCompleteLocked(self);
    // TODO: if another thread beat this one to do the GC, perhaps we should just return here?
    // Not doing at the moment to ensure soft references are cleared.
    // GC can be disabled if someone has used GetPrimitiveArrayCritical.
    if (gc_disable_count_ != 0) {
      LOG(WARNING) << "Skipping GC due to disable count " << gc_disable_count_;
      return collector::kGcTypeNone;
    }
    is_gc_running_ = true;
  }
  if (gc_cause == kGcCauseForAlloc && runtime->HasStatsEnabled()) {
    ++runtime->GetStats()->gc_for_alloc_count;
    ++self->GetStats()->gc_for_alloc_count;
  }
  uint64_t gc_start_time_ns = NanoTime();
  uint64_t gc_start_size = GetBytesAllocated();
  // Approximate allocation rate in bytes / second.
  uint64_t ms_delta = NsToMs(gc_start_time_ns - last_gc_time_ns_);
  // Back to back GCs can cause 0 ms of wait time in between GC invocations.
  if (LIKELY(ms_delta != 0)) {
    allocation_rate_ = ((gc_start_size - last_gc_size_) * 1000) / ms_delta;
    VLOG(heap) << "Allocation rate: " << PrettySize(allocation_rate_) << "/s";
  }

  DCHECK_LT(gc_type, collector::kGcTypeMax);
  DCHECK_NE(gc_type, collector::kGcTypeNone);

  collector::GarbageCollector* collector = nullptr;
  // TODO: Clean this up.
  if (current_allocator_ == kAllocatorTypeBumpPointer) {
    gc_type = semi_space_collector_->GetGcType();
    CHECK_EQ(temp_space_->GetObjectsAllocated(), 0U);
    semi_space_collector_->SetFromSpace(bump_pointer_space_);
    semi_space_collector_->SetToSpace(temp_space_);
    mprotect(temp_space_->Begin(), temp_space_->Capacity(), PROT_READ | PROT_WRITE);
    collector = semi_space_collector_;
    gc_type = collector::kGcTypeFull;
  } else if (current_allocator_ == kAllocatorTypeFreeList) {
    for (const auto& cur_collector : garbage_collectors_) {
      if (cur_collector->IsConcurrent() == concurrent_gc_ &&
          cur_collector->GetGcType() == gc_type) {
        collector = cur_collector;
        break;
      }
    }
  } else {
    LOG(FATAL) << "Invalid current allocator " << current_allocator_;
  }
  CHECK(collector != NULL)
      << "Could not find garbage collector with concurrent=" << concurrent_gc_
      << " and type=" << gc_type;

  ATRACE_BEGIN(StringPrintf("%s %s GC", PrettyCause(gc_cause), collector->GetName()).c_str());

  collector->Run(clear_soft_references);
  total_objects_freed_ever_ += collector->GetFreedObjects();
  total_bytes_freed_ever_ += collector->GetFreedBytes();

  // Enqueue cleared references.
  EnqueueClearedReferences();

  // Grow the heap so that we know when to perform the next GC.
  GrowForUtilization(gc_type, collector->GetDurationNs());

  if (CareAboutPauseTimes()) {
    const size_t duration = collector->GetDurationNs();
    std::vector<uint64_t> pauses = collector->GetPauseTimes();
    // GC for alloc pauses the allocating thread, so consider it as a pause.
    bool was_slow = duration > long_gc_log_threshold_ ||
        (gc_cause == kGcCauseForAlloc && duration > long_pause_log_threshold_);
    if (!was_slow) {
      for (uint64_t pause : pauses) {
        was_slow = was_slow || pause > long_pause_log_threshold_;
      }
    }
    if (was_slow) {
      const size_t percent_free = GetPercentFree();
      const size_t current_heap_size = GetBytesAllocated();
      const size_t total_memory = GetTotalMemory();
      std::ostringstream pause_string;
      for (size_t i = 0; i < pauses.size(); ++i) {
        pause_string << PrettyDuration((pauses[i] / 1000) * 1000)
                     << ((i != pauses.size() - 1) ? ", " : "");
      }
      LOG(INFO) << gc_cause << " " << collector->GetName()
                << " GC freed " << collector->GetFreedObjects() << "("
                << PrettySize(collector->GetFreedBytes()) << ") AllocSpace objects, "
                << collector->GetFreedLargeObjects() << "("
                << PrettySize(collector->GetFreedLargeObjectBytes()) << ") LOS objects, "
                << percent_free << "% free, " << PrettySize(current_heap_size) << "/"
                << PrettySize(total_memory) << ", " << "paused " << pause_string.str()
                << " total " << PrettyDuration((duration / 1000) * 1000);
      if (VLOG_IS_ON(heap)) {
        LOG(INFO) << Dumpable<TimingLogger>(collector->GetTimings());
      }
    }
  }

  {
    MutexLock mu(self, *gc_complete_lock_);
    is_gc_running_ = false;
    last_gc_type_ = gc_type;
    // Wake anyone who may have been waiting for the GC to complete.
    gc_complete_cond_->Broadcast(self);
  }

  ATRACE_END();

  // Inform DDMS that a GC completed.
  Dbg::GcDidFinish();
  return gc_type;
}
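
// Worked example (illustrative thresholds): with long_gc_log_threshold_ of
// 100 ms and long_pause_log_threshold_ of 5 ms, a background GC lasting 80 ms
// with a single 7 ms pause is still logged, because the per-pause check above
// trips even though the total-duration check does not.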
1401 LOG(ERROR) << "!!!!!!!!!!!!!!Heap corruption detected!!!!!!!!!!!!!!!!!!!";
1402 failed_ = true;
1403 }
1404 if (obj != nullptr) {
1405 byte* card_addr = card_table->CardFromAddr(obj);
1406 LOG(ERROR) << "Object " << obj << " references dead object " << ref << " at offset "
1407 << offset << "\n card value = " << static_cast<int>(*card_addr);
1408 if (heap_->IsValidObjectAddress(obj->GetClass())) {
1409 LOG(ERROR) << "Obj type " << PrettyTypeOf(obj);
1410 } else {
1411 LOG(ERROR) << "Object " << obj << " class(" << obj->GetClass() << ") not a heap address";
1412 }
1413 
1414 // Attempt to find the class among the recently freed objects.
1415 space::ContinuousSpace* ref_space = heap_->FindContinuousSpaceFromObject(ref, true);
1416 if (ref_space != nullptr && ref_space->IsMallocSpace()) {
1417 space::MallocSpace* space = ref_space->AsMallocSpace();
1418 mirror::Class* ref_class = space->FindRecentFreedObject(ref);
1419 if (ref_class != nullptr) {
1420 LOG(ERROR) << "Reference " << ref << " found as a recently freed object with class "
1421 << PrettyClass(ref_class);
1422 } else {
1423 LOG(ERROR) << "Reference " << ref << " not found as a recently freed object";
1424 }
1425 }
1426 
1427 if (ref->GetClass() != nullptr && heap_->IsValidObjectAddress(ref->GetClass()) &&
1428 ref->GetClass()->IsClass()) {
1429 LOG(ERROR) << "Ref type " << PrettyTypeOf(ref);
1430 } else {
1431 LOG(ERROR) << "Ref " << ref << " class(" << ref->GetClass()
1432 << ") is not a valid heap address";
1433 }
1434 
1435 card_table->CheckAddrIsInCardTable(reinterpret_cast<const byte*>(obj));
1436 void* cover_begin = card_table->AddrFromCard(card_addr);
1437 void* cover_end = reinterpret_cast<void*>(reinterpret_cast<size_t>(cover_begin) +
1438 accounting::CardTable::kCardSize);
1439 LOG(ERROR) << "Card " << reinterpret_cast<void*>(card_addr) << " covers " << cover_begin
1440 << "-" << cover_end;
1441 accounting::SpaceBitmap* bitmap = heap_->GetLiveBitmap()->GetContinuousSpaceBitmap(obj);
1442 
1443 // Print out how the object is live.
1444 if (bitmap != NULL && bitmap->Test(obj)) {
1445 LOG(ERROR) << "Object " << obj << " found in live bitmap";
1446 }
1447 if (alloc_stack->Contains(const_cast<mirror::Object*>(obj))) {
1448 LOG(ERROR) << "Object " << obj << " found in allocation stack";
1449 }
1450 if (live_stack->Contains(const_cast<mirror::Object*>(obj))) {
1451 LOG(ERROR) << "Object " << obj << " found in live stack";
1452 }
1453 if (alloc_stack->Contains(const_cast<mirror::Object*>(ref))) {
1454 LOG(ERROR) << "Ref " << ref << " found in allocation stack";
1455 }
1456 if (live_stack->Contains(const_cast<mirror::Object*>(ref))) {
1457 LOG(ERROR) << "Ref " << ref << " found in live stack";
1458 }
1459 // Attempt to see if the card table missed the reference.
1460 ScanVisitor scan_visitor;
1461 byte* byte_cover_begin = reinterpret_cast<byte*>(card_table->AddrFromCard(card_addr));
1462 card_table->Scan(bitmap, byte_cover_begin,
1463 byte_cover_begin + accounting::CardTable::kCardSize, scan_visitor);
1464 
1465 // Search to see if any of the roots reference our object.
1466 void* arg = const_cast<void*>(reinterpret_cast<const void*>(obj));
1467 Runtime::Current()->VisitRoots(&RootMatchesObjectVisitor, arg, false, false);
1468 
1469 // Search to see if any of the roots reference our reference.
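// RootMatchesObjectVisitor returns each root unchanged, so these root visits only log any
// matches and never modify the root set.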
1470 arg = const_cast<void*>(reinterpret_cast<const void*>(ref));
1471 Runtime::Current()->VisitRoots(&RootMatchesObjectVisitor, arg, false, false);
1472 } else {
1473 LOG(ERROR) << "Root references dead object " << ref << "\nRef type " << PrettyTypeOf(ref);
1474 }
1475 }
1476 }
1477 
1478 bool IsLive(const mirror::Object* obj) const NO_THREAD_SAFETY_ANALYSIS {
1479 return heap_->IsLiveObjectLocked(obj, true, false, true);
1480 }
1481 
1482 static mirror::Object* VerifyRoots(mirror::Object* root, void* arg) {
1483 VerifyReferenceVisitor* visitor = reinterpret_cast<VerifyReferenceVisitor*>(arg);
1484 (*visitor)(nullptr, root, MemberOffset(0), true);
1485 return root;
1486 }
1487 
1488 private:
1489 Heap* const heap_;
1490 mutable bool failed_;
1491};
1492 
1493// Verify all references within an object, for use with HeapBitmap::Visit.
1494class VerifyObjectVisitor {
1495 public:
1496 explicit VerifyObjectVisitor(Heap* heap) : heap_(heap), failed_(false) {}
1497 
1498 void operator()(mirror::Object* obj) const
1499 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
1500 // Note: we verify the references in obj but not obj itself, since obj must be live;
1501 // otherwise we would not have found it in the live bitmap.
1502 VerifyReferenceVisitor visitor(heap_);
1503 // The class doesn't count as a reference but we should verify it anyway.
1504 collector::MarkSweep::VisitObjectReferences(obj, visitor, true);
1505 if (obj->GetClass()->IsReferenceClass()) {
1506 visitor(obj, heap_->GetReferenceReferent(obj), MemberOffset(0), false);
1507 }
1508 failed_ = failed_ || visitor.Failed();
1509 }
1510 
1511 static void VisitCallback(mirror::Object* obj, void* arg)
1512 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
1513 VerifyObjectVisitor* visitor = reinterpret_cast<VerifyObjectVisitor*>(arg);
1514 visitor->operator()(obj);
1515 }
1516 
1517 bool Failed() const {
1518 return failed_;
1519 }
1520 
1521 private:
1522 Heap* const heap_;
1523 mutable bool failed_;
1524};
1525 
1526// Must do this with mutators suspended since we are directly accessing the allocation stacks.
1527bool Heap::VerifyHeapReferences() {
1528 Locks::mutator_lock_->AssertExclusiveHeld(Thread::Current());
1529 // Sort the allocation and live stacks so that we can efficiently binary search them.
1530 allocation_stack_->Sort();
1531 live_stack_->Sort();
1532 VerifyObjectVisitor visitor(this);
1533 // Verify objects in the allocation stack since these will be objects which were:
1534 // 1. Allocated prior to the GC (pre GC verification).
1535 // 2. Allocated during the GC (pre sweep GC verification).
1536 // We don't want to verify the objects in the live stack since they themselves may be
1537 // pointing to dead objects if they are not reachable.
1538 VisitObjects(VerifyObjectVisitor::VisitCallback, &visitor);
1539 // Verify the roots:
1540 Runtime::Current()->VisitRoots(VerifyReferenceVisitor::VerifyRoots, &visitor, false, false);
1541 if (visitor.Failed()) {
1542 // Dump mod-union tables.
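// The mod-union tables record references into the alloc space from outside of it (for example
// from the image and zygote spaces; see the clear-cards names in ProcessCards below), so dumping
// them may show where a stale reference was remembered.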
1543 for (const auto& table_pair : mod_union_tables_) {
1544 accounting::ModUnionTable* mod_union_table = table_pair.second;
1545 mod_union_table->Dump(LOG(ERROR) << mod_union_table->GetName() << ": ");
1546 }
1547 DumpSpaces();
1548 return false;
1549 }
1550 return true;
1551}
1552 
1553class VerifyReferenceCardVisitor {
1554 public:
1555 VerifyReferenceCardVisitor(Heap* heap, bool* failed)
1556 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_,
1557 Locks::heap_bitmap_lock_)
1558 : heap_(heap), failed_(failed) {
1559 }
1560 
1561 // TODO: Fix lock analysis to not use NO_THREAD_SAFETY_ANALYSIS, requires support for
1562 // annotalysis on visitors.
1563 void operator()(const mirror::Object* obj, const mirror::Object* ref, const MemberOffset& offset,
1564 bool is_static) const NO_THREAD_SAFETY_ANALYSIS {
1565 // Filter out class references since changing an object's class does not mark the card as dirty.
1566 // Also handles large objects, since the only reference they hold is a class reference.
1567 if (ref != NULL && !ref->IsClass()) {
1568 accounting::CardTable* card_table = heap_->GetCardTable();
1569 // If the object references something in the live stack other than its class, then its
1570 // card must be dirty; otherwise a card mark has been missed.
1571 if (!card_table->AddrIsInCardTable(obj)) {
1572 LOG(ERROR) << "Object " << obj << " is not in the address range of the card table";
1573 *failed_ = true;
1574 } else if (!card_table->IsDirty(obj)) {
1575 // Card should be either kCardDirty if it got re-dirtied after we aged it, or
1576 // kCardDirty - 1 if it didn't get touched since we aged it.
1577 accounting::ObjectStack* live_stack = heap_->live_stack_.get();
1578 if (live_stack->ContainsSorted(const_cast<mirror::Object*>(ref))) {
1579 if (live_stack->ContainsSorted(const_cast<mirror::Object*>(obj))) {
1580 LOG(ERROR) << "Object " << obj << " found in live stack";
1581 }
1582 if (heap_->GetLiveBitmap()->Test(obj)) {
1583 LOG(ERROR) << "Object " << obj << " found in live bitmap";
1584 }
1585 LOG(ERROR) << "Object " << obj << " " << PrettyTypeOf(obj)
1586 << " references " << ref << " " << PrettyTypeOf(ref) << " in live stack";
1587 
1588 // Print which field of the object is dead.
1589 if (!obj->IsObjectArray()) {
1590 const mirror::Class* klass = is_static ? obj->AsClass() : obj->GetClass();
1591 CHECK(klass != NULL);
1592 const mirror::ObjectArray<mirror::ArtField>* fields = is_static ? klass->GetSFields()
1593 : klass->GetIFields();
1594 CHECK(fields != NULL);
1595 for (int32_t i = 0; i < fields->GetLength(); ++i) {
1596 const mirror::ArtField* cur = fields->Get(i);
1597 if (cur->GetOffset().Int32Value() == offset.Int32Value()) {
1598 LOG(ERROR) << (is_static ? "Static " : "") << "field in the live stack is "
1599 << PrettyField(cur);
1600 break;
1601 }
1602 }
1603 } else {
1604 const mirror::ObjectArray<mirror::Object>* object_array =
1605 obj->AsObjectArray<mirror::Object>();
1606 for (int32_t i = 0; i < object_array->GetLength(); ++i) {
1607 if (object_array->Get(i) == ref) {
1608 LOG(ERROR) << (is_static ? "Static " : "") << "obj[" << i << "] = ref";
1609 }
1610 }
1611 }
1612 
1613 *failed_ = true;
1614 }
1615 }
1616 }
1617 }
1618 
1619 private:
1620 Heap* const heap_;
1621 bool* const failed_;
1622};
1623 
1624class VerifyLiveStackReferences {
1625 public:
1626 explicit VerifyLiveStackReferences(Heap* heap)
1627 : heap_(heap),
1628 failed_(false) {}
1629 
1630 void operator()(mirror::Object* obj) const
1631 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
1632 VerifyReferenceCardVisitor visitor(heap_, const_cast<bool*>(&failed_));
1633 collector::MarkSweep::VisitObjectReferences(const_cast<mirror::Object*>(obj), visitor, true);
1634 }
1635 
1636 bool Failed() const {
1637 return failed_;
1638 }
1639 
1640 private:
1641 Heap* const heap_;
1642 bool failed_;
1643};
1644 
1645bool Heap::VerifyMissingCardMarks() {
1646 Locks::mutator_lock_->AssertExclusiveHeld(Thread::Current());
1647 
1648 // We need to sort the live stack since we binary search it.
1649 live_stack_->Sort();
1650 VerifyLiveStackReferences visitor(this);
1651 GetLiveBitmap()->Visit(visitor);
1652 
1653 // We can verify objects in the live stack since none of these should reference dead objects.
1654 for (mirror::Object** it = live_stack_->Begin(); it != live_stack_->End(); ++it) {
1655 visitor(*it);
1656 }
1657 
1658 if (visitor.Failed()) {
1659 DumpSpaces();
1660 return false;
1661 }
1662 return true;
1663}
1664 
1665void Heap::SwapStacks() {
1666 allocation_stack_.swap(live_stack_);
1667}
1668 
1669accounting::ModUnionTable* Heap::FindModUnionTableFromSpace(space::Space* space) {
1670 auto it = mod_union_tables_.find(space);
1671 if (it == mod_union_tables_.end()) {
1672 return nullptr;
1673 }
1674 return it->second;
1675}
1676 
1677void Heap::ProcessCards(TimingLogger& timings) {
1678 // Clear cards and keep track of cards cleared in the mod-union table.
1679 for (const auto& space : continuous_spaces_) {
1680 accounting::ModUnionTable* table = FindModUnionTableFromSpace(space);
1681 if (table != nullptr) {
1682 const char* name = space->IsZygoteSpace() ? "ZygoteModUnionClearCards" :
1683 "ImageModUnionClearCards";
1684 TimingLogger::ScopedSplit split(name, &timings);
1685 table->ClearCards();
1686 } else if (space->GetType() != space::kSpaceTypeBumpPointerSpace) {
1687 TimingLogger::ScopedSplit split("AllocSpaceClearCards", &timings);
1688 // No mod union table for the AllocSpace. Age the cards so that the GC knows that these cards
1689 // were dirty before the GC started.
1690 // TODO: Don't need to use an atomic update here.
1691 // The races leave us with either an aged card or an unaged card. Since we checkpoint the
1692 // roots and then scan / update the mod union tables afterwards, we will always scan either
1693 // card. If we end up with the unaged card, we scan it in the pause.
1694 card_table_->ModifyCardsAtomic(space->Begin(), space->End(), AgeCardVisitor(), VoidFunctor());
1695 }
1696 }
1697}
1698 
1699static mirror::Object* IdentityCallback(mirror::Object* obj, void*) {
1700 return obj;
1701}
1702 
1703void Heap::PreGcVerification(collector::GarbageCollector* gc) {
1704 ThreadList* thread_list = Runtime::Current()->GetThreadList();
1705 Thread* self = Thread::Current();
1706 
1707 if (verify_pre_gc_heap_) {
1708 thread_list->SuspendAll();
1709 {
1710 ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
1711 if (!VerifyHeapReferences()) {
1712 LOG(FATAL) << "Pre " << gc->GetName() << " heap verification failed";
1713 }
1714 }
1715 thread_list->ResumeAll();
1716 }
1717 
1718 // Check that all objects which reference things in the live stack are on dirty cards.
1719 if (verify_missing_card_marks_) {
1720 thread_list->SuspendAll();
1721 {
1722 ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
1723 SwapStacks();
1724 // Sort the live stack so that we can quickly binary search it later.
1725 if (!VerifyMissingCardMarks()) {
1726 LOG(FATAL) << "Pre " << gc->GetName() << " missing card mark verification failed";
1727 }
1728 SwapStacks();
1729 }
1730 thread_list->ResumeAll();
1731 }
1732 
1733 if (verify_mod_union_table_) {
1734 thread_list->SuspendAll();
1735 ReaderMutexLock reader_lock(self, *Locks::heap_bitmap_lock_);
1736 for (const auto& table_pair : mod_union_tables_) {
1737 accounting::ModUnionTable* mod_union_table = table_pair.second;
1738 mod_union_table->UpdateAndMarkReferences(IdentityCallback, nullptr);
1739 mod_union_table->Verify();
1740 }
1741 thread_list->ResumeAll();
1742 }
1743}
1744 
1745void Heap::PreSweepingGcVerification(collector::GarbageCollector* gc) {
1746 // Called before sweeping occurs since we want to make sure we are not going to reclaim any
1747 // reachable objects.
1748 if (verify_post_gc_heap_) {
1749 Thread* self = Thread::Current();
1750 CHECK_NE(self->GetState(), kRunnable);
1751 {
1752 WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
1753 // Swapping bound bitmaps does nothing.
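// Swapping the live and mark bitmaps makes the objects marked by this GC appear live to
// VerifyHeapReferences below; the second SwapBitmaps restores the original binding before
// sweeping actually runs.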
1754 gc->SwapBitmaps();
1755 if (!VerifyHeapReferences()) {
1756 LOG(FATAL) << "Pre sweeping " << gc->GetName() << " GC verification failed";
1757 }
1758 gc->SwapBitmaps();
1759 }
1760 }
1761}
1762 
1763void Heap::PostGcVerification(collector::GarbageCollector* gc) {
1764 if (verify_system_weaks_) {
1765 Thread* self = Thread::Current();
1766 ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
1767 collector::MarkSweep* mark_sweep = down_cast<collector::MarkSweep*>(gc);
1768 mark_sweep->VerifySystemWeaks();
1769 }
1770}
1771 
1772collector::GcType Heap::WaitForGcToComplete(Thread* self) {
1773 ScopedThreadStateChange tsc(self, kWaitingForGcToComplete);
1774 MutexLock mu(self, *gc_complete_lock_);
1775 return WaitForGcToCompleteLocked(self);
1776}
1777 
1778collector::GcType Heap::WaitForGcToCompleteLocked(Thread* self) {
1779 collector::GcType last_gc_type = collector::kGcTypeNone;
1780 uint64_t wait_start = NanoTime();
1781 while (is_gc_running_) {
1782 ATRACE_BEGIN("GC: Wait For Completion");
1783 // We must wait: change the thread state, then sleep on gc_complete_cond_.
1784 gc_complete_cond_->Wait(self);
1785 last_gc_type = last_gc_type_;
1786 ATRACE_END();
1787 }
1788 uint64_t wait_time = NanoTime() - wait_start;
1789 total_wait_time_ += wait_time;
1790 if (wait_time > long_pause_log_threshold_) {
1791 LOG(INFO) << "WaitForGcToComplete blocked for " << PrettyDuration(wait_time);
1792 }
1793 return last_gc_type;
1794}
1795 
1796void Heap::DumpForSigQuit(std::ostream& os) {
1797 os << "Heap: " << GetPercentFree() << "% free, " << PrettySize(GetBytesAllocated()) << "/"
1798 << PrettySize(GetTotalMemory()) << "; " << GetObjectsAllocated() << " objects\n";
1799 DumpGcPerformanceInfo(os);
1800}
1801 
1802size_t Heap::GetPercentFree() {
1803 return static_cast<size_t>(100.0f * static_cast<float>(GetFreeMemory()) / GetTotalMemory());
1804}
1805 
1806void Heap::SetIdealFootprint(size_t max_allowed_footprint) {
1807 if (max_allowed_footprint > GetMaxMemory()) {
1808 VLOG(gc) << "Clamp target GC heap from " << PrettySize(max_allowed_footprint) << " to "
1809 << PrettySize(GetMaxMemory());
1810 max_allowed_footprint = GetMaxMemory();
1811 }
1812 max_allowed_footprint_ = max_allowed_footprint;
1813}
1814 
1815bool Heap::IsMovableObject(const mirror::Object* obj) const {
1816 if (kMovingCollector) {
1817 DCHECK(!IsInTempSpace(obj));
1818 if (bump_pointer_space_->HasAddress(obj)) {
1819 return true;
1820 }
1821 }
1822 return false;
1823}
1824 
1825bool Heap::IsInTempSpace(const mirror::Object* obj) const {
1826 if (temp_space_->HasAddress(obj) && !temp_space_->Contains(obj)) {
1827 return true;
1828 }
1829 return false;
1830}
1831 
1832void Heap::UpdateMaxNativeFootprint() {
1833 size_t native_size = native_bytes_allocated_;
1834 // TODO: Tune the native heap utilization to be a value other than the java heap utilization.
1835 size_t target_size = native_size / GetTargetHeapUtilization();
1836 if (target_size > native_size + max_free_) {
1837 target_size = native_size + max_free_;
1838 } else if (target_size < native_size + min_free_) {
1839 target_size = native_size + min_free_;
1840 }
1841 native_footprint_gc_watermark_ = target_size;
1842 native_footprint_limit_ = 2 * target_size - native_size;
1843}
1844 
1845void Heap::GrowForUtilization(collector::GcType gc_type, uint64_t gc_duration) {
1846 // We know what our utilization is at this moment.
1847 // This doesn't actually resize any memory. It just lets the heap grow more when necessary.
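// For a non-sticky GC, the target size below is chosen so that bytes_allocated / target_size is
// roughly the target heap utilization, clamped so the free headroom stays between min_free_ and
// max_free_.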
1848 const size_t bytes_allocated = GetBytesAllocated();
1849 last_gc_size_ = bytes_allocated;
1850 last_gc_time_ns_ = NanoTime();
1851 size_t target_size;
1852 if (gc_type != collector::kGcTypeSticky) {
1853 // Grow the heap for a non-sticky GC.
1854 target_size = bytes_allocated / GetTargetHeapUtilization();
1855 if (target_size > bytes_allocated + max_free_) {
1856 target_size = bytes_allocated + max_free_;
1857 } else if (target_size < bytes_allocated + min_free_) {
1858 target_size = bytes_allocated + min_free_;
1859 }
1860 native_need_to_run_finalization_ = true;
1861 next_gc_type_ = collector::kGcTypeSticky;
1862 } else {
1863 // Based on how close the current heap size is to the target size, decide
1864 // whether to do a partial or a sticky GC next.
1865 if (bytes_allocated + min_free_ <= max_allowed_footprint_) {
1866 next_gc_type_ = collector::kGcTypeSticky;
1867 } else {
1868 next_gc_type_ = collector::kGcTypePartial;
1869 }
1870 // If we have freed enough memory, shrink the heap back down.
1871 if (bytes_allocated + max_free_ < max_allowed_footprint_) {
1872 target_size = bytes_allocated + max_free_;
1873 } else {
1874 target_size = std::max(bytes_allocated, max_allowed_footprint_);
1875 }
1876 }
1877 if (!ignore_max_footprint_) {
1878 SetIdealFootprint(target_size);
1879 if (concurrent_gc_ && AllocatorHasConcurrentGC(current_allocator_)) {
1880 // Calculate when to perform the next ConcurrentGC.
1881 // Calculate the estimated GC duration.
1882 double gc_duration_seconds = NsToMs(gc_duration) / 1000.0;
1883 // Estimate how many remaining bytes we will have when we need to start the next GC.
1884 size_t remaining_bytes = allocation_rate_ * gc_duration_seconds;
1885 remaining_bytes = std::max(remaining_bytes, kMinConcurrentRemainingBytes);
1886 if (UNLIKELY(remaining_bytes > max_allowed_footprint_)) {
1887 // A situation that should never happen: the estimated number of bytes allocated during a
1888 // single GC would exceed the application's entire footprint. Schedule another GC straight
1889 // away.
1890 concurrent_start_bytes_ = bytes_allocated;
1891 } else {
1892 // Start a concurrent GC when we get close to the estimated remaining bytes. When the
1893 // allocation rate is very high, remaining_bytes could tell us that we should start a GC
1894 // right away.
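// Taking the max with bytes_allocated keeps concurrent_start_bytes_ from falling below the
// current allocation, matching the comment above: under a very high allocation rate the
// concurrent GC ends up being requested almost immediately.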
1895 concurrent_start_bytes_ = std::max(max_allowed_footprint_ - remaining_bytes, 1896 bytes_allocated); 1897 } 1898 DCHECK_LE(concurrent_start_bytes_, max_allowed_footprint_); 1899 DCHECK_LE(max_allowed_footprint_, growth_limit_); 1900 } 1901 } 1902} 1903 1904void Heap::ClearGrowthLimit() { 1905 growth_limit_ = capacity_; 1906 non_moving_space_->ClearGrowthLimit(); 1907} 1908 1909void Heap::SetReferenceOffsets(MemberOffset reference_referent_offset, 1910 MemberOffset reference_queue_offset, 1911 MemberOffset reference_queueNext_offset, 1912 MemberOffset reference_pendingNext_offset, 1913 MemberOffset finalizer_reference_zombie_offset) { 1914 reference_referent_offset_ = reference_referent_offset; 1915 reference_queue_offset_ = reference_queue_offset; 1916 reference_queueNext_offset_ = reference_queueNext_offset; 1917 reference_pendingNext_offset_ = reference_pendingNext_offset; 1918 finalizer_reference_zombie_offset_ = finalizer_reference_zombie_offset; 1919 CHECK_NE(reference_referent_offset_.Uint32Value(), 0U); 1920 CHECK_NE(reference_queue_offset_.Uint32Value(), 0U); 1921 CHECK_NE(reference_queueNext_offset_.Uint32Value(), 0U); 1922 CHECK_NE(reference_pendingNext_offset_.Uint32Value(), 0U); 1923 CHECK_NE(finalizer_reference_zombie_offset_.Uint32Value(), 0U); 1924} 1925 1926void Heap::SetReferenceReferent(mirror::Object* reference, mirror::Object* referent) { 1927 DCHECK(reference != NULL); 1928 DCHECK_NE(reference_referent_offset_.Uint32Value(), 0U); 1929 reference->SetFieldObject(reference_referent_offset_, referent, true); 1930} 1931 1932mirror::Object* Heap::GetReferenceReferent(mirror::Object* reference) { 1933 DCHECK(reference != NULL); 1934 DCHECK_NE(reference_referent_offset_.Uint32Value(), 0U); 1935 return reference->GetFieldObject<mirror::Object*>(reference_referent_offset_, true); 1936} 1937 1938void Heap::AddFinalizerReference(Thread* self, mirror::Object* object) { 1939 ScopedObjectAccess soa(self); 1940 JValue result; 1941 ArgArray arg_array(NULL, 0); 1942 arg_array.Append(reinterpret_cast<uint32_t>(object)); 1943 soa.DecodeMethod(WellKnownClasses::java_lang_ref_FinalizerReference_add)->Invoke(self, 1944 arg_array.GetArray(), arg_array.GetNumBytes(), &result, 'V'); 1945} 1946 1947void Heap::EnqueueClearedReferences() { 1948 if (!cleared_references_.IsEmpty()) { 1949 // When a runtime isn't started there are no reference queues to care about so ignore. 1950 if (LIKELY(Runtime::Current()->IsStarted())) { 1951 ScopedObjectAccess soa(Thread::Current()); 1952 JValue result; 1953 ArgArray arg_array(NULL, 0); 1954 arg_array.Append(reinterpret_cast<uint32_t>(cleared_references_.GetList())); 1955 soa.DecodeMethod(WellKnownClasses::java_lang_ref_ReferenceQueue_add)->Invoke(soa.Self(), 1956 arg_array.GetArray(), arg_array.GetNumBytes(), &result, 'V'); 1957 } 1958 cleared_references_.Clear(); 1959 } 1960} 1961 1962void Heap::RequestConcurrentGC(Thread* self) { 1963 // Make sure that we can do a concurrent GC. 1964 Runtime* runtime = Runtime::Current(); 1965 DCHECK(concurrent_gc_); 1966 if (runtime == NULL || !runtime->IsFinishedStarting() || runtime->IsShuttingDown(self) || 1967 self->IsHandlingStackOverflow()) { 1968 return; 1969 } 1970 // We already have a request pending, no reason to start more until we update 1971 // concurrent_start_bytes_. 
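// std::numeric_limits<size_t>::max() acts as a sentinel meaning "request pending":
// IsGCRequestPending() below reports a pending request exactly while concurrent_start_bytes_
// holds this value, and GrowForUtilization resets it after the GC.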
1972 concurrent_start_bytes_ = std::numeric_limits<size_t>::max();
1973 JNIEnv* env = self->GetJniEnv();
1974 DCHECK(WellKnownClasses::java_lang_Daemons != nullptr);
1975 DCHECK(WellKnownClasses::java_lang_Daemons_requestGC != nullptr);
1976 env->CallStaticVoidMethod(WellKnownClasses::java_lang_Daemons,
1977 WellKnownClasses::java_lang_Daemons_requestGC);
1978 CHECK(!env->ExceptionCheck());
1979}
1980 
1981void Heap::ConcurrentGC(Thread* self) {
1982 if (Runtime::Current()->IsShuttingDown(self)) {
1983 return;
1984 }
1985 // Wait for any GCs currently running to finish.
1986 if (WaitForGcToComplete(self) == collector::kGcTypeNone) {
1987 // If we can't run the GC type we wanted to run, find the next appropriate one and try that
1988 // instead. E.g. if we can't do a partial GC, do a full GC instead.
1989 if (CollectGarbageInternal(next_gc_type_, kGcCauseBackground, false) ==
1990 collector::kGcTypeNone) {
1991 for (collector::GcType gc_type : gc_plan_) {
1992 // Attempt to run the collector; if we succeed, we are done.
1993 if (gc_type > next_gc_type_ &&
1994 CollectGarbageInternal(gc_type, kGcCauseBackground, false) != collector::kGcTypeNone) {
1995 break;
1996 }
1997 }
1998 }
1999 }
2000}
2001 
2002void Heap::RequestHeapTrim() {
2003 // The GC has completed, and now we must decide whether to request a heap trim (advising pages
2004 // back to the kernel) or not. Issuing a request will also cause trimming of the libc heap. As a
2005 // trim scans a space it will hold its lock, and can therefore cause jank.
2006 // Note: the large object space trims itself, and the Zygote space has been trimmed and
2007 // unchanging since forking.
2008 
2009 // We don't have a good measure of how worthwhile a trim might be. We can't use the live bitmap
2010 // because that only marks object heads, so a large array looks like lots of empty space. We
2011 // don't just ask dlmalloc to trim all the time, because the cost of an _attempted_ trim is
2012 // proportional to utilization (which is probably inversely proportional to the benefit we can
2013 // expect). We could try mincore(2) but that's only a measure of how many pages we haven't
2014 // given away, not how much use we're making of those pages.
2015 uint64_t ms_time = MilliTime();
2016 // Don't bother trimming the alloc space if a heap trim occurred in the last two seconds.
2017 if (ms_time - last_trim_time_ms_ < 2 * 1000) {
2018 return;
2019 }
2020 
2021 Thread* self = Thread::Current();
2022 Runtime* runtime = Runtime::Current();
2023 if (runtime == nullptr || !runtime->IsFinishedStarting() || runtime->IsShuttingDown(self)) {
2024 // Heap trimming isn't supported without a Java runtime or Daemons (such as at dex2oat time).
2025 // Also: we do not wish to start a heap trim if the runtime is shutting down (a racy check
2026 // as we don't hold the lock while requesting the trim).
2027 return;
2028 }
2029 
2030 last_trim_time_ms_ = ms_time;
2031 
2032 // Trim only if we do not currently care about pause times.
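// The actual trim runs asynchronously on a Java daemon thread via Daemons.requestHeapTrim(), so
// the request itself is cheap; we simply skip it entirely when pauses would be perceptible.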
2033 if (!CareAboutPauseTimes()) {
2034 JNIEnv* env = self->GetJniEnv();
2035 DCHECK(WellKnownClasses::java_lang_Daemons != NULL);
2036 DCHECK(WellKnownClasses::java_lang_Daemons_requestHeapTrim != NULL);
2037 env->CallStaticVoidMethod(WellKnownClasses::java_lang_Daemons,
2038 WellKnownClasses::java_lang_Daemons_requestHeapTrim);
2039 CHECK(!env->ExceptionCheck());
2040 }
2041}
2042 
2043void Heap::RevokeThreadLocalBuffers(Thread* thread) {
2044 non_moving_space_->RevokeThreadLocalBuffers(thread);
2045}
2046 
2047void Heap::RevokeAllThreadLocalBuffers() {
2048 non_moving_space_->RevokeAllThreadLocalBuffers();
2049}
2050 
2051bool Heap::IsGCRequestPending() const {
2052 return concurrent_start_bytes_ != std::numeric_limits<size_t>::max();
2053}
2054 
2055void Heap::RunFinalization(JNIEnv* env) {
2056 // Can't do this in WellKnownClasses::Init since System is not properly set up at that point.
2057 if (WellKnownClasses::java_lang_System_runFinalization == nullptr) {
2058 CHECK(WellKnownClasses::java_lang_System != nullptr);
2059 WellKnownClasses::java_lang_System_runFinalization =
2060 CacheMethod(env, WellKnownClasses::java_lang_System, true, "runFinalization", "()V");
2061 CHECK(WellKnownClasses::java_lang_System_runFinalization != nullptr);
2062 }
2063 env->CallStaticVoidMethod(WellKnownClasses::java_lang_System,
2064 WellKnownClasses::java_lang_System_runFinalization);
2065}
2066 
2067void Heap::RegisterNativeAllocation(JNIEnv* env, int bytes) {
2068 Thread* self = ThreadForEnv(env);
2069 if (native_need_to_run_finalization_) {
2070 RunFinalization(env);
2071 UpdateMaxNativeFootprint();
2072 native_need_to_run_finalization_ = false;
2073 }
2074 // Total number of native bytes allocated.
2075 native_bytes_allocated_.fetch_add(bytes);
2076 if (static_cast<size_t>(native_bytes_allocated_) > native_footprint_gc_watermark_) {
2077 collector::GcType gc_type = have_zygote_space_ ? collector::kGcTypePartial :
2078 collector::kGcTypeFull;
2079 
2080 // The second watermark is higher than the GC watermark. If you hit this, it means you are
2081 // allocating native objects faster than the GC can keep up with.
2082 if (static_cast<size_t>(native_bytes_allocated_) > native_footprint_limit_) {
2083 if (WaitForGcToComplete(self) != collector::kGcTypeNone) {
2084 // We just finished a GC; attempt to run finalizers.
2085 RunFinalization(env);
2086 CHECK(!env->ExceptionCheck());
2087 }
2088 // If we are still over the watermark, attempt a GC for alloc and run finalizers.
2089 if (static_cast<size_t>(native_bytes_allocated_) > native_footprint_limit_) {
2090 CollectGarbageInternal(gc_type, kGcCauseForAlloc, false);
2091 RunFinalization(env);
2092 native_need_to_run_finalization_ = false;
2093 CHECK(!env->ExceptionCheck());
2094 }
2095 // We have just run finalizers, so update the native watermark, since it is very likely that
2096 // finalizers released native managed allocations.
2097 UpdateMaxNativeFootprint(); 2098 } else if (!IsGCRequestPending()) { 2099 if (concurrent_gc_ && AllocatorHasConcurrentGC(current_allocator_)) { 2100 RequestConcurrentGC(self); 2101 } else { 2102 CollectGarbageInternal(gc_type, kGcCauseForAlloc, false); 2103 } 2104 } 2105 } 2106} 2107 2108void Heap::RegisterNativeFree(JNIEnv* env, int bytes) { 2109 int expected_size, new_size; 2110 do { 2111 expected_size = native_bytes_allocated_.load(); 2112 new_size = expected_size - bytes; 2113 if (UNLIKELY(new_size < 0)) { 2114 ScopedObjectAccess soa(env); 2115 env->ThrowNew(WellKnownClasses::java_lang_RuntimeException, 2116 StringPrintf("Attempted to free %d native bytes with only %d native bytes " 2117 "registered as allocated", bytes, expected_size).c_str()); 2118 break; 2119 } 2120 } while (!native_bytes_allocated_.compare_and_swap(expected_size, new_size)); 2121} 2122 2123int64_t Heap::GetTotalMemory() const { 2124 int64_t ret = 0; 2125 for (const auto& space : continuous_spaces_) { 2126 // Currently don't include the image space. 2127 if (!space->IsImageSpace()) { 2128 ret += space->Size(); 2129 } 2130 } 2131 for (const auto& space : discontinuous_spaces_) { 2132 if (space->IsLargeObjectSpace()) { 2133 ret += space->AsLargeObjectSpace()->GetBytesAllocated(); 2134 } 2135 } 2136 return ret; 2137} 2138 2139void Heap::AddModUnionTable(accounting::ModUnionTable* mod_union_table) { 2140 DCHECK(mod_union_table != nullptr); 2141 mod_union_tables_.Put(mod_union_table->GetSpace(), mod_union_table); 2142} 2143 2144} // namespace gc 2145} // namespace art 2146