heap.cc revision 8d31bbd3d6536de12bc20e3d29cfe03fe848f9da
1/* 2 * Copyright (C) 2011 The Android Open Source Project 3 * 4 * Licensed under the Apache License, Version 2.0 (the "License"); 5 * you may not use this file except in compliance with the License. 6 * You may obtain a copy of the License at 7 * 8 * http://www.apache.org/licenses/LICENSE-2.0 9 * 10 * Unless required by applicable law or agreed to in writing, software 11 * distributed under the License is distributed on an "AS IS" BASIS, 12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 * See the License for the specific language governing permissions and 14 * limitations under the License. 15 */ 16 17#include "heap.h" 18 19#define ATRACE_TAG ATRACE_TAG_DALVIK 20#include <cutils/trace.h> 21 22#include <limits> 23#include <vector> 24#include <valgrind.h> 25 26#include "base/stl_util.h" 27#include "common_throws.h" 28#include "cutils/sched_policy.h" 29#include "debugger.h" 30#include "gc/accounting/atomic_stack.h" 31#include "gc/accounting/card_table-inl.h" 32#include "gc/accounting/heap_bitmap-inl.h" 33#include "gc/accounting/mod_union_table-inl.h" 34#include "gc/accounting/space_bitmap-inl.h" 35#include "gc/collector/mark_sweep-inl.h" 36#include "gc/collector/partial_mark_sweep.h" 37#include "gc/collector/sticky_mark_sweep.h" 38#include "gc/space/dlmalloc_space-inl.h" 39#include "gc/space/image_space.h" 40#include "gc/space/large_object_space.h" 41#include "gc/space/space-inl.h" 42#include "heap-inl.h" 43#include "image.h" 44#include "invoke_arg_array_builder.h" 45#include "mirror/art_field-inl.h" 46#include "mirror/class-inl.h" 47#include "mirror/object.h" 48#include "mirror/object-inl.h" 49#include "mirror/object_array-inl.h" 50#include "object_utils.h" 51#include "os.h" 52#include "ScopedLocalRef.h" 53#include "scoped_thread_state_change.h" 54#include "sirt_ref.h" 55#include "thread_list.h" 56#include "UniquePtr.h" 57#include "well_known_classes.h" 58 59namespace art { 60namespace gc { 61 62static constexpr bool kGCALotMode = false; 63static constexpr size_t kGcAlotInterval = KB; 64static constexpr bool kDumpGcPerformanceOnShutdown = false; 65// Minimum amount of remaining bytes before a concurrent GC is triggered. 66static constexpr size_t kMinConcurrentRemainingBytes = 128 * KB; 67 68Heap::Heap(size_t initial_size, size_t growth_limit, size_t min_free, size_t max_free, 69 double target_utilization, size_t capacity, const std::string& image_file_name, 70 bool concurrent_gc, size_t parallel_gc_threads, size_t conc_gc_threads, 71 bool low_memory_mode, size_t long_pause_log_threshold, size_t long_gc_log_threshold, 72 bool ignore_max_footprint) 73 : alloc_space_(NULL), 74 card_table_(NULL), 75 concurrent_gc_(concurrent_gc), 76 parallel_gc_threads_(parallel_gc_threads), 77 conc_gc_threads_(conc_gc_threads), 78 low_memory_mode_(low_memory_mode), 79 long_pause_log_threshold_(long_pause_log_threshold), 80 long_gc_log_threshold_(long_gc_log_threshold), 81 ignore_max_footprint_(ignore_max_footprint), 82 have_zygote_space_(false), 83 soft_ref_queue_lock_(NULL), 84 weak_ref_queue_lock_(NULL), 85 finalizer_ref_queue_lock_(NULL), 86 phantom_ref_queue_lock_(NULL), 87 is_gc_running_(false), 88 last_gc_type_(collector::kGcTypeNone), 89 next_gc_type_(collector::kGcTypePartial), 90 capacity_(capacity), 91 growth_limit_(growth_limit), 92 max_allowed_footprint_(initial_size), 93 native_footprint_gc_watermark_(initial_size), 94 native_footprint_limit_(2 * initial_size), 95 activity_thread_class_(NULL), 96 application_thread_class_(NULL), 97 activity_thread_(NULL), 98 application_thread_(NULL), 99 last_process_state_id_(NULL), 100 // Initially care about pauses in case we never get notified of process states, or if the JNI 101 // code becomes broken. 102 care_about_pause_times_(true), 103 concurrent_start_bytes_(concurrent_gc_ ? initial_size - kMinConcurrentRemainingBytes 104 : std::numeric_limits<size_t>::max()), 105 total_bytes_freed_ever_(0), 106 total_objects_freed_ever_(0), 107 num_bytes_allocated_(0), 108 native_bytes_allocated_(0), 109 gc_memory_overhead_(0), 110 verify_missing_card_marks_(false), 111 verify_system_weaks_(false), 112 verify_pre_gc_heap_(false), 113 verify_post_gc_heap_(false), 114 verify_mod_union_table_(false), 115 min_alloc_space_size_for_sticky_gc_(2 * MB), 116 min_remaining_space_for_sticky_gc_(1 * MB), 117 last_trim_time_ms_(0), 118 allocation_rate_(0), 119 /* For GC a lot mode, we limit the allocations stacks to be kGcAlotInterval allocations. This 120 * causes a lot of GC since we do a GC for alloc whenever the stack is full. When heap 121 * verification is enabled, we limit the size of allocation stacks to speed up their 122 * searching. 123 */ 124 max_allocation_stack_size_(kGCALotMode ? kGcAlotInterval 125 : (kDesiredHeapVerification > kNoHeapVerification) ? KB : MB), 126 reference_referent_offset_(0), 127 reference_queue_offset_(0), 128 reference_queueNext_offset_(0), 129 reference_pendingNext_offset_(0), 130 finalizer_reference_zombie_offset_(0), 131 min_free_(min_free), 132 max_free_(max_free), 133 target_utilization_(target_utilization), 134 total_wait_time_(0), 135 total_allocation_time_(0), 136 verify_object_mode_(kHeapVerificationNotPermitted), 137 running_on_valgrind_(RUNNING_ON_VALGRIND) { 138 if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) { 139 LOG(INFO) << "Heap() entering"; 140 } 141 142 live_bitmap_.reset(new accounting::HeapBitmap(this)); 143 mark_bitmap_.reset(new accounting::HeapBitmap(this)); 144 145 // Requested begin for the alloc space, to follow the mapped image and oat files 146 byte* requested_alloc_space_begin = NULL; 147 if (!image_file_name.empty()) { 148 space::ImageSpace* image_space = space::ImageSpace::Create(image_file_name.c_str()); 149 CHECK(image_space != NULL) << "Failed to create space for " << image_file_name; 150 AddContinuousSpace(image_space); 151 // Oat files referenced by image files immediately follow them in memory, ensure alloc space 152 // isn't going to get in the middle 153 byte* oat_file_end_addr = image_space->GetImageHeader().GetOatFileEnd(); 154 CHECK_GT(oat_file_end_addr, image_space->End()); 155 if (oat_file_end_addr > requested_alloc_space_begin) { 156 requested_alloc_space_begin = 157 reinterpret_cast<byte*>(RoundUp(reinterpret_cast<uintptr_t>(oat_file_end_addr), 158 kPageSize)); 159 } 160 } 161 162 alloc_space_ = space::DlMallocSpace::Create(Runtime::Current()->IsZygote() ? "zygote space" : "alloc space", 163 initial_size, 164 growth_limit, capacity, 165 requested_alloc_space_begin); 166 CHECK(alloc_space_ != NULL) << "Failed to create alloc space"; 167 alloc_space_->SetFootprintLimit(alloc_space_->Capacity()); 168 AddContinuousSpace(alloc_space_); 169 170 // Allocate the large object space. 171 const bool kUseFreeListSpaceForLOS = false; 172 if (kUseFreeListSpaceForLOS) { 173 large_object_space_ = space::FreeListSpace::Create("large object space", NULL, capacity); 174 } else { 175 large_object_space_ = space::LargeObjectMapSpace::Create("large object space"); 176 } 177 CHECK(large_object_space_ != NULL) << "Failed to create large object space"; 178 AddDiscontinuousSpace(large_object_space_); 179 180 // Compute heap capacity. Continuous spaces are sorted in order of Begin(). 181 byte* heap_begin = continuous_spaces_.front()->Begin(); 182 size_t heap_capacity = continuous_spaces_.back()->End() - continuous_spaces_.front()->Begin(); 183 if (continuous_spaces_.back()->IsDlMallocSpace()) { 184 heap_capacity += continuous_spaces_.back()->AsDlMallocSpace()->NonGrowthLimitCapacity(); 185 } 186 187 // Allocate the card table. 188 card_table_.reset(accounting::CardTable::Create(heap_begin, heap_capacity)); 189 CHECK(card_table_.get() != NULL) << "Failed to create card table"; 190 191 accounting::ModUnionTable* mod_union_table = 192 new accounting::ModUnionTableToZygoteAllocspace("Image mod-union table", this, 193 GetImageSpace()); 194 CHECK(mod_union_table != nullptr) << "Failed to create image mod-union table"; 195 AddModUnionTable(mod_union_table); 196 197 // TODO: Count objects in the image space here. 198 num_bytes_allocated_ = 0; 199 200 // Default mark stack size in bytes. 201 static const size_t default_mark_stack_size = 64 * KB; 202 mark_stack_.reset(accounting::ObjectStack::Create("mark stack", default_mark_stack_size)); 203 allocation_stack_.reset(accounting::ObjectStack::Create("allocation stack", 204 max_allocation_stack_size_)); 205 live_stack_.reset(accounting::ObjectStack::Create("live stack", 206 max_allocation_stack_size_)); 207 208 // It's still too early to take a lock because there are no threads yet, but we can create locks 209 // now. We don't create it earlier to make it clear that you can't use locks during heap 210 // initialization. 211 gc_complete_lock_ = new Mutex("GC complete lock"); 212 gc_complete_cond_.reset(new ConditionVariable("GC complete condition variable", 213 *gc_complete_lock_)); 214 215 // Create the reference queue locks, this is required so for parallel object scanning in the GC. 216 soft_ref_queue_lock_ = new Mutex("Soft reference queue lock"); 217 weak_ref_queue_lock_ = new Mutex("Weak reference queue lock"); 218 finalizer_ref_queue_lock_ = new Mutex("Finalizer reference queue lock"); 219 phantom_ref_queue_lock_ = new Mutex("Phantom reference queue lock"); 220 221 last_gc_time_ns_ = NanoTime(); 222 last_gc_size_ = GetBytesAllocated(); 223 224 if (ignore_max_footprint_) { 225 SetIdealFootprint(std::numeric_limits<size_t>::max()); 226 concurrent_start_bytes_ = max_allowed_footprint_; 227 } 228 229 // Create our garbage collectors. 230 for (size_t i = 0; i < 2; ++i) { 231 const bool concurrent = i != 0; 232 mark_sweep_collectors_.push_back(new collector::MarkSweep(this, concurrent)); 233 mark_sweep_collectors_.push_back(new collector::PartialMarkSweep(this, concurrent)); 234 mark_sweep_collectors_.push_back(new collector::StickyMarkSweep(this, concurrent)); 235 } 236 237 CHECK_NE(max_allowed_footprint_, 0U); 238 239 if (running_on_valgrind_) { 240 Runtime::Current()->InstrumentQuickAllocEntryPoints(); 241 } 242 243 if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) { 244 LOG(INFO) << "Heap() exiting"; 245 } 246} 247 248void Heap::CreateThreadPool() { 249 const size_t num_threads = std::max(parallel_gc_threads_, conc_gc_threads_); 250 if (num_threads != 0) { 251 thread_pool_.reset(new ThreadPool(num_threads)); 252 } 253} 254 255void Heap::DeleteThreadPool() { 256 thread_pool_.reset(nullptr); 257} 258 259static bool ReadStaticInt(JNIEnvExt* env, jclass clz, const char* name, int* out_value) { 260 CHECK(out_value != NULL); 261 jfieldID field = env->GetStaticFieldID(clz, name, "I"); 262 if (field == NULL) { 263 env->ExceptionClear(); 264 return false; 265 } 266 *out_value = env->GetStaticIntField(clz, field); 267 return true; 268} 269 270void Heap::ListenForProcessStateChange() { 271 VLOG(heap) << "Heap notified of process state change"; 272 273 Thread* self = Thread::Current(); 274 JNIEnvExt* env = self->GetJniEnv(); 275 276 if (!have_zygote_space_) { 277 return; 278 } 279 280 if (activity_thread_class_ == NULL) { 281 jclass clz = env->FindClass("android/app/ActivityThread"); 282 if (clz == NULL) { 283 env->ExceptionClear(); 284 LOG(WARNING) << "Could not find activity thread class in process state change"; 285 return; 286 } 287 activity_thread_class_ = reinterpret_cast<jclass>(env->NewGlobalRef(clz)); 288 } 289 290 if (activity_thread_class_ != NULL && activity_thread_ == NULL) { 291 jmethodID current_activity_method = env->GetStaticMethodID(activity_thread_class_, 292 "currentActivityThread", 293 "()Landroid/app/ActivityThread;"); 294 if (current_activity_method == NULL) { 295 env->ExceptionClear(); 296 LOG(WARNING) << "Could not get method for currentActivityThread"; 297 return; 298 } 299 300 jobject obj = env->CallStaticObjectMethod(activity_thread_class_, current_activity_method); 301 if (obj == NULL) { 302 env->ExceptionClear(); 303 LOG(WARNING) << "Could not get current activity"; 304 return; 305 } 306 activity_thread_ = env->NewGlobalRef(obj); 307 } 308 309 if (process_state_cares_about_pause_time_.empty()) { 310 // Just attempt to do this the first time. 311 jclass clz = env->FindClass("android/app/ActivityManager"); 312 if (clz == NULL) { 313 LOG(WARNING) << "Activity manager class is null"; 314 return; 315 } 316 ScopedLocalRef<jclass> activity_manager(env, clz); 317 std::vector<const char*> care_about_pauses; 318 care_about_pauses.push_back("PROCESS_STATE_TOP"); 319 care_about_pauses.push_back("PROCESS_STATE_IMPORTANT_BACKGROUND"); 320 // Attempt to read the constants and classify them as whether or not we care about pause times. 321 for (size_t i = 0; i < care_about_pauses.size(); ++i) { 322 int process_state = 0; 323 if (ReadStaticInt(env, activity_manager.get(), care_about_pauses[i], &process_state)) { 324 process_state_cares_about_pause_time_.insert(process_state); 325 VLOG(heap) << "Adding process state " << process_state 326 << " to set of states which care about pause time"; 327 } 328 } 329 } 330 331 if (application_thread_class_ == NULL) { 332 jclass clz = env->FindClass("android/app/ActivityThread$ApplicationThread"); 333 if (clz == NULL) { 334 env->ExceptionClear(); 335 LOG(WARNING) << "Could not get application thread class"; 336 return; 337 } 338 application_thread_class_ = reinterpret_cast<jclass>(env->NewGlobalRef(clz)); 339 last_process_state_id_ = env->GetFieldID(application_thread_class_, "mLastProcessState", "I"); 340 if (last_process_state_id_ == NULL) { 341 env->ExceptionClear(); 342 LOG(WARNING) << "Could not get last process state member"; 343 return; 344 } 345 } 346 347 if (application_thread_class_ != NULL && application_thread_ == NULL) { 348 jmethodID get_application_thread = 349 env->GetMethodID(activity_thread_class_, "getApplicationThread", 350 "()Landroid/app/ActivityThread$ApplicationThread;"); 351 if (get_application_thread == NULL) { 352 LOG(WARNING) << "Could not get method ID for get application thread"; 353 return; 354 } 355 356 jobject obj = env->CallObjectMethod(activity_thread_, get_application_thread); 357 if (obj == NULL) { 358 LOG(WARNING) << "Could not get application thread"; 359 return; 360 } 361 362 application_thread_ = env->NewGlobalRef(obj); 363 } 364 365 if (application_thread_ != NULL && last_process_state_id_ != NULL) { 366 int process_state = env->GetIntField(application_thread_, last_process_state_id_); 367 env->ExceptionClear(); 368 369 care_about_pause_times_ = process_state_cares_about_pause_time_.find(process_state) != 370 process_state_cares_about_pause_time_.end(); 371 372 VLOG(heap) << "New process state " << process_state 373 << " care about pauses " << care_about_pause_times_; 374 } 375} 376 377void Heap::AddContinuousSpace(space::ContinuousSpace* space) { 378 WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_); 379 DCHECK(space != NULL); 380 DCHECK(space->GetLiveBitmap() != NULL); 381 live_bitmap_->AddContinuousSpaceBitmap(space->GetLiveBitmap()); 382 DCHECK(space->GetMarkBitmap() != NULL); 383 mark_bitmap_->AddContinuousSpaceBitmap(space->GetMarkBitmap()); 384 continuous_spaces_.push_back(space); 385 if (space->IsDlMallocSpace() && !space->IsLargeObjectSpace()) { 386 alloc_space_ = space->AsDlMallocSpace(); 387 } 388 389 // Ensure that spaces remain sorted in increasing order of start address (required for CMS finger) 390 std::sort(continuous_spaces_.begin(), continuous_spaces_.end(), 391 [](const space::ContinuousSpace* a, const space::ContinuousSpace* b) { 392 return a->Begin() < b->Begin(); 393 }); 394 395 // Ensure that ImageSpaces < ZygoteSpaces < AllocSpaces so that we can do address based checks to 396 // avoid redundant marking. 397 bool seen_zygote = false, seen_alloc = false; 398 for (const auto& space : continuous_spaces_) { 399 if (space->IsImageSpace()) { 400 DCHECK(!seen_zygote); 401 DCHECK(!seen_alloc); 402 } else if (space->IsZygoteSpace()) { 403 DCHECK(!seen_alloc); 404 seen_zygote = true; 405 } else if (space->IsDlMallocSpace()) { 406 seen_alloc = true; 407 } 408 } 409} 410 411void Heap::RegisterGCAllocation(size_t bytes) { 412 if (this != NULL) { 413 gc_memory_overhead_.fetch_add(bytes); 414 } 415} 416 417void Heap::RegisterGCDeAllocation(size_t bytes) { 418 if (this != NULL) { 419 gc_memory_overhead_.fetch_sub(bytes); 420 } 421} 422 423void Heap::AddDiscontinuousSpace(space::DiscontinuousSpace* space) { 424 WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_); 425 DCHECK(space != NULL); 426 DCHECK(space->GetLiveObjects() != NULL); 427 live_bitmap_->AddDiscontinuousObjectSet(space->GetLiveObjects()); 428 DCHECK(space->GetMarkObjects() != NULL); 429 mark_bitmap_->AddDiscontinuousObjectSet(space->GetMarkObjects()); 430 discontinuous_spaces_.push_back(space); 431} 432 433void Heap::DumpGcPerformanceInfo(std::ostream& os) { 434 // Dump cumulative timings. 435 os << "Dumping cumulative Gc timings\n"; 436 uint64_t total_duration = 0; 437 438 // Dump cumulative loggers for each GC type. 439 uint64_t total_paused_time = 0; 440 for (const auto& collector : mark_sweep_collectors_) { 441 CumulativeLogger& logger = collector->GetCumulativeTimings(); 442 if (logger.GetTotalNs() != 0) { 443 os << Dumpable<CumulativeLogger>(logger); 444 const uint64_t total_ns = logger.GetTotalNs(); 445 const uint64_t total_pause_ns = collector->GetTotalPausedTimeNs(); 446 double seconds = NsToMs(logger.GetTotalNs()) / 1000.0; 447 const uint64_t freed_bytes = collector->GetTotalFreedBytes(); 448 const uint64_t freed_objects = collector->GetTotalFreedObjects(); 449 os << collector->GetName() << " total time: " << PrettyDuration(total_ns) << "\n" 450 << collector->GetName() << " paused time: " << PrettyDuration(total_pause_ns) << "\n" 451 << collector->GetName() << " freed: " << freed_objects 452 << " objects with total size " << PrettySize(freed_bytes) << "\n" 453 << collector->GetName() << " throughput: " << freed_objects / seconds << "/s / " 454 << PrettySize(freed_bytes / seconds) << "/s\n"; 455 total_duration += total_ns; 456 total_paused_time += total_pause_ns; 457 } 458 } 459 uint64_t allocation_time = static_cast<uint64_t>(total_allocation_time_) * kTimeAdjust; 460 size_t total_objects_allocated = GetObjectsAllocatedEver(); 461 size_t total_bytes_allocated = GetBytesAllocatedEver(); 462 if (total_duration != 0) { 463 const double total_seconds = static_cast<double>(total_duration / 1000) / 1000000.0; 464 os << "Total time spent in GC: " << PrettyDuration(total_duration) << "\n"; 465 os << "Mean GC size throughput: " 466 << PrettySize(GetBytesFreedEver() / total_seconds) << "/s\n"; 467 os << "Mean GC object throughput: " 468 << (GetObjectsFreedEver() / total_seconds) << " objects/s\n"; 469 } 470 os << "Total number of allocations: " << total_objects_allocated << "\n"; 471 os << "Total bytes allocated " << PrettySize(total_bytes_allocated) << "\n"; 472 if (kMeasureAllocationTime) { 473 os << "Total time spent allocating: " << PrettyDuration(allocation_time) << "\n"; 474 os << "Mean allocation time: " << PrettyDuration(allocation_time / total_objects_allocated) 475 << "\n"; 476 } 477 os << "Total mutator paused time: " << PrettyDuration(total_paused_time) << "\n"; 478 os << "Total time waiting for GC to complete: " << PrettyDuration(total_wait_time_) << "\n"; 479 os << "Approximate GC data structures memory overhead: " << gc_memory_overhead_; 480} 481 482Heap::~Heap() { 483 if (kDumpGcPerformanceOnShutdown) { 484 DumpGcPerformanceInfo(LOG(INFO)); 485 } 486 487 STLDeleteElements(&mark_sweep_collectors_); 488 489 // If we don't reset then the mark stack complains in it's destructor. 490 allocation_stack_->Reset(); 491 live_stack_->Reset(); 492 493 VLOG(heap) << "~Heap()"; 494 STLDeleteValues(&mod_union_tables_); 495 STLDeleteElements(&continuous_spaces_); 496 STLDeleteElements(&discontinuous_spaces_); 497 delete gc_complete_lock_; 498 delete soft_ref_queue_lock_; 499 delete weak_ref_queue_lock_; 500 delete finalizer_ref_queue_lock_; 501 delete phantom_ref_queue_lock_; 502} 503 504space::ContinuousSpace* Heap::FindContinuousSpaceFromObject(const mirror::Object* obj, 505 bool fail_ok) const { 506 for (const auto& space : continuous_spaces_) { 507 if (space->Contains(obj)) { 508 return space; 509 } 510 } 511 if (!fail_ok) { 512 LOG(FATAL) << "object " << reinterpret_cast<const void*>(obj) << " not inside any spaces!"; 513 } 514 return NULL; 515} 516 517space::DiscontinuousSpace* Heap::FindDiscontinuousSpaceFromObject(const mirror::Object* obj, 518 bool fail_ok) const { 519 for (const auto& space : discontinuous_spaces_) { 520 if (space->Contains(obj)) { 521 return space; 522 } 523 } 524 if (!fail_ok) { 525 LOG(FATAL) << "object " << reinterpret_cast<const void*>(obj) << " not inside any spaces!"; 526 } 527 return NULL; 528} 529 530space::Space* Heap::FindSpaceFromObject(const mirror::Object* obj, bool fail_ok) const { 531 space::Space* result = FindContinuousSpaceFromObject(obj, true); 532 if (result != NULL) { 533 return result; 534 } 535 return FindDiscontinuousSpaceFromObject(obj, true); 536} 537 538space::ImageSpace* Heap::GetImageSpace() const { 539 for (const auto& space : continuous_spaces_) { 540 if (space->IsImageSpace()) { 541 return space->AsImageSpace(); 542 } 543 } 544 return NULL; 545} 546 547static void MSpaceChunkCallback(void* start, void* end, size_t used_bytes, void* arg) { 548 size_t chunk_size = reinterpret_cast<uint8_t*>(end) - reinterpret_cast<uint8_t*>(start); 549 if (used_bytes < chunk_size) { 550 size_t chunk_free_bytes = chunk_size - used_bytes; 551 size_t& max_contiguous_allocation = *reinterpret_cast<size_t*>(arg); 552 max_contiguous_allocation = std::max(max_contiguous_allocation, chunk_free_bytes); 553 } 554} 555 556void Heap::ThrowOutOfMemoryError(Thread* self, size_t byte_count, bool large_object_allocation) { 557 std::ostringstream oss; 558 int64_t total_bytes_free = GetFreeMemory(); 559 oss << "Failed to allocate a " << byte_count << " byte allocation with " << total_bytes_free 560 << " free bytes"; 561 // If the allocation failed due to fragmentation, print out the largest continuous allocation. 562 if (!large_object_allocation && total_bytes_free >= byte_count) { 563 size_t max_contiguous_allocation = 0; 564 for (const auto& space : continuous_spaces_) { 565 if (space->IsDlMallocSpace()) { 566 space->AsDlMallocSpace()->Walk(MSpaceChunkCallback, &max_contiguous_allocation); 567 } 568 } 569 oss << "; failed due to fragmentation (largest possible contiguous allocation " 570 << max_contiguous_allocation << " bytes)"; 571 } 572 self->ThrowOutOfMemoryError(oss.str().c_str()); 573} 574 575inline bool Heap::TryAllocLargeObjectInstrumented(Thread* self, mirror::Class* c, size_t byte_count, 576 mirror::Object** obj_ptr, size_t* bytes_allocated) { 577 bool large_object_allocation = ShouldAllocLargeObject(c, byte_count); 578 if (UNLIKELY(large_object_allocation)) { 579 mirror::Object* obj = AllocateInstrumented(self, large_object_space_, byte_count, bytes_allocated); 580 // Make sure that our large object didn't get placed anywhere within the space interval or else 581 // it breaks the immune range. 582 DCHECK(obj == NULL || 583 reinterpret_cast<byte*>(obj) < continuous_spaces_.front()->Begin() || 584 reinterpret_cast<byte*>(obj) >= continuous_spaces_.back()->End()); 585 *obj_ptr = obj; 586 } 587 return large_object_allocation; 588} 589 590mirror::Object* Heap::AllocObjectInstrumented(Thread* self, mirror::Class* c, size_t byte_count) { 591 DebugCheckPreconditionsForAllobObject(c, byte_count); 592 mirror::Object* obj; 593 size_t bytes_allocated; 594 AllocationTimer alloc_timer(this, &obj); 595 bool large_object_allocation = TryAllocLargeObjectInstrumented(self, c, byte_count, 596 &obj, &bytes_allocated); 597 if (LIKELY(!large_object_allocation)) { 598 // Non-large object allocation. 599 obj = AllocateInstrumented(self, alloc_space_, byte_count, &bytes_allocated); 600 // Ensure that we did not allocate into a zygote space. 601 DCHECK(obj == NULL || !have_zygote_space_ || !FindSpaceFromObject(obj, false)->IsZygoteSpace()); 602 } 603 if (LIKELY(obj != NULL)) { 604 obj->SetClass(c); 605 // Record allocation after since we want to use the atomic add for the atomic fence to guard 606 // the SetClass since we do not want the class to appear NULL in another thread. 607 size_t new_num_bytes_allocated = RecordAllocationInstrumented(bytes_allocated, obj); 608 if (Dbg::IsAllocTrackingEnabled()) { 609 Dbg::RecordAllocation(c, byte_count); 610 } 611 CheckConcurrentGC(self, new_num_bytes_allocated, obj); 612 if (kDesiredHeapVerification > kNoHeapVerification) { 613 VerifyObject(obj); 614 } 615 return obj; 616 } 617 ThrowOutOfMemoryError(self, byte_count, large_object_allocation); 618 return NULL; 619} 620 621bool Heap::IsHeapAddress(const mirror::Object* obj) { 622 // Note: we deliberately don't take the lock here, and mustn't test anything that would 623 // require taking the lock. 624 if (obj == NULL) { 625 return true; 626 } 627 if (UNLIKELY(!IsAligned<kObjectAlignment>(obj))) { 628 return false; 629 } 630 return FindSpaceFromObject(obj, true) != NULL; 631} 632 633bool Heap::IsLiveObjectLocked(const mirror::Object* obj, bool search_allocation_stack, 634 bool search_live_stack, bool sorted) { 635 // Locks::heap_bitmap_lock_->AssertReaderHeld(Thread::Current()); 636 if (obj == NULL || UNLIKELY(!IsAligned<kObjectAlignment>(obj))) { 637 return false; 638 } 639 space::ContinuousSpace* c_space = FindContinuousSpaceFromObject(obj, true); 640 space::DiscontinuousSpace* d_space = NULL; 641 if (c_space != NULL) { 642 if (c_space->GetLiveBitmap()->Test(obj)) { 643 return true; 644 } 645 } else { 646 d_space = FindDiscontinuousSpaceFromObject(obj, true); 647 if (d_space != NULL) { 648 if (d_space->GetLiveObjects()->Test(obj)) { 649 return true; 650 } 651 } 652 } 653 // This is covering the allocation/live stack swapping that is done without mutators suspended. 654 for (size_t i = 0; i < (sorted ? 1 : 5); ++i) { 655 if (i > 0) { 656 NanoSleep(MsToNs(10)); 657 } 658 659 if (search_allocation_stack) { 660 if (sorted) { 661 if (allocation_stack_->ContainsSorted(const_cast<mirror::Object*>(obj))) { 662 return true; 663 } 664 } else if (allocation_stack_->Contains(const_cast<mirror::Object*>(obj))) { 665 return true; 666 } 667 } 668 669 if (search_live_stack) { 670 if (sorted) { 671 if (live_stack_->ContainsSorted(const_cast<mirror::Object*>(obj))) { 672 return true; 673 } 674 } else if (live_stack_->Contains(const_cast<mirror::Object*>(obj))) { 675 return true; 676 } 677 } 678 } 679 // We need to check the bitmaps again since there is a race where we mark something as live and 680 // then clear the stack containing it. 681 if (c_space != NULL) { 682 if (c_space->GetLiveBitmap()->Test(obj)) { 683 return true; 684 } 685 } else { 686 d_space = FindDiscontinuousSpaceFromObject(obj, true); 687 if (d_space != NULL && d_space->GetLiveObjects()->Test(obj)) { 688 return true; 689 } 690 } 691 return false; 692} 693 694void Heap::VerifyObjectImpl(const mirror::Object* obj) { 695 if (Thread::Current() == NULL || 696 Runtime::Current()->GetThreadList()->GetLockOwner() == Thread::Current()->GetTid()) { 697 return; 698 } 699 VerifyObjectBody(obj); 700} 701 702void Heap::DumpSpaces() { 703 for (const auto& space : continuous_spaces_) { 704 accounting::SpaceBitmap* live_bitmap = space->GetLiveBitmap(); 705 accounting::SpaceBitmap* mark_bitmap = space->GetMarkBitmap(); 706 LOG(INFO) << space << " " << *space << "\n" 707 << live_bitmap << " " << *live_bitmap << "\n" 708 << mark_bitmap << " " << *mark_bitmap; 709 } 710 for (const auto& space : discontinuous_spaces_) { 711 LOG(INFO) << space << " " << *space << "\n"; 712 } 713} 714 715void Heap::VerifyObjectBody(const mirror::Object* obj) { 716 CHECK(IsAligned<kObjectAlignment>(obj)) << "Object isn't aligned: " << obj; 717 // Ignore early dawn of the universe verifications. 718 if (UNLIKELY(static_cast<size_t>(num_bytes_allocated_.load()) < 10 * KB)) { 719 return; 720 } 721 const byte* raw_addr = reinterpret_cast<const byte*>(obj) + 722 mirror::Object::ClassOffset().Int32Value(); 723 const mirror::Class* c = *reinterpret_cast<mirror::Class* const *>(raw_addr); 724 if (UNLIKELY(c == NULL)) { 725 LOG(FATAL) << "Null class in object: " << obj; 726 } else if (UNLIKELY(!IsAligned<kObjectAlignment>(c))) { 727 LOG(FATAL) << "Class isn't aligned: " << c << " in object: " << obj; 728 } 729 // Check obj.getClass().getClass() == obj.getClass().getClass().getClass() 730 // Note: we don't use the accessors here as they have internal sanity checks 731 // that we don't want to run 732 raw_addr = reinterpret_cast<const byte*>(c) + mirror::Object::ClassOffset().Int32Value(); 733 const mirror::Class* c_c = *reinterpret_cast<mirror::Class* const *>(raw_addr); 734 raw_addr = reinterpret_cast<const byte*>(c_c) + mirror::Object::ClassOffset().Int32Value(); 735 const mirror::Class* c_c_c = *reinterpret_cast<mirror::Class* const *>(raw_addr); 736 CHECK_EQ(c_c, c_c_c); 737 738 if (verify_object_mode_ != kVerifyAllFast) { 739 // TODO: the bitmap tests below are racy if VerifyObjectBody is called without the 740 // heap_bitmap_lock_. 741 if (!IsLiveObjectLocked(obj)) { 742 DumpSpaces(); 743 LOG(FATAL) << "Object is dead: " << obj; 744 } 745 if (!IsLiveObjectLocked(c)) { 746 LOG(FATAL) << "Class of object is dead: " << c << " in object: " << obj; 747 } 748 } 749} 750 751void Heap::VerificationCallback(mirror::Object* obj, void* arg) { 752 DCHECK(obj != NULL); 753 reinterpret_cast<Heap*>(arg)->VerifyObjectBody(obj); 754} 755 756void Heap::VerifyHeap() { 757 ReaderMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_); 758 GetLiveBitmap()->Walk(Heap::VerificationCallback, this); 759} 760 761inline size_t Heap::RecordAllocationInstrumented(size_t size, mirror::Object* obj) { 762 DCHECK(obj != NULL); 763 DCHECK_GT(size, 0u); 764 size_t old_num_bytes_allocated = static_cast<size_t>(num_bytes_allocated_.fetch_add(size)); 765 766 if (Runtime::Current()->HasStatsEnabled()) { 767 RuntimeStats* thread_stats = Thread::Current()->GetStats(); 768 ++thread_stats->allocated_objects; 769 thread_stats->allocated_bytes += size; 770 771 // TODO: Update these atomically. 772 RuntimeStats* global_stats = Runtime::Current()->GetStats(); 773 ++global_stats->allocated_objects; 774 global_stats->allocated_bytes += size; 775 } 776 777 // This is safe to do since the GC will never free objects which are neither in the allocation 778 // stack or the live bitmap. 779 while (!allocation_stack_->AtomicPushBack(obj)) { 780 CollectGarbageInternal(collector::kGcTypeSticky, kGcCauseForAlloc, false); 781 } 782 783 return old_num_bytes_allocated + size; 784} 785 786void Heap::RecordFree(size_t freed_objects, size_t freed_bytes) { 787 DCHECK_LE(freed_bytes, static_cast<size_t>(num_bytes_allocated_)); 788 num_bytes_allocated_.fetch_sub(freed_bytes); 789 790 if (Runtime::Current()->HasStatsEnabled()) { 791 RuntimeStats* thread_stats = Thread::Current()->GetStats(); 792 thread_stats->freed_objects += freed_objects; 793 thread_stats->freed_bytes += freed_bytes; 794 795 // TODO: Do this concurrently. 796 RuntimeStats* global_stats = Runtime::Current()->GetStats(); 797 global_stats->freed_objects += freed_objects; 798 global_stats->freed_bytes += freed_bytes; 799 } 800} 801 802inline mirror::Object* Heap::TryToAllocateInstrumented(Thread* self, space::AllocSpace* space, size_t alloc_size, 803 bool grow, size_t* bytes_allocated) { 804 if (UNLIKELY(IsOutOfMemoryOnAllocation(alloc_size, grow))) { 805 return NULL; 806 } 807 return space->Alloc(self, alloc_size, bytes_allocated); 808} 809 810// DlMallocSpace-specific version. 811inline mirror::Object* Heap::TryToAllocateInstrumented(Thread* self, space::DlMallocSpace* space, size_t alloc_size, 812 bool grow, size_t* bytes_allocated) { 813 if (UNLIKELY(IsOutOfMemoryOnAllocation(alloc_size, grow))) { 814 return NULL; 815 } 816 if (LIKELY(!running_on_valgrind_)) { 817 return space->AllocNonvirtual(self, alloc_size, bytes_allocated); 818 } else { 819 return space->Alloc(self, alloc_size, bytes_allocated); 820 } 821} 822 823template <class T> 824inline mirror::Object* Heap::AllocateInstrumented(Thread* self, T* space, size_t alloc_size, 825 size_t* bytes_allocated) { 826 // Since allocation can cause a GC which will need to SuspendAll, make sure all allocations are 827 // done in the runnable state where suspension is expected. 828 DCHECK_EQ(self->GetState(), kRunnable); 829 self->AssertThreadSuspensionIsAllowable(); 830 831 mirror::Object* ptr = TryToAllocateInstrumented(self, space, alloc_size, false, bytes_allocated); 832 if (LIKELY(ptr != NULL)) { 833 return ptr; 834 } 835 return AllocateInternalWithGc(self, space, alloc_size, bytes_allocated); 836} 837 838mirror::Object* Heap::AllocateInternalWithGc(Thread* self, space::AllocSpace* space, 839 size_t alloc_size, size_t* bytes_allocated) { 840 mirror::Object* ptr; 841 842 // The allocation failed. If the GC is running, block until it completes, and then retry the 843 // allocation. 844 collector::GcType last_gc = WaitForConcurrentGcToComplete(self); 845 if (last_gc != collector::kGcTypeNone) { 846 // A GC was in progress and we blocked, retry allocation now that memory has been freed. 847 ptr = TryToAllocateInstrumented(self, space, alloc_size, false, bytes_allocated); 848 if (ptr != NULL) { 849 return ptr; 850 } 851 } 852 853 // Loop through our different Gc types and try to Gc until we get enough free memory. 854 for (size_t i = static_cast<size_t>(last_gc) + 1; 855 i < static_cast<size_t>(collector::kGcTypeMax); ++i) { 856 bool run_gc = false; 857 collector::GcType gc_type = static_cast<collector::GcType>(i); 858 switch (gc_type) { 859 case collector::kGcTypeSticky: { 860 const size_t alloc_space_size = alloc_space_->Size(); 861 run_gc = alloc_space_size > min_alloc_space_size_for_sticky_gc_ && 862 alloc_space_->Capacity() - alloc_space_size >= min_remaining_space_for_sticky_gc_; 863 break; 864 } 865 case collector::kGcTypePartial: 866 run_gc = have_zygote_space_; 867 break; 868 case collector::kGcTypeFull: 869 run_gc = true; 870 break; 871 default: 872 break; 873 } 874 875 if (run_gc) { 876 // If we actually ran a different type of Gc than requested, we can skip the index forwards. 877 collector::GcType gc_type_ran = CollectGarbageInternal(gc_type, kGcCauseForAlloc, false); 878 DCHECK_GE(static_cast<size_t>(gc_type_ran), i); 879 i = static_cast<size_t>(gc_type_ran); 880 881 // Did we free sufficient memory for the allocation to succeed? 882 ptr = TryToAllocateInstrumented(self, space, alloc_size, false, bytes_allocated); 883 if (ptr != NULL) { 884 return ptr; 885 } 886 } 887 } 888 889 // Allocations have failed after GCs; this is an exceptional state. 890 // Try harder, growing the heap if necessary. 891 ptr = TryToAllocateInstrumented(self, space, alloc_size, true, bytes_allocated); 892 if (ptr != NULL) { 893 return ptr; 894 } 895 896 // Most allocations should have succeeded by now, so the heap is really full, really fragmented, 897 // or the requested size is really big. Do another GC, collecting SoftReferences this time. The 898 // VM spec requires that all SoftReferences have been collected and cleared before throwing OOME. 899 900 // OLD-TODO: wait for the finalizers from the previous GC to finish 901 VLOG(gc) << "Forcing collection of SoftReferences for " << PrettySize(alloc_size) 902 << " allocation"; 903 904 // We don't need a WaitForConcurrentGcToComplete here either. 905 CollectGarbageInternal(collector::kGcTypeFull, kGcCauseForAlloc, true); 906 return TryToAllocateInstrumented(self, space, alloc_size, true, bytes_allocated); 907} 908 909void Heap::SetTargetHeapUtilization(float target) { 910 DCHECK_GT(target, 0.0f); // asserted in Java code 911 DCHECK_LT(target, 1.0f); 912 target_utilization_ = target; 913} 914 915size_t Heap::GetObjectsAllocated() const { 916 size_t total = 0; 917 typedef std::vector<space::ContinuousSpace*>::const_iterator It; 918 for (It it = continuous_spaces_.begin(), end = continuous_spaces_.end(); it != end; ++it) { 919 space::ContinuousSpace* space = *it; 920 if (space->IsDlMallocSpace()) { 921 total += space->AsDlMallocSpace()->GetObjectsAllocated(); 922 } 923 } 924 typedef std::vector<space::DiscontinuousSpace*>::const_iterator It2; 925 for (It2 it = discontinuous_spaces_.begin(), end = discontinuous_spaces_.end(); it != end; ++it) { 926 space::DiscontinuousSpace* space = *it; 927 total += space->AsLargeObjectSpace()->GetObjectsAllocated(); 928 } 929 return total; 930} 931 932size_t Heap::GetObjectsAllocatedEver() const { 933 size_t total = 0; 934 typedef std::vector<space::ContinuousSpace*>::const_iterator It; 935 for (It it = continuous_spaces_.begin(), end = continuous_spaces_.end(); it != end; ++it) { 936 space::ContinuousSpace* space = *it; 937 if (space->IsDlMallocSpace()) { 938 total += space->AsDlMallocSpace()->GetTotalObjectsAllocated(); 939 } 940 } 941 typedef std::vector<space::DiscontinuousSpace*>::const_iterator It2; 942 for (It2 it = discontinuous_spaces_.begin(), end = discontinuous_spaces_.end(); it != end; ++it) { 943 space::DiscontinuousSpace* space = *it; 944 total += space->AsLargeObjectSpace()->GetTotalObjectsAllocated(); 945 } 946 return total; 947} 948 949size_t Heap::GetBytesAllocatedEver() const { 950 size_t total = 0; 951 typedef std::vector<space::ContinuousSpace*>::const_iterator It; 952 for (It it = continuous_spaces_.begin(), end = continuous_spaces_.end(); it != end; ++it) { 953 space::ContinuousSpace* space = *it; 954 if (space->IsDlMallocSpace()) { 955 total += space->AsDlMallocSpace()->GetTotalBytesAllocated(); 956 } 957 } 958 typedef std::vector<space::DiscontinuousSpace*>::const_iterator It2; 959 for (It2 it = discontinuous_spaces_.begin(), end = discontinuous_spaces_.end(); it != end; ++it) { 960 space::DiscontinuousSpace* space = *it; 961 total += space->AsLargeObjectSpace()->GetTotalBytesAllocated(); 962 } 963 return total; 964} 965 966class InstanceCounter { 967 public: 968 InstanceCounter(const std::vector<mirror::Class*>& classes, bool use_is_assignable_from, uint64_t* counts) 969 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) 970 : classes_(classes), use_is_assignable_from_(use_is_assignable_from), counts_(counts) { 971 } 972 973 void operator()(const mirror::Object* o) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { 974 for (size_t i = 0; i < classes_.size(); ++i) { 975 const mirror::Class* instance_class = o->GetClass(); 976 if (use_is_assignable_from_) { 977 if (instance_class != NULL && classes_[i]->IsAssignableFrom(instance_class)) { 978 ++counts_[i]; 979 } 980 } else { 981 if (instance_class == classes_[i]) { 982 ++counts_[i]; 983 } 984 } 985 } 986 } 987 988 private: 989 const std::vector<mirror::Class*>& classes_; 990 bool use_is_assignable_from_; 991 uint64_t* const counts_; 992 993 DISALLOW_COPY_AND_ASSIGN(InstanceCounter); 994}; 995 996void Heap::CountInstances(const std::vector<mirror::Class*>& classes, bool use_is_assignable_from, 997 uint64_t* counts) { 998 // We only want reachable instances, so do a GC. This also ensures that the alloc stack 999 // is empty, so the live bitmap is the only place we need to look. 1000 Thread* self = Thread::Current(); 1001 self->TransitionFromRunnableToSuspended(kNative); 1002 CollectGarbage(false); 1003 self->TransitionFromSuspendedToRunnable(); 1004 1005 InstanceCounter counter(classes, use_is_assignable_from, counts); 1006 ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_); 1007 GetLiveBitmap()->Visit(counter); 1008} 1009 1010class InstanceCollector { 1011 public: 1012 InstanceCollector(mirror::Class* c, int32_t max_count, std::vector<mirror::Object*>& instances) 1013 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) 1014 : class_(c), max_count_(max_count), instances_(instances) { 1015 } 1016 1017 void operator()(const mirror::Object* o) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { 1018 const mirror::Class* instance_class = o->GetClass(); 1019 if (instance_class == class_) { 1020 if (max_count_ == 0 || instances_.size() < max_count_) { 1021 instances_.push_back(const_cast<mirror::Object*>(o)); 1022 } 1023 } 1024 } 1025 1026 private: 1027 mirror::Class* class_; 1028 uint32_t max_count_; 1029 std::vector<mirror::Object*>& instances_; 1030 1031 DISALLOW_COPY_AND_ASSIGN(InstanceCollector); 1032}; 1033 1034void Heap::GetInstances(mirror::Class* c, int32_t max_count, 1035 std::vector<mirror::Object*>& instances) { 1036 // We only want reachable instances, so do a GC. This also ensures that the alloc stack 1037 // is empty, so the live bitmap is the only place we need to look. 1038 Thread* self = Thread::Current(); 1039 self->TransitionFromRunnableToSuspended(kNative); 1040 CollectGarbage(false); 1041 self->TransitionFromSuspendedToRunnable(); 1042 1043 InstanceCollector collector(c, max_count, instances); 1044 ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_); 1045 GetLiveBitmap()->Visit(collector); 1046} 1047 1048class ReferringObjectsFinder { 1049 public: 1050 ReferringObjectsFinder(mirror::Object* object, int32_t max_count, 1051 std::vector<mirror::Object*>& referring_objects) 1052 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) 1053 : object_(object), max_count_(max_count), referring_objects_(referring_objects) { 1054 } 1055 1056 // For bitmap Visit. 1057 // TODO: Fix lock analysis to not use NO_THREAD_SAFETY_ANALYSIS, requires support for 1058 // annotalysis on visitors. 1059 void operator()(mirror::Object* obj) const NO_THREAD_SAFETY_ANALYSIS { 1060 collector::MarkSweep::VisitObjectReferences(obj, *this, true); 1061 } 1062 1063 // For MarkSweep::VisitObjectReferences. 1064 void operator()(mirror::Object* referrer, mirror::Object* object, 1065 const MemberOffset&, bool) const { 1066 if (object == object_ && (max_count_ == 0 || referring_objects_.size() < max_count_)) { 1067 referring_objects_.push_back(referrer); 1068 } 1069 } 1070 1071 private: 1072 mirror::Object* object_; 1073 uint32_t max_count_; 1074 std::vector<mirror::Object*>& referring_objects_; 1075 1076 DISALLOW_COPY_AND_ASSIGN(ReferringObjectsFinder); 1077}; 1078 1079void Heap::GetReferringObjects(mirror::Object* o, int32_t max_count, 1080 std::vector<mirror::Object*>& referring_objects) { 1081 // We only want reachable instances, so do a GC. This also ensures that the alloc stack 1082 // is empty, so the live bitmap is the only place we need to look. 1083 Thread* self = Thread::Current(); 1084 self->TransitionFromRunnableToSuspended(kNative); 1085 CollectGarbage(false); 1086 self->TransitionFromSuspendedToRunnable(); 1087 1088 ReferringObjectsFinder finder(o, max_count, referring_objects); 1089 ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_); 1090 GetLiveBitmap()->Visit(finder); 1091} 1092 1093void Heap::CollectGarbage(bool clear_soft_references) { 1094 // Even if we waited for a GC we still need to do another GC since weaks allocated during the 1095 // last GC will not have necessarily been cleared. 1096 Thread* self = Thread::Current(); 1097 WaitForConcurrentGcToComplete(self); 1098 CollectGarbageInternal(collector::kGcTypeFull, kGcCauseExplicit, clear_soft_references); 1099} 1100 1101void Heap::PreZygoteFork() { 1102 static Mutex zygote_creation_lock_("zygote creation lock", kZygoteCreationLock); 1103 // Do this before acquiring the zygote creation lock so that we don't get lock order violations. 1104 CollectGarbage(false); 1105 Thread* self = Thread::Current(); 1106 MutexLock mu(self, zygote_creation_lock_); 1107 1108 // Try to see if we have any Zygote spaces. 1109 if (have_zygote_space_) { 1110 return; 1111 } 1112 1113 VLOG(heap) << "Starting PreZygoteFork with alloc space size " << PrettySize(alloc_space_->Size()); 1114 1115 { 1116 // Flush the alloc stack. 1117 WriterMutexLock mu(self, *Locks::heap_bitmap_lock_); 1118 FlushAllocStack(); 1119 } 1120 1121 // Turns the current alloc space into a Zygote space and obtain the new alloc space composed 1122 // of the remaining available heap memory. 1123 space::DlMallocSpace* zygote_space = alloc_space_; 1124 alloc_space_ = zygote_space->CreateZygoteSpace("alloc space"); 1125 alloc_space_->SetFootprintLimit(alloc_space_->Capacity()); 1126 1127 // Change the GC retention policy of the zygote space to only collect when full. 1128 zygote_space->SetGcRetentionPolicy(space::kGcRetentionPolicyFullCollect); 1129 AddContinuousSpace(alloc_space_); 1130 have_zygote_space_ = true; 1131 1132 // Create the zygote space mod union table. 1133 accounting::ModUnionTable* mod_union_table = 1134 new accounting::ModUnionTableCardCache("zygote space mod-union table", this, zygote_space); 1135 CHECK(mod_union_table != nullptr) << "Failed to create zygote space mod-union table"; 1136 AddModUnionTable(mod_union_table); 1137 1138 // Reset the cumulative loggers since we now have a few additional timing phases. 1139 for (const auto& collector : mark_sweep_collectors_) { 1140 collector->ResetCumulativeStatistics(); 1141 } 1142} 1143 1144void Heap::FlushAllocStack() { 1145 MarkAllocStack(alloc_space_->GetLiveBitmap(), large_object_space_->GetLiveObjects(), 1146 allocation_stack_.get()); 1147 allocation_stack_->Reset(); 1148} 1149 1150void Heap::MarkAllocStack(accounting::SpaceBitmap* bitmap, accounting::SpaceSetMap* large_objects, 1151 accounting::ObjectStack* stack) { 1152 mirror::Object** limit = stack->End(); 1153 for (mirror::Object** it = stack->Begin(); it != limit; ++it) { 1154 const mirror::Object* obj = *it; 1155 DCHECK(obj != NULL); 1156 if (LIKELY(bitmap->HasAddress(obj))) { 1157 bitmap->Set(obj); 1158 } else { 1159 large_objects->Set(obj); 1160 } 1161 } 1162} 1163 1164 1165const char* gc_cause_and_type_strings[3][4] = { 1166 {"", "GC Alloc Sticky", "GC Alloc Partial", "GC Alloc Full"}, 1167 {"", "GC Background Sticky", "GC Background Partial", "GC Background Full"}, 1168 {"", "GC Explicit Sticky", "GC Explicit Partial", "GC Explicit Full"}}; 1169 1170collector::GcType Heap::CollectGarbageInternal(collector::GcType gc_type, GcCause gc_cause, 1171 bool clear_soft_references) { 1172 Thread* self = Thread::Current(); 1173 1174 ScopedThreadStateChange tsc(self, kWaitingPerformingGc); 1175 Locks::mutator_lock_->AssertNotHeld(self); 1176 1177 if (self->IsHandlingStackOverflow()) { 1178 LOG(WARNING) << "Performing GC on a thread that is handling a stack overflow."; 1179 } 1180 1181 // Ensure there is only one GC at a time. 1182 bool start_collect = false; 1183 while (!start_collect) { 1184 { 1185 MutexLock mu(self, *gc_complete_lock_); 1186 if (!is_gc_running_) { 1187 is_gc_running_ = true; 1188 start_collect = true; 1189 } 1190 } 1191 if (!start_collect) { 1192 // TODO: timinglog this. 1193 WaitForConcurrentGcToComplete(self); 1194 1195 // TODO: if another thread beat this one to do the GC, perhaps we should just return here? 1196 // Not doing at the moment to ensure soft references are cleared. 1197 } 1198 } 1199 gc_complete_lock_->AssertNotHeld(self); 1200 1201 if (gc_cause == kGcCauseForAlloc && Runtime::Current()->HasStatsEnabled()) { 1202 ++Runtime::Current()->GetStats()->gc_for_alloc_count; 1203 ++Thread::Current()->GetStats()->gc_for_alloc_count; 1204 } 1205 1206 uint64_t gc_start_time_ns = NanoTime(); 1207 uint64_t gc_start_size = GetBytesAllocated(); 1208 // Approximate allocation rate in bytes / second. 1209 if (UNLIKELY(gc_start_time_ns == last_gc_time_ns_)) { 1210 LOG(WARNING) << "Timers are broken (gc_start_time == last_gc_time_)."; 1211 } 1212 uint64_t ms_delta = NsToMs(gc_start_time_ns - last_gc_time_ns_); 1213 if (ms_delta != 0) { 1214 allocation_rate_ = ((gc_start_size - last_gc_size_) * 1000) / ms_delta; 1215 VLOG(heap) << "Allocation rate: " << PrettySize(allocation_rate_) << "/s"; 1216 } 1217 1218 if (gc_type == collector::kGcTypeSticky && 1219 alloc_space_->Size() < min_alloc_space_size_for_sticky_gc_) { 1220 gc_type = collector::kGcTypePartial; 1221 } 1222 1223 DCHECK_LT(gc_type, collector::kGcTypeMax); 1224 DCHECK_NE(gc_type, collector::kGcTypeNone); 1225 DCHECK_LE(gc_cause, kGcCauseExplicit); 1226 1227 ATRACE_BEGIN(gc_cause_and_type_strings[gc_cause][gc_type]); 1228 1229 collector::MarkSweep* collector = NULL; 1230 for (const auto& cur_collector : mark_sweep_collectors_) { 1231 if (cur_collector->IsConcurrent() == concurrent_gc_ && cur_collector->GetGcType() == gc_type) { 1232 collector = cur_collector; 1233 break; 1234 } 1235 } 1236 CHECK(collector != NULL) 1237 << "Could not find garbage collector with concurrent=" << concurrent_gc_ 1238 << " and type=" << gc_type; 1239 1240 collector->clear_soft_references_ = clear_soft_references; 1241 collector->Run(); 1242 total_objects_freed_ever_ += collector->GetFreedObjects(); 1243 total_bytes_freed_ever_ += collector->GetFreedBytes(); 1244 if (care_about_pause_times_) { 1245 const size_t duration = collector->GetDurationNs(); 1246 std::vector<uint64_t> pauses = collector->GetPauseTimes(); 1247 // GC for alloc pauses the allocating thread, so consider it as a pause. 1248 bool was_slow = duration > long_gc_log_threshold_ || 1249 (gc_cause == kGcCauseForAlloc && duration > long_pause_log_threshold_); 1250 if (!was_slow) { 1251 for (uint64_t pause : pauses) { 1252 was_slow = was_slow || pause > long_pause_log_threshold_; 1253 } 1254 } 1255 1256 if (was_slow) { 1257 const size_t percent_free = GetPercentFree(); 1258 const size_t current_heap_size = GetBytesAllocated(); 1259 const size_t total_memory = GetTotalMemory(); 1260 std::ostringstream pause_string; 1261 for (size_t i = 0; i < pauses.size(); ++i) { 1262 pause_string << PrettyDuration((pauses[i] / 1000) * 1000) 1263 << ((i != pauses.size() - 1) ? ", " : ""); 1264 } 1265 LOG(INFO) << gc_cause << " " << collector->GetName() 1266 << " GC freed " << collector->GetFreedObjects() << "(" 1267 << PrettySize(collector->GetFreedBytes()) << ") AllocSpace objects, " 1268 << collector->GetFreedLargeObjects() << "(" 1269 << PrettySize(collector->GetFreedLargeObjectBytes()) << ") LOS objects, " 1270 << percent_free << "% free, " << PrettySize(current_heap_size) << "/" 1271 << PrettySize(total_memory) << ", " << "paused " << pause_string.str() 1272 << " total " << PrettyDuration((duration / 1000) * 1000); 1273 if (VLOG_IS_ON(heap)) { 1274 LOG(INFO) << Dumpable<base::TimingLogger>(collector->GetTimings()); 1275 } 1276 } 1277 } 1278 1279 { 1280 MutexLock mu(self, *gc_complete_lock_); 1281 is_gc_running_ = false; 1282 last_gc_type_ = gc_type; 1283 // Wake anyone who may have been waiting for the GC to complete. 1284 gc_complete_cond_->Broadcast(self); 1285 } 1286 1287 ATRACE_END(); 1288 1289 // Inform DDMS that a GC completed. 1290 Dbg::GcDidFinish(); 1291 return gc_type; 1292} 1293 1294static mirror::Object* RootMatchesObjectVisitor(mirror::Object* root, void* arg) { 1295 mirror::Object* obj = reinterpret_cast<mirror::Object*>(arg); 1296 if (root == obj) { 1297 LOG(INFO) << "Object " << obj << " is a root"; 1298 } 1299 return root; 1300} 1301 1302class ScanVisitor { 1303 public: 1304 void operator()(const mirror::Object* obj) const { 1305 LOG(ERROR) << "Would have rescanned object " << obj; 1306 } 1307}; 1308 1309// Verify a reference from an object. 1310class VerifyReferenceVisitor { 1311 public: 1312 explicit VerifyReferenceVisitor(Heap* heap) 1313 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) 1314 : heap_(heap), failed_(false) {} 1315 1316 bool Failed() const { 1317 return failed_; 1318 } 1319 1320 // TODO: Fix lock analysis to not use NO_THREAD_SAFETY_ANALYSIS, requires support for smarter 1321 // analysis on visitors. 1322 void operator()(const mirror::Object* obj, const mirror::Object* ref, 1323 const MemberOffset& offset, bool /* is_static */) const 1324 NO_THREAD_SAFETY_ANALYSIS { 1325 // Verify that the reference is live. 1326 if (UNLIKELY(ref != NULL && !IsLive(ref))) { 1327 accounting::CardTable* card_table = heap_->GetCardTable(); 1328 accounting::ObjectStack* alloc_stack = heap_->allocation_stack_.get(); 1329 accounting::ObjectStack* live_stack = heap_->live_stack_.get(); 1330 1331 if (!failed_) { 1332 // Print message on only on first failure to prevent spam. 1333 LOG(ERROR) << "!!!!!!!!!!!!!!Heap corruption detected!!!!!!!!!!!!!!!!!!!"; 1334 failed_ = true; 1335 } 1336 if (obj != nullptr) { 1337 byte* card_addr = card_table->CardFromAddr(obj); 1338 LOG(ERROR) << "Object " << obj << " references dead object " << ref << " at offset " 1339 << offset << "\n card value = " << static_cast<int>(*card_addr); 1340 if (heap_->IsHeapAddress(obj->GetClass())) { 1341 LOG(ERROR) << "Obj type " << PrettyTypeOf(obj); 1342 } else { 1343 LOG(ERROR) << "Object " << obj << " class(" << obj->GetClass() << ") not a heap address"; 1344 } 1345 1346 // Attmept to find the class inside of the recently freed objects. 1347 space::ContinuousSpace* ref_space = heap_->FindContinuousSpaceFromObject(ref, true); 1348 if (ref_space->IsDlMallocSpace()) { 1349 space::DlMallocSpace* space = ref_space->AsDlMallocSpace(); 1350 mirror::Class* ref_class = space->FindRecentFreedObject(ref); 1351 if (ref_class != nullptr) { 1352 LOG(ERROR) << "Reference " << ref << " found as a recently freed object with class " 1353 << PrettyClass(ref_class); 1354 } else { 1355 LOG(ERROR) << "Reference " << ref << " not found as a recently freed object"; 1356 } 1357 } 1358 1359 if (ref->GetClass() != nullptr && heap_->IsHeapAddress(ref->GetClass()) && 1360 ref->GetClass()->IsClass()) { 1361 LOG(ERROR) << "Ref type " << PrettyTypeOf(ref); 1362 } else { 1363 LOG(ERROR) << "Ref " << ref << " class(" << ref->GetClass() 1364 << ") is not a valid heap address"; 1365 } 1366 1367 card_table->CheckAddrIsInCardTable(reinterpret_cast<const byte*>(obj)); 1368 void* cover_begin = card_table->AddrFromCard(card_addr); 1369 void* cover_end = reinterpret_cast<void*>(reinterpret_cast<size_t>(cover_begin) + 1370 accounting::CardTable::kCardSize); 1371 LOG(ERROR) << "Card " << reinterpret_cast<void*>(card_addr) << " covers " << cover_begin 1372 << "-" << cover_end; 1373 accounting::SpaceBitmap* bitmap = heap_->GetLiveBitmap()->GetContinuousSpaceBitmap(obj); 1374 1375 // Print out how the object is live. 1376 if (bitmap != NULL && bitmap->Test(obj)) { 1377 LOG(ERROR) << "Object " << obj << " found in live bitmap"; 1378 } 1379 if (alloc_stack->Contains(const_cast<mirror::Object*>(obj))) { 1380 LOG(ERROR) << "Object " << obj << " found in allocation stack"; 1381 } 1382 if (live_stack->Contains(const_cast<mirror::Object*>(obj))) { 1383 LOG(ERROR) << "Object " << obj << " found in live stack"; 1384 } 1385 if (alloc_stack->Contains(const_cast<mirror::Object*>(ref))) { 1386 LOG(ERROR) << "Ref " << ref << " found in allocation stack"; 1387 } 1388 if (live_stack->Contains(const_cast<mirror::Object*>(ref))) { 1389 LOG(ERROR) << "Ref " << ref << " found in live stack"; 1390 } 1391 // Attempt to see if the card table missed the reference. 1392 ScanVisitor scan_visitor; 1393 byte* byte_cover_begin = reinterpret_cast<byte*>(card_table->AddrFromCard(card_addr)); 1394 card_table->Scan(bitmap, byte_cover_begin, 1395 byte_cover_begin + accounting::CardTable::kCardSize, scan_visitor); 1396 1397 // Search to see if any of the roots reference our object. 1398 void* arg = const_cast<void*>(reinterpret_cast<const void*>(obj)); 1399 Runtime::Current()->VisitRoots(&RootMatchesObjectVisitor, arg, false, false); 1400 1401 // Search to see if any of the roots reference our reference. 1402 arg = const_cast<void*>(reinterpret_cast<const void*>(ref)); 1403 Runtime::Current()->VisitRoots(&RootMatchesObjectVisitor, arg, false, false); 1404 } else { 1405 LOG(ERROR) << "Root references dead object " << ref << "\nRef type " << PrettyTypeOf(ref); 1406 } 1407 } 1408 } 1409 1410 bool IsLive(const mirror::Object* obj) const NO_THREAD_SAFETY_ANALYSIS { 1411 return heap_->IsLiveObjectLocked(obj, true, false, true); 1412 } 1413 1414 static mirror::Object* VerifyRoots(mirror::Object* root, void* arg) { 1415 VerifyReferenceVisitor* visitor = reinterpret_cast<VerifyReferenceVisitor*>(arg); 1416 (*visitor)(nullptr, root, MemberOffset(0), true); 1417 return root; 1418 } 1419 1420 private: 1421 Heap* const heap_; 1422 mutable bool failed_; 1423}; 1424 1425// Verify all references within an object, for use with HeapBitmap::Visit. 1426class VerifyObjectVisitor { 1427 public: 1428 explicit VerifyObjectVisitor(Heap* heap) : heap_(heap), failed_(false) {} 1429 1430 void operator()(const mirror::Object* obj) const 1431 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) { 1432 // Note: we are verifying the references in obj but not obj itself, this is because obj must 1433 // be live or else how did we find it in the live bitmap? 1434 VerifyReferenceVisitor visitor(heap_); 1435 // The class doesn't count as a reference but we should verify it anyways. 1436 visitor(obj, obj->GetClass(), MemberOffset(0), false); 1437 collector::MarkSweep::VisitObjectReferences(const_cast<mirror::Object*>(obj), visitor, true); 1438 failed_ = failed_ || visitor.Failed(); 1439 } 1440 1441 bool Failed() const { 1442 return failed_; 1443 } 1444 1445 private: 1446 Heap* const heap_; 1447 mutable bool failed_; 1448}; 1449 1450// Must do this with mutators suspended since we are directly accessing the allocation stacks. 1451bool Heap::VerifyHeapReferences() { 1452 Locks::mutator_lock_->AssertExclusiveHeld(Thread::Current()); 1453 // Lets sort our allocation stacks so that we can efficiently binary search them. 1454 allocation_stack_->Sort(); 1455 live_stack_->Sort(); 1456 // Perform the verification. 1457 VerifyObjectVisitor visitor(this); 1458 Runtime::Current()->VisitRoots(VerifyReferenceVisitor::VerifyRoots, &visitor, false, false); 1459 GetLiveBitmap()->Visit(visitor); 1460 // Verify objects in the allocation stack since these will be objects which were: 1461 // 1. Allocated prior to the GC (pre GC verification). 1462 // 2. Allocated during the GC (pre sweep GC verification). 1463 for (mirror::Object** it = allocation_stack_->Begin(); it != allocation_stack_->End(); ++it) { 1464 visitor(*it); 1465 } 1466 // We don't want to verify the objects in the live stack since they themselves may be 1467 // pointing to dead objects if they are not reachable. 1468 if (visitor.Failed()) { 1469 // Dump mod-union tables. 1470 for (const auto& table_pair : mod_union_tables_) { 1471 accounting::ModUnionTable* mod_union_table = table_pair.second; 1472 mod_union_table->Dump(LOG(ERROR) << mod_union_table->GetName() << ": "); 1473 } 1474 DumpSpaces(); 1475 return false; 1476 } 1477 return true; 1478} 1479 1480class VerifyReferenceCardVisitor { 1481 public: 1482 VerifyReferenceCardVisitor(Heap* heap, bool* failed) 1483 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, 1484 Locks::heap_bitmap_lock_) 1485 : heap_(heap), failed_(failed) { 1486 } 1487 1488 // TODO: Fix lock analysis to not use NO_THREAD_SAFETY_ANALYSIS, requires support for 1489 // annotalysis on visitors. 1490 void operator()(const mirror::Object* obj, const mirror::Object* ref, const MemberOffset& offset, 1491 bool is_static) const NO_THREAD_SAFETY_ANALYSIS { 1492 // Filter out class references since changing an object's class does not mark the card as dirty. 1493 // Also handles large objects, since the only reference they hold is a class reference. 1494 if (ref != NULL && !ref->IsClass()) { 1495 accounting::CardTable* card_table = heap_->GetCardTable(); 1496 // If the object is not dirty and it is referencing something in the live stack other than 1497 // class, then it must be on a dirty card. 1498 if (!card_table->AddrIsInCardTable(obj)) { 1499 LOG(ERROR) << "Object " << obj << " is not in the address range of the card table"; 1500 *failed_ = true; 1501 } else if (!card_table->IsDirty(obj)) { 1502 // Card should be either kCardDirty if it got re-dirtied after we aged it, or 1503 // kCardDirty - 1 if it didnt get touched since we aged it. 1504 accounting::ObjectStack* live_stack = heap_->live_stack_.get(); 1505 if (live_stack->ContainsSorted(const_cast<mirror::Object*>(ref))) { 1506 if (live_stack->ContainsSorted(const_cast<mirror::Object*>(obj))) { 1507 LOG(ERROR) << "Object " << obj << " found in live stack"; 1508 } 1509 if (heap_->GetLiveBitmap()->Test(obj)) { 1510 LOG(ERROR) << "Object " << obj << " found in live bitmap"; 1511 } 1512 LOG(ERROR) << "Object " << obj << " " << PrettyTypeOf(obj) 1513 << " references " << ref << " " << PrettyTypeOf(ref) << " in live stack"; 1514 1515 // Print which field of the object is dead. 1516 if (!obj->IsObjectArray()) { 1517 const mirror::Class* klass = is_static ? obj->AsClass() : obj->GetClass(); 1518 CHECK(klass != NULL); 1519 const mirror::ObjectArray<mirror::ArtField>* fields = is_static ? klass->GetSFields() 1520 : klass->GetIFields(); 1521 CHECK(fields != NULL); 1522 for (int32_t i = 0; i < fields->GetLength(); ++i) { 1523 const mirror::ArtField* cur = fields->Get(i); 1524 if (cur->GetOffset().Int32Value() == offset.Int32Value()) { 1525 LOG(ERROR) << (is_static ? "Static " : "") << "field in the live stack is " 1526 << PrettyField(cur); 1527 break; 1528 } 1529 } 1530 } else { 1531 const mirror::ObjectArray<mirror::Object>* object_array = 1532 obj->AsObjectArray<mirror::Object>(); 1533 for (int32_t i = 0; i < object_array->GetLength(); ++i) { 1534 if (object_array->Get(i) == ref) { 1535 LOG(ERROR) << (is_static ? "Static " : "") << "obj[" << i << "] = ref"; 1536 } 1537 } 1538 } 1539 1540 *failed_ = true; 1541 } 1542 } 1543 } 1544 } 1545 1546 private: 1547 Heap* const heap_; 1548 bool* const failed_; 1549}; 1550 1551class VerifyLiveStackReferences { 1552 public: 1553 explicit VerifyLiveStackReferences(Heap* heap) 1554 : heap_(heap), 1555 failed_(false) {} 1556 1557 void operator()(mirror::Object* obj) const 1558 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) { 1559 VerifyReferenceCardVisitor visitor(heap_, const_cast<bool*>(&failed_)); 1560 collector::MarkSweep::VisitObjectReferences(obj, visitor, true); 1561 } 1562 1563 bool Failed() const { 1564 return failed_; 1565 } 1566 1567 private: 1568 Heap* const heap_; 1569 bool failed_; 1570}; 1571 1572bool Heap::VerifyMissingCardMarks() { 1573 Locks::mutator_lock_->AssertExclusiveHeld(Thread::Current()); 1574 1575 // We need to sort the live stack since we binary search it. 1576 live_stack_->Sort(); 1577 VerifyLiveStackReferences visitor(this); 1578 GetLiveBitmap()->Visit(visitor); 1579 1580 // We can verify objects in the live stack since none of these should reference dead objects. 1581 for (mirror::Object** it = live_stack_->Begin(); it != live_stack_->End(); ++it) { 1582 visitor(*it); 1583 } 1584 1585 if (visitor.Failed()) { 1586 DumpSpaces(); 1587 return false; 1588 } 1589 return true; 1590} 1591 1592void Heap::SwapStacks() { 1593 allocation_stack_.swap(live_stack_); 1594} 1595 1596accounting::ModUnionTable* Heap::FindModUnionTableFromSpace(space::Space* space) { 1597 auto it = mod_union_tables_.find(space); 1598 if (it == mod_union_tables_.end()) { 1599 return nullptr; 1600 } 1601 return it->second; 1602} 1603 1604void Heap::ProcessCards(base::TimingLogger& timings) { 1605 // Clear cards and keep track of cards cleared in the mod-union table. 1606 for (const auto& space : continuous_spaces_) { 1607 accounting::ModUnionTable* table = FindModUnionTableFromSpace(space); 1608 if (table != nullptr) { 1609 const char* name = space->IsZygoteSpace() ? "ZygoteModUnionClearCards" : 1610 "ImageModUnionClearCards"; 1611 base::TimingLogger::ScopedSplit split(name, &timings); 1612 table->ClearCards(); 1613 } else { 1614 base::TimingLogger::ScopedSplit split("AllocSpaceClearCards", &timings); 1615 // No mod union table for the AllocSpace. Age the cards so that the GC knows that these cards 1616 // were dirty before the GC started. 1617 card_table_->ModifyCardsAtomic(space->Begin(), space->End(), AgeCardVisitor(), VoidFunctor()); 1618 } 1619 } 1620} 1621 1622static mirror::Object* IdentityCallback(mirror::Object* obj, void*) { 1623 return obj; 1624} 1625 1626void Heap::PreGcVerification(collector::GarbageCollector* gc) { 1627 ThreadList* thread_list = Runtime::Current()->GetThreadList(); 1628 Thread* self = Thread::Current(); 1629 1630 if (verify_pre_gc_heap_) { 1631 thread_list->SuspendAll(); 1632 { 1633 ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_); 1634 if (!VerifyHeapReferences()) { 1635 LOG(FATAL) << "Pre " << gc->GetName() << " heap verification failed"; 1636 } 1637 } 1638 thread_list->ResumeAll(); 1639 } 1640 1641 // Check that all objects which reference things in the live stack are on dirty cards. 1642 if (verify_missing_card_marks_) { 1643 thread_list->SuspendAll(); 1644 { 1645 ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_); 1646 SwapStacks(); 1647 // Sort the live stack so that we can quickly binary search it later. 1648 if (!VerifyMissingCardMarks()) { 1649 LOG(FATAL) << "Pre " << gc->GetName() << " missing card mark verification failed"; 1650 } 1651 SwapStacks(); 1652 } 1653 thread_list->ResumeAll(); 1654 } 1655 1656 if (verify_mod_union_table_) { 1657 thread_list->SuspendAll(); 1658 ReaderMutexLock reader_lock(self, *Locks::heap_bitmap_lock_); 1659 for (const auto& table_pair : mod_union_tables_) { 1660 accounting::ModUnionTable* mod_union_table = table_pair.second; 1661 mod_union_table->UpdateAndMarkReferences(IdentityCallback, nullptr); 1662 mod_union_table->Verify(); 1663 } 1664 thread_list->ResumeAll(); 1665 } 1666} 1667 1668void Heap::PreSweepingGcVerification(collector::GarbageCollector* gc) { 1669 // Called before sweeping occurs since we want to make sure we are not going so reclaim any 1670 // reachable objects. 1671 if (verify_post_gc_heap_) { 1672 Thread* self = Thread::Current(); 1673 CHECK_NE(self->GetState(), kRunnable); 1674 { 1675 WriterMutexLock mu(self, *Locks::heap_bitmap_lock_); 1676 // Swapping bound bitmaps does nothing. 1677 gc->SwapBitmaps(); 1678 if (!VerifyHeapReferences()) { 1679 LOG(FATAL) << "Pre sweeping " << gc->GetName() << " GC verification failed"; 1680 } 1681 gc->SwapBitmaps(); 1682 } 1683 } 1684} 1685 1686void Heap::PostGcVerification(collector::GarbageCollector* gc) { 1687 if (verify_system_weaks_) { 1688 Thread* self = Thread::Current(); 1689 ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_); 1690 collector::MarkSweep* mark_sweep = down_cast<collector::MarkSweep*>(gc); 1691 mark_sweep->VerifySystemWeaks(); 1692 } 1693} 1694 1695collector::GcType Heap::WaitForConcurrentGcToComplete(Thread* self) { 1696 collector::GcType last_gc_type = collector::kGcTypeNone; 1697 if (concurrent_gc_) { 1698 ATRACE_BEGIN("GC: Wait For Concurrent"); 1699 bool do_wait; 1700 uint64_t wait_start = NanoTime(); 1701 { 1702 // Check if GC is running holding gc_complete_lock_. 1703 MutexLock mu(self, *gc_complete_lock_); 1704 do_wait = is_gc_running_; 1705 } 1706 if (do_wait) { 1707 uint64_t wait_time; 1708 // We must wait, change thread state then sleep on gc_complete_cond_; 1709 ScopedThreadStateChange tsc(Thread::Current(), kWaitingForGcToComplete); 1710 { 1711 MutexLock mu(self, *gc_complete_lock_); 1712 while (is_gc_running_) { 1713 gc_complete_cond_->Wait(self); 1714 } 1715 last_gc_type = last_gc_type_; 1716 wait_time = NanoTime() - wait_start; 1717 total_wait_time_ += wait_time; 1718 } 1719 if (wait_time > long_pause_log_threshold_) { 1720 LOG(INFO) << "WaitForConcurrentGcToComplete blocked for " << PrettyDuration(wait_time); 1721 } 1722 } 1723 ATRACE_END(); 1724 } 1725 return last_gc_type; 1726} 1727 1728void Heap::DumpForSigQuit(std::ostream& os) { 1729 os << "Heap: " << GetPercentFree() << "% free, " << PrettySize(GetBytesAllocated()) << "/" 1730 << PrettySize(GetTotalMemory()) << "; " << GetObjectsAllocated() << " objects\n"; 1731 DumpGcPerformanceInfo(os); 1732} 1733 1734size_t Heap::GetPercentFree() { 1735 return static_cast<size_t>(100.0f * static_cast<float>(GetFreeMemory()) / GetTotalMemory()); 1736} 1737 1738void Heap::SetIdealFootprint(size_t max_allowed_footprint) { 1739 if (max_allowed_footprint > GetMaxMemory()) { 1740 VLOG(gc) << "Clamp target GC heap from " << PrettySize(max_allowed_footprint) << " to " 1741 << PrettySize(GetMaxMemory()); 1742 max_allowed_footprint = GetMaxMemory(); 1743 } 1744 max_allowed_footprint_ = max_allowed_footprint; 1745} 1746 1747void Heap::UpdateMaxNativeFootprint() { 1748 size_t native_size = native_bytes_allocated_; 1749 // TODO: Tune the native heap utilization to be a value other than the java heap utilization. 1750 size_t target_size = native_size / GetTargetHeapUtilization(); 1751 if (target_size > native_size + max_free_) { 1752 target_size = native_size + max_free_; 1753 } else if (target_size < native_size + min_free_) { 1754 target_size = native_size + min_free_; 1755 } 1756 native_footprint_gc_watermark_ = target_size; 1757 native_footprint_limit_ = 2 * target_size - native_size; 1758} 1759 1760void Heap::GrowForUtilization(collector::GcType gc_type, uint64_t gc_duration) { 1761 // We know what our utilization is at this moment. 1762 // This doesn't actually resize any memory. It just lets the heap grow more when necessary. 1763 const size_t bytes_allocated = GetBytesAllocated(); 1764 last_gc_size_ = bytes_allocated; 1765 last_gc_time_ns_ = NanoTime(); 1766 1767 size_t target_size; 1768 if (gc_type != collector::kGcTypeSticky) { 1769 // Grow the heap for non sticky GC. 1770 target_size = bytes_allocated / GetTargetHeapUtilization(); 1771 if (target_size > bytes_allocated + max_free_) { 1772 target_size = bytes_allocated + max_free_; 1773 } else if (target_size < bytes_allocated + min_free_) { 1774 target_size = bytes_allocated + min_free_; 1775 } 1776 next_gc_type_ = collector::kGcTypeSticky; 1777 } else { 1778 // Based on how close the current heap size is to the target size, decide 1779 // whether or not to do a partial or sticky GC next. 1780 if (bytes_allocated + min_free_ <= max_allowed_footprint_) { 1781 next_gc_type_ = collector::kGcTypeSticky; 1782 } else { 1783 next_gc_type_ = collector::kGcTypePartial; 1784 } 1785 1786 // If we have freed enough memory, shrink the heap back down. 1787 if (bytes_allocated + max_free_ < max_allowed_footprint_) { 1788 target_size = bytes_allocated + max_free_; 1789 } else { 1790 target_size = std::max(bytes_allocated, max_allowed_footprint_); 1791 } 1792 } 1793 1794 if (!ignore_max_footprint_) { 1795 SetIdealFootprint(target_size); 1796 1797 if (concurrent_gc_) { 1798 // Calculate when to perform the next ConcurrentGC. 1799 1800 // Calculate the estimated GC duration. 1801 double gc_duration_seconds = NsToMs(gc_duration) / 1000.0; 1802 // Estimate how many remaining bytes we will have when we need to start the next GC. 1803 size_t remaining_bytes = allocation_rate_ * gc_duration_seconds; 1804 remaining_bytes = std::max(remaining_bytes, kMinConcurrentRemainingBytes); 1805 if (UNLIKELY(remaining_bytes > max_allowed_footprint_)) { 1806 // A never going to happen situation that from the estimated allocation rate we will exceed 1807 // the applications entire footprint with the given estimated allocation rate. Schedule 1808 // another GC straight away. 1809 concurrent_start_bytes_ = bytes_allocated; 1810 } else { 1811 // Start a concurrent GC when we get close to the estimated remaining bytes. When the 1812 // allocation rate is very high, remaining_bytes could tell us that we should start a GC 1813 // right away. 1814 concurrent_start_bytes_ = std::max(max_allowed_footprint_ - remaining_bytes, bytes_allocated); 1815 } 1816 DCHECK_LE(concurrent_start_bytes_, max_allowed_footprint_); 1817 DCHECK_LE(max_allowed_footprint_, growth_limit_); 1818 } 1819 } 1820 1821 UpdateMaxNativeFootprint(); 1822} 1823 1824void Heap::ClearGrowthLimit() { 1825 growth_limit_ = capacity_; 1826 alloc_space_->ClearGrowthLimit(); 1827} 1828 1829void Heap::SetReferenceOffsets(MemberOffset reference_referent_offset, 1830 MemberOffset reference_queue_offset, 1831 MemberOffset reference_queueNext_offset, 1832 MemberOffset reference_pendingNext_offset, 1833 MemberOffset finalizer_reference_zombie_offset) { 1834 reference_referent_offset_ = reference_referent_offset; 1835 reference_queue_offset_ = reference_queue_offset; 1836 reference_queueNext_offset_ = reference_queueNext_offset; 1837 reference_pendingNext_offset_ = reference_pendingNext_offset; 1838 finalizer_reference_zombie_offset_ = finalizer_reference_zombie_offset; 1839 CHECK_NE(reference_referent_offset_.Uint32Value(), 0U); 1840 CHECK_NE(reference_queue_offset_.Uint32Value(), 0U); 1841 CHECK_NE(reference_queueNext_offset_.Uint32Value(), 0U); 1842 CHECK_NE(reference_pendingNext_offset_.Uint32Value(), 0U); 1843 CHECK_NE(finalizer_reference_zombie_offset_.Uint32Value(), 0U); 1844} 1845 1846mirror::Object* Heap::GetReferenceReferent(mirror::Object* reference) { 1847 DCHECK(reference != NULL); 1848 DCHECK_NE(reference_referent_offset_.Uint32Value(), 0U); 1849 return reference->GetFieldObject<mirror::Object*>(reference_referent_offset_, true); 1850} 1851 1852void Heap::ClearReferenceReferent(mirror::Object* reference) { 1853 DCHECK(reference != NULL); 1854 DCHECK_NE(reference_referent_offset_.Uint32Value(), 0U); 1855 reference->SetFieldObject(reference_referent_offset_, NULL, true); 1856} 1857 1858// Returns true if the reference object has not yet been enqueued. 1859bool Heap::IsEnqueuable(const mirror::Object* ref) { 1860 DCHECK(ref != NULL); 1861 const mirror::Object* queue = 1862 ref->GetFieldObject<mirror::Object*>(reference_queue_offset_, false); 1863 const mirror::Object* queue_next = 1864 ref->GetFieldObject<mirror::Object*>(reference_queueNext_offset_, false); 1865 return (queue != NULL) && (queue_next == NULL); 1866} 1867 1868void Heap::EnqueueReference(mirror::Object* ref, mirror::Object** cleared_reference_list) { 1869 DCHECK(ref != NULL); 1870 CHECK(ref->GetFieldObject<mirror::Object*>(reference_queue_offset_, false) != NULL); 1871 CHECK(ref->GetFieldObject<mirror::Object*>(reference_queueNext_offset_, false) == NULL); 1872 EnqueuePendingReference(ref, cleared_reference_list); 1873} 1874 1875bool Heap::IsEnqueued(mirror::Object* ref) { 1876 // Since the references are stored as cyclic lists it means that once enqueued, the pending next 1877 // will always be non-null. 1878 return ref->GetFieldObject<mirror::Object*>(GetReferencePendingNextOffset(), false) != nullptr; 1879} 1880 1881void Heap::EnqueuePendingReference(mirror::Object* ref, mirror::Object** list) { 1882 DCHECK(ref != NULL); 1883 DCHECK(list != NULL); 1884 if (*list == NULL) { 1885 // 1 element cyclic queue, ie: Reference ref = ..; ref.pendingNext = ref; 1886 ref->SetFieldObject(reference_pendingNext_offset_, ref, false); 1887 *list = ref; 1888 } else { 1889 mirror::Object* head = 1890 (*list)->GetFieldObject<mirror::Object*>(reference_pendingNext_offset_, false); 1891 ref->SetFieldObject(reference_pendingNext_offset_, head, false); 1892 (*list)->SetFieldObject(reference_pendingNext_offset_, ref, false); 1893 } 1894} 1895 1896mirror::Object* Heap::DequeuePendingReference(mirror::Object** list) { 1897 DCHECK(list != NULL); 1898 DCHECK(*list != NULL); 1899 mirror::Object* head = (*list)->GetFieldObject<mirror::Object*>(reference_pendingNext_offset_, 1900 false); 1901 mirror::Object* ref; 1902 1903 // Note: the following code is thread-safe because it is only called from ProcessReferences which 1904 // is single threaded. 1905 if (*list == head) { 1906 ref = *list; 1907 *list = NULL; 1908 } else { 1909 mirror::Object* next = head->GetFieldObject<mirror::Object*>(reference_pendingNext_offset_, 1910 false); 1911 (*list)->SetFieldObject(reference_pendingNext_offset_, next, false); 1912 ref = head; 1913 } 1914 ref->SetFieldObject(reference_pendingNext_offset_, NULL, false); 1915 return ref; 1916} 1917 1918void Heap::AddFinalizerReference(Thread* self, mirror::Object* object) { 1919 ScopedObjectAccess soa(self); 1920 JValue result; 1921 ArgArray arg_array(NULL, 0); 1922 arg_array.Append(reinterpret_cast<uint32_t>(object)); 1923 soa.DecodeMethod(WellKnownClasses::java_lang_ref_FinalizerReference_add)->Invoke(self, 1924 arg_array.GetArray(), arg_array.GetNumBytes(), &result, 'V'); 1925} 1926 1927void Heap::EnqueueClearedReferences(mirror::Object** cleared) { 1928 DCHECK(cleared != NULL); 1929 if (*cleared != NULL) { 1930 // When a runtime isn't started there are no reference queues to care about so ignore. 1931 if (LIKELY(Runtime::Current()->IsStarted())) { 1932 ScopedObjectAccess soa(Thread::Current()); 1933 JValue result; 1934 ArgArray arg_array(NULL, 0); 1935 arg_array.Append(reinterpret_cast<uint32_t>(*cleared)); 1936 soa.DecodeMethod(WellKnownClasses::java_lang_ref_ReferenceQueue_add)->Invoke(soa.Self(), 1937 arg_array.GetArray(), arg_array.GetNumBytes(), &result, 'V'); 1938 } 1939 *cleared = NULL; 1940 } 1941} 1942 1943void Heap::RequestConcurrentGC(Thread* self) { 1944 // Make sure that we can do a concurrent GC. 1945 Runtime* runtime = Runtime::Current(); 1946 DCHECK(concurrent_gc_); 1947 if (runtime == NULL || !runtime->IsFinishedStarting() || 1948 !runtime->IsConcurrentGcEnabled()) { 1949 return; 1950 } 1951 { 1952 MutexLock mu(self, *Locks::runtime_shutdown_lock_); 1953 if (runtime->IsShuttingDown()) { 1954 return; 1955 } 1956 } 1957 if (self->IsHandlingStackOverflow()) { 1958 return; 1959 } 1960 1961 // We already have a request pending, no reason to start more until we update 1962 // concurrent_start_bytes_. 1963 concurrent_start_bytes_ = std::numeric_limits<size_t>::max(); 1964 1965 JNIEnv* env = self->GetJniEnv(); 1966 DCHECK(WellKnownClasses::java_lang_Daemons != NULL); 1967 DCHECK(WellKnownClasses::java_lang_Daemons_requestGC != NULL); 1968 env->CallStaticVoidMethod(WellKnownClasses::java_lang_Daemons, 1969 WellKnownClasses::java_lang_Daemons_requestGC); 1970 CHECK(!env->ExceptionCheck()); 1971} 1972 1973void Heap::ConcurrentGC(Thread* self) { 1974 { 1975 MutexLock mu(self, *Locks::runtime_shutdown_lock_); 1976 if (Runtime::Current()->IsShuttingDown()) { 1977 return; 1978 } 1979 } 1980 1981 // Wait for any GCs currently running to finish. 1982 if (WaitForConcurrentGcToComplete(self) == collector::kGcTypeNone) { 1983 CollectGarbageInternal(next_gc_type_, kGcCauseBackground, false); 1984 } 1985} 1986 1987void Heap::RequestHeapTrim() { 1988 // GC completed and now we must decide whether to request a heap trim (advising pages back to the 1989 // kernel) or not. Issuing a request will also cause trimming of the libc heap. As a trim scans 1990 // a space it will hold its lock and can become a cause of jank. 1991 // Note, the large object space self trims and the Zygote space was trimmed and unchanging since 1992 // forking. 1993 1994 // We don't have a good measure of how worthwhile a trim might be. We can't use the live bitmap 1995 // because that only marks object heads, so a large array looks like lots of empty space. We 1996 // don't just call dlmalloc all the time, because the cost of an _attempted_ trim is proportional 1997 // to utilization (which is probably inversely proportional to how much benefit we can expect). 1998 // We could try mincore(2) but that's only a measure of how many pages we haven't given away, 1999 // not how much use we're making of those pages. 2000 uint64_t ms_time = MilliTime(); 2001 // Note the large object space's bytes allocated is equal to its capacity. 2002 uint64_t los_bytes_allocated = large_object_space_->GetBytesAllocated(); 2003 float utilization = static_cast<float>(GetBytesAllocated() - los_bytes_allocated) / 2004 (GetTotalMemory() - los_bytes_allocated); 2005 if ((utilization > 0.75f && !IsLowMemoryMode()) || ((ms_time - last_trim_time_ms_) < 2 * 1000)) { 2006 // Don't bother trimming the alloc space if it's more than 75% utilized and low memory mode is 2007 // not enabled, or if a heap trim occurred in the last two seconds. 2008 return; 2009 } 2010 2011 Thread* self = Thread::Current(); 2012 { 2013 MutexLock mu(self, *Locks::runtime_shutdown_lock_); 2014 Runtime* runtime = Runtime::Current(); 2015 if (runtime == NULL || !runtime->IsFinishedStarting() || runtime->IsShuttingDown()) { 2016 // Heap trimming isn't supported without a Java runtime or Daemons (such as at dex2oat time) 2017 // Also: we do not wish to start a heap trim if the runtime is shutting down (a racy check 2018 // as we don't hold the lock while requesting the trim). 2019 return; 2020 } 2021 } 2022 2023 last_trim_time_ms_ = ms_time; 2024 ListenForProcessStateChange(); 2025 2026 // Trim only if we do not currently care about pause times. 2027 if (!care_about_pause_times_) { 2028 JNIEnv* env = self->GetJniEnv(); 2029 DCHECK(WellKnownClasses::java_lang_Daemons != NULL); 2030 DCHECK(WellKnownClasses::java_lang_Daemons_requestHeapTrim != NULL); 2031 env->CallStaticVoidMethod(WellKnownClasses::java_lang_Daemons, 2032 WellKnownClasses::java_lang_Daemons_requestHeapTrim); 2033 CHECK(!env->ExceptionCheck()); 2034 } 2035} 2036 2037size_t Heap::Trim() { 2038 // Handle a requested heap trim on a thread outside of the main GC thread. 2039 return alloc_space_->Trim(); 2040} 2041 2042bool Heap::IsGCRequestPending() const { 2043 return concurrent_start_bytes_ != std::numeric_limits<size_t>::max(); 2044} 2045 2046void Heap::RegisterNativeAllocation(JNIEnv* env, int bytes) { 2047 // Total number of native bytes allocated. 2048 native_bytes_allocated_.fetch_add(bytes); 2049 if (static_cast<size_t>(native_bytes_allocated_) > native_footprint_gc_watermark_) { 2050 // The second watermark is higher than the gc watermark. If you hit this it means you are 2051 // allocating native objects faster than the GC can keep up with. 2052 if (static_cast<size_t>(native_bytes_allocated_) > native_footprint_limit_) { 2053 // Can't do this in WellKnownClasses::Init since System is not properly set up at that 2054 // point. 2055 if (UNLIKELY(WellKnownClasses::java_lang_System_runFinalization == NULL)) { 2056 DCHECK(WellKnownClasses::java_lang_System != NULL); 2057 WellKnownClasses::java_lang_System_runFinalization = 2058 CacheMethod(env, WellKnownClasses::java_lang_System, true, "runFinalization", "()V"); 2059 CHECK(WellKnownClasses::java_lang_System_runFinalization != NULL); 2060 } 2061 if (WaitForConcurrentGcToComplete(ThreadForEnv(env)) != collector::kGcTypeNone) { 2062 // Just finished a GC, attempt to run finalizers. 2063 env->CallStaticVoidMethod(WellKnownClasses::java_lang_System, 2064 WellKnownClasses::java_lang_System_runFinalization); 2065 CHECK(!env->ExceptionCheck()); 2066 } 2067 2068 // If we still are over the watermark, attempt a GC for alloc and run finalizers. 2069 if (static_cast<size_t>(native_bytes_allocated_) > native_footprint_limit_) { 2070 CollectGarbageInternal(collector::kGcTypePartial, kGcCauseForAlloc, false); 2071 env->CallStaticVoidMethod(WellKnownClasses::java_lang_System, 2072 WellKnownClasses::java_lang_System_runFinalization); 2073 CHECK(!env->ExceptionCheck()); 2074 } 2075 // We have just run finalizers, update the native watermark since it is very likely that 2076 // finalizers released native managed allocations. 2077 UpdateMaxNativeFootprint(); 2078 } else { 2079 if (!IsGCRequestPending()) { 2080 RequestConcurrentGC(ThreadForEnv(env)); 2081 } 2082 } 2083 } 2084} 2085 2086void Heap::RegisterNativeFree(JNIEnv* env, int bytes) { 2087 int expected_size, new_size; 2088 do { 2089 expected_size = native_bytes_allocated_.load(); 2090 new_size = expected_size - bytes; 2091 if (UNLIKELY(new_size < 0)) { 2092 ScopedObjectAccess soa(env); 2093 env->ThrowNew(WellKnownClasses::java_lang_RuntimeException, 2094 StringPrintf("Attempted to free %d native bytes with only %d native bytes " 2095 "registered as allocated", bytes, expected_size).c_str()); 2096 break; 2097 } 2098 } while (!native_bytes_allocated_.compare_and_swap(expected_size, new_size)); 2099} 2100 2101int64_t Heap::GetTotalMemory() const { 2102 int64_t ret = 0; 2103 for (const auto& space : continuous_spaces_) { 2104 if (space->IsImageSpace()) { 2105 // Currently don't include the image space. 2106 } else if (space->IsDlMallocSpace()) { 2107 // Zygote or alloc space 2108 ret += space->AsDlMallocSpace()->GetFootprint(); 2109 } 2110 } 2111 for (const auto& space : discontinuous_spaces_) { 2112 if (space->IsLargeObjectSpace()) { 2113 ret += space->AsLargeObjectSpace()->GetBytesAllocated(); 2114 } 2115 } 2116 return ret; 2117} 2118 2119void Heap::AddModUnionTable(accounting::ModUnionTable* mod_union_table) { 2120 DCHECK(mod_union_table != nullptr); 2121 mod_union_tables_.Put(mod_union_table->GetSpace(), mod_union_table); 2122} 2123 2124} // namespace gc 2125} // namespace art 2126