heap.cc revision 67f99418f648c3a95256ed3dcd8e8b64eef0b372
/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "heap.h"

#define ATRACE_TAG ATRACE_TAG_DALVIK
#include <cutils/trace.h>

#include <limits>
#include <vector>
#include <valgrind.h>

#include "base/stl_util.h"
#include "common_throws.h"
#include "cutils/sched_policy.h"
#include "debugger.h"
#include "gc/accounting/atomic_stack.h"
#include "gc/accounting/card_table-inl.h"
#include "gc/accounting/heap_bitmap-inl.h"
#include "gc/accounting/mod_union_table-inl.h"
#include "gc/accounting/space_bitmap-inl.h"
#include "gc/collector/mark_sweep-inl.h"
#include "gc/collector/partial_mark_sweep.h"
#include "gc/collector/sticky_mark_sweep.h"
#include "gc/space/dlmalloc_space-inl.h"
#include "gc/space/image_space.h"
#include "gc/space/large_object_space.h"
#include "gc/space/space-inl.h"
#include "image.h"
#include "invoke_arg_array_builder.h"
#include "mirror/class-inl.h"
#include "mirror/field-inl.h"
#include "mirror/object.h"
#include "mirror/object-inl.h"
#include "mirror/object_array-inl.h"
#include "object_utils.h"
#include "os.h"
#include "ScopedLocalRef.h"
#include "scoped_thread_state_change.h"
#include "sirt_ref.h"
#include "thread_list.h"
#include "UniquePtr.h"
#include "well_known_classes.h"

namespace art {
namespace gc {

// When to create a log message about a slow GC, 100ms.
static const uint64_t kSlowGcThreshold = MsToNs(100);
// When to create a log message about a long pause, 5ms.
static const uint64_t kLongGcPauseThreshold = MsToNs(5);
static const bool kGCALotMode = false;
static const size_t kGcAlotInterval = KB;
static const bool kDumpGcPerformanceOnShutdown = false;
// Minimum amount of remaining bytes before a concurrent GC is triggered.
static const size_t kMinConcurrentRemainingBytes = 128 * KB;
const double Heap::kDefaultTargetUtilization = 0.5;
// If true, measure the total allocation time.
static const bool kMeasureAllocationTime = false;

Heap::Heap(size_t initial_size, size_t growth_limit, size_t min_free, size_t max_free,
           double target_utilization, size_t capacity, const std::string& original_image_file_name,
           bool concurrent_gc, size_t num_gc_threads, bool low_memory_mode)
    : alloc_space_(NULL),
      card_table_(NULL),
      concurrent_gc_(concurrent_gc),
      num_gc_threads_(num_gc_threads),
      low_memory_mode_(low_memory_mode),
      have_zygote_space_(false),
      reference_queue_lock_(NULL),
      is_gc_running_(false),
      last_gc_type_(collector::kGcTypeNone),
      next_gc_type_(collector::kGcTypePartial),
      capacity_(capacity),
      growth_limit_(growth_limit),
      max_allowed_footprint_(initial_size),
      native_footprint_gc_watermark_(initial_size),
      native_footprint_limit_(2 * initial_size),
      activity_thread_class_(NULL),
      application_thread_class_(NULL),
      activity_thread_(NULL),
      application_thread_(NULL),
      last_process_state_id_(NULL),
      // Initially care about pauses in case we never get notified of process states, or if the JNI
      // code becomes broken.
      care_about_pause_times_(true),
      concurrent_start_bytes_(concurrent_gc ? initial_size - (kMinConcurrentRemainingBytes)
                                            : std::numeric_limits<size_t>::max()),
      total_bytes_freed_ever_(0),
      total_objects_freed_ever_(0),
      large_object_threshold_(3 * kPageSize),
      num_bytes_allocated_(0),
      native_bytes_allocated_(0),
      gc_memory_overhead_(0),
      verify_missing_card_marks_(false),
      verify_system_weaks_(false),
      verify_pre_gc_heap_(false),
      verify_post_gc_heap_(false),
      verify_mod_union_table_(false),
      min_alloc_space_size_for_sticky_gc_(2 * MB),
      min_remaining_space_for_sticky_gc_(1 * MB),
      last_trim_time_ms_(0),
      allocation_rate_(0),
      /* For GC a lot mode, we limit the allocations stacks to be kGcAlotInterval allocations. This
       * causes a lot of GC since we do a GC for alloc whenever the stack is full. When heap
       * verification is enabled, we limit the size of allocation stacks to speed up their
       * searching.
       */
      max_allocation_stack_size_(kGCALotMode ? kGcAlotInterval
          : (kDesiredHeapVerification > kNoHeapVerification) ?
              KB : MB),
      reference_referent_offset_(0),
      reference_queue_offset_(0),
      reference_queueNext_offset_(0),
      reference_pendingNext_offset_(0),
      finalizer_reference_zombie_offset_(0),
      min_free_(min_free),
      max_free_(max_free),
      target_utilization_(target_utilization),
      total_wait_time_(0),
      total_allocation_time_(0),
      verify_object_mode_(kHeapVerificationNotPermitted),
      running_on_valgrind_(RUNNING_ON_VALGRIND) {
  if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
    LOG(INFO) << "Heap() entering";
  }

  live_bitmap_.reset(new accounting::HeapBitmap(this));
  mark_bitmap_.reset(new accounting::HeapBitmap(this));

  // Requested begin for the alloc space, to follow the mapped image and oat files
  byte* requested_alloc_space_begin = NULL;
  std::string image_file_name(original_image_file_name);
  if (!image_file_name.empty()) {
    space::ImageSpace* image_space = space::ImageSpace::Create(image_file_name);
    CHECK(image_space != NULL) << "Failed to create space for " << image_file_name;
    AddContinuousSpace(image_space);
    // Oat files referenced by image files immediately follow them in memory, ensure alloc space
    // isn't going to get in the middle
    byte* oat_file_end_addr = image_space->GetImageHeader().GetOatFileEnd();
    CHECK_GT(oat_file_end_addr, image_space->End());
    if (oat_file_end_addr > requested_alloc_space_begin) {
      requested_alloc_space_begin =
          reinterpret_cast<byte*>(RoundUp(reinterpret_cast<uintptr_t>(oat_file_end_addr),
                                          kPageSize));
    }
  }

  alloc_space_ = space::DlMallocSpace::Create(Runtime::Current()->IsZygote() ? "zygote space" : "alloc space",
                                              initial_size,
                                              growth_limit, capacity,
                                              requested_alloc_space_begin);
  CHECK(alloc_space_ != NULL) << "Failed to create alloc space";
  alloc_space_->SetFootprintLimit(alloc_space_->Capacity());
  AddContinuousSpace(alloc_space_);

  // Allocate the large object space.
  const bool kUseFreeListSpaceForLOS = false;
  if (kUseFreeListSpaceForLOS) {
    large_object_space_ = space::FreeListSpace::Create("large object space", NULL, capacity);
  } else {
    large_object_space_ = space::LargeObjectMapSpace::Create("large object space");
  }
  CHECK(large_object_space_ != NULL) << "Failed to create large object space";
  AddDiscontinuousSpace(large_object_space_);

  // Compute heap capacity. Continuous spaces are sorted in order of Begin().
  byte* heap_begin = continuous_spaces_.front()->Begin();
  size_t heap_capacity = continuous_spaces_.back()->End() - continuous_spaces_.front()->Begin();
  if (continuous_spaces_.back()->IsDlMallocSpace()) {
    heap_capacity += continuous_spaces_.back()->AsDlMallocSpace()->NonGrowthLimitCapacity();
  }

  // Mark image objects in the live bitmap
  // TODO: C++0x
  typedef std::vector<space::ContinuousSpace*>::iterator It;
  for (It it = continuous_spaces_.begin(); it != continuous_spaces_.end(); ++it) {
    space::ContinuousSpace* space = *it;
    if (space->IsImageSpace()) {
      space::ImageSpace* image_space = space->AsImageSpace();
      image_space->RecordImageAllocations(image_space->GetLiveBitmap());
    }
  }

  // Allocate the card table.
  card_table_.reset(accounting::CardTable::Create(heap_begin, heap_capacity));
  CHECK(card_table_.get() != NULL) << "Failed to create card table";

  image_mod_union_table_.reset(new accounting::ModUnionTableToZygoteAllocspace(this));
  CHECK(image_mod_union_table_.get() != NULL) << "Failed to create image mod-union table";

  zygote_mod_union_table_.reset(new accounting::ModUnionTableCardCache(this));
  CHECK(zygote_mod_union_table_.get() != NULL) << "Failed to create Zygote mod-union table";

  // TODO: Count objects in the image space here.
  num_bytes_allocated_ = 0;

  // Default mark stack size in bytes.
  static const size_t default_mark_stack_size = 64 * KB;
  mark_stack_.reset(accounting::ObjectStack::Create("mark stack", default_mark_stack_size));
  allocation_stack_.reset(accounting::ObjectStack::Create("allocation stack",
                                                          max_allocation_stack_size_));
  live_stack_.reset(accounting::ObjectStack::Create("live stack",
                                                    max_allocation_stack_size_));

  // It's still too early to take a lock because there are no threads yet, but we can create locks
  // now. We don't create it earlier to make it clear that you can't use locks during heap
  // initialization.
  gc_complete_lock_ = new Mutex("GC complete lock");
  gc_complete_cond_.reset(new ConditionVariable("GC complete condition variable",
                                                *gc_complete_lock_));

  // Create the reference queue lock; this is required for parallel object scanning in the GC.
  reference_queue_lock_ = new Mutex("reference queue lock");

  last_gc_time_ns_ = NanoTime();
  last_gc_size_ = GetBytesAllocated();

  // Create our garbage collectors.
  for (size_t i = 0; i < 2; ++i) {
    const bool concurrent = i != 0;
    mark_sweep_collectors_.push_back(new collector::MarkSweep(this, concurrent));
    mark_sweep_collectors_.push_back(new collector::PartialMarkSweep(this, concurrent));
    mark_sweep_collectors_.push_back(new collector::StickyMarkSweep(this, concurrent));
  }

  CHECK_NE(max_allowed_footprint_, 0U);
  if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
    LOG(INFO) << "Heap() exiting";
  }
}

void Heap::CreateThreadPool() {
  thread_pool_.reset(new ThreadPool(num_gc_threads_));
}

void Heap::DeleteThreadPool() {
  thread_pool_.reset(NULL);
}

// Sort spaces based on begin address
struct ContinuousSpaceSorter {
  bool operator()(const space::ContinuousSpace* a, const space::ContinuousSpace* b) const {
    return a->Begin() < b->Begin();
  }
};

static bool ReadStaticInt(JNIEnvExt* env, jclass clz, const char* name, int* out_value) {
  CHECK(out_value != NULL);
  jfieldID field = env->GetStaticFieldID(clz, name, "I");
  if (field == NULL) {
    env->ExceptionClear();
    return false;
  }
  *out_value = env->GetStaticIntField(clz, field);
  return true;
}

void Heap::ListenForProcessStateChange() {
  VLOG(gc) << "Heap notified of process state change";

  Thread* self = Thread::Current();
  JNIEnvExt* env = self->GetJniEnv();

  if (!have_zygote_space_) {
    return;
  }

  if (activity_thread_class_ == NULL) {
    jclass clz = env->FindClass("android/app/ActivityThread");
    if (clz == NULL) {
      env->ExceptionClear();
      LOG(WARNING) << "Could not find activity thread class in process state change";
      return;
    }
    activity_thread_class_ = reinterpret_cast<jclass>(env->NewGlobalRef(clz));
  }

  if (activity_thread_class_ != NULL &&
      activity_thread_ == NULL) {
    jmethodID current_activity_method = env->GetStaticMethodID(activity_thread_class_,
                                                               "currentActivityThread",
                                                               "()Landroid/app/ActivityThread;");
    if (current_activity_method == NULL) {
      env->ExceptionClear();
      LOG(WARNING) << "Could not get method for currentActivityThread";
      return;
    }

    jobject obj = env->CallStaticObjectMethod(activity_thread_class_, current_activity_method);
    if (obj == NULL) {
      env->ExceptionClear();
      LOG(WARNING) << "Could not get current activity";
      return;
    }
    activity_thread_ = env->NewGlobalRef(obj);
  }

  if (process_state_cares_about_pause_time_.empty()) {
    // Just attempt to do this the first time.
    jclass clz = env->FindClass("android/app/ActivityManager");
    if (clz == NULL) {
      LOG(WARNING) << "Activity manager class is null";
      return;
    }
    ScopedLocalRef<jclass> activity_manager(env, clz);
    std::vector<const char*> care_about_pauses;
    care_about_pauses.push_back("PROCESS_STATE_TOP");
    care_about_pauses.push_back("PROCESS_STATE_IMPORTANT_BACKGROUND");
    // Attempt to read the constants and classify them as whether or not we care about pause times.
    for (size_t i = 0; i < care_about_pauses.size(); ++i) {
      int process_state = 0;
      if (ReadStaticInt(env, activity_manager.get(), care_about_pauses[i], &process_state)) {
        process_state_cares_about_pause_time_.insert(process_state);
        VLOG(gc) << "Adding process state " << process_state
                 << " to set of states which care about pause time";
      }
    }
  }

  if (application_thread_class_ == NULL) {
    jclass clz = env->FindClass("android/app/ActivityThread$ApplicationThread");
    if (clz == NULL) {
      env->ExceptionClear();
      LOG(WARNING) << "Could not get application thread class";
      return;
    }
    application_thread_class_ = reinterpret_cast<jclass>(env->NewGlobalRef(clz));
    last_process_state_id_ = env->GetFieldID(application_thread_class_, "mLastProcessState", "I");
    if (last_process_state_id_ == NULL) {
      env->ExceptionClear();
      LOG(WARNING) << "Could not get last process state member";
      return;
    }
  }

  if (application_thread_class_ != NULL && application_thread_ == NULL) {
    jmethodID get_application_thread =
        env->GetMethodID(activity_thread_class_, "getApplicationThread",
                         "()Landroid/app/ActivityThread$ApplicationThread;");
    if (get_application_thread == NULL) {
      LOG(WARNING) << "Could not get method ID for get application thread";
      return;
    }

    jobject obj = env->CallObjectMethod(activity_thread_, get_application_thread);
    if (obj == NULL) {
      LOG(WARNING) << "Could not get application thread";
      return;
    }

    application_thread_ = env->NewGlobalRef(obj);
  }

  if (application_thread_ != NULL && last_process_state_id_ != NULL) {
    int process_state = env->GetIntField(application_thread_, last_process_state_id_);
    env->ExceptionClear();

    care_about_pause_times_ = process_state_cares_about_pause_time_.find(process_state) !=
        process_state_cares_about_pause_time_.end();

    VLOG(gc) << "New process state " << process_state
             << " care about pauses " << care_about_pause_times_;
  }
}

void Heap::AddContinuousSpace(space::ContinuousSpace* space) {
  WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
  DCHECK(space != NULL);
  DCHECK(space->GetLiveBitmap() != NULL);
  live_bitmap_->AddContinuousSpaceBitmap(space->GetLiveBitmap());
  DCHECK(space->GetMarkBitmap() != NULL);
  mark_bitmap_->AddContinuousSpaceBitmap(space->GetMarkBitmap());
  continuous_spaces_.push_back(space);
  if (space->IsDlMallocSpace() && !space->IsLargeObjectSpace()) {
    alloc_space_ = space->AsDlMallocSpace();
  }

  // Ensure that spaces remain sorted in increasing order of start address (required for CMS finger)
  std::sort(continuous_spaces_.begin(), continuous_spaces_.end(), ContinuousSpaceSorter());

  // Ensure that ImageSpaces < ZygoteSpaces < AllocSpaces so that we can do address based checks to
  // avoid redundant marking.
  bool seen_zygote = false, seen_alloc = false;
  typedef std::vector<space::ContinuousSpace*>::const_iterator It;
  for (It it = continuous_spaces_.begin(); it != continuous_spaces_.end(); ++it) {
    space::ContinuousSpace* space = *it;
    if (space->IsImageSpace()) {
      DCHECK(!seen_zygote);
      DCHECK(!seen_alloc);
    } else if (space->IsZygoteSpace()) {
      DCHECK(!seen_alloc);
      seen_zygote = true;
    } else if (space->IsDlMallocSpace()) {
      seen_alloc = true;
    }
  }
}

void Heap::RegisterGCAllocation(size_t bytes) {
  if (this != NULL) {
    gc_memory_overhead_.fetch_add(bytes);
  }
}

void Heap::RegisterGCDeAllocation(size_t bytes) {
  if (this != NULL) {
    gc_memory_overhead_.fetch_sub(bytes);
  }
}

void Heap::AddDiscontinuousSpace(space::DiscontinuousSpace* space) {
  WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
  DCHECK(space != NULL);
  DCHECK(space->GetLiveObjects() != NULL);
  live_bitmap_->AddDiscontinuousObjectSet(space->GetLiveObjects());
  DCHECK(space->GetMarkObjects() != NULL);
  mark_bitmap_->AddDiscontinuousObjectSet(space->GetMarkObjects());
  discontinuous_spaces_.push_back(space);
}

void Heap::DumpGcPerformanceInfo(std::ostream& os) {
  // Dump cumulative timings.
  os << "Dumping cumulative Gc timings\n";
  uint64_t total_duration = 0;

  // Dump cumulative loggers for each GC type.
  // TODO: C++0x
  uint64_t total_paused_time = 0;
  typedef std::vector<collector::MarkSweep*>::const_iterator It;
  for (It it = mark_sweep_collectors_.begin();
       it != mark_sweep_collectors_.end(); ++it) {
    collector::MarkSweep* collector = *it;
    CumulativeLogger& logger = collector->GetCumulativeTimings();
    if (logger.GetTotalNs() != 0) {
      os << Dumpable<CumulativeLogger>(logger);
      const uint64_t total_ns = logger.GetTotalNs();
      const uint64_t total_pause_ns = (*it)->GetTotalPausedTimeNs();
      double seconds = NsToMs(logger.GetTotalNs()) / 1000.0;
      const uint64_t freed_bytes = collector->GetTotalFreedBytes();
      const uint64_t freed_objects = collector->GetTotalFreedObjects();
      os << collector->GetName() << " total time: " << PrettyDuration(total_ns) << "\n"
         << collector->GetName() << " paused time: " << PrettyDuration(total_pause_ns) << "\n"
         << collector->GetName() << " freed: " << freed_objects
         << " objects with total size " << PrettySize(freed_bytes) << "\n"
         << collector->GetName() << " throughput: " << freed_objects / seconds << "/s / "
         << PrettySize(freed_bytes / seconds) << "/s\n";
      total_duration += total_ns;
      total_paused_time += total_pause_ns;
    }
  }
  uint64_t allocation_time = static_cast<uint64_t>(total_allocation_time_) * kTimeAdjust;
  size_t total_objects_allocated = GetObjectsAllocatedEver();
  size_t total_bytes_allocated = GetBytesAllocatedEver();
  if (total_duration != 0) {
    const double total_seconds = static_cast<double>(total_duration / 1000) / 1000000.0;
    os << "Total time spent in GC: " << PrettyDuration(total_duration) << "\n";
    os << "Mean GC size throughput: "
       << PrettySize(GetBytesFreedEver() / total_seconds) << "/s\n";
    os << "Mean GC object throughput: "
       << (GetObjectsFreedEver() / total_seconds) << " objects/s\n";
  }
  os << "Total number of allocations: " << total_objects_allocated << "\n";
  os << "Total bytes allocated " << PrettySize(total_bytes_allocated) << "\n";
  if (kMeasureAllocationTime) {
    os << "Total time spent allocating: " << PrettyDuration(allocation_time) << "\n";
    os << "Mean allocation time: " << PrettyDuration(allocation_time / total_objects_allocated)
       << "\n";
  }
  os << "Total mutator paused time: " << PrettyDuration(total_paused_time) << "\n";
  os << "Total time waiting for GC to complete: " << PrettyDuration(total_wait_time_) << "\n";
  os << "Approximate GC data structures memory overhead: " << gc_memory_overhead_;
}

Heap::~Heap() {
  if (kDumpGcPerformanceOnShutdown) {
    DumpGcPerformanceInfo(LOG(INFO));
  }

  STLDeleteElements(&mark_sweep_collectors_);

  // If we don't reset then the mark stack complains in its destructor.
  allocation_stack_->Reset();
  live_stack_->Reset();

  VLOG(heap) << "~Heap()";
  // We can't take the heap lock here because there might be a daemon thread suspended with the
  // heap lock held. We know though that no non-daemon threads are executing, and we know that
  // all daemon threads are suspended, and we also know that the threads list has been deleted, so
  // those threads can't resume. We're the only running thread, and we can do whatever we like...
  STLDeleteElements(&continuous_spaces_);
  STLDeleteElements(&discontinuous_spaces_);
  delete gc_complete_lock_;
  delete reference_queue_lock_;
}

space::ContinuousSpace* Heap::FindContinuousSpaceFromObject(const mirror::Object* obj,
                                                            bool fail_ok) const {
  // TODO: C++0x auto
  typedef std::vector<space::ContinuousSpace*>::const_iterator It;
  for (It it = continuous_spaces_.begin(), end = continuous_spaces_.end(); it != end; ++it) {
    if ((*it)->Contains(obj)) {
      return *it;
    }
  }
  if (!fail_ok) {
    LOG(FATAL) << "object " << reinterpret_cast<const void*>(obj) << " not inside any spaces!";
  }
  return NULL;
}

space::DiscontinuousSpace* Heap::FindDiscontinuousSpaceFromObject(const mirror::Object* obj,
                                                                  bool fail_ok) const {
  // TODO: C++0x auto
  typedef std::vector<space::DiscontinuousSpace*>::const_iterator It;
  for (It it = discontinuous_spaces_.begin(), end = discontinuous_spaces_.end(); it != end; ++it) {
    if ((*it)->Contains(obj)) {
      return *it;
    }
  }
  if (!fail_ok) {
    LOG(FATAL) << "object " << reinterpret_cast<const void*>(obj) << " not inside any spaces!";
  }
  return NULL;
}

space::Space* Heap::FindSpaceFromObject(const mirror::Object* obj, bool fail_ok) const {
  space::Space* result = FindContinuousSpaceFromObject(obj, true);
  if (result != NULL) {
    return result;
  }
  return FindDiscontinuousSpaceFromObject(obj, true);
}

space::ImageSpace* Heap::GetImageSpace() const {
  // TODO: C++0x auto
  typedef std::vector<space::ContinuousSpace*>::const_iterator It;
  for (It it = continuous_spaces_.begin(), end = continuous_spaces_.end(); it != end; ++it) {
    if ((*it)->IsImageSpace()) {
      return (*it)->AsImageSpace();
    }
  }
  return NULL;
}

static void MSpaceChunkCallback(void* start, void* end, size_t used_bytes, void* arg) {
  size_t chunk_size = reinterpret_cast<uint8_t*>(end) - reinterpret_cast<uint8_t*>(start);
  if (used_bytes < chunk_size) {
    size_t chunk_free_bytes = chunk_size - used_bytes;
    size_t& max_contiguous_allocation = *reinterpret_cast<size_t*>(arg);
    max_contiguous_allocation = std::max(max_contiguous_allocation, chunk_free_bytes);
  }
}

mirror::Object* Heap::AllocObject(Thread* self, mirror::Class* c, size_t byte_count) {
  DCHECK(c == NULL || (c->IsClassClass() && byte_count >= sizeof(mirror::Class)) ||
         (c->IsVariableSize() || c->GetObjectSize() == byte_count) ||
         strlen(ClassHelper(c).GetDescriptor()) == 0);
  DCHECK_GE(byte_count, sizeof(mirror::Object));

  mirror::Object* obj = NULL;
  size_t bytes_allocated = 0;
  uint64_t allocation_start = 0;
  if (UNLIKELY(kMeasureAllocationTime)) {
    allocation_start = NanoTime() / kTimeAdjust;
  }

  // We need to have a zygote space or else our newly allocated large object can end up in the
  // Zygote resulting in it being prematurely freed.
  // We can only do this for primitive objects since large objects will not be within the card table
  // range. This also means that we rely on SetClass not dirtying the object's card.
  bool large_object_allocation =
      byte_count >= large_object_threshold_ && have_zygote_space_ && c->IsPrimitiveArray();
  if (UNLIKELY(large_object_allocation)) {
    obj = Allocate(self, large_object_space_, byte_count, &bytes_allocated);
    // Make sure that our large object didn't get placed anywhere within the space interval or else
    // it breaks the immune range.
    DCHECK(obj == NULL ||
           reinterpret_cast<byte*>(obj) < continuous_spaces_.front()->Begin() ||
           reinterpret_cast<byte*>(obj) >= continuous_spaces_.back()->End());
  } else {
    obj = Allocate(self, alloc_space_, byte_count, &bytes_allocated);
    // Ensure that we did not allocate into a zygote space.
    DCHECK(obj == NULL || !have_zygote_space_ || !FindSpaceFromObject(obj, false)->IsZygoteSpace());
  }

  if (LIKELY(obj != NULL)) {
    obj->SetClass(c);

    // Record allocation after since we want to use the atomic add for the atomic fence to guard
    // the SetClass since we do not want the class to appear NULL in another thread.
    RecordAllocation(bytes_allocated, obj);

    if (Dbg::IsAllocTrackingEnabled()) {
      Dbg::RecordAllocation(c, byte_count);
    }
    if (UNLIKELY(static_cast<size_t>(num_bytes_allocated_) >= concurrent_start_bytes_)) {
      // The SirtRef is necessary since the calls in RequestConcurrentGC are a safepoint.
      SirtRef<mirror::Object> ref(self, obj);
      RequestConcurrentGC(self);
    }
    if (kDesiredHeapVerification > kNoHeapVerification) {
      VerifyObject(obj);
    }

    if (UNLIKELY(kMeasureAllocationTime)) {
      total_allocation_time_.fetch_add(NanoTime() / kTimeAdjust - allocation_start);
    }

    return obj;
  } else {
    std::ostringstream oss;
    int64_t total_bytes_free = GetFreeMemory();
    oss << "Failed to allocate a " << byte_count << " byte allocation with " << total_bytes_free
        << " free bytes";
    // If the allocation failed due to fragmentation, print out the largest continuous allocation.
    if (!large_object_allocation && total_bytes_free >= byte_count) {
      size_t max_contiguous_allocation = 0;
      // TODO: C++0x auto
      typedef std::vector<space::ContinuousSpace*>::const_iterator It;
      for (It it = continuous_spaces_.begin(), end = continuous_spaces_.end(); it != end; ++it) {
        space::ContinuousSpace* space = *it;
        if (space->IsDlMallocSpace()) {
          space->AsDlMallocSpace()->Walk(MSpaceChunkCallback, &max_contiguous_allocation);
        }
      }
      oss << "; failed due to fragmentation (largest possible contiguous allocation "
          << max_contiguous_allocation << " bytes)";
    }
    self->ThrowOutOfMemoryError(oss.str().c_str());
    return NULL;
  }
}

bool Heap::IsHeapAddress(const mirror::Object* obj) {
  // Note: we deliberately don't take the lock here, and mustn't test anything that would
  // require taking the lock.
  if (obj == NULL) {
    return true;
  }
  if (UNLIKELY(!IsAligned<kObjectAlignment>(obj))) {
    return false;
  }
  return FindSpaceFromObject(obj, true) != NULL;
}

bool Heap::IsLiveObjectLocked(const mirror::Object* obj) {
  // Locks::heap_bitmap_lock_->AssertReaderHeld(Thread::Current());
  if (obj == NULL || UNLIKELY(!IsAligned<kObjectAlignment>(obj))) {
    return false;
  }
  space::ContinuousSpace* c_space = FindContinuousSpaceFromObject(obj, true);
  space::DiscontinuousSpace* d_space = NULL;
  if (c_space != NULL) {
    if (c_space->GetLiveBitmap()->Test(obj)) {
      return true;
    }
  } else {
    d_space = FindDiscontinuousSpaceFromObject(obj, true);
    if (d_space != NULL) {
      if (d_space->GetLiveObjects()->Test(obj)) {
        return true;
      }
    }
  }
  // This is covering the allocation/live stack swapping that is done without mutators suspended.
  for (size_t i = 0; i < 5; ++i) {
    if (allocation_stack_->Contains(const_cast<mirror::Object*>(obj)) ||
        live_stack_->Contains(const_cast<mirror::Object*>(obj))) {
      return true;
    }
    NanoSleep(MsToNs(10));
  }
  // We need to check the bitmaps again since there is a race where we mark something as live and
  // then clear the stack containing it.
  if (c_space != NULL) {
    if (c_space->GetLiveBitmap()->Test(obj)) {
      return true;
    }
  } else {
    d_space = FindDiscontinuousSpaceFromObject(obj, true);
    if (d_space != NULL && d_space->GetLiveObjects()->Test(obj)) {
      return true;
    }
  }
  return false;
}

void Heap::VerifyObjectImpl(const mirror::Object* obj) {
  if (Thread::Current() == NULL ||
      Runtime::Current()->GetThreadList()->GetLockOwner() == Thread::Current()->GetTid()) {
    return;
  }
  VerifyObjectBody(obj);
}

void Heap::DumpSpaces() {
  // TODO: C++0x auto
  typedef std::vector<space::ContinuousSpace*>::const_iterator It;
  for (It it = continuous_spaces_.begin(), end = continuous_spaces_.end(); it != end; ++it) {
    space::ContinuousSpace* space = *it;
    accounting::SpaceBitmap* live_bitmap = space->GetLiveBitmap();
    accounting::SpaceBitmap* mark_bitmap = space->GetMarkBitmap();
    LOG(INFO) << space << " " << *space << "\n"
              << live_bitmap << " " << *live_bitmap << "\n"
              << mark_bitmap << " " << *mark_bitmap;
  }
  typedef std::vector<space::DiscontinuousSpace*>::const_iterator It2;
  for (It2 it = discontinuous_spaces_.begin(), end = discontinuous_spaces_.end(); it != end; ++it) {
    space::DiscontinuousSpace* space = *it;
    LOG(INFO) << space << " " << *space << "\n";
  }
}

void Heap::VerifyObjectBody(const mirror::Object* obj) {
  if (UNLIKELY(!IsAligned<kObjectAlignment>(obj))) {
    LOG(FATAL) << "Object isn't aligned: " << obj;
  }
  if (UNLIKELY(GetObjectsAllocated() <= 10)) {  // Ignore early dawn of the universe verifications.
    return;
  }
  const byte* raw_addr = reinterpret_cast<const byte*>(obj) +
      mirror::Object::ClassOffset().Int32Value();
  const mirror::Class* c = *reinterpret_cast<mirror::Class* const *>(raw_addr);
  if (UNLIKELY(c == NULL)) {
    LOG(FATAL) << "Null class in object: " << obj;
  } else if (UNLIKELY(!IsAligned<kObjectAlignment>(c))) {
    LOG(FATAL) << "Class isn't aligned: " << c << " in object: " << obj;
  }
  // Check obj.getClass().getClass() == obj.getClass().getClass().getClass()
  // Note: we don't use the accessors here as they have internal sanity checks
  // that we don't want to run
  raw_addr = reinterpret_cast<const byte*>(c) + mirror::Object::ClassOffset().Int32Value();
  const mirror::Class* c_c = *reinterpret_cast<mirror::Class* const *>(raw_addr);
  raw_addr = reinterpret_cast<const byte*>(c_c) + mirror::Object::ClassOffset().Int32Value();
  const mirror::Class* c_c_c = *reinterpret_cast<mirror::Class* const *>(raw_addr);
  CHECK_EQ(c_c, c_c_c);

  if (verify_object_mode_ != kVerifyAllFast) {
    // TODO: the bitmap tests below are racy if VerifyObjectBody is called without the
    // heap_bitmap_lock_.
    if (!IsLiveObjectLocked(obj)) {
      DumpSpaces();
      LOG(FATAL) << "Object is dead: " << obj;
    }
    if (!IsLiveObjectLocked(c)) {
      LOG(FATAL) << "Class of object is dead: " << c << " in object: " << obj;
    }
  }
}

void Heap::VerificationCallback(mirror::Object* obj, void* arg) {
  DCHECK(obj != NULL);
  reinterpret_cast<Heap*>(arg)->VerifyObjectBody(obj);
}

void Heap::VerifyHeap() {
  ReaderMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
  GetLiveBitmap()->Walk(Heap::VerificationCallback, this);
}

inline void Heap::RecordAllocation(size_t size, mirror::Object* obj) {
  DCHECK(obj != NULL);
  DCHECK_GT(size, 0u);
  num_bytes_allocated_.fetch_add(size);

  if (Runtime::Current()->HasStatsEnabled()) {
    RuntimeStats* thread_stats = Thread::Current()->GetStats();
    ++thread_stats->allocated_objects;
    thread_stats->allocated_bytes += size;

    // TODO: Update these atomically.
    RuntimeStats* global_stats = Runtime::Current()->GetStats();
    ++global_stats->allocated_objects;
    global_stats->allocated_bytes += size;
  }

  // This is safe to do since the GC will never free objects which are neither in the allocation
  // stack or the live bitmap.
  while (!allocation_stack_->AtomicPushBack(obj)) {
    CollectGarbageInternal(collector::kGcTypeSticky, kGcCauseForAlloc, false);
  }
}

void Heap::RecordFree(size_t freed_objects, size_t freed_bytes) {
  DCHECK_LE(freed_bytes, static_cast<size_t>(num_bytes_allocated_));
  num_bytes_allocated_.fetch_sub(freed_bytes);

  if (Runtime::Current()->HasStatsEnabled()) {
    RuntimeStats* thread_stats = Thread::Current()->GetStats();
    thread_stats->freed_objects += freed_objects;
    thread_stats->freed_bytes += freed_bytes;

    // TODO: Do this concurrently.
    RuntimeStats* global_stats = Runtime::Current()->GetStats();
    global_stats->freed_objects += freed_objects;
    global_stats->freed_bytes += freed_bytes;
  }
}

inline bool Heap::IsOutOfMemoryOnAllocation(size_t alloc_size) {
  return num_bytes_allocated_ + alloc_size > growth_limit_;
}

inline mirror::Object* Heap::TryToAllocate(Thread* self, space::AllocSpace* space, size_t alloc_size,
                                           bool grow, size_t* bytes_allocated) {
  if (IsOutOfMemoryOnAllocation(alloc_size)) {
    return NULL;
  }
  return space->Alloc(self, alloc_size, bytes_allocated);
}

// DlMallocSpace-specific version.
inline mirror::Object* Heap::TryToAllocate(Thread* self, space::DlMallocSpace* space, size_t alloc_size,
                                           bool grow, size_t* bytes_allocated) {
  if (IsOutOfMemoryOnAllocation(alloc_size)) {
    return NULL;
  }
  if (!running_on_valgrind_) {
    return space->AllocNonvirtual(self, alloc_size, bytes_allocated);
  } else {
    return space->Alloc(self, alloc_size, bytes_allocated);
  }
}

template <class T>
inline mirror::Object* Heap::Allocate(Thread* self, T* space, size_t alloc_size, size_t* bytes_allocated) {
  // Since allocation can cause a GC which will need to SuspendAll, make sure all allocations are
  // done in the runnable state where suspension is expected.
  DCHECK_EQ(self->GetState(), kRunnable);
  self->AssertThreadSuspensionIsAllowable();

  mirror::Object* ptr = TryToAllocate(self, space, alloc_size, false, bytes_allocated);
  if (ptr != NULL) {
    return ptr;
  }
  return AllocateInternalWithGc(self, space, alloc_size, bytes_allocated);
}

mirror::Object* Heap::AllocateInternalWithGc(Thread* self, space::AllocSpace* space, size_t alloc_size,
                                             size_t* bytes_allocated) {
  mirror::Object* ptr;

  // The allocation failed. If the GC is running, block until it completes, and then retry the
  // allocation.
  collector::GcType last_gc = WaitForConcurrentGcToComplete(self);
  if (last_gc != collector::kGcTypeNone) {
    // A GC was in progress and we blocked, retry allocation now that memory has been freed.
    ptr = TryToAllocate(self, space, alloc_size, false, bytes_allocated);
    if (ptr != NULL) {
      return ptr;
    }
  }

  // Loop through our different Gc types and try to Gc until we get enough free memory.
  for (size_t i = static_cast<size_t>(last_gc) + 1;
       i < static_cast<size_t>(collector::kGcTypeMax); ++i) {
    bool run_gc = false;
    collector::GcType gc_type = static_cast<collector::GcType>(i);
    switch (gc_type) {
      case collector::kGcTypeSticky: {
        const size_t alloc_space_size = alloc_space_->Size();
        run_gc = alloc_space_size > min_alloc_space_size_for_sticky_gc_ &&
            alloc_space_->Capacity() - alloc_space_size >= min_remaining_space_for_sticky_gc_;
        break;
      }
      case collector::kGcTypePartial:
        run_gc = have_zygote_space_;
        break;
      case collector::kGcTypeFull:
        run_gc = true;
        break;
      default:
        break;
    }

    if (run_gc) {
      // If we actually ran a different type of Gc than requested, we can skip the index forwards.
      collector::GcType gc_type_ran = CollectGarbageInternal(gc_type, kGcCauseForAlloc, false);
      DCHECK_GE(static_cast<size_t>(gc_type_ran), i);
      i = static_cast<size_t>(gc_type_ran);

      // Did we free sufficient memory for the allocation to succeed?
      ptr = TryToAllocate(self, space, alloc_size, false, bytes_allocated);
      if (ptr != NULL) {
        return ptr;
      }
    }
  }

  // Allocations have failed after GCs; this is an exceptional state.
  // Try harder, growing the heap if necessary.
  ptr = TryToAllocate(self, space, alloc_size, true, bytes_allocated);
  if (ptr != NULL) {
    return ptr;
  }

  // Most allocations should have succeeded by now, so the heap is really full, really fragmented,
  // or the requested size is really big. Do another GC, collecting SoftReferences this time. The
  // VM spec requires that all SoftReferences have been collected and cleared before throwing OOME.

  // OLD-TODO: wait for the finalizers from the previous GC to finish
  VLOG(gc) << "Forcing collection of SoftReferences for " << PrettySize(alloc_size)
           << " allocation";

  // We don't need a WaitForConcurrentGcToComplete here either.
  CollectGarbageInternal(collector::kGcTypeFull, kGcCauseForAlloc, true);
  return TryToAllocate(self, space, alloc_size, true, bytes_allocated);
}

void Heap::SetTargetHeapUtilization(float target) {
  DCHECK_GT(target, 0.0f);  // asserted in Java code
  DCHECK_LT(target, 1.0f);
  target_utilization_ = target;
}

size_t Heap::GetObjectsAllocated() const {
  size_t total = 0;
  typedef std::vector<space::ContinuousSpace*>::const_iterator It;
  for (It it = continuous_spaces_.begin(), end = continuous_spaces_.end(); it != end; ++it) {
    space::ContinuousSpace* space = *it;
    if (space->IsDlMallocSpace()) {
      total += space->AsDlMallocSpace()->GetObjectsAllocated();
    }
  }
  typedef std::vector<space::DiscontinuousSpace*>::const_iterator It2;
  for (It2 it = discontinuous_spaces_.begin(), end = discontinuous_spaces_.end(); it != end; ++it) {
    space::DiscontinuousSpace* space = *it;
    total += space->AsLargeObjectSpace()->GetObjectsAllocated();
  }
  return total;
}

size_t Heap::GetObjectsAllocatedEver() const {
  size_t total = 0;
  typedef std::vector<space::ContinuousSpace*>::const_iterator It;
  for (It it = continuous_spaces_.begin(), end = continuous_spaces_.end(); it != end; ++it) {
    space::ContinuousSpace* space = *it;
    if (space->IsDlMallocSpace()) {
      total += space->AsDlMallocSpace()->GetTotalObjectsAllocated();
    }
  }
  typedef std::vector<space::DiscontinuousSpace*>::const_iterator It2;
  for (It2 it = discontinuous_spaces_.begin(), end = discontinuous_spaces_.end(); it != end; ++it) {
    space::DiscontinuousSpace* space = *it;
    total += space->AsLargeObjectSpace()->GetTotalObjectsAllocated();
  }
  return total;
}

size_t Heap::GetBytesAllocatedEver() const {
  size_t total = 0;
  typedef std::vector<space::ContinuousSpace*>::const_iterator It;
  for (It it = continuous_spaces_.begin(), end = continuous_spaces_.end(); it != end; ++it) {
    space::ContinuousSpace* space = *it;
    if (space->IsDlMallocSpace()) {
      total += space->AsDlMallocSpace()->GetTotalBytesAllocated();
    }
  }
  typedef std::vector<space::DiscontinuousSpace*>::const_iterator It2;
  for (It2 it = discontinuous_spaces_.begin(), end = discontinuous_spaces_.end(); it != end; ++it) {
    space::DiscontinuousSpace* space = *it;
    total += space->AsLargeObjectSpace()->GetTotalBytesAllocated();
  }
  return total;
}

class InstanceCounter {
 public:
  InstanceCounter(const std::vector<mirror::Class*>& classes, bool use_is_assignable_from, uint64_t* counts)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      : classes_(classes), use_is_assignable_from_(use_is_assignable_from), counts_(counts) {
  }

  void operator()(const mirror::Object* o) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    for (size_t i = 0; i < classes_.size(); ++i) {
      const mirror::Class* instance_class = o->GetClass();
      if (use_is_assignable_from_) {
        if (instance_class != NULL && classes_[i]->IsAssignableFrom(instance_class)) {
          ++counts_[i];
        }
      } else {
        if (instance_class == classes_[i]) {
          ++counts_[i];
        }
      }
    }
  }

 private:
  const std::vector<mirror::Class*>& classes_;
  bool use_is_assignable_from_;
  uint64_t* const counts_;

  DISALLOW_COPY_AND_ASSIGN(InstanceCounter);
};

void Heap::CountInstances(const std::vector<mirror::Class*>& classes, bool use_is_assignable_from,
                          uint64_t* counts) {
  // We only want reachable instances, so do a GC. This also ensures that the alloc stack
  // is empty, so the live bitmap is the only place we need to look.
  Thread* self = Thread::Current();
  self->TransitionFromRunnableToSuspended(kNative);
  CollectGarbage(false);
  self->TransitionFromSuspendedToRunnable();

  InstanceCounter counter(classes, use_is_assignable_from, counts);
  ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
  GetLiveBitmap()->Visit(counter);
}

class InstanceCollector {
 public:
  InstanceCollector(mirror::Class* c, int32_t max_count, std::vector<mirror::Object*>& instances)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      : class_(c), max_count_(max_count), instances_(instances) {
  }

  void operator()(const mirror::Object* o) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    const mirror::Class* instance_class = o->GetClass();
    if (instance_class == class_) {
      if (max_count_ == 0 || instances_.size() < max_count_) {
        instances_.push_back(const_cast<mirror::Object*>(o));
      }
    }
  }

 private:
  mirror::Class* class_;
  uint32_t max_count_;
  std::vector<mirror::Object*>& instances_;

  DISALLOW_COPY_AND_ASSIGN(InstanceCollector);
};

void Heap::GetInstances(mirror::Class* c, int32_t max_count,
                        std::vector<mirror::Object*>& instances) {
  // We only want reachable instances, so do a GC. This also ensures that the alloc stack
  // is empty, so the live bitmap is the only place we need to look.
  Thread* self = Thread::Current();
  self->TransitionFromRunnableToSuspended(kNative);
  CollectGarbage(false);
  self->TransitionFromSuspendedToRunnable();

  InstanceCollector collector(c, max_count, instances);
  ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
  GetLiveBitmap()->Visit(collector);
}

class ReferringObjectsFinder {
 public:
  ReferringObjectsFinder(mirror::Object* object, int32_t max_count,
                         std::vector<mirror::Object*>& referring_objects)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      : object_(object), max_count_(max_count), referring_objects_(referring_objects) {
  }

  // For bitmap Visit.
  // TODO: Fix lock analysis to not use NO_THREAD_SAFETY_ANALYSIS, requires support for
  // annotalysis on visitors.
  void operator()(const mirror::Object* o) const NO_THREAD_SAFETY_ANALYSIS {
    collector::MarkSweep::VisitObjectReferences(o, *this);
  }

  // For MarkSweep::VisitObjectReferences.
  void operator()(const mirror::Object* referrer, const mirror::Object* object,
                  const MemberOffset&, bool) const {
    if (object == object_ && (max_count_ == 0 || referring_objects_.size() < max_count_)) {
      referring_objects_.push_back(const_cast<mirror::Object*>(referrer));
    }
  }

 private:
  mirror::Object* object_;
  uint32_t max_count_;
  std::vector<mirror::Object*>& referring_objects_;

  DISALLOW_COPY_AND_ASSIGN(ReferringObjectsFinder);
};

void Heap::GetReferringObjects(mirror::Object* o, int32_t max_count,
                               std::vector<mirror::Object*>& referring_objects) {
  // We only want reachable instances, so do a GC. This also ensures that the alloc stack
  // is empty, so the live bitmap is the only place we need to look.
  Thread* self = Thread::Current();
  self->TransitionFromRunnableToSuspended(kNative);
  CollectGarbage(false);
  self->TransitionFromSuspendedToRunnable();

  ReferringObjectsFinder finder(o, max_count, referring_objects);
  ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
  GetLiveBitmap()->Visit(finder);
}

void Heap::CollectGarbage(bool clear_soft_references) {
  // Even if we waited for a GC we still need to do another GC since weaks allocated during the
  // last GC will not have necessarily been cleared.
  Thread* self = Thread::Current();
  WaitForConcurrentGcToComplete(self);
  CollectGarbageInternal(collector::kGcTypeFull, kGcCauseExplicit, clear_soft_references);
}

void Heap::PreZygoteFork() {
  static Mutex zygote_creation_lock_("zygote creation lock", kZygoteCreationLock);
  // Do this before acquiring the zygote creation lock so that we don't get lock order violations.
  CollectGarbage(false);
  Thread* self = Thread::Current();
  MutexLock mu(self, zygote_creation_lock_);

  // Try to see if we have any Zygote spaces.
  if (have_zygote_space_) {
    return;
  }

  VLOG(heap) << "Starting PreZygoteFork with alloc space size " << PrettySize(alloc_space_->Size());

  {
    // Flush the alloc stack.
    WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
    FlushAllocStack();
  }

  // Turn the current alloc space into a Zygote space and obtain the new alloc space composed
  // of the remaining available heap memory.
  space::DlMallocSpace* zygote_space = alloc_space_;
  alloc_space_ = zygote_space->CreateZygoteSpace("alloc space");
  alloc_space_->SetFootprintLimit(alloc_space_->Capacity());

  // Change the GC retention policy of the zygote space to only collect when full.
  zygote_space->SetGcRetentionPolicy(space::kGcRetentionPolicyFullCollect);
  AddContinuousSpace(alloc_space_);
  have_zygote_space_ = true;

  // Reset the cumulative loggers since we now have a few additional timing phases.
  // TODO: C++0x
  typedef std::vector<collector::MarkSweep*>::const_iterator It;
  for (It it = mark_sweep_collectors_.begin(), end = mark_sweep_collectors_.end();
       it != end; ++it) {
    (*it)->ResetCumulativeStatistics();
  }
}

void Heap::FlushAllocStack() {
  MarkAllocStack(alloc_space_->GetLiveBitmap(), large_object_space_->GetLiveObjects(),
                 allocation_stack_.get());
  allocation_stack_->Reset();
}

void Heap::MarkAllocStack(accounting::SpaceBitmap* bitmap, accounting::SpaceSetMap* large_objects,
                          accounting::ObjectStack* stack) {
  mirror::Object** limit = stack->End();
  for (mirror::Object** it = stack->Begin(); it != limit; ++it) {
    const mirror::Object* obj = *it;
    DCHECK(obj != NULL);
    if (LIKELY(bitmap->HasAddress(obj))) {
      bitmap->Set(obj);
    } else {
      large_objects->Set(obj);
    }
  }
}

const char* gc_cause_and_type_strings[3][4] = {
    {"", "GC Alloc Sticky", "GC Alloc Partial", "GC Alloc Full"},
    {"", "GC Background Sticky", "GC Background Partial", "GC Background Full"},
    {"", "GC Explicit Sticky", "GC Explicit Partial", "GC Explicit Full"}};

collector::GcType Heap::CollectGarbageInternal(collector::GcType gc_type, GcCause gc_cause,
                                               bool clear_soft_references) {
  Thread* self = Thread::Current();

  ScopedThreadStateChange tsc(self, kWaitingPerformingGc);
  Locks::mutator_lock_->AssertNotHeld(self);

  if (self->IsHandlingStackOverflow()) {
    LOG(WARNING) << "Performing GC on a thread that is handling a stack overflow.";
  }

  // Ensure there is only one GC at a time.
  bool start_collect = false;
  while (!start_collect) {
    {
      MutexLock mu(self, *gc_complete_lock_);
      if (!is_gc_running_) {
        is_gc_running_ = true;
        start_collect = true;
      }
    }
    if (!start_collect) {
      // TODO: timinglog this.
      WaitForConcurrentGcToComplete(self);

      // TODO: if another thread beat this one to do the GC, perhaps we should just return here?
      // Not doing at the moment to ensure soft references are cleared.
    }
  }
  gc_complete_lock_->AssertNotHeld(self);

  if (gc_cause == kGcCauseForAlloc && Runtime::Current()->HasStatsEnabled()) {
    ++Runtime::Current()->GetStats()->gc_for_alloc_count;
    ++Thread::Current()->GetStats()->gc_for_alloc_count;
  }

  uint64_t gc_start_time_ns = NanoTime();
  uint64_t gc_start_size = GetBytesAllocated();
  // Approximate allocation rate in bytes / second.
  if (UNLIKELY(gc_start_time_ns == last_gc_time_ns_)) {
    LOG(WARNING) << "Timers are broken (gc_start_time == last_gc_time_).";
  }
  uint64_t ms_delta = NsToMs(gc_start_time_ns - last_gc_time_ns_);
  if (ms_delta != 0) {
    allocation_rate_ = ((gc_start_size - last_gc_size_) * 1000) / ms_delta;
    VLOG(heap) << "Allocation rate: " << PrettySize(allocation_rate_) << "/s";
  }

  if (gc_type == collector::kGcTypeSticky &&
      alloc_space_->Size() < min_alloc_space_size_for_sticky_gc_) {
    gc_type = collector::kGcTypePartial;
  }

  DCHECK_LT(gc_type, collector::kGcTypeMax);
  DCHECK_NE(gc_type, collector::kGcTypeNone);
  DCHECK_LE(gc_cause, kGcCauseExplicit);

  ATRACE_BEGIN(gc_cause_and_type_strings[gc_cause][gc_type]);

  collector::MarkSweep* collector = NULL;
  typedef std::vector<collector::MarkSweep*>::iterator It;
  for (It it = mark_sweep_collectors_.begin(), end = mark_sweep_collectors_.end();
       it != end; ++it) {
    collector::MarkSweep* cur_collector = *it;
    if (cur_collector->IsConcurrent() == concurrent_gc_ && cur_collector->GetGcType() == gc_type) {
      collector = cur_collector;
      break;
    }
  }
  CHECK(collector != NULL)
      << "Could not find garbage collector with concurrent=" << concurrent_gc_
      << " and type=" << gc_type;

  base::TimingLogger& timings = collector->GetTimings();

  collector->clear_soft_references_ = clear_soft_references;
  collector->Run();
  total_objects_freed_ever_ += collector->GetFreedObjects();
  total_bytes_freed_ever_ += collector->GetFreedBytes();

  const size_t duration = collector->GetDurationNs();
  std::vector<uint64_t> pauses = collector->GetPauseTimes();
  bool was_slow = duration > kSlowGcThreshold ||
      (gc_cause == kGcCauseForAlloc && duration > kLongGcPauseThreshold);
  for (size_t i = 0; i < pauses.size(); ++i) {
    if (pauses[i] > kLongGcPauseThreshold) {
      was_slow = true;
    }
  }

  if (was_slow) {
    const size_t percent_free = GetPercentFree();
    const size_t current_heap_size = GetBytesAllocated();
    const size_t total_memory = GetTotalMemory();
    std::ostringstream pause_string;
    for (size_t i = 0; i < pauses.size(); ++i) {
      pause_string << PrettyDuration((pauses[i] / 1000) * 1000)
                   << ((i != pauses.size() - 1) ? ", " : "");
    }
    LOG(INFO) << gc_cause << " " << collector->GetName()
              << "GC freed " << PrettySize(collector->GetFreedBytes()) << ", "
              << percent_free << "% free, " << PrettySize(current_heap_size) << "/"
              << PrettySize(total_memory) << ", " << "paused " << pause_string.str()
              << " total " << PrettyDuration((duration / 1000) * 1000);
    if (VLOG_IS_ON(heap)) {
      LOG(INFO) << Dumpable<base::TimingLogger>(timings);
    }
  }

  {
    MutexLock mu(self, *gc_complete_lock_);
    is_gc_running_ = false;
    last_gc_type_ = gc_type;
    // Wake anyone who may have been waiting for the GC to complete.
    gc_complete_cond_->Broadcast(self);
  }

  ATRACE_END();

  // Inform DDMS that a GC completed.
  Dbg::GcDidFinish();
  return gc_type;
}

void Heap::UpdateAndMarkModUnion(collector::MarkSweep* mark_sweep, base::TimingLogger& timings,
                                 collector::GcType gc_type) {
  if (gc_type == collector::kGcTypeSticky) {
    // Don't need to do anything for mod union table in this case since we are only scanning dirty
    // cards.
    return;
  }

  base::TimingLogger::ScopedSplit split("UpdateModUnionTable", &timings);
  // Update zygote mod union table.
  if (gc_type == collector::kGcTypePartial) {
    base::TimingLogger::ScopedSplit split("UpdateZygoteModUnionTable", &timings);
    zygote_mod_union_table_->Update();

    timings.NewSplit("ZygoteMarkReferences");
    zygote_mod_union_table_->MarkReferences(mark_sweep);
  }

  // Processes the cards we cleared earlier and adds their objects into the mod-union table.
  timings.NewSplit("UpdateModUnionTable");
  image_mod_union_table_->Update();

  // Scans all objects in the mod-union table.
  timings.NewSplit("MarkImageToAllocSpaceReferences");
  image_mod_union_table_->MarkReferences(mark_sweep);
}

static void RootMatchesObjectVisitor(const mirror::Object* root, void* arg) {
  mirror::Object* obj = reinterpret_cast<mirror::Object*>(arg);
  if (root == obj) {
    LOG(INFO) << "Object " << obj << " is a root";
  }
}

class ScanVisitor {
 public:
  void operator()(const mirror::Object* obj) const {
    LOG(INFO) << "Would have rescanned object " << obj;
  }
};

// Verify a reference from an object.
class VerifyReferenceVisitor {
 public:
  explicit VerifyReferenceVisitor(Heap* heap)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_)
      : heap_(heap), failed_(false) {}

  bool Failed() const {
    return failed_;
  }

  // TODO: Fix lock analysis to not use NO_THREAD_SAFETY_ANALYSIS, requires support for smarter
  // analysis on visitors.
  void operator()(const mirror::Object* obj, const mirror::Object* ref,
                  const MemberOffset& offset, bool /* is_static */) const
      NO_THREAD_SAFETY_ANALYSIS {
    // Verify that the reference is live.
    if (UNLIKELY(ref != NULL && !IsLive(ref))) {
      accounting::CardTable* card_table = heap_->GetCardTable();
      accounting::ObjectStack* alloc_stack = heap_->allocation_stack_.get();
      accounting::ObjectStack* live_stack = heap_->live_stack_.get();

      if (obj != NULL) {
        byte* card_addr = card_table->CardFromAddr(obj);
        LOG(ERROR) << "Object " << obj << " references dead object " << ref << " at offset " << offset
                   << "\nIsDirty = " << (*card_addr == accounting::CardTable::kCardDirty)
                   << "\nObj type " << PrettyTypeOf(obj)
                   << "\nRef type " << PrettyTypeOf(ref);
        card_table->CheckAddrIsInCardTable(reinterpret_cast<const byte*>(obj));
        void* cover_begin = card_table->AddrFromCard(card_addr);
        void* cover_end = reinterpret_cast<void*>(reinterpret_cast<size_t>(cover_begin) +
                                                  accounting::CardTable::kCardSize);
        LOG(ERROR) << "Card " << reinterpret_cast<void*>(card_addr) << " covers " << cover_begin
                   << "-" << cover_end;
        accounting::SpaceBitmap* bitmap = heap_->GetLiveBitmap()->GetContinuousSpaceBitmap(obj);

        // Print out how the object is live.
        if (bitmap != NULL && bitmap->Test(obj)) {
          LOG(ERROR) << "Object " << obj << " found in live bitmap";
        }
        if (alloc_stack->ContainsSorted(const_cast<mirror::Object*>(obj))) {
          LOG(ERROR) << "Object " << obj << " found in allocation stack";
        }
        if (live_stack->ContainsSorted(const_cast<mirror::Object*>(obj))) {
          LOG(ERROR) << "Object " << obj << " found in live stack";
        }
        // Attempt to see if the card table missed the reference.
        ScanVisitor scan_visitor;
        byte* byte_cover_begin = reinterpret_cast<byte*>(card_table->AddrFromCard(card_addr));
        card_table->Scan(bitmap, byte_cover_begin,
                         byte_cover_begin + accounting::CardTable::kCardSize, scan_visitor);

        // Search to see if any of the roots reference our object.
        void* arg = const_cast<void*>(reinterpret_cast<const void*>(obj));
        Runtime::Current()->VisitRoots(&RootMatchesObjectVisitor, arg, false, false);

        // Search to see if any of the roots reference our reference.
        arg = const_cast<void*>(reinterpret_cast<const void*>(ref));
        Runtime::Current()->VisitRoots(&RootMatchesObjectVisitor, arg, false, false);
      } else {
        LOG(ERROR) << "Root references dead object " << ref << "\nRef type " << PrettyTypeOf(ref);
      }
      if (alloc_stack->ContainsSorted(const_cast<mirror::Object*>(ref))) {
        LOG(ERROR) << "Reference " << ref << " found in allocation stack!";
      }
      if (live_stack->ContainsSorted(const_cast<mirror::Object*>(ref))) {
        LOG(ERROR) << "Reference " << ref << " found in live stack!";
      }
      heap_->image_mod_union_table_->Dump(LOG(ERROR) << "Image mod-union table: ");
      heap_->zygote_mod_union_table_->Dump(LOG(ERROR) << "Zygote mod-union table: ");
      failed_ = true;
    }
  }

  bool IsLive(const mirror::Object* obj) const NO_THREAD_SAFETY_ANALYSIS {
    return heap_->IsLiveObjectLocked(obj);
  }

  static void VerifyRoots(const mirror::Object* root, void* arg) {
    VerifyReferenceVisitor* visitor = reinterpret_cast<VerifyReferenceVisitor*>(arg);
    (*visitor)(NULL, root, MemberOffset(0), true);
  }

 private:
  Heap* const heap_;
  mutable bool failed_;
};

// Verify all references within an object, for use with HeapBitmap::Visit.
class VerifyObjectVisitor {
 public:
  explicit VerifyObjectVisitor(Heap* heap) : heap_(heap), failed_(false) {}

  void operator()(const mirror::Object* obj) const
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
    // Note: we are verifying the references in obj but not obj itself, this is because obj must
    // be live or else how did we find it in the live bitmap?
    VerifyReferenceVisitor visitor(heap_);
    collector::MarkSweep::VisitObjectReferences(obj, visitor);
    failed_ = failed_ || visitor.Failed();
  }

  bool Failed() const {
    return failed_;
  }

 private:
  Heap* const heap_;
  mutable bool failed_;
};

// Must do this with mutators suspended since we are directly accessing the allocation stacks.
bool Heap::VerifyHeapReferences() {
  Locks::mutator_lock_->AssertExclusiveHeld(Thread::Current());
  // Let's sort our allocation stacks so that we can efficiently binary search them.
  allocation_stack_->Sort();
  live_stack_->Sort();
  // Perform the verification.
  VerifyObjectVisitor visitor(this);
  Runtime::Current()->VisitRoots(VerifyReferenceVisitor::VerifyRoots, &visitor, false, false);
  GetLiveBitmap()->Visit(visitor);
  // We don't want to verify the objects in the allocation stack since they themselves may be
  // pointing to dead objects if they are not reachable.
1469 if (visitor.Failed()) { 1470 DumpSpaces(); 1471 return false; 1472 } 1473 return true; 1474} 1475 1476class VerifyReferenceCardVisitor { 1477 public: 1478 VerifyReferenceCardVisitor(Heap* heap, bool* failed) 1479 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, 1480 Locks::heap_bitmap_lock_) 1481 : heap_(heap), failed_(failed) { 1482 } 1483 1484 // TODO: Fix lock analysis to not use NO_THREAD_SAFETY_ANALYSIS, requires support for 1485 // annotalysis on visitors. 1486 void operator()(const mirror::Object* obj, const mirror::Object* ref, const MemberOffset& offset, 1487 bool is_static) const NO_THREAD_SAFETY_ANALYSIS { 1488 // Filter out class references since changing an object's class does not mark the card as dirty. 1489 // Also handles large objects, since the only reference they hold is a class reference. 1490 if (ref != NULL && !ref->IsClass()) { 1491 accounting::CardTable* card_table = heap_->GetCardTable(); 1492 // If the object is not dirty and it is referencing something in the live stack other than 1493 // class, then it must be on a dirty card. 1494 if (!card_table->AddrIsInCardTable(obj)) { 1495 LOG(ERROR) << "Object " << obj << " is not in the address range of the card table"; 1496 *failed_ = true; 1497 } else if (!card_table->IsDirty(obj)) { 1498 // Card should be either kCardDirty if it got re-dirtied after we aged it, or 1499 // kCardDirty - 1 if it didnt get touched since we aged it. 1500 accounting::ObjectStack* live_stack = heap_->live_stack_.get(); 1501 if (live_stack->ContainsSorted(const_cast<mirror::Object*>(ref))) { 1502 if (live_stack->ContainsSorted(const_cast<mirror::Object*>(obj))) { 1503 LOG(ERROR) << "Object " << obj << " found in live stack"; 1504 } 1505 if (heap_->GetLiveBitmap()->Test(obj)) { 1506 LOG(ERROR) << "Object " << obj << " found in live bitmap"; 1507 } 1508 LOG(ERROR) << "Object " << obj << " " << PrettyTypeOf(obj) 1509 << " references " << ref << " " << PrettyTypeOf(ref) << " in live stack"; 1510 1511 // Print which field of the object is dead. 1512 if (!obj->IsObjectArray()) { 1513 const mirror::Class* klass = is_static ? obj->AsClass() : obj->GetClass(); 1514 CHECK(klass != NULL); 1515 const mirror::ObjectArray<mirror::Field>* fields = is_static ? klass->GetSFields() 1516 : klass->GetIFields(); 1517 CHECK(fields != NULL); 1518 for (int32_t i = 0; i < fields->GetLength(); ++i) { 1519 const mirror::Field* cur = fields->Get(i); 1520 if (cur->GetOffset().Int32Value() == offset.Int32Value()) { 1521 LOG(ERROR) << (is_static ? "Static " : "") << "field in the live stack is " 1522 << PrettyField(cur); 1523 break; 1524 } 1525 } 1526 } else { 1527 const mirror::ObjectArray<mirror::Object>* object_array = 1528 obj->AsObjectArray<mirror::Object>(); 1529 for (int32_t i = 0; i < object_array->GetLength(); ++i) { 1530 if (object_array->Get(i) == ref) { 1531 LOG(ERROR) << (is_static ? 
"Static " : "") << "obj[" << i << "] = ref"; 1532 } 1533 } 1534 } 1535 1536 *failed_ = true; 1537 } 1538 } 1539 } 1540 } 1541 1542 private: 1543 Heap* const heap_; 1544 bool* const failed_; 1545}; 1546 1547class VerifyLiveStackReferences { 1548 public: 1549 explicit VerifyLiveStackReferences(Heap* heap) 1550 : heap_(heap), 1551 failed_(false) {} 1552 1553 void operator()(const mirror::Object* obj) const 1554 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) { 1555 VerifyReferenceCardVisitor visitor(heap_, const_cast<bool*>(&failed_)); 1556 collector::MarkSweep::VisitObjectReferences(obj, visitor); 1557 } 1558 1559 bool Failed() const { 1560 return failed_; 1561 } 1562 1563 private: 1564 Heap* const heap_; 1565 bool failed_; 1566}; 1567 1568bool Heap::VerifyMissingCardMarks() { 1569 Locks::mutator_lock_->AssertExclusiveHeld(Thread::Current()); 1570 1571 // We need to sort the live stack since we binary search it. 1572 live_stack_->Sort(); 1573 VerifyLiveStackReferences visitor(this); 1574 GetLiveBitmap()->Visit(visitor); 1575 1576 // We can verify objects in the live stack since none of these should reference dead objects. 1577 for (mirror::Object** it = live_stack_->Begin(); it != live_stack_->End(); ++it) { 1578 visitor(*it); 1579 } 1580 1581 if (visitor.Failed()) { 1582 DumpSpaces(); 1583 return false; 1584 } 1585 return true; 1586} 1587 1588void Heap::SwapStacks() { 1589 allocation_stack_.swap(live_stack_); 1590 1591 // Sort the live stack so that we can quickly binary search it later. 1592 if (verify_object_mode_ > kNoHeapVerification) { 1593 live_stack_->Sort(); 1594 } 1595} 1596 1597void Heap::ProcessCards(base::TimingLogger& timings) { 1598 // Clear cards and keep track of cards cleared in the mod-union table. 1599 typedef std::vector<space::ContinuousSpace*>::iterator It; 1600 for (It it = continuous_spaces_.begin(), end = continuous_spaces_.end(); it != end; ++it) { 1601 space::ContinuousSpace* space = *it; 1602 if (space->IsImageSpace()) { 1603 base::TimingLogger::ScopedSplit split("ImageModUnionClearCards", &timings); 1604 image_mod_union_table_->ClearCards(space); 1605 } else if (space->IsZygoteSpace()) { 1606 base::TimingLogger::ScopedSplit split("ZygoteModUnionClearCards", &timings); 1607 zygote_mod_union_table_->ClearCards(space); 1608 } else { 1609 base::TimingLogger::ScopedSplit split("AllocSpaceClearCards", &timings); 1610 // No mod union table for the AllocSpace. Age the cards so that the GC knows that these cards 1611 // were dirty before the GC started. 1612 card_table_->ModifyCardsAtomic(space->Begin(), space->End(), AgeCardVisitor(), VoidFunctor()); 1613 } 1614 } 1615} 1616 1617void Heap::PreGcVerification(collector::GarbageCollector* gc) { 1618 ThreadList* thread_list = Runtime::Current()->GetThreadList(); 1619 Thread* self = Thread::Current(); 1620 1621 if (verify_pre_gc_heap_) { 1622 thread_list->SuspendAll(); 1623 { 1624 ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_); 1625 if (!VerifyHeapReferences()) { 1626 LOG(FATAL) << "Pre " << gc->GetName() << " heap verification failed"; 1627 } 1628 } 1629 thread_list->ResumeAll(); 1630 } 1631 1632 // Check that all objects which reference things in the live stack are on dirty cards. 1633 if (verify_missing_card_marks_) { 1634 thread_list->SuspendAll(); 1635 { 1636 ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_); 1637 SwapStacks(); 1638 // Sort the live stack so that we can quickly binary search it later. 
1639 if (!VerifyMissingCardMarks()) {
1640 LOG(FATAL) << "Pre " << gc->GetName() << " missing card mark verification failed";
1641 }
1642 SwapStacks();
1643 }
1644 thread_list->ResumeAll();
1645 }
1646
1647 if (verify_mod_union_table_) {
1648 thread_list->SuspendAll();
1649 ReaderMutexLock reader_lock(self, *Locks::heap_bitmap_lock_);
1650 zygote_mod_union_table_->Update();
1651 zygote_mod_union_table_->Verify();
1652 image_mod_union_table_->Update();
1653 image_mod_union_table_->Verify();
1654 thread_list->ResumeAll();
1655 }
1656 }
1657
1658 void Heap::PreSweepingGcVerification(collector::GarbageCollector* gc) {
1659 // Called before sweeping occurs since we want to make sure we are not going to reclaim any
1660 // reachable objects.
1661 if (verify_post_gc_heap_) {
1662 ThreadList* thread_list = Runtime::Current()->GetThreadList();
1663 Thread* self = Thread::Current();
1664 CHECK_NE(self->GetState(), kRunnable);
1665 Locks::mutator_lock_->SharedUnlock(self);
1666 thread_list->SuspendAll();
1667 {
1668 WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
1669 // Swapping bound bitmaps does nothing.
1670 gc->SwapBitmaps();
1671 if (!VerifyHeapReferences()) {
1672 LOG(FATAL) << "Post " << gc->GetName() << " GC verification failed";
1673 }
1674 gc->SwapBitmaps();
1675 }
1676 thread_list->ResumeAll();
1677 Locks::mutator_lock_->SharedLock(self);
1678 }
1679 }
1680
1681 void Heap::PostGcVerification(collector::GarbageCollector* gc) {
1682 if (verify_system_weaks_) {
1683 Thread* self = Thread::Current();
1684 ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
1685 collector::MarkSweep* mark_sweep = down_cast<collector::MarkSweep*>(gc);
1686 mark_sweep->VerifySystemWeaks();
1687 }
1688 }
1689
1690 collector::GcType Heap::WaitForConcurrentGcToComplete(Thread* self) {
1691 collector::GcType last_gc_type = collector::kGcTypeNone;
1692 if (concurrent_gc_) {
1693 ATRACE_BEGIN("GC: Wait For Concurrent");
1694 bool do_wait;
1695 uint64_t wait_start = NanoTime();
1696 {
1697 // Check if GC is running while holding gc_complete_lock_.
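// Reading note (an inference from the surrounding code, not an authoritative statement): the
// SwapBitmaps() bracket in PreSweepingGcVerification() above lets VerifyHeapReferences() see the
// collector's post-marking view. After marking, the mark bitmap describes the objects that will
// survive; swapping it with the live bitmap makes the verifier's liveness checks treat "marked"
// as "live" before sweeping frees anything, and the second SwapBitmaps() restores the original
// pairing. For collections that bind the live bitmap to the mark bitmap, the two are the same
// object and the swap is a no-op, which is what the in-line comment alludes to.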
1698 MutexLock mu(self, *gc_complete_lock_); 1699 do_wait = is_gc_running_; 1700 } 1701 if (do_wait) { 1702 uint64_t wait_time; 1703 // We must wait, change thread state then sleep on gc_complete_cond_; 1704 ScopedThreadStateChange tsc(Thread::Current(), kWaitingForGcToComplete); 1705 { 1706 MutexLock mu(self, *gc_complete_lock_); 1707 while (is_gc_running_) { 1708 gc_complete_cond_->Wait(self); 1709 } 1710 last_gc_type = last_gc_type_; 1711 wait_time = NanoTime() - wait_start; 1712 total_wait_time_ += wait_time; 1713 } 1714 if (wait_time > kLongGcPauseThreshold) { 1715 LOG(INFO) << "WaitForConcurrentGcToComplete blocked for " << PrettyDuration(wait_time); 1716 } 1717 } 1718 ATRACE_END(); 1719 } 1720 return last_gc_type; 1721} 1722 1723void Heap::DumpForSigQuit(std::ostream& os) { 1724 os << "Heap: " << GetPercentFree() << "% free, " << PrettySize(GetBytesAllocated()) << "/" 1725 << PrettySize(GetTotalMemory()) << "; " << GetObjectsAllocated() << " objects\n"; 1726 DumpGcPerformanceInfo(os); 1727} 1728 1729size_t Heap::GetPercentFree() { 1730 return static_cast<size_t>(100.0f * static_cast<float>(GetFreeMemory()) / GetTotalMemory()); 1731} 1732 1733void Heap::SetIdealFootprint(size_t max_allowed_footprint) { 1734 if (max_allowed_footprint > GetMaxMemory()) { 1735 VLOG(gc) << "Clamp target GC heap from " << PrettySize(max_allowed_footprint) << " to " 1736 << PrettySize(GetMaxMemory()); 1737 max_allowed_footprint = GetMaxMemory(); 1738 } 1739 max_allowed_footprint_ = max_allowed_footprint; 1740} 1741 1742void Heap::UpdateMaxNativeFootprint() { 1743 size_t native_size = native_bytes_allocated_; 1744 // TODO: Tune the native heap utilization to be a value other than the java heap utilization. 1745 size_t target_size = native_size / GetTargetHeapUtilization(); 1746 if (target_size > native_size + max_free_) { 1747 target_size = native_size + max_free_; 1748 } else if (target_size < native_size + min_free_) { 1749 target_size = native_size + min_free_; 1750 } 1751 native_footprint_gc_watermark_ = target_size; 1752 native_footprint_limit_ = 2 * target_size - native_size; 1753} 1754 1755void Heap::GrowForUtilization(collector::GcType gc_type, uint64_t gc_duration) { 1756 // We know what our utilization is at this moment. 1757 // This doesn't actually resize any memory. It just lets the heap grow more when necessary. 1758 const size_t bytes_allocated = GetBytesAllocated(); 1759 last_gc_size_ = bytes_allocated; 1760 last_gc_time_ns_ = NanoTime(); 1761 1762 size_t target_size; 1763 if (gc_type != collector::kGcTypeSticky) { 1764 // Grow the heap for non sticky GC. 1765 target_size = bytes_allocated / GetTargetHeapUtilization(); 1766 if (target_size > bytes_allocated + max_free_) { 1767 target_size = bytes_allocated + max_free_; 1768 } else if (target_size < bytes_allocated + min_free_) { 1769 target_size = bytes_allocated + min_free_; 1770 } 1771 next_gc_type_ = collector::kGcTypeSticky; 1772 } else { 1773 // Based on how close the current heap size is to the target size, decide 1774 // whether or not to do a partial or sticky GC next. 1775 if (bytes_allocated + min_free_ <= max_allowed_footprint_) { 1776 next_gc_type_ = collector::kGcTypeSticky; 1777 } else { 1778 next_gc_type_ = collector::kGcTypePartial; 1779 } 1780 1781 // If we have freed enough memory, shrink the heap back down. 
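// Worked example of the non-sticky sizing rule above (hypothetical numbers; min_free_ and
// max_free_ are configured at heap creation and the values used here are made up): assuming a
// target utilization of 0.5 and bytes_allocated = 8 MB, the raw target is 8 MB / 0.5 = 16 MB.
// If max_free_ is 2 MB, the target is clamped down to 8 + 2 = 10 MB; if instead the raw target
// had come out below bytes_allocated + min_free_, it would be clamped up to that floor. The
// shrink case mentioned in the comment above is handled by the branch that follows.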
1782 if (bytes_allocated + max_free_ < max_allowed_footprint_) {
1783 target_size = bytes_allocated + max_free_;
1784 } else {
1785 target_size = std::max(bytes_allocated, max_allowed_footprint_);
1786 }
1787 }
1788 SetIdealFootprint(target_size);
1789
1790 // Calculate when to perform the next ConcurrentGC.
1791 if (concurrent_gc_) {
1792 // Calculate the estimated GC duration.
1793 double gc_duration_seconds = NsToMs(gc_duration) / 1000.0;
1794 // Estimate how many remaining bytes we will have when we need to start the next GC.
1795 size_t remaining_bytes = allocation_rate_ * gc_duration_seconds;
1796 remaining_bytes = std::max(remaining_bytes, kMinConcurrentRemainingBytes);
1797 if (UNLIKELY(remaining_bytes > max_allowed_footprint_)) {
1798 // This should never happen: it would mean that at the estimated allocation rate we expect
1799 // to allocate more than the application's entire footprint during this GC. Schedule
1800 // another GC straight away.
1801 concurrent_start_bytes_ = bytes_allocated;
1802 } else {
1803 // Start a concurrent GC when we get close to the estimated remaining bytes. When the
1804 // allocation rate is very high, remaining_bytes could tell us that we should start a GC
1805 // right away.
1806 concurrent_start_bytes_ = std::max(max_allowed_footprint_ - remaining_bytes, bytes_allocated);
1807 }
1808 DCHECK_LE(concurrent_start_bytes_, max_allowed_footprint_);
1809 DCHECK_LE(max_allowed_footprint_, growth_limit_);
1810 }
1811
1812 UpdateMaxNativeFootprint();
1813 }
1814
1815 void Heap::ClearGrowthLimit() {
1816 growth_limit_ = capacity_;
1817 alloc_space_->ClearGrowthLimit();
1818 }
1819
1820 void Heap::SetReferenceOffsets(MemberOffset reference_referent_offset,
1821 MemberOffset reference_queue_offset,
1822 MemberOffset reference_queueNext_offset,
1823 MemberOffset reference_pendingNext_offset,
1824 MemberOffset finalizer_reference_zombie_offset) {
1825 reference_referent_offset_ = reference_referent_offset;
1826 reference_queue_offset_ = reference_queue_offset;
1827 reference_queueNext_offset_ = reference_queueNext_offset;
1828 reference_pendingNext_offset_ = reference_pendingNext_offset;
1829 finalizer_reference_zombie_offset_ = finalizer_reference_zombie_offset;
1830 CHECK_NE(reference_referent_offset_.Uint32Value(), 0U);
1831 CHECK_NE(reference_queue_offset_.Uint32Value(), 0U);
1832 CHECK_NE(reference_queueNext_offset_.Uint32Value(), 0U);
1833 CHECK_NE(reference_pendingNext_offset_.Uint32Value(), 0U);
1834 CHECK_NE(finalizer_reference_zombie_offset_.Uint32Value(), 0U);
1835 }
1836
1837 mirror::Object* Heap::GetReferenceReferent(mirror::Object* reference) {
1838 DCHECK(reference != NULL);
1839 DCHECK_NE(reference_referent_offset_.Uint32Value(), 0U);
1840 return reference->GetFieldObject<mirror::Object*>(reference_referent_offset_, true);
1841 }
1842
1843 void Heap::ClearReferenceReferent(mirror::Object* reference) {
1844 DCHECK(reference != NULL);
1845 DCHECK_NE(reference_referent_offset_.Uint32Value(), 0U);
1846 reference->SetFieldObject(reference_referent_offset_, NULL, true);
1847 }
1848
1849 // Returns true if the reference object has not yet been enqueued.
1850bool Heap::IsEnqueuable(const mirror::Object* ref) { 1851 DCHECK(ref != NULL); 1852 const mirror::Object* queue = 1853 ref->GetFieldObject<mirror::Object*>(reference_queue_offset_, false); 1854 const mirror::Object* queue_next = 1855 ref->GetFieldObject<mirror::Object*>(reference_queueNext_offset_, false); 1856 return (queue != NULL) && (queue_next == NULL); 1857} 1858 1859void Heap::EnqueueReference(mirror::Object* ref, mirror::Object** cleared_reference_list) { 1860 DCHECK(ref != NULL); 1861 CHECK(ref->GetFieldObject<mirror::Object*>(reference_queue_offset_, false) != NULL); 1862 CHECK(ref->GetFieldObject<mirror::Object*>(reference_queueNext_offset_, false) == NULL); 1863 EnqueuePendingReference(ref, cleared_reference_list); 1864} 1865 1866void Heap::EnqueuePendingReference(mirror::Object* ref, mirror::Object** list) { 1867 DCHECK(ref != NULL); 1868 DCHECK(list != NULL); 1869 1870 // TODO: Remove this lock, use atomic stacks for storing references. 1871 MutexLock mu(Thread::Current(), *reference_queue_lock_); 1872 if (*list == NULL) { 1873 ref->SetFieldObject(reference_pendingNext_offset_, ref, false); 1874 *list = ref; 1875 } else { 1876 mirror::Object* head = 1877 (*list)->GetFieldObject<mirror::Object*>(reference_pendingNext_offset_, false); 1878 ref->SetFieldObject(reference_pendingNext_offset_, head, false); 1879 (*list)->SetFieldObject(reference_pendingNext_offset_, ref, false); 1880 } 1881} 1882 1883mirror::Object* Heap::DequeuePendingReference(mirror::Object** list) { 1884 DCHECK(list != NULL); 1885 DCHECK(*list != NULL); 1886 mirror::Object* head = (*list)->GetFieldObject<mirror::Object*>(reference_pendingNext_offset_, 1887 false); 1888 mirror::Object* ref; 1889 1890 // Note: the following code is thread-safe because it is only called from ProcessReferences which 1891 // is single threaded. 1892 if (*list == head) { 1893 ref = *list; 1894 *list = NULL; 1895 } else { 1896 mirror::Object* next = head->GetFieldObject<mirror::Object*>(reference_pendingNext_offset_, 1897 false); 1898 (*list)->SetFieldObject(reference_pendingNext_offset_, next, false); 1899 ref = head; 1900 } 1901 ref->SetFieldObject(reference_pendingNext_offset_, NULL, false); 1902 return ref; 1903} 1904 1905void Heap::AddFinalizerReference(Thread* self, mirror::Object* object) { 1906 ScopedObjectAccess soa(self); 1907 JValue result; 1908 ArgArray arg_array(NULL, 0); 1909 arg_array.Append(reinterpret_cast<uint32_t>(object)); 1910 soa.DecodeMethod(WellKnownClasses::java_lang_ref_FinalizerReference_add)->Invoke(self, 1911 arg_array.GetArray(), arg_array.GetNumBytes(), &result, 'V'); 1912} 1913 1914void Heap::EnqueueClearedReferences(mirror::Object** cleared) { 1915 DCHECK(cleared != NULL); 1916 if (*cleared != NULL) { 1917 // When a runtime isn't started there are no reference queues to care about so ignore. 1918 if (LIKELY(Runtime::Current()->IsStarted())) { 1919 ScopedObjectAccess soa(Thread::Current()); 1920 JValue result; 1921 ArgArray arg_array(NULL, 0); 1922 arg_array.Append(reinterpret_cast<uint32_t>(*cleared)); 1923 soa.DecodeMethod(WellKnownClasses::java_lang_ref_ReferenceQueue_add)->Invoke(soa.Self(), 1924 arg_array.GetArray(), arg_array.GetNumBytes(), &result, 'V'); 1925 } 1926 *cleared = NULL; 1927 } 1928} 1929 1930void Heap::RequestConcurrentGC(Thread* self) { 1931 // Make sure that we can do a concurrent GC. 
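// Illustrative note with hypothetical numbers (not from the original sources): this method is
// reached from the allocation path once num_bytes_allocated_ crosses concurrent_start_bytes_
// (the caller lives elsewhere in this file). GrowForUtilization() above computes that threshold
// as roughly
//   concurrent_start_bytes_ = max(max_allowed_footprint_ -
//                                     max(allocation_rate_ * gc_seconds, kMinConcurrentRemainingBytes),
//                                 bytes_allocated)
// so, for example, a 20 MB footprint with a 40 MB/s allocation rate and a 100 ms estimated GC
// leaves about 4 MB of headroom, i.e. the request fires near 16 MB allocated. Setting
// concurrent_start_bytes_ to std::numeric_limits<size_t>::max() below then debounces further
// requests until the next GrowForUtilization() recomputes the threshold.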
1932 Runtime* runtime = Runtime::Current(); 1933 DCHECK(concurrent_gc_); 1934 if (runtime == NULL || !runtime->IsFinishedStarting() || 1935 !runtime->IsConcurrentGcEnabled()) { 1936 return; 1937 } 1938 { 1939 MutexLock mu(self, *Locks::runtime_shutdown_lock_); 1940 if (runtime->IsShuttingDown()) { 1941 return; 1942 } 1943 } 1944 if (self->IsHandlingStackOverflow()) { 1945 return; 1946 } 1947 1948 // We already have a request pending, no reason to start more until we update 1949 // concurrent_start_bytes_. 1950 concurrent_start_bytes_ = std::numeric_limits<size_t>::max(); 1951 1952 JNIEnv* env = self->GetJniEnv(); 1953 DCHECK(WellKnownClasses::java_lang_Daemons != NULL); 1954 DCHECK(WellKnownClasses::java_lang_Daemons_requestGC != NULL); 1955 env->CallStaticVoidMethod(WellKnownClasses::java_lang_Daemons, 1956 WellKnownClasses::java_lang_Daemons_requestGC); 1957 CHECK(!env->ExceptionCheck()); 1958} 1959 1960void Heap::ConcurrentGC(Thread* self) { 1961 { 1962 MutexLock mu(self, *Locks::runtime_shutdown_lock_); 1963 if (Runtime::Current()->IsShuttingDown()) { 1964 return; 1965 } 1966 } 1967 1968 // Wait for any GCs currently running to finish. 1969 if (WaitForConcurrentGcToComplete(self) == collector::kGcTypeNone) { 1970 CollectGarbageInternal(next_gc_type_, kGcCauseBackground, false); 1971 } 1972} 1973 1974void Heap::RequestHeapTrim() { 1975 // GC completed and now we must decide whether to request a heap trim (advising pages back to the 1976 // kernel) or not. Issuing a request will also cause trimming of the libc heap. As a trim scans 1977 // a space it will hold its lock and can become a cause of jank. 1978 // Note, the large object space self trims and the Zygote space was trimmed and unchanging since 1979 // forking. 1980 1981 // We don't have a good measure of how worthwhile a trim might be. We can't use the live bitmap 1982 // because that only marks object heads, so a large array looks like lots of empty space. We 1983 // don't just call dlmalloc all the time, because the cost of an _attempted_ trim is proportional 1984 // to utilization (which is probably inversely proportional to how much benefit we can expect). 1985 // We could try mincore(2) but that's only a measure of how many pages we haven't given away, 1986 // not how much use we're making of those pages. 1987 uint64_t ms_time = MilliTime(); 1988 float utilization = 1989 static_cast<float>(alloc_space_->GetBytesAllocated()) / alloc_space_->Size(); 1990 if ((utilization > 0.75f && !IsLowMemoryMode()) || ((ms_time - last_trim_time_ms_) < 2 * 1000)) { 1991 // Don't bother trimming the alloc space if it's more than 75% utilized and low memory mode is 1992 // not enabled, or if a heap trim occurred in the last two seconds. 1993 return; 1994 } 1995 1996 Thread* self = Thread::Current(); 1997 { 1998 MutexLock mu(self, *Locks::runtime_shutdown_lock_); 1999 Runtime* runtime = Runtime::Current(); 2000 if (runtime == NULL || !runtime->IsFinishedStarting() || runtime->IsShuttingDown()) { 2001 // Heap trimming isn't supported without a Java runtime or Daemons (such as at dex2oat time) 2002 // Also: we do not wish to start a heap trim if the runtime is shutting down (a racy check 2003 // as we don't hold the lock while requesting the trim). 2004 return; 2005 } 2006 } 2007 2008 last_trim_time_ms_ = ms_time; 2009 ListenForProcessStateChange(); 2010 2011 // Trim only if we do not currently care about pause times. 
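// Worked example for the trim gate above (hypothetical numbers): with 30 MB allocated in a
// 50 MB alloc space, utilization is 0.6, so the 0.75 cut-off does not suppress the trim; if the
// last trim ran 5 seconds ago, the 2-second debounce also passes and the request proceeds. With
// 45 MB allocated (utilization 0.9) and low memory mode off, the trim is skipped as unlikely to
// return many pages to the kernel. The final gate, checked by the if that follows, is whether we
// currently care about pause times.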
2012 if (!care_about_pause_times_) { 2013 JNIEnv* env = self->GetJniEnv(); 2014 DCHECK(WellKnownClasses::java_lang_Daemons != NULL); 2015 DCHECK(WellKnownClasses::java_lang_Daemons_requestHeapTrim != NULL); 2016 env->CallStaticVoidMethod(WellKnownClasses::java_lang_Daemons, 2017 WellKnownClasses::java_lang_Daemons_requestHeapTrim); 2018 CHECK(!env->ExceptionCheck()); 2019 } 2020} 2021 2022size_t Heap::Trim() { 2023 // Handle a requested heap trim on a thread outside of the main GC thread. 2024 return alloc_space_->Trim(); 2025} 2026 2027bool Heap::IsGCRequestPending() const { 2028 return concurrent_start_bytes_ != std::numeric_limits<size_t>::max(); 2029} 2030 2031void Heap::RegisterNativeAllocation(int bytes) { 2032 // Total number of native bytes allocated. 2033 native_bytes_allocated_.fetch_add(bytes); 2034 Thread* self = Thread::Current(); 2035 if (static_cast<size_t>(native_bytes_allocated_) > native_footprint_gc_watermark_) { 2036 // The second watermark is higher than the gc watermark. If you hit this it means you are 2037 // allocating native objects faster than the GC can keep up with. 2038 if (static_cast<size_t>(native_bytes_allocated_) > native_footprint_limit_) { 2039 JNIEnv* env = self->GetJniEnv(); 2040 // Can't do this in WellKnownClasses::Init since System is not properly set up at that 2041 // point. 2042 if (WellKnownClasses::java_lang_System_runFinalization == NULL) { 2043 DCHECK(WellKnownClasses::java_lang_System != NULL); 2044 WellKnownClasses::java_lang_System_runFinalization = 2045 CacheMethod(env, WellKnownClasses::java_lang_System, true, "runFinalization", "()V"); 2046 assert(WellKnownClasses::java_lang_System_runFinalization != NULL); 2047 } 2048 if (WaitForConcurrentGcToComplete(self) != collector::kGcTypeNone) { 2049 // Just finished a GC, attempt to run finalizers. 2050 env->CallStaticVoidMethod(WellKnownClasses::java_lang_System, 2051 WellKnownClasses::java_lang_System_runFinalization); 2052 CHECK(!env->ExceptionCheck()); 2053 } 2054 2055 // If we still are over the watermark, attempt a GC for alloc and run finalizers. 2056 if (static_cast<size_t>(native_bytes_allocated_) > native_footprint_limit_) { 2057 CollectGarbageInternal(collector::kGcTypePartial, kGcCauseForAlloc, false); 2058 env->CallStaticVoidMethod(WellKnownClasses::java_lang_System, 2059 WellKnownClasses::java_lang_System_runFinalization); 2060 CHECK(!env->ExceptionCheck()); 2061 } 2062 // We have just run finalizers, update the native watermark since it is very likely that 2063 // finalizers released native managed allocations. 2064 UpdateMaxNativeFootprint(); 2065 } else { 2066 if (!IsGCRequestPending()) { 2067 RequestConcurrentGC(self); 2068 } 2069 } 2070 } 2071} 2072 2073void Heap::RegisterNativeFree(int bytes) { 2074 int expected_size, new_size; 2075 do { 2076 expected_size = native_bytes_allocated_.load(); 2077 new_size = expected_size - bytes; 2078 if (new_size < 0) { 2079 ThrowRuntimeException("attempted to free %d native bytes with only %d native bytes registered as allocated", 2080 bytes, expected_size); 2081 break; 2082 } 2083 } while (!native_bytes_allocated_.compare_and_swap(expected_size, new_size)); 2084} 2085 2086int64_t Heap::GetTotalMemory() const { 2087 int64_t ret = 0; 2088 typedef std::vector<space::ContinuousSpace*>::const_iterator It; 2089 for (It it = continuous_spaces_.begin(), end = continuous_spaces_.end(); it != end; ++it) { 2090 space::ContinuousSpace* space = *it; 2091 if (space->IsImageSpace()) { 2092 // Currently don't include the image space. 
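// (Likely rationale, inferred rather than stated by the original comment: the image space is a
// preloaded, effectively read-only mapping that the application cannot allocate into, so
// counting it would inflate GetTotalMemory() and skew GetPercentFree(). Note also the asymmetry
// below: dlmalloc spaces report their footprint, i.e. bytes obtained from the OS, while the
// large object space reports bytes actually allocated.)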
2093 } else if (space->IsDlMallocSpace()) { 2094 // Zygote or alloc space 2095 ret += space->AsDlMallocSpace()->GetFootprint(); 2096 } 2097 } 2098 typedef std::vector<space::DiscontinuousSpace*>::const_iterator It2; 2099 for (It2 it = discontinuous_spaces_.begin(), end = discontinuous_spaces_.end(); it != end; ++it) { 2100 space::DiscontinuousSpace* space = *it; 2101 if (space->IsLargeObjectSpace()) { 2102 ret += space->AsLargeObjectSpace()->GetBytesAllocated(); 2103 } 2104 } 2105 return ret; 2106} 2107 2108} // namespace gc 2109} // namespace art 2110
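// Illustrative sketch of the native-allocation accounting above (hypothetical numbers and
// helper names; only Runtime::Current()->GetHeap() and the two Register* methods defined in this
// file are real). UpdateMaxNativeFootprint() derives both watermarks from the registered native
// byte count: with 10 MB registered, a 0.5 target utilization, and the min_free_/max_free_
// clamps ignored, native_footprint_gc_watermark_ becomes 20 MB and native_footprint_limit_
// becomes 2 * 20 MB - 10 MB = 30 MB. RegisterNativeAllocation() requests a concurrent GC once
// the first watermark is crossed and falls back to a synchronous partial GC plus finalization
// once the second is crossed. A native wrapper that keeps this accounting accurate might pair
// the calls like so:
//   void* AllocateTrackedBuffer(size_t size) {
//     void* ptr = malloc(size);
//     if (ptr != NULL) {
//       Runtime::Current()->GetHeap()->RegisterNativeAllocation(static_cast<int>(size));
//     }
//     return ptr;
//   }
//   void ReleaseTrackedBuffer(void* ptr, size_t size) {
//     free(ptr);
//     Runtime::Current()->GetHeap()->RegisterNativeFree(static_cast<int>(size));
//   }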