heap.cc revision eb5710eba75bf338da56386ca29039df9d5134cb
/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "heap.h"

#define ATRACE_TAG ATRACE_TAG_DALVIK
#include <cutils/trace.h>

#include <limits>
#include <vector>
#include <valgrind.h>

#include "base/stl_util.h"
#include "common_throws.h"
#include "cutils/sched_policy.h"
#include "debugger.h"
#include "gc/accounting/atomic_stack.h"
#include "gc/accounting/card_table-inl.h"
#include "gc/accounting/heap_bitmap-inl.h"
#include "gc/accounting/mod_union_table-inl.h"
#include "gc/accounting/space_bitmap-inl.h"
#include "gc/collector/mark_sweep-inl.h"
#include "gc/collector/partial_mark_sweep.h"
#include "gc/collector/sticky_mark_sweep.h"
#include "gc/space/dlmalloc_space-inl.h"
#include "gc/space/image_space.h"
#include "gc/space/large_object_space.h"
#include "gc/space/space-inl.h"
#include "image.h"
#include "invoke_arg_array_builder.h"
#include "mirror/class-inl.h"
#include "mirror/field-inl.h"
#include "mirror/object.h"
#include "mirror/object-inl.h"
#include "mirror/object_array-inl.h"
#include "object_utils.h"
#include "os.h"
#include "ScopedLocalRef.h"
#include "scoped_thread_state_change.h"
#include "sirt_ref.h"
#include "thread_list.h"
#include "UniquePtr.h"
#include "well_known_classes.h"

namespace art {
namespace gc {

// When to create a log message about a slow GC, 100ms.
static const uint64_t kSlowGcThreshold = MsToNs(100);
// When to create a log message about a long pause, 5ms.
static const uint64_t kLongGcPauseThreshold = MsToNs(5);
static const bool kGCALotMode = false;
static const size_t kGcAlotInterval = KB;
static const bool kDumpGcPerformanceOnShutdown = false;
// Minimum amount of remaining bytes before a concurrent GC is triggered.
static const size_t kMinConcurrentRemainingBytes = 128 * KB;
const double Heap::kDefaultTargetUtilization = 0.5;
// If true, measure the total allocation time.
static const bool kMeasureAllocationTime = false;

Heap::Heap(size_t initial_size, size_t growth_limit, size_t min_free, size_t max_free,
           double target_utilization, size_t capacity,
           const std::string& original_image_file_name, bool concurrent_gc, size_t num_gc_threads)
    : alloc_space_(NULL),
      card_table_(NULL),
      concurrent_gc_(concurrent_gc),
      num_gc_threads_(num_gc_threads),
      have_zygote_space_(false),
      reference_queue_lock_(NULL),
      is_gc_running_(false),
      last_gc_type_(collector::kGcTypeNone),
      next_gc_type_(collector::kGcTypePartial),
      capacity_(capacity),
      growth_limit_(growth_limit),
      max_allowed_footprint_(initial_size),
      native_footprint_gc_watermark_(initial_size),
      native_footprint_limit_(2 * initial_size),
      concurrent_start_bytes_(concurrent_gc ? initial_size - (kMinConcurrentRemainingBytes)
                                             : std::numeric_limits<size_t>::max()),
      total_bytes_freed_ever_(0),
      total_objects_freed_ever_(0),
      large_object_threshold_(3 * kPageSize),
      num_bytes_allocated_(0),
      native_bytes_allocated_(0),
      process_state_(PROCESS_STATE_TOP),
      gc_memory_overhead_(0),
      verify_missing_card_marks_(false),
      verify_system_weaks_(false),
      verify_pre_gc_heap_(false),
      verify_post_gc_heap_(false),
      verify_mod_union_table_(false),
      min_alloc_space_size_for_sticky_gc_(2 * MB),
      min_remaining_space_for_sticky_gc_(1 * MB),
      last_trim_time_ms_(0),
      allocation_rate_(0),
      /* For GC-a-lot mode, we limit the allocation stacks to be kGcAlotInterval allocations. This
       * causes a lot of GC since we do a GC for alloc whenever the stack is full. When heap
       * verification is enabled, we limit the size of allocation stacks to speed up their
       * searching.
       */
      max_allocation_stack_size_(kGCALotMode ? kGcAlotInterval
          : (kDesiredHeapVerification > kNoHeapVerification) ? KB : MB),
      reference_referent_offset_(0),
      reference_queue_offset_(0),
      reference_queueNext_offset_(0),
      reference_pendingNext_offset_(0),
      finalizer_reference_zombie_offset_(0),
      min_free_(min_free),
      max_free_(max_free),
      target_utilization_(target_utilization),
      total_wait_time_(0),
      total_allocation_time_(0),
      verify_object_mode_(kHeapVerificationNotPermitted),
      running_on_valgrind_(RUNNING_ON_VALGRIND) {
  if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
    LOG(INFO) << "Heap() entering";
  }

  live_bitmap_.reset(new accounting::HeapBitmap(this));
  mark_bitmap_.reset(new accounting::HeapBitmap(this));

  // Requested begin for the alloc space, to follow the mapped image and oat files.
  byte* requested_alloc_space_begin = NULL;
  std::string image_file_name(original_image_file_name);
  if (!image_file_name.empty()) {
    space::ImageSpace* image_space = space::ImageSpace::Create(image_file_name);
    CHECK(image_space != NULL) << "Failed to create space for " << image_file_name;
    AddContinuousSpace(image_space);
    // Oat files referenced by image files immediately follow them in memory; ensure the alloc
    // space isn't going to get in the middle.
    byte* oat_file_end_addr = image_space->GetImageHeader().GetOatFileEnd();
    CHECK_GT(oat_file_end_addr, image_space->End());
    if (oat_file_end_addr > requested_alloc_space_begin) {
      requested_alloc_space_begin =
          reinterpret_cast<byte*>(RoundUp(reinterpret_cast<uintptr_t>(oat_file_end_addr),
                                          kPageSize));
    }
  }

  alloc_space_ = space::DlMallocSpace::Create(Runtime::Current()->IsZygote() ? "zygote space" : "alloc space",
                                              initial_size,
                                              growth_limit, capacity,
                                              requested_alloc_space_begin);
  CHECK(alloc_space_ != NULL) << "Failed to create alloc space";
  alloc_space_->SetFootprintLimit(alloc_space_->Capacity());
  AddContinuousSpace(alloc_space_);

  // Allocate the large object space.
  const bool kUseFreeListSpaceForLOS = false;
  if (kUseFreeListSpaceForLOS) {
    large_object_space_ = space::FreeListSpace::Create("large object space", NULL, capacity);
  } else {
    large_object_space_ = space::LargeObjectMapSpace::Create("large object space");
  }
  CHECK(large_object_space_ != NULL) << "Failed to create large object space";
  AddDiscontinuousSpace(large_object_space_);

  // Compute heap capacity. Continuous spaces are sorted in order of Begin().
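  // (The value computed below spans from the lowest space's Begin() to the highest space's End(),
  // extended by the DlMalloc space's non-growth-limit capacity when that space is last, so it
  // reflects the largest extent the heap can reach; it is used to size the card table created
  // further down. This note is an editorial reading of the code, not an original comment.)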
  byte* heap_begin = continuous_spaces_.front()->Begin();
  size_t heap_capacity = continuous_spaces_.back()->End() - continuous_spaces_.front()->Begin();
  if (continuous_spaces_.back()->IsDlMallocSpace()) {
    heap_capacity += continuous_spaces_.back()->AsDlMallocSpace()->NonGrowthLimitCapacity();
  }

  // Mark image objects in the live bitmap.
  // TODO: C++0x
  typedef std::vector<space::ContinuousSpace*>::iterator It;
  for (It it = continuous_spaces_.begin(); it != continuous_spaces_.end(); ++it) {
    space::ContinuousSpace* space = *it;
    if (space->IsImageSpace()) {
      space::ImageSpace* image_space = space->AsImageSpace();
      image_space->RecordImageAllocations(image_space->GetLiveBitmap());
    }
  }

  // Allocate the card table.
  card_table_.reset(accounting::CardTable::Create(heap_begin, heap_capacity));
  CHECK(card_table_.get() != NULL) << "Failed to create card table";

  image_mod_union_table_.reset(new accounting::ModUnionTableToZygoteAllocspace(this));
  CHECK(image_mod_union_table_.get() != NULL) << "Failed to create image mod-union table";

  zygote_mod_union_table_.reset(new accounting::ModUnionTableCardCache(this));
  CHECK(zygote_mod_union_table_.get() != NULL) << "Failed to create Zygote mod-union table";

  // TODO: Count objects in the image space here.
  num_bytes_allocated_ = 0;

  // Default mark stack size in bytes.
  static const size_t default_mark_stack_size = 64 * KB;
  mark_stack_.reset(accounting::ObjectStack::Create("mark stack", default_mark_stack_size));
  allocation_stack_.reset(accounting::ObjectStack::Create("allocation stack",
                                                          max_allocation_stack_size_));
  live_stack_.reset(accounting::ObjectStack::Create("live stack",
                                                    max_allocation_stack_size_));

  // It's still too early to take a lock because there are no threads yet, but we can create locks
  // now. We don't create them earlier to make it clear that you can't use locks during heap
  // initialization.
  gc_complete_lock_ = new Mutex("GC complete lock");
  gc_complete_cond_.reset(new ConditionVariable("GC complete condition variable",
                                                *gc_complete_lock_));

  // Create the reference queue lock; this is required for parallel object scanning in the GC.
  reference_queue_lock_ = new Mutex("reference queue lock");

  last_gc_time_ns_ = NanoTime();
  last_gc_size_ = GetBytesAllocated();

  // Create our garbage collectors.
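  // The loop below builds one non-concurrent and one concurrent variant of each mark-sweep
  // flavor (full, partial, sticky), six collectors in total; CollectGarbageInternal() later
  // selects among them by matching IsConcurrent() and GetGcType().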
  for (size_t i = 0; i < 2; ++i) {
    const bool concurrent = i != 0;
    mark_sweep_collectors_.push_back(new collector::MarkSweep(this, concurrent));
    mark_sweep_collectors_.push_back(new collector::PartialMarkSweep(this, concurrent));
    mark_sweep_collectors_.push_back(new collector::StickyMarkSweep(this, concurrent));
  }

  CHECK_NE(max_allowed_footprint_, 0U);
  if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
    LOG(INFO) << "Heap() exiting";
  }
}

void Heap::CreateThreadPool() {
  thread_pool_.reset(new ThreadPool(num_gc_threads_));
}

void Heap::DeleteThreadPool() {
  thread_pool_.reset(NULL);
}

// Sort spaces based on begin address.
struct ContinuousSpaceSorter {
  bool operator()(const space::ContinuousSpace* a, const space::ContinuousSpace* b) const {
    return a->Begin() < b->Begin();
  }
};

void Heap::UpdateProcessState(ProcessState process_state) {
  process_state_ = process_state;
}

void Heap::AddContinuousSpace(space::ContinuousSpace* space) {
  WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
  DCHECK(space != NULL);
  DCHECK(space->GetLiveBitmap() != NULL);
  live_bitmap_->AddContinuousSpaceBitmap(space->GetLiveBitmap());
  DCHECK(space->GetMarkBitmap() != NULL);
  mark_bitmap_->AddContinuousSpaceBitmap(space->GetMarkBitmap());
  continuous_spaces_.push_back(space);
  if (space->IsDlMallocSpace() && !space->IsLargeObjectSpace()) {
    alloc_space_ = space->AsDlMallocSpace();
  }

  // Ensure that spaces remain sorted in increasing order of start address (required for CMS finger).
  std::sort(continuous_spaces_.begin(), continuous_spaces_.end(), ContinuousSpaceSorter());

  // Ensure that ImageSpaces < ZygoteSpaces < AllocSpaces so that we can do address based checks to
  // avoid redundant marking.
  bool seen_zygote = false, seen_alloc = false;
  typedef std::vector<space::ContinuousSpace*>::const_iterator It;
  for (It it = continuous_spaces_.begin(); it != continuous_spaces_.end(); ++it) {
    space::ContinuousSpace* space = *it;
    if (space->IsImageSpace()) {
      DCHECK(!seen_zygote);
      DCHECK(!seen_alloc);
    } else if (space->IsZygoteSpace()) {
      DCHECK(!seen_alloc);
      seen_zygote = true;
    } else if (space->IsDlMallocSpace()) {
      seen_alloc = true;
    }
  }
}

void Heap::RegisterGCAllocation(size_t bytes) {
  if (this != NULL) {
    gc_memory_overhead_.fetch_add(bytes);
  }
}

void Heap::RegisterGCDeAllocation(size_t bytes) {
  if (this != NULL) {
    gc_memory_overhead_.fetch_sub(bytes);
  }
}

void Heap::AddDiscontinuousSpace(space::DiscontinuousSpace* space) {
  WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
  DCHECK(space != NULL);
  DCHECK(space->GetLiveObjects() != NULL);
  live_bitmap_->AddDiscontinuousObjectSet(space->GetLiveObjects());
  DCHECK(space->GetMarkObjects() != NULL);
  mark_bitmap_->AddDiscontinuousObjectSet(space->GetMarkObjects());
  discontinuous_spaces_.push_back(space);
}

void Heap::DumpGcPerformanceInfo(std::ostream& os) {
  // Dump cumulative timings.
  os << "Dumping cumulative Gc timings\n";
  uint64_t total_duration = 0;

  // Dump cumulative loggers for each GC type.
  // TODO: C++0x
  uint64_t total_paused_time = 0;
  typedef std::vector<collector::MarkSweep*>::const_iterator It;
  for (It it = mark_sweep_collectors_.begin();
       it != mark_sweep_collectors_.end(); ++it) {
    collector::MarkSweep* collector = *it;
    CumulativeLogger& logger = collector->GetCumulativeTimings();
    if (logger.GetTotalNs() != 0) {
      os << Dumpable<CumulativeLogger>(logger);
      const uint64_t total_ns = logger.GetTotalNs();
      const uint64_t total_pause_ns = (*it)->GetTotalPausedTimeNs();
      double seconds = NsToMs(logger.GetTotalNs()) / 1000.0;
      const uint64_t freed_bytes = collector->GetTotalFreedBytes();
      const uint64_t freed_objects = collector->GetTotalFreedObjects();
      os << collector->GetName() << " total time: " << PrettyDuration(total_ns) << "\n"
         << collector->GetName() << " paused time: " << PrettyDuration(total_pause_ns) << "\n"
         << collector->GetName() << " freed: " << freed_objects
         << " objects with total size " << PrettySize(freed_bytes) << "\n"
         << collector->GetName() << " throughput: " << freed_objects / seconds << "/s / "
         << PrettySize(freed_bytes / seconds) << "/s\n";
      total_duration += total_ns;
      total_paused_time += total_pause_ns;
    }
  }
  uint64_t allocation_time = static_cast<uint64_t>(total_allocation_time_) * kTimeAdjust;
  size_t total_objects_allocated = GetObjectsAllocatedEver();
  size_t total_bytes_allocated = GetBytesAllocatedEver();
  if (total_duration != 0) {
    const double total_seconds = static_cast<double>(total_duration / 1000) / 1000000.0;
    os << "Total time spent in GC: " << PrettyDuration(total_duration) << "\n";
    os << "Mean GC size throughput: "
       << PrettySize(GetBytesFreedEver() / total_seconds) << "/s\n";
    os << "Mean GC object throughput: "
       << (GetObjectsFreedEver() / total_seconds) << " objects/s\n";
  }
  os << "Total number of allocations: " << total_objects_allocated << "\n";
  os << "Total bytes allocated " << PrettySize(total_bytes_allocated) << "\n";
  if (kMeasureAllocationTime) {
    os << "Total time spent allocating: " << PrettyDuration(allocation_time) << "\n";
    os << "Mean allocation time: " << PrettyDuration(allocation_time / total_objects_allocated)
       << "\n";
  }
  os << "Total mutator paused time: " << PrettyDuration(total_paused_time) << "\n";
  os << "Total time waiting for GC to complete: " << PrettyDuration(total_wait_time_) << "\n";
  os << "Approximate GC data structures memory overhead: " << gc_memory_overhead_;
}

Heap::~Heap() {
  if (kDumpGcPerformanceOnShutdown) {
    DumpGcPerformanceInfo(LOG(INFO));
  }

  STLDeleteElements(&mark_sweep_collectors_);

  // If we don't reset then the mark stack complains in its destructor.
  allocation_stack_->Reset();
  live_stack_->Reset();

  VLOG(heap) << "~Heap()";
  // We can't take the heap lock here because there might be a daemon thread suspended with the
  // heap lock held. We know though that no non-daemon threads are executing, and we know that
  // all daemon threads are suspended, and we also know that the thread list has been deleted, so
  // those threads can't resume. We're the only running thread, and we can do whatever we like...
  STLDeleteElements(&continuous_spaces_);
  STLDeleteElements(&discontinuous_spaces_);
  delete gc_complete_lock_;
  delete reference_queue_lock_;
}

space::ContinuousSpace* Heap::FindContinuousSpaceFromObject(const mirror::Object* obj,
                                                            bool fail_ok) const {
  // TODO: C++0x auto
  typedef std::vector<space::ContinuousSpace*>::const_iterator It;
  for (It it = continuous_spaces_.begin(), end = continuous_spaces_.end(); it != end; ++it) {
    if ((*it)->Contains(obj)) {
      return *it;
    }
  }
  if (!fail_ok) {
    LOG(FATAL) << "object " << reinterpret_cast<const void*>(obj) << " not inside any spaces!";
  }
  return NULL;
}

space::DiscontinuousSpace* Heap::FindDiscontinuousSpaceFromObject(const mirror::Object* obj,
                                                                  bool fail_ok) const {
  // TODO: C++0x auto
  typedef std::vector<space::DiscontinuousSpace*>::const_iterator It;
  for (It it = discontinuous_spaces_.begin(), end = discontinuous_spaces_.end(); it != end; ++it) {
    if ((*it)->Contains(obj)) {
      return *it;
    }
  }
  if (!fail_ok) {
    LOG(FATAL) << "object " << reinterpret_cast<const void*>(obj) << " not inside any spaces!";
  }
  return NULL;
}

space::Space* Heap::FindSpaceFromObject(const mirror::Object* obj, bool fail_ok) const {
  space::Space* result = FindContinuousSpaceFromObject(obj, true);
  if (result != NULL) {
    return result;
  }
  return FindDiscontinuousSpaceFromObject(obj, true);
}

space::ImageSpace* Heap::GetImageSpace() const {
  // TODO: C++0x auto
  typedef std::vector<space::ContinuousSpace*>::const_iterator It;
  for (It it = continuous_spaces_.begin(), end = continuous_spaces_.end(); it != end; ++it) {
    if ((*it)->IsImageSpace()) {
      return (*it)->AsImageSpace();
    }
  }
  return NULL;
}

static void MSpaceChunkCallback(void* start, void* end, size_t used_bytes, void* arg) {
  size_t chunk_size = reinterpret_cast<uint8_t*>(end) - reinterpret_cast<uint8_t*>(start);
  if (used_bytes < chunk_size) {
    size_t chunk_free_bytes = chunk_size - used_bytes;
    size_t& max_contiguous_allocation = *reinterpret_cast<size_t*>(arg);
    max_contiguous_allocation = std::max(max_contiguous_allocation, chunk_free_bytes);
  }
}

mirror::Object* Heap::AllocObject(Thread* self, mirror::Class* c, size_t byte_count) {
  DCHECK(c == NULL || (c->IsClassClass() && byte_count >= sizeof(mirror::Class)) ||
         (c->IsVariableSize() || c->GetObjectSize() == byte_count) ||
         strlen(ClassHelper(c).GetDescriptor()) == 0);
  DCHECK_GE(byte_count, sizeof(mirror::Object));

  mirror::Object* obj = NULL;
  size_t bytes_allocated = 0;
  uint64_t allocation_start = 0;
  if (UNLIKELY(kMeasureAllocationTime)) {
    allocation_start = NanoTime() / kTimeAdjust;
  }

  // We need to have a zygote space or else our newly allocated large object can end up in the
  // Zygote resulting in it being prematurely freed.
  // We can only do this for primitive objects since large objects will not be within the card table
  // range. This also means that we rely on SetClass not dirtying the object's card.
  bool large_object_allocation =
      byte_count >= large_object_threshold_ && have_zygote_space_ && c->IsPrimitiveArray();
  if (UNLIKELY(large_object_allocation)) {
    obj = Allocate(self, large_object_space_, byte_count, &bytes_allocated);
    // Make sure that our large object didn't get placed anywhere within the space interval or else
    // it breaks the immune range.
    DCHECK(obj == NULL ||
           reinterpret_cast<byte*>(obj) < continuous_spaces_.front()->Begin() ||
           reinterpret_cast<byte*>(obj) >= continuous_spaces_.back()->End());
  } else {
    obj = Allocate(self, alloc_space_, byte_count, &bytes_allocated);
    // Ensure that we did not allocate into a zygote space.
    DCHECK(obj == NULL || !have_zygote_space_ || !FindSpaceFromObject(obj, false)->IsZygoteSpace());
  }

  if (LIKELY(obj != NULL)) {
    obj->SetClass(c);

    // Record allocation after since we want to use the atomic add for the atomic fence to guard
    // the SetClass since we do not want the class to appear NULL in another thread.
    RecordAllocation(bytes_allocated, obj);

    if (Dbg::IsAllocTrackingEnabled()) {
      Dbg::RecordAllocation(c, byte_count);
    }
    if (UNLIKELY(static_cast<size_t>(num_bytes_allocated_) >= concurrent_start_bytes_)) {
      // The SirtRef is necessary since the calls in RequestConcurrentGC are a safepoint.
      SirtRef<mirror::Object> ref(self, obj);
      RequestConcurrentGC(self);
    }
    if (kDesiredHeapVerification > kNoHeapVerification) {
      VerifyObject(obj);
    }

    if (UNLIKELY(kMeasureAllocationTime)) {
      total_allocation_time_.fetch_add(NanoTime() / kTimeAdjust - allocation_start);
    }

    return obj;
  } else {
    std::ostringstream oss;
    int64_t total_bytes_free = GetFreeMemory();
    oss << "Failed to allocate a " << byte_count << " byte allocation with " << total_bytes_free
        << " free bytes";
    // If the allocation failed due to fragmentation, print out the largest continuous allocation.
    if (!large_object_allocation && total_bytes_free >= byte_count) {
      size_t max_contiguous_allocation = 0;
      // TODO: C++0x auto
      typedef std::vector<space::ContinuousSpace*>::const_iterator It;
      for (It it = continuous_spaces_.begin(), end = continuous_spaces_.end(); it != end; ++it) {
        space::ContinuousSpace* space = *it;
        if (space->IsDlMallocSpace()) {
          space->AsDlMallocSpace()->Walk(MSpaceChunkCallback, &max_contiguous_allocation);
        }
      }
      oss << "; failed due to fragmentation (largest possible contiguous allocation "
          << max_contiguous_allocation << " bytes)";
    }
    self->ThrowOutOfMemoryError(oss.str().c_str());
    return NULL;
  }
}

bool Heap::IsHeapAddress(const mirror::Object* obj) {
  // Note: we deliberately don't take the lock here, and mustn't test anything that would
  // require taking the lock.
  if (obj == NULL) {
    return true;
  }
  if (UNLIKELY(!IsAligned<kObjectAlignment>(obj))) {
    return false;
  }
  return FindSpaceFromObject(obj, true) != NULL;
}

bool Heap::IsLiveObjectLocked(const mirror::Object* obj) {
  // Locks::heap_bitmap_lock_->AssertReaderHeld(Thread::Current());
  if (obj == NULL || UNLIKELY(!IsAligned<kObjectAlignment>(obj))) {
    return false;
  }
  space::ContinuousSpace* c_space = FindContinuousSpaceFromObject(obj, true);
  space::DiscontinuousSpace* d_space = NULL;
  if (c_space != NULL) {
    if (c_space->GetLiveBitmap()->Test(obj)) {
      return true;
    }
  } else {
    d_space = FindDiscontinuousSpaceFromObject(obj, true);
    if (d_space != NULL) {
      if (d_space->GetLiveObjects()->Test(obj)) {
        return true;
      }
    }
  }
  // This is covering the allocation/live stack swapping that is done without mutators suspended.
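  // The loop below retries the stack lookups a bounded number of times, sleeping briefly between
  // attempts, so a stack swap in progress on another thread gets a chance to finish before we
  // conclude the object is dead (the 5 tries / 10 ms values are simply the constants used here).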
  for (size_t i = 0; i < 5; ++i) {
    if (allocation_stack_->Contains(const_cast<mirror::Object*>(obj)) ||
        live_stack_->Contains(const_cast<mirror::Object*>(obj))) {
      return true;
    }
    NanoSleep(MsToNs(10));
  }
  // We need to check the bitmaps again since there is a race where we mark something as live and
  // then clear the stack containing it.
  if (c_space != NULL) {
    if (c_space->GetLiveBitmap()->Test(obj)) {
      return true;
    }
  } else {
    d_space = FindDiscontinuousSpaceFromObject(obj, true);
    if (d_space != NULL && d_space->GetLiveObjects()->Test(obj)) {
      return true;
    }
  }
  return false;
}

void Heap::VerifyObjectImpl(const mirror::Object* obj) {
  if (Thread::Current() == NULL ||
      Runtime::Current()->GetThreadList()->GetLockOwner() == Thread::Current()->GetTid()) {
    return;
  }
  VerifyObjectBody(obj);
}

void Heap::DumpSpaces() {
  // TODO: C++0x auto
  typedef std::vector<space::ContinuousSpace*>::const_iterator It;
  for (It it = continuous_spaces_.begin(), end = continuous_spaces_.end(); it != end; ++it) {
    space::ContinuousSpace* space = *it;
    accounting::SpaceBitmap* live_bitmap = space->GetLiveBitmap();
    accounting::SpaceBitmap* mark_bitmap = space->GetMarkBitmap();
    LOG(INFO) << space << " " << *space << "\n"
              << live_bitmap << " " << *live_bitmap << "\n"
              << mark_bitmap << " " << *mark_bitmap;
  }
  typedef std::vector<space::DiscontinuousSpace*>::const_iterator It2;
  for (It2 it = discontinuous_spaces_.begin(), end = discontinuous_spaces_.end(); it != end; ++it) {
    space::DiscontinuousSpace* space = *it;
    LOG(INFO) << space << " " << *space << "\n";
  }
}

void Heap::VerifyObjectBody(const mirror::Object* obj) {
  if (UNLIKELY(!IsAligned<kObjectAlignment>(obj))) {
    LOG(FATAL) << "Object isn't aligned: " << obj;
  }
  if (UNLIKELY(GetObjectsAllocated() <= 10)) {  // Ignore early dawn of the universe verifications.
    return;
  }
  const byte* raw_addr = reinterpret_cast<const byte*>(obj) +
      mirror::Object::ClassOffset().Int32Value();
  const mirror::Class* c = *reinterpret_cast<mirror::Class* const *>(raw_addr);
  if (UNLIKELY(c == NULL)) {
    LOG(FATAL) << "Null class in object: " << obj;
  } else if (UNLIKELY(!IsAligned<kObjectAlignment>(c))) {
    LOG(FATAL) << "Class isn't aligned: " << c << " in object: " << obj;
  }
  // Check obj.getClass().getClass() == obj.getClass().getClass().getClass()
  // Note: we don't use the accessors here as they have internal sanity checks
  // that we don't want to run
  raw_addr = reinterpret_cast<const byte*>(c) + mirror::Object::ClassOffset().Int32Value();
  const mirror::Class* c_c = *reinterpret_cast<mirror::Class* const *>(raw_addr);
  raw_addr = reinterpret_cast<const byte*>(c_c) + mirror::Object::ClassOffset().Int32Value();
  const mirror::Class* c_c_c = *reinterpret_cast<mirror::Class* const *>(raw_addr);
  CHECK_EQ(c_c, c_c_c);

  if (verify_object_mode_ != kVerifyAllFast) {
    // TODO: the bitmap tests below are racy if VerifyObjectBody is called without the
    // heap_bitmap_lock_.
    if (!IsLiveObjectLocked(obj)) {
      DumpSpaces();
      LOG(FATAL) << "Object is dead: " << obj;
    }
    if (!IsLiveObjectLocked(c)) {
      LOG(FATAL) << "Class of object is dead: " << c << " in object: " << obj;
    }
  }
}

void Heap::VerificationCallback(mirror::Object* obj, void* arg) {
  DCHECK(obj != NULL);
  reinterpret_cast<Heap*>(arg)->VerifyObjectBody(obj);
}

void Heap::VerifyHeap() {
  ReaderMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
  GetLiveBitmap()->Walk(Heap::VerificationCallback, this);
}

inline void Heap::RecordAllocation(size_t size, mirror::Object* obj) {
  DCHECK(obj != NULL);
  DCHECK_GT(size, 0u);
  num_bytes_allocated_.fetch_add(size);

  if (Runtime::Current()->HasStatsEnabled()) {
    RuntimeStats* thread_stats = Thread::Current()->GetStats();
    ++thread_stats->allocated_objects;
    thread_stats->allocated_bytes += size;

    // TODO: Update these atomically.
    RuntimeStats* global_stats = Runtime::Current()->GetStats();
    ++global_stats->allocated_objects;
    global_stats->allocated_bytes += size;
  }

  // This is safe to do since the GC will never free objects which are neither in the allocation
  // stack nor the live bitmap.
  while (!allocation_stack_->AtomicPushBack(obj)) {
    CollectGarbageInternal(collector::kGcTypeSticky, kGcCauseForAlloc, false);
  }
}

void Heap::RecordFree(size_t freed_objects, size_t freed_bytes) {
  DCHECK_LE(freed_bytes, static_cast<size_t>(num_bytes_allocated_));
  num_bytes_allocated_.fetch_sub(freed_bytes);

  if (Runtime::Current()->HasStatsEnabled()) {
    RuntimeStats* thread_stats = Thread::Current()->GetStats();
    thread_stats->freed_objects += freed_objects;
    thread_stats->freed_bytes += freed_bytes;

    // TODO: Do this concurrently.
    RuntimeStats* global_stats = Runtime::Current()->GetStats();
    global_stats->freed_objects += freed_objects;
    global_stats->freed_bytes += freed_bytes;
  }
}

inline bool Heap::IsOutOfMemoryOnAllocation(size_t alloc_size) {
  return num_bytes_allocated_ + alloc_size > growth_limit_;
}

inline mirror::Object* Heap::TryToAllocate(Thread* self, space::AllocSpace* space, size_t alloc_size,
                                           bool grow, size_t* bytes_allocated) {
  if (IsOutOfMemoryOnAllocation(alloc_size)) {
    return NULL;
  }
  return space->Alloc(self, alloc_size, bytes_allocated);
}

// DlMallocSpace-specific version.
inline mirror::Object* Heap::TryToAllocate(Thread* self, space::DlMallocSpace* space, size_t alloc_size,
                                           bool grow, size_t* bytes_allocated) {
  if (IsOutOfMemoryOnAllocation(alloc_size)) {
    return NULL;
  }
  if (!running_on_valgrind_) {
    return space->AllocNonvirtual(self, alloc_size, bytes_allocated);
  } else {
    return space->Alloc(self, alloc_size, bytes_allocated);
  }
}

template <class T>
inline mirror::Object* Heap::Allocate(Thread* self, T* space, size_t alloc_size, size_t* bytes_allocated) {
  // Since allocation can cause a GC which will need to SuspendAll, make sure all allocations are
  // done in the runnable state where suspension is expected.
  DCHECK_EQ(self->GetState(), kRunnable);
  self->AssertThreadSuspensionIsAllowable();

  mirror::Object* ptr = TryToAllocate(self, space, alloc_size, false, bytes_allocated);
  if (ptr != NULL) {
    return ptr;
  }
  return AllocateInternalWithGc(self, space, alloc_size, bytes_allocated);
}

mirror::Object* Heap::AllocateInternalWithGc(Thread* self, space::AllocSpace* space, size_t alloc_size,
                                             size_t* bytes_allocated) {
  mirror::Object* ptr;

  // The allocation failed. If the GC is running, block until it completes, and then retry the
  // allocation.
  collector::GcType last_gc = WaitForConcurrentGcToComplete(self);
  if (last_gc != collector::kGcTypeNone) {
    // A GC was in progress and we blocked, retry allocation now that memory has been freed.
    ptr = TryToAllocate(self, space, alloc_size, false, bytes_allocated);
    if (ptr != NULL) {
      return ptr;
    }
  }

  // Loop through our different Gc types and try to Gc until we get enough free memory.
  for (size_t i = static_cast<size_t>(last_gc) + 1;
       i < static_cast<size_t>(collector::kGcTypeMax); ++i) {
    bool run_gc = false;
    collector::GcType gc_type = static_cast<collector::GcType>(i);
    switch (gc_type) {
      case collector::kGcTypeSticky: {
        const size_t alloc_space_size = alloc_space_->Size();
        run_gc = alloc_space_size > min_alloc_space_size_for_sticky_gc_ &&
            alloc_space_->Capacity() - alloc_space_size >= min_remaining_space_for_sticky_gc_;
        break;
      }
      case collector::kGcTypePartial:
        run_gc = have_zygote_space_;
        break;
      case collector::kGcTypeFull:
        run_gc = true;
        break;
      default:
        break;
    }

    if (run_gc) {
      // If we actually ran a different type of Gc than requested, we can skip the index forwards.
      collector::GcType gc_type_ran = CollectGarbageInternal(gc_type, kGcCauseForAlloc, false);
      DCHECK_GE(static_cast<size_t>(gc_type_ran), i);
      i = static_cast<size_t>(gc_type_ran);

      // Did we free sufficient memory for the allocation to succeed?
      ptr = TryToAllocate(self, space, alloc_size, false, bytes_allocated);
      if (ptr != NULL) {
        return ptr;
      }
    }
  }

  // Allocations have failed after GCs; this is an exceptional state.
  // Try harder, growing the heap if necessary.
  ptr = TryToAllocate(self, space, alloc_size, true, bytes_allocated);
  if (ptr != NULL) {
    return ptr;
  }

  // Most allocations should have succeeded by now, so the heap is really full, really fragmented,
  // or the requested size is really big. Do another GC, collecting SoftReferences this time. The
  // VM spec requires that all SoftReferences have been collected and cleared before throwing OOME.

  // OLD-TODO: wait for the finalizers from the previous GC to finish
  VLOG(gc) << "Forcing collection of SoftReferences for " << PrettySize(alloc_size)
           << " allocation";

  // We don't need a WaitForConcurrentGcToComplete here either.
  CollectGarbageInternal(collector::kGcTypeFull, kGcCauseForAlloc, true);
  return TryToAllocate(self, space, alloc_size, true, bytes_allocated);
}

void Heap::SetTargetHeapUtilization(float target) {
  DCHECK_GT(target, 0.0f);  // asserted in Java code
  DCHECK_LT(target, 1.0f);
  target_utilization_ = target;
}

size_t Heap::GetObjectsAllocated() const {
  size_t total = 0;
  typedef std::vector<space::ContinuousSpace*>::const_iterator It;
  for (It it = continuous_spaces_.begin(), end = continuous_spaces_.end(); it != end; ++it) {
    space::ContinuousSpace* space = *it;
    if (space->IsDlMallocSpace()) {
      total += space->AsDlMallocSpace()->GetObjectsAllocated();
    }
  }
  typedef std::vector<space::DiscontinuousSpace*>::const_iterator It2;
  for (It2 it = discontinuous_spaces_.begin(), end = discontinuous_spaces_.end(); it != end; ++it) {
    space::DiscontinuousSpace* space = *it;
    total += space->AsLargeObjectSpace()->GetObjectsAllocated();
  }
  return total;
}

size_t Heap::GetObjectsAllocatedEver() const {
  size_t total = 0;
  typedef std::vector<space::ContinuousSpace*>::const_iterator It;
  for (It it = continuous_spaces_.begin(), end = continuous_spaces_.end(); it != end; ++it) {
    space::ContinuousSpace* space = *it;
    if (space->IsDlMallocSpace()) {
      total += space->AsDlMallocSpace()->GetTotalObjectsAllocated();
    }
  }
  typedef std::vector<space::DiscontinuousSpace*>::const_iterator It2;
  for (It2 it = discontinuous_spaces_.begin(), end = discontinuous_spaces_.end(); it != end; ++it) {
    space::DiscontinuousSpace* space = *it;
    total += space->AsLargeObjectSpace()->GetTotalObjectsAllocated();
  }
  return total;
}

size_t Heap::GetBytesAllocatedEver() const {
  size_t total = 0;
  typedef std::vector<space::ContinuousSpace*>::const_iterator It;
  for (It it = continuous_spaces_.begin(), end = continuous_spaces_.end(); it != end; ++it) {
    space::ContinuousSpace* space = *it;
    if (space->IsDlMallocSpace()) {
      total += space->AsDlMallocSpace()->GetTotalBytesAllocated();
    }
  }
  typedef std::vector<space::DiscontinuousSpace*>::const_iterator It2;
  for (It2 it = discontinuous_spaces_.begin(), end = discontinuous_spaces_.end(); it != end; ++it) {
    space::DiscontinuousSpace* space = *it;
    total += space->AsLargeObjectSpace()->GetTotalBytesAllocated();
  }
  return total;
}

class InstanceCounter {
 public:
  InstanceCounter(const std::vector<mirror::Class*>& classes, bool use_is_assignable_from, uint64_t* counts)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      : classes_(classes), use_is_assignable_from_(use_is_assignable_from), counts_(counts) {
  }

  void operator()(const mirror::Object* o) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    for (size_t i = 0; i < classes_.size(); ++i) {
      const mirror::Class* instance_class = o->GetClass();
      if (use_is_assignable_from_) {
        if (instance_class != NULL && classes_[i]->IsAssignableFrom(instance_class)) {
          ++counts_[i];
        }
      } else {
        if (instance_class == classes_[i]) {
          ++counts_[i];
        }
      }
    }
  }

 private:
  const std::vector<mirror::Class*>& classes_;
  bool use_is_assignable_from_;
  uint64_t* const counts_;

  DISALLOW_COPY_AND_ASSIGN(InstanceCounter);
};

void Heap::CountInstances(const std::vector<mirror::Class*>& classes, bool use_is_assignable_from,
                          uint64_t* counts) {
  // We only want reachable instances, so do a GC. This also ensures that the alloc stack
  // is empty, so the live bitmap is the only place we need to look.
  Thread* self = Thread::Current();
  self->TransitionFromRunnableToSuspended(kNative);
  CollectGarbage(false);
  self->TransitionFromSuspendedToRunnable();

  InstanceCounter counter(classes, use_is_assignable_from, counts);
  ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
  GetLiveBitmap()->Visit(counter);
}

class InstanceCollector {
 public:
  InstanceCollector(mirror::Class* c, int32_t max_count, std::vector<mirror::Object*>& instances)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      : class_(c), max_count_(max_count), instances_(instances) {
  }

  void operator()(const mirror::Object* o) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    const mirror::Class* instance_class = o->GetClass();
    if (instance_class == class_) {
      if (max_count_ == 0 || instances_.size() < max_count_) {
        instances_.push_back(const_cast<mirror::Object*>(o));
      }
    }
  }

 private:
  mirror::Class* class_;
  uint32_t max_count_;
  std::vector<mirror::Object*>& instances_;

  DISALLOW_COPY_AND_ASSIGN(InstanceCollector);
};

void Heap::GetInstances(mirror::Class* c, int32_t max_count,
                        std::vector<mirror::Object*>& instances) {
  // We only want reachable instances, so do a GC. This also ensures that the alloc stack
  // is empty, so the live bitmap is the only place we need to look.
  Thread* self = Thread::Current();
  self->TransitionFromRunnableToSuspended(kNative);
  CollectGarbage(false);
  self->TransitionFromSuspendedToRunnable();

  InstanceCollector collector(c, max_count, instances);
  ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
  GetLiveBitmap()->Visit(collector);
}

class ReferringObjectsFinder {
 public:
  ReferringObjectsFinder(mirror::Object* object, int32_t max_count,
                         std::vector<mirror::Object*>& referring_objects)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      : object_(object), max_count_(max_count), referring_objects_(referring_objects) {
  }

  // For bitmap Visit.
  // TODO: Fix lock analysis to not use NO_THREAD_SAFETY_ANALYSIS, requires support for
  // annotalysis on visitors.
  void operator()(const mirror::Object* o) const NO_THREAD_SAFETY_ANALYSIS {
    collector::MarkSweep::VisitObjectReferences(o, *this);
  }

  // For MarkSweep::VisitObjectReferences.
  void operator()(const mirror::Object* referrer, const mirror::Object* object,
                  const MemberOffset&, bool) const {
    if (object == object_ && (max_count_ == 0 || referring_objects_.size() < max_count_)) {
      referring_objects_.push_back(const_cast<mirror::Object*>(referrer));
    }
  }

 private:
  mirror::Object* object_;
  uint32_t max_count_;
  std::vector<mirror::Object*>& referring_objects_;

  DISALLOW_COPY_AND_ASSIGN(ReferringObjectsFinder);
};

void Heap::GetReferringObjects(mirror::Object* o, int32_t max_count,
                               std::vector<mirror::Object*>& referring_objects) {
  // We only want reachable instances, so do a GC. This also ensures that the alloc stack
  // is empty, so the live bitmap is the only place we need to look.
  Thread* self = Thread::Current();
  self->TransitionFromRunnableToSuspended(kNative);
  CollectGarbage(false);
  self->TransitionFromSuspendedToRunnable();

  ReferringObjectsFinder finder(o, max_count, referring_objects);
  ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
  GetLiveBitmap()->Visit(finder);
}

void Heap::CollectGarbage(bool clear_soft_references) {
  // Even if we waited for a GC we still need to do another GC since weaks allocated during the
  // last GC will not have necessarily been cleared.
  Thread* self = Thread::Current();
  WaitForConcurrentGcToComplete(self);
  CollectGarbageInternal(collector::kGcTypeFull, kGcCauseExplicit, clear_soft_references);
}

void Heap::PreZygoteFork() {
  static Mutex zygote_creation_lock_("zygote creation lock", kZygoteCreationLock);
  // Do this before acquiring the zygote creation lock so that we don't get lock order violations.
  CollectGarbage(false);
  Thread* self = Thread::Current();
  MutexLock mu(self, zygote_creation_lock_);

  // Try to see if we have any Zygote spaces.
  if (have_zygote_space_) {
    return;
  }

  VLOG(heap) << "Starting PreZygoteFork with alloc space size " << PrettySize(alloc_space_->Size());

  {
    // Flush the alloc stack.
    WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
    FlushAllocStack();
  }

  // Turn the current alloc space into a Zygote space and obtain the new alloc space composed
  // of the remaining available heap memory.
  space::DlMallocSpace* zygote_space = alloc_space_;
  alloc_space_ = zygote_space->CreateZygoteSpace("alloc space");
  alloc_space_->SetFootprintLimit(alloc_space_->Capacity());

  // Change the GC retention policy of the zygote space to only collect when full.
  zygote_space->SetGcRetentionPolicy(space::kGcRetentionPolicyFullCollect);
  AddContinuousSpace(alloc_space_);
  have_zygote_space_ = true;

  // Reset the cumulative loggers since we now have a few additional timing phases.
  // TODO: C++0x
  typedef std::vector<collector::MarkSweep*>::const_iterator It;
  for (It it = mark_sweep_collectors_.begin(), end = mark_sweep_collectors_.end();
       it != end; ++it) {
    (*it)->ResetCumulativeStatistics();
  }
}

void Heap::FlushAllocStack() {
  MarkAllocStack(alloc_space_->GetLiveBitmap(), large_object_space_->GetLiveObjects(),
                 allocation_stack_.get());
  allocation_stack_->Reset();
}

void Heap::MarkAllocStack(accounting::SpaceBitmap* bitmap, accounting::SpaceSetMap* large_objects,
                          accounting::ObjectStack* stack) {
  mirror::Object** limit = stack->End();
  for (mirror::Object** it = stack->Begin(); it != limit; ++it) {
    const mirror::Object* obj = *it;
    DCHECK(obj != NULL);
    if (LIKELY(bitmap->HasAddress(obj))) {
      bitmap->Set(obj);
    } else {
      large_objects->Set(obj);
    }
  }
}

collector::GcType Heap::CollectGarbageInternal(collector::GcType gc_type, GcCause gc_cause,
                                               bool clear_soft_references) {
  Thread* self = Thread::Current();

  switch (gc_cause) {
    case kGcCauseForAlloc:
      ATRACE_BEGIN("GC (alloc)");
      break;
    case kGcCauseBackground:
      ATRACE_BEGIN("GC (background)");
      break;
    case kGcCauseExplicit:
      ATRACE_BEGIN("GC (explicit)");
      break;
  }

  ScopedThreadStateChange tsc(self, kWaitingPerformingGc);
  Locks::mutator_lock_->AssertNotHeld(self);

  if (self->IsHandlingStackOverflow()) {
    LOG(WARNING) << "Performing GC on a thread that is handling a stack overflow.";
  }

  // Ensure there is only one GC at a time.
  bool start_collect = false;
  while (!start_collect) {
    {
      MutexLock mu(self, *gc_complete_lock_);
      if (!is_gc_running_) {
        is_gc_running_ = true;
        start_collect = true;
      }
    }
    if (!start_collect) {
      WaitForConcurrentGcToComplete(self);
      // TODO: if another thread beat this one to do the GC, perhaps we should just return here?
      // Not doing at the moment to ensure soft references are cleared.
    }
  }
  gc_complete_lock_->AssertNotHeld(self);

  if (gc_cause == kGcCauseForAlloc && Runtime::Current()->HasStatsEnabled()) {
    ++Runtime::Current()->GetStats()->gc_for_alloc_count;
    ++Thread::Current()->GetStats()->gc_for_alloc_count;
  }

  uint64_t gc_start_time_ns = NanoTime();
  uint64_t gc_start_size = GetBytesAllocated();
  // Approximate allocation rate in bytes / second.
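  // Sketch of the computation below: with ms_delta milliseconds elapsed since the last GC,
  // allocation_rate_ = (bytes allocated since the last GC) * 1000 / ms_delta, i.e. bytes per
  // second; for example, 4 MB allocated over 500 ms yields a rate of 8 MB/s.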
  if (UNLIKELY(gc_start_time_ns == last_gc_time_ns_)) {
    LOG(WARNING) << "Timers are broken (gc_start_time == last_gc_time_).";
  }
  uint64_t ms_delta = NsToMs(gc_start_time_ns - last_gc_time_ns_);
  if (ms_delta != 0) {
    allocation_rate_ = ((gc_start_size - last_gc_size_) * 1000) / ms_delta;
    VLOG(heap) << "Allocation rate: " << PrettySize(allocation_rate_) << "/s";
  }

  if (gc_type == collector::kGcTypeSticky &&
      alloc_space_->Size() < min_alloc_space_size_for_sticky_gc_) {
    gc_type = collector::kGcTypePartial;
  }

  DCHECK_LT(gc_type, collector::kGcTypeMax);
  DCHECK_NE(gc_type, collector::kGcTypeNone);
  collector::MarkSweep* collector = NULL;
  typedef std::vector<collector::MarkSweep*>::iterator It;
  for (It it = mark_sweep_collectors_.begin(), end = mark_sweep_collectors_.end();
       it != end; ++it) {
    collector::MarkSweep* cur_collector = *it;
    if (cur_collector->IsConcurrent() == concurrent_gc_ && cur_collector->GetGcType() == gc_type) {
      collector = cur_collector;
      break;
    }
  }
  CHECK(collector != NULL)
      << "Could not find garbage collector with concurrent=" << concurrent_gc_
      << " and type=" << gc_type;
  collector->clear_soft_references_ = clear_soft_references;
  collector->Run();
  total_objects_freed_ever_ += collector->GetFreedObjects();
  total_bytes_freed_ever_ += collector->GetFreedBytes();

  const size_t duration = collector->GetDurationNs();
  std::vector<uint64_t> pauses = collector->GetPauseTimes();
  bool was_slow = duration > kSlowGcThreshold ||
      (gc_cause == kGcCauseForAlloc && duration > kLongGcPauseThreshold);
  for (size_t i = 0; i < pauses.size(); ++i) {
    if (pauses[i] > kLongGcPauseThreshold) {
      was_slow = true;
    }
  }

  if (was_slow) {
    const size_t percent_free = GetPercentFree();
    const size_t current_heap_size = GetBytesAllocated();
    const size_t total_memory = GetTotalMemory();
    std::ostringstream pause_string;
    for (size_t i = 0; i < pauses.size(); ++i) {
      pause_string << PrettyDuration((pauses[i] / 1000) * 1000)
                   << ((i != pauses.size() - 1) ? ", " : "");
    }
    LOG(INFO) << gc_cause << " " << collector->GetName()
              << "GC freed " << PrettySize(collector->GetFreedBytes()) << ", "
              << percent_free << "% free, " << PrettySize(current_heap_size) << "/"
              << PrettySize(total_memory) << ", " << "paused " << pause_string.str()
              << " total " << PrettyDuration((duration / 1000) * 1000);
    if (VLOG_IS_ON(heap)) {
      LOG(INFO) << Dumpable<base::TimingLogger>(collector->GetTimings());
    }
  }

  {
    MutexLock mu(self, *gc_complete_lock_);
    is_gc_running_ = false;
    last_gc_type_ = gc_type;
    // Wake anyone who may have been waiting for the GC to complete.
    gc_complete_cond_->Broadcast(self);
  }

  // Inform DDMS that a GC completed.
  ATRACE_END();
  Dbg::GcDidFinish();
  return gc_type;
}

void Heap::UpdateAndMarkModUnion(collector::MarkSweep* mark_sweep, base::TimingLogger& timings,
                                 collector::GcType gc_type) {
  if (gc_type == collector::kGcTypeSticky) {
    // Don't need to do anything for mod union table in this case since we are only scanning dirty
    // cards.
    return;
  }

  // Update zygote mod union table.
  if (gc_type == collector::kGcTypePartial) {
    timings.NewSplit("UpdateZygoteModUnionTable");
    zygote_mod_union_table_->Update();

    timings.NewSplit("ZygoteMarkReferences");
    zygote_mod_union_table_->MarkReferences(mark_sweep);
  }

  // Processes the cards we cleared earlier and adds their objects into the mod-union table.
  timings.NewSplit("UpdateModUnionTable");
  image_mod_union_table_->Update();

  // Scans all objects in the mod-union table.
  timings.NewSplit("MarkImageToAllocSpaceReferences");
  image_mod_union_table_->MarkReferences(mark_sweep);
}

static void RootMatchesObjectVisitor(const mirror::Object* root, void* arg) {
  mirror::Object* obj = reinterpret_cast<mirror::Object*>(arg);
  if (root == obj) {
    LOG(INFO) << "Object " << obj << " is a root";
  }
}

class ScanVisitor {
 public:
  void operator()(const mirror::Object* obj) const {
    LOG(INFO) << "Would have rescanned object " << obj;
  }
};

// Verify a reference from an object.
class VerifyReferenceVisitor {
 public:
  explicit VerifyReferenceVisitor(Heap* heap)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_)
      : heap_(heap), failed_(false) {}

  bool Failed() const {
    return failed_;
  }

  // TODO: Fix lock analysis to not use NO_THREAD_SAFETY_ANALYSIS, requires support for smarter
  // analysis on visitors.
  void operator()(const mirror::Object* obj, const mirror::Object* ref,
                  const MemberOffset& offset, bool /* is_static */) const
      NO_THREAD_SAFETY_ANALYSIS {
    // Verify that the reference is live.
    if (UNLIKELY(ref != NULL && !IsLive(ref))) {
      accounting::CardTable* card_table = heap_->GetCardTable();
      accounting::ObjectStack* alloc_stack = heap_->allocation_stack_.get();
      accounting::ObjectStack* live_stack = heap_->live_stack_.get();

      if (obj != NULL) {
        byte* card_addr = card_table->CardFromAddr(obj);
        LOG(ERROR) << "Object " << obj << " references dead object " << ref << " at offset " << offset
                   << "\nIsDirty = " << (*card_addr == accounting::CardTable::kCardDirty)
                   << "\nObj type " << PrettyTypeOf(obj)
                   << "\nRef type " << PrettyTypeOf(ref);
        card_table->CheckAddrIsInCardTable(reinterpret_cast<const byte*>(obj));
        void* cover_begin = card_table->AddrFromCard(card_addr);
        void* cover_end = reinterpret_cast<void*>(reinterpret_cast<size_t>(cover_begin) +
            accounting::CardTable::kCardSize);
        LOG(ERROR) << "Card " << reinterpret_cast<void*>(card_addr) << " covers " << cover_begin
                   << "-" << cover_end;
        accounting::SpaceBitmap* bitmap = heap_->GetLiveBitmap()->GetContinuousSpaceBitmap(obj);

        // Print out how the object is live.
        if (bitmap != NULL && bitmap->Test(obj)) {
          LOG(ERROR) << "Object " << obj << " found in live bitmap";
        }
        if (alloc_stack->ContainsSorted(const_cast<mirror::Object*>(obj))) {
          LOG(ERROR) << "Object " << obj << " found in allocation stack";
        }
        if (live_stack->ContainsSorted(const_cast<mirror::Object*>(obj))) {
          LOG(ERROR) << "Object " << obj << " found in live stack";
        }
        // Attempt to see if the card table missed the reference.
        ScanVisitor scan_visitor;
        byte* byte_cover_begin = reinterpret_cast<byte*>(card_table->AddrFromCard(card_addr));
        card_table->Scan(bitmap, byte_cover_begin,
                         byte_cover_begin + accounting::CardTable::kCardSize, scan_visitor);

        // Search to see if any of the roots reference our object.
        void* arg = const_cast<void*>(reinterpret_cast<const void*>(obj));
        Runtime::Current()->VisitRoots(&RootMatchesObjectVisitor, arg, false, false);

        // Search to see if any of the roots reference our reference.
        arg = const_cast<void*>(reinterpret_cast<const void*>(ref));
        Runtime::Current()->VisitRoots(&RootMatchesObjectVisitor, arg, false, false);
      } else {
        LOG(ERROR) << "Root references dead object " << ref << "\nRef type " << PrettyTypeOf(ref);
      }
      if (alloc_stack->ContainsSorted(const_cast<mirror::Object*>(ref))) {
        LOG(ERROR) << "Reference " << ref << " found in allocation stack!";
      }
      if (live_stack->ContainsSorted(const_cast<mirror::Object*>(ref))) {
        LOG(ERROR) << "Reference " << ref << " found in live stack!";
      }
      heap_->image_mod_union_table_->Dump(LOG(ERROR) << "Image mod-union table: ");
      heap_->zygote_mod_union_table_->Dump(LOG(ERROR) << "Zygote mod-union table: ");
      failed_ = true;
    }
  }

  bool IsLive(const mirror::Object* obj) const NO_THREAD_SAFETY_ANALYSIS {
    return heap_->IsLiveObjectLocked(obj);
  }

  static void VerifyRoots(const mirror::Object* root, void* arg) {
    VerifyReferenceVisitor* visitor = reinterpret_cast<VerifyReferenceVisitor*>(arg);
    (*visitor)(NULL, root, MemberOffset(0), true);
  }

 private:
  Heap* const heap_;
  mutable bool failed_;
};

// Verify all references within an object, for use with HeapBitmap::Visit.
class VerifyObjectVisitor {
 public:
  explicit VerifyObjectVisitor(Heap* heap) : heap_(heap), failed_(false) {}

  void operator()(const mirror::Object* obj) const
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
    // Note: we are verifying the references in obj but not obj itself; this is because obj must
    // be live or else how did we find it in the live bitmap?
    VerifyReferenceVisitor visitor(heap_);
    collector::MarkSweep::VisitObjectReferences(obj, visitor);
    failed_ = failed_ || visitor.Failed();
  }

  bool Failed() const {
    return failed_;
  }

 private:
  Heap* const heap_;
  mutable bool failed_;
};

// Must do this with mutators suspended since we are directly accessing the allocation stacks.
bool Heap::VerifyHeapReferences() {
  Locks::mutator_lock_->AssertExclusiveHeld(Thread::Current());
  // Let's sort our allocation stacks so that we can efficiently binary search them.
  allocation_stack_->Sort();
  live_stack_->Sort();
  // Perform the verification.
  VerifyObjectVisitor visitor(this);
  Runtime::Current()->VisitRoots(VerifyReferenceVisitor::VerifyRoots, &visitor, false, false);
  GetLiveBitmap()->Visit(visitor);
  // We don't want to verify the objects in the allocation stack since they themselves may be
  // pointing to dead objects if they are not reachable.
  if (visitor.Failed()) {
    DumpSpaces();
    return false;
  }
  return true;
}

class VerifyReferenceCardVisitor {
 public:
  VerifyReferenceCardVisitor(Heap* heap, bool* failed)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_,
                            Locks::heap_bitmap_lock_)
      : heap_(heap), failed_(failed) {
  }

  // TODO: Fix lock analysis to not use NO_THREAD_SAFETY_ANALYSIS, requires support for
  // annotalysis on visitors.
  void operator()(const mirror::Object* obj, const mirror::Object* ref, const MemberOffset& offset,
                  bool is_static) const NO_THREAD_SAFETY_ANALYSIS {
    // Filter out class references since changing an object's class does not mark the card as dirty.
    // Also handles large objects, since the only reference they hold is a class reference.
    if (ref != NULL && !ref->IsClass()) {
      accounting::CardTable* card_table = heap_->GetCardTable();
      // If the object is not dirty and it is referencing something in the live stack other than
      // class, then it must be on a dirty card.
      if (!card_table->AddrIsInCardTable(obj)) {
        LOG(ERROR) << "Object " << obj << " is not in the address range of the card table";
        *failed_ = true;
      } else if (!card_table->IsDirty(obj)) {
        // Card should be either kCardDirty if it got re-dirtied after we aged it, or
        // kCardDirty - 1 if it didn't get touched since we aged it.
        accounting::ObjectStack* live_stack = heap_->live_stack_.get();
        if (live_stack->ContainsSorted(const_cast<mirror::Object*>(ref))) {
          if (live_stack->ContainsSorted(const_cast<mirror::Object*>(obj))) {
            LOG(ERROR) << "Object " << obj << " found in live stack";
          }
          if (heap_->GetLiveBitmap()->Test(obj)) {
            LOG(ERROR) << "Object " << obj << " found in live bitmap";
          }
          LOG(ERROR) << "Object " << obj << " " << PrettyTypeOf(obj)
                     << " references " << ref << " " << PrettyTypeOf(ref) << " in live stack";

          // Print which field of the object is dead.
          if (!obj->IsObjectArray()) {
            const mirror::Class* klass = is_static ? obj->AsClass() : obj->GetClass();
            CHECK(klass != NULL);
            const mirror::ObjectArray<mirror::Field>* fields = is_static ? klass->GetSFields()
                                                                         : klass->GetIFields();
            CHECK(fields != NULL);
            for (int32_t i = 0; i < fields->GetLength(); ++i) {
              const mirror::Field* cur = fields->Get(i);
              if (cur->GetOffset().Int32Value() == offset.Int32Value()) {
                LOG(ERROR) << (is_static ? "Static " : "") << "field in the live stack is "
                           << PrettyField(cur);
                break;
              }
            }
          } else {
            const mirror::ObjectArray<mirror::Object>* object_array =
                obj->AsObjectArray<mirror::Object>();
            for (int32_t i = 0; i < object_array->GetLength(); ++i) {
              if (object_array->Get(i) == ref) {
                LOG(ERROR) << (is_static ? "Static " : "") << "obj[" << i << "] = ref";
              }
            }
          }

          *failed_ = true;
        }
      }
    }
  }

 private:
  Heap* const heap_;
  bool* const failed_;
};

class VerifyLiveStackReferences {
 public:
  explicit VerifyLiveStackReferences(Heap* heap)
      : heap_(heap),
        failed_(false) {}

  void operator()(const mirror::Object* obj) const
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
    VerifyReferenceCardVisitor visitor(heap_, const_cast<bool*>(&failed_));
    collector::MarkSweep::VisitObjectReferences(obj, visitor);
  }

  bool Failed() const {
    return failed_;
  }

 private:
  Heap* const heap_;
  bool failed_;
};

bool Heap::VerifyMissingCardMarks() {
  Locks::mutator_lock_->AssertExclusiveHeld(Thread::Current());

  // We need to sort the live stack since we binary search it.
  live_stack_->Sort();
  VerifyLiveStackReferences visitor(this);
  GetLiveBitmap()->Visit(visitor);

  // We can verify objects in the live stack since none of these should reference dead objects.
  for (mirror::Object** it = live_stack_->Begin(); it != live_stack_->End(); ++it) {
    visitor(*it);
  }

  if (visitor.Failed()) {
    DumpSpaces();
    return false;
  }
  return true;
}

void Heap::SwapStacks() {
  allocation_stack_.swap(live_stack_);

  // Sort the live stack so that we can quickly binary search it later.
  if (verify_object_mode_ > kNoHeapVerification) {
    live_stack_->Sort();
  }
}

void Heap::ProcessCards(base::TimingLogger& timings) {
  // Clear cards and keep track of cards cleared in the mod-union table.
  typedef std::vector<space::ContinuousSpace*>::iterator It;
  for (It it = continuous_spaces_.begin(), end = continuous_spaces_.end(); it != end; ++it) {
    space::ContinuousSpace* space = *it;
    if (space->IsImageSpace()) {
      timings.NewSplit("ModUnionClearCards");
      image_mod_union_table_->ClearCards(space);
    } else if (space->IsZygoteSpace()) {
      timings.NewSplit("ZygoteModUnionClearCards");
      zygote_mod_union_table_->ClearCards(space);
    } else {
      // No mod union table for the AllocSpace. Age the cards so that the GC knows that these cards
      // were dirty before the GC started.
      timings.NewSplit("AllocSpaceClearCards");
      card_table_->ModifyCardsAtomic(space->Begin(), space->End(), AgeCardVisitor(), VoidFunctor());
    }
  }
}

void Heap::PreGcVerification(collector::GarbageCollector* gc) {
  ThreadList* thread_list = Runtime::Current()->GetThreadList();
  Thread* self = Thread::Current();

  if (verify_pre_gc_heap_) {
    thread_list->SuspendAll();
    {
      ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
      if (!VerifyHeapReferences()) {
        LOG(FATAL) << "Pre " << gc->GetName() << " heap verification failed";
      }
    }
    thread_list->ResumeAll();
  }

  // Check that all objects which reference things in the live stack are on dirty cards.
  if (verify_missing_card_marks_) {
    thread_list->SuspendAll();
    {
      ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
      SwapStacks();
      // Sort the live stack so that we can quickly binary search it later.
1511      if (!VerifyMissingCardMarks()) {
1512        LOG(FATAL) << "Pre " << gc->GetName() << " missing card mark verification failed";
1513      }
1514      SwapStacks();
1515    }
1516    thread_list->ResumeAll();
1517  }
1518
1519  if (verify_mod_union_table_) {
1520    thread_list->SuspendAll();
1521    ReaderMutexLock reader_lock(self, *Locks::heap_bitmap_lock_);
1522    zygote_mod_union_table_->Update();
1523    zygote_mod_union_table_->Verify();
1524    image_mod_union_table_->Update();
1525    image_mod_union_table_->Verify();
1526    thread_list->ResumeAll();
1527  }
1528}
1529
1530void Heap::PreSweepingGcVerification(collector::GarbageCollector* gc) {
1531  ThreadList* thread_list = Runtime::Current()->GetThreadList();
1532
1533  // Called before sweeping occurs since we want to make sure we are not going to reclaim any
1534  // reachable objects.
1535  if (verify_post_gc_heap_) {
1536    Thread* self = Thread::Current();
1537    CHECK_NE(self->GetState(), kRunnable);
1538    Locks::mutator_lock_->SharedUnlock(self);
1539    thread_list->SuspendAll();
1540    {
1541      WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
1542      // Swapping bound bitmaps does nothing.
1543      gc->SwapBitmaps();
1544      if (!VerifyHeapReferences()) {
1545        LOG(FATAL) << "Post " << gc->GetName() << " GC verification failed";
1546      }
1547      gc->SwapBitmaps();
1548    }
1549    thread_list->ResumeAll();
1550    Locks::mutator_lock_->SharedLock(self);
1551  }
1552}
1553
1554void Heap::PostGcVerification(collector::GarbageCollector* gc) {
1555  Thread* self = Thread::Current();
1556
1557  if (verify_system_weaks_) {
1558    ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
1559    collector::MarkSweep* mark_sweep = down_cast<collector::MarkSweep*>(gc);
1560    mark_sweep->VerifySystemWeaks();
1561  }
1562}
1563
1564collector::GcType Heap::WaitForConcurrentGcToComplete(Thread* self) {
1565  collector::GcType last_gc_type = collector::kGcTypeNone;
1566  if (concurrent_gc_) {
1567    ATRACE_BEGIN("GC: Wait For Concurrent");
1568    bool do_wait;
1569    uint64_t wait_start = NanoTime();
1570    {
1571      // Check if GC is running holding gc_complete_lock_.
1572 MutexLock mu(self, *gc_complete_lock_); 1573 do_wait = is_gc_running_; 1574 } 1575 if (do_wait) { 1576 uint64_t wait_time; 1577 // We must wait, change thread state then sleep on gc_complete_cond_; 1578 ScopedThreadStateChange tsc(Thread::Current(), kWaitingForGcToComplete); 1579 { 1580 MutexLock mu(self, *gc_complete_lock_); 1581 while (is_gc_running_) { 1582 gc_complete_cond_->Wait(self); 1583 } 1584 last_gc_type = last_gc_type_; 1585 wait_time = NanoTime() - wait_start; 1586 total_wait_time_ += wait_time; 1587 } 1588 if (wait_time > kLongGcPauseThreshold) { 1589 LOG(INFO) << "WaitForConcurrentGcToComplete blocked for " << PrettyDuration(wait_time); 1590 } 1591 } 1592 ATRACE_END(); 1593 } 1594 return last_gc_type; 1595} 1596 1597void Heap::DumpForSigQuit(std::ostream& os) { 1598 os << "Heap: " << GetPercentFree() << "% free, " << PrettySize(GetBytesAllocated()) << "/" 1599 << PrettySize(GetTotalMemory()) << "; " << GetObjectsAllocated() << " objects\n"; 1600 DumpGcPerformanceInfo(os); 1601} 1602 1603size_t Heap::GetPercentFree() { 1604 return static_cast<size_t>(100.0f * static_cast<float>(GetFreeMemory()) / GetTotalMemory()); 1605} 1606 1607void Heap::SetIdealFootprint(size_t max_allowed_footprint) { 1608 if (max_allowed_footprint > GetMaxMemory()) { 1609 VLOG(gc) << "Clamp target GC heap from " << PrettySize(max_allowed_footprint) << " to " 1610 << PrettySize(GetMaxMemory()); 1611 max_allowed_footprint = GetMaxMemory(); 1612 } 1613 max_allowed_footprint_ = max_allowed_footprint; 1614} 1615 1616void Heap::UpdateMaxNativeFootprint() { 1617 size_t native_size = native_bytes_allocated_; 1618 // TODO: Tune the native heap utilization to be a value other than the java heap utilization. 1619 size_t target_size = native_size / GetTargetHeapUtilization(); 1620 if (target_size > native_size + max_free_) { 1621 target_size = native_size + max_free_; 1622 } else if (target_size < native_size + min_free_) { 1623 target_size = native_size + min_free_; 1624 } 1625 native_footprint_gc_watermark_ = target_size; 1626 native_footprint_limit_ = 2 * target_size - native_size; 1627} 1628 1629void Heap::GrowForUtilization(collector::GcType gc_type, uint64_t gc_duration) { 1630 // We know what our utilization is at this moment. 1631 // This doesn't actually resize any memory. It just lets the heap grow more when necessary. 1632 const size_t bytes_allocated = GetBytesAllocated(); 1633 last_gc_size_ = bytes_allocated; 1634 last_gc_time_ns_ = NanoTime(); 1635 1636 size_t target_size; 1637 if (gc_type != collector::kGcTypeSticky) { 1638 // Grow the heap for non sticky GC. 1639 target_size = bytes_allocated / GetTargetHeapUtilization(); 1640 if (target_size > bytes_allocated + max_free_) { 1641 target_size = bytes_allocated + max_free_; 1642 } else if (target_size < bytes_allocated + min_free_) { 1643 target_size = bytes_allocated + min_free_; 1644 } 1645 next_gc_type_ = collector::kGcTypeSticky; 1646 } else { 1647 // Based on how close the current heap size is to the target size, decide 1648 // whether or not to do a partial or sticky GC next. 1649 if (bytes_allocated + min_free_ <= max_allowed_footprint_) { 1650 next_gc_type_ = collector::kGcTypeSticky; 1651 } else { 1652 next_gc_type_ = collector::kGcTypePartial; 1653 } 1654 1655 // If we have freed enough memory, shrink the heap back down. 
1656    if (bytes_allocated + max_free_ < max_allowed_footprint_) {
1657      target_size = bytes_allocated + max_free_;
1658    } else {
1659      target_size = std::max(bytes_allocated, max_allowed_footprint_);
1660    }
1661  }
1662  SetIdealFootprint(target_size);
1663
1664  // Calculate when to perform the next ConcurrentGC.
1665  if (concurrent_gc_) {
1666    // Calculate the estimated GC duration.
1667    double gc_duration_seconds = NsToMs(gc_duration) / 1000.0;
1668    // Estimate how many remaining bytes we will have when we need to start the next GC.
1669    size_t remaining_bytes = allocation_rate_ * gc_duration_seconds;
1670    remaining_bytes = std::max(remaining_bytes, kMinConcurrentRemainingBytes);
1671    if (UNLIKELY(remaining_bytes > max_allowed_footprint_)) {
1672      // A situation that should never happen: from the estimated allocation rate we would allocate
1673      // more than the application's entire footprint in the time one GC takes. Schedule another GC
1674      // straight away.
1675      concurrent_start_bytes_ = bytes_allocated;
1676    } else {
1677      // Start a concurrent GC when we get close to the estimated remaining bytes. When the
1678      // allocation rate is very high, remaining_bytes could tell us that we should start a GC
1679      // right away.
1680      concurrent_start_bytes_ = std::max(max_allowed_footprint_ - remaining_bytes, bytes_allocated);
1681    }
1682    DCHECK_LE(concurrent_start_bytes_, max_allowed_footprint_);
1683    DCHECK_LE(max_allowed_footprint_, growth_limit_);
1684  }
1685
1686  UpdateMaxNativeFootprint();
1687}
1688
1689void Heap::ClearGrowthLimit() {
1690  growth_limit_ = capacity_;
1691  alloc_space_->ClearGrowthLimit();
1692}
1693
1694void Heap::SetReferenceOffsets(MemberOffset reference_referent_offset,
1695                               MemberOffset reference_queue_offset,
1696                               MemberOffset reference_queueNext_offset,
1697                               MemberOffset reference_pendingNext_offset,
1698                               MemberOffset finalizer_reference_zombie_offset) {
1699  reference_referent_offset_ = reference_referent_offset;
1700  reference_queue_offset_ = reference_queue_offset;
1701  reference_queueNext_offset_ = reference_queueNext_offset;
1702  reference_pendingNext_offset_ = reference_pendingNext_offset;
1703  finalizer_reference_zombie_offset_ = finalizer_reference_zombie_offset;
1704  CHECK_NE(reference_referent_offset_.Uint32Value(), 0U);
1705  CHECK_NE(reference_queue_offset_.Uint32Value(), 0U);
1706  CHECK_NE(reference_queueNext_offset_.Uint32Value(), 0U);
1707  CHECK_NE(reference_pendingNext_offset_.Uint32Value(), 0U);
1708  CHECK_NE(finalizer_reference_zombie_offset_.Uint32Value(), 0U);
1709}
1710
1711mirror::Object* Heap::GetReferenceReferent(mirror::Object* reference) {
1712  DCHECK(reference != NULL);
1713  DCHECK_NE(reference_referent_offset_.Uint32Value(), 0U);
1714  return reference->GetFieldObject<mirror::Object*>(reference_referent_offset_, true);
1715}
1716
1717void Heap::ClearReferenceReferent(mirror::Object* reference) {
1718  DCHECK(reference != NULL);
1719  DCHECK_NE(reference_referent_offset_.Uint32Value(), 0U);
1720  reference->SetFieldObject(reference_referent_offset_, NULL, true);
1721}
1722
1723// Returns true if the reference object has not yet been enqueued.
1724bool Heap::IsEnqueuable(const mirror::Object* ref) { 1725 DCHECK(ref != NULL); 1726 const mirror::Object* queue = 1727 ref->GetFieldObject<mirror::Object*>(reference_queue_offset_, false); 1728 const mirror::Object* queue_next = 1729 ref->GetFieldObject<mirror::Object*>(reference_queueNext_offset_, false); 1730 return (queue != NULL) && (queue_next == NULL); 1731} 1732 1733void Heap::EnqueueReference(mirror::Object* ref, mirror::Object** cleared_reference_list) { 1734 DCHECK(ref != NULL); 1735 CHECK(ref->GetFieldObject<mirror::Object*>(reference_queue_offset_, false) != NULL); 1736 CHECK(ref->GetFieldObject<mirror::Object*>(reference_queueNext_offset_, false) == NULL); 1737 EnqueuePendingReference(ref, cleared_reference_list); 1738} 1739 1740void Heap::EnqueuePendingReference(mirror::Object* ref, mirror::Object** list) { 1741 DCHECK(ref != NULL); 1742 DCHECK(list != NULL); 1743 1744 // TODO: Remove this lock, use atomic stacks for storing references. 1745 MutexLock mu(Thread::Current(), *reference_queue_lock_); 1746 if (*list == NULL) { 1747 ref->SetFieldObject(reference_pendingNext_offset_, ref, false); 1748 *list = ref; 1749 } else { 1750 mirror::Object* head = 1751 (*list)->GetFieldObject<mirror::Object*>(reference_pendingNext_offset_, false); 1752 ref->SetFieldObject(reference_pendingNext_offset_, head, false); 1753 (*list)->SetFieldObject(reference_pendingNext_offset_, ref, false); 1754 } 1755} 1756 1757mirror::Object* Heap::DequeuePendingReference(mirror::Object** list) { 1758 DCHECK(list != NULL); 1759 DCHECK(*list != NULL); 1760 mirror::Object* head = (*list)->GetFieldObject<mirror::Object*>(reference_pendingNext_offset_, 1761 false); 1762 mirror::Object* ref; 1763 1764 // Note: the following code is thread-safe because it is only called from ProcessReferences which 1765 // is single threaded. 1766 if (*list == head) { 1767 ref = *list; 1768 *list = NULL; 1769 } else { 1770 mirror::Object* next = head->GetFieldObject<mirror::Object*>(reference_pendingNext_offset_, 1771 false); 1772 (*list)->SetFieldObject(reference_pendingNext_offset_, next, false); 1773 ref = head; 1774 } 1775 ref->SetFieldObject(reference_pendingNext_offset_, NULL, false); 1776 return ref; 1777} 1778 1779void Heap::AddFinalizerReference(Thread* self, mirror::Object* object) { 1780 ScopedObjectAccess soa(self); 1781 JValue result; 1782 ArgArray arg_array(NULL, 0); 1783 arg_array.Append(reinterpret_cast<uint32_t>(object)); 1784 soa.DecodeMethod(WellKnownClasses::java_lang_ref_FinalizerReference_add)->Invoke(self, 1785 arg_array.GetArray(), arg_array.GetNumBytes(), &result, 'V'); 1786} 1787 1788void Heap::EnqueueClearedReferences(mirror::Object** cleared) { 1789 DCHECK(cleared != NULL); 1790 if (*cleared != NULL) { 1791 // When a runtime isn't started there are no reference queues to care about so ignore. 1792 if (LIKELY(Runtime::Current()->IsStarted())) { 1793 ScopedObjectAccess soa(Thread::Current()); 1794 JValue result; 1795 ArgArray arg_array(NULL, 0); 1796 arg_array.Append(reinterpret_cast<uint32_t>(*cleared)); 1797 soa.DecodeMethod(WellKnownClasses::java_lang_ref_ReferenceQueue_add)->Invoke(soa.Self(), 1798 arg_array.GetArray(), arg_array.GetNumBytes(), &result, 'V'); 1799 } 1800 *cleared = NULL; 1801 } 1802} 1803 1804void Heap::RequestConcurrentGC(Thread* self) { 1805 // Make sure that we can do a concurrent GC. 
1806 Runtime* runtime = Runtime::Current(); 1807 DCHECK(concurrent_gc_); 1808 if (runtime == NULL || !runtime->IsFinishedStarting() || 1809 !runtime->IsConcurrentGcEnabled()) { 1810 return; 1811 } 1812 { 1813 MutexLock mu(self, *Locks::runtime_shutdown_lock_); 1814 if (runtime->IsShuttingDown()) { 1815 return; 1816 } 1817 } 1818 if (self->IsHandlingStackOverflow()) { 1819 return; 1820 } 1821 1822 // We already have a request pending, no reason to start more until we update 1823 // concurrent_start_bytes_. 1824 concurrent_start_bytes_ = std::numeric_limits<size_t>::max(); 1825 1826 JNIEnv* env = self->GetJniEnv(); 1827 DCHECK(WellKnownClasses::java_lang_Daemons != NULL); 1828 DCHECK(WellKnownClasses::java_lang_Daemons_requestGC != NULL); 1829 env->CallStaticVoidMethod(WellKnownClasses::java_lang_Daemons, 1830 WellKnownClasses::java_lang_Daemons_requestGC); 1831 CHECK(!env->ExceptionCheck()); 1832} 1833 1834void Heap::ConcurrentGC(Thread* self) { 1835 { 1836 MutexLock mu(self, *Locks::runtime_shutdown_lock_); 1837 if (Runtime::Current()->IsShuttingDown()) { 1838 return; 1839 } 1840 } 1841 1842 // Wait for any GCs currently running to finish. 1843 if (WaitForConcurrentGcToComplete(self) == collector::kGcTypeNone) { 1844 CollectGarbageInternal(next_gc_type_, kGcCauseBackground, false); 1845 } 1846} 1847 1848void Heap::RequestHeapTrim() { 1849 // GC completed and now we must decide whether to request a heap trim (advising pages back to the 1850 // kernel) or not. Issuing a request will also cause trimming of the libc heap. As a trim scans 1851 // a space it will hold its lock and can become a cause of jank. 1852 // Note, the large object space self trims and the Zygote space was trimmed and unchanging since 1853 // forking. 1854 1855 // We don't have a good measure of how worthwhile a trim might be. We can't use the live bitmap 1856 // because that only marks object heads, so a large array looks like lots of empty space. We 1857 // don't just call dlmalloc all the time, because the cost of an _attempted_ trim is proportional 1858 // to utilization (which is probably inversely proportional to how much benefit we can expect). 1859 // We could try mincore(2) but that's only a measure of how many pages we haven't given away, 1860 // not how much use we're making of those pages. 1861 uint64_t ms_time = MilliTime(); 1862 float utilization = 1863 static_cast<float>(alloc_space_->GetBytesAllocated()) / alloc_space_->Size(); 1864 if ((utilization > 0.75f) || ((ms_time - last_trim_time_ms_) < 2 * 1000)) { 1865 // Don't bother trimming the alloc space if it's more than 75% utilized, or if a 1866 // heap trim occurred in the last two seconds. 1867 return; 1868 } 1869 1870 Thread* self = Thread::Current(); 1871 { 1872 MutexLock mu(self, *Locks::runtime_shutdown_lock_); 1873 Runtime* runtime = Runtime::Current(); 1874 if (runtime == NULL || !runtime->IsFinishedStarting() || runtime->IsShuttingDown()) { 1875 // Heap trimming isn't supported without a Java runtime or Daemons (such as at dex2oat time) 1876 // Also: we do not wish to start a heap trim if the runtime is shutting down (a racy check 1877 // as we don't hold the lock while requesting the trim). 1878 return; 1879 } 1880 } 1881 1882 SchedPolicy policy; 1883 get_sched_policy(self->GetTid(), &policy); 1884 if (policy == SP_FOREGROUND || policy == SP_AUDIO_APP) { 1885 // Don't trim the heap if we are a foreground or audio app. 
1886 return; 1887 } 1888 1889 last_trim_time_ms_ = ms_time; 1890 JNIEnv* env = self->GetJniEnv(); 1891 DCHECK(WellKnownClasses::java_lang_Daemons != NULL); 1892 DCHECK(WellKnownClasses::java_lang_Daemons_requestHeapTrim != NULL); 1893 env->CallStaticVoidMethod(WellKnownClasses::java_lang_Daemons, 1894 WellKnownClasses::java_lang_Daemons_requestHeapTrim); 1895 CHECK(!env->ExceptionCheck()); 1896} 1897 1898size_t Heap::Trim() { 1899 // Handle a requested heap trim on a thread outside of the main GC thread. 1900 return alloc_space_->Trim(); 1901} 1902 1903bool Heap::IsGCRequestPending() const { 1904 return concurrent_start_bytes_ != std::numeric_limits<size_t>::max(); 1905} 1906 1907void Heap::RegisterNativeAllocation(int bytes) { 1908 // Total number of native bytes allocated. 1909 native_bytes_allocated_.fetch_add(bytes); 1910 Thread* self = Thread::Current(); 1911 if (static_cast<size_t>(native_bytes_allocated_) > native_footprint_gc_watermark_) { 1912 // The second watermark is higher than the gc watermark. If you hit this it means you are 1913 // allocating native objects faster than the GC can keep up with. 1914 if (static_cast<size_t>(native_bytes_allocated_) > native_footprint_limit_) { 1915 JNIEnv* env = self->GetJniEnv(); 1916 // Can't do this in WellKnownClasses::Init since System is not properly set up at that 1917 // point. 1918 if (WellKnownClasses::java_lang_System_runFinalization == NULL) { 1919 DCHECK(WellKnownClasses::java_lang_System != NULL); 1920 WellKnownClasses::java_lang_System_runFinalization = 1921 CacheMethod(env, WellKnownClasses::java_lang_System, true, "runFinalization", "()V"); 1922 assert(WellKnownClasses::java_lang_System_runFinalization != NULL); 1923 } 1924 if (WaitForConcurrentGcToComplete(self) != collector::kGcTypeNone) { 1925 // Just finished a GC, attempt to run finalizers. 1926 env->CallStaticVoidMethod(WellKnownClasses::java_lang_System, 1927 WellKnownClasses::java_lang_System_runFinalization); 1928 CHECK(!env->ExceptionCheck()); 1929 } 1930 1931 // If we still are over the watermark, attempt a GC for alloc and run finalizers. 1932 if (static_cast<size_t>(native_bytes_allocated_) > native_footprint_limit_) { 1933 CollectGarbageInternal(collector::kGcTypePartial, kGcCauseForAlloc, false); 1934 env->CallStaticVoidMethod(WellKnownClasses::java_lang_System, 1935 WellKnownClasses::java_lang_System_runFinalization); 1936 CHECK(!env->ExceptionCheck()); 1937 } 1938 // We have just run finalizers, update the native watermark since it is very likely that 1939 // finalizers released native managed allocations. 1940 UpdateMaxNativeFootprint(); 1941 } else { 1942 if (!IsGCRequestPending()) { 1943 RequestConcurrentGC(self); 1944 } 1945 } 1946 } 1947} 1948 1949void Heap::RegisterNativeFree(int bytes) { 1950 int expected_size, new_size; 1951 do { 1952 expected_size = native_bytes_allocated_.load(); 1953 new_size = expected_size - bytes; 1954 if (new_size < 0) { 1955 ThrowRuntimeException("attempted to free %d native bytes with only %d native bytes registered as allocated", 1956 bytes, expected_size); 1957 break; 1958 } 1959 } while (!native_bytes_allocated_.compare_and_swap(expected_size, new_size)); 1960} 1961 1962int64_t Heap::GetTotalMemory() const { 1963 int64_t ret = 0; 1964 typedef std::vector<space::ContinuousSpace*>::const_iterator It; 1965 for (It it = continuous_spaces_.begin(), end = continuous_spaces_.end(); it != end; ++it) { 1966 space::ContinuousSpace* space = *it; 1967 if (space->IsImageSpace()) { 1968 // Currently don't include the image space. 
1969 } else if (space->IsDlMallocSpace()) { 1970 // Zygote or alloc space 1971 ret += space->AsDlMallocSpace()->GetFootprint(); 1972 } 1973 } 1974 typedef std::vector<space::DiscontinuousSpace*>::const_iterator It2; 1975 for (It2 it = discontinuous_spaces_.begin(), end = discontinuous_spaces_.end(); it != end; ++it) { 1976 space::DiscontinuousSpace* space = *it; 1977 if (space->IsLargeObjectSpace()) { 1978 ret += space->AsLargeObjectSpace()->GetBytesAllocated(); 1979 } 1980 } 1981 return ret; 1982} 1983 1984} // namespace gc 1985} // namespace art 1986
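The following is an illustrative sketch appended after the listing, not part of heap.cc: it restates the footprint-growth and concurrent-GC-trigger arithmetic from Heap::GrowForUtilization as standalone functions, so the clamping behaviour can be read in isolation. All names (GrowthParams, TargetFootprintAfterFullGc, ConcurrentStartBytes) are hypothetical.

#include <algorithm>
#include <cstddef>

namespace {

struct GrowthParams {
  double target_utilization;  // e.g. 0.5, as in kDefaultTargetUtilization above.
  size_t min_free;            // lower bound on headroom above bytes_allocated.
  size_t max_free;            // upper bound on headroom above bytes_allocated.
};

// New ideal footprint after a non-sticky GC: bytes_allocated scaled by the inverse of the
// target utilization, clamped so the headroom stays within [min_free, max_free].
size_t TargetFootprintAfterFullGc(size_t bytes_allocated, const GrowthParams& p) {
  size_t target = static_cast<size_t>(bytes_allocated / p.target_utilization);
  target = std::min(target, bytes_allocated + p.max_free);
  target = std::max(target, bytes_allocated + p.min_free);
  return target;
}

// Allocation level at which a concurrent GC should be requested, given an estimate of how
// many bytes will be allocated while one GC runs.
size_t ConcurrentStartBytes(size_t bytes_allocated, size_t max_allowed_footprint,
                            double allocation_rate_bytes_per_sec, double gc_duration_seconds,
                            size_t min_concurrent_remaining_bytes) {
  size_t remaining = static_cast<size_t>(allocation_rate_bytes_per_sec * gc_duration_seconds);
  remaining = std::max(remaining, min_concurrent_remaining_bytes);
  if (remaining > max_allowed_footprint) {
    // Estimated allocation during one GC exceeds the whole footprint: start another GC now.
    return bytes_allocated;
  }
  return std::max(max_allowed_footprint - remaining, bytes_allocated);
}

}  // namespace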
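A second sketch, also not part of heap.cc: the circular singly linked list that Heap::EnqueuePendingReference and Heap::DequeuePendingReference thread through the pendingNext field, rewritten with a plain struct and explicit pointers. The list pointer stays on the first node that was enqueued; its pending_next points at the most recently enqueued node, so dequeue pops in LIFO order. PendingRef and the function names are hypothetical.

#include <cassert>
#include <cstddef>

struct PendingRef {
  PendingRef* pending_next = nullptr;  // stands in for reference_pendingNext_offset_.
};

void EnqueuePending(PendingRef* ref, PendingRef** list) {
  if (*list == nullptr) {
    ref->pending_next = ref;  // A single-element ring points at itself.
    *list = ref;
  } else {
    PendingRef* head = (*list)->pending_next;
    ref->pending_next = head;
    (*list)->pending_next = ref;
  }
}

PendingRef* DequeuePending(PendingRef** list) {
  assert(*list != nullptr);
  PendingRef* head = (*list)->pending_next;
  PendingRef* ref;
  if (*list == head) {
    // Last element in the ring: empty the list.
    ref = *list;
    *list = nullptr;
  } else {
    (*list)->pending_next = head->pending_next;
    ref = head;
  }
  ref->pending_next = nullptr;  // No longer pending, so it becomes enqueuable again.
  return ref;
}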
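A final sketch, again not part of heap.cc: the lock-free decrement loop in Heap::RegisterNativeFree, restated with std::atomic. The compare-exchange retries until it succeeds and refuses to drive the counter negative; the error path here simply reports and returns false instead of throwing, and the names are hypothetical.

#include <atomic>
#include <cstdio>

std::atomic<int> native_bytes_allocated{0};

bool UnregisterNativeBytes(int bytes) {
  int expected = native_bytes_allocated.load();
  int desired;
  do {
    desired = expected - bytes;
    if (desired < 0) {
      // Mirrors the ThrowRuntimeException path: more bytes freed than were registered.
      std::fprintf(stderr, "attempted to free %d native bytes with only %d registered\n",
                   bytes, expected);
      return false;
    }
    // On failure, compare_exchange_weak reloads 'expected' and the loop recomputes 'desired'.
  } while (!native_bytes_allocated.compare_exchange_weak(expected, desired));
  return true;
}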