heap.cc revision e0a53e99e2a01f8668d6616c3cec7e2f5a711286
/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "heap.h"

#define ATRACE_TAG ATRACE_TAG_DALVIK
#include <cutils/trace.h>

#include <limits>
#include <vector>

#include "base/stl_util.h"
#include "common_throws.h"
#include "cutils/sched_policy.h"
#include "debugger.h"
#include "gc/accounting/atomic_stack.h"
#include "gc/accounting/card_table-inl.h"
#include "gc/accounting/heap_bitmap-inl.h"
#include "gc/accounting/mod_union_table-inl.h"
#include "gc/accounting/space_bitmap-inl.h"
#include "gc/collector/mark_sweep-inl.h"
#include "gc/collector/partial_mark_sweep.h"
#include "gc/collector/sticky_mark_sweep.h"
#include "gc/space/image_space.h"
#include "gc/space/large_object_space.h"
#include "gc/space/space-inl.h"
#include "image.h"
#include "invoke_arg_array_builder.h"
#include "mirror/class-inl.h"
#include "mirror/field-inl.h"
#include "mirror/object.h"
#include "mirror/object-inl.h"
#include "mirror/object_array-inl.h"
#include "object_utils.h"
#include "os.h"
#include "ScopedLocalRef.h"
#include "scoped_thread_state_change.h"
#include "sirt_ref.h"
#include "thread_list.h"
#include "UniquePtr.h"
#include "well_known_classes.h"

namespace art {
namespace gc {

// When to create a log message about a slow GC, 100ms.
static const uint64_t kSlowGcThreshold = MsToNs(100);
// When to create a log message about a long pause, 5ms.
static const uint64_t kLongGcPauseThreshold = MsToNs(5);
static const bool kGCALotMode = false;
static const size_t kGcAlotInterval = KB;
static const bool kDumpGcPerformanceOnShutdown = false;
// Minimum amount of remaining bytes before a concurrent GC is triggered.
static const size_t kMinConcurrentRemainingBytes = 128 * KB;
const double Heap::kDefaultTargetUtilization = 0.5;

Heap::Heap(size_t initial_size, size_t growth_limit, size_t min_free, size_t max_free,
           double target_utilization, size_t capacity, const std::string& original_image_file_name,
           bool concurrent_gc, size_t num_gc_threads, bool low_memory_mode)
    : alloc_space_(NULL),
      card_table_(NULL),
      concurrent_gc_(concurrent_gc),
      num_gc_threads_(num_gc_threads),
      low_memory_mode_(low_memory_mode),
      have_zygote_space_(false),
      reference_queue_lock_(NULL),
      is_gc_running_(false),
      last_gc_type_(collector::kGcTypeNone),
      next_gc_type_(collector::kGcTypePartial),
      capacity_(capacity),
      growth_limit_(growth_limit),
      max_allowed_footprint_(initial_size),
      native_footprint_gc_watermark_(initial_size),
      native_footprint_limit_(2 * initial_size),
      concurrent_start_bytes_(concurrent_gc ?
          initial_size - (kMinConcurrentRemainingBytes)
          : std::numeric_limits<size_t>::max()),
      total_bytes_freed_ever_(0),
      total_objects_freed_ever_(0),
      large_object_threshold_(3 * kPageSize),
      num_bytes_allocated_(0),
      native_bytes_allocated_(0),
      process_state_(PROCESS_STATE_TOP),
      gc_memory_overhead_(0),
      verify_missing_card_marks_(false),
      verify_system_weaks_(false),
      verify_pre_gc_heap_(false),
      verify_post_gc_heap_(false),
      verify_mod_union_table_(false),
      min_alloc_space_size_for_sticky_gc_(2 * MB),
      min_remaining_space_for_sticky_gc_(1 * MB),
      last_trim_time_ms_(0),
      allocation_rate_(0),
      /* For GC-a-lot mode, we limit the allocation stacks to be kGcAlotInterval allocations. This
       * causes a lot of GC since we do a GC for alloc whenever the stack is full. When heap
       * verification is enabled, we limit the size of allocation stacks to speed up their
       * searching.
       */
      max_allocation_stack_size_(kGCALotMode ? kGcAlotInterval
          : (kDesiredHeapVerification > kNoHeapVerification) ? KB : MB),
      reference_referent_offset_(0),
      reference_queue_offset_(0),
      reference_queueNext_offset_(0),
      reference_pendingNext_offset_(0),
      finalizer_reference_zombie_offset_(0),
      min_free_(min_free),
      max_free_(max_free),
      target_utilization_(target_utilization),
      total_wait_time_(0),
      measure_allocation_time_(false),
      total_allocation_time_(0),
      verify_object_mode_(kHeapVerificationNotPermitted) {
  if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
    LOG(INFO) << "Heap() entering";
  }

  live_bitmap_.reset(new accounting::HeapBitmap(this));
  mark_bitmap_.reset(new accounting::HeapBitmap(this));

  // Requested begin for the alloc space, to follow the mapped image and oat files.
  byte* requested_alloc_space_begin = NULL;
  std::string image_file_name(original_image_file_name);
  if (!image_file_name.empty()) {
    space::ImageSpace* image_space = space::ImageSpace::Create(image_file_name);
    CHECK(image_space != NULL) << "Failed to create space for " << image_file_name;
    AddContinuousSpace(image_space);
    // Oat files referenced by image files immediately follow them in memory; ensure the alloc
    // space isn't going to get in the middle.
    byte* oat_file_end_addr = image_space->GetImageHeader().GetOatFileEnd();
    CHECK_GT(oat_file_end_addr, image_space->End());
    if (oat_file_end_addr > requested_alloc_space_begin) {
      requested_alloc_space_begin =
          reinterpret_cast<byte*>(RoundUp(reinterpret_cast<uintptr_t>(oat_file_end_addr),
                                          kPageSize));
    }
  }

  // Allocate the large object space.
  const bool kUseFreeListSpaceForLOS = false;
  if (kUseFreeListSpaceForLOS) {
    large_object_space_ = space::FreeListSpace::Create("large object space", NULL, capacity);
  } else {
    large_object_space_ = space::LargeObjectMapSpace::Create("large object space");
  }
  CHECK(large_object_space_ != NULL) << "Failed to create large object space";
  AddDiscontinuousSpace(large_object_space_);

  alloc_space_ = space::DlMallocSpace::Create(Runtime::Current()->IsZygote() ? "zygote space" : "alloc space",
                                              initial_size,
                                              growth_limit, capacity,
                                              requested_alloc_space_begin);
  CHECK(alloc_space_ != NULL) << "Failed to create alloc space";
  alloc_space_->SetFootprintLimit(alloc_space_->Capacity());
  AddContinuousSpace(alloc_space_);

  // Compute heap capacity. Continuous spaces are sorted in order of Begin().
  byte* heap_begin = continuous_spaces_.front()->Begin();
  size_t heap_capacity = continuous_spaces_.back()->End() - continuous_spaces_.front()->Begin();
  if (continuous_spaces_.back()->IsDlMallocSpace()) {
    heap_capacity += continuous_spaces_.back()->AsDlMallocSpace()->NonGrowthLimitCapacity();
  }

  // Mark image objects in the live bitmap.
  // TODO: C++0x
  typedef std::vector<space::ContinuousSpace*>::iterator It;
  for (It it = continuous_spaces_.begin(); it != continuous_spaces_.end(); ++it) {
    space::ContinuousSpace* space = *it;
    if (space->IsImageSpace()) {
      space::ImageSpace* image_space = space->AsImageSpace();
      image_space->RecordImageAllocations(image_space->GetLiveBitmap());
    }
  }

  // Allocate the card table.
  card_table_.reset(accounting::CardTable::Create(heap_begin, heap_capacity));
  CHECK(card_table_.get() != NULL) << "Failed to create card table";

  image_mod_union_table_.reset(new accounting::ModUnionTableToZygoteAllocspace(this));
  CHECK(image_mod_union_table_.get() != NULL) << "Failed to create image mod-union table";

  zygote_mod_union_table_.reset(new accounting::ModUnionTableCardCache(this));
  CHECK(zygote_mod_union_table_.get() != NULL) << "Failed to create Zygote mod-union table";

  // TODO: Count objects in the image space here.
  num_bytes_allocated_ = 0;

  // Default mark stack size in bytes.
  static const size_t default_mark_stack_size = 64 * KB;
  mark_stack_.reset(accounting::ObjectStack::Create("mark stack", default_mark_stack_size));
  allocation_stack_.reset(accounting::ObjectStack::Create("allocation stack",
                                                          max_allocation_stack_size_));
  live_stack_.reset(accounting::ObjectStack::Create("live stack",
                                                    max_allocation_stack_size_));

  // It's still too early to take a lock because there are no threads yet, but we can create locks
  // now. We don't create it earlier to make it clear that you can't use locks during heap
  // initialization.
  gc_complete_lock_ = new Mutex("GC complete lock");
  gc_complete_cond_.reset(new ConditionVariable("GC complete condition variable",
                                                *gc_complete_lock_));

  // Create the reference queue lock; this is required for parallel object scanning in the GC.
  reference_queue_lock_ = new Mutex("reference queue lock");

  last_gc_time_ns_ = NanoTime();
  last_gc_size_ = GetBytesAllocated();

  // Create our garbage collectors.
  for (size_t i = 0; i < 2; ++i) {
    const bool concurrent = i != 0;
    mark_sweep_collectors_.push_back(new collector::MarkSweep(this, concurrent));
    mark_sweep_collectors_.push_back(new collector::PartialMarkSweep(this, concurrent));
    mark_sweep_collectors_.push_back(new collector::StickyMarkSweep(this, concurrent));
  }

  CHECK_NE(max_allowed_footprint_, 0U);
  if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
    LOG(INFO) << "Heap() exiting";
  }
}

void Heap::CreateThreadPool() {
  thread_pool_.reset(new ThreadPool(num_gc_threads_));
}

void Heap::DeleteThreadPool() {
  thread_pool_.reset(NULL);
}

// Sort spaces based on begin address.
struct ContinuousSpaceSorter {
  bool operator()(const space::ContinuousSpace* a, const space::ContinuousSpace* b) const {
    return a->Begin() < b->Begin();
  }
};

void Heap::UpdateProcessState(ProcessState process_state) {
  process_state_ = process_state;
}

void Heap::AddContinuousSpace(space::ContinuousSpace* space) {
  WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
  DCHECK(space != NULL);
  DCHECK(space->GetLiveBitmap() != NULL);
  live_bitmap_->AddContinuousSpaceBitmap(space->GetLiveBitmap());
  DCHECK(space->GetMarkBitmap() != NULL);
  mark_bitmap_->AddContinuousSpaceBitmap(space->GetMarkBitmap());
  continuous_spaces_.push_back(space);
  if (space->IsDlMallocSpace() && !space->IsLargeObjectSpace()) {
    alloc_space_ = space->AsDlMallocSpace();
  }

  // Ensure that spaces remain sorted in increasing order of start address (required for CMS finger).
  std::sort(continuous_spaces_.begin(), continuous_spaces_.end(), ContinuousSpaceSorter());

  // Ensure that ImageSpaces < ZygoteSpaces < AllocSpaces so that we can do address based checks to
  // avoid redundant marking.
  bool seen_zygote = false, seen_alloc = false;
  typedef std::vector<space::ContinuousSpace*>::const_iterator It;
  for (It it = continuous_spaces_.begin(); it != continuous_spaces_.end(); ++it) {
    space::ContinuousSpace* space = *it;
    if (space->IsImageSpace()) {
      DCHECK(!seen_zygote);
      DCHECK(!seen_alloc);
    } else if (space->IsZygoteSpace()) {
      DCHECK(!seen_alloc);
      seen_zygote = true;
    } else if (space->IsDlMallocSpace()) {
      seen_alloc = true;
    }
  }
}

void Heap::RegisterGCAllocation(size_t bytes) {
  if (this != NULL) {
    gc_memory_overhead_.fetch_add(bytes);
  }
}

void Heap::RegisterGCDeAllocation(size_t bytes) {
  if (this != NULL) {
    gc_memory_overhead_.fetch_sub(bytes);
  }
}

void Heap::AddDiscontinuousSpace(space::DiscontinuousSpace* space) {
  WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
  DCHECK(space != NULL);
  DCHECK(space->GetLiveObjects() != NULL);
  live_bitmap_->AddDiscontinuousObjectSet(space->GetLiveObjects());
  DCHECK(space->GetMarkObjects() != NULL);
  mark_bitmap_->AddDiscontinuousObjectSet(space->GetMarkObjects());
  discontinuous_spaces_.push_back(space);
}

void Heap::DumpGcPerformanceInfo(std::ostream& os) {
  // Dump cumulative timings.
  os << "Dumping cumulative Gc timings\n";
  uint64_t total_duration = 0;

  // Dump cumulative loggers for each GC type.
  // TODO: C++0x
  uint64_t total_paused_time = 0;
  typedef std::vector<collector::MarkSweep*>::const_iterator It;
  for (It it = mark_sweep_collectors_.begin();
       it != mark_sweep_collectors_.end(); ++it) {
    collector::MarkSweep* collector = *it;
    CumulativeLogger& logger = collector->GetCumulativeTimings();
    if (logger.GetTotalNs() != 0) {
      os << Dumpable<CumulativeLogger>(logger);
      const uint64_t total_ns = logger.GetTotalNs();
      const uint64_t total_pause_ns = (*it)->GetTotalPausedTimeNs();
      double seconds = NsToMs(logger.GetTotalNs()) / 1000.0;
      const uint64_t freed_bytes = collector->GetTotalFreedBytes();
      const uint64_t freed_objects = collector->GetTotalFreedObjects();
      os << collector->GetName() << " total time: " << PrettyDuration(total_ns) << "\n"
         << collector->GetName() << " paused time: " << PrettyDuration(total_pause_ns) << "\n"
         << collector->GetName() << " freed: " << freed_objects
         << " objects with total size " << PrettySize(freed_bytes) << "\n"
         << collector->GetName() << " throughput: " << freed_objects / seconds << "/s / "
         << PrettySize(freed_bytes / seconds) << "/s\n";
      total_duration += total_ns;
      total_paused_time += total_pause_ns;
    }
  }
  uint64_t allocation_time = static_cast<uint64_t>(total_allocation_time_) * kTimeAdjust;
  size_t total_objects_allocated = GetObjectsAllocatedEver();
  size_t total_bytes_allocated = GetBytesAllocatedEver();
  if (total_duration != 0) {
    const double total_seconds = static_cast<double>(total_duration / 1000) / 1000000.0;
    os << "Total time spent in GC: " << PrettyDuration(total_duration) << "\n";
    os << "Mean GC size throughput: "
       << PrettySize(GetBytesFreedEver() / total_seconds) << "/s\n";
    os << "Mean GC object throughput: "
       << (GetObjectsFreedEver() / total_seconds) << " objects/s\n";
  }
  os << "Total number of allocations: " << total_objects_allocated << "\n";
  os << "Total bytes allocated " << PrettySize(total_bytes_allocated) << "\n";
  if (measure_allocation_time_) {
    os << "Total time spent allocating: " << PrettyDuration(allocation_time) << "\n";
    os << "Mean allocation time: " << PrettyDuration(allocation_time / total_objects_allocated)
       << "\n";
  }
  os << "Total mutator paused time: " << PrettyDuration(total_paused_time) << "\n";
  os << "Total time waiting for GC to complete: " << PrettyDuration(total_wait_time_) << "\n";
  os << "Approximate GC data structures memory overhead: " << gc_memory_overhead_;
}

Heap::~Heap() {
  if (kDumpGcPerformanceOnShutdown) {
    DumpGcPerformanceInfo(LOG(INFO));
  }

  STLDeleteElements(&mark_sweep_collectors_);

  // If we don't reset then the mark stack complains in its destructor.
  allocation_stack_->Reset();
  live_stack_->Reset();

  VLOG(heap) << "~Heap()";
  // We can't take the heap lock here because there might be a daemon thread suspended with the
  // heap lock held. We know though that no non-daemon threads are executing, and we know that
  // all daemon threads are suspended, and we also know that the thread list has been deleted, so
  // those threads can't resume. We're the only running thread, and we can do whatever we like...
  STLDeleteElements(&continuous_spaces_);
  STLDeleteElements(&discontinuous_spaces_);
  delete gc_complete_lock_;
  delete reference_queue_lock_;
}

space::ContinuousSpace* Heap::FindContinuousSpaceFromObject(const mirror::Object* obj,
                                                            bool fail_ok) const {
  // TODO: C++0x auto
  typedef std::vector<space::ContinuousSpace*>::const_iterator It;
  for (It it = continuous_spaces_.begin(), end = continuous_spaces_.end(); it != end; ++it) {
    if ((*it)->Contains(obj)) {
      return *it;
    }
  }
  if (!fail_ok) {
    LOG(FATAL) << "object " << reinterpret_cast<const void*>(obj) << " not inside any spaces!";
  }
  return NULL;
}

space::DiscontinuousSpace* Heap::FindDiscontinuousSpaceFromObject(const mirror::Object* obj,
                                                                  bool fail_ok) const {
  // TODO: C++0x auto
  typedef std::vector<space::DiscontinuousSpace*>::const_iterator It;
  for (It it = discontinuous_spaces_.begin(), end = discontinuous_spaces_.end(); it != end; ++it) {
    if ((*it)->Contains(obj)) {
      return *it;
    }
  }
  if (!fail_ok) {
    LOG(FATAL) << "object " << reinterpret_cast<const void*>(obj) << " not inside any spaces!";
  }
  return NULL;
}

space::Space* Heap::FindSpaceFromObject(const mirror::Object* obj, bool fail_ok) const {
  space::Space* result = FindContinuousSpaceFromObject(obj, true);
  if (result != NULL) {
    return result;
  }
  return FindDiscontinuousSpaceFromObject(obj, true);
}

space::ImageSpace* Heap::GetImageSpace() const {
  // TODO: C++0x auto
  typedef std::vector<space::ContinuousSpace*>::const_iterator It;
  for (It it = continuous_spaces_.begin(), end = continuous_spaces_.end(); it != end; ++it) {
    if ((*it)->IsImageSpace()) {
      return (*it)->AsImageSpace();
    }
  }
  return NULL;
}

static void MSpaceChunkCallback(void* start, void* end, size_t used_bytes, void* arg) {
  size_t chunk_size = reinterpret_cast<uint8_t*>(end) - reinterpret_cast<uint8_t*>(start);
  if (used_bytes < chunk_size) {
    size_t chunk_free_bytes = chunk_size - used_bytes;
    size_t& max_contiguous_allocation = *reinterpret_cast<size_t*>(arg);
    max_contiguous_allocation = std::max(max_contiguous_allocation, chunk_free_bytes);
  }
}

mirror::Object* Heap::AllocObject(Thread* self, mirror::Class* c, size_t byte_count) {
  DCHECK(c == NULL || (c->IsClassClass() && byte_count >= sizeof(mirror::Class)) ||
         (c->IsVariableSize() || c->GetObjectSize() == byte_count) ||
         strlen(ClassHelper(c).GetDescriptor()) == 0);
  DCHECK_GE(byte_count, sizeof(mirror::Object));

  mirror::Object* obj = NULL;
  size_t size = 0;
  uint64_t allocation_start = 0;
  if (UNLIKELY(measure_allocation_time_)) {
    allocation_start = NanoTime() / kTimeAdjust;
  }

  // We need to have a zygote space or else our newly allocated large object can end up in the
  // Zygote resulting in it being prematurely freed.
  // We can only do this for primitive objects since large objects will not be within the card table
  // range. This also means that we rely on SetClass not dirtying the object's card.
  bool large_object_allocation =
      byte_count >= large_object_threshold_ && have_zygote_space_ && c->IsPrimitiveArray();
  if (UNLIKELY(large_object_allocation)) {
    size = RoundUp(byte_count, kPageSize);
    obj = Allocate(self, large_object_space_, size);
    // Make sure that our large object didn't get placed anywhere within the space interval or else
    // it breaks the immune range.
    DCHECK(obj == NULL ||
           reinterpret_cast<byte*>(obj) < continuous_spaces_.front()->Begin() ||
           reinterpret_cast<byte*>(obj) >= continuous_spaces_.back()->End());
  } else {
    obj = Allocate(self, alloc_space_, byte_count);

    // Ensure that we did not allocate into a zygote space.
    DCHECK(obj == NULL || !have_zygote_space_ || !FindSpaceFromObject(obj, false)->IsZygoteSpace());
    size = alloc_space_->AllocationSize(obj);
  }

  if (LIKELY(obj != NULL)) {
    obj->SetClass(c);

    // Record the allocation after the SetClass, since we want the atomic add to act as the fence
    // guarding SetClass: we do not want the class to appear NULL in another thread.
    RecordAllocation(size, obj);

    if (Dbg::IsAllocTrackingEnabled()) {
      Dbg::RecordAllocation(c, byte_count);
    }
    if (static_cast<size_t>(num_bytes_allocated_) >= concurrent_start_bytes_) {
      // The SirtRef is necessary since the calls in RequestConcurrentGC are a safepoint.
      SirtRef<mirror::Object> ref(self, obj);
      RequestConcurrentGC(self);
    }
    VerifyObject(obj);

    if (UNLIKELY(measure_allocation_time_)) {
      total_allocation_time_.fetch_add(NanoTime() / kTimeAdjust - allocation_start);
    }

    return obj;
  } else {
    std::ostringstream oss;
    int64_t total_bytes_free = GetFreeMemory();
    oss << "Failed to allocate a " << byte_count << " byte allocation with " << total_bytes_free
        << " free bytes";
    // If the allocation failed due to fragmentation, print out the largest continuous allocation.
    if (!large_object_allocation && total_bytes_free >= byte_count) {
      size_t max_contiguous_allocation = 0;
      // TODO: C++0x auto
      typedef std::vector<space::ContinuousSpace*>::const_iterator It;
      for (It it = continuous_spaces_.begin(), end = continuous_spaces_.end(); it != end; ++it) {
        space::ContinuousSpace* space = *it;
        if (space->IsDlMallocSpace()) {
          space->AsDlMallocSpace()->Walk(MSpaceChunkCallback, &max_contiguous_allocation);
        }
      }
      oss << "; failed due to fragmentation (largest possible contiguous allocation "
          << max_contiguous_allocation << " bytes)";
    }
    self->ThrowOutOfMemoryError(oss.str().c_str());
    return NULL;
  }
}

bool Heap::IsHeapAddress(const mirror::Object* obj) {
  // Note: we deliberately don't take the lock here, and mustn't test anything that would
  // require taking the lock.
  if (obj == NULL) {
    return true;
  }
  if (UNLIKELY(!IsAligned<kObjectAlignment>(obj))) {
    return false;
  }
  return FindSpaceFromObject(obj, true) != NULL;
}

bool Heap::IsLiveObjectLocked(const mirror::Object* obj) {
  // Locks::heap_bitmap_lock_->AssertReaderHeld(Thread::Current());
  if (obj == NULL || UNLIKELY(!IsAligned<kObjectAlignment>(obj))) {
    return false;
  }
  space::ContinuousSpace* c_space = FindContinuousSpaceFromObject(obj, true);
  space::DiscontinuousSpace* d_space = NULL;
  if (c_space != NULL) {
    if (c_space->GetLiveBitmap()->Test(obj)) {
      return true;
    }
  } else {
    d_space = FindDiscontinuousSpaceFromObject(obj, true);
    if (d_space != NULL) {
      if (d_space->GetLiveObjects()->Test(obj)) {
        return true;
      }
    }
  }
  // This is covering the allocation/live stack swapping that is done without mutators suspended.
  for (size_t i = 0; i < 5; ++i) {
    if (allocation_stack_->Contains(const_cast<mirror::Object*>(obj)) ||
        live_stack_->Contains(const_cast<mirror::Object*>(obj))) {
      return true;
    }
    NanoSleep(MsToNs(10));
  }
  // We need to check the bitmaps again since there is a race where we mark something as live and
  // then clear the stack containing it.
  if (c_space != NULL) {
    if (c_space->GetLiveBitmap()->Test(obj)) {
      return true;
    }
  } else {
    d_space = FindDiscontinuousSpaceFromObject(obj, true);
    if (d_space != NULL && d_space->GetLiveObjects()->Test(obj)) {
      return true;
    }
  }
  return false;
}

void Heap::VerifyObjectImpl(const mirror::Object* obj) {
  if (Thread::Current() == NULL ||
      Runtime::Current()->GetThreadList()->GetLockOwner() == Thread::Current()->GetTid()) {
    return;
  }
  VerifyObjectBody(obj);
}

void Heap::DumpSpaces() {
  // TODO: C++0x auto
  typedef std::vector<space::ContinuousSpace*>::const_iterator It;
  for (It it = continuous_spaces_.begin(), end = continuous_spaces_.end(); it != end; ++it) {
    space::ContinuousSpace* space = *it;
    accounting::SpaceBitmap* live_bitmap = space->GetLiveBitmap();
    accounting::SpaceBitmap* mark_bitmap = space->GetMarkBitmap();
    LOG(INFO) << space << " " << *space << "\n"
              << live_bitmap << " " << *live_bitmap << "\n"
              << mark_bitmap << " " << *mark_bitmap;
  }
  typedef std::vector<space::DiscontinuousSpace*>::const_iterator It2;
  for (It2 it = discontinuous_spaces_.begin(), end = discontinuous_spaces_.end(); it != end; ++it) {
    space::DiscontinuousSpace* space = *it;
    LOG(INFO) << space << " " << *space << "\n";
  }
}

void Heap::VerifyObjectBody(const mirror::Object* obj) {
  if (UNLIKELY(!IsAligned<kObjectAlignment>(obj))) {
    LOG(FATAL) << "Object isn't aligned: " << obj;
  }
  if (UNLIKELY(GetObjectsAllocated() <= 10)) {  // Ignore early dawn of the universe verifications.
    return;
  }
  const byte* raw_addr = reinterpret_cast<const byte*>(obj) +
      mirror::Object::ClassOffset().Int32Value();
  const mirror::Class* c = *reinterpret_cast<mirror::Class* const *>(raw_addr);
  if (UNLIKELY(c == NULL)) {
    LOG(FATAL) << "Null class in object: " << obj;
  } else if (UNLIKELY(!IsAligned<kObjectAlignment>(c))) {
    LOG(FATAL) << "Class isn't aligned: " << c << " in object: " << obj;
  }
  // Check obj.getClass().getClass() == obj.getClass().getClass().getClass()
  // Note: we don't use the accessors here as they have internal sanity checks
  // that we don't want to run
  raw_addr = reinterpret_cast<const byte*>(c) + mirror::Object::ClassOffset().Int32Value();
  const mirror::Class* c_c = *reinterpret_cast<mirror::Class* const *>(raw_addr);
  raw_addr = reinterpret_cast<const byte*>(c_c) + mirror::Object::ClassOffset().Int32Value();
  const mirror::Class* c_c_c = *reinterpret_cast<mirror::Class* const *>(raw_addr);
  CHECK_EQ(c_c, c_c_c);

  if (verify_object_mode_ != kVerifyAllFast) {
    // TODO: the bitmap tests below are racy if VerifyObjectBody is called without the
    // heap_bitmap_lock_.
    if (!IsLiveObjectLocked(obj)) {
      DumpSpaces();
      LOG(FATAL) << "Object is dead: " << obj;
    }
    if (!IsLiveObjectLocked(c)) {
      LOG(FATAL) << "Class of object is dead: " << c << " in object: " << obj;
    }
  }
}

void Heap::VerificationCallback(mirror::Object* obj, void* arg) {
  DCHECK(obj != NULL);
  reinterpret_cast<Heap*>(arg)->VerifyObjectBody(obj);
}

void Heap::VerifyHeap() {
  ReaderMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
  GetLiveBitmap()->Walk(Heap::VerificationCallback, this);
}

void Heap::RecordAllocation(size_t size, mirror::Object* obj) {
  DCHECK(obj != NULL);
  DCHECK_GT(size, 0u);
  num_bytes_allocated_.fetch_add(size);

  if (Runtime::Current()->HasStatsEnabled()) {
    RuntimeStats* thread_stats = Thread::Current()->GetStats();
    ++thread_stats->allocated_objects;
    thread_stats->allocated_bytes += size;

    // TODO: Update these atomically.
    RuntimeStats* global_stats = Runtime::Current()->GetStats();
    ++global_stats->allocated_objects;
    global_stats->allocated_bytes += size;
  }

  // This is safe to do since the GC will never free objects which are neither in the allocation
  // stack nor the live bitmap.
  while (!allocation_stack_->AtomicPushBack(obj)) {
    CollectGarbageInternal(collector::kGcTypeSticky, kGcCauseForAlloc, false);
  }
}

void Heap::RecordFree(size_t freed_objects, size_t freed_bytes) {
  DCHECK_LE(freed_bytes, static_cast<size_t>(num_bytes_allocated_));
  num_bytes_allocated_.fetch_sub(freed_bytes);

  if (Runtime::Current()->HasStatsEnabled()) {
    RuntimeStats* thread_stats = Thread::Current()->GetStats();
    thread_stats->freed_objects += freed_objects;
    thread_stats->freed_bytes += freed_bytes;

    // TODO: Do this concurrently.
    RuntimeStats* global_stats = Runtime::Current()->GetStats();
    global_stats->freed_objects += freed_objects;
    global_stats->freed_bytes += freed_bytes;
  }
}

mirror::Object* Heap::TryToAllocate(Thread* self, space::AllocSpace* space, size_t alloc_size,
                                    bool grow) {
  // Should we try to use a CAS here and fix up num_bytes_allocated_ later with AllocationSize?
  if (num_bytes_allocated_ + alloc_size > max_allowed_footprint_) {
    // max_allowed_footprint_ <= growth_limit_ so it is safe to check in here.
    if (num_bytes_allocated_ + alloc_size > growth_limit_) {
      // Completely out of memory.
      return NULL;
    }
  }

  return space->Alloc(self, alloc_size);
}

mirror::Object* Heap::Allocate(Thread* self, space::AllocSpace* space, size_t alloc_size) {
  // Since allocation can cause a GC which will need to SuspendAll, make sure all allocations are
  // done in the runnable state where suspension is expected.
  DCHECK_EQ(self->GetState(), kRunnable);
  self->AssertThreadSuspensionIsAllowable();

  mirror::Object* ptr = TryToAllocate(self, space, alloc_size, false);
  if (ptr != NULL) {
    return ptr;
  }

  // The allocation failed. If the GC is running, block until it completes, and then retry the
  // allocation.
  collector::GcType last_gc = WaitForConcurrentGcToComplete(self);
  if (last_gc != collector::kGcTypeNone) {
    // A GC was in progress and we blocked, retry allocation now that memory has been freed.
    ptr = TryToAllocate(self, space, alloc_size, false);
    if (ptr != NULL) {
      return ptr;
    }
  }

  // Loop through our different GC types and try to GC until we get enough free memory.
  for (size_t i = static_cast<size_t>(last_gc) + 1;
       i < static_cast<size_t>(collector::kGcTypeMax); ++i) {
    bool run_gc = false;
    collector::GcType gc_type = static_cast<collector::GcType>(i);
    switch (gc_type) {
      case collector::kGcTypeSticky: {
        const size_t alloc_space_size = alloc_space_->Size();
        run_gc = alloc_space_size > min_alloc_space_size_for_sticky_gc_ &&
            alloc_space_->Capacity() - alloc_space_size >= min_remaining_space_for_sticky_gc_;
        break;
      }
      case collector::kGcTypePartial:
        run_gc = have_zygote_space_;
        break;
      case collector::kGcTypeFull:
        run_gc = true;
        break;
      default:
        break;
    }

    if (run_gc) {
      // If we actually ran a different type of GC than requested, we can skip the index forwards.
      collector::GcType gc_type_ran = CollectGarbageInternal(gc_type, kGcCauseForAlloc, false);
      DCHECK_GE(static_cast<size_t>(gc_type_ran), i);
      i = static_cast<size_t>(gc_type_ran);

      // Did we free sufficient memory for the allocation to succeed?
      ptr = TryToAllocate(self, space, alloc_size, false);
      if (ptr != NULL) {
        return ptr;
      }
    }
  }

  // Allocations have failed after GCs; this is an exceptional state.
  // Try harder, growing the heap if necessary.
  ptr = TryToAllocate(self, space, alloc_size, true);
  if (ptr != NULL) {
    return ptr;
  }

  // Most allocations should have succeeded by now, so the heap is really full, really fragmented,
  // or the requested size is really big. Do another GC, collecting SoftReferences this time. The
  // VM spec requires that all SoftReferences have been collected and cleared before throwing OOME.

  // OLD-TODO: wait for the finalizers from the previous GC to finish
  VLOG(gc) << "Forcing collection of SoftReferences for " << PrettySize(alloc_size)
           << " allocation";

  // We don't need a WaitForConcurrentGcToComplete here either.
  CollectGarbageInternal(collector::kGcTypeFull, kGcCauseForAlloc, true);
  return TryToAllocate(self, space, alloc_size, true);
}

void Heap::SetTargetHeapUtilization(float target) {
  DCHECK_GT(target, 0.0f);  // asserted in Java code
  DCHECK_LT(target, 1.0f);
  target_utilization_ = target;
}

size_t Heap::GetObjectsAllocated() const {
  size_t total = 0;
  typedef std::vector<space::ContinuousSpace*>::const_iterator It;
  for (It it = continuous_spaces_.begin(), end = continuous_spaces_.end(); it != end; ++it) {
    space::ContinuousSpace* space = *it;
    if (space->IsDlMallocSpace()) {
      total += space->AsDlMallocSpace()->GetObjectsAllocated();
    }
  }
  typedef std::vector<space::DiscontinuousSpace*>::const_iterator It2;
  for (It2 it = discontinuous_spaces_.begin(), end = discontinuous_spaces_.end(); it != end; ++it) {
    space::DiscontinuousSpace* space = *it;
    total += space->AsLargeObjectSpace()->GetObjectsAllocated();
  }
  return total;
}

size_t Heap::GetObjectsAllocatedEver() const {
  size_t total = 0;
  typedef std::vector<space::ContinuousSpace*>::const_iterator It;
  for (It it = continuous_spaces_.begin(), end = continuous_spaces_.end(); it != end; ++it) {
    space::ContinuousSpace* space = *it;
    if (space->IsDlMallocSpace()) {
      total += space->AsDlMallocSpace()->GetTotalObjectsAllocated();
    }
  }
  typedef std::vector<space::DiscontinuousSpace*>::const_iterator It2;
  for (It2 it = discontinuous_spaces_.begin(), end = discontinuous_spaces_.end(); it != end; ++it) {
    space::DiscontinuousSpace* space = *it;
    total += space->AsLargeObjectSpace()->GetTotalObjectsAllocated();
  }
  return total;
}

size_t Heap::GetBytesAllocatedEver() const {
  size_t total = 0;
  typedef std::vector<space::ContinuousSpace*>::const_iterator It;
  for (It it = continuous_spaces_.begin(), end = continuous_spaces_.end(); it != end; ++it) {
    space::ContinuousSpace* space = *it;
    if (space->IsDlMallocSpace()) {
      total += space->AsDlMallocSpace()->GetTotalBytesAllocated();
    }
  }
  typedef std::vector<space::DiscontinuousSpace*>::const_iterator It2;
  for (It2 it = discontinuous_spaces_.begin(), end = discontinuous_spaces_.end(); it != end; ++it) {
    space::DiscontinuousSpace* space = *it;
    total += space->AsLargeObjectSpace()->GetTotalBytesAllocated();
  }
  return total;
}

class InstanceCounter {
 public:
  InstanceCounter(const std::vector<mirror::Class*>& classes, bool use_is_assignable_from, uint64_t* counts)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      : classes_(classes), use_is_assignable_from_(use_is_assignable_from), counts_(counts) {
  }

  void operator()(const mirror::Object* o) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    for (size_t i = 0; i < classes_.size(); ++i) {
      const mirror::Class* instance_class = o->GetClass();
      if (use_is_assignable_from_) {
        if (instance_class != NULL && classes_[i]->IsAssignableFrom(instance_class)) {
          ++counts_[i];
        }
      } else {
        if (instance_class == classes_[i]) {
          ++counts_[i];
        }
      }
    }
  }

 private:
  const std::vector<mirror::Class*>& classes_;
  bool use_is_assignable_from_;
  uint64_t* const counts_;

  DISALLOW_COPY_AND_ASSIGN(InstanceCounter);
};

void Heap::CountInstances(const std::vector<mirror::Class*>& classes, bool use_is_assignable_from,
                          uint64_t* counts) {
  // We only want reachable instances, so do a GC. This also ensures that the alloc stack
  // is empty, so the live bitmap is the only place we need to look.
  Thread* self = Thread::Current();
  self->TransitionFromRunnableToSuspended(kNative);
  CollectGarbage(false);
  self->TransitionFromSuspendedToRunnable();

  InstanceCounter counter(classes, use_is_assignable_from, counts);
  ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
  GetLiveBitmap()->Visit(counter);
}

class InstanceCollector {
 public:
  InstanceCollector(mirror::Class* c, int32_t max_count, std::vector<mirror::Object*>& instances)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      : class_(c), max_count_(max_count), instances_(instances) {
  }

  void operator()(const mirror::Object* o) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    const mirror::Class* instance_class = o->GetClass();
    if (instance_class == class_) {
      if (max_count_ == 0 || instances_.size() < max_count_) {
        instances_.push_back(const_cast<mirror::Object*>(o));
      }
    }
  }

 private:
  mirror::Class* class_;
  uint32_t max_count_;
  std::vector<mirror::Object*>& instances_;

  DISALLOW_COPY_AND_ASSIGN(InstanceCollector);
};

void Heap::GetInstances(mirror::Class* c, int32_t max_count,
                        std::vector<mirror::Object*>& instances) {
  // We only want reachable instances, so do a GC. This also ensures that the alloc stack
  // is empty, so the live bitmap is the only place we need to look.
  Thread* self = Thread::Current();
  self->TransitionFromRunnableToSuspended(kNative);
  CollectGarbage(false);
  self->TransitionFromSuspendedToRunnable();

  InstanceCollector collector(c, max_count, instances);
  ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
  GetLiveBitmap()->Visit(collector);
}

class ReferringObjectsFinder {
 public:
  ReferringObjectsFinder(mirror::Object* object, int32_t max_count,
                         std::vector<mirror::Object*>& referring_objects)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      : object_(object), max_count_(max_count), referring_objects_(referring_objects) {
  }

  // For bitmap Visit.
  // TODO: Fix lock analysis to not use NO_THREAD_SAFETY_ANALYSIS, requires support for
  // annotalysis on visitors.
  void operator()(const mirror::Object* o) const NO_THREAD_SAFETY_ANALYSIS {
    collector::MarkSweep::VisitObjectReferences(o, *this);
  }

  // For MarkSweep::VisitObjectReferences.
  void operator()(const mirror::Object* referrer, const mirror::Object* object,
                  const MemberOffset&, bool) const {
    if (object == object_ && (max_count_ == 0 || referring_objects_.size() < max_count_)) {
      referring_objects_.push_back(const_cast<mirror::Object*>(referrer));
    }
  }

 private:
  mirror::Object* object_;
  uint32_t max_count_;
  std::vector<mirror::Object*>& referring_objects_;

  DISALLOW_COPY_AND_ASSIGN(ReferringObjectsFinder);
};

void Heap::GetReferringObjects(mirror::Object* o, int32_t max_count,
                               std::vector<mirror::Object*>& referring_objects) {
  // We only want reachable instances, so do a GC. This also ensures that the alloc stack
  // is empty, so the live bitmap is the only place we need to look.
  Thread* self = Thread::Current();
  self->TransitionFromRunnableToSuspended(kNative);
  CollectGarbage(false);
  self->TransitionFromSuspendedToRunnable();

  ReferringObjectsFinder finder(o, max_count, referring_objects);
  ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
  GetLiveBitmap()->Visit(finder);
}

void Heap::CollectGarbage(bool clear_soft_references) {
  // Even if we waited for a GC we still need to do another GC since weaks allocated during the
  // last GC will not have necessarily been cleared.
  Thread* self = Thread::Current();
  WaitForConcurrentGcToComplete(self);
  CollectGarbageInternal(collector::kGcTypeFull, kGcCauseExplicit, clear_soft_references);
}

void Heap::PreZygoteFork() {
  static Mutex zygote_creation_lock_("zygote creation lock", kZygoteCreationLock);
  // Do this before acquiring the zygote creation lock so that we don't get lock order violations.
  CollectGarbage(false);
  Thread* self = Thread::Current();
  MutexLock mu(self, zygote_creation_lock_);

  // Try to see if we have any Zygote spaces.
  if (have_zygote_space_) {
    return;
  }

  VLOG(heap) << "Starting PreZygoteFork with alloc space size " << PrettySize(alloc_space_->Size());

  {
    // Flush the alloc stack.
    WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
    FlushAllocStack();
  }

  // Turn the current alloc space into a Zygote space and obtain the new alloc space composed
  // of the remaining available heap memory.
  space::DlMallocSpace* zygote_space = alloc_space_;
  alloc_space_ = zygote_space->CreateZygoteSpace("alloc space");
  alloc_space_->SetFootprintLimit(alloc_space_->Capacity());

  // Change the GC retention policy of the zygote space to only collect when full.
  zygote_space->SetGcRetentionPolicy(space::kGcRetentionPolicyFullCollect);
  AddContinuousSpace(alloc_space_);
  have_zygote_space_ = true;

  // Reset the cumulative loggers since we now have a few additional timing phases.
  // TODO: C++0x
  typedef std::vector<collector::MarkSweep*>::const_iterator It;
  for (It it = mark_sweep_collectors_.begin(), end = mark_sweep_collectors_.end();
       it != end; ++it) {
    (*it)->ResetCumulativeStatistics();
  }
}

void Heap::FlushAllocStack() {
  MarkAllocStack(alloc_space_->GetLiveBitmap(), large_object_space_->GetLiveObjects(),
                 allocation_stack_.get());
  allocation_stack_->Reset();
}

void Heap::MarkAllocStack(accounting::SpaceBitmap* bitmap, accounting::SpaceSetMap* large_objects,
                          accounting::ObjectStack* stack) {
  mirror::Object** limit = stack->End();
  for (mirror::Object** it = stack->Begin(); it != limit; ++it) {
    const mirror::Object* obj = *it;
    DCHECK(obj != NULL);
    if (LIKELY(bitmap->HasAddress(obj))) {
      bitmap->Set(obj);
    } else {
      large_objects->Set(obj);
    }
  }
}

void Heap::UnMarkAllocStack(accounting::SpaceBitmap* bitmap, accounting::SpaceSetMap* large_objects,
                            accounting::ObjectStack* stack) {
  mirror::Object** limit = stack->End();
  for (mirror::Object** it = stack->Begin(); it != limit; ++it) {
    const mirror::Object* obj = *it;
    DCHECK(obj != NULL);
    if (LIKELY(bitmap->HasAddress(obj))) {
      bitmap->Clear(obj);
    } else {
      large_objects->Clear(obj);
    }
  }
}

collector::GcType Heap::CollectGarbageInternal(collector::GcType gc_type, GcCause gc_cause,
                                               bool clear_soft_references) {
  Thread* self = Thread::Current();

  switch (gc_cause) {
    case kGcCauseForAlloc:
      ATRACE_BEGIN("GC (alloc)");
      break;
    case kGcCauseBackground:
      ATRACE_BEGIN("GC (background)");
      break;
    case kGcCauseExplicit:
      ATRACE_BEGIN("GC (explicit)");
      break;
  }

  ScopedThreadStateChange tsc(self, kWaitingPerformingGc);
  Locks::mutator_lock_->AssertNotHeld(self);

  if (self->IsHandlingStackOverflow()) {
    LOG(WARNING) << "Performing GC on a thread that is handling a stack overflow.";
  }

  // Ensure there is only one GC at a time.
  bool start_collect = false;
  while (!start_collect) {
    {
      MutexLock mu(self, *gc_complete_lock_);
      if (!is_gc_running_) {
        is_gc_running_ = true;
        start_collect = true;
      }
    }
    if (!start_collect) {
      WaitForConcurrentGcToComplete(self);
      // TODO: if another thread beat this one to do the GC, perhaps we should just return here?
      // Not doing at the moment to ensure soft references are cleared.
    }
  }
  gc_complete_lock_->AssertNotHeld(self);

  if (gc_cause == kGcCauseForAlloc && Runtime::Current()->HasStatsEnabled()) {
    ++Runtime::Current()->GetStats()->gc_for_alloc_count;
    ++Thread::Current()->GetStats()->gc_for_alloc_count;
  }

  uint64_t gc_start_time_ns = NanoTime();
  uint64_t gc_start_size = GetBytesAllocated();
  // Approximate allocation rate in bytes / second.
  if (UNLIKELY(gc_start_time_ns == last_gc_time_ns_)) {
    LOG(WARNING) << "Timers are broken (gc_start_time == last_gc_time_).";
  }
  uint64_t ms_delta = NsToMs(gc_start_time_ns - last_gc_time_ns_);
  if (ms_delta != 0) {
    allocation_rate_ = ((gc_start_size - last_gc_size_) * 1000) / ms_delta;
    VLOG(heap) << "Allocation rate: " << PrettySize(allocation_rate_) << "/s";
  }

  if (gc_type == collector::kGcTypeSticky &&
      alloc_space_->Size() < min_alloc_space_size_for_sticky_gc_) {
    gc_type = collector::kGcTypePartial;
  }

  DCHECK_LT(gc_type, collector::kGcTypeMax);
  DCHECK_NE(gc_type, collector::kGcTypeNone);
  collector::MarkSweep* collector = NULL;
  typedef std::vector<collector::MarkSweep*>::iterator It;
  for (It it = mark_sweep_collectors_.begin(), end = mark_sweep_collectors_.end();
       it != end; ++it) {
    collector::MarkSweep* cur_collector = *it;
    if (cur_collector->IsConcurrent() == concurrent_gc_ && cur_collector->GetGcType() == gc_type) {
      collector = cur_collector;
      break;
    }
  }
  CHECK(collector != NULL)
      << "Could not find garbage collector with concurrent=" << concurrent_gc_
      << " and type=" << gc_type;
  collector->clear_soft_references_ = clear_soft_references;
  collector->Run();
  total_objects_freed_ever_ += collector->GetFreedObjects();
  total_bytes_freed_ever_ += collector->GetFreedBytes();

  const size_t duration = collector->GetDurationNs();
  std::vector<uint64_t> pauses = collector->GetPauseTimes();
  bool was_slow = duration > kSlowGcThreshold ||
      (gc_cause == kGcCauseForAlloc && duration > kLongGcPauseThreshold);
  for (size_t i = 0; i < pauses.size(); ++i) {
    if (pauses[i] > kLongGcPauseThreshold) {
      was_slow = true;
    }
  }

  if (was_slow) {
    const size_t percent_free = GetPercentFree();
    const size_t current_heap_size = GetBytesAllocated();
    const size_t total_memory = GetTotalMemory();
    std::ostringstream pause_string;
    for (size_t i = 0; i < pauses.size(); ++i) {
      pause_string << PrettyDuration((pauses[i] / 1000) * 1000)
                   << ((i != pauses.size() - 1) ? ", " : "");
    }
    LOG(INFO) << gc_cause << " " << collector->GetName()
              << "GC freed " << PrettySize(collector->GetFreedBytes()) << ", "
              << percent_free << "% free, " << PrettySize(current_heap_size) << "/"
              << PrettySize(total_memory) << ", " << "paused " << pause_string.str()
              << " total " << PrettyDuration((duration / 1000) * 1000);
    if (VLOG_IS_ON(heap)) {
      LOG(INFO) << Dumpable<base::TimingLogger>(collector->GetTimings());
    }
  }

  {
    MutexLock mu(self, *gc_complete_lock_);
    is_gc_running_ = false;
    last_gc_type_ = gc_type;
    // Wake anyone who may have been waiting for the GC to complete.
    gc_complete_cond_->Broadcast(self);
  }

  // Inform DDMS that a GC completed.
  ATRACE_END();
  Dbg::GcDidFinish();
  return gc_type;
}

void Heap::UpdateAndMarkModUnion(collector::MarkSweep* mark_sweep, base::TimingLogger& timings,
                                 collector::GcType gc_type) {
  if (gc_type == collector::kGcTypeSticky) {
    // Don't need to do anything for mod union table in this case since we are only scanning dirty
    // cards.
    return;
  }

  // Update zygote mod union table.
  if (gc_type == collector::kGcTypePartial) {
    timings.NewSplit("UpdateZygoteModUnionTable");
    zygote_mod_union_table_->Update();

    timings.NewSplit("ZygoteMarkReferences");
    zygote_mod_union_table_->MarkReferences(mark_sweep);
  }

  // Processes the cards we cleared earlier and adds their objects into the mod-union table.
  timings.NewSplit("UpdateModUnionTable");
  image_mod_union_table_->Update();

  // Scans all objects in the mod-union table.
  timings.NewSplit("MarkImageToAllocSpaceReferences");
  image_mod_union_table_->MarkReferences(mark_sweep);
}

static void RootMatchesObjectVisitor(const mirror::Object* root, void* arg) {
  mirror::Object* obj = reinterpret_cast<mirror::Object*>(arg);
  if (root == obj) {
    LOG(INFO) << "Object " << obj << " is a root";
  }
}

class ScanVisitor {
 public:
  void operator()(const mirror::Object* obj) const {
    LOG(INFO) << "Would have rescanned object " << obj;
  }
};

// Verify a reference from an object.
class VerifyReferenceVisitor {
 public:
  explicit VerifyReferenceVisitor(Heap* heap)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_)
      : heap_(heap), failed_(false) {}

  bool Failed() const {
    return failed_;
  }

  // TODO: Fix lock analysis to not use NO_THREAD_SAFETY_ANALYSIS, requires support for smarter
  // analysis on visitors.
  void operator()(const mirror::Object* obj, const mirror::Object* ref,
                  const MemberOffset& offset, bool /* is_static */) const
      NO_THREAD_SAFETY_ANALYSIS {
    // Verify that the reference is live.
    if (UNLIKELY(ref != NULL && !IsLive(ref))) {
      accounting::CardTable* card_table = heap_->GetCardTable();
      accounting::ObjectStack* alloc_stack = heap_->allocation_stack_.get();
      accounting::ObjectStack* live_stack = heap_->live_stack_.get();

      if (obj != NULL) {
        byte* card_addr = card_table->CardFromAddr(obj);
        LOG(ERROR) << "Object " << obj << " references dead object " << ref << " at offset " << offset
                   << "\nIsDirty = " << (*card_addr == accounting::CardTable::kCardDirty)
                   << "\nObj type " << PrettyTypeOf(obj)
                   << "\nRef type " << PrettyTypeOf(ref);
        card_table->CheckAddrIsInCardTable(reinterpret_cast<const byte*>(obj));
        void* cover_begin = card_table->AddrFromCard(card_addr);
        void* cover_end = reinterpret_cast<void*>(reinterpret_cast<size_t>(cover_begin) +
            accounting::CardTable::kCardSize);
        LOG(ERROR) << "Card " << reinterpret_cast<void*>(card_addr) << " covers " << cover_begin
                   << "-" << cover_end;
        accounting::SpaceBitmap* bitmap = heap_->GetLiveBitmap()->GetContinuousSpaceBitmap(obj);

        // Print out how the object is live.
        if (bitmap != NULL && bitmap->Test(obj)) {
          LOG(ERROR) << "Object " << obj << " found in live bitmap";
        }
        if (alloc_stack->ContainsSorted(const_cast<mirror::Object*>(obj))) {
          LOG(ERROR) << "Object " << obj << " found in allocation stack";
        }
        if (live_stack->ContainsSorted(const_cast<mirror::Object*>(obj))) {
          LOG(ERROR) << "Object " << obj << " found in live stack";
        }
        // Attempt to see if the card table missed the reference.
        ScanVisitor scan_visitor;
        byte* byte_cover_begin = reinterpret_cast<byte*>(card_table->AddrFromCard(card_addr));
        card_table->Scan(bitmap, byte_cover_begin,
                         byte_cover_begin + accounting::CardTable::kCardSize,
                         scan_visitor, VoidFunctor());

        // Search to see if any of the roots reference our object.
        void* arg = const_cast<void*>(reinterpret_cast<const void*>(obj));
        Runtime::Current()->VisitRoots(&RootMatchesObjectVisitor, arg, false, false);

        // Search to see if any of the roots reference our reference.
        arg = const_cast<void*>(reinterpret_cast<const void*>(ref));
        Runtime::Current()->VisitRoots(&RootMatchesObjectVisitor, arg, false, false);
      } else {
        LOG(ERROR) << "Root references dead object " << ref << "\nRef type " << PrettyTypeOf(ref);
      }
      if (alloc_stack->ContainsSorted(const_cast<mirror::Object*>(ref))) {
        LOG(ERROR) << "Reference " << ref << " found in allocation stack!";
      }
      if (live_stack->ContainsSorted(const_cast<mirror::Object*>(ref))) {
        LOG(ERROR) << "Reference " << ref << " found in live stack!";
      }
      heap_->image_mod_union_table_->Dump(LOG(ERROR) << "Image mod-union table: ");
      heap_->zygote_mod_union_table_->Dump(LOG(ERROR) << "Zygote mod-union table: ");
      failed_ = true;
    }
  }

  bool IsLive(const mirror::Object* obj) const NO_THREAD_SAFETY_ANALYSIS {
    return heap_->IsLiveObjectLocked(obj);
  }

  static void VerifyRoots(const mirror::Object* root, void* arg) {
    VerifyReferenceVisitor* visitor = reinterpret_cast<VerifyReferenceVisitor*>(arg);
    (*visitor)(NULL, root, MemberOffset(0), true);
  }

 private:
  Heap* const heap_;
  mutable bool failed_;
};

// Verify all references within an object, for use with HeapBitmap::Visit.
class VerifyObjectVisitor {
 public:
  explicit VerifyObjectVisitor(Heap* heap) : heap_(heap), failed_(false) {}

  void operator()(const mirror::Object* obj) const
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
    // Note: we are verifying the references in obj but not obj itself, this is because obj must
    // be live or else how did we find it in the live bitmap?
    VerifyReferenceVisitor visitor(heap_);
    collector::MarkSweep::VisitObjectReferences(obj, visitor);
    failed_ = failed_ || visitor.Failed();
  }

  bool Failed() const {
    return failed_;
  }

 private:
  Heap* const heap_;
  mutable bool failed_;
};

// Must do this with mutators suspended since we are directly accessing the allocation stacks.
bool Heap::VerifyHeapReferences() {
  Locks::mutator_lock_->AssertExclusiveHeld(Thread::Current());
  // Let's sort our allocation stacks so that we can efficiently binary search them.
  allocation_stack_->Sort();
  live_stack_->Sort();
  // Perform the verification.
  VerifyObjectVisitor visitor(this);
  Runtime::Current()->VisitRoots(VerifyReferenceVisitor::VerifyRoots, &visitor, false, false);
  GetLiveBitmap()->Visit(visitor);
  // We don't want to verify the objects in the allocation stack since they themselves may be
  // pointing to dead objects if they are not reachable.
  if (visitor.Failed()) {
    DumpSpaces();
    return false;
  }
  return true;
}

class VerifyReferenceCardVisitor {
 public:
  VerifyReferenceCardVisitor(Heap* heap, bool* failed)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_,
                            Locks::heap_bitmap_lock_)
      : heap_(heap), failed_(failed) {
  }

  // TODO: Fix lock analysis to not use NO_THREAD_SAFETY_ANALYSIS, requires support for
  // annotalysis on visitors.
  void operator()(const mirror::Object* obj, const mirror::Object* ref, const MemberOffset& offset,
                  bool is_static) const NO_THREAD_SAFETY_ANALYSIS {
    // Filter out class references since changing an object's class does not mark the card as dirty.
    // Also handles large objects, since the only reference they hold is a class reference.
    if (ref != NULL && !ref->IsClass()) {
      accounting::CardTable* card_table = heap_->GetCardTable();
      // If the object is not dirty and it is referencing something in the live stack other than
      // class, then it must be on a dirty card.
      if (!card_table->AddrIsInCardTable(obj)) {
        LOG(ERROR) << "Object " << obj << " is not in the address range of the card table";
        *failed_ = true;
      } else if (!card_table->IsDirty(obj)) {
        // Card should be either kCardDirty if it got re-dirtied after we aged it, or
        // kCardDirty - 1 if it didn't get touched since we aged it.
        accounting::ObjectStack* live_stack = heap_->live_stack_.get();
        if (live_stack->ContainsSorted(const_cast<mirror::Object*>(ref))) {
          if (live_stack->ContainsSorted(const_cast<mirror::Object*>(obj))) {
            LOG(ERROR) << "Object " << obj << " found in live stack";
          }
          if (heap_->GetLiveBitmap()->Test(obj)) {
            LOG(ERROR) << "Object " << obj << " found in live bitmap";
          }
          LOG(ERROR) << "Object " << obj << " " << PrettyTypeOf(obj)
                     << " references " << ref << " " << PrettyTypeOf(ref) << " in live stack";

          // Print which field of the object is dead.
          if (!obj->IsObjectArray()) {
            const mirror::Class* klass = is_static ? obj->AsClass() : obj->GetClass();
            CHECK(klass != NULL);
            const mirror::ObjectArray<mirror::Field>* fields = is_static ? klass->GetSFields()
                                                                         : klass->GetIFields();
            CHECK(fields != NULL);
            for (int32_t i = 0; i < fields->GetLength(); ++i) {
              const mirror::Field* cur = fields->Get(i);
              if (cur->GetOffset().Int32Value() == offset.Int32Value()) {
                LOG(ERROR) << (is_static ? "Static " : "") << "field in the live stack is "
                           << PrettyField(cur);
                break;
              }
            }
          } else {
            const mirror::ObjectArray<mirror::Object>* object_array =
                obj->AsObjectArray<mirror::Object>();
            for (int32_t i = 0; i < object_array->GetLength(); ++i) {
              if (object_array->Get(i) == ref) {
                LOG(ERROR) << (is_static ? "Static " : "") << "obj[" << i << "] = ref";
              }
            }
          }

          *failed_ = true;
        }
      }
    }
  }

 private:
  Heap* const heap_;
  bool* const failed_;
};

class VerifyLiveStackReferences {
 public:
  explicit VerifyLiveStackReferences(Heap* heap)
      : heap_(heap),
        failed_(false) {}

  void operator()(const mirror::Object* obj) const
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
    VerifyReferenceCardVisitor visitor(heap_, const_cast<bool*>(&failed_));
    collector::MarkSweep::VisitObjectReferences(obj, visitor);
  }

  bool Failed() const {
    return failed_;
  }

 private:
  Heap* const heap_;
  bool failed_;
};

bool Heap::VerifyMissingCardMarks() {
  Locks::mutator_lock_->AssertExclusiveHeld(Thread::Current());

  // We need to sort the live stack since we binary search it.
  live_stack_->Sort();
  VerifyLiveStackReferences visitor(this);
  GetLiveBitmap()->Visit(visitor);

  // We can verify objects in the live stack since none of these should reference dead objects.
  for (mirror::Object** it = live_stack_->Begin(); it != live_stack_->End(); ++it) {
    visitor(*it);
  }

  if (visitor.Failed()) {
    DumpSpaces();
    return false;
  }
  return true;
}

void Heap::SwapStacks() {
  allocation_stack_.swap(live_stack_);

  // Sort the live stack so that we can quickly binary search it later.
  if (verify_object_mode_ > kNoHeapVerification) {
    live_stack_->Sort();
  }
}

void Heap::ProcessCards(base::TimingLogger& timings) {
  // Clear cards and keep track of cards cleared in the mod-union table.
  typedef std::vector<space::ContinuousSpace*>::iterator It;
  for (It it = continuous_spaces_.begin(), end = continuous_spaces_.end(); it != end; ++it) {
    space::ContinuousSpace* space = *it;
    if (space->IsImageSpace()) {
      timings.NewSplit("ModUnionClearCards");
      image_mod_union_table_->ClearCards(space);
    } else if (space->IsZygoteSpace()) {
      timings.NewSplit("ZygoteModUnionClearCards");
      zygote_mod_union_table_->ClearCards(space);
    } else {
      // No mod-union table for the AllocSpace. Age the cards so that the GC knows that these cards
      // were dirty before the GC started.
      timings.NewSplit("AllocSpaceClearCards");
      card_table_->ModifyCardsAtomic(space->Begin(), space->End(), AgeCardVisitor(), VoidFunctor());
    }
  }
}

void Heap::PreGcVerification(collector::GarbageCollector* gc) {
  ThreadList* thread_list = Runtime::Current()->GetThreadList();
  Thread* self = Thread::Current();

  if (verify_pre_gc_heap_) {
    thread_list->SuspendAll();
    {
      ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
      if (!VerifyHeapReferences()) {
        LOG(FATAL) << "Pre " << gc->GetName() << " heap verification failed";
      }
    }
    thread_list->ResumeAll();
  }

  // Check that all objects which reference things in the live stack are on dirty cards.
  if (verify_missing_card_marks_) {
    thread_list->SuspendAll();
    {
      ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
      SwapStacks();
      // Sort the live stack so that we can quickly binary search it later.
      if (!VerifyMissingCardMarks()) {
        LOG(FATAL) << "Pre " << gc->GetName() << " missing card mark verification failed";
      }
      SwapStacks();
    }
    thread_list->ResumeAll();
  }

  if (verify_mod_union_table_) {
    thread_list->SuspendAll();
    ReaderMutexLock reader_lock(self, *Locks::heap_bitmap_lock_);
    zygote_mod_union_table_->Update();
    zygote_mod_union_table_->Verify();
    image_mod_union_table_->Update();
    image_mod_union_table_->Verify();
    thread_list->ResumeAll();
  }
}

void Heap::PreSweepingGcVerification(collector::GarbageCollector* gc) {
  ThreadList* thread_list = Runtime::Current()->GetThreadList();

  // Called before sweeping occurs since we want to make sure we are not going to reclaim any
  // reachable objects.
  if (verify_post_gc_heap_) {
    Thread* self = Thread::Current();
    CHECK_NE(self->GetState(), kRunnable);
    Locks::mutator_lock_->SharedUnlock(self);
    thread_list->SuspendAll();
    {
      WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
      // Swapping bound bitmaps does nothing.
      gc->SwapBitmaps();
      if (!VerifyHeapReferences()) {
        LOG(FATAL) << "Post " << gc->GetName() << " GC verification failed";
      }
      gc->SwapBitmaps();
    }
    thread_list->ResumeAll();
    Locks::mutator_lock_->SharedLock(self);
  }
}

void Heap::PostGcVerification(collector::GarbageCollector* gc) {
  Thread* self = Thread::Current();

  if (verify_system_weaks_) {
    ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
    collector::MarkSweep* mark_sweep = down_cast<collector::MarkSweep*>(gc);
    mark_sweep->VerifySystemWeaks();
  }
}

collector::GcType Heap::WaitForConcurrentGcToComplete(Thread* self) {
  collector::GcType last_gc_type = collector::kGcTypeNone;
  if (concurrent_gc_) {
    ATRACE_BEGIN("GC: Wait For Concurrent");
    bool do_wait;
    uint64_t wait_start = NanoTime();
    {
      // Check if GC is running holding gc_complete_lock_.
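      // The flag is only sampled here; if a GC is in progress we re-take the lock below, switch
      // to kWaitingForGcToComplete and block on gc_complete_cond_ until the collector clears
      // is_gc_running_.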
      MutexLock mu(self, *gc_complete_lock_);
      do_wait = is_gc_running_;
    }
    if (do_wait) {
      uint64_t wait_time;
      // We must wait, so change thread state and then sleep on gc_complete_cond_.
      ScopedThreadStateChange tsc(Thread::Current(), kWaitingForGcToComplete);
      {
        MutexLock mu(self, *gc_complete_lock_);
        while (is_gc_running_) {
          gc_complete_cond_->Wait(self);
        }
        last_gc_type = last_gc_type_;
        wait_time = NanoTime() - wait_start;
        total_wait_time_ += wait_time;
      }
      if (wait_time > kLongGcPauseThreshold) {
        LOG(INFO) << "WaitForConcurrentGcToComplete blocked for " << PrettyDuration(wait_time);
      }
    }
    ATRACE_END();
  }
  return last_gc_type;
}

void Heap::DumpForSigQuit(std::ostream& os) {
  os << "Heap: " << GetPercentFree() << "% free, " << PrettySize(GetBytesAllocated()) << "/"
     << PrettySize(GetTotalMemory()) << "; " << GetObjectsAllocated() << " objects\n";
  DumpGcPerformanceInfo(os);
}

size_t Heap::GetPercentFree() {
  return static_cast<size_t>(100.0f * static_cast<float>(GetFreeMemory()) / GetTotalMemory());
}

void Heap::SetIdealFootprint(size_t max_allowed_footprint) {
  if (max_allowed_footprint > GetMaxMemory()) {
    VLOG(gc) << "Clamp target GC heap from " << PrettySize(max_allowed_footprint) << " to "
             << PrettySize(GetMaxMemory());
    max_allowed_footprint = GetMaxMemory();
  }
  max_allowed_footprint_ = max_allowed_footprint;
}

void Heap::UpdateMaxNativeFootprint() {
  size_t native_size = native_bytes_allocated_;
  // TODO: Tune the native heap utilization to be a value other than the java heap utilization.
  size_t target_size = native_size / GetTargetHeapUtilization();
  if (target_size > native_size + max_free_) {
    target_size = native_size + max_free_;
  } else if (target_size < native_size + min_free_) {
    target_size = native_size + min_free_;
  }
  native_footprint_gc_watermark_ = target_size;
  native_footprint_limit_ = 2 * target_size - native_size;
}

void Heap::GrowForUtilization(collector::GcType gc_type, uint64_t gc_duration) {
  // We know what our utilization is at this moment.
  // This doesn't actually resize any memory. It just lets the heap grow more when necessary.
  const size_t bytes_allocated = GetBytesAllocated();
  last_gc_size_ = bytes_allocated;
  last_gc_time_ns_ = NanoTime();

  size_t target_size;
  if (gc_type != collector::kGcTypeSticky) {
    // Grow the heap for a non-sticky GC.
    target_size = bytes_allocated / GetTargetHeapUtilization();
    if (target_size > bytes_allocated + max_free_) {
      target_size = bytes_allocated + max_free_;
    } else if (target_size < bytes_allocated + min_free_) {
      target_size = bytes_allocated + min_free_;
    }
    next_gc_type_ = collector::kGcTypeSticky;
  } else {
    // Based on how close the current heap size is to the target size, decide
    // whether or not to do a partial or sticky GC next.
    if (bytes_allocated + min_free_ <= max_allowed_footprint_) {
      next_gc_type_ = collector::kGcTypeSticky;
    } else {
      next_gc_type_ = collector::kGcTypePartial;
    }

    // If we have freed enough memory, shrink the heap back down.
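    // Shrinking here only lowers max_allowed_footprint_ (via SetIdealFootprint below); the target
    // is kept within [bytes_allocated, bytes_allocated + max_free_], so the footprint never drops
    // below what is currently live and never trails more than max_free_ behind it. The non-sticky
    // branch above instead aims for bytes_allocated / target_utilization, clamped to
    // [bytes_allocated + min_free_, bytes_allocated + max_free_]; e.g. with a target utilization
    // of 0.5 and 20 MB live, the raw target is 40 MB before clamping.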
    if (bytes_allocated + max_free_ < max_allowed_footprint_) {
      target_size = bytes_allocated + max_free_;
    } else {
      target_size = std::max(bytes_allocated, max_allowed_footprint_);
    }
  }
  SetIdealFootprint(target_size);

  // Calculate when to perform the next ConcurrentGC.
  if (concurrent_gc_) {
    // Calculate the estimated GC duration.
    double gc_duration_seconds = NsToMs(gc_duration) / 1000.0;
    // Estimate how many remaining bytes we will have when we need to start the next GC.
    size_t remaining_bytes = allocation_rate_ * gc_duration_seconds;
    remaining_bytes = std::max(remaining_bytes, kMinConcurrentRemainingBytes);
    if (UNLIKELY(remaining_bytes > max_allowed_footprint_)) {
      // A situation that should never happen: the estimated allocation rate implies we would
      // exceed the application's entire footprint before a GC of this duration could complete.
      // Schedule another GC straight away.
      concurrent_start_bytes_ = bytes_allocated;
    } else {
      // Start a concurrent GC when we get close to the estimated remaining bytes. When the
      // allocation rate is very high, remaining_bytes could tell us that we should start a GC
      // right away.
      concurrent_start_bytes_ = std::max(max_allowed_footprint_ - remaining_bytes, bytes_allocated);
    }
    DCHECK_LE(concurrent_start_bytes_, max_allowed_footprint_);
    DCHECK_LE(max_allowed_footprint_, growth_limit_);
  }

  UpdateMaxNativeFootprint();
}

void Heap::ClearGrowthLimit() {
  growth_limit_ = capacity_;
  alloc_space_->ClearGrowthLimit();
}

void Heap::SetReferenceOffsets(MemberOffset reference_referent_offset,
                               MemberOffset reference_queue_offset,
                               MemberOffset reference_queueNext_offset,
                               MemberOffset reference_pendingNext_offset,
                               MemberOffset finalizer_reference_zombie_offset) {
  reference_referent_offset_ = reference_referent_offset;
  reference_queue_offset_ = reference_queue_offset;
  reference_queueNext_offset_ = reference_queueNext_offset;
  reference_pendingNext_offset_ = reference_pendingNext_offset;
  finalizer_reference_zombie_offset_ = finalizer_reference_zombie_offset;
  CHECK_NE(reference_referent_offset_.Uint32Value(), 0U);
  CHECK_NE(reference_queue_offset_.Uint32Value(), 0U);
  CHECK_NE(reference_queueNext_offset_.Uint32Value(), 0U);
  CHECK_NE(reference_pendingNext_offset_.Uint32Value(), 0U);
  CHECK_NE(finalizer_reference_zombie_offset_.Uint32Value(), 0U);
}

mirror::Object* Heap::GetReferenceReferent(mirror::Object* reference) {
  DCHECK(reference != NULL);
  DCHECK_NE(reference_referent_offset_.Uint32Value(), 0U);
  return reference->GetFieldObject<mirror::Object*>(reference_referent_offset_, true);
}

void Heap::ClearReferenceReferent(mirror::Object* reference) {
  DCHECK(reference != NULL);
  DCHECK_NE(reference_referent_offset_.Uint32Value(), 0U);
  reference->SetFieldObject(reference_referent_offset_, NULL, true);
}

// Returns true if the reference object has not yet been enqueued.
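// A reference is enqueuable when it was constructed with a ReferenceQueue (queue != null) but has
// not yet been added to one (queueNext == null); both fields are read through the cached offsets
// installed by SetReferenceOffsets above.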
bool Heap::IsEnqueuable(const mirror::Object* ref) {
  DCHECK(ref != NULL);
  const mirror::Object* queue =
      ref->GetFieldObject<mirror::Object*>(reference_queue_offset_, false);
  const mirror::Object* queue_next =
      ref->GetFieldObject<mirror::Object*>(reference_queueNext_offset_, false);
  return (queue != NULL) && (queue_next == NULL);
}

void Heap::EnqueueReference(mirror::Object* ref, mirror::Object** cleared_reference_list) {
  DCHECK(ref != NULL);
  CHECK(ref->GetFieldObject<mirror::Object*>(reference_queue_offset_, false) != NULL);
  CHECK(ref->GetFieldObject<mirror::Object*>(reference_queueNext_offset_, false) == NULL);
  EnqueuePendingReference(ref, cleared_reference_list);
}

void Heap::EnqueuePendingReference(mirror::Object* ref, mirror::Object** list) {
  DCHECK(ref != NULL);
  DCHECK(list != NULL);

  // TODO: Remove this lock, use atomic stacks for storing references.
  MutexLock mu(Thread::Current(), *reference_queue_lock_);
  if (*list == NULL) {
    ref->SetFieldObject(reference_pendingNext_offset_, ref, false);
    *list = ref;
  } else {
    mirror::Object* head =
        (*list)->GetFieldObject<mirror::Object*>(reference_pendingNext_offset_, false);
    ref->SetFieldObject(reference_pendingNext_offset_, head, false);
    (*list)->SetFieldObject(reference_pendingNext_offset_, ref, false);
  }
}

mirror::Object* Heap::DequeuePendingReference(mirror::Object** list) {
  DCHECK(list != NULL);
  DCHECK(*list != NULL);
  mirror::Object* head = (*list)->GetFieldObject<mirror::Object*>(reference_pendingNext_offset_,
                                                                  false);
  mirror::Object* ref;

  // Note: the following code is thread-safe because it is only called from ProcessReferences which
  // is single threaded.
  if (*list == head) {
    ref = *list;
    *list = NULL;
  } else {
    mirror::Object* next = head->GetFieldObject<mirror::Object*>(reference_pendingNext_offset_,
                                                                 false);
    (*list)->SetFieldObject(reference_pendingNext_offset_, next, false);
    ref = head;
  }
  ref->SetFieldObject(reference_pendingNext_offset_, NULL, false);
  return ref;
}

void Heap::AddFinalizerReference(Thread* self, mirror::Object* object) {
  ScopedObjectAccess soa(self);
  JValue result;
  ArgArray arg_array(NULL, 0);
  arg_array.Append(reinterpret_cast<uint32_t>(object));
  soa.DecodeMethod(WellKnownClasses::java_lang_ref_FinalizerReference_add)->Invoke(self,
      arg_array.GetArray(), arg_array.GetNumBytes(), &result, 'V');
}

void Heap::EnqueueClearedReferences(mirror::Object** cleared) {
  DCHECK(cleared != NULL);
  if (*cleared != NULL) {
    // When a runtime isn't started there are no reference queues to care about, so ignore.
    if (LIKELY(Runtime::Current()->IsStarted())) {
      ScopedObjectAccess soa(Thread::Current());
      JValue result;
      ArgArray arg_array(NULL, 0);
      arg_array.Append(reinterpret_cast<uint32_t>(*cleared));
      soa.DecodeMethod(WellKnownClasses::java_lang_ref_ReferenceQueue_add)->Invoke(soa.Self(),
          arg_array.GetArray(), arg_array.GetNumBytes(), &result, 'V');
    }
    *cleared = NULL;
  }
}

void Heap::RequestConcurrentGC(Thread* self) {
  // Make sure that we can do a concurrent GC.
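  // The request is silently dropped when the runtime is missing or still starting, concurrent GC
  // is disabled, the runtime is shutting down, or this thread is handling a stack overflow.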
  Runtime* runtime = Runtime::Current();
  DCHECK(concurrent_gc_);
  if (runtime == NULL || !runtime->IsFinishedStarting() ||
      !runtime->IsConcurrentGcEnabled()) {
    return;
  }
  {
    MutexLock mu(self, *Locks::runtime_shutdown_lock_);
    if (runtime->IsShuttingDown()) {
      return;
    }
  }
  if (self->IsHandlingStackOverflow()) {
    return;
  }

  // We already have a request pending, no reason to start more until we update
  // concurrent_start_bytes_.
  concurrent_start_bytes_ = std::numeric_limits<size_t>::max();

  JNIEnv* env = self->GetJniEnv();
  DCHECK(WellKnownClasses::java_lang_Daemons != NULL);
  DCHECK(WellKnownClasses::java_lang_Daemons_requestGC != NULL);
  env->CallStaticVoidMethod(WellKnownClasses::java_lang_Daemons,
                            WellKnownClasses::java_lang_Daemons_requestGC);
  CHECK(!env->ExceptionCheck());
}

void Heap::ConcurrentGC(Thread* self) {
  {
    MutexLock mu(self, *Locks::runtime_shutdown_lock_);
    if (Runtime::Current()->IsShuttingDown()) {
      return;
    }
  }

  // Wait for any GCs currently running to finish.
  if (WaitForConcurrentGcToComplete(self) == collector::kGcTypeNone) {
    CollectGarbageInternal(next_gc_type_, kGcCauseBackground, false);
  }
}

void Heap::RequestHeapTrim() {
  // GC completed and now we must decide whether to request a heap trim (advising pages back to the
  // kernel) or not. Issuing a request will also cause trimming of the libc heap. As a trim scans
  // a space it will hold its lock and can become a cause of jank.
  // Note, the large object space self trims and the Zygote space was trimmed at fork time and has
  // been unchanging since.

  // We don't have a good measure of how worthwhile a trim might be. We can't use the live bitmap
  // because that only marks object heads, so a large array looks like lots of empty space. We
  // don't just call dlmalloc all the time, because the cost of an _attempted_ trim is proportional
  // to utilization (which is probably inversely proportional to how much benefit we can expect).
  // We could try mincore(2) but that's only a measure of how many pages we haven't given away,
  // not how much use we're making of those pages.
  uint64_t ms_time = MilliTime();
  float utilization =
      static_cast<float>(alloc_space_->GetBytesAllocated()) / alloc_space_->Size();
  if ((utilization > 0.75f && !IsLowMemoryMode()) || ((ms_time - last_trim_time_ms_) < 2 * 1000)) {
    // Don't bother trimming the alloc space if it's more than 75% utilized and low memory mode is
    // not enabled, or if a heap trim occurred in the last two seconds.
    return;
  }

  Thread* self = Thread::Current();
  {
    MutexLock mu(self, *Locks::runtime_shutdown_lock_);
    Runtime* runtime = Runtime::Current();
    if (runtime == NULL || !runtime->IsFinishedStarting() || runtime->IsShuttingDown()) {
      // Heap trimming isn't supported without a Java runtime or Daemons (such as at dex2oat time).
      // Also: we do not wish to start a heap trim if the runtime is shutting down (a racy check
      // as we don't hold the lock while requesting the trim).
      return;
    }
  }

  SchedPolicy policy;
  get_sched_policy(self->GetTid(), &policy);
  if (policy == SP_FOREGROUND || policy == SP_AUDIO_APP) {
    // Don't trim the heap if we are a foreground or audio app.
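    // A trim scans the alloc space while holding its lock (see the comment at the top of this
    // function), so deferring it here avoids user-visible jank in interactive and audio processes.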
    return;
  }

  last_trim_time_ms_ = ms_time;
  JNIEnv* env = self->GetJniEnv();
  DCHECK(WellKnownClasses::java_lang_Daemons != NULL);
  DCHECK(WellKnownClasses::java_lang_Daemons_requestHeapTrim != NULL);
  env->CallStaticVoidMethod(WellKnownClasses::java_lang_Daemons,
                            WellKnownClasses::java_lang_Daemons_requestHeapTrim);
  CHECK(!env->ExceptionCheck());
}

size_t Heap::Trim() {
  // Handle a requested heap trim on a thread outside of the main GC thread.
  return alloc_space_->Trim();
}

bool Heap::IsGCRequestPending() const {
  return concurrent_start_bytes_ != std::numeric_limits<size_t>::max();
}

void Heap::RegisterNativeAllocation(int bytes) {
  // Total number of native bytes allocated.
  native_bytes_allocated_.fetch_add(bytes);
  Thread* self = Thread::Current();
  if (static_cast<size_t>(native_bytes_allocated_) > native_footprint_gc_watermark_) {
    // The second watermark is higher than the gc watermark. If you hit this it means you are
    // allocating native objects faster than the GC can keep up with.
    if (static_cast<size_t>(native_bytes_allocated_) > native_footprint_limit_) {
      JNIEnv* env = self->GetJniEnv();
      // Can't do this in WellKnownClasses::Init since System is not properly set up at that
      // point.
      if (WellKnownClasses::java_lang_System_runFinalization == NULL) {
        DCHECK(WellKnownClasses::java_lang_System != NULL);
        WellKnownClasses::java_lang_System_runFinalization =
            CacheMethod(env, WellKnownClasses::java_lang_System, true, "runFinalization", "()V");
        assert(WellKnownClasses::java_lang_System_runFinalization != NULL);
      }
      if (WaitForConcurrentGcToComplete(self) != collector::kGcTypeNone) {
        // Just finished a GC, attempt to run finalizers.
        env->CallStaticVoidMethod(WellKnownClasses::java_lang_System,
                                  WellKnownClasses::java_lang_System_runFinalization);
        CHECK(!env->ExceptionCheck());
      }

      // If we are still over the watermark, attempt a GC for alloc and run finalizers.
      if (static_cast<size_t>(native_bytes_allocated_) > native_footprint_limit_) {
        CollectGarbageInternal(collector::kGcTypePartial, kGcCauseForAlloc, false);
        env->CallStaticVoidMethod(WellKnownClasses::java_lang_System,
                                  WellKnownClasses::java_lang_System_runFinalization);
        CHECK(!env->ExceptionCheck());
      }
      // We have just run finalizers, so update the native watermark since it is very likely that
      // finalizers released native managed allocations.
      UpdateMaxNativeFootprint();
    } else {
      if (!IsGCRequestPending()) {
        RequestConcurrentGC(self);
      }
    }
  }
}

void Heap::RegisterNativeFree(int bytes) {
  int expected_size, new_size;
  do {
    expected_size = native_bytes_allocated_.load();
    new_size = expected_size - bytes;
    if (new_size < 0) {
      ThrowRuntimeException("attempted to free %d native bytes with only %d native bytes registered as allocated",
                            bytes, expected_size);
      break;
    }
  } while (!native_bytes_allocated_.compare_and_swap(expected_size, new_size));
}

int64_t Heap::GetTotalMemory() const {
  int64_t ret = 0;
  typedef std::vector<space::ContinuousSpace*>::const_iterator It;
  for (It it = continuous_spaces_.begin(), end = continuous_spaces_.end(); it != end; ++it) {
    space::ContinuousSpace* space = *it;
    if (space->IsImageSpace()) {
      // Currently don't include the image space.
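      // Only the dlmalloc-backed zygote/alloc spaces and the large object space contribute to the
      // total reported below.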
    } else if (space->IsDlMallocSpace()) {
      // Zygote or alloc space.
      ret += space->AsDlMallocSpace()->GetFootprint();
    }
  }
  typedef std::vector<space::DiscontinuousSpace*>::const_iterator It2;
  for (It2 it = discontinuous_spaces_.begin(), end = discontinuous_spaces_.end(); it != end; ++it) {
    space::DiscontinuousSpace* space = *it;
    if (space->IsLargeObjectSpace()) {
      ret += space->AsLargeObjectSpace()->GetBytesAllocated();
    }
  }
  return ret;
}

}  // namespace gc
}  // namespace art