heap.cc revision 161a8e0fcbf26922c97654c00b68082be71eeb50
/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "heap.h"

#define ATRACE_TAG ATRACE_TAG_DALVIK
#include <cutils/trace.h>

#include <limits>
#include <vector>

#include "base/stl_util.h"
#include "common_throws.h"
#include "cutils/sched_policy.h"
#include "debugger.h"
#include "gc/accounting/atomic_stack.h"
#include "gc/accounting/card_table-inl.h"
#include "gc/accounting/heap_bitmap-inl.h"
#include "gc/accounting/mod_union_table-inl.h"
#include "gc/accounting/space_bitmap-inl.h"
#include "gc/collector/mark_sweep-inl.h"
#include "gc/collector/partial_mark_sweep.h"
#include "gc/collector/sticky_mark_sweep.h"
#include "gc/space/image_space.h"
#include "gc/space/large_object_space.h"
#include "gc/space/space-inl.h"
#include "image.h"
#include "invoke_arg_array_builder.h"
#include "mirror/class-inl.h"
#include "mirror/field-inl.h"
#include "mirror/object.h"
#include "mirror/object-inl.h"
#include "mirror/object_array-inl.h"
#include "object_utils.h"
#include "os.h"
#include "ScopedLocalRef.h"
#include "scoped_thread_state_change.h"
#include "sirt_ref.h"
#include "thread_list.h"
#include "UniquePtr.h"
#include "well_known_classes.h"

namespace art {
namespace gc {

// When to create a log message about a slow GC, 100ms.
static const uint64_t kSlowGcThreshold = MsToNs(100);
// When to create a log message about a long pause, 5ms.
static const uint64_t kLongGcPauseThreshold = MsToNs(5);
static const bool kGCALotMode = false;
static const size_t kGcAlotInterval = KB;
static const bool kDumpGcPerformanceOnShutdown = false;
// Minimum amount of remaining bytes before a concurrent GC is triggered.
static const size_t kMinConcurrentRemainingBytes = 128 * KB;
const double Heap::kDefaultTargetUtilization = 0.5;

Heap::Heap(size_t initial_size, size_t growth_limit, size_t min_free, size_t max_free,
           double target_utilization, size_t capacity,
           const std::string& original_image_file_name, bool concurrent_gc, size_t num_gc_threads)
    : alloc_space_(NULL),
      card_table_(NULL),
      concurrent_gc_(concurrent_gc),
      num_gc_threads_(num_gc_threads),
      have_zygote_space_(false),
      reference_queue_lock_(NULL),
      is_gc_running_(false),
      last_gc_type_(collector::kGcTypeNone),
      next_gc_type_(collector::kGcTypePartial),
      capacity_(capacity),
      growth_limit_(growth_limit),
      max_allowed_footprint_(initial_size),
      native_footprint_gc_watermark_(initial_size),
      native_footprint_limit_(2 * initial_size),
      concurrent_start_bytes_(concurrent_gc ? initial_size - (kMinConcurrentRemainingBytes)
                                             : std::numeric_limits<size_t>::max()),
      total_bytes_freed_ever_(0),
      total_objects_freed_ever_(0),
      large_object_threshold_(3 * kPageSize),
      num_bytes_allocated_(0),
      native_bytes_allocated_(0),
      process_state_(PROCESS_STATE_TOP),
      gc_memory_overhead_(0),
      verify_missing_card_marks_(false),
      verify_system_weaks_(false),
      verify_pre_gc_heap_(false),
      verify_post_gc_heap_(false),
      verify_mod_union_table_(false),
      min_alloc_space_size_for_sticky_gc_(2 * MB),
      min_remaining_space_for_sticky_gc_(1 * MB),
      last_trim_time_ms_(0),
      allocation_rate_(0),
      /* For GC a lot mode, we limit the allocation stacks to be kGcAlotInterval allocations. This
       * causes a lot of GC since we do a GC for alloc whenever the stack is full. When heap
       * verification is enabled, we limit the size of allocation stacks to speed up their
       * searching.
       */
      max_allocation_stack_size_(kGCALotMode ? kGcAlotInterval
          : (kDesiredHeapVerification > kNoHeapVerification) ? KB : MB),
      reference_referent_offset_(0),
      reference_queue_offset_(0),
      reference_queueNext_offset_(0),
      reference_pendingNext_offset_(0),
      finalizer_reference_zombie_offset_(0),
      min_free_(min_free),
      max_free_(max_free),
      target_utilization_(target_utilization),
      total_wait_time_(0),
      measure_allocation_time_(false),
      total_allocation_time_(0),
      verify_object_mode_(kHeapVerificationNotPermitted) {
  if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
    LOG(INFO) << "Heap() entering";
  }

  live_bitmap_.reset(new accounting::HeapBitmap(this));
  mark_bitmap_.reset(new accounting::HeapBitmap(this));

  // Requested begin for the alloc space, to follow the mapped image and oat files
  byte* requested_alloc_space_begin = NULL;
  std::string image_file_name(original_image_file_name);
  if (!image_file_name.empty()) {
    space::ImageSpace* image_space = space::ImageSpace::Create(image_file_name);
    CHECK(image_space != NULL) << "Failed to create space for " << image_file_name;
    AddContinuousSpace(image_space);
    // Oat files referenced by image files immediately follow them in memory, ensure the alloc
    // space isn't going to get in the middle.
    byte* oat_file_end_addr = image_space->GetImageHeader().GetOatFileEnd();
    CHECK_GT(oat_file_end_addr, image_space->End());
    if (oat_file_end_addr > requested_alloc_space_begin) {
      requested_alloc_space_begin =
          reinterpret_cast<byte*>(RoundUp(reinterpret_cast<uintptr_t>(oat_file_end_addr),
                                          kPageSize));
    }
  }

  // Allocate the large object space.
  const bool kUseFreeListSpaceForLOS = false;
  if (kUseFreeListSpaceForLOS) {
    large_object_space_ = space::FreeListSpace::Create("large object space", NULL, capacity);
  } else {
    large_object_space_ = space::LargeObjectMapSpace::Create("large object space");
  }
  CHECK(large_object_space_ != NULL) << "Failed to create large object space";
  AddDiscontinuousSpace(large_object_space_);

  alloc_space_ = space::DlMallocSpace::Create(Runtime::Current()->IsZygote() ? "zygote space" : "alloc space",
                                              initial_size,
                                              growth_limit, capacity,
                                              requested_alloc_space_begin);
  CHECK(alloc_space_ != NULL) << "Failed to create alloc space";
  alloc_space_->SetFootprintLimit(alloc_space_->Capacity());
  AddContinuousSpace(alloc_space_);

  // Compute heap capacity. Continuous spaces are sorted in order of Begin().
  byte* heap_begin = continuous_spaces_.front()->Begin();
  size_t heap_capacity = continuous_spaces_.back()->End() - continuous_spaces_.front()->Begin();
  if (continuous_spaces_.back()->IsDlMallocSpace()) {
    heap_capacity += continuous_spaces_.back()->AsDlMallocSpace()->NonGrowthLimitCapacity();
  }

  // Mark image objects in the live bitmap.
  // TODO: C++0x
  typedef std::vector<space::ContinuousSpace*>::iterator It;
  for (It it = continuous_spaces_.begin(); it != continuous_spaces_.end(); ++it) {
    space::ContinuousSpace* space = *it;
    if (space->IsImageSpace()) {
      space::ImageSpace* image_space = space->AsImageSpace();
      image_space->RecordImageAllocations(image_space->GetLiveBitmap());
    }
  }

  // Allocate the card table.
  card_table_.reset(accounting::CardTable::Create(heap_begin, heap_capacity));
  CHECK(card_table_.get() != NULL) << "Failed to create card table";

  image_mod_union_table_.reset(new accounting::ModUnionTableToZygoteAllocspace(this));
  CHECK(image_mod_union_table_.get() != NULL) << "Failed to create image mod-union table";

  zygote_mod_union_table_.reset(new accounting::ModUnionTableCardCache(this));
  CHECK(zygote_mod_union_table_.get() != NULL) << "Failed to create Zygote mod-union table";

  // TODO: Count objects in the image space here.
  num_bytes_allocated_ = 0;

  // Default mark stack size in bytes.
  static const size_t default_mark_stack_size = 64 * KB;
  mark_stack_.reset(accounting::ObjectStack::Create("mark stack", default_mark_stack_size));
  allocation_stack_.reset(accounting::ObjectStack::Create("allocation stack",
                                                          max_allocation_stack_size_));
  live_stack_.reset(accounting::ObjectStack::Create("live stack",
                                                    max_allocation_stack_size_));

  // It's still too early to take a lock because there are no threads yet, but we can create locks
  // now. We don't create them earlier to make it clear that you can't use locks during heap
  // initialization.
  gc_complete_lock_ = new Mutex("GC complete lock");
  gc_complete_cond_.reset(new ConditionVariable("GC complete condition variable",
                                                *gc_complete_lock_));

  // Create the reference queue lock; this is required for parallel object scanning in the GC.
  reference_queue_lock_ = new Mutex("reference queue lock");

  last_gc_time_ns_ = NanoTime();
  last_gc_size_ = GetBytesAllocated();

  // Create our garbage collectors.
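  // One collector of each depth (full, partial, sticky mark-sweep) is created in both a
  // non-concurrent (i == 0) and a concurrent (i == 1) flavor, six collectors in total;
  // CollectGarbageInternal later picks the one matching concurrent_gc_ and the requested GC type.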
  for (size_t i = 0; i < 2; ++i) {
    const bool concurrent = i != 0;
    mark_sweep_collectors_.push_back(new collector::MarkSweep(this, concurrent));
    mark_sweep_collectors_.push_back(new collector::PartialMarkSweep(this, concurrent));
    mark_sweep_collectors_.push_back(new collector::StickyMarkSweep(this, concurrent));
  }

  CHECK_NE(max_allowed_footprint_, 0U);
  if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
    LOG(INFO) << "Heap() exiting";
  }
}

void Heap::CreateThreadPool() {
  thread_pool_.reset(new ThreadPool(num_gc_threads_));
}

void Heap::DeleteThreadPool() {
  thread_pool_.reset(NULL);
}

// Sort spaces based on begin address
struct ContinuousSpaceSorter {
  bool operator()(const space::ContinuousSpace* a, const space::ContinuousSpace* b) const {
    return a->Begin() < b->Begin();
  }
};

void Heap::UpdateProcessState(ProcessState process_state) {
  process_state_ = process_state;
}

void Heap::AddContinuousSpace(space::ContinuousSpace* space) {
  WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
  DCHECK(space != NULL);
  DCHECK(space->GetLiveBitmap() != NULL);
  live_bitmap_->AddContinuousSpaceBitmap(space->GetLiveBitmap());
  DCHECK(space->GetMarkBitmap() != NULL);
  mark_bitmap_->AddContinuousSpaceBitmap(space->GetMarkBitmap());
  continuous_spaces_.push_back(space);
  if (space->IsDlMallocSpace() && !space->IsLargeObjectSpace()) {
    alloc_space_ = space->AsDlMallocSpace();
  }

  // Ensure that spaces remain sorted in increasing order of start address (required for CMS finger)
  std::sort(continuous_spaces_.begin(), continuous_spaces_.end(), ContinuousSpaceSorter());

  // Ensure that ImageSpaces < ZygoteSpaces < AllocSpaces so that we can do address based checks to
  // avoid redundant marking.
  bool seen_zygote = false, seen_alloc = false;
  typedef std::vector<space::ContinuousSpace*>::const_iterator It;
  for (It it = continuous_spaces_.begin(); it != continuous_spaces_.end(); ++it) {
    space::ContinuousSpace* space = *it;
    if (space->IsImageSpace()) {
      DCHECK(!seen_zygote);
      DCHECK(!seen_alloc);
    } else if (space->IsZygoteSpace()) {
      DCHECK(!seen_alloc);
      seen_zygote = true;
    } else if (space->IsDlMallocSpace()) {
      seen_alloc = true;
    }
  }
}

void Heap::RegisterGCAllocation(size_t bytes) {
  if (this != NULL) {
    gc_memory_overhead_.fetch_add(bytes);
  }
}

void Heap::RegisterGCDeAllocation(size_t bytes) {
  if (this != NULL) {
    gc_memory_overhead_.fetch_sub(bytes);
  }
}

void Heap::AddDiscontinuousSpace(space::DiscontinuousSpace* space) {
  WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
  DCHECK(space != NULL);
  DCHECK(space->GetLiveObjects() != NULL);
  live_bitmap_->AddDiscontinuousObjectSet(space->GetLiveObjects());
  DCHECK(space->GetMarkObjects() != NULL);
  mark_bitmap_->AddDiscontinuousObjectSet(space->GetMarkObjects());
  discontinuous_spaces_.push_back(space);
}

void Heap::DumpGcPerformanceInfo(std::ostream& os) {
  // Dump cumulative timings.
  os << "Dumping cumulative Gc timings\n";
  uint64_t total_duration = 0;

  // Dump cumulative loggers for each GC type.
  // TODO: C++0x
  uint64_t total_paused_time = 0;
  typedef std::vector<collector::MarkSweep*>::const_iterator It;
  for (It it = mark_sweep_collectors_.begin();
       it != mark_sweep_collectors_.end(); ++it) {
    collector::MarkSweep* collector = *it;
    CumulativeLogger& logger = collector->GetCumulativeTimings();
    if (logger.GetTotalNs() != 0) {
      os << Dumpable<CumulativeLogger>(logger);
      const uint64_t total_ns = logger.GetTotalNs();
      const uint64_t total_pause_ns = (*it)->GetTotalPausedTimeNs();
      double seconds = NsToMs(logger.GetTotalNs()) / 1000.0;
      const uint64_t freed_bytes = collector->GetTotalFreedBytes();
      const uint64_t freed_objects = collector->GetTotalFreedObjects();
      os << collector->GetName() << " total time: " << PrettyDuration(total_ns) << "\n"
         << collector->GetName() << " paused time: " << PrettyDuration(total_pause_ns) << "\n"
         << collector->GetName() << " freed: " << freed_objects
         << " objects with total size " << PrettySize(freed_bytes) << "\n"
         << collector->GetName() << " throughput: " << freed_objects / seconds << "/s / "
         << PrettySize(freed_bytes / seconds) << "/s\n";
      total_duration += total_ns;
      total_paused_time += total_pause_ns;
    }
  }
  uint64_t allocation_time = static_cast<uint64_t>(total_allocation_time_) * kTimeAdjust;
  size_t total_objects_allocated = GetObjectsAllocatedEver();
  size_t total_bytes_allocated = GetBytesAllocatedEver();
  if (total_duration != 0) {
    const double total_seconds = static_cast<double>(total_duration / 1000) / 1000000.0;
    os << "Total time spent in GC: " << PrettyDuration(total_duration) << "\n";
    os << "Mean GC size throughput: "
       << PrettySize(GetBytesFreedEver() / total_seconds) << "/s\n";
    os << "Mean GC object throughput: "
       << (GetObjectsFreedEver() / total_seconds) << " objects/s\n";
  }
  os << "Total number of allocations: " << total_objects_allocated << "\n";
  os << "Total bytes allocated " << PrettySize(total_bytes_allocated) << "\n";
  if (measure_allocation_time_) {
    os << "Total time spent allocating: " << PrettyDuration(allocation_time) << "\n";
    os << "Mean allocation time: " << PrettyDuration(allocation_time / total_objects_allocated)
       << "\n";
  }
  os << "Total mutator paused time: " << PrettyDuration(total_paused_time) << "\n";
  os << "Total time waiting for GC to complete: " << PrettyDuration(total_wait_time_) << "\n";
  os << "Approximate GC data structures memory overhead: " << gc_memory_overhead_;
}

Heap::~Heap() {
  if (kDumpGcPerformanceOnShutdown) {
    DumpGcPerformanceInfo(LOG(INFO));
  }

  STLDeleteElements(&mark_sweep_collectors_);

  // If we don't reset then the mark stack complains in its destructor.
  allocation_stack_->Reset();
  live_stack_->Reset();

  VLOG(heap) << "~Heap()";
  // We can't take the heap lock here because there might be a daemon thread suspended with the
  // heap lock held. We know though that no non-daemon threads are executing, and we know that
  // all daemon threads are suspended, and we also know that the thread list has been deleted, so
  // those threads can't resume. We're the only running thread, and we can do whatever we like...
  STLDeleteElements(&continuous_spaces_);
  STLDeleteElements(&discontinuous_spaces_);
  delete gc_complete_lock_;
  delete reference_queue_lock_;
}

space::ContinuousSpace* Heap::FindContinuousSpaceFromObject(const mirror::Object* obj,
                                                            bool fail_ok) const {
  // TODO: C++0x auto
  typedef std::vector<space::ContinuousSpace*>::const_iterator It;
  for (It it = continuous_spaces_.begin(), end = continuous_spaces_.end(); it != end; ++it) {
    if ((*it)->Contains(obj)) {
      return *it;
    }
  }
  if (!fail_ok) {
    LOG(FATAL) << "object " << reinterpret_cast<const void*>(obj) << " not inside any spaces!";
  }
  return NULL;
}

space::DiscontinuousSpace* Heap::FindDiscontinuousSpaceFromObject(const mirror::Object* obj,
                                                                  bool fail_ok) const {
  // TODO: C++0x auto
  typedef std::vector<space::DiscontinuousSpace*>::const_iterator It;
  for (It it = discontinuous_spaces_.begin(), end = discontinuous_spaces_.end(); it != end; ++it) {
    if ((*it)->Contains(obj)) {
      return *it;
    }
  }
  if (!fail_ok) {
    LOG(FATAL) << "object " << reinterpret_cast<const void*>(obj) << " not inside any spaces!";
  }
  return NULL;
}

space::Space* Heap::FindSpaceFromObject(const mirror::Object* obj, bool fail_ok) const {
  space::Space* result = FindContinuousSpaceFromObject(obj, true);
  if (result != NULL) {
    return result;
  }
  return FindDiscontinuousSpaceFromObject(obj, true);
}

space::ImageSpace* Heap::GetImageSpace() const {
  // TODO: C++0x auto
  typedef std::vector<space::ContinuousSpace*>::const_iterator It;
  for (It it = continuous_spaces_.begin(), end = continuous_spaces_.end(); it != end; ++it) {
    if ((*it)->IsImageSpace()) {
      return (*it)->AsImageSpace();
    }
  }
  return NULL;
}

static void MSpaceChunkCallback(void* start, void* end, size_t used_bytes, void* arg) {
  size_t chunk_size = reinterpret_cast<uint8_t*>(end) - reinterpret_cast<uint8_t*>(start);
  if (used_bytes < chunk_size) {
    size_t chunk_free_bytes = chunk_size - used_bytes;
    size_t& max_contiguous_allocation = *reinterpret_cast<size_t*>(arg);
    max_contiguous_allocation = std::max(max_contiguous_allocation, chunk_free_bytes);
  }
}

mirror::Object* Heap::AllocObject(Thread* self, mirror::Class* c, size_t byte_count) {
  DCHECK(c == NULL || (c->IsClassClass() && byte_count >= sizeof(mirror::Class)) ||
         (c->IsVariableSize() || c->GetObjectSize() == byte_count) ||
         strlen(ClassHelper(c).GetDescriptor()) == 0);
  DCHECK_GE(byte_count, sizeof(mirror::Object));

  mirror::Object* obj = NULL;
  size_t size = 0;
  uint64_t allocation_start = 0;
  if (UNLIKELY(measure_allocation_time_)) {
    allocation_start = NanoTime() / kTimeAdjust;
  }

  // We need to have a zygote space or else our newly allocated large object can end up in the
  // Zygote resulting in it being prematurely freed.
  // We can only do this for primitive arrays since large objects will not be within the card table
  // range. This also means that we rely on SetClass not dirtying the object's card.
  bool large_object_allocation =
      byte_count >= large_object_threshold_ && have_zygote_space_ && c->IsPrimitiveArray();
  if (UNLIKELY(large_object_allocation)) {
    size = RoundUp(byte_count, kPageSize);
    obj = Allocate(self, large_object_space_, size);
    // Make sure that our large object didn't get placed anywhere within the space interval or else
    // it breaks the immune range.
    DCHECK(obj == NULL ||
           reinterpret_cast<byte*>(obj) < continuous_spaces_.front()->Begin() ||
           reinterpret_cast<byte*>(obj) >= continuous_spaces_.back()->End());
  } else {
    obj = Allocate(self, alloc_space_, byte_count);

    // Ensure that we did not allocate into a zygote space.
    DCHECK(obj == NULL || !have_zygote_space_ || !FindSpaceFromObject(obj, false)->IsZygoteSpace());
    size = alloc_space_->AllocationSize(obj);
  }

  if (LIKELY(obj != NULL)) {
    obj->SetClass(c);

    // Record allocation after since we want to use the atomic add for the atomic fence to guard
    // the SetClass since we do not want the class to appear NULL in another thread.
    RecordAllocation(size, obj);

    if (Dbg::IsAllocTrackingEnabled()) {
      Dbg::RecordAllocation(c, byte_count);
    }
    if (static_cast<size_t>(num_bytes_allocated_) >= concurrent_start_bytes_) {
      // The SirtRef is necessary since the calls in RequestConcurrentGC are a safepoint.
      SirtRef<mirror::Object> ref(self, obj);
      RequestConcurrentGC(self);
    }
    VerifyObject(obj);

    if (UNLIKELY(measure_allocation_time_)) {
      total_allocation_time_.fetch_add(NanoTime() / kTimeAdjust - allocation_start);
    }

    return obj;
  } else {
    std::ostringstream oss;
    int64_t total_bytes_free = GetFreeMemory();
    oss << "Failed to allocate a " << byte_count << " byte allocation with " << total_bytes_free
        << " free bytes";
    // If the allocation failed due to fragmentation, print out the largest contiguous allocation.
    if (!large_object_allocation && total_bytes_free >= byte_count) {
      size_t max_contiguous_allocation = 0;
      // TODO: C++0x auto
      typedef std::vector<space::ContinuousSpace*>::const_iterator It;
      for (It it = continuous_spaces_.begin(), end = continuous_spaces_.end(); it != end; ++it) {
        space::ContinuousSpace* space = *it;
        if (space->IsDlMallocSpace()) {
          space->AsDlMallocSpace()->Walk(MSpaceChunkCallback, &max_contiguous_allocation);
        }
      }
      oss << "; failed due to fragmentation (largest possible contiguous allocation "
          << max_contiguous_allocation << " bytes)";
    }
    self->ThrowOutOfMemoryError(oss.str().c_str());
    return NULL;
  }
}

bool Heap::IsHeapAddress(const mirror::Object* obj) {
  // Note: we deliberately don't take the lock here, and mustn't test anything that would
  // require taking the lock.
  if (obj == NULL) {
    return true;
  }
  if (UNLIKELY(!IsAligned<kObjectAlignment>(obj))) {
    return false;
  }
  return FindSpaceFromObject(obj, true) != NULL;
}

bool Heap::IsLiveObjectLocked(const mirror::Object* obj) {
  // Locks::heap_bitmap_lock_->AssertReaderHeld(Thread::Current());
  if (obj == NULL || UNLIKELY(!IsAligned<kObjectAlignment>(obj))) {
    return false;
  }
  space::ContinuousSpace* c_space = FindContinuousSpaceFromObject(obj, true);
  space::DiscontinuousSpace* d_space = NULL;
  if (c_space != NULL) {
    if (c_space->GetLiveBitmap()->Test(obj)) {
      return true;
    }
  } else {
    d_space = FindDiscontinuousSpaceFromObject(obj, true);
    if (d_space != NULL) {
      if (d_space->GetLiveObjects()->Test(obj)) {
        return true;
      }
    }
  }
  // This is covering the allocation/live stack swapping that is done without mutators suspended.
  for (size_t i = 0; i < 5; ++i) {
    if (allocation_stack_->Contains(const_cast<mirror::Object*>(obj)) ||
        live_stack_->Contains(const_cast<mirror::Object*>(obj))) {
      return true;
    }
    NanoSleep(MsToNs(10));
  }
  // We need to check the bitmaps again since there is a race where we mark something as live and
  // then clear the stack containing it.
  if (c_space != NULL) {
    if (c_space->GetLiveBitmap()->Test(obj)) {
      return true;
    }
  } else {
    d_space = FindDiscontinuousSpaceFromObject(obj, true);
    if (d_space != NULL && d_space->GetLiveObjects()->Test(obj)) {
      return true;
    }
  }
  return false;
}

void Heap::VerifyObjectImpl(const mirror::Object* obj) {
  if (Thread::Current() == NULL ||
      Runtime::Current()->GetThreadList()->GetLockOwner() == Thread::Current()->GetTid()) {
    return;
  }
  VerifyObjectBody(obj);
}

void Heap::DumpSpaces() {
  // TODO: C++0x auto
  typedef std::vector<space::ContinuousSpace*>::const_iterator It;
  for (It it = continuous_spaces_.begin(), end = continuous_spaces_.end(); it != end; ++it) {
    space::ContinuousSpace* space = *it;
    accounting::SpaceBitmap* live_bitmap = space->GetLiveBitmap();
    accounting::SpaceBitmap* mark_bitmap = space->GetMarkBitmap();
    LOG(INFO) << space << " " << *space << "\n"
              << live_bitmap << " " << *live_bitmap << "\n"
              << mark_bitmap << " " << *mark_bitmap;
  }
  typedef std::vector<space::DiscontinuousSpace*>::const_iterator It2;
  for (It2 it = discontinuous_spaces_.begin(), end = discontinuous_spaces_.end(); it != end; ++it) {
    space::DiscontinuousSpace* space = *it;
    LOG(INFO) << space << " " << *space << "\n";
  }
}

void Heap::VerifyObjectBody(const mirror::Object* obj) {
  if (UNLIKELY(!IsAligned<kObjectAlignment>(obj))) {
    LOG(FATAL) << "Object isn't aligned: " << obj;
  }
  if (UNLIKELY(GetObjectsAllocated() <= 10)) {  // Ignore early dawn of the universe verifications.
    return;
  }
  const byte* raw_addr = reinterpret_cast<const byte*>(obj) +
      mirror::Object::ClassOffset().Int32Value();
  const mirror::Class* c = *reinterpret_cast<mirror::Class* const *>(raw_addr);
  if (UNLIKELY(c == NULL)) {
    LOG(FATAL) << "Null class in object: " << obj;
  } else if (UNLIKELY(!IsAligned<kObjectAlignment>(c))) {
    LOG(FATAL) << "Class isn't aligned: " << c << " in object: " << obj;
  }
  // Check obj.getClass().getClass() == obj.getClass().getClass().getClass()
  // Note: we don't use the accessors here as they have internal sanity checks
  // that we don't want to run
  raw_addr = reinterpret_cast<const byte*>(c) + mirror::Object::ClassOffset().Int32Value();
  const mirror::Class* c_c = *reinterpret_cast<mirror::Class* const *>(raw_addr);
  raw_addr = reinterpret_cast<const byte*>(c_c) + mirror::Object::ClassOffset().Int32Value();
  const mirror::Class* c_c_c = *reinterpret_cast<mirror::Class* const *>(raw_addr);
  CHECK_EQ(c_c, c_c_c);

  if (verify_object_mode_ != kVerifyAllFast) {
    // TODO: the bitmap tests below are racy if VerifyObjectBody is called without the
    // heap_bitmap_lock_.
    if (!IsLiveObjectLocked(obj)) {
      DumpSpaces();
      LOG(FATAL) << "Object is dead: " << obj;
    }
    if (!IsLiveObjectLocked(c)) {
      LOG(FATAL) << "Class of object is dead: " << c << " in object: " << obj;
    }
  }
}

void Heap::VerificationCallback(mirror::Object* obj, void* arg) {
  DCHECK(obj != NULL);
  reinterpret_cast<Heap*>(arg)->VerifyObjectBody(obj);
}

void Heap::VerifyHeap() {
  ReaderMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
  GetLiveBitmap()->Walk(Heap::VerificationCallback, this);
}

void Heap::RecordAllocation(size_t size, mirror::Object* obj) {
  DCHECK(obj != NULL);
  DCHECK_GT(size, 0u);
  num_bytes_allocated_.fetch_add(size);

  if (Runtime::Current()->HasStatsEnabled()) {
    RuntimeStats* thread_stats = Thread::Current()->GetStats();
    ++thread_stats->allocated_objects;
    thread_stats->allocated_bytes += size;

    // TODO: Update these atomically.
    RuntimeStats* global_stats = Runtime::Current()->GetStats();
    ++global_stats->allocated_objects;
    global_stats->allocated_bytes += size;
  }

  // This is safe to do since the GC will never free objects which are in neither the allocation
  // stack nor the live bitmap.
  while (!allocation_stack_->AtomicPushBack(obj)) {
    CollectGarbageInternal(collector::kGcTypeSticky, kGcCauseForAlloc, false);
  }
}

void Heap::RecordFree(size_t freed_objects, size_t freed_bytes) {
  DCHECK_LE(freed_bytes, static_cast<size_t>(num_bytes_allocated_));
  num_bytes_allocated_.fetch_sub(freed_bytes);

  if (Runtime::Current()->HasStatsEnabled()) {
    RuntimeStats* thread_stats = Thread::Current()->GetStats();
    thread_stats->freed_objects += freed_objects;
    thread_stats->freed_bytes += freed_bytes;

    // TODO: Do this concurrently.
    RuntimeStats* global_stats = Runtime::Current()->GetStats();
    global_stats->freed_objects += freed_objects;
    global_stats->freed_bytes += freed_bytes;
  }
}

mirror::Object* Heap::TryToAllocate(Thread* self, space::AllocSpace* space, size_t alloc_size,
                                    bool grow) {
  // Should we try to use a CAS here and fix up num_bytes_allocated_ later with AllocationSize?
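  // Note: this footprint check and the allocation below are not performed atomically;
  // num_bytes_allocated_ can change concurrently, so the test is best-effort (hence the CAS
  // question above).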
  if (num_bytes_allocated_ + alloc_size > max_allowed_footprint_) {
    // max_allowed_footprint_ <= growth_limit_ so it is safe to check in here.
    if (num_bytes_allocated_ + alloc_size > growth_limit_) {
      // Completely out of memory.
      return NULL;
    }
  }

  return space->Alloc(self, alloc_size);
}

mirror::Object* Heap::Allocate(Thread* self, space::AllocSpace* space, size_t alloc_size) {
  // Since allocation can cause a GC which will need to SuspendAll, make sure all allocations are
  // done in the runnable state where suspension is expected.
  DCHECK_EQ(self->GetState(), kRunnable);
  self->AssertThreadSuspensionIsAllowable();

  mirror::Object* ptr = TryToAllocate(self, space, alloc_size, false);
  if (ptr != NULL) {
    return ptr;
  }

  // The allocation failed. If the GC is running, block until it completes, and then retry the
  // allocation.
  collector::GcType last_gc = WaitForConcurrentGcToComplete(self);
  if (last_gc != collector::kGcTypeNone) {
    // A GC was in progress and we blocked, retry allocation now that memory has been freed.
    ptr = TryToAllocate(self, space, alloc_size, false);
    if (ptr != NULL) {
      return ptr;
    }
  }

  // Loop through our different Gc types and try to Gc until we get enough free memory.
  for (size_t i = static_cast<size_t>(last_gc) + 1;
       i < static_cast<size_t>(collector::kGcTypeMax); ++i) {
    bool run_gc = false;
    collector::GcType gc_type = static_cast<collector::GcType>(i);
    switch (gc_type) {
      case collector::kGcTypeSticky: {
        const size_t alloc_space_size = alloc_space_->Size();
        run_gc = alloc_space_size > min_alloc_space_size_for_sticky_gc_ &&
            alloc_space_->Capacity() - alloc_space_size >= min_remaining_space_for_sticky_gc_;
        break;
      }
      case collector::kGcTypePartial:
        run_gc = have_zygote_space_;
        break;
      case collector::kGcTypeFull:
        run_gc = true;
        break;
      default:
        break;
    }

    if (run_gc) {
      // If we actually ran a different type of Gc than requested, we can skip the index forwards.
      collector::GcType gc_type_ran = CollectGarbageInternal(gc_type, kGcCauseForAlloc, false);
      DCHECK_GE(static_cast<size_t>(gc_type_ran), i);
      i = static_cast<size_t>(gc_type_ran);

      // Did we free sufficient memory for the allocation to succeed?
      ptr = TryToAllocate(self, space, alloc_size, false);
      if (ptr != NULL) {
        return ptr;
      }
    }
  }

  // Allocations have failed after GCs; this is an exceptional state.
  // Try harder, growing the heap if necessary.
  ptr = TryToAllocate(self, space, alloc_size, true);
  if (ptr != NULL) {
    return ptr;
  }

  // Most allocations should have succeeded by now, so the heap is really full, really fragmented,
  // or the requested size is really big. Do another GC, collecting SoftReferences this time. The
  // VM spec requires that all SoftReferences have been collected and cleared before throwing OOME.

  // OLD-TODO: wait for the finalizers from the previous GC to finish
  VLOG(gc) << "Forcing collection of SoftReferences for " << PrettySize(alloc_size)
           << " allocation";

  // We don't need a WaitForConcurrentGcToComplete here either.
  CollectGarbageInternal(collector::kGcTypeFull, kGcCauseForAlloc, true);
  return TryToAllocate(self, space, alloc_size, true);
}

void Heap::SetTargetHeapUtilization(float target) {
  DCHECK_GT(target, 0.0f);  // asserted in Java code
  DCHECK_LT(target, 1.0f);
  target_utilization_ = target;
}

size_t Heap::GetObjectsAllocated() const {
  size_t total = 0;
  typedef std::vector<space::ContinuousSpace*>::const_iterator It;
  for (It it = continuous_spaces_.begin(), end = continuous_spaces_.end(); it != end; ++it) {
    space::ContinuousSpace* space = *it;
    if (space->IsDlMallocSpace()) {
      total += space->AsDlMallocSpace()->GetObjectsAllocated();
    }
  }
  typedef std::vector<space::DiscontinuousSpace*>::const_iterator It2;
  for (It2 it = discontinuous_spaces_.begin(), end = discontinuous_spaces_.end(); it != end; ++it) {
    space::DiscontinuousSpace* space = *it;
    total += space->AsLargeObjectSpace()->GetObjectsAllocated();
  }
  return total;
}

size_t Heap::GetObjectsAllocatedEver() const {
  size_t total = 0;
  typedef std::vector<space::ContinuousSpace*>::const_iterator It;
  for (It it = continuous_spaces_.begin(), end = continuous_spaces_.end(); it != end; ++it) {
    space::ContinuousSpace* space = *it;
    if (space->IsDlMallocSpace()) {
      total += space->AsDlMallocSpace()->GetTotalObjectsAllocated();
    }
  }
  typedef std::vector<space::DiscontinuousSpace*>::const_iterator It2;
  for (It2 it = discontinuous_spaces_.begin(), end = discontinuous_spaces_.end(); it != end; ++it) {
    space::DiscontinuousSpace* space = *it;
    total += space->AsLargeObjectSpace()->GetTotalObjectsAllocated();
  }
  return total;
}

size_t Heap::GetBytesAllocatedEver() const {
  size_t total = 0;
  typedef std::vector<space::ContinuousSpace*>::const_iterator It;
  for (It it = continuous_spaces_.begin(), end = continuous_spaces_.end(); it != end; ++it) {
    space::ContinuousSpace* space = *it;
    if (space->IsDlMallocSpace()) {
      total += space->AsDlMallocSpace()->GetTotalBytesAllocated();
    }
  }
  typedef std::vector<space::DiscontinuousSpace*>::const_iterator It2;
  for (It2 it = discontinuous_spaces_.begin(), end = discontinuous_spaces_.end(); it != end; ++it) {
    space::DiscontinuousSpace* space = *it;
    total += space->AsLargeObjectSpace()->GetTotalBytesAllocated();
  }
  return total;
}

class InstanceCounter {
 public:
  InstanceCounter(const std::vector<mirror::Class*>& classes, bool use_is_assignable_from, uint64_t* counts)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      : classes_(classes), use_is_assignable_from_(use_is_assignable_from), counts_(counts) {
  }

  void operator()(const mirror::Object* o) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    for (size_t i = 0; i < classes_.size(); ++i) {
      const mirror::Class* instance_class = o->GetClass();
      if (use_is_assignable_from_) {
        if (instance_class != NULL && classes_[i]->IsAssignableFrom(instance_class)) {
          ++counts_[i];
        }
      } else {
        if (instance_class == classes_[i]) {
          ++counts_[i];
        }
      }
    }
  }

 private:
  const std::vector<mirror::Class*>& classes_;
  bool use_is_assignable_from_;
  uint64_t* const counts_;

  DISALLOW_COPY_AND_ASSIGN(InstanceCounter);
};

void Heap::CountInstances(const std::vector<mirror::Class*>& classes, bool use_is_assignable_from,
                          uint64_t* counts) {
  // We only want reachable instances, so do a GC. This also ensures that the alloc stack
  // is empty, so the live bitmap is the only place we need to look.
  Thread* self = Thread::Current();
  self->TransitionFromRunnableToSuspended(kNative);
  CollectGarbage(false);
  self->TransitionFromSuspendedToRunnable();

  InstanceCounter counter(classes, use_is_assignable_from, counts);
  ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
  GetLiveBitmap()->Visit(counter);
}

class InstanceCollector {
 public:
  InstanceCollector(mirror::Class* c, int32_t max_count, std::vector<mirror::Object*>& instances)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      : class_(c), max_count_(max_count), instances_(instances) {
  }

  void operator()(const mirror::Object* o) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    const mirror::Class* instance_class = o->GetClass();
    if (instance_class == class_) {
      if (max_count_ == 0 || instances_.size() < max_count_) {
        instances_.push_back(const_cast<mirror::Object*>(o));
      }
    }
  }

 private:
  mirror::Class* class_;
  uint32_t max_count_;
  std::vector<mirror::Object*>& instances_;

  DISALLOW_COPY_AND_ASSIGN(InstanceCollector);
};

void Heap::GetInstances(mirror::Class* c, int32_t max_count,
                        std::vector<mirror::Object*>& instances) {
  // We only want reachable instances, so do a GC. This also ensures that the alloc stack
  // is empty, so the live bitmap is the only place we need to look.
  Thread* self = Thread::Current();
  self->TransitionFromRunnableToSuspended(kNative);
  CollectGarbage(false);
  self->TransitionFromSuspendedToRunnable();

  InstanceCollector collector(c, max_count, instances);
  ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
  GetLiveBitmap()->Visit(collector);
}

class ReferringObjectsFinder {
 public:
  ReferringObjectsFinder(mirror::Object* object, int32_t max_count,
                         std::vector<mirror::Object*>& referring_objects)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      : object_(object), max_count_(max_count), referring_objects_(referring_objects) {
  }

  // For bitmap Visit.
  // TODO: Fix lock analysis to not use NO_THREAD_SAFETY_ANALYSIS, requires support for
  // annotalysis on visitors.
  void operator()(const mirror::Object* o) const NO_THREAD_SAFETY_ANALYSIS {
    collector::MarkSweep::VisitObjectReferences(o, *this);
  }

  // For MarkSweep::VisitObjectReferences.
  void operator()(const mirror::Object* referrer, const mirror::Object* object,
                  const MemberOffset&, bool) const {
    if (object == object_ && (max_count_ == 0 || referring_objects_.size() < max_count_)) {
      referring_objects_.push_back(const_cast<mirror::Object*>(referrer));
    }
  }

 private:
  mirror::Object* object_;
  uint32_t max_count_;
  std::vector<mirror::Object*>& referring_objects_;

  DISALLOW_COPY_AND_ASSIGN(ReferringObjectsFinder);
};

void Heap::GetReferringObjects(mirror::Object* o, int32_t max_count,
                               std::vector<mirror::Object*>& referring_objects) {
  // We only want reachable instances, so do a GC. This also ensures that the alloc stack
  // is empty, so the live bitmap is the only place we need to look.
  Thread* self = Thread::Current();
  self->TransitionFromRunnableToSuspended(kNative);
  CollectGarbage(false);
  self->TransitionFromSuspendedToRunnable();

  ReferringObjectsFinder finder(o, max_count, referring_objects);
  ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
  GetLiveBitmap()->Visit(finder);
}

void Heap::CollectGarbage(bool clear_soft_references) {
  // Even if we waited for a GC we still need to do another GC since weaks allocated during the
  // last GC will not have necessarily been cleared.
  Thread* self = Thread::Current();
  WaitForConcurrentGcToComplete(self);
  CollectGarbageInternal(collector::kGcTypeFull, kGcCauseExplicit, clear_soft_references);
}

void Heap::PreZygoteFork() {
  static Mutex zygote_creation_lock_("zygote creation lock", kZygoteCreationLock);
  // Do this before acquiring the zygote creation lock so that we don't get lock order violations.
  CollectGarbage(false);
  Thread* self = Thread::Current();
  MutexLock mu(self, zygote_creation_lock_);

  // Try to see if we have any Zygote spaces.
  if (have_zygote_space_) {
    return;
  }

  VLOG(heap) << "Starting PreZygoteFork with alloc space size " << PrettySize(alloc_space_->Size());

  {
    // Flush the alloc stack.
    WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
    FlushAllocStack();
  }

  // Turn the current alloc space into a Zygote space and obtain the new alloc space composed
  // of the remaining available heap memory.
  space::DlMallocSpace* zygote_space = alloc_space_;
  alloc_space_ = zygote_space->CreateZygoteSpace("alloc space");
  alloc_space_->SetFootprintLimit(alloc_space_->Capacity());

  // Change the GC retention policy of the zygote space to only collect when full.
  zygote_space->SetGcRetentionPolicy(space::kGcRetentionPolicyFullCollect);
  AddContinuousSpace(alloc_space_);
  have_zygote_space_ = true;

  // Reset the cumulative loggers since we now have a few additional timing phases.
  // TODO: C++0x
  typedef std::vector<collector::MarkSweep*>::const_iterator It;
  for (It it = mark_sweep_collectors_.begin(), end = mark_sweep_collectors_.end();
       it != end; ++it) {
    (*it)->ResetCumulativeStatistics();
  }
}

void Heap::FlushAllocStack() {
  MarkAllocStack(alloc_space_->GetLiveBitmap(), large_object_space_->GetLiveObjects(),
                 allocation_stack_.get());
  allocation_stack_->Reset();
}

void Heap::MarkAllocStack(accounting::SpaceBitmap* bitmap, accounting::SpaceSetMap* large_objects,
                          accounting::ObjectStack* stack) {
  mirror::Object** limit = stack->End();
  for (mirror::Object** it = stack->Begin(); it != limit; ++it) {
    const mirror::Object* obj = *it;
    DCHECK(obj != NULL);
    if (LIKELY(bitmap->HasAddress(obj))) {
      bitmap->Set(obj);
    } else {
      large_objects->Set(obj);
    }
  }
}

void Heap::UnMarkAllocStack(accounting::SpaceBitmap* bitmap, accounting::SpaceSetMap* large_objects,
                            accounting::ObjectStack* stack) {
  mirror::Object** limit = stack->End();
  for (mirror::Object** it = stack->Begin(); it != limit; ++it) {
    const mirror::Object* obj = *it;
    DCHECK(obj != NULL);
    if (LIKELY(bitmap->HasAddress(obj))) {
      bitmap->Clear(obj);
    } else {
      large_objects->Clear(obj);
    }
  }
}

collector::GcType Heap::CollectGarbageInternal(collector::GcType gc_type, GcCause gc_cause,
                                               bool clear_soft_references) {
  Thread* self = Thread::Current();

  switch (gc_cause) {
    case kGcCauseForAlloc:
      ATRACE_BEGIN("GC (alloc)");
      break;
    case kGcCauseBackground:
      ATRACE_BEGIN("GC (background)");
      break;
    case kGcCauseExplicit:
      ATRACE_BEGIN("GC (explicit)");
      break;
  }

  ScopedThreadStateChange tsc(self, kWaitingPerformingGc);
  Locks::mutator_lock_->AssertNotHeld(self);

  if (self->IsHandlingStackOverflow()) {
    LOG(WARNING) << "Performing GC on a thread that is handling a stack overflow.";
  }

  // Ensure there is only one GC at a time.
  bool start_collect = false;
  while (!start_collect) {
    {
      MutexLock mu(self, *gc_complete_lock_);
      if (!is_gc_running_) {
        is_gc_running_ = true;
        start_collect = true;
      }
    }
    if (!start_collect) {
      WaitForConcurrentGcToComplete(self);
      // TODO: if another thread beat this one to do the GC, perhaps we should just return here?
      //       Not doing at the moment to ensure soft references are cleared.
    }
  }
  gc_complete_lock_->AssertNotHeld(self);

  if (gc_cause == kGcCauseForAlloc && Runtime::Current()->HasStatsEnabled()) {
    ++Runtime::Current()->GetStats()->gc_for_alloc_count;
    ++Thread::Current()->GetStats()->gc_for_alloc_count;
  }

  uint64_t gc_start_time_ns = NanoTime();
  uint64_t gc_start_size = GetBytesAllocated();
  // Approximate allocation rate in bytes / second.
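  // For example, with roughly 8 MiB newly allocated since the last GC and ms_delta == 400 below,
  // allocation_rate_ works out to 8 MiB * 1000 / 400 = 20 MiB/s.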
  if (UNLIKELY(gc_start_time_ns == last_gc_time_ns_)) {
    LOG(WARNING) << "Timers are broken (gc_start_time == last_gc_time_).";
  }
  uint64_t ms_delta = NsToMs(gc_start_time_ns - last_gc_time_ns_);
  if (ms_delta != 0) {
    allocation_rate_ = ((gc_start_size - last_gc_size_) * 1000) / ms_delta;
    VLOG(heap) << "Allocation rate: " << PrettySize(allocation_rate_) << "/s";
  }

  if (gc_type == collector::kGcTypeSticky &&
      alloc_space_->Size() < min_alloc_space_size_for_sticky_gc_) {
    gc_type = collector::kGcTypePartial;
  }

  DCHECK_LT(gc_type, collector::kGcTypeMax);
  DCHECK_NE(gc_type, collector::kGcTypeNone);
  collector::MarkSweep* collector = NULL;
  typedef std::vector<collector::MarkSweep*>::iterator It;
  for (It it = mark_sweep_collectors_.begin(), end = mark_sweep_collectors_.end();
       it != end; ++it) {
    collector::MarkSweep* cur_collector = *it;
    if (cur_collector->IsConcurrent() == concurrent_gc_ && cur_collector->GetGcType() == gc_type) {
      collector = cur_collector;
      break;
    }
  }
  CHECK(collector != NULL)
      << "Could not find garbage collector with concurrent=" << concurrent_gc_
      << " and type=" << gc_type;
  collector->clear_soft_references_ = clear_soft_references;
  collector->Run();
  total_objects_freed_ever_ += collector->GetFreedObjects();
  total_bytes_freed_ever_ += collector->GetFreedBytes();

  const size_t duration = collector->GetDurationNs();
  std::vector<uint64_t> pauses = collector->GetPauseTimes();
  bool was_slow = duration > kSlowGcThreshold ||
      (gc_cause == kGcCauseForAlloc && duration > kLongGcPauseThreshold);
  for (size_t i = 0; i < pauses.size(); ++i) {
    if (pauses[i] > kLongGcPauseThreshold) {
      was_slow = true;
    }
  }

  if (was_slow) {
    const size_t percent_free = GetPercentFree();
    const size_t current_heap_size = GetBytesAllocated();
    const size_t total_memory = GetTotalMemory();
    std::ostringstream pause_string;
    for (size_t i = 0; i < pauses.size(); ++i) {
      pause_string << PrettyDuration((pauses[i] / 1000) * 1000)
                   << ((i != pauses.size() - 1) ? ", " : "");
    }
    LOG(INFO) << gc_cause << " " << collector->GetName()
              << "GC freed " << PrettySize(collector->GetFreedBytes()) << ", "
              << percent_free << "% free, " << PrettySize(current_heap_size) << "/"
              << PrettySize(total_memory) << ", " << "paused " << pause_string.str()
              << " total " << PrettyDuration((duration / 1000) * 1000);
    if (VLOG_IS_ON(heap)) {
      LOG(INFO) << Dumpable<base::TimingLogger>(collector->GetTimings());
    }
  }

  {
    MutexLock mu(self, *gc_complete_lock_);
    is_gc_running_ = false;
    last_gc_type_ = gc_type;
    // Wake anyone who may have been waiting for the GC to complete.
    gc_complete_cond_->Broadcast(self);
  }

  // Inform DDMS that a GC completed.
  ATRACE_END();
  Dbg::GcDidFinish();
  return gc_type;
}

void Heap::UpdateAndMarkModUnion(collector::MarkSweep* mark_sweep, base::TimingLogger& timings,
                                 collector::GcType gc_type) {
  if (gc_type == collector::kGcTypeSticky) {
    // Don't need to do anything for mod union table in this case since we are only scanning dirty
    // cards.
    return;
  }

  // Update zygote mod union table.
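  // This is only needed for partial GCs: the zygote space is not collected then, so references it
  // holds into the alloc space have to be treated as roots via the mod-union table. A full GC
  // marks through the zygote space directly and can skip this step.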
  if (gc_type == collector::kGcTypePartial) {
    timings.NewSplit("UpdateZygoteModUnionTable");
    zygote_mod_union_table_->Update();

    timings.NewSplit("ZygoteMarkReferences");
    zygote_mod_union_table_->MarkReferences(mark_sweep);
  }

  // Processes the cards we cleared earlier and adds their objects into the mod-union table.
  timings.NewSplit("UpdateModUnionTable");
  image_mod_union_table_->Update();

  // Scans all objects in the mod-union table.
  timings.NewSplit("MarkImageToAllocSpaceReferences");
  image_mod_union_table_->MarkReferences(mark_sweep);
}

static void RootMatchesObjectVisitor(const mirror::Object* root, void* arg) {
  mirror::Object* obj = reinterpret_cast<mirror::Object*>(arg);
  if (root == obj) {
    LOG(INFO) << "Object " << obj << " is a root";
  }
}

class ScanVisitor {
 public:
  void operator()(const mirror::Object* obj) const {
    LOG(INFO) << "Would have rescanned object " << obj;
  }
};

// Verify a reference from an object.
class VerifyReferenceVisitor {
 public:
  explicit VerifyReferenceVisitor(Heap* heap)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_)
      : heap_(heap), failed_(false) {}

  bool Failed() const {
    return failed_;
  }

  // TODO: Fix lock analysis to not use NO_THREAD_SAFETY_ANALYSIS, requires support for smarter
  // analysis on visitors.
  void operator()(const mirror::Object* obj, const mirror::Object* ref,
                  const MemberOffset& offset, bool /* is_static */) const
      NO_THREAD_SAFETY_ANALYSIS {
    // Verify that the reference is live.
    if (UNLIKELY(ref != NULL && !IsLive(ref))) {
      accounting::CardTable* card_table = heap_->GetCardTable();
      accounting::ObjectStack* alloc_stack = heap_->allocation_stack_.get();
      accounting::ObjectStack* live_stack = heap_->live_stack_.get();

      if (obj != NULL) {
        byte* card_addr = card_table->CardFromAddr(obj);
        LOG(ERROR) << "Object " << obj << " references dead object " << ref << " at offset " << offset
                   << "\nIsDirty = " << (*card_addr == accounting::CardTable::kCardDirty)
                   << "\nObj type " << PrettyTypeOf(obj)
                   << "\nRef type " << PrettyTypeOf(ref);
        card_table->CheckAddrIsInCardTable(reinterpret_cast<const byte*>(obj));
        void* cover_begin = card_table->AddrFromCard(card_addr);
        void* cover_end = reinterpret_cast<void*>(reinterpret_cast<size_t>(cover_begin) +
            accounting::CardTable::kCardSize);
        LOG(ERROR) << "Card " << reinterpret_cast<void*>(card_addr) << " covers " << cover_begin
                   << "-" << cover_end;
        accounting::SpaceBitmap* bitmap = heap_->GetLiveBitmap()->GetContinuousSpaceBitmap(obj);

        // Print out how the object is live.
        if (bitmap != NULL && bitmap->Test(obj)) {
          LOG(ERROR) << "Object " << obj << " found in live bitmap";
        }
        if (alloc_stack->ContainsSorted(const_cast<mirror::Object*>(obj))) {
          LOG(ERROR) << "Object " << obj << " found in allocation stack";
        }
        if (live_stack->ContainsSorted(const_cast<mirror::Object*>(obj))) {
          LOG(ERROR) << "Object " << obj << " found in live stack";
        }
        // Attempt to see if the card table missed the reference.
        ScanVisitor scan_visitor;
        byte* byte_cover_begin = reinterpret_cast<byte*>(card_table->AddrFromCard(card_addr));
        card_table->Scan(bitmap, byte_cover_begin,
                         byte_cover_begin + accounting::CardTable::kCardSize,
                         scan_visitor, VoidFunctor());

        // Search to see if any of the roots reference our object.
        void* arg = const_cast<void*>(reinterpret_cast<const void*>(obj));
        Runtime::Current()->VisitRoots(&RootMatchesObjectVisitor, arg, false, false);

        // Search to see if any of the roots reference our reference.
        arg = const_cast<void*>(reinterpret_cast<const void*>(ref));
        Runtime::Current()->VisitRoots(&RootMatchesObjectVisitor, arg, false, false);
      } else {
        LOG(ERROR) << "Root references dead object " << ref << "\nRef type " << PrettyTypeOf(ref);
      }
      if (alloc_stack->ContainsSorted(const_cast<mirror::Object*>(ref))) {
        LOG(ERROR) << "Reference " << ref << " found in allocation stack!";
      }
      if (live_stack->ContainsSorted(const_cast<mirror::Object*>(ref))) {
        LOG(ERROR) << "Reference " << ref << " found in live stack!";
      }
      heap_->image_mod_union_table_->Dump(LOG(ERROR) << "Image mod-union table: ");
      heap_->zygote_mod_union_table_->Dump(LOG(ERROR) << "Zygote mod-union table: ");
      failed_ = true;
    }
  }

  bool IsLive(const mirror::Object* obj) const NO_THREAD_SAFETY_ANALYSIS {
    return heap_->IsLiveObjectLocked(obj);
  }

  static void VerifyRoots(const mirror::Object* root, void* arg) {
    VerifyReferenceVisitor* visitor = reinterpret_cast<VerifyReferenceVisitor*>(arg);
    (*visitor)(NULL, root, MemberOffset(0), true);
  }

 private:
  Heap* const heap_;
  mutable bool failed_;
};

// Verify all references within an object, for use with HeapBitmap::Visit.
class VerifyObjectVisitor {
 public:
  explicit VerifyObjectVisitor(Heap* heap) : heap_(heap), failed_(false) {}

  void operator()(const mirror::Object* obj) const
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
    // Note: we are verifying the references in obj but not obj itself, this is because obj must
    // be live or else how did we find it in the live bitmap?
    VerifyReferenceVisitor visitor(heap_);
    collector::MarkSweep::VisitObjectReferences(obj, visitor);
    failed_ = failed_ || visitor.Failed();
  }

  bool Failed() const {
    return failed_;
  }

 private:
  Heap* const heap_;
  mutable bool failed_;
};

// Must do this with mutators suspended since we are directly accessing the allocation stacks.
bool Heap::VerifyHeapReferences() {
  Locks::mutator_lock_->AssertExclusiveHeld(Thread::Current());
  // Let's sort our allocation stacks so that we can efficiently binary search them.
  allocation_stack_->Sort();
  live_stack_->Sort();
  // Perform the verification.
  VerifyObjectVisitor visitor(this);
  Runtime::Current()->VisitRoots(VerifyReferenceVisitor::VerifyRoots, &visitor, false, false);
  GetLiveBitmap()->Visit(visitor);
  // We don't want to verify the objects in the allocation stack since they themselves may be
  // pointing to dead objects if they are not reachable.
  if (visitor.Failed()) {
    DumpSpaces();
    return false;
  }
  return true;
}

class VerifyReferenceCardVisitor {
 public:
  VerifyReferenceCardVisitor(Heap* heap, bool* failed)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_,
                            Locks::heap_bitmap_lock_)
      : heap_(heap), failed_(failed) {
  }

  // TODO: Fix lock analysis to not use NO_THREAD_SAFETY_ANALYSIS, requires support for
  // annotalysis on visitors.
  void operator()(const mirror::Object* obj, const mirror::Object* ref, const MemberOffset& offset,
                  bool is_static) const NO_THREAD_SAFETY_ANALYSIS {
    // Filter out class references since changing an object's class does not mark the card as dirty.
    // Also handles large objects, since the only reference they hold is a class reference.
    if (ref != NULL && !ref->IsClass()) {
      accounting::CardTable* card_table = heap_->GetCardTable();
      // An object referencing something in the live stack (other than its class) must be on a
      // dirty card; if it is not dirty here, that is an error.
      if (!card_table->AddrIsInCardTable(obj)) {
        LOG(ERROR) << "Object " << obj << " is not in the address range of the card table";
        *failed_ = true;
      } else if (!card_table->IsDirty(obj)) {
        // Card should be either kCardDirty if it got re-dirtied after we aged it, or
        // kCardDirty - 1 if it didn't get touched since we aged it.
        accounting::ObjectStack* live_stack = heap_->live_stack_.get();
        if (live_stack->ContainsSorted(const_cast<mirror::Object*>(ref))) {
          if (live_stack->ContainsSorted(const_cast<mirror::Object*>(obj))) {
            LOG(ERROR) << "Object " << obj << " found in live stack";
          }
          if (heap_->GetLiveBitmap()->Test(obj)) {
            LOG(ERROR) << "Object " << obj << " found in live bitmap";
          }
          LOG(ERROR) << "Object " << obj << " " << PrettyTypeOf(obj)
                     << " references " << ref << " " << PrettyTypeOf(ref) << " in live stack";

          // Print which field of the object is dead.
          if (!obj->IsObjectArray()) {
            const mirror::Class* klass = is_static ? obj->AsClass() : obj->GetClass();
            CHECK(klass != NULL);
            const mirror::ObjectArray<mirror::Field>* fields = is_static ? klass->GetSFields()
                                                                         : klass->GetIFields();
            CHECK(fields != NULL);
            for (int32_t i = 0; i < fields->GetLength(); ++i) {
              const mirror::Field* cur = fields->Get(i);
              if (cur->GetOffset().Int32Value() == offset.Int32Value()) {
                LOG(ERROR) << (is_static ? "Static " : "") << "field in the live stack is "
                           << PrettyField(cur);
                break;
              }
            }
          } else {
            const mirror::ObjectArray<mirror::Object>* object_array =
                obj->AsObjectArray<mirror::Object>();
            for (int32_t i = 0; i < object_array->GetLength(); ++i) {
              if (object_array->Get(i) == ref) {
                LOG(ERROR) << (is_static ? "Static " : "") << "obj[" << i << "] = ref";
"Static " : "") << "obj[" << i << "] = ref"; 1398 } 1399 } 1400 } 1401 1402 *failed_ = true; 1403 } 1404 } 1405 } 1406 } 1407 1408 private: 1409 Heap* const heap_; 1410 bool* const failed_; 1411}; 1412 1413class VerifyLiveStackReferences { 1414 public: 1415 explicit VerifyLiveStackReferences(Heap* heap) 1416 : heap_(heap), 1417 failed_(false) {} 1418 1419 void operator()(const mirror::Object* obj) const 1420 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) { 1421 VerifyReferenceCardVisitor visitor(heap_, const_cast<bool*>(&failed_)); 1422 collector::MarkSweep::VisitObjectReferences(obj, visitor); 1423 } 1424 1425 bool Failed() const { 1426 return failed_; 1427 } 1428 1429 private: 1430 Heap* const heap_; 1431 bool failed_; 1432}; 1433 1434bool Heap::VerifyMissingCardMarks() { 1435 Locks::mutator_lock_->AssertExclusiveHeld(Thread::Current()); 1436 1437 // We need to sort the live stack since we binary search it. 1438 live_stack_->Sort(); 1439 VerifyLiveStackReferences visitor(this); 1440 GetLiveBitmap()->Visit(visitor); 1441 1442 // We can verify objects in the live stack since none of these should reference dead objects. 1443 for (mirror::Object** it = live_stack_->Begin(); it != live_stack_->End(); ++it) { 1444 visitor(*it); 1445 } 1446 1447 if (visitor.Failed()) { 1448 DumpSpaces(); 1449 return false; 1450 } 1451 return true; 1452} 1453 1454void Heap::SwapStacks() { 1455 allocation_stack_.swap(live_stack_); 1456 1457 // Sort the live stack so that we can quickly binary search it later. 1458 if (verify_object_mode_ > kNoHeapVerification) { 1459 live_stack_->Sort(); 1460 } 1461} 1462 1463void Heap::ProcessCards(base::TimingLogger& timings) { 1464 // Clear cards and keep track of cards cleared in the mod-union table. 1465 typedef std::vector<space::ContinuousSpace*>::iterator It; 1466 for (It it = continuous_spaces_.begin(), end = continuous_spaces_.end(); it != end; ++it) { 1467 space::ContinuousSpace* space = *it; 1468 if (space->IsImageSpace()) { 1469 timings.NewSplit("ModUnionClearCards"); 1470 image_mod_union_table_->ClearCards(space); 1471 } else if (space->IsZygoteSpace()) { 1472 timings.NewSplit("ZygoteModUnionClearCards"); 1473 zygote_mod_union_table_->ClearCards(space); 1474 } else { 1475 // No mod union table for the AllocSpace. Age the cards so that the GC knows that these cards 1476 // were dirty before the GC started. 1477 timings.NewSplit("AllocSpaceClearCards"); 1478 card_table_->ModifyCardsAtomic(space->Begin(), space->End(), AgeCardVisitor(), VoidFunctor()); 1479 } 1480 } 1481} 1482 1483void Heap::PreGcVerification(collector::GarbageCollector* gc) { 1484 ThreadList* thread_list = Runtime::Current()->GetThreadList(); 1485 Thread* self = Thread::Current(); 1486 1487 if (verify_pre_gc_heap_) { 1488 thread_list->SuspendAll(); 1489 { 1490 ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_); 1491 if (!VerifyHeapReferences()) { 1492 LOG(FATAL) << "Pre " << gc->GetName() << " heap verification failed"; 1493 } 1494 } 1495 thread_list->ResumeAll(); 1496 } 1497 1498 // Check that all objects which reference things in the live stack are on dirty cards. 1499 if (verify_missing_card_marks_) { 1500 thread_list->SuspendAll(); 1501 { 1502 ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_); 1503 SwapStacks(); 1504 // Sort the live stack so that we can quickly binary search it later. 
      if (!VerifyMissingCardMarks()) {
        LOG(FATAL) << "Pre " << gc->GetName() << " missing card mark verification failed";
      }
      SwapStacks();
    }
    thread_list->ResumeAll();
  }

  if (verify_mod_union_table_) {
    thread_list->SuspendAll();
    ReaderMutexLock reader_lock(self, *Locks::heap_bitmap_lock_);
    zygote_mod_union_table_->Update();
    zygote_mod_union_table_->Verify();
    image_mod_union_table_->Update();
    image_mod_union_table_->Verify();
    thread_list->ResumeAll();
  }
}

void Heap::PreSweepingGcVerification(collector::GarbageCollector* gc) {
  ThreadList* thread_list = Runtime::Current()->GetThreadList();

  // Called before sweeping occurs since we want to make sure we are not going to reclaim any
  // reachable objects.
  if (verify_post_gc_heap_) {
    Thread* self = Thread::Current();
    CHECK_NE(self->GetState(), kRunnable);
    Locks::mutator_lock_->SharedUnlock(self);
    thread_list->SuspendAll();
    {
      WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
      // Swapping bound bitmaps does nothing.
      gc->SwapBitmaps();
      if (!VerifyHeapReferences()) {
        LOG(FATAL) << "Post " << gc->GetName() << " GC verification failed";
      }
      gc->SwapBitmaps();
    }
    thread_list->ResumeAll();
    Locks::mutator_lock_->SharedLock(self);
  }
}

void Heap::PostGcVerification(collector::GarbageCollector* gc) {
  Thread* self = Thread::Current();

  if (verify_system_weaks_) {
    ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
    collector::MarkSweep* mark_sweep = down_cast<collector::MarkSweep*>(gc);
    mark_sweep->VerifySystemWeaks();
  }
}

collector::GcType Heap::WaitForConcurrentGcToComplete(Thread* self) {
  collector::GcType last_gc_type = collector::kGcTypeNone;
  if (concurrent_gc_) {
    ATRACE_BEGIN("GC: Wait For Concurrent");
    bool do_wait;
    uint64_t wait_start = NanoTime();
    {
      // Check whether a GC is running, holding gc_complete_lock_.
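      // is_gc_running_ is only sampled here under the lock; if a GC does turn out to be running,
      // the lock is re-acquired below and we sleep on gc_complete_cond_ until is_gc_running_ is
      // cleared by the thread that finishes the GC.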
      MutexLock mu(self, *gc_complete_lock_);
      do_wait = is_gc_running_;
    }
    if (do_wait) {
      uint64_t wait_time;
      // We must wait: change the thread state, then sleep on gc_complete_cond_.
      ScopedThreadStateChange tsc(Thread::Current(), kWaitingForGcToComplete);
      {
        MutexLock mu(self, *gc_complete_lock_);
        while (is_gc_running_) {
          gc_complete_cond_->Wait(self);
        }
        last_gc_type = last_gc_type_;
        wait_time = NanoTime() - wait_start;
        total_wait_time_ += wait_time;
      }
      if (wait_time > kLongGcPauseThreshold) {
        LOG(INFO) << "WaitForConcurrentGcToComplete blocked for " << PrettyDuration(wait_time);
      }
    }
    ATRACE_END();
  }
  return last_gc_type;
}

void Heap::DumpForSigQuit(std::ostream& os) {
  os << "Heap: " << GetPercentFree() << "% free, " << PrettySize(GetBytesAllocated()) << "/"
     << PrettySize(GetTotalMemory()) << "; " << GetObjectsAllocated() << " objects\n";
  DumpGcPerformanceInfo(os);
}

size_t Heap::GetPercentFree() {
  return static_cast<size_t>(100.0f * static_cast<float>(GetFreeMemory()) / GetTotalMemory());
}

void Heap::SetIdealFootprint(size_t max_allowed_footprint) {
  if (max_allowed_footprint > GetMaxMemory()) {
    VLOG(gc) << "Clamp target GC heap from " << PrettySize(max_allowed_footprint) << " to "
             << PrettySize(GetMaxMemory());
    max_allowed_footprint = GetMaxMemory();
  }
  max_allowed_footprint_ = max_allowed_footprint;
}

void Heap::UpdateMaxNativeFootprint() {
  size_t native_size = native_bytes_allocated_;
  // TODO: Tune the native heap utilization to be a value other than the Java heap utilization.
  size_t target_size = native_size / GetTargetHeapUtilization();
  if (target_size > native_size + max_free_) {
    target_size = native_size + max_free_;
  } else if (target_size < native_size + min_free_) {
    target_size = native_size + min_free_;
  }
  native_footprint_gc_watermark_ = target_size;
  native_footprint_limit_ = 2 * target_size - native_size;
}

void Heap::GrowForUtilization(collector::GcType gc_type, uint64_t gc_duration) {
  // We know what our utilization is at this moment.
  // This doesn't actually resize any memory. It just lets the heap grow more when necessary.
  const size_t bytes_allocated = GetBytesAllocated();
  last_gc_size_ = bytes_allocated;
  last_gc_time_ns_ = NanoTime();

  size_t target_size;
  if (gc_type != collector::kGcTypeSticky) {
    // Grow the heap for non-sticky GC.
    target_size = bytes_allocated / GetTargetHeapUtilization();
    if (target_size > bytes_allocated + max_free_) {
      target_size = bytes_allocated + max_free_;
    } else if (target_size < bytes_allocated + min_free_) {
      target_size = bytes_allocated + min_free_;
    }
    next_gc_type_ = collector::kGcTypeSticky;
  } else {
    // Based on how close the current heap size is to the target size, decide
    // whether or not to do a partial or sticky GC next.
    if (bytes_allocated + min_free_ <= max_allowed_footprint_) {
      next_gc_type_ = collector::kGcTypeSticky;
    } else {
      next_gc_type_ = collector::kGcTypePartial;
    }

    // If we have freed enough memory, shrink the heap back down.
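    // Illustrative numbers (hypothetical): with bytes_allocated = 8MB, max_free_ = 2MB and
    // max_allowed_footprint_ = 12MB, 8MB + 2MB < 12MB, so the footprint shrinks to 10MB;
    // otherwise it simply stays at max(bytes_allocated, max_allowed_footprint_).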
    if (bytes_allocated + max_free_ < max_allowed_footprint_) {
      target_size = bytes_allocated + max_free_;
    } else {
      target_size = std::max(bytes_allocated, max_allowed_footprint_);
    }
  }
  SetIdealFootprint(target_size);

  // Calculate when to perform the next ConcurrentGC.
  if (concurrent_gc_) {
    // Calculate the estimated GC duration.
    double gc_duration_seconds = NsToMs(gc_duration) / 1000.0;
    // Estimate how many remaining bytes we will have when we need to start the next GC.
    size_t remaining_bytes = allocation_rate_ * gc_duration_seconds;
    remaining_bytes = std::max(remaining_bytes, kMinConcurrentRemainingBytes);
    if (UNLIKELY(remaining_bytes > max_allowed_footprint_)) {
      // A situation that should never happen: the estimated allocation rate implies we would
      // exceed the application's entire footprint before the GC could finish. Schedule another
      // GC straight away.
      concurrent_start_bytes_ = bytes_allocated;
    } else {
      // Start a concurrent GC when we get close to the estimated remaining bytes. When the
      // allocation rate is very high, remaining_bytes could tell us that we should start a GC
      // right away.
      concurrent_start_bytes_ = std::max(max_allowed_footprint_ - remaining_bytes, bytes_allocated);
    }
    DCHECK_LE(concurrent_start_bytes_, max_allowed_footprint_);
    DCHECK_LE(max_allowed_footprint_, growth_limit_);
  }

  UpdateMaxNativeFootprint();
}

void Heap::ClearGrowthLimit() {
  growth_limit_ = capacity_;
  alloc_space_->ClearGrowthLimit();
}

void Heap::SetReferenceOffsets(MemberOffset reference_referent_offset,
                               MemberOffset reference_queue_offset,
                               MemberOffset reference_queueNext_offset,
                               MemberOffset reference_pendingNext_offset,
                               MemberOffset finalizer_reference_zombie_offset) {
  reference_referent_offset_ = reference_referent_offset;
  reference_queue_offset_ = reference_queue_offset;
  reference_queueNext_offset_ = reference_queueNext_offset;
  reference_pendingNext_offset_ = reference_pendingNext_offset;
  finalizer_reference_zombie_offset_ = finalizer_reference_zombie_offset;
  CHECK_NE(reference_referent_offset_.Uint32Value(), 0U);
  CHECK_NE(reference_queue_offset_.Uint32Value(), 0U);
  CHECK_NE(reference_queueNext_offset_.Uint32Value(), 0U);
  CHECK_NE(reference_pendingNext_offset_.Uint32Value(), 0U);
  CHECK_NE(finalizer_reference_zombie_offset_.Uint32Value(), 0U);
}

mirror::Object* Heap::GetReferenceReferent(mirror::Object* reference) {
  DCHECK(reference != NULL);
  DCHECK_NE(reference_referent_offset_.Uint32Value(), 0U);
  return reference->GetFieldObject<mirror::Object*>(reference_referent_offset_, true);
}

void Heap::ClearReferenceReferent(mirror::Object* reference) {
  DCHECK(reference != NULL);
  DCHECK_NE(reference_referent_offset_.Uint32Value(), 0U);
  reference->SetFieldObject(reference_referent_offset_, NULL, true);
}

// Returns true if the reference object has not yet been enqueued.
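// (Reading of the fields checked below: "queue" is the java.lang.ref.ReferenceQueue supplied when
// the reference was constructed, NULL if none, and "queueNext" is only set once the reference has
// actually been put on that queue, so enqueuable means "has a queue but is not yet on it".)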
bool Heap::IsEnqueuable(const mirror::Object* ref) {
  DCHECK(ref != NULL);
  const mirror::Object* queue =
      ref->GetFieldObject<mirror::Object*>(reference_queue_offset_, false);
  const mirror::Object* queue_next =
      ref->GetFieldObject<mirror::Object*>(reference_queueNext_offset_, false);
  return (queue != NULL) && (queue_next == NULL);
}

void Heap::EnqueueReference(mirror::Object* ref, mirror::Object** cleared_reference_list) {
  DCHECK(ref != NULL);
  CHECK(ref->GetFieldObject<mirror::Object*>(reference_queue_offset_, false) != NULL);
  CHECK(ref->GetFieldObject<mirror::Object*>(reference_queueNext_offset_, false) == NULL);
  EnqueuePendingReference(ref, cleared_reference_list);
}

void Heap::EnqueuePendingReference(mirror::Object* ref, mirror::Object** list) {
  DCHECK(ref != NULL);
  DCHECK(list != NULL);

  // TODO: Remove this lock, use atomic stacks for storing references.
  MutexLock mu(Thread::Current(), *reference_queue_lock_);
  if (*list == NULL) {
    ref->SetFieldObject(reference_pendingNext_offset_, ref, false);
    *list = ref;
  } else {
    mirror::Object* head =
        (*list)->GetFieldObject<mirror::Object*>(reference_pendingNext_offset_, false);
    ref->SetFieldObject(reference_pendingNext_offset_, head, false);
    (*list)->SetFieldObject(reference_pendingNext_offset_, ref, false);
  }
}

mirror::Object* Heap::DequeuePendingReference(mirror::Object** list) {
  DCHECK(list != NULL);
  DCHECK(*list != NULL);
  mirror::Object* head = (*list)->GetFieldObject<mirror::Object*>(reference_pendingNext_offset_,
                                                                  false);
  mirror::Object* ref;

  // Note: the following code is thread-safe because it is only called from ProcessReferences,
  // which is single-threaded.
  if (*list == head) {
    ref = *list;
    *list = NULL;
  } else {
    mirror::Object* next = head->GetFieldObject<mirror::Object*>(reference_pendingNext_offset_,
                                                                 false);
    (*list)->SetFieldObject(reference_pendingNext_offset_, next, false);
    ref = head;
  }
  ref->SetFieldObject(reference_pendingNext_offset_, NULL, false);
  return ref;
}

void Heap::AddFinalizerReference(Thread* self, mirror::Object* object) {
  ScopedObjectAccess soa(self);
  JValue result;
  ArgArray arg_array(NULL, 0);
  arg_array.Append(reinterpret_cast<uint32_t>(object));
  soa.DecodeMethod(WellKnownClasses::java_lang_ref_FinalizerReference_add)->Invoke(self,
      arg_array.GetArray(), arg_array.GetNumBytes(), &result, 'V');
}

void Heap::EnqueueClearedReferences(mirror::Object** cleared) {
  DCHECK(cleared != NULL);
  if (*cleared != NULL) {
    // When the runtime isn't started, there are no reference queues to care about, so ignore.
    if (LIKELY(Runtime::Current()->IsStarted())) {
      ScopedObjectAccess soa(Thread::Current());
      JValue result;
      ArgArray arg_array(NULL, 0);
      arg_array.Append(reinterpret_cast<uint32_t>(*cleared));
      soa.DecodeMethod(WellKnownClasses::java_lang_ref_ReferenceQueue_add)->Invoke(soa.Self(),
          arg_array.GetArray(), arg_array.GetNumBytes(), &result, 'V');
    }
    *cleared = NULL;
  }
}

void Heap::RequestConcurrentGC(Thread* self) {
  // Make sure that we can do a concurrent GC.
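  // The request is indirect: we call java.lang.Daemons.requestGC() below, and the GC daemon is
  // expected to call back into Heap::ConcurrentGC() (defined further down), which waits for any
  // in-progress collection and then runs a GC of next_gc_type_.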
  Runtime* runtime = Runtime::Current();
  DCHECK(concurrent_gc_);
  if (runtime == NULL || !runtime->IsFinishedStarting() ||
      !runtime->IsConcurrentGcEnabled()) {
    return;
  }
  {
    MutexLock mu(self, *Locks::runtime_shutdown_lock_);
    if (runtime->IsShuttingDown()) {
      return;
    }
  }
  if (self->IsHandlingStackOverflow()) {
    return;
  }

  // Mark a request as pending: there is no reason to request more GCs until we update
  // concurrent_start_bytes_ again.
  concurrent_start_bytes_ = std::numeric_limits<size_t>::max();

  JNIEnv* env = self->GetJniEnv();
  DCHECK(WellKnownClasses::java_lang_Daemons != NULL);
  DCHECK(WellKnownClasses::java_lang_Daemons_requestGC != NULL);
  env->CallStaticVoidMethod(WellKnownClasses::java_lang_Daemons,
                            WellKnownClasses::java_lang_Daemons_requestGC);
  CHECK(!env->ExceptionCheck());
}

void Heap::ConcurrentGC(Thread* self) {
  {
    MutexLock mu(self, *Locks::runtime_shutdown_lock_);
    if (Runtime::Current()->IsShuttingDown()) {
      return;
    }
  }

  // Wait for any GCs currently running to finish.
  if (WaitForConcurrentGcToComplete(self) == collector::kGcTypeNone) {
    CollectGarbageInternal(next_gc_type_, kGcCauseBackground, false);
  }
}

void Heap::RequestHeapTrim() {
  // A GC has completed, and now we must decide whether to request a heap trim (advising pages back
  // to the kernel) or not. Issuing a request will also cause trimming of the libc heap. As a trim
  // scans a space it will hold its lock, so it can become a cause of jank.
  // Note: the large object space self-trims, and the Zygote space was trimmed at fork time and is
  // unchanging since then.

  // We don't have a good measure of how worthwhile a trim might be. We can't use the live bitmap
  // because that only marks object heads, so a large array looks like lots of empty space. We
  // don't just ask dlmalloc to trim all the time, because the cost of an _attempted_ trim is
  // proportional to utilization (which is probably inversely proportional to how much benefit we
  // can expect). We could try mincore(2) but that's only a measure of how many pages we haven't
  // given away, not how much use we're making of those pages.
  uint64_t ms_time = MilliTime();
  float utilization =
      static_cast<float>(alloc_space_->GetBytesAllocated()) / alloc_space_->Size();
  if ((utilization > 0.75f) || ((ms_time - last_trim_time_ms_) < 2 * 1000)) {
    // Don't bother trimming the alloc space if it's more than 75% utilized, or if a
    // heap trim occurred in the last two seconds.
    return;
  }

  Thread* self = Thread::Current();
  {
    MutexLock mu(self, *Locks::runtime_shutdown_lock_);
    Runtime* runtime = Runtime::Current();
    if (runtime == NULL || !runtime->IsFinishedStarting() || runtime->IsShuttingDown()) {
      // Heap trimming isn't supported without a Java runtime or Daemons (such as at dex2oat time).
      // Also: we do not wish to start a heap trim if the runtime is shutting down (a racy check
      // as we don't hold the lock while requesting the trim).
      return;
    }
  }

  SchedPolicy policy;
  get_sched_policy(self->GetTid(), &policy);
  if (policy == SP_FOREGROUND || policy == SP_AUDIO_APP) {
    // Don't trim the heap if we are a foreground or audio app.
    return;
  }

  last_trim_time_ms_ = ms_time;
  JNIEnv* env = self->GetJniEnv();
  DCHECK(WellKnownClasses::java_lang_Daemons != NULL);
  DCHECK(WellKnownClasses::java_lang_Daemons_requestHeapTrim != NULL);
  env->CallStaticVoidMethod(WellKnownClasses::java_lang_Daemons,
                            WellKnownClasses::java_lang_Daemons_requestHeapTrim);
  CHECK(!env->ExceptionCheck());
}

size_t Heap::Trim() {
  // Handle a requested heap trim on a thread outside of the main GC thread.
  return alloc_space_->Trim();
}

bool Heap::IsGCRequestPending() const {
  return concurrent_start_bytes_ != std::numeric_limits<size_t>::max();
}

void Heap::RegisterNativeAllocation(int bytes) {
  // Total number of native bytes allocated.
  native_bytes_allocated_.fetch_add(bytes);
  Thread* self = Thread::Current();
  if (static_cast<size_t>(native_bytes_allocated_) > native_footprint_gc_watermark_) {
    // The second watermark is higher than the GC watermark. If you hit this, it means you are
    // allocating native objects faster than the GC can keep up with.
    if (static_cast<size_t>(native_bytes_allocated_) > native_footprint_limit_) {
      JNIEnv* env = self->GetJniEnv();
      // Can't do this in WellKnownClasses::Init since System is not properly set up at that
      // point.
      if (WellKnownClasses::java_lang_System_runFinalization == NULL) {
        DCHECK(WellKnownClasses::java_lang_System != NULL);
        WellKnownClasses::java_lang_System_runFinalization =
            CacheMethod(env, WellKnownClasses::java_lang_System, true, "runFinalization", "()V");
        assert(WellKnownClasses::java_lang_System_runFinalization != NULL);
      }
      if (WaitForConcurrentGcToComplete(self) != collector::kGcTypeNone) {
        // Just finished a GC, attempt to run finalizers.
        env->CallStaticVoidMethod(WellKnownClasses::java_lang_System,
                                  WellKnownClasses::java_lang_System_runFinalization);
        CHECK(!env->ExceptionCheck());
      }

      // If we are still over the watermark, attempt a GC for alloc and run finalizers.
      if (static_cast<size_t>(native_bytes_allocated_) > native_footprint_limit_) {
        CollectGarbageInternal(collector::kGcTypePartial, kGcCauseForAlloc, false);
        env->CallStaticVoidMethod(WellKnownClasses::java_lang_System,
                                  WellKnownClasses::java_lang_System_runFinalization);
        CHECK(!env->ExceptionCheck());
      }
      // We have just run finalizers; update the native watermark since it is very likely that
      // finalizers released natively managed allocations.
      UpdateMaxNativeFootprint();
    } else {
      if (!IsGCRequestPending()) {
        RequestConcurrentGC(self);
      }
    }
  }
}

void Heap::RegisterNativeFree(int bytes) {
  int expected_size, new_size;
  do {
    expected_size = native_bytes_allocated_.load();
    new_size = expected_size - bytes;
    if (new_size < 0) {
      ThrowRuntimeException("attempted to free %d native bytes with only %d native bytes registered as allocated",
                            bytes, expected_size);
      break;
    }
  } while (!native_bytes_allocated_.compare_and_swap(expected_size, new_size));
}

int64_t Heap::GetTotalMemory() const {
  int64_t ret = 0;
  typedef std::vector<space::ContinuousSpace*>::const_iterator It;
  for (It it = continuous_spaces_.begin(), end = continuous_spaces_.end(); it != end; ++it) {
    space::ContinuousSpace* space = *it;
    if (space->IsImageSpace()) {
      // Currently don't include the image space.
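      // (The image space is mapped from the boot image and shared between processes, so it is
      // arguably not part of this heap's footprint.)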
    } else if (space->IsDlMallocSpace()) {
      // Zygote or alloc space.
      ret += space->AsDlMallocSpace()->GetFootprint();
    }
  }
  typedef std::vector<space::DiscontinuousSpace*>::const_iterator It2;
  for (It2 it = discontinuous_spaces_.begin(), end = discontinuous_spaces_.end(); it != end; ++it) {
    space::DiscontinuousSpace* space = *it;
    if (space->IsLargeObjectSpace()) {
      ret += space->AsLargeObjectSpace()->GetBytesAllocated();
    }
  }
  return ret;
}

}  // namespace gc
}  // namespace art
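// Illustrative (hypothetical) native-side caller for the native allocation accounting above,
// sketching how RegisterNativeAllocation/RegisterNativeFree are typically reached (e.g. from the
// JNI code backing dalvik.system.VMRuntime.registerNativeAllocation); the call site shown here is
// assumed, not part of this file:
//
//   gc::Heap* heap = Runtime::Current()->GetHeap();
//   heap->RegisterNativeAllocation(buffer_size);  // account for a natively allocated buffer
//   // ... native buffer in use ...
//   heap->RegisterNativeFree(buffer_size);        // release the accounting when the buffer dies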