heap.cc revision 373a9b5c718a45ac484afcf4fe6ce84f4bb562b3
/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "heap.h"

#include <limits>
#include <memory>
#include <vector>

#include "android-base/stringprintf.h"

#include "allocation_listener.h"
#include "art_field-inl.h"
#include "backtrace_helper.h"
#include "base/allocator.h"
#include "base/arena_allocator.h"
#include "base/dumpable.h"
#include "base/histogram-inl.h"
#include "base/memory_tool.h"
#include "base/stl_util.h"
#include "base/systrace.h"
#include "base/time_utils.h"
#include "common_throws.h"
#include "cutils/sched_policy.h"
#include "debugger.h"
#include "dex_file-inl.h"
#include "entrypoints/quick/quick_alloc_entrypoints.h"
#include "gc/accounting/card_table-inl.h"
#include "gc/accounting/heap_bitmap-inl.h"
#include "gc/accounting/mod_union_table-inl.h"
#include "gc/accounting/read_barrier_table.h"
#include "gc/accounting/remembered_set.h"
#include "gc/accounting/space_bitmap-inl.h"
#include "gc/collector/concurrent_copying.h"
#include "gc/collector/mark_compact.h"
#include "gc/collector/mark_sweep.h"
#include "gc/collector/partial_mark_sweep.h"
#include "gc/collector/semi_space.h"
#include "gc/collector/sticky_mark_sweep.h"
#include "gc/reference_processor.h"
#include "gc/scoped_gc_critical_section.h"
#include "gc/space/bump_pointer_space.h"
#include "gc/space/dlmalloc_space-inl.h"
#include "gc/space/image_space.h"
#include "gc/space/large_object_space.h"
#include "gc/space/region_space.h"
#include "gc/space/rosalloc_space-inl.h"
#include "gc/space/space-inl.h"
#include "gc/space/zygote_space.h"
#include "gc/task_processor.h"
#include "gc/verification.h"
#include "gc_pause_listener.h"
#include "gc_root.h"
#include "handle_scope-inl.h"
#include "heap-inl.h"
#include "heap-visit-objects-inl.h"
#include "image.h"
#include "intern_table.h"
#include "java_vm_ext.h"
#include "jit/jit.h"
#include "jit/jit_code_cache.h"
#include "mirror/class-inl.h"
#include "mirror/object-inl.h"
#include "mirror/object-refvisitor-inl.h"
#include "mirror/object_array-inl.h"
#include "mirror/reference-inl.h"
#include "nativehelper/scoped_local_ref.h"
#include "obj_ptr-inl.h"
#include "os.h"
#include "reflection.h"
#include "runtime.h"
#include "scoped_thread_state_change-inl.h"
#include "thread_list.h"
#include "verify_object-inl.h"
#include "well_known_classes.h"

namespace art {

namespace gc {

static constexpr size_t kCollectorTransitionStressIterations = 0;
static constexpr size_t kCollectorTransitionStressWait = 10 * 1000;  // Microseconds

DEFINE_RUNTIME_DEBUG_FLAG(Heap, kStressCollectorTransition);

// Minimum amount of remaining bytes before a concurrent GC is triggered.
static constexpr size_t kMinConcurrentRemainingBytes = 128 * KB;
static constexpr size_t kMaxConcurrentRemainingBytes = 512 * KB;
// Sticky GC throughput adjustment, divided by 4. Increasing this causes sticky GC to occur more
// relative to partial/full GC. This may be desirable since sticky GCs interfere less with mutator
// threads (lower pauses, use less memory bandwidth).
static constexpr double kStickyGcThroughputAdjustment = 1.0;
// Whether or not we compact the zygote in PreZygoteFork.
static constexpr bool kCompactZygote = kMovingCollector;
// How many reserve entries are at the end of the allocation stack; these are only needed if the
// allocation stack overflows.
static constexpr size_t kAllocationStackReserveSize = 1024;
// Default mark stack size in bytes.
static const size_t kDefaultMarkStackSize = 64 * KB;
// Define space names.
static const char* kDlMallocSpaceName[2] = {"main dlmalloc space", "main dlmalloc space 1"};
static const char* kRosAllocSpaceName[2] = {"main rosalloc space", "main rosalloc space 1"};
static const char* kMemMapSpaceName[2] = {"main space", "main space 1"};
static const char* kNonMovingSpaceName = "non moving space";
static const char* kZygoteSpaceName = "zygote space";
static constexpr size_t kGSSBumpPointerSpaceCapacity = 32 * MB;
static constexpr bool kGCALotMode = false;
// GC alot mode uses a small allocation stack to stress test a lot of GC.
static constexpr size_t kGcAlotAllocationStackSize = 4 * KB /
    sizeof(mirror::HeapReference<mirror::Object>);
// Verify object mode uses a small allocation stack size since searching the allocation stack is
// slow.
static constexpr size_t kVerifyObjectAllocationStackSize = 16 * KB /
    sizeof(mirror::HeapReference<mirror::Object>);
static constexpr size_t kDefaultAllocationStackSize = 8 * MB /
    sizeof(mirror::HeapReference<mirror::Object>);
// System.runFinalization can deadlock with native allocations; to deal with this, we have a
// timeout on how long we wait for finalizers to run. b/21544853
static constexpr uint64_t kNativeAllocationFinalizeTimeout = MsToNs(250u);

// For deterministic compilation, we need the heap to be at a well-known address.
static constexpr uint32_t kAllocSpaceBeginForDeterministicAoT = 0x40000000;
// Dump the rosalloc stats on SIGQUIT.
static constexpr bool kDumpRosAllocStatsOnSigQuit = false;

static const char* kRegionSpaceName = "main space (region space)";

// If true, we log all GCs in both the foreground and background. Used for debugging.
static constexpr bool kLogAllGCs = false;

// How much we grow the TLAB if we can do it.
static constexpr size_t kPartialTlabSize = 16 * KB;
static constexpr bool kUsePartialTlabs = true;

#if defined(__LP64__) || !defined(ADDRESS_SANITIZER)
// 300 MB (0x12c00000) - (default non-moving space capacity).
static uint8_t* const kPreferredAllocSpaceBegin =
    reinterpret_cast<uint8_t*>(300 * MB - Heap::kDefaultNonMovingSpaceCapacity);
#else
#ifdef __ANDROID__
// For 32-bit Android, use 0x20000000 because asan reserves 0x04000000 - 0x20000000.
static uint8_t* const kPreferredAllocSpaceBegin = reinterpret_cast<uint8_t*>(0x20000000);
#else
// For 32-bit host, use 0x40000000 because asan uses most of the space below this.
static uint8_t* const kPreferredAllocSpaceBegin = reinterpret_cast<uint8_t*>(0x40000000);
#endif
#endif
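
// Returns true when GC pauses may be noticed by the user, i.e. the process is in a
// jank-perceptible (foreground) state. Trimming, homogeneous space compaction, and collector
// transitions below all consult this before doing pause-heavy work.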
static inline bool CareAboutPauseTimes() {
  return Runtime::Current()->InJankPerceptibleProcessState();
}

Heap::Heap(size_t initial_size,
           size_t growth_limit,
           size_t min_free,
           size_t max_free,
           double target_utilization,
           double foreground_heap_growth_multiplier,
           size_t capacity,
           size_t non_moving_space_capacity,
           const std::string& image_file_name,
           const InstructionSet image_instruction_set,
           CollectorType foreground_collector_type,
           CollectorType background_collector_type,
           space::LargeObjectSpaceType large_object_space_type,
           size_t large_object_threshold,
           size_t parallel_gc_threads,
           size_t conc_gc_threads,
           bool low_memory_mode,
           size_t long_pause_log_threshold,
           size_t long_gc_log_threshold,
           bool ignore_max_footprint,
           bool use_tlab,
           bool verify_pre_gc_heap,
           bool verify_pre_sweeping_heap,
           bool verify_post_gc_heap,
           bool verify_pre_gc_rosalloc,
           bool verify_pre_sweeping_rosalloc,
           bool verify_post_gc_rosalloc,
           bool gc_stress_mode,
           bool measure_gc_performance,
           bool use_homogeneous_space_compaction_for_oom,
           uint64_t min_interval_homogeneous_space_compaction_by_oom)
    : non_moving_space_(nullptr),
      rosalloc_space_(nullptr),
      dlmalloc_space_(nullptr),
      main_space_(nullptr),
      collector_type_(kCollectorTypeNone),
      foreground_collector_type_(foreground_collector_type),
      background_collector_type_(background_collector_type),
      desired_collector_type_(foreground_collector_type_),
      pending_task_lock_(nullptr),
      parallel_gc_threads_(parallel_gc_threads),
      conc_gc_threads_(conc_gc_threads),
      low_memory_mode_(low_memory_mode),
      long_pause_log_threshold_(long_pause_log_threshold),
      long_gc_log_threshold_(long_gc_log_threshold),
      ignore_max_footprint_(ignore_max_footprint),
      zygote_creation_lock_("zygote creation lock", kZygoteCreationLock),
      zygote_space_(nullptr),
      large_object_threshold_(large_object_threshold),
      disable_thread_flip_count_(0),
      thread_flip_running_(false),
      collector_type_running_(kCollectorTypeNone),
      last_gc_cause_(kGcCauseNone),
      thread_running_gc_(nullptr),
      last_gc_type_(collector::kGcTypeNone),
      next_gc_type_(collector::kGcTypePartial),
      capacity_(capacity),
      growth_limit_(growth_limit),
      max_allowed_footprint_(initial_size),
      concurrent_start_bytes_(std::numeric_limits<size_t>::max()),
      total_bytes_freed_ever_(0),
      total_objects_freed_ever_(0),
      num_bytes_allocated_(0),
      new_native_bytes_allocated_(0),
      old_native_bytes_allocated_(0),
      num_bytes_freed_revoke_(0),
      verify_missing_card_marks_(false),
      verify_system_weaks_(false),
      verify_pre_gc_heap_(verify_pre_gc_heap),
      verify_pre_sweeping_heap_(verify_pre_sweeping_heap),
      verify_post_gc_heap_(verify_post_gc_heap),
      verify_mod_union_table_(false),
      verify_pre_gc_rosalloc_(verify_pre_gc_rosalloc),
      verify_pre_sweeping_rosalloc_(verify_pre_sweeping_rosalloc),
      verify_post_gc_rosalloc_(verify_post_gc_rosalloc),
      gc_stress_mode_(gc_stress_mode),
      /* For GC a lot mode, we limit the allocation stacks to be kGcAlotInterval allocations. This
       * causes a lot of GC since we do a GC for alloc whenever the stack is full. When heap
       * verification is enabled, we limit the size of allocation stacks to speed up their
       * searching.
       */
      max_allocation_stack_size_(kGCALotMode
          ? kGcAlotAllocationStackSize
          : (kVerifyObjectSupport > kVerifyObjectModeFast) ? kVerifyObjectAllocationStackSize
                                                           : kDefaultAllocationStackSize),
      current_allocator_(kAllocatorTypeDlMalloc),
      current_non_moving_allocator_(kAllocatorTypeNonMoving),
      bump_pointer_space_(nullptr),
      temp_space_(nullptr),
      region_space_(nullptr),
      min_free_(min_free),
      max_free_(max_free),
      target_utilization_(target_utilization),
      foreground_heap_growth_multiplier_(foreground_heap_growth_multiplier),
      total_wait_time_(0),
      verify_object_mode_(kVerifyObjectModeDisabled),
      disable_moving_gc_count_(0),
      semi_space_collector_(nullptr),
      mark_compact_collector_(nullptr),
      concurrent_copying_collector_(nullptr),
      is_running_on_memory_tool_(Runtime::Current()->IsRunningOnMemoryTool()),
      use_tlab_(use_tlab),
      main_space_backup_(nullptr),
      min_interval_homogeneous_space_compaction_by_oom_(
          min_interval_homogeneous_space_compaction_by_oom),
      last_time_homogeneous_space_compaction_by_oom_(NanoTime()),
      pending_collector_transition_(nullptr),
      pending_heap_trim_(nullptr),
      use_homogeneous_space_compaction_for_oom_(use_homogeneous_space_compaction_for_oom),
      running_collection_is_blocking_(false),
      blocking_gc_count_(0U),
      blocking_gc_time_(0U),
      last_update_time_gc_count_rate_histograms_(  // Round down by the window duration.
          (NanoTime() / kGcCountRateHistogramWindowDuration) * kGcCountRateHistogramWindowDuration),
      gc_count_last_window_(0U),
      blocking_gc_count_last_window_(0U),
      gc_count_rate_histogram_("gc count rate histogram", 1U, kGcCountRateMaxBucketCount),
      blocking_gc_count_rate_histogram_("blocking gc count rate histogram", 1U,
                                        kGcCountRateMaxBucketCount),
      alloc_tracking_enabled_(false),
      backtrace_lock_(nullptr),
      seen_backtrace_count_(0u),
      unique_backtrace_count_(0u),
      gc_disabled_for_shutdown_(false) {
  if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
    LOG(INFO) << "Heap() entering";
  }
  if (kUseReadBarrier) {
    CHECK_EQ(foreground_collector_type_, kCollectorTypeCC);
    CHECK_EQ(background_collector_type_, kCollectorTypeCCBackground);
  }
  verification_.reset(new Verification(this));
  CHECK_GE(large_object_threshold, kMinLargeObjectThreshold);
  ScopedTrace trace(__FUNCTION__);
  Runtime* const runtime = Runtime::Current();
  // If we aren't the zygote, switch to the default non zygote allocator. This may update the
  // entrypoints.
  const bool is_zygote = runtime->IsZygote();
  if (!is_zygote) {
    // Background compaction is currently not supported for command line runs.
    if (background_collector_type_ != foreground_collector_type_) {
      VLOG(heap) << "Disabling background compaction for non zygote";
      background_collector_type_ = foreground_collector_type_;
    }
  }
  ChangeCollector(desired_collector_type_);
  live_bitmap_.reset(new accounting::HeapBitmap(this));
  mark_bitmap_.reset(new accounting::HeapBitmap(this));
  // Requested begin for the alloc space, to follow the mapped image and oat files.
  uint8_t* requested_alloc_space_begin = nullptr;
  if (foreground_collector_type_ == kCollectorTypeCC) {
    // Need to use a low address so that we can allocate a contiguous 2 * Xmx space when there's no
    // image (dex2oat for target).
    requested_alloc_space_begin = kPreferredAllocSpaceBegin;
  }
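
  // On success, LoadBootImage also updates requested_alloc_space_begin to point past the mapped
  // image and oat files, so that the runtime spaces created below end up adjacent to them.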
  // Load image space(s).
  if (space::ImageSpace::LoadBootImage(image_file_name,
                                       image_instruction_set,
                                       &boot_image_spaces_,
                                       &requested_alloc_space_begin)) {
    for (auto space : boot_image_spaces_) {
      AddSpace(space);
    }
  }

  /*
  requested_alloc_space_begin ->  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-
                                  +-  nonmoving space (non_moving_space_capacity)+-
                                  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-
                                  +-????????????????????????????????????????????+-
                                  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-
                                  +-main alloc space / bump space 1 (capacity_) +-
                                  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-
                                  +-????????????????????????????????????????????+-
                                  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-
                                  +-main alloc space2 / bump space 2 (capacity_)+-
                                  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-
  */
  // We don't have hspace compaction enabled with GSS or CC.
  if (foreground_collector_type_ == kCollectorTypeGSS ||
      foreground_collector_type_ == kCollectorTypeCC) {
    use_homogeneous_space_compaction_for_oom_ = false;
  }
  bool support_homogeneous_space_compaction =
      background_collector_type_ == gc::kCollectorTypeHomogeneousSpaceCompact ||
      use_homogeneous_space_compaction_for_oom_;
  // We may use the same space as the main space for the non moving space if we don't need to
  // compact from the main space.
  // This is not the case if we support homogeneous compaction or have a moving background
  // collector type.
  bool separate_non_moving_space = is_zygote ||
      support_homogeneous_space_compaction || IsMovingGc(foreground_collector_type_) ||
      IsMovingGc(background_collector_type_);
  if (foreground_collector_type_ == kCollectorTypeGSS) {
    separate_non_moving_space = false;
  }
  std::unique_ptr<MemMap> main_mem_map_1;
  std::unique_ptr<MemMap> main_mem_map_2;
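  // main_mem_map_1 backs the main (or first bump pointer) space. main_mem_map_2 is only created
  // when some form of copying compaction may run; below it becomes either bump pointer space 2 or
  // the main space backup that compaction copies into.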

  // Gross hack to make dex2oat deterministic.
  if (foreground_collector_type_ == kCollectorTypeMS &&
      requested_alloc_space_begin == nullptr &&
      Runtime::Current()->IsAotCompiler()) {
    // Currently only enabled for MS collector since that is what the deterministic dex2oat uses.
    // b/26849108
    requested_alloc_space_begin = reinterpret_cast<uint8_t*>(kAllocSpaceBeginForDeterministicAoT);
  }
  uint8_t* request_begin = requested_alloc_space_begin;
  if (request_begin != nullptr && separate_non_moving_space) {
    request_begin += non_moving_space_capacity;
  }
  std::string error_str;
  std::unique_ptr<MemMap> non_moving_space_mem_map;
  if (separate_non_moving_space) {
    ScopedTrace trace2("Create separate non moving space");
    // If we are the zygote, the non moving space becomes the zygote space when we run
    // PreZygoteFork the first time. In this case, call the map "zygote space" since we can't
    // rename the mem map later.
    const char* space_name = is_zygote ? kZygoteSpaceName : kNonMovingSpaceName;
    // Reserve the non moving mem map before the other two since it needs to be at a specific
    // address.
    non_moving_space_mem_map.reset(
        MemMap::MapAnonymous(space_name, requested_alloc_space_begin,
                             non_moving_space_capacity, PROT_READ | PROT_WRITE, true, false,
                             &error_str));
    CHECK(non_moving_space_mem_map != nullptr) << error_str;
    // Try to reserve virtual memory at a lower address if we have a separate non moving space.
    request_begin = kPreferredAllocSpaceBegin + non_moving_space_capacity;
  }
  // Attempt to create 2 mem maps at or after the requested begin.
  if (foreground_collector_type_ != kCollectorTypeCC) {
    ScopedTrace trace2("Create main mem map");
    if (separate_non_moving_space || !is_zygote) {
      main_mem_map_1.reset(MapAnonymousPreferredAddress(kMemMapSpaceName[0],
                                                        request_begin,
                                                        capacity_,
                                                        &error_str));
    } else {
      // If no separate non-moving space and we are the zygote, the main space must come right
      // after the image space to avoid a gap. This is required since we want the zygote space to
      // be adjacent to the image space.
      main_mem_map_1.reset(MemMap::MapAnonymous(kMemMapSpaceName[0], request_begin, capacity_,
                                                PROT_READ | PROT_WRITE, true, false,
                                                &error_str));
    }
    CHECK(main_mem_map_1.get() != nullptr) << error_str;
  }
  if (support_homogeneous_space_compaction ||
      background_collector_type_ == kCollectorTypeSS ||
      foreground_collector_type_ == kCollectorTypeSS) {
    ScopedTrace trace2("Create main mem map 2");
    main_mem_map_2.reset(MapAnonymousPreferredAddress(kMemMapSpaceName[1], main_mem_map_1->End(),
                                                      capacity_, &error_str));
    CHECK(main_mem_map_2.get() != nullptr) << error_str;
  }

  // Create the non moving space first so that bitmaps don't take up the address range.
  if (separate_non_moving_space) {
    ScopedTrace trace2("Add non moving space");
    // Non moving space is always dlmalloc since we currently don't have support for multiple
    // active rosalloc spaces.
    const size_t size = non_moving_space_mem_map->Size();
    non_moving_space_ = space::DlMallocSpace::CreateFromMemMap(
        non_moving_space_mem_map.release(), "zygote / non moving space", kDefaultStartingSize,
        initial_size, size, size, false);
    non_moving_space_->SetFootprintLimit(non_moving_space_->Capacity());
    CHECK(non_moving_space_ != nullptr) << "Failed creating non moving space "
                                        << requested_alloc_space_begin;
    AddSpace(non_moving_space_);
  }
  // Create other spaces based on whether or not we have a moving GC.
  if (foreground_collector_type_ == kCollectorTypeCC) {
    CHECK(separate_non_moving_space);
    MemMap* region_space_mem_map = space::RegionSpace::CreateMemMap(kRegionSpaceName,
                                                                    capacity_ * 2,
                                                                    request_begin);
    CHECK(region_space_mem_map != nullptr) << "No region space mem map";
    region_space_ = space::RegionSpace::Create(kRegionSpaceName, region_space_mem_map);
    AddSpace(region_space_);
  } else if (IsMovingGc(foreground_collector_type_) &&
             foreground_collector_type_ != kCollectorTypeGSS) {
    // Create bump pointer spaces.
    // We only need to create the bump pointer spaces if the foreground collector is a
    // compacting GC.
    // TODO: Place bump-pointer spaces somewhere to minimize size of card table.
    bump_pointer_space_ = space::BumpPointerSpace::CreateFromMemMap("Bump pointer space 1",
                                                                    main_mem_map_1.release());
    CHECK(bump_pointer_space_ != nullptr) << "Failed to create bump pointer space";
    AddSpace(bump_pointer_space_);
    temp_space_ = space::BumpPointerSpace::CreateFromMemMap("Bump pointer space 2",
                                                            main_mem_map_2.release());
    CHECK(temp_space_ != nullptr) << "Failed to create bump pointer space";
    AddSpace(temp_space_);
    CHECK(separate_non_moving_space);
  } else {
    CreateMainMallocSpace(main_mem_map_1.release(), initial_size, growth_limit_, capacity_);
    CHECK(main_space_ != nullptr);
    AddSpace(main_space_);
    if (!separate_non_moving_space) {
      non_moving_space_ = main_space_;
      CHECK(!non_moving_space_->CanMoveObjects());
    }
    if (foreground_collector_type_ == kCollectorTypeGSS) {
      CHECK_EQ(foreground_collector_type_, background_collector_type_);
      // Create bump pointer spaces instead of a backup space.
      main_mem_map_2.release();
      bump_pointer_space_ = space::BumpPointerSpace::Create("Bump pointer space 1",
                                                            kGSSBumpPointerSpaceCapacity, nullptr);
      CHECK(bump_pointer_space_ != nullptr);
      AddSpace(bump_pointer_space_);
      temp_space_ = space::BumpPointerSpace::Create("Bump pointer space 2",
                                                    kGSSBumpPointerSpaceCapacity, nullptr);
      CHECK(temp_space_ != nullptr);
      AddSpace(temp_space_);
    } else if (main_mem_map_2.get() != nullptr) {
      const char* name = kUseRosAlloc ? kRosAllocSpaceName[1] : kDlMallocSpaceName[1];
      main_space_backup_.reset(CreateMallocSpaceFromMemMap(main_mem_map_2.release(), initial_size,
                                                           growth_limit_, capacity_, name, true));
      CHECK(main_space_backup_.get() != nullptr);
      // Add the space so it's accounted for in the heap_begin and heap_end.
      AddSpace(main_space_backup_.get());
    }
  }
  CHECK(non_moving_space_ != nullptr);
  CHECK(!non_moving_space_->CanMoveObjects());
  // Allocate the large object space.
  if (large_object_space_type == space::LargeObjectSpaceType::kFreeList) {
    large_object_space_ = space::FreeListSpace::Create("free list large object space", nullptr,
                                                       capacity_);
    CHECK(large_object_space_ != nullptr) << "Failed to create large object space";
  } else if (large_object_space_type == space::LargeObjectSpaceType::kMap) {
    large_object_space_ = space::LargeObjectMapSpace::Create("mem map large object space");
    CHECK(large_object_space_ != nullptr) << "Failed to create large object space";
  } else {
    // Disable the large object space by making the cutoff excessively large.
    large_object_threshold_ = std::numeric_limits<size_t>::max();
    large_object_space_ = nullptr;
  }
  if (large_object_space_ != nullptr) {
    AddSpace(large_object_space_);
  }
  // Compute heap capacity. Continuous spaces are sorted in order of Begin().
  CHECK(!continuous_spaces_.empty());
  // Relies on the spaces being sorted.
  uint8_t* heap_begin = continuous_spaces_.front()->Begin();
  uint8_t* heap_end = continuous_spaces_.back()->Limit();
  size_t heap_capacity = heap_end - heap_begin;
  // Remove the main backup space since it slows down the GC to have unused extra spaces.
  // TODO: Avoid needing to do this.
  if (main_space_backup_.get() != nullptr) {
    RemoveSpace(main_space_backup_.get());
  }
  // Allocate the card table.
  // We currently don't support dynamically resizing the card table.
  // Since we don't know where in the low_4gb the app image will be located, make the card table
  // cover the whole low_4gb. TODO: Extend the card table in AddSpace.
  UNUSED(heap_capacity);
  // Start at 4 KB; we can be sure there are no spaces mapped this low since the address range is
  // reserved by the kernel.
  static constexpr size_t kMinHeapAddress = 4 * KB;
  card_table_.reset(accounting::CardTable::Create(reinterpret_cast<uint8_t*>(kMinHeapAddress),
                                                  4 * GB - kMinHeapAddress));
  CHECK(card_table_.get() != nullptr) << "Failed to create card table";
  if (foreground_collector_type_ == kCollectorTypeCC && kUseTableLookupReadBarrier) {
    rb_table_.reset(new accounting::ReadBarrierTable());
    DCHECK(rb_table_->IsAllCleared());
  }
  if (HasBootImageSpace()) {
    // Don't add the image mod union table if we are running without an image; this can crash if
    // we use the CardCache implementation.
    for (space::ImageSpace* image_space : GetBootImageSpaces()) {
      accounting::ModUnionTable* mod_union_table = new accounting::ModUnionTableToZygoteAllocspace(
          "Image mod-union table", this, image_space);
      CHECK(mod_union_table != nullptr) << "Failed to create image mod-union table";
      AddModUnionTable(mod_union_table);
    }
  }
  if (collector::SemiSpace::kUseRememberedSet && non_moving_space_ != main_space_) {
    accounting::RememberedSet* non_moving_space_rem_set =
        new accounting::RememberedSet("Non-moving space remembered set", this, non_moving_space_);
    CHECK(non_moving_space_rem_set != nullptr) << "Failed to create non-moving space remembered set";
    AddRememberedSet(non_moving_space_rem_set);
  }
  // TODO: Count objects in the image space here?
  num_bytes_allocated_.StoreRelaxed(0);
  mark_stack_.reset(accounting::ObjectStack::Create("mark stack", kDefaultMarkStackSize,
                                                    kDefaultMarkStackSize));
  const size_t alloc_stack_capacity = max_allocation_stack_size_ + kAllocationStackReserveSize;
  allocation_stack_.reset(accounting::ObjectStack::Create(
      "allocation stack", max_allocation_stack_size_, alloc_stack_capacity));
  live_stack_.reset(accounting::ObjectStack::Create(
      "live stack", max_allocation_stack_size_, alloc_stack_capacity));
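  // The allocation stack records objects allocated since the last GC; it is swapped with the live
  // stack during collection (see the allocation/live stack comments in IsLiveObjectLocked below).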

  // It's still too early to take a lock because there are no threads yet, but we can create locks
  // now. We don't create them earlier to make it clear that you can't use locks during heap
  // initialization.
  gc_complete_lock_ = new Mutex("GC complete lock");
  gc_complete_cond_.reset(new ConditionVariable("GC complete condition variable",
                                                *gc_complete_lock_));
  native_blocking_gc_lock_ = new Mutex("Native blocking GC lock");
  native_blocking_gc_cond_.reset(new ConditionVariable("Native blocking GC condition variable",
                                                       *native_blocking_gc_lock_));
  native_blocking_gc_is_assigned_ = false;
  native_blocking_gc_in_progress_ = false;
  native_blocking_gcs_finished_ = 0;

  thread_flip_lock_ = new Mutex("GC thread flip lock");
  thread_flip_cond_.reset(new ConditionVariable("GC thread flip condition variable",
                                                *thread_flip_lock_));
  task_processor_.reset(new TaskProcessor());
  reference_processor_.reset(new ReferenceProcessor());
  pending_task_lock_ = new Mutex("Pending task lock");
  if (ignore_max_footprint_) {
    SetIdealFootprint(std::numeric_limits<size_t>::max());
    concurrent_start_bytes_ = std::numeric_limits<size_t>::max();
  }
  CHECK_NE(max_allowed_footprint_, 0U);
  // Create our garbage collectors.
  for (size_t i = 0; i < 2; ++i) {
    const bool concurrent = i != 0;
    if ((MayUseCollector(kCollectorTypeCMS) && concurrent) ||
        (MayUseCollector(kCollectorTypeMS) && !concurrent)) {
      garbage_collectors_.push_back(new collector::MarkSweep(this, concurrent));
      garbage_collectors_.push_back(new collector::PartialMarkSweep(this, concurrent));
      garbage_collectors_.push_back(new collector::StickyMarkSweep(this, concurrent));
    }
  }
  if (kMovingCollector) {
    if (MayUseCollector(kCollectorTypeSS) || MayUseCollector(kCollectorTypeGSS) ||
        MayUseCollector(kCollectorTypeHomogeneousSpaceCompact) ||
        use_homogeneous_space_compaction_for_oom_) {
      // TODO: Clean this up.
      const bool generational = foreground_collector_type_ == kCollectorTypeGSS;
      semi_space_collector_ = new collector::SemiSpace(this, generational,
                                                       generational ? "generational" : "");
      garbage_collectors_.push_back(semi_space_collector_);
    }
    if (MayUseCollector(kCollectorTypeCC)) {
      concurrent_copying_collector_ = new collector::ConcurrentCopying(this,
                                                                       "",
                                                                       measure_gc_performance);
      DCHECK(region_space_ != nullptr);
      concurrent_copying_collector_->SetRegionSpace(region_space_);
      garbage_collectors_.push_back(concurrent_copying_collector_);
    }
    if (MayUseCollector(kCollectorTypeMC)) {
      mark_compact_collector_ = new collector::MarkCompact(this);
      garbage_collectors_.push_back(mark_compact_collector_);
    }
  }
  if (!GetBootImageSpaces().empty() && non_moving_space_ != nullptr &&
      (is_zygote || separate_non_moving_space || foreground_collector_type_ == kCollectorTypeGSS)) {
    // Check that there's no gap between the image space and the non moving space so that the
    // immune region won't break (e.g. due to a large object allocated in the gap). This is only
    // required when we're the zygote or using GSS.
    // Space with smallest Begin().
    space::ImageSpace* first_space = nullptr;
    for (space::ImageSpace* space : boot_image_spaces_) {
      if (first_space == nullptr || space->Begin() < first_space->Begin()) {
        first_space = space;
      }
    }
    bool no_gap = MemMap::CheckNoGaps(first_space->GetMemMap(), non_moving_space_->GetMemMap());
    if (!no_gap) {
      PrintFileToLog("/proc/self/maps", LogSeverity::ERROR);
      MemMap::DumpMaps(LOG_STREAM(ERROR), true);
      LOG(FATAL) << "There's a gap between the image space and the non-moving space";
    }
  }
  instrumentation::Instrumentation* const instrumentation = runtime->GetInstrumentation();
  if (gc_stress_mode_) {
    backtrace_lock_ = new Mutex("GC complete lock");
  }
  if (is_running_on_memory_tool_ || gc_stress_mode_) {
    instrumentation->InstrumentQuickAllocEntryPoints();
  }
  if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
    LOG(INFO) << "Heap() exiting";
  }
}

MemMap* Heap::MapAnonymousPreferredAddress(const char* name,
                                           uint8_t* request_begin,
                                           size_t capacity,
                                           std::string* out_error_str) {
  while (true) {
    MemMap* map = MemMap::MapAnonymous(name, request_begin, capacity,
                                       PROT_READ | PROT_WRITE, true, false, out_error_str);
    if (map != nullptr || request_begin == nullptr) {
      return map;
    }
    // Retry a second time with no specified request begin.
    request_begin = nullptr;
  }
}

bool Heap::MayUseCollector(CollectorType type) const {
  return foreground_collector_type_ == type || background_collector_type_ == type;
}

space::MallocSpace* Heap::CreateMallocSpaceFromMemMap(MemMap* mem_map,
                                                      size_t initial_size,
                                                      size_t growth_limit,
                                                      size_t capacity,
                                                      const char* name,
                                                      bool can_move_objects) {
  space::MallocSpace* malloc_space = nullptr;
  if (kUseRosAlloc) {
    // Create rosalloc space.
    malloc_space = space::RosAllocSpace::CreateFromMemMap(mem_map, name, kDefaultStartingSize,
                                                          initial_size, growth_limit, capacity,
                                                          low_memory_mode_, can_move_objects);
  } else {
    malloc_space = space::DlMallocSpace::CreateFromMemMap(mem_map, name, kDefaultStartingSize,
                                                          initial_size, growth_limit, capacity,
                                                          can_move_objects);
  }
  if (collector::SemiSpace::kUseRememberedSet) {
    accounting::RememberedSet* rem_set =
        new accounting::RememberedSet(std::string(name) + " remembered set", this, malloc_space);
    CHECK(rem_set != nullptr) << "Failed to create main space remembered set";
    AddRememberedSet(rem_set);
  }
  CHECK(malloc_space != nullptr) << "Failed to create " << name;
  malloc_space->SetFootprintLimit(malloc_space->Capacity());
  return malloc_space;
}
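
// Creates (or recreates) the main space on top of the given mem map, backed by rosalloc or
// dlmalloc depending on kUseRosAlloc. The space is created as capable of moving objects when some
// form of compaction may later need to copy out of it.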
void Heap::CreateMainMallocSpace(MemMap* mem_map, size_t initial_size, size_t growth_limit,
                                 size_t capacity) {
  // Is background compaction enabled?
  bool can_move_objects = IsMovingGc(background_collector_type_) !=
      IsMovingGc(foreground_collector_type_) || use_homogeneous_space_compaction_for_oom_;
  // If we are the zygote and don't yet have a zygote space, it means that the zygote fork will
  // happen in the future. If this happens and we have kCompactZygote enabled we wish to compact
  // from the main space to the zygote space. If background compaction is enabled, always pass in
  // that we can move objects.
  if (kCompactZygote && Runtime::Current()->IsZygote() && !can_move_objects) {
    // After the zygote we want this to be false if we don't have background compaction enabled so
    // that getting primitive array elements is faster.
    // We never have homogeneous compaction with GSS and don't need a space with movable objects.
    can_move_objects = !HasZygoteSpace() && foreground_collector_type_ != kCollectorTypeGSS;
  }
  if (collector::SemiSpace::kUseRememberedSet && main_space_ != nullptr) {
    RemoveRememberedSet(main_space_);
  }
  const char* name = kUseRosAlloc ? kRosAllocSpaceName[0] : kDlMallocSpaceName[0];
  main_space_ = CreateMallocSpaceFromMemMap(mem_map, initial_size, growth_limit, capacity, name,
                                            can_move_objects);
  SetSpaceAsDefault(main_space_);
  VLOG(heap) << "Created main space " << main_space_;
}

void Heap::ChangeAllocator(AllocatorType allocator) {
  if (current_allocator_ != allocator) {
    // These two allocators are only used internally and don't have any entrypoints.
    CHECK_NE(allocator, kAllocatorTypeLOS);
    CHECK_NE(allocator, kAllocatorTypeNonMoving);
    current_allocator_ = allocator;
    MutexLock mu(nullptr, *Locks::runtime_shutdown_lock_);
    SetQuickAllocEntryPointsAllocator(current_allocator_);
    Runtime::Current()->GetInstrumentation()->ResetQuickAllocEntryPoints();
  }
}

void Heap::DisableMovingGc() {
  CHECK(!kUseReadBarrier);
  if (IsMovingGc(foreground_collector_type_)) {
    foreground_collector_type_ = kCollectorTypeCMS;
  }
  if (IsMovingGc(background_collector_type_)) {
    background_collector_type_ = foreground_collector_type_;
  }
  TransitionCollector(foreground_collector_type_);
  Thread* const self = Thread::Current();
  ScopedThreadStateChange tsc(self, kSuspended);
  ScopedSuspendAll ssa(__FUNCTION__);
  // Something may have caused the transition to fail.
  if (!IsMovingGc(collector_type_) && non_moving_space_ != main_space_) {
    CHECK(main_space_ != nullptr);
    // The allocation stack may have non movable objects in it. We need to flush it since the GC
    // can't only handle marking allocation stack objects of one non moving space and one main
    // space.
    {
      WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
      FlushAllocStack();
    }
    main_space_->DisableMovingObjects();
    non_moving_space_ = main_space_;
    CHECK(!non_moving_space_->CanMoveObjects());
  }
}

bool Heap::IsCompilingBoot() const {
  if (!Runtime::Current()->IsAotCompiler()) {
    return false;
  }
  ScopedObjectAccess soa(Thread::Current());
  for (const auto& space : continuous_spaces_) {
    if (space->IsImageSpace() || space->IsZygoteSpace()) {
      return false;
    }
  }
  return true;
}

void Heap::IncrementDisableMovingGC(Thread* self) {
  // Need to do this holding the lock to prevent races where the GC is about to run / running when
  // we attempt to disable it.
  ScopedThreadStateChange tsc(self, kWaitingForGcToComplete);
  MutexLock mu(self, *gc_complete_lock_);
  ++disable_moving_gc_count_;
  if (IsMovingGc(collector_type_running_)) {
    WaitForGcToCompleteLocked(kGcCauseDisableMovingGc, self);
  }
}

void Heap::DecrementDisableMovingGC(Thread* self) {
  MutexLock mu(self, *gc_complete_lock_);
  CHECK_GT(disable_moving_gc_count_, 0U);
  --disable_moving_gc_count_;
}
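
// Thread flip synchronization: mutators bracket JNI critical sections with
// IncrementDisableThreadFlip / DecrementDisableThreadFlip, while the concurrent copying GC
// brackets the flip itself with ThreadFlipBegin / ThreadFlipEnd. disable_thread_flip_count_
// counts mutators currently inside a critical section, thread_flip_running_ marks an in-progress
// flip, and each side waits on thread_flip_cond_ for the other to drain.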
void Heap::IncrementDisableThreadFlip(Thread* self) {
  // Supposed to be called by mutators. If thread_flip_running_ is true, block. Otherwise, go
  // ahead.
  CHECK(kUseReadBarrier);
  bool is_nested = self->GetDisableThreadFlipCount() > 0;
  self->IncrementDisableThreadFlipCount();
  if (is_nested) {
    // If this is a nested JNI critical section enter, we don't need to wait or increment the
    // global counter. The global counter is incremented only once for a thread for the outermost
    // enter.
    return;
  }
  ScopedThreadStateChange tsc(self, kWaitingForGcThreadFlip);
  MutexLock mu(self, *thread_flip_lock_);
  bool has_waited = false;
  uint64_t wait_start = NanoTime();
  if (thread_flip_running_) {
    ATRACE_BEGIN("IncrementDisableThreadFlip");
    while (thread_flip_running_) {
      has_waited = true;
      thread_flip_cond_->Wait(self);
    }
    ATRACE_END();
  }
  ++disable_thread_flip_count_;
  if (has_waited) {
    uint64_t wait_time = NanoTime() - wait_start;
    total_wait_time_ += wait_time;
    if (wait_time > long_pause_log_threshold_) {
      LOG(INFO) << __FUNCTION__ << " blocked for " << PrettyDuration(wait_time);
    }
  }
}

void Heap::DecrementDisableThreadFlip(Thread* self) {
  // Supposed to be called by mutators. Decrement disable_thread_flip_count_ and potentially wake
  // up the GC that is waiting to do a thread flip.
  CHECK(kUseReadBarrier);
  self->DecrementDisableThreadFlipCount();
  bool is_outermost = self->GetDisableThreadFlipCount() == 0;
  if (!is_outermost) {
    // If this is not an outermost JNI critical exit, we don't need to decrement the global
    // counter. The global counter is decremented only once for a thread for the outermost exit.
    return;
  }
  MutexLock mu(self, *thread_flip_lock_);
  CHECK_GT(disable_thread_flip_count_, 0U);
  --disable_thread_flip_count_;
  if (disable_thread_flip_count_ == 0) {
    // Potentially notify the GC thread blocking to begin a thread flip.
    thread_flip_cond_->Broadcast(self);
  }
}

void Heap::ThreadFlipBegin(Thread* self) {
  // Supposed to be called by GC. Set thread_flip_running_ to be true. If
  // disable_thread_flip_count_ > 0, block. Otherwise, go ahead.
  CHECK(kUseReadBarrier);
  ScopedThreadStateChange tsc(self, kWaitingForGcThreadFlip);
  MutexLock mu(self, *thread_flip_lock_);
  bool has_waited = false;
  uint64_t wait_start = NanoTime();
  CHECK(!thread_flip_running_);
  // Set this to true before waiting so that frequent JNI critical enter/exits won't starve the
  // GC. This is like the writer preference of a reader-writer lock.
  thread_flip_running_ = true;
  while (disable_thread_flip_count_ > 0) {
    has_waited = true;
    thread_flip_cond_->Wait(self);
  }
  if (has_waited) {
    uint64_t wait_time = NanoTime() - wait_start;
    total_wait_time_ += wait_time;
    if (wait_time > long_pause_log_threshold_) {
      LOG(INFO) << __FUNCTION__ << " blocked for " << PrettyDuration(wait_time);
    }
  }
}

void Heap::ThreadFlipEnd(Thread* self) {
  // Supposed to be called by GC. Set thread_flip_running_ to false and potentially wake up
  // mutators waiting before doing a JNI critical.
  CHECK(kUseReadBarrier);
  MutexLock mu(self, *thread_flip_lock_);
  CHECK(thread_flip_running_);
  thread_flip_running_ = false;
  // Potentially notify mutator threads blocking to enter a JNI critical section.
  thread_flip_cond_->Broadcast(self);
}
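
// Called on process state transitions (e.g. when an app moves to or from the foreground);
// schedules the switch between the foreground and background collector configurations.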
void Heap::UpdateProcessState(ProcessState old_process_state, ProcessState new_process_state) {
  if (old_process_state != new_process_state) {
    const bool jank_perceptible = new_process_state == kProcessStateJankPerceptible;
    for (size_t i = 1; i <= kCollectorTransitionStressIterations; ++i) {
      // Start at index 1 to avoid "is always false" warning.
      // Have iteration 1 always transition the collector.
      TransitionCollector((((i & 1) == 1) == jank_perceptible)
                              ? foreground_collector_type_
                              : background_collector_type_);
      usleep(kCollectorTransitionStressWait);
    }
    if (jank_perceptible) {
      // Transition back to foreground right away to prevent jank.
      RequestCollectorTransition(foreground_collector_type_, 0);
    } else {
      // Don't delay for debug builds since we may want to stress test the GC.
      // If background_collector_type_ is kCollectorTypeHomogeneousSpaceCompact then we have
      // special handling which does a homogeneous space compaction once but then doesn't
      // transition the collector. Similarly, we invoke a full compaction for kCollectorTypeCC
      // but don't transition the collector.
      RequestCollectorTransition(background_collector_type_,
                                 kStressCollectorTransition
                                     ? 0
                                     : kCollectorTransitionWait);
    }
  }
}

void Heap::CreateThreadPool() {
  const size_t num_threads = std::max(parallel_gc_threads_, conc_gc_threads_);
  if (num_threads != 0) {
    thread_pool_.reset(new ThreadPool("Heap thread pool", num_threads));
  }
}

void Heap::MarkAllocStackAsLive(accounting::ObjectStack* stack) {
  space::ContinuousSpace* space1 = main_space_ != nullptr ? main_space_ : non_moving_space_;
  space::ContinuousSpace* space2 = non_moving_space_;
  // TODO: Generalize this to n bitmaps?
  CHECK(space1 != nullptr);
  CHECK(space2 != nullptr);
  MarkAllocStack(space1->GetLiveBitmap(), space2->GetLiveBitmap(),
                 (large_object_space_ != nullptr ? large_object_space_->GetLiveBitmap() : nullptr),
                 stack);
}

void Heap::DeleteThreadPool() {
  thread_pool_.reset(nullptr);
}

void Heap::AddSpace(space::Space* space) {
  CHECK(space != nullptr);
  WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
  if (space->IsContinuousSpace()) {
    DCHECK(!space->IsDiscontinuousSpace());
    space::ContinuousSpace* continuous_space = space->AsContinuousSpace();
    // Continuous spaces don't necessarily have bitmaps.
    accounting::ContinuousSpaceBitmap* live_bitmap = continuous_space->GetLiveBitmap();
    accounting::ContinuousSpaceBitmap* mark_bitmap = continuous_space->GetMarkBitmap();
    // The region space bitmap is not added since VisitObjects visits the region space objects
    // with special handling.
    if (live_bitmap != nullptr && !space->IsRegionSpace()) {
      CHECK(mark_bitmap != nullptr);
      live_bitmap_->AddContinuousSpaceBitmap(live_bitmap);
      mark_bitmap_->AddContinuousSpaceBitmap(mark_bitmap);
    }
    continuous_spaces_.push_back(continuous_space);
    // Ensure that spaces remain sorted in increasing order of start address.
    std::sort(continuous_spaces_.begin(), continuous_spaces_.end(),
              [](const space::ContinuousSpace* a, const space::ContinuousSpace* b) {
                return a->Begin() < b->Begin();
              });
  } else {
    CHECK(space->IsDiscontinuousSpace());
    space::DiscontinuousSpace* discontinuous_space = space->AsDiscontinuousSpace();
    live_bitmap_->AddLargeObjectBitmap(discontinuous_space->GetLiveBitmap());
    mark_bitmap_->AddLargeObjectBitmap(discontinuous_space->GetMarkBitmap());
    discontinuous_spaces_.push_back(discontinuous_space);
  }
  if (space->IsAllocSpace()) {
    alloc_spaces_.push_back(space->AsAllocSpace());
  }
}

void Heap::SetSpaceAsDefault(space::ContinuousSpace* continuous_space) {
  WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
  if (continuous_space->IsDlMallocSpace()) {
    dlmalloc_space_ = continuous_space->AsDlMallocSpace();
  } else if (continuous_space->IsRosAllocSpace()) {
    rosalloc_space_ = continuous_space->AsRosAllocSpace();
  }
}

void Heap::RemoveSpace(space::Space* space) {
  DCHECK(space != nullptr);
  WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
  if (space->IsContinuousSpace()) {
    DCHECK(!space->IsDiscontinuousSpace());
    space::ContinuousSpace* continuous_space = space->AsContinuousSpace();
    // Continuous spaces don't necessarily have bitmaps.
    accounting::ContinuousSpaceBitmap* live_bitmap = continuous_space->GetLiveBitmap();
    accounting::ContinuousSpaceBitmap* mark_bitmap = continuous_space->GetMarkBitmap();
    if (live_bitmap != nullptr && !space->IsRegionSpace()) {
      DCHECK(mark_bitmap != nullptr);
      live_bitmap_->RemoveContinuousSpaceBitmap(live_bitmap);
      mark_bitmap_->RemoveContinuousSpaceBitmap(mark_bitmap);
    }
    auto it = std::find(continuous_spaces_.begin(), continuous_spaces_.end(), continuous_space);
    DCHECK(it != continuous_spaces_.end());
    continuous_spaces_.erase(it);
  } else {
    DCHECK(space->IsDiscontinuousSpace());
    space::DiscontinuousSpace* discontinuous_space = space->AsDiscontinuousSpace();
    live_bitmap_->RemoveLargeObjectBitmap(discontinuous_space->GetLiveBitmap());
    mark_bitmap_->RemoveLargeObjectBitmap(discontinuous_space->GetMarkBitmap());
    auto it = std::find(discontinuous_spaces_.begin(), discontinuous_spaces_.end(),
                        discontinuous_space);
    DCHECK(it != discontinuous_spaces_.end());
    discontinuous_spaces_.erase(it);
  }
  if (space->IsAllocSpace()) {
    auto it = std::find(alloc_spaces_.begin(), alloc_spaces_.end(), space->AsAllocSpace());
    DCHECK(it != alloc_spaces_.end());
    alloc_spaces_.erase(it);
  }
}

void Heap::DumpGcPerformanceInfo(std::ostream& os) {
  // Dump cumulative timings.
  os << "Dumping cumulative Gc timings\n";
  uint64_t total_duration = 0;
  // Dump cumulative loggers for each GC type.
  uint64_t total_paused_time = 0;
  for (auto& collector : garbage_collectors_) {
    total_duration += collector->GetCumulativeTimings().GetTotalNs();
    total_paused_time += collector->GetTotalPausedTimeNs();
    collector->DumpPerformanceInfo(os);
  }
  if (total_duration != 0) {
    const double total_seconds = static_cast<double>(total_duration / 1000) / 1000000.0;
    os << "Total time spent in GC: " << PrettyDuration(total_duration) << "\n";
    os << "Mean GC size throughput: "
       << PrettySize(GetBytesFreedEver() / total_seconds) << "/s\n";
    os << "Mean GC object throughput: "
       << (GetObjectsFreedEver() / total_seconds) << " objects/s\n";
  }
  uint64_t total_objects_allocated = GetObjectsAllocatedEver();
  os << "Total number of allocations " << total_objects_allocated << "\n";
  os << "Total bytes allocated " << PrettySize(GetBytesAllocatedEver()) << "\n";
  os << "Total bytes freed " << PrettySize(GetBytesFreedEver()) << "\n";
  os << "Free memory " << PrettySize(GetFreeMemory()) << "\n";
  os << "Free memory until GC " << PrettySize(GetFreeMemoryUntilGC()) << "\n";
  os << "Free memory until OOME " << PrettySize(GetFreeMemoryUntilOOME()) << "\n";
  os << "Total memory " << PrettySize(GetTotalMemory()) << "\n";
  os << "Max memory " << PrettySize(GetMaxMemory()) << "\n";
  if (HasZygoteSpace()) {
    os << "Zygote space size " << PrettySize(zygote_space_->Size()) << "\n";
  }
  os << "Total mutator paused time: " << PrettyDuration(total_paused_time) << "\n";
  os << "Total time waiting for GC to complete: " << PrettyDuration(total_wait_time_) << "\n";
  os << "Total GC count: " << GetGcCount() << "\n";
  os << "Total GC time: " << PrettyDuration(GetGcTime()) << "\n";
  os << "Total blocking GC count: " << GetBlockingGcCount() << "\n";
  os << "Total blocking GC time: " << PrettyDuration(GetBlockingGcTime()) << "\n";

  {
    MutexLock mu(Thread::Current(), *gc_complete_lock_);
    if (gc_count_rate_histogram_.SampleSize() > 0U) {
      os << "Histogram of GC count per " << NsToMs(kGcCountRateHistogramWindowDuration) << " ms: ";
      gc_count_rate_histogram_.DumpBins(os);
      os << "\n";
    }
    if (blocking_gc_count_rate_histogram_.SampleSize() > 0U) {
      os << "Histogram of blocking GC count per "
         << NsToMs(kGcCountRateHistogramWindowDuration) << " ms: ";
      blocking_gc_count_rate_histogram_.DumpBins(os);
      os << "\n";
    }
  }

  if (kDumpRosAllocStatsOnSigQuit && rosalloc_space_ != nullptr) {
    rosalloc_space_->DumpStats(os);
  }

  os << "Registered native bytes allocated: "
     << old_native_bytes_allocated_.LoadRelaxed() + new_native_bytes_allocated_.LoadRelaxed()
     << "\n";

  BaseMutex::DumpAll(os);
}

void Heap::ResetGcPerformanceInfo() {
  for (auto& collector : garbage_collectors_) {
    collector->ResetMeasurements();
  }
  total_bytes_freed_ever_ = 0;
  total_objects_freed_ever_ = 0;
  total_wait_time_ = 0;
  blocking_gc_count_ = 0;
  blocking_gc_time_ = 0;
  gc_count_last_window_ = 0;
  blocking_gc_count_last_window_ = 0;
  last_update_time_gc_count_rate_histograms_ =  // Round down by the window duration.
      (NanoTime() / kGcCountRateHistogramWindowDuration) * kGcCountRateHistogramWindowDuration;
  {
    MutexLock mu(Thread::Current(), *gc_complete_lock_);
    gc_count_rate_histogram_.Reset();
    blocking_gc_count_rate_histogram_.Reset();
  }
}

uint64_t Heap::GetGcCount() const {
  uint64_t gc_count = 0U;
  for (auto& collector : garbage_collectors_) {
    gc_count += collector->GetCumulativeTimings().GetIterations();
  }
  return gc_count;
}

uint64_t Heap::GetGcTime() const {
  uint64_t gc_time = 0U;
  for (auto& collector : garbage_collectors_) {
    gc_time += collector->GetCumulativeTimings().GetTotalNs();
  }
  return gc_time;
}

uint64_t Heap::GetBlockingGcCount() const {
  return blocking_gc_count_;
}

uint64_t Heap::GetBlockingGcTime() const {
  return blocking_gc_time_;
}

void Heap::DumpGcCountRateHistogram(std::ostream& os) const {
  MutexLock mu(Thread::Current(), *gc_complete_lock_);
  if (gc_count_rate_histogram_.SampleSize() > 0U) {
    gc_count_rate_histogram_.DumpBins(os);
  }
}

void Heap::DumpBlockingGcCountRateHistogram(std::ostream& os) const {
  MutexLock mu(Thread::Current(), *gc_complete_lock_);
  if (blocking_gc_count_rate_histogram_.SampleSize() > 0U) {
    blocking_gc_count_rate_histogram_.DumpBins(os);
  }
}
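
// Atomically swaps in new_value and returns the listener that was previously installed. The CAS
// loop makes the exchange atomic with respect to concurrent listener updates.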
ALWAYS_INLINE
static inline AllocationListener* GetAndOverwriteAllocationListener(
    Atomic<AllocationListener*>* storage, AllocationListener* new_value) {
  AllocationListener* old;
  do {
    old = storage->LoadSequentiallyConsistent();
  } while (!storage->CompareExchangeStrongSequentiallyConsistent(old, new_value));
  return old;
}

Heap::~Heap() {
  VLOG(heap) << "Starting ~Heap()";
  STLDeleteElements(&garbage_collectors_);
  // If we don't reset then the mark stack complains in its destructor.
  allocation_stack_->Reset();
  allocation_records_.reset();
  live_stack_->Reset();
  STLDeleteValues(&mod_union_tables_);
  STLDeleteValues(&remembered_sets_);
  STLDeleteElements(&continuous_spaces_);
  STLDeleteElements(&discontinuous_spaces_);
  delete gc_complete_lock_;
  delete native_blocking_gc_lock_;
  delete thread_flip_lock_;
  delete pending_task_lock_;
  delete backtrace_lock_;
  if (unique_backtrace_count_.LoadRelaxed() != 0 || seen_backtrace_count_.LoadRelaxed() != 0) {
    LOG(INFO) << "gc stress unique=" << unique_backtrace_count_.LoadRelaxed()
              << " total=" << seen_backtrace_count_.LoadRelaxed() +
                  unique_backtrace_count_.LoadRelaxed();
  }

  VLOG(heap) << "Finished ~Heap()";
}

space::ContinuousSpace* Heap::FindContinuousSpaceFromAddress(const mirror::Object* addr) const {
  for (const auto& space : continuous_spaces_) {
    if (space->Contains(addr)) {
      return space;
    }
  }
  return nullptr;
}

space::ContinuousSpace* Heap::FindContinuousSpaceFromObject(ObjPtr<mirror::Object> obj,
                                                            bool fail_ok) const {
  space::ContinuousSpace* space = FindContinuousSpaceFromAddress(obj.Ptr());
  if (space != nullptr) {
    return space;
  }
  if (!fail_ok) {
    LOG(FATAL) << "object " << obj << " not inside any spaces!";
  }
  return nullptr;
}

space::DiscontinuousSpace* Heap::FindDiscontinuousSpaceFromObject(ObjPtr<mirror::Object> obj,
                                                                  bool fail_ok) const {
  for (const auto& space : discontinuous_spaces_) {
    if (space->Contains(obj.Ptr())) {
      return space;
    }
  }
  if (!fail_ok) {
    LOG(FATAL) << "object " << obj << " not inside any spaces!";
  }
  return nullptr;
}

space::Space* Heap::FindSpaceFromObject(ObjPtr<mirror::Object> obj, bool fail_ok) const {
  space::Space* result = FindContinuousSpaceFromObject(obj, true);
  if (result != nullptr) {
    return result;
  }
  return FindDiscontinuousSpaceFromObject(obj, fail_ok);
}

space::Space* Heap::FindSpaceFromAddress(const void* addr) const {
  for (const auto& space : continuous_spaces_) {
    if (space->Contains(reinterpret_cast<const mirror::Object*>(addr))) {
      return space;
    }
  }
  for (const auto& space : discontinuous_spaces_) {
    if (space->Contains(reinterpret_cast<const mirror::Object*>(addr))) {
      return space;
    }
  }
  return nullptr;
}

void Heap::ThrowOutOfMemoryError(Thread* self, size_t byte_count, AllocatorType allocator_type) {
  // If we're in a stack overflow, do not create a new exception. It would require running the
  // constructor, which will of course still be in a stack overflow.
  if (self->IsHandlingStackOverflow()) {
    self->SetException(Runtime::Current()->GetPreAllocatedOutOfMemoryError());
    return;
  }

  std::ostringstream oss;
  size_t total_bytes_free = GetFreeMemory();
  oss << "Failed to allocate a " << byte_count << " byte allocation with " << total_bytes_free
      << " free bytes and " << PrettySize(GetFreeMemoryUntilOOME()) << " until OOM,"
      << " max allowed footprint " << max_allowed_footprint_ << ", growth limit "
      << growth_limit_;
  // If the allocation failed due to fragmentation, print out the largest continuous allocation.
  if (total_bytes_free >= byte_count) {
    space::AllocSpace* space = nullptr;
    if (allocator_type == kAllocatorTypeNonMoving) {
      space = non_moving_space_;
    } else if (allocator_type == kAllocatorTypeRosAlloc ||
               allocator_type == kAllocatorTypeDlMalloc) {
      space = main_space_;
    } else if (allocator_type == kAllocatorTypeBumpPointer ||
               allocator_type == kAllocatorTypeTLAB) {
      space = bump_pointer_space_;
    } else if (allocator_type == kAllocatorTypeRegion ||
               allocator_type == kAllocatorTypeRegionTLAB) {
      space = region_space_;
    }
    if (space != nullptr) {
      space->LogFragmentationAllocFailure(oss, byte_count);
    }
  }
  self->ThrowOutOfMemoryError(oss.str().c_str());
}

void Heap::DoPendingCollectorTransition() {
  CollectorType desired_collector_type = desired_collector_type_;
  // Launch homogeneous space compaction if it is desired.
  if (desired_collector_type == kCollectorTypeHomogeneousSpaceCompact) {
    if (!CareAboutPauseTimes()) {
      PerformHomogeneousSpaceCompact();
    } else {
      VLOG(gc) << "Homogeneous compaction ignored due to jank perceptible process state";
    }
  } else if (desired_collector_type == kCollectorTypeCCBackground) {
    DCHECK(kUseReadBarrier);
    if (!CareAboutPauseTimes()) {
      // Invoke CC full compaction.
      CollectGarbageInternal(collector::kGcTypeFull,
                             kGcCauseCollectorTransition,
                             /*clear_soft_references*/false);
    } else {
      VLOG(gc) << "CC background compaction ignored due to jank perceptible process state";
    }
  } else {
    TransitionCollector(desired_collector_type);
  }
}

void Heap::Trim(Thread* self) {
  Runtime* const runtime = Runtime::Current();
  if (!CareAboutPauseTimes()) {
    // Deflate the monitors; this can cause a pause but shouldn't matter since we don't care
    // about pauses.
    ScopedTrace trace("Deflating monitors");
    // Avoid race conditions on the lock word for CC.
    ScopedGCCriticalSection gcs(self, kGcCauseTrim, kCollectorTypeHeapTrim);
    ScopedSuspendAll ssa(__FUNCTION__);
    uint64_t start_time = NanoTime();
    size_t count = runtime->GetMonitorList()->DeflateMonitors();
    VLOG(heap) << "Deflating " << count << " monitors took "
               << PrettyDuration(NanoTime() - start_time);
  }
  TrimIndirectReferenceTables(self);
  TrimSpaces(self);
  // Trim arenas that may have been used by JIT or verifier.
  runtime->GetArenaPool()->TrimMaps();
}

class TrimIndirectReferenceTableClosure : public Closure {
 public:
  explicit TrimIndirectReferenceTableClosure(Barrier* barrier) : barrier_(barrier) {
  }
  virtual void Run(Thread* thread) OVERRIDE NO_THREAD_SAFETY_ANALYSIS {
    thread->GetJniEnv()->locals.Trim();
    // If thread is a running mutator, then act on behalf of the trim thread.
    // See the code in ThreadList::RunCheckpoint.
    barrier_->Pass(Thread::Current());
  }

 private:
  Barrier* const barrier_;
};
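
// Trims every thread's local indirect reference table via a checkpoint; the barrier makes the
// caller wait until all threads (including running mutators) have finished trimming.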
1318 Barrier barrier(0); 1319 TrimIndirectReferenceTableClosure closure(&barrier); 1320 ScopedThreadStateChange tsc(self, kWaitingForCheckPointsToRun); 1321 size_t barrier_count = Runtime::Current()->GetThreadList()->RunCheckpoint(&closure); 1322 if (barrier_count != 0) { 1323 barrier.Increment(self, barrier_count); 1324 } 1325} 1326 1327void Heap::StartGC(Thread* self, GcCause cause, CollectorType collector_type) { 1328 // Need to do this before acquiring the locks since we don't want to get suspended while 1329 // holding any locks. 1330 ScopedThreadStateChange tsc(self, kWaitingForGcToComplete); 1331 MutexLock mu(self, *gc_complete_lock_); 1332 // Ensure there is only one GC at a time. 1333 WaitForGcToCompleteLocked(cause, self); 1334 collector_type_running_ = collector_type; 1335 last_gc_cause_ = cause; 1336 thread_running_gc_ = self; 1337} 1338 1339void Heap::TrimSpaces(Thread* self) { 1340 // Pretend we are doing a GC to prevent background compaction from deleting the space we are 1341 // trimming. 1342 StartGC(self, kGcCauseTrim, kCollectorTypeHeapTrim); 1343 ScopedTrace trace(__PRETTY_FUNCTION__); 1344 const uint64_t start_ns = NanoTime(); 1345 // Trim the managed spaces. 1346 uint64_t total_alloc_space_allocated = 0; 1347 uint64_t total_alloc_space_size = 0; 1348 uint64_t managed_reclaimed = 0; 1349 { 1350 ScopedObjectAccess soa(self); 1351 for (const auto& space : continuous_spaces_) { 1352 if (space->IsMallocSpace()) { 1353 gc::space::MallocSpace* malloc_space = space->AsMallocSpace(); 1354 if (malloc_space->IsRosAllocSpace() || !CareAboutPauseTimes()) { 1355 // Don't trim dlmalloc spaces if we care about pauses since this can hold the space lock 1356 // for a long period of time. 1357 managed_reclaimed += malloc_space->Trim(); 1358 } 1359 total_alloc_space_size += malloc_space->Size(); 1360 } 1361 } 1362 } 1363 total_alloc_space_allocated = GetBytesAllocated(); 1364 if (large_object_space_ != nullptr) { 1365 total_alloc_space_allocated -= large_object_space_->GetBytesAllocated(); 1366 } 1367 if (bump_pointer_space_ != nullptr) { 1368 total_alloc_space_allocated -= bump_pointer_space_->Size(); 1369 } 1370 if (region_space_ != nullptr) { 1371 total_alloc_space_allocated -= region_space_->GetBytesAllocated(); 1372 } 1373 const float managed_utilization = static_cast<float>(total_alloc_space_allocated) / 1374 static_cast<float>(total_alloc_space_size); 1375 uint64_t gc_heap_end_ns = NanoTime(); 1376 // We never move things in the native heap, so we can finish the GC at this point. 1377 FinishGC(self, collector::kGcTypeNone); 1378 1379 VLOG(heap) << "Heap trim of managed (duration=" << PrettyDuration(gc_heap_end_ns - start_ns) 1380 << ", advised=" << PrettySize(managed_reclaimed) << ") heap. 
Managed heap utilization of " 1381 << static_cast<int>(100 * managed_utilization) << "%."; 1382} 1383 1384bool Heap::IsValidObjectAddress(const void* addr) const { 1385 if (addr == nullptr) { 1386 return true; 1387 } 1388 return IsAligned<kObjectAlignment>(addr) && FindSpaceFromAddress(addr) != nullptr; 1389} 1390 1391bool Heap::IsNonDiscontinuousSpaceHeapAddress(const void* addr) const { 1392 return FindContinuousSpaceFromAddress(reinterpret_cast<const mirror::Object*>(addr)) != nullptr; 1393} 1394 1395bool Heap::IsLiveObjectLocked(ObjPtr<mirror::Object> obj, 1396 bool search_allocation_stack, 1397 bool search_live_stack, 1398 bool sorted) { 1399 if (UNLIKELY(!IsAligned<kObjectAlignment>(obj.Ptr()))) { 1400 return false; 1401 } 1402 if (bump_pointer_space_ != nullptr && bump_pointer_space_->HasAddress(obj.Ptr())) { 1403 mirror::Class* klass = obj->GetClass<kVerifyNone>(); 1404 if (obj == klass) { 1405 // This case happens for java.lang.Class. 1406 return true; 1407 } 1408 return VerifyClassClass(klass) && IsLiveObjectLocked(klass); 1409 } else if (temp_space_ != nullptr && temp_space_->HasAddress(obj.Ptr())) { 1410 // If we are in the allocated region of the temp space, then we are probably live (e.g. during 1411 // a GC). When a GC isn't running End() - Begin() is 0 which means no objects are contained. 1412 return temp_space_->Contains(obj.Ptr()); 1413 } 1414 if (region_space_ != nullptr && region_space_->HasAddress(obj.Ptr())) { 1415 return true; 1416 } 1417 space::ContinuousSpace* c_space = FindContinuousSpaceFromObject(obj, true); 1418 space::DiscontinuousSpace* d_space = nullptr; 1419 if (c_space != nullptr) { 1420 if (c_space->GetLiveBitmap()->Test(obj.Ptr())) { 1421 return true; 1422 } 1423 } else { 1424 d_space = FindDiscontinuousSpaceFromObject(obj, true); 1425 if (d_space != nullptr) { 1426 if (d_space->GetLiveBitmap()->Test(obj.Ptr())) { 1427 return true; 1428 } 1429 } 1430 } 1431 // This is covering the allocation/live stack swapping that is done without mutators suspended. 1432 for (size_t i = 0; i < (sorted ? 1 : 5); ++i) { 1433 if (i > 0) { 1434 NanoSleep(MsToNs(10)); 1435 } 1436 if (search_allocation_stack) { 1437 if (sorted) { 1438 if (allocation_stack_->ContainsSorted(obj.Ptr())) { 1439 return true; 1440 } 1441 } else if (allocation_stack_->Contains(obj.Ptr())) { 1442 return true; 1443 } 1444 } 1445 1446 if (search_live_stack) { 1447 if (sorted) { 1448 if (live_stack_->ContainsSorted(obj.Ptr())) { 1449 return true; 1450 } 1451 } else if (live_stack_->Contains(obj.Ptr())) { 1452 return true; 1453 } 1454 } 1455 } 1456 // We need to check the bitmaps again since there is a race where we mark something as live and 1457 // then clear the stack containing it. 
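 // (An object may migrate from the stacks into a live bitmap between the stack searches above
 // and this point, so a miss in both stacks does not yet prove the object is dead; re-testing
 // the bitmaps closes that window.)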
1458 if (c_space != nullptr) {
1459 if (c_space->GetLiveBitmap()->Test(obj.Ptr())) {
1460 return true;
1461 }
1462 } else {
1463 d_space = FindDiscontinuousSpaceFromObject(obj, true);
1464 if (d_space != nullptr && d_space->GetLiveBitmap()->Test(obj.Ptr())) {
1465 return true;
1466 }
1467 }
1468 return false;
1469}
1470
1471std::string Heap::DumpSpaces() const {
1472 std::ostringstream oss;
1473 DumpSpaces(oss);
1474 return oss.str();
1475}
1476
1477void Heap::DumpSpaces(std::ostream& stream) const {
1478 for (const auto& space : continuous_spaces_) {
1479 accounting::ContinuousSpaceBitmap* live_bitmap = space->GetLiveBitmap();
1480 accounting::ContinuousSpaceBitmap* mark_bitmap = space->GetMarkBitmap();
1481 stream << space << " " << *space << "\n";
1482 if (live_bitmap != nullptr) {
1483 stream << live_bitmap << " " << *live_bitmap << "\n";
1484 }
1485 if (mark_bitmap != nullptr) {
1486 stream << mark_bitmap << " " << *mark_bitmap << "\n";
1487 }
1488 }
1489 for (const auto& space : discontinuous_spaces_) {
1490 stream << space << " " << *space << "\n";
1491 }
1492}
1493
1494void Heap::VerifyObjectBody(ObjPtr<mirror::Object> obj) {
1495 if (verify_object_mode_ == kVerifyObjectModeDisabled) {
1496 return;
1497 }
1498
1499 // Ignore early dawn of the universe verifications.
1500 if (UNLIKELY(static_cast<size_t>(num_bytes_allocated_.LoadRelaxed()) < 10 * KB)) {
1501 return;
1502 }
1503 CHECK_ALIGNED(obj.Ptr(), kObjectAlignment) << "Object isn't aligned";
1504 mirror::Class* c = obj->GetFieldObject<mirror::Class, kVerifyNone>(mirror::Object::ClassOffset());
1505 CHECK(c != nullptr) << "Null class in object " << obj;
1506 CHECK_ALIGNED(c, kObjectAlignment) << "Class " << c << " not aligned in object " << obj;
1507 CHECK(VerifyClassClass(c));
1508
1509 if (verify_object_mode_ > kVerifyObjectModeFast) {
1510 // Note: the bitmap tests below are racy since we don't hold the heap bitmap lock.
1511 CHECK(IsLiveObjectLocked(obj)) << "Object is dead " << obj << "\n" << DumpSpaces();
1512 }
1513}
1514
1515void Heap::VerifyHeap() {
1516 ReaderMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
1517 auto visitor = [&](mirror::Object* obj) {
1518 VerifyObjectBody(obj);
1519 };
1520 // Technically we need the mutator lock here to call Visit. However, VerifyObjectBody is already
1521 // NO_THREAD_SAFETY_ANALYSIS.
1522 auto no_thread_safety_analysis = [&]() NO_THREAD_SAFETY_ANALYSIS {
1523 GetLiveBitmap()->Visit(visitor);
1524 };
1525 no_thread_safety_analysis();
1526}
1527
1528void Heap::RecordFree(uint64_t freed_objects, int64_t freed_bytes) {
1529 // Use signed comparison since freed bytes can be negative when a background compaction to
1530 // foreground transition occurs. This is caused by moving objects from a bump pointer space to
1531 // a free list backed space, which typically increases the memory footprint due to padding and binning.
1532 DCHECK_LE(freed_bytes, static_cast<int64_t>(num_bytes_allocated_.LoadRelaxed()));
1533 // Note: This relies on 2s complement for handling negative freed_bytes.
1534 num_bytes_allocated_.FetchAndSubSequentiallyConsistent(static_cast<ssize_t>(freed_bytes));
1535 if (Runtime::Current()->HasStatsEnabled()) {
1536 RuntimeStats* thread_stats = Thread::Current()->GetStats();
1537 thread_stats->freed_objects += freed_objects;
1538 thread_stats->freed_bytes += freed_bytes;
1539 // TODO: Do this concurrently.
1540 RuntimeStats* global_stats = Runtime::Current()->GetStats();
1541 global_stats->freed_objects += freed_objects;
1542 global_stats->freed_bytes += freed_bytes;
1543 }
1544}
1545
1546void Heap::RecordFreeRevoke() {
1547 // Subtract num_bytes_freed_revoke_ from num_bytes_allocated_ to cancel out the
1548 // ahead-of-time, bulk counting of bytes allocated in rosalloc thread-local buffers.
1549 // If there's a concurrent revoke, it is OK not to reset num_bytes_freed_revoke_
1550 // all the way to zero, as the remainder will be subtracted at the next GC.
1551 size_t bytes_freed = num_bytes_freed_revoke_.LoadSequentiallyConsistent();
1552 CHECK_GE(num_bytes_freed_revoke_.FetchAndSubSequentiallyConsistent(bytes_freed),
1553 bytes_freed) << "num_bytes_freed_revoke_ underflow";
1554 CHECK_GE(num_bytes_allocated_.FetchAndSubSequentiallyConsistent(bytes_freed),
1555 bytes_freed) << "num_bytes_allocated_ underflow";
1556 GetCurrentGcIteration()->SetFreedRevoke(bytes_freed);
1557}
1558
1559space::RosAllocSpace* Heap::GetRosAllocSpace(gc::allocator::RosAlloc* rosalloc) const {
1560 if (rosalloc_space_ != nullptr && rosalloc_space_->GetRosAlloc() == rosalloc) {
1561 return rosalloc_space_;
1562 }
1563 for (const auto& space : continuous_spaces_) {
1564 if (space->AsContinuousSpace()->IsRosAllocSpace()) {
1565 if (space->AsContinuousSpace()->AsRosAllocSpace()->GetRosAlloc() == rosalloc) {
1566 return space->AsContinuousSpace()->AsRosAllocSpace();
1567 }
1568 }
1569 }
1570 return nullptr;
1571}
1572
1573static inline bool EntrypointsInstrumented() REQUIRES_SHARED(Locks::mutator_lock_) {
1574 instrumentation::Instrumentation* const instrumentation =
1575 Runtime::Current()->GetInstrumentation();
1576 return instrumentation != nullptr && instrumentation->AllocEntrypointsInstrumented();
1577}
1578
1579mirror::Object* Heap::AllocateInternalWithGc(Thread* self,
1580 AllocatorType allocator,
1581 bool instrumented,
1582 size_t alloc_size,
1583 size_t* bytes_allocated,
1584 size_t* usable_size,
1585 size_t* bytes_tl_bulk_allocated,
1586 ObjPtr<mirror::Class>* klass) {
1587 bool was_default_allocator = allocator == GetCurrentAllocator();
1588 // Make sure there is no pending exception since we may need to throw an OOME.
1589 self->AssertNoPendingException();
1590 DCHECK(klass != nullptr);
1591 StackHandleScope<1> hs(self);
1592 HandleWrapperObjPtr<mirror::Class> h(hs.NewHandleWrapper(klass));
1593 // The allocation failed. If the GC is running, block until it completes, and then retry the
1594 // allocation.
1595 collector::GcType last_gc = WaitForGcToComplete(kGcCauseForAlloc, self);
1596 // If we were the default allocator but the allocator changed while we were suspended,
1597 // abort the allocation.
1598 if ((was_default_allocator && allocator != GetCurrentAllocator()) ||
1599 (!instrumented && EntrypointsInstrumented())) {
1600 return nullptr;
1601 }
1602 if (last_gc != collector::kGcTypeNone) {
1603 // A GC was in progress and we blocked, retry allocation now that memory has been freed.
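 // Note: the second template argument to TryToAllocate controls whether the allocation may
 // grow the heap beyond the current footprint limit; the post-GC retries below pass false,
 // and only the later "try harder" attempts pass true (see TryToAllocate<true, true> further
 // down).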
1604 mirror::Object* ptr = TryToAllocate<true, false>(self, allocator, alloc_size, bytes_allocated, 1605 usable_size, bytes_tl_bulk_allocated); 1606 if (ptr != nullptr) { 1607 return ptr; 1608 } 1609 } 1610 1611 collector::GcType tried_type = next_gc_type_; 1612 const bool gc_ran = 1613 CollectGarbageInternal(tried_type, kGcCauseForAlloc, false) != collector::kGcTypeNone; 1614 if ((was_default_allocator && allocator != GetCurrentAllocator()) || 1615 (!instrumented && EntrypointsInstrumented())) { 1616 return nullptr; 1617 } 1618 if (gc_ran) { 1619 mirror::Object* ptr = TryToAllocate<true, false>(self, allocator, alloc_size, bytes_allocated, 1620 usable_size, bytes_tl_bulk_allocated); 1621 if (ptr != nullptr) { 1622 return ptr; 1623 } 1624 } 1625 1626 // Loop through our different Gc types and try to Gc until we get enough free memory. 1627 for (collector::GcType gc_type : gc_plan_) { 1628 if (gc_type == tried_type) { 1629 continue; 1630 } 1631 // Attempt to run the collector, if we succeed, re-try the allocation. 1632 const bool plan_gc_ran = 1633 CollectGarbageInternal(gc_type, kGcCauseForAlloc, false) != collector::kGcTypeNone; 1634 if ((was_default_allocator && allocator != GetCurrentAllocator()) || 1635 (!instrumented && EntrypointsInstrumented())) { 1636 return nullptr; 1637 } 1638 if (plan_gc_ran) { 1639 // Did we free sufficient memory for the allocation to succeed? 1640 mirror::Object* ptr = TryToAllocate<true, false>(self, allocator, alloc_size, bytes_allocated, 1641 usable_size, bytes_tl_bulk_allocated); 1642 if (ptr != nullptr) { 1643 return ptr; 1644 } 1645 } 1646 } 1647 // Allocations have failed after GCs; this is an exceptional state. 1648 // Try harder, growing the heap if necessary. 1649 mirror::Object* ptr = TryToAllocate<true, true>(self, allocator, alloc_size, bytes_allocated, 1650 usable_size, bytes_tl_bulk_allocated); 1651 if (ptr != nullptr) { 1652 return ptr; 1653 } 1654 // Most allocations should have succeeded by now, so the heap is really full, really fragmented, 1655 // or the requested size is really big. Do another GC, collecting SoftReferences this time. The 1656 // VM spec requires that all SoftReferences have been collected and cleared before throwing 1657 // OOME. 1658 VLOG(gc) << "Forcing collection of SoftReferences for " << PrettySize(alloc_size) 1659 << " allocation"; 1660 // TODO: Run finalization, but this may cause more allocations to occur. 1661 // We don't need a WaitForGcToComplete here either. 1662 DCHECK(!gc_plan_.empty()); 1663 CollectGarbageInternal(gc_plan_.back(), kGcCauseForAlloc, true); 1664 if ((was_default_allocator && allocator != GetCurrentAllocator()) || 1665 (!instrumented && EntrypointsInstrumented())) { 1666 return nullptr; 1667 } 1668 ptr = TryToAllocate<true, true>(self, allocator, alloc_size, bytes_allocated, usable_size, 1669 bytes_tl_bulk_allocated); 1670 if (ptr == nullptr) { 1671 const uint64_t current_time = NanoTime(); 1672 switch (allocator) { 1673 case kAllocatorTypeRosAlloc: 1674 // Fall-through. 1675 case kAllocatorTypeDlMalloc: { 1676 if (use_homogeneous_space_compaction_for_oom_ && 1677 current_time - last_time_homogeneous_space_compaction_by_oom_ > 1678 min_interval_homogeneous_space_compaction_by_oom_) { 1679 last_time_homogeneous_space_compaction_by_oom_ = current_time; 1680 HomogeneousSpaceCompactResult result = PerformHomogeneousSpaceCompact(); 1681 // Thread suspension could have occurred. 
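 // As elsewhere in this function, a suspension point may have changed the allocator or the
 // instrumentation state, in which case we return null so the caller restarts on the new path.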
1682 if ((was_default_allocator && allocator != GetCurrentAllocator()) ||
1683 (!instrumented && EntrypointsInstrumented())) {
1684 return nullptr;
1685 }
1686 switch (result) {
1687 case HomogeneousSpaceCompactResult::kSuccess:
1688 // If the allocation succeeded, we delayed an OOM.
1689 ptr = TryToAllocate<true, true>(self, allocator, alloc_size, bytes_allocated,
1690 usable_size, bytes_tl_bulk_allocated);
1691 if (ptr != nullptr) {
1692 count_delayed_oom_++;
1693 }
1694 break;
1695 case HomogeneousSpaceCompactResult::kErrorReject:
1696 // Reject due to disabled moving GC.
1697 break;
1698 case HomogeneousSpaceCompactResult::kErrorVMShuttingDown:
1699 // Throw OOM by default.
1700 break;
1701 default: {
1702 UNIMPLEMENTED(FATAL) << "homogeneous space compaction result: "
1703 << static_cast<size_t>(result);
1704 UNREACHABLE();
1705 }
1706 }
1707 // Always print that we ran homogeneous space compaction since this can cause jank.
1708 VLOG(heap) << "Ran heap homogeneous space compaction, "
1709 << " requested defragmentation "
1710 << count_requested_homogeneous_space_compaction_.LoadSequentiallyConsistent()
1711 << " performed defragmentation "
1712 << count_performed_homogeneous_space_compaction_.LoadSequentiallyConsistent()
1713 << " ignored homogeneous space compaction "
1714 << count_ignored_homogeneous_space_compaction_.LoadSequentiallyConsistent()
1715 << " delayed count = "
1716 << count_delayed_oom_.LoadSequentiallyConsistent();
1717 }
1718 break;
1719 }
1720 case kAllocatorTypeNonMoving: {
1721 if (kUseReadBarrier) {
1722 // DisableMovingGc() isn't compatible with CC.
1723 break;
1724 }
1725 // Try to transition the heap if the allocation failure was due to the space being full.
1726 if (!IsOutOfMemoryOnAllocation(allocator, alloc_size, /*grow*/ false)) {
1727 // If we aren't out of memory then the OOM was probably from the non moving space being
1728 // full. Attempt to disable compaction and turn the main space into a non moving space.
1729 DisableMovingGc();
1730 // Thread suspension could have occurred.
1731 if ((was_default_allocator && allocator != GetCurrentAllocator()) ||
1732 (!instrumented && EntrypointsInstrumented())) {
1733 return nullptr;
1734 }
1735 // If we are still a moving GC then something must have caused the transition to fail.
1736 if (IsMovingGc(collector_type_)) {
1737 MutexLock mu(self, *gc_complete_lock_);
1738 // If we couldn't disable moving GC, just throw OOME and return null.
1739 LOG(WARNING) << "Couldn't disable moving GC with disable GC count "
1740 << disable_moving_gc_count_;
1741 } else {
1742 LOG(WARNING) << "Disabled moving GC due to the non moving space being full";
1743 ptr = TryToAllocate<true, true>(self, allocator, alloc_size, bytes_allocated,
1744 usable_size, bytes_tl_bulk_allocated);
1745 }
1746 }
1747 break;
1748 }
1749 default: {
1750 // Do nothing for other allocators.
1751 }
1752 }
1753 }
1754 // If the allocation hasn't succeeded by this point, throw an OOM error.
1755 if (ptr == nullptr) {
1756 ThrowOutOfMemoryError(self, alloc_size, allocator);
1757 }
1758 return ptr;
1759}
1760
1761void Heap::SetTargetHeapUtilization(float target) {
1762 DCHECK_GT(target, 0.0f); // asserted in Java code
1763 DCHECK_LT(target, 1.0f);
1764 target_utilization_ = target;
1765}
1766
1767size_t Heap::GetObjectsAllocated() const {
1768 Thread* const self = Thread::Current();
1769 ScopedThreadStateChange tsc(self, kWaitingForGetObjectsAllocated);
1770 // Prevent GC running during GetObjectsAllocated since we may get a checkpoint request that tells
1771 // us to suspend while we are doing SuspendAll. b/35232978
1772 gc::ScopedGCCriticalSection gcs(Thread::Current(),
1773 gc::kGcCauseGetObjectsAllocated,
1774 gc::kCollectorTypeGetObjectsAllocated);
1775 // Need SuspendAll here to prevent lock violation if RosAlloc does it during InspectAll.
1776 ScopedSuspendAll ssa(__FUNCTION__);
1777 ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
1778 size_t total = 0;
1779 for (space::AllocSpace* space : alloc_spaces_) {
1780 total += space->GetObjectsAllocated();
1781 }
1782 return total;
1783}
1784
1785uint64_t Heap::GetObjectsAllocatedEver() const {
1786 uint64_t total = GetObjectsFreedEver();
1787 // If we are detached, we can't use GetObjectsAllocated since we can't change thread states.
1788 if (Thread::Current() != nullptr) {
1789 total += GetObjectsAllocated();
1790 }
1791 return total;
1792}
1793
1794uint64_t Heap::GetBytesAllocatedEver() const {
1795 return GetBytesFreedEver() + GetBytesAllocated();
1796}
1797
1798void Heap::CountInstances(const std::vector<Handle<mirror::Class>>& classes,
1799 bool use_is_assignable_from,
1800 uint64_t* counts) {
1801 auto instance_counter = [&](mirror::Object* obj) REQUIRES_SHARED(Locks::mutator_lock_) {
1802 mirror::Class* instance_class = obj->GetClass();
1803 CHECK(instance_class != nullptr);
1804 for (size_t i = 0; i < classes.size(); ++i) {
1805 ObjPtr<mirror::Class> klass = classes[i].Get();
1806 if (use_is_assignable_from) {
1807 if (klass != nullptr && klass->IsAssignableFrom(instance_class)) {
1808 ++counts[i];
1809 }
1810 } else if (instance_class == klass) {
1811 ++counts[i];
1812 }
1813 }
1814 };
1815 VisitObjects(instance_counter);
1816}
1817
1818void Heap::GetInstances(VariableSizedHandleScope& scope,
1819 Handle<mirror::Class> h_class,
1820 int32_t max_count,
1821 std::vector<Handle<mirror::Object>>& instances) {
1822 DCHECK_GE(max_count, 0);
1823 auto instance_collector = [&](mirror::Object* obj) REQUIRES_SHARED(Locks::mutator_lock_) {
1824 if (obj->GetClass() == h_class.Get()) {
1825 if (max_count == 0 || instances.size() < static_cast<size_t>(max_count)) {
1826 instances.push_back(scope.NewHandle(obj));
1827 }
1828 }
1829 };
1830 VisitObjects(instance_collector);
1831}
1832
1833void Heap::GetReferringObjects(VariableSizedHandleScope& scope,
1834 Handle<mirror::Object> o,
1835 int32_t max_count,
1836 std::vector<Handle<mirror::Object>>& referring_objects) {
1837 class ReferringObjectsFinder {
1838 public:
1839 ReferringObjectsFinder(VariableSizedHandleScope& scope_in,
1840 Handle<mirror::Object> object_in,
1841 int32_t max_count_in,
1842 std::vector<Handle<mirror::Object>>& referring_objects_in)
1843 REQUIRES_SHARED(Locks::mutator_lock_)
1844 : scope_(scope_in),
1845 object_(object_in),
1846 max_count_(max_count_in),
1847 referring_objects_(referring_objects_in) {}
1848
1849 // For Object::VisitReferences.
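 // Invoked once per reference field of obj; offset locates the field within obj, so the
 // referent is read with obj->GetFieldObject<mirror::Object>(offset) below.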
1850 void operator()(ObjPtr<mirror::Object> obj, 1851 MemberOffset offset, 1852 bool is_static ATTRIBUTE_UNUSED) const 1853 REQUIRES_SHARED(Locks::mutator_lock_) { 1854 mirror::Object* ref = obj->GetFieldObject<mirror::Object>(offset); 1855 if (ref == object_.Get() && (max_count_ == 0 || referring_objects_.size() < max_count_)) { 1856 referring_objects_.push_back(scope_.NewHandle(obj)); 1857 } 1858 } 1859 1860 void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root ATTRIBUTE_UNUSED) 1861 const {} 1862 void VisitRoot(mirror::CompressedReference<mirror::Object>* root ATTRIBUTE_UNUSED) const {} 1863 1864 private: 1865 VariableSizedHandleScope& scope_; 1866 Handle<mirror::Object> const object_; 1867 const uint32_t max_count_; 1868 std::vector<Handle<mirror::Object>>& referring_objects_; 1869 DISALLOW_COPY_AND_ASSIGN(ReferringObjectsFinder); 1870 }; 1871 ReferringObjectsFinder finder(scope, o, max_count, referring_objects); 1872 auto referring_objects_finder = [&](mirror::Object* obj) REQUIRES_SHARED(Locks::mutator_lock_) { 1873 obj->VisitReferences(finder, VoidFunctor()); 1874 }; 1875 VisitObjects(referring_objects_finder); 1876} 1877 1878void Heap::CollectGarbage(bool clear_soft_references) { 1879 // Even if we waited for a GC we still need to do another GC since weaks allocated during the 1880 // last GC will not have necessarily been cleared. 1881 CollectGarbageInternal(gc_plan_.back(), kGcCauseExplicit, clear_soft_references); 1882} 1883 1884bool Heap::SupportHomogeneousSpaceCompactAndCollectorTransitions() const { 1885 return main_space_backup_.get() != nullptr && main_space_ != nullptr && 1886 foreground_collector_type_ == kCollectorTypeCMS; 1887} 1888 1889HomogeneousSpaceCompactResult Heap::PerformHomogeneousSpaceCompact() { 1890 Thread* self = Thread::Current(); 1891 // Inc requested homogeneous space compaction. 1892 count_requested_homogeneous_space_compaction_++; 1893 // Store performed homogeneous space compaction at a new request arrival. 1894 ScopedThreadStateChange tsc(self, kWaitingPerformingGc); 1895 Locks::mutator_lock_->AssertNotHeld(self); 1896 { 1897 ScopedThreadStateChange tsc2(self, kWaitingForGcToComplete); 1898 MutexLock mu(self, *gc_complete_lock_); 1899 // Ensure there is only one GC at a time. 1900 WaitForGcToCompleteLocked(kGcCauseHomogeneousSpaceCompact, self); 1901 // Homogeneous space compaction is a copying transition, can't run it if the moving GC disable count 1902 // is non zero. 1903 // If the collector type changed to something which doesn't benefit from homogeneous space compaction, 1904 // exit. 1905 if (disable_moving_gc_count_ != 0 || IsMovingGc(collector_type_) || 1906 !main_space_->CanMoveObjects()) { 1907 return kErrorReject; 1908 } 1909 if (!SupportHomogeneousSpaceCompactAndCollectorTransitions()) { 1910 return kErrorUnsupported; 1911 } 1912 collector_type_running_ = kCollectorTypeHomogeneousSpaceCompact; 1913 } 1914 if (Runtime::Current()->IsShuttingDown(self)) { 1915 // Don't allow heap transitions to happen if the runtime is shutting down since these can 1916 // cause objects to get finalized. 1917 FinishGC(self, collector::kGcTypeNone); 1918 return HomogeneousSpaceCompactResult::kErrorVMShuttingDown; 1919 } 1920 collector::GarbageCollector* collector; 1921 { 1922 ScopedSuspendAll ssa(__FUNCTION__); 1923 uint64_t start_time = NanoTime(); 1924 // Launch compaction. 
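 // The backup map serves as the to-space and the current main space as the from-space; the two
 // are swapped afterwards, so repeated compactions ping-pong between the two mem maps.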
1925 space::MallocSpace* to_space = main_space_backup_.release();
1926 space::MallocSpace* from_space = main_space_;
1927 to_space->GetMemMap()->Protect(PROT_READ | PROT_WRITE);
1928 const uint64_t space_size_before_compaction = from_space->Size();
1929 AddSpace(to_space);
1930 // Make sure that we will have enough room to copy.
1931 CHECK_GE(to_space->GetFootprintLimit(), from_space->GetFootprintLimit());
1932 collector = Compact(to_space, from_space, kGcCauseHomogeneousSpaceCompact);
1933 const uint64_t space_size_after_compaction = to_space->Size();
1934 main_space_ = to_space;
1935 main_space_backup_.reset(from_space);
1936 RemoveSpace(from_space);
1937 SetSpaceAsDefault(main_space_); // Set as default to reset the proper dlmalloc space.
1938 // Update performed homogeneous space compaction count.
1939 count_performed_homogeneous_space_compaction_++;
1940 // Print the statistics log and resume all threads.
1941 uint64_t duration = NanoTime() - start_time;
1942 VLOG(heap) << "Heap homogeneous space compaction took " << PrettyDuration(duration) << " size: "
1943 << PrettySize(space_size_before_compaction) << " -> "
1944 << PrettySize(space_size_after_compaction) << " compact-ratio: "
1945 << std::fixed << static_cast<double>(space_size_after_compaction) /
1946 static_cast<double>(space_size_before_compaction);
1947 }
1948 // Finish GC.
1949 reference_processor_->EnqueueClearedReferences(self);
1950 GrowForUtilization(semi_space_collector_);
1951 LogGC(kGcCauseHomogeneousSpaceCompact, collector);
1952 FinishGC(self, collector::kGcTypeFull);
1953 {
1954 ScopedObjectAccess soa(self);
1955 soa.Vm()->UnloadNativeLibraries();
1956 }
1957 return HomogeneousSpaceCompactResult::kSuccess;
1958}
1959
1960void Heap::TransitionCollector(CollectorType collector_type) {
1961 if (collector_type == collector_type_) {
1962 return;
1963 }
1964 // Collector transition must not happen with CC.
1965 CHECK(!kUseReadBarrier);
1966 VLOG(heap) << "TransitionCollector: " << static_cast<int>(collector_type_)
1967 << " -> " << static_cast<int>(collector_type);
1968 uint64_t start_time = NanoTime();
1969 uint32_t before_allocated = num_bytes_allocated_.LoadSequentiallyConsistent();
1970 Runtime* const runtime = Runtime::Current();
1971 Thread* const self = Thread::Current();
1972 ScopedThreadStateChange tsc(self, kWaitingPerformingGc);
1973 Locks::mutator_lock_->AssertNotHeld(self);
1974 // Busy wait until we can GC (StartGC can fail if we have a non-zero
1975 // compacting_gc_disable_count_; this should rarely occur).
1976 for (;;) {
1977 {
1978 ScopedThreadStateChange tsc2(self, kWaitingForGcToComplete);
1979 MutexLock mu(self, *gc_complete_lock_);
1980 // Ensure there is only one GC at a time.
1981 WaitForGcToCompleteLocked(kGcCauseCollectorTransition, self);
1982 // Currently we only need a heap transition if we switch from a moving collector to a
1983 // non-moving one, or vice versa.
1984 const bool copying_transition = IsMovingGc(collector_type_) != IsMovingGc(collector_type);
1985 // If someone else beat us to it and changed the collector before we could, exit.
1986 // This is safe to do before the suspend all since we set the collector_type_running_ before
1987 // we exit the loop. If another thread attempts to do the heap transition before we exit,
1988 // then it would get blocked on WaitForGcToCompleteLocked.
1989 if (collector_type == collector_type_) {
1990 return;
1991 }
1992 // GC can be disabled if someone has used GetPrimitiveArrayCritical but has not yet released it.
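 // (JNI Get/ReleasePrimitiveArrayCritical pairs bump disable_moving_gc_count_, so a copying
 // transition busy-waits here until the count drops back to zero.)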
1993 if (!copying_transition || disable_moving_gc_count_ == 0) {
1994 // TODO: Not hard code in semi-space collector?
1995 collector_type_running_ = copying_transition ? kCollectorTypeSS : collector_type;
1996 break;
1997 }
1998 }
1999 usleep(1000);
2000 }
2001 if (runtime->IsShuttingDown(self)) {
2002 // Don't allow heap transitions to happen if the runtime is shutting down since these can
2003 // cause objects to get finalized.
2004 FinishGC(self, collector::kGcTypeNone);
2005 return;
2006 }
2007 collector::GarbageCollector* collector = nullptr;
2008 {
2009 ScopedSuspendAll ssa(__FUNCTION__);
2010 switch (collector_type) {
2011 case kCollectorTypeSS: {
2012 if (!IsMovingGc(collector_type_)) {
2013 // Create the bump pointer space from the backup space.
2014 CHECK(main_space_backup_ != nullptr);
2015 std::unique_ptr<MemMap> mem_map(main_space_backup_->ReleaseMemMap());
2016 // We are transitioning from non moving GC -> moving GC; since we copied from the bump
2017 // pointer space last transition, it will be protected.
2018 CHECK(mem_map != nullptr);
2019 mem_map->Protect(PROT_READ | PROT_WRITE);
2020 bump_pointer_space_ = space::BumpPointerSpace::CreateFromMemMap("Bump pointer space",
2021 mem_map.release());
2022 AddSpace(bump_pointer_space_);
2023 collector = Compact(bump_pointer_space_, main_space_, kGcCauseCollectorTransition);
2024 // Use the now empty main space mem map for the bump pointer temp space.
2025 mem_map.reset(main_space_->ReleaseMemMap());
2026 // Unset the pointers just in case.
2027 if (dlmalloc_space_ == main_space_) {
2028 dlmalloc_space_ = nullptr;
2029 } else if (rosalloc_space_ == main_space_) {
2030 rosalloc_space_ = nullptr;
2031 }
2032 // Remove the main space so that we don't try to trim it; this doesn't work for debug
2033 // builds since RosAlloc attempts to read the magic number from a protected page.
2034 RemoveSpace(main_space_);
2035 RemoveRememberedSet(main_space_);
2036 delete main_space_; // Delete the space since it has been removed.
2037 main_space_ = nullptr;
2038 RemoveRememberedSet(main_space_backup_.get());
2039 main_space_backup_.reset(nullptr); // Deletes the space.
2040 temp_space_ = space::BumpPointerSpace::CreateFromMemMap("Bump pointer space 2",
2041 mem_map.release());
2042 AddSpace(temp_space_);
2043 }
2044 break;
2045 }
2046 case kCollectorTypeMS:
2047 // Fall through.
2048 case kCollectorTypeCMS: {
2049 if (IsMovingGc(collector_type_)) {
2050 CHECK(temp_space_ != nullptr);
2051 std::unique_ptr<MemMap> mem_map(temp_space_->ReleaseMemMap());
2052 RemoveSpace(temp_space_);
2053 temp_space_ = nullptr;
2054 mem_map->Protect(PROT_READ | PROT_WRITE);
2055 CreateMainMallocSpace(mem_map.get(),
2056 kDefaultInitialSize,
2057 std::min(mem_map->Size(), growth_limit_),
2058 mem_map->Size());
2059 mem_map.release();
2060 // Compact to the main space from the bump pointer space, don't need to swap semispaces.
2061 AddSpace(main_space_);
2062 collector = Compact(main_space_, bump_pointer_space_, kGcCauseCollectorTransition);
2063 mem_map.reset(bump_pointer_space_->ReleaseMemMap());
2064 RemoveSpace(bump_pointer_space_);
2065 bump_pointer_space_ = nullptr;
2066 const char* name = kUseRosAlloc ? kRosAllocSpaceName[1] : kDlMallocSpaceName[1];
2067 // Temporarily unprotect the backup mem map so rosalloc can write the debug magic number.
2068 if (kIsDebugBuild && kUseRosAlloc) { 2069 mem_map->Protect(PROT_READ | PROT_WRITE); 2070 } 2071 main_space_backup_.reset(CreateMallocSpaceFromMemMap( 2072 mem_map.get(), 2073 kDefaultInitialSize, 2074 std::min(mem_map->Size(), growth_limit_), 2075 mem_map->Size(), 2076 name, 2077 true)); 2078 if (kIsDebugBuild && kUseRosAlloc) { 2079 mem_map->Protect(PROT_NONE); 2080 } 2081 mem_map.release(); 2082 } 2083 break; 2084 } 2085 default: { 2086 LOG(FATAL) << "Attempted to transition to invalid collector type " 2087 << static_cast<size_t>(collector_type); 2088 break; 2089 } 2090 } 2091 ChangeCollector(collector_type); 2092 } 2093 // Can't call into java code with all threads suspended. 2094 reference_processor_->EnqueueClearedReferences(self); 2095 uint64_t duration = NanoTime() - start_time; 2096 GrowForUtilization(semi_space_collector_); 2097 DCHECK(collector != nullptr); 2098 LogGC(kGcCauseCollectorTransition, collector); 2099 FinishGC(self, collector::kGcTypeFull); 2100 { 2101 ScopedObjectAccess soa(self); 2102 soa.Vm()->UnloadNativeLibraries(); 2103 } 2104 int32_t after_allocated = num_bytes_allocated_.LoadSequentiallyConsistent(); 2105 int32_t delta_allocated = before_allocated - after_allocated; 2106 std::string saved_str; 2107 if (delta_allocated >= 0) { 2108 saved_str = " saved at least " + PrettySize(delta_allocated); 2109 } else { 2110 saved_str = " expanded " + PrettySize(-delta_allocated); 2111 } 2112 VLOG(heap) << "Collector transition to " << collector_type << " took " 2113 << PrettyDuration(duration) << saved_str; 2114} 2115 2116void Heap::ChangeCollector(CollectorType collector_type) { 2117 // TODO: Only do this with all mutators suspended to avoid races. 2118 if (collector_type != collector_type_) { 2119 if (collector_type == kCollectorTypeMC) { 2120 // Don't allow mark compact unless support is compiled in. 2121 CHECK(kMarkCompactSupport); 2122 } 2123 collector_type_ = collector_type; 2124 gc_plan_.clear(); 2125 switch (collector_type_) { 2126 case kCollectorTypeCC: { 2127 gc_plan_.push_back(collector::kGcTypeFull); 2128 if (use_tlab_) { 2129 ChangeAllocator(kAllocatorTypeRegionTLAB); 2130 } else { 2131 ChangeAllocator(kAllocatorTypeRegion); 2132 } 2133 break; 2134 } 2135 case kCollectorTypeMC: // Fall-through. 2136 case kCollectorTypeSS: // Fall-through. 2137 case kCollectorTypeGSS: { 2138 gc_plan_.push_back(collector::kGcTypeFull); 2139 if (use_tlab_) { 2140 ChangeAllocator(kAllocatorTypeTLAB); 2141 } else { 2142 ChangeAllocator(kAllocatorTypeBumpPointer); 2143 } 2144 break; 2145 } 2146 case kCollectorTypeMS: { 2147 gc_plan_.push_back(collector::kGcTypeSticky); 2148 gc_plan_.push_back(collector::kGcTypePartial); 2149 gc_plan_.push_back(collector::kGcTypeFull); 2150 ChangeAllocator(kUseRosAlloc ? kAllocatorTypeRosAlloc : kAllocatorTypeDlMalloc); 2151 break; 2152 } 2153 case kCollectorTypeCMS: { 2154 gc_plan_.push_back(collector::kGcTypeSticky); 2155 gc_plan_.push_back(collector::kGcTypePartial); 2156 gc_plan_.push_back(collector::kGcTypeFull); 2157 ChangeAllocator(kUseRosAlloc ? 
kAllocatorTypeRosAlloc : kAllocatorTypeDlMalloc); 2158 break; 2159 } 2160 default: { 2161 UNIMPLEMENTED(FATAL); 2162 UNREACHABLE(); 2163 } 2164 } 2165 if (IsGcConcurrent()) { 2166 concurrent_start_bytes_ = 2167 std::max(max_allowed_footprint_, kMinConcurrentRemainingBytes) - kMinConcurrentRemainingBytes; 2168 } else { 2169 concurrent_start_bytes_ = std::numeric_limits<size_t>::max(); 2170 } 2171 } 2172} 2173 2174// Special compacting collector which uses sub-optimal bin packing to reduce zygote space size. 2175class ZygoteCompactingCollector FINAL : public collector::SemiSpace { 2176 public: 2177 ZygoteCompactingCollector(gc::Heap* heap, bool is_running_on_memory_tool) 2178 : SemiSpace(heap, false, "zygote collector"), 2179 bin_live_bitmap_(nullptr), 2180 bin_mark_bitmap_(nullptr), 2181 is_running_on_memory_tool_(is_running_on_memory_tool) {} 2182 2183 void BuildBins(space::ContinuousSpace* space) REQUIRES_SHARED(Locks::mutator_lock_) { 2184 bin_live_bitmap_ = space->GetLiveBitmap(); 2185 bin_mark_bitmap_ = space->GetMarkBitmap(); 2186 uintptr_t prev = reinterpret_cast<uintptr_t>(space->Begin()); 2187 WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_); 2188 // Note: This requires traversing the space in increasing order of object addresses. 2189 auto visitor = [&](mirror::Object* obj) REQUIRES_SHARED(Locks::mutator_lock_) { 2190 uintptr_t object_addr = reinterpret_cast<uintptr_t>(obj); 2191 size_t bin_size = object_addr - prev; 2192 // Add the bin consisting of the end of the previous object to the start of the current object. 2193 AddBin(bin_size, prev); 2194 prev = object_addr + RoundUp(obj->SizeOf<kDefaultVerifyFlags>(), kObjectAlignment); 2195 }; 2196 bin_live_bitmap_->Walk(visitor); 2197 // Add the last bin which spans after the last object to the end of the space. 2198 AddBin(reinterpret_cast<uintptr_t>(space->End()) - prev, prev); 2199 } 2200 2201 private: 2202 // Maps from bin sizes to locations. 2203 std::multimap<size_t, uintptr_t> bins_; 2204 // Live bitmap of the space which contains the bins. 2205 accounting::ContinuousSpaceBitmap* bin_live_bitmap_; 2206 // Mark bitmap of the space which contains the bins. 2207 accounting::ContinuousSpaceBitmap* bin_mark_bitmap_; 2208 const bool is_running_on_memory_tool_; 2209 2210 void AddBin(size_t size, uintptr_t position) { 2211 if (is_running_on_memory_tool_) { 2212 MEMORY_TOOL_MAKE_DEFINED(reinterpret_cast<void*>(position), size); 2213 } 2214 if (size != 0) { 2215 bins_.insert(std::make_pair(size, position)); 2216 } 2217 } 2218 2219 virtual bool ShouldSweepSpace(space::ContinuousSpace* space ATTRIBUTE_UNUSED) const { 2220 // Don't sweep any spaces since we probably blasted the internal accounting of the free list 2221 // allocator. 2222 return false; 2223 } 2224 2225 virtual mirror::Object* MarkNonForwardedObject(mirror::Object* obj) 2226 REQUIRES(Locks::heap_bitmap_lock_, Locks::mutator_lock_) { 2227 size_t obj_size = obj->SizeOf<kDefaultVerifyFlags>(); 2228 size_t alloc_size = RoundUp(obj_size, kObjectAlignment); 2229 mirror::Object* forward_address; 2230 // Find the smallest bin which we can move obj in. 2231 auto it = bins_.lower_bound(alloc_size); 2232 if (it == bins_.end()) { 2233 // No available space in the bins, place it in the target space instead (grows the zygote 2234 // space). 
2235 size_t bytes_allocated, dummy;
2236 forward_address = to_space_->Alloc(self_, alloc_size, &bytes_allocated, nullptr, &dummy);
2237 if (to_space_live_bitmap_ != nullptr) {
2238 to_space_live_bitmap_->Set(forward_address);
2239 } else {
2240 GetHeap()->GetNonMovingSpace()->GetLiveBitmap()->Set(forward_address);
2241 GetHeap()->GetNonMovingSpace()->GetMarkBitmap()->Set(forward_address);
2242 }
2243 } else {
2244 size_t size = it->first;
2245 uintptr_t pos = it->second;
2246 bins_.erase(it); // Erase the old bin which we replace with the new smaller bin.
2247 forward_address = reinterpret_cast<mirror::Object*>(pos);
2248 // Set the live and mark bits so that sweeping system weaks works properly.
2249 bin_live_bitmap_->Set(forward_address);
2250 bin_mark_bitmap_->Set(forward_address);
2251 DCHECK_GE(size, alloc_size);
2252 // Add a new bin with the remaining space.
2253 AddBin(size - alloc_size, pos + alloc_size);
2254 }
2255 // Copy the object over to its new location. Don't use alloc_size to avoid valgrind error.
2256 memcpy(reinterpret_cast<void*>(forward_address), obj, obj_size);
2257 if (kUseBakerReadBarrier) {
2258 obj->AssertReadBarrierState();
2259 forward_address->AssertReadBarrierState();
2260 }
2261 return forward_address;
2262 }
2263};
2264
2265void Heap::UnBindBitmaps() {
2266 TimingLogger::ScopedTiming t("UnBindBitmaps", GetCurrentGcIteration()->GetTimings());
2267 for (const auto& space : GetContinuousSpaces()) {
2268 if (space->IsContinuousMemMapAllocSpace()) {
2269 space::ContinuousMemMapAllocSpace* alloc_space = space->AsContinuousMemMapAllocSpace();
2270 if (alloc_space->HasBoundBitmaps()) {
2271 alloc_space->UnBindBitmaps();
2272 }
2273 }
2274 }
2275}
2276
2277void Heap::PreZygoteFork() {
2278 if (!HasZygoteSpace()) {
2279 // We still want to GC in case there are some unreachable non moving objects that could cause
2280 // suboptimal bin packing when we compact the zygote space.
2281 CollectGarbageInternal(collector::kGcTypeFull, kGcCauseBackground, false);
2282 // Trim the pages at the end of the non moving space. Trim while not holding zygote lock since
2283 // the trim process may require locking the mutator lock.
2284 non_moving_space_->Trim();
2285 }
2286 Thread* self = Thread::Current();
2287 MutexLock mu(self, zygote_creation_lock_);
2288 // Try to see if we have any Zygote spaces.
2289 if (HasZygoteSpace()) {
2290 return;
2291 }
2292 Runtime::Current()->GetInternTable()->AddNewTable();
2293 Runtime::Current()->GetClassLinker()->MoveClassTableToPreZygote();
2294 VLOG(heap) << "Starting PreZygoteFork";
2295 // The end of the non-moving space may be protected; unprotect it so that we can copy the zygote
2296 // there.
2297 non_moving_space_->GetMemMap()->Protect(PROT_READ | PROT_WRITE);
2298 const bool same_space = non_moving_space_ == main_space_;
2299 if (kCompactZygote) {
2300 // Temporarily disable rosalloc verification because the zygote
2301 // compaction will mess up the rosalloc internal metadata.
2302 ScopedDisableRosAllocVerification disable_rosalloc_verif(this);
2303 ZygoteCompactingCollector zygote_collector(this, is_running_on_memory_tool_);
2304 zygote_collector.BuildBins(non_moving_space_);
2305 // Create a new bump pointer space which we will compact into.
2306 space::BumpPointerSpace target_space("zygote bump space", non_moving_space_->End(),
2307 non_moving_space_->Limit());
2308 // Compact the bump pointer space to a new zygote bump pointer space.
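 // Live objects from the chosen from-space are packed into the free gaps ("bins") of the non
 // moving space where possible; whatever does not fit in a bin is allocated from target_space,
 // extending the area that becomes the zygote space.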
2309 bool reset_main_space = false; 2310 if (IsMovingGc(collector_type_)) { 2311 if (collector_type_ == kCollectorTypeCC) { 2312 zygote_collector.SetFromSpace(region_space_); 2313 } else { 2314 zygote_collector.SetFromSpace(bump_pointer_space_); 2315 } 2316 } else { 2317 CHECK(main_space_ != nullptr); 2318 CHECK_NE(main_space_, non_moving_space_) 2319 << "Does not make sense to compact within the same space"; 2320 // Copy from the main space. 2321 zygote_collector.SetFromSpace(main_space_); 2322 reset_main_space = true; 2323 } 2324 zygote_collector.SetToSpace(&target_space); 2325 zygote_collector.SetSwapSemiSpaces(false); 2326 zygote_collector.Run(kGcCauseCollectorTransition, false); 2327 if (reset_main_space) { 2328 main_space_->GetMemMap()->Protect(PROT_READ | PROT_WRITE); 2329 madvise(main_space_->Begin(), main_space_->Capacity(), MADV_DONTNEED); 2330 MemMap* mem_map = main_space_->ReleaseMemMap(); 2331 RemoveSpace(main_space_); 2332 space::Space* old_main_space = main_space_; 2333 CreateMainMallocSpace(mem_map, kDefaultInitialSize, std::min(mem_map->Size(), growth_limit_), 2334 mem_map->Size()); 2335 delete old_main_space; 2336 AddSpace(main_space_); 2337 } else { 2338 if (collector_type_ == kCollectorTypeCC) { 2339 region_space_->GetMemMap()->Protect(PROT_READ | PROT_WRITE); 2340 // Evacuated everything out of the region space, clear the mark bitmap. 2341 region_space_->GetMarkBitmap()->Clear(); 2342 } else { 2343 bump_pointer_space_->GetMemMap()->Protect(PROT_READ | PROT_WRITE); 2344 } 2345 } 2346 if (temp_space_ != nullptr) { 2347 CHECK(temp_space_->IsEmpty()); 2348 } 2349 total_objects_freed_ever_ += GetCurrentGcIteration()->GetFreedObjects(); 2350 total_bytes_freed_ever_ += GetCurrentGcIteration()->GetFreedBytes(); 2351 // Update the end and write out image. 2352 non_moving_space_->SetEnd(target_space.End()); 2353 non_moving_space_->SetLimit(target_space.Limit()); 2354 VLOG(heap) << "Create zygote space with size=" << non_moving_space_->Size() << " bytes"; 2355 } 2356 // Change the collector to the post zygote one. 2357 ChangeCollector(foreground_collector_type_); 2358 // Save the old space so that we can remove it after we complete creating the zygote space. 2359 space::MallocSpace* old_alloc_space = non_moving_space_; 2360 // Turn the current alloc space into a zygote space and obtain the new alloc space composed of 2361 // the remaining available space. 2362 // Remove the old space before creating the zygote space since creating the zygote space sets 2363 // the old alloc space's bitmaps to null. 2364 RemoveSpace(old_alloc_space); 2365 if (collector::SemiSpace::kUseRememberedSet) { 2366 // Sanity bound check. 2367 FindRememberedSetFromSpace(old_alloc_space)->AssertAllDirtyCardsAreWithinSpace(); 2368 // Remove the remembered set for the now zygote space (the old 2369 // non-moving space). Note now that we have compacted objects into 2370 // the zygote space, the data in the remembered set is no longer 2371 // needed. The zygote space will instead have a mod-union table 2372 // from this point on. 2373 RemoveRememberedSet(old_alloc_space); 2374 } 2375 // Remaining space becomes the new non moving space. 
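 // CreateZygoteSpace splits old_alloc_space in two: the populated front becomes the zygote
 // space, and the unused tail is returned through &non_moving_space_ as the new alloc space.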
2376 zygote_space_ = old_alloc_space->CreateZygoteSpace(kNonMovingSpaceName, low_memory_mode_, 2377 &non_moving_space_); 2378 CHECK(!non_moving_space_->CanMoveObjects()); 2379 if (same_space) { 2380 main_space_ = non_moving_space_; 2381 SetSpaceAsDefault(main_space_); 2382 } 2383 delete old_alloc_space; 2384 CHECK(HasZygoteSpace()) << "Failed creating zygote space"; 2385 AddSpace(zygote_space_); 2386 non_moving_space_->SetFootprintLimit(non_moving_space_->Capacity()); 2387 AddSpace(non_moving_space_); 2388 if (kUseBakerReadBarrier && gc::collector::ConcurrentCopying::kGrayDirtyImmuneObjects) { 2389 // Treat all of the objects in the zygote as marked to avoid unnecessary dirty pages. This is 2390 // safe since we mark all of the objects that may reference non immune objects as gray. 2391 zygote_space_->GetLiveBitmap()->VisitMarkedRange( 2392 reinterpret_cast<uintptr_t>(zygote_space_->Begin()), 2393 reinterpret_cast<uintptr_t>(zygote_space_->Limit()), 2394 [](mirror::Object* obj) REQUIRES_SHARED(Locks::mutator_lock_) { 2395 CHECK(obj->AtomicSetMarkBit(0, 1)); 2396 }); 2397 } 2398 2399 // Create the zygote space mod union table. 2400 accounting::ModUnionTable* mod_union_table = 2401 new accounting::ModUnionTableCardCache("zygote space mod-union table", this, zygote_space_); 2402 CHECK(mod_union_table != nullptr) << "Failed to create zygote space mod-union table"; 2403 2404 if (collector_type_ != kCollectorTypeCC) { 2405 // Set all the cards in the mod-union table since we don't know which objects contain references 2406 // to large objects. 2407 mod_union_table->SetCards(); 2408 } else { 2409 // Make sure to clear the zygote space cards so that we don't dirty pages in the next GC. There 2410 // may be dirty cards from the zygote compaction or reference processing. These cards are not 2411 // necessary to have marked since the zygote space may not refer to any objects not in the 2412 // zygote or image spaces at this point. 2413 mod_union_table->ProcessCards(); 2414 mod_union_table->ClearTable(); 2415 2416 // For CC we never collect zygote large objects. This means we do not need to set the cards for 2417 // the zygote mod-union table and we can also clear all of the existing image mod-union tables. 2418 // The existing mod-union tables are only for image spaces and may only reference zygote and 2419 // image objects. 2420 for (auto& pair : mod_union_tables_) { 2421 CHECK(pair.first->IsImageSpace()); 2422 CHECK(!pair.first->AsImageSpace()->GetImageHeader().IsAppImage()); 2423 accounting::ModUnionTable* table = pair.second; 2424 table->ClearTable(); 2425 } 2426 } 2427 AddModUnionTable(mod_union_table); 2428 large_object_space_->SetAllLargeObjectsAsZygoteObjects(self); 2429 if (collector::SemiSpace::kUseRememberedSet) { 2430 // Add a new remembered set for the post-zygote non-moving space. 
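 // (Remembered sets record references from this space into the semi-space collector's moving
 // spaces, complementing the mod-union table that now covers the zygote space.)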
2431 accounting::RememberedSet* post_zygote_non_moving_space_rem_set = 2432 new accounting::RememberedSet("Post-zygote non-moving space remembered set", this, 2433 non_moving_space_); 2434 CHECK(post_zygote_non_moving_space_rem_set != nullptr) 2435 << "Failed to create post-zygote non-moving space remembered set"; 2436 AddRememberedSet(post_zygote_non_moving_space_rem_set); 2437 } 2438} 2439 2440void Heap::FlushAllocStack() { 2441 MarkAllocStackAsLive(allocation_stack_.get()); 2442 allocation_stack_->Reset(); 2443} 2444 2445void Heap::MarkAllocStack(accounting::ContinuousSpaceBitmap* bitmap1, 2446 accounting::ContinuousSpaceBitmap* bitmap2, 2447 accounting::LargeObjectBitmap* large_objects, 2448 accounting::ObjectStack* stack) { 2449 DCHECK(bitmap1 != nullptr); 2450 DCHECK(bitmap2 != nullptr); 2451 const auto* limit = stack->End(); 2452 for (auto* it = stack->Begin(); it != limit; ++it) { 2453 const mirror::Object* obj = it->AsMirrorPtr(); 2454 if (!kUseThreadLocalAllocationStack || obj != nullptr) { 2455 if (bitmap1->HasAddress(obj)) { 2456 bitmap1->Set(obj); 2457 } else if (bitmap2->HasAddress(obj)) { 2458 bitmap2->Set(obj); 2459 } else { 2460 DCHECK(large_objects != nullptr); 2461 large_objects->Set(obj); 2462 } 2463 } 2464 } 2465} 2466 2467void Heap::SwapSemiSpaces() { 2468 CHECK(bump_pointer_space_ != nullptr); 2469 CHECK(temp_space_ != nullptr); 2470 std::swap(bump_pointer_space_, temp_space_); 2471} 2472 2473collector::GarbageCollector* Heap::Compact(space::ContinuousMemMapAllocSpace* target_space, 2474 space::ContinuousMemMapAllocSpace* source_space, 2475 GcCause gc_cause) { 2476 CHECK(kMovingCollector); 2477 if (target_space != source_space) { 2478 // Don't swap spaces since this isn't a typical semi space collection. 2479 semi_space_collector_->SetSwapSemiSpaces(false); 2480 semi_space_collector_->SetFromSpace(source_space); 2481 semi_space_collector_->SetToSpace(target_space); 2482 semi_space_collector_->Run(gc_cause, false); 2483 return semi_space_collector_; 2484 } else { 2485 CHECK(target_space->IsBumpPointerSpace()) 2486 << "In-place compaction is only supported for bump pointer spaces"; 2487 mark_compact_collector_->SetSpace(target_space->AsBumpPointerSpace()); 2488 mark_compact_collector_->Run(kGcCauseCollectorTransition, false); 2489 return mark_compact_collector_; 2490 } 2491} 2492 2493void Heap::TraceHeapSize(size_t heap_size) { 2494 ATRACE_INT("Heap size (KB)", heap_size / KB); 2495} 2496 2497collector::GcType Heap::CollectGarbageInternal(collector::GcType gc_type, 2498 GcCause gc_cause, 2499 bool clear_soft_references) { 2500 Thread* self = Thread::Current(); 2501 Runtime* runtime = Runtime::Current(); 2502 // If the heap can't run the GC, silently fail and return that no GC was run. 2503 switch (gc_type) { 2504 case collector::kGcTypePartial: { 2505 if (!HasZygoteSpace()) { 2506 return collector::kGcTypeNone; 2507 } 2508 break; 2509 } 2510 default: { 2511 // Other GC types don't have any special cases which makes them not runnable. The main case 2512 // here is full GC. 2513 } 2514 } 2515 ScopedThreadStateChange tsc(self, kWaitingPerformingGc); 2516 Locks::mutator_lock_->AssertNotHeld(self); 2517 if (self->IsHandlingStackOverflow()) { 2518 // If we are throwing a stack overflow error we probably don't have enough remaining stack 2519 // space to run the GC. 
2520 return collector::kGcTypeNone;
2521 }
2522 bool compacting_gc;
2523 {
2524 gc_complete_lock_->AssertNotHeld(self);
2525 ScopedThreadStateChange tsc2(self, kWaitingForGcToComplete);
2526 MutexLock mu(self, *gc_complete_lock_);
2527 // Ensure there is only one GC at a time.
2528 WaitForGcToCompleteLocked(gc_cause, self);
2529 compacting_gc = IsMovingGc(collector_type_);
2530 // GC can be disabled if someone has used GetPrimitiveArrayCritical but has not yet released it.
2531 if (compacting_gc && disable_moving_gc_count_ != 0) {
2532 LOG(WARNING) << "Skipping GC due to disable moving GC count " << disable_moving_gc_count_;
2533 return collector::kGcTypeNone;
2534 }
2535 if (gc_disabled_for_shutdown_) {
2536 return collector::kGcTypeNone;
2537 }
2538 collector_type_running_ = collector_type_;
2539 }
2540 if (gc_cause == kGcCauseForAlloc && runtime->HasStatsEnabled()) {
2541 ++runtime->GetStats()->gc_for_alloc_count;
2542 ++self->GetStats()->gc_for_alloc_count;
2543 }
2544 const uint64_t bytes_allocated_before_gc = GetBytesAllocated();
2545
2546 if (gc_type == NonStickyGcType()) {
2547 // Move all bytes from new_native_bytes_allocated_ to
2548 // old_native_bytes_allocated_ now that GC has been triggered, resetting
2549 // new_native_bytes_allocated_ to zero in the process.
2550 old_native_bytes_allocated_.FetchAndAddRelaxed(new_native_bytes_allocated_.ExchangeRelaxed(0));
2551 if (gc_cause == kGcCauseForNativeAllocBlocking) {
2552 MutexLock mu(self, *native_blocking_gc_lock_);
2553 native_blocking_gc_in_progress_ = true;
2554 }
2555 }
2556
2557 DCHECK_LT(gc_type, collector::kGcTypeMax);
2558 DCHECK_NE(gc_type, collector::kGcTypeNone);
2559
2560 collector::GarbageCollector* collector = nullptr;
2561 // TODO: Clean this up.
2562 if (compacting_gc) {
2563 DCHECK(current_allocator_ == kAllocatorTypeBumpPointer ||
2564 current_allocator_ == kAllocatorTypeTLAB ||
2565 current_allocator_ == kAllocatorTypeRegion ||
2566 current_allocator_ == kAllocatorTypeRegionTLAB);
2567 switch (collector_type_) {
2568 case kCollectorTypeSS:
2569 // Fall-through.
2570 case kCollectorTypeGSS:
2571 semi_space_collector_->SetFromSpace(bump_pointer_space_);
2572 semi_space_collector_->SetToSpace(temp_space_);
2573 semi_space_collector_->SetSwapSemiSpaces(true);
2574 collector = semi_space_collector_;
2575 break;
2576 case kCollectorTypeCC:
2577 collector = concurrent_copying_collector_;
2578 break;
2579 case kCollectorTypeMC:
2580 mark_compact_collector_->SetSpace(bump_pointer_space_);
2581 collector = mark_compact_collector_;
2582 break;
2583 default:
2584 LOG(FATAL) << "Invalid collector type " << static_cast<size_t>(collector_type_);
2585 }
2586 if (collector != mark_compact_collector_ && collector != concurrent_copying_collector_) {
2587 temp_space_->GetMemMap()->Protect(PROT_READ | PROT_WRITE);
2588 if (kIsDebugBuild) {
2589 // Try to read each page of the memory map in case mprotect didn't work properly b/19894268.
2590 temp_space_->GetMemMap()->TryReadable();
2591 }
2592 CHECK(temp_space_->IsEmpty());
2593 }
2594 gc_type = collector::kGcTypeFull; // TODO: Not hard code this in.
2595 } else if (current_allocator_ == kAllocatorTypeRosAlloc ||
2596 current_allocator_ == kAllocatorTypeDlMalloc) {
2597 collector = FindCollectorByGcType(gc_type);
2598 } else {
2599 LOG(FATAL) << "Invalid current allocator " << current_allocator_;
2600 }
2601 if (IsGcConcurrent()) {
2602 // Disable concurrent GC check so that we don't have spammy JNI requests.
2603 // This gets recalculated in GrowForUtilization. It is important that it is disabled /
2604 // calculated in the same thread so that there aren't any races that can cause it to become
2605 // permanently disabled. b/17942071
2606 concurrent_start_bytes_ = std::numeric_limits<size_t>::max();
2607 }
2608
2609 CHECK(collector != nullptr)
2610 << "Could not find garbage collector with collector_type="
2611 << static_cast<size_t>(collector_type_) << " and gc_type=" << gc_type;
2612 collector->Run(gc_cause, clear_soft_references || runtime->IsZygote());
2613 total_objects_freed_ever_ += GetCurrentGcIteration()->GetFreedObjects();
2614 total_bytes_freed_ever_ += GetCurrentGcIteration()->GetFreedBytes();
2615 RequestTrim(self);
2616 // Enqueue cleared references.
2617 reference_processor_->EnqueueClearedReferences(self);
2618 // Grow the heap so that we know when to perform the next GC.
2619 GrowForUtilization(collector, bytes_allocated_before_gc);
2620 LogGC(gc_cause, collector);
2621 FinishGC(self, gc_type);
2622 // Inform DDMS that a GC completed.
2623 Dbg::GcDidFinish();
2624 // Unload native libraries for class unloading. We do this after calling FinishGC to prevent
2625 // deadlocks in case the JNI_OnUnload function does allocations.
2626 {
2627 ScopedObjectAccess soa(self);
2628 soa.Vm()->UnloadNativeLibraries();
2629 }
2630 return gc_type;
2631}
2632
2633void Heap::LogGC(GcCause gc_cause, collector::GarbageCollector* collector) {
2634 const size_t duration = GetCurrentGcIteration()->GetDurationNs();
2635 const std::vector<uint64_t>& pause_times = GetCurrentGcIteration()->GetPauseTimes();
2636 // Print the GC if it is an explicit GC (e.g. Runtime.gc()) or a slow GC
2637 // (mutator time blocked >= long_pause_log_threshold_).
2638 bool log_gc = kLogAllGCs || gc_cause == kGcCauseExplicit;
2639 if (!log_gc && CareAboutPauseTimes()) {
2640 // GC for alloc pauses the allocating thread, so consider it as a pause.
2641 log_gc = duration > long_gc_log_threshold_ ||
2642 (gc_cause == kGcCauseForAlloc && duration > long_pause_log_threshold_);
2643 for (uint64_t pause : pause_times) {
2644 log_gc = log_gc || pause >= long_pause_log_threshold_;
2645 }
2646 }
2647 if (log_gc) {
2648 const size_t percent_free = GetPercentFree();
2649 const size_t current_heap_size = GetBytesAllocated();
2650 const size_t total_memory = GetTotalMemory();
2651 std::ostringstream pause_string;
2652 for (size_t i = 0; i < pause_times.size(); ++i) {
2653 pause_string << PrettyDuration((pause_times[i] / 1000) * 1000)
2654 << ((i != pause_times.size() - 1) ? "," : "");
2655 }
2656 LOG(INFO) << gc_cause << " " << collector->GetName()
2657 << " GC freed " << current_gc_iteration_.GetFreedObjects() << "("
2658 << PrettySize(current_gc_iteration_.GetFreedBytes()) << ") AllocSpace objects, "
2659 << current_gc_iteration_.GetFreedLargeObjects() << "("
2660 << PrettySize(current_gc_iteration_.GetFreedLargeObjectBytes()) << ") LOS objects, "
2661 << percent_free << "% free, " << PrettySize(current_heap_size) << "/"
2662 << PrettySize(total_memory) << ", " << "paused " << pause_string.str()
2663 << " total " << PrettyDuration((duration / 1000) * 1000);
2664 VLOG(heap) << Dumpable<TimingLogger>(*current_gc_iteration_.GetTimings());
2665 }
2666}
2667
2668void Heap::FinishGC(Thread* self, collector::GcType gc_type) {
2669 MutexLock mu(self, *gc_complete_lock_);
2670 collector_type_running_ = kCollectorTypeNone;
2671 if (gc_type != collector::kGcTypeNone) {
2672 last_gc_type_ = gc_type;
2673
2674 // Update stats.
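 // (The per-window counters updated below feed UpdateGcCountRateHistograms().)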
    ++gc_count_last_window_;
    if (running_collection_is_blocking_) {
      // If the currently running collection was a blocking one,
      // increment the counters and reset the flag.
      ++blocking_gc_count_;
      blocking_gc_time_ += GetCurrentGcIteration()->GetDurationNs();
      ++blocking_gc_count_last_window_;
    }
    // Update the gc count rate histograms if due.
    UpdateGcCountRateHistograms();
  }
  // Reset.
  running_collection_is_blocking_ = false;
  thread_running_gc_ = nullptr;
  // Wake anyone who may have been waiting for the GC to complete.
  gc_complete_cond_->Broadcast(self);
}

void Heap::UpdateGcCountRateHistograms() {
  // Invariant: if the time since the last update includes more than
  // one window, all the GC runs (if > 0) must have happened in the first
  // window because otherwise the update must have already taken place
  // at an earlier GC run. So, we report the non-first windows with
  // zero counts to the histograms.
  DCHECK_EQ(last_update_time_gc_count_rate_histograms_ % kGcCountRateHistogramWindowDuration, 0U);
  uint64_t now = NanoTime();
  DCHECK_GE(now, last_update_time_gc_count_rate_histograms_);
  uint64_t time_since_last_update = now - last_update_time_gc_count_rate_histograms_;
  uint64_t num_of_windows = time_since_last_update / kGcCountRateHistogramWindowDuration;
  if (time_since_last_update >= kGcCountRateHistogramWindowDuration) {
    // Record the first window.
    gc_count_rate_histogram_.AddValue(gc_count_last_window_ - 1);  // Exclude the current run.
    blocking_gc_count_rate_histogram_.AddValue(running_collection_is_blocking_ ?
        blocking_gc_count_last_window_ - 1 : blocking_gc_count_last_window_);
    // Record the other windows (with zero counts).
    for (uint64_t i = 0; i < num_of_windows - 1; ++i) {
      gc_count_rate_histogram_.AddValue(0);
      blocking_gc_count_rate_histogram_.AddValue(0);
    }
    // Update the last update time and reset the counters.
    last_update_time_gc_count_rate_histograms_ =
        (now / kGcCountRateHistogramWindowDuration) * kGcCountRateHistogramWindowDuration;
    gc_count_last_window_ = 1;  // Include the current run.
    blocking_gc_count_last_window_ = running_collection_is_blocking_ ? 1 : 0;
  }
  DCHECK_EQ(last_update_time_gc_count_rate_histograms_ % kGcCountRateHistogramWindowDuration, 0U);
}

class RootMatchesObjectVisitor : public SingleRootVisitor {
 public:
  explicit RootMatchesObjectVisitor(const mirror::Object* obj) : obj_(obj) { }

  void VisitRoot(mirror::Object* root, const RootInfo& info)
      OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
    if (root == obj_) {
      LOG(INFO) << "Object " << obj_ << " is a root " << info.ToString();
    }
  }

 private:
  const mirror::Object* const obj_;
};


class ScanVisitor {
 public:
  void operator()(const mirror::Object* obj) const {
    LOG(ERROR) << "Would have rescanned object " << obj;
  }
};

// Verify a reference from an object.
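// The visitor records failures in a shared Atomic<size_t> instead of aborting, so one
// verification pass can report every dead reference it encounters; callers read the
// accumulated total afterwards via GetFailureCount().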
class VerifyReferenceVisitor : public SingleRootVisitor {
 public:
  VerifyReferenceVisitor(Heap* heap, Atomic<size_t>* fail_count, bool verify_referent)
      REQUIRES_SHARED(Locks::mutator_lock_)
      : heap_(heap), fail_count_(fail_count), verify_referent_(verify_referent) {}

  size_t GetFailureCount() const {
    return fail_count_->LoadSequentiallyConsistent();
  }

  void operator()(ObjPtr<mirror::Class> klass ATTRIBUTE_UNUSED, ObjPtr<mirror::Reference> ref) const
      REQUIRES_SHARED(Locks::mutator_lock_) {
    if (verify_referent_) {
      VerifyReference(ref.Ptr(), ref->GetReferent(), mirror::Reference::ReferentOffset());
    }
  }

  void operator()(ObjPtr<mirror::Object> obj,
                  MemberOffset offset,
                  bool is_static ATTRIBUTE_UNUSED) const
      REQUIRES_SHARED(Locks::mutator_lock_) {
    VerifyReference(obj.Ptr(), obj->GetFieldObject<mirror::Object>(offset), offset);
  }

  bool IsLive(ObjPtr<mirror::Object> obj) const NO_THREAD_SAFETY_ANALYSIS {
    return heap_->IsLiveObjectLocked(obj, true, false, true);
  }

  void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const
      REQUIRES_SHARED(Locks::mutator_lock_) {
    if (!root->IsNull()) {
      VisitRoot(root);
    }
  }
  void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
      REQUIRES_SHARED(Locks::mutator_lock_) {
    const_cast<VerifyReferenceVisitor*>(this)->VisitRoot(
        root->AsMirrorPtr(), RootInfo(kRootVMInternal));
  }

  virtual void VisitRoot(mirror::Object* root, const RootInfo& root_info) OVERRIDE
      REQUIRES_SHARED(Locks::mutator_lock_) {
    if (root == nullptr) {
      LOG(ERROR) << "Root is null with info " << root_info.GetType();
    } else if (!VerifyReference(nullptr, root, MemberOffset(0))) {
      LOG(ERROR) << "Root " << root << " is dead with type " << mirror::Object::PrettyTypeOf(root)
          << " thread_id= " << root_info.GetThreadId() << " root_type= " << root_info.GetType();
    }
  }

 private:
  // TODO: Fix the no thread safety analysis.
  // Returns false on failure.
  bool VerifyReference(mirror::Object* obj, mirror::Object* ref, MemberOffset offset) const
      NO_THREAD_SAFETY_ANALYSIS {
    if (ref == nullptr || IsLive(ref)) {
      // Verify that the reference is live.
      return true;
    }
    if (fail_count_->FetchAndAddSequentiallyConsistent(1) == 0) {
      // Print the message only on the first failure to prevent spam.
      LOG(ERROR) << "!!!!!!!!!!!!!!Heap corruption detected!!!!!!!!!!!!!!!!!!!";
    }
    if (obj != nullptr) {
      // Only do this part for non roots.
      accounting::CardTable* card_table = heap_->GetCardTable();
      accounting::ObjectStack* alloc_stack = heap_->allocation_stack_.get();
      accounting::ObjectStack* live_stack = heap_->live_stack_.get();
      uint8_t* card_addr = card_table->CardFromAddr(obj);
      LOG(ERROR) << "Object " << obj << " references dead object " << ref << " at offset "
                 << offset << "\n card value = " << static_cast<int>(*card_addr);
      if (heap_->IsValidObjectAddress(obj->GetClass())) {
        LOG(ERROR) << "Obj type " << obj->PrettyTypeOf();
      } else {
        LOG(ERROR) << "Object " << obj << " class(" << obj->GetClass() << ") not a heap address";
      }

      // Attempt to find the class inside of the recently freed objects.
      space::ContinuousSpace* ref_space = heap_->FindContinuousSpaceFromObject(ref, true);
      if (ref_space != nullptr && ref_space->IsMallocSpace()) {
        space::MallocSpace* space = ref_space->AsMallocSpace();
        mirror::Class* ref_class = space->FindRecentFreedObject(ref);
        if (ref_class != nullptr) {
          LOG(ERROR) << "Reference " << ref << " found as a recently freed object with class "
                     << ref_class->PrettyClass();
        } else {
          LOG(ERROR) << "Reference " << ref << " not found as a recently freed object";
        }
      }

      if (ref->GetClass() != nullptr && heap_->IsValidObjectAddress(ref->GetClass()) &&
          ref->GetClass()->IsClass()) {
        LOG(ERROR) << "Ref type " << ref->PrettyTypeOf();
      } else {
        LOG(ERROR) << "Ref " << ref << " class(" << ref->GetClass()
                   << ") is not a valid heap address";
      }

      card_table->CheckAddrIsInCardTable(reinterpret_cast<const uint8_t*>(obj));
      void* cover_begin = card_table->AddrFromCard(card_addr);
      void* cover_end = reinterpret_cast<void*>(reinterpret_cast<size_t>(cover_begin) +
          accounting::CardTable::kCardSize);
      LOG(ERROR) << "Card " << reinterpret_cast<void*>(card_addr) << " covers " << cover_begin
                 << "-" << cover_end;
      accounting::ContinuousSpaceBitmap* bitmap =
          heap_->GetLiveBitmap()->GetContinuousSpaceBitmap(obj);

      if (bitmap == nullptr) {
        LOG(ERROR) << "Object " << obj << " has no bitmap";
        if (!VerifyClassClass(obj->GetClass())) {
          LOG(ERROR) << "Object " << obj << " failed class verification!";
        }
      } else {
        // Print out how the object is live.
        if (bitmap->Test(obj)) {
          LOG(ERROR) << "Object " << obj << " found in live bitmap";
        }
        if (alloc_stack->Contains(const_cast<mirror::Object*>(obj))) {
          LOG(ERROR) << "Object " << obj << " found in allocation stack";
        }
        if (live_stack->Contains(const_cast<mirror::Object*>(obj))) {
          LOG(ERROR) << "Object " << obj << " found in live stack";
        }
        if (alloc_stack->Contains(const_cast<mirror::Object*>(ref))) {
          LOG(ERROR) << "Ref " << ref << " found in allocation stack";
        }
        if (live_stack->Contains(const_cast<mirror::Object*>(ref))) {
          LOG(ERROR) << "Ref " << ref << " found in live stack";
        }
        // Attempt to see if the card table missed the reference.
        ScanVisitor scan_visitor;
        uint8_t* byte_cover_begin = reinterpret_cast<uint8_t*>(card_table->AddrFromCard(card_addr));
        card_table->Scan<false>(bitmap, byte_cover_begin,
                                byte_cover_begin + accounting::CardTable::kCardSize, scan_visitor);
      }

      // Search to see if any of the roots reference our object.
      RootMatchesObjectVisitor visitor1(obj);
      Runtime::Current()->VisitRoots(&visitor1);
      // Search to see if any of the roots reference our reference.
      RootMatchesObjectVisitor visitor2(ref);
      Runtime::Current()->VisitRoots(&visitor2);
    }
    return false;
  }

  Heap* const heap_;
  Atomic<size_t>* const fail_count_;
  const bool verify_referent_;
};

// Verify all references within an object, for use with HeapBitmap::Visit.
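// Intended usage (a sketch; see VerifyHeapReferences() below): walk each live object with
// this visitor, which in turn applies VerifyReferenceVisitor to all of the object's
// reference fields and, optionally, reference referents.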
class VerifyObjectVisitor {
 public:
  VerifyObjectVisitor(Heap* heap, Atomic<size_t>* fail_count, bool verify_referent)
      : heap_(heap), fail_count_(fail_count), verify_referent_(verify_referent) {}

  void operator()(mirror::Object* obj) REQUIRES_SHARED(Locks::mutator_lock_) {
    // Note: we are verifying the references in obj but not obj itself; obj must be live,
    // or we would not have found it in the live bitmap.
    VerifyReferenceVisitor visitor(heap_, fail_count_, verify_referent_);
    // The class doesn't count as a reference but we should verify it anyway.
    obj->VisitReferences(visitor, visitor);
  }

  void VerifyRoots() REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Locks::heap_bitmap_lock_) {
    ReaderMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
    VerifyReferenceVisitor visitor(heap_, fail_count_, verify_referent_);
    Runtime::Current()->VisitRoots(&visitor);
  }

  size_t GetFailureCount() const {
    return fail_count_->LoadSequentiallyConsistent();
  }

 private:
  Heap* const heap_;
  Atomic<size_t>* const fail_count_;
  const bool verify_referent_;
};

void Heap::PushOnAllocationStackWithInternalGC(Thread* self, ObjPtr<mirror::Object>* obj) {
  // Slow path, the allocation stack push back must have already failed.
  DCHECK(!allocation_stack_->AtomicPushBack(obj->Ptr()));
  do {
    // TODO: Add handle VerifyObject.
    StackHandleScope<1> hs(self);
    HandleWrapperObjPtr<mirror::Object> wrapper(hs.NewHandleWrapper(obj));
    // Push our object into the reserve region of the allocation stack. This is only required due
    // to heap verification requiring that roots are live (either in the live bitmap or in the
    // allocation stack).
    CHECK(allocation_stack_->AtomicPushBackIgnoreGrowthLimit(obj->Ptr()));
    CollectGarbageInternal(collector::kGcTypeSticky, kGcCauseForAlloc, false);
  } while (!allocation_stack_->AtomicPushBack(obj->Ptr()));
}

void Heap::PushOnThreadLocalAllocationStackWithInternalGC(Thread* self,
                                                          ObjPtr<mirror::Object>* obj) {
  // Slow path, the allocation stack push back must have already failed.
  DCHECK(!self->PushOnThreadLocalAllocationStack(obj->Ptr()));
  StackReference<mirror::Object>* start_address;
  StackReference<mirror::Object>* end_address;
  while (!allocation_stack_->AtomicBumpBack(kThreadLocalAllocationStackSize, &start_address,
                                            &end_address)) {
    // TODO: Add handle VerifyObject.
    StackHandleScope<1> hs(self);
    HandleWrapperObjPtr<mirror::Object> wrapper(hs.NewHandleWrapper(obj));
    // Push our object into the reserve region of the allocation stack. This is only required due
    // to heap verification requiring that roots are live (either in the live bitmap or in the
    // allocation stack).
    CHECK(allocation_stack_->AtomicPushBackIgnoreGrowthLimit(obj->Ptr()));
    // Push into the reserve allocation stack.
    CollectGarbageInternal(collector::kGcTypeSticky, kGcCauseForAlloc, false);
  }
  self->SetThreadLocalAllocationStack(start_address, end_address);
  // Retry on the new thread-local allocation stack.
  CHECK(self->PushOnThreadLocalAllocationStack(obj->Ptr()));  // Must succeed.
}

// Must do this with mutators suspended since we are directly accessing the allocation stacks.
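// Returns the number of dead references found; zero means the heap verified clean.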
size_t Heap::VerifyHeapReferences(bool verify_referents) {
  Thread* self = Thread::Current();
  Locks::mutator_lock_->AssertExclusiveHeld(self);
  // Let's sort our allocation stacks so that we can efficiently binary search them.
  allocation_stack_->Sort();
  live_stack_->Sort();
  // Since we sorted the allocation stack content, we need to revoke all
  // thread-local allocation stacks.
  RevokeAllThreadLocalAllocationStacks(self);
  Atomic<size_t> fail_count_(0);
  VerifyObjectVisitor visitor(this, &fail_count_, verify_referents);
  // Verify objects in the allocation stack since these will be objects which were:
  // 1. Allocated prior to the GC (pre GC verification).
  // 2. Allocated during the GC (pre sweep GC verification).
  // We don't want to verify the objects in the live stack since they themselves may be
  // pointing to dead objects if they are not reachable.
  VisitObjectsPaused(visitor);
  // Verify the roots:
  visitor.VerifyRoots();
  if (visitor.GetFailureCount() > 0) {
    // Dump mod-union tables.
    for (const auto& table_pair : mod_union_tables_) {
      accounting::ModUnionTable* mod_union_table = table_pair.second;
      mod_union_table->Dump(LOG_STREAM(ERROR) << mod_union_table->GetName() << ": ");
    }
    // Dump remembered sets.
    for (const auto& table_pair : remembered_sets_) {
      accounting::RememberedSet* remembered_set = table_pair.second;
      remembered_set->Dump(LOG_STREAM(ERROR) << remembered_set->GetName() << ": ");
    }
    DumpSpaces(LOG_STREAM(ERROR));
  }
  return visitor.GetFailureCount();
}

class VerifyReferenceCardVisitor {
 public:
  VerifyReferenceCardVisitor(Heap* heap, bool* failed)
      REQUIRES_SHARED(Locks::mutator_lock_,
                      Locks::heap_bitmap_lock_)
      : heap_(heap), failed_(failed) {
  }

  // There are no card marks for native roots on a class.
  void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root ATTRIBUTE_UNUSED)
      const {}
  void VisitRoot(mirror::CompressedReference<mirror::Object>* root ATTRIBUTE_UNUSED) const {}

  // TODO: Fix lock analysis to not use NO_THREAD_SAFETY_ANALYSIS, requires support for
  // annotalysis on visitors.
  void operator()(mirror::Object* obj, MemberOffset offset, bool is_static) const
      NO_THREAD_SAFETY_ANALYSIS {
    mirror::Object* ref = obj->GetFieldObject<mirror::Object>(offset);
    // Filter out class references since changing an object's class does not mark the card as
    // dirty. Also handles large objects, since the only reference they hold is a class reference.
    if (ref != nullptr && !ref->IsClass()) {
      accounting::CardTable* card_table = heap_->GetCardTable();
      // If the object is not dirty and it is referencing something in the live stack other than
      // class, then it must be on a dirty card.
      if (!card_table->AddrIsInCardTable(obj)) {
        LOG(ERROR) << "Object " << obj << " is not in the address range of the card table";
        *failed_ = true;
      } else if (!card_table->IsDirty(obj)) {
        // TODO: Check mod-union tables.
        // The card should be either kCardDirty if it got re-dirtied after we aged it, or
        // kCardDirty - 1 if it didn't get touched since we aged it.
        accounting::ObjectStack* live_stack = heap_->live_stack_.get();
        if (live_stack->ContainsSorted(ref)) {
          if (live_stack->ContainsSorted(obj)) {
            LOG(ERROR) << "Object " << obj << " found in live stack";
          }
          if (heap_->GetLiveBitmap()->Test(obj)) {
            LOG(ERROR) << "Object " << obj << " found in live bitmap";
          }
          LOG(ERROR) << "Object " << obj << " " << mirror::Object::PrettyTypeOf(obj)
                     << " references " << ref << " " << mirror::Object::PrettyTypeOf(ref)
                     << " in live stack";

          // Print which field of the object is dead.
          if (!obj->IsObjectArray()) {
            mirror::Class* klass = is_static ? obj->AsClass() : obj->GetClass();
            CHECK(klass != nullptr);
            for (ArtField& field : (is_static ? klass->GetSFields() : klass->GetIFields())) {
              if (field.GetOffset().Int32Value() == offset.Int32Value()) {
                LOG(ERROR) << (is_static ? "Static " : "") << "field in the live stack is "
                           << field.PrettyField();
                break;
              }
            }
          } else {
            mirror::ObjectArray<mirror::Object>* object_array =
                obj->AsObjectArray<mirror::Object>();
            for (int32_t i = 0; i < object_array->GetLength(); ++i) {
              if (object_array->Get(i) == ref) {
                LOG(ERROR) << (is_static ? "Static " : "") << "obj[" << i << "] = ref";
              }
            }
          }

          *failed_ = true;
        }
      }
    }
  }

 private:
  Heap* const heap_;
  bool* const failed_;
};

class VerifyLiveStackReferences {
 public:
  explicit VerifyLiveStackReferences(Heap* heap)
      : heap_(heap),
        failed_(false) {}

  void operator()(mirror::Object* obj) const
      REQUIRES_SHARED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
    VerifyReferenceCardVisitor visitor(heap_, const_cast<bool*>(&failed_));
    obj->VisitReferences(visitor, VoidFunctor());
  }

  bool Failed() const {
    return failed_;
  }

 private:
  Heap* const heap_;
  bool failed_;
};

bool Heap::VerifyMissingCardMarks() {
  Thread* self = Thread::Current();
  Locks::mutator_lock_->AssertExclusiveHeld(self);
  // We need to sort the live stack since we binary search it.
  live_stack_->Sort();
  // Since we sorted the allocation stack content, we need to revoke all
  // thread-local allocation stacks.
  RevokeAllThreadLocalAllocationStacks(self);
  VerifyLiveStackReferences visitor(this);
  GetLiveBitmap()->Visit(visitor);
  // We can verify objects in the live stack since none of these should reference dead objects.
  for (auto* it = live_stack_->Begin(); it != live_stack_->End(); ++it) {
    if (!kUseThreadLocalAllocationStack || it->AsMirrorPtr() != nullptr) {
      visitor(it->AsMirrorPtr());
    }
  }
  return !visitor.Failed();
}

void Heap::SwapStacks() {
  if (kUseThreadLocalAllocationStack) {
    live_stack_->AssertAllZero();
  }
  allocation_stack_.swap(live_stack_);
}

void Heap::RevokeAllThreadLocalAllocationStacks(Thread* self) {
  // This must be called only during the pause.
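  // Revoking returns each thread's thread-local segment to the shared allocation stack;
  // it is needed after sorting the shared stack, since sorting invalidates the segment
  // bounds the threads were bump-allocating into.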
  DCHECK(Locks::mutator_lock_->IsExclusiveHeld(self));
  MutexLock mu(self, *Locks::runtime_shutdown_lock_);
  MutexLock mu2(self, *Locks::thread_list_lock_);
  std::list<Thread*> thread_list = Runtime::Current()->GetThreadList()->GetList();
  for (Thread* t : thread_list) {
    t->RevokeThreadLocalAllocationStack();
  }
}

void Heap::AssertThreadLocalBuffersAreRevoked(Thread* thread) {
  if (kIsDebugBuild) {
    if (rosalloc_space_ != nullptr) {
      rosalloc_space_->AssertThreadLocalBuffersAreRevoked(thread);
    }
    if (bump_pointer_space_ != nullptr) {
      bump_pointer_space_->AssertThreadLocalBuffersAreRevoked(thread);
    }
  }
}

void Heap::AssertAllBumpPointerSpaceThreadLocalBuffersAreRevoked() {
  if (kIsDebugBuild) {
    if (bump_pointer_space_ != nullptr) {
      bump_pointer_space_->AssertAllThreadLocalBuffersAreRevoked();
    }
  }
}

accounting::ModUnionTable* Heap::FindModUnionTableFromSpace(space::Space* space) {
  auto it = mod_union_tables_.find(space);
  if (it == mod_union_tables_.end()) {
    return nullptr;
  }
  return it->second;
}

accounting::RememberedSet* Heap::FindRememberedSetFromSpace(space::Space* space) {
  auto it = remembered_sets_.find(space);
  if (it == remembered_sets_.end()) {
    return nullptr;
  }
  return it->second;
}

void Heap::ProcessCards(TimingLogger* timings,
                        bool use_rem_sets,
                        bool process_alloc_space_cards,
                        bool clear_alloc_space_cards) {
  TimingLogger::ScopedTiming t(__FUNCTION__, timings);
  // Clear cards and keep track of cards cleared in the mod-union table.
  for (const auto& space : continuous_spaces_) {
    accounting::ModUnionTable* table = FindModUnionTableFromSpace(space);
    accounting::RememberedSet* rem_set = FindRememberedSetFromSpace(space);
    if (table != nullptr) {
      const char* name = space->IsZygoteSpace() ? "ZygoteModUnionClearCards" :
          "ImageModUnionClearCards";
      TimingLogger::ScopedTiming t2(name, timings);
      table->ProcessCards();
    } else if (use_rem_sets && rem_set != nullptr) {
      DCHECK(collector::SemiSpace::kUseRememberedSet && collector_type_ == kCollectorTypeGSS)
          << static_cast<int>(collector_type_);
      TimingLogger::ScopedTiming t2("AllocSpaceRemSetClearCards", timings);
      rem_set->ClearCards();
    } else if (process_alloc_space_cards) {
      TimingLogger::ScopedTiming t2("AllocSpaceClearCards", timings);
      if (clear_alloc_space_cards) {
        uint8_t* end = space->End();
        if (space->IsImageSpace()) {
          // Image space end is the end of the mirror objects, it is not necessarily page or card
          // aligned. Align up so that the check in ClearCardRange does not fail.
          end = AlignUp(end, accounting::CardTable::kCardSize);
        }
        card_table_->ClearCardRange(space->Begin(), end);
      } else {
        // No mod union table for the AllocSpace. Age the cards so that the GC knows that these
        // cards were dirty before the GC started.
        // TODO: Need to use atomic for the case where aged(cleaning thread) -> dirty(other thread)
        // -> clean(cleaning thread).
        // The races mean we either end up with an aged card or an unaged card. Since we have the
        // checkpoint roots and then scan / update the mod union tables afterwards, we will always
        // scan either card. If we end up with the non-aged card, we scan it in the pause.
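        // AgeCardVisitor turns kCardDirty into kCardDirty - 1 ("aged") and clears any other
        // value, so a mutator re-dirtying the card afterwards remains distinguishable.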
        card_table_->ModifyCardsAtomic(space->Begin(), space->End(), AgeCardVisitor(),
                                       VoidFunctor());
      }
    }
  }
}

struct IdentityMarkHeapReferenceVisitor : public MarkObjectVisitor {
  virtual mirror::Object* MarkObject(mirror::Object* obj) OVERRIDE {
    return obj;
  }
  virtual void MarkHeapReference(mirror::HeapReference<mirror::Object>*, bool) OVERRIDE {
  }
};

void Heap::PreGcVerificationPaused(collector::GarbageCollector* gc) {
  Thread* const self = Thread::Current();
  TimingLogger* const timings = current_gc_iteration_.GetTimings();
  TimingLogger::ScopedTiming t(__FUNCTION__, timings);
  if (verify_pre_gc_heap_) {
    TimingLogger::ScopedTiming t2("(Paused)PreGcVerifyHeapReferences", timings);
    size_t failures = VerifyHeapReferences();
    if (failures > 0) {
      LOG(FATAL) << "Pre " << gc->GetName() << " heap verification failed with " << failures
                 << " failures";
    }
  }
  // Check that all objects which reference things in the live stack are on dirty cards.
  if (verify_missing_card_marks_) {
    TimingLogger::ScopedTiming t2("(Paused)PreGcVerifyMissingCardMarks", timings);
    ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
    SwapStacks();
    // Sort the live stack so that we can quickly binary search it later.
    CHECK(VerifyMissingCardMarks()) << "Pre " << gc->GetName()
                                    << " missing card mark verification failed\n" << DumpSpaces();
    SwapStacks();
  }
  if (verify_mod_union_table_) {
    TimingLogger::ScopedTiming t2("(Paused)PreGcVerifyModUnionTables", timings);
    ReaderMutexLock reader_lock(self, *Locks::heap_bitmap_lock_);
    for (const auto& table_pair : mod_union_tables_) {
      accounting::ModUnionTable* mod_union_table = table_pair.second;
      IdentityMarkHeapReferenceVisitor visitor;
      mod_union_table->UpdateAndMarkReferences(&visitor);
      mod_union_table->Verify();
    }
  }
}

void Heap::PreGcVerification(collector::GarbageCollector* gc) {
  if (verify_pre_gc_heap_ || verify_missing_card_marks_ || verify_mod_union_table_) {
    collector::GarbageCollector::ScopedPause pause(gc, false);
    PreGcVerificationPaused(gc);
  }
}

void Heap::PrePauseRosAllocVerification(collector::GarbageCollector* gc ATTRIBUTE_UNUSED) {
  // TODO: Add a new runtime option for this?
  if (verify_pre_gc_rosalloc_) {
    RosAllocVerification(current_gc_iteration_.GetTimings(), "PreGcRosAllocVerification");
  }
}

void Heap::PreSweepingGcVerification(collector::GarbageCollector* gc) {
  Thread* const self = Thread::Current();
  TimingLogger* const timings = current_gc_iteration_.GetTimings();
  TimingLogger::ScopedTiming t(__FUNCTION__, timings);
  // Called before sweeping occurs since we want to make sure we are not going to reclaim any
  // reachable objects.
  if (verify_pre_sweeping_heap_) {
    TimingLogger::ScopedTiming t2("(Paused)PreSweepingVerifyHeapReferences", timings);
    CHECK_NE(self->GetState(), kRunnable);
    {
      WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
      // Swapping bound bitmaps does nothing.
      gc->SwapBitmaps();
    }
    // Pass in false since concurrent reference processing can mean that the reference referents
    // may point to dead objects at the point at which PreSweepingGcVerification is called.
    size_t failures = VerifyHeapReferences(false);
    if (failures > 0) {
      LOG(FATAL) << "Pre sweeping " << gc->GetName() << " GC verification failed with " << failures
                 << " failures";
    }
    {
      WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
      gc->SwapBitmaps();
    }
  }
  if (verify_pre_sweeping_rosalloc_) {
    RosAllocVerification(timings, "PreSweepingRosAllocVerification");
  }
}

void Heap::PostGcVerificationPaused(collector::GarbageCollector* gc) {
  // Only pause if we have to do some verification.
  Thread* const self = Thread::Current();
  TimingLogger* const timings = GetCurrentGcIteration()->GetTimings();
  TimingLogger::ScopedTiming t(__FUNCTION__, timings);
  if (verify_system_weaks_) {
    ReaderMutexLock mu2(self, *Locks::heap_bitmap_lock_);
    collector::MarkSweep* mark_sweep = down_cast<collector::MarkSweep*>(gc);
    mark_sweep->VerifySystemWeaks();
  }
  if (verify_post_gc_rosalloc_) {
    RosAllocVerification(timings, "(Paused)PostGcRosAllocVerification");
  }
  if (verify_post_gc_heap_) {
    TimingLogger::ScopedTiming t2("(Paused)PostGcVerifyHeapReferences", timings);
    size_t failures = VerifyHeapReferences();
    if (failures > 0) {
      LOG(FATAL) << "Post " << gc->GetName() << " heap verification failed with " << failures
                 << " failures";
    }
  }
}

void Heap::PostGcVerification(collector::GarbageCollector* gc) {
  if (verify_system_weaks_ || verify_post_gc_rosalloc_ || verify_post_gc_heap_) {
    collector::GarbageCollector::ScopedPause pause(gc, false);
    PostGcVerificationPaused(gc);
  }
}

void Heap::RosAllocVerification(TimingLogger* timings, const char* name) {
  TimingLogger::ScopedTiming t(name, timings);
  for (const auto& space : continuous_spaces_) {
    if (space->IsRosAllocSpace()) {
      VLOG(heap) << name << " : " << space->GetName();
      space->AsRosAllocSpace()->Verify();
    }
  }
}

collector::GcType Heap::WaitForGcToComplete(GcCause cause, Thread* self) {
  ScopedThreadStateChange tsc(self, kWaitingForGcToComplete);
  MutexLock mu(self, *gc_complete_lock_);
  return WaitForGcToCompleteLocked(cause, self);
}

collector::GcType Heap::WaitForGcToCompleteLocked(GcCause cause, Thread* self) {
  collector::GcType last_gc_type = collector::kGcTypeNone;
  GcCause last_gc_cause = kGcCauseNone;
  uint64_t wait_start = NanoTime();
  while (collector_type_running_ != kCollectorTypeNone) {
    if (self != task_processor_->GetRunningThread()) {
      // The current thread is about to wait for a currently running
      // collection to finish. If the waiting thread is not the heap
      // task daemon thread, the currently running collection is
      // considered as a blocking GC.
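      // Such blocking waits are what FinishGC() counts into blocking_gc_count_ and the
      // blocking GC count-rate histogram.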
      running_collection_is_blocking_ = true;
      VLOG(gc) << "Waiting for a blocking GC " << cause;
    }
    ScopedTrace trace("GC: Wait For Completion");
    // We must wait, change the thread state, and then sleep on gc_complete_cond_.
    gc_complete_cond_->Wait(self);
    last_gc_type = last_gc_type_;
    last_gc_cause = last_gc_cause_;
  }
  uint64_t wait_time = NanoTime() - wait_start;
  total_wait_time_ += wait_time;
  if (wait_time > long_pause_log_threshold_) {
    LOG(INFO) << "WaitForGcToComplete blocked " << cause << " on " << last_gc_cause << " for "
              << PrettyDuration(wait_time);
  }
  if (self != task_processor_->GetRunningThread()) {
    // The current thread is about to run a collection. If the thread
    // is not the heap task daemon thread, it's considered as a
    // blocking GC (i.e., blocking itself).
    running_collection_is_blocking_ = true;
    // Don't log fake "GC" types that are only used for debugger or hidden APIs. If we log these,
    // it results in log spam. kGcCauseExplicit is already logged in LogGC, so avoid it here too.
    if (cause == kGcCauseForAlloc ||
        cause == kGcCauseForNativeAlloc ||
        cause == kGcCauseForNativeAllocBlocking ||
        cause == kGcCauseDisableMovingGc) {
      VLOG(gc) << "Starting a blocking GC " << cause;
    }
  }
  return last_gc_type;
}

void Heap::DumpForSigQuit(std::ostream& os) {
  os << "Heap: " << GetPercentFree() << "% free, " << PrettySize(GetBytesAllocated()) << "/"
     << PrettySize(GetTotalMemory()) << "; " << GetObjectsAllocated() << " objects\n";
  DumpGcPerformanceInfo(os);
}

size_t Heap::GetPercentFree() {
  return static_cast<size_t>(100.0f * static_cast<float>(GetFreeMemory()) / max_allowed_footprint_);
}

void Heap::SetIdealFootprint(size_t max_allowed_footprint) {
  if (max_allowed_footprint > GetMaxMemory()) {
    VLOG(gc) << "Clamp target GC heap from " << PrettySize(max_allowed_footprint) << " to "
             << PrettySize(GetMaxMemory());
    max_allowed_footprint = GetMaxMemory();
  }
  max_allowed_footprint_ = max_allowed_footprint;
}

bool Heap::IsMovableObject(ObjPtr<mirror::Object> obj) const {
  if (kMovingCollector) {
    space::Space* space = FindContinuousSpaceFromObject(obj.Ptr(), true);
    if (space != nullptr) {
      // TODO: Check large object?
      return space->CanMoveObjects();
    }
  }
  return false;
}

collector::GarbageCollector* Heap::FindCollectorByGcType(collector::GcType gc_type) {
  for (const auto& collector : garbage_collectors_) {
    if (collector->GetCollectorType() == collector_type_ &&
        collector->GetGcType() == gc_type) {
      return collector;
    }
  }
  return nullptr;
}

double Heap::HeapGrowthMultiplier() const {
  // If we don't care about pause times we are background, so return 1.0.
  if (!CareAboutPauseTimes()) {
    return 1.0;
  }
  return foreground_heap_growth_multiplier_;
}

void Heap::GrowForUtilization(collector::GarbageCollector* collector_ran,
                              uint64_t bytes_allocated_before_gc) {
  // We know what our utilization is at this moment.
  // This doesn't actually resize any memory. It just lets the heap grow more when necessary.
  const uint64_t bytes_allocated = GetBytesAllocated();
  // Trace the new heap size after the GC is finished.
  TraceHeapSize(bytes_allocated);
  uint64_t target_size;
  collector::GcType gc_type = collector_ran->GetGcType();
  // Use the multiplier to grow more for foreground.
  const double multiplier = HeapGrowthMultiplier();
  const uint64_t adjusted_min_free = static_cast<uint64_t>(min_free_ * multiplier);
  const uint64_t adjusted_max_free = static_cast<uint64_t>(max_free_ * multiplier);
  if (gc_type != collector::kGcTypeSticky) {
    // Grow the heap for non sticky GC.
    ssize_t delta = bytes_allocated / GetTargetHeapUtilization() - bytes_allocated;
    CHECK_GE(delta, 0);
    target_size = bytes_allocated + delta * multiplier;
    target_size = std::min(target_size, bytes_allocated + adjusted_max_free);
    target_size = std::max(target_size, bytes_allocated + adjusted_min_free);
    next_gc_type_ = collector::kGcTypeSticky;
  } else {
    collector::GcType non_sticky_gc_type = NonStickyGcType();
    // Find what the next non sticky collector will be.
    collector::GarbageCollector* non_sticky_collector = FindCollectorByGcType(non_sticky_gc_type);
    // If the throughput of the current sticky GC >= throughput of the non sticky collector, then
    // do another sticky collection next.
    // We also check that the bytes allocated aren't over the footprint limit in order to prevent a
    // pathological case where dead objects which aren't reclaimed by sticky could accumulate
    // if the sticky GC throughput always remained >= the full/partial throughput.
    if (current_gc_iteration_.GetEstimatedThroughput() * kStickyGcThroughputAdjustment >=
        non_sticky_collector->GetEstimatedMeanThroughput() &&
        non_sticky_collector->NumberOfIterations() > 0 &&
        bytes_allocated <= max_allowed_footprint_) {
      next_gc_type_ = collector::kGcTypeSticky;
    } else {
      next_gc_type_ = non_sticky_gc_type;
    }
    // If we have freed enough memory, shrink the heap back down.
    if (bytes_allocated + adjusted_max_free < max_allowed_footprint_) {
      target_size = bytes_allocated + adjusted_max_free;
    } else {
      target_size = std::max(bytes_allocated, static_cast<uint64_t>(max_allowed_footprint_));
    }
  }
  if (!ignore_max_footprint_) {
    SetIdealFootprint(target_size);
    if (IsGcConcurrent()) {
      const uint64_t freed_bytes = current_gc_iteration_.GetFreedBytes() +
          current_gc_iteration_.GetFreedLargeObjectBytes() +
          current_gc_iteration_.GetFreedRevokeBytes();
      // Bytes allocated will shrink by freed_bytes after the GC runs, so if we want to figure out
      // how many bytes were allocated during the GC we need to add freed_bytes back on.
      CHECK_GE(bytes_allocated + freed_bytes, bytes_allocated_before_gc);
      const uint64_t bytes_allocated_during_gc = bytes_allocated + freed_bytes -
          bytes_allocated_before_gc;
      // Calculate when to perform the next ConcurrentGC by estimating the GC duration.
      const double gc_duration_seconds = NsToMs(current_gc_iteration_.GetDurationNs()) / 1000.0;
      // Estimate how many remaining bytes we will have when we need to start the next GC.
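      // Worked example with made-up numbers: if 8 MB were allocated while a 250 ms GC ran,
      // remaining_bytes = 8 MB * 0.25 = 2 MB, which the clamps below reduce to
      // kMaxConcurrentRemainingBytes (512 KB) before computing concurrent_start_bytes_.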
      size_t remaining_bytes = bytes_allocated_during_gc * gc_duration_seconds;
      remaining_bytes = std::min(remaining_bytes, kMaxConcurrentRemainingBytes);
      remaining_bytes = std::max(remaining_bytes, kMinConcurrentRemainingBytes);
      if (UNLIKELY(remaining_bytes > max_allowed_footprint_)) {
        // A situation that should never happen: the estimated allocation rate says we would
        // exceed the application's entire footprint. Schedule another GC nearly straight away.
        remaining_bytes = kMinConcurrentRemainingBytes;
      }
      DCHECK_LE(remaining_bytes, max_allowed_footprint_);
      DCHECK_LE(max_allowed_footprint_, GetMaxMemory());
      // Start a concurrent GC when we get close to the estimated remaining bytes. When the
      // allocation rate is very high, remaining_bytes could tell us that we should start a GC
      // right away.
      concurrent_start_bytes_ = std::max(max_allowed_footprint_ - remaining_bytes,
                                         static_cast<size_t>(bytes_allocated));
    }
  }
}

void Heap::ClampGrowthLimit() {
  // Use heap bitmap lock to guard against races with BindLiveToMarkBitmap.
  ScopedObjectAccess soa(Thread::Current());
  WriterMutexLock mu(soa.Self(), *Locks::heap_bitmap_lock_);
  capacity_ = growth_limit_;
  for (const auto& space : continuous_spaces_) {
    if (space->IsMallocSpace()) {
      gc::space::MallocSpace* malloc_space = space->AsMallocSpace();
      malloc_space->ClampGrowthLimit();
    }
  }
  // This space isn't added for performance reasons.
  if (main_space_backup_.get() != nullptr) {
    main_space_backup_->ClampGrowthLimit();
  }
}

void Heap::ClearGrowthLimit() {
  growth_limit_ = capacity_;
  ScopedObjectAccess soa(Thread::Current());
  for (const auto& space : continuous_spaces_) {
    if (space->IsMallocSpace()) {
      gc::space::MallocSpace* malloc_space = space->AsMallocSpace();
      malloc_space->ClearGrowthLimit();
      malloc_space->SetFootprintLimit(malloc_space->Capacity());
    }
  }
  // This space isn't added for performance reasons.
  if (main_space_backup_.get() != nullptr) {
    main_space_backup_->ClearGrowthLimit();
    main_space_backup_->SetFootprintLimit(main_space_backup_->Capacity());
  }
}

void Heap::AddFinalizerReference(Thread* self, ObjPtr<mirror::Object>* object) {
  ScopedObjectAccess soa(self);
  ScopedLocalRef<jobject> arg(self->GetJniEnv(), soa.AddLocalReference<jobject>(*object));
  jvalue args[1];
  args[0].l = arg.get();
  InvokeWithJValues(soa, nullptr, WellKnownClasses::java_lang_ref_FinalizerReference_add, args);
  // Restore the object in case it gets moved.
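  // (InvokeWithJValues runs Java code and may trigger a moving GC, so the caller's raw
  // pointer can be stale; decoding the local reference yields the current address.)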
  *object = soa.Decode<mirror::Object>(arg.get());
}

void Heap::RequestConcurrentGCAndSaveObject(Thread* self,
                                            bool force_full,
                                            ObjPtr<mirror::Object>* obj) {
  StackHandleScope<1> hs(self);
  HandleWrapperObjPtr<mirror::Object> wrapper(hs.NewHandleWrapper(obj));
  RequestConcurrentGC(self, kGcCauseBackground, force_full);
}

class Heap::ConcurrentGCTask : public HeapTask {
 public:
  ConcurrentGCTask(uint64_t target_time, GcCause cause, bool force_full)
      : HeapTask(target_time), cause_(cause), force_full_(force_full) {}
  virtual void Run(Thread* self) OVERRIDE {
    gc::Heap* heap = Runtime::Current()->GetHeap();
    heap->ConcurrentGC(self, cause_, force_full_);
    heap->ClearConcurrentGCRequest();
  }

 private:
  const GcCause cause_;
  const bool force_full_;  // If true, force full (or partial) collection.
};

static bool CanAddHeapTask(Thread* self) REQUIRES(!Locks::runtime_shutdown_lock_) {
  Runtime* runtime = Runtime::Current();
  return runtime != nullptr && runtime->IsFinishedStarting() && !runtime->IsShuttingDown(self) &&
      !self->IsHandlingStackOverflow();
}

void Heap::ClearConcurrentGCRequest() {
  concurrent_gc_pending_.StoreRelaxed(false);
}

void Heap::RequestConcurrentGC(Thread* self, GcCause cause, bool force_full) {
  if (CanAddHeapTask(self) &&
      concurrent_gc_pending_.CompareExchangeStrongSequentiallyConsistent(false, true)) {
    task_processor_->AddTask(self, new ConcurrentGCTask(NanoTime(),  // Start straight away.
                                                        cause,
                                                        force_full));
  }
}

void Heap::ConcurrentGC(Thread* self, GcCause cause, bool force_full) {
  if (!Runtime::Current()->IsShuttingDown(self)) {
    // Wait for any GCs currently running to finish.
    if (WaitForGcToComplete(cause, self) == collector::kGcTypeNone) {
      // If we can't run the GC type we wanted to run, find the next appropriate one and try
      // that instead. E.g. can't do partial, so do full instead.
      collector::GcType next_gc_type = next_gc_type_;
      // If forcing full and the next GC type is sticky, override with a non-sticky type.
      if (force_full && next_gc_type == collector::kGcTypeSticky) {
        next_gc_type = NonStickyGcType();
      }
      if (CollectGarbageInternal(next_gc_type, cause, false) == collector::kGcTypeNone) {
        for (collector::GcType gc_type : gc_plan_) {
          // Attempt to run the collector. If we succeed, we are done.
          if (gc_type > next_gc_type &&
              CollectGarbageInternal(gc_type, cause, false) != collector::kGcTypeNone) {
            break;
          }
        }
      }
    }
  }
}

class Heap::CollectorTransitionTask : public HeapTask {
 public:
  explicit CollectorTransitionTask(uint64_t target_time) : HeapTask(target_time) {}

  virtual void Run(Thread* self) OVERRIDE {
    gc::Heap* heap = Runtime::Current()->GetHeap();
    heap->DoPendingCollectorTransition();
    heap->ClearPendingCollectorTransition(self);
  }
};

void Heap::ClearPendingCollectorTransition(Thread* self) {
  MutexLock mu(self, *pending_task_lock_);
  pending_collector_transition_ = nullptr;
}

void Heap::RequestCollectorTransition(CollectorType desired_collector_type, uint64_t delta_time) {
  Thread* self = Thread::Current();
  desired_collector_type_ = desired_collector_type;
  if (desired_collector_type_ == collector_type_ || !CanAddHeapTask(self)) {
    return;
  }
  if (collector_type_ == kCollectorTypeCC) {
    // For CC, we invoke a full compaction when going to the background, but the collector type
    // doesn't change.
    DCHECK_EQ(desired_collector_type_, kCollectorTypeCCBackground);
  }
  DCHECK_NE(collector_type_, kCollectorTypeCCBackground);
  CollectorTransitionTask* added_task = nullptr;
  const uint64_t target_time = NanoTime() + delta_time;
  {
    MutexLock mu(self, *pending_task_lock_);
    // If we have an existing collector transition, update the target time to be the new target.
    if (pending_collector_transition_ != nullptr) {
      task_processor_->UpdateTargetRunTime(self, pending_collector_transition_, target_time);
      return;
    }
    added_task = new CollectorTransitionTask(target_time);
    pending_collector_transition_ = added_task;
  }
  task_processor_->AddTask(self, added_task);
}

class Heap::HeapTrimTask : public HeapTask {
 public:
  explicit HeapTrimTask(uint64_t delta_time) : HeapTask(NanoTime() + delta_time) { }
  virtual void Run(Thread* self) OVERRIDE {
    gc::Heap* heap = Runtime::Current()->GetHeap();
    heap->Trim(self);
    heap->ClearPendingTrim(self);
  }
};

void Heap::ClearPendingTrim(Thread* self) {
  MutexLock mu(self, *pending_task_lock_);
  pending_heap_trim_ = nullptr;
}

void Heap::RequestTrim(Thread* self) {
  if (!CanAddHeapTask(self)) {
    return;
  }
  // GC completed and now we must decide whether to request a heap trim (advising pages back to the
  // kernel) or not. Issuing a request will also cause trimming of the libc heap. As a trim scans
  // a space it will hold its lock and can become a cause of jank.
  // Note that the large object space trims itself, and that the Zygote space was trimmed at fork
  // time and is unchanging since then.

  // We don't have a good measure of how worthwhile a trim might be. We can't use the live bitmap
  // because that only marks object heads, so a large array looks like lots of empty space. We
  // don't just call dlmalloc all the time, because the cost of an _attempted_ trim is proportional
  // to utilization (which is probably inversely proportional to how much benefit we can expect).
  // We could try mincore(2) but that's only a measure of how many pages we haven't given away,
  // not how much use we're making of those pages.
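  // So we just rate-limit: keep at most one pending HeapTrimTask, scheduled kHeapTrimWait
  // from now.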
  HeapTrimTask* added_task = nullptr;
  {
    MutexLock mu(self, *pending_task_lock_);
    if (pending_heap_trim_ != nullptr) {
      // Already have a heap trim request in the task processor; ignore this request.
      return;
    }
    added_task = new HeapTrimTask(kHeapTrimWait);
    pending_heap_trim_ = added_task;
  }
  task_processor_->AddTask(self, added_task);
}

void Heap::RevokeThreadLocalBuffers(Thread* thread) {
  if (rosalloc_space_ != nullptr) {
    size_t freed_bytes_revoke = rosalloc_space_->RevokeThreadLocalBuffers(thread);
    if (freed_bytes_revoke > 0U) {
      num_bytes_freed_revoke_.FetchAndAddSequentiallyConsistent(freed_bytes_revoke);
      CHECK_GE(num_bytes_allocated_.LoadRelaxed(), num_bytes_freed_revoke_.LoadRelaxed());
    }
  }
  if (bump_pointer_space_ != nullptr) {
    CHECK_EQ(bump_pointer_space_->RevokeThreadLocalBuffers(thread), 0U);
  }
  if (region_space_ != nullptr) {
    CHECK_EQ(region_space_->RevokeThreadLocalBuffers(thread), 0U);
  }
}

void Heap::RevokeRosAllocThreadLocalBuffers(Thread* thread) {
  if (rosalloc_space_ != nullptr) {
    size_t freed_bytes_revoke = rosalloc_space_->RevokeThreadLocalBuffers(thread);
    if (freed_bytes_revoke > 0U) {
      num_bytes_freed_revoke_.FetchAndAddSequentiallyConsistent(freed_bytes_revoke);
      CHECK_GE(num_bytes_allocated_.LoadRelaxed(), num_bytes_freed_revoke_.LoadRelaxed());
    }
  }
}

void Heap::RevokeAllThreadLocalBuffers() {
  if (rosalloc_space_ != nullptr) {
    size_t freed_bytes_revoke = rosalloc_space_->RevokeAllThreadLocalBuffers();
    if (freed_bytes_revoke > 0U) {
      num_bytes_freed_revoke_.FetchAndAddSequentiallyConsistent(freed_bytes_revoke);
      CHECK_GE(num_bytes_allocated_.LoadRelaxed(), num_bytes_freed_revoke_.LoadRelaxed());
    }
  }
  if (bump_pointer_space_ != nullptr) {
    CHECK_EQ(bump_pointer_space_->RevokeAllThreadLocalBuffers(), 0U);
  }
  if (region_space_ != nullptr) {
    CHECK_EQ(region_space_->RevokeAllThreadLocalBuffers(), 0U);
  }
}

bool Heap::IsGCRequestPending() const {
  return concurrent_gc_pending_.LoadRelaxed();
}

void Heap::RunFinalization(JNIEnv* env, uint64_t timeout) {
  env->CallStaticVoidMethod(WellKnownClasses::dalvik_system_VMRuntime,
                            WellKnownClasses::dalvik_system_VMRuntime_runFinalization,
                            static_cast<jlong>(timeout));
}

void Heap::RegisterNativeAllocation(JNIEnv* env, size_t bytes) {
  // See the REDESIGN section of go/understanding-register-native-allocation
  // for an explanation of how RegisterNativeAllocation works.
  size_t new_value = bytes + new_native_bytes_allocated_.FetchAndAddRelaxed(bytes);
  if (new_value > NativeAllocationBlockingGcWatermark()) {
    // Wait for a new GC to finish and finalizers to run, because the
    // allocation rate is too high.
    Thread* self = ThreadForEnv(env);

    bool run_gc = false;
    {
      MutexLock mu(self, *native_blocking_gc_lock_);
      uint32_t initial_gcs_finished = native_blocking_gcs_finished_;
      if (native_blocking_gc_in_progress_) {
        // A native blocking GC is in progress from the last time the native
        // allocation blocking GC watermark was exceeded. Wait for that GC to
        // finish before addressing the fact that we exceeded the blocking
        // watermark again.
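        // The blocking GC below bumps native_blocking_gcs_finished_ and broadcasts on
        // native_blocking_gc_cond_, so waiting for the counter to advance is how we
        // observe completion of the prior GC.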
        do {
          ScopedTrace trace("RegisterNativeAllocation: Wait For Prior Blocking GC Completion");
          native_blocking_gc_cond_->Wait(self);
        } while (native_blocking_gcs_finished_ == initial_gcs_finished);
        initial_gcs_finished++;
      }

      // It's possible multiple threads have seen that we exceeded the
      // blocking watermark. Ensure that only one of those threads is assigned
      // to run the blocking GC. The rest of the threads should instead wait
      // for the blocking GC to complete.
      if (native_blocking_gcs_finished_ == initial_gcs_finished) {
        if (native_blocking_gc_is_assigned_) {
          do {
            ScopedTrace trace("RegisterNativeAllocation: Wait For Blocking GC Completion");
            native_blocking_gc_cond_->Wait(self);
          } while (native_blocking_gcs_finished_ == initial_gcs_finished);
        } else {
          native_blocking_gc_is_assigned_ = true;
          run_gc = true;
        }
      }
    }

    if (run_gc) {
      CollectGarbageInternal(NonStickyGcType(), kGcCauseForNativeAllocBlocking, false);
      RunFinalization(env, kNativeAllocationFinalizeTimeout);
      CHECK(!env->ExceptionCheck());

      MutexLock mu(self, *native_blocking_gc_lock_);
      native_blocking_gc_is_assigned_ = false;
      native_blocking_gc_in_progress_ = false;
      native_blocking_gcs_finished_++;
      native_blocking_gc_cond_->Broadcast(self);
    }
  } else if (new_value > NativeAllocationGcWatermark() * HeapGrowthMultiplier() &&
             !IsGCRequestPending()) {
    // Trigger another GC because there have been enough native bytes
    // allocated since the last GC.
    if (IsGcConcurrent()) {
      RequestConcurrentGC(ThreadForEnv(env), kGcCauseForNativeAlloc, /*force_full*/true);
    } else {
      CollectGarbageInternal(NonStickyGcType(), kGcCauseForNativeAlloc, false);
    }
  }
}

void Heap::RegisterNativeFree(JNIEnv*, size_t bytes) {
  // Take the bytes freed out of new_native_bytes_allocated_ first. If
  // new_native_bytes_allocated_ reaches zero, take the remaining bytes freed
  // out of old_native_bytes_allocated_ to ensure all freed bytes are
  // accounted for.
  size_t allocated;
  size_t new_freed_bytes;
  do {
    allocated = new_native_bytes_allocated_.LoadRelaxed();
    new_freed_bytes = std::min(allocated, bytes);
  } while (!new_native_bytes_allocated_.CompareExchangeWeakRelaxed(allocated,
                                                                   allocated - new_freed_bytes));
  if (new_freed_bytes < bytes) {
    old_native_bytes_allocated_.FetchAndSubRelaxed(bytes - new_freed_bytes);
  }
}

size_t Heap::GetTotalMemory() const {
  return std::max(max_allowed_footprint_, GetBytesAllocated());
}

void Heap::AddModUnionTable(accounting::ModUnionTable* mod_union_table) {
  DCHECK(mod_union_table != nullptr);
  mod_union_tables_.Put(mod_union_table->GetSpace(), mod_union_table);
}

void Heap::CheckPreconditionsForAllocObject(ObjPtr<mirror::Class> c, size_t byte_count) {
  // Compare rounded sizes since the allocation may have been retried after rounding the size.
  // See b/37885600.
  CHECK(c == nullptr || (c->IsClassClass() && byte_count >= sizeof(mirror::Class)) ||
        (c->IsVariableSize() ||
            RoundUp(c->GetObjectSize(), kObjectAlignment) ==
                RoundUp(byte_count, kObjectAlignment)))
      << "ClassFlags=" << c->GetClassFlags()
      << " IsClassClass=" << c->IsClassClass()
      << " byte_count=" << byte_count
      << " IsVariableSize=" << c->IsVariableSize()
      << " ObjectSize=" << c->GetObjectSize()
      << " sizeof(Class)=" << sizeof(mirror::Class)
      << " " << verification_->DumpObjectInfo(c.Ptr(), /*tag*/ "klass");
  CHECK_GE(byte_count, sizeof(mirror::Object));
}

void Heap::AddRememberedSet(accounting::RememberedSet* remembered_set) {
  CHECK(remembered_set != nullptr);
  space::Space* space = remembered_set->GetSpace();
  CHECK(space != nullptr);
  CHECK(remembered_sets_.find(space) == remembered_sets_.end()) << space;
  remembered_sets_.Put(space, remembered_set);
  CHECK(remembered_sets_.find(space) != remembered_sets_.end()) << space;
}

void Heap::RemoveRememberedSet(space::Space* space) {
  CHECK(space != nullptr);
  auto it = remembered_sets_.find(space);
  CHECK(it != remembered_sets_.end());
  delete it->second;
  remembered_sets_.erase(it);
  CHECK(remembered_sets_.find(space) == remembered_sets_.end());
}

void Heap::ClearMarkedObjects() {
  // Clear all of the spaces' mark bitmaps.
  for (const auto& space : GetContinuousSpaces()) {
    accounting::ContinuousSpaceBitmap* mark_bitmap = space->GetMarkBitmap();
    if (space->GetLiveBitmap() != mark_bitmap) {
      mark_bitmap->Clear();
    }
  }
  // Clear the marked objects in the discontinuous space object sets.
  for (const auto& space : GetDiscontinuousSpaces()) {
    space->GetMarkBitmap()->Clear();
  }
}

void Heap::SetAllocationRecords(AllocRecordObjectMap* records) {
  allocation_records_.reset(records);
}

void Heap::VisitAllocationRecords(RootVisitor* visitor) const {
  if (IsAllocTrackingEnabled()) {
    MutexLock mu(Thread::Current(), *Locks::alloc_tracker_lock_);
    if (IsAllocTrackingEnabled()) {
      GetAllocationRecords()->VisitRoots(visitor);
    }
  }
}

void Heap::SweepAllocationRecords(IsMarkedVisitor* visitor) const {
  if (IsAllocTrackingEnabled()) {
    MutexLock mu(Thread::Current(), *Locks::alloc_tracker_lock_);
    if (IsAllocTrackingEnabled()) {
      GetAllocationRecords()->SweepAllocationRecords(visitor);
    }
  }
}

void Heap::AllowNewAllocationRecords() const {
  CHECK(!kUseReadBarrier);
  MutexLock mu(Thread::Current(), *Locks::alloc_tracker_lock_);
  AllocRecordObjectMap* allocation_records = GetAllocationRecords();
  if (allocation_records != nullptr) {
    allocation_records->AllowNewAllocationRecords();
  }
}

void Heap::DisallowNewAllocationRecords() const {
  CHECK(!kUseReadBarrier);
  MutexLock mu(Thread::Current(), *Locks::alloc_tracker_lock_);
  AllocRecordObjectMap* allocation_records = GetAllocationRecords();
  if (allocation_records != nullptr) {
    allocation_records->DisallowNewAllocationRecords();
  }
}

void Heap::BroadcastForNewAllocationRecords() const {
  // Always broadcast without checking IsAllocTrackingEnabled() because IsAllocTrackingEnabled() may
  // be set to false while some threads are waiting for system weak access in
  // AllocRecordObjectMap::RecordAllocation() and we may
  // fail to wake them up. b/27467554.
  MutexLock mu(Thread::Current(), *Locks::alloc_tracker_lock_);
  AllocRecordObjectMap* allocation_records = GetAllocationRecords();
  if (allocation_records != nullptr) {
    allocation_records->BroadcastForNewAllocationRecords();
  }
}

void Heap::CheckGcStressMode(Thread* self, ObjPtr<mirror::Object>* obj) {
  auto* const runtime = Runtime::Current();
  if (gc_stress_mode_ && runtime->GetClassLinker()->IsInitialized() &&
      !runtime->IsActiveTransaction() && mirror::Class::HasJavaLangClass()) {
    // Check if we should GC.
    bool new_backtrace = false;
    {
      static constexpr size_t kMaxFrames = 16u;
      FixedSizeBacktrace<kMaxFrames> backtrace;
      backtrace.Collect(/* skip_frames */ 2);
      uint64_t hash = backtrace.Hash();
      MutexLock mu(self, *backtrace_lock_);
      new_backtrace = seen_backtraces_.find(hash) == seen_backtraces_.end();
      if (new_backtrace) {
        seen_backtraces_.insert(hash);
      }
    }
    if (new_backtrace) {
      StackHandleScope<1> hs(self);
      auto h = hs.NewHandleWrapper(obj);
      CollectGarbage(false);
      unique_backtrace_count_.FetchAndAddSequentiallyConsistent(1);
    } else {
      seen_backtrace_count_.FetchAndAddSequentiallyConsistent(1);
    }
  }
}

void Heap::DisableGCForShutdown() {
  Thread* const self = Thread::Current();
  CHECK(Runtime::Current()->IsShuttingDown(self));
  MutexLock mu(self, *gc_complete_lock_);
  gc_disabled_for_shutdown_ = true;
}

bool Heap::ObjectIsInBootImageSpace(ObjPtr<mirror::Object> obj) const {
  for (gc::space::ImageSpace* space : boot_image_spaces_) {
    if (space->HasAddress(obj.Ptr())) {
      return true;
    }
  }
  return false;
}

bool Heap::IsInBootImageOatFile(const void* p) const {
  for (gc::space::ImageSpace* space : boot_image_spaces_) {
    if (space->GetOatFile()->Contains(p)) {
      return true;
    }
  }
  return false;
}

void Heap::GetBootImagesSize(uint32_t* boot_image_begin,
                             uint32_t* boot_image_end,
                             uint32_t* boot_oat_begin,
                             uint32_t* boot_oat_end) {
  DCHECK(boot_image_begin != nullptr);
  DCHECK(boot_image_end != nullptr);
  DCHECK(boot_oat_begin != nullptr);
  DCHECK(boot_oat_end != nullptr);
  *boot_image_begin = 0u;
  *boot_image_end = 0u;
  *boot_oat_begin = 0u;
  *boot_oat_end = 0u;
  for (gc::space::ImageSpace* space_ : GetBootImageSpaces()) {
    const uint32_t image_begin = PointerToLowMemUInt32(space_->Begin());
    const uint32_t image_size = space_->GetImageHeader().GetImageSize();
    if (*boot_image_begin == 0 || image_begin < *boot_image_begin) {
      *boot_image_begin = image_begin;
    }
    *boot_image_end = std::max(*boot_image_end, image_begin + image_size);
    const OatFile* boot_oat_file = space_->GetOatFile();
    const uint32_t oat_begin = PointerToLowMemUInt32(boot_oat_file->Begin());
    const uint32_t oat_size = boot_oat_file->Size();
    if (*boot_oat_begin == 0 || oat_begin < *boot_oat_begin) {
      *boot_oat_begin = oat_begin;
    }
    *boot_oat_end = std::max(*boot_oat_end, oat_begin + oat_size);
  }
}

void Heap::SetAllocationListener(AllocationListener* l) {
  AllocationListener* old = GetAndOverwriteAllocationListener(&alloc_listener_, l);

  if (old == nullptr) {
    Runtime::Current()->GetInstrumentation()->InstrumentQuickAllocEntryPoints();
  }
}

void Heap::RemoveAllocationListener() {
void Heap::RemoveAllocationListener() {
  AllocationListener* old = GetAndOverwriteAllocationListener(&alloc_listener_, nullptr);
  if (old != nullptr) {
    // Last listener removed: stop instrumenting the allocation entrypoints.
    Runtime::Current()->GetInstrumentation()->UninstrumentQuickAllocEntryPoints();
  }
}

void Heap::SetGcPauseListener(GcPauseListener* l) {
  gc_pause_listener_.StoreRelaxed(l);
}

void Heap::RemoveGcPauseListener() {
  gc_pause_listener_.StoreRelaxed(nullptr);
}

mirror::Object* Heap::AllocWithNewTLAB(Thread* self,
                                       size_t alloc_size,
                                       bool grow,
                                       size_t* bytes_allocated,
                                       size_t* usable_size,
                                       size_t* bytes_tl_bulk_allocated) {
  const AllocatorType allocator_type = GetCurrentAllocator();
  if (kUsePartialTlabs && alloc_size <= self->TlabRemainingCapacity()) {
    DCHECK_GT(alloc_size, self->TlabSize());
    // There is enough space if we grow the TLAB. Let's do that. This increases the
    // TLAB bytes.
    const size_t min_expand_size = alloc_size - self->TlabSize();
    const size_t expand_bytes = std::max(
        min_expand_size,
        std::min(self->TlabRemainingCapacity() - self->TlabSize(), kPartialTlabSize));
    if (UNLIKELY(IsOutOfMemoryOnAllocation(allocator_type, expand_bytes, grow))) {
      return nullptr;
    }
    *bytes_tl_bulk_allocated = expand_bytes;
    self->ExpandTlab(expand_bytes);
    DCHECK_LE(alloc_size, self->TlabSize());
  } else if (allocator_type == kAllocatorTypeTLAB) {
    DCHECK(bump_pointer_space_ != nullptr);
    const size_t new_tlab_size = alloc_size + kDefaultTLABSize;
    if (UNLIKELY(IsOutOfMemoryOnAllocation(allocator_type, new_tlab_size, grow))) {
      return nullptr;
    }
    // Try allocating a new thread-local buffer; if the allocation fails, the space
    // must be full, so return null.
    if (!bump_pointer_space_->AllocNewTlab(self, new_tlab_size)) {
      return nullptr;
    }
    *bytes_tl_bulk_allocated = new_tlab_size;
  } else {
    DCHECK(allocator_type == kAllocatorTypeRegionTLAB);
    DCHECK(region_space_ != nullptr);
    if (space::RegionSpace::kRegionSize >= alloc_size) {
      // Non-large. Check OOME for a tlab.
      if (LIKELY(!IsOutOfMemoryOnAllocation(allocator_type,
                                            space::RegionSpace::kRegionSize,
                                            grow))) {
        const size_t new_tlab_size = kUsePartialTlabs
            ? std::max(alloc_size, kPartialTlabSize)
            : gc::space::RegionSpace::kRegionSize;
        // Try to allocate a tlab.
        if (!region_space_->AllocNewTlab(self, new_tlab_size)) {
          // Failed to allocate a tlab. Try non-tlab.
          return region_space_->AllocNonvirtual<false>(alloc_size,
                                                       bytes_allocated,
                                                       usable_size,
                                                       bytes_tl_bulk_allocated);
        }
        *bytes_tl_bulk_allocated = new_tlab_size;
        // Fall through to using the TLAB below.
      } else {
        // Check OOME for a non-tlab allocation.
        if (!IsOutOfMemoryOnAllocation(allocator_type, alloc_size, grow)) {
          return region_space_->AllocNonvirtual<false>(alloc_size,
                                                       bytes_allocated,
                                                       usable_size,
                                                       bytes_tl_bulk_allocated);
        }
        // Neither tlab nor non-tlab works. Give up.
        return nullptr;
      }
    } else {
      // Large. Check OOME.
      if (LIKELY(!IsOutOfMemoryOnAllocation(allocator_type, alloc_size, grow))) {
        return region_space_->AllocNonvirtual<false>(alloc_size,
                                                     bytes_allocated,
                                                     usable_size,
                                                     bytes_tl_bulk_allocated);
      }
      return nullptr;
    }
  }
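  // Worked example for the kUsePartialTlabs branch above (illustrative numbers, not a
  // real trace): with kPartialTlabSize = 16KB, a TLAB that has 1KB still free
  // (TlabSize() == 1KB) out of 64KB remaining capacity, and a 2KB request:
  //   min_expand_size = 2KB - 1KB = 1KB
  //   expand_bytes    = max(1KB, min(64KB - 1KB, 16KB)) = 16KB
  // so the TLAB grows by a full partial step rather than the bare minimum, amortizing
  // the expansion cost over future small allocations.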
  // Refilled TLAB, return.
  mirror::Object* ret = self->AllocTlab(alloc_size);
  DCHECK(ret != nullptr);
  *bytes_allocated = alloc_size;
  *usable_size = alloc_size;
  return ret;
}

const Verification* Heap::GetVerification() const {
  return verification_.get();
}

}  // namespace gc
}  // namespace art