semi_space.cc revision b272cd3013e046b7b001a091b0925b99a844e382
/*
 * Copyright (C) 2013 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "semi_space-inl.h"

#include <functional>
#include <numeric>
#include <climits>
#include <vector>

#include "base/logging.h"
#include "base/macros.h"
#include "base/mutex-inl.h"
#include "base/timing_logger.h"
#include "gc/accounting/heap_bitmap-inl.h"
#include "gc/accounting/mod_union_table.h"
#include "gc/accounting/remembered_set.h"
#include "gc/accounting/space_bitmap-inl.h"
#include "gc/heap.h"
#include "gc/space/bump_pointer_space.h"
#include "gc/space/bump_pointer_space-inl.h"
#include "gc/space/image_space.h"
#include "gc/space/large_object_space.h"
#include "gc/space/space-inl.h"
#include "indirect_reference_table.h"
#include "intern_table.h"
#include "jni_internal.h"
#include "mark_sweep-inl.h"
#include "monitor.h"
#include "mirror/art_field.h"
#include "mirror/art_field-inl.h"
#include "mirror/class-inl.h"
#include "mirror/class_loader.h"
#include "mirror/dex_cache.h"
#include "mirror/reference-inl.h"
#include "mirror/object-inl.h"
#include "mirror/object_array.h"
#include "mirror/object_array-inl.h"
#include "runtime.h"
#include "stack.h"
#include "thread-inl.h"
#include "thread_list.h"
#include "verifier/method_verifier.h"

using ::art::mirror::Class;
using ::art::mirror::Object;

namespace art {
namespace gc {
namespace collector {

static constexpr bool kProtectFromSpace = true;
static constexpr bool kStoreStackTraces = false;
static constexpr bool kUseBytesPromoted = true;
static constexpr size_t kBytesPromotedThreshold = 4 * MB;

void SemiSpace::BindBitmaps() {
  timings_.StartSplit("BindBitmaps");
  WriterMutexLock mu(self_, *Locks::heap_bitmap_lock_);
  // Mark all of the spaces we never collect as immune.
  for (const auto& space : GetHeap()->GetContinuousSpaces()) {
    if (space->GetLiveBitmap() != nullptr) {
      if (space == to_space_) {
        CHECK(to_space_->IsContinuousMemMapAllocSpace());
        to_space_->AsContinuousMemMapAllocSpace()->BindLiveToMarkBitmap();
      } else if (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyNeverCollect
                 || space->GetGcRetentionPolicy() == space::kGcRetentionPolicyFullCollect
                 // Add the main free list space and the non-moving
                 // space to the immune space if this is a bump pointer
                 // space only collection.
                 || (generational_ && !whole_heap_collection_ &&
                     (space == GetHeap()->GetNonMovingSpace() ||
                      space == GetHeap()->GetPrimaryFreeListSpace()))) {
        CHECK(immune_region_.AddContinuousSpace(space)) << "Failed to add space " << *space;
      }
    }
  }
  if (generational_ && !whole_heap_collection_) {
    // We won't collect the large object space if this is a bump pointer space only collection.
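    // (The live large objects themselves are still scanned as roots in
    // MarkReachableObjects(), since their classes may be moved by this collection.)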
    is_large_object_space_immune_ = true;
  }
  timings_.EndSplit();
}

SemiSpace::SemiSpace(Heap* heap, bool generational, const std::string& name_prefix)
    : GarbageCollector(heap,
                       name_prefix + (name_prefix.empty() ? "" : " ") + "marksweep + semispace"),
      to_space_(nullptr),
      from_space_(nullptr),
      generational_(generational),
      last_gc_to_space_end_(nullptr),
      bytes_promoted_(0),
      bytes_promoted_since_last_whole_heap_collection_(0),
      whole_heap_collection_(true),
      whole_heap_collection_interval_counter_(0),
      collector_name_(name_) {
}

void SemiSpace::InitializePhase() {
  timings_.Reset();
  TimingLogger::ScopedSplit split("InitializePhase", &timings_);
  mark_stack_ = heap_->mark_stack_.get();
  DCHECK(mark_stack_ != nullptr);
  immune_region_.Reset();
  is_large_object_space_immune_ = false;
  saved_bytes_ = 0;
  self_ = Thread::Current();
  // Do any pre GC verification.
  timings_.NewSplit("PreGcVerification");
  heap_->PreGcVerification(this);
  CHECK(from_space_->CanMoveObjects()) << "Attempting to move from " << *from_space_;
  // Set the initial bitmap.
  to_space_live_bitmap_ = to_space_->GetLiveBitmap();
}

void SemiSpace::ProcessReferences(Thread* self) {
  TimingLogger::ScopedSplit split("ProcessReferences", &timings_);
  WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
  GetHeap()->ProcessReferences(timings_, clear_soft_references_, &MarkedForwardingAddressCallback,
                               &MarkObjectCallback, &ProcessMarkStackCallback, this);
}

void SemiSpace::MarkingPhase() {
  if (kStoreStackTraces) {
    Locks::mutator_lock_->AssertExclusiveHeld(self_);
    // Store the stack traces into the runtime fault string in case we get a heap corruption
    // related crash later.
    ThreadState old_state = self_->SetStateUnsafe(kRunnable);
    std::ostringstream oss;
    Runtime* runtime = Runtime::Current();
    runtime->GetThreadList()->DumpForSigQuit(oss);
    runtime->GetThreadList()->DumpNativeStacks(oss);
    runtime->SetFaultMessage(oss.str());
    CHECK_EQ(self_->SetStateUnsafe(old_state), kRunnable);
  }

  if (generational_) {
    if (gc_cause_ == kGcCauseExplicit || gc_cause_ == kGcCauseForNativeAlloc ||
        clear_soft_references_) {
      // If this is an explicit, native allocation-triggered, or last attempt
      // collection, collect the whole heap (and reset the interval
      // counter to be consistent).
      whole_heap_collection_ = true;
      if (!kUseBytesPromoted) {
        whole_heap_collection_interval_counter_ = 0;
      }
    }
    if (whole_heap_collection_) {
      VLOG(heap) << "Whole heap collection";
      name_ = collector_name_ + " whole";
    } else {
      VLOG(heap) << "Bump pointer space only collection";
      name_ = collector_name_ + " bps";
    }
  }

  if (!clear_soft_references_) {
    if (!generational_) {
      // If non-generational, always clear soft references.
      clear_soft_references_ = true;
    } else {
      // If generational, clear soft references if this is a whole heap collection.
      if (whole_heap_collection_) {
        clear_soft_references_ = true;
      }
    }
  }

  Locks::mutator_lock_->AssertExclusiveHeld(self_);

  TimingLogger::ScopedSplit split("MarkingPhase", &timings_);
  if (generational_) {
    // If last_gc_to_space_end_ is out of the bounds of the from-space
    // (the to-space from the last GC), then point it to the beginning of
    // the from-space. This happens, for example, at the very first GC or
    // at the pre-zygote compaction.
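    // (The semispaces are swapped between collections, so the to-space recorded
    // by the previous GC is what this GC treats as the from-space.)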
    if (!from_space_->HasAddress(reinterpret_cast<mirror::Object*>(last_gc_to_space_end_))) {
      last_gc_to_space_end_ = from_space_->Begin();
    }
    // Reset this before the marking starts below.
    bytes_promoted_ = 0;
  }
  // Assume the cleared space is already empty.
  BindBitmaps();
  // Process dirty cards and add dirty cards to mod-union tables.
  heap_->ProcessCards(timings_, kUseRememberedSet && generational_);
  // Clear the whole card table since we cannot get any additional dirty cards during the
  // paused GC. This saves memory but only works for pause-the-world collectors.
  timings_.NewSplit("ClearCardTable");
  heap_->GetCardTable()->ClearCardTable();
  // Need to do this before the checkpoint since we don't want any threads to add references to
  // the live stack during the recursive mark.
  timings_.NewSplit("SwapStacks");
  if (kUseThreadLocalAllocationStack) {
    heap_->RevokeAllThreadLocalAllocationStacks(self_);
  }
  heap_->SwapStacks(self_);
  WriterMutexLock mu(self_, *Locks::heap_bitmap_lock_);
  MarkRoots();
  // Mark roots of immune spaces.
  UpdateAndMarkModUnion();
  // Recursively mark remaining objects.
  MarkReachableObjects();
}

void SemiSpace::UpdateAndMarkModUnion() {
  for (auto& space : heap_->GetContinuousSpaces()) {
    // If the space is immune then we need to mark the references to other spaces.
    if (immune_region_.ContainsSpace(space)) {
      accounting::ModUnionTable* table = heap_->FindModUnionTableFromSpace(space);
      if (table != nullptr) {
        // TODO: Improve naming.
        TimingLogger::ScopedSplit split(
            space->IsZygoteSpace() ? "UpdateAndMarkZygoteModUnionTable" :
                                     "UpdateAndMarkImageModUnionTable",
            &timings_);
        table->UpdateAndMarkReferences(MarkHeapReferenceCallback, this);
      } else if (heap_->FindRememberedSetFromSpace(space) != nullptr) {
        DCHECK(kUseRememberedSet);
        // If this is a bump pointer space only collection, the non-moving
        // space is added to the immune space. The non-moving space
        // doesn't have a mod union table, but has a remembered
        // set. Its dirty cards will be scanned later in
        // MarkReachableObjects().
        DCHECK(generational_ && !whole_heap_collection_ &&
               (space == heap_->GetNonMovingSpace() || space == heap_->GetPrimaryFreeListSpace()))
            << "Space " << space->GetName() << " "
            << "generational_=" << generational_ << " "
            << "whole_heap_collection_=" << whole_heap_collection_ << " ";
      } else {
        DCHECK(!kUseRememberedSet);
        // If this is a bump pointer space only collection, the non-moving
        // space is added to the immune space. But the non-moving
        // space doesn't have a mod union table. Instead, its live
        // bitmap will be scanned later in MarkReachableObjects().
        DCHECK(generational_ && !whole_heap_collection_ &&
               (space == heap_->GetNonMovingSpace() || space == heap_->GetPrimaryFreeListSpace()))
            << "Space " << space->GetName() << " "
            << "generational_=" << generational_ << " "
            << "whole_heap_collection_=" << whole_heap_collection_ << " ";
      }
    }
  }
}

class SemiSpaceScanObjectVisitor {
 public:
  explicit SemiSpaceScanObjectVisitor(SemiSpace* ss) : semi_space_(ss) {}
  void operator()(Object* obj) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
    // TODO: fix NO_THREAD_SAFETY_ANALYSIS. ScanObject() requires an
    // exclusive lock on the mutator lock, but
    // SpaceBitmap::VisitMarkedRange() only requires the shared lock.
    DCHECK(obj != nullptr);
    semi_space_->ScanObject(obj);
  }
 private:
  SemiSpace* const semi_space_;
};

// Used to verify that there are no references to the from-space.
class SemiSpaceVerifyNoFromSpaceReferencesVisitor {
 public:
  explicit SemiSpaceVerifyNoFromSpaceReferencesVisitor(space::ContinuousMemMapAllocSpace* from_space) :
      from_space_(from_space) {}

  void operator()(Object* obj, MemberOffset offset, bool /* is_static */) const
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) ALWAYS_INLINE {
    mirror::Object* ref = obj->GetFieldObject<mirror::Object>(offset, false);
    if (from_space_->HasAddress(ref)) {
      Runtime::Current()->GetHeap()->DumpObject(LOG(INFO), obj);
      LOG(FATAL) << ref << " found in from space";
    }
  }
 private:
  space::ContinuousMemMapAllocSpace* from_space_;
};

void SemiSpace::VerifyNoFromSpaceReferences(Object* obj) {
  DCHECK(!from_space_->HasAddress(obj)) << "Scanning object " << obj << " in from space";
  SemiSpaceVerifyNoFromSpaceReferencesVisitor visitor(from_space_);
  obj->VisitReferences<kMovingClasses>(visitor);
}

class SemiSpaceVerifyNoFromSpaceReferencesObjectVisitor {
 public:
  explicit SemiSpaceVerifyNoFromSpaceReferencesObjectVisitor(SemiSpace* ss) : semi_space_(ss) {}
  void operator()(Object* obj) const
      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_) {
    DCHECK(obj != nullptr);
    semi_space_->VerifyNoFromSpaceReferences(obj);
  }
 private:
  SemiSpace* const semi_space_;
};

void SemiSpace::MarkReachableObjects() {
  timings_.StartSplit("MarkStackAsLive");
  accounting::ObjectStack* live_stack = heap_->GetLiveStack();
  heap_->MarkAllocStackAsLive(live_stack);
  live_stack->Reset();
  timings_.EndSplit();

  for (auto& space : heap_->GetContinuousSpaces()) {
    // If the space is immune and has no mod union table (the
    // non-moving space when the bump pointer space only collection is
    // enabled), then we need to scan its live bitmap or dirty cards as roots
    // (including the objects on the live stack which have just been marked
    // in the live bitmap above in MarkAllocStackAsLive()).
    if (immune_region_.ContainsSpace(space) &&
        heap_->FindModUnionTableFromSpace(space) == nullptr) {
      DCHECK(generational_ && !whole_heap_collection_ &&
             (space == GetHeap()->GetNonMovingSpace() || space == GetHeap()->GetPrimaryFreeListSpace()));
      accounting::RememberedSet* rem_set = heap_->FindRememberedSetFromSpace(space);
      if (kUseRememberedSet) {
        DCHECK(rem_set != nullptr);
        rem_set->UpdateAndMarkReferences(MarkHeapReferenceCallback, from_space_, this);
        if (kIsDebugBuild) {
          // Verify that there are no from-space references that
          // remain in the space, that is, the remembered set (and the
          // card table) didn't miss any from-space references in the
          // space.
          accounting::SpaceBitmap* live_bitmap = space->GetLiveBitmap();
          SemiSpaceVerifyNoFromSpaceReferencesObjectVisitor visitor(this);
          live_bitmap->VisitMarkedRange(reinterpret_cast<uintptr_t>(space->Begin()),
                                        reinterpret_cast<uintptr_t>(space->End()),
                                        visitor);
        }
      } else {
        DCHECK(rem_set == nullptr);
        accounting::SpaceBitmap* live_bitmap = space->GetLiveBitmap();
        SemiSpaceScanObjectVisitor visitor(this);
        live_bitmap->VisitMarkedRange(reinterpret_cast<uintptr_t>(space->Begin()),
                                      reinterpret_cast<uintptr_t>(space->End()),
                                      visitor);
      }
    }
  }

  if (is_large_object_space_immune_) {
    DCHECK(generational_ && !whole_heap_collection_);
    // Delay copying the live set to the marked set until here from
    // BindBitmaps() as the large objects on the allocation stack may
    // be newly added to the live set above in MarkAllocStackAsLive().
    GetHeap()->GetLargeObjectsSpace()->CopyLiveToMarked();

    // When the large object space is immune, we need to scan the
    // live large objects as roots since they contain references to their
    // classes (primitive array classes) that could move, even though they
    // don't contain any other references.
    space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
    accounting::ObjectSet* large_live_objects = large_object_space->GetLiveObjects();
    SemiSpaceScanObjectVisitor visitor(this);
    for (const Object* obj : large_live_objects->GetObjects()) {
      visitor(const_cast<Object*>(obj));
    }
  }

  // Recursively process the mark stack.
  ProcessMarkStack();
}

void SemiSpace::ReclaimPhase() {
  TimingLogger::ScopedSplit split("ReclaimPhase", &timings_);
  ProcessReferences(self_);
  {
    ReaderMutexLock mu(self_, *Locks::heap_bitmap_lock_);
    SweepSystemWeaks();
  }
  // Record freed memory.
  uint64_t from_bytes = from_space_->GetBytesAllocated();
  uint64_t to_bytes = to_space_->GetBytesAllocated();
  uint64_t from_objects = from_space_->GetObjectsAllocated();
  uint64_t to_objects = to_space_->GetObjectsAllocated();
  CHECK_LE(to_objects, from_objects);
  int64_t freed_bytes = from_bytes - to_bytes;
  int64_t freed_objects = from_objects - to_objects;
  freed_bytes_.FetchAndAdd(freed_bytes);
  freed_objects_.FetchAndAdd(freed_objects);
  // Note: Freed bytes can be negative if we copy from a compacted space to a free-list backed
  // space.
  heap_->RecordFree(freed_objects, freed_bytes);

  timings_.StartSplit("PreSweepingGcVerification");
  heap_->PreSweepingGcVerification(this);
  timings_.EndSplit();
  {
    WriterMutexLock mu(self_, *Locks::heap_bitmap_lock_);
    // Reclaim unmarked objects.
    Sweep(false);
    // Swap the live and mark bitmaps for each space which we modified. This is an
    // optimization that enables us to not clear live bits inside of the sweep. Only swaps unbound
    // bitmaps.
    timings_.StartSplit("SwapBitmaps");
    SwapBitmaps();
    timings_.EndSplit();
    // Unbind the live and mark bitmaps.
    TimingLogger::ScopedSplit split("UnBindBitmaps", &timings_);
    GetHeap()->UnBindBitmaps();
  }
  // TODO: Do this before doing verification since the from space may have objects which weren't
  // moved and point to dead objects.
  from_space_->Clear();
  // Protect the from space.
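  // (With kProtectFromSpace, the pages are mapped PROT_NONE, so any stale access
  // to a from-space object faults immediately rather than silently reading garbage.)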
  VLOG(heap) << "Protecting space " << *from_space_;
  if (kProtectFromSpace) {
    from_space_->GetMemMap()->Protect(PROT_NONE);
  } else {
    from_space_->GetMemMap()->Protect(PROT_READ);
  }
  if (saved_bytes_ > 0) {
    VLOG(heap) << "Avoided dirtying " << PrettySize(saved_bytes_);
  }

  if (generational_) {
    // Record the end (top) of the to space so we can distinguish
    // between objects that were allocated since the last GC and the
    // older objects.
    last_gc_to_space_end_ = to_space_->End();
  }
}

void SemiSpace::ResizeMarkStack(size_t new_size) {
  std::vector<Object*> temp(mark_stack_->Begin(), mark_stack_->End());
  CHECK_LE(mark_stack_->Size(), new_size);
  mark_stack_->Resize(new_size);
  for (const auto& obj : temp) {
    mark_stack_->PushBack(obj);
  }
}

inline void SemiSpace::MarkStackPush(Object* obj) {
  if (UNLIKELY(mark_stack_->Size() >= mark_stack_->Capacity())) {
    ResizeMarkStack(mark_stack_->Capacity() * 2);
  }
  // The object must be pushed onto the mark stack.
  mark_stack_->PushBack(obj);
}

// Rare case, probably not worth inlining since it will increase instruction cache miss rate.
bool SemiSpace::MarkLargeObject(const Object* obj) {
  // TODO: support >1 discontinuous space.
  space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
  DCHECK(large_object_space->Contains(obj));
  accounting::ObjectSet* large_objects = large_object_space->GetMarkObjects();
  if (UNLIKELY(!large_objects->Test(obj))) {
    large_objects->Set(obj);
    return true;
  }
  return false;
}

static inline size_t CopyAvoidingDirtyingPages(void* dest, const void* src, size_t size) {
  if (LIKELY(size <= static_cast<size_t>(kPageSize))) {
    // We will dirty the current page and somewhere in the middle of the next page. This means
    // that the next object copied will also dirty that page.
    // TODO: Worth considering the last object copied? We may end up dirtying one page which is
    // not necessary per GC.
    memcpy(dest, src, size);
    return 0;
  }
  size_t saved_bytes = 0;
  byte* byte_dest = reinterpret_cast<byte*>(dest);
  if (kIsDebugBuild) {
    for (size_t i = 0; i < size; ++i) {
      CHECK_EQ(byte_dest[i], 0U);
    }
  }
  // Process the start of the page. The page must already be dirty, don't bother with checking.
  const byte* byte_src = reinterpret_cast<const byte*>(src);
  const byte* limit = byte_src + size;
  size_t page_remain = AlignUp(byte_dest, kPageSize) - byte_dest;
  // Copy the bytes until the start of the next page.
  memcpy(dest, src, page_remain);
  byte_src += page_remain;
  byte_dest += page_remain;
  DCHECK_ALIGNED(reinterpret_cast<uintptr_t>(byte_dest), kPageSize);
  DCHECK_ALIGNED(reinterpret_cast<uintptr_t>(byte_dest), sizeof(uintptr_t));
  DCHECK_ALIGNED(reinterpret_cast<uintptr_t>(byte_src), sizeof(uintptr_t));
  while (byte_src + kPageSize < limit) {
    bool all_zero = true;
    uintptr_t* word_dest = reinterpret_cast<uintptr_t*>(byte_dest);
    const uintptr_t* word_src = reinterpret_cast<const uintptr_t*>(byte_src);
    for (size_t i = 0; i < kPageSize / sizeof(*word_src); ++i) {
      // Assumes the destination of the copy is all zeros.
      if (word_src[i] != 0) {
        all_zero = false;
        word_dest[i] = word_src[i];
      }
    }
    if (all_zero) {
      // Avoided copying into the page since it was all zeros.
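      // Skipping the write keeps this destination page untouched, so it is never
      // dirtied and needs no additional physical backing.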
      saved_bytes += kPageSize;
    }
    byte_src += kPageSize;
    byte_dest += kPageSize;
  }
  // Handle the part of the page at the end.
  memcpy(byte_dest, byte_src, limit - byte_src);
  return saved_bytes;
}

mirror::Object* SemiSpace::MarkNonForwardedObject(mirror::Object* obj) {
  size_t object_size = obj->SizeOf();
  size_t bytes_allocated;
  mirror::Object* forward_address = nullptr;
  if (generational_ && reinterpret_cast<byte*>(obj) < last_gc_to_space_end_) {
    // If it's allocated before the last GC (older), move
    // (pseudo-promote) it to the main free list space (as sort
    // of an old generation).
    size_t bytes_promoted;
    space::MallocSpace* promo_dest_space = GetHeap()->GetPrimaryFreeListSpace();
    forward_address = promo_dest_space->Alloc(self_, object_size, &bytes_promoted, nullptr);
    if (forward_address == nullptr) {
      // If out of space, fall back to the to-space.
      forward_address = to_space_->Alloc(self_, object_size, &bytes_allocated, nullptr);
    } else {
      GetHeap()->num_bytes_allocated_.FetchAndAdd(bytes_promoted);
      bytes_promoted_ += bytes_promoted;
      // Dirty the card at the destination as it may contain
      // references (including the class pointer) to the bump pointer
      // space.
      GetHeap()->WriteBarrierEveryFieldOf(forward_address);
      // Handle the bitmaps marking.
      accounting::SpaceBitmap* live_bitmap = promo_dest_space->GetLiveBitmap();
      DCHECK(live_bitmap != nullptr);
      accounting::SpaceBitmap* mark_bitmap = promo_dest_space->GetMarkBitmap();
      DCHECK(mark_bitmap != nullptr);
      DCHECK(!live_bitmap->Test(forward_address));
      if (!whole_heap_collection_) {
        // If collecting the bump pointer spaces only, live_bitmap == mark_bitmap.
        DCHECK_EQ(live_bitmap, mark_bitmap);

        // If this is a bump pointer space only collection, delay the live
        // bitmap marking of the promoted object until it's popped off
        // the mark stack (ProcessMarkStack()). The rationale: we may
        // be in the middle of scanning the objects in the promo
        // destination space for
        // non-moving-space-to-bump-pointer-space references by
        // iterating over the marked bits of the live bitmap
        // (MarkReachableObjects()). If we don't delay it (and instead
        // mark the promoted object here), the above promo destination
        // space scan could encounter the just-promoted object and
        // forward the references in the promoted object's fields even
        // though it is pushed onto the mark stack. If this happens,
        // the promoted object would be in an inconsistent state, that
        // is, it's on the mark stack (gray) but its fields are
        // already forwarded (black), which would cause a
        // DCHECK(!to_space_->HasAddress(obj)) failure below.
      } else {
        // Mark forward_address on the live bit map.
        live_bitmap->Set(forward_address);
        // Mark forward_address on the mark bit map.
        DCHECK(!mark_bitmap->Test(forward_address));
        mark_bitmap->Set(forward_address);
      }
    }
    DCHECK(forward_address != nullptr);
  } else {
    // If it's allocated after the last GC (younger), copy it to the to-space.
    forward_address = to_space_->Alloc(self_, object_size, &bytes_allocated, nullptr);
  }
  // Copy over the object and add it to the mark stack since we still need to update its
  // references.
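  // CopyAvoidingDirtyingPages() skips the copy for interior pages whose source
  // words are all zero (the zero-initialized destination already matches) and
  // returns the number of bytes it avoided writing.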
  saved_bytes_ +=
      CopyAvoidingDirtyingPages(reinterpret_cast<void*>(forward_address), obj, object_size);
  if (kUseBakerOrBrooksReadBarrier) {
    obj->AssertReadBarrierPointer();
    if (kUseBrooksReadBarrier) {
      DCHECK_EQ(forward_address->GetReadBarrierPointer(), obj);
      forward_address->SetReadBarrierPointer(forward_address);
    }
    forward_address->AssertReadBarrierPointer();
  }
  if (to_space_live_bitmap_ != nullptr) {
    to_space_live_bitmap_->Set(forward_address);
  }
  DCHECK(to_space_->HasAddress(forward_address) ||
         (generational_ && GetHeap()->GetPrimaryFreeListSpace()->HasAddress(forward_address)));
  return forward_address;
}

void SemiSpace::ProcessMarkStackCallback(void* arg) {
  reinterpret_cast<SemiSpace*>(arg)->ProcessMarkStack();
}

mirror::Object* SemiSpace::MarkObjectCallback(mirror::Object* root, void* arg) {
  auto ref = StackReference<mirror::Object>::FromMirrorPtr(root);
  reinterpret_cast<SemiSpace*>(arg)->MarkObject(&ref);
  return ref.AsMirrorPtr();
}

void SemiSpace::MarkHeapReferenceCallback(mirror::HeapReference<mirror::Object>* obj_ptr,
                                          void* arg) {
  reinterpret_cast<SemiSpace*>(arg)->MarkObject(obj_ptr);
}

void SemiSpace::MarkRootCallback(Object** root, void* arg, uint32_t /*thread_id*/,
                                 RootType /*root_type*/) {
  auto ref = StackReference<mirror::Object>::FromMirrorPtr(*root);
  reinterpret_cast<SemiSpace*>(arg)->MarkObject(&ref);
  if (*root != ref.AsMirrorPtr()) {
    *root = ref.AsMirrorPtr();
  }
}

// Marks all objects in the root set.
void SemiSpace::MarkRoots() {
  timings_.StartSplit("MarkRoots");
  // TODO: Visit up image roots as well?
  Runtime::Current()->VisitRoots(MarkRootCallback, this);
  timings_.EndSplit();
}

mirror::Object* SemiSpace::MarkedForwardingAddressCallback(mirror::Object* object, void* arg) {
  return reinterpret_cast<SemiSpace*>(arg)->GetMarkedForwardAddress(object);
}

void SemiSpace::SweepSystemWeaks() {
  timings_.StartSplit("SweepSystemWeaks");
  Runtime::Current()->SweepSystemWeaks(MarkedForwardingAddressCallback, this);
  timings_.EndSplit();
}

bool SemiSpace::ShouldSweepSpace(space::ContinuousSpace* space) const {
  return space != from_space_ && space != to_space_ && !immune_region_.ContainsSpace(space);
}

void SemiSpace::Sweep(bool swap_bitmaps) {
  DCHECK(mark_stack_->IsEmpty());
  TimingLogger::ScopedSplit split("Sweep", &timings_);
  for (const auto& space : GetHeap()->GetContinuousSpaces()) {
    if (space->IsContinuousMemMapAllocSpace()) {
      space::ContinuousMemMapAllocSpace* alloc_space = space->AsContinuousMemMapAllocSpace();
      if (!ShouldSweepSpace(alloc_space)) {
        continue;
      }
      TimingLogger::ScopedSplit split(
          alloc_space->IsZygoteSpace() ? "SweepZygoteSpace" : "SweepAllocSpace", &timings_);
      size_t freed_objects = 0;
      size_t freed_bytes = 0;
      alloc_space->Sweep(swap_bitmaps, &freed_objects, &freed_bytes);
      heap_->RecordFree(freed_objects, freed_bytes);
      freed_objects_.FetchAndAdd(freed_objects);
      freed_bytes_.FetchAndAdd(freed_bytes);
    }
  }
  if (!is_large_object_space_immune_) {
    SweepLargeObjects(swap_bitmaps);
  }
}

void SemiSpace::SweepLargeObjects(bool swap_bitmaps) {
  DCHECK(!is_large_object_space_immune_);
  TimingLogger::ScopedSplit split("SweepLargeObjects", &timings_);
  size_t freed_objects = 0;
  size_t freed_bytes = 0;
  GetHeap()->GetLargeObjectsSpace()->Sweep(swap_bitmaps, &freed_objects, &freed_bytes);
  freed_large_objects_.FetchAndAdd(freed_objects);
  freed_large_object_bytes_.FetchAndAdd(freed_bytes);
  GetHeap()->RecordFree(freed_objects, freed_bytes);
}

// Process the "referent" field in a java.lang.ref.Reference. If the referent has not yet been
// marked, put it on the appropriate list in the heap for later processing.
void SemiSpace::DelayReferenceReferent(mirror::Class* klass, mirror::Reference* reference) {
  heap_->DelayReferenceReferent(klass, reference, MarkedForwardingAddressCallback, this);
}

class SemiSpaceMarkObjectVisitor {
 public:
  explicit SemiSpaceMarkObjectVisitor(SemiSpace* collector) : collector_(collector) {
  }

  void operator()(Object* obj, MemberOffset offset, bool /* is_static */) const ALWAYS_INLINE
      EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
    // Object was already verified when we scanned it.
    collector_->MarkObject(obj->GetFieldObjectReferenceAddr<kVerifyNone>(offset));
  }

  void operator()(mirror::Class* klass, mirror::Reference* ref) const
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
    collector_->DelayReferenceReferent(klass, ref);
  }

 private:
  SemiSpace* const collector_;
};

// Visit all of the references of an object and update.
void SemiSpace::ScanObject(Object* obj) {
  DCHECK(!from_space_->HasAddress(obj)) << "Scanning object " << obj << " in from space";
  SemiSpaceMarkObjectVisitor visitor(this);
  obj->VisitReferences<kMovingClasses>(visitor, visitor);
}

// Scan anything that's on the mark stack.
void SemiSpace::ProcessMarkStack() {
  space::MallocSpace* promo_dest_space = nullptr;
  accounting::SpaceBitmap* live_bitmap = nullptr;
  if (generational_ && !whole_heap_collection_) {
    // If this is a bump pointer space only collection (and promotion is
    // enabled), we delay the live-bitmap marking of promoted objects
    // from MarkObject() until this function.
    promo_dest_space = GetHeap()->GetPrimaryFreeListSpace();
    live_bitmap = promo_dest_space->GetLiveBitmap();
    DCHECK(live_bitmap != nullptr);
    accounting::SpaceBitmap* mark_bitmap = promo_dest_space->GetMarkBitmap();
    DCHECK(mark_bitmap != nullptr);
    DCHECK_EQ(live_bitmap, mark_bitmap);
  }
  timings_.StartSplit("ProcessMarkStack");
  while (!mark_stack_->IsEmpty()) {
    Object* obj = mark_stack_->PopBack();
    if (generational_ && !whole_heap_collection_ && promo_dest_space->HasAddress(obj)) {
      // obj has just been promoted. Mark it in the live bitmap, which was
      // delayed from MarkObject().
      DCHECK(!live_bitmap->Test(obj));
      live_bitmap->Set(obj);
    }
    ScanObject(obj);
  }
  timings_.EndSplit();
}

inline Object* SemiSpace::GetMarkedForwardAddress(mirror::Object* obj) const
    SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
  // All immune objects are assumed marked.
  if (immune_region_.ContainsObject(obj)) {
    return obj;
  }
  if (from_space_->HasAddress(obj)) {
    // Returns either the forwarding address or nullptr.
    return GetForwardingAddressInFromSpace(obj);
  } else if (to_space_->HasAddress(obj)) {
    // Should be unlikely.
    // Already forwarded, must be marked.
    return obj;
  }
  return heap_->GetMarkBitmap()->Test(obj) ? obj : nullptr;
}

void SemiSpace::SetToSpace(space::ContinuousMemMapAllocSpace* to_space) {
  DCHECK(to_space != nullptr);
  to_space_ = to_space;
}

void SemiSpace::SetFromSpace(space::ContinuousMemMapAllocSpace* from_space) {
  DCHECK(from_space != nullptr);
  from_space_ = from_space;
}

void SemiSpace::FinishPhase() {
  TimingLogger::ScopedSplit split("FinishPhase", &timings_);
  Heap* heap = GetHeap();
  timings_.NewSplit("PostGcVerification");
  heap->PostGcVerification(this);
  // Null the "to" and "from" spaces since compacting from one to the other isn't valid until
  // further action is done by the heap.
  to_space_ = nullptr;
  from_space_ = nullptr;
  CHECK(mark_stack_->IsEmpty());
  mark_stack_->Reset();
  if (generational_) {
    // Decide whether to do a whole heap collection or a bump pointer
    // space only collection at the next collection by updating
    // whole_heap_collection.
    if (!whole_heap_collection_) {
      if (!kUseBytesPromoted) {
        // Enable whole_heap_collection once every
        // kDefaultWholeHeapCollectionInterval collections.
        --whole_heap_collection_interval_counter_;
        DCHECK_GE(whole_heap_collection_interval_counter_, 0);
        if (whole_heap_collection_interval_counter_ == 0) {
          whole_heap_collection_ = true;
        }
      } else {
        // Enable whole_heap_collection if the bytes promoted since
        // the last whole heap collection exceeds a threshold.
        bytes_promoted_since_last_whole_heap_collection_ += bytes_promoted_;
        if (bytes_promoted_since_last_whole_heap_collection_ >= kBytesPromotedThreshold) {
          whole_heap_collection_ = true;
        }
      }
    } else {
      if (!kUseBytesPromoted) {
        DCHECK_EQ(whole_heap_collection_interval_counter_, 0);
        whole_heap_collection_interval_counter_ = kDefaultWholeHeapCollectionInterval;
        whole_heap_collection_ = false;
      } else {
        // Reset the counter, carrying over the bytes promoted during this whole heap collection.
        bytes_promoted_since_last_whole_heap_collection_ = bytes_promoted_;
        whole_heap_collection_ = false;
      }
    }
  }
  // Clear all of the spaces' mark bitmaps.
  WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
  heap_->ClearMarkedObjects();
}

void SemiSpace::RevokeAllThreadLocalBuffers() {
  timings_.StartSplit("(Paused)RevokeAllThreadLocalBuffers");
  GetHeap()->RevokeAllThreadLocalBuffers();
  timings_.EndSplit();
}

}  // namespace collector
}  // namespace gc
}  // namespace art