semi_space.cc revision 815873ecc312b1d231acce71e1a16f42cdaf09f2
/*
 * Copyright (C) 2013 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "semi_space.h"

#include <functional>
#include <numeric>
#include <climits>
#include <vector>

#include "base/logging.h"
#include "base/macros.h"
#include "base/mutex-inl.h"
#include "base/timing_logger.h"
#include "gc/accounting/heap_bitmap.h"
#include "gc/accounting/mod_union_table.h"
#include "gc/accounting/space_bitmap-inl.h"
#include "gc/heap.h"
#include "gc/space/bump_pointer_space.h"
#include "gc/space/bump_pointer_space-inl.h"
#include "gc/space/image_space.h"
#include "gc/space/large_object_space.h"
#include "gc/space/space-inl.h"
#include "indirect_reference_table.h"
#include "intern_table.h"
#include "jni_internal.h"
#include "mark_sweep-inl.h"
#include "monitor.h"
#include "mirror/art_field.h"
#include "mirror/art_field-inl.h"
#include "mirror/class-inl.h"
#include "mirror/class_loader.h"
#include "mirror/dex_cache.h"
#include "mirror/object-inl.h"
#include "mirror/object_array.h"
#include "mirror/object_array-inl.h"
#include "runtime.h"
#include "semi_space-inl.h"
#include "thread-inl.h"
#include "thread_list.h"
#include "verifier/method_verifier.h"

using ::art::mirror::Class;
using ::art::mirror::Object;

namespace art {
namespace gc {
namespace collector {

static constexpr bool kProtectFromSpace = true;
static constexpr bool kResetFromSpace = true;

// TODO: Unduplicate logic.
void SemiSpace::ImmuneSpace(space::ContinuousSpace* space) {
  // Bind live to mark bitmap if necessary.
  if (space->GetLiveBitmap() != space->GetMarkBitmap()) {
    CHECK(space->IsContinuousMemMapAllocSpace());
    space->AsContinuousMemMapAllocSpace()->BindLiveToMarkBitmap();
  }
  // Add the space to the immune region.
  if (immune_begin_ == nullptr) {
    DCHECK(immune_end_ == nullptr);
    immune_begin_ = reinterpret_cast<Object*>(space->Begin());
    immune_end_ = reinterpret_cast<Object*>(space->End());
  } else {
    const space::ContinuousSpace* prev_space = nullptr;
    // Find out if the previous space is immune.
    for (space::ContinuousSpace* cur_space : GetHeap()->GetContinuousSpaces()) {
      if (cur_space == space) {
        break;
      }
      prev_space = cur_space;
    }
    // If the previous space was immune, then extend the immune region. Relies on continuous
    // spaces being sorted by Heap::AddContinuousSpace.
    if (prev_space != nullptr && IsImmuneSpace(prev_space)) {
      immune_begin_ = std::min(reinterpret_cast<Object*>(space->Begin()), immune_begin_);
      // Use Limit() instead of End() because otherwise if the generational mode is enabled, the
      // alloc space might expand due to promotion and the sense of immunity may change in the
      // middle of a GC.
      immune_end_ = std::max(reinterpret_cast<Object*>(space->Limit()), immune_end_);
    }
  }
}

void SemiSpace::BindBitmaps() {
  timings_.StartSplit("BindBitmaps");
  WriterMutexLock mu(self_, *Locks::heap_bitmap_lock_);
  // Mark all of the spaces we never collect as immune.
  for (const auto& space : GetHeap()->GetContinuousSpaces()) {
    if (space->GetLiveBitmap() != nullptr) {
      if (space == to_space_) {
        CHECK(to_space_->IsContinuousMemMapAllocSpace());
        to_space_->AsContinuousMemMapAllocSpace()->BindLiveToMarkBitmap();
      } else if (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyNeverCollect
                 || space->GetGcRetentionPolicy() == space::kGcRetentionPolicyFullCollect
                 // Add the main free list space and the non-moving space to the immune space
                 // if this is a bump pointer space only collection.
                 || (generational_ && !whole_heap_collection_ &&
                     (space == GetHeap()->GetNonMovingSpace() ||
                      space == GetHeap()->GetPrimaryFreeListSpace()))) {
        ImmuneSpace(space);
      }
    }
  }
  if (generational_ && !whole_heap_collection_) {
    // We won't collect the large object space if this is a bump pointer space only collection.
    is_large_object_space_immune_ = true;
  }
  timings_.EndSplit();
}

SemiSpace::SemiSpace(Heap* heap, bool generational, const std::string& name_prefix)
    : GarbageCollector(heap,
                       name_prefix + (name_prefix.empty() ? "" : " ") + "marksweep + semispace"),
      mark_stack_(nullptr),
      immune_begin_(nullptr),
      immune_end_(nullptr),
      is_large_object_space_immune_(false),
      to_space_(nullptr),
      from_space_(nullptr),
      self_(nullptr),
      generational_(generational),
      last_gc_to_space_end_(nullptr),
      bytes_promoted_(0),
      whole_heap_collection_(true),
      whole_heap_collection_interval_counter_(0) {
}

void SemiSpace::InitializePhase() {
  timings_.Reset();
  TimingLogger::ScopedSplit split("InitializePhase", &timings_);
  mark_stack_ = heap_->mark_stack_.get();
  DCHECK(mark_stack_ != nullptr);
  immune_begin_ = nullptr;
  immune_end_ = nullptr;
  is_large_object_space_immune_ = false;
  saved_bytes_ = 0;
  self_ = Thread::Current();
  // Do any pre GC verification.
  timings_.NewSplit("PreGcVerification");
  heap_->PreGcVerification(this);
  // Set the initial bitmap.
  to_space_live_bitmap_ = to_space_->GetLiveBitmap();
}

void SemiSpace::ProcessReferences(Thread* self) {
  TimingLogger::ScopedSplit split("ProcessReferences", &timings_);
  WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
  GetHeap()->ProcessReferences(timings_, clear_soft_references_, &MarkedForwardingAddressCallback,
                               &RecursiveMarkObjectCallback, this);
}

void SemiSpace::MarkingPhase() {
  if (generational_) {
    if (gc_cause_ == kGcCauseExplicit || gc_cause_ == kGcCauseForNativeAlloc ||
        clear_soft_references_) {
      // If this is an explicit, native allocation-triggered, or last attempt collection,
      // collect the whole heap (and reset the interval counter to be consistent).
      whole_heap_collection_ = true;
      whole_heap_collection_interval_counter_ = 0;
    }
    if (whole_heap_collection_) {
      VLOG(heap) << "Whole heap collection";
    } else {
      VLOG(heap) << "Bump pointer space only collection";
    }
  }
  Locks::mutator_lock_->AssertExclusiveHeld(self_);

  TimingLogger::ScopedSplit split("MarkingPhase", &timings_);
  // Need to do this with mutators paused so that somebody doesn't accidentally allocate into the
  // wrong space.
  heap_->SwapSemiSpaces();
  if (generational_) {
    // If last_gc_to_space_end_ is out of the bounds of the from-space (the to-space from the last
    // GC), then point it to the beginning of the from-space. For example, the very first GC or
    // the pre-zygote compaction.
    if (!from_space_->HasAddress(reinterpret_cast<mirror::Object*>(last_gc_to_space_end_))) {
      last_gc_to_space_end_ = from_space_->Begin();
    }
    // Reset this before the marking starts below.
    bytes_promoted_ = 0;
  }
  // Assume the cleared space is already empty.
  BindBitmaps();
  // Process dirty cards and add dirty cards to mod-union tables.
  heap_->ProcessCards(timings_);
  // Clear the whole card table since we can not get any additional dirty cards during the
  // paused GC. This saves memory but only works for pause-the-world collectors.
  timings_.NewSplit("ClearCardTable");
  heap_->GetCardTable()->ClearCardTable();
  // Need to do this before the checkpoint since we don't want any threads to add references to
  // the live stack during the recursive mark.
  timings_.NewSplit("SwapStacks");
  if (kUseThreadLocalAllocationStack) {
    heap_->RevokeAllThreadLocalAllocationStacks(self_);
  }
  heap_->SwapStacks(self_);
  WriterMutexLock mu(self_, *Locks::heap_bitmap_lock_);
  MarkRoots();
  // Mark roots of immune spaces.
  UpdateAndMarkModUnion();
  // Recursively mark remaining objects.
  MarkReachableObjects();
}

bool SemiSpace::IsImmuneSpace(const space::ContinuousSpace* space) const {
  return
      immune_begin_ <= reinterpret_cast<Object*>(space->Begin()) &&
      immune_end_ >= reinterpret_cast<Object*>(space->End());
}

void SemiSpace::UpdateAndMarkModUnion() {
  for (auto& space : heap_->GetContinuousSpaces()) {
    // If the space is immune then we need to mark the references to other spaces.
    if (IsImmuneSpace(space)) {
      accounting::ModUnionTable* table = heap_->FindModUnionTableFromSpace(space);
      if (table != nullptr) {
        // TODO: Improve naming.
        TimingLogger::ScopedSplit split(
            space->IsZygoteSpace() ? "UpdateAndMarkZygoteModUnionTable" :
                                     "UpdateAndMarkImageModUnionTable",
            &timings_);
        table->UpdateAndMarkReferences(MarkObjectCallback, this);
      } else {
        // In a bump pointer space only collection, the non-moving space is added to the immune
        // space. But the non-moving space doesn't have a mod union table. Instead, its live
        // bitmap will be scanned later in MarkReachableObjects().
        DCHECK(generational_ && !whole_heap_collection_ &&
               (space == heap_->GetNonMovingSpace() || space == heap_->GetPrimaryFreeListSpace()));
      }
    }
  }
}

class SemiSpaceScanObjectVisitor {
 public:
  explicit SemiSpaceScanObjectVisitor(SemiSpace* ss) : semi_space_(ss) {}

  void operator()(Object* obj) const NO_THREAD_SAFETY_ANALYSIS {
    // TODO: fix NO_THREAD_SAFETY_ANALYSIS.
    // ScanObject() requires an exclusive lock on the mutator lock, but
    // SpaceBitmap::VisitMarkedRange() only requires the shared lock.
    DCHECK(obj != nullptr);
    semi_space_->ScanObject(obj);
  }

 private:
  SemiSpace* semi_space_;
};

void SemiSpace::MarkReachableObjects() {
  timings_.StartSplit("MarkStackAsLive");
  accounting::ObjectStack* live_stack = heap_->GetLiveStack();
  heap_->MarkAllocStackAsLive(live_stack);
  live_stack->Reset();
  timings_.EndSplit();

  for (auto& space : heap_->GetContinuousSpaces()) {
    // If the space is immune and has no mod union table (the non-moving space when the bump
    // pointer space only collection is enabled), then we need to scan its live bitmap as roots
    // (including the objects on the live stack which have just been marked in the live bitmap
    // above in MarkAllocStackAsLive()).
    if (IsImmuneSpace(space) && heap_->FindModUnionTableFromSpace(space) == nullptr) {
      DCHECK(generational_ && !whole_heap_collection_ &&
             (space == GetHeap()->GetNonMovingSpace() ||
              space == GetHeap()->GetPrimaryFreeListSpace()));
      accounting::SpaceBitmap* live_bitmap = space->GetLiveBitmap();
      SemiSpaceScanObjectVisitor visitor(this);
      live_bitmap->VisitMarkedRange(reinterpret_cast<uintptr_t>(space->Begin()),
                                    reinterpret_cast<uintptr_t>(space->End()),
                                    visitor);
    }
  }

  if (is_large_object_space_immune_) {
    DCHECK(generational_ && !whole_heap_collection_);
    // Delay copying the live set to the marked set until here from BindBitmaps() as the large
    // objects on the allocation stack may be newly added to the live set above in
    // MarkAllocStackAsLive().
    GetHeap()->GetLargeObjectsSpace()->CopyLiveToMarked();

    // When the large object space is immune, we need to scan the large object space as roots as
    // they contain references to their classes (primitive array classes) that could move, though
    // they don't contain any other references.
    space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
    accounting::ObjectSet* large_live_objects = large_object_space->GetLiveObjects();
    SemiSpaceScanObjectVisitor visitor(this);
    for (const Object* obj : large_live_objects->GetObjects()) {
      visitor(const_cast<Object*>(obj));
    }
  }

  // Recursively process the mark stack.
  ProcessMarkStack(true);
}

void SemiSpace::ReclaimPhase() {
  TimingLogger::ScopedSplit split("ReclaimPhase", &timings_);
  ProcessReferences(self_);
  {
    ReaderMutexLock mu(self_, *Locks::heap_bitmap_lock_);
    SweepSystemWeaks();
  }
  // Record freed memory.
  uint64_t from_bytes = from_space_->GetBytesAllocated();
  uint64_t to_bytes = to_space_->GetBytesAllocated();
  uint64_t from_objects = from_space_->GetObjectsAllocated();
  uint64_t to_objects = to_space_->GetObjectsAllocated();
  CHECK_LE(to_objects, from_objects);
  int64_t freed_bytes = from_bytes - to_bytes;
  int64_t freed_objects = from_objects - to_objects;
  freed_bytes_.FetchAndAdd(freed_bytes);
  freed_objects_.FetchAndAdd(freed_objects);
  // Note: Freed bytes can be negative if we copy from a compacted space to a free-list backed
  // space.
  heap_->RecordFree(freed_objects, freed_bytes);
  timings_.StartSplit("PreSweepingGcVerification");
  heap_->PreSweepingGcVerification(this);
  timings_.EndSplit();

  {
    WriterMutexLock mu(self_, *Locks::heap_bitmap_lock_);
    // Reclaim unmarked objects.
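    // Pass swap_bitmaps == false: the live and mark bitmaps have not been swapped yet, so the
    // sweep frees objects that are present in the live bitmap but absent from the mark bitmap;
    // the swap itself happens just below in SwapBitmaps().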
    Sweep(false);
    // Swap the live and mark bitmaps for each space which we modified. This is an optimization
    // that enables us to not clear live bits inside of the sweep. Only swaps unbound bitmaps.
    timings_.StartSplit("SwapBitmaps");
    SwapBitmaps();
    timings_.EndSplit();
    // Unbind the live and mark bitmaps.
    TimingLogger::ScopedSplit split("UnBindBitmaps", &timings_);
    GetHeap()->UnBindBitmaps();
  }
  // Release the memory used by the from space.
  if (kResetFromSpace) {
    // Clearing from space.
    from_space_->Clear();
  }
  // Protect the from space.
  VLOG(heap)
      << "mprotect region " << reinterpret_cast<void*>(from_space_->Begin()) << " - "
      << reinterpret_cast<void*>(from_space_->Limit());
  if (kProtectFromSpace) {
    mprotect(from_space_->Begin(), from_space_->Capacity(), PROT_NONE);
  } else {
    mprotect(from_space_->Begin(), from_space_->Capacity(), PROT_READ);
  }
  if (saved_bytes_ > 0) {
    VLOG(heap) << "Avoided dirtying " << PrettySize(saved_bytes_);
  }

  if (generational_) {
    // Record the end (top) of the to space so we can distinguish between objects that were
    // allocated since the last GC and the older objects.
    last_gc_to_space_end_ = to_space_->End();
  }
}

void SemiSpace::ResizeMarkStack(size_t new_size) {
  std::vector<Object*> temp(mark_stack_->Begin(), mark_stack_->End());
  CHECK_LE(mark_stack_->Size(), new_size);
  mark_stack_->Resize(new_size);
  for (const auto& obj : temp) {
    mark_stack_->PushBack(obj);
  }
}

inline void SemiSpace::MarkStackPush(Object* obj) {
  if (UNLIKELY(mark_stack_->Size() >= mark_stack_->Capacity())) {
    ResizeMarkStack(mark_stack_->Capacity() * 2);
  }
  // The object must be pushed onto the mark stack.
  mark_stack_->PushBack(obj);
}

// Rare case, probably not worth inlining since it will increase instruction cache miss rate.
bool SemiSpace::MarkLargeObject(const Object* obj) {
  // TODO: support >1 discontinuous space.
  space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
  DCHECK(large_object_space->Contains(obj));
  accounting::ObjectSet* large_objects = large_object_space->GetMarkObjects();
  if (UNLIKELY(!large_objects->Test(obj))) {
    large_objects->Set(obj);
    return true;
  }
  return false;
}

static inline size_t CopyAvoidingDirtyingPages(void* dest, const void* src, size_t size) {
  if (LIKELY(size <= static_cast<size_t>(kPageSize))) {
    // We will dirty the current page and somewhere in the middle of the next page. This means
    // that the next object copied will also dirty that page.
    // TODO: Worth considering the last object copied? We may end up dirtying one page which is
    // not necessary per GC.
    memcpy(dest, src, size);
    return 0;
  }
  size_t saved_bytes = 0;
  byte* byte_dest = reinterpret_cast<byte*>(dest);
  if (kIsDebugBuild) {
    for (size_t i = 0; i < size; ++i) {
      CHECK_EQ(byte_dest[i], 0U);
    }
  }
  // Process the start of the page. The page must already be dirty, don't bother with checking.
  const byte* byte_src = reinterpret_cast<const byte*>(src);
  const byte* limit = byte_src + size;
  size_t page_remain = AlignUp(byte_dest, kPageSize) - byte_dest;
  // Copy the bytes until the start of the next page.
  memcpy(dest, src, page_remain);
  byte_src += page_remain;
  byte_dest += page_remain;
  CHECK_ALIGNED(reinterpret_cast<uintptr_t>(byte_dest), kPageSize);
  CHECK_ALIGNED(reinterpret_cast<uintptr_t>(byte_dest), sizeof(uintptr_t));
  CHECK_ALIGNED(reinterpret_cast<uintptr_t>(byte_src), sizeof(uintptr_t));
  while (byte_src + kPageSize < limit) {
    bool all_zero = true;
    uintptr_t* word_dest = reinterpret_cast<uintptr_t*>(byte_dest);
    const uintptr_t* word_src = reinterpret_cast<const uintptr_t*>(byte_src);
    for (size_t i = 0; i < kPageSize / sizeof(*word_src); ++i) {
      // Assumes the destination of the copy is all zeros.
      if (word_src[i] != 0) {
        all_zero = false;
        word_dest[i] = word_src[i];
      }
    }
    if (all_zero) {
      // Avoided copying into the page since it was all zeros.
      saved_bytes += kPageSize;
    }
    byte_src += kPageSize;
    byte_dest += kPageSize;
  }
  // Handle the part of the page at the end.
  memcpy(byte_dest, byte_src, limit - byte_src);
  return saved_bytes;
}

mirror::Object* SemiSpace::MarkNonForwardedObject(mirror::Object* obj) {
  size_t object_size = obj->SizeOf();
  size_t bytes_allocated;
  mirror::Object* forward_address = nullptr;
  if (generational_ && reinterpret_cast<byte*>(obj) < last_gc_to_space_end_) {
    // If it's allocated before the last GC (older), move (pseudo-promote) it to the main free
    // list space (as sort of an old generation).
    size_t bytes_promoted;
    space::MallocSpace* promo_dest_space = GetHeap()->GetPrimaryFreeListSpace();
    forward_address = promo_dest_space->Alloc(self_, object_size, &bytes_promoted);
    if (forward_address == nullptr) {
      // If out of space, fall back to the to-space.
      forward_address = to_space_->Alloc(self_, object_size, &bytes_allocated);
    } else {
      GetHeap()->num_bytes_allocated_.FetchAndAdd(bytes_promoted);
      bytes_promoted_ += bytes_promoted;
      // Handle the bitmaps marking.
      accounting::SpaceBitmap* live_bitmap = promo_dest_space->GetLiveBitmap();
      DCHECK(live_bitmap != nullptr);
      accounting::SpaceBitmap* mark_bitmap = promo_dest_space->GetMarkBitmap();
      DCHECK(mark_bitmap != nullptr);
      DCHECK(!live_bitmap->Test(forward_address));
      if (!whole_heap_collection_) {
        // If collecting the bump pointer spaces only, live_bitmap == mark_bitmap.
        DCHECK_EQ(live_bitmap, mark_bitmap);

        // In a bump pointer space only collection, delay the live bitmap marking of the promoted
        // object until it's popped off the mark stack (ProcessMarkStack()). The rationale: we
        // may be in the middle of scanning the objects in the promo destination space for
        // non-moving-space-to-bump-pointer-space references by iterating over the marked bits of
        // the live bitmap (MarkReachableObjects()). If we don't delay it (and instead mark the
        // promoted object here), the above promo destination space scan could encounter the
        // just-promoted object and forward the references in the promoted object's fields even
        // though it is pushed onto the mark stack. If this happens, the promoted object would be
        // in an inconsistent state, that is, it's on the mark stack (gray) but its fields are
        // already forwarded (black), which would cause a DCHECK(!to_space_->HasAddress(obj))
        // failure below.
      } else {
        // Mark forward_address on the live bit map.
        live_bitmap->Set(forward_address);
        // Mark forward_address on the mark bit map.
        DCHECK(!mark_bitmap->Test(forward_address));
        mark_bitmap->Set(forward_address);
      }
    }
    DCHECK(forward_address != nullptr);
  } else {
    // If it's allocated after the last GC (younger), copy it to the to-space.
    forward_address = to_space_->Alloc(self_, object_size, &bytes_allocated);
  }
  // Copy over the object and add it to the mark stack since we still need to update its
  // references.
  saved_bytes_ +=
      CopyAvoidingDirtyingPages(reinterpret_cast<void*>(forward_address), obj, object_size);
  if (to_space_live_bitmap_ != nullptr) {
    to_space_live_bitmap_->Set(forward_address);
  }
  DCHECK(to_space_->HasAddress(forward_address) ||
         (generational_ && GetHeap()->GetPrimaryFreeListSpace()->HasAddress(forward_address)));
  return forward_address;
}

// Used to mark and copy objects. Any newly-marked objects that are in the from space get moved
// to the to-space and have their forward address updated. Objects which have been newly marked
// are pushed on the mark stack.
Object* SemiSpace::MarkObject(Object* obj) {
  Object* forward_address = obj;
  if (obj != nullptr && !IsImmune(obj)) {
    if (from_space_->HasAddress(obj)) {
      forward_address = GetForwardingAddressInFromSpace(obj);
      // If the object has already been moved, return the new forward address.
      if (forward_address == nullptr) {
        forward_address = MarkNonForwardedObject(obj);
        DCHECK(forward_address != nullptr);
        // Make sure to only update the forwarding address AFTER you copy the object so that the
        // monitor word doesn't get stomped over.
        obj->SetLockWord(LockWord::FromForwardingAddress(
            reinterpret_cast<size_t>(forward_address)));
        // Push the object onto the mark stack for later processing.
        MarkStackPush(forward_address);
      }
      // TODO: Do we need this if in the else statement?
    } else {
      accounting::SpaceBitmap* object_bitmap =
          heap_->GetMarkBitmap()->GetContinuousSpaceBitmap(obj);
      if (LIKELY(object_bitmap != nullptr)) {
        if (generational_) {
          // In a bump pointer space only collection, we should not reach here as we don't/won't
          // mark the objects in the non-moving space (except for the promoted objects). Note the
          // non-moving space is added to the immune space.
          DCHECK(whole_heap_collection_);
        }
        // This object was not previously marked.
        if (!object_bitmap->Test(obj)) {
          object_bitmap->Set(obj);
          MarkStackPush(obj);
        }
      } else {
        CHECK(!to_space_->HasAddress(obj)) << "Marking object in to_space_";
        if (MarkLargeObject(obj)) {
          MarkStackPush(obj);
        }
      }
    }
  }
  return forward_address;
}

mirror::Object* SemiSpace::RecursiveMarkObjectCallback(mirror::Object* root, void* arg) {
  DCHECK(root != nullptr);
  DCHECK(arg != nullptr);
  SemiSpace* semi_space = reinterpret_cast<SemiSpace*>(arg);
  mirror::Object* ret = semi_space->MarkObject(root);
  semi_space->ProcessMarkStack(true);
  return ret;
}

void SemiSpace::MarkRootCallback(Object** root, void* arg, uint32_t /*thread_id*/,
                                 RootType /*root_type*/) {
  DCHECK(root != nullptr);
  DCHECK(arg != nullptr);
  *root = reinterpret_cast<SemiSpace*>(arg)->MarkObject(*root);
}

Object* SemiSpace::MarkObjectCallback(Object* object, void* arg) {
  DCHECK(object != nullptr);
  DCHECK(arg != nullptr);
  return reinterpret_cast<SemiSpace*>(arg)->MarkObject(object);
}

// Marks all objects in the root set.
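// The two trailing boolean arguments passed to Runtime::VisitRoots() below are assumed, for this
// revision, to be the only_dirty and clean_dirty flags; (false, true) would visit all roots and
// mark the dirty ones clean.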
void SemiSpace::MarkRoots() {
  timings_.StartSplit("MarkRoots");
  // TODO: Visit image roots as well?
  Runtime::Current()->VisitRoots(MarkRootCallback, this, false, true);
  timings_.EndSplit();
}

mirror::Object* SemiSpace::MarkedForwardingAddressCallback(mirror::Object* object, void* arg) {
  return reinterpret_cast<SemiSpace*>(arg)->GetMarkedForwardAddress(object);
}

void SemiSpace::SweepSystemWeaks() {
  timings_.StartSplit("SweepSystemWeaks");
  Runtime::Current()->SweepSystemWeaks(MarkedForwardingAddressCallback, this);
  timings_.EndSplit();
}

bool SemiSpace::ShouldSweepSpace(space::ContinuousSpace* space) const {
  return space != from_space_ && space != to_space_ && !IsImmuneSpace(space);
}

void SemiSpace::Sweep(bool swap_bitmaps) {
  DCHECK(mark_stack_->IsEmpty());
  // Name the ScopedSplit so it lives for the duration of this function rather than being
  // destroyed immediately as an unnamed temporary.
  TimingLogger::ScopedSplit split("Sweep", &timings_);
  for (const auto& space : GetHeap()->GetContinuousSpaces()) {
    if (space->IsContinuousMemMapAllocSpace()) {
      space::ContinuousMemMapAllocSpace* alloc_space = space->AsContinuousMemMapAllocSpace();
      if (!ShouldSweepSpace(alloc_space)) {
        continue;
      }
      TimingLogger::ScopedSplit split(
          alloc_space->IsZygoteSpace() ? "SweepZygoteSpace" : "SweepAllocSpace", &timings_);
      size_t freed_objects = 0;
      size_t freed_bytes = 0;
      alloc_space->Sweep(swap_bitmaps, &freed_objects, &freed_bytes);
      heap_->RecordFree(freed_objects, freed_bytes);
      freed_objects_.FetchAndAdd(freed_objects);
      freed_bytes_.FetchAndAdd(freed_bytes);
    }
  }
  if (!is_large_object_space_immune_) {
    SweepLargeObjects(swap_bitmaps);
  }
}

void SemiSpace::SweepLargeObjects(bool swap_bitmaps) {
  DCHECK(!is_large_object_space_immune_);
  TimingLogger::ScopedSplit split("SweepLargeObjects", &timings_);
  size_t freed_objects = 0;
  size_t freed_bytes = 0;
  GetHeap()->GetLargeObjectsSpace()->Sweep(swap_bitmaps, &freed_objects, &freed_bytes);
  freed_large_objects_.FetchAndAdd(freed_objects);
  freed_large_object_bytes_.FetchAndAdd(freed_bytes);
  GetHeap()->RecordFree(freed_objects, freed_bytes);
}

// Process the "referent" field in a java.lang.ref.Reference. If the referent has not yet been
// marked, put it on the appropriate list in the heap for later processing.
void SemiSpace::DelayReferenceReferent(mirror::Class* klass, Object* obj) {
  heap_->DelayReferenceReferent(klass, obj, MarkedForwardingAddressCallback, this);
}

// Visit all of the references of an object and update.
void SemiSpace::ScanObject(Object* obj) {
  DCHECK(obj != nullptr);
  DCHECK(!from_space_->HasAddress(obj)) << "Scanning object " << obj << " in from space";
  MarkSweep::VisitObjectReferences(obj, [this](Object* obj, Object* ref,
      const MemberOffset& offset, bool /* is_static */)
          ALWAYS_INLINE_LAMBDA NO_THREAD_SAFETY_ANALYSIS {
    mirror::Object* new_address = MarkObject(ref);
    if (new_address != ref) {
      DCHECK(new_address != nullptr);
      // Don't need to mark the card since we are updating the object address and not changing
      // the actual objects it's pointing to. Using SetFieldObjectWithoutWriteBarrier is better in
      // this case since it does not dirty cards or use additional memory.
      // Since we do not change the actual object, we can safely use non-transactional mode. Also
      // disable the check as we could run inside a transaction.
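      // (The template arguments <false, false> select non-transactional mode and disable the
      // transaction check, per the comment above; the trailing 'false' argument is assumed to be
      // the is_volatile flag in this revision's SetFieldObjectWithoutWriteBarrier signature.)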
      obj->SetFieldObjectWithoutWriteBarrier<false, false>(offset, new_address, false);
    }
  }, kMovingClasses);
  mirror::Class* klass = obj->GetClass();
  if (UNLIKELY(klass->IsReferenceClass())) {
    DelayReferenceReferent(klass, obj);
  }
}

// Scan anything that's on the mark stack.
void SemiSpace::ProcessMarkStack(bool paused) {
  space::MallocSpace* promo_dest_space = nullptr;
  accounting::SpaceBitmap* live_bitmap = nullptr;
  if (generational_ && !whole_heap_collection_) {
    // In a bump pointer space only collection (with promotion enabled), we delay the live-bitmap
    // marking of promoted objects from MarkObject() until this function.
    promo_dest_space = GetHeap()->GetPrimaryFreeListSpace();
    live_bitmap = promo_dest_space->GetLiveBitmap();
    DCHECK(live_bitmap != nullptr);
    accounting::SpaceBitmap* mark_bitmap = promo_dest_space->GetMarkBitmap();
    DCHECK(mark_bitmap != nullptr);
    DCHECK_EQ(live_bitmap, mark_bitmap);
  }
  timings_.StartSplit(paused ? "(paused)ProcessMarkStack" : "ProcessMarkStack");
  while (!mark_stack_->IsEmpty()) {
    Object* obj = mark_stack_->PopBack();
    if (generational_ && !whole_heap_collection_ && promo_dest_space->HasAddress(obj)) {
      // obj has just been promoted. Mark the live bitmap for it, which is delayed from
      // MarkObject().
      DCHECK(!live_bitmap->Test(obj));
      live_bitmap->Set(obj);
    }
    ScanObject(obj);
  }
  timings_.EndSplit();
}

inline Object* SemiSpace::GetMarkedForwardAddress(mirror::Object* obj) const
    SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
  // All immune objects are assumed marked.
  if (IsImmune(obj)) {
    return obj;
  }
  if (from_space_->HasAddress(obj)) {
    mirror::Object* forwarding_address = GetForwardingAddressInFromSpace(const_cast<Object*>(obj));
    return forwarding_address;  // Returns either the forwarding address or nullptr.
  } else if (to_space_->HasAddress(obj)) {
    // Should be unlikely.
    // Already forwarded, must be marked.
    return obj;
  }
  return heap_->GetMarkBitmap()->Test(obj) ? obj : nullptr;
}

void SemiSpace::SetToSpace(space::ContinuousMemMapAllocSpace* to_space) {
  DCHECK(to_space != nullptr);
  to_space_ = to_space;
}

void SemiSpace::SetFromSpace(space::ContinuousMemMapAllocSpace* from_space) {
  DCHECK(from_space != nullptr);
  from_space_ = from_space;
}

void SemiSpace::FinishPhase() {
  TimingLogger::ScopedSplit split("FinishPhase", &timings_);
  Heap* heap = GetHeap();
  timings_.NewSplit("PostGcVerification");
  heap->PostGcVerification(this);

  // Null the "to" and "from" spaces since compacting from one to the other isn't valid until
  // further action is done by the heap.
  to_space_ = nullptr;
  from_space_ = nullptr;

  // Update the cumulative statistics.
  total_freed_objects_ += GetFreedObjects() + GetFreedLargeObjects();
  total_freed_bytes_ += GetFreedBytes() + GetFreedLargeObjectBytes();

  // Ensure that the mark stack is empty.
  CHECK(mark_stack_->IsEmpty());

  // Update the cumulative loggers.
  cumulative_timings_.Start();
  cumulative_timings_.AddLogger(timings_);
  cumulative_timings_.End();

  // Clear all of the spaces' mark bitmaps.
  for (const auto& space : GetHeap()->GetContinuousSpaces()) {
    accounting::SpaceBitmap* bitmap = space->GetMarkBitmap();
    if (bitmap != nullptr &&
        space->GetGcRetentionPolicy() != space::kGcRetentionPolicyNeverCollect) {
      bitmap->Clear();
    }
  }
  mark_stack_->Reset();

  // Reset the marked large objects.
  space::LargeObjectSpace* large_objects = GetHeap()->GetLargeObjectsSpace();
  large_objects->GetMarkObjects()->Clear();

  if (generational_) {
    // Decide whether to do a whole heap collection or a bump pointer space only collection at
    // the next collection by updating whole_heap_collection. Enable whole_heap_collection once
    // every kDefaultWholeHeapCollectionInterval collections.
    if (!whole_heap_collection_) {
      --whole_heap_collection_interval_counter_;
      DCHECK_GE(whole_heap_collection_interval_counter_, 0);
      if (whole_heap_collection_interval_counter_ == 0) {
        whole_heap_collection_ = true;
      }
    } else {
      DCHECK_EQ(whole_heap_collection_interval_counter_, 0);
      whole_heap_collection_interval_counter_ = kDefaultWholeHeapCollectionInterval;
      whole_heap_collection_ = false;
    }
  }
}

}  // namespace collector
}  // namespace gc
}  // namespace art