semi_space.cc revision 8d562103c3a3452fb15ef4b1c64df767b70507a4
1/* 2 * Copyright (C) 2013 The Android Open Source Project 3 * 4 * Licensed under the Apache License, Version 2.0 (the "License"); 5 * you may not use this file except in compliance with the License. 6 * You may obtain a copy of the License at 7 * 8 * http://www.apache.org/licenses/LICENSE-2.0 9 * 10 * Unless required by applicable law or agreed to in writing, software 11 * distributed under the License is distributed on an "AS IS" BASIS, 12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 * See the License for the specific language governing permissions and 14 * limitations under the License. 15 */ 16 17#include "semi_space.h" 18 19#include <functional> 20#include <numeric> 21#include <climits> 22#include <vector> 23 24#include "base/logging.h" 25#include "base/macros.h" 26#include "base/mutex-inl.h" 27#include "base/timing_logger.h" 28#include "gc/accounting/heap_bitmap.h" 29#include "gc/accounting/mod_union_table.h" 30#include "gc/accounting/space_bitmap-inl.h" 31#include "gc/heap.h" 32#include "gc/space/bump_pointer_space.h" 33#include "gc/space/bump_pointer_space-inl.h" 34#include "gc/space/image_space.h" 35#include "gc/space/large_object_space.h" 36#include "gc/space/space-inl.h" 37#include "indirect_reference_table.h" 38#include "intern_table.h" 39#include "jni_internal.h" 40#include "mark_sweep-inl.h" 41#include "monitor.h" 42#include "mirror/art_field.h" 43#include "mirror/art_field-inl.h" 44#include "mirror/class-inl.h" 45#include "mirror/class_loader.h" 46#include "mirror/dex_cache.h" 47#include "mirror/object-inl.h" 48#include "mirror/object_array.h" 49#include "mirror/object_array-inl.h" 50#include "runtime.h" 51#include "semi_space-inl.h" 52#include "thread-inl.h" 53#include "thread_list.h" 54#include "verifier/method_verifier.h" 55 56using ::art::mirror::Class; 57using ::art::mirror::Object; 58 59namespace art { 60namespace gc { 61namespace collector { 62 63static constexpr bool kProtectFromSpace = true; 64static constexpr bool kClearFromSpace = true; 65static constexpr bool kStoreStackTraces = false; 66 67void SemiSpace::BindBitmaps() { 68 timings_.StartSplit("BindBitmaps"); 69 WriterMutexLock mu(self_, *Locks::heap_bitmap_lock_); 70 // Mark all of the spaces we never collect as immune. 71 for (const auto& space : GetHeap()->GetContinuousSpaces()) { 72 if (space->GetLiveBitmap() != nullptr) { 73 if (space == to_space_) { 74 CHECK(to_space_->IsContinuousMemMapAllocSpace()); 75 to_space_->AsContinuousMemMapAllocSpace()->BindLiveToMarkBitmap(); 76 } else if (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyNeverCollect 77 || space->GetGcRetentionPolicy() == space::kGcRetentionPolicyFullCollect 78 // Add the main free list space and the non-moving 79 // space to the immune space if a bump pointer space 80 // only collection. 81 || (generational_ && !whole_heap_collection_ && 82 (space == GetHeap()->GetNonMovingSpace() || 83 space == GetHeap()->GetPrimaryFreeListSpace()))) { 84 CHECK(immune_region_.AddContinuousSpace(space)) << "Failed to add space " << *space; 85 } 86 } 87 } 88 if (generational_ && !whole_heap_collection_) { 89 // We won't collect the large object space if a bump pointer space only collection. 90 is_large_object_space_immune_ = true; 91 } 92 timings_.EndSplit(); 93} 94 95SemiSpace::SemiSpace(Heap* heap, bool generational, const std::string& name_prefix) 96 : GarbageCollector(heap, 97 name_prefix + (name_prefix.empty() ? "" : " ") + "marksweep + semispace"), 98 mark_stack_(nullptr), 99 is_large_object_space_immune_(false), 100 to_space_(nullptr), 101 to_space_live_bitmap_(nullptr), 102 from_space_(nullptr), 103 self_(nullptr), 104 generational_(generational), 105 last_gc_to_space_end_(nullptr), 106 bytes_promoted_(0), 107 whole_heap_collection_(true), 108 whole_heap_collection_interval_counter_(0), 109 saved_bytes_(0) { 110} 111 112void SemiSpace::InitializePhase() { 113 timings_.Reset(); 114 TimingLogger::ScopedSplit split("InitializePhase", &timings_); 115 mark_stack_ = heap_->mark_stack_.get(); 116 DCHECK(mark_stack_ != nullptr); 117 immune_region_.Reset(); 118 is_large_object_space_immune_ = false; 119 saved_bytes_ = 0; 120 self_ = Thread::Current(); 121 // Do any pre GC verification. 122 timings_.NewSplit("PreGcVerification"); 123 heap_->PreGcVerification(this); 124 // Set the initial bitmap. 125 to_space_live_bitmap_ = to_space_->GetLiveBitmap(); 126} 127 128void SemiSpace::ProcessReferences(Thread* self) { 129 TimingLogger::ScopedSplit split("ProcessReferences", &timings_); 130 WriterMutexLock mu(self, *Locks::heap_bitmap_lock_); 131 GetHeap()->ProcessReferences(timings_, clear_soft_references_, &MarkedForwardingAddressCallback, 132 &MarkObjectCallback, &ProcessMarkStackCallback, this); 133} 134 135void SemiSpace::MarkingPhase() { 136 if (kStoreStackTraces) { 137 Locks::mutator_lock_->AssertExclusiveHeld(self_); 138 // Store the stack traces into the runtime fault string in case we get a heap corruption 139 // related crash later. 140 ThreadState old_state = self_->SetStateUnsafe(kRunnable); 141 std::ostringstream oss; 142 Runtime* runtime = Runtime::Current(); 143 runtime->GetThreadList()->DumpForSigQuit(oss); 144 runtime->GetThreadList()->DumpNativeStacks(oss); 145 runtime->SetFaultMessage(oss.str()); 146 CHECK_EQ(self_->SetStateUnsafe(old_state), kRunnable); 147 } 148 149 if (generational_) { 150 if (gc_cause_ == kGcCauseExplicit || gc_cause_ == kGcCauseForNativeAlloc || 151 clear_soft_references_) { 152 // If an explicit, native allocation-triggered, or last attempt 153 // collection, collect the whole heap (and reset the interval 154 // counter to be consistent.) 155 whole_heap_collection_ = true; 156 whole_heap_collection_interval_counter_ = 0; 157 } 158 if (whole_heap_collection_) { 159 VLOG(heap) << "Whole heap collection"; 160 } else { 161 VLOG(heap) << "Bump pointer space only collection"; 162 } 163 } 164 Locks::mutator_lock_->AssertExclusiveHeld(self_); 165 166 TimingLogger::ScopedSplit split("MarkingPhase", &timings_); 167 // Need to do this with mutators paused so that somebody doesn't accidentally allocate into the 168 // wrong space. 169 heap_->SwapSemiSpaces(); 170 if (generational_) { 171 // If last_gc_to_space_end_ is out of the bounds of the from-space 172 // (the to-space from last GC), then point it to the beginning of 173 // the from-space. For example, the very first GC or the 174 // pre-zygote compaction. 175 if (!from_space_->HasAddress(reinterpret_cast<mirror::Object*>(last_gc_to_space_end_))) { 176 last_gc_to_space_end_ = from_space_->Begin(); 177 } 178 // Reset this before the marking starts below. 179 bytes_promoted_ = 0; 180 } 181 // Assume the cleared space is already empty. 182 BindBitmaps(); 183 // Process dirty cards and add dirty cards to mod-union tables. 184 heap_->ProcessCards(timings_); 185 // Clear the whole card table since we can not get any additional dirty cards during the 186 // paused GC. This saves memory but only works for pause the world collectors. 187 timings_.NewSplit("ClearCardTable"); 188 heap_->GetCardTable()->ClearCardTable(); 189 // Need to do this before the checkpoint since we don't want any threads to add references to 190 // the live stack during the recursive mark. 191 timings_.NewSplit("SwapStacks"); 192 if (kUseThreadLocalAllocationStack) { 193 heap_->RevokeAllThreadLocalAllocationStacks(self_); 194 } 195 heap_->SwapStacks(self_); 196 WriterMutexLock mu(self_, *Locks::heap_bitmap_lock_); 197 MarkRoots(); 198 // Mark roots of immune spaces. 199 UpdateAndMarkModUnion(); 200 // Recursively mark remaining objects. 201 MarkReachableObjects(); 202} 203 204void SemiSpace::UpdateAndMarkModUnion() { 205 for (auto& space : heap_->GetContinuousSpaces()) { 206 // If the space is immune then we need to mark the references to other spaces. 207 if (immune_region_.ContainsSpace(space)) { 208 accounting::ModUnionTable* table = heap_->FindModUnionTableFromSpace(space); 209 if (table != nullptr) { 210 // TODO: Improve naming. 211 TimingLogger::ScopedSplit split( 212 space->IsZygoteSpace() ? "UpdateAndMarkZygoteModUnionTable" : 213 "UpdateAndMarkImageModUnionTable", 214 &timings_); 215 table->UpdateAndMarkReferences(MarkObjectCallback, this); 216 } else { 217 // If a bump pointer space only collection, the non-moving 218 // space is added to the immune space. But the non-moving 219 // space doesn't have a mod union table. Instead, its live 220 // bitmap will be scanned later in MarkReachableObjects(). 221 DCHECK(generational_ && !whole_heap_collection_ && 222 (space == heap_->GetNonMovingSpace() || space == heap_->GetPrimaryFreeListSpace())); 223 } 224 } 225 } 226} 227 228class SemiSpaceScanObjectVisitor { 229 public: 230 explicit SemiSpaceScanObjectVisitor(SemiSpace* ss) : semi_space_(ss) {} 231 void operator()(Object* obj) const NO_THREAD_SAFETY_ANALYSIS { 232 // TODO: fix NO_THREAD_SAFETY_ANALYSIS. ScanObject() requires an 233 // exclusive lock on the mutator lock, but 234 // SpaceBitmap::VisitMarkedRange() only requires the shared lock. 235 DCHECK(obj != nullptr); 236 semi_space_->ScanObject(obj); 237 } 238 private: 239 SemiSpace* const semi_space_; 240}; 241 242void SemiSpace::MarkReachableObjects() { 243 timings_.StartSplit("MarkStackAsLive"); 244 accounting::ObjectStack* live_stack = heap_->GetLiveStack(); 245 heap_->MarkAllocStackAsLive(live_stack); 246 live_stack->Reset(); 247 timings_.EndSplit(); 248 249 for (auto& space : heap_->GetContinuousSpaces()) { 250 // If the space is immune and has no mod union table (the 251 // non-moving space when the bump pointer space only collection is 252 // enabled,) then we need to scan its live bitmap as roots 253 // (including the objects on the live stack which have just marked 254 // in the live bitmap above in MarkAllocStackAsLive().) 255 if (immune_region_.ContainsSpace(space) && 256 heap_->FindModUnionTableFromSpace(space) == nullptr) { 257 DCHECK(generational_ && !whole_heap_collection_ && 258 (space == GetHeap()->GetNonMovingSpace() || space == GetHeap()->GetPrimaryFreeListSpace())); 259 accounting::SpaceBitmap* live_bitmap = space->GetLiveBitmap(); 260 SemiSpaceScanObjectVisitor visitor(this); 261 live_bitmap->VisitMarkedRange(reinterpret_cast<uintptr_t>(space->Begin()), 262 reinterpret_cast<uintptr_t>(space->End()), 263 visitor); 264 } 265 } 266 267 if (is_large_object_space_immune_) { 268 DCHECK(generational_ && !whole_heap_collection_); 269 // Delay copying the live set to the marked set until here from 270 // BindBitmaps() as the large objects on the allocation stack may 271 // be newly added to the live set above in MarkAllocStackAsLive(). 272 GetHeap()->GetLargeObjectsSpace()->CopyLiveToMarked(); 273 274 // When the large object space is immune, we need to scan the 275 // large object space as roots as they contain references to their 276 // classes (primitive array classes) that could move though they 277 // don't contain any other references. 278 space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace(); 279 accounting::ObjectSet* large_live_objects = large_object_space->GetLiveObjects(); 280 SemiSpaceScanObjectVisitor visitor(this); 281 for (const Object* obj : large_live_objects->GetObjects()) { 282 visitor(const_cast<Object*>(obj)); 283 } 284 } 285 286 // Recursively process the mark stack. 287 ProcessMarkStack(); 288} 289 290void SemiSpace::ReclaimPhase() { 291 TimingLogger::ScopedSplit split("ReclaimPhase", &timings_); 292 ProcessReferences(self_); 293 { 294 ReaderMutexLock mu(self_, *Locks::heap_bitmap_lock_); 295 SweepSystemWeaks(); 296 } 297 // Record freed memory. 298 uint64_t from_bytes = from_space_->GetBytesAllocated(); 299 uint64_t to_bytes = to_space_->GetBytesAllocated(); 300 uint64_t from_objects = from_space_->GetObjectsAllocated(); 301 uint64_t to_objects = to_space_->GetObjectsAllocated(); 302 CHECK_LE(to_objects, from_objects); 303 int64_t freed_bytes = from_bytes - to_bytes; 304 int64_t freed_objects = from_objects - to_objects; 305 freed_bytes_.FetchAndAdd(freed_bytes); 306 freed_objects_.FetchAndAdd(freed_objects); 307 // Note: Freed bytes can be negative if we copy form a compacted space to a free-list backed 308 // space. 309 heap_->RecordFree(freed_objects, freed_bytes); 310 timings_.StartSplit("PreSweepingGcVerification"); 311 heap_->PreSweepingGcVerification(this); 312 timings_.EndSplit(); 313 314 { 315 WriterMutexLock mu(self_, *Locks::heap_bitmap_lock_); 316 // Reclaim unmarked objects. 317 Sweep(false); 318 // Swap the live and mark bitmaps for each space which we modified space. This is an 319 // optimization that enables us to not clear live bits inside of the sweep. Only swaps unbound 320 // bitmaps. 321 timings_.StartSplit("SwapBitmaps"); 322 SwapBitmaps(); 323 timings_.EndSplit(); 324 // Unbind the live and mark bitmaps. 325 TimingLogger::ScopedSplit split("UnBindBitmaps", &timings_); 326 GetHeap()->UnBindBitmaps(); 327 } 328 if (kClearFromSpace) { 329 // Release the memory used by the from space. 330 from_space_->Clear(); 331 } 332 from_space_->Reset(); 333 // Protect the from space. 334 VLOG(heap) << "Protecting space " << *from_space_; 335 if (kProtectFromSpace) { 336 from_space_->GetMemMap()->Protect(PROT_NONE); 337 } else { 338 from_space_->GetMemMap()->Protect(PROT_READ); 339 } 340 if (saved_bytes_ > 0) { 341 VLOG(heap) << "Avoided dirtying " << PrettySize(saved_bytes_); 342 } 343 344 if (generational_) { 345 // Record the end (top) of the to space so we can distinguish 346 // between objects that were allocated since the last GC and the 347 // older objects. 348 last_gc_to_space_end_ = to_space_->End(); 349 } 350} 351 352void SemiSpace::ResizeMarkStack(size_t new_size) { 353 std::vector<Object*> temp(mark_stack_->Begin(), mark_stack_->End()); 354 CHECK_LE(mark_stack_->Size(), new_size); 355 mark_stack_->Resize(new_size); 356 for (const auto& obj : temp) { 357 mark_stack_->PushBack(obj); 358 } 359} 360 361inline void SemiSpace::MarkStackPush(Object* obj) { 362 if (UNLIKELY(mark_stack_->Size() >= mark_stack_->Capacity())) { 363 ResizeMarkStack(mark_stack_->Capacity() * 2); 364 } 365 // The object must be pushed on to the mark stack. 366 mark_stack_->PushBack(obj); 367} 368 369// Rare case, probably not worth inlining since it will increase instruction cache miss rate. 370bool SemiSpace::MarkLargeObject(const Object* obj) { 371 // TODO: support >1 discontinuous space. 372 space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace(); 373 DCHECK(large_object_space->Contains(obj)); 374 accounting::ObjectSet* large_objects = large_object_space->GetMarkObjects(); 375 if (UNLIKELY(!large_objects->Test(obj))) { 376 large_objects->Set(obj); 377 return true; 378 } 379 return false; 380} 381 382static inline size_t CopyAvoidingDirtyingPages(void* dest, const void* src, size_t size) { 383 if (LIKELY(size <= static_cast<size_t>(kPageSize))) { 384 // We will dirty the current page and somewhere in the middle of the next page. This means 385 // that the next object copied will also dirty that page. 386 // TODO: Worth considering the last object copied? We may end up dirtying one page which is 387 // not necessary per GC. 388 memcpy(dest, src, size); 389 return 0; 390 } 391 size_t saved_bytes = 0; 392 byte* byte_dest = reinterpret_cast<byte*>(dest); 393 if (kIsDebugBuild) { 394 for (size_t i = 0; i < size; ++i) { 395 CHECK_EQ(byte_dest[i], 0U); 396 } 397 } 398 // Process the start of the page. The page must already be dirty, don't bother with checking. 399 const byte* byte_src = reinterpret_cast<const byte*>(src); 400 const byte* limit = byte_src + size; 401 size_t page_remain = AlignUp(byte_dest, kPageSize) - byte_dest; 402 // Copy the bytes until the start of the next page. 403 memcpy(dest, src, page_remain); 404 byte_src += page_remain; 405 byte_dest += page_remain; 406 CHECK_ALIGNED(reinterpret_cast<uintptr_t>(byte_dest), kPageSize); 407 CHECK_ALIGNED(reinterpret_cast<uintptr_t>(byte_dest), sizeof(uintptr_t)); 408 CHECK_ALIGNED(reinterpret_cast<uintptr_t>(byte_src), sizeof(uintptr_t)); 409 while (byte_src + kPageSize < limit) { 410 bool all_zero = true; 411 uintptr_t* word_dest = reinterpret_cast<uintptr_t*>(byte_dest); 412 const uintptr_t* word_src = reinterpret_cast<const uintptr_t*>(byte_src); 413 for (size_t i = 0; i < kPageSize / sizeof(*word_src); ++i) { 414 // Assumes the destination of the copy is all zeros. 415 if (word_src[i] != 0) { 416 all_zero = false; 417 word_dest[i] = word_src[i]; 418 } 419 } 420 if (all_zero) { 421 // Avoided copying into the page since it was all zeros. 422 saved_bytes += kPageSize; 423 } 424 byte_src += kPageSize; 425 byte_dest += kPageSize; 426 } 427 // Handle the part of the page at the end. 428 memcpy(byte_dest, byte_src, limit - byte_src); 429 return saved_bytes; 430} 431 432mirror::Object* SemiSpace::MarkNonForwardedObject(mirror::Object* obj) { 433 size_t object_size = obj->SizeOf(); 434 size_t bytes_allocated; 435 mirror::Object* forward_address = nullptr; 436 if (generational_ && reinterpret_cast<byte*>(obj) < last_gc_to_space_end_) { 437 // If it's allocated before the last GC (older), move 438 // (pseudo-promote) it to the main free list space (as sort 439 // of an old generation.) 440 size_t bytes_promoted; 441 space::MallocSpace* promo_dest_space = GetHeap()->GetPrimaryFreeListSpace(); 442 forward_address = promo_dest_space->Alloc(self_, object_size, &bytes_promoted, nullptr); 443 if (forward_address == nullptr) { 444 // If out of space, fall back to the to-space. 445 forward_address = to_space_->Alloc(self_, object_size, &bytes_allocated, nullptr); 446 } else { 447 GetHeap()->num_bytes_allocated_.FetchAndAdd(bytes_promoted); 448 bytes_promoted_ += bytes_promoted; 449 // Handle the bitmaps marking. 450 accounting::SpaceBitmap* live_bitmap = promo_dest_space->GetLiveBitmap(); 451 DCHECK(live_bitmap != nullptr); 452 accounting::SpaceBitmap* mark_bitmap = promo_dest_space->GetMarkBitmap(); 453 DCHECK(mark_bitmap != nullptr); 454 DCHECK(!live_bitmap->Test(forward_address)); 455 if (!whole_heap_collection_) { 456 // If collecting the bump pointer spaces only, live_bitmap == mark_bitmap. 457 DCHECK_EQ(live_bitmap, mark_bitmap); 458 459 // If a bump pointer space only collection, delay the live 460 // bitmap marking of the promoted object until it's popped off 461 // the mark stack (ProcessMarkStack()). The rationale: we may 462 // be in the middle of scanning the objects in the promo 463 // destination space for 464 // non-moving-space-to-bump-pointer-space references by 465 // iterating over the marked bits of the live bitmap 466 // (MarkReachableObjects()). If we don't delay it (and instead 467 // mark the promoted object here), the above promo destination 468 // space scan could encounter the just-promoted object and 469 // forward the references in the promoted object's fields even 470 // through it is pushed onto the mark stack. If this happens, 471 // the promoted object would be in an inconsistent state, that 472 // is, it's on the mark stack (gray) but its fields are 473 // already forwarded (black), which would cause a 474 // DCHECK(!to_space_->HasAddress(obj)) failure below. 475 } else { 476 // Mark forward_address on the live bit map. 477 live_bitmap->Set(forward_address); 478 // Mark forward_address on the mark bit map. 479 DCHECK(!mark_bitmap->Test(forward_address)); 480 mark_bitmap->Set(forward_address); 481 } 482 } 483 DCHECK(forward_address != nullptr); 484 } else { 485 // If it's allocated after the last GC (younger), copy it to the to-space. 486 forward_address = to_space_->Alloc(self_, object_size, &bytes_allocated, nullptr); 487 } 488 // Copy over the object and add it to the mark stack since we still need to update its 489 // references. 490 saved_bytes_ += 491 CopyAvoidingDirtyingPages(reinterpret_cast<void*>(forward_address), obj, object_size); 492 if (kUseBrooksPointer) { 493 obj->AssertSelfBrooksPointer(); 494 DCHECK_EQ(forward_address->GetBrooksPointer(), obj); 495 forward_address->SetBrooksPointer(forward_address); 496 forward_address->AssertSelfBrooksPointer(); 497 } 498 if (to_space_live_bitmap_ != nullptr) { 499 to_space_live_bitmap_->Set(forward_address); 500 } 501 DCHECK(to_space_->HasAddress(forward_address) || 502 (generational_ && GetHeap()->GetPrimaryFreeListSpace()->HasAddress(forward_address))); 503 return forward_address; 504} 505 506// Used to mark and copy objects. Any newly-marked objects who are in the from space get moved to 507// the to-space and have their forward address updated. Objects which have been newly marked are 508// pushed on the mark stack. 509Object* SemiSpace::MarkObject(Object* obj) { 510 if (kUseBrooksPointer) { 511 // Verify all the objects have the correct forward pointer installed. 512 if (obj != nullptr) { 513 obj->AssertSelfBrooksPointer(); 514 } 515 } 516 Object* forward_address = obj; 517 if (obj != nullptr && !immune_region_.ContainsObject(obj)) { 518 if (from_space_->HasAddress(obj)) { 519 forward_address = GetForwardingAddressInFromSpace(obj); 520 // If the object has already been moved, return the new forward address. 521 if (forward_address == nullptr) { 522 forward_address = MarkNonForwardedObject(obj); 523 DCHECK(forward_address != nullptr); 524 // Make sure to only update the forwarding address AFTER you copy the object so that the 525 // monitor word doesn't get stomped over. 526 obj->SetLockWord(LockWord::FromForwardingAddress( 527 reinterpret_cast<size_t>(forward_address))); 528 // Push the object onto the mark stack for later processing. 529 MarkStackPush(forward_address); 530 } 531 // TODO: Do we need this if in the else statement? 532 } else { 533 accounting::SpaceBitmap* object_bitmap = heap_->GetMarkBitmap()->GetContinuousSpaceBitmap(obj); 534 if (LIKELY(object_bitmap != nullptr)) { 535 if (generational_) { 536 // If a bump pointer space only collection, we should not 537 // reach here as we don't/won't mark the objects in the 538 // non-moving space (except for the promoted objects.) Note 539 // the non-moving space is added to the immune space. 540 DCHECK(whole_heap_collection_); 541 } 542 // This object was not previously marked. 543 if (!object_bitmap->Test(obj)) { 544 object_bitmap->Set(obj); 545 MarkStackPush(obj); 546 } 547 } else { 548 CHECK(!to_space_->HasAddress(obj)) << "Marking object in to_space_"; 549 if (MarkLargeObject(obj)) { 550 MarkStackPush(obj); 551 } 552 } 553 } 554 } 555 return forward_address; 556} 557 558void SemiSpace::ProcessMarkStackCallback(void* arg) { 559 DCHECK(arg != nullptr); 560 reinterpret_cast<SemiSpace*>(arg)->ProcessMarkStack(); 561} 562 563mirror::Object* SemiSpace::MarkObjectCallback(mirror::Object* root, void* arg) { 564 DCHECK(root != nullptr); 565 DCHECK(arg != nullptr); 566 return reinterpret_cast<SemiSpace*>(arg)->MarkObject(root); 567} 568 569void SemiSpace::MarkRootCallback(Object** root, void* arg, uint32_t /*thread_id*/, 570 RootType /*root_type*/) { 571 DCHECK(root != nullptr); 572 DCHECK(arg != nullptr); 573 *root = reinterpret_cast<SemiSpace*>(arg)->MarkObject(*root); 574} 575 576// Marks all objects in the root set. 577void SemiSpace::MarkRoots() { 578 timings_.StartSplit("MarkRoots"); 579 // TODO: Visit up image roots as well? 580 Runtime::Current()->VisitRoots(MarkRootCallback, this); 581 timings_.EndSplit(); 582} 583 584mirror::Object* SemiSpace::MarkedForwardingAddressCallback(mirror::Object* object, void* arg) { 585 return reinterpret_cast<SemiSpace*>(arg)->GetMarkedForwardAddress(object); 586} 587 588void SemiSpace::SweepSystemWeaks() { 589 timings_.StartSplit("SweepSystemWeaks"); 590 Runtime::Current()->SweepSystemWeaks(MarkedForwardingAddressCallback, this); 591 timings_.EndSplit(); 592} 593 594bool SemiSpace::ShouldSweepSpace(space::ContinuousSpace* space) const { 595 return space != from_space_ && space != to_space_ && !immune_region_.ContainsSpace(space); 596} 597 598void SemiSpace::Sweep(bool swap_bitmaps) { 599 DCHECK(mark_stack_->IsEmpty()); 600 TimingLogger::ScopedSplit("Sweep", &timings_); 601 for (const auto& space : GetHeap()->GetContinuousSpaces()) { 602 if (space->IsContinuousMemMapAllocSpace()) { 603 space::ContinuousMemMapAllocSpace* alloc_space = space->AsContinuousMemMapAllocSpace(); 604 if (!ShouldSweepSpace(alloc_space)) { 605 continue; 606 } 607 TimingLogger::ScopedSplit split( 608 alloc_space->IsZygoteSpace() ? "SweepZygoteSpace" : "SweepAllocSpace", &timings_); 609 size_t freed_objects = 0; 610 size_t freed_bytes = 0; 611 alloc_space->Sweep(swap_bitmaps, &freed_objects, &freed_bytes); 612 heap_->RecordFree(freed_objects, freed_bytes); 613 freed_objects_.FetchAndAdd(freed_objects); 614 freed_bytes_.FetchAndAdd(freed_bytes); 615 } 616 } 617 if (!is_large_object_space_immune_) { 618 SweepLargeObjects(swap_bitmaps); 619 } 620} 621 622void SemiSpace::SweepLargeObjects(bool swap_bitmaps) { 623 DCHECK(!is_large_object_space_immune_); 624 TimingLogger::ScopedSplit("SweepLargeObjects", &timings_); 625 size_t freed_objects = 0; 626 size_t freed_bytes = 0; 627 GetHeap()->GetLargeObjectsSpace()->Sweep(swap_bitmaps, &freed_objects, &freed_bytes); 628 freed_large_objects_.FetchAndAdd(freed_objects); 629 freed_large_object_bytes_.FetchAndAdd(freed_bytes); 630 GetHeap()->RecordFree(freed_objects, freed_bytes); 631} 632 633// Process the "referent" field in a java.lang.ref.Reference. If the referent has not yet been 634// marked, put it on the appropriate list in the heap for later processing. 635void SemiSpace::DelayReferenceReferent(mirror::Class* klass, Object* obj) { 636 heap_->DelayReferenceReferent(klass, obj, MarkedForwardingAddressCallback, this); 637} 638 639class SemiSpaceMarkObjectVisitor { 640 public: 641 explicit SemiSpaceMarkObjectVisitor(SemiSpace* semi_space) : semi_space_(semi_space) { 642 } 643 644 void operator()(Object* obj, Object* ref, const MemberOffset& offset, bool /* is_static */) 645 const ALWAYS_INLINE NO_THREAD_SAFETY_ANALYSIS /* EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_) */ { 646 mirror::Object* new_address = semi_space_->MarkObject(ref); 647 if (new_address != ref) { 648 DCHECK(new_address != nullptr); 649 // Don't need to mark the card since we updating the object address and not changing the 650 // actual objects its pointing to. Using SetFieldObjectWithoutWriteBarrier is better in this 651 // case since it does not dirty cards and use additional memory. 652 // Since we do not change the actual object, we can safely use non-transactional mode. Also 653 // disable check as we could run inside a transaction. 654 obj->SetFieldObjectWithoutWriteBarrier<false, false, kVerifyNone>(offset, new_address, false); 655 } 656 } 657 private: 658 SemiSpace* const semi_space_; 659}; 660 661// Visit all of the references of an object and update. 662void SemiSpace::ScanObject(Object* obj) { 663 DCHECK(obj != NULL); 664 DCHECK(!from_space_->HasAddress(obj)) << "Scanning object " << obj << " in from space"; 665 SemiSpaceMarkObjectVisitor visitor(this); 666 MarkSweep::VisitObjectReferences(obj, visitor, kMovingClasses); 667 mirror::Class* klass = obj->GetClass<kVerifyNone>(); 668 if (UNLIKELY(klass->IsReferenceClass<kVerifyNone>())) { 669 DelayReferenceReferent(klass, obj); 670 } 671} 672 673// Scan anything that's on the mark stack. 674void SemiSpace::ProcessMarkStack() { 675 space::MallocSpace* promo_dest_space = NULL; 676 accounting::SpaceBitmap* live_bitmap = NULL; 677 if (generational_ && !whole_heap_collection_) { 678 // If a bump pointer space only collection (and the promotion is 679 // enabled,) we delay the live-bitmap marking of promoted objects 680 // from MarkObject() until this function. 681 promo_dest_space = GetHeap()->GetPrimaryFreeListSpace(); 682 live_bitmap = promo_dest_space->GetLiveBitmap(); 683 DCHECK(live_bitmap != nullptr); 684 accounting::SpaceBitmap* mark_bitmap = promo_dest_space->GetMarkBitmap(); 685 DCHECK(mark_bitmap != nullptr); 686 DCHECK_EQ(live_bitmap, mark_bitmap); 687 } 688 timings_.StartSplit("ProcessMarkStack"); 689 while (!mark_stack_->IsEmpty()) { 690 Object* obj = mark_stack_->PopBack(); 691 if (generational_ && !whole_heap_collection_ && promo_dest_space->HasAddress(obj)) { 692 // obj has just been promoted. Mark the live bitmap for it, 693 // which is delayed from MarkObject(). 694 DCHECK(!live_bitmap->Test(obj)); 695 live_bitmap->Set(obj); 696 } 697 ScanObject(obj); 698 } 699 timings_.EndSplit(); 700} 701 702inline Object* SemiSpace::GetMarkedForwardAddress(mirror::Object* obj) const 703 SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) { 704 // All immune objects are assumed marked. 705 if (immune_region_.ContainsObject(obj)) { 706 return obj; 707 } 708 if (from_space_->HasAddress(obj)) { 709 mirror::Object* forwarding_address = GetForwardingAddressInFromSpace(const_cast<Object*>(obj)); 710 return forwarding_address; // Returns either the forwarding address or nullptr. 711 } else if (to_space_->HasAddress(obj)) { 712 // Should be unlikely. 713 // Already forwarded, must be marked. 714 return obj; 715 } 716 return heap_->GetMarkBitmap()->Test(obj) ? obj : nullptr; 717} 718 719void SemiSpace::SetToSpace(space::ContinuousMemMapAllocSpace* to_space) { 720 DCHECK(to_space != nullptr); 721 to_space_ = to_space; 722} 723 724void SemiSpace::SetFromSpace(space::ContinuousMemMapAllocSpace* from_space) { 725 DCHECK(from_space != nullptr); 726 from_space_ = from_space; 727} 728 729void SemiSpace::FinishPhase() { 730 TimingLogger::ScopedSplit split("FinishPhase", &timings_); 731 Heap* heap = GetHeap(); 732 timings_.NewSplit("PostGcVerification"); 733 heap->PostGcVerification(this); 734 735 // Null the "to" and "from" spaces since compacting from one to the other isn't valid until 736 // further action is done by the heap. 737 to_space_ = nullptr; 738 from_space_ = nullptr; 739 740 // Update the cumulative statistics 741 total_freed_objects_ += GetFreedObjects() + GetFreedLargeObjects(); 742 total_freed_bytes_ += GetFreedBytes() + GetFreedLargeObjectBytes(); 743 744 // Ensure that the mark stack is empty. 745 CHECK(mark_stack_->IsEmpty()); 746 747 // Update the cumulative loggers. 748 cumulative_timings_.Start(); 749 cumulative_timings_.AddLogger(timings_); 750 cumulative_timings_.End(); 751 752 // Clear all of the spaces' mark bitmaps. 753 for (const auto& space : GetHeap()->GetContinuousSpaces()) { 754 accounting::SpaceBitmap* bitmap = space->GetMarkBitmap(); 755 if (bitmap != nullptr && 756 space->GetGcRetentionPolicy() != space::kGcRetentionPolicyNeverCollect) { 757 bitmap->Clear(); 758 } 759 } 760 mark_stack_->Reset(); 761 762 // Reset the marked large objects. 763 space::LargeObjectSpace* large_objects = GetHeap()->GetLargeObjectsSpace(); 764 large_objects->GetMarkObjects()->Clear(); 765 766 if (generational_) { 767 // Decide whether to do a whole heap collection or a bump pointer 768 // only space collection at the next collection by updating 769 // whole_heap_collection. Enable whole_heap_collection once every 770 // kDefaultWholeHeapCollectionInterval collections. 771 if (!whole_heap_collection_) { 772 --whole_heap_collection_interval_counter_; 773 DCHECK_GE(whole_heap_collection_interval_counter_, 0); 774 if (whole_heap_collection_interval_counter_ == 0) { 775 whole_heap_collection_ = true; 776 } 777 } else { 778 DCHECK_EQ(whole_heap_collection_interval_counter_, 0); 779 whole_heap_collection_interval_counter_ = kDefaultWholeHeapCollectionInterval; 780 whole_heap_collection_ = false; 781 } 782 } 783} 784 785} // namespace collector 786} // namespace gc 787} // namespace art 788