semi_space.cc revision 407f702da4f867c074fc3c8c688b8f8c32279eff
/*
 * Copyright (C) 2013 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "semi_space.h"

#include <functional>
#include <numeric>
#include <climits>
#include <vector>

#include "base/logging.h"
#include "base/macros.h"
#include "base/mutex-inl.h"
#include "base/timing_logger.h"
#include "gc/accounting/heap_bitmap.h"
#include "gc/accounting/mod_union_table.h"
#include "gc/accounting/remembered_set.h"
#include "gc/accounting/space_bitmap-inl.h"
#include "gc/heap.h"
#include "gc/space/bump_pointer_space.h"
#include "gc/space/bump_pointer_space-inl.h"
#include "gc/space/image_space.h"
#include "gc/space/large_object_space.h"
#include "gc/space/space-inl.h"
#include "indirect_reference_table.h"
#include "intern_table.h"
#include "jni_internal.h"
#include "mark_sweep-inl.h"
#include "monitor.h"
#include "mirror/art_field.h"
#include "mirror/art_field-inl.h"
#include "mirror/class-inl.h"
#include "mirror/class_loader.h"
#include "mirror/dex_cache.h"
#include "mirror/reference-inl.h"
#include "mirror/object-inl.h"
#include "mirror/object_array.h"
#include "mirror/object_array-inl.h"
#include "runtime.h"
#include "semi_space-inl.h"
#include "thread-inl.h"
#include "thread_list.h"
#include "verifier/method_verifier.h"

using ::art::mirror::Class;
using ::art::mirror::Object;

namespace art {
namespace gc {
namespace collector {

static constexpr bool kProtectFromSpace = true;
static constexpr bool kClearFromSpace = true;
static constexpr bool kStoreStackTraces = false;

void SemiSpace::BindBitmaps() {
  timings_.StartSplit("BindBitmaps");
  WriterMutexLock mu(self_, *Locks::heap_bitmap_lock_);
  // Mark all of the spaces we never collect as immune.
  for (const auto& space : GetHeap()->GetContinuousSpaces()) {
    if (space->GetLiveBitmap() != nullptr) {
      if (space == to_space_) {
        CHECK(to_space_->IsContinuousMemMapAllocSpace());
        to_space_->AsContinuousMemMapAllocSpace()->BindLiveToMarkBitmap();
      } else if (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyNeverCollect
                 || space->GetGcRetentionPolicy() == space::kGcRetentionPolicyFullCollect
                 // Add the main free list space and the non-moving space to the
                 // immune space if this is a bump pointer space only collection.
                 || (generational_ && !whole_heap_collection_ &&
                     (space == GetHeap()->GetNonMovingSpace() ||
                      space == GetHeap()->GetPrimaryFreeListSpace()))) {
        CHECK(immune_region_.AddContinuousSpace(space)) << "Failed to add space " << *space;
      }
    }
  }
  if (generational_ && !whole_heap_collection_) {
    // We won't collect the large object space during a bump pointer space only collection.
    is_large_object_space_immune_ = true;
  }
  timings_.EndSplit();
}

SemiSpace::SemiSpace(Heap* heap, bool generational, const std::string& name_prefix)
    : GarbageCollector(heap,
                       name_prefix + (name_prefix.empty() ? "" : " ") + "marksweep + semispace"),
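      // to_space_ and from_space_ start out null; the heap installs them through
      // SetToSpace()/SetFromSpace() (defined below) before each collection runs.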
"" : " ") + "marksweep + semispace"), 100 to_space_(nullptr), 101 from_space_(nullptr), 102 generational_(generational), 103 last_gc_to_space_end_(nullptr), 104 bytes_promoted_(0), 105 whole_heap_collection_(true), 106 whole_heap_collection_interval_counter_(0) { 107} 108 109void SemiSpace::InitializePhase() { 110 timings_.Reset(); 111 TimingLogger::ScopedSplit split("InitializePhase", &timings_); 112 mark_stack_ = heap_->mark_stack_.get(); 113 DCHECK(mark_stack_ != nullptr); 114 immune_region_.Reset(); 115 is_large_object_space_immune_ = false; 116 saved_bytes_ = 0; 117 self_ = Thread::Current(); 118 // Do any pre GC verification. 119 timings_.NewSplit("PreGcVerification"); 120 heap_->PreGcVerification(this); 121 // Set the initial bitmap. 122 to_space_live_bitmap_ = to_space_->GetLiveBitmap(); 123} 124 125void SemiSpace::ProcessReferences(Thread* self) { 126 TimingLogger::ScopedSplit split("ProcessReferences", &timings_); 127 WriterMutexLock mu(self, *Locks::heap_bitmap_lock_); 128 GetHeap()->ProcessReferences(timings_, clear_soft_references_, &MarkedForwardingAddressCallback, 129 &MarkObjectCallback, &ProcessMarkStackCallback, this); 130} 131 132void SemiSpace::MarkingPhase() { 133 if (kStoreStackTraces) { 134 Locks::mutator_lock_->AssertExclusiveHeld(self_); 135 // Store the stack traces into the runtime fault string in case we get a heap corruption 136 // related crash later. 137 ThreadState old_state = self_->SetStateUnsafe(kRunnable); 138 std::ostringstream oss; 139 Runtime* runtime = Runtime::Current(); 140 runtime->GetThreadList()->DumpForSigQuit(oss); 141 runtime->GetThreadList()->DumpNativeStacks(oss); 142 runtime->SetFaultMessage(oss.str()); 143 CHECK_EQ(self_->SetStateUnsafe(old_state), kRunnable); 144 } 145 146 if (generational_) { 147 if (gc_cause_ == kGcCauseExplicit || gc_cause_ == kGcCauseForNativeAlloc || 148 clear_soft_references_) { 149 // If an explicit, native allocation-triggered, or last attempt 150 // collection, collect the whole heap (and reset the interval 151 // counter to be consistent.) 152 whole_heap_collection_ = true; 153 whole_heap_collection_interval_counter_ = 0; 154 } 155 if (whole_heap_collection_) { 156 VLOG(heap) << "Whole heap collection"; 157 } else { 158 VLOG(heap) << "Bump pointer space only collection"; 159 } 160 } 161 Locks::mutator_lock_->AssertExclusiveHeld(self_); 162 163 TimingLogger::ScopedSplit split("MarkingPhase", &timings_); 164 // Need to do this with mutators paused so that somebody doesn't accidentally allocate into the 165 // wrong space. 166 heap_->SwapSemiSpaces(); 167 if (generational_) { 168 // If last_gc_to_space_end_ is out of the bounds of the from-space 169 // (the to-space from last GC), then point it to the beginning of 170 // the from-space. For example, the very first GC or the 171 // pre-zygote compaction. 172 if (!from_space_->HasAddress(reinterpret_cast<mirror::Object*>(last_gc_to_space_end_))) { 173 last_gc_to_space_end_ = from_space_->Begin(); 174 } 175 // Reset this before the marking starts below. 176 bytes_promoted_ = 0; 177 } 178 // Assume the cleared space is already empty. 179 BindBitmaps(); 180 // Process dirty cards and add dirty cards to mod-union tables. 181 heap_->ProcessCards(timings_, kUseRememberedSet && generational_); 182 // Clear the whole card table since we can not get any additional dirty cards during the 183 // paused GC. This saves memory but only works for pause the world collectors. 
  timings_.NewSplit("ClearCardTable");
  heap_->GetCardTable()->ClearCardTable();
  // Need to do this before the checkpoint since we don't want any threads to add references to
  // the live stack during the recursive mark.
  timings_.NewSplit("SwapStacks");
  if (kUseThreadLocalAllocationStack) {
    heap_->RevokeAllThreadLocalAllocationStacks(self_);
  }
  heap_->SwapStacks(self_);
  WriterMutexLock mu(self_, *Locks::heap_bitmap_lock_);
  MarkRoots();
  // Mark roots of immune spaces.
  UpdateAndMarkModUnion();
  // Recursively mark remaining objects.
  MarkReachableObjects();
}

void SemiSpace::UpdateAndMarkModUnion() {
  for (auto& space : heap_->GetContinuousSpaces()) {
    // If the space is immune then we need to mark the references to other spaces.
    if (immune_region_.ContainsSpace(space)) {
      accounting::ModUnionTable* table = heap_->FindModUnionTableFromSpace(space);
      if (table != nullptr) {
        // TODO: Improve naming.
        TimingLogger::ScopedSplit split(
            space->IsZygoteSpace() ? "UpdateAndMarkZygoteModUnionTable" :
                                     "UpdateAndMarkImageModUnionTable",
            &timings_);
        table->UpdateAndMarkReferences(MarkHeapReferenceCallback, this);
      } else if (heap_->FindRememberedSetFromSpace(space) != nullptr) {
        DCHECK(kUseRememberedSet);
        // During a bump pointer space only collection, the non-moving
        // space is added to the immune space. The non-moving space
        // doesn't have a mod union table, but it has a remembered
        // set. Its dirty cards will be scanned later in
        // MarkReachableObjects().
        DCHECK(generational_ && !whole_heap_collection_ &&
               (space == heap_->GetNonMovingSpace() || space == heap_->GetPrimaryFreeListSpace()))
            << "Space " << space->GetName() << " "
            << "generational_=" << generational_ << " "
            << "whole_heap_collection_=" << whole_heap_collection_ << " ";
      } else {
        DCHECK(!kUseRememberedSet);
        // During a bump pointer space only collection, the non-moving
        // space is added to the immune space. But the non-moving
        // space doesn't have a mod union table. Instead, its live
        // bitmap will be scanned later in MarkReachableObjects().
        DCHECK(generational_ && !whole_heap_collection_ &&
               (space == heap_->GetNonMovingSpace() || space == heap_->GetPrimaryFreeListSpace()))
            << "Space " << space->GetName() << " "
            << "generational_=" << generational_ << " "
            << "whole_heap_collection_=" << whole_heap_collection_ << " ";
      }
    }
  }
}

class SemiSpaceScanObjectVisitor {
 public:
  explicit SemiSpaceScanObjectVisitor(SemiSpace* ss) : semi_space_(ss) {}
  void operator()(Object* obj) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
    // TODO: fix NO_THREAD_SAFETY_ANALYSIS. ScanObject() requires an
    // exclusive lock on the mutator lock, but
    // SpaceBitmap::VisitMarkedRange() only requires the shared lock.
    DCHECK(obj != nullptr);
    semi_space_->ScanObject(obj);
  }
 private:
  SemiSpace* const semi_space_;
};

// Used to verify that there are no references to the from-space.
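// Only used in debug builds: see the kIsDebugBuild check in MarkReachableObjects().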
class SemiSpaceVerifyNoFromSpaceReferencesVisitor {
 public:
  explicit SemiSpaceVerifyNoFromSpaceReferencesVisitor(space::ContinuousMemMapAllocSpace* from_space) :
      from_space_(from_space) {}

  void operator()(Object* obj, MemberOffset offset, bool /* is_static */) const
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) ALWAYS_INLINE {
    mirror::Object* ref = obj->GetFieldObject<mirror::Object>(offset, false);
    if (from_space_->HasAddress(ref)) {
      Runtime::Current()->GetHeap()->DumpObject(LOG(INFO), obj);
    }
  }
 private:
  space::ContinuousMemMapAllocSpace* from_space_;
};

void SemiSpace::VerifyNoFromSpaceReferences(Object* obj) {
  DCHECK(!from_space_->HasAddress(obj)) << "Scanning object " << obj << " in from space";
  SemiSpaceVerifyNoFromSpaceReferencesVisitor visitor(from_space_);
  obj->VisitReferences<kMovingClasses>(visitor);
}

class SemiSpaceVerifyNoFromSpaceReferencesObjectVisitor {
 public:
  explicit SemiSpaceVerifyNoFromSpaceReferencesObjectVisitor(SemiSpace* ss) : semi_space_(ss) {}
  void operator()(Object* obj) const
      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_) {
    DCHECK(obj != nullptr);
    semi_space_->VerifyNoFromSpaceReferences(obj);
  }
 private:
  SemiSpace* const semi_space_;
};

void SemiSpace::MarkReachableObjects() {
  timings_.StartSplit("MarkStackAsLive");
  accounting::ObjectStack* live_stack = heap_->GetLiveStack();
  heap_->MarkAllocStackAsLive(live_stack);
  live_stack->Reset();
  timings_.EndSplit();

  for (auto& space : heap_->GetContinuousSpaces()) {
    // If the space is immune and has no mod union table (the
    // non-moving space when the bump pointer space only collection is
    // enabled), then we need to scan its live bitmap or dirty cards as roots
    // (including the objects on the live stack which have just been marked
    // in the live bitmap above in MarkAllocStackAsLive()).
    if (immune_region_.ContainsSpace(space) &&
        heap_->FindModUnionTableFromSpace(space) == nullptr) {
      DCHECK(generational_ && !whole_heap_collection_ &&
             (space == GetHeap()->GetNonMovingSpace() || space == GetHeap()->GetPrimaryFreeListSpace()));
      accounting::RememberedSet* rem_set = heap_->FindRememberedSetFromSpace(space);
      if (kUseRememberedSet) {
        DCHECK(rem_set != nullptr);
        rem_set->UpdateAndMarkReferences(MarkHeapReferenceCallback, from_space_, this);
        if (kIsDebugBuild) {
          // Verify that there are no from-space references that
          // remain in the space, that is, that the remembered set (and the
          // card table) didn't miss any from-space references in the space.
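          // Walk every live object in the space and check each of its reference fields.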
          accounting::SpaceBitmap* live_bitmap = space->GetLiveBitmap();
          SemiSpaceVerifyNoFromSpaceReferencesObjectVisitor visitor(this);
          live_bitmap->VisitMarkedRange(reinterpret_cast<uintptr_t>(space->Begin()),
                                        reinterpret_cast<uintptr_t>(space->End()),
                                        visitor);
        }
      } else {
        DCHECK(rem_set == nullptr);
        accounting::SpaceBitmap* live_bitmap = space->GetLiveBitmap();
        SemiSpaceScanObjectVisitor visitor(this);
        live_bitmap->VisitMarkedRange(reinterpret_cast<uintptr_t>(space->Begin()),
                                      reinterpret_cast<uintptr_t>(space->End()),
                                      visitor);
      }
    }
  }

  if (is_large_object_space_immune_) {
    DCHECK(generational_ && !whole_heap_collection_);
    // Delay copying the live set to the marked set until here from
    // BindBitmaps() as the large objects on the allocation stack may
    // be newly added to the live set above in MarkAllocStackAsLive().
    GetHeap()->GetLargeObjectsSpace()->CopyLiveToMarked();

    // When the large object space is immune, we need to scan the
    // large objects as roots as they contain references to their
    // classes (primitive array classes) that could move, even though they
    // don't contain any other references.
    space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
    accounting::ObjectSet* large_live_objects = large_object_space->GetLiveObjects();
    SemiSpaceScanObjectVisitor visitor(this);
    for (const Object* obj : large_live_objects->GetObjects()) {
      visitor(const_cast<Object*>(obj));
    }
  }

  // Recursively process the mark stack.
  ProcessMarkStack();
}

void SemiSpace::ReclaimPhase() {
  TimingLogger::ScopedSplit split("ReclaimPhase", &timings_);
  ProcessReferences(self_);
  {
    ReaderMutexLock mu(self_, *Locks::heap_bitmap_lock_);
    SweepSystemWeaks();
  }
  // Record freed memory.
  uint64_t from_bytes = from_space_->GetBytesAllocated();
  uint64_t to_bytes = to_space_->GetBytesAllocated();
  uint64_t from_objects = from_space_->GetObjectsAllocated();
  uint64_t to_objects = to_space_->GetObjectsAllocated();
  CHECK_LE(to_objects, from_objects);
  int64_t freed_bytes = from_bytes - to_bytes;
  int64_t freed_objects = from_objects - to_objects;
  freed_bytes_.FetchAndAdd(freed_bytes);
  freed_objects_.FetchAndAdd(freed_objects);
  // Note: Freed bytes can be negative if we copy from a compacted space to a free-list backed
  // space.
  heap_->RecordFree(freed_objects, freed_bytes);
  timings_.StartSplit("PreSweepingGcVerification");
  heap_->PreSweepingGcVerification(this);
  timings_.EndSplit();

  {
    WriterMutexLock mu(self_, *Locks::heap_bitmap_lock_);
    // Reclaim unmarked objects.
    Sweep(false);
    // Swap the live and mark bitmaps for each space which we modified. This is an
    // optimization that enables us to not clear live bits inside of the sweep. Only swaps unbound
    // bitmaps.
    timings_.StartSplit("SwapBitmaps");
    SwapBitmaps();
    timings_.EndSplit();
    // Unbind the live and mark bitmaps.
    TimingLogger::ScopedSplit split("UnBindBitmaps", &timings_);
    GetHeap()->UnBindBitmaps();
  }
  if (kClearFromSpace) {
    // Release the memory used by the from space.
    from_space_->Clear();
  }
  from_space_->Reset();
  // Protect the from space.
  VLOG(heap) << "Protecting space " << *from_space_;
  if (kProtectFromSpace) {
    from_space_->GetMemMap()->Protect(PROT_NONE);
  } else {
    from_space_->GetMemMap()->Protect(PROT_READ);
  }
  if (saved_bytes_ > 0) {
    VLOG(heap) << "Avoided dirtying " << PrettySize(saved_bytes_);
  }

  if (generational_) {
    // Record the end (top) of the to space so we can distinguish
    // between objects that were allocated since the last GC and the
    // older objects.
    last_gc_to_space_end_ = to_space_->End();
  }
}

void SemiSpace::ResizeMarkStack(size_t new_size) {
  std::vector<Object*> temp(mark_stack_->Begin(), mark_stack_->End());
  CHECK_LE(mark_stack_->Size(), new_size);
  mark_stack_->Resize(new_size);
  for (const auto& obj : temp) {
    mark_stack_->PushBack(obj);
  }
}

inline void SemiSpace::MarkStackPush(Object* obj) {
  if (UNLIKELY(mark_stack_->Size() >= mark_stack_->Capacity())) {
    ResizeMarkStack(mark_stack_->Capacity() * 2);
  }
  // The object must be pushed on to the mark stack.
  mark_stack_->PushBack(obj);
}

// Rare case, probably not worth inlining since it will increase instruction cache miss rate.
bool SemiSpace::MarkLargeObject(const Object* obj) {
  // TODO: support >1 discontinuous space.
  space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
  DCHECK(large_object_space->Contains(obj));
  accounting::ObjectSet* large_objects = large_object_space->GetMarkObjects();
  if (UNLIKELY(!large_objects->Test(obj))) {
    large_objects->Set(obj);
    return true;
  }
  return false;
}

static inline size_t CopyAvoidingDirtyingPages(void* dest, const void* src, size_t size) {
  if (LIKELY(size <= static_cast<size_t>(kPageSize))) {
    // We will dirty the current page and somewhere in the middle of the next page. This means
    // that the next object copied will also dirty that page.
    // TODO: Worth considering the last object copied? We may end up dirtying one page which is
    // not necessary per GC.
    memcpy(dest, src, size);
    return 0;
  }
  size_t saved_bytes = 0;
  byte* byte_dest = reinterpret_cast<byte*>(dest);
  if (kIsDebugBuild) {
    for (size_t i = 0; i < size; ++i) {
      CHECK_EQ(byte_dest[i], 0U);
    }
  }
  // Process the start of the page. The page must already be dirty, don't bother with checking.
  const byte* byte_src = reinterpret_cast<const byte*>(src);
  const byte* limit = byte_src + size;
  size_t page_remain = AlignUp(byte_dest, kPageSize) - byte_dest;
  // Copy the bytes until the start of the next page.
  memcpy(dest, src, page_remain);
  byte_src += page_remain;
  byte_dest += page_remain;
  DCHECK_ALIGNED(reinterpret_cast<uintptr_t>(byte_dest), kPageSize);
  DCHECK_ALIGNED(reinterpret_cast<uintptr_t>(byte_dest), sizeof(uintptr_t));
  DCHECK_ALIGNED(reinterpret_cast<uintptr_t>(byte_src), sizeof(uintptr_t));
  while (byte_src + kPageSize < limit) {
    bool all_zero = true;
    uintptr_t* word_dest = reinterpret_cast<uintptr_t*>(byte_dest);
    const uintptr_t* word_src = reinterpret_cast<const uintptr_t*>(byte_src);
    for (size_t i = 0; i < kPageSize / sizeof(*word_src); ++i) {
      // Assumes the destination of the copy is all zeros.
      if (word_src[i] != 0) {
        all_zero = false;
        word_dest[i] = word_src[i];
      }
    }
    if (all_zero) {
      // Avoided copying into the page since it was all zeros.
      saved_bytes += kPageSize;
    }
    byte_src += kPageSize;
    byte_dest += kPageSize;
  }
  // Handle the part of the page at the end.
  memcpy(byte_dest, byte_src, limit - byte_src);
  return saved_bytes;
}

mirror::Object* SemiSpace::MarkNonForwardedObject(mirror::Object* obj) {
  size_t object_size = obj->SizeOf();
  size_t bytes_allocated;
  mirror::Object* forward_address = nullptr;
  if (generational_ && reinterpret_cast<byte*>(obj) < last_gc_to_space_end_) {
    // If it was allocated before the last GC (older), move it
    // (pseudo-promote it) to the main free list space (as a sort
    // of old generation).
    size_t bytes_promoted;
    space::MallocSpace* promo_dest_space = GetHeap()->GetPrimaryFreeListSpace();
    forward_address = promo_dest_space->Alloc(self_, object_size, &bytes_promoted, nullptr);
    if (forward_address == nullptr) {
      // If out of space, fall back to the to-space.
      forward_address = to_space_->Alloc(self_, object_size, &bytes_allocated, nullptr);
    } else {
      GetHeap()->num_bytes_allocated_.FetchAndAdd(bytes_promoted);
      bytes_promoted_ += bytes_promoted;
      // Dirty the card at the destination as it may contain
      // references (including the class pointer) to the bump pointer
      // space.
      GetHeap()->WriteBarrierEveryFieldOf(forward_address);
      // Handle the bitmap marking.
      accounting::SpaceBitmap* live_bitmap = promo_dest_space->GetLiveBitmap();
      DCHECK(live_bitmap != nullptr);
      accounting::SpaceBitmap* mark_bitmap = promo_dest_space->GetMarkBitmap();
      DCHECK(mark_bitmap != nullptr);
      DCHECK(!live_bitmap->Test(forward_address));
      if (!whole_heap_collection_) {
        // If collecting the bump pointer spaces only, live_bitmap == mark_bitmap.
        DCHECK_EQ(live_bitmap, mark_bitmap);

        // During a bump pointer space only collection, delay the live
        // bitmap marking of the promoted object until it's popped off
        // the mark stack (ProcessMarkStack()). The rationale: we may
        // be in the middle of scanning the objects in the promo
        // destination space for
        // non-moving-space-to-bump-pointer-space references by
        // iterating over the marked bits of the live bitmap
        // (MarkReachableObjects()). If we don't delay it (and instead
        // mark the promoted object here), the above promo destination
        // space scan could encounter the just-promoted object and
        // forward the references in the promoted object's fields even
        // though it is pushed onto the mark stack. If this happens,
        // the promoted object would be in an inconsistent state, that
        // is, it's on the mark stack (gray) but its fields are
        // already forwarded (black), which would cause a
        // DCHECK(!to_space_->HasAddress(obj)) failure below.
      } else {
        // Mark forward_address on the live bitmap.
        live_bitmap->Set(forward_address);
        // Mark forward_address on the mark bitmap.
        DCHECK(!mark_bitmap->Test(forward_address));
        mark_bitmap->Set(forward_address);
      }
    }
    DCHECK(forward_address != nullptr);
  } else {
    // If it was allocated after the last GC (younger), copy it to the to-space.
    forward_address = to_space_->Alloc(self_, object_size, &bytes_allocated, nullptr);
  }
  // Copy over the object and add it to the mark stack since we still need to update its
  // references.
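  // CopyAvoidingDirtyingPages() returns the number of bytes it skipped writing because the
  // source page was entirely zero; the running total is logged in ReclaimPhase().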
  saved_bytes_ +=
      CopyAvoidingDirtyingPages(reinterpret_cast<void*>(forward_address), obj, object_size);
  if (kUseBrooksPointer) {
    obj->AssertSelfBrooksPointer();
    DCHECK_EQ(forward_address->GetBrooksPointer(), obj);
    forward_address->SetBrooksPointer(forward_address);
    forward_address->AssertSelfBrooksPointer();
  }
  if (to_space_live_bitmap_ != nullptr) {
    to_space_live_bitmap_->Set(forward_address);
  }
  DCHECK(to_space_->HasAddress(forward_address) ||
         (generational_ && GetHeap()->GetPrimaryFreeListSpace()->HasAddress(forward_address)));
  return forward_address;
}

// Used to mark and copy objects. Any newly-marked objects that are in the from space get moved to
// the to-space and have their forward address updated. Objects which have been newly marked are
// pushed on the mark stack.
void SemiSpace::MarkObject(mirror::HeapReference<Object>* obj_ptr) {
  Object* obj = obj_ptr->AsMirrorPtr();
  if (obj == nullptr) {
    return;
  }
  if (kUseBrooksPointer) {
    // Verify that all the objects have the correct forward pointer installed.
    obj->AssertSelfBrooksPointer();
  }
  if (!immune_region_.ContainsObject(obj)) {
    if (from_space_->HasAddress(obj)) {
      mirror::Object* forward_address = GetForwardingAddressInFromSpace(obj);
      // GetForwardingAddressInFromSpace() returns the new address if the object has already been
      // moved, or nullptr if it has not.
      if (forward_address == nullptr) {
        forward_address = MarkNonForwardedObject(obj);
        DCHECK(forward_address != nullptr);
        // Make sure to only update the forwarding address AFTER you copy the object so that the
        // monitor word doesn't get stomped over.
        obj->SetLockWord(LockWord::FromForwardingAddress(
            reinterpret_cast<size_t>(forward_address)));
        // Push the object onto the mark stack for later processing.
        MarkStackPush(forward_address);
      }
      obj_ptr->Assign(forward_address);
    } else {
      accounting::SpaceBitmap* object_bitmap =
          heap_->GetMarkBitmap()->GetContinuousSpaceBitmap(obj);
      if (LIKELY(object_bitmap != nullptr)) {
        if (generational_) {
          // During a bump pointer space only collection, we should not
          // reach here as we don't/won't mark the objects in the
          // non-moving space (except for the promoted objects). Note
          // the non-moving space is added to the immune space.
          DCHECK(whole_heap_collection_);
        }
        if (!object_bitmap->Set(obj)) {
          // This object was not previously marked.
          MarkStackPush(obj);
        }
      } else {
        CHECK(!to_space_->HasAddress(obj)) << "Marking object in to_space_";
        if (MarkLargeObject(obj)) {
          MarkStackPush(obj);
        }
      }
    }
  }
}

void SemiSpace::ProcessMarkStackCallback(void* arg) {
  reinterpret_cast<SemiSpace*>(arg)->ProcessMarkStack();
}

mirror::Object* SemiSpace::MarkObjectCallback(mirror::Object* root, void* arg) {
  auto ref = mirror::HeapReference<mirror::Object>::FromMirrorPtr(root);
  reinterpret_cast<SemiSpace*>(arg)->MarkObject(&ref);
  return ref.AsMirrorPtr();
}

void SemiSpace::MarkHeapReferenceCallback(mirror::HeapReference<mirror::Object>* obj_ptr,
                                          void* arg) {
  reinterpret_cast<SemiSpace*>(arg)->MarkObject(obj_ptr);
}

void SemiSpace::MarkRootCallback(Object** root, void* arg, uint32_t /*thread_id*/,
                                 RootType /*root_type*/) {
  auto ref = mirror::HeapReference<mirror::Object>::FromMirrorPtr(*root);
  reinterpret_cast<SemiSpace*>(arg)->MarkObject(&ref);
  if (*root != ref.AsMirrorPtr()) {
    *root = ref.AsMirrorPtr();
  }
}

// Marks all objects in the root set.
void SemiSpace::MarkRoots() {
  timings_.StartSplit("MarkRoots");
  // TODO: Visit up image roots as well?
  Runtime::Current()->VisitRoots(MarkRootCallback, this);
  timings_.EndSplit();
}

mirror::Object* SemiSpace::MarkedForwardingAddressCallback(mirror::Object* object, void* arg) {
  return reinterpret_cast<SemiSpace*>(arg)->GetMarkedForwardAddress(object);
}

void SemiSpace::SweepSystemWeaks() {
  timings_.StartSplit("SweepSystemWeaks");
  Runtime::Current()->SweepSystemWeaks(MarkedForwardingAddressCallback, this);
  timings_.EndSplit();
}

bool SemiSpace::ShouldSweepSpace(space::ContinuousSpace* space) const {
  return space != from_space_ && space != to_space_ && !immune_region_.ContainsSpace(space);
}

void SemiSpace::Sweep(bool swap_bitmaps) {
  DCHECK(mark_stack_->IsEmpty());
  TimingLogger::ScopedSplit sweep_split("Sweep", &timings_);
  for (const auto& space : GetHeap()->GetContinuousSpaces()) {
    if (space->IsContinuousMemMapAllocSpace()) {
      space::ContinuousMemMapAllocSpace* alloc_space = space->AsContinuousMemMapAllocSpace();
      if (!ShouldSweepSpace(alloc_space)) {
        continue;
      }
      TimingLogger::ScopedSplit split(
          alloc_space->IsZygoteSpace() ? "SweepZygoteSpace" : "SweepAllocSpace", &timings_);
      size_t freed_objects = 0;
      size_t freed_bytes = 0;
      alloc_space->Sweep(swap_bitmaps, &freed_objects, &freed_bytes);
      heap_->RecordFree(freed_objects, freed_bytes);
      freed_objects_.FetchAndAdd(freed_objects);
      freed_bytes_.FetchAndAdd(freed_bytes);
    }
  }
  if (!is_large_object_space_immune_) {
    SweepLargeObjects(swap_bitmaps);
  }
}

void SemiSpace::SweepLargeObjects(bool swap_bitmaps) {
  DCHECK(!is_large_object_space_immune_);
  TimingLogger::ScopedSplit split("SweepLargeObjects", &timings_);
  size_t freed_objects = 0;
  size_t freed_bytes = 0;
  GetHeap()->GetLargeObjectsSpace()->Sweep(swap_bitmaps, &freed_objects, &freed_bytes);
  freed_large_objects_.FetchAndAdd(freed_objects);
  freed_large_object_bytes_.FetchAndAdd(freed_bytes);
  GetHeap()->RecordFree(freed_objects, freed_bytes);
}

// Process the "referent" field in a java.lang.ref.Reference. If the referent has not yet been
// marked, put it on the appropriate list in the heap for later processing.
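// The queued references are drained by ProcessReferences(), which ReclaimPhase() calls before
// sweeping.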
void SemiSpace::DelayReferenceReferent(mirror::Class* klass, mirror::Reference* reference) {
  heap_->DelayReferenceReferent(klass, reference, MarkedForwardingAddressCallback, this);
}

class SemiSpaceMarkObjectVisitor {
 public:
  explicit SemiSpaceMarkObjectVisitor(SemiSpace* collector) : collector_(collector) {
  }

  void operator()(Object* obj, MemberOffset offset, bool /* is_static */) const ALWAYS_INLINE
      EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
    collector_->MarkObject(obj->GetFieldObjectReferenceAddr(offset));
  }

  void operator()(mirror::Class* klass, mirror::Reference* ref) const
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
    collector_->DelayReferenceReferent(klass, ref);
  }

 private:
  SemiSpace* const collector_;
};

// Visit all of the references of an object and update them.
void SemiSpace::ScanObject(Object* obj) {
  DCHECK(!from_space_->HasAddress(obj)) << "Scanning object " << obj << " in from space";
  SemiSpaceMarkObjectVisitor visitor(this);
  obj->VisitReferences<kMovingClasses>(visitor, visitor);
}

// Scan anything that's on the mark stack.
void SemiSpace::ProcessMarkStack() {
  space::MallocSpace* promo_dest_space = nullptr;
  accounting::SpaceBitmap* live_bitmap = nullptr;
  if (generational_ && !whole_heap_collection_) {
    // During a bump pointer space only collection (and when promotion is
    // enabled), we delay the live-bitmap marking of promoted objects
    // from MarkObject() until this function.
    promo_dest_space = GetHeap()->GetPrimaryFreeListSpace();
    live_bitmap = promo_dest_space->GetLiveBitmap();
    DCHECK(live_bitmap != nullptr);
    accounting::SpaceBitmap* mark_bitmap = promo_dest_space->GetMarkBitmap();
    DCHECK(mark_bitmap != nullptr);
    DCHECK_EQ(live_bitmap, mark_bitmap);
  }
  timings_.StartSplit("ProcessMarkStack");
  while (!mark_stack_->IsEmpty()) {
    Object* obj = mark_stack_->PopBack();
    if (generational_ && !whole_heap_collection_ && promo_dest_space->HasAddress(obj)) {
      // obj has just been promoted. Mark the live bitmap for it,
      // which is delayed from MarkObject().
      DCHECK(!live_bitmap->Test(obj));
      live_bitmap->Set(obj);
    }
    ScanObject(obj);
  }
  timings_.EndSplit();
}

inline Object* SemiSpace::GetMarkedForwardAddress(mirror::Object* obj) const
    SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
  // All immune objects are assumed marked.
  if (immune_region_.ContainsObject(obj)) {
    return obj;
  }
  if (from_space_->HasAddress(obj)) {
    mirror::Object* forwarding_address = GetForwardingAddressInFromSpace(const_cast<Object*>(obj));
    return forwarding_address;  // Returns either the forwarding address or nullptr.
  } else if (to_space_->HasAddress(obj)) {
    // Should be unlikely.
    // Already forwarded, must be marked.
    return obj;
  }
  return heap_->GetMarkBitmap()->Test(obj) ? obj : nullptr;
}

void SemiSpace::SetToSpace(space::ContinuousMemMapAllocSpace* to_space) {
  DCHECK(to_space != nullptr);
  to_space_ = to_space;
}

void SemiSpace::SetFromSpace(space::ContinuousMemMapAllocSpace* from_space) {
  DCHECK(from_space != nullptr);
  from_space_ = from_space;
}

void SemiSpace::FinishPhase() {
  TimingLogger::ScopedSplit split("FinishPhase", &timings_);
  Heap* heap = GetHeap();
  timings_.NewSplit("PostGcVerification");
  heap->PostGcVerification(this);

  // Null the "to" and "from" spaces since compacting from one to the other isn't valid until
  // further action is done by the heap.
  to_space_ = nullptr;
  from_space_ = nullptr;

  // Update the cumulative statistics.
  total_freed_objects_ += GetFreedObjects() + GetFreedLargeObjects();
  total_freed_bytes_ += GetFreedBytes() + GetFreedLargeObjectBytes();

  // Ensure that the mark stack is empty.
  CHECK(mark_stack_->IsEmpty());

  // Update the cumulative loggers.
  cumulative_timings_.Start();
  cumulative_timings_.AddLogger(timings_);
  cumulative_timings_.End();

  // Clear all of the spaces' mark bitmaps.
  for (const auto& space : GetHeap()->GetContinuousSpaces()) {
    accounting::SpaceBitmap* bitmap = space->GetMarkBitmap();
    if (bitmap != nullptr &&
        space->GetGcRetentionPolicy() != space::kGcRetentionPolicyNeverCollect) {
      bitmap->Clear();
    }
  }
  mark_stack_->Reset();

  // Reset the marked large objects.
  space::LargeObjectSpace* large_objects = GetHeap()->GetLargeObjectsSpace();
  large_objects->GetMarkObjects()->Clear();

  if (generational_) {
    // Decide whether to do a whole heap collection or a bump pointer
    // space only collection at the next collection by updating
    // whole_heap_collection_. Enable whole_heap_collection_ once every
    // kDefaultWholeHeapCollectionInterval collections.
    if (!whole_heap_collection_) {
      --whole_heap_collection_interval_counter_;
      DCHECK_GE(whole_heap_collection_interval_counter_, 0);
      if (whole_heap_collection_interval_counter_ == 0) {
        whole_heap_collection_ = true;
      }
    } else {
      DCHECK_EQ(whole_heap_collection_interval_counter_, 0);
      whole_heap_collection_interval_counter_ = kDefaultWholeHeapCollectionInterval;
      whole_heap_collection_ = false;
    }
  }
}

void SemiSpace::RevokeAllThreadLocalBuffers() {
  timings_.StartSplit("(Paused)RevokeAllThreadLocalBuffers");
  GetHeap()->RevokeAllThreadLocalBuffers();
  timings_.EndSplit();
}

}  // namespace collector
}  // namespace gc
}  // namespace art