mark_sweep.cc revision 3e3d591f781b771de89f3b989830da2b6ac6fac8
/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "mark_sweep.h"

#include <functional>
#include <numeric>
#include <climits>
#include <vector>

#include "base/logging.h"
#include "base/macros.h"
#include "base/mutex-inl.h"
#include "base/timing_logger.h"
#include "gc/accounting/card_table-inl.h"
#include "gc/accounting/heap_bitmap.h"
#include "gc/accounting/space_bitmap-inl.h"
#include "gc/heap.h"
#include "gc/space/image_space.h"
#include "gc/space/large_object_space.h"
#include "gc/space/space-inl.h"
#include "indirect_reference_table.h"
#include "intern_table.h"
#include "jni_internal.h"
#include "monitor.h"
#include "mark_sweep-inl.h"
#include "mirror/class-inl.h"
#include "mirror/class_loader.h"
#include "mirror/dex_cache.h"
#include "mirror/field.h"
#include "mirror/field-inl.h"
#include "mirror/object-inl.h"
#include "mirror/object_array.h"
#include "mirror/object_array-inl.h"
#include "runtime.h"
#include "thread-inl.h"
#include "thread_list.h"
#include "verifier/method_verifier.h"

using ::art::mirror::Class;
using ::art::mirror::Field;
using ::art::mirror::Object;
using ::art::mirror::ObjectArray;

namespace art {
namespace gc {
namespace collector {

// Performance options.
static const bool kParallelMarkStack = true;
static const bool kDisableFinger = true;  // TODO: Fix, bit rotten.
static const bool kUseMarkStackPrefetch = true;

// Profiling and information flags.
static const bool kCountClassesMarked = false;
static const bool kProfileLargeObjects = false;
static const bool kMeasureOverhead = false;
static const bool kCountTasks = false;
static const bool kCountJavaLangRefs = false;

class SetFingerVisitor {
 public:
  explicit SetFingerVisitor(MarkSweep* const mark_sweep) : mark_sweep_(mark_sweep) {}

  void operator()(void* finger) const {
    mark_sweep_->SetFinger(reinterpret_cast<Object*>(finger));
  }

 private:
  MarkSweep* const mark_sweep_;
};

void MarkSweep::ImmuneSpace(space::ContinuousSpace* space) {
  // Bind live to mark bitmap if necessary.
  if (space->GetLiveBitmap() != space->GetMarkBitmap()) {
    BindLiveToMarkBitmap(space);
  }

  // Add the space to the immune region.
  if (immune_begin_ == NULL) {
    DCHECK(immune_end_ == NULL);
    SetImmuneRange(reinterpret_cast<Object*>(space->Begin()),
                   reinterpret_cast<Object*>(space->End()));
  } else {
    const std::vector<space::ContinuousSpace*>& spaces = GetHeap()->GetContinuousSpaces();
    const space::ContinuousSpace* prev_space = NULL;
    // Find out if the previous space is immune.
    // TODO: C++0x
    typedef std::vector<space::ContinuousSpace*>::const_iterator It;
    for (It it = spaces.begin(), end = spaces.end(); it != end; ++it) {
      if (*it == space) {
        break;
      }
      prev_space = *it;
    }

    // If the previous space was immune, then extend the immune region. Relies on continuous spaces
    // being sorted by Heap::AddContinuousSpace.
    if (prev_space != NULL &&
        immune_begin_ <= reinterpret_cast<Object*>(prev_space->Begin()) &&
        immune_end_ >= reinterpret_cast<Object*>(prev_space->End())) {
      immune_begin_ = std::min(reinterpret_cast<Object*>(space->Begin()), immune_begin_);
      immune_end_ = std::max(reinterpret_cast<Object*>(space->End()), immune_end_);
    }
  }
}

void MarkSweep::BindBitmaps() {
  const std::vector<space::ContinuousSpace*>& spaces = GetHeap()->GetContinuousSpaces();
  WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);

  // Mark all of the spaces we never collect as immune.
  typedef std::vector<space::ContinuousSpace*>::const_iterator It;
  for (It it = spaces.begin(), end = spaces.end(); it != end; ++it) {
    space::ContinuousSpace* space = *it;
    if (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyNeverCollect) {
      ImmuneSpace(space);
    }
  }
}

MarkSweep::MarkSweep(Heap* heap, bool is_concurrent, const std::string& name_prefix)
    : GarbageCollector(heap,
                       name_prefix + (name_prefix.empty() ? "" : " ") +
                       (is_concurrent ? "concurrent mark sweep" : "mark sweep")),
      current_mark_bitmap_(NULL),
      java_lang_Class_(NULL),
      mark_stack_(NULL),
      finger_(NULL),
      immune_begin_(NULL),
      immune_end_(NULL),
      soft_reference_list_(NULL),
      weak_reference_list_(NULL),
      finalizer_reference_list_(NULL),
      phantom_reference_list_(NULL),
      cleared_reference_list_(NULL),
      gc_barrier_(new Barrier(0)),
      large_object_lock_("mark sweep large object lock", kMarkSweepLargeObjectLock),
      mark_stack_expand_lock_("mark sweep mark stack expand lock"),
      is_concurrent_(is_concurrent),
      clear_soft_references_(false) {
}

void MarkSweep::InitializePhase() {
  timings_.Reset();
  timings_.StartSplit("InitializePhase");
  mark_stack_ = GetHeap()->mark_stack_.get();
  DCHECK(mark_stack_ != NULL);
  finger_ = NULL;
  SetImmuneRange(NULL, NULL);
  soft_reference_list_ = NULL;
  weak_reference_list_ = NULL;
  finalizer_reference_list_ = NULL;
  phantom_reference_list_ = NULL;
  cleared_reference_list_ = NULL;
  freed_bytes_ = 0;
  freed_objects_ = 0;
  class_count_ = 0;
  array_count_ = 0;
  other_count_ = 0;
  large_object_test_ = 0;
  large_object_mark_ = 0;
  classes_marked_ = 0;
  overhead_time_ = 0;
  work_chunks_created_ = 0;
  work_chunks_deleted_ = 0;
  reference_count_ = 0;
  java_lang_Class_ = Class::GetJavaLangClass();
  CHECK(java_lang_Class_ != NULL);
  FindDefaultMarkBitmap();
  // Do any pre GC verification.
  heap_->PreGcVerification(this);
}

void MarkSweep::ProcessReferences(Thread* self) {
  timings_.NewSplit("ProcessReferences");
  WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
  ProcessReferences(&soft_reference_list_, clear_soft_references_, &weak_reference_list_,
                    &finalizer_reference_list_, &phantom_reference_list_);
}

bool MarkSweep::HandleDirtyObjectsPhase() {
  Thread* self = Thread::Current();
  accounting::ObjectStack* allocation_stack = GetHeap()->allocation_stack_.get();
  Locks::mutator_lock_->AssertExclusiveHeld(self);

  {
    timings_.NewSplit("ReMarkRoots");
    WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);

    // Re-mark root set.
    ReMarkRoots();

    // Scan dirty objects; this is only required when doing concurrent GC, since concurrent
    // mutators may have dirtied cards during marking.
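    // Only cards still at kCardDirty are re-scanned here; cards dirtied before this GC began
    // were already aged or added to mod-union tables by Heap::ProcessCards in MarkingPhase.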
    RecursiveMarkDirtyObjects(accounting::CardTable::kCardDirty);
  }

  ProcessReferences(self);

  // Only need to do this if we have the card mark verification on, and only during concurrent GC.
  if (GetHeap()->verify_missing_card_marks_) {
    WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
    // This second sweep makes sure that we don't have any objects in the live stack which point to
    // freed objects. These cause problems since their references may be previously freed objects.
    SweepArray(allocation_stack, false);
  } else {
    timings_.NewSplit("UnMarkAllocStack");
    WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
    // The allocation stack contains things allocated since the start of the GC. These may have been
    // marked during this GC meaning they won't be eligible for reclaiming in the next sticky GC.
    // Remove these objects from the mark bitmaps so that they will be eligible for sticky
    // collection.
    heap_->UnMarkAllocStack(GetHeap()->alloc_space_->GetMarkBitmap(),
                            GetHeap()->large_object_space_->GetMarkObjects(),
                            allocation_stack);
  }
  return true;
}

bool MarkSweep::IsConcurrent() const {
  return is_concurrent_;
}

void MarkSweep::MarkingPhase() {
  Heap* heap = GetHeap();
  Thread* self = Thread::Current();

  timings_.NewSplit("BindBitmaps");
  BindBitmaps();
  FindDefaultMarkBitmap();
  // Process dirty cards and add dirty cards to mod union tables.
  heap->ProcessCards(timings_);

  // Need to do this before the checkpoint since we don't want any threads to add references to
  // the live stack during the recursive mark.
  timings_.NewSplit("SwapStacks");
  heap->SwapStacks();

  WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
  if (Locks::mutator_lock_->IsExclusiveHeld(self)) {
    // If we exclusively hold the mutator lock, all threads must be suspended.
    timings_.NewSplit("MarkRoots");
    MarkRoots();
  } else {
    timings_.NewSplit("MarkRootsCheckpoint");
    MarkRootsCheckpoint(self);
    timings_.NewSplit("MarkNonThreadRoots");
    MarkNonThreadRoots();
  }
  timings_.NewSplit("MarkConcurrentRoots");
  MarkConcurrentRoots();

  heap->UpdateAndMarkModUnion(this, timings_, GetGcType());
  MarkReachableObjects();
}

void MarkSweep::MarkReachableObjects() {
  // Mark everything allocated since the last GC as live so that we can sweep concurrently,
  // knowing that new allocations won't be marked as live.
  timings_.NewSplit("MarkStackAsLive");
  accounting::ObjectStack* live_stack = heap_->GetLiveStack();
  heap_->MarkAllocStack(heap_->alloc_space_->GetLiveBitmap(),
                        heap_->large_object_space_->GetLiveObjects(),
                        live_stack);
  live_stack->Reset();
  // Recursively mark all the non-image bits set in the mark bitmap.
  RecursiveMark();
  DisableFinger();
}

void MarkSweep::ReclaimPhase() {
  Thread* self = Thread::Current();

  if (!IsConcurrent()) {
    ProcessReferences(self);
  }

  // Before freeing anything, let's verify the heap.
  if (kIsDebugBuild) {
    ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
    VerifyImageRoots();
  }
  heap_->PreSweepingGcVerification(this);

  {
    WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);

    // Reclaim unmarked objects.
    Sweep(false);

    // Swap the live and mark bitmaps for each space we modified. This is an
    // optimization that enables us to not clear live bits inside of the sweep. Only swaps unbound
    // bitmaps.
    timings_.NewSplit("SwapBitmaps");
    SwapBitmaps();

    // Unbind the live and mark bitmaps.
    UnBindBitmaps();
  }
}

void MarkSweep::SetImmuneRange(Object* begin, Object* end) {
  immune_begin_ = begin;
  immune_end_ = end;
}

void MarkSweep::FindDefaultMarkBitmap() {
  const std::vector<space::ContinuousSpace*>& spaces = GetHeap()->GetContinuousSpaces();
  // TODO: C++0x
  typedef std::vector<space::ContinuousSpace*>::const_iterator It;
  for (It it = spaces.begin(), end = spaces.end(); it != end; ++it) {
    space::ContinuousSpace* space = *it;
    if (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyAlwaysCollect) {
      current_mark_bitmap_ = (*it)->GetMarkBitmap();
      CHECK(current_mark_bitmap_ != NULL);
      return;
    }
  }
  GetHeap()->DumpSpaces();
  LOG(FATAL) << "Could not find a default mark bitmap";
}

void MarkSweep::ExpandMarkStack() {
  // Rare case, no need to have Thread::Current be a parameter.
  MutexLock mu(Thread::Current(), mark_stack_expand_lock_);
  if (UNLIKELY(mark_stack_->Size() < mark_stack_->Capacity())) {
    // Someone else acquired the lock and expanded the mark stack before us.
    return;
  }
  std::vector<Object*> temp;
  temp.insert(temp.begin(), mark_stack_->Begin(), mark_stack_->End());
  mark_stack_->Resize(mark_stack_->Capacity() * 2);
  for (size_t i = 0; i < temp.size(); ++i) {
    mark_stack_->PushBack(temp[i]);
  }
}

inline void MarkSweep::MarkObjectNonNullParallel(const Object* obj, bool check_finger) {
  DCHECK(obj != NULL);
  if (MarkObjectParallel(obj)) {
    if (kDisableFinger || (check_finger && obj < finger_)) {
      while (UNLIKELY(!mark_stack_->AtomicPushBack(const_cast<Object*>(obj)))) {
        // Only reason a push can fail is that the mark stack is full.
        ExpandMarkStack();
      }
    }
  }
}

inline void MarkSweep::MarkObjectNonNull(const Object* obj, bool check_finger) {
  DCHECK(obj != NULL);

  if (obj >= immune_begin_ && obj < immune_end_) {
    DCHECK(IsMarked(obj));
    return;
  }

  // Try to take advantage of locality of references within a space, failing this find the space
  // the hard way.
  accounting::SpaceBitmap* object_bitmap = current_mark_bitmap_;
  if (UNLIKELY(!object_bitmap->HasAddress(obj))) {
    accounting::SpaceBitmap* new_bitmap = heap_->GetMarkBitmap()->GetContinuousSpaceBitmap(obj);
    if (LIKELY(new_bitmap != NULL)) {
      object_bitmap = new_bitmap;
    } else {
      MarkLargeObject(obj);
      return;
    }
  }

  // This object was not previously marked.
  if (!object_bitmap->Test(obj)) {
    object_bitmap->Set(obj);
    if (kDisableFinger || (check_finger && obj < finger_)) {
      // Do we need to expand the mark stack?
      if (UNLIKELY(mark_stack_->Size() >= mark_stack_->Capacity())) {
        ExpandMarkStack();
      }
      // The object must be pushed on to the mark stack.
      mark_stack_->PushBack(const_cast<Object*>(obj));
    }
  }
}

// Rare case, probably not worth inlining since it will increase instruction cache miss rate.
bool MarkSweep::MarkLargeObject(const Object* obj) {
  // TODO: support >1 discontinuous space.
  space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
  accounting::SpaceSetMap* large_objects = large_object_space->GetMarkObjects();
  if (kProfileLargeObjects) {
    ++large_object_test_;
  }
  if (UNLIKELY(!large_objects->Test(obj))) {
    // TODO: mark may be called holding the JNI global references lock, Contains will hold the
    // large object space lock causing a lock level violation. Bug: 9414652.
    if (!kDebugLocking && !large_object_space->Contains(obj)) {
      LOG(ERROR) << "Tried to mark " << obj << " not contained by any spaces";
      LOG(ERROR) << "Attempting to see if it's a bad root";
      VerifyRoots();
      LOG(FATAL) << "Can't mark bad root";
    }
    if (kProfileLargeObjects) {
      ++large_object_mark_;
    }
    large_objects->Set(obj);
    // Don't need to check finger since large objects never have any object references.
    return true;
  }
  return false;
}

inline bool MarkSweep::MarkObjectParallel(const Object* obj) {
  DCHECK(obj != NULL);

  if (obj >= immune_begin_ && obj < immune_end_) {
    DCHECK(IsMarked(obj));
    return false;
  }

  // Try to take advantage of locality of references within a space, failing this find the space
  // the hard way.
  accounting::SpaceBitmap* object_bitmap = current_mark_bitmap_;
  if (UNLIKELY(!object_bitmap->HasAddress(obj))) {
    accounting::SpaceBitmap* new_bitmap = heap_->GetMarkBitmap()->GetContinuousSpaceBitmap(obj);
    if (new_bitmap != NULL) {
      object_bitmap = new_bitmap;
    } else {
      // TODO: Remove the Thread::Current here?
      // TODO: Convert this to some kind of atomic marking?
      MutexLock mu(Thread::Current(), large_object_lock_);
      return MarkLargeObject(obj);
    }
  }

  // Return true if the object was not previously marked.
  return !object_bitmap->AtomicTestAndSet(obj);
}

// Used to mark objects when recursing. Recursion is done by moving
// the finger across the bitmaps in address order and marking child
// objects. Any newly-marked objects whose addresses are lower than
// the finger won't be visited by the bitmap scan, so those objects
// need to be added to the mark stack.
void MarkSweep::MarkObject(const Object* obj) {
  if (obj != NULL) {
    MarkObjectNonNull(obj, true);
  }
}

void MarkSweep::MarkRoot(const Object* obj) {
  if (obj != NULL) {
    MarkObjectNonNull(obj, false);
  }
}

void MarkSweep::MarkRootParallelCallback(const Object* root, void* arg) {
  DCHECK(root != NULL);
  DCHECK(arg != NULL);
  MarkSweep* mark_sweep = reinterpret_cast<MarkSweep*>(arg);
  mark_sweep->MarkObjectNonNullParallel(root, false);
}

void MarkSweep::MarkObjectCallback(const Object* root, void* arg) {
  DCHECK(root != NULL);
  DCHECK(arg != NULL);
  MarkSweep* mark_sweep = reinterpret_cast<MarkSweep*>(arg);
  mark_sweep->MarkObjectNonNull(root, false);
}

void MarkSweep::ReMarkObjectVisitor(const Object* root, void* arg) {
  DCHECK(root != NULL);
  DCHECK(arg != NULL);
  MarkSweep* mark_sweep = reinterpret_cast<MarkSweep*>(arg);
  mark_sweep->MarkObjectNonNull(root, true);
}

void MarkSweep::VerifyRootCallback(const Object* root, void* arg, size_t vreg,
                                   const StackVisitor* visitor) {
  reinterpret_cast<MarkSweep*>(arg)->VerifyRoot(root, vreg, visitor);
}

void MarkSweep::VerifyRoot(const Object* root, size_t vreg, const StackVisitor* visitor) {
  // See if the root is on any space bitmap.
  if (GetHeap()->GetLiveBitmap()->GetContinuousSpaceBitmap(root) == NULL) {
    space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
    if (!large_object_space->Contains(root)) {
      LOG(ERROR) << "Found invalid root: " << root;
      if (visitor != NULL) {
        LOG(ERROR) << visitor->DescribeLocation() << " in VReg: " << vreg;
      }
    }
  }
}

void MarkSweep::VerifyRoots() {
  Runtime::Current()->GetThreadList()->VerifyRoots(VerifyRootCallback, this);
}

// Marks all objects in the root set.
void MarkSweep::MarkRoots() {
  Runtime::Current()->VisitNonConcurrentRoots(MarkObjectCallback, this);
}

void MarkSweep::MarkNonThreadRoots() {
  Runtime::Current()->VisitNonThreadRoots(MarkObjectCallback, this);
}

void MarkSweep::MarkConcurrentRoots() {
  // Visit all runtime roots and clear dirty flags.
  Runtime::Current()->VisitConcurrentRoots(MarkObjectCallback, this, false, true);
}

class CheckObjectVisitor {
 public:
  explicit CheckObjectVisitor(MarkSweep* const mark_sweep) : mark_sweep_(mark_sweep) {}

  void operator()(const Object* obj, const Object* ref, MemberOffset offset, bool is_static) const
      NO_THREAD_SAFETY_ANALYSIS {
    if (kDebugLocking) {
      Locks::heap_bitmap_lock_->AssertSharedHeld(Thread::Current());
    }
    mark_sweep_->CheckReference(obj, ref, offset, is_static);
  }

 private:
  MarkSweep* const mark_sweep_;
};

void MarkSweep::CheckObject(const Object* obj) {
  DCHECK(obj != NULL);
  CheckObjectVisitor visitor(this);
  VisitObjectReferences(obj, visitor);
}

void MarkSweep::VerifyImageRootVisitor(Object* root, void* arg) {
  DCHECK(root != NULL);
  DCHECK(arg != NULL);
  MarkSweep* mark_sweep = reinterpret_cast<MarkSweep*>(arg);
  DCHECK(mark_sweep->heap_->GetMarkBitmap()->Test(root));
  mark_sweep->CheckObject(root);
}

void MarkSweep::BindLiveToMarkBitmap(space::ContinuousSpace* space) {
  CHECK(space->IsDlMallocSpace());
  space::DlMallocSpace* alloc_space = space->AsDlMallocSpace();
  accounting::SpaceBitmap* live_bitmap = space->GetLiveBitmap();
  accounting::SpaceBitmap* mark_bitmap = alloc_space->mark_bitmap_.release();
  GetHeap()->GetMarkBitmap()->ReplaceBitmap(mark_bitmap, live_bitmap);
  alloc_space->temp_bitmap_.reset(mark_bitmap);
  alloc_space->mark_bitmap_.reset(live_bitmap);
}

class ScanObjectVisitor {
 public:
  explicit ScanObjectVisitor(MarkSweep* const mark_sweep) : mark_sweep_(mark_sweep) {}

  // TODO: Fix this when annotalysis works with visitors.
  void operator()(const Object* obj) const NO_THREAD_SAFETY_ANALYSIS {
    if (kDebugLocking) {
      Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
      Locks::heap_bitmap_lock_->AssertExclusiveHeld(Thread::Current());
    }
    mark_sweep_->ScanObject(obj);
  }

 private:
  MarkSweep* const mark_sweep_;
};

void MarkSweep::ScanGrayObjects(byte minimum_age) {
  accounting::CardTable* card_table = GetHeap()->GetCardTable();
  const std::vector<space::ContinuousSpace*>& spaces = GetHeap()->GetContinuousSpaces();
  ScanObjectVisitor visitor(this);
  SetFingerVisitor finger_visitor(this);
  // TODO: C++0x
  typedef std::vector<space::ContinuousSpace*>::const_iterator It;
  for (It it = spaces.begin(), space_end = spaces.end(); it != space_end; ++it) {
    space::ContinuousSpace* space = *it;
    switch (space->GetGcRetentionPolicy()) {
      case space::kGcRetentionPolicyNeverCollect:
        timings_.NewSplit("ScanGrayImageSpaceObjects");
        break;
      case space::kGcRetentionPolicyFullCollect:
        timings_.NewSplit("ScanGrayZygoteSpaceObjects");
        break;
      case space::kGcRetentionPolicyAlwaysCollect:
        timings_.NewSplit("ScanGrayAllocSpaceObjects");
        break;
    }
    byte* begin = space->Begin();
    byte* end = space->End();
    // Image spaces are handled properly since live == marked for them.
    accounting::SpaceBitmap* mark_bitmap = space->GetMarkBitmap();
    card_table->Scan(mark_bitmap, begin, end, visitor, finger_visitor, minimum_age);
  }
}

class CheckBitmapVisitor {
 public:
  explicit CheckBitmapVisitor(MarkSweep* mark_sweep) : mark_sweep_(mark_sweep) {}

  void operator()(const Object* obj) const NO_THREAD_SAFETY_ANALYSIS {
    if (kDebugLocking) {
      Locks::heap_bitmap_lock_->AssertSharedHeld(Thread::Current());
    }
    DCHECK(obj != NULL);
    mark_sweep_->CheckObject(obj);
  }

 private:
  MarkSweep* mark_sweep_;
};

void MarkSweep::VerifyImageRoots() {
  // Verify roots ensures that all the references inside the image space point to
  // objects which are either in the image space or marked objects in the alloc
  // space.
  CheckBitmapVisitor visitor(this);
  const std::vector<space::ContinuousSpace*>& spaces = GetHeap()->GetContinuousSpaces();
  // TODO: C++0x
  typedef std::vector<space::ContinuousSpace*>::const_iterator It;
  for (It it = spaces.begin(), end = spaces.end(); it != end; ++it) {
    if ((*it)->IsImageSpace()) {
      space::ImageSpace* space = (*it)->AsImageSpace();
      uintptr_t begin = reinterpret_cast<uintptr_t>(space->Begin());
      uintptr_t end = reinterpret_cast<uintptr_t>(space->End());
      accounting::SpaceBitmap* live_bitmap = space->GetLiveBitmap();
      DCHECK(live_bitmap != NULL);
      live_bitmap->VisitMarkedRange(begin, end, visitor, VoidFunctor());
    }
  }
}

// Populates the mark stack based on the set of marked objects and
// recursively marks until the mark stack is emptied.
void MarkSweep::RecursiveMark() {
  timings_.NewSplit("RecursiveMark");
  // RecursiveMark will build the lists of known instances of the Reference classes.
  // See DelayReferenceReferent for details.
  CHECK(soft_reference_list_ == NULL);
  CHECK(weak_reference_list_ == NULL);
  CHECK(finalizer_reference_list_ == NULL);
  CHECK(phantom_reference_list_ == NULL);
  CHECK(cleared_reference_list_ == NULL);

  const bool partial = GetGcType() == kGcTypePartial;
  SetFingerVisitor set_finger_visitor(this);
  ScanObjectVisitor scan_visitor(this);
  if (!kDisableFinger) {
    finger_ = NULL;
    const std::vector<space::ContinuousSpace*>& spaces = GetHeap()->GetContinuousSpaces();
    // TODO: C++0x
    typedef std::vector<space::ContinuousSpace*>::const_iterator It;
    for (It it = spaces.begin(), end = spaces.end(); it != end; ++it) {
      space::ContinuousSpace* space = *it;
      if ((space->GetGcRetentionPolicy() == space::kGcRetentionPolicyAlwaysCollect) ||
          (!partial && space->GetGcRetentionPolicy() == space::kGcRetentionPolicyFullCollect)) {
        current_mark_bitmap_ = space->GetMarkBitmap();
        if (current_mark_bitmap_ == NULL) {
          GetHeap()->DumpSpaces();
          LOG(FATAL) << "invalid bitmap";
        }
        // This function does not handle heap end increasing, so we must use the space end.
        uintptr_t begin = reinterpret_cast<uintptr_t>(space->Begin());
        uintptr_t end = reinterpret_cast<uintptr_t>(space->End());
        current_mark_bitmap_->VisitMarkedRange(begin, end, scan_visitor, set_finger_visitor);
      }
    }
  }
  DisableFinger();
  timings_.NewSplit("ProcessMarkStack");
  ProcessMarkStack();
}

bool MarkSweep::IsMarkedCallback(const Object* object, void* arg) {
  return
      reinterpret_cast<MarkSweep*>(arg)->IsMarked(object) ||
      !reinterpret_cast<MarkSweep*>(arg)->GetHeap()->GetLiveBitmap()->Test(object);
}

void MarkSweep::RecursiveMarkDirtyObjects(byte minimum_age) {
  ScanGrayObjects(minimum_age);
  timings_.NewSplit("ProcessMarkStack");
  ProcessMarkStack();
}

void MarkSweep::ReMarkRoots() {
  Runtime::Current()->VisitRoots(ReMarkObjectVisitor, this, true, true);
}

void MarkSweep::SweepJniWeakGlobals(IsMarkedTester is_marked, void* arg) {
  JavaVMExt* vm = Runtime::Current()->GetJavaVM();
  MutexLock mu(Thread::Current(), vm->weak_globals_lock);
  IndirectReferenceTable* table = &vm->weak_globals;
  typedef IndirectReferenceTable::iterator It;  // TODO: C++0x auto
  for (It it = table->begin(), end = table->end(); it != end; ++it) {
    const Object** entry = *it;
    if (!is_marked(*entry, arg)) {
      *entry = kClearedJniWeakGlobal;
    }
  }
}

struct ArrayMarkedCheck {
  accounting::ObjectStack* live_stack;
  MarkSweep* mark_sweep;
};

// Either marked or not live.
bool MarkSweep::IsMarkedArrayCallback(const Object* object, void* arg) {
  ArrayMarkedCheck* array_check = reinterpret_cast<ArrayMarkedCheck*>(arg);
  if (array_check->mark_sweep->IsMarked(object)) {
    return true;
  }
  accounting::ObjectStack* live_stack = array_check->live_stack;
  return std::find(live_stack->Begin(), live_stack->End(), object) == live_stack->End();
}

void MarkSweep::SweepSystemWeaksArray(accounting::ObjectStack* allocations) {
  Runtime* runtime = Runtime::Current();
  // The callbacks check !is_marked, where is_marked is the callback, but we want
  // !IsMarked && IsLive. So compute !(!IsMarked && IsLive), which is equal to
  // (IsMarked || !IsLive). Or, for swapped bitmaps, (IsLive || !IsMarked).
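  // Unlike SweepSystemWeaks, this variant also treats unmarked objects on the passed allocation
  // stack as dead: SweepArray is about to free them, so weak references to them must be cleared
  // now rather than left dangling.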
  ArrayMarkedCheck visitor;
  visitor.live_stack = allocations;
  visitor.mark_sweep = this;
  runtime->GetInternTable()->SweepInternTableWeaks(IsMarkedArrayCallback, &visitor);
  runtime->GetMonitorList()->SweepMonitorList(IsMarkedArrayCallback, &visitor);
  SweepJniWeakGlobals(IsMarkedArrayCallback, &visitor);
}

void MarkSweep::SweepSystemWeaks() {
  Runtime* runtime = Runtime::Current();
  // The callbacks check !is_marked, where is_marked is the callback, but we want
  // !IsMarked && IsLive. So compute !(!IsMarked && IsLive), which is equal to
  // (IsMarked || !IsLive). Or, for swapped bitmaps, (IsLive || !IsMarked).
  runtime->GetInternTable()->SweepInternTableWeaks(IsMarkedCallback, this);
  runtime->GetMonitorList()->SweepMonitorList(IsMarkedCallback, this);
  SweepJniWeakGlobals(IsMarkedCallback, this);
}

bool MarkSweep::VerifyIsLiveCallback(const Object* obj, void* arg) {
  reinterpret_cast<MarkSweep*>(arg)->VerifyIsLive(obj);
  // We don't actually want to sweep the object, so let's return "marked".
  return true;
}

void MarkSweep::VerifyIsLive(const Object* obj) {
  Heap* heap = GetHeap();
  if (!heap->GetLiveBitmap()->Test(obj)) {
    space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
    if (!large_object_space->GetLiveObjects()->Test(obj)) {
      if (std::find(heap->allocation_stack_->Begin(), heap->allocation_stack_->End(), obj) ==
          heap->allocation_stack_->End()) {
        // Object not found!
        heap->DumpSpaces();
        LOG(FATAL) << "Found dead object " << obj;
      }
    }
  }
}

void MarkSweep::VerifySystemWeaks() {
  Runtime* runtime = Runtime::Current();
  // Verify system weaks, uses a special IsMarked callback which always returns true.
  runtime->GetInternTable()->SweepInternTableWeaks(VerifyIsLiveCallback, this);
  runtime->GetMonitorList()->SweepMonitorList(VerifyIsLiveCallback, this);

  JavaVMExt* vm = runtime->GetJavaVM();
  MutexLock mu(Thread::Current(), vm->weak_globals_lock);
  IndirectReferenceTable* table = &vm->weak_globals;
  typedef IndirectReferenceTable::iterator It;  // TODO: C++0x auto
  for (It it = table->begin(), end = table->end(); it != end; ++it) {
    const Object** entry = *it;
    VerifyIsLive(*entry);
  }
}

struct SweepCallbackContext {
  MarkSweep* mark_sweep;
  space::AllocSpace* space;
  Thread* self;
};

class CheckpointMarkThreadRoots : public Closure {
 public:
  explicit CheckpointMarkThreadRoots(MarkSweep* mark_sweep) : mark_sweep_(mark_sweep) {}

  virtual void Run(Thread* thread) NO_THREAD_SAFETY_ANALYSIS {
    // Note: self is not necessarily equal to thread since thread may be suspended.
    Thread* self = Thread::Current();
    CHECK(thread == self || thread->IsSuspended() || thread->GetState() == kWaitingPerformingGc)
        << thread->GetState() << " thread " << thread << " self " << self;
    thread->VisitRoots(MarkSweep::MarkRootParallelCallback, mark_sweep_);
    mark_sweep_->GetBarrier().Pass(self);
  }

 private:
  MarkSweep* mark_sweep_;
};

void MarkSweep::MarkRootsCheckpoint(Thread* self) {
  CheckpointMarkThreadRoots check_point(this);
  ThreadList* thread_list = Runtime::Current()->GetThreadList();
  // Request that the checkpoint be run on all threads, returning a count of the threads that
  // must run through the barrier, including self.
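  // Each thread visits its own roots via CheckpointMarkThreadRoots::Run and then passes the
  // barrier; the Increment() call below waits until barrier_count threads have done so.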
  size_t barrier_count = thread_list->RunCheckpoint(&check_point);
  // Release locks then wait for all mutator threads to pass the barrier.
  // TODO: optimize to not release locks when there are no threads to wait for.
  Locks::heap_bitmap_lock_->ExclusiveUnlock(self);
  Locks::mutator_lock_->SharedUnlock(self);
  ThreadState old_state = self->SetState(kWaitingForCheckPointsToRun);
  CHECK_EQ(old_state, kWaitingPerformingGc);
  gc_barrier_->Increment(self, barrier_count);
  self->SetState(kWaitingPerformingGc);
  Locks::mutator_lock_->SharedLock(self);
  Locks::heap_bitmap_lock_->ExclusiveLock(self);
}

void MarkSweep::SweepCallback(size_t num_ptrs, Object** ptrs, void* arg) {
  SweepCallbackContext* context = static_cast<SweepCallbackContext*>(arg);
  MarkSweep* mark_sweep = context->mark_sweep;
  Heap* heap = mark_sweep->GetHeap();
  space::AllocSpace* space = context->space;
  Thread* self = context->self;
  Locks::heap_bitmap_lock_->AssertExclusiveHeld(self);
  // Use a bulk free, which merges consecutive objects before freeing, or free per object?
  // Documentation suggests better free performance with merging, but this may be at the expense
  // of allocation.
  size_t freed_objects = num_ptrs;
  // AllocSpace::FreeList clears the value in ptrs, so perform after clearing the live bit.
  size_t freed_bytes = space->FreeList(self, num_ptrs, ptrs);
  heap->RecordFree(freed_objects, freed_bytes);
  mark_sweep->freed_objects_ += freed_objects;
  mark_sweep->freed_bytes_ += freed_bytes;
}

void MarkSweep::ZygoteSweepCallback(size_t num_ptrs, Object** ptrs, void* arg) {
  SweepCallbackContext* context = static_cast<SweepCallbackContext*>(arg);
  Locks::heap_bitmap_lock_->AssertExclusiveHeld(context->self);
  Heap* heap = context->mark_sweep->GetHeap();
  // We don't free any actual memory to avoid dirtying the shared zygote pages.
  for (size_t i = 0; i < num_ptrs; ++i) {
    Object* obj = static_cast<Object*>(ptrs[i]);
    heap->GetLiveBitmap()->Clear(obj);
    heap->GetCardTable()->MarkCard(obj);
  }
}

void MarkSweep::SweepArray(accounting::ObjectStack* allocations, bool swap_bitmaps) {
  size_t freed_bytes = 0;
  space::DlMallocSpace* space = heap_->GetAllocSpace();

  // If we don't swap bitmaps then newly allocated Weaks go into the live bitmap but not the mark
  // bitmap, resulting in occasional frees of Weaks which are still in use.
  timings_.NewSplit("SweepSystemWeaks");
  SweepSystemWeaksArray(allocations);

  timings_.NewSplit("Process allocation stack");
  // Newly allocated objects MUST be in the alloc space and those are the only objects which we are
  // going to free.
  accounting::SpaceBitmap* live_bitmap = space->GetLiveBitmap();
  accounting::SpaceBitmap* mark_bitmap = space->GetMarkBitmap();
  space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
  accounting::SpaceSetMap* large_live_objects = large_object_space->GetLiveObjects();
  accounting::SpaceSetMap* large_mark_objects = large_object_space->GetMarkObjects();
  if (swap_bitmaps) {
    std::swap(live_bitmap, mark_bitmap);
    std::swap(large_live_objects, large_mark_objects);
  }

  size_t freed_large_objects = 0;
  size_t count = allocations->Size();
  Object** objects = const_cast<Object**>(allocations->Begin());
  Object** out = objects;

  // Empty the allocation stack.
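  // Unmarked alloc space objects are compacted toward the front of the stack through 'out' so
  // they can be freed with a single bulk FreeList call below; unmarked large objects are freed
  // individually as we go.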
  Thread* self = Thread::Current();
  for (size_t i = 0; i < count; ++i) {
    Object* obj = objects[i];
    // There should only be objects in the AllocSpace/LargeObjectSpace in the allocation stack.
    if (LIKELY(mark_bitmap->HasAddress(obj))) {
      if (!mark_bitmap->Test(obj)) {
        // Don't bother un-marking since we clear the mark bitmap anyway.
        *(out++) = obj;
      }
    } else if (!large_mark_objects->Test(obj)) {
      ++freed_large_objects;
      freed_bytes += large_object_space->Free(self, obj);
    }
  }
  CHECK_EQ(count, allocations->Size());
  timings_.NewSplit("FreeList");

  size_t freed_objects = out - objects;
  freed_bytes += space->FreeList(self, freed_objects, objects);
  VLOG(heap) << "Freed " << freed_objects << "/" << count
             << " objects with size " << PrettySize(freed_bytes);
  heap_->RecordFree(freed_objects + freed_large_objects, freed_bytes);
  freed_objects_ += freed_objects;
  freed_bytes_ += freed_bytes;

  timings_.NewSplit("ResetStack");
  allocations->Reset();
}

void MarkSweep::Sweep(bool swap_bitmaps) {
  DCHECK(mark_stack_->IsEmpty());

  // If we don't swap bitmaps then newly allocated Weaks go into the live bitmap but not the mark
  // bitmap, resulting in occasional frees of Weaks which are still in use.
  timings_.NewSplit("SweepSystemWeaks");
  SweepSystemWeaks();

  const bool partial = (GetGcType() == kGcTypePartial);
  SweepCallbackContext scc;
  scc.mark_sweep = this;
  scc.self = Thread::Current();
  const std::vector<space::ContinuousSpace*>& spaces = GetHeap()->GetContinuousSpaces();
  // TODO: C++0x
  typedef std::vector<space::ContinuousSpace*>::const_iterator It;
  for (It it = spaces.begin(), end = spaces.end(); it != end; ++it) {
    space::ContinuousSpace* space = *it;
    // We always sweep always-collect spaces.
    bool sweep_space = (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyAlwaysCollect);
    if (!partial && !sweep_space) {
      // We also sweep full-collect spaces when the GC isn't partial (i.e. it's a full GC).
      sweep_space = (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyFullCollect);
    }
    if (sweep_space) {
      uintptr_t begin = reinterpret_cast<uintptr_t>(space->Begin());
      uintptr_t end = reinterpret_cast<uintptr_t>(space->End());
      scc.space = space->AsDlMallocSpace();
      accounting::SpaceBitmap* live_bitmap = space->GetLiveBitmap();
      accounting::SpaceBitmap* mark_bitmap = space->GetMarkBitmap();
      if (swap_bitmaps) {
        std::swap(live_bitmap, mark_bitmap);
      }
      if (!space->IsZygoteSpace()) {
        timings_.NewSplit("SweepAllocSpace");
        // Bitmaps are pre-swapped as an optimization which enables sweeping with the heap unlocked.
        accounting::SpaceBitmap::SweepWalk(*live_bitmap, *mark_bitmap, begin, end,
                                           &SweepCallback, reinterpret_cast<void*>(&scc));
      } else {
        timings_.NewSplit("SweepZygote");
        // Zygote sweep takes care of dirtying cards and clearing live bits, does not free actual
        // memory.
        accounting::SpaceBitmap::SweepWalk(*live_bitmap, *mark_bitmap, begin, end,
                                           &ZygoteSweepCallback, reinterpret_cast<void*>(&scc));
      }
    }
  }

  timings_.NewSplit("SweepLargeObjects");
  SweepLargeObjects(swap_bitmaps);
}

void MarkSweep::SweepLargeObjects(bool swap_bitmaps) {
  // Sweep large objects.
  space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
  accounting::SpaceSetMap* large_live_objects = large_object_space->GetLiveObjects();
  accounting::SpaceSetMap* large_mark_objects = large_object_space->GetMarkObjects();
  if (swap_bitmaps) {
    std::swap(large_live_objects, large_mark_objects);
  }
  accounting::SpaceSetMap::Objects& live_objects = large_live_objects->GetObjects();
  // O(n*log(n)) but hopefully there are not too many large objects.
  size_t freed_objects = 0;
  size_t freed_bytes = 0;
  Thread* self = Thread::Current();
  // TODO: C++0x
  typedef accounting::SpaceSetMap::Objects::iterator It;
  for (It it = live_objects.begin(), end = live_objects.end(); it != end; ++it) {
    if (!large_mark_objects->Test(*it)) {
      freed_bytes += large_object_space->Free(self, const_cast<Object*>(*it));
      ++freed_objects;
    }
  }
  freed_objects_ += freed_objects;
  freed_bytes_ += freed_bytes;
  GetHeap()->RecordFree(freed_objects, freed_bytes);
}

void MarkSweep::CheckReference(const Object* obj, const Object* ref, MemberOffset offset, bool is_static) {
  const std::vector<space::ContinuousSpace*>& spaces = GetHeap()->GetContinuousSpaces();
  // TODO: C++0x
  typedef std::vector<space::ContinuousSpace*>::const_iterator It;
  for (It it = spaces.begin(), end = spaces.end(); it != end; ++it) {
    space::ContinuousSpace* space = *it;
    if (space->IsDlMallocSpace() && space->Contains(ref)) {
      DCHECK(IsMarked(obj));

      bool is_marked = IsMarked(ref);
      if (!is_marked) {
        LOG(INFO) << *space;
        LOG(WARNING) << (is_static ? "Static ref'" : "Instance ref'") << PrettyTypeOf(ref)
                     << "' (" << reinterpret_cast<const void*>(ref) << ") in '" << PrettyTypeOf(obj)
                     << "' (" << reinterpret_cast<const void*>(obj) << ") at offset "
                     << reinterpret_cast<void*>(offset.Int32Value()) << " wasn't marked";

        const Class* klass = is_static ? obj->AsClass() : obj->GetClass();
        DCHECK(klass != NULL);
        const ObjectArray<Field>* fields = is_static ? klass->GetSFields() : klass->GetIFields();
        DCHECK(fields != NULL);
        bool found = false;
        for (int32_t i = 0; i < fields->GetLength(); ++i) {
          const Field* cur = fields->Get(i);
          if (cur->GetOffset().Int32Value() == offset.Int32Value()) {
            LOG(WARNING) << "Field referencing the alloc space was " << PrettyField(cur);
            found = true;
            break;
          }
        }
        if (!found) {
          LOG(WARNING) << "Could not find field in object alloc space with offset " << offset.Int32Value();
        }

        bool obj_marked = heap_->GetCardTable()->IsDirty(obj);
        if (!obj_marked) {
          LOG(WARNING) << "Object '" << PrettyTypeOf(obj) << "' "
                       << "(" << reinterpret_cast<const void*>(obj) << ") contains references to "
                       << "the alloc space, but wasn't card marked";
        }
      }
      // Found the space containing ref; no need to examine the remaining spaces.
      break;
    }
  }
}

// Process the "referent" field in a java.lang.ref.Reference. If the
// referent has not yet been marked, put it on the appropriate list in
// the heap for later processing.
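// A reference whose pendingNext field is non-null is already enqueued on one of these lists, so
// a reference is only appended when pending is null and its referent is non-null and still
// unmarked.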
void MarkSweep::DelayReferenceReferent(Object* obj) {
  DCHECK(obj != NULL);
  Class* klass = obj->GetClass();
  DCHECK(klass != NULL);
  DCHECK(klass->IsReferenceClass());
  Object* pending = obj->GetFieldObject<Object*>(heap_->GetReferencePendingNextOffset(), false);
  Object* referent = heap_->GetReferenceReferent(obj);
  if (kCountJavaLangRefs) {
    ++reference_count_;
  }
  if (pending == NULL && referent != NULL && !IsMarked(referent)) {
    Object** list = NULL;
    if (klass->IsSoftReferenceClass()) {
      list = &soft_reference_list_;
    } else if (klass->IsWeakReferenceClass()) {
      list = &weak_reference_list_;
    } else if (klass->IsFinalizerReferenceClass()) {
      list = &finalizer_reference_list_;
    } else if (klass->IsPhantomReferenceClass()) {
      list = &phantom_reference_list_;
    }
    DCHECK(list != NULL) << PrettyClass(klass) << " " << std::hex << klass->GetAccessFlags();
    // TODO: One lock per list?
    heap_->EnqueuePendingReference(obj, list);
  }
}

void MarkSweep::ScanRoot(const Object* obj) {
  ScanObject(obj);
}

class MarkObjectVisitor {
 public:
  explicit MarkObjectVisitor(MarkSweep* const mark_sweep) : mark_sweep_(mark_sweep) {}

  // TODO: Fix this when annotalysis works with visitors.
  void operator()(const Object* /* obj */, const Object* ref, const MemberOffset& /* offset */,
                  bool /* is_static */) const
      NO_THREAD_SAFETY_ANALYSIS {
    if (kDebugLocking) {
      Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
      Locks::heap_bitmap_lock_->AssertExclusiveHeld(Thread::Current());
    }
    mark_sweep_->MarkObject(ref);
  }

 private:
  MarkSweep* const mark_sweep_;
};

// Scans an object reference. Determines the type of the reference
// and dispatches to a specialized scanning routine.
void MarkSweep::ScanObject(const Object* obj) {
  MarkObjectVisitor visitor(this);
  ScanObjectVisit(obj, visitor);
}

class MarkStackChunk : public Task {
 public:
  MarkStackChunk(ThreadPool* thread_pool, MarkSweep* mark_sweep, Object** begin, Object** end)
      : mark_sweep_(mark_sweep),
        thread_pool_(thread_pool),
        index_(0),
        length_(0),
        output_(NULL) {
    length_ = end - begin;
    if (begin != end) {
      // Cost not significant since we only do this for the initial set of mark stack chunks.
      memcpy(data_, begin, length_ * sizeof(*begin));
    }
    if (kCountTasks) {
      ++mark_sweep_->work_chunks_created_;
    }
  }

  ~MarkStackChunk() {
    DCHECK(output_ == NULL || output_->length_ == 0);
    DCHECK_GE(index_, length_);
    delete output_;
    if (kCountTasks) {
      ++mark_sweep_->work_chunks_deleted_;
    }
  }

  MarkSweep* const mark_sweep_;
  ThreadPool* const thread_pool_;
  static const size_t max_size = 1 * KB;
  // Index of which object we are scanning. Only needs to be atomic if we are doing work stealing.
  size_t index_;
  // Input / output mark stack. We add newly marked references to data_ until length reaches
  // max_size. This is an optimization so that fewer tasks are created.
  // TODO: Investigate using a bounded buffer FIFO.
  Object* data_[max_size];
  // How many elements in data_ we need to scan.
  size_t length_;
  // Output block, newly marked references get added to the output block so that another thread can
  // scan them.
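  // This chunk owns output_ until EnqueueOutput() hands it off to the thread pool; whatever is
  // still held when the destructor runs is deleted there.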
  MarkStackChunk* output_;

  class MarkObjectParallelVisitor {
   public:
    explicit MarkObjectParallelVisitor(MarkStackChunk* chunk_task) : chunk_task_(chunk_task) {}

    void operator()(const Object* /* obj */, const Object* ref,
                    const MemberOffset& /* offset */, bool /* is_static */) const {
      if (ref != NULL && chunk_task_->mark_sweep_->MarkObjectParallel(ref)) {
        chunk_task_->MarkStackPush(ref);
      }
    }

   private:
    MarkStackChunk* const chunk_task_;
  };

  // Push an object into the block.
  // Don't need to use an atomic ++ since only one thread is writing to an output block at any
  // given time.
  void Push(Object* obj) {
    CHECK(obj != NULL);
    data_[length_++] = obj;
  }

  void MarkStackPush(const Object* obj) {
    if (static_cast<size_t>(length_) < max_size) {
      Push(const_cast<Object*>(obj));
    } else {
      // Internal (thread-local) buffer is full, push to a new buffer instead.
      if (UNLIKELY(output_ == NULL)) {
        AllocateOutputChunk();
      } else if (UNLIKELY(static_cast<size_t>(output_->length_) == max_size)) {
        // Output block is full, queue it up for processing and obtain a new block.
        EnqueueOutput();
        AllocateOutputChunk();
      }
      output_->Push(const_cast<Object*>(obj));
    }
  }

  void ScanObject(Object* obj) {
    mark_sweep_->ScanObjectVisit(obj, MarkObjectParallelVisitor(this));
  }

  void EnqueueOutput() {
    if (output_ != NULL) {
      uint64_t start = 0;
      if (kMeasureOverhead) {
        start = NanoTime();
      }
      thread_pool_->AddTask(Thread::Current(), output_);
      output_ = NULL;
      if (kMeasureOverhead) {
        mark_sweep_->overhead_time_ += NanoTime() - start;
      }
    }
  }

  void AllocateOutputChunk() {
    uint64_t start = 0;
    if (kMeasureOverhead) {
      start = NanoTime();
    }
    output_ = new MarkStackChunk(thread_pool_, mark_sweep_, NULL, NULL);
    if (kMeasureOverhead) {
      mark_sweep_->overhead_time_ += NanoTime() - start;
    }
  }

  void Finalize() {
    EnqueueOutput();
    delete this;
  }

  // Scans all of the objects in the chunk.
  virtual void Run(Thread* self) {
    size_t index;
    while ((index = index_++) < length_) {
      if (kUseMarkStackPrefetch) {
        static const size_t prefetch_look_ahead = 1;
        __builtin_prefetch(data_[std::min(index + prefetch_look_ahead, length_ - 1)]);
      }
      Object* obj = data_[index];
      DCHECK(obj != NULL);
      ScanObject(obj);
    }
  }
};

void MarkSweep::ProcessMarkStackParallel() {
  CHECK(kDisableFinger) << "parallel mark stack processing cannot work when finger is enabled";
  Thread* self = Thread::Current();
  ThreadPool* thread_pool = GetHeap()->GetThreadPool();
  // Split the current mark stack up into work tasks.
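  // The chunk size is the per-thread share of the stack, rounded up, and capped at
  // MarkStackChunk::max_size so that each chunk fits in a task's inline data_ buffer.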
  const size_t num_threads = thread_pool->GetThreadCount();
  const size_t stack_size = mark_stack_->Size();
  const size_t chunk_size =
      std::min((stack_size + num_threads - 1) / num_threads,
               static_cast<size_t>(MarkStackChunk::max_size));
  size_t index = 0;
  for (size_t i = 0; i < num_threads || index < stack_size; ++i) {
    Object** begin = &mark_stack_->Begin()[std::min(stack_size, index)];
    Object** end = &mark_stack_->Begin()[std::min(stack_size, index + chunk_size)];
    index += chunk_size;
    thread_pool->AddTask(self, new MarkStackChunk(thread_pool, this, begin, end));
  }
  thread_pool->StartWorkers(self);
  thread_pool->Wait(self, true, true);
  mark_stack_->Reset();
  // LOG(INFO) << "Idle wait time " << PrettyDuration(thread_pool->GetWaitTime());
  CHECK_EQ(work_chunks_created_, work_chunks_deleted_) << " some of the work chunks were leaked";
}

// Scan anything that's on the mark stack.
void MarkSweep::ProcessMarkStack() {
  ThreadPool* thread_pool = GetHeap()->GetThreadPool();
  if (kParallelMarkStack && thread_pool != NULL && thread_pool->GetThreadCount() > 0) {
    ProcessMarkStackParallel();
    return;
  }

  if (kUseMarkStackPrefetch) {
    const size_t fifo_size = 4;
    const size_t fifo_mask = fifo_size - 1;
    const Object* fifo[fifo_size];
    for (size_t i = 0; i < fifo_size; ++i) {
      fifo[i] = NULL;
    }
    size_t fifo_pos = 0;
    size_t fifo_count = 0;
    for (;;) {
      const Object* obj = fifo[fifo_pos & fifo_mask];
      if (obj != NULL) {
        ScanObject(obj);
        fifo[fifo_pos & fifo_mask] = NULL;
        --fifo_count;
      }

      if (!mark_stack_->IsEmpty()) {
        const Object* obj = mark_stack_->PopBack();
        DCHECK(obj != NULL);
        fifo[fifo_pos & fifo_mask] = obj;
        __builtin_prefetch(obj);
        fifo_count++;
      }
      fifo_pos++;

      if (!fifo_count) {
        CHECK(mark_stack_->IsEmpty()) << mark_stack_->Size();
        break;
      }
    }
  } else {
    while (!mark_stack_->IsEmpty()) {
      const Object* obj = mark_stack_->PopBack();
      DCHECK(obj != NULL);
      ScanObject(obj);
    }
  }
}

// Walks the reference list marking any references subject to the
// reference clearing policy. References with a black referent are
// removed from the list. References with white referents biased
// toward saving are blackened and also removed from the list.
void MarkSweep::PreserveSomeSoftReferences(Object** list) {
  DCHECK(list != NULL);
  Object* clear = NULL;
  size_t counter = 0;

  DCHECK(mark_stack_->IsEmpty());

  while (*list != NULL) {
    Object* ref = heap_->DequeuePendingReference(list);
    Object* referent = heap_->GetReferenceReferent(ref);
    if (referent == NULL) {
      // Referent was cleared by the user during marking.
      continue;
    }
    bool is_marked = IsMarked(referent);
    if (!is_marked && ((++counter) & 1)) {
      // Referent is white and biased toward saving, mark it.
      MarkObject(referent);
      is_marked = true;
    }
    if (!is_marked) {
      // Referent is white, queue it for clearing.
      heap_->EnqueuePendingReference(ref, &clear);
    }
  }
  *list = clear;
  // Restart the mark with the newly black references added to the
  // root set.
  ProcessMarkStack();
}

inline bool MarkSweep::IsMarked(const Object* object) const
    SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
  if (object >= immune_begin_ && object < immune_end_) {
    return true;
  }
  DCHECK(current_mark_bitmap_ != NULL);
  if (current_mark_bitmap_->HasAddress(object)) {
    return current_mark_bitmap_->Test(object);
  }
  return heap_->GetMarkBitmap()->Test(object);
}

// Unlink the reference list, clearing reference objects with white
// referents. Cleared references registered to a reference queue are
// scheduled for appending by the heap worker thread.
void MarkSweep::ClearWhiteReferences(Object** list) {
  DCHECK(list != NULL);
  while (*list != NULL) {
    Object* ref = heap_->DequeuePendingReference(list);
    Object* referent = heap_->GetReferenceReferent(ref);
    if (referent != NULL && !IsMarked(referent)) {
      // Referent is white, clear it.
      heap_->ClearReferenceReferent(ref);
      if (heap_->IsEnqueuable(ref)) {
        heap_->EnqueueReference(ref, &cleared_reference_list_);
      }
    }
  }
  DCHECK(*list == NULL);
}

// Enqueues finalizer references with white referents. White
// referents are blackened, moved to the zombie field, and the
// referent field is cleared.
void MarkSweep::EnqueueFinalizerReferences(Object** list) {
  DCHECK(list != NULL);
  MemberOffset zombie_offset = heap_->GetFinalizerReferenceZombieOffset();
  bool has_enqueued = false;
  while (*list != NULL) {
    Object* ref = heap_->DequeuePendingReference(list);
    Object* referent = heap_->GetReferenceReferent(ref);
    if (referent != NULL && !IsMarked(referent)) {
      MarkObject(referent);
      // If the referent is non-null the reference must be enqueuable.
      DCHECK(heap_->IsEnqueuable(ref));
      ref->SetFieldObject(zombie_offset, referent, false);
      heap_->ClearReferenceReferent(ref);
      heap_->EnqueueReference(ref, &cleared_reference_list_);
      has_enqueued = true;
    }
  }
  if (has_enqueued) {
    ProcessMarkStack();
  }
  DCHECK(*list == NULL);
}

// Process reference class instances and schedule finalizations.
void MarkSweep::ProcessReferences(Object** soft_references, bool clear_soft,
                                  Object** weak_references,
                                  Object** finalizer_references,
                                  Object** phantom_references) {
  DCHECK(soft_references != NULL);
  DCHECK(weak_references != NULL);
  DCHECK(finalizer_references != NULL);
  DCHECK(phantom_references != NULL);

  // Unless we are in the zygote or required to clear soft references
  // with white references, preserve some white referents.
  if (!clear_soft && !Runtime::Current()->IsZygote()) {
    PreserveSomeSoftReferences(soft_references);
  }

  // Clear all remaining soft and weak references with white
  // referents.
  ClearWhiteReferences(soft_references);
  ClearWhiteReferences(weak_references);

  // Preserve all white objects with finalize methods and schedule
  // them for finalization.
  EnqueueFinalizerReferences(finalizer_references);

  // Clear all f-reachable soft and weak references with white
  // referents.
  ClearWhiteReferences(soft_references);
  ClearWhiteReferences(weak_references);

  // Clear all phantom references with white referents.
  ClearWhiteReferences(phantom_references);

  // At this point all reference lists should be empty.
  DCHECK(*soft_references == NULL);
  DCHECK(*weak_references == NULL);
  DCHECK(*finalizer_references == NULL);
  DCHECK(*phantom_references == NULL);
}

void MarkSweep::UnBindBitmaps() {
  const std::vector<space::ContinuousSpace*>& spaces = GetHeap()->GetContinuousSpaces();
  // TODO: C++0x
  typedef std::vector<space::ContinuousSpace*>::const_iterator It;
  for (It it = spaces.begin(), end = spaces.end(); it != end; ++it) {
    space::ContinuousSpace* space = *it;
    if (space->IsDlMallocSpace()) {
      space::DlMallocSpace* alloc_space = space->AsDlMallocSpace();
      if (alloc_space->temp_bitmap_.get() != NULL) {
        // At this point, the temp_bitmap holds our old mark bitmap.
        accounting::SpaceBitmap* new_bitmap = alloc_space->temp_bitmap_.release();
        GetHeap()->GetMarkBitmap()->ReplaceBitmap(alloc_space->mark_bitmap_.get(), new_bitmap);
        CHECK_EQ(alloc_space->mark_bitmap_.release(), alloc_space->live_bitmap_.get());
        alloc_space->mark_bitmap_.reset(new_bitmap);
        DCHECK(alloc_space->temp_bitmap_.get() == NULL);
      }
    }
  }
}

void MarkSweep::FinishPhase() {
  // Can't enqueue references if we hold the mutator lock.
  Object* cleared_references = GetClearedReferences();
  Heap* heap = GetHeap();
  heap->EnqueueClearedReferences(&cleared_references);

  heap->PostGcVerification(this);

  timings_.NewSplit("GrowForUtilization");
  heap->GrowForUtilization(GetGcType(), GetDurationNs());

  timings_.NewSplit("RequestHeapTrim");
  heap->RequestHeapTrim();

  // Update the cumulative statistics.
  total_time_ns_ += GetDurationNs();
  total_paused_time_ns_ += std::accumulate(GetPauseTimes().begin(), GetPauseTimes().end(), 0,
                                           std::plus<uint64_t>());
  total_freed_objects_ += GetFreedObjects();
  total_freed_bytes_ += GetFreedBytes();

  // Ensure that the mark stack is empty.
  CHECK(mark_stack_->IsEmpty());

  if (kCountScannedTypes) {
    VLOG(gc) << "MarkSweep scanned classes=" << class_count_ << " arrays=" << array_count_
             << " other=" << other_count_;
  }

  if (kCountTasks) {
    VLOG(gc) << "Total number of work chunks allocated: " << work_chunks_created_;
  }

  if (kMeasureOverhead) {
    VLOG(gc) << "Overhead time " << PrettyDuration(overhead_time_);
  }

  if (kProfileLargeObjects) {
    VLOG(gc) << "Large objects tested " << large_object_test_ << " marked " << large_object_mark_;
  }

  if (kCountClassesMarked) {
    VLOG(gc) << "Classes marked " << classes_marked_;
  }

  if (kCountJavaLangRefs) {
    VLOG(gc) << "References scanned " << reference_count_;
  }

  // Update the cumulative loggers.
  cumulative_timings_.Start();
  cumulative_timings_.AddNewLogger(timings_);
  cumulative_timings_.End();

  // Clear all of the spaces' mark bitmaps.
  const std::vector<space::ContinuousSpace*>& spaces = GetHeap()->GetContinuousSpaces();
  // TODO: C++0x
  typedef std::vector<space::ContinuousSpace*>::const_iterator It;
  for (It it = spaces.begin(), end = spaces.end(); it != end; ++it) {
    space::ContinuousSpace* space = *it;
    if (space->GetGcRetentionPolicy() != space::kGcRetentionPolicyNeverCollect) {
      space->GetMarkBitmap()->Clear();
    }
  }
  mark_stack_->Reset();

  // Reset the marked large objects.
  space::LargeObjectSpace* large_objects = GetHeap()->GetLargeObjectsSpace();
  large_objects->GetMarkObjects()->Clear();
}

}  // namespace collector
}  // namespace gc
}  // namespace art