mark_sweep.cc revision 7940e44f4517de5e2634a7e07d58d0fb26160513
1/* 2 * Copyright (C) 2011 The Android Open Source Project 3 * 4 * Licensed under the Apache License, Version 2.0 (the "License"); 5 * you may not use this file except in compliance with the License. 6 * You may obtain a copy of the License at 7 * 8 * http://www.apache.org/licenses/LICENSE-2.0 9 * 10 * Unless required by applicable law or agreed to in writing, software 11 * distributed under the License is distributed on an "AS IS" BASIS, 12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 * See the License for the specific language governing permissions and 14 * limitations under the License. 15 */ 16 17#include "mark_sweep.h" 18 19#include <functional> 20#include <numeric> 21#include <climits> 22#include <vector> 23 24#include "base/logging.h" 25#include "base/macros.h" 26#include "base/mutex-inl.h" 27#include "base/timing_logger.h" 28#include "gc/accounting/card_table-inl.h" 29#include "gc/accounting/heap_bitmap.h" 30#include "gc/accounting/space_bitmap-inl.h" 31#include "gc/heap.h" 32#include "gc/space/image_space.h" 33#include "gc/space/large_object_space.h" 34#include "gc/space/space-inl.h" 35#include "indirect_reference_table.h" 36#include "intern_table.h" 37#include "jni_internal.h" 38#include "monitor.h" 39#include "mark_sweep-inl.h" 40#include "mirror/class-inl.h" 41#include "mirror/class_loader.h" 42#include "mirror/dex_cache.h" 43#include "mirror/field.h" 44#include "mirror/field-inl.h" 45#include "mirror/object-inl.h" 46#include "mirror/object_array.h" 47#include "mirror/object_array-inl.h" 48#include "runtime.h" 49#include "thread-inl.h" 50#include "thread_list.h" 51#include "verifier/method_verifier.h" 52 53using namespace art::mirror; 54 55namespace art { 56namespace gc { 57namespace collector { 58 59// Performance options. 60static const bool kParallelMarkStack = true; 61static const bool kDisableFinger = true; // TODO: Fix, bit rotten. 62static const bool kUseMarkStackPrefetch = true; 63 64// Profiling and information flags. 65static const bool kCountClassesMarked = false; 66static const bool kProfileLargeObjects = false; 67static const bool kMeasureOverhead = false; 68static const bool kCountTasks = false; 69static const bool kCountJavaLangRefs = false; 70 71class SetFingerVisitor { 72 public: 73 SetFingerVisitor(MarkSweep* const mark_sweep) : mark_sweep_(mark_sweep) { 74 } 75 76 void operator ()(void* finger) const { 77 mark_sweep_->SetFinger(reinterpret_cast<Object*>(finger)); 78 } 79 80 private: 81 MarkSweep* const mark_sweep_; 82}; 83 84void MarkSweep::ImmuneSpace(space::ContinuousSpace* space) { 85 // Bind live to mark bitmap if necessary. 86 if (space->GetLiveBitmap() != space->GetMarkBitmap()) { 87 BindLiveToMarkBitmap(space); 88 } 89 90 // Add the space to the immune region. 91 if (immune_begin_ == NULL) { 92 DCHECK(immune_end_ == NULL); 93 SetImmuneRange(reinterpret_cast<Object*>(space->Begin()), 94 reinterpret_cast<Object*>(space->End())); 95 } else { 96 const std::vector<space::ContinuousSpace*>& spaces = GetHeap()->GetContinuousSpaces(); 97 const space::ContinuousSpace* prev_space = NULL; 98 // Find out if the previous space is immune. 99 // TODO: C++0x 100 typedef std::vector<space::ContinuousSpace*>::const_iterator It; 101 for (It it = spaces.begin(), end = spaces.end(); it != end; ++it) { 102 if (*it == space) { 103 break; 104 } 105 prev_space = *it; 106 } 107 108 // If previous space was immune, then extend the immune region. Relies on continuous spaces 109 // being sorted by Heap::AddContinuousSpace. 
110 if (prev_space != NULL && 111 immune_begin_ <= reinterpret_cast<Object*>(prev_space->Begin()) && 112 immune_end_ >= reinterpret_cast<Object*>(prev_space->End())) { 113 immune_begin_ = std::min(reinterpret_cast<Object*>(space->Begin()), immune_begin_); 114 immune_end_ = std::max(reinterpret_cast<Object*>(space->End()), immune_end_); 115 } 116 } 117} 118 119void MarkSweep::BindBitmaps() { 120 const std::vector<space::ContinuousSpace*>& spaces = GetHeap()->GetContinuousSpaces(); 121 WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_); 122 123 // Mark all of the spaces we never collect as immune. 124 typedef std::vector<space::ContinuousSpace*>::const_iterator It; 125 for (It it = spaces.begin(), end = spaces.end(); it != end; ++it) { 126 space::ContinuousSpace* space = *it; 127 if (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyNeverCollect) { 128 ImmuneSpace(space); 129 } 130 } 131} 132 133MarkSweep::MarkSweep(Heap* heap, bool is_concurrent, const std::string& name_prefix) 134 : GarbageCollector(heap, 135 name_prefix + (name_prefix.empty() ? "" : " ") + 136 (is_concurrent ? "concurrent mark sweep": "mark sweep")), 137 current_mark_bitmap_(NULL), 138 java_lang_Class_(NULL), 139 mark_stack_(NULL), 140 finger_(NULL), 141 immune_begin_(NULL), 142 immune_end_(NULL), 143 soft_reference_list_(NULL), 144 weak_reference_list_(NULL), 145 finalizer_reference_list_(NULL), 146 phantom_reference_list_(NULL), 147 cleared_reference_list_(NULL), 148 gc_barrier_(new Barrier(0)), 149 large_object_lock_("mark sweep large object lock", kMarkSweepLargeObjectLock), 150 mark_stack_expand_lock_("mark sweep mark stack expand lock"), 151 is_concurrent_(is_concurrent), 152 clear_soft_references_(false) { 153} 154 155void MarkSweep::InitializePhase() { 156 timings_.Reset(); 157 timings_.StartSplit("InitializePhase"); 158 mark_stack_ = GetHeap()->mark_stack_.get(); 159 DCHECK(mark_stack_ != NULL); 160 finger_ = NULL; 161 SetImmuneRange(NULL, NULL); 162 soft_reference_list_ = NULL; 163 weak_reference_list_ = NULL; 164 finalizer_reference_list_ = NULL; 165 phantom_reference_list_ = NULL; 166 cleared_reference_list_ = NULL; 167 freed_bytes_ = 0; 168 freed_objects_ = 0; 169 class_count_ = 0; 170 array_count_ = 0; 171 other_count_ = 0; 172 large_object_test_ = 0; 173 large_object_mark_ = 0; 174 classes_marked_ = 0; 175 overhead_time_ = 0; 176 work_chunks_created_ = 0; 177 work_chunks_deleted_ = 0; 178 reference_count_ = 0; 179 java_lang_Class_ = Class::GetJavaLangClass(); 180 CHECK(java_lang_Class_ != NULL); 181 FindDefaultMarkBitmap(); 182 // Do any pre GC verification. 183 heap_->PreGcVerification(this); 184} 185 186void MarkSweep::ProcessReferences(Thread* self) { 187 timings_.NewSplit("ProcessReferences"); 188 WriterMutexLock mu(self, *Locks::heap_bitmap_lock_); 189 ProcessReferences(&soft_reference_list_, clear_soft_references_, &weak_reference_list_, 190 &finalizer_reference_list_, &phantom_reference_list_); 191} 192 193bool MarkSweep::HandleDirtyObjectsPhase() { 194 Thread* self = Thread::Current(); 195 accounting::ObjectStack* allocation_stack = GetHeap()->allocation_stack_.get(); 196 Locks::mutator_lock_->AssertExclusiveHeld(self); 197 198 { 199 timings_.NewSplit("ReMarkRoots"); 200 WriterMutexLock mu(self, *Locks::heap_bitmap_lock_); 201 202 // Re-mark root set. 203 ReMarkRoots(); 204 205 // Scan dirty objects, this is only required if we are not doing concurrent GC. 
206 RecursiveMarkDirtyObjects(accounting::CardTable::kCardDirty); 207 } 208 209 ProcessReferences(self); 210 211 // Only need to do this if we have the card mark verification on, and only during concurrent GC. 212 if (GetHeap()->verify_missing_card_marks_) { 213 WriterMutexLock mu(self, *Locks::heap_bitmap_lock_); 214 // This second sweep makes sure that we don't have any objects in the live stack which point to 215 // freed objects. These cause problems since their references may be previously freed objects. 216 SweepArray(allocation_stack, false); 217 } else { 218 timings_.NewSplit("UnMarkAllocStack"); 219 WriterMutexLock mu(self, *Locks::heap_bitmap_lock_); 220 // The allocation stack contains things allocated since the start of the GC. These may have been 221 // marked during this GC meaning they won't be eligible for reclaiming in the next sticky GC. 222 // Remove these objects from the mark bitmaps so that they will be eligible for sticky 223 // collection. 224 heap_->UnMarkAllocStack(GetHeap()->alloc_space_->GetMarkBitmap(), 225 GetHeap()->large_object_space_->GetMarkObjects(), 226 allocation_stack); 227 } 228 return true; 229} 230 231bool MarkSweep::IsConcurrent() const { 232 return is_concurrent_; 233} 234 235void MarkSweep::MarkingPhase() { 236 Heap* heap = GetHeap(); 237 Thread* self = Thread::Current(); 238 239 timings_.NewSplit("BindBitmaps"); 240 BindBitmaps(); 241 FindDefaultMarkBitmap(); 242 // Process dirty cards and add dirty cards to mod union tables. 243 heap->ProcessCards(timings_); 244 245 // Need to do this before the checkpoint since we don't want any threads to add references to 246 // the live stack during the recursive mark. 247 timings_.NewSplit("SwapStacks"); 248 heap->SwapStacks(); 249 250 WriterMutexLock mu(self, *Locks::heap_bitmap_lock_); 251 if (Locks::mutator_lock_->IsExclusiveHeld(self)) { 252 // If we exclusively hold the mutator lock, all threads must be suspended. 253 timings_.NewSplit("MarkRoots"); 254 MarkRoots(); 255 } else { 256 timings_.NewSplit("MarkRootsCheckpoint"); 257 MarkRootsCheckpoint(self); 258 timings_.NewSplit("MarkNonThreadRoots"); 259 MarkNonThreadRoots(); 260 } 261 timings_.NewSplit("MarkConcurrentRoots"); 262 MarkConcurrentRoots(); 263 264 heap->UpdateAndMarkModUnion(this, timings_, GetGcType()); 265 MarkReachableObjects(); 266} 267 268void MarkSweep::MarkReachableObjects() { 269 // Mark everything allocated since the last as GC live so that we can sweep concurrently, 270 // knowing that new allocations won't be marked as live. 271 timings_.NewSplit("MarkStackAsLive"); 272 accounting::ObjectStack* live_stack = heap_->GetLiveStack(); 273 heap_->MarkAllocStack(heap_->alloc_space_->GetLiveBitmap(), 274 heap_->large_object_space_->GetLiveObjects(), 275 live_stack); 276 live_stack->Reset(); 277 // Recursively mark all the non-image bits set in the mark bitmap. 278 RecursiveMark(); 279 DisableFinger(); 280} 281 282void MarkSweep::ReclaimPhase() { 283 Thread* self = Thread::Current(); 284 285 if (!IsConcurrent()) { 286 ProcessReferences(self); 287 } 288 289 // Before freeing anything, lets verify the heap. 290 if (kIsDebugBuild) { 291 ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_); 292 VerifyImageRoots(); 293 } 294 heap_->PreSweepingGcVerification(this); 295 296 { 297 WriterMutexLock mu(self, *Locks::heap_bitmap_lock_); 298 299 // Reclaim unmarked objects. 300 Sweep(false); 301 302 // Swap the live and mark bitmaps for each space which we modified space. 
This is an 303 // optimization that enables us to not clear live bits inside of the sweep. Only swaps unbound 304 // bitmaps. 305 timings_.NewSplit("SwapBitmaps"); 306 SwapBitmaps(); 307 308 // Unbind the live and mark bitmaps. 309 UnBindBitmaps(); 310 } 311} 312 313void MarkSweep::SetImmuneRange(Object* begin, Object* end) { 314 immune_begin_ = begin; 315 immune_end_ = end; 316} 317 318void MarkSweep::FindDefaultMarkBitmap() { 319 const std::vector<space::ContinuousSpace*>& spaces = GetHeap()->GetContinuousSpaces(); 320 // TODO: C++0x 321 typedef std::vector<space::ContinuousSpace*>::const_iterator It; 322 for (It it = spaces.begin(), end = spaces.end(); it != end; ++it) { 323 space::ContinuousSpace* space = *it; 324 if (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyAlwaysCollect) { 325 current_mark_bitmap_ = (*it)->GetMarkBitmap(); 326 CHECK(current_mark_bitmap_ != NULL); 327 return; 328 } 329 } 330 GetHeap()->DumpSpaces(); 331 LOG(FATAL) << "Could not find a default mark bitmap"; 332} 333 334void MarkSweep::ExpandMarkStack() { 335 // Rare case, no need to have Thread::Current be a parameter. 336 MutexLock mu(Thread::Current(), mark_stack_expand_lock_); 337 if (UNLIKELY(mark_stack_->Size() < mark_stack_->Capacity())) { 338 // Someone else acquired the lock and expanded the mark stack before us. 339 return; 340 } 341 std::vector<Object*> temp; 342 temp.insert(temp.begin(), mark_stack_->Begin(), mark_stack_->End()); 343 mark_stack_->Resize(mark_stack_->Capacity() * 2); 344 for (size_t i = 0; i < temp.size(); ++i) { 345 mark_stack_->PushBack(temp[i]); 346 } 347} 348 349inline void MarkSweep::MarkObjectNonNullParallel(const Object* obj, bool check_finger) { 350 DCHECK(obj != NULL); 351 if (MarkObjectParallel(obj)) { 352 if (kDisableFinger || (check_finger && obj < finger_)) { 353 while (UNLIKELY(!mark_stack_->AtomicPushBack(const_cast<Object*>(obj)))) { 354 // Only reason a push can fail is that the mark stack is full. 355 ExpandMarkStack(); 356 } 357 } 358 } 359} 360 361inline void MarkSweep::MarkObjectNonNull(const Object* obj, bool check_finger) { 362 DCHECK(obj != NULL); 363 364 if (obj >= immune_begin_ && obj < immune_end_) { 365 DCHECK(IsMarked(obj)); 366 return; 367 } 368 369 // Try to take advantage of locality of references within a space, failing this find the space 370 // the hard way. 371 accounting::SpaceBitmap* object_bitmap = current_mark_bitmap_; 372 if (UNLIKELY(!object_bitmap->HasAddress(obj))) { 373 accounting::SpaceBitmap* new_bitmap = heap_->GetMarkBitmap()->GetContinuousSpaceBitmap(obj); 374 if (LIKELY(new_bitmap != NULL)) { 375 object_bitmap = new_bitmap; 376 } else { 377 MarkLargeObject(obj); 378 return; 379 } 380 } 381 382 // This object was not previously marked. 383 if (!object_bitmap->Test(obj)) { 384 object_bitmap->Set(obj); 385 if (kDisableFinger || (check_finger && obj < finger_)) { 386 // Do we need to expand the mark stack? 387 if (UNLIKELY(mark_stack_->Size() >= mark_stack_->Capacity())) { 388 ExpandMarkStack(); 389 } 390 // The object must be pushed on to the mark stack. 391 mark_stack_->PushBack(const_cast<Object*>(obj)); 392 } 393 } 394} 395 396// Rare case, probably not worth inlining since it will increase instruction cache miss rate. 397bool MarkSweep::MarkLargeObject(const Object* obj) { 398 // TODO: support >1 discontinuous space. 
  space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
  accounting::SpaceSetMap* large_objects = large_object_space->GetMarkObjects();
  if (kProfileLargeObjects) {
    ++large_object_test_;
  }
  if (UNLIKELY(!large_objects->Test(obj))) {
    // TODO: mark may be called holding the JNI global references lock, Contains will hold the
    // large object space lock causing a lock level violation. Bug: 9414652.
    if (!kDebugLocking && !large_object_space->Contains(obj)) {
      LOG(ERROR) << "Tried to mark " << obj << " not contained by any spaces";
      LOG(ERROR) << "Attempting to see if it's a bad root";
      VerifyRoots();
      LOG(FATAL) << "Can't mark bad root";
    }
    if (kProfileLargeObjects) {
      ++large_object_mark_;
    }
    large_objects->Set(obj);
    // Don't need to check finger since large objects never have any object references.
    return true;
  }
  return false;
}

inline bool MarkSweep::MarkObjectParallel(const Object* obj) {
  DCHECK(obj != NULL);

  if (obj >= immune_begin_ && obj < immune_end_) {
    DCHECK(IsMarked(obj));
    return false;
  }

  // Try to take advantage of locality of references within a space, failing this find the space
  // the hard way.
  accounting::SpaceBitmap* object_bitmap = current_mark_bitmap_;
  if (UNLIKELY(!object_bitmap->HasAddress(obj))) {
    accounting::SpaceBitmap* new_bitmap = heap_->GetMarkBitmap()->GetContinuousSpaceBitmap(obj);
    if (new_bitmap != NULL) {
      object_bitmap = new_bitmap;
    } else {
      // TODO: Remove the Thread::Current here?
      // TODO: Convert this to some kind of atomic marking?
      MutexLock mu(Thread::Current(), large_object_lock_);
      return MarkLargeObject(obj);
    }
  }

  // Return true if the object was not previously marked.
  return !object_bitmap->AtomicTestAndSet(obj);
}

// Used to mark objects when recursing. Recursion is done by moving
// the finger across the bitmaps in address order and marking child
// objects. Any newly-marked objects whose addresses are lower than
// the finger won't be visited by the bitmap scan, so those objects
// need to be added to the mark stack.
void MarkSweep::MarkObject(const Object* obj) {
  if (obj != NULL) {
    MarkObjectNonNull(obj, true);
  }
}

void MarkSweep::MarkRoot(const Object* obj) {
  if (obj != NULL) {
    MarkObjectNonNull(obj, false);
  }
}

void MarkSweep::MarkRootParallelCallback(const Object* root, void* arg) {
  DCHECK(root != NULL);
  DCHECK(arg != NULL);
  MarkSweep* mark_sweep = reinterpret_cast<MarkSweep*>(arg);
  mark_sweep->MarkObjectNonNullParallel(root, false);
}

void MarkSweep::MarkObjectCallback(const Object* root, void* arg) {
  DCHECK(root != NULL);
  DCHECK(arg != NULL);
  MarkSweep* mark_sweep = reinterpret_cast<MarkSweep*>(arg);
  mark_sweep->MarkObjectNonNull(root, false);
}

void MarkSweep::ReMarkObjectVisitor(const Object* root, void* arg) {
  DCHECK(root != NULL);
  DCHECK(arg != NULL);
  MarkSweep* mark_sweep = reinterpret_cast<MarkSweep*>(arg);
  mark_sweep->MarkObjectNonNull(root, true);
}

void MarkSweep::VerifyRootCallback(const Object* root, void* arg, size_t vreg,
                                   const StackVisitor* visitor) {
  reinterpret_cast<MarkSweep*>(arg)->VerifyRoot(root, vreg, visitor);
}

void MarkSweep::VerifyRoot(const Object* root, size_t vreg, const StackVisitor* visitor) {
  // See if the root is on any space bitmap.
  if (GetHeap()->GetLiveBitmap()->GetContinuousSpaceBitmap(root) == NULL) {
    space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
    if (!large_object_space->Contains(root)) {
      LOG(ERROR) << "Found invalid root: " << root;
      if (visitor != NULL) {
        LOG(ERROR) << visitor->DescribeLocation() << " in VReg: " << vreg;
      }
    }
  }
}

void MarkSweep::VerifyRoots() {
  Runtime::Current()->GetThreadList()->VerifyRoots(VerifyRootCallback, this);
}

// Marks all objects in the root set.
void MarkSweep::MarkRoots() {
  Runtime::Current()->VisitNonConcurrentRoots(MarkObjectCallback, this);
}

void MarkSweep::MarkNonThreadRoots() {
  Runtime::Current()->VisitNonThreadRoots(MarkObjectCallback, this);
}

void MarkSweep::MarkConcurrentRoots() {
  // Visit all runtime roots and clear dirty flags.
  Runtime::Current()->VisitConcurrentRoots(MarkObjectCallback, this, false, true);
}

class CheckObjectVisitor {
 public:
  CheckObjectVisitor(MarkSweep* const mark_sweep) : mark_sweep_(mark_sweep) {
  }

  void operator ()(const Object* obj, const Object* ref, MemberOffset offset, bool is_static) const
      NO_THREAD_SAFETY_ANALYSIS {
    if (kDebugLocking) {
      Locks::heap_bitmap_lock_->AssertSharedHeld(Thread::Current());
    }
    mark_sweep_->CheckReference(obj, ref, offset, is_static);
  }

 private:
  MarkSweep* const mark_sweep_;
};

void MarkSweep::CheckObject(const Object* obj) {
  DCHECK(obj != NULL);
  CheckObjectVisitor visitor(this);
  VisitObjectReferences(obj, visitor);
}

void MarkSweep::VerifyImageRootVisitor(Object* root, void* arg) {
  DCHECK(root != NULL);
  DCHECK(arg != NULL);
  MarkSweep* mark_sweep = reinterpret_cast<MarkSweep*>(arg);
  DCHECK(mark_sweep->heap_->GetMarkBitmap()->Test(root));
  mark_sweep->CheckObject(root);
}

void MarkSweep::BindLiveToMarkBitmap(space::ContinuousSpace* space) {
  CHECK(space->IsDlMallocSpace());
  space::DlMallocSpace* alloc_space = space->AsDlMallocSpace();
  accounting::SpaceBitmap* live_bitmap = space->GetLiveBitmap();
  accounting::SpaceBitmap* mark_bitmap = alloc_space->mark_bitmap_.release();
  GetHeap()->GetMarkBitmap()->ReplaceBitmap(mark_bitmap, live_bitmap);
  alloc_space->temp_bitmap_.reset(mark_bitmap);
  alloc_space->mark_bitmap_.reset(live_bitmap);
}

class ScanObjectVisitor {
 public:
  ScanObjectVisitor(MarkSweep* const mark_sweep) : mark_sweep_(mark_sweep) {
  }

  // TODO: Fix when annotalysis works with visitors.
  void operator ()(const Object* obj) const NO_THREAD_SAFETY_ANALYSIS {
    if (kDebugLocking) {
      Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
      Locks::heap_bitmap_lock_->AssertExclusiveHeld(Thread::Current());
    }
    mark_sweep_->ScanObject(obj);
  }

 private:
  MarkSweep* const mark_sweep_;
};

void MarkSweep::ScanGrayObjects(byte minimum_age) {
  accounting::CardTable* card_table = GetHeap()->GetCardTable();
  const std::vector<space::ContinuousSpace*>& spaces = GetHeap()->GetContinuousSpaces();
  ScanObjectVisitor visitor(this);
  SetFingerVisitor finger_visitor(this);
  // TODO: C++0x
  typedef std::vector<space::ContinuousSpace*>::const_iterator It;
  for (It it = spaces.begin(), space_end = spaces.end(); it != space_end; ++it) {
    space::ContinuousSpace* space = *it;
    switch (space->GetGcRetentionPolicy()) {
      case space::kGcRetentionPolicyNeverCollect:
        timings_.NewSplit("ScanGrayImageSpaceObjects");
        break;
      case space::kGcRetentionPolicyFullCollect:
        timings_.NewSplit("ScanGrayZygoteSpaceObjects");
        break;
      case space::kGcRetentionPolicyAlwaysCollect:
        timings_.NewSplit("ScanGrayAllocSpaceObjects");
        break;
    }
    byte* begin = space->Begin();
    byte* end = space->End();
    // Image spaces are handled properly since live == marked for them.
    accounting::SpaceBitmap* mark_bitmap = space->GetMarkBitmap();
    card_table->Scan(mark_bitmap, begin, end, visitor, finger_visitor, minimum_age);
  }
}

class CheckBitmapVisitor {
 public:
  CheckBitmapVisitor(MarkSweep* mark_sweep) : mark_sweep_(mark_sweep) {
  }

  void operator ()(const Object* obj) const NO_THREAD_SAFETY_ANALYSIS {
    if (kDebugLocking) {
      Locks::heap_bitmap_lock_->AssertSharedHeld(Thread::Current());
    }
    DCHECK(obj != NULL);
    mark_sweep_->CheckObject(obj);
  }

 private:
  MarkSweep* mark_sweep_;
};

void MarkSweep::VerifyImageRoots() {
  // Verify that all the references inside the image space point to objects which are either in
  // the image space or are marked objects in the alloc space.
  CheckBitmapVisitor visitor(this);
  const std::vector<space::ContinuousSpace*>& spaces = GetHeap()->GetContinuousSpaces();
  // TODO: C++0x
  typedef std::vector<space::ContinuousSpace*>::const_iterator It;
  for (It it = spaces.begin(), end = spaces.end(); it != end; ++it) {
    if ((*it)->IsImageSpace()) {
      space::ImageSpace* space = (*it)->AsImageSpace();
      uintptr_t begin = reinterpret_cast<uintptr_t>(space->Begin());
      uintptr_t end = reinterpret_cast<uintptr_t>(space->End());
      accounting::SpaceBitmap* live_bitmap = space->GetLiveBitmap();
      DCHECK(live_bitmap != NULL);
      live_bitmap->VisitMarkedRange(begin, end, visitor, VoidFunctor());
    }
  }
}

// Populates the mark stack based on the set of marked objects and
// recursively marks until the mark stack is emptied.
void MarkSweep::RecursiveMark() {
  timings_.NewSplit("RecursiveMark");
  // RecursiveMark will build the lists of known instances of the Reference classes.
  // See DelayReferenceReferent for details.
656 CHECK(soft_reference_list_ == NULL); 657 CHECK(weak_reference_list_ == NULL); 658 CHECK(finalizer_reference_list_ == NULL); 659 CHECK(phantom_reference_list_ == NULL); 660 CHECK(cleared_reference_list_ == NULL); 661 662 const bool partial = GetGcType() == kGcTypePartial; 663 SetFingerVisitor set_finger_visitor(this); 664 ScanObjectVisitor scan_visitor(this); 665 if (!kDisableFinger) { 666 finger_ = NULL; 667 const std::vector<space::ContinuousSpace*>& spaces = GetHeap()->GetContinuousSpaces(); 668 // TODO: C++0x 669 typedef std::vector<space::ContinuousSpace*>::const_iterator It; 670 for (It it = spaces.begin(), end = spaces.end(); it != end; ++it) { 671 space::ContinuousSpace* space = *it; 672 if ((space->GetGcRetentionPolicy() == space::kGcRetentionPolicyAlwaysCollect) || 673 (!partial && space->GetGcRetentionPolicy() == space::kGcRetentionPolicyFullCollect)) { 674 current_mark_bitmap_ = space->GetMarkBitmap(); 675 if (current_mark_bitmap_ == NULL) { 676 GetHeap()->DumpSpaces(); 677 LOG(FATAL) << "invalid bitmap"; 678 } 679 // This function does not handle heap end increasing, so we must use the space end. 680 uintptr_t begin = reinterpret_cast<uintptr_t>(space->Begin()); 681 uintptr_t end = reinterpret_cast<uintptr_t>(space->End()); 682 current_mark_bitmap_->VisitMarkedRange(begin, end, scan_visitor, set_finger_visitor); 683 } 684 } 685 } 686 DisableFinger(); 687 timings_.NewSplit("ProcessMarkStack"); 688 ProcessMarkStack(); 689} 690 691bool MarkSweep::IsMarkedCallback(const Object* object, void* arg) { 692 return 693 reinterpret_cast<MarkSweep*>(arg)->IsMarked(object) || 694 !reinterpret_cast<MarkSweep*>(arg)->GetHeap()->GetLiveBitmap()->Test(object); 695} 696 697void MarkSweep::RecursiveMarkDirtyObjects(byte minimum_age) { 698 ScanGrayObjects(minimum_age); 699 timings_.NewSplit("ProcessMarkStack"); 700 ProcessMarkStack(); 701} 702 703void MarkSweep::ReMarkRoots() { 704 Runtime::Current()->VisitRoots(ReMarkObjectVisitor, this, true, true); 705} 706 707void MarkSweep::SweepJniWeakGlobals(IsMarkedTester is_marked, void* arg) { 708 JavaVMExt* vm = Runtime::Current()->GetJavaVM(); 709 MutexLock mu(Thread::Current(), vm->weak_globals_lock); 710 IndirectReferenceTable* table = &vm->weak_globals; 711 typedef IndirectReferenceTable::iterator It; // TODO: C++0x auto 712 for (It it = table->begin(), end = table->end(); it != end; ++it) { 713 const Object** entry = *it; 714 if (!is_marked(*entry, arg)) { 715 *entry = kClearedJniWeakGlobal; 716 } 717 } 718} 719 720struct ArrayMarkedCheck { 721 accounting::ObjectStack* live_stack; 722 MarkSweep* mark_sweep; 723}; 724 725// Either marked or not live. 726bool MarkSweep::IsMarkedArrayCallback(const Object* object, void* arg) { 727 ArrayMarkedCheck* array_check = reinterpret_cast<ArrayMarkedCheck*>(arg); 728 if (array_check->mark_sweep->IsMarked(object)) { 729 return true; 730 } 731 accounting::ObjectStack* live_stack = array_check->live_stack; 732 return std::find(live_stack->Begin(), live_stack->End(), object) == live_stack->End(); 733} 734 735void MarkSweep::SweepSystemWeaksArray(accounting::ObjectStack* allocations) { 736 Runtime* runtime = Runtime::Current(); 737 // The callbacks check 738 // !is_marked where is_marked is the callback but we want 739 // !IsMarked && IsLive 740 // So compute !(!IsMarked && IsLive) which is equal to (IsMarked || !IsLive). 741 // Or for swapped (IsLive || !IsMarked). 
742 743 ArrayMarkedCheck visitor; 744 visitor.live_stack = allocations; 745 visitor.mark_sweep = this; 746 runtime->GetInternTable()->SweepInternTableWeaks(IsMarkedArrayCallback, &visitor); 747 runtime->GetMonitorList()->SweepMonitorList(IsMarkedArrayCallback, &visitor); 748 SweepJniWeakGlobals(IsMarkedArrayCallback, &visitor); 749} 750 751void MarkSweep::SweepSystemWeaks() { 752 Runtime* runtime = Runtime::Current(); 753 // The callbacks check 754 // !is_marked where is_marked is the callback but we want 755 // !IsMarked && IsLive 756 // So compute !(!IsMarked && IsLive) which is equal to (IsMarked || !IsLive). 757 // Or for swapped (IsLive || !IsMarked). 758 runtime->GetInternTable()->SweepInternTableWeaks(IsMarkedCallback, this); 759 runtime->GetMonitorList()->SweepMonitorList(IsMarkedCallback, this); 760 SweepJniWeakGlobals(IsMarkedCallback, this); 761} 762 763bool MarkSweep::VerifyIsLiveCallback(const Object* obj, void* arg) { 764 reinterpret_cast<MarkSweep*>(arg)->VerifyIsLive(obj); 765 // We don't actually want to sweep the object, so lets return "marked" 766 return true; 767} 768 769void MarkSweep::VerifyIsLive(const Object* obj) { 770 Heap* heap = GetHeap(); 771 if (!heap->GetLiveBitmap()->Test(obj)) { 772 space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace(); 773 if (!large_object_space->GetLiveObjects()->Test(obj)) { 774 if (std::find(heap->allocation_stack_->Begin(), heap->allocation_stack_->End(), obj) == 775 heap->allocation_stack_->End()) { 776 // Object not found! 777 heap->DumpSpaces(); 778 LOG(FATAL) << "Found dead object " << obj; 779 } 780 } 781 } 782} 783 784void MarkSweep::VerifySystemWeaks() { 785 Runtime* runtime = Runtime::Current(); 786 // Verify system weaks, uses a special IsMarked callback which always returns true. 787 runtime->GetInternTable()->SweepInternTableWeaks(VerifyIsLiveCallback, this); 788 runtime->GetMonitorList()->SweepMonitorList(VerifyIsLiveCallback, this); 789 790 JavaVMExt* vm = runtime->GetJavaVM(); 791 MutexLock mu(Thread::Current(), vm->weak_globals_lock); 792 IndirectReferenceTable* table = &vm->weak_globals; 793 typedef IndirectReferenceTable::iterator It; // TODO: C++0x auto 794 for (It it = table->begin(), end = table->end(); it != end; ++it) { 795 const Object** entry = *it; 796 VerifyIsLive(*entry); 797 } 798} 799 800struct SweepCallbackContext { 801 MarkSweep* mark_sweep; 802 space::AllocSpace* space; 803 Thread* self; 804}; 805 806class CheckpointMarkThreadRoots : public Closure { 807 public: 808 CheckpointMarkThreadRoots(MarkSweep* mark_sweep) : mark_sweep_(mark_sweep) { 809 810 } 811 812 virtual void Run(Thread* thread) NO_THREAD_SAFETY_ANALYSIS { 813 // Note: self is not necessarily equal to thread since thread may be suspended. 814 Thread* self = Thread::Current(); 815 CHECK(thread == self || thread->IsSuspended() || thread->GetState() == kWaitingPerformingGc) 816 << thread->GetState() << " thread " << thread << " self " << self; 817 thread->VisitRoots(MarkSweep::MarkRootParallelCallback, mark_sweep_); 818 mark_sweep_->GetBarrier().Pass(self); 819 } 820 821 private: 822 MarkSweep* mark_sweep_; 823}; 824 825void MarkSweep::MarkRootsCheckpoint(Thread* self) { 826 CheckpointMarkThreadRoots check_point(this); 827 ThreadList* thread_list = Runtime::Current()->GetThreadList(); 828 // Request the check point is run on all threads returning a count of the threads that must 829 // run through the barrier including self. 
  size_t barrier_count = thread_list->RunCheckpoint(&check_point);
  // Release locks then wait for all mutator threads to pass the barrier.
  // TODO: optimize to not release locks when there are no threads to wait for.
  Locks::heap_bitmap_lock_->ExclusiveUnlock(self);
  Locks::mutator_lock_->SharedUnlock(self);
  ThreadState old_state = self->SetState(kWaitingForCheckPointsToRun);
  CHECK_EQ(old_state, kWaitingPerformingGc);
  gc_barrier_->Increment(self, barrier_count);
  self->SetState(kWaitingPerformingGc);
  Locks::mutator_lock_->SharedLock(self);
  Locks::heap_bitmap_lock_->ExclusiveLock(self);
}

void MarkSweep::SweepCallback(size_t num_ptrs, Object** ptrs, void* arg) {
  SweepCallbackContext* context = static_cast<SweepCallbackContext*>(arg);
  MarkSweep* mark_sweep = context->mark_sweep;
  Heap* heap = mark_sweep->GetHeap();
  space::AllocSpace* space = context->space;
  Thread* self = context->self;
  Locks::heap_bitmap_lock_->AssertExclusiveHeld(self);
  // Use a bulk free (which merges consecutive objects before freeing) or free per object?
  // Documentation suggests better free performance with merging, but this may be at the expense
  // of allocation.
  size_t freed_objects = num_ptrs;
  // AllocSpace::FreeList clears the value in ptrs, so perform after clearing the live bit.
  size_t freed_bytes = space->FreeList(self, num_ptrs, ptrs);
  heap->RecordFree(freed_objects, freed_bytes);
  mark_sweep->freed_objects_ += freed_objects;
  mark_sweep->freed_bytes_ += freed_bytes;
}

void MarkSweep::ZygoteSweepCallback(size_t num_ptrs, Object** ptrs, void* arg) {
  SweepCallbackContext* context = static_cast<SweepCallbackContext*>(arg);
  Locks::heap_bitmap_lock_->AssertExclusiveHeld(context->self);
  Heap* heap = context->mark_sweep->GetHeap();
  // We don't free any actual memory to avoid dirtying the shared zygote pages.
  for (size_t i = 0; i < num_ptrs; ++i) {
    Object* obj = static_cast<Object*>(ptrs[i]);
    heap->GetLiveBitmap()->Clear(obj);
    heap->GetCardTable()->MarkCard(obj);
  }
}

void MarkSweep::SweepArray(accounting::ObjectStack* allocations, bool swap_bitmaps) {
  size_t freed_bytes = 0;
  space::DlMallocSpace* space = heap_->GetAllocSpace();

  // If we don't swap bitmaps then newly allocated Weaks go into the live bitmap but not the mark
  // bitmap, resulting in occasional frees of Weaks which are still in use.
  timings_.NewSplit("SweepSystemWeaks");
  SweepSystemWeaksArray(allocations);

  timings_.NewSplit("Process allocation stack");
  // Newly allocated objects MUST be in the alloc space and those are the only objects which we are
  // going to free.
  accounting::SpaceBitmap* live_bitmap = space->GetLiveBitmap();
  accounting::SpaceBitmap* mark_bitmap = space->GetMarkBitmap();
  space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
  accounting::SpaceSetMap* large_live_objects = large_object_space->GetLiveObjects();
  accounting::SpaceSetMap* large_mark_objects = large_object_space->GetMarkObjects();
  if (swap_bitmaps) {
    std::swap(live_bitmap, mark_bitmap);
    std::swap(large_live_objects, large_mark_objects);
  }

  size_t freed_large_objects = 0;
  size_t count = allocations->Size();
  Object** objects = const_cast<Object**>(allocations->Begin());
  Object** out = objects;

  // Empty the allocation stack.
  Thread* self = Thread::Current();
  for (size_t i = 0; i < count; ++i) {
    Object* obj = objects[i];
    // There should only be objects in the AllocSpace/LargeObjectSpace in the allocation stack.
    if (LIKELY(mark_bitmap->HasAddress(obj))) {
      if (!mark_bitmap->Test(obj)) {
        // Don't bother un-marking since we clear the mark bitmap anyways.
        *(out++) = obj;
      }
    } else if (!large_mark_objects->Test(obj)) {
      ++freed_large_objects;
      freed_bytes += large_object_space->Free(self, obj);
    }
  }
  CHECK_EQ(count, allocations->Size());
  timings_.NewSplit("FreeList");

  size_t freed_objects = out - objects;
  freed_bytes += space->FreeList(self, freed_objects, objects);
  VLOG(heap) << "Freed " << freed_objects << "/" << count
             << " objects with size " << PrettySize(freed_bytes);
  heap_->RecordFree(freed_objects + freed_large_objects, freed_bytes);
  freed_objects_ += freed_objects;
  freed_bytes_ += freed_bytes;

  timings_.NewSplit("ResetStack");
  allocations->Reset();
}

void MarkSweep::Sweep(bool swap_bitmaps) {
  DCHECK(mark_stack_->IsEmpty());

  // If we don't swap bitmaps then newly allocated Weaks go into the live bitmap but not the mark
  // bitmap, resulting in occasional frees of Weaks which are still in use.
  timings_.NewSplit("SweepSystemWeaks");
  SweepSystemWeaks();

  const bool partial = (GetGcType() == kGcTypePartial);
  SweepCallbackContext scc;
  scc.mark_sweep = this;
  scc.self = Thread::Current();
  const std::vector<space::ContinuousSpace*>& spaces = GetHeap()->GetContinuousSpaces();
  // TODO: C++0x
  typedef std::vector<space::ContinuousSpace*>::const_iterator It;
  for (It it = spaces.begin(), end = spaces.end(); it != end; ++it) {
    space::ContinuousSpace* space = *it;
    // We always sweep always-collect spaces.
    bool sweep_space = (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyAlwaysCollect);
    if (!partial && !sweep_space) {
      // We also sweep full-collect spaces when the GC isn't a partial GC (i.e. it's a full GC).
      sweep_space = (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyFullCollect);
    }
    if (sweep_space) {
      uintptr_t begin = reinterpret_cast<uintptr_t>(space->Begin());
      uintptr_t end = reinterpret_cast<uintptr_t>(space->End());
      scc.space = space->AsDlMallocSpace();
      accounting::SpaceBitmap* live_bitmap = space->GetLiveBitmap();
      accounting::SpaceBitmap* mark_bitmap = space->GetMarkBitmap();
      if (swap_bitmaps) {
        std::swap(live_bitmap, mark_bitmap);
      }
      if (!space->IsZygoteSpace()) {
        timings_.NewSplit("SweepAllocSpace");
        // Bitmaps are pre-swapped for optimization which enables sweeping with the heap unlocked.
        accounting::SpaceBitmap::SweepWalk(*live_bitmap, *mark_bitmap, begin, end,
                                           &SweepCallback, reinterpret_cast<void*>(&scc));
      } else {
        timings_.NewSplit("SweepZygote");
        // Zygote sweep takes care of dirtying cards and clearing live bits; it does not free
        // actual memory.
971 accounting::SpaceBitmap::SweepWalk(*live_bitmap, *mark_bitmap, begin, end, 972 &ZygoteSweepCallback, reinterpret_cast<void*>(&scc)); 973 } 974 } 975 } 976 977 timings_.NewSplit("SweepLargeObjects"); 978 SweepLargeObjects(swap_bitmaps); 979} 980 981void MarkSweep::SweepLargeObjects(bool swap_bitmaps) { 982 // Sweep large objects 983 space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace(); 984 accounting::SpaceSetMap* large_live_objects = large_object_space->GetLiveObjects(); 985 accounting::SpaceSetMap* large_mark_objects = large_object_space->GetMarkObjects(); 986 if (swap_bitmaps) { 987 std::swap(large_live_objects, large_mark_objects); 988 } 989 accounting::SpaceSetMap::Objects& live_objects = large_live_objects->GetObjects(); 990 // O(n*log(n)) but hopefully there are not too many large objects. 991 size_t freed_objects = 0; 992 size_t freed_bytes = 0; 993 Thread* self = Thread::Current(); 994 // TODO: C++0x 995 typedef accounting::SpaceSetMap::Objects::iterator It; 996 for (It it = live_objects.begin(), end = live_objects.end(); it != end; ++it) { 997 if (!large_mark_objects->Test(*it)) { 998 freed_bytes += large_object_space->Free(self, const_cast<Object*>(*it)); 999 ++freed_objects; 1000 } 1001 } 1002 freed_objects_ += freed_objects; 1003 freed_bytes_ += freed_bytes; 1004 GetHeap()->RecordFree(freed_objects, freed_bytes); 1005} 1006 1007void MarkSweep::CheckReference(const Object* obj, const Object* ref, MemberOffset offset, bool is_static) { 1008 const std::vector<space::ContinuousSpace*>& spaces = GetHeap()->GetContinuousSpaces(); 1009 // TODO: C++0x 1010 typedef std::vector<space::ContinuousSpace*>::const_iterator It; 1011 for (It it = spaces.begin(), end = spaces.end(); it != end; ++it) { 1012 space::ContinuousSpace* space = *it; 1013 if (space->IsDlMallocSpace() && space->Contains(ref)) { 1014 DCHECK(IsMarked(obj)); 1015 1016 bool is_marked = IsMarked(ref); 1017 if (!is_marked) { 1018 LOG(INFO) << *space; 1019 LOG(WARNING) << (is_static ? "Static ref'" : "Instance ref'") << PrettyTypeOf(ref) 1020 << "' (" << reinterpret_cast<const void*>(ref) << ") in '" << PrettyTypeOf(obj) 1021 << "' (" << reinterpret_cast<const void*>(obj) << ") at offset " 1022 << reinterpret_cast<void*>(offset.Int32Value()) << " wasn't marked"; 1023 1024 const Class* klass = is_static ? obj->AsClass() : obj->GetClass(); 1025 DCHECK(klass != NULL); 1026 const ObjectArray<Field>* fields = is_static ? klass->GetSFields() : klass->GetIFields(); 1027 DCHECK(fields != NULL); 1028 bool found = false; 1029 for (int32_t i = 0; i < fields->GetLength(); ++i) { 1030 const Field* cur = fields->Get(i); 1031 if (cur->GetOffset().Int32Value() == offset.Int32Value()) { 1032 LOG(WARNING) << "Field referencing the alloc space was " << PrettyField(cur); 1033 found = true; 1034 break; 1035 } 1036 } 1037 if (!found) { 1038 LOG(WARNING) << "Could not find field in object alloc space with offset " << offset.Int32Value(); 1039 } 1040 1041 bool obj_marked = heap_->GetCardTable()->IsDirty(obj); 1042 if (!obj_marked) { 1043 LOG(WARNING) << "Object '" << PrettyTypeOf(obj) << "' " 1044 << "(" << reinterpret_cast<const void*>(obj) << ") contains references to " 1045 << "the alloc space, but wasn't card marked"; 1046 } 1047 } 1048 } 1049 break; 1050 } 1051} 1052 1053// Process the "referent" field in a java.lang.ref.Reference. If the 1054// referent has not yet been marked, put it on the appropriate list in 1055// the gcHeap for later processing. 
void MarkSweep::DelayReferenceReferent(Object* obj) {
  DCHECK(obj != NULL);
  Class* klass = obj->GetClass();
  DCHECK(klass != NULL);
  DCHECK(klass->IsReferenceClass());
  Object* pending = obj->GetFieldObject<Object*>(heap_->GetReferencePendingNextOffset(), false);
  Object* referent = heap_->GetReferenceReferent(obj);
  if (kCountJavaLangRefs) {
    ++reference_count_;
  }
  if (pending == NULL && referent != NULL && !IsMarked(referent)) {
    Object** list = NULL;
    if (klass->IsSoftReferenceClass()) {
      list = &soft_reference_list_;
    } else if (klass->IsWeakReferenceClass()) {
      list = &weak_reference_list_;
    } else if (klass->IsFinalizerReferenceClass()) {
      list = &finalizer_reference_list_;
    } else if (klass->IsPhantomReferenceClass()) {
      list = &phantom_reference_list_;
    }
    DCHECK(list != NULL) << PrettyClass(klass) << " " << std::hex << klass->GetAccessFlags();
    // TODO: One lock per list?
    heap_->EnqueuePendingReference(obj, list);
  }
}

void MarkSweep::ScanRoot(const Object* obj) {
  ScanObject(obj);
}

class MarkObjectVisitor {
 public:
  MarkObjectVisitor(MarkSweep* const mark_sweep) : mark_sweep_(mark_sweep) {
  }

  // TODO: Fix when annotalysis works with visitors.
  void operator ()(const Object* /* obj */, const Object* ref, const MemberOffset& /* offset */,
                   bool /* is_static */) const
      NO_THREAD_SAFETY_ANALYSIS {
    if (kDebugLocking) {
      Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
      Locks::heap_bitmap_lock_->AssertExclusiveHeld(Thread::Current());
    }
    mark_sweep_->MarkObject(ref);
  }

 private:
  MarkSweep* const mark_sweep_;
};

// Scans an object reference. Determines the type of the reference
// and dispatches to a specialized scanning routine.
void MarkSweep::ScanObject(const Object* obj) {
  MarkObjectVisitor visitor(this);
  ScanObjectVisit(obj, visitor);
}

class MarkStackChunk : public Task {
 public:
  MarkStackChunk(ThreadPool* thread_pool, MarkSweep* mark_sweep, Object** begin, Object** end)
      : mark_sweep_(mark_sweep),
        thread_pool_(thread_pool),
        index_(0),
        length_(0),
        output_(NULL) {
    length_ = end - begin;
    if (begin != end) {
      // Cost not significant since we only do this for the initial set of mark stack chunks.
      memcpy(data_, begin, length_ * sizeof(*begin));
    }
    if (kCountTasks) {
      ++mark_sweep_->work_chunks_created_;
    }
  }

  ~MarkStackChunk() {
    DCHECK(output_ == NULL || output_->length_ == 0);
    DCHECK_GE(index_, length_);
    delete output_;
    if (kCountTasks) {
      ++mark_sweep_->work_chunks_deleted_;
    }
  }

  MarkSweep* const mark_sweep_;
  ThreadPool* const thread_pool_;
  static const size_t max_size = 1 * KB;
  // Index of which object we are scanning. Only needs to be atomic if we are doing work stealing.
  size_t index_;
  // Input / output mark stack. We add newly marked references to data_ until length reaches
  // max_size. This is an optimization so that fewer tasks are created.
  // TODO: Investigate using a bounded buffer FIFO.
  Object* data_[max_size];
  // How many elements in data_ we need to scan.
  size_t length_;
  // Output block: newly marked references get added to the output block so that another thread
  // can scan them.
  MarkStackChunk* output_;

  class MarkObjectParallelVisitor {
   public:
    MarkObjectParallelVisitor(MarkStackChunk* chunk_task) : chunk_task_(chunk_task) {
    }

    void operator ()(const Object* /* obj */, const Object* ref,
                     const MemberOffset& /* offset */, bool /* is_static */) const {
      if (ref != NULL && chunk_task_->mark_sweep_->MarkObjectParallel(ref)) {
        chunk_task_->MarkStackPush(ref);
      }
    }

   private:
    MarkStackChunk* const chunk_task_;
  };

  // Push an object into the block.
  // No need for an atomic increment since only one thread writes to an output block at any
  // given time.
  void Push(Object* obj) {
    CHECK(obj != NULL);
    data_[length_++] = obj;
  }

  void MarkStackPush(const Object* obj) {
    if (static_cast<size_t>(length_) < max_size) {
      Push(const_cast<Object*>(obj));
    } else {
      // Internal (thread-local) buffer is full, push to a new buffer instead.
      if (UNLIKELY(output_ == NULL)) {
        AllocateOutputChunk();
      } else if (UNLIKELY(static_cast<size_t>(output_->length_) == max_size)) {
        // Output block is full, queue it up for processing and obtain a new block.
        EnqueueOutput();
        AllocateOutputChunk();
      }
      output_->Push(const_cast<Object*>(obj));
    }
  }

  void ScanObject(Object* obj) {
    mark_sweep_->ScanObjectVisit(obj, MarkObjectParallelVisitor(this));
  }

  void EnqueueOutput() {
    if (output_ != NULL) {
      uint64_t start = 0;
      if (kMeasureOverhead) {
        start = NanoTime();
      }
      thread_pool_->AddTask(Thread::Current(), output_);
      output_ = NULL;
      if (kMeasureOverhead) {
        mark_sweep_->overhead_time_ += NanoTime() - start;
      }
    }
  }

  void AllocateOutputChunk() {
    uint64_t start = 0;
    if (kMeasureOverhead) {
      start = NanoTime();
    }
    output_ = new MarkStackChunk(thread_pool_, mark_sweep_, NULL, NULL);
    if (kMeasureOverhead) {
      mark_sweep_->overhead_time_ += NanoTime() - start;
    }
  }

  void Finalize() {
    EnqueueOutput();
    delete this;
  }

  // Scans all of the objects in the chunk.
  virtual void Run(Thread* self) {
    size_t index;
    while ((index = index_++) < length_) {
      if (kUseMarkStackPrefetch) {
        static const size_t prefetch_look_ahead = 1;
        __builtin_prefetch(data_[std::min(index + prefetch_look_ahead, length_ - 1)]);
      }
      Object* obj = data_[index];
      DCHECK(obj != NULL);
      ScanObject(obj);
    }
  }
};

void MarkSweep::ProcessMarkStackParallel() {
  CHECK(kDisableFinger) << "parallel mark stack processing cannot work when finger is enabled";
  Thread* self = Thread::Current();
  ThreadPool* thread_pool = GetHeap()->GetThreadPool();
  // Split the current mark stack up into work tasks.
1251 const size_t num_threads = thread_pool->GetThreadCount(); 1252 const size_t stack_size = mark_stack_->Size(); 1253 const size_t chunk_size = 1254 std::min((stack_size + num_threads - 1) / num_threads, 1255 static_cast<size_t>(MarkStackChunk::max_size)); 1256 size_t index = 0; 1257 for (size_t i = 0; i < num_threads || index < stack_size; ++i) { 1258 Object** begin = &mark_stack_->Begin()[std::min(stack_size, index)]; 1259 Object** end = &mark_stack_->Begin()[std::min(stack_size, index + chunk_size)]; 1260 index += chunk_size; 1261 thread_pool->AddTask(self, new MarkStackChunk(thread_pool, this, begin, end)); 1262 } 1263 thread_pool->StartWorkers(self); 1264 thread_pool->Wait(self, true, true); 1265 mark_stack_->Reset(); 1266 //LOG(INFO) << "Idle wait time " << PrettyDuration(thread_pool->GetWaitTime()); 1267 CHECK_EQ(work_chunks_created_, work_chunks_deleted_) << " some of the work chunks were leaked"; 1268} 1269 1270// Scan anything that's on the mark stack. 1271void MarkSweep::ProcessMarkStack() { 1272 ThreadPool* thread_pool = GetHeap()->GetThreadPool(); 1273 if (kParallelMarkStack && thread_pool != NULL && thread_pool->GetThreadCount() > 0) { 1274 ProcessMarkStackParallel(); 1275 return; 1276 } 1277 1278 if (kUseMarkStackPrefetch) { 1279 const size_t fifo_size = 4; 1280 const size_t fifo_mask = fifo_size - 1; 1281 const Object* fifo[fifo_size]; 1282 for (size_t i = 0;i < fifo_size;++i) { 1283 fifo[i] = NULL; 1284 } 1285 size_t fifo_pos = 0; 1286 size_t fifo_count = 0; 1287 for (;;) { 1288 const Object* obj = fifo[fifo_pos & fifo_mask]; 1289 if (obj != NULL) { 1290 ScanObject(obj); 1291 fifo[fifo_pos & fifo_mask] = NULL; 1292 --fifo_count; 1293 } 1294 1295 if (!mark_stack_->IsEmpty()) { 1296 const Object* obj = mark_stack_->PopBack(); 1297 DCHECK(obj != NULL); 1298 fifo[fifo_pos & fifo_mask] = obj; 1299 __builtin_prefetch(obj); 1300 fifo_count++; 1301 } 1302 fifo_pos++; 1303 1304 if (!fifo_count) { 1305 CHECK(mark_stack_->IsEmpty()) << mark_stack_->Size(); 1306 break; 1307 } 1308 } 1309 } else { 1310 while (!mark_stack_->IsEmpty()) { 1311 const Object* obj = mark_stack_->PopBack(); 1312 DCHECK(obj != NULL); 1313 ScanObject(obj); 1314 } 1315 } 1316} 1317 1318// Walks the reference list marking any references subject to the 1319// reference clearing policy. References with a black referent are 1320// removed from the list. References with white referents biased 1321// toward saving are blackened and also removed from the list. 1322void MarkSweep::PreserveSomeSoftReferences(Object** list) { 1323 DCHECK(list != NULL); 1324 Object* clear = NULL; 1325 size_t counter = 0; 1326 1327 DCHECK(mark_stack_->IsEmpty()); 1328 1329 while (*list != NULL) { 1330 Object* ref = heap_->DequeuePendingReference(list); 1331 Object* referent = heap_->GetReferenceReferent(ref); 1332 if (referent == NULL) { 1333 // Referent was cleared by the user during marking. 1334 continue; 1335 } 1336 bool is_marked = IsMarked(referent); 1337 if (!is_marked && ((++counter) & 1)) { 1338 // Referent is white and biased toward saving, mark it. 1339 MarkObject(referent); 1340 is_marked = true; 1341 } 1342 if (!is_marked) { 1343 // Referent is white, queue it for clearing. 1344 heap_->EnqueuePendingReference(ref, &clear); 1345 } 1346 } 1347 *list = clear; 1348 // Restart the mark with the newly black references added to the 1349 // root set. 
  ProcessMarkStack();
}

inline bool MarkSweep::IsMarked(const Object* object) const
    SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
  if (object >= immune_begin_ && object < immune_end_) {
    return true;
  }
  DCHECK(current_mark_bitmap_ != NULL);
  if (current_mark_bitmap_->HasAddress(object)) {
    return current_mark_bitmap_->Test(object);
  }
  return heap_->GetMarkBitmap()->Test(object);
}

// Unlink the reference list, clearing references to objects with white
// referents. Cleared references registered to a reference queue are
// scheduled for appending by the heap worker thread.
void MarkSweep::ClearWhiteReferences(Object** list) {
  DCHECK(list != NULL);
  while (*list != NULL) {
    Object* ref = heap_->DequeuePendingReference(list);
    Object* referent = heap_->GetReferenceReferent(ref);
    if (referent != NULL && !IsMarked(referent)) {
      // Referent is white, clear it.
      heap_->ClearReferenceReferent(ref);
      if (heap_->IsEnqueuable(ref)) {
        heap_->EnqueueReference(ref, &cleared_reference_list_);
      }
    }
  }
  DCHECK(*list == NULL);
}

// Enqueues finalizer references with white referents. White
// referents are blackened, moved to the zombie field, and the
// referent field is cleared.
void MarkSweep::EnqueueFinalizerReferences(Object** list) {
  DCHECK(list != NULL);
  MemberOffset zombie_offset = heap_->GetFinalizerReferenceZombieOffset();
  bool has_enqueued = false;
  while (*list != NULL) {
    Object* ref = heap_->DequeuePendingReference(list);
    Object* referent = heap_->GetReferenceReferent(ref);
    if (referent != NULL && !IsMarked(referent)) {
      MarkObject(referent);
      // If the referent is non-null the reference must be enqueuable.
      DCHECK(heap_->IsEnqueuable(ref));
      ref->SetFieldObject(zombie_offset, referent, false);
      heap_->ClearReferenceReferent(ref);
      heap_->EnqueueReference(ref, &cleared_reference_list_);
      has_enqueued = true;
    }
  }
  if (has_enqueued) {
    ProcessMarkStack();
  }
  DCHECK(*list == NULL);
}

// Process reference class instances and schedule finalizations.
void MarkSweep::ProcessReferences(Object** soft_references, bool clear_soft,
                                  Object** weak_references,
                                  Object** finalizer_references,
                                  Object** phantom_references) {
  DCHECK(soft_references != NULL);
  DCHECK(weak_references != NULL);
  DCHECK(finalizer_references != NULL);
  DCHECK(phantom_references != NULL);

  // Unless we are in the zygote or required to clear soft references
  // with white references, preserve some white referents.
  if (!clear_soft && !Runtime::Current()->IsZygote()) {
    PreserveSomeSoftReferences(soft_references);
  }

  // Clear all remaining soft and weak references with white
  // referents.
  ClearWhiteReferences(soft_references);
  ClearWhiteReferences(weak_references);

  // Preserve all white objects with finalize methods and schedule
  // them for finalization.
  EnqueueFinalizerReferences(finalizer_references);

  // Clear all f-reachable soft and weak references with white
  // referents.
  ClearWhiteReferences(soft_references);
  ClearWhiteReferences(weak_references);

  // Clear all phantom references with white referents.
  ClearWhiteReferences(phantom_references);

  // At this point all reference lists should be empty.
  DCHECK(*soft_references == NULL);
  DCHECK(*weak_references == NULL);
  DCHECK(*finalizer_references == NULL);
  DCHECK(*phantom_references == NULL);
}

void MarkSweep::UnBindBitmaps() {
  const std::vector<space::ContinuousSpace*>& spaces = GetHeap()->GetContinuousSpaces();
  // TODO: C++0x
  typedef std::vector<space::ContinuousSpace*>::const_iterator It;
  for (It it = spaces.begin(), end = spaces.end(); it != end; ++it) {
    space::ContinuousSpace* space = *it;
    if (space->IsDlMallocSpace()) {
      space::DlMallocSpace* alloc_space = space->AsDlMallocSpace();
      if (alloc_space->temp_bitmap_.get() != NULL) {
        // At this point, the temp_bitmap holds our old mark bitmap.
        accounting::SpaceBitmap* new_bitmap = alloc_space->temp_bitmap_.release();
        GetHeap()->GetMarkBitmap()->ReplaceBitmap(alloc_space->mark_bitmap_.get(), new_bitmap);
        CHECK_EQ(alloc_space->mark_bitmap_.release(), alloc_space->live_bitmap_.get());
        alloc_space->mark_bitmap_.reset(new_bitmap);
        DCHECK(alloc_space->temp_bitmap_.get() == NULL);
      }
    }
  }
}

void MarkSweep::FinishPhase() {
  // Can't enqueue references if we hold the mutator lock.
  Object* cleared_references = GetClearedReferences();
  Heap* heap = GetHeap();
  heap->EnqueueClearedReferences(&cleared_references);

  heap->PostGcVerification(this);

  timings_.NewSplit("GrowForUtilization");
  heap->GrowForUtilization(GetGcType(), GetDurationNs());

  timings_.NewSplit("RequestHeapTrim");
  heap->RequestHeapTrim();

  // Update the cumulative statistics.
  total_time_ns_ += GetDurationNs();
  total_paused_time_ns_ += std::accumulate(GetPauseTimes().begin(), GetPauseTimes().end(), 0,
                                           std::plus<uint64_t>());
  total_freed_objects_ += GetFreedObjects();
  total_freed_bytes_ += GetFreedBytes();

  // Ensure that the mark stack is empty.
  CHECK(mark_stack_->IsEmpty());

  if (kCountScannedTypes) {
    VLOG(gc) << "MarkSweep scanned classes=" << class_count_ << " arrays=" << array_count_
             << " other=" << other_count_;
  }

  if (kCountTasks) {
    VLOG(gc) << "Total number of work chunks allocated: " << work_chunks_created_;
  }

  if (kMeasureOverhead) {
    VLOG(gc) << "Overhead time " << PrettyDuration(overhead_time_);
  }

  if (kProfileLargeObjects) {
    VLOG(gc) << "Large objects tested " << large_object_test_ << " marked " << large_object_mark_;
  }

  if (kCountClassesMarked) {
    VLOG(gc) << "Classes marked " << classes_marked_;
  }

  if (kCountJavaLangRefs) {
    VLOG(gc) << "References scanned " << reference_count_;
  }

  // Update the cumulative loggers.
  cumulative_timings_.Start();
  cumulative_timings_.AddNewLogger(timings_);
  cumulative_timings_.End();

  // Clear all of the spaces' mark bitmaps.
  const std::vector<space::ContinuousSpace*>& spaces = GetHeap()->GetContinuousSpaces();
  // TODO: C++0x
  typedef std::vector<space::ContinuousSpace*>::const_iterator It;
  for (It it = spaces.begin(), end = spaces.end(); it != end; ++it) {
    space::ContinuousSpace* space = *it;
    if (space->GetGcRetentionPolicy() != space::kGcRetentionPolicyNeverCollect) {
      space->GetMarkBitmap()->Clear();
    }
  }
  mark_stack_->Reset();

  // Reset the marked large objects.
  space::LargeObjectSpace* large_objects = GetHeap()->GetLargeObjectsSpace();
  large_objects->GetMarkObjects()->Clear();
}

}  // namespace collector
}  // namespace gc
}  // namespace art