mark_sweep.cc revision df62950e7a32031b82360c407d46a37b94188fbb
/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "mark_sweep.h"

#include <functional>
#include <numeric>
#include <climits>
#include <vector>

#include "base/logging.h"
#include "base/macros.h"
#include "base/mutex-inl.h"
#include "base/timing_logger.h"
#include "gc/accounting/card_table-inl.h"
#include "gc/accounting/heap_bitmap.h"
#include "gc/accounting/space_bitmap-inl.h"
#include "gc/heap.h"
#include "gc/space/image_space.h"
#include "gc/space/large_object_space.h"
#include "gc/space/space-inl.h"
#include "indirect_reference_table.h"
#include "intern_table.h"
#include "jni_internal.h"
#include "monitor.h"
#include "mark_sweep-inl.h"
#include "mirror/class-inl.h"
#include "mirror/class_loader.h"
#include "mirror/dex_cache.h"
#include "mirror/field.h"
#include "mirror/field-inl.h"
#include "mirror/object-inl.h"
#include "mirror/object_array.h"
#include "mirror/object_array-inl.h"
#include "runtime.h"
#include "thread-inl.h"
#include "thread_list.h"
#include "verifier/method_verifier.h"

using namespace art::mirror;

namespace art {
namespace gc {
namespace collector {

// Performance options.
static const bool kParallelMarkStack = true;
static const bool kDisableFinger = true;  // TODO: Fix, bit rotten.
static const bool kUseMarkStackPrefetch = true;

// Profiling and information flags.
static const bool kCountClassesMarked = false;
static const bool kProfileLargeObjects = false;
static const bool kMeasureOverhead = false;
static const bool kCountTasks = false;
static const bool kCountJavaLangRefs = false;

class SetFingerVisitor {
 public:
  explicit SetFingerVisitor(MarkSweep* const mark_sweep) : mark_sweep_(mark_sweep) {}

  void operator()(void* finger) const {
    mark_sweep_->SetFinger(reinterpret_cast<Object*>(finger));
  }

 private:
  MarkSweep* const mark_sweep_;
};

void MarkSweep::ImmuneSpace(space::ContinuousSpace* space) {
  // Bind live to mark bitmap if necessary.
  if (space->GetLiveBitmap() != space->GetMarkBitmap()) {
    BindLiveToMarkBitmap(space);
  }

  // Add the space to the immune region.
  if (immune_begin_ == NULL) {
    DCHECK(immune_end_ == NULL);
    SetImmuneRange(reinterpret_cast<Object*>(space->Begin()),
                   reinterpret_cast<Object*>(space->End()));
  } else {
    const std::vector<space::ContinuousSpace*>& spaces = GetHeap()->GetContinuousSpaces();
    const space::ContinuousSpace* prev_space = NULL;
    // Find out if the previous space is immune.
    // TODO: C++0x
    typedef std::vector<space::ContinuousSpace*>::const_iterator It;
    for (It it = spaces.begin(), end = spaces.end(); it != end; ++it) {
      if (*it == space) {
        break;
      }
      prev_space = *it;
    }

    // If the previous space was immune, then extend the immune region. Relies on continuous spaces
    // being sorted by Heap::AddContinuousSpace.
    if (prev_space != NULL &&
        immune_begin_ <= reinterpret_cast<Object*>(prev_space->Begin()) &&
        immune_end_ >= reinterpret_cast<Object*>(prev_space->End())) {
      immune_begin_ = std::min(reinterpret_cast<Object*>(space->Begin()), immune_begin_);
      immune_end_ = std::max(reinterpret_cast<Object*>(space->End()), immune_end_);
    }
  }
}

void MarkSweep::BindBitmaps() {
  const std::vector<space::ContinuousSpace*>& spaces = GetHeap()->GetContinuousSpaces();
  WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);

  // Mark all of the spaces we never collect as immune.
  typedef std::vector<space::ContinuousSpace*>::const_iterator It;
  for (It it = spaces.begin(), end = spaces.end(); it != end; ++it) {
    space::ContinuousSpace* space = *it;
    if (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyNeverCollect) {
      ImmuneSpace(space);
    }
  }
}

MarkSweep::MarkSweep(Heap* heap, bool is_concurrent, const std::string& name_prefix)
    : GarbageCollector(heap,
                       name_prefix + (name_prefix.empty() ? "" : " ") +
                       (is_concurrent ? "concurrent mark sweep": "mark sweep")),
      current_mark_bitmap_(NULL),
      java_lang_Class_(NULL),
      mark_stack_(NULL),
      finger_(NULL),
      immune_begin_(NULL),
      immune_end_(NULL),
      soft_reference_list_(NULL),
      weak_reference_list_(NULL),
      finalizer_reference_list_(NULL),
      phantom_reference_list_(NULL),
      cleared_reference_list_(NULL),
      gc_barrier_(new Barrier(0)),
      large_object_lock_("mark sweep large object lock", kMarkSweepLargeObjectLock),
      mark_stack_expand_lock_("mark sweep mark stack expand lock"),
      is_concurrent_(is_concurrent),
      clear_soft_references_(false) {
}

void MarkSweep::InitializePhase() {
  timings_.Reset();
  timings_.StartSplit("InitializePhase");
  mark_stack_ = GetHeap()->mark_stack_.get();
  DCHECK(mark_stack_ != NULL);
  finger_ = NULL;
  SetImmuneRange(NULL, NULL);
  soft_reference_list_ = NULL;
  weak_reference_list_ = NULL;
  finalizer_reference_list_ = NULL;
  phantom_reference_list_ = NULL;
  cleared_reference_list_ = NULL;
  freed_bytes_ = 0;
  freed_objects_ = 0;
  class_count_ = 0;
  array_count_ = 0;
  other_count_ = 0;
  large_object_test_ = 0;
  large_object_mark_ = 0;
  classes_marked_ = 0;
  overhead_time_ = 0;
  work_chunks_created_ = 0;
  work_chunks_deleted_ = 0;
  reference_count_ = 0;
  java_lang_Class_ = Class::GetJavaLangClass();
  CHECK(java_lang_Class_ != NULL);
  FindDefaultMarkBitmap();
  // Do any pre GC verification.
  heap_->PreGcVerification(this);
}

void MarkSweep::ProcessReferences(Thread* self) {
  timings_.NewSplit("ProcessReferences");
  WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
  ProcessReferences(&soft_reference_list_, clear_soft_references_, &weak_reference_list_,
                    &finalizer_reference_list_, &phantom_reference_list_);
}

bool MarkSweep::HandleDirtyObjectsPhase() {
  Thread* self = Thread::Current();
  accounting::ObjectStack* allocation_stack = GetHeap()->allocation_stack_.get();
  Locks::mutator_lock_->AssertExclusiveHeld(self);

  {
    timings_.NewSplit("ReMarkRoots");
    WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);

    // Re-mark root set.
    ReMarkRoots();

    // Scan dirty objects, this is only required if we are not doing concurrent GC.
    RecursiveMarkDirtyObjects(accounting::CardTable::kCardDirty);
  }

  ProcessReferences(self);

  // Only need to do this if we have the card mark verification on, and only during concurrent GC.
  if (GetHeap()->verify_missing_card_marks_) {
    WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
    // This second sweep makes sure that we don't have any objects in the live stack which point to
    // freed objects. These cause problems since their references may be previously freed objects.
    SweepArray(allocation_stack, false);
  } else {
    timings_.NewSplit("UnMarkAllocStack");
    WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
    // The allocation stack contains things allocated since the start of the GC. These may have been
    // marked during this GC meaning they won't be eligible for reclaiming in the next sticky GC.
    // Remove these objects from the mark bitmaps so that they will be eligible for sticky
    // collection.
    heap_->UnMarkAllocStack(GetHeap()->alloc_space_->GetMarkBitmap(),
                            GetHeap()->large_object_space_->GetMarkObjects(),
                            allocation_stack);
  }
  return true;
}

bool MarkSweep::IsConcurrent() const {
  return is_concurrent_;
}

void MarkSweep::MarkingPhase() {
  Heap* heap = GetHeap();
  Thread* self = Thread::Current();

  timings_.NewSplit("BindBitmaps");
  BindBitmaps();
  FindDefaultMarkBitmap();
  // Process dirty cards and add dirty cards to mod union tables.
  heap->ProcessCards(timings_);

  // Need to do this before the checkpoint since we don't want any threads to add references to
  // the live stack during the recursive mark.
  timings_.NewSplit("SwapStacks");
  heap->SwapStacks();

  WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
  if (Locks::mutator_lock_->IsExclusiveHeld(self)) {
    // If we exclusively hold the mutator lock, all threads must be suspended.
    timings_.NewSplit("MarkRoots");
    MarkRoots();
  } else {
    timings_.NewSplit("MarkRootsCheckpoint");
    MarkRootsCheckpoint(self);
    timings_.NewSplit("MarkNonThreadRoots");
    MarkNonThreadRoots();
  }
  timings_.NewSplit("MarkConcurrentRoots");
  MarkConcurrentRoots();

  heap->UpdateAndMarkModUnion(this, timings_, GetGcType());
  MarkReachableObjects();
}

void MarkSweep::MarkReachableObjects() {
  // Mark everything allocated since the last GC as live so that we can sweep concurrently,
  // knowing that new allocations won't be marked as live.
  timings_.NewSplit("MarkStackAsLive");
  accounting::ObjectStack* live_stack = heap_->GetLiveStack();
  heap_->MarkAllocStack(heap_->alloc_space_->GetLiveBitmap(),
                        heap_->large_object_space_->GetLiveObjects(),
                        live_stack);
  live_stack->Reset();
  // Recursively mark all the non-image bits set in the mark bitmap.
  RecursiveMark();
  DisableFinger();
}

void MarkSweep::ReclaimPhase() {
  Thread* self = Thread::Current();

  if (!IsConcurrent()) {
    ProcessReferences(self);
  }

  // Before freeing anything, let's verify the heap.
  if (kIsDebugBuild) {
    ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
    VerifyImageRoots();
  }
  heap_->PreSweepingGcVerification(this);

  {
    WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);

    // Reclaim unmarked objects.
    Sweep(false);

    // Swap the live and mark bitmaps for each space which we modified. This is an
    // optimization that enables us to not clear live bits inside of the sweep. Only swaps unbound
    // bitmaps.
    timings_.NewSplit("SwapBitmaps");
    SwapBitmaps();

    // Unbind the live and mark bitmaps.
    UnBindBitmaps();
  }
}

void MarkSweep::SetImmuneRange(Object* begin, Object* end) {
  immune_begin_ = begin;
  immune_end_ = end;
}

void MarkSweep::FindDefaultMarkBitmap() {
  const std::vector<space::ContinuousSpace*>& spaces = GetHeap()->GetContinuousSpaces();
  // TODO: C++0x
  typedef std::vector<space::ContinuousSpace*>::const_iterator It;
  for (It it = spaces.begin(), end = spaces.end(); it != end; ++it) {
    space::ContinuousSpace* space = *it;
    if (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyAlwaysCollect) {
      current_mark_bitmap_ = (*it)->GetMarkBitmap();
      CHECK(current_mark_bitmap_ != NULL);
      return;
    }
  }
  GetHeap()->DumpSpaces();
  LOG(FATAL) << "Could not find a default mark bitmap";
}

void MarkSweep::ExpandMarkStack() {
  // Rare case, no need to have Thread::Current be a parameter.
  MutexLock mu(Thread::Current(), mark_stack_expand_lock_);
  if (UNLIKELY(mark_stack_->Size() < mark_stack_->Capacity())) {
    // Someone else acquired the lock and expanded the mark stack before us.
    return;
  }
  std::vector<Object*> temp;
  temp.insert(temp.begin(), mark_stack_->Begin(), mark_stack_->End());
  mark_stack_->Resize(mark_stack_->Capacity() * 2);
  for (size_t i = 0; i < temp.size(); ++i) {
    mark_stack_->PushBack(temp[i]);
  }
}

inline void MarkSweep::MarkObjectNonNullParallel(const Object* obj, bool check_finger) {
  DCHECK(obj != NULL);
  if (MarkObjectParallel(obj)) {
    if (kDisableFinger || (check_finger && obj < finger_)) {
      while (UNLIKELY(!mark_stack_->AtomicPushBack(const_cast<Object*>(obj)))) {
        // Only reason a push can fail is that the mark stack is full.
        ExpandMarkStack();
      }
    }
  }
}

inline void MarkSweep::MarkObjectNonNull(const Object* obj, bool check_finger) {
  DCHECK(obj != NULL);

  if (obj >= immune_begin_ && obj < immune_end_) {
    DCHECK(IsMarked(obj));
    return;
  }

  // Try to take advantage of locality of references within a space, failing this find the space
  // the hard way.
  accounting::SpaceBitmap* object_bitmap = current_mark_bitmap_;
  if (UNLIKELY(!object_bitmap->HasAddress(obj))) {
    accounting::SpaceBitmap* new_bitmap = heap_->GetMarkBitmap()->GetContinuousSpaceBitmap(obj);
    if (LIKELY(new_bitmap != NULL)) {
      object_bitmap = new_bitmap;
    } else {
      MarkLargeObject(obj);
      return;
    }
  }

  // This object was not previously marked.
  if (!object_bitmap->Test(obj)) {
    object_bitmap->Set(obj);
    if (kDisableFinger || (check_finger && obj < finger_)) {
      // Do we need to expand the mark stack?
      if (UNLIKELY(mark_stack_->Size() >= mark_stack_->Capacity())) {
        ExpandMarkStack();
      }
      // The object must be pushed on to the mark stack.
      mark_stack_->PushBack(const_cast<Object*>(obj));
    }
  }
}

// Rare case, probably not worth inlining since it will increase instruction cache miss rate.
bool MarkSweep::MarkLargeObject(const Object* obj) {
  // TODO: support >1 discontinuous space.
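  // Large objects are tracked in a SpaceSetMap rather than a SpaceBitmap, so marking one is a set
  // insertion rather than setting a bit.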
  space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
  accounting::SpaceSetMap* large_objects = large_object_space->GetMarkObjects();
  if (kProfileLargeObjects) {
    ++large_object_test_;
  }
  if (UNLIKELY(!large_objects->Test(obj))) {
    // TODO: mark may be called holding the JNI global references lock, Contains will hold the
    // large object space lock causing a lock level violation. Bug: 9414652;
    if (!kDebugLocking && !large_object_space->Contains(obj)) {
      LOG(ERROR) << "Tried to mark " << obj << " not contained by any spaces";
      LOG(ERROR) << "Attempting to see if it's a bad root";
      VerifyRoots();
      LOG(FATAL) << "Can't mark bad root";
    }
    if (kProfileLargeObjects) {
      ++large_object_mark_;
    }
    large_objects->Set(obj);
    // Don't need to check finger since large objects never have any object references.
    return true;
  }
  return false;
}

inline bool MarkSweep::MarkObjectParallel(const Object* obj) {
  DCHECK(obj != NULL);

  if (obj >= immune_begin_ && obj < immune_end_) {
    DCHECK(IsMarked(obj));
    return false;
  }

  // Try to take advantage of locality of references within a space, failing this find the space
  // the hard way.
  accounting::SpaceBitmap* object_bitmap = current_mark_bitmap_;
  if (UNLIKELY(!object_bitmap->HasAddress(obj))) {
    accounting::SpaceBitmap* new_bitmap = heap_->GetMarkBitmap()->GetContinuousSpaceBitmap(obj);
    if (new_bitmap != NULL) {
      object_bitmap = new_bitmap;
    } else {
      // TODO: Remove the Thread::Current here?
      // TODO: Convert this to some kind of atomic marking?
      MutexLock mu(Thread::Current(), large_object_lock_);
      return MarkLargeObject(obj);
    }
  }

  // Return true if the object was not previously marked.
  return !object_bitmap->AtomicTestAndSet(obj);
}

// Used to mark objects when recursing. Recursion is done by moving
// the finger across the bitmaps in address order and marking child
// objects. Any newly-marked objects whose addresses are lower than
// the finger won't be visited by the bitmap scan, so those objects
// need to be added to the mark stack.
void MarkSweep::MarkObject(const Object* obj) {
  if (obj != NULL) {
    MarkObjectNonNull(obj, true);
  }
}

void MarkSweep::MarkRoot(const Object* obj) {
  if (obj != NULL) {
    MarkObjectNonNull(obj, false);
  }
}

void MarkSweep::MarkRootParallelCallback(const Object* root, void* arg) {
  DCHECK(root != NULL);
  DCHECK(arg != NULL);
  MarkSweep* mark_sweep = reinterpret_cast<MarkSweep*>(arg);
  mark_sweep->MarkObjectNonNullParallel(root, false);
}

void MarkSweep::MarkObjectCallback(const Object* root, void* arg) {
  DCHECK(root != NULL);
  DCHECK(arg != NULL);
  MarkSweep* mark_sweep = reinterpret_cast<MarkSweep*>(arg);
  mark_sweep->MarkObjectNonNull(root, false);
}

void MarkSweep::ReMarkObjectVisitor(const Object* root, void* arg) {
  DCHECK(root != NULL);
  DCHECK(arg != NULL);
  MarkSweep* mark_sweep = reinterpret_cast<MarkSweep*>(arg);
  mark_sweep->MarkObjectNonNull(root, true);
}

void MarkSweep::VerifyRootCallback(const Object* root, void* arg, size_t vreg,
                                   const StackVisitor* visitor) {
  reinterpret_cast<MarkSweep*>(arg)->VerifyRoot(root, vreg, visitor);
}

void MarkSweep::VerifyRoot(const Object* root, size_t vreg, const StackVisitor* visitor) {
  // See if the root is on any space bitmap.
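  // A root is reported as invalid only if it is not covered by any continuous space bitmap and is
  // also not in the large object space.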
  if (GetHeap()->GetLiveBitmap()->GetContinuousSpaceBitmap(root) == NULL) {
    space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
    if (!large_object_space->Contains(root)) {
      LOG(ERROR) << "Found invalid root: " << root;
      if (visitor != NULL) {
        LOG(ERROR) << visitor->DescribeLocation() << " in VReg: " << vreg;
      }
    }
  }
}

void MarkSweep::VerifyRoots() {
  Runtime::Current()->GetThreadList()->VerifyRoots(VerifyRootCallback, this);
}

// Marks all objects in the root set.
void MarkSweep::MarkRoots() {
  Runtime::Current()->VisitNonConcurrentRoots(MarkObjectCallback, this);
}

void MarkSweep::MarkNonThreadRoots() {
  Runtime::Current()->VisitNonThreadRoots(MarkObjectCallback, this);
}

void MarkSweep::MarkConcurrentRoots() {
  // Visit all runtime roots and clear dirty flags.
  Runtime::Current()->VisitConcurrentRoots(MarkObjectCallback, this, false, true);
}

class CheckObjectVisitor {
 public:
  explicit CheckObjectVisitor(MarkSweep* const mark_sweep) : mark_sweep_(mark_sweep) {}

  void operator()(const Object* obj, const Object* ref, MemberOffset offset, bool is_static) const
      NO_THREAD_SAFETY_ANALYSIS {
    if (kDebugLocking) {
      Locks::heap_bitmap_lock_->AssertSharedHeld(Thread::Current());
    }
    mark_sweep_->CheckReference(obj, ref, offset, is_static);
  }

 private:
  MarkSweep* const mark_sweep_;
};

void MarkSweep::CheckObject(const Object* obj) {
  DCHECK(obj != NULL);
  CheckObjectVisitor visitor(this);
  VisitObjectReferences(obj, visitor);
}

void MarkSweep::VerifyImageRootVisitor(Object* root, void* arg) {
  DCHECK(root != NULL);
  DCHECK(arg != NULL);
  MarkSweep* mark_sweep = reinterpret_cast<MarkSweep*>(arg);
  DCHECK(mark_sweep->heap_->GetMarkBitmap()->Test(root));
  mark_sweep->CheckObject(root);
}

void MarkSweep::BindLiveToMarkBitmap(space::ContinuousSpace* space) {
  CHECK(space->IsDlMallocSpace());
  space::DlMallocSpace* alloc_space = space->AsDlMallocSpace();
  accounting::SpaceBitmap* live_bitmap = space->GetLiveBitmap();
  accounting::SpaceBitmap* mark_bitmap = alloc_space->mark_bitmap_.release();
  GetHeap()->GetMarkBitmap()->ReplaceBitmap(mark_bitmap, live_bitmap);
  alloc_space->temp_bitmap_.reset(mark_bitmap);
  alloc_space->mark_bitmap_.reset(live_bitmap);
}

class ScanObjectVisitor {
 public:
  explicit ScanObjectVisitor(MarkSweep* const mark_sweep) : mark_sweep_(mark_sweep) {}

  // TODO: Fix this when annotalysis works with visitors.
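  // Scans one already-marked object: visits each of its reference fields and marks the objects
  // they point to.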
  void operator()(const Object* obj) const NO_THREAD_SAFETY_ANALYSIS {
    if (kDebugLocking) {
      Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
      Locks::heap_bitmap_lock_->AssertExclusiveHeld(Thread::Current());
    }
    mark_sweep_->ScanObject(obj);
  }

 private:
  MarkSweep* const mark_sweep_;
};

void MarkSweep::ScanGrayObjects(byte minimum_age) {
  accounting::CardTable* card_table = GetHeap()->GetCardTable();
  const std::vector<space::ContinuousSpace*>& spaces = GetHeap()->GetContinuousSpaces();
  ScanObjectVisitor visitor(this);
  SetFingerVisitor finger_visitor(this);
  // TODO: C++0x
  typedef std::vector<space::ContinuousSpace*>::const_iterator It;
  for (It it = spaces.begin(), space_end = spaces.end(); it != space_end; ++it) {
    space::ContinuousSpace* space = *it;
    switch (space->GetGcRetentionPolicy()) {
      case space::kGcRetentionPolicyNeverCollect:
        timings_.NewSplit("ScanGrayImageSpaceObjects");
        break;
      case space::kGcRetentionPolicyFullCollect:
        timings_.NewSplit("ScanGrayZygoteSpaceObjects");
        break;
      case space::kGcRetentionPolicyAlwaysCollect:
        timings_.NewSplit("ScanGrayAllocSpaceObjects");
        break;
    }
    byte* begin = space->Begin();
    byte* end = space->End();
    // Image spaces are handled properly since live == marked for them.
    accounting::SpaceBitmap* mark_bitmap = space->GetMarkBitmap();
    card_table->Scan(mark_bitmap, begin, end, visitor, finger_visitor, minimum_age);
  }
}

class CheckBitmapVisitor {
 public:
  explicit CheckBitmapVisitor(MarkSweep* mark_sweep) : mark_sweep_(mark_sweep) {}

  void operator()(const Object* obj) const NO_THREAD_SAFETY_ANALYSIS {
    if (kDebugLocking) {
      Locks::heap_bitmap_lock_->AssertSharedHeld(Thread::Current());
    }
    DCHECK(obj != NULL);
    mark_sweep_->CheckObject(obj);
  }

 private:
  MarkSweep* mark_sweep_;
};

void MarkSweep::VerifyImageRoots() {
  // Verify roots ensures that all the references inside the image space point to
  // objects which are either in the image space or marked objects in the alloc
  // space.
  CheckBitmapVisitor visitor(this);
  const std::vector<space::ContinuousSpace*>& spaces = GetHeap()->GetContinuousSpaces();
  // TODO: C++0x
  typedef std::vector<space::ContinuousSpace*>::const_iterator It;
  for (It it = spaces.begin(), end = spaces.end(); it != end; ++it) {
    if ((*it)->IsImageSpace()) {
      space::ImageSpace* space = (*it)->AsImageSpace();
      uintptr_t begin = reinterpret_cast<uintptr_t>(space->Begin());
      uintptr_t end = reinterpret_cast<uintptr_t>(space->End());
      accounting::SpaceBitmap* live_bitmap = space->GetLiveBitmap();
      DCHECK(live_bitmap != NULL);
      live_bitmap->VisitMarkedRange(begin, end, visitor, VoidFunctor());
    }
  }
}

// Populates the mark stack based on the set of marked objects and
// recursively marks until the mark stack is emptied.
void MarkSweep::RecursiveMark() {
  timings_.NewSplit("RecursiveMark");
  // RecursiveMark will build the lists of known instances of the Reference classes.
  // See DelayReferenceReferent for details.
  CHECK(soft_reference_list_ == NULL);
  CHECK(weak_reference_list_ == NULL);
  CHECK(finalizer_reference_list_ == NULL);
  CHECK(phantom_reference_list_ == NULL);
  CHECK(cleared_reference_list_ == NULL);

  const bool partial = GetGcType() == kGcTypePartial;
  SetFingerVisitor set_finger_visitor(this);
  ScanObjectVisitor scan_visitor(this);
  if (!kDisableFinger) {
    finger_ = NULL;
    const std::vector<space::ContinuousSpace*>& spaces = GetHeap()->GetContinuousSpaces();
    // TODO: C++0x
    typedef std::vector<space::ContinuousSpace*>::const_iterator It;
    for (It it = spaces.begin(), end = spaces.end(); it != end; ++it) {
      space::ContinuousSpace* space = *it;
      if ((space->GetGcRetentionPolicy() == space::kGcRetentionPolicyAlwaysCollect) ||
          (!partial && space->GetGcRetentionPolicy() == space::kGcRetentionPolicyFullCollect)) {
        current_mark_bitmap_ = space->GetMarkBitmap();
        if (current_mark_bitmap_ == NULL) {
          GetHeap()->DumpSpaces();
          LOG(FATAL) << "invalid bitmap";
        }
        // This function does not handle heap end increasing, so we must use the space end.
        uintptr_t begin = reinterpret_cast<uintptr_t>(space->Begin());
        uintptr_t end = reinterpret_cast<uintptr_t>(space->End());
        current_mark_bitmap_->VisitMarkedRange(begin, end, scan_visitor, set_finger_visitor);
      }
    }
  }
  DisableFinger();
  timings_.NewSplit("ProcessMarkStack");
  ProcessMarkStack();
}

bool MarkSweep::IsMarkedCallback(const Object* object, void* arg) {
  return
      reinterpret_cast<MarkSweep*>(arg)->IsMarked(object) ||
      !reinterpret_cast<MarkSweep*>(arg)->GetHeap()->GetLiveBitmap()->Test(object);
}

void MarkSweep::RecursiveMarkDirtyObjects(byte minimum_age) {
  ScanGrayObjects(minimum_age);
  timings_.NewSplit("ProcessMarkStack");
  ProcessMarkStack();
}

void MarkSweep::ReMarkRoots() {
  Runtime::Current()->VisitRoots(ReMarkObjectVisitor, this, true, true);
}

void MarkSweep::SweepJniWeakGlobals(IsMarkedTester is_marked, void* arg) {
  JavaVMExt* vm = Runtime::Current()->GetJavaVM();
  MutexLock mu(Thread::Current(), vm->weak_globals_lock);
  IndirectReferenceTable* table = &vm->weak_globals;
  typedef IndirectReferenceTable::iterator It;  // TODO: C++0x auto
  for (It it = table->begin(), end = table->end(); it != end; ++it) {
    const Object** entry = *it;
    if (!is_marked(*entry, arg)) {
      *entry = kClearedJniWeakGlobal;
    }
  }
}

struct ArrayMarkedCheck {
  accounting::ObjectStack* live_stack;
  MarkSweep* mark_sweep;
};

// Either marked or not live.
bool MarkSweep::IsMarkedArrayCallback(const Object* object, void* arg) {
  ArrayMarkedCheck* array_check = reinterpret_cast<ArrayMarkedCheck*>(arg);
  if (array_check->mark_sweep->IsMarked(object)) {
    return true;
  }
  accounting::ObjectStack* live_stack = array_check->live_stack;
  return std::find(live_stack->Begin(), live_stack->End(), object) == live_stack->End();
}

void MarkSweep::SweepSystemWeaksArray(accounting::ObjectStack* allocations) {
  Runtime* runtime = Runtime::Current();
  // The callbacks check
  // !is_marked where is_marked is the callback but we want
  // !IsMarked && IsLive
  // So compute !(!IsMarked && IsLive) which is equal to (IsMarked || !IsLive).
  // Or for swapped (IsLive || !IsMarked).
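  // IsMarkedArrayCallback reports an object as "marked" if it is actually marked or if it is not
  // on the passed-in allocation stack, i.e. the (IsMarked || !IsLive) form described above, so the
  // corresponding system weak is kept.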
  ArrayMarkedCheck visitor;
  visitor.live_stack = allocations;
  visitor.mark_sweep = this;
  runtime->GetInternTable()->SweepInternTableWeaks(IsMarkedArrayCallback, &visitor);
  runtime->GetMonitorList()->SweepMonitorList(IsMarkedArrayCallback, &visitor);
  SweepJniWeakGlobals(IsMarkedArrayCallback, &visitor);
}

void MarkSweep::SweepSystemWeaks() {
  Runtime* runtime = Runtime::Current();
  // The callbacks check
  // !is_marked where is_marked is the callback but we want
  // !IsMarked && IsLive
  // So compute !(!IsMarked && IsLive) which is equal to (IsMarked || !IsLive).
  // Or for swapped (IsLive || !IsMarked).
  runtime->GetInternTable()->SweepInternTableWeaks(IsMarkedCallback, this);
  runtime->GetMonitorList()->SweepMonitorList(IsMarkedCallback, this);
  SweepJniWeakGlobals(IsMarkedCallback, this);
}

bool MarkSweep::VerifyIsLiveCallback(const Object* obj, void* arg) {
  reinterpret_cast<MarkSweep*>(arg)->VerifyIsLive(obj);
  // We don't actually want to sweep the object, so let's return "marked".
  return true;
}

void MarkSweep::VerifyIsLive(const Object* obj) {
  Heap* heap = GetHeap();
  if (!heap->GetLiveBitmap()->Test(obj)) {
    space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
    if (!large_object_space->GetLiveObjects()->Test(obj)) {
      if (std::find(heap->allocation_stack_->Begin(), heap->allocation_stack_->End(), obj) ==
          heap->allocation_stack_->End()) {
        // Object not found!
        heap->DumpSpaces();
        LOG(FATAL) << "Found dead object " << obj;
      }
    }
  }
}

void MarkSweep::VerifySystemWeaks() {
  Runtime* runtime = Runtime::Current();
  // Verify system weaks, uses a special IsMarked callback which always returns true.
  runtime->GetInternTable()->SweepInternTableWeaks(VerifyIsLiveCallback, this);
  runtime->GetMonitorList()->SweepMonitorList(VerifyIsLiveCallback, this);

  JavaVMExt* vm = runtime->GetJavaVM();
  MutexLock mu(Thread::Current(), vm->weak_globals_lock);
  IndirectReferenceTable* table = &vm->weak_globals;
  typedef IndirectReferenceTable::iterator It;  // TODO: C++0x auto
  for (It it = table->begin(), end = table->end(); it != end; ++it) {
    const Object** entry = *it;
    VerifyIsLive(*entry);
  }
}

struct SweepCallbackContext {
  MarkSweep* mark_sweep;
  space::AllocSpace* space;
  Thread* self;
};

class CheckpointMarkThreadRoots : public Closure {
 public:
  explicit CheckpointMarkThreadRoots(MarkSweep* mark_sweep) : mark_sweep_(mark_sweep) {}

  virtual void Run(Thread* thread) NO_THREAD_SAFETY_ANALYSIS {
    // Note: self is not necessarily equal to thread since thread may be suspended.
    Thread* self = Thread::Current();
    CHECK(thread == self || thread->IsSuspended() || thread->GetState() == kWaitingPerformingGc)
        << thread->GetState() << " thread " << thread << " self " << self;
    thread->VisitRoots(MarkSweep::MarkRootParallelCallback, mark_sweep_);
    mark_sweep_->GetBarrier().Pass(self);
  }

 private:
  MarkSweep* mark_sweep_;
};

void MarkSweep::MarkRootsCheckpoint(Thread* self) {
  CheckpointMarkThreadRoots check_point(this);
  ThreadList* thread_list = Runtime::Current()->GetThreadList();
  // Request the check point is run on all threads, returning a count of the threads that must
  // run through the barrier including self.
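  // Each thread marks its own roots in CheckpointMarkThreadRoots::Run and then calls Pass on the
  // barrier; below we wait until barrier_count threads have done so.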
  size_t barrier_count = thread_list->RunCheckpoint(&check_point);
  // Release locks then wait for all mutator threads to pass the barrier.
  // TODO: optimize to not release locks when there are no threads to wait for.
  Locks::heap_bitmap_lock_->ExclusiveUnlock(self);
  Locks::mutator_lock_->SharedUnlock(self);
  ThreadState old_state = self->SetState(kWaitingForCheckPointsToRun);
  CHECK_EQ(old_state, kWaitingPerformingGc);
  gc_barrier_->Increment(self, barrier_count);
  self->SetState(kWaitingPerformingGc);
  Locks::mutator_lock_->SharedLock(self);
  Locks::heap_bitmap_lock_->ExclusiveLock(self);
}

void MarkSweep::SweepCallback(size_t num_ptrs, Object** ptrs, void* arg) {
  SweepCallbackContext* context = static_cast<SweepCallbackContext*>(arg);
  MarkSweep* mark_sweep = context->mark_sweep;
  Heap* heap = mark_sweep->GetHeap();
  space::AllocSpace* space = context->space;
  Thread* self = context->self;
  Locks::heap_bitmap_lock_->AssertExclusiveHeld(self);
  // Use a bulk free, that merges consecutive objects before freeing, or free per object?
  // Documentation suggests better free performance with merging, but this may be at the expense
  // of allocation.
  size_t freed_objects = num_ptrs;
  // AllocSpace::FreeList clears the value in ptrs, so perform after clearing the live bit.
  size_t freed_bytes = space->FreeList(self, num_ptrs, ptrs);
  heap->RecordFree(freed_objects, freed_bytes);
  mark_sweep->freed_objects_ += freed_objects;
  mark_sweep->freed_bytes_ += freed_bytes;
}

void MarkSweep::ZygoteSweepCallback(size_t num_ptrs, Object** ptrs, void* arg) {
  SweepCallbackContext* context = static_cast<SweepCallbackContext*>(arg);
  Locks::heap_bitmap_lock_->AssertExclusiveHeld(context->self);
  Heap* heap = context->mark_sweep->GetHeap();
  // We don't free any actual memory to avoid dirtying the shared zygote pages.
  for (size_t i = 0; i < num_ptrs; ++i) {
    Object* obj = static_cast<Object*>(ptrs[i]);
    heap->GetLiveBitmap()->Clear(obj);
    heap->GetCardTable()->MarkCard(obj);
  }
}

void MarkSweep::SweepArray(accounting::ObjectStack* allocations, bool swap_bitmaps) {
  size_t freed_bytes = 0;
  space::DlMallocSpace* space = heap_->GetAllocSpace();

  // If we don't swap bitmaps then newly allocated Weaks go into the live bitmap but not mark
  // bitmap, resulting in occasional frees of Weaks which are still in use.
  timings_.NewSplit("SweepSystemWeaks");
  SweepSystemWeaksArray(allocations);

  timings_.NewSplit("Process allocation stack");
  // Newly allocated objects MUST be in the alloc space and those are the only objects which we are
  // going to free.
  accounting::SpaceBitmap* live_bitmap = space->GetLiveBitmap();
  accounting::SpaceBitmap* mark_bitmap = space->GetMarkBitmap();
  space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
  accounting::SpaceSetMap* large_live_objects = large_object_space->GetLiveObjects();
  accounting::SpaceSetMap* large_mark_objects = large_object_space->GetMarkObjects();
  if (swap_bitmaps) {
    std::swap(live_bitmap, mark_bitmap);
    std::swap(large_live_objects, large_mark_objects);
  }

  size_t freed_large_objects = 0;
  size_t count = allocations->Size();
  Object** objects = const_cast<Object**>(allocations->Begin());
  Object** out = objects;

  // Empty the allocation stack.
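  // The loop below partitions the allocation stack in place: unmarked alloc space objects are
  // compacted to the front of the array so they can be freed with one bulk FreeList call, while
  // unmarked large objects are freed individually.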
  Thread* self = Thread::Current();
  for (size_t i = 0; i < count; ++i) {
    Object* obj = objects[i];
    // There should only be objects in the AllocSpace/LargeObjectSpace in the allocation stack.
    if (LIKELY(mark_bitmap->HasAddress(obj))) {
      if (!mark_bitmap->Test(obj)) {
        // Don't bother un-marking since we clear the mark bitmap anyways.
        *(out++) = obj;
      }
    } else if (!large_mark_objects->Test(obj)) {
      ++freed_large_objects;
      freed_bytes += large_object_space->Free(self, obj);
    }
  }
  CHECK_EQ(count, allocations->Size());
  timings_.NewSplit("FreeList");

  size_t freed_objects = out - objects;
  freed_bytes += space->FreeList(self, freed_objects, objects);
  VLOG(heap) << "Freed " << freed_objects << "/" << count
             << " objects with size " << PrettySize(freed_bytes);
  heap_->RecordFree(freed_objects + freed_large_objects, freed_bytes);
  freed_objects_ += freed_objects;
  freed_bytes_ += freed_bytes;

  timings_.NewSplit("ResetStack");
  allocations->Reset();
}

void MarkSweep::Sweep(bool swap_bitmaps) {
  DCHECK(mark_stack_->IsEmpty());

  // If we don't swap bitmaps then newly allocated Weaks go into the live bitmap but not mark
  // bitmap, resulting in occasional frees of Weaks which are still in use.
  timings_.NewSplit("SweepSystemWeaks");
  SweepSystemWeaks();

  const bool partial = (GetGcType() == kGcTypePartial);
  SweepCallbackContext scc;
  scc.mark_sweep = this;
  scc.self = Thread::Current();
  const std::vector<space::ContinuousSpace*>& spaces = GetHeap()->GetContinuousSpaces();
  // TODO: C++0x
  typedef std::vector<space::ContinuousSpace*>::const_iterator It;
  for (It it = spaces.begin(), end = spaces.end(); it != end; ++it) {
    space::ContinuousSpace* space = *it;
    // We always sweep always-collect spaces.
    bool sweep_space = (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyAlwaysCollect);
    if (!partial && !sweep_space) {
      // We also sweep full-collect spaces when the GC isn't a partial GC (i.e. it's a full GC).
      sweep_space = (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyFullCollect);
    }
    if (sweep_space) {
      uintptr_t begin = reinterpret_cast<uintptr_t>(space->Begin());
      uintptr_t end = reinterpret_cast<uintptr_t>(space->End());
      scc.space = space->AsDlMallocSpace();
      accounting::SpaceBitmap* live_bitmap = space->GetLiveBitmap();
      accounting::SpaceBitmap* mark_bitmap = space->GetMarkBitmap();
      if (swap_bitmaps) {
        std::swap(live_bitmap, mark_bitmap);
      }
      if (!space->IsZygoteSpace()) {
        timings_.NewSplit("SweepAllocSpace");
        // Bitmaps are pre-swapped for optimization which enables sweeping with the heap unlocked.
        accounting::SpaceBitmap::SweepWalk(*live_bitmap, *mark_bitmap, begin, end,
                                           &SweepCallback, reinterpret_cast<void*>(&scc));
      } else {
        timings_.NewSplit("SweepZygote");
        // Zygote sweep takes care of dirtying cards and clearing live bits, does not free actual
        // memory.
        accounting::SpaceBitmap::SweepWalk(*live_bitmap, *mark_bitmap, begin, end,
                                           &ZygoteSweepCallback, reinterpret_cast<void*>(&scc));
      }
    }
  }

  timings_.NewSplit("SweepLargeObjects");
  SweepLargeObjects(swap_bitmaps);
}

void MarkSweep::SweepLargeObjects(bool swap_bitmaps) {
  // Sweep large objects.
  space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
  accounting::SpaceSetMap* large_live_objects = large_object_space->GetLiveObjects();
  accounting::SpaceSetMap* large_mark_objects = large_object_space->GetMarkObjects();
  if (swap_bitmaps) {
    std::swap(large_live_objects, large_mark_objects);
  }
  accounting::SpaceSetMap::Objects& live_objects = large_live_objects->GetObjects();
  // O(n*log(n)) but hopefully there are not too many large objects.
  size_t freed_objects = 0;
  size_t freed_bytes = 0;
  Thread* self = Thread::Current();
  // TODO: C++0x
  typedef accounting::SpaceSetMap::Objects::iterator It;
  for (It it = live_objects.begin(), end = live_objects.end(); it != end; ++it) {
    if (!large_mark_objects->Test(*it)) {
      freed_bytes += large_object_space->Free(self, const_cast<Object*>(*it));
      ++freed_objects;
    }
  }
  freed_objects_ += freed_objects;
  freed_bytes_ += freed_bytes;
  GetHeap()->RecordFree(freed_objects, freed_bytes);
}

void MarkSweep::CheckReference(const Object* obj, const Object* ref, MemberOffset offset, bool is_static) {
  const std::vector<space::ContinuousSpace*>& spaces = GetHeap()->GetContinuousSpaces();
  // TODO: C++0x
  typedef std::vector<space::ContinuousSpace*>::const_iterator It;
  for (It it = spaces.begin(), end = spaces.end(); it != end; ++it) {
    space::ContinuousSpace* space = *it;
    if (space->IsDlMallocSpace() && space->Contains(ref)) {
      DCHECK(IsMarked(obj));

      bool is_marked = IsMarked(ref);
      if (!is_marked) {
        LOG(INFO) << *space;
        LOG(WARNING) << (is_static ? "Static ref'" : "Instance ref'") << PrettyTypeOf(ref)
                     << "' (" << reinterpret_cast<const void*>(ref) << ") in '" << PrettyTypeOf(obj)
                     << "' (" << reinterpret_cast<const void*>(obj) << ") at offset "
                     << reinterpret_cast<void*>(offset.Int32Value()) << " wasn't marked";

        const Class* klass = is_static ? obj->AsClass() : obj->GetClass();
        DCHECK(klass != NULL);
        const ObjectArray<Field>* fields = is_static ? klass->GetSFields() : klass->GetIFields();
        DCHECK(fields != NULL);
        bool found = false;
        for (int32_t i = 0; i < fields->GetLength(); ++i) {
          const Field* cur = fields->Get(i);
          if (cur->GetOffset().Int32Value() == offset.Int32Value()) {
            LOG(WARNING) << "Field referencing the alloc space was " << PrettyField(cur);
            found = true;
            break;
          }
        }
        if (!found) {
          LOG(WARNING) << "Could not find field in object alloc space with offset " << offset.Int32Value();
        }

        bool obj_marked = heap_->GetCardTable()->IsDirty(obj);
        if (!obj_marked) {
          LOG(WARNING) << "Object '" << PrettyTypeOf(obj) << "' "
                       << "(" << reinterpret_cast<const void*>(obj) << ") contains references to "
                       << "the alloc space, but wasn't card marked";
        }
      }
    }
    break;
  }
}

// Process the "referent" field in a java.lang.ref.Reference. If the
// referent has not yet been marked, put it on the appropriate list in
// the gcHeap for later processing.
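// Note: a reference whose pending-next field is already non-null is assumed to be enqueued
// already, so the check below avoids adding the same reference to a list twice.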
void MarkSweep::DelayReferenceReferent(Object* obj) {
  DCHECK(obj != NULL);
  Class* klass = obj->GetClass();
  DCHECK(klass != NULL);
  DCHECK(klass->IsReferenceClass());
  Object* pending = obj->GetFieldObject<Object*>(heap_->GetReferencePendingNextOffset(), false);
  Object* referent = heap_->GetReferenceReferent(obj);
  if (kCountJavaLangRefs) {
    ++reference_count_;
  }
  if (pending == NULL && referent != NULL && !IsMarked(referent)) {
    Object** list = NULL;
    if (klass->IsSoftReferenceClass()) {
      list = &soft_reference_list_;
    } else if (klass->IsWeakReferenceClass()) {
      list = &weak_reference_list_;
    } else if (klass->IsFinalizerReferenceClass()) {
      list = &finalizer_reference_list_;
    } else if (klass->IsPhantomReferenceClass()) {
      list = &phantom_reference_list_;
    }
    DCHECK(list != NULL) << PrettyClass(klass) << " " << std::hex << klass->GetAccessFlags();
    // TODO: One lock per list?
    heap_->EnqueuePendingReference(obj, list);
  }
}

void MarkSweep::ScanRoot(const Object* obj) {
  ScanObject(obj);
}

class MarkObjectVisitor {
 public:
  explicit MarkObjectVisitor(MarkSweep* const mark_sweep) : mark_sweep_(mark_sweep) {}

  // TODO: Fix this when annotalysis works with visitors.
  void operator()(const Object* /* obj */, const Object* ref, const MemberOffset& /* offset */,
                  bool /* is_static */) const
      NO_THREAD_SAFETY_ANALYSIS {
    if (kDebugLocking) {
      Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
      Locks::heap_bitmap_lock_->AssertExclusiveHeld(Thread::Current());
    }
    mark_sweep_->MarkObject(ref);
  }

 private:
  MarkSweep* const mark_sweep_;
};

// Scans an object reference. Determines the type of the reference
// and dispatches to a specialized scanning routine.
void MarkSweep::ScanObject(const Object* obj) {
  MarkObjectVisitor visitor(this);
  ScanObjectVisit(obj, visitor);
}

class MarkStackChunk : public Task {
 public:
  MarkStackChunk(ThreadPool* thread_pool, MarkSweep* mark_sweep, Object** begin, Object** end)
      : mark_sweep_(mark_sweep),
        thread_pool_(thread_pool),
        index_(0),
        length_(0),
        output_(NULL) {
    length_ = end - begin;
    if (begin != end) {
      // Cost not significant since we only do this for the initial set of mark stack chunks.
      memcpy(data_, begin, length_ * sizeof(*begin));
    }
    if (kCountTasks) {
      ++mark_sweep_->work_chunks_created_;
    }
  }

  ~MarkStackChunk() {
    DCHECK(output_ == NULL || output_->length_ == 0);
    DCHECK_GE(index_, length_);
    delete output_;
    if (kCountTasks) {
      ++mark_sweep_->work_chunks_deleted_;
    }
  }

  MarkSweep* const mark_sweep_;
  ThreadPool* const thread_pool_;
  static const size_t max_size = 1 * KB;
  // Index of which object we are scanning. Only needs to be atomic if we are doing work stealing.
  size_t index_;
  // Input / output mark stack. We add newly marked references to data_ until length reaches
  // max_size. This is an optimization so that fewer tasks are created.
  // TODO: Investigate using a bounded buffer FIFO.
  Object* data_[max_size];
  // How many elements in data_ we need to scan.
  size_t length_;
  // Output block, newly marked references get added to the output block so that another thread can
  // scan them.
  MarkStackChunk* output_;

  class MarkObjectParallelVisitor {
   public:
    explicit MarkObjectParallelVisitor(MarkStackChunk* chunk_task) : chunk_task_(chunk_task) {}

    void operator()(const Object* /* obj */, const Object* ref,
                    const MemberOffset& /* offset */, bool /* is_static */) const {
      if (ref != NULL && chunk_task_->mark_sweep_->MarkObjectParallel(ref)) {
        chunk_task_->MarkStackPush(ref);
      }
    }

   private:
    MarkStackChunk* const chunk_task_;
  };

  // Push an object into the block.
  // Don't need to use an atomic ++ since only one thread is writing to an output block at any
  // given time.
  void Push(Object* obj) {
    CHECK(obj != NULL);
    data_[length_++] = obj;
  }

  void MarkStackPush(const Object* obj) {
    if (static_cast<size_t>(length_) < max_size) {
      Push(const_cast<Object*>(obj));
    } else {
      // Internal (thread-local) buffer is full, push to a new buffer instead.
      if (UNLIKELY(output_ == NULL)) {
        AllocateOutputChunk();
      } else if (UNLIKELY(static_cast<size_t>(output_->length_) == max_size)) {
        // Output block is full, queue it up for processing and obtain a new block.
        EnqueueOutput();
        AllocateOutputChunk();
      }
      output_->Push(const_cast<Object*>(obj));
    }
  }

  void ScanObject(Object* obj) {
    mark_sweep_->ScanObjectVisit(obj, MarkObjectParallelVisitor(this));
  }

  void EnqueueOutput() {
    if (output_ != NULL) {
      uint64_t start = 0;
      if (kMeasureOverhead) {
        start = NanoTime();
      }
      thread_pool_->AddTask(Thread::Current(), output_);
      output_ = NULL;
      if (kMeasureOverhead) {
        mark_sweep_->overhead_time_ += NanoTime() - start;
      }
    }
  }

  void AllocateOutputChunk() {
    uint64_t start = 0;
    if (kMeasureOverhead) {
      start = NanoTime();
    }
    output_ = new MarkStackChunk(thread_pool_, mark_sweep_, NULL, NULL);
    if (kMeasureOverhead) {
      mark_sweep_->overhead_time_ += NanoTime() - start;
    }
  }

  void Finalize() {
    EnqueueOutput();
    delete this;
  }

  // Scans all of the objects.
  virtual void Run(Thread* self) {
    size_t index;
    while ((index = index_++) < length_) {
      if (kUseMarkStackPrefetch) {
        static const size_t prefetch_look_ahead = 1;
        __builtin_prefetch(data_[std::min(index + prefetch_look_ahead, length_ - 1)]);
      }
      Object* obj = data_[index];
      DCHECK(obj != NULL);
      ScanObject(obj);
    }
  }
};

void MarkSweep::ProcessMarkStackParallel() {
  CHECK(kDisableFinger) << "parallel mark stack processing cannot work when finger is enabled";
  Thread* self = Thread::Current();
  ThreadPool* thread_pool = GetHeap()->GetThreadPool();
  // Split the current mark stack up into work tasks.
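  // Each task gets an even share of the stack, capped at the fixed MarkStackChunk buffer size.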
  const size_t num_threads = thread_pool->GetThreadCount();
  const size_t stack_size = mark_stack_->Size();
  const size_t chunk_size =
      std::min((stack_size + num_threads - 1) / num_threads,
               static_cast<size_t>(MarkStackChunk::max_size));
  size_t index = 0;
  for (size_t i = 0; i < num_threads || index < stack_size; ++i) {
    Object** begin = &mark_stack_->Begin()[std::min(stack_size, index)];
    Object** end = &mark_stack_->Begin()[std::min(stack_size, index + chunk_size)];
    index += chunk_size;
    thread_pool->AddTask(self, new MarkStackChunk(thread_pool, this, begin, end));
  }
  thread_pool->StartWorkers(self);
  thread_pool->Wait(self, true, true);
  mark_stack_->Reset();
  // LOG(INFO) << "Idle wait time " << PrettyDuration(thread_pool->GetWaitTime());
  CHECK_EQ(work_chunks_created_, work_chunks_deleted_) << " some of the work chunks were leaked";
}

// Scan anything that's on the mark stack.
void MarkSweep::ProcessMarkStack() {
  ThreadPool* thread_pool = GetHeap()->GetThreadPool();
  if (kParallelMarkStack && thread_pool != NULL && thread_pool->GetThreadCount() > 0) {
    ProcessMarkStackParallel();
    return;
  }

  if (kUseMarkStackPrefetch) {
    const size_t fifo_size = 4;
    const size_t fifo_mask = fifo_size - 1;
    const Object* fifo[fifo_size];
    for (size_t i = 0; i < fifo_size; ++i) {
      fifo[i] = NULL;
    }
    size_t fifo_pos = 0;
    size_t fifo_count = 0;
    for (;;) {
      const Object* obj = fifo[fifo_pos & fifo_mask];
      if (obj != NULL) {
        ScanObject(obj);
        fifo[fifo_pos & fifo_mask] = NULL;
        --fifo_count;
      }

      if (!mark_stack_->IsEmpty()) {
        const Object* obj = mark_stack_->PopBack();
        DCHECK(obj != NULL);
        fifo[fifo_pos & fifo_mask] = obj;
        __builtin_prefetch(obj);
        fifo_count++;
      }
      fifo_pos++;

      if (!fifo_count) {
        CHECK(mark_stack_->IsEmpty()) << mark_stack_->Size();
        break;
      }
    }
  } else {
    while (!mark_stack_->IsEmpty()) {
      const Object* obj = mark_stack_->PopBack();
      DCHECK(obj != NULL);
      ScanObject(obj);
    }
  }
}

// Walks the reference list marking any references subject to the
// reference clearing policy. References with a black referent are
// removed from the list. References with white referents biased
// toward saving are blackened and also removed from the list.
void MarkSweep::PreserveSomeSoftReferences(Object** list) {
  DCHECK(list != NULL);
  Object* clear = NULL;
  size_t counter = 0;

  DCHECK(mark_stack_->IsEmpty());

  while (*list != NULL) {
    Object* ref = heap_->DequeuePendingReference(list);
    Object* referent = heap_->GetReferenceReferent(ref);
    if (referent == NULL) {
      // Referent was cleared by the user during marking.
      continue;
    }
    bool is_marked = IsMarked(referent);
    if (!is_marked && ((++counter) & 1)) {
      // Referent is white and biased toward saving, mark it.
      MarkObject(referent);
      is_marked = true;
    }
    if (!is_marked) {
      // Referent is white, queue it for clearing.
      heap_->EnqueuePendingReference(ref, &clear);
    }
  }
  *list = clear;
  // Restart the mark with the newly black references added to the
  // root set.
  ProcessMarkStack();
}

inline bool MarkSweep::IsMarked(const Object* object) const
    SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
  if (object >= immune_begin_ && object < immune_end_) {
    return true;
  }
  DCHECK(current_mark_bitmap_ != NULL);
  if (current_mark_bitmap_->HasAddress(object)) {
    return current_mark_bitmap_->Test(object);
  }
  return heap_->GetMarkBitmap()->Test(object);
}

// Unlink the reference list, clearing reference objects with white
// referents. Cleared references registered to a reference queue are
// scheduled for appending by the heap worker thread.
void MarkSweep::ClearWhiteReferences(Object** list) {
  DCHECK(list != NULL);
  while (*list != NULL) {
    Object* ref = heap_->DequeuePendingReference(list);
    Object* referent = heap_->GetReferenceReferent(ref);
    if (referent != NULL && !IsMarked(referent)) {
      // Referent is white, clear it.
      heap_->ClearReferenceReferent(ref);
      if (heap_->IsEnqueuable(ref)) {
        heap_->EnqueueReference(ref, &cleared_reference_list_);
      }
    }
  }
  DCHECK(*list == NULL);
}

// Enqueues finalizer references with white referents. White
// referents are blackened, moved to the zombie field, and the
// referent field is cleared.
void MarkSweep::EnqueueFinalizerReferences(Object** list) {
  DCHECK(list != NULL);
  MemberOffset zombie_offset = heap_->GetFinalizerReferenceZombieOffset();
  bool has_enqueued = false;
  while (*list != NULL) {
    Object* ref = heap_->DequeuePendingReference(list);
    Object* referent = heap_->GetReferenceReferent(ref);
    if (referent != NULL && !IsMarked(referent)) {
      MarkObject(referent);
      // If the referent is non-null the reference must be queuable.
      DCHECK(heap_->IsEnqueuable(ref));
      ref->SetFieldObject(zombie_offset, referent, false);
      heap_->ClearReferenceReferent(ref);
      heap_->EnqueueReference(ref, &cleared_reference_list_);
      has_enqueued = true;
    }
  }
  if (has_enqueued) {
    ProcessMarkStack();
  }
  DCHECK(*list == NULL);
}

// Process reference class instances and schedule finalizations.
void MarkSweep::ProcessReferences(Object** soft_references, bool clear_soft,
                                  Object** weak_references,
                                  Object** finalizer_references,
                                  Object** phantom_references) {
  DCHECK(soft_references != NULL);
  DCHECK(weak_references != NULL);
  DCHECK(finalizer_references != NULL);
  DCHECK(phantom_references != NULL);

  // Unless we are in the zygote or required to clear soft references
  // with white references, preserve some white referents.
  if (!clear_soft && !Runtime::Current()->IsZygote()) {
    PreserveSomeSoftReferences(soft_references);
  }

  // Clear all remaining soft and weak references with white
  // referents.
  ClearWhiteReferences(soft_references);
  ClearWhiteReferences(weak_references);

  // Preserve all white objects with finalize methods and schedule
  // them for finalization.
  EnqueueFinalizerReferences(finalizer_references);

  // Clear all f-reachable soft and weak references with white
  // referents.
  ClearWhiteReferences(soft_references);
  ClearWhiteReferences(weak_references);

  // Clear all phantom references with white referents.
  ClearWhiteReferences(phantom_references);

  // At this point all reference lists should be empty.
  DCHECK(*soft_references == NULL);
  DCHECK(*weak_references == NULL);
  DCHECK(*finalizer_references == NULL);
  DCHECK(*phantom_references == NULL);
}

void MarkSweep::UnBindBitmaps() {
  const std::vector<space::ContinuousSpace*>& spaces = GetHeap()->GetContinuousSpaces();
  // TODO: C++0x
  typedef std::vector<space::ContinuousSpace*>::const_iterator It;
  for (It it = spaces.begin(), end = spaces.end(); it != end; ++it) {
    space::ContinuousSpace* space = *it;
    if (space->IsDlMallocSpace()) {
      space::DlMallocSpace* alloc_space = space->AsDlMallocSpace();
      if (alloc_space->temp_bitmap_.get() != NULL) {
        // At this point, the temp_bitmap holds our old mark bitmap.
        accounting::SpaceBitmap* new_bitmap = alloc_space->temp_bitmap_.release();
        GetHeap()->GetMarkBitmap()->ReplaceBitmap(alloc_space->mark_bitmap_.get(), new_bitmap);
        CHECK_EQ(alloc_space->mark_bitmap_.release(), alloc_space->live_bitmap_.get());
        alloc_space->mark_bitmap_.reset(new_bitmap);
        DCHECK(alloc_space->temp_bitmap_.get() == NULL);
      }
    }
  }
}

void MarkSweep::FinishPhase() {
  // Can't enqueue references if we hold the mutator lock.
  Object* cleared_references = GetClearedReferences();
  Heap* heap = GetHeap();
  heap->EnqueueClearedReferences(&cleared_references);

  heap->PostGcVerification(this);

  timings_.NewSplit("GrowForUtilization");
  heap->GrowForUtilization(GetGcType(), GetDurationNs());

  timings_.NewSplit("RequestHeapTrim");
  heap->RequestHeapTrim();

  // Update the cumulative statistics.
  total_time_ns_ += GetDurationNs();
  total_paused_time_ns_ += std::accumulate(GetPauseTimes().begin(), GetPauseTimes().end(), 0,
                                           std::plus<uint64_t>());
  total_freed_objects_ += GetFreedObjects();
  total_freed_bytes_ += GetFreedBytes();

  // Ensure that the mark stack is empty.
  CHECK(mark_stack_->IsEmpty());

  if (kCountScannedTypes) {
    VLOG(gc) << "MarkSweep scanned classes=" << class_count_ << " arrays=" << array_count_
             << " other=" << other_count_;
  }

  if (kCountTasks) {
    VLOG(gc) << "Total number of work chunks allocated: " << work_chunks_created_;
  }

  if (kMeasureOverhead) {
    VLOG(gc) << "Overhead time " << PrettyDuration(overhead_time_);
  }

  if (kProfileLargeObjects) {
    VLOG(gc) << "Large objects tested " << large_object_test_ << " marked " << large_object_mark_;
  }

  if (kCountClassesMarked) {
    VLOG(gc) << "Classes marked " << classes_marked_;
  }

  if (kCountJavaLangRefs) {
    VLOG(gc) << "References scanned " << reference_count_;
  }

  // Update the cumulative loggers.
  cumulative_timings_.Start();
  cumulative_timings_.AddNewLogger(timings_);
  cumulative_timings_.End();

  // Clear all of the spaces' mark bitmaps.
  const std::vector<space::ContinuousSpace*>& spaces = GetHeap()->GetContinuousSpaces();
  // TODO: C++0x
  typedef std::vector<space::ContinuousSpace*>::const_iterator It;
  for (It it = spaces.begin(), end = spaces.end(); it != end; ++it) {
    space::ContinuousSpace* space = *it;
    if (space->GetGcRetentionPolicy() != space::kGcRetentionPolicyNeverCollect) {
      space->GetMarkBitmap()->Clear();
    }
  }
  mark_stack_->Reset();

  // Reset the marked large objects.
  space::LargeObjectSpace* large_objects = GetHeap()->GetLargeObjectsSpace();
  large_objects->GetMarkObjects()->Clear();
}

}  // namespace collector
}  // namespace gc
}  // namespace art