mark_sweep.cc revision b22a451675c29ac3fc82a8761d2a385a170d6d7f
/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "mark_sweep.h"

#include <algorithm>  // For std::find, std::min, std::max, std::swap.
#include <cstring>    // For memcpy.
#include <functional>
#include <numeric>
#include <climits>
#include <vector>

#include "base/logging.h"
#include "base/macros.h"
#include "base/mutex-inl.h"
#include "base/timing_logger.h"
#include "gc/accounting/card_table-inl.h"
#include "gc/accounting/heap_bitmap.h"
#include "gc/accounting/space_bitmap-inl.h"
#include "gc/heap.h"
#include "gc/space/image_space.h"
#include "gc/space/large_object_space.h"
#include "gc/space/space-inl.h"
#include "indirect_reference_table.h"
#include "intern_table.h"
#include "jni_internal.h"
#include "monitor.h"
#include "mark_sweep-inl.h"
#include "mirror/class-inl.h"
#include "mirror/class_loader.h"
#include "mirror/dex_cache.h"
#include "mirror/field.h"
#include "mirror/field-inl.h"
#include "mirror/object-inl.h"
#include "mirror/object_array.h"
#include "mirror/object_array-inl.h"
#include "runtime.h"
#include "thread-inl.h"
#include "thread_list.h"
#include "verifier/method_verifier.h"

using ::art::mirror::Class;
using ::art::mirror::Field;
using ::art::mirror::Object;
using ::art::mirror::ObjectArray;

namespace art {
namespace gc {
namespace collector {

// Performance options.
static const bool kParallelMarkStack = true;
static const bool kDisableFinger = true;  // TODO: Fix, bit rotten.
static const bool kUseMarkStackPrefetch = true;
static const size_t kSweepArrayChunkFreeSize = 1024;

// Profiling and information flags.
static const bool kCountClassesMarked = false;
static const bool kProfileLargeObjects = false;
static const bool kMeasureOverhead = false;
static const bool kCountTasks = false;
static const bool kCountJavaLangRefs = false;

void MarkSweep::ImmuneSpace(space::ContinuousSpace* space) {
  // Bind live to mark bitmap if necessary.
  if (space->GetLiveBitmap() != space->GetMarkBitmap()) {
    BindLiveToMarkBitmap(space);
  }

  // Add the space to the immune region.
  if (immune_begin_ == NULL) {
    DCHECK(immune_end_ == NULL);
    SetImmuneRange(reinterpret_cast<Object*>(space->Begin()),
                   reinterpret_cast<Object*>(space->End()));
  } else {
    const std::vector<space::ContinuousSpace*>& spaces = GetHeap()->GetContinuousSpaces();
    const space::ContinuousSpace* prev_space = NULL;
    // Find out if the previous space is immune.
    // TODO: C++0x
    typedef std::vector<space::ContinuousSpace*>::const_iterator It;
    for (It it = spaces.begin(), end = spaces.end(); it != end; ++it) {
      if (*it == space) {
        break;
      }
      prev_space = *it;
    }

    // If the previous space was immune, then extend the immune region. Relies on continuous
    // spaces being sorted by Heap::AddContinuousSpace.
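    // Note that when the previous space is not immune, the new space is left out of the immune
    // region entirely: immune_begin_/immune_end_ always describe a single contiguous range.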
    if (prev_space != NULL &&
        immune_begin_ <= reinterpret_cast<Object*>(prev_space->Begin()) &&
        immune_end_ >= reinterpret_cast<Object*>(prev_space->End())) {
      immune_begin_ = std::min(reinterpret_cast<Object*>(space->Begin()), immune_begin_);
      immune_end_ = std::max(reinterpret_cast<Object*>(space->End()), immune_end_);
    }
  }
}

void MarkSweep::BindBitmaps() {
  timings_.StartSplit("BindBitmaps");
  const std::vector<space::ContinuousSpace*>& spaces = GetHeap()->GetContinuousSpaces();
  WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);

  // Mark all of the spaces we never collect as immune.
  typedef std::vector<space::ContinuousSpace*>::const_iterator It;
  for (It it = spaces.begin(), end = spaces.end(); it != end; ++it) {
    space::ContinuousSpace* space = *it;
    if (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyNeverCollect) {
      ImmuneSpace(space);
    }
  }
  timings_.EndSplit();
}

MarkSweep::MarkSweep(Heap* heap, bool is_concurrent, const std::string& name_prefix)
    : GarbageCollector(heap,
                       name_prefix + (name_prefix.empty() ? "" : " ") +
                       (is_concurrent ? "concurrent mark sweep" : "mark sweep")),
      current_mark_bitmap_(NULL),
      java_lang_Class_(NULL),
      mark_stack_(NULL),
      immune_begin_(NULL),
      immune_end_(NULL),
      soft_reference_list_(NULL),
      weak_reference_list_(NULL),
      finalizer_reference_list_(NULL),
      phantom_reference_list_(NULL),
      cleared_reference_list_(NULL),
      gc_barrier_(new Barrier(0)),
      large_object_lock_("mark sweep large object lock", kMarkSweepLargeObjectLock),
      mark_stack_expand_lock_("mark sweep mark stack expand lock"),
      is_concurrent_(is_concurrent),
      clear_soft_references_(false) {
}

void MarkSweep::InitializePhase() {
  timings_.Reset();
  base::TimingLogger::ScopedSplit split("InitializePhase", &timings_);
  mark_stack_ = GetHeap()->mark_stack_.get();
  DCHECK(mark_stack_ != NULL);
  SetImmuneRange(NULL, NULL);
  soft_reference_list_ = NULL;
  weak_reference_list_ = NULL;
  finalizer_reference_list_ = NULL;
  phantom_reference_list_ = NULL;
  cleared_reference_list_ = NULL;
  freed_bytes_ = 0;
  freed_objects_ = 0;
  class_count_ = 0;
  array_count_ = 0;
  other_count_ = 0;
  large_object_test_ = 0;
  large_object_mark_ = 0;
  classes_marked_ = 0;
  overhead_time_ = 0;
  work_chunks_created_ = 0;
  work_chunks_deleted_ = 0;
  reference_count_ = 0;
  java_lang_Class_ = Class::GetJavaLangClass();
  CHECK(java_lang_Class_ != NULL);

  FindDefaultMarkBitmap();

  // Do any pre GC verification.
  timings_.NewSplit("PreGcVerification");
  heap_->PreGcVerification(this);
}

void MarkSweep::ProcessReferences(Thread* self) {
  WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
  ProcessReferences(&soft_reference_list_, clear_soft_references_, &weak_reference_list_,
                    &finalizer_reference_list_, &phantom_reference_list_);
}

bool MarkSweep::HandleDirtyObjectsPhase() {
  base::TimingLogger::ScopedSplit split("HandleDirtyObjectsPhase", &timings_);
  Thread* self = Thread::Current();
  accounting::ObjectStack* allocation_stack = GetHeap()->allocation_stack_.get();
  Locks::mutator_lock_->AssertExclusiveHeld(self);

  {
    WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);

    // Re-mark root set.
    ReMarkRoots();

    // Scan dirty objects; this is only required when we are doing a concurrent GC.
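    // Only cards dirtied by mutators after Heap::ProcessCards aged them are still kCardDirty,
    // so this rescans just the objects that were written to during the concurrent mark.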
    RecursiveMarkDirtyObjects(accounting::CardTable::kCardDirty);
  }

  ProcessReferences(self);

  // Only need to do this if we have the card mark verification on, and only during concurrent GC.
  if (GetHeap()->verify_missing_card_marks_) {
    WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
    // This second sweep makes sure that we don't have any objects in the live stack which point to
    // freed objects. These cause problems since their references may be previously freed objects.
    SweepArray(allocation_stack, false);
  }
  return true;
}

bool MarkSweep::IsConcurrent() const {
  return is_concurrent_;
}

void MarkSweep::MarkingPhase() {
  base::TimingLogger::ScopedSplit split("MarkingPhase", &timings_);
  Heap* heap = GetHeap();
  Thread* self = Thread::Current();

  BindBitmaps();
  FindDefaultMarkBitmap();

  // Process dirty cards and add dirty cards to mod union tables.
  heap->ProcessCards(timings_);

  // Need to do this before the checkpoint since we don't want any threads to add references to
  // the live stack during the recursive mark.
  timings_.NewSplit("SwapStacks");
  heap->SwapStacks();

  WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
  if (Locks::mutator_lock_->IsExclusiveHeld(self)) {
    // If we exclusively hold the mutator lock, all threads must be suspended.
    MarkRoots();
  } else {
    MarkRootsCheckpoint(self);
    MarkNonThreadRoots();
  }
  MarkConcurrentRoots();

  heap->UpdateAndMarkModUnion(this, timings_, GetGcType());
  MarkReachableObjects();
}

void MarkSweep::MarkReachableObjects() {
  // Mark everything allocated since the last GC as live so that we can sweep concurrently,
  // knowing that new allocations won't be marked as live.
  timings_.StartSplit("MarkStackAsLive");
  accounting::ObjectStack* live_stack = heap_->GetLiveStack();
  heap_->MarkAllocStack(heap_->alloc_space_->GetLiveBitmap(),
                        heap_->large_object_space_->GetLiveObjects(),
                        live_stack);
  live_stack->Reset();
  timings_.EndSplit();
  // Recursively mark all the non-image bits set in the mark bitmap.
  RecursiveMark();
}

void MarkSweep::ReclaimPhase() {
  base::TimingLogger::ScopedSplit split("ReclaimPhase", &timings_);
  Thread* self = Thread::Current();

  if (!IsConcurrent()) {
    base::TimingLogger::ScopedSplit split("ProcessReferences", &timings_);
    ProcessReferences(self);
  } else {
    base::TimingLogger::ScopedSplit split("UnMarkAllocStack", &timings_);
    accounting::ObjectStack* allocation_stack = GetHeap()->allocation_stack_.get();
    WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
    // The allocation stack contains things allocated since the start of the GC. These may have
    // been marked during this GC, meaning they won't be eligible for reclaiming in the next
    // sticky GC. Remove these objects from the mark bitmaps so that they will be eligible for
    // sticky collection.
    // There is a race here which is safely handled. Another thread such as the hprof could
    // have flushed the alloc stack after we resumed the threads. This is safe however, since
    // resetting the allocation stack zeros it out with madvise. This means that we will either
    // read NULLs or attempt to unmark a newly allocated object which will not be marked in the
    // first place.
    mirror::Object** end = allocation_stack->End();
    for (mirror::Object** it = allocation_stack->Begin(); it != end; ++it) {
      Object* obj = *it;
      if (obj != NULL) {
        UnMarkObjectNonNull(obj);
      }
    }
  }

  // Before freeing anything, let's verify the heap.
  if (kIsDebugBuild) {
    ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
    VerifyImageRoots();
  }
  timings_.StartSplit("PreSweepingGcVerification");
  heap_->PreSweepingGcVerification(this);
  timings_.EndSplit();

  {
    WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);

    // Reclaim unmarked objects.
    Sweep(false);

    // Swap the live and mark bitmaps for each space which we modified. This is an
    // optimization that enables us to not clear live bits inside of the sweep. Only swaps unbound
    // bitmaps.
    timings_.StartSplit("SwapBitmaps");
    SwapBitmaps();
    timings_.EndSplit();

    // Unbind the live and mark bitmaps.
    UnBindBitmaps();
  }
}

void MarkSweep::SetImmuneRange(Object* begin, Object* end) {
  immune_begin_ = begin;
  immune_end_ = end;
}

void MarkSweep::FindDefaultMarkBitmap() {
  base::TimingLogger::ScopedSplit split("FindDefaultMarkBitmap", &timings_);
  const std::vector<space::ContinuousSpace*>& spaces = GetHeap()->GetContinuousSpaces();
  // TODO: C++0x
  typedef std::vector<space::ContinuousSpace*>::const_iterator It;
  for (It it = spaces.begin(), end = spaces.end(); it != end; ++it) {
    space::ContinuousSpace* space = *it;
    if (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyAlwaysCollect) {
      current_mark_bitmap_ = (*it)->GetMarkBitmap();
      CHECK(current_mark_bitmap_ != NULL);
      return;
    }
  }
  GetHeap()->DumpSpaces();
  LOG(FATAL) << "Could not find a default mark bitmap";
}

void MarkSweep::ExpandMarkStack() {
  // Rare case, no need to have Thread::Current be a parameter.
  MutexLock mu(Thread::Current(), mark_stack_expand_lock_);
  if (UNLIKELY(mark_stack_->Size() < mark_stack_->Capacity())) {
    // Someone else acquired the lock and expanded the mark stack before us.
    return;
  }
  std::vector<Object*> temp;
  temp.insert(temp.begin(), mark_stack_->Begin(), mark_stack_->End());
  mark_stack_->Resize(mark_stack_->Capacity() * 2);
  for (size_t i = 0; i < temp.size(); ++i) {
    mark_stack_->PushBack(temp[i]);
  }
}

inline void MarkSweep::MarkObjectNonNullParallel(const Object* obj) {
  DCHECK(obj != NULL);
  if (MarkObjectParallel(obj)) {
    while (UNLIKELY(!mark_stack_->AtomicPushBack(const_cast<Object*>(obj)))) {
      // Only reason a push can fail is that the mark stack is full.
      ExpandMarkStack();
    }
  }
}

inline void MarkSweep::UnMarkObjectNonNull(const Object* obj) {
  DCHECK(!IsImmune(obj));
  // Try to take advantage of locality of references within a space; failing this, find the space
  // the hard way.
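  // current_mark_bitmap_ is the default (alloc space) bitmap found by FindDefaultMarkBitmap; it
  // covers the common case, so most lookups avoid the heap-wide search below.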
  accounting::SpaceBitmap* object_bitmap = current_mark_bitmap_;
  if (UNLIKELY(!object_bitmap->HasAddress(obj))) {
    accounting::SpaceBitmap* new_bitmap = heap_->GetMarkBitmap()->GetContinuousSpaceBitmap(obj);
    if (LIKELY(new_bitmap != NULL)) {
      object_bitmap = new_bitmap;
    } else {
      MarkLargeObject(obj, false);
      return;
    }
  }

  DCHECK(object_bitmap->HasAddress(obj));
  object_bitmap->Clear(obj);
}

inline void MarkSweep::MarkObjectNonNull(const Object* obj) {
  DCHECK(obj != NULL);

  if (IsImmune(obj)) {
    DCHECK(IsMarked(obj));
    return;
  }

  // Try to take advantage of locality of references within a space; failing this, find the space
  // the hard way.
  accounting::SpaceBitmap* object_bitmap = current_mark_bitmap_;
  if (UNLIKELY(!object_bitmap->HasAddress(obj))) {
    accounting::SpaceBitmap* new_bitmap = heap_->GetMarkBitmap()->GetContinuousSpaceBitmap(obj);
    if (LIKELY(new_bitmap != NULL)) {
      object_bitmap = new_bitmap;
    } else {
      MarkLargeObject(obj, true);
      return;
    }
  }

  // This object was not previously marked.
  if (!object_bitmap->Test(obj)) {
    object_bitmap->Set(obj);
    // Do we need to expand the mark stack?
    if (UNLIKELY(mark_stack_->Size() >= mark_stack_->Capacity())) {
      ExpandMarkStack();
    }
    // The object must be pushed on to the mark stack.
    mark_stack_->PushBack(const_cast<Object*>(obj));
  }
}

// Rare case, probably not worth inlining since it will increase instruction cache miss rate.
bool MarkSweep::MarkLargeObject(const Object* obj, bool set) {
  // TODO: support >1 discontinuous space.
  space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
  accounting::SpaceSetMap* large_objects = large_object_space->GetMarkObjects();
  if (kProfileLargeObjects) {
    ++large_object_test_;
  }
  if (UNLIKELY(!large_objects->Test(obj))) {
    if (!large_object_space->Contains(obj)) {
      LOG(ERROR) << "Tried to mark " << obj << " not contained by any spaces";
      LOG(ERROR) << "Attempting to see if it's a bad root";
      VerifyRoots();
      LOG(FATAL) << "Can't mark bad root";
    }
    if (kProfileLargeObjects) {
      ++large_object_mark_;
    }
    if (set) {
      large_objects->Set(obj);
    } else {
      large_objects->Clear(obj);
    }
    return true;
  }
  return false;
}

inline bool MarkSweep::MarkObjectParallel(const Object* obj) {
  DCHECK(obj != NULL);

  if (IsImmune(obj)) {
    DCHECK(IsMarked(obj));
    return false;
  }

  // Try to take advantage of locality of references within a space; failing this, find the space
  // the hard way.
  accounting::SpaceBitmap* object_bitmap = current_mark_bitmap_;
  if (UNLIKELY(!object_bitmap->HasAddress(obj))) {
    accounting::SpaceBitmap* new_bitmap = heap_->GetMarkBitmap()->GetContinuousSpaceBitmap(obj);
    if (new_bitmap != NULL) {
      object_bitmap = new_bitmap;
    } else {
      // TODO: Remove the Thread::Current here?
      // TODO: Convert this to some kind of atomic marking?
      MutexLock mu(Thread::Current(), large_object_lock_);
      return MarkLargeObject(obj, true);
    }
  }
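  // Threads may race to mark the same object; AtomicTestAndSet guarantees that exactly one of
  // them sees the bit as previously clear and takes responsibility for pushing the object.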
  // Return true if the object was not previously marked.
  return !object_bitmap->AtomicTestAndSet(obj);
}

// Used to mark objects when recursing. Recursion is done by moving
// the finger across the bitmaps in address order and marking child
// objects. Any newly-marked objects whose addresses are lower than
// the finger won't be visited by the bitmap scan, so those objects
// need to be added to the mark stack.
void MarkSweep::MarkObject(const Object* obj) {
  if (obj != NULL) {
    MarkObjectNonNull(obj);
  }
}

void MarkSweep::MarkRoot(const Object* obj) {
  if (obj != NULL) {
    MarkObjectNonNull(obj);
  }
}

void MarkSweep::MarkRootParallelCallback(const Object* root, void* arg) {
  DCHECK(root != NULL);
  DCHECK(arg != NULL);
  MarkSweep* mark_sweep = reinterpret_cast<MarkSweep*>(arg);
  mark_sweep->MarkObjectNonNullParallel(root);
}

void MarkSweep::MarkObjectCallback(const Object* root, void* arg) {
  DCHECK(root != NULL);
  DCHECK(arg != NULL);
  MarkSweep* mark_sweep = reinterpret_cast<MarkSweep*>(arg);
  mark_sweep->MarkObjectNonNull(root);
}

void MarkSweep::ReMarkObjectVisitor(const Object* root, void* arg) {
  DCHECK(root != NULL);
  DCHECK(arg != NULL);
  MarkSweep* mark_sweep = reinterpret_cast<MarkSweep*>(arg);
  mark_sweep->MarkObjectNonNull(root);
}

void MarkSweep::VerifyRootCallback(const Object* root, void* arg, size_t vreg,
                                   const StackVisitor* visitor) {
  reinterpret_cast<MarkSweep*>(arg)->VerifyRoot(root, vreg, visitor);
}

void MarkSweep::VerifyRoot(const Object* root, size_t vreg, const StackVisitor* visitor) {
  // See if the root is on any space bitmap.
  if (GetHeap()->GetLiveBitmap()->GetContinuousSpaceBitmap(root) == NULL) {
    space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
    if (!large_object_space->Contains(root)) {
      LOG(ERROR) << "Found invalid root: " << root;
      if (visitor != NULL) {
        LOG(ERROR) << visitor->DescribeLocation() << " in VReg: " << vreg;
      }
    }
  }
}

void MarkSweep::VerifyRoots() {
  Runtime::Current()->GetThreadList()->VerifyRoots(VerifyRootCallback, this);
}

// Marks all objects in the root set.
void MarkSweep::MarkRoots() {
  timings_.StartSplit("MarkRoots");
  Runtime::Current()->VisitNonConcurrentRoots(MarkObjectCallback, this);
  timings_.EndSplit();
}

void MarkSweep::MarkNonThreadRoots() {
  timings_.StartSplit("MarkNonThreadRoots");
  Runtime::Current()->VisitNonThreadRoots(MarkObjectCallback, this);
  timings_.EndSplit();
}

void MarkSweep::MarkConcurrentRoots() {
  timings_.StartSplit("MarkConcurrentRoots");
  // Visit all runtime roots and clear dirty flags.
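  // The trailing booleans are likely (only_dirty, clean_dirty): visit every concurrent root, not
  // just the dirty ones, and clear the dirty flags as we go.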
  Runtime::Current()->VisitConcurrentRoots(MarkObjectCallback, this, false, true);
  timings_.EndSplit();
}

class CheckObjectVisitor {
 public:
  explicit CheckObjectVisitor(MarkSweep* const mark_sweep) : mark_sweep_(mark_sweep) {}

  void operator()(const Object* obj, const Object* ref, MemberOffset offset, bool is_static) const
      NO_THREAD_SAFETY_ANALYSIS {
    if (kDebugLocking) {
      Locks::heap_bitmap_lock_->AssertSharedHeld(Thread::Current());
    }
    mark_sweep_->CheckReference(obj, ref, offset, is_static);
  }

 private:
  MarkSweep* const mark_sweep_;
};

void MarkSweep::CheckObject(const Object* obj) {
  DCHECK(obj != NULL);
  CheckObjectVisitor visitor(this);
  VisitObjectReferences(obj, visitor);
}

void MarkSweep::VerifyImageRootVisitor(Object* root, void* arg) {
  DCHECK(root != NULL);
  DCHECK(arg != NULL);
  MarkSweep* mark_sweep = reinterpret_cast<MarkSweep*>(arg);
  DCHECK(mark_sweep->heap_->GetMarkBitmap()->Test(root));
  mark_sweep->CheckObject(root);
}

void MarkSweep::BindLiveToMarkBitmap(space::ContinuousSpace* space) {
  CHECK(space->IsDlMallocSpace());
  space::DlMallocSpace* alloc_space = space->AsDlMallocSpace();
  accounting::SpaceBitmap* live_bitmap = space->GetLiveBitmap();
  accounting::SpaceBitmap* mark_bitmap = alloc_space->mark_bitmap_.release();
  GetHeap()->GetMarkBitmap()->ReplaceBitmap(mark_bitmap, live_bitmap);
  alloc_space->temp_bitmap_.reset(mark_bitmap);
  alloc_space->mark_bitmap_.reset(live_bitmap);
}

class ScanObjectVisitor {
 public:
  explicit ScanObjectVisitor(MarkSweep* const mark_sweep) : mark_sweep_(mark_sweep) {}

  // TODO: Fixme when annotalysis works with visitors.
  void operator()(const Object* obj) const NO_THREAD_SAFETY_ANALYSIS {
    if (kDebugLocking) {
      Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
      Locks::heap_bitmap_lock_->AssertExclusiveHeld(Thread::Current());
    }
    mark_sweep_->ScanObject(obj);
  }

 private:
  MarkSweep* const mark_sweep_;
};

void MarkSweep::ScanGrayObjects(byte minimum_age) {
  accounting::CardTable* card_table = GetHeap()->GetCardTable();
  const std::vector<space::ContinuousSpace*>& spaces = GetHeap()->GetContinuousSpaces();
  ScanObjectVisitor visitor(this);
  // TODO: C++0x
  typedef std::vector<space::ContinuousSpace*>::const_iterator It;
  for (It it = spaces.begin(), space_end = spaces.end(); it != space_end; ++it) {
    space::ContinuousSpace* space = *it;
    switch (space->GetGcRetentionPolicy()) {
      case space::kGcRetentionPolicyNeverCollect:
        timings_.StartSplit("ScanGrayImageSpaceObjects");
        break;
      case space::kGcRetentionPolicyFullCollect:
        timings_.StartSplit("ScanGrayZygoteSpaceObjects");
        break;
      case space::kGcRetentionPolicyAlwaysCollect:
        timings_.StartSplit("ScanGrayAllocSpaceObjects");
        break;
    }
    byte* begin = space->Begin();
    byte* end = space->End();
    // Image spaces are handled properly since live == marked for them.
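    // Scan re-visits every object marked in mark_bitmap that lies on a card whose value is at
    // least minimum_age, so only objects on sufficiently dirty cards get rescanned.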
    accounting::SpaceBitmap* mark_bitmap = space->GetMarkBitmap();
    card_table->Scan(mark_bitmap, begin, end, visitor, minimum_age);
    timings_.EndSplit();
  }
}

class CheckBitmapVisitor {
 public:
  explicit CheckBitmapVisitor(MarkSweep* mark_sweep) : mark_sweep_(mark_sweep) {}

  void operator()(const Object* obj) const NO_THREAD_SAFETY_ANALYSIS {
    if (kDebugLocking) {
      Locks::heap_bitmap_lock_->AssertSharedHeld(Thread::Current());
    }
    DCHECK(obj != NULL);
    mark_sweep_->CheckObject(obj);
  }

 private:
  MarkSweep* mark_sweep_;
};

void MarkSweep::VerifyImageRoots() {
  // Verify roots ensures that all the references inside the image space point to objects which
  // are either in the image space or marked objects in the alloc space.
  timings_.StartSplit("VerifyImageRoots");
  CheckBitmapVisitor visitor(this);
  const std::vector<space::ContinuousSpace*>& spaces = GetHeap()->GetContinuousSpaces();
  // TODO: C++0x
  typedef std::vector<space::ContinuousSpace*>::const_iterator It;
  for (It it = spaces.begin(), end = spaces.end(); it != end; ++it) {
    if ((*it)->IsImageSpace()) {
      space::ImageSpace* space = (*it)->AsImageSpace();
      uintptr_t begin = reinterpret_cast<uintptr_t>(space->Begin());
      uintptr_t end = reinterpret_cast<uintptr_t>(space->End());
      accounting::SpaceBitmap* live_bitmap = space->GetLiveBitmap();
      DCHECK(live_bitmap != NULL);
      live_bitmap->VisitMarkedRange(begin, end, visitor);
    }
  }
  timings_.EndSplit();
}

// Populates the mark stack based on the set of marked objects and
// recursively marks until the mark stack is emptied.
void MarkSweep::RecursiveMark() {
  base::TimingLogger::ScopedSplit split("RecursiveMark", &timings_);
  // RecursiveMark will build the lists of known instances of the Reference classes.
  // See DelayReferenceReferent for details.
  CHECK(soft_reference_list_ == NULL);
  CHECK(weak_reference_list_ == NULL);
  CHECK(finalizer_reference_list_ == NULL);
  CHECK(phantom_reference_list_ == NULL);
  CHECK(cleared_reference_list_ == NULL);

  const bool partial = GetGcType() == kGcTypePartial;
  ScanObjectVisitor scan_visitor(this);
  if (!kDisableFinger) {
    const std::vector<space::ContinuousSpace*>& spaces = GetHeap()->GetContinuousSpaces();
    // TODO: C++0x
    typedef std::vector<space::ContinuousSpace*>::const_iterator It;
    for (It it = spaces.begin(), end = spaces.end(); it != end; ++it) {
      space::ContinuousSpace* space = *it;
      if ((space->GetGcRetentionPolicy() == space::kGcRetentionPolicyAlwaysCollect) ||
          (!partial && space->GetGcRetentionPolicy() == space::kGcRetentionPolicyFullCollect)) {
        current_mark_bitmap_ = space->GetMarkBitmap();
        if (current_mark_bitmap_ == NULL) {
          GetHeap()->DumpSpaces();
          LOG(FATAL) << "invalid bitmap";
        }
        // This function does not handle heap end increasing, so we must use the space end.
        uintptr_t begin = reinterpret_cast<uintptr_t>(space->Begin());
        uintptr_t end = reinterpret_cast<uintptr_t>(space->End());
        current_mark_bitmap_->VisitMarkedRange(begin, end, scan_visitor);
      }
    }
  }
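  // With kDisableFinger set (the current default above), the bitmap walk is skipped entirely and
  // all recursive marking happens here by draining the mark stack.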
  ProcessMarkStack();
}

bool MarkSweep::IsMarkedCallback(const Object* object, void* arg) {
  return
      reinterpret_cast<MarkSweep*>(arg)->IsMarked(object) ||
      !reinterpret_cast<MarkSweep*>(arg)->GetHeap()->GetLiveBitmap()->Test(object);
}

void MarkSweep::RecursiveMarkDirtyObjects(byte minimum_age) {
  ScanGrayObjects(minimum_age);
  ProcessMarkStack();
}

void MarkSweep::ReMarkRoots() {
  timings_.StartSplit("ReMarkRoots");
  Runtime::Current()->VisitRoots(ReMarkObjectVisitor, this, true, true);
  timings_.EndSplit();
}

void MarkSweep::SweepJniWeakGlobals(IsMarkedTester is_marked, void* arg) {
  JavaVMExt* vm = Runtime::Current()->GetJavaVM();
  MutexLock mu(Thread::Current(), vm->weak_globals_lock);
  IndirectReferenceTable* table = &vm->weak_globals;
  typedef IndirectReferenceTable::iterator It;  // TODO: C++0x auto
  for (It it = table->begin(), end = table->end(); it != end; ++it) {
    const Object** entry = *it;
    if (!is_marked(*entry, arg)) {
      *entry = kClearedJniWeakGlobal;
    }
  }
}

struct ArrayMarkedCheck {
  accounting::ObjectStack* live_stack;
  MarkSweep* mark_sweep;
};

// Either marked or not live.
bool MarkSweep::IsMarkedArrayCallback(const Object* object, void* arg) {
  ArrayMarkedCheck* array_check = reinterpret_cast<ArrayMarkedCheck*>(arg);
  if (array_check->mark_sweep->IsMarked(object)) {
    return true;
  }
  accounting::ObjectStack* live_stack = array_check->live_stack;
  return std::find(live_stack->Begin(), live_stack->End(), object) == live_stack->End();
}

void MarkSweep::SweepSystemWeaksArray(accounting::ObjectStack* allocations) {
  Runtime* runtime = Runtime::Current();
  // The callbacks check !is_marked, where is_marked is the callback, but we want
  // !IsMarked && IsLive.
  // So compute !(!IsMarked && IsLive), which is equal to (IsMarked || !IsLive).
  // Or, for swapped bitmaps, (IsLive || !IsMarked).

  timings_.StartSplit("SweepSystemWeaksArray");
  ArrayMarkedCheck visitor;
  visitor.live_stack = allocations;
  visitor.mark_sweep = this;
  runtime->GetInternTable()->SweepInternTableWeaks(IsMarkedArrayCallback, &visitor);
  runtime->GetMonitorList()->SweepMonitorList(IsMarkedArrayCallback, &visitor);
  SweepJniWeakGlobals(IsMarkedArrayCallback, &visitor);
  timings_.EndSplit();
}

void MarkSweep::SweepSystemWeaks() {
  Runtime* runtime = Runtime::Current();
  // The callbacks check !is_marked, where is_marked is the callback, but we want
  // !IsMarked && IsLive.
  // So compute !(!IsMarked && IsLive), which is equal to (IsMarked || !IsLive).
  // Or, for swapped bitmaps, (IsLive || !IsMarked).
  timings_.StartSplit("SweepSystemWeaks");
  runtime->GetInternTable()->SweepInternTableWeaks(IsMarkedCallback, this);
  runtime->GetMonitorList()->SweepMonitorList(IsMarkedCallback, this);
  SweepJniWeakGlobals(IsMarkedCallback, this);
  timings_.EndSplit();
}

bool MarkSweep::VerifyIsLiveCallback(const Object* obj, void* arg) {
  reinterpret_cast<MarkSweep*>(arg)->VerifyIsLive(obj);
  // We don't actually want to sweep the object, so let's return "marked".
  return true;
}

void MarkSweep::VerifyIsLive(const Object* obj) {
  Heap* heap = GetHeap();
  if (!heap->GetLiveBitmap()->Test(obj)) {
    space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
    if (!large_object_space->GetLiveObjects()->Test(obj)) {
      if (std::find(heap->allocation_stack_->Begin(), heap->allocation_stack_->End(), obj) ==
          heap->allocation_stack_->End()) {
        // Object not found!
        heap->DumpSpaces();
        LOG(FATAL) << "Found dead object " << obj;
      }
    }
  }
}

void MarkSweep::VerifySystemWeaks() {
  Runtime* runtime = Runtime::Current();
  // Verify system weaks using a special IsMarked callback which always returns true.
  runtime->GetInternTable()->SweepInternTableWeaks(VerifyIsLiveCallback, this);
  runtime->GetMonitorList()->SweepMonitorList(VerifyIsLiveCallback, this);

  JavaVMExt* vm = runtime->GetJavaVM();
  MutexLock mu(Thread::Current(), vm->weak_globals_lock);
  IndirectReferenceTable* table = &vm->weak_globals;
  typedef IndirectReferenceTable::iterator It;  // TODO: C++0x auto
  for (It it = table->begin(), end = table->end(); it != end; ++it) {
    const Object** entry = *it;
    VerifyIsLive(*entry);
  }
}

struct SweepCallbackContext {
  MarkSweep* mark_sweep;
  space::AllocSpace* space;
  Thread* self;
};

class CheckpointMarkThreadRoots : public Closure {
 public:
  explicit CheckpointMarkThreadRoots(MarkSweep* mark_sweep) : mark_sweep_(mark_sweep) {}

  virtual void Run(Thread* thread) NO_THREAD_SAFETY_ANALYSIS {
    // Note: self is not necessarily equal to thread since thread may be suspended.
    Thread* self = Thread::Current();
    CHECK(thread == self || thread->IsSuspended() || thread->GetState() == kWaitingPerformingGc)
        << thread->GetState() << " thread " << thread << " self " << self;
    thread->VisitRoots(MarkSweep::MarkRootParallelCallback, mark_sweep_);
    mark_sweep_->GetBarrier().Pass(self);
  }

 private:
  MarkSweep* mark_sweep_;
};

void MarkSweep::MarkRootsCheckpoint(Thread* self) {
  CheckpointMarkThreadRoots check_point(this);
  timings_.StartSplit("MarkRootsCheckpoint");
  ThreadList* thread_list = Runtime::Current()->GetThreadList();
  // Request that the checkpoint be run on all threads, returning a count of the threads that
  // must run through the barrier, including self.
  size_t barrier_count = thread_list->RunCheckpoint(&check_point);
  // Release locks then wait for all mutator threads to pass the barrier.
  // TODO: optimize to not release locks when there are no threads to wait for.
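  // Releasing the locks lets blocked mutators reach a suspend point and run their checkpoint;
  // Barrier::Increment then blocks this thread until barrier_count threads have called Pass().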
  Locks::heap_bitmap_lock_->ExclusiveUnlock(self);
  Locks::mutator_lock_->SharedUnlock(self);
  ThreadState old_state = self->SetState(kWaitingForCheckPointsToRun);
  CHECK_EQ(old_state, kWaitingPerformingGc);
  gc_barrier_->Increment(self, barrier_count);
  self->SetState(kWaitingPerformingGc);
  Locks::mutator_lock_->SharedLock(self);
  Locks::heap_bitmap_lock_->ExclusiveLock(self);
  timings_.EndSplit();
}

void MarkSweep::SweepCallback(size_t num_ptrs, Object** ptrs, void* arg) {
  SweepCallbackContext* context = static_cast<SweepCallbackContext*>(arg);
  MarkSweep* mark_sweep = context->mark_sweep;
  Heap* heap = mark_sweep->GetHeap();
  space::AllocSpace* space = context->space;
  Thread* self = context->self;
  Locks::heap_bitmap_lock_->AssertExclusiveHeld(self);
  // Use a bulk free that merges consecutive objects before freeing, or free per object?
  // Documentation suggests better free performance with merging, but this may be at the expense
  // of allocation.
  size_t freed_objects = num_ptrs;
  // AllocSpace::FreeList clears the value in ptrs, so perform after clearing the live bit.
  size_t freed_bytes = space->FreeList(self, num_ptrs, ptrs);
  heap->RecordFree(freed_objects, freed_bytes);
  mark_sweep->freed_objects_.fetch_add(freed_objects);
  mark_sweep->freed_bytes_.fetch_add(freed_bytes);
}

void MarkSweep::ZygoteSweepCallback(size_t num_ptrs, Object** ptrs, void* arg) {
  SweepCallbackContext* context = static_cast<SweepCallbackContext*>(arg);
  Locks::heap_bitmap_lock_->AssertExclusiveHeld(context->self);
  Heap* heap = context->mark_sweep->GetHeap();
  // We don't free any actual memory to avoid dirtying the shared zygote pages.
  for (size_t i = 0; i < num_ptrs; ++i) {
    Object* obj = static_cast<Object*>(ptrs[i]);
    heap->GetLiveBitmap()->Clear(obj);
    heap->GetCardTable()->MarkCard(obj);
  }
}

void MarkSweep::SweepArray(accounting::ObjectStack* allocations, bool swap_bitmaps) {
  size_t freed_bytes = 0;
  space::DlMallocSpace* space = heap_->GetAllocSpace();

  // If we don't swap bitmaps then newly allocated Weaks go into the live bitmap but not the mark
  // bitmap, resulting in occasional frees of Weaks which are still in use.
  SweepSystemWeaksArray(allocations);

  timings_.StartSplit("Process allocation stack");
  // Newly allocated objects MUST be in the alloc space and those are the only objects which we are
  // going to free.
  accounting::SpaceBitmap* live_bitmap = space->GetLiveBitmap();
  accounting::SpaceBitmap* mark_bitmap = space->GetMarkBitmap();
  space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
  accounting::SpaceSetMap* large_live_objects = large_object_space->GetLiveObjects();
  accounting::SpaceSetMap* large_mark_objects = large_object_space->GetMarkObjects();
  if (swap_bitmaps) {
    std::swap(live_bitmap, mark_bitmap);
    std::swap(large_live_objects, large_mark_objects);
  }

  size_t freed_objects = 0;
  size_t freed_large_objects = 0;
  size_t count = allocations->Size();
  Object** objects = const_cast<Object**>(allocations->Begin());
  Object** out = objects;
  Object** objects_to_chunk_free = out;

  // Empty the allocation stack.
  Thread* self = Thread::Current();
  for (size_t i = 0; i < count; ++i) {
    Object* obj = objects[i];
    // There should only be objects in the AllocSpace/LargeObjectSpace in the allocation stack.
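    // Objects covered by the alloc space mark bitmap are batched and freed in chunks of
    // kSweepArrayChunkFreeSize below; anything else must be a large object.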
    if (LIKELY(mark_bitmap->HasAddress(obj))) {
      if (!mark_bitmap->Test(obj)) {
        // Don't bother un-marking since we clear the mark bitmap anyway.
        *(out++) = obj;
        // Free objects in chunks.
        DCHECK_GE(out, objects_to_chunk_free);
        DCHECK_LE(static_cast<size_t>(out - objects_to_chunk_free), kSweepArrayChunkFreeSize);
        if (static_cast<size_t>(out - objects_to_chunk_free) == kSweepArrayChunkFreeSize) {
          timings_.StartSplit("FreeList");
          size_t chunk_freed_objects = out - objects_to_chunk_free;
          freed_objects += chunk_freed_objects;
          freed_bytes += space->FreeList(self, chunk_freed_objects, objects_to_chunk_free);
          objects_to_chunk_free = out;
          timings_.EndSplit();
        }
      }
    } else if (!large_mark_objects->Test(obj)) {
      ++freed_large_objects;
      freed_bytes += large_object_space->Free(self, obj);
    }
  }
  // Free the remaining objects in chunks.
  DCHECK_GE(out, objects_to_chunk_free);
  DCHECK_LE(static_cast<size_t>(out - objects_to_chunk_free), kSweepArrayChunkFreeSize);
  if (out - objects_to_chunk_free > 0) {
    timings_.StartSplit("FreeList");
    size_t chunk_freed_objects = out - objects_to_chunk_free;
    freed_objects += chunk_freed_objects;
    freed_bytes += space->FreeList(self, chunk_freed_objects, objects_to_chunk_free);
    timings_.EndSplit();
  }
  CHECK_EQ(count, allocations->Size());
  timings_.EndSplit();

  timings_.StartSplit("RecordFree");
  VLOG(heap) << "Freed " << freed_objects << "/" << count
             << " objects with size " << PrettySize(freed_bytes);
  heap_->RecordFree(freed_objects + freed_large_objects, freed_bytes);
  freed_objects_.fetch_add(freed_objects);
  freed_bytes_.fetch_add(freed_bytes);
  timings_.EndSplit();

  timings_.StartSplit("ResetStack");
  allocations->Reset();
  timings_.EndSplit();
}

void MarkSweep::Sweep(bool swap_bitmaps) {
  DCHECK(mark_stack_->IsEmpty());
  base::TimingLogger::ScopedSplit split("Sweep", &timings_);

  // If we don't swap bitmaps then newly allocated Weaks go into the live bitmap but not the mark
  // bitmap, resulting in occasional frees of Weaks which are still in use.
  SweepSystemWeaks();

  const bool partial = (GetGcType() == kGcTypePartial);
  SweepCallbackContext scc;
  scc.mark_sweep = this;
  scc.self = Thread::Current();
  const std::vector<space::ContinuousSpace*>& spaces = GetHeap()->GetContinuousSpaces();
  // TODO: C++0x
  typedef std::vector<space::ContinuousSpace*>::const_iterator It;
  for (It it = spaces.begin(), end = spaces.end(); it != end; ++it) {
    space::ContinuousSpace* space = *it;
    // We always sweep always-collect spaces.
    bool sweep_space = (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyAlwaysCollect);
    if (!partial && !sweep_space) {
      // We sweep full-collect spaces when the GC isn't partial (i.e. it's a full GC).
      sweep_space = (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyFullCollect);
    }
    if (sweep_space) {
      uintptr_t begin = reinterpret_cast<uintptr_t>(space->Begin());
      uintptr_t end = reinterpret_cast<uintptr_t>(space->End());
      scc.space = space->AsDlMallocSpace();
      accounting::SpaceBitmap* live_bitmap = space->GetLiveBitmap();
      accounting::SpaceBitmap* mark_bitmap = space->GetMarkBitmap();
      if (swap_bitmaps) {
        std::swap(live_bitmap, mark_bitmap);
      }
      if (!space->IsZygoteSpace()) {
        base::TimingLogger::ScopedSplit split("SweepAllocSpace", &timings_);
        // Bitmaps are pre-swapped for optimization which enables sweeping with the heap unlocked.
        accounting::SpaceBitmap::SweepWalk(*live_bitmap, *mark_bitmap, begin, end,
                                           &SweepCallback, reinterpret_cast<void*>(&scc));
      } else {
        base::TimingLogger::ScopedSplit split("SweepZygote", &timings_);
        // Zygote sweep takes care of dirtying cards and clearing live bits, but does not free
        // actual memory.
        accounting::SpaceBitmap::SweepWalk(*live_bitmap, *mark_bitmap, begin, end,
                                           &ZygoteSweepCallback, reinterpret_cast<void*>(&scc));
      }
    }
  }

  SweepLargeObjects(swap_bitmaps);
}

void MarkSweep::SweepLargeObjects(bool swap_bitmaps) {
  base::TimingLogger::ScopedSplit split("SweepLargeObjects", &timings_);
  // Sweep large objects.
  space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
  accounting::SpaceSetMap* large_live_objects = large_object_space->GetLiveObjects();
  accounting::SpaceSetMap* large_mark_objects = large_object_space->GetMarkObjects();
  if (swap_bitmaps) {
    std::swap(large_live_objects, large_mark_objects);
  }
  accounting::SpaceSetMap::Objects& live_objects = large_live_objects->GetObjects();
  // O(n*log(n)) but hopefully there are not too many large objects.
  size_t freed_objects = 0;
  size_t freed_bytes = 0;
  Thread* self = Thread::Current();
  // TODO: C++0x
  typedef accounting::SpaceSetMap::Objects::iterator It;
  for (It it = live_objects.begin(), end = live_objects.end(); it != end; ++it) {
    if (!large_mark_objects->Test(*it)) {
      freed_bytes += large_object_space->Free(self, const_cast<Object*>(*it));
      ++freed_objects;
    }
  }
  freed_objects_.fetch_add(freed_objects);
  freed_bytes_.fetch_add(freed_bytes);
  GetHeap()->RecordFree(freed_objects, freed_bytes);
}

void MarkSweep::CheckReference(const Object* obj, const Object* ref, MemberOffset offset,
                               bool is_static) {
  const std::vector<space::ContinuousSpace*>& spaces = GetHeap()->GetContinuousSpaces();
  // TODO: C++0x
  typedef std::vector<space::ContinuousSpace*>::const_iterator It;
  for (It it = spaces.begin(), end = spaces.end(); it != end; ++it) {
    space::ContinuousSpace* space = *it;
    if (space->IsDlMallocSpace() && space->Contains(ref)) {
      DCHECK(IsMarked(obj));

      bool is_marked = IsMarked(ref);
      if (!is_marked) {
        LOG(INFO) << *space;
        LOG(WARNING) << (is_static ? "Static ref'" : "Instance ref'") << PrettyTypeOf(ref)
                     << "' (" << reinterpret_cast<const void*>(ref) << ") in '" << PrettyTypeOf(obj)
                     << "' (" << reinterpret_cast<const void*>(obj) << ") at offset "
                     << reinterpret_cast<void*>(offset.Int32Value()) << " wasn't marked";

        const Class* klass = is_static ? obj->AsClass() : obj->GetClass();
        DCHECK(klass != NULL);
        const ObjectArray<Field>* fields = is_static ? klass->GetSFields() : klass->GetIFields();
        DCHECK(fields != NULL);
        bool found = false;
        for (int32_t i = 0; i < fields->GetLength(); ++i) {
          const Field* cur = fields->Get(i);
          if (cur->GetOffset().Int32Value() == offset.Int32Value()) {
            LOG(WARNING) << "Field referencing the alloc space was " << PrettyField(cur);
            found = true;
            break;
          }
        }
        if (!found) {
          LOG(WARNING) << "Could not find field in object alloc space with offset "
                       << offset.Int32Value();
        }

        bool obj_marked = heap_->GetCardTable()->IsDirty(obj);
        if (!obj_marked) {
          LOG(WARNING) << "Object '" << PrettyTypeOf(obj) << "' "
                       << "(" << reinterpret_cast<const void*>(obj) << ") contains references to "
                       << "the alloc space, but wasn't card marked";
        }
      }
    }
    break;
  }
}

// Process the "referent" field in a java.lang.ref.Reference. If the
// referent has not yet been marked, put it on the appropriate list in
// the gcHeap for later processing.
void MarkSweep::DelayReferenceReferent(Object* obj) {
  DCHECK(obj != NULL);
  Class* klass = obj->GetClass();
  DCHECK(klass != NULL);
  DCHECK(klass->IsReferenceClass());
  Object* pending = obj->GetFieldObject<Object*>(heap_->GetReferencePendingNextOffset(), false);
  Object* referent = heap_->GetReferenceReferent(obj);
  if (kCountJavaLangRefs) {
    ++reference_count_;
  }
  if (pending == NULL && referent != NULL && !IsMarked(referent)) {
    Object** list = NULL;
    if (klass->IsSoftReferenceClass()) {
      list = &soft_reference_list_;
    } else if (klass->IsWeakReferenceClass()) {
      list = &weak_reference_list_;
    } else if (klass->IsFinalizerReferenceClass()) {
      list = &finalizer_reference_list_;
    } else if (klass->IsPhantomReferenceClass()) {
      list = &phantom_reference_list_;
    }
    DCHECK(list != NULL) << PrettyClass(klass) << " " << std::hex << klass->GetAccessFlags();
    // TODO: One lock per list?
    heap_->EnqueuePendingReference(obj, list);
  }
}

void MarkSweep::ScanRoot(const Object* obj) {
  ScanObject(obj);
}

class MarkObjectVisitor {
 public:
  explicit MarkObjectVisitor(MarkSweep* const mark_sweep) : mark_sweep_(mark_sweep) {}

  // TODO: Fixme when annotalysis works with visitors.
  void operator()(const Object* /* obj */, const Object* ref, const MemberOffset& /* offset */,
                  bool /* is_static */) const
      NO_THREAD_SAFETY_ANALYSIS {
    if (kDebugLocking) {
      Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
      Locks::heap_bitmap_lock_->AssertExclusiveHeld(Thread::Current());
    }
    mark_sweep_->MarkObject(ref);
  }

 private:
  MarkSweep* const mark_sweep_;
};

// Scans an object reference. Determines the type of the reference
// and dispatches to a specialized scanning routine.
void MarkSweep::ScanObject(const Object* obj) {
  MarkObjectVisitor visitor(this);
  ScanObjectVisit(obj, visitor);
}

class MarkStackChunk : public Task {
 public:
  MarkStackChunk(ThreadPool* thread_pool, MarkSweep* mark_sweep, Object** begin, Object** end)
      : mark_sweep_(mark_sweep),
        thread_pool_(thread_pool),
        index_(0),
        length_(0),
        output_(NULL) {
    length_ = end - begin;
    if (begin != end) {
      // Cost not significant since we only do this for the initial set of mark stack chunks.
      memcpy(data_, begin, length_ * sizeof(*begin));
    }
    if (kCountTasks) {
      ++mark_sweep_->work_chunks_created_;
    }
  }

  ~MarkStackChunk() {
    DCHECK(output_ == NULL || output_->length_ == 0);
    DCHECK_GE(index_, length_);
    delete output_;
    if (kCountTasks) {
      ++mark_sweep_->work_chunks_deleted_;
    }
  }

  MarkSweep* const mark_sweep_;
  ThreadPool* const thread_pool_;
  static const size_t max_size = 1 * KB;
  // Index of which object we are scanning. Only needs to be atomic if we are doing work stealing.
  size_t index_;
  // Input / output mark stack. We add newly marked references to data_ until length reaches
  // max_size. This is an optimization so that fewer tasks are created.
  // TODO: Investigate using a bounded buffer FIFO.
  Object* data_[max_size];
  // How many elements in data_ we need to scan.
  size_t length_;
  // Output block, newly marked references get added to the output block so that another thread
  // can scan them.
  MarkStackChunk* output_;

  class MarkObjectParallelVisitor {
   public:
    explicit MarkObjectParallelVisitor(MarkStackChunk* chunk_task) : chunk_task_(chunk_task) {}

    void operator()(const Object* /* obj */, const Object* ref,
                    const MemberOffset& /* offset */, bool /* is_static */) const {
      if (ref != NULL && chunk_task_->mark_sweep_->MarkObjectParallel(ref)) {
        chunk_task_->MarkStackPush(ref);
      }
    }

   private:
    MarkStackChunk* const chunk_task_;
  };

  // Push an object into the block.
  // Don't need to use an atomic ++ since only one thread is writing to an output block at any
  // given time.
  void Push(Object* obj) {
    CHECK(obj != NULL);
    data_[length_++] = obj;
  }

  void MarkStackPush(const Object* obj) {
    if (static_cast<size_t>(length_) < max_size) {
      Push(const_cast<Object*>(obj));
    } else {
      // Internal (thread-local) buffer is full, push to a new buffer instead.
      if (UNLIKELY(output_ == NULL)) {
        AllocateOutputChunk();
      } else if (UNLIKELY(static_cast<size_t>(output_->length_) == max_size)) {
        // Output block is full, queue it up for processing and obtain a new block.
        EnqueueOutput();
        AllocateOutputChunk();
      }
      output_->Push(const_cast<Object*>(obj));
    }
  }

  void ScanObject(Object* obj) {
    mark_sweep_->ScanObjectVisit(obj, MarkObjectParallelVisitor(this));
  }

  void EnqueueOutput() {
    if (output_ != NULL) {
      uint64_t start = 0;
      if (kMeasureOverhead) {
        start = NanoTime();
      }
      thread_pool_->AddTask(Thread::Current(), output_);
      output_ = NULL;
      if (kMeasureOverhead) {
        mark_sweep_->overhead_time_.fetch_add(NanoTime() - start);
      }
    }
  }

  void AllocateOutputChunk() {
    uint64_t start = 0;
    if (kMeasureOverhead) {
      start = NanoTime();
    }
    output_ = new MarkStackChunk(thread_pool_, mark_sweep_, NULL, NULL);
    if (kMeasureOverhead) {
      mark_sweep_->overhead_time_.fetch_add(NanoTime() - start);
    }
  }

  void Finalize() {
    EnqueueOutput();
    delete this;
  }

  // Scans all of the objects.
  virtual void Run(Thread* self) {
    size_t index;
    while ((index = index_++) < length_) {
      if (kUseMarkStackPrefetch) {
        static const size_t prefetch_look_ahead = 1;
        __builtin_prefetch(data_[std::min(index + prefetch_look_ahead, length_ - 1)]);
      }
      Object* obj = data_[index];
      DCHECK(obj != NULL);
      ScanObject(obj);
    }
  }
};
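
// MarkStackChunk tasks overflow newly discovered references into output_ chunks, which are
// re-queued to the thread pool as they fill (and again in Finalize), so parallel marking
// continues until the pool runs out of work.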
void MarkSweep::ProcessMarkStackParallel() {
  CHECK(kDisableFinger) << "parallel mark stack processing cannot work when finger is enabled";
  Thread* self = Thread::Current();
  ThreadPool* thread_pool = GetHeap()->GetThreadPool();
  // Split the current mark stack up into work tasks.
  const size_t num_threads = thread_pool->GetThreadCount();
  const size_t stack_size = mark_stack_->Size();
  const size_t chunk_size =
      std::min((stack_size + num_threads - 1) / num_threads,
               static_cast<size_t>(MarkStackChunk::max_size));
  size_t index = 0;
  for (size_t i = 0; i < num_threads || index < stack_size; ++i) {
    Object** begin = &mark_stack_->Begin()[std::min(stack_size, index)];
    Object** end = &mark_stack_->Begin()[std::min(stack_size, index + chunk_size)];
    index += chunk_size;
    thread_pool->AddTask(self, new MarkStackChunk(thread_pool, this, begin, end));
  }
  thread_pool->StartWorkers(self);
  thread_pool->Wait(self, true, true);
  mark_stack_->Reset();
  // LOG(INFO) << "Idle wait time " << PrettyDuration(thread_pool->GetWaitTime());
  CHECK_EQ(work_chunks_created_, work_chunks_deleted_) << " some of the work chunks were leaked";
}

// Scan anything that's on the mark stack.
void MarkSweep::ProcessMarkStack() {
  ThreadPool* thread_pool = GetHeap()->GetThreadPool();
  timings_.StartSplit("ProcessMarkStack");
  if (kParallelMarkStack && thread_pool != NULL && thread_pool->GetThreadCount() > 0) {
    ProcessMarkStackParallel();
    timings_.EndSplit();
    return;
  }

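  // The small FIFO below software-pipelines the drain loop: each object is prefetched when it is
  // popped and only scanned a few iterations later, hiding the cache-miss latency of the load.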
  if (kUseMarkStackPrefetch) {
    const size_t fifo_size = 4;
    const size_t fifo_mask = fifo_size - 1;
    const Object* fifo[fifo_size];
    for (size_t i = 0; i < fifo_size; ++i) {
      fifo[i] = NULL;
    }
    size_t fifo_pos = 0;
    size_t fifo_count = 0;
    for (;;) {
      const Object* obj = fifo[fifo_pos & fifo_mask];
      if (obj != NULL) {
        ScanObject(obj);
        fifo[fifo_pos & fifo_mask] = NULL;
        --fifo_count;
      }

      if (!mark_stack_->IsEmpty()) {
        const Object* obj = mark_stack_->PopBack();
        DCHECK(obj != NULL);
        fifo[fifo_pos & fifo_mask] = obj;
        __builtin_prefetch(obj);
        fifo_count++;
      }
      fifo_pos++;

      if (!fifo_count) {
        CHECK(mark_stack_->IsEmpty()) << mark_stack_->Size();
        break;
      }
    }
  } else {
    while (!mark_stack_->IsEmpty()) {
      const Object* obj = mark_stack_->PopBack();
      DCHECK(obj != NULL);
      ScanObject(obj);
    }
  }
  timings_.EndSplit();
}

// Walks the reference list marking any references subject to the
// reference clearing policy. References with a black referent are
// removed from the list. References with white referents biased
// toward saving are blackened and also removed from the list.
void MarkSweep::PreserveSomeSoftReferences(Object** list) {
  DCHECK(list != NULL);
  Object* clear = NULL;
  size_t counter = 0;

  DCHECK(mark_stack_->IsEmpty());

  timings_.StartSplit("PreserveSomeSoftReferences");
  while (*list != NULL) {
    Object* ref = heap_->DequeuePendingReference(list);
    Object* referent = heap_->GetReferenceReferent(ref);
    if (referent == NULL) {
      // Referent was cleared by the user during marking.
      continue;
    }
    bool is_marked = IsMarked(referent);
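    // The counter alternates for each white referent encountered, so roughly every other
    // unmarked soft reference is preserved by this GC.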
    if (!is_marked && ((++counter) & 1)) {
      // Referent is white and biased toward saving, mark it.
      MarkObject(referent);
      is_marked = true;
    }
    if (!is_marked) {
      // Referent is white, queue it for clearing.
      heap_->EnqueuePendingReference(ref, &clear);
    }
  }
  *list = clear;
  timings_.EndSplit();

  // Restart the mark with the newly black references added to the
  // root set.
  ProcessMarkStack();
}

inline bool MarkSweep::IsMarked(const Object* object) const
    SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
  if (IsImmune(object)) {
    return true;
  }
  DCHECK(current_mark_bitmap_ != NULL);
  if (current_mark_bitmap_->HasAddress(object)) {
    return current_mark_bitmap_->Test(object);
  }
  return heap_->GetMarkBitmap()->Test(object);
}

// Unlink the reference list, clearing reference objects with white
// referents. Cleared references registered to a reference queue are
// scheduled for appending by the heap worker thread.
void MarkSweep::ClearWhiteReferences(Object** list) {
  DCHECK(list != NULL);
  while (*list != NULL) {
    Object* ref = heap_->DequeuePendingReference(list);
    Object* referent = heap_->GetReferenceReferent(ref);
    if (referent != NULL && !IsMarked(referent)) {
      // Referent is white, clear it.
      heap_->ClearReferenceReferent(ref);
      if (heap_->IsEnqueuable(ref)) {
        heap_->EnqueueReference(ref, &cleared_reference_list_);
      }
    }
  }
  DCHECK(*list == NULL);
}

// Enqueues finalizer references with white referents. White
// referents are blackened, moved to the zombie field, and the
// referent field is cleared.
void MarkSweep::EnqueueFinalizerReferences(Object** list) {
  DCHECK(list != NULL);
  timings_.StartSplit("EnqueueFinalizerReferences");
  MemberOffset zombie_offset = heap_->GetFinalizerReferenceZombieOffset();
  bool has_enqueued = false;
  while (*list != NULL) {
    Object* ref = heap_->DequeuePendingReference(list);
    Object* referent = heap_->GetReferenceReferent(ref);
    if (referent != NULL && !IsMarked(referent)) {
      MarkObject(referent);
      // If the referent is non-null the reference must be enqueuable.
      DCHECK(heap_->IsEnqueuable(ref));
      ref->SetFieldObject(zombie_offset, referent, false);
      heap_->ClearReferenceReferent(ref);
      heap_->EnqueueReference(ref, &cleared_reference_list_);
      has_enqueued = true;
    }
  }
  timings_.EndSplit();
  if (has_enqueued) {
    ProcessMarkStack();
  }
  DCHECK(*list == NULL);
}

// Process reference class instances and schedule finalizations.
void MarkSweep::ProcessReferences(Object** soft_references, bool clear_soft,
                                  Object** weak_references,
                                  Object** finalizer_references,
                                  Object** phantom_references) {
  DCHECK(soft_references != NULL);
  DCHECK(weak_references != NULL);
  DCHECK(finalizer_references != NULL);
  DCHECK(phantom_references != NULL);

  // Unless we are in the zygote or required to clear soft references
  // with white references, preserve some white referents.
  if (!clear_soft && !Runtime::Current()->IsZygote()) {
    PreserveSomeSoftReferences(soft_references);
  }

  timings_.StartSplit("ProcessReferences");
  // Clear all remaining soft and weak references with white
  // referents.
  ClearWhiteReferences(soft_references);
  ClearWhiteReferences(weak_references);
  timings_.EndSplit();

  // Preserve all white objects with finalize methods and schedule
  // them for finalization.
  EnqueueFinalizerReferences(finalizer_references);

  timings_.StartSplit("ProcessReferences");
  // Clear all f-reachable soft and weak references with white
  // referents.
  ClearWhiteReferences(soft_references);
  ClearWhiteReferences(weak_references);

  // Clear all phantom references with white referents.
  ClearWhiteReferences(phantom_references);

  // At this point all reference lists should be empty.
  DCHECK(*soft_references == NULL);
  DCHECK(*weak_references == NULL);
  DCHECK(*finalizer_references == NULL);
  DCHECK(*phantom_references == NULL);
  timings_.EndSplit();
}

void MarkSweep::UnBindBitmaps() {
  base::TimingLogger::ScopedSplit split("UnBindBitmaps", &timings_);
  const std::vector<space::ContinuousSpace*>& spaces = GetHeap()->GetContinuousSpaces();
  // TODO: C++0x
  typedef std::vector<space::ContinuousSpace*>::const_iterator It;
  for (It it = spaces.begin(), end = spaces.end(); it != end; ++it) {
    space::ContinuousSpace* space = *it;
    if (space->IsDlMallocSpace()) {
      space::DlMallocSpace* alloc_space = space->AsDlMallocSpace();
      if (alloc_space->temp_bitmap_.get() != NULL) {
        // At this point, the temp_bitmap holds our old mark bitmap.
        accounting::SpaceBitmap* new_bitmap = alloc_space->temp_bitmap_.release();
        GetHeap()->GetMarkBitmap()->ReplaceBitmap(alloc_space->mark_bitmap_.get(), new_bitmap);
        CHECK_EQ(alloc_space->mark_bitmap_.release(), alloc_space->live_bitmap_.get());
        alloc_space->mark_bitmap_.reset(new_bitmap);
        DCHECK(alloc_space->temp_bitmap_.get() == NULL);
      }
    }
  }
}

void MarkSweep::FinishPhase() {
  base::TimingLogger::ScopedSplit split("FinishPhase", &timings_);
  // Can't enqueue references if we hold the mutator lock.
  Object* cleared_references = GetClearedReferences();
  Heap* heap = GetHeap();
  timings_.NewSplit("EnqueueClearedReferences");
  heap->EnqueueClearedReferences(&cleared_references);

  timings_.NewSplit("PostGcVerification");
  heap->PostGcVerification(this);

  timings_.NewSplit("GrowForUtilization");
  heap->GrowForUtilization(GetGcType(), GetDurationNs());

  timings_.NewSplit("RequestHeapTrim");
  heap->RequestHeapTrim();

  // Update the cumulative statistics.
  total_time_ns_ += GetDurationNs();
  total_paused_time_ns_ += std::accumulate(GetPauseTimes().begin(), GetPauseTimes().end(), 0,
                                           std::plus<uint64_t>());
  total_freed_objects_ += GetFreedObjects();
  total_freed_bytes_ += GetFreedBytes();

  // Ensure that the mark stack is empty.
  CHECK(mark_stack_->IsEmpty());

  if (kCountScannedTypes) {
    VLOG(gc) << "MarkSweep scanned classes=" << class_count_ << " arrays=" << array_count_
             << " other=" << other_count_;
  }

  if (kCountTasks) {
    VLOG(gc) << "Total number of work chunks allocated: " << work_chunks_created_;
  }

  if (kMeasureOverhead) {
    VLOG(gc) << "Overhead time " << PrettyDuration(overhead_time_);
  }

  if (kProfileLargeObjects) {
    VLOG(gc) << "Large objects tested " << large_object_test_ << " marked " << large_object_mark_;
  }

  if (kCountClassesMarked) {
    VLOG(gc) << "Classes marked " << classes_marked_;
  }

  if (kCountJavaLangRefs) {
    VLOG(gc) << "References scanned " << reference_count_;
  }

  // Update the cumulative loggers.
  cumulative_timings_.Start();
  cumulative_timings_.AddLogger(timings_);
  cumulative_timings_.End();

  // Clear all of the spaces' mark bitmaps.
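  // Skip the never-collect (image) spaces: for them the mark bitmap is also the live bitmap, so
  // clearing it would discard liveness information.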
  const std::vector<space::ContinuousSpace*>& spaces = GetHeap()->GetContinuousSpaces();
  // TODO: C++0x
  typedef std::vector<space::ContinuousSpace*>::const_iterator It;
  for (It it = spaces.begin(), end = spaces.end(); it != end; ++it) {
    space::ContinuousSpace* space = *it;
    if (space->GetGcRetentionPolicy() != space::kGcRetentionPolicyNeverCollect) {
      space->GetMarkBitmap()->Clear();
    }
  }
  mark_stack_->Reset();

  // Reset the marked large objects.
  space::LargeObjectSpace* large_objects = GetHeap()->GetLargeObjectsSpace();
  large_objects->GetMarkObjects()->Clear();
}

}  // namespace collector
}  // namespace gc
}  // namespace art