mark_sweep.cc revision 184e322fe8ddd75c844a1eb2eb1ca32bc02f2d45
15460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao/* 25460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao * Copyright (C) 2011 The Android Open Source Project 35460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao * 45460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao * Licensed under the Apache License, Version 2.0 (the "License"); 55460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao * you may not use this file except in compliance with the License. 65460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao * You may obtain a copy of the License at 75460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao * 85460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao * http://www.apache.org/licenses/LICENSE-2.0 95460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao * 105460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao * Unless required by applicable law or agreed to in writing, software 115460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao * distributed under the License is distributed on an "AS IS" BASIS, 125460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 135460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao * See the License for the specific language governing permissions and 145460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao * limitations under the License. 
155460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao */ 165460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao 175460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao#include "mark_sweep.h" 185460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao 195460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao#include <functional> 205460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao#include <numeric> 215460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao#include <climits> 225460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao#include <vector> 235460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao 245460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao#include "base/logging.h" 255460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao#include "base/macros.h" 265460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao#include "base/mutex-inl.h" 275460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao#include "base/timing_logger.h" 285460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao#include "gc/accounting/card_table-inl.h" 295460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao#include "gc/accounting/heap_bitmap.h" 305460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao#include "gc/accounting/space_bitmap-inl.h" 315460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao#include "gc/heap.h" 325460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao#include "gc/space/image_space.h" 335460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao#include "gc/space/large_object_space.h" 345460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao#include "gc/space/space-inl.h" 355460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao#include "indirect_reference_table.h" 365460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao#include "intern_table.h" 375460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao#include "jni_internal.h" 385460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao#include "monitor.h" 395460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao#include "mark_sweep-inl.h" 
405460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao#include "mirror/class-inl.h" 415460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao#include "mirror/class_loader.h" 425460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao#include "mirror/dex_cache.h" 435460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao#include "mirror/field.h" 445460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao#include "mirror/field-inl.h" 455460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao#include "mirror/object-inl.h" 465460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao#include "mirror/object_array.h" 475460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao#include "mirror/object_array-inl.h" 485460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao#include "runtime.h" 495460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao#include "thread-inl.h" 505460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao#include "thread_list.h" 515460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao#include "verifier/method_verifier.h" 525460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao 535460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liaousing ::art::mirror::Class; 545460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liaousing ::art::mirror::Field; 555460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liaousing ::art::mirror::Object; 565460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liaousing ::art::mirror::ObjectArray; 575460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao 585460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liaonamespace art { 595460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liaonamespace gc { 605460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liaonamespace collector { 615460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao 625460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao// Performance options. 
635460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liaostatic const bool kParallelMarkStack = true; 645460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liaostatic const bool kDisableFinger = true; // TODO: Fix, bit rotten. 655460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liaostatic const bool kUseMarkStackPrefetch = true; 665460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao 675460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao// Profiling and information flags. 685460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liaostatic const bool kCountClassesMarked = false; 695460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liaostatic const bool kProfileLargeObjects = false; 705460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liaostatic const bool kMeasureOverhead = false; 715460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liaostatic const bool kCountTasks = false; 725460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liaostatic const bool kCountJavaLangRefs = false; 735460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao 745460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liaovoid MarkSweep::ImmuneSpace(space::ContinuousSpace* space) { 755460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao // Bind live to mark bitmap if necessary. 765460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao if (space->GetLiveBitmap() != space->GetMarkBitmap()) { 775460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao BindLiveToMarkBitmap(space); 785460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao } 795460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao 805460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao // Add the space to the immune region. 
815460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao if (immune_begin_ == NULL) { 825460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao DCHECK(immune_end_ == NULL); 835460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao SetImmuneRange(reinterpret_cast<Object*>(space->Begin()), 845460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao reinterpret_cast<Object*>(space->End())); 855460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao } else { 865460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao const std::vector<space::ContinuousSpace*>& spaces = GetHeap()->GetContinuousSpaces(); 875460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao const space::ContinuousSpace* prev_space = NULL; 885460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao // Find out if the previous space is immune. 895460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao // TODO: C++0x 905460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao typedef std::vector<space::ContinuousSpace*>::const_iterator It; 915460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao for (It it = spaces.begin(), end = spaces.end(); it != end; ++it) { 925460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao if (*it == space) { 935460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao break; 945460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao } 955460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao prev_space = *it; 965460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao } 975460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao 985460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao // If previous space was immune, then extend the immune region. Relies on continuous spaces 995460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao // being sorted by Heap::AddContinuousSpace. 
1005460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao if (prev_space != NULL && 1015460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao immune_begin_ <= reinterpret_cast<Object*>(prev_space->Begin()) && 1025460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao immune_end_ >= reinterpret_cast<Object*>(prev_space->End())) { 1035460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao immune_begin_ = std::min(reinterpret_cast<Object*>(space->Begin()), immune_begin_); 1045460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao immune_end_ = std::max(reinterpret_cast<Object*>(space->End()), immune_end_); 1055460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao } 1065460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao } 1075460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao} 1085460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao 1095460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liaovoid MarkSweep::BindBitmaps() { 1105460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao const std::vector<space::ContinuousSpace*>& spaces = GetHeap()->GetContinuousSpaces(); 1115460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_); 1125460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao 1135460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao // Mark all of the spaces we never collect as immune. 
1145460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao typedef std::vector<space::ContinuousSpace*>::const_iterator It; 1155460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao for (It it = spaces.begin(), end = spaces.end(); it != end; ++it) { 1165460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao space::ContinuousSpace* space = *it; 1175460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao if (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyNeverCollect) { 1185460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao ImmuneSpace(space); 1195460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao } 1205460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao } 1215460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao} 1225460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao 1235460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei LiaoMarkSweep::MarkSweep(Heap* heap, bool is_concurrent, const std::string& name_prefix) 1245460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao : GarbageCollector(heap, 1255460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao name_prefix + (name_prefix.empty() ? "" : " ") + 1265460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao (is_concurrent ? 
"concurrent mark sweep": "mark sweep")), 1275460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao current_mark_bitmap_(NULL), 1285460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao java_lang_Class_(NULL), 1295460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao mark_stack_(NULL), 1305460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao immune_begin_(NULL), 1315460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao immune_end_(NULL), 1325460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao soft_reference_list_(NULL), 1335460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao weak_reference_list_(NULL), 1345460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao finalizer_reference_list_(NULL), 1355460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao phantom_reference_list_(NULL), 1365460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao cleared_reference_list_(NULL), 1375460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao gc_barrier_(new Barrier(0)), 1385460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao large_object_lock_("mark sweep large object lock", kMarkSweepLargeObjectLock), 1395460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao mark_stack_expand_lock_("mark sweep mark stack expand lock"), 1405460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao is_concurrent_(is_concurrent), 1415460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao clear_soft_references_(false) { 1425460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao} 1435460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao 1445460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liaovoid MarkSweep::InitializePhase() { 1455460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao timings_.Reset(); 1465460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao timings_.StartSplit("InitializePhase"); 1475460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao mark_stack_ = GetHeap()->mark_stack_.get(); 1485460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao DCHECK(mark_stack_ != NULL); 
1495460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao SetImmuneRange(NULL, NULL); 1505460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao soft_reference_list_ = NULL; 1515460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao weak_reference_list_ = NULL; 1525460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao finalizer_reference_list_ = NULL; 1535460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao phantom_reference_list_ = NULL; 1545460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao cleared_reference_list_ = NULL; 1555460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao freed_bytes_ = 0; 1565460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao freed_objects_ = 0; 1575460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao class_count_ = 0; 1585460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao array_count_ = 0; 1595460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao other_count_ = 0; 1605460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao large_object_test_ = 0; 1615460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao large_object_mark_ = 0; 1625460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao classes_marked_ = 0; 1635460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao overhead_time_ = 0; 1645460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao work_chunks_created_ = 0; 1655460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao work_chunks_deleted_ = 0; 1665460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao reference_count_ = 0; 1675460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao java_lang_Class_ = Class::GetJavaLangClass(); 1685460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao CHECK(java_lang_Class_ != NULL); 1695460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao FindDefaultMarkBitmap(); 1705460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao // Do any pre GC verification. 
1715460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao heap_->PreGcVerification(this); 1725460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao} 1735460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao 1745460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liaovoid MarkSweep::ProcessReferences(Thread* self) { 1755460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao timings_.NewSplit("ProcessReferences"); 1765460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao WriterMutexLock mu(self, *Locks::heap_bitmap_lock_); 1775460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao ProcessReferences(&soft_reference_list_, clear_soft_references_, &weak_reference_list_, 1785460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao &finalizer_reference_list_, &phantom_reference_list_); 1795460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao} 1805460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao 1815460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liaobool MarkSweep::HandleDirtyObjectsPhase() { 1825460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao Thread* self = Thread::Current(); 1835460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao accounting::ObjectStack* allocation_stack = GetHeap()->allocation_stack_.get(); 1845460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao Locks::mutator_lock_->AssertExclusiveHeld(self); 1855460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao 1865460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao { 1875460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao timings_.NewSplit("ReMarkRoots"); 1885460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao WriterMutexLock mu(self, *Locks::heap_bitmap_lock_); 1895460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao 1905460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao // Re-mark root set. 
1915460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao ReMarkRoots(); 1925460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao 1935460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao // Scan dirty objects, this is only required if we are not doing concurrent GC. 1945460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao RecursiveMarkDirtyObjects(accounting::CardTable::kCardDirty); 1955460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao } 1965460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao 1975460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao ProcessReferences(self); 1985460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao 1995460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao // Only need to do this if we have the card mark verification on, and only during concurrent GC. 2005460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao if (GetHeap()->verify_missing_card_marks_) { 2015460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao WriterMutexLock mu(self, *Locks::heap_bitmap_lock_); 2025460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao // This second sweep makes sure that we don't have any objects in the live stack which point to 2035460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao // freed objects. These cause problems since their references may be previously freed objects. 2045460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao SweepArray(allocation_stack, false); 2055460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao } else { 2065460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao timings_.NewSplit("UnMarkAllocStack"); 2075460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao WriterMutexLock mu(self, *Locks::heap_bitmap_lock_); 2085460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao // The allocation stack contains things allocated since the start of the GC. These may have been 2095460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao // marked during this GC meaning they won't be eligible for reclaiming in the next sticky GC. 
2105460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao // Remove these objects from the mark bitmaps so that they will be eligible for sticky 2115460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao // collection. 2125460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao heap_->UnMarkAllocStack(GetHeap()->alloc_space_->GetMarkBitmap(), 2135460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao GetHeap()->large_object_space_->GetMarkObjects(), 2145460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao allocation_stack); 2155460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao } 2165460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao return true; 2175460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao} 2185460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao 2195460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liaobool MarkSweep::IsConcurrent() const { 2205460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao return is_concurrent_; 2215460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao} 2225460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao 2235460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liaovoid MarkSweep::MarkingPhase() { 2245460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao Heap* heap = GetHeap(); 2255460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao Thread* self = Thread::Current(); 2265460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao 2275460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao timings_.NewSplit("BindBitmaps"); 2285460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao BindBitmaps(); 2295460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao FindDefaultMarkBitmap(); 2305460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao // Process dirty cards and add dirty cards to mod union tables. 
2315460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao heap->ProcessCards(timings_); 2325460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao 2335460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao // Need to do this before the checkpoint since we don't want any threads to add references to 2345460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao // the live stack during the recursive mark. 2355460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao timings_.NewSplit("SwapStacks"); 2365460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao heap->SwapStacks(); 2375460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao 2385460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao WriterMutexLock mu(self, *Locks::heap_bitmap_lock_); 2395460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao if (Locks::mutator_lock_->IsExclusiveHeld(self)) { 2405460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao // If we exclusively hold the mutator lock, all threads must be suspended. 2415460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao timings_.NewSplit("MarkRoots"); 2425460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao MarkRoots(); 2435460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao } else { 2445460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao timings_.NewSplit("MarkRootsCheckpoint"); 2455460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao MarkRootsCheckpoint(self); 2465460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao timings_.NewSplit("MarkNonThreadRoots"); 2475460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao MarkNonThreadRoots(); 2485460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao } 2495460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao timings_.NewSplit("MarkConcurrentRoots"); 2505460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao MarkConcurrentRoots(); 2515460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao 2525460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao heap->UpdateAndMarkModUnion(this, timings_, GetGcType()); 
2535460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao MarkReachableObjects(); 2545460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao} 2555460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao 2565460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liaovoid MarkSweep::MarkReachableObjects() { 2575460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao // Mark everything allocated since the last as GC live so that we can sweep concurrently, 2585460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao // knowing that new allocations won't be marked as live. 2595460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao timings_.NewSplit("MarkStackAsLive"); 2605460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao accounting::ObjectStack* live_stack = heap_->GetLiveStack(); 2615460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao heap_->MarkAllocStack(heap_->alloc_space_->GetLiveBitmap(), 2625460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao heap_->large_object_space_->GetLiveObjects(), 2635460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao live_stack); 2645460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao live_stack->Reset(); 2655460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao // Recursively mark all the non-image bits set in the mark bitmap. 
2665460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao RecursiveMark(); 2675460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao} 2685460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao 2695460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liaovoid MarkSweep::ReclaimPhase() { 2705460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao Thread* self = Thread::Current(); 2715460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao 2725460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao if (!IsConcurrent()) { 2735460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao ProcessReferences(self); 2745460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao } 2755460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao 2765460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao // Before freeing anything, lets verify the heap. 2775460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao if (kIsDebugBuild) { 2785460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_); 2795460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao VerifyImageRoots(); 2805460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao } 2815460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao heap_->PreSweepingGcVerification(this); 2825460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao 2835460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao { 2845460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao WriterMutexLock mu(self, *Locks::heap_bitmap_lock_); 2855460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao 2865460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao // Reclaim unmarked objects. 2875460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao Sweep(false); 2885460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao 2895460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao // Swap the live and mark bitmaps for each space which we modified space. 
This is an 2905460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao // optimization that enables us to not clear live bits inside of the sweep. Only swaps unbound 2915460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao // bitmaps. 2925460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao timings_.NewSplit("SwapBitmaps"); 2935460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao SwapBitmaps(); 2945460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao 2955460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao // Unbind the live and mark bitmaps. 2965460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao UnBindBitmaps(); 2975460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao } 2985460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao} 2995460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao 3005460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liaovoid MarkSweep::SetImmuneRange(Object* begin, Object* end) { 3015460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao immune_begin_ = begin; 3025460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao immune_end_ = end; 3035460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao} 3045460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao 3055460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liaovoid MarkSweep::FindDefaultMarkBitmap() { 3065460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao const std::vector<space::ContinuousSpace*>& spaces = GetHeap()->GetContinuousSpaces(); 3075460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao // TODO: C++0x 3085460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao typedef std::vector<space::ContinuousSpace*>::const_iterator It; 3095460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao for (It it = spaces.begin(), end = spaces.end(); it != end; ++it) { 3105460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao space::ContinuousSpace* space = *it; 3115460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao if (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyAlwaysCollect) { 
3125460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao current_mark_bitmap_ = (*it)->GetMarkBitmap(); 3135460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao CHECK(current_mark_bitmap_ != NULL); 3145460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao return; 3155460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao } 3165460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao } 3175460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao GetHeap()->DumpSpaces(); 3185460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao LOG(FATAL) << "Could not find a default mark bitmap"; 3195460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao} 3205460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao 3215460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liaovoid MarkSweep::ExpandMarkStack() { 3225460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao // Rare case, no need to have Thread::Current be a parameter. 3235460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao MutexLock mu(Thread::Current(), mark_stack_expand_lock_); 3245460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao if (UNLIKELY(mark_stack_->Size() < mark_stack_->Capacity())) { 3255460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao // Someone else acquired the lock and expanded the mark stack before us. 
3265460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao return; 3275460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao } 3285460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao std::vector<Object*> temp; 3295460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao temp.insert(temp.begin(), mark_stack_->Begin(), mark_stack_->End()); 3305460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao mark_stack_->Resize(mark_stack_->Capacity() * 2); 3315460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao for (size_t i = 0; i < temp.size(); ++i) { 3325460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao mark_stack_->PushBack(temp[i]); 3335460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao } 3345460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao} 3355460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao 3365460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liaoinline void MarkSweep::MarkObjectNonNullParallel(const Object* obj, bool check_finger) { 3375460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao DCHECK(obj != NULL); 3385460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao if (MarkObjectParallel(obj)) { 3395460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao while (UNLIKELY(!mark_stack_->AtomicPushBack(const_cast<Object*>(obj)))) { 3405460a1f25d9ddecb5c70667267d66d51af177a99Shih-wei Liao // Only reason a push can fail is that the mark stack is full. 
      ExpandMarkStack();
    }
  }
}

// Marks a non-null object on the serial (single-threaded) path: sets the bit in
// the covering mark bitmap and pushes newly-marked objects onto the mark stack
// so their references get scanned later.
// NOTE(review): check_finger is not referenced in this body — presumably a
// leftover from the finger-based marking path; confirm before removing.
inline void MarkSweep::MarkObjectNonNull(const Object* obj, bool check_finger) {
  DCHECK(obj != NULL);

  // Objects in the immune region (e.g. image space) are always considered marked.
  if (obj >= immune_begin_ && obj < immune_end_) {
    DCHECK(IsMarked(obj));
    return;
  }

  // Try to take advantage of locality of references within a space, failing this find the space
  // the hard way.
  accounting::SpaceBitmap* object_bitmap = current_mark_bitmap_;
  if (UNLIKELY(!object_bitmap->HasAddress(obj))) {
    accounting::SpaceBitmap* new_bitmap = heap_->GetMarkBitmap()->GetContinuousSpaceBitmap(obj);
    if (LIKELY(new_bitmap != NULL)) {
      object_bitmap = new_bitmap;
    } else {
      // Not in any continuous space: must be a large object (or a bad root,
      // which MarkLargeObject diagnoses fatally).
      MarkLargeObject(obj);
      return;
    }
  }

  // This object was not previously marked.
  if (!object_bitmap->Test(obj)) {
    object_bitmap->Set(obj);
    // Do we need to expand the mark stack?
    if (UNLIKELY(mark_stack_->Size() >= mark_stack_->Capacity())) {
      ExpandMarkStack();
    }
    // The object must be pushed on to the mark stack.
    mark_stack_->PushBack(const_cast<Object*>(obj));
  }
}

// Rare case, probably not worth inlining since it will increase instruction cache miss rate.
// Marks |obj| in the large object mark set. Returns true if the object was not
// previously marked. LOG(FATAL)s if |obj| is in no space at all (bad root).
bool MarkSweep::MarkLargeObject(const Object* obj) {
  // TODO: support >1 discontinuous space.
  space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
  accounting::SpaceSetMap* large_objects = large_object_space->GetMarkObjects();
  if (kProfileLargeObjects) {
    ++large_object_test_;
  }
  if (UNLIKELY(!large_objects->Test(obj))) {
    if (!large_object_space->Contains(obj)) {
      LOG(ERROR) << "Tried to mark " << obj << " not contained by any spaces";
      LOG(ERROR) << "Attempting see if it's a bad root";
      VerifyRoots();
      LOG(FATAL) << "Can't mark bad root";
    }
    if (kProfileLargeObjects) {
      ++large_object_mark_;
    }
    large_objects->Set(obj);
    // Don't need to check finger since large objects never have any object references.
    return true;
  }
  return false;
}

// Parallel-safe marking: uses an atomic test-and-set on the mark bitmap so
// multiple worker threads may race on the same object. Returns true if this
// thread was the one that marked it (i.e. it was not previously marked).
inline bool MarkSweep::MarkObjectParallel(const Object* obj) {
  DCHECK(obj != NULL);

  // Immune objects are always marked; returning false means "already marked".
  if (obj >= immune_begin_ && obj < immune_end_) {
    DCHECK(IsMarked(obj));
    return false;
  }

  // Try to take advantage of locality of references within a space, failing this find the space
  // the hard way.
  accounting::SpaceBitmap* object_bitmap = current_mark_bitmap_;
  if (UNLIKELY(!object_bitmap->HasAddress(obj))) {
    accounting::SpaceBitmap* new_bitmap = heap_->GetMarkBitmap()->GetContinuousSpaceBitmap(obj);
    if (new_bitmap != NULL) {
      object_bitmap = new_bitmap;
    } else {
      // TODO: Remove the Thread::Current here?
      // TODO: Convert this to some kind of atomic marking?
      // Large object marking is not atomic, so serialize it with a lock.
      MutexLock mu(Thread::Current(), large_object_lock_);
      return MarkLargeObject(obj);
    }
  }

  // Return true if the object was not previously marked.
  return !object_bitmap->AtomicTestAndSet(obj);
}

// Used to mark objects when recursing.  Recursion is done by moving
// the finger across the bitmaps in address order and marking child
// objects.
// Any newly-marked objects whose addresses are lower than
// the finger won't be visited by the bitmap scan, so those objects
// need to be added to the mark stack.
void MarkSweep::MarkObject(const Object* obj) {
  if (obj != NULL) {
    MarkObjectNonNull(obj, true);
  }
}

// Marks a root; roots skip the finger check (check_finger == false).
void MarkSweep::MarkRoot(const Object* obj) {
  if (obj != NULL) {
    MarkObjectNonNull(obj, false);
  }
}

// Root callback used while thread roots are marked in parallel during the
// checkpoint; forwards to the atomic marking path.
void MarkSweep::MarkRootParallelCallback(const Object* root, void* arg) {
  DCHECK(root != NULL);
  DCHECK(arg != NULL);
  MarkSweep* mark_sweep = reinterpret_cast<MarkSweep*>(arg);
  mark_sweep->MarkObjectNonNullParallel(root, false);
}

// Serial root callback: marks without the finger check.
void MarkSweep::MarkObjectCallback(const Object* root, void* arg) {
  DCHECK(root != NULL);
  DCHECK(arg != NULL);
  MarkSweep* mark_sweep = reinterpret_cast<MarkSweep*>(arg);
  mark_sweep->MarkObjectNonNull(root, false);
}

// Root callback for the re-mark pass: marks with the finger check enabled.
void MarkSweep::ReMarkObjectVisitor(const Object* root, void* arg) {
  DCHECK(root != NULL);
  DCHECK(arg != NULL);
  MarkSweep* mark_sweep = reinterpret_cast<MarkSweep*>(arg);
  mark_sweep->MarkObjectNonNull(root, true);
}

// Trampoline from the thread-list root verification into VerifyRoot.
void MarkSweep::VerifyRootCallback(const Object* root, void* arg, size_t vreg,
                                   const StackVisitor* visitor) {
  reinterpret_cast<MarkSweep*>(arg)->VerifyRoot(root, vreg, visitor);
}

// Logs an error (with the stack location, when available) if |root| is covered
// by no continuous-space live bitmap and is not in the large object space.
void MarkSweep::VerifyRoot(const Object* root, size_t vreg, const StackVisitor* visitor) {
  // See if the root is on any space bitmap.
  if (GetHeap()->GetLiveBitmap()->GetContinuousSpaceBitmap(root) == NULL) {
    space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
    if (!large_object_space->Contains(root)) {
      LOG(ERROR) << "Found invalid root: " << root;
      if (visitor != NULL) {
        LOG(ERROR) << visitor->DescribeLocation() << " in VReg: " << vreg;
      }
    }
  }
}

// Runs VerifyRoot over every thread's roots.
void MarkSweep::VerifyRoots() {
  Runtime::Current()->GetThreadList()->VerifyRoots(VerifyRootCallback, this);
}

// Marks all objects in the root set.
void MarkSweep::MarkRoots() {
  Runtime::Current()->VisitNonConcurrentRoots(MarkObjectCallback, this);
}

void MarkSweep::MarkNonThreadRoots() {
  Runtime::Current()->VisitNonThreadRoots(MarkObjectCallback, this);
}

void MarkSweep::MarkConcurrentRoots() {
  // Visit all runtime roots and clear dirty flags.
  Runtime::Current()->VisitConcurrentRoots(MarkObjectCallback, this, false, true);
}

// Visitor that forwards each reference of a visited object to CheckReference
// for validation; used by the verification passes.
class CheckObjectVisitor {
 public:
  explicit CheckObjectVisitor(MarkSweep* const mark_sweep) : mark_sweep_(mark_sweep) {}

  void operator()(const Object* obj, const Object* ref, MemberOffset offset, bool is_static) const
      NO_THREAD_SAFETY_ANALYSIS {
    if (kDebugLocking) {
      Locks::heap_bitmap_lock_->AssertSharedHeld(Thread::Current());
    }
    mark_sweep_->CheckReference(obj, ref, offset, is_static);
  }

 private:
  MarkSweep* const mark_sweep_;
};

// Validates every reference held by |obj| (see CheckReference).
void MarkSweep::CheckObject(const Object* obj) {
  DCHECK(obj != NULL);
  CheckObjectVisitor visitor(this);
  VisitObjectReferences(obj, visitor);
}

// Image-space root verification: every image root must already be marked.
void MarkSweep::VerifyImageRootVisitor(Object* root, void* arg) {
  DCHECK(root != NULL);
  DCHECK(arg != NULL);
  MarkSweep* mark_sweep = reinterpret_cast<MarkSweep*>(arg);
  DCHECK(mark_sweep->heap_->GetMarkBitmap()->Test(root));
  mark_sweep->CheckObject(root);
}

// Makes live == marked for |space| by installing the live bitmap as the mark
// bitmap; the real mark bitmap is parked in temp_bitmap_ until it is restored.
void MarkSweep::BindLiveToMarkBitmap(space::ContinuousSpace* space) {
  CHECK(space->IsDlMallocSpace());
  space::DlMallocSpace* alloc_space = space->AsDlMallocSpace();
  accounting::SpaceBitmap* live_bitmap = space->GetLiveBitmap();
  accounting::SpaceBitmap* mark_bitmap = alloc_space->mark_bitmap_.release();
  GetHeap()->GetMarkBitmap()->ReplaceBitmap(mark_bitmap, live_bitmap);
  alloc_space->temp_bitmap_.reset(mark_bitmap);
  alloc_space->mark_bitmap_.reset(live_bitmap);
}

// Visitor that scans each object handed to it (e.g. by a card table or bitmap walk).
class ScanObjectVisitor {
 public:
  explicit ScanObjectVisitor(MarkSweep* const mark_sweep) : mark_sweep_(mark_sweep) {}

  // TODO: Fixme when anotatalysis works with visitors.
  void operator()(const Object* obj) const NO_THREAD_SAFETY_ANALYSIS {
    if (kDebugLocking) {
      Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
      Locks::heap_bitmap_lock_->AssertExclusiveHeld(Thread::Current());
    }
    mark_sweep_->ScanObject(obj);
  }

 private:
  MarkSweep* const mark_sweep_;
};

// Scans objects on cards at least as dirty as |minimum_age| across every
// continuous space, recording a timing split per retention policy.
void MarkSweep::ScanGrayObjects(byte minimum_age) {
  accounting::CardTable* card_table = GetHeap()->GetCardTable();
  const std::vector<space::ContinuousSpace*>& spaces = GetHeap()->GetContinuousSpaces();
  ScanObjectVisitor visitor(this);
  // TODO: C++0x
  typedef std::vector<space::ContinuousSpace*>::const_iterator It;
  for (It it = spaces.begin(), space_end = spaces.end(); it != space_end; ++it) {
    space::ContinuousSpace* space = *it;
    switch (space->GetGcRetentionPolicy()) {
      case space::kGcRetentionPolicyNeverCollect:
        timings_.NewSplit("ScanGrayImageSpaceObjects");
        break;
      case space::kGcRetentionPolicyFullCollect:
        timings_.NewSplit("ScanGrayZygoteSpaceObjects");
        break;
      case space::kGcRetentionPolicyAlwaysCollect:
        timings_.NewSplit("ScanGrayAllocSpaceObjects");
        break;
    }
    byte* begin = space->Begin();
    byte* end = space->End();
    // Image spaces are handled properly since live == marked for them.
    accounting::SpaceBitmap* mark_bitmap = space->GetMarkBitmap();
    card_table->Scan(mark_bitmap, begin, end, visitor, minimum_age);
  }
}

// Visitor asserting that every visited object's references are valid (CheckObject).
class CheckBitmapVisitor {
 public:
  explicit CheckBitmapVisitor(MarkSweep* mark_sweep) : mark_sweep_(mark_sweep) {}

  void operator()(const Object* obj) const NO_THREAD_SAFETY_ANALYSIS {
    if (kDebugLocking) {
      Locks::heap_bitmap_lock_->AssertSharedHeld(Thread::Current());
    }
    DCHECK(obj != NULL);
    mark_sweep_->CheckObject(obj);
  }

 private:
  MarkSweep* mark_sweep_;
};

void MarkSweep::VerifyImageRoots() {
  // Verify roots ensures that all the references inside the image space point
  // objects which are either in the image space or marked objects in the alloc
  // space
  CheckBitmapVisitor visitor(this);
  const std::vector<space::ContinuousSpace*>& spaces = GetHeap()->GetContinuousSpaces();
  // TODO: C++0x
  typedef std::vector<space::ContinuousSpace*>::const_iterator It;
  for (It it = spaces.begin(), end = spaces.end(); it != end; ++it) {
    if ((*it)->IsImageSpace()) {
      space::ImageSpace* space = (*it)->AsImageSpace();
      uintptr_t begin = reinterpret_cast<uintptr_t>(space->Begin());
      uintptr_t end = reinterpret_cast<uintptr_t>(space->End());
      accounting::SpaceBitmap* live_bitmap = space->GetLiveBitmap();
      DCHECK(live_bitmap != NULL);
      live_bitmap->VisitMarkedRange(begin, end, visitor);
    }
  }
}

// Populates the mark stack based on the set of marked objects and
// recursively marks until the mark stack is emptied.
void MarkSweep::RecursiveMark() {
  timings_.NewSplit("RecursiveMark");
  // RecursiveMark will build the lists of known instances of the Reference classes.
  // See DelayReferenceReferent for details.
  CHECK(soft_reference_list_ == NULL);
  CHECK(weak_reference_list_ == NULL);
  CHECK(finalizer_reference_list_ == NULL);
  CHECK(phantom_reference_list_ == NULL);
  CHECK(cleared_reference_list_ == NULL);

  const bool partial = GetGcType() == kGcTypePartial;
  ScanObjectVisitor scan_visitor(this);
  if (!kDisableFinger) {
    // Walk each collectible space's mark bitmap in address order, scanning
    // marked objects. For a partial GC, full-collect (zygote) spaces are skipped.
    const std::vector<space::ContinuousSpace*>& spaces = GetHeap()->GetContinuousSpaces();
    // TODO: C++0x
    typedef std::vector<space::ContinuousSpace*>::const_iterator It;
    for (It it = spaces.begin(), end = spaces.end(); it != end; ++it) {
      space::ContinuousSpace* space = *it;
      if ((space->GetGcRetentionPolicy() == space::kGcRetentionPolicyAlwaysCollect) ||
          (!partial && space->GetGcRetentionPolicy() == space::kGcRetentionPolicyFullCollect)) {
        current_mark_bitmap_ = space->GetMarkBitmap();
        if (current_mark_bitmap_ == NULL) {
          GetHeap()->DumpSpaces();
          LOG(FATAL) << "invalid bitmap";
        }
        // This function does not handle heap end increasing, so we must use the space end.
        uintptr_t begin = reinterpret_cast<uintptr_t>(space->Begin());
        uintptr_t end = reinterpret_cast<uintptr_t>(space->End());
        current_mark_bitmap_->VisitMarkedRange(begin, end, scan_visitor);
      }
    }
  }
  timings_.NewSplit("ProcessMarkStack");
  ProcessMarkStack();
}

// "Is marked" predicate for weak sweeping: treats an object as marked if it is
// actually marked OR is not even live (dead objects need no clearing here).
bool MarkSweep::IsMarkedCallback(const Object* object, void* arg) {
  return
      reinterpret_cast<MarkSweep*>(arg)->IsMarked(object) ||
      !reinterpret_cast<MarkSweep*>(arg)->GetHeap()->GetLiveBitmap()->Test(object);
}

void MarkSweep::RecursiveMarkDirtyObjects(byte minimum_age) {
  ScanGrayObjects(minimum_age);
  timings_.NewSplit("ProcessMarkStack");
  ProcessMarkStack();
}

void MarkSweep::ReMarkRoots() {
  Runtime::Current()->VisitRoots(ReMarkObjectVisitor, this, true, true);
}

// Replaces each weak JNI global whose referent is no longer marked with the
// cleared-weak sentinel, under the weak globals lock.
void MarkSweep::SweepJniWeakGlobals(IsMarkedTester is_marked, void* arg) {
  JavaVMExt* vm = Runtime::Current()->GetJavaVM();
  MutexLock mu(Thread::Current(), vm->weak_globals_lock);
  IndirectReferenceTable* table = &vm->weak_globals;
  typedef IndirectReferenceTable::iterator It;  // TODO: C++0x auto
  for (It it = table->begin(), end = table->end(); it != end; ++it) {
    const Object** entry = *it;
    if (!is_marked(*entry, arg)) {
      *entry = kClearedJniWeakGlobal;
    }
  }
}

// Context for IsMarkedArrayCallback: the allocation (live) stack stands in for
// the live bitmap when sweeping weaks of newly allocated objects.
struct ArrayMarkedCheck {
  accounting::ObjectStack* live_stack;
  MarkSweep* mark_sweep;
};

// Either marked or not live.
// Returns true when |object| is either marked or absent from the live stack
// (i.e. not live) — the stack-based analogue of IsMarkedCallback.
bool MarkSweep::IsMarkedArrayCallback(const Object* object, void* arg) {
  ArrayMarkedCheck* array_check = reinterpret_cast<ArrayMarkedCheck*>(arg);
  if (array_check->mark_sweep->IsMarked(object)) {
    return true;
  }
  accounting::ObjectStack* live_stack = array_check->live_stack;
  // Linear scan of the live stack; object not found => not live => "marked".
  return std::find(live_stack->Begin(), live_stack->End(), object) == live_stack->End();
}

// Sweeps system weaks (intern table, monitors, JNI weak globals) using the
// allocation stack as the liveness source; used by SweepArray.
void MarkSweep::SweepSystemWeaksArray(accounting::ObjectStack* allocations) {
  Runtime* runtime = Runtime::Current();
  // The callbacks check
  // !is_marked where is_marked is the callback but we want
  // !IsMarked && IsLive
  // So compute !(!IsMarked && IsLive) which is equal to (IsMarked || !IsLive).
  // Or for swapped (IsLive || !IsMarked).

  ArrayMarkedCheck visitor;
  visitor.live_stack = allocations;
  visitor.mark_sweep = this;
  runtime->GetInternTable()->SweepInternTableWeaks(IsMarkedArrayCallback, &visitor);
  runtime->GetMonitorList()->SweepMonitorList(IsMarkedArrayCallback, &visitor);
  SweepJniWeakGlobals(IsMarkedArrayCallback, &visitor);
}

// Sweeps system weaks using the bitmap-based IsMarkedCallback.
void MarkSweep::SweepSystemWeaks() {
  Runtime* runtime = Runtime::Current();
  // The callbacks check
  // !is_marked where is_marked is the callback but we want
  // !IsMarked && IsLive
  // So compute !(!IsMarked && IsLive) which is equal to (IsMarked || !IsLive).
  // Or for swapped (IsLive || !IsMarked).
  runtime->GetInternTable()->SweepInternTableWeaks(IsMarkedCallback, this);
  runtime->GetMonitorList()->SweepMonitorList(IsMarkedCallback, this);
  SweepJniWeakGlobals(IsMarkedCallback, this);
}

// IsMarked-style callback that asserts liveness instead of sweeping.
bool MarkSweep::VerifyIsLiveCallback(const Object* obj, void* arg) {
  reinterpret_cast<MarkSweep*>(arg)->VerifyIsLive(obj);
  // We don't actually want to sweep the object, so lets return "marked"
  return true;
}

// Aborts (LOG(FATAL)) if |obj| is in neither the live bitmap, the large-object
// live set, nor the allocation stack.
void MarkSweep::VerifyIsLive(const Object* obj) {
  Heap* heap = GetHeap();
  if (!heap->GetLiveBitmap()->Test(obj)) {
    space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
    if (!large_object_space->GetLiveObjects()->Test(obj)) {
      if (std::find(heap->allocation_stack_->Begin(), heap->allocation_stack_->End(), obj) ==
          heap->allocation_stack_->End()) {
        // Object not found!
        heap->DumpSpaces();
        LOG(FATAL) << "Found dead object " << obj;
      }
    }
  }
}

void MarkSweep::VerifySystemWeaks() {
  Runtime* runtime = Runtime::Current();
  // Verify system weaks, uses a special IsMarked callback which always returns true.
  runtime->GetInternTable()->SweepInternTableWeaks(VerifyIsLiveCallback, this);
  runtime->GetMonitorList()->SweepMonitorList(VerifyIsLiveCallback, this);

  JavaVMExt* vm = runtime->GetJavaVM();
  MutexLock mu(Thread::Current(), vm->weak_globals_lock);
  IndirectReferenceTable* table = &vm->weak_globals;
  typedef IndirectReferenceTable::iterator It;  // TODO: C++0x auto
  for (It it = table->begin(), end = table->end(); it != end; ++it) {
    const Object** entry = *it;
    VerifyIsLive(*entry);
  }
}

// State shared with the sweep callbacks below.
struct SweepCallbackContext {
  MarkSweep* mark_sweep;
  space::AllocSpace* space;
  Thread* self;
};

// Checkpoint closure: each thread (or a thread acting on a suspended one)
// marks that thread's roots, then passes the GC barrier.
class CheckpointMarkThreadRoots : public Closure {
 public:
  explicit CheckpointMarkThreadRoots(MarkSweep* mark_sweep) : mark_sweep_(mark_sweep) {}

  virtual void Run(Thread* thread) NO_THREAD_SAFETY_ANALYSIS {
    // Note: self is not necessarily equal to thread since thread may be suspended.
    Thread* self = Thread::Current();
    CHECK(thread == self || thread->IsSuspended() || thread->GetState() == kWaitingPerformingGc)
        << thread->GetState() << " thread " << thread << " self " << self;
    thread->VisitRoots(MarkSweep::MarkRootParallelCallback, mark_sweep_);
    mark_sweep_->GetBarrier().Pass(self);
  }

 private:
  MarkSweep* mark_sweep_;
};

// Runs the root-marking checkpoint on all threads, then drops the heap bitmap
// and mutator locks while waiting for every thread to pass the barrier,
// reacquiring them in the same order afterwards.
void MarkSweep::MarkRootsCheckpoint(Thread* self) {
  CheckpointMarkThreadRoots check_point(this);
  ThreadList* thread_list = Runtime::Current()->GetThreadList();
  // Request the check point is run on all threads returning a count of the threads that must
  // run through the barrier including self.
  size_t barrier_count = thread_list->RunCheckpoint(&check_point);
  // Release locks then wait for all mutator threads to pass the barrier.
  // TODO: optimize to not release locks when there are no threads to wait for.
  Locks::heap_bitmap_lock_->ExclusiveUnlock(self);
  Locks::mutator_lock_->SharedUnlock(self);
  ThreadState old_state = self->SetState(kWaitingForCheckPointsToRun);
  CHECK_EQ(old_state, kWaitingPerformingGc);
  gc_barrier_->Increment(self, barrier_count);
  self->SetState(kWaitingPerformingGc);
  Locks::mutator_lock_->SharedLock(self);
  Locks::heap_bitmap_lock_->ExclusiveLock(self);
}

// SweepWalk callback: bulk-frees |num_ptrs| dead objects from the alloc space
// and updates the freed-object/byte accounting.
void MarkSweep::SweepCallback(size_t num_ptrs, Object** ptrs, void* arg) {
  SweepCallbackContext* context = static_cast<SweepCallbackContext*>(arg);
  MarkSweep* mark_sweep = context->mark_sweep;
  Heap* heap = mark_sweep->GetHeap();
  space::AllocSpace* space = context->space;
  Thread* self = context->self;
  Locks::heap_bitmap_lock_->AssertExclusiveHeld(self);
  // Use a bulk free, that merges consecutive objects before freeing or free per object?
  // Documentation suggests better free performance with merging, but this may be at the expensive
  // of allocation.
  size_t freed_objects = num_ptrs;
  // AllocSpace::FreeList clears the value in ptrs, so perform after clearing the live bit
  size_t freed_bytes = space->FreeList(self, num_ptrs, ptrs);
  heap->RecordFree(freed_objects, freed_bytes);
  mark_sweep->freed_objects_.fetch_add(freed_objects);
  mark_sweep->freed_bytes_.fetch_add(freed_bytes);
}

// SweepWalk callback for the zygote space: clears live bits and dirties cards
// instead of freeing, to keep the shared zygote pages clean.
void MarkSweep::ZygoteSweepCallback(size_t num_ptrs, Object** ptrs, void* arg) {
  SweepCallbackContext* context = static_cast<SweepCallbackContext*>(arg);
  Locks::heap_bitmap_lock_->AssertExclusiveHeld(context->self);
  Heap* heap = context->mark_sweep->GetHeap();
  // We don't free any actual memory to avoid dirtying the shared zygote pages.
  for (size_t i = 0; i < num_ptrs; ++i) {
    Object* obj = static_cast<Object*>(ptrs[i]);
    heap->GetLiveBitmap()->Clear(obj);
    heap->GetCardTable()->MarkCard(obj);
  }
}

// Sweeps only the objects recorded in the allocation stack (allocated since the
// last GC): unmarked alloc-space objects are batched and bulk-freed, unmarked
// large objects are freed individually.
void MarkSweep::SweepArray(accounting::ObjectStack* allocations, bool swap_bitmaps) {
  size_t freed_bytes = 0;
  space::DlMallocSpace* space = heap_->GetAllocSpace();

  // If we don't swap bitmaps then newly allocated Weaks go into the live bitmap but not mark
  // bitmap, resulting in occasional frees of Weaks which are still in use.
  timings_.NewSplit("SweepSystemWeaks");
  SweepSystemWeaksArray(allocations);

  timings_.NewSplit("Process allocation stack");
  // Newly allocated objects MUST be in the alloc space and those are the only objects which we are
  // going to free.
  accounting::SpaceBitmap* live_bitmap = space->GetLiveBitmap();
  accounting::SpaceBitmap* mark_bitmap = space->GetMarkBitmap();
  space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
  accounting::SpaceSetMap* large_live_objects = large_object_space->GetLiveObjects();
  accounting::SpaceSetMap* large_mark_objects = large_object_space->GetMarkObjects();
  if (swap_bitmaps) {
    std::swap(live_bitmap, mark_bitmap);
    std::swap(large_live_objects, large_mark_objects);
  }

  size_t freed_large_objects = 0;
  size_t count = allocations->Size();
  Object** objects = const_cast<Object**>(allocations->Begin());
  // |out| compacts the dead alloc-space objects to the front of the stack
  // buffer so they can be handed to FreeList in one call.
  Object** out = objects;

  // Empty the allocation stack.
  Thread* self = Thread::Current();
  for (size_t i = 0; i < count; ++i) {
    Object* obj = objects[i];
    // There should only be objects in the AllocSpace/LargeObjectSpace in the allocation stack.
    if (LIKELY(mark_bitmap->HasAddress(obj))) {
      if (!mark_bitmap->Test(obj)) {
        // Don't bother un-marking since we clear the mark bitmap anyways.
        *(out++) = obj;
      }
    } else if (!large_mark_objects->Test(obj)) {
      ++freed_large_objects;
      freed_bytes += large_object_space->Free(self, obj);
    }
  }
  CHECK_EQ(count, allocations->Size());
  timings_.NewSplit("FreeList");

  size_t freed_objects = out - objects;
  freed_bytes += space->FreeList(self, freed_objects, objects);
  VLOG(heap) << "Freed " << freed_objects << "/" << count
             << " objects with size " << PrettySize(freed_bytes);
  heap_->RecordFree(freed_objects + freed_large_objects, freed_bytes);
  freed_objects_.fetch_add(freed_objects);
  freed_bytes_.fetch_add(freed_bytes);

  timings_.NewSplit("ResetStack");
  allocations->Reset();
}

// Full bitmap sweep: walks live-vs-mark bitmap differences in every
// collectible space, freeing (or zygote-clearing) unmarked objects, then
// sweeps the large object space.
void MarkSweep::Sweep(bool swap_bitmaps) {
  DCHECK(mark_stack_->IsEmpty());

  // If we don't swap bitmaps then newly allocated Weaks go into the live bitmap but not mark
  // bitmap, resulting in occasional frees of Weaks which are still in use.
  timings_.NewSplit("SweepSystemWeaks");
  SweepSystemWeaks();

  const bool partial = (GetGcType() == kGcTypePartial);
  SweepCallbackContext scc;
  scc.mark_sweep = this;
  scc.self = Thread::Current();
  const std::vector<space::ContinuousSpace*>& spaces = GetHeap()->GetContinuousSpaces();
  // TODO: C++0x
  typedef std::vector<space::ContinuousSpace*>::const_iterator It;
  for (It it = spaces.begin(), end = spaces.end(); it != end; ++it) {
    space::ContinuousSpace* space = *it;
    // We always sweep always collect spaces.
    bool sweep_space = (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyAlwaysCollect);
    if (!partial && !sweep_space) {
      // We sweep full collect spaces when the GC isn't a partial GC (ie its full).
      sweep_space = (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyFullCollect);
    }
    if (sweep_space) {
      uintptr_t begin = reinterpret_cast<uintptr_t>(space->Begin());
      uintptr_t end = reinterpret_cast<uintptr_t>(space->End());
      scc.space = space->AsDlMallocSpace();
      accounting::SpaceBitmap* live_bitmap = space->GetLiveBitmap();
      accounting::SpaceBitmap* mark_bitmap = space->GetMarkBitmap();
      if (swap_bitmaps) {
        std::swap(live_bitmap, mark_bitmap);
      }
      if (!space->IsZygoteSpace()) {
        timings_.NewSplit("SweepAllocSpace");
        // Bitmaps are pre-swapped for optimization which enables sweeping with the heap unlocked.
        accounting::SpaceBitmap::SweepWalk(*live_bitmap, *mark_bitmap, begin, end,
                                           &SweepCallback, reinterpret_cast<void*>(&scc));
      } else {
        timings_.NewSplit("SweepZygote");
        // Zygote sweep takes care of dirtying cards and clearing live bits, does not free actual
        // memory.
        accounting::SpaceBitmap::SweepWalk(*live_bitmap, *mark_bitmap, begin, end,
                                           &ZygoteSweepCallback, reinterpret_cast<void*>(&scc));
      }
    }
  }

  timings_.NewSplit("SweepLargeObjects");
  SweepLargeObjects(swap_bitmaps);
}

// Frees every large object that is live but not marked.
void MarkSweep::SweepLargeObjects(bool swap_bitmaps) {
  // Sweep large objects
  space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
  accounting::SpaceSetMap* large_live_objects = large_object_space->GetLiveObjects();
  accounting::SpaceSetMap* large_mark_objects = large_object_space->GetMarkObjects();
  if (swap_bitmaps) {
    std::swap(large_live_objects, large_mark_objects);
  }
  accounting::SpaceSetMap::Objects& live_objects = large_live_objects->GetObjects();
  // O(n*log(n)) but hopefully there are not too many large objects.
  size_t freed_objects = 0;
  size_t freed_bytes = 0;
  Thread* self = Thread::Current();
  // TODO: C++0x
  typedef accounting::SpaceSetMap::Objects::iterator It;
  for (It it = live_objects.begin(), end = live_objects.end(); it != end; ++it) {
    if (!large_mark_objects->Test(*it)) {
      freed_bytes += large_object_space->Free(self, const_cast<Object*>(*it));
      ++freed_objects;
    }
  }
  freed_objects_.fetch_add(freed_objects);
  freed_bytes_.fetch_add(freed_bytes);
  GetHeap()->RecordFree(freed_objects, freed_bytes);
}

// Diagnostic helper: if |ref| lives in a DlMalloc space but is not marked, logs
// details about the referring field and the referrer's card state.
void MarkSweep::CheckReference(const Object* obj, const Object* ref, MemberOffset offset, bool is_static) {
  const std::vector<space::ContinuousSpace*>& spaces = GetHeap()->GetContinuousSpaces();
  // TODO: C++0x
  typedef std::vector<space::ContinuousSpace*>::const_iterator It;
  for (It it = spaces.begin(), end = spaces.end(); it != end; ++it) {
    space::ContinuousSpace* space = *it;
    if (space->IsDlMallocSpace() && space->Contains(ref)) {
      DCHECK(IsMarked(obj));

      bool is_marked = IsMarked(ref);
      if (!is_marked) {
        LOG(INFO) << *space;
        LOG(WARNING) << (is_static ? "Static ref'" : "Instance ref'") << PrettyTypeOf(ref)
                     << "' (" << reinterpret_cast<const void*>(ref) << ") in '" << PrettyTypeOf(obj)
                     << "' (" << reinterpret_cast<const void*>(obj) << ") at offset "
                     << reinterpret_cast<void*>(offset.Int32Value()) << " wasn't marked";

        // Try to identify which declared field holds this reference.
        const Class* klass = is_static ? obj->AsClass() : obj->GetClass();
        DCHECK(klass != NULL);
        const ObjectArray<Field>* fields = is_static ? klass->GetSFields() : klass->GetIFields();
        DCHECK(fields != NULL);
        bool found = false;
        for (int32_t i = 0; i < fields->GetLength(); ++i) {
          const Field* cur = fields->Get(i);
          if (cur->GetOffset().Int32Value() == offset.Int32Value()) {
            LOG(WARNING) << "Field referencing the alloc space was " << PrettyField(cur);
            found = true;
            break;
          }
        }
        if (!found) {
          LOG(WARNING) << "Could not find field in object alloc space with offset " << offset.Int32Value();
        }

        bool obj_marked = heap_->GetCardTable()->IsDirty(obj);
        if (!obj_marked) {
          LOG(WARNING) << "Object '" << PrettyTypeOf(obj) << "' "
                       << "(" << reinterpret_cast<const void*>(obj) << ") contains references to "
                       << "the alloc space, but wasn't card marked";
        }
      }
    }
    // NOTE(review): this break is outside the Contains(ref) check above, so the
    // loop only ever examines the first continuous space — looks like it was
    // meant to go inside that if; confirm intent before relying on this check.
    break;
  }
}

// Process the "referent" field in a java.lang.ref.Reference.  If the
// referent has not yet been marked, put it on the appropriate list in
// the gcHeap for later processing.
// Defers processing of a java.lang.ref.Reference whose referent is unmarked:
// the Reference is appended to the soft/weak/finalizer/phantom pending list
// matching its class, for later handling by reference processing.
void MarkSweep::DelayReferenceReferent(Object* obj) {
  DCHECK(obj != NULL);
  Class* klass = obj->GetClass();
  DCHECK(klass != NULL);
  DCHECK(klass->IsReferenceClass());
  // pending != NULL means the Reference is already enqueued on some list.
  Object* pending = obj->GetFieldObject<Object*>(heap_->GetReferencePendingNextOffset(), false);
  Object* referent = heap_->GetReferenceReferent(obj);
  if (kCountJavaLangRefs) {
    ++reference_count_;
  }
  if (pending == NULL && referent != NULL && !IsMarked(referent)) {
    Object** list = NULL;
    if (klass->IsSoftReferenceClass()) {
      list = &soft_reference_list_;
    } else if (klass->IsWeakReferenceClass()) {
      list = &weak_reference_list_;
    } else if (klass->IsFinalizerReferenceClass()) {
      list = &finalizer_reference_list_;
    } else if (klass->IsPhantomReferenceClass()) {
      list = &phantom_reference_list_;
    }
    DCHECK(list != NULL) << PrettyClass(klass) << " " << std::hex << klass->GetAccessFlags();
    // TODO: One lock per list?
    heap_->EnqueuePendingReference(obj, list);
  }
}

void MarkSweep::ScanRoot(const Object* obj) {
  ScanObject(obj);
}

// Visitor that marks each reference field of a scanned object (serial path).
class MarkObjectVisitor {
 public:
  explicit MarkObjectVisitor(MarkSweep* const mark_sweep) : mark_sweep_(mark_sweep) {}

  // TODO: Fixme when anotatalysis works with visitors.
  void operator()(const Object* /* obj */, const Object* ref, const MemberOffset& /* offset */,
                  bool /* is_static */) const
      NO_THREAD_SAFETY_ANALYSIS {
    if (kDebugLocking) {
      Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
      Locks::heap_bitmap_lock_->AssertExclusiveHeld(Thread::Current());
    }
    mark_sweep_->MarkObject(ref);
  }

 private:
  MarkSweep* const mark_sweep_;
};

// Scans an object reference.  Determines the type of the reference
// and dispatches to a specialized scanning routine.
void MarkSweep::ScanObject(const Object* obj) {
  MarkObjectVisitor visitor(this);
  ScanObjectVisit(obj, visitor);
}

// A fixed-size chunk of gray objects processed as a thread-pool task during
// parallel mark stack processing. Newly discovered references that do not fit
// in the local buffer overflow into a private output chunk, which is handed
// to the thread pool once full so another worker can scan it.
class MarkStackChunk : public Task {
 public:
  // Copies [begin, end) into the local buffer. Callers must guarantee
  // end - begin <= max_size (ProcessMarkStackParallel clamps chunk_size;
  // AllocateOutputChunk passes an empty range).
  MarkStackChunk(ThreadPool* thread_pool, MarkSweep* mark_sweep, Object** begin, Object** end)
      : mark_sweep_(mark_sweep),
        thread_pool_(thread_pool),
        index_(0),
        length_(0),
        output_(NULL) {
    length_ = end - begin;
    if (begin != end) {
      // Cost not significant since we only do this for the initial set of mark stack chunks.
      memcpy(data_, begin, length_ * sizeof(*begin));
    }
    if (kCountTasks) {
      ++mark_sweep_->work_chunks_created_;
    }
  }

  ~MarkStackChunk() {
    // By destruction time the chunk must be fully scanned and any output
    // chunk must have been flushed (or never filled).
    DCHECK(output_ == NULL || output_->length_ == 0);
    DCHECK_GE(index_, length_);
    delete output_;
    if (kCountTasks) {
      ++mark_sweep_->work_chunks_deleted_;
    }
  }

  MarkSweep* const mark_sweep_;
  ThreadPool* const thread_pool_;
  static const size_t max_size = 1 * KB;
  // Index of which object we are scanning. Only needs to be atomic if we are doing work stealing.
  size_t index_;
  // Input / output mark stack. We add newly marked references to data_ until length reaches
  // max_size. This is an optimization so that fewer tasks are created.
  // TODO: Investigate using a bounded buffer FIFO.
  Object* data_[max_size];
  // How many elements in data_ we need to scan.
  size_t length_;
  // Output block, newly marked references get added to the output block so that another thread can
  // scan them.
  MarkStackChunk* output_;

  // Visitor used by the parallel scanning path: marks the reference with an
  // atomic (parallel-safe) mark and, only if this thread won the race to mark
  // it, pushes it for further scanning.
  class MarkObjectParallelVisitor {
   public:
    explicit MarkObjectParallelVisitor(MarkStackChunk* chunk_task) : chunk_task_(chunk_task) {}

    void operator()(const Object* /* obj */, const Object* ref,
                    const MemberOffset& /* offset */, bool /* is_static */) const {
      if (ref != NULL && chunk_task_->mark_sweep_->MarkObjectParallel(ref)) {
        chunk_task_->MarkStackPush(ref);
      }
    }

   private:
    MarkStackChunk* const chunk_task_;
  };

  // Push an object into the block.
  // Don't need to use atomic ++ since only one thread is writing to an output block at any
  // given time.
  void Push(Object* obj) {
    CHECK(obj != NULL);
    data_[length_++] = obj;
  }

  // Adds a newly marked object: to the local buffer while it has room,
  // otherwise to the (lazily allocated) output chunk.
  void MarkStackPush(const Object* obj) {
    if (static_cast<size_t>(length_) < max_size) {
      Push(const_cast<Object*>(obj));
    } else {
      // Internal (thread-local) buffer is full, push to a new buffer instead.
      if (UNLIKELY(output_ == NULL)) {
        AllocateOutputChunk();
      } else if (UNLIKELY(static_cast<size_t>(output_->length_) == max_size)) {
        // Output block is full, queue it up for processing and obtain a new block.
        EnqueueOutput();
        AllocateOutputChunk();
      }
      output_->Push(const_cast<Object*>(obj));
    }
  }

  void ScanObject(Object* obj) {
    mark_sweep_->ScanObjectVisit(obj, MarkObjectParallelVisitor(this));
  }

  // Hands the current output chunk (if any) to the thread pool as a new task.
  void EnqueueOutput() {
    if (output_ != NULL) {
      uint64_t start = 0;
      if (kMeasureOverhead) {
        start = NanoTime();
      }
      thread_pool_->AddTask(Thread::Current(), output_);
      output_ = NULL;
      if (kMeasureOverhead) {
        mark_sweep_->overhead_time_.fetch_add(NanoTime() - start);
      }
    }
  }

  // Replaces output_ with a fresh, empty chunk. Any previous output_ must
  // have been enqueued (or be NULL) before calling this, or it is leaked.
  void AllocateOutputChunk() {
    uint64_t start = 0;
    if (kMeasureOverhead) {
      start = NanoTime();
    }
    output_ = new MarkStackChunk(thread_pool_, mark_sweep_, NULL, NULL);
    if (kMeasureOverhead) {
      mark_sweep_->overhead_time_.fetch_add(NanoTime() - start);
    }
  }

  // Flushes any remaining output and self-deletes; the task framework calls
  // this when the task is done.
  void Finalize() {
    EnqueueOutput();
    delete this;
  }

  // Scans all of the objects in this chunk's buffer.
  virtual void Run(Thread* self) {
    size_t index;
    while ((index = index_++) < length_) {
      if (kUseMarkStackPrefetch) {
        // Prefetch the next object; clamp to the last element so we never
        // read past the end (length_ >= 1 inside the loop).
        static const size_t prefetch_look_ahead = 1;
        __builtin_prefetch(data_[std::min(index + prefetch_look_ahead, length_ - 1)]);
      }
      Object* obj = data_[index];
      DCHECK(obj != NULL);
      ScanObject(obj);
    }
  }
};

// Splits the mark stack into MarkStackChunk tasks, runs them on the thread
// pool, and blocks until the transitive closure is complete.
void MarkSweep::ProcessMarkStackParallel() {
  CHECK(kDisableFinger) << "parallel mark stack processing cannot work when finger is enabled";
  Thread* self = Thread::Current();
  ThreadPool* thread_pool = GetHeap()->GetThreadPool();
  // Split the current mark stack up into work tasks.
  const size_t num_threads = thread_pool->GetThreadCount();
  const size_t stack_size = mark_stack_->Size();
  // Aim for one chunk per thread but never exceed a chunk's buffer capacity.
  const size_t chunk_size =
      std::min((stack_size + num_threads - 1) / num_threads,
               static_cast<size_t>(MarkStackChunk::max_size));
  size_t index = 0;
  // Create at least num_threads tasks (possibly empty) and enough extra tasks
  // to cover the whole stack; ranges are clamped to the stack bounds.
  for (size_t i = 0; i < num_threads || index < stack_size; ++i) {
    Object** begin = &mark_stack_->Begin()[std::min(stack_size, index)];
    Object** end = &mark_stack_->Begin()[std::min(stack_size, index + chunk_size)];
    index += chunk_size;
    thread_pool->AddTask(self, new MarkStackChunk(thread_pool, this, begin, end));
  }
  thread_pool->StartWorkers(self);
  thread_pool->Wait(self, true, true);
  mark_stack_->Reset();
  // LOG(INFO) << "Idle wait time " << PrettyDuration(thread_pool->GetWaitTime());
  CHECK_EQ(work_chunks_created_, work_chunks_deleted_) << " some of the work chunks were leaked";
}

// Scan anything that's on the mark stack.
void MarkSweep::ProcessMarkStack() {
  ThreadPool* thread_pool = GetHeap()->GetThreadPool();
  if (kParallelMarkStack && thread_pool != NULL && thread_pool->GetThreadCount() > 0) {
    ProcessMarkStackParallel();
    return;
  }

  if (kUseMarkStackPrefetch) {
    // Small software-prefetch FIFO: objects popped from the mark stack are
    // prefetched one iteration before they are scanned.
    const size_t fifo_size = 4;
    const size_t fifo_mask = fifo_size - 1;
    const Object* fifo[fifo_size];
    for (size_t i = 0; i < fifo_size; ++i) {
      fifo[i] = NULL;
    }
    size_t fifo_pos = 0;
    size_t fifo_count = 0;
    for (;;) {
      // Scan the object that entered the FIFO fifo_size iterations ago.
      const Object* obj = fifo[fifo_pos & fifo_mask];
      if (obj != NULL) {
        ScanObject(obj);
        fifo[fifo_pos & fifo_mask] = NULL;
        --fifo_count;
      }

      // Refill the slot just freed (or still empty) and start the prefetch.
      if (!mark_stack_->IsEmpty()) {
        const Object* obj = mark_stack_->PopBack();
        DCHECK(obj != NULL);
        fifo[fifo_pos & fifo_mask] = obj;
        __builtin_prefetch(obj);
        fifo_count++;
      }
      fifo_pos++;

      // FIFO drained and (checked below) the mark stack is empty: done.
      if (!fifo_count) {
        CHECK(mark_stack_->IsEmpty()) << mark_stack_->Size();
        break;
      }
    }
  } else {
    while (!mark_stack_->IsEmpty()) {
      const Object* obj = mark_stack_->PopBack();
      DCHECK(obj != NULL);
      ScanObject(obj);
    }
  }
}

// Walks the reference list marking any references subject to the
// reference clearing policy.  References with a black referent are
// removed from the list.  References with white referents biased
// toward saving are blackened and also removed from the list.
void MarkSweep::PreserveSomeSoftReferences(Object** list) {
  DCHECK(list != NULL);
  Object* clear = NULL;
  size_t counter = 0;

  DCHECK(mark_stack_->IsEmpty());

  while (*list != NULL) {
    Object* ref = heap_->DequeuePendingReference(list);
    Object* referent = heap_->GetReferenceReferent(ref);
    if (referent == NULL) {
      // Referent was cleared by the user during marking.
      continue;
    }
    bool is_marked = IsMarked(referent);
    // Preserve every other white referent (counter parity biases toward
    // saving roughly half of them).
    if (!is_marked && ((++counter) & 1)) {
      // Referent is white and biased toward saving, mark it.
      MarkObject(referent);
      is_marked = true;
    }
    if (!is_marked) {
      // Referent is white, queue it for clearing.
      heap_->EnqueuePendingReference(ref, &clear);
    }
  }
  *list = clear;
  // Restart the mark with the newly black references added to the
  // root set.
  ProcessMarkStack();
}

// Returns true if the object is considered live: either inside the immune
// region (e.g. image/zygote spaces) or set in the relevant mark bitmap.
inline bool MarkSweep::IsMarked(const Object* object) const
    SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
  if (object >= immune_begin_ && object < immune_end_) {
    return true;
  }
  DCHECK(current_mark_bitmap_ != NULL);
  if (current_mark_bitmap_->HasAddress(object)) {
    return current_mark_bitmap_->Test(object);
  }
  // Object lives in a space other than the one currently being marked.
  return heap_->GetMarkBitmap()->Test(object);
}


// Unlink the reference list clearing references objects with white
Cleared references registered to a reference queue are 1335// scheduled for appending by the heap worker thread. 1336void MarkSweep::ClearWhiteReferences(Object** list) { 1337 DCHECK(list != NULL); 1338 while (*list != NULL) { 1339 Object* ref = heap_->DequeuePendingReference(list); 1340 Object* referent = heap_->GetReferenceReferent(ref); 1341 if (referent != NULL && !IsMarked(referent)) { 1342 // Referent is white, clear it. 1343 heap_->ClearReferenceReferent(ref); 1344 if (heap_->IsEnqueuable(ref)) { 1345 heap_->EnqueueReference(ref, &cleared_reference_list_); 1346 } 1347 } 1348 } 1349 DCHECK(*list == NULL); 1350} 1351 1352// Enqueues finalizer references with white referents. White 1353// referents are blackened, moved to the zombie field, and the 1354// referent field is cleared. 1355void MarkSweep::EnqueueFinalizerReferences(Object** list) { 1356 DCHECK(list != NULL); 1357 MemberOffset zombie_offset = heap_->GetFinalizerReferenceZombieOffset(); 1358 bool has_enqueued = false; 1359 while (*list != NULL) { 1360 Object* ref = heap_->DequeuePendingReference(list); 1361 Object* referent = heap_->GetReferenceReferent(ref); 1362 if (referent != NULL && !IsMarked(referent)) { 1363 MarkObject(referent); 1364 // If the referent is non-null the reference must queuable. 1365 DCHECK(heap_->IsEnqueuable(ref)); 1366 ref->SetFieldObject(zombie_offset, referent, false); 1367 heap_->ClearReferenceReferent(ref); 1368 heap_->EnqueueReference(ref, &cleared_reference_list_); 1369 has_enqueued = true; 1370 } 1371 } 1372 if (has_enqueued) { 1373 ProcessMarkStack(); 1374 } 1375 DCHECK(*list == NULL); 1376} 1377 1378// Process reference class instances and schedule finalizations. 
// Drains the four pending reference lists in policy order. clear_soft forces
// soft references to be cleared like weak references (no preservation pass),
// as does running in the zygote.
void MarkSweep::ProcessReferences(Object** soft_references, bool clear_soft,
                                  Object** weak_references,
                                  Object** finalizer_references,
                                  Object** phantom_references) {
  DCHECK(soft_references != NULL);
  DCHECK(weak_references != NULL);
  DCHECK(finalizer_references != NULL);
  DCHECK(phantom_references != NULL);

  // Unless we are in the zygote or required to clear soft references
  // with white references, preserve some white referents.
  if (!clear_soft && !Runtime::Current()->IsZygote()) {
    PreserveSomeSoftReferences(soft_references);
  }

  // Clear all remaining soft and weak references with white
  // referents.
  ClearWhiteReferences(soft_references);
  ClearWhiteReferences(weak_references);

  // Preserve all white objects with finalize methods and schedule
  // them for finalization.
  EnqueueFinalizerReferences(finalizer_references);

  // Clear all f-reachable soft and weak references with white
  // referents (objects resurrected transitively by finalizer marking).
  ClearWhiteReferences(soft_references);
  ClearWhiteReferences(weak_references);

  // Clear all phantom references with white referents.
  ClearWhiteReferences(phantom_references);

  // At this point all reference lists should be empty.
  DCHECK(*soft_references == NULL);
  DCHECK(*weak_references == NULL);
  DCHECK(*finalizer_references == NULL);
  DCHECK(*phantom_references == NULL);
}

// Restores each alloc space's original mark bitmap, undoing the live/mark
// bitmap binding done for sticky/partial collections.
void MarkSweep::UnBindBitmaps() {
  const std::vector<space::ContinuousSpace*>& spaces = GetHeap()->GetContinuousSpaces();
  // TODO: C++0x
  typedef std::vector<space::ContinuousSpace*>::const_iterator It;
  for (It it = spaces.begin(), end = spaces.end(); it != end; ++it) {
    space::ContinuousSpace* space = *it;
    if (space->IsDlMallocSpace()) {
      space::DlMallocSpace* alloc_space = space->AsDlMallocSpace();
      if (alloc_space->temp_bitmap_.get() != NULL) {
        // At this point, the temp_bitmap holds our old mark bitmap.
        accounting::SpaceBitmap* new_bitmap = alloc_space->temp_bitmap_.release();
        GetHeap()->GetMarkBitmap()->ReplaceBitmap(alloc_space->mark_bitmap_.get(), new_bitmap);
        // The bound mark bitmap was aliased to the live bitmap; releasing it
        // here must hand back exactly that live bitmap.
        CHECK_EQ(alloc_space->mark_bitmap_.release(), alloc_space->live_bitmap_.get());
        alloc_space->mark_bitmap_.reset(new_bitmap);
        DCHECK(alloc_space->temp_bitmap_.get() == NULL);
      }
    }
  }
}

// Final GC phase: enqueues cleared references, runs post-GC verification,
// grows the heap for the target utilization, updates cumulative statistics,
// and resets per-collection marking state.
void MarkSweep::FinishPhase() {
  // Can't enqueue references if we hold the mutator lock.
  Object* cleared_references = GetClearedReferences();
  Heap* heap = GetHeap();
  heap->EnqueueClearedReferences(&cleared_references);

  heap->PostGcVerification(this);

  timings_.NewSplit("GrowForUtilization");
  heap->GrowForUtilization(GetGcType(), GetDurationNs());

  timings_.NewSplit("RequestHeapTrim");
  heap->RequestHeapTrim();

  // Update the cumulative statistics
  total_time_ns_ += GetDurationNs();
  total_paused_time_ns_ += std::accumulate(GetPauseTimes().begin(), GetPauseTimes().end(), 0,
                                           std::plus<uint64_t>());
  total_freed_objects_ += GetFreedObjects();
  total_freed_bytes_ += GetFreedBytes();

  // Ensure that the mark stack is empty.
  CHECK(mark_stack_->IsEmpty());

  if (kCountScannedTypes) {
    VLOG(gc) << "MarkSweep scanned classes=" << class_count_ << " arrays=" << array_count_
             << " other=" << other_count_;
  }

  if (kCountTasks) {
    VLOG(gc) << "Total number of work chunks allocated: " << work_chunks_created_;
  }

  if (kMeasureOverhead) {
    VLOG(gc) << "Overhead time " << PrettyDuration(overhead_time_);
  }

  if (kProfileLargeObjects) {
    VLOG(gc) << "Large objects tested " << large_object_test_ << " marked " << large_object_mark_;
  }

  if (kCountClassesMarked) {
    VLOG(gc) << "Classes marked " << classes_marked_;
  }

  if (kCountJavaLangRefs) {
    VLOG(gc) << "References scanned " << reference_count_;
  }

  // Update the cumulative loggers.
  cumulative_timings_.Start();
  cumulative_timings_.AddLogger(timings_);
  cumulative_timings_.End();

  // Clear all of the spaces' mark bitmaps.
  const std::vector<space::ContinuousSpace*>& spaces = GetHeap()->GetContinuousSpaces();
  // TODO: C++0x
  typedef std::vector<space::ContinuousSpace*>::const_iterator It;
  for (It it = spaces.begin(), end = spaces.end(); it != end; ++it) {
    space::ContinuousSpace* space = *it;
    // Immune ("never collect") spaces keep their bitmaps across collections.
    if (space->GetGcRetentionPolicy() != space::kGcRetentionPolicyNeverCollect) {
      space->GetMarkBitmap()->Clear();
    }
  }
  mark_stack_->Reset();

  // Reset the marked large objects.
  space::LargeObjectSpace* large_objects = GetHeap()->GetLargeObjectsSpace();
  large_objects->GetMarkObjects()->Clear();
}

}  // namespace collector
}  // namespace gc
}  // namespace art