semi_space.cc revision 05e713a3a009a0825826ce6d494582fcd6dd6a8d
/*
 * Copyright (C) 2013 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "semi_space.h"

#include <sys/mman.h>  // For mprotect.

#include <climits>
#include <functional>
#include <numeric>
#include <vector>

#include "base/logging.h"
#include "base/macros.h"
#include "base/mutex-inl.h"
#include "base/timing_logger.h"
#include "gc/accounting/heap_bitmap.h"
#include "gc/accounting/mod_union_table.h"
#include "gc/accounting/space_bitmap-inl.h"
#include "gc/heap.h"
#include "gc/space/bump_pointer_space.h"
#include "gc/space/bump_pointer_space-inl.h"
#include "gc/space/image_space.h"
#include "gc/space/large_object_space.h"
#include "gc/space/space-inl.h"
#include "indirect_reference_table.h"
#include "intern_table.h"
#include "jni_internal.h"
#include "mark_sweep-inl.h"
#include "monitor.h"
#include "mirror/art_field.h"
#include "mirror/art_field-inl.h"
#include "mirror/class-inl.h"
#include "mirror/class_loader.h"
#include "mirror/dex_cache.h"
#include "mirror/object-inl.h"
#include "mirror/object_array.h"
#include "mirror/object_array-inl.h"
#include "runtime.h"
#include "semi_space-inl.h"
#include "thread-inl.h"
#include "thread_list.h"
#include "verifier/method_verifier.h"

using ::art::mirror::Class;
using ::art::mirror::Object;

namespace art {
namespace gc {
namespace collector {

static constexpr bool kProtectFromSpace = true;
static constexpr bool kResetFromSpace = true;
// TODO: move these to a new file as a new garbage collector?
// If true, 'promote' some objects from the bump pointer spaces to the non-moving space.
static constexpr bool kEnableSimplePromo = false;
// If true, collect only the bump pointer spaces in some collections, as opposed to the whole heap.
static constexpr bool kEnableBumpPointerSpacesOnlyCollection = false;

// TODO: Unduplicate logic.
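// Adds the given space to the immune region, binding its live bitmap to its mark bitmap first if
// necessary.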
void SemiSpace::ImmuneSpace(space::ContinuousSpace* space) {
  // Bind live to mark bitmap if necessary.
  if (space->GetLiveBitmap() != space->GetMarkBitmap()) {
    BindLiveToMarkBitmap(space);
  }
  // Add the space to the immune region.
  if (immune_begin_ == nullptr) {
    DCHECK(immune_end_ == nullptr);
    immune_begin_ = reinterpret_cast<Object*>(space->Begin());
    immune_end_ = reinterpret_cast<Object*>(space->End());
  } else {
    const space::ContinuousSpace* prev_space = nullptr;
    // Find out if the previous space is immune.
    for (space::ContinuousSpace* cur_space : GetHeap()->GetContinuousSpaces()) {
      if (cur_space == space) {
        break;
      }
      prev_space = cur_space;
    }
    // If the previous space was immune, then extend the immune region. Relies on continuous spaces
    // being sorted by Heap::AddContinuousSpace.
    if (prev_space != nullptr && IsImmuneSpace(prev_space)) {
      immune_begin_ = std::min(reinterpret_cast<Object*>(space->Begin()), immune_begin_);
      // Use Limit() instead of End() because otherwise, if
      // kEnableBumpPointerSpacesOnlyCollection is true, the alloc
      // space might expand due to promotion and the sense of immunity
      // may change in the middle of a GC.
      immune_end_ = std::max(reinterpret_cast<Object*>(space->Limit()), immune_end_);
    }
  }
}

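// Binds bitmaps and marks as immune every space that this collection will not collect.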
void SemiSpace::BindBitmaps() {
  timings_.StartSplit("BindBitmaps");
  WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
  // Mark all of the spaces we never collect as immune.
  for (const auto& space : GetHeap()->GetContinuousSpaces()) {
    if (space->GetLiveBitmap() != nullptr) {
      if (space == to_space_) {
        BindLiveToMarkBitmap(to_space_);
      } else if (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyNeverCollect
                 || space->GetGcRetentionPolicy() == space::kGcRetentionPolicyFullCollect
                 // Add the main free list space and the non-moving
                 // space to the immune space if this is a bump pointer
                 // space only collection.
                 || (kEnableBumpPointerSpacesOnlyCollection &&
                     !whole_heap_collection_ && (space == GetHeap()->GetNonMovingSpace() ||
                                                 space == GetHeap()->GetPrimaryFreeListSpace()))) {
        ImmuneSpace(space);
      }
    }
  }
  if (kEnableBumpPointerSpacesOnlyCollection && !whole_heap_collection_) {
    // We won't collect the large object space if this is a bump pointer space only collection.
    is_large_object_space_immune_ = true;
    GetHeap()->GetLargeObjectsSpace()->CopyLiveToMarked();
  }
  timings_.EndSplit();
}

SemiSpace::SemiSpace(Heap* heap, const std::string& name_prefix)
    : GarbageCollector(heap,
                       name_prefix + (name_prefix.empty() ? "" : " ") + "marksweep + semispace"),
      mark_stack_(nullptr),
      immune_begin_(nullptr),
      immune_end_(nullptr),
      is_large_object_space_immune_(false),
      to_space_(nullptr),
      from_space_(nullptr),
      self_(nullptr),
      last_gc_to_space_end_(nullptr),
      bytes_promoted_(0),
      whole_heap_collection_(true),
      whole_heap_collection_interval_counter_(0) {
}

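// Resets per-collection state, runs pre-GC verification, and caches the to-space live bitmap.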
void SemiSpace::InitializePhase() {
  timings_.Reset();
  TimingLogger::ScopedSplit split("InitializePhase", &timings_);
  mark_stack_ = heap_->mark_stack_.get();
  DCHECK(mark_stack_ != nullptr);
  immune_begin_ = nullptr;
  immune_end_ = nullptr;
  is_large_object_space_immune_ = false;
  self_ = Thread::Current();
  // Do any pre GC verification.
  timings_.NewSplit("PreGcVerification");
  heap_->PreGcVerification(this);
  // Set the initial bitmap.
  to_space_live_bitmap_ = to_space_->GetLiveBitmap();
}

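// Delegates reference processing to the heap, supplying the forwarding-address and
// recursive-mark callbacks.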
void SemiSpace::ProcessReferences(Thread* self) {
  TimingLogger::ScopedSplit split("ProcessReferences", &timings_);
  WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
  GetHeap()->ProcessReferences(timings_, clear_soft_references_, &MarkedForwardingAddressCallback,
                               &RecursiveMarkObjectCallback, this);
}

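// Runs with mutators suspended: swaps the semi-spaces, processes and clears cards, swaps the
// allocation stacks, marks the roots, and then recursively marks everything reachable from them.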
void SemiSpace::MarkingPhase() {
  if (kEnableBumpPointerSpacesOnlyCollection) {
    if (clear_soft_references_) {
      // If we want to collect as much as possible, collect the whole
      // heap (and reset the interval counter to be consistent.)
      whole_heap_collection_ = true;
      whole_heap_collection_interval_counter_ = 0;
    }
    if (whole_heap_collection_) {
      VLOG(heap) << "Whole heap collection";
    } else {
      VLOG(heap) << "Bump pointer space only collection";
    }
  }
  Thread* self = Thread::Current();
  Locks::mutator_lock_->AssertExclusiveHeld(self);
  TimingLogger::ScopedSplit split("MarkingPhase", &timings_);
  // Need to do this with mutators paused so that somebody doesn't accidentally allocate into the
  // wrong space.
  heap_->SwapSemiSpaces();
  if (kEnableSimplePromo) {
    // If last_gc_to_space_end_ is out of the bounds of the from-space
    // (the to-space from the last GC), then point it to the beginning of
    // the from-space. For example, the very first GC or the
    // pre-zygote compaction.
    if (!from_space_->HasAddress(reinterpret_cast<mirror::Object*>(last_gc_to_space_end_))) {
      last_gc_to_space_end_ = from_space_->Begin();
    }
    // Reset this before the marking starts below.
    bytes_promoted_ = 0;
  }
  // Assume the cleared space is already empty.
  BindBitmaps();
  // Process dirty cards and add dirty cards to mod-union tables.
  heap_->ProcessCards(timings_);
  // Clear the whole card table since we cannot get any additional dirty cards during the
  // paused GC. This saves memory but only works for stop-the-world collectors.
  timings_.NewSplit("ClearCardTable");
  heap_->GetCardTable()->ClearCardTable();
  // Need to do this before the checkpoint since we don't want any threads to add references to
  // the live stack during the recursive mark.
  timings_.NewSplit("SwapStacks");
  heap_->SwapStacks();
  WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
  MarkRoots();
  // Mark roots of immune spaces.
  UpdateAndMarkModUnion();
  // Recursively mark remaining objects.
  MarkReachableObjects();
}

bool SemiSpace::IsImmuneSpace(const space::ContinuousSpace* space) const {
  return
    immune_begin_ <= reinterpret_cast<Object*>(space->Begin()) &&
    immune_end_ >= reinterpret_cast<Object*>(space->End());
}

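// For each immune space, updates its mod union table and marks the references it holds into
// other spaces.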
void SemiSpace::UpdateAndMarkModUnion() {
  for (auto& space : heap_->GetContinuousSpaces()) {
    // If the space is immune then we need to mark the references to other spaces.
    if (IsImmuneSpace(space)) {
      accounting::ModUnionTable* table = heap_->FindModUnionTableFromSpace(space);
      if (table != nullptr) {
        // TODO: Improve naming.
        TimingLogger::ScopedSplit split(
            space->IsZygoteSpace() ? "UpdateAndMarkZygoteModUnionTable" :
                                     "UpdateAndMarkImageModUnionTable",
                                     &timings_);
        table->UpdateAndMarkReferences(MarkRootCallback, this);
      } else {
        // In a bump pointer space only collection, the non-moving
        // space is added to the immune space, but the non-moving
        // space doesn't have a mod union table. Instead, its live
        // bitmap will be scanned later in MarkReachableObjects().
        DCHECK(kEnableBumpPointerSpacesOnlyCollection && !whole_heap_collection_ &&
               (space == heap_->GetNonMovingSpace() || space == heap_->GetPrimaryFreeListSpace()));
      }
    }
  }
}

class SemiSpaceScanObjectVisitor {
 public:
  explicit SemiSpaceScanObjectVisitor(SemiSpace* ss) : semi_space_(ss) {}
  void operator()(Object* obj) const NO_THREAD_SAFETY_ANALYSIS {
    // TODO: fix NO_THREAD_SAFETY_ANALYSIS. ScanObject() requires an
    // exclusive lock on the mutator lock, but
    // SpaceBitmap::VisitMarkedRange() only requires the shared lock.
    DCHECK(obj != nullptr);
    semi_space_->ScanObject(obj);
  }
 private:
  SemiSpace* semi_space_;
};

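// Marks the objects on the allocation stack as live, scans immune spaces without mod union
// tables and the immune large object space as roots, then drains the mark stack.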
void SemiSpace::MarkReachableObjects() {
  timings_.StartSplit("MarkStackAsLive");
  accounting::ObjectStack* live_stack = heap_->GetLiveStack();
  heap_->MarkAllocStackAsLive(live_stack);
  live_stack->Reset();
  timings_.EndSplit();

  for (auto& space : heap_->GetContinuousSpaces()) {
    // If the space is immune and has no mod union table (the
    // non-moving space when the bump pointer space only collection is
    // enabled), then we need to scan its live bitmap as roots
    // (including the objects on the live stack which have just been
    // marked in the live bitmap above in MarkAllocStackAsLive().)
    if (IsImmuneSpace(space) && heap_->FindModUnionTableFromSpace(space) == nullptr) {
      DCHECK(kEnableBumpPointerSpacesOnlyCollection && !whole_heap_collection_ &&
             (space == GetHeap()->GetNonMovingSpace() || space == GetHeap()->GetPrimaryFreeListSpace()));
      accounting::SpaceBitmap* live_bitmap = space->GetLiveBitmap();
      SemiSpaceScanObjectVisitor visitor(this);
      live_bitmap->VisitMarkedRange(reinterpret_cast<uintptr_t>(space->Begin()),
                                    reinterpret_cast<uintptr_t>(space->End()),
                                    visitor);
    }
  }

  if (is_large_object_space_immune_) {
    DCHECK(kEnableBumpPointerSpacesOnlyCollection && !whole_heap_collection_);
    // When the large object space is immune, we need to scan the
    // large objects as roots because they contain references to their
    // classes (primitive array classes) that could move, even though
    // they don't contain any other references.
    space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
    accounting::ObjectSet* large_live_objects = large_object_space->GetLiveObjects();
    SemiSpaceScanObjectVisitor visitor(this);
    for (const Object* obj : large_live_objects->GetObjects()) {
      visitor(const_cast<Object*>(obj));
    }
  }

  // Recursively process the mark stack.
  ProcessMarkStack(true);
}

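// Processes references, sweeps system weaks and unmarked objects, records the freed memory, and
// clears and protects the from-space.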
void SemiSpace::ReclaimPhase() {
  TimingLogger::ScopedSplit split("ReclaimPhase", &timings_);
  Thread* self = Thread::Current();
  ProcessReferences(self);
  {
    ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
    SweepSystemWeaks();
  }
  // Record freed memory.
  uint64_t from_bytes = from_space_->GetBytesAllocated();
  uint64_t to_bytes = to_space_->GetBytesAllocated();
  uint64_t from_objects = from_space_->GetObjectsAllocated();
  uint64_t to_objects = to_space_->GetObjectsAllocated();
  CHECK_LE(to_objects, from_objects);
  int64_t freed_bytes = from_bytes - to_bytes;
  int64_t freed_objects = from_objects - to_objects;
  freed_bytes_.FetchAndAdd(freed_bytes);
  freed_objects_.FetchAndAdd(freed_objects);
  // Note: Freed bytes can be negative if we copy from a compacted space to a free-list backed
  // space.
  heap_->RecordFree(freed_objects, freed_bytes);
  timings_.StartSplit("PreSweepingGcVerification");
  heap_->PreSweepingGcVerification(this);
  timings_.EndSplit();

  {
    WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
    // Reclaim unmarked objects.
    Sweep(false);
    // Swap the live and mark bitmaps for each space that we modified. This is an
    // optimization that enables us to not clear live bits inside of the sweep. Only swaps unbound
    // bitmaps.
    timings_.StartSplit("SwapBitmaps");
    SwapBitmaps();
    timings_.EndSplit();
    // Unbind the live and mark bitmaps.
    UnBindBitmaps();
  }
  // Release the memory used by the from space.
  if (kResetFromSpace) {
    // Clear the from space.
    from_space_->Clear();
  }
  // Protect the from space.
  VLOG(heap)
      << "mprotect region " << reinterpret_cast<void*>(from_space_->Begin()) << " - "
      << reinterpret_cast<void*>(from_space_->Limit());
  if (kProtectFromSpace) {
    mprotect(from_space_->Begin(), from_space_->Capacity(), PROT_NONE);
  } else {
    mprotect(from_space_->Begin(), from_space_->Capacity(), PROT_READ);
  }

  if (kEnableSimplePromo) {
    // Record the end (top) of the to space so we can distinguish
    // between objects that were allocated since the last GC and the
    // older objects.
    last_gc_to_space_end_ = to_space_->End();
  }
}

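// Grows the mark stack to new_size while preserving its current contents.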
void SemiSpace::ResizeMarkStack(size_t new_size) {
  std::vector<Object*> temp(mark_stack_->Begin(), mark_stack_->End());
  CHECK_LE(mark_stack_->Size(), new_size);
  mark_stack_->Resize(new_size);
  for (const auto& obj : temp) {
    mark_stack_->PushBack(obj);
  }
}

inline void SemiSpace::MarkStackPush(Object* obj) {
  if (UNLIKELY(mark_stack_->Size() >= mark_stack_->Capacity())) {
    ResizeMarkStack(mark_stack_->Capacity() * 2);
  }
  // The object must be pushed onto the mark stack.
  mark_stack_->PushBack(obj);
}

// Rare case, probably not worth inlining since it will increase instruction cache miss rate.
bool SemiSpace::MarkLargeObject(const Object* obj) {
  // TODO: support >1 discontinuous space.
  space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
  DCHECK(large_object_space->Contains(obj));
  accounting::ObjectSet* large_objects = large_object_space->GetMarkObjects();
  if (UNLIKELY(!large_objects->Test(obj))) {
    large_objects->Set(obj);
    return true;
  }
  return false;
}

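// Copies a not-yet-forwarded from-space object to the to-space, or pseudo-promotes it to the
// main free list space when promotion is enabled, and returns the new address.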
mirror::Object* SemiSpace::MarkNonForwardedObject(mirror::Object* obj) {
  size_t object_size = obj->SizeOf();
  size_t bytes_allocated;
  mirror::Object* forward_address = nullptr;
  if (kEnableSimplePromo && reinterpret_cast<byte*>(obj) < last_gc_to_space_end_) {
    // If it was allocated before the last GC (older), move
    // (pseudo-promote) it to the main free list space (as a sort
    // of old generation.)
    size_t bytes_promoted;
    space::MallocSpace* promo_dest_space = GetHeap()->GetPrimaryFreeListSpace();
    forward_address = promo_dest_space->Alloc(self_, object_size, &bytes_promoted);
    if (forward_address == nullptr) {
      // If out of space, fall back to the to-space.
      forward_address = to_space_->Alloc(self_, object_size, &bytes_allocated);
    } else {
      GetHeap()->num_bytes_allocated_.FetchAndAdd(bytes_promoted);
      bytes_promoted_ += bytes_promoted;
      // Handle the bitmap marking.
      accounting::SpaceBitmap* live_bitmap = promo_dest_space->GetLiveBitmap();
      DCHECK(live_bitmap != nullptr);
      accounting::SpaceBitmap* mark_bitmap = promo_dest_space->GetMarkBitmap();
      DCHECK(mark_bitmap != nullptr);
      DCHECK(!live_bitmap->Test(forward_address));
      if (kEnableBumpPointerSpacesOnlyCollection && !whole_heap_collection_) {
        // If collecting the bump pointer spaces only, live_bitmap == mark_bitmap.
        DCHECK_EQ(live_bitmap, mark_bitmap);

        // In a bump pointer space only collection (with promotion
        // enabled), delay the live bitmap marking of the promoted
        // object until it's popped off the mark stack
        // (ProcessMarkStack()). The rationale: we may be in the
        // middle of scanning the objects in the promo destination
        // space for non-moving-space-to-bump-pointer-space references
        // by iterating over the marked bits of the live bitmap
        // (MarkReachableObjects()). If we don't delay it (and instead
        // mark the promoted object here), the above promo destination
        // space scan could encounter the just-promoted object and
        // forward the references in the promoted object's fields even
        // though it is pushed onto the mark stack. If this happens,
        // the promoted object would be in an inconsistent state, that
        // is, it's on the mark stack (gray) but its fields are
        // already forwarded (black), which would cause a
        // DCHECK(!to_space_->HasAddress(obj)) failure below.
      } else {
        // Mark forward_address on the live bit map.
        live_bitmap->Set(forward_address);
        // Mark forward_address on the mark bit map.
        DCHECK(!mark_bitmap->Test(forward_address));
        mark_bitmap->Set(forward_address);
      }
    }
    DCHECK(forward_address != nullptr);
  } else {
    // If it was allocated after the last GC (younger), copy it to the to-space.
    forward_address = to_space_->Alloc(self_, object_size, &bytes_allocated);
  }
  // Copy over the object and add it to the mark stack since we still need to update its
  // references.
  memcpy(reinterpret_cast<void*>(forward_address), obj, object_size);
  if (to_space_live_bitmap_ != nullptr) {
    to_space_live_bitmap_->Set(forward_address);
  }
  DCHECK(to_space_->HasAddress(forward_address) ||
         (kEnableSimplePromo && GetHeap()->GetPrimaryFreeListSpace()->HasAddress(forward_address)));
  return forward_address;
}

// Used to mark and copy objects. Any newly-marked objects that are in the from space get moved
// to the to-space and have their forwarding address updated. Objects which have been newly marked
// are pushed onto the mark stack.
Object* SemiSpace::MarkObject(Object* obj) {
  Object* forward_address = obj;
  if (obj != nullptr && !IsImmune(obj)) {
    if (from_space_->HasAddress(obj)) {
      forward_address = GetForwardingAddressInFromSpace(obj);
      // If the object has not already been moved, copy it and install a forwarding address.
      if (forward_address == nullptr) {
        forward_address = MarkNonForwardedObject(obj);
        DCHECK(forward_address != nullptr);
        // Make sure to only update the forwarding address AFTER you copy the object so that the
        // monitor word doesn't get stomped over.
        obj->SetLockWord(LockWord::FromForwardingAddress(
            reinterpret_cast<size_t>(forward_address)));
        // Push the object onto the mark stack for later processing.
        MarkStackPush(forward_address);
      }
      // TODO: Do we need this 'if' in the else branch?
    } else {
      accounting::SpaceBitmap* object_bitmap = heap_->GetMarkBitmap()->GetContinuousSpaceBitmap(obj);
      if (LIKELY(object_bitmap != nullptr)) {
        if (kEnableBumpPointerSpacesOnlyCollection) {
          // In a bump pointer space only collection, we should not
          // reach here as we don't/won't mark the objects in the
          // non-moving space (except for the promoted objects.)  Note
          // the non-moving space is added to the immune space.
          DCHECK(whole_heap_collection_);
        }
        // This object was not previously marked.
        if (!object_bitmap->Test(obj)) {
          object_bitmap->Set(obj);
          MarkStackPush(obj);
        }
      } else {
        DCHECK(!to_space_->HasAddress(obj)) << "Marking object in to_space_";
        if (MarkLargeObject(obj)) {
          MarkStackPush(obj);
        }
      }
    }
  }
  return forward_address;
}

Object* SemiSpace::RecursiveMarkObjectCallback(Object* root, void* arg) {
  DCHECK(root != nullptr);
  DCHECK(arg != nullptr);
  SemiSpace* semi_space = reinterpret_cast<SemiSpace*>(arg);
  mirror::Object* ret = semi_space->MarkObject(root);
  semi_space->ProcessMarkStack(true);
  return ret;
}

Object* SemiSpace::MarkRootCallback(Object* root, void* arg) {
  DCHECK(root != nullptr);
  DCHECK(arg != nullptr);
  return reinterpret_cast<SemiSpace*>(arg)->MarkObject(root);
}

// Marks all objects in the root set.
void SemiSpace::MarkRoots() {
  timings_.StartSplit("MarkRoots");
  // TODO: Visit image roots as well?
  Runtime::Current()->VisitRoots(MarkRootCallback, this, false, true);
  timings_.EndSplit();
}

void SemiSpace::BindLiveToMarkBitmap(space::ContinuousSpace* space) {
  CHECK(space->IsMallocSpace());
  space::MallocSpace* alloc_space = space->AsMallocSpace();
  accounting::SpaceBitmap* live_bitmap = space->GetLiveBitmap();
  accounting::SpaceBitmap* mark_bitmap = alloc_space->BindLiveToMarkBitmap();
  GetHeap()->GetMarkBitmap()->ReplaceBitmap(mark_bitmap, live_bitmap);
}

mirror::Object* SemiSpace::MarkedForwardingAddressCallback(Object* object, void* arg) {
  return reinterpret_cast<SemiSpace*>(arg)->GetMarkedForwardAddress(object);
}

void SemiSpace::SweepSystemWeaks() {
  timings_.StartSplit("SweepSystemWeaks");
  Runtime::Current()->SweepSystemWeaks(MarkedForwardingAddressCallback, this);
  timings_.EndSplit();
}

bool SemiSpace::ShouldSweepSpace(space::MallocSpace* space) const {
  return space != from_space_ && space != to_space_ && !IsImmuneSpace(space);
}

void SemiSpace::Sweep(bool swap_bitmaps) {
  DCHECK(mark_stack_->IsEmpty());
  TimingLogger::ScopedSplit split("Sweep", &timings_);
  for (const auto& space : GetHeap()->GetContinuousSpaces()) {
    if (space->IsMallocSpace()) {
      space::MallocSpace* malloc_space = space->AsMallocSpace();
      if (!ShouldSweepSpace(malloc_space)) {
        continue;
      }
      TimingLogger::ScopedSplit split2(
          malloc_space->IsZygoteSpace() ? "SweepZygoteSpace" : "SweepAllocSpace", &timings_);
      size_t freed_objects = 0;
      size_t freed_bytes = 0;
      malloc_space->Sweep(swap_bitmaps, &freed_objects, &freed_bytes);
      heap_->RecordFree(freed_objects, freed_bytes);
      freed_objects_.FetchAndAdd(freed_objects);
      freed_bytes_.FetchAndAdd(freed_bytes);
    }
  }
  if (!is_large_object_space_immune_) {
    SweepLargeObjects(swap_bitmaps);
  }
}

void SemiSpace::SweepLargeObjects(bool swap_bitmaps) {
  DCHECK(!is_large_object_space_immune_);
  TimingLogger::ScopedSplit split("SweepLargeObjects", &timings_);
  size_t freed_objects = 0;
  size_t freed_bytes = 0;
  GetHeap()->GetLargeObjectsSpace()->Sweep(swap_bitmaps, &freed_objects, &freed_bytes);
  freed_large_objects_.FetchAndAdd(freed_objects);
  freed_large_object_bytes_.FetchAndAdd(freed_bytes);
  GetHeap()->RecordFree(freed_objects, freed_bytes);
}

// Process the "referent" field in a java.lang.ref.Reference.  If the referent has not yet been
// marked, put it on the appropriate list in the heap for later processing.
void SemiSpace::DelayReferenceReferent(mirror::Class* klass, Object* obj) {
  heap_->DelayReferenceReferent(klass, obj, MarkedForwardingAddressCallback, this);
}

// Visit all of the references of an object and update them.
void SemiSpace::ScanObject(Object* obj) {
  DCHECK(obj != nullptr);
  DCHECK(!from_space_->HasAddress(obj)) << "Scanning object " << obj << " in from space";
  MarkSweep::VisitObjectReferences(obj, [this](Object* obj, Object* ref, const MemberOffset& offset,
     bool /* is_static */) ALWAYS_INLINE_LAMBDA NO_THREAD_SAFETY_ANALYSIS {
    mirror::Object* new_address = MarkObject(ref);
    if (new_address != ref) {
      DCHECK(new_address != nullptr);
      // No need to mark the card since we are only updating the object address and not changing
      // the actual object it points to. Using SetFieldPtr is better in this case since it does
      // not dirty cards or use additional memory.
      obj->SetFieldPtr(offset, new_address, false);
    }
  }, kMovingClasses);
  mirror::Class* klass = obj->GetClass();
  if (UNLIKELY(klass->IsReferenceClass())) {
    DelayReferenceReferent(klass, obj);
  }
}

// Scan anything that's on the mark stack.
void SemiSpace::ProcessMarkStack(bool paused) {
  space::MallocSpace* promo_dest_space = nullptr;
  accounting::SpaceBitmap* live_bitmap = nullptr;
  if (kEnableSimplePromo && kEnableBumpPointerSpacesOnlyCollection && !whole_heap_collection_) {
    // In a bump pointer space only collection (with promotion
    // enabled), we delay the live-bitmap marking of promoted objects
    // from MarkObject() until this function.
    promo_dest_space = GetHeap()->GetPrimaryFreeListSpace();
    live_bitmap = promo_dest_space->GetLiveBitmap();
    DCHECK(live_bitmap != nullptr);
    accounting::SpaceBitmap* mark_bitmap = promo_dest_space->GetMarkBitmap();
    DCHECK(mark_bitmap != nullptr);
    DCHECK_EQ(live_bitmap, mark_bitmap);
  }
  timings_.StartSplit(paused ? "(paused)ProcessMarkStack" : "ProcessMarkStack");
  while (!mark_stack_->IsEmpty()) {
    Object* obj = mark_stack_->PopBack();
    if (kEnableSimplePromo && kEnableBumpPointerSpacesOnlyCollection && !whole_heap_collection_ &&
        promo_dest_space->HasAddress(obj)) {
      // obj has just been promoted. Mark the live bitmap for it,
      // which is delayed from MarkObject().
      DCHECK(!live_bitmap->Test(obj));
      live_bitmap->Set(obj);
    }
    ScanObject(obj);
  }
  timings_.EndSplit();
}

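// Returns the marked address of obj (the forwarding address for from-space objects), or nullptr
// if obj is not marked.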
inline Object* SemiSpace::GetMarkedForwardAddress(mirror::Object* obj) const
    SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
  // All immune objects are assumed marked.
  if (IsImmune(obj)) {
    return obj;
  }
  if (from_space_->HasAddress(obj)) {
    mirror::Object* forwarding_address = GetForwardingAddressInFromSpace(const_cast<Object*>(obj));
    return forwarding_address;  // Returns either the forwarding address or nullptr.
  } else if (to_space_->HasAddress(obj)) {
    // Should be unlikely.
    // Already forwarded, must be marked.
    return obj;
  }
  return heap_->GetMarkBitmap()->Test(obj) ? obj : nullptr;
}

void SemiSpace::UnBindBitmaps() {
  TimingLogger::ScopedSplit split("UnBindBitmaps", &timings_);
  for (const auto& space : GetHeap()->GetContinuousSpaces()) {
    if (space->IsMallocSpace()) {
      space::MallocSpace* alloc_space = space->AsMallocSpace();
      if (alloc_space->HasBoundBitmaps()) {
        alloc_space->UnBindBitmaps();
        heap_->GetMarkBitmap()->ReplaceBitmap(alloc_space->GetLiveBitmap(),
                                              alloc_space->GetMarkBitmap());
      }
    }
  }
}

void SemiSpace::SetToSpace(space::ContinuousMemMapAllocSpace* to_space) {
  DCHECK(to_space != nullptr);
  to_space_ = to_space;
}

void SemiSpace::SetFromSpace(space::ContinuousMemMapAllocSpace* from_space) {
  DCHECK(from_space != nullptr);
  from_space_ = from_space;
}

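// Runs post-GC verification, updates cumulative statistics, clears per-GC state, and decides
// whether the next collection will collect the whole heap.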
void SemiSpace::FinishPhase() {
  TimingLogger::ScopedSplit split("FinishPhase", &timings_);
  // Can't enqueue references if we hold the mutator lock.
  Heap* heap = GetHeap();
  timings_.NewSplit("PostGcVerification");
  heap->PostGcVerification(this);

  // Null the "to" and "from" spaces since compacting from one to the other isn't valid until
  // further action is done by the heap.
  to_space_ = nullptr;
  from_space_ = nullptr;

  // Update the cumulative statistics.
  total_freed_objects_ += GetFreedObjects() + GetFreedLargeObjects();
  total_freed_bytes_ += GetFreedBytes() + GetFreedLargeObjectBytes();

  // Ensure that the mark stack is empty.
  CHECK(mark_stack_->IsEmpty());

  // Update the cumulative loggers.
  cumulative_timings_.Start();
  cumulative_timings_.AddLogger(timings_);
  cumulative_timings_.End();

  // Clear all of the spaces' mark bitmaps.
  for (const auto& space : GetHeap()->GetContinuousSpaces()) {
    accounting::SpaceBitmap* bitmap = space->GetMarkBitmap();
    if (bitmap != nullptr &&
        space->GetGcRetentionPolicy() != space::kGcRetentionPolicyNeverCollect) {
      bitmap->Clear();
    }
  }
  mark_stack_->Reset();

  // Reset the marked large objects.
  space::LargeObjectSpace* large_objects = GetHeap()->GetLargeObjectsSpace();
  large_objects->GetMarkObjects()->Clear();

  if (kEnableBumpPointerSpacesOnlyCollection) {
    // Decide whether to do a whole heap collection or a bump pointer
    // space only collection at the next collection by updating
    // whole_heap_collection_. Enable whole_heap_collection_ once every
    // kDefaultWholeHeapCollectionInterval collections.
    if (!whole_heap_collection_) {
      --whole_heap_collection_interval_counter_;
      DCHECK_GE(whole_heap_collection_interval_counter_, 0);
      if (whole_heap_collection_interval_counter_ == 0) {
        whole_heap_collection_ = true;
      }
    } else {
      DCHECK_EQ(whole_heap_collection_interval_counter_, 0);
      whole_heap_collection_interval_counter_ = kDefaultWholeHeapCollectionInterval;
      whole_heap_collection_ = false;
    }
  }
}

749
750}  // namespace collector
751}  // namespace gc
752}  // namespace art
753