semi_space.cc revision ec05007f8619f8b0cc868d06731e07f84bb74c5b
/*
 * Copyright (C) 2013 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "semi_space.h"

#include <sys/mman.h>

#include <climits>
#include <cstring>
#include <functional>
#include <numeric>
#include <vector>

#include "base/logging.h"
#include "base/macros.h"
#include "base/mutex-inl.h"
#include "base/timing_logger.h"
#include "gc/accounting/heap_bitmap.h"
#include "gc/accounting/mod_union_table.h"
#include "gc/accounting/space_bitmap-inl.h"
#include "gc/heap.h"
#include "gc/space/bump_pointer_space.h"
#include "gc/space/bump_pointer_space-inl.h"
#include "gc/space/image_space.h"
#include "gc/space/large_object_space.h"
#include "gc/space/space-inl.h"
#include "indirect_reference_table.h"
#include "intern_table.h"
#include "jni_internal.h"
#include "mark_sweep-inl.h"
#include "mirror/art_field.h"
#include "mirror/art_field-inl.h"
#include "mirror/class-inl.h"
#include "mirror/class_loader.h"
#include "mirror/dex_cache.h"
#include "mirror/object-inl.h"
#include "mirror/object_array.h"
#include "mirror/object_array-inl.h"
#include "monitor.h"
#include "runtime.h"
#include "semi_space-inl.h"
#include "thread-inl.h"
#include "thread_list.h"
#include "verifier/method_verifier.h"

using ::art::mirror::Class;
using ::art::mirror::Object;

namespace art {
namespace gc {
namespace collector {

// If true, mprotect the from-space after the collection so that stale references into it
// fault immediately instead of silently reading reclaimed memory.
static constexpr bool kProtectFromSpace = true;
// If true, clear the from-space after the collection to release its memory.
static constexpr bool kResetFromSpace = true;
// If true, objects that survived the previous GC (i.e. were allocated before
// last_gc_to_space_end_) are pseudo-promoted into the non-moving space.
// TODO: move this to a new file as a new garbage collector?
static constexpr bool kEnableSimplePromo = false;

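// Spaces that are never collected are "immune": any object inside the contiguous region
// [immune_begin_, immune_end_) is treated as live without being traced or moved. References
// *out* of immune spaces are handled separately via mod-union tables (see
// UpdateAndMarkModUnion below).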
// TODO: Unduplicate logic.
void SemiSpace::ImmuneSpace(space::ContinuousSpace* space) {
  // Bind live to mark bitmap if necessary.
  if (space->GetLiveBitmap() != space->GetMarkBitmap()) {
    BindLiveToMarkBitmap(space);
  }
  // Add the space to the immune region.
  if (immune_begin_ == nullptr) {
    DCHECK(immune_end_ == nullptr);
    immune_begin_ = reinterpret_cast<Object*>(space->Begin());
    immune_end_ = reinterpret_cast<Object*>(space->End());
  } else {
    const space::ContinuousSpace* prev_space = nullptr;
    // Find out if the previous space is immune.
    for (space::ContinuousSpace* cur_space : GetHeap()->GetContinuousSpaces()) {
      if (cur_space == space) {
        break;
      }
      prev_space = cur_space;
    }
    // If previous space was immune, then extend the immune region. Relies on continuous spaces
    // being sorted by Heap::AddContinuousSpace.
    if (prev_space != nullptr && IsImmuneSpace(prev_space)) {
      immune_begin_ = std::min(reinterpret_cast<Object*>(space->Begin()), immune_begin_);
      immune_end_ = std::max(reinterpret_cast<Object*>(space->End()), immune_end_);
    }
  }
}

void SemiSpace::BindBitmaps() {
  timings_.StartSplit("BindBitmaps");
  WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
  // Mark all of the spaces we never collect as immune.
  for (const auto& space : GetHeap()->GetContinuousSpaces()) {
    if (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyNeverCollect
        || space->GetGcRetentionPolicy() == space::kGcRetentionPolicyFullCollect) {
      ImmuneSpace(space);
    }
  }
  timings_.EndSplit();
}

SemiSpace::SemiSpace(Heap* heap, const std::string& name_prefix)
    : GarbageCollector(heap,
                       name_prefix + (name_prefix.empty() ? "" : " ") + "marksweep + semispace"),
      mark_stack_(nullptr),
      immune_begin_(nullptr),
      immune_end_(nullptr),
      to_space_(nullptr),
      from_space_(nullptr),
      soft_reference_list_(nullptr),
      weak_reference_list_(nullptr),
      finalizer_reference_list_(nullptr),
      phantom_reference_list_(nullptr),
      cleared_reference_list_(nullptr),
      self_(nullptr),
      last_gc_to_space_end_(nullptr),
      bytes_promoted_(0) {
}

void SemiSpace::InitializePhase() {
  timings_.Reset();
  TimingLogger::ScopedSplit split("InitializePhase", &timings_);
  mark_stack_ = heap_->mark_stack_.get();
  DCHECK(mark_stack_ != nullptr);
  immune_begin_ = nullptr;
  immune_end_ = nullptr;
  soft_reference_list_ = nullptr;
  weak_reference_list_ = nullptr;
  finalizer_reference_list_ = nullptr;
  phantom_reference_list_ = nullptr;
  cleared_reference_list_ = nullptr;
  self_ = Thread::Current();
  // Do any pre-GC verification.
  timings_.NewSplit("PreGcVerification");
  heap_->PreGcVerification(this);
}

void SemiSpace::ProcessReferences(Thread* self) {
  TimingLogger::ScopedSplit split("ProcessReferences", &timings_);
  WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
  GetHeap()->ProcessReferences(timings_, clear_soft_references_, &MarkedForwardingAddressCallback,
                               &RecursiveMarkObjectCallback, this);
}

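// The marking phase runs with mutators suspended. It swaps the semispaces so that new
// allocations go to the (empty) to-space, clears the card table, and then marks the roots and
// everything transitively reachable, copying from-space objects to the to-space as they are
// marked.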
void SemiSpace::MarkingPhase() {
  Thread* self = Thread::Current();
  Locks::mutator_lock_->AssertExclusiveHeld(self);
  TimingLogger::ScopedSplit split("MarkingPhase", &timings_);
  // Need to do this with mutators paused so that somebody doesn't accidentally allocate into the
  // wrong space.
  heap_->SwapSemiSpaces();
  if (kEnableSimplePromo) {
    // If last_gc_to_space_end_ is out of the bounds of the from-space
    // (the to-space from the last GC), then point it to the beginning of
    // the from-space. This happens, for example, on the very first GC or
    // after the pre-zygote compaction.
    if (!from_space_->HasAddress(reinterpret_cast<mirror::Object*>(last_gc_to_space_end_))) {
      last_gc_to_space_end_ = from_space_->Begin();
    }
    // Reset this before the marking starts below.
    bytes_promoted_ = 0;
  }
  // Assume the cleared space is already empty.
  BindBitmaps();
  // Process dirty cards and add dirty cards to mod-union tables.
  heap_->ProcessCards(timings_);
  // Clear the whole card table since we cannot get any additional dirty cards during the
  // paused GC. This saves memory but only works for stop-the-world collectors.
  timings_.NewSplit("ClearCardTable");
  heap_->GetCardTable()->ClearCardTable();
  // Need to do this before the checkpoint since we don't want any threads to add references to
  // the live stack during the recursive mark.
  timings_.NewSplit("SwapStacks");
  heap_->SwapStacks();
  WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
  MarkRoots();
  // Mark roots of immune spaces.
  UpdateAndMarkModUnion();
  // Recursively mark remaining objects.
  MarkReachableObjects();
}

bool SemiSpace::IsImmuneSpace(const space::ContinuousSpace* space) const {
  return immune_begin_ <= reinterpret_cast<Object*>(space->Begin()) &&
         immune_end_ >= reinterpret_cast<Object*>(space->End());
}

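// Immune spaces are never scanned in full. Instead, each one has a mod-union table recording
// which of its cards were dirtied since the last GC; updating the table and marking through it
// catches every reference from the immune space into the collected spaces.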
void SemiSpace::UpdateAndMarkModUnion() {
  for (auto& space : heap_->GetContinuousSpaces()) {
    // If the space is immune then we need to mark the references to other spaces.
    if (IsImmuneSpace(space)) {
      accounting::ModUnionTable* table = heap_->FindModUnionTableFromSpace(space);
      CHECK(table != nullptr);
      // TODO: Improve naming.
      TimingLogger::ScopedSplit split(
          space->IsZygoteSpace() ? "UpdateAndMarkZygoteModUnionTable" :
                                   "UpdateAndMarkImageModUnionTable",
          &timings_);
      table->UpdateAndMarkReferences(MarkRootCallback, this);
    }
  }
}

void SemiSpace::MarkReachableObjects() {
  timings_.StartSplit("MarkStackAsLive");
  accounting::ObjectStack* live_stack = heap_->GetLiveStack();
  heap_->MarkAllocStackAsLive(live_stack);
  live_stack->Reset();
  timings_.EndSplit();
  // Recursively process the mark stack.
  ProcessMarkStack(true);
}

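// The reclaim phase processes the pending soft/weak/finalizer/phantom references, sweeps
// system weaks and unmarked objects, and then releases the from-space. The freed byte and
// object counts for the semispaces are computed as the difference between what the from-space
// held and what was copied into the to-space.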
void SemiSpace::ReclaimPhase() {
  TimingLogger::ScopedSplit split("ReclaimPhase", &timings_);
  Thread* self = Thread::Current();
  ProcessReferences(self);
  {
    ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
    SweepSystemWeaks();
  }
  // Record freed memory.
  int from_bytes = from_space_->GetBytesAllocated();
  int to_bytes = to_space_->GetBytesAllocated();
  int from_objects = from_space_->GetObjectsAllocated();
  int to_objects = to_space_->GetObjectsAllocated();
  int freed_bytes = from_bytes - to_bytes;
  int freed_objects = from_objects - to_objects;
  CHECK_GE(freed_bytes, 0);
  freed_bytes_.FetchAndAdd(freed_bytes);
  freed_objects_.FetchAndAdd(freed_objects);
  heap_->RecordFree(static_cast<size_t>(freed_objects), static_cast<size_t>(freed_bytes));

  timings_.StartSplit("PreSweepingGcVerification");
  heap_->PreSweepingGcVerification(this);
  timings_.EndSplit();

  {
    WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
    // Reclaim unmarked objects.
    Sweep(false);
    // Swap the live and mark bitmaps for each space that we modified. This is an optimization
    // that enables us to not clear live bits inside of the sweep. Only swaps unbound bitmaps.
    timings_.StartSplit("SwapBitmaps");
    SwapBitmaps();
    timings_.EndSplit();
    // Unbind the live and mark bitmaps.
    UnBindBitmaps();
  }
  // Release the memory used by the from-space.
  if (kResetFromSpace) {
    // Clearing from space.
    from_space_->Clear();
  }
  // Protect the from-space.
  VLOG(heap)
      << "mprotect region " << reinterpret_cast<void*>(from_space_->Begin()) << " - "
      << reinterpret_cast<void*>(from_space_->Limit());
  if (kProtectFromSpace) {
    mprotect(from_space_->Begin(), from_space_->Capacity(), PROT_NONE);
  } else {
    mprotect(from_space_->Begin(), from_space_->Capacity(), PROT_READ);
  }

  if (kEnableSimplePromo) {
    // Record the end (top) of the to-space so we can distinguish
    // between objects that were allocated since the last GC and the
    // older objects.
    last_gc_to_space_end_ = to_space_->End();
  }
}

void SemiSpace::ResizeMarkStack(size_t new_size) {
  std::vector<Object*> temp(mark_stack_->Begin(), mark_stack_->End());
  CHECK_LE(mark_stack_->Size(), new_size);
  mark_stack_->Resize(new_size);
  for (const auto& obj : temp) {
    mark_stack_->PushBack(obj);
  }
}

inline void SemiSpace::MarkStackPush(Object* obj) {
  if (UNLIKELY(mark_stack_->Size() >= mark_stack_->Capacity())) {
    ResizeMarkStack(mark_stack_->Capacity() * 2);
  }
  // The object can now be pushed onto the mark stack.
  mark_stack_->PushBack(obj);
}

// Rare case, probably not worth inlining since it will increase instruction cache miss rate.
bool SemiSpace::MarkLargeObject(const Object* obj) {
  // TODO: support >1 discontinuous space.
  space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
  accounting::SpaceSetMap* large_objects = large_object_space->GetMarkObjects();
  if (UNLIKELY(!large_objects->Test(obj))) {
    large_objects->Set(obj);
    return true;
  }
  return false;
}

// Used to mark and copy objects. Any newly-marked object that is in the from-space gets moved
// to the to-space and has its forwarding address updated. Objects which have been newly marked
// are pushed on the mark stack.
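// Note: the forwarding address is stored in the object's lock (monitor) word, so the object
// contents, lock word included, must be copied to the new location before the old lock word is
// overwritten with the forwarding address.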
Object* SemiSpace::MarkObject(Object* obj) {
  Object* ret = obj;
  if (obj != nullptr && !IsImmune(obj)) {
    if (from_space_->HasAddress(obj)) {
      // If the object has already been moved, this returns its forwarding address;
      // otherwise it returns nullptr.
      mirror::Object* forward_address = GetForwardingAddressInFromSpace(obj);
      if (forward_address == nullptr) {
        // The object has not been moved yet; move it and add it to the mark stack for processing.
        size_t object_size = obj->SizeOf();
        size_t bytes_allocated = 0;
        if (kEnableSimplePromo && reinterpret_cast<byte*>(obj) < last_gc_to_space_end_) {
          // If it was allocated before the last GC (older), move (pseudo-promote) it to
          // the non-moving space (as a sort of old generation).
          size_t bytes_promoted;
          space::MallocSpace* non_moving_space = GetHeap()->GetNonMovingSpace();
          forward_address = non_moving_space->Alloc(self_, object_size, &bytes_promoted);
          if (forward_address == nullptr) {
            // If out of space, fall back to the to-space.
            forward_address = to_space_->Alloc(self_, object_size, &bytes_allocated);
          } else {
            GetHeap()->num_bytes_allocated_.FetchAndAdd(bytes_promoted);
            bytes_promoted_ += bytes_promoted;
            // Mark forward_address on the live bitmap.
            accounting::SpaceBitmap* live_bitmap = non_moving_space->GetLiveBitmap();
            DCHECK(live_bitmap != nullptr);
            DCHECK(!live_bitmap->Test(forward_address));
            live_bitmap->Set(forward_address);
            // Mark forward_address on the mark bitmap.
            accounting::SpaceBitmap* mark_bitmap = non_moving_space->GetMarkBitmap();
            DCHECK(mark_bitmap != nullptr);
            DCHECK(!mark_bitmap->Test(forward_address));
            mark_bitmap->Set(forward_address);
          }
          DCHECK(forward_address != nullptr);
        } else {
          // If it was allocated after the last GC (younger), copy it to the to-space.
          forward_address = to_space_->Alloc(self_, object_size, &bytes_allocated);
        }
        // Copy over the object and add it to the mark stack since we still need to update its
        // references.
        memcpy(reinterpret_cast<void*>(forward_address), obj, object_size);
        // Make sure to only update the forwarding address AFTER copying the object so that the
        // monitor word doesn't get stomped over.
        obj->SetLockWord(LockWord::FromForwardingAddress(reinterpret_cast<size_t>(forward_address)));
        MarkStackPush(forward_address);
      } else {
        DCHECK(to_space_->HasAddress(forward_address) ||
               (kEnableSimplePromo && GetHeap()->GetNonMovingSpace()->HasAddress(forward_address)));
      }
      ret = forward_address;
      // TODO: Do we need this if in the else statement?
    } else {
      accounting::SpaceBitmap* object_bitmap = heap_->GetMarkBitmap()->GetContinuousSpaceBitmap(obj);
      if (LIKELY(object_bitmap != nullptr)) {
        if (!object_bitmap->Test(obj)) {
          // This object was not previously marked; mark it and push it on the mark stack.
          object_bitmap->Set(obj);
          MarkStackPush(obj);
        }
      } else {
        DCHECK(!to_space_->HasAddress(obj)) << "Marking object in to_space_";
        if (MarkLargeObject(obj)) {
          MarkStackPush(obj);
        }
      }
    }
  }
  return ret;
}

Object* SemiSpace::RecursiveMarkObjectCallback(Object* root, void* arg) {
  DCHECK(root != nullptr);
  DCHECK(arg != nullptr);
  SemiSpace* semi_space = reinterpret_cast<SemiSpace*>(arg);
  mirror::Object* ret = semi_space->MarkObject(root);
  semi_space->ProcessMarkStack(true);
  return ret;
}

Object* SemiSpace::MarkRootCallback(Object* root, void* arg) {
  DCHECK(root != nullptr);
  DCHECK(arg != nullptr);
  return reinterpret_cast<SemiSpace*>(arg)->MarkObject(root);
}

// Marks all objects in the root set.
void SemiSpace::MarkRoots() {
  timings_.StartSplit("MarkRoots");
  // TODO: Visit image roots as well?
  Runtime::Current()->VisitRoots(MarkRootCallback, this, false, true);
  timings_.EndSplit();
}

void SemiSpace::BindLiveToMarkBitmap(space::ContinuousSpace* space) {
  CHECK(space->IsMallocSpace());
  space::MallocSpace* alloc_space = space->AsMallocSpace();
  accounting::SpaceBitmap* live_bitmap = space->GetLiveBitmap();
  accounting::SpaceBitmap* mark_bitmap = alloc_space->BindLiveToMarkBitmap();
  GetHeap()->GetMarkBitmap()->ReplaceBitmap(mark_bitmap, live_bitmap);
}

mirror::Object* SemiSpace::GetForwardingAddress(mirror::Object* obj) {
  if (from_space_->HasAddress(obj)) {
    LOG(FATAL) << "Shouldn't happen!";
    return GetForwardingAddressInFromSpace(obj);
  }
  return obj;
}

mirror::Object* SemiSpace::MarkedForwardingAddressCallback(Object* object, void* arg) {
  return reinterpret_cast<SemiSpace*>(arg)->GetMarkedForwardAddress(object);
}

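// Sweep system weaks (e.g. the intern table and JNI weak globals) by mapping each entry
// through MarkedForwardingAddressCallback: entries whose referent is unmarked are cleared,
// and surviving entries are updated to their forwarded addresses.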
void SemiSpace::SweepSystemWeaks() {
  timings_.StartSplit("SweepSystemWeaks");
  Runtime::Current()->SweepSystemWeaks(MarkedForwardingAddressCallback, this);
  timings_.EndSplit();
}

struct SweepCallbackContext {
  SemiSpace* mark_sweep;
  space::AllocSpace* space;
  Thread* self;
};

void SemiSpace::SweepCallback(size_t num_ptrs, Object** ptrs, void* arg) {
  SweepCallbackContext* context = static_cast<SweepCallbackContext*>(arg);
  SemiSpace* gc = context->mark_sweep;
  Heap* heap = gc->GetHeap();
  space::AllocSpace* space = context->space;
  Thread* self = context->self;
  Locks::heap_bitmap_lock_->AssertExclusiveHeld(self);
  size_t freed_bytes = space->FreeList(self, num_ptrs, ptrs);
  heap->RecordFree(num_ptrs, freed_bytes);
  gc->freed_objects_.FetchAndAdd(num_ptrs);
  gc->freed_bytes_.FetchAndAdd(freed_bytes);
}

void SemiSpace::ZygoteSweepCallback(size_t num_ptrs, Object** ptrs, void* arg) {
  SweepCallbackContext* context = static_cast<SweepCallbackContext*>(arg);
  Locks::heap_bitmap_lock_->AssertExclusiveHeld(context->self);
  Heap* heap = context->mark_sweep->GetHeap();
  // We don't free any actual memory to avoid dirtying the shared zygote pages.
  for (size_t i = 0; i < num_ptrs; ++i) {
    Object* obj = static_cast<Object*>(ptrs[i]);
    heap->GetLiveBitmap()->Clear(obj);
    heap->GetCardTable()->MarkCard(obj);
  }
}

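// Sweep unmarked objects in the malloc spaces. The semispaces themselves are skipped: every
// live from-space object has already been copied out, and everything in the to-space is live
// by construction, so the from-space is instead reclaimed wholesale in ReclaimPhase.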
void SemiSpace::Sweep(bool swap_bitmaps) {
  DCHECK(mark_stack_->IsEmpty());
  TimingLogger::ScopedSplit split("Sweep", &timings_);
  for (const auto& space : GetHeap()->GetContinuousSpaces()) {
    if (space->IsMallocSpace() && space != from_space_ && space != to_space_) {
      space::MallocSpace* malloc_space = space->AsMallocSpace();
      TimingLogger::ScopedSplit space_split(
          malloc_space->IsZygoteSpace() ? "SweepZygoteSpace" : "SweepAllocSpace", &timings_);
      size_t freed_objects = 0;
      size_t freed_bytes = 0;
      malloc_space->Sweep(swap_bitmaps, &freed_objects, &freed_bytes);
      heap_->RecordFree(freed_objects, freed_bytes);
      freed_objects_.FetchAndAdd(freed_objects);
      freed_bytes_.FetchAndAdd(freed_bytes);
    }
  }
  SweepLargeObjects(swap_bitmaps);
}

void SemiSpace::SweepLargeObjects(bool swap_bitmaps) {
  TimingLogger::ScopedSplit split("SweepLargeObjects", &timings_);
  // Sweep large objects.
  space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
  accounting::SpaceSetMap* large_live_objects = large_object_space->GetLiveObjects();
  accounting::SpaceSetMap* large_mark_objects = large_object_space->GetMarkObjects();
  if (swap_bitmaps) {
    std::swap(large_live_objects, large_mark_objects);
  }
  // O(n*log(n)) but hopefully there are not too many large objects.
  size_t freed_objects = 0;
  size_t freed_bytes = 0;
  Thread* self = Thread::Current();
  for (const Object* obj : large_live_objects->GetObjects()) {
    if (!large_mark_objects->Test(obj)) {
      freed_bytes += large_object_space->Free(self, const_cast<Object*>(obj));
      ++freed_objects;
    }
  }
  freed_large_objects_.FetchAndAdd(freed_objects);
  freed_large_object_bytes_.FetchAndAdd(freed_bytes);
  GetHeap()->RecordFree(freed_objects, freed_bytes);
}

// Process the "referent" field in a java.lang.ref.Reference. If the referent has not yet been
// marked, put it on the appropriate list in the heap for later processing.
void SemiSpace::DelayReferenceReferent(mirror::Class* klass, Object* obj) {
  heap_->DelayReferenceReferent(klass, obj, MarkedForwardingAddressCallback, this);
}

// Visit all of the references of an object, updating each one to point at the forwarded
// address of the object it references.
void SemiSpace::ScanObject(Object* obj) {
  DCHECK(obj != nullptr);
  DCHECK(!from_space_->HasAddress(obj)) << "Scanning object " << obj << " in from space";
  MarkSweep::VisitObjectReferences(obj, [this](Object* obj, Object* ref, const MemberOffset& offset,
     bool /* is_static */) ALWAYS_INLINE_LAMBDA NO_THREAD_SAFETY_ANALYSIS {
    mirror::Object* new_address = MarkObject(ref);
    if (new_address != ref) {
      DCHECK(new_address != nullptr);
      // We don't need to mark the card here since we are only updating the object's address,
      // not changing the objects that it points to. Using SetFieldPtr is better in this case
      // since it does not dirty cards or use additional memory.
      obj->SetFieldPtr(offset, new_address, false);
    }
  }, kMovingClasses);
  mirror::Class* klass = obj->GetClass();
  if (UNLIKELY(klass->IsReferenceClass())) {
    DelayReferenceReferent(klass, obj);
  }
}

// Scan anything that's on the mark stack.
void SemiSpace::ProcessMarkStack(bool paused) {
  timings_.StartSplit(paused ? "(paused)ProcessMarkStack" : "ProcessMarkStack");
  while (!mark_stack_->IsEmpty()) {
    ScanObject(mark_stack_->PopBack());
  }
  timings_.EndSplit();
}

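// Returns the marked, possibly forwarded, address of obj, or nullptr if obj is not marked:
// immune and to-space objects are returned unchanged, from-space objects are mapped through
// their forwarding address, and everything else is looked up in the mark bitmaps.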
inline Object* SemiSpace::GetMarkedForwardAddress(mirror::Object* obj) const
    SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
  // All immune objects are assumed marked.
  if (IsImmune(obj)) {
    return obj;
  }
  if (from_space_->HasAddress(obj)) {
    mirror::Object* forwarding_address = GetForwardingAddressInFromSpace(obj);
    // If the object is forwarded then it MUST be marked.
    DCHECK(forwarding_address == nullptr || to_space_->HasAddress(forwarding_address) ||
           (kEnableSimplePromo && GetHeap()->GetNonMovingSpace()->HasAddress(forwarding_address)));
    if (forwarding_address != nullptr) {
      return forwarding_address;
    }
    // The object must not be marked; return nullptr.
    return nullptr;
  } else if (to_space_->HasAddress(obj)) {
    // Already forwarded, must be marked.
    return obj;
  }
  return heap_->GetMarkBitmap()->Test(obj) ? obj : nullptr;
}

void SemiSpace::UnBindBitmaps() {
  TimingLogger::ScopedSplit split("UnBindBitmaps", &timings_);
  for (const auto& space : GetHeap()->GetContinuousSpaces()) {
    if (space->IsMallocSpace()) {
      space::MallocSpace* alloc_space = space->AsMallocSpace();
      if (alloc_space->HasBoundBitmaps()) {
        alloc_space->UnBindBitmaps();
        heap_->GetMarkBitmap()->ReplaceBitmap(alloc_space->GetLiveBitmap(),
                                              alloc_space->GetMarkBitmap());
      }
    }
  }
}

void SemiSpace::SetToSpace(space::ContinuousMemMapAllocSpace* to_space) {
  DCHECK(to_space != nullptr);
  to_space_ = to_space;
}

void SemiSpace::SetFromSpace(space::ContinuousMemMapAllocSpace* from_space) {
  DCHECK(from_space != nullptr);
  from_space_ = from_space;
}

void SemiSpace::FinishPhase() {
  TimingLogger::ScopedSplit split("FinishPhase", &timings_);
  // Can't enqueue references if we hold the mutator lock.
  Heap* heap = GetHeap();
  timings_.NewSplit("PostGcVerification");
  heap->PostGcVerification(this);

  // Null the "to" and "from" spaces since compacting from one to the other isn't valid until
  // further action is done by the heap.
  to_space_ = nullptr;
  from_space_ = nullptr;

  // Update the cumulative statistics.
  total_freed_objects_ += GetFreedObjects() + GetFreedLargeObjects();
  total_freed_bytes_ += GetFreedBytes() + GetFreedLargeObjectBytes();

  // Ensure that the mark stack is empty.
  CHECK(mark_stack_->IsEmpty());

  // Update the cumulative loggers.
  cumulative_timings_.Start();
  cumulative_timings_.AddLogger(timings_);
  cumulative_timings_.End();

  // Clear all of the spaces' mark bitmaps.
  for (const auto& space : GetHeap()->GetContinuousSpaces()) {
    accounting::SpaceBitmap* bitmap = space->GetMarkBitmap();
    if (bitmap != nullptr &&
        space->GetGcRetentionPolicy() != space::kGcRetentionPolicyNeverCollect) {
      bitmap->Clear();
    }
  }
  mark_stack_->Reset();

  // Reset the marked large objects.
  space::LargeObjectSpace* large_objects = GetHeap()->GetLargeObjectsSpace();
  large_objects->GetMarkObjects()->Clear();
}

}  // namespace collector
}  // namespace gc
}  // namespace art