/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "bump_pointer_space-inl.h"
#include "bump_pointer_space.h"
#include "gc/accounting/read_barrier_table.h"
#include "mirror/class-inl.h"
#include "mirror/object-inl.h"
#include "region_space-inl.h"
#include "thread_list.h"

namespace art {
namespace gc {
namespace space {

// If a region has live objects whose total size is less than this percentage
// of the region size, evacuate the region.
static constexpr uint kEvacuateLivePercentThreshold = 75U;
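// For illustration only: an allocated region whose live_bytes_ amount to less
// than 75% of its rounded-up allocated bytes is evacuated; see
// Region::ShouldBeEvacuated below for the exact check.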

// Whether we protect the cleared regions.
// Only protect for target builds to prevent flaky test failures (b/63131961).
static constexpr bool kProtectClearedRegions = kIsTargetBuild;

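// Create the memory map backing a region space of `capacity` bytes. Placement
// at `requested_begin` is retried without a fixed address if it fails, and the
// resulting map is trimmed so that both ends are kRegionSize-aligned.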
MemMap* RegionSpace::CreateMemMap(const std::string& name, size_t capacity,
                                  uint8_t* requested_begin) {
  CHECK_ALIGNED(capacity, kRegionSize);
  std::string error_msg;
  // Ask for an additional kRegionSize of capacity so that we can align the map by kRegionSize
  // even if we get an unaligned base address. This is necessary for the ReadBarrierTable to work.
  std::unique_ptr<MemMap> mem_map;
  while (true) {
    mem_map.reset(MemMap::MapAnonymous(name.c_str(),
                                       requested_begin,
                                       capacity + kRegionSize,
                                       PROT_READ | PROT_WRITE,
                                       true,
                                       false,
                                       &error_msg));
    if (mem_map.get() != nullptr || requested_begin == nullptr) {
      break;
    }
    // Retry without a specified begin address.
    requested_begin = nullptr;
  }
  if (mem_map.get() == nullptr) {
    LOG(ERROR) << "Failed to allocate pages for alloc space (" << name << ") of size "
        << PrettySize(capacity) << " with message " << error_msg;
    MemMap::DumpMaps(LOG_STREAM(ERROR));
    return nullptr;
  }
  CHECK_EQ(mem_map->Size(), capacity + kRegionSize);
  CHECK_EQ(mem_map->Begin(), mem_map->BaseBegin());
  CHECK_EQ(mem_map->Size(), mem_map->BaseSize());
  if (IsAlignedParam(mem_map->Begin(), kRegionSize)) {
    // Got an aligned map. Since we requested a map that is kRegionSize larger, shrink it by
    // kRegionSize at the end.
    mem_map->SetSize(capacity);
  } else {
    // Got an unaligned map. Align both ends.
    mem_map->AlignBy(kRegionSize);
  }
  CHECK_ALIGNED(mem_map->Begin(), kRegionSize);
  CHECK_ALIGNED(mem_map->End(), kRegionSize);
  CHECK_EQ(mem_map->Size(), capacity);
  return mem_map.release();
}

RegionSpace* RegionSpace::Create(const std::string& name, MemMap* mem_map) {
  return new RegionSpace(name, mem_map);
}

RegionSpace::RegionSpace(const std::string& name, MemMap* mem_map)
    : ContinuousMemMapAllocSpace(name, mem_map, mem_map->Begin(), mem_map->End(), mem_map->End(),
                                 kGcRetentionPolicyAlwaysCollect),
      region_lock_("Region lock", kRegionSpaceRegionLock),
      time_(1U),
      num_regions_(mem_map->Size() / kRegionSize),
      num_non_free_regions_(0U),
      num_evac_regions_(0U),
      max_peak_num_non_free_regions_(0U),
      non_free_region_index_limit_(0U),
      current_region_(&full_region_),
      evac_region_(nullptr) {
  CHECK_ALIGNED(mem_map->Size(), kRegionSize);
  CHECK_ALIGNED(mem_map->Begin(), kRegionSize);
  DCHECK_GT(num_regions_, 0U);
  regions_.reset(new Region[num_regions_]);
  uint8_t* region_addr = mem_map->Begin();
  for (size_t i = 0; i < num_regions_; ++i, region_addr += kRegionSize) {
    regions_[i].Init(i, region_addr, region_addr + kRegionSize);
  }
  mark_bitmap_.reset(
      accounting::ContinuousSpaceBitmap::Create("region space live bitmap", Begin(), Capacity()));
  if (kIsDebugBuild) {
    CHECK_EQ(regions_[0].Begin(), Begin());
    for (size_t i = 0; i < num_regions_; ++i) {
      CHECK(regions_[i].IsFree());
      CHECK_EQ(static_cast<size_t>(regions_[i].End() - regions_[i].Begin()), kRegionSize);
      if (i + 1 < num_regions_) {
        CHECK_EQ(regions_[i].End(), regions_[i + 1].Begin());
      }
    }
    CHECK_EQ(regions_[num_regions_ - 1].End(), Limit());
  }
  DCHECK(!full_region_.IsFree());
  DCHECK(full_region_.IsAllocated());
  size_t ignored;
  DCHECK(full_region_.Alloc(kAlignment, &ignored, nullptr, &ignored) == nullptr);
}

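// The three accessors below compute sizes by scanning all regions under
// region_lock_; each result is a multiple of kRegionSize.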
size_t RegionSpace::FromSpaceSize() {
  uint64_t num_regions = 0;
  MutexLock mu(Thread::Current(), region_lock_);
  for (size_t i = 0; i < num_regions_; ++i) {
    Region* r = &regions_[i];
    if (r->IsInFromSpace()) {
      ++num_regions;
    }
  }
  return num_regions * kRegionSize;
}

size_t RegionSpace::UnevacFromSpaceSize() {
  uint64_t num_regions = 0;
  MutexLock mu(Thread::Current(), region_lock_);
  for (size_t i = 0; i < num_regions_; ++i) {
    Region* r = &regions_[i];
    if (r->IsInUnevacFromSpace()) {
      ++num_regions;
    }
  }
  return num_regions * kRegionSize;
}

size_t RegionSpace::ToSpaceSize() {
  uint64_t num_regions = 0;
  MutexLock mu(Thread::Current(), region_lock_);
  for (size_t i = 0; i < num_regions_; ++i) {
    Region* r = &regions_[i];
    if (r->IsInToSpace()) {
      ++num_regions;
    }
  }
  return num_regions * kRegionSize;
}

inline bool RegionSpace::Region::ShouldBeEvacuated() {
  DCHECK((IsAllocated() || IsLarge()) && IsInToSpace());
  // The region should be evacuated if:
  // - the region was allocated after the start of the previous GC (newly allocated region); or
  // - the live ratio is below the threshold (`kEvacuateLivePercentThreshold`).
  bool result;
  if (is_newly_allocated_) {
    result = true;
  } else {
    bool is_live_percent_valid = (live_bytes_ != static_cast<size_t>(-1));
    if (is_live_percent_valid) {
      DCHECK(IsInToSpace());
      DCHECK(!IsLargeTail());
      DCHECK_NE(live_bytes_, static_cast<size_t>(-1));
      DCHECK_LE(live_bytes_, BytesAllocated());
      const size_t bytes_allocated = RoundUp(BytesAllocated(), kRegionSize);
      DCHECK_LE(live_bytes_, bytes_allocated);
      if (IsAllocated()) {
        // Side note: live_percent == 0 does not necessarily mean
        // there are no live objects, due to rounding (there may be a
        // few).
        result = (live_bytes_ * 100U < kEvacuateLivePercentThreshold * bytes_allocated);
      } else {
        DCHECK(IsLarge());
        result = (live_bytes_ == 0U);
      }
    } else {
      result = false;
    }
  }
  return result;
}

// Determine which regions to evacuate and mark them as
// from-space. Mark the rest as unevacuated from-space.
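// Note: this is typically invoked by the concurrent copying collector while
// mutators are suspended (the flip pause); the regions marked here are
// reclaimed later by ClearFromSpace().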
void RegionSpace::SetFromSpace(accounting::ReadBarrierTable* rb_table, bool force_evacuate_all) {
  ++time_;
  if (kUseTableLookupReadBarrier) {
    DCHECK(rb_table->IsAllCleared());
    rb_table->SetAll();
  }
  MutexLock mu(Thread::Current(), region_lock_);
  // Counter for the number of expected large tail regions following a large region.
  size_t num_expected_large_tails = 0U;
  // Flag to store whether the previously seen large region has been evacuated.
  // This is used to apply the same evacuation policy to related large tail regions.
  bool prev_large_evacuated = false;
  VerifyNonFreeRegionLimit();
  const size_t iter_limit = kUseTableLookupReadBarrier
      ? num_regions_
      : std::min(num_regions_, non_free_region_index_limit_);
  for (size_t i = 0; i < iter_limit; ++i) {
    Region* r = &regions_[i];
    RegionState state = r->State();
    RegionType type = r->Type();
    if (!r->IsFree()) {
      DCHECK(r->IsInToSpace());
      if (LIKELY(num_expected_large_tails == 0U)) {
        DCHECK((state == RegionState::kRegionStateAllocated ||
                state == RegionState::kRegionStateLarge) &&
               type == RegionType::kRegionTypeToSpace);
        bool should_evacuate = force_evacuate_all || r->ShouldBeEvacuated();
        if (should_evacuate) {
          r->SetAsFromSpace();
          DCHECK(r->IsInFromSpace());
        } else {
          r->SetAsUnevacFromSpace();
          DCHECK(r->IsInUnevacFromSpace());
        }
        if (UNLIKELY(state == RegionState::kRegionStateLarge &&
                     type == RegionType::kRegionTypeToSpace)) {
          prev_large_evacuated = should_evacuate;
          num_expected_large_tails = RoundUp(r->BytesAllocated(), kRegionSize) / kRegionSize - 1;
          DCHECK_GT(num_expected_large_tails, 0U);
        }
      } else {
        DCHECK(state == RegionState::kRegionStateLargeTail &&
               type == RegionType::kRegionTypeToSpace);
        if (prev_large_evacuated) {
          r->SetAsFromSpace();
          DCHECK(r->IsInFromSpace());
        } else {
          r->SetAsUnevacFromSpace();
          DCHECK(r->IsInUnevacFromSpace());
        }
        --num_expected_large_tails;
      }
    } else {
      DCHECK_EQ(num_expected_large_tails, 0U);
      if (kUseTableLookupReadBarrier) {
        // Clear the rb table for to-space regions.
        rb_table->Clear(r->Begin(), r->End());
      }
    }
  }
  DCHECK_EQ(num_expected_large_tails, 0U);
  current_region_ = &full_region_;
  evac_region_ = &full_region_;
}

static void ZeroAndProtectRegion(uint8_t* begin, uint8_t* end) {
  ZeroAndReleasePages(begin, end - begin);
  if (kProtectClearedRegions) {
    CheckedCall(mprotect, __FUNCTION__, begin, end - begin, PROT_NONE);
  }
}

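// Reclaim evacuated from-space regions and fully dead unevac regions, batching
// the zeroing and releasing of adjacent cleared regions, and report the totals
// through the out-parameters.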
void RegionSpace::ClearFromSpace(/* out */ uint64_t* cleared_bytes,
                                 /* out */ uint64_t* cleared_objects) {
  DCHECK(cleared_bytes != nullptr);
  DCHECK(cleared_objects != nullptr);
  *cleared_bytes = 0;
  *cleared_objects = 0;
  MutexLock mu(Thread::Current(), region_lock_);
  VerifyNonFreeRegionLimit();
  size_t new_non_free_region_index_limit = 0;

  // Update the max of the peak non-free region count before reclaiming evacuated regions.
  max_peak_num_non_free_regions_ = std::max(max_peak_num_non_free_regions_,
                                            num_non_free_regions_);

  // Lambda expression `clear_region` clears a region and adds it to the
  // "clear block".
  //
  // As we sweep regions to clear them, we maintain a "clear block", composed of
  // adjacent cleared regions and whose bounds are `clear_block_begin` and
  // `clear_block_end`. When processing a new region which is not adjacent to
  // the clear block (a discontinuity in cleared regions), the current clear
  // block is zeroed and released, and a new clear block is started at the most
  // recently cleared region.
  //
  // This is done in order to combine zeroing and releasing pages to reduce how
  // often madvise is called. This helps reduce contention on the mmap semaphore
  // (see b/62194020).
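  // Illustrative example: clearing regions 3, 4 and 6 in one sweep results in a
  // single zero-and-release call for the block [3, 5) and another for [6, 7)
  // (the latter issued after the loop below).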
  uint8_t* clear_block_begin = nullptr;
  uint8_t* clear_block_end = nullptr;
  auto clear_region = [&clear_block_begin, &clear_block_end](Region* r) {
    r->Clear(/*zero_and_release_pages*/false);
    if (clear_block_end != r->Begin()) {
      // Region `r` is not adjacent to the current clear block; zero and release
      // pages within the current block and restart a new clear block at the
      // beginning of region `r`.
      ZeroAndProtectRegion(clear_block_begin, clear_block_end);
      clear_block_begin = r->Begin();
    }
    // Add region `r` to the clear block.
    clear_block_end = r->End();
  };
  for (size_t i = 0; i < std::min(num_regions_, non_free_region_index_limit_); ++i) {
    Region* r = &regions_[i];
    if (r->IsInFromSpace()) {
      *cleared_bytes += r->BytesAllocated();
      *cleared_objects += r->ObjectsAllocated();
      --num_non_free_regions_;
      clear_region(r);
    } else if (r->IsInUnevacFromSpace()) {
      if (r->LiveBytes() == 0) {
        DCHECK(!r->IsLargeTail());
        // Special case for 0 live bytes: all of the objects in the region are dead and
        // we can clear it. This is important for large objects since we must not visit dead ones
        // in RegionSpace::Walk because they may contain dangling references to invalid objects.
        // It is also better to clear these regions now instead of at the end of the next GC to
        // save RAM. If we don't clear the regions here, they will be cleared next GC by the normal
        // live percent evacuation logic.
        size_t free_regions = 1;
        // Also release RAM for large tails.
        while (i + free_regions < num_regions_ && regions_[i + free_regions].IsLargeTail()) {
          DCHECK(r->IsLarge());
          clear_region(&regions_[i + free_regions]);
          ++free_regions;
        }
        *cleared_bytes += r->BytesAllocated();
        *cleared_objects += r->ObjectsAllocated();
        num_non_free_regions_ -= free_regions;
        clear_region(r);
        GetLiveBitmap()->ClearRange(
            reinterpret_cast<mirror::Object*>(r->Begin()),
            reinterpret_cast<mirror::Object*>(r->Begin() + free_regions * kRegionSize));
        continue;
      }
      r->SetUnevacFromSpaceAsToSpace();
      if (r->AllAllocatedBytesAreLive()) {
        // Try to optimize the number of ClearRange calls by checking whether the next regions
        // can also be cleared.
        size_t regions_to_clear_bitmap = 1;
        while (i + regions_to_clear_bitmap < num_regions_) {
          Region* const cur = &regions_[i + regions_to_clear_bitmap];
          if (!cur->AllAllocatedBytesAreLive()) {
            DCHECK(!cur->IsLargeTail());
            break;
          }
          CHECK(cur->IsInUnevacFromSpace());
          cur->SetUnevacFromSpaceAsToSpace();
          ++regions_to_clear_bitmap;
        }

        // Optimization: if all allocated bytes in a region are live,
        // then the live-bit information for these objects is superfluous:
        // - We can determine that these objects are all live by using
        //   Region::AllAllocatedBytesAreLive (which just checks whether
        //   `LiveBytes() == static_cast<size_t>(Top() - Begin())`).
        // - We can visit the objects in this region using
        //   RegionSpace::GetNextObject, i.e. without resorting to the
        //   live bits (see RegionSpace::WalkInternal).
        // Therefore, we can clear the bits for these objects in the
        // (live) region space bitmap (and release the corresponding pages).
        GetLiveBitmap()->ClearRange(
            reinterpret_cast<mirror::Object*>(r->Begin()),
            reinterpret_cast<mirror::Object*>(r->Begin() + regions_to_clear_bitmap * kRegionSize));
        // Skip over the extra regions for which we cleared the bitmap: we shall not clear them,
        // as they are unevac regions that are live.
        // Subtract one for the for-loop.
        i += regions_to_clear_bitmap - 1;
      }
    }
    // Note r != last_checked_region if r->IsInUnevacFromSpace() was true above.
    Region* last_checked_region = &regions_[i];
    if (!last_checked_region->IsFree()) {
      new_non_free_region_index_limit = std::max(new_non_free_region_index_limit,
                                                 last_checked_region->Idx() + 1);
    }
  }
  // Clear pages for the last block since clearing happens when a new block opens.
  ZeroAndReleasePages(clear_block_begin, clear_block_end - clear_block_begin);
  // Update non_free_region_index_limit_.
  SetNonFreeRegionLimit(new_non_free_region_index_limit);
  evac_region_ = nullptr;
  num_non_free_regions_ += num_evac_regions_;
  num_evac_regions_ = 0;
}

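// Log the largest contiguous allocation that could still be satisfied: the
// larger of the unused tail of the current region and, when fewer than half of
// the regions are in use, the longest run of contiguous free regions.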
void RegionSpace::LogFragmentationAllocFailure(std::ostream& os,
                                               size_t /* failed_alloc_bytes */) {
  size_t max_contiguous_allocation = 0;
  MutexLock mu(Thread::Current(), region_lock_);
  if (current_region_->End() - current_region_->Top() > 0) {
    max_contiguous_allocation = current_region_->End() - current_region_->Top();
  }
  if (num_non_free_regions_ * 2 < num_regions_) {
    // We reserve half of the regions for evacuation only. If we
    // occupy more than half of the regions, do not report the free
    // regions as available.
    size_t max_contiguous_free_regions = 0;
    size_t num_contiguous_free_regions = 0;
    bool prev_free_region = false;
    for (size_t i = 0; i < num_regions_; ++i) {
      Region* r = &regions_[i];
      if (r->IsFree()) {
        if (!prev_free_region) {
          CHECK_EQ(num_contiguous_free_regions, 0U);
          prev_free_region = true;
        }
        ++num_contiguous_free_regions;
      } else {
        if (prev_free_region) {
          CHECK_NE(num_contiguous_free_regions, 0U);
          max_contiguous_free_regions = std::max(max_contiguous_free_regions,
                                                 num_contiguous_free_regions);
          num_contiguous_free_regions = 0U;
          prev_free_region = false;
        }
      }
    }
    max_contiguous_allocation = std::max(max_contiguous_allocation,
                                         max_contiguous_free_regions * kRegionSize);
  }
  os << "; failed due to fragmentation (largest possible contiguous allocation "
     << max_contiguous_allocation << " bytes)";
  // Caller's job to print failed_alloc_bytes.
}

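// Release every region and reset the space to its initial empty state.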
void RegionSpace::Clear() {
  MutexLock mu(Thread::Current(), region_lock_);
  for (size_t i = 0; i < num_regions_; ++i) {
    Region* r = &regions_[i];
    if (!r->IsFree()) {
      --num_non_free_regions_;
    }
    r->Clear(/*zero_and_release_pages*/true);
  }
  SetNonFreeRegionLimit(0);
  current_region_ = &full_region_;
  evac_region_ = &full_region_;
}

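// Shrink the space to `new_capacity` bytes by dropping trailing regions,
// provided no in-use region lies beyond the new limit.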
void RegionSpace::ClampGrowthLimit(size_t new_capacity) {
  MutexLock mu(Thread::Current(), region_lock_);
  CHECK_LE(new_capacity, NonGrowthLimitCapacity());
  size_t new_num_regions = new_capacity / kRegionSize;
  if (non_free_region_index_limit_ > new_num_regions) {
    LOG(WARNING) << "Couldn't clamp region space as there are regions in use beyond growth limit.";
    return;
  }
  num_regions_ = new_num_regions;
  SetLimit(Begin() + new_capacity);
  if (Size() > new_capacity) {
    SetEnd(Limit());
  }
  GetMarkBitmap()->SetHeapSize(new_capacity);
  GetMemMap()->SetSize(new_capacity);
}

void RegionSpace::Dump(std::ostream& os) const {
  os << GetName() << " "
     << reinterpret_cast<void*>(Begin()) << "-" << reinterpret_cast<void*>(Limit());
}

void RegionSpace::DumpRegionForObject(std::ostream& os, mirror::Object* obj) {
  CHECK(HasAddress(obj));
  MutexLock mu(Thread::Current(), region_lock_);
  RefToRegionUnlocked(obj)->Dump(os);
}

void RegionSpace::DumpRegions(std::ostream& os) {
  MutexLock mu(Thread::Current(), region_lock_);
  for (size_t i = 0; i < num_regions_; ++i) {
    regions_[i].Dump(os);
  }
}

void RegionSpace::DumpNonFreeRegions(std::ostream& os) {
  MutexLock mu(Thread::Current(), region_lock_);
  for (size_t i = 0; i < num_regions_; ++i) {
    Region* reg = &regions_[i];
    if (!reg->IsFree()) {
      reg->Dump(os);
    }
  }
}

void RegionSpace::RecordAlloc(mirror::Object* ref) {
  CHECK(ref != nullptr);
  Region* r = RefToRegion(ref);
  r->objects_allocated_.FetchAndAddSequentiallyConsistent(1);
}

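// Hand out a whole region as a thread-local allocation buffer (TLAB) for
// `self`, revoking any TLAB the thread already holds first.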
bool RegionSpace::AllocNewTlab(Thread* self, size_t min_bytes) {
  MutexLock mu(self, region_lock_);
  RevokeThreadLocalBuffersLocked(self);
  // Retain sufficient free regions for full evacuation.

  Region* r = AllocateRegion(/*for_evac*/ false);
  if (r != nullptr) {
    r->is_a_tlab_ = true;
    r->thread_ = self;
    r->SetTop(r->End());
    self->SetTlab(r->Begin(), r->Begin() + min_bytes, r->End());
    return true;
  }
  return false;
}

size_t RegionSpace::RevokeThreadLocalBuffers(Thread* thread) {
  MutexLock mu(Thread::Current(), region_lock_);
  RevokeThreadLocalBuffersLocked(thread);
  return 0U;
}

void RegionSpace::RevokeThreadLocalBuffersLocked(Thread* thread) {
  uint8_t* tlab_start = thread->GetTlabStart();
  DCHECK_EQ(thread->HasTlab(), tlab_start != nullptr);
  if (tlab_start != nullptr) {
    DCHECK_ALIGNED(tlab_start, kRegionSize);
    Region* r = RefToRegionLocked(reinterpret_cast<mirror::Object*>(tlab_start));
    DCHECK(r->IsAllocated());
    DCHECK_LE(thread->GetThreadLocalBytesAllocated(), kRegionSize);
    r->RecordThreadLocalAllocations(thread->GetThreadLocalObjectsAllocated(),
                                    thread->GetThreadLocalBytesAllocated());
    r->is_a_tlab_ = false;
    r->thread_ = nullptr;
  }
  thread->SetTlab(nullptr, nullptr, nullptr);
}

size_t RegionSpace::RevokeAllThreadLocalBuffers() {
  Thread* self = Thread::Current();
  MutexLock mu(self, *Locks::runtime_shutdown_lock_);
  MutexLock mu2(self, *Locks::thread_list_lock_);
  std::list<Thread*> thread_list = Runtime::Current()->GetThreadList()->GetList();
  for (Thread* thread : thread_list) {
    RevokeThreadLocalBuffers(thread);
  }
  return 0U;
}

void RegionSpace::AssertThreadLocalBuffersAreRevoked(Thread* thread) {
  if (kIsDebugBuild) {
    DCHECK(!thread->HasTlab());
  }
}

void RegionSpace::AssertAllThreadLocalBuffersAreRevoked() {
  if (kIsDebugBuild) {
    Thread* self = Thread::Current();
    MutexLock mu(self, *Locks::runtime_shutdown_lock_);
    MutexLock mu2(self, *Locks::thread_list_lock_);
    std::list<Thread*> thread_list = Runtime::Current()->GetThreadList()->GetList();
    for (Thread* thread : thread_list) {
      AssertThreadLocalBuffersAreRevoked(thread);
    }
  }
}

void RegionSpace::Region::Dump(std::ostream& os) const {
  os << "Region[" << idx_ << "]="
     << reinterpret_cast<void*>(begin_)
     << "-" << reinterpret_cast<void*>(Top())
     << "-" << reinterpret_cast<void*>(end_)
     << " state=" << state_
     << " type=" << type_
     << " objects_allocated=" << objects_allocated_
     << " alloc_time=" << alloc_time_
     << " live_bytes=" << live_bytes_
     << " is_newly_allocated=" << std::boolalpha << is_newly_allocated_ << std::noboolalpha
     << " is_a_tlab=" << std::boolalpha << is_a_tlab_ << std::noboolalpha
     << " thread=" << thread_ << '\n';
}

size_t RegionSpace::AllocationSizeNonvirtual(mirror::Object* obj, size_t* usable_size) {
  size_t num_bytes = obj->SizeOf();
  if (usable_size != nullptr) {
    if (LIKELY(num_bytes <= kRegionSize)) {
      DCHECK(RefToRegion(obj)->IsAllocated());
      *usable_size = RoundUp(num_bytes, kAlignment);
    } else {
      DCHECK(RefToRegion(obj)->IsLarge());
      *usable_size = RoundUp(num_bytes, kRegionSize);
    }
  }
  return num_bytes;
}

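// Reset a region to the free state; optionally zero its contents, release the
// pages back to the kernel and, on target builds, protect them.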
void RegionSpace::Region::Clear(bool zero_and_release_pages) {
  top_.StoreRelaxed(begin_);
  state_ = RegionState::kRegionStateFree;
  type_ = RegionType::kRegionTypeNone;
  objects_allocated_.StoreRelaxed(0);
  alloc_time_ = 0;
  live_bytes_ = static_cast<size_t>(-1);
  if (zero_and_release_pages) {
    ZeroAndProtectRegion(begin_, end_);
  }
  is_newly_allocated_ = false;
  is_a_tlab_ = false;
  thread_ = nullptr;
}

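// Linear scan for a free region. When not allocating for evacuation, keep at
// least half of the regions free so that a later full evacuation can succeed.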
RegionSpace::Region* RegionSpace::AllocateRegion(bool for_evac) {
  if (!for_evac && (num_non_free_regions_ + 1) * 2 > num_regions_) {
    return nullptr;
  }
  for (size_t i = 0; i < num_regions_; ++i) {
    Region* r = &regions_[i];
    if (r->IsFree()) {
      r->Unfree(this, time_);
      if (for_evac) {
        ++num_evac_regions_;
        // Evac doesn't count as newly allocated.
      } else {
        r->SetNewlyAllocated();
        ++num_non_free_regions_;
      }
      return r;
    }
  }
  return nullptr;
}

void RegionSpace::Region::MarkAsAllocated(RegionSpace* region_space, uint32_t alloc_time) {
  DCHECK(IsFree());
  alloc_time_ = alloc_time;
  region_space->AdjustNonFreeRegionLimit(idx_);
  type_ = RegionType::kRegionTypeToSpace;
  if (kProtectClearedRegions) {
    CheckedCall(mprotect, __FUNCTION__, Begin(), kRegionSize, PROT_READ | PROT_WRITE);
  }
}

void RegionSpace::Region::Unfree(RegionSpace* region_space, uint32_t alloc_time) {
  MarkAsAllocated(region_space, alloc_time);
  state_ = RegionState::kRegionStateAllocated;
}

void RegionSpace::Region::UnfreeLarge(RegionSpace* region_space, uint32_t alloc_time) {
  MarkAsAllocated(region_space, alloc_time);
  state_ = RegionState::kRegionStateLarge;
}

void RegionSpace::Region::UnfreeLargeTail(RegionSpace* region_space, uint32_t alloc_time) {
  MarkAsAllocated(region_space, alloc_time);
  state_ = RegionState::kRegionStateLargeTail;
}

}  // namespace space
}  // namespace gc
}  // namespace art