arena_allocator.cc revision 5c42c29b89286e5efa4a4613132b09051ce5945b
/*
 * Copyright (C) 2013 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <algorithm>
#include <iomanip>
#include <numeric>

#include "arena_allocator.h"
#include "logging.h"
#include "mutex.h"
#include "thread-inl.h"
#include <memcheck/memcheck.h>

namespace art {

// Memmap is a bit slower than malloc according to my measurements.
static constexpr bool kUseMemMap = false;
static constexpr bool kUseMemSet = true && kUseMemMap;
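// Minimum number of bytes reserved as a red zone after each allocation when running under
// Valgrind; AllocValgrind() marks this padding inaccessible so buffer overruns are reported.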
static constexpr size_t kValgrindRedZoneBytes = 8;
constexpr size_t Arena::kDefaultSize;

template <bool kCount>
const char* const ArenaAllocatorStatsImpl<kCount>::kAllocNames[] = {
  "Misc       ",
  "BasicBlock ",
  "BBList     ",
  "BBPreds    ",
  "DfsPreOrd  ",
  "DfsPostOrd ",
  "DomPostOrd ",
  "TopoOrd    ",
  "Lowering   ",
  "LIR        ",
  "LIR masks  ",
  "SwitchTbl  ",
  "FillArray  ",
  "SlowPaths  ",
  "MIR        ",
  "DataFlow   ",
  "GrowList   ",
  "GrowBitMap ",
  "SSA2Dalvik ",
  "Dalvik2SSA ",
  "DebugInfo  ",
  "Successor  ",
  "RegAlloc   ",
  "Data       ",
  "Preds      ",
  "STL        ",
};

template <bool kCount>
ArenaAllocatorStatsImpl<kCount>::ArenaAllocatorStatsImpl()
    : num_allocations_(0u) {
  std::fill_n(alloc_stats_, arraysize(alloc_stats_), 0u);
}

template <bool kCount>
void ArenaAllocatorStatsImpl<kCount>::Copy(const ArenaAllocatorStatsImpl& other) {
  num_allocations_ = other.num_allocations_;
  std::copy(other.alloc_stats_, other.alloc_stats_ + arraysize(alloc_stats_), alloc_stats_);
}

template <bool kCount>
void ArenaAllocatorStatsImpl<kCount>::RecordAlloc(size_t bytes, ArenaAllocKind kind) {
  alloc_stats_[kind] += bytes;
  ++num_allocations_;
}

template <bool kCount>
size_t ArenaAllocatorStatsImpl<kCount>::NumAllocations() const {
  return num_allocations_;
}

template <bool kCount>
size_t ArenaAllocatorStatsImpl<kCount>::BytesAllocated() const {
  const size_t init = 0u;  // Initial value of the correct type.
  return std::accumulate(alloc_stats_, alloc_stats_ + arraysize(alloc_stats_), init);
}

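// Dumps aggregate and per-kind allocation statistics. Illustrative output shape (values vary):
//   MEM: used: <used>, allocated: <allocated>, lost: <lost>
//   Number of arenas allocated: <n>, Number of allocations: <m>, avg size: <used/m>
//   ===== Allocation by kind
//   <kind name> <bytes>    (one line per ArenaAllocKind)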
template <bool kCount>
void ArenaAllocatorStatsImpl<kCount>::Dump(std::ostream& os, const Arena* first,
                                           ssize_t lost_bytes_adjustment) const {
  size_t malloc_bytes = 0u;
  size_t lost_bytes = 0u;
  size_t num_arenas = 0u;
  for (const Arena* arena = first; arena != nullptr; arena = arena->next_) {
    malloc_bytes += arena->Size();
    lost_bytes += arena->RemainingSpace();
    ++num_arenas;
  }
  // The lost_bytes_adjustment is used to make up for the fact that the current arena
  // may not have the bytes_allocated_ updated correctly.
  lost_bytes += lost_bytes_adjustment;
  const size_t bytes_allocated = BytesAllocated();
  os << " MEM: used: " << bytes_allocated << ", allocated: " << malloc_bytes
     << ", lost: " << lost_bytes << "\n";
  size_t num_allocations = NumAllocations();
  if (num_allocations != 0) {
    os << "Number of arenas allocated: " << num_arenas << ", Number of allocations: "
       << num_allocations << ", avg size: " << bytes_allocated / num_allocations << "\n";
  }
  os << "===== Allocation by kind\n";
  static_assert(arraysize(kAllocNames) == kNumArenaAllocKinds, "arraysize of kAllocNames");
  for (int i = 0; i < kNumArenaAllocKinds; i++) {
    os << kAllocNames[i] << std::setw(10) << alloc_stats_[i] << "\n";
  }
}


// Explicitly instantiate the used implementation.
template class ArenaAllocatorStatsImpl<kArenaAllocatorCountAllocations>;

Arena::Arena(size_t size)
    : bytes_allocated_(0),
      map_(nullptr),
      next_(nullptr) {
  if (kUseMemMap) {
    std::string error_msg;
    map_ = MemMap::MapAnonymous("dalvik-arena", nullptr, size, PROT_READ | PROT_WRITE, false, false,
                                &error_msg);
    CHECK(map_ != nullptr) << error_msg;
    memory_ = map_->Begin();
    size_ = map_->Size();
  } else {
    memory_ = reinterpret_cast<uint8_t*>(calloc(1, size));
    size_ = size;
  }
}

Arena::~Arena() {
  if (kUseMemMap) {
    delete map_;
  } else {
    free(reinterpret_cast<void*>(memory_));
  }
}

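// Zero the used portion of the arena so that a recycled arena hands out zero-initialized memory,
// matching the calloc()/anonymous-map guarantee of a freshly constructed arena.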
void Arena::Reset() {
  if (bytes_allocated_) {
    if (kUseMemSet || !kUseMemMap) {
      memset(Begin(), 0, bytes_allocated_);
    } else {
      map_->MadviseDontNeedAndZero();
    }
    bytes_allocated_ = 0;
  }
}

ArenaPool::ArenaPool()
    : lock_("Arena pool lock"),
      free_arenas_(nullptr) {
}

ArenaPool::~ArenaPool() {
  while (free_arenas_ != nullptr) {
    auto* arena = free_arenas_;
    free_arenas_ = free_arenas_->next_;
    delete arena;
  }
}

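// Reuse the arena at the head of the free list if it is large enough (only the head is checked);
// otherwise allocate a fresh arena of the requested size. The returned arena is always Reset().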
Arena* ArenaPool::AllocArena(size_t size) {
  Thread* self = Thread::Current();
  Arena* ret = nullptr;
  {
    MutexLock lock(self, lock_);
    if (free_arenas_ != nullptr && LIKELY(free_arenas_->Size() >= size)) {
      ret = free_arenas_;
      free_arenas_ = free_arenas_->next_;
    }
  }
  if (ret == nullptr) {
    ret = new Arena(size);
  }
  ret->Reset();
  return ret;
}

size_t ArenaPool::GetBytesAllocated() const {
  size_t total = 0;
  MutexLock lock(Thread::Current(), lock_);
  for (Arena* arena = free_arenas_; arena != nullptr; arena = arena->next_) {
    total += arena->GetBytesAllocated();
  }
  return total;
}

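// Return a chain of arenas to the pool's free list. Under Valgrind, the used portion of each
// arena is first marked undefined so that later reads of stale data are reported.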
void ArenaPool::FreeArenaChain(Arena* first) {
  if (UNLIKELY(RUNNING_ON_VALGRIND > 0)) {
    for (Arena* arena = first; arena != nullptr; arena = arena->next_) {
      VALGRIND_MAKE_MEM_UNDEFINED(arena->memory_, arena->bytes_allocated_);
    }
  }
  if (first != nullptr) {
    Arena* last = first;
    while (last->next_ != nullptr) {
      last = last->next_;
    }
    Thread* self = Thread::Current();
    MutexLock lock(self, lock_);
    last->next_ = free_arenas_;
    free_arenas_ = first;
  }
}

size_t ArenaAllocator::BytesAllocated() const {
  return ArenaAllocatorStats::BytesAllocated();
}

ArenaAllocator::ArenaAllocator(ArenaPool* pool)
  : pool_(pool),
    begin_(nullptr),
    end_(nullptr),
    ptr_(nullptr),
    arena_head_(nullptr),
    running_on_valgrind_(RUNNING_ON_VALGRIND > 0) {
}

void ArenaAllocator::UpdateBytesAllocated() {
  if (arena_head_ != nullptr) {
    // Update how many bytes we have allocated into the arena so that the arena pool knows how
    // much memory to zero out.
    arena_head_->bytes_allocated_ = ptr_ - begin_;
  }
}

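// Valgrind-aware allocation path: rounds the request up to include a red zone, checks that the
// returned bytes are still zeroed, and marks the red zone inaccessible to catch overruns.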
void* ArenaAllocator::AllocValgrind(size_t bytes, ArenaAllocKind kind) {
  size_t rounded_bytes = RoundUp(bytes + kValgrindRedZoneBytes, 8);
  if (UNLIKELY(ptr_ + rounded_bytes > end_)) {
    // Obtain a new block.
    ObtainNewArenaForAllocation(rounded_bytes);
    if (UNLIKELY(ptr_ == nullptr)) {
      return nullptr;
    }
  }
  ArenaAllocatorStats::RecordAlloc(rounded_bytes, kind);
  uint8_t* ret = ptr_;
  ptr_ += rounded_bytes;
  // Check that the memory is already zeroed out.
  for (uint8_t* ptr = ret; ptr < ptr_; ++ptr) {
    CHECK_EQ(*ptr, 0U);
  }
  VALGRIND_MAKE_MEM_NOACCESS(ret + bytes, rounded_bytes - bytes);
  return ret;
}

ArenaAllocator::~ArenaAllocator() {
  // Reclaim all the arenas by giving them back to the arena pool.
  UpdateBytesAllocated();
  pool_->FreeArenaChain(arena_head_);
}

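// Chain a new arena (kDefaultSize, or larger if the pending allocation needs it) at the head of
// the list and repoint the bump-pointer window (begin_/ptr_/end_) at it.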
void ArenaAllocator::ObtainNewArenaForAllocation(size_t allocation_size) {
  UpdateBytesAllocated();
  Arena* new_arena = pool_->AllocArena(std::max(Arena::kDefaultSize, allocation_size));
  new_arena->next_ = arena_head_;
  arena_head_ = new_arena;
  // Update our internal data structures.
  ptr_ = begin_ = new_arena->Begin();
  end_ = new_arena->End();
}

MemStats::MemStats(const char* name, const ArenaAllocatorStats* stats, const Arena* first_arena,
                   ssize_t lost_bytes_adjustment)
    : name_(name),
      stats_(stats),
      first_arena_(first_arena),
      lost_bytes_adjustment_(lost_bytes_adjustment) {
}

void MemStats::Dump(std::ostream& os) const {
  os << name_ << " stats:\n";
  stats_->Dump(os, first_arena_, lost_bytes_adjustment_);
}

// Collect memory usage stats for dumping. The adjustment accounts for the current arena's
// bytes_allocated_ not yet reflecting ptr_ (it is only updated lazily).
MemStats ArenaAllocator::GetMemStats() const {
  ssize_t lost_bytes_adjustment =
      (arena_head_ == nullptr) ? 0 : (end_ - ptr_) - arena_head_->RemainingSpace();
  return MemStats("ArenaAllocator", this, arena_head_, lost_bytes_adjustment);
}

}  // namespace art