/*
 * Copyright (C) 2013 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

17#include "bump_pointer_space.h"
18#include "bump_pointer_space-inl.h"
19#include "mirror/object-inl.h"
20#include "mirror/class-inl.h"
21#include "thread_list.h"
22
namespace art {
namespace gc {
namespace space {

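// Creates a new bump pointer space backed by an anonymous MemMap of |capacity| bytes (rounded up
// to a multiple of the page size), optionally at |requested_begin|. Returns null and logs an
// error if the pages cannot be mapped.
// Illustrative call (the name and size here are hypothetical, not from this file):
//   BumpPointerSpace* space = BumpPointerSpace::Create("bump pointer space", 256 * MB, nullptr);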
BumpPointerSpace* BumpPointerSpace::Create(const std::string& name, size_t capacity,
                                           uint8_t* requested_begin) {
  capacity = RoundUp(capacity, kPageSize);
  std::string error_msg;
  std::unique_ptr<MemMap> mem_map(MemMap::MapAnonymous(name.c_str(), requested_begin, capacity,
                                                       PROT_READ | PROT_WRITE, true, false,
                                                       &error_msg));
  if (mem_map.get() == nullptr) {
    LOG(ERROR) << "Failed to allocate pages for alloc space (" << name << ") of size "
        << PrettySize(capacity) << " with message " << error_msg;
    return nullptr;
  }
  return new BumpPointerSpace(name, mem_map.release());
}

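// Wraps an existing |mem_map| as a bump pointer space; the space takes ownership of the map.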
BumpPointerSpace* BumpPointerSpace::CreateFromMemMap(const std::string& name, MemMap* mem_map) {
  return new BumpPointerSpace(name, mem_map);
}

BumpPointerSpace::BumpPointerSpace(const std::string& name, uint8_t* begin, uint8_t* limit)
    : ContinuousMemMapAllocSpace(name, nullptr, begin, begin, limit,
                                 kGcRetentionPolicyAlwaysCollect),
      growth_end_(limit),
      objects_allocated_(0), bytes_allocated_(0),
      block_lock_("Block lock", kBumpPointerSpaceBlockLock),
      main_block_size_(0),
      num_blocks_(0) {
}

BumpPointerSpace::BumpPointerSpace(const std::string& name, MemMap* mem_map)
    : ContinuousMemMapAllocSpace(name, mem_map, mem_map->Begin(), mem_map->Begin(), mem_map->End(),
                                 kGcRetentionPolicyAlwaysCollect),
      growth_end_(mem_map->End()),
      objects_allocated_(0), bytes_allocated_(0),
      block_lock_("Block lock", kBumpPointerSpaceBlockLock),
      main_block_size_(0),
      num_blocks_(0) {
}

void BumpPointerSpace::Clear() {
  // Release the pages back to the operating system.
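  // On targets where madvise(MADV_DONTNEED) is not guaranteed to zero the pages
  // (kMadviseZeroes is false), zero them explicitly so a cleared space always reads as zeros.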
  if (!kMadviseZeroes) {
    memset(Begin(), 0, Limit() - Begin());
  }
  CHECK_NE(madvise(Begin(), Limit() - Begin(), MADV_DONTNEED), -1) << "madvise failed";
  // Reset the end of the space back to the beginning; we move the end forward as we allocate
  // objects.
  SetEnd(Begin());
  objects_allocated_.StoreRelaxed(0);
  bytes_allocated_.StoreRelaxed(0);
  growth_end_ = Limit();
  {
    MutexLock mu(Thread::Current(), block_lock_);
    num_blocks_ = 0;
    main_block_size_ = 0;
  }
}

void BumpPointerSpace::Dump(std::ostream& os) const {
  os << GetName() << " "
      << reinterpret_cast<void*>(Begin()) << "-" << reinterpret_cast<void*>(End()) << " - "
      << reinterpret_cast<void*>(Limit());
}

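// Returns the address of the object immediately following |obj|, rounded up to kAlignment.
// The object's class must already be set so that SizeOf() is valid.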
mirror::Object* BumpPointerSpace::GetNextObject(mirror::Object* obj) {
  const uintptr_t position = reinterpret_cast<uintptr_t>(obj) + obj->SizeOf();
  return reinterpret_cast<mirror::Object*>(RoundUp(position, kAlignment));
}

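// Folds |thread|'s TLAB counters back into the space and detaches the TLAB. Returns 0 because
// revoking does not free any memory in this space.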
size_t BumpPointerSpace::RevokeThreadLocalBuffers(Thread* thread) {
  MutexLock mu(Thread::Current(), block_lock_);
  RevokeThreadLocalBuffersLocked(thread);
  return 0U;
}

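// Revokes the TLABs of all live threads. Holding runtime_shutdown_lock_ and thread_list_lock_
// keeps the thread list stable while we iterate over it.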
size_t BumpPointerSpace::RevokeAllThreadLocalBuffers() {
  Thread* self = Thread::Current();
  MutexLock mu(self, *Locks::runtime_shutdown_lock_);
  MutexLock mu2(self, *Locks::thread_list_lock_);
  // TODO: Avoid copying the thread list?
  std::list<Thread*> thread_list = Runtime::Current()->GetThreadList()->GetList();
  for (Thread* thread : thread_list) {
    RevokeThreadLocalBuffers(thread);
  }
  return 0U;
}

void BumpPointerSpace::AssertThreadLocalBuffersAreRevoked(Thread* thread) {
  if (kIsDebugBuild) {
    MutexLock mu(Thread::Current(), block_lock_);
    DCHECK(!thread->HasTlab());
  }
}

void BumpPointerSpace::AssertAllThreadLocalBuffersAreRevoked() {
  if (kIsDebugBuild) {
    Thread* self = Thread::Current();
    MutexLock mu(self, *Locks::runtime_shutdown_lock_);
    MutexLock mu2(self, *Locks::thread_list_lock_);
    // TODO: Avoid copying the thread list?
    std::list<Thread*> thread_list = Runtime::Current()->GetThreadList()->GetList();
    for (Thread* thread : thread_list) {
      AssertThreadLocalBuffersAreRevoked(thread);
    }
  }
}

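// Folds everything allocated so far into the main block. Requires block_lock_ to be held and
// no TLAB blocks to have been allocated yet (num_blocks_ == 0).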
void BumpPointerSpace::UpdateMainBlock() {
  DCHECK_EQ(num_blocks_, 0U);
  main_block_size_ = Size();
}

// Returns the start of the storage.
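// A block is a BlockHeader immediately followed by |bytes| of storage; the returned pointer
// skips past the header. The first block allocation freezes the main block's size.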
uint8_t* BumpPointerSpace::AllocBlock(size_t bytes) {
  bytes = RoundUp(bytes, kAlignment);
  if (!num_blocks_) {
    UpdateMainBlock();
  }
  uint8_t* storage = reinterpret_cast<uint8_t*>(
      AllocNonvirtualWithoutAccounting(bytes + sizeof(BlockHeader)));
  if (LIKELY(storage != nullptr)) {
    BlockHeader* header = reinterpret_cast<BlockHeader*>(storage);
    header->size_ = bytes;  // Write out the block header.
    storage += sizeof(BlockHeader);
    ++num_blocks_;
  }
  return storage;
}

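// Visits every object in the space: first the main block, then each subsequent block (currently
// only TLABs). Objects whose class pointer is still null were allocated concurrently and are not
// yet initialized, so the walk stops at them.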
void BumpPointerSpace::Walk(ObjectCallback* callback, void* arg) {
  uint8_t* pos = Begin();
  uint8_t* end = End();
  uint8_t* main_end = pos;
  {
    MutexLock mu(Thread::Current(), block_lock_);
    // If we have 0 blocks then we need to update the main block size since we have bump pointer
    // style allocation into an unbounded region (actually bounded by Capacity()).
    if (num_blocks_ == 0) {
      UpdateMainBlock();
    }
    main_end = Begin() + main_block_size_;
    if (num_blocks_ == 0) {
      // We don't have any other blocks; this means someone else may be allocating into the main
      // block. In this case, we don't want to try to visit the other blocks after the main block,
      // since these could actually be part of the main block.
      end = main_end;
    }
  }
  // Walk all of the objects in the main block first.
  while (pos < main_end) {
    mirror::Object* obj = reinterpret_cast<mirror::Object*>(pos);
    // No read barrier because obj may not be a valid object.
    if (obj->GetClass<kDefaultVerifyFlags, kWithoutReadBarrier>() == nullptr) {
      // There is a race condition where a thread has just allocated an object but not yet set the
      // class. We can't know the size of such an object, so we don't visit it and exit the
      // function, since there are guaranteed to be no other blocks.
      return;
    } else {
      callback(obj, arg);
      pos = reinterpret_cast<uint8_t*>(GetNextObject(obj));
    }
  }
  // Walk the other blocks (currently only TLABs).
  while (pos < end) {
    BlockHeader* header = reinterpret_cast<BlockHeader*>(pos);
    size_t block_size = header->size_;
    pos += sizeof(BlockHeader);  // Skip the header so that we know where the objects begin.
    mirror::Object* obj = reinterpret_cast<mirror::Object*>(pos);
    const mirror::Object* end_obj = reinterpret_cast<const mirror::Object*>(pos + block_size);
    CHECK_LE(reinterpret_cast<const uint8_t*>(end_obj), End());
    // We don't know how many objects are allocated in the current block. When we hit a null class,
    // assume it's the end. TODO: Have a thread update the header when it flushes the block?
    // No read barrier because obj may not be a valid object.
    while (obj < end_obj && obj->GetClass<kDefaultVerifyFlags, kWithoutReadBarrier>() != nullptr) {
      callback(obj, arg);
      obj = GetNextObject(obj);
    }
    pos += block_size;
  }
}

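// Bump pointer spaces are collected by copying/evacuation rather than sweeping, so no sweep
// callback is provided.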
accounting::ContinuousSpaceBitmap::SweepCallback* BumpPointerSpace::GetSweepCallback() {
  UNIMPLEMENTED(FATAL);
  UNREACHABLE();
}

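// Total bytes allocated: bytes already retired into blocks plus bytes in each thread's live TLAB.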
uint64_t BumpPointerSpace::GetBytesAllocated() {
  // Start with the pre-determined amount (from blocks which are no longer being allocated into).
  uint64_t total = static_cast<uint64_t>(bytes_allocated_.LoadRelaxed());
  Thread* self = Thread::Current();
  MutexLock mu(self, *Locks::runtime_shutdown_lock_);
  MutexLock mu2(self, *Locks::thread_list_lock_);
  std::list<Thread*> thread_list = Runtime::Current()->GetThreadList()->GetList();
  MutexLock mu3(self, block_lock_);
  // If we don't have any blocks, we don't have any thread local buffers. This check is required
  // since multiple bump pointer spaces can exist at the same time.
  if (num_blocks_ > 0) {
    for (Thread* thread : thread_list) {
      total += thread->GetThreadLocalBytesAllocated();
    }
  }
  return total;
}

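// Same accounting as GetBytesAllocated(), but counting objects instead of bytes.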
uint64_t BumpPointerSpace::GetObjectsAllocated() {
  // Start with the pre-determined amount (from blocks which are no longer being allocated into).
  uint64_t total = static_cast<uint64_t>(objects_allocated_.LoadRelaxed());
  Thread* self = Thread::Current();
  MutexLock mu(self, *Locks::runtime_shutdown_lock_);
  MutexLock mu2(self, *Locks::thread_list_lock_);
  std::list<Thread*> thread_list = Runtime::Current()->GetThreadList()->GetList();
  MutexLock mu3(self, block_lock_);
  // If we don't have any blocks, we don't have any thread local buffers. This check is required
  // since multiple bump pointer spaces can exist at the same time.
  if (num_blocks_ > 0) {
    for (Thread* thread : thread_list) {
      total += thread->GetThreadLocalObjectsAllocated();
    }
  }
  return total;
}

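// Flushes |thread|'s TLAB allocation counters into the space-wide totals and clears its TLAB.
// Caller must hold block_lock_.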
void BumpPointerSpace::RevokeThreadLocalBuffersLocked(Thread* thread) {
  objects_allocated_.FetchAndAddSequentiallyConsistent(thread->GetThreadLocalObjectsAllocated());
  bytes_allocated_.FetchAndAddSequentiallyConsistent(thread->GetThreadLocalBytesAllocated());
  thread->SetTlab(nullptr, nullptr);
}

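// Revokes |self|'s current TLAB (if any) and carves out a new |bytes|-sized block for it.
// Returns false if the space does not have enough room left.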
bool BumpPointerSpace::AllocNewTlab(Thread* self, size_t bytes) {
  MutexLock mu(Thread::Current(), block_lock_);
  RevokeThreadLocalBuffersLocked(self);
  uint8_t* start = AllocBlock(bytes);
  if (start == nullptr) {
    return false;
  }
  self->SetTlab(start, start + bytes);
  return true;
}

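// Since allocation is strictly linear, the largest possible contiguous allocation is simply the
// unused tail of the space.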
void BumpPointerSpace::LogFragmentationAllocFailure(std::ostream& os,
                                                    size_t /* failed_alloc_bytes */) {
  size_t max_contiguous_allocation = Limit() - End();
  os << "; failed due to fragmentation (largest possible contiguous allocation "
     << max_contiguous_allocation << " bytes)";
  // Caller's job to print failed_alloc_bytes.
}

}  // namespace space
}  // namespace gc
}  // namespace art