/*
 * Copyright (C) 2012 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "large_object_space.h"

#include <sys/mman.h>

#include <memory>

#include <android-base/logging.h>

#include "base/macros.h"
#include "base/memory_tool.h"
#include "base/mutex-inl.h"
#include "base/os.h"
#include "base/stl_util.h"
#include "gc/accounting/heap_bitmap-inl.h"
#include "gc/accounting/space_bitmap-inl.h"
#include "gc/heap.h"
#include "image.h"
#include "scoped_thread_state_change-inl.h"
#include "space-inl.h"
#include "thread-current-inl.h"

namespace art {
namespace gc {
namespace space {

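// A LargeObjectMapSpace that pads each allocation with a red zone on both sides and marks the
// red zones inaccessible, so that memory tools (e.g. Valgrind) can flag reads or writes that run
// past the bounds of a large object.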
class MemoryToolLargeObjectMapSpace FINAL : public LargeObjectMapSpace {
 public:
  explicit MemoryToolLargeObjectMapSpace(const std::string& name) : LargeObjectMapSpace(name) {
  }

  ~MemoryToolLargeObjectMapSpace() OVERRIDE {
    // Keep valgrind happy if there are any large objects such as dex cache arrays which aren't
    // freed since they are held live by the class linker.
    MutexLock mu(Thread::Current(), lock_);
    for (auto& m : large_objects_) {
      delete m.second.mem_map;
    }
  }

  mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated,
                        size_t* usable_size, size_t* bytes_tl_bulk_allocated)
      OVERRIDE {
    mirror::Object* obj =
        LargeObjectMapSpace::Alloc(self, num_bytes + kMemoryToolRedZoneBytes * 2, bytes_allocated,
                                   usable_size, bytes_tl_bulk_allocated);
    mirror::Object* object_without_rdz = reinterpret_cast<mirror::Object*>(
        reinterpret_cast<uintptr_t>(obj) + kMemoryToolRedZoneBytes);
    MEMORY_TOOL_MAKE_NOACCESS(reinterpret_cast<void*>(obj), kMemoryToolRedZoneBytes);
    MEMORY_TOOL_MAKE_NOACCESS(
        reinterpret_cast<uint8_t*>(object_without_rdz) + num_bytes,
        kMemoryToolRedZoneBytes);
    if (usable_size != nullptr) {
      *usable_size = num_bytes;  // Since we have redzones, shrink the usable size.
    }
    return object_without_rdz;
  }

  size_t AllocationSize(mirror::Object* obj, size_t* usable_size) OVERRIDE {
    return LargeObjectMapSpace::AllocationSize(ObjectWithRedzone(obj), usable_size);
  }

  bool IsZygoteLargeObject(Thread* self, mirror::Object* obj) const OVERRIDE {
    return LargeObjectMapSpace::IsZygoteLargeObject(self, ObjectWithRedzone(obj));
  }

  size_t Free(Thread* self, mirror::Object* obj) OVERRIDE {
    mirror::Object* object_with_rdz = ObjectWithRedzone(obj);
    MEMORY_TOOL_MAKE_UNDEFINED(object_with_rdz, AllocationSize(obj, nullptr));
    return LargeObjectMapSpace::Free(self, object_with_rdz);
  }

  bool Contains(const mirror::Object* obj) const OVERRIDE {
    return LargeObjectMapSpace::Contains(ObjectWithRedzone(obj));
  }

 private:
  static const mirror::Object* ObjectWithRedzone(const mirror::Object* obj) {
    return reinterpret_cast<const mirror::Object*>(
        reinterpret_cast<uintptr_t>(obj) - kMemoryToolRedZoneBytes);
  }

  static mirror::Object* ObjectWithRedzone(mirror::Object* obj) {
    return reinterpret_cast<mirror::Object*>(
        reinterpret_cast<uintptr_t>(obj) - kMemoryToolRedZoneBytes);
  }

  static constexpr size_t kMemoryToolRedZoneBytes = kPageSize;
};

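// Exchange the live and mark bitmaps, swapping their names as well so that diagnostics stay
// descriptive. Typically done by the GC after marking is complete.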
void LargeObjectSpace::SwapBitmaps() {
  live_bitmap_.swap(mark_bitmap_);
  // Swap names to get more descriptive diagnostics.
  std::string temp_name = live_bitmap_->GetName();
  live_bitmap_->SetName(mark_bitmap_->GetName());
  mark_bitmap_->SetName(temp_name);
}

LargeObjectSpace::LargeObjectSpace(const std::string& name, uint8_t* begin, uint8_t* end)
    : DiscontinuousSpace(name, kGcRetentionPolicyAlwaysCollect),
      num_bytes_allocated_(0), num_objects_allocated_(0), total_bytes_allocated_(0),
      total_objects_allocated_(0), begin_(begin), end_(end) {
}


void LargeObjectSpace::CopyLiveToMarked() {
  mark_bitmap_->CopyFrom(live_bitmap_.get());
}

LargeObjectMapSpace::LargeObjectMapSpace(const std::string& name)
    : LargeObjectSpace(name, nullptr, nullptr),
      lock_("large object map space lock", kAllocSpaceLock) {}

LargeObjectMapSpace* LargeObjectMapSpace::Create(const std::string& name) {
  if (Runtime::Current()->IsRunningOnMemoryTool()) {
    return new MemoryToolLargeObjectMapSpace(name);
  } else {
    return new LargeObjectMapSpace(name);
  }
}

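// Each large object is backed by its own anonymous MemMap; the map is recorded in large_objects_
// so that Free(), AllocationSize(), and Contains() can look it up by object address.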
mirror::Object* LargeObjectMapSpace::Alloc(Thread* self, size_t num_bytes,
                                           size_t* bytes_allocated, size_t* usable_size,
                                           size_t* bytes_tl_bulk_allocated) {
  std::string error_msg;
  MemMap* mem_map = MemMap::MapAnonymous("large object space allocation", nullptr, num_bytes,
                                         PROT_READ | PROT_WRITE, true, false, &error_msg);
  if (UNLIKELY(mem_map == nullptr)) {
    LOG(WARNING) << "Large object allocation failed: " << error_msg;
    return nullptr;
  }
  mirror::Object* const obj = reinterpret_cast<mirror::Object*>(mem_map->Begin());
  MutexLock mu(self, lock_);
  large_objects_.Put(obj, LargeObject {mem_map, false /* not zygote */});
  const size_t allocation_size = mem_map->BaseSize();
  DCHECK(bytes_allocated != nullptr);

  if (begin_ == nullptr || begin_ > reinterpret_cast<uint8_t*>(obj)) {
    begin_ = reinterpret_cast<uint8_t*>(obj);
  }
  end_ = std::max(end_, reinterpret_cast<uint8_t*>(obj) + allocation_size);

  *bytes_allocated = allocation_size;
  if (usable_size != nullptr) {
    *usable_size = allocation_size;
  }
  DCHECK(bytes_tl_bulk_allocated != nullptr);
  *bytes_tl_bulk_allocated = allocation_size;
  num_bytes_allocated_ += allocation_size;
  total_bytes_allocated_ += allocation_size;
  ++num_objects_allocated_;
  ++total_objects_allocated_;
  return obj;
}

bool LargeObjectMapSpace::IsZygoteLargeObject(Thread* self, mirror::Object* obj) const {
  MutexLock mu(self, lock_);
  auto it = large_objects_.find(obj);
  CHECK(it != large_objects_.end());
  return it->second.is_zygote;
}

void LargeObjectMapSpace::SetAllLargeObjectsAsZygoteObjects(Thread* self) {
  MutexLock mu(self, lock_);
  for (auto& pair : large_objects_) {
    pair.second.is_zygote = true;
  }
}

size_t LargeObjectMapSpace::Free(Thread* self, mirror::Object* ptr) {
  MutexLock mu(self, lock_);
  auto it = large_objects_.find(ptr);
  if (UNLIKELY(it == large_objects_.end())) {
    ScopedObjectAccess soa(self);
    Runtime::Current()->GetHeap()->DumpSpaces(LOG_STREAM(FATAL_WITHOUT_ABORT));
    LOG(FATAL) << "Attempted to free large object " << ptr << " which was not live";
  }
  MemMap* mem_map = it->second.mem_map;
  const size_t map_size = mem_map->BaseSize();
  DCHECK_GE(num_bytes_allocated_, map_size);
  size_t allocation_size = map_size;
  num_bytes_allocated_ -= allocation_size;
  --num_objects_allocated_;
  delete mem_map;
  large_objects_.erase(it);
  return allocation_size;
}

size_t LargeObjectMapSpace::AllocationSize(mirror::Object* obj, size_t* usable_size) {
  MutexLock mu(Thread::Current(), lock_);
  auto it = large_objects_.find(obj);
  CHECK(it != large_objects_.end()) << "Attempted to get size of a large object which is not live";
  size_t alloc_size = it->second.mem_map->BaseSize();
  if (usable_size != nullptr) {
    *usable_size = alloc_size;
  }
  return alloc_size;
}

size_t LargeObjectSpace::FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs) {
  size_t total = 0;
  for (size_t i = 0; i < num_ptrs; ++i) {
    if (kDebugSpaces) {
      CHECK(Contains(ptrs[i]));
    }
    total += Free(self, ptrs[i]);
  }
  return total;
}

void LargeObjectMapSpace::Walk(DlMallocSpace::WalkCallback callback, void* arg) {
  MutexLock mu(Thread::Current(), lock_);
  for (auto& pair : large_objects_) {
    MemMap* mem_map = pair.second.mem_map;
    callback(mem_map->Begin(), mem_map->End(), mem_map->Size(), arg);
    callback(nullptr, nullptr, 0, arg);
  }
}

bool LargeObjectMapSpace::Contains(const mirror::Object* obj) const {
  Thread* self = Thread::Current();
  if (lock_.IsExclusiveHeld(self)) {
    // We hold lock_ so do the check.
    return large_objects_.find(const_cast<mirror::Object*>(obj)) != large_objects_.end();
  } else {
    MutexLock mu(self, lock_);
    return large_objects_.find(const_cast<mirror::Object*>(obj)) != large_objects_.end();
  }
}

// Keeps track of allocation sizes and whether or not the previous allocation is free.
// Used to coalesce free blocks and to find the best-fit block for an allocation. Each allocation
// has an AllocationInfo which records the size of the free block preceding it. Implemented in
// such a way that we can also find the iterator for any allocation info pointer.
class AllocationInfo {
 public:
  AllocationInfo() : prev_free_(0), alloc_size_(0) {
  }
  // Returns the number of kAlignment units that the allocation covers.
  size_t AlignSize() const {
    return alloc_size_ & kFlagsMask;
  }
  // Returns the allocation size in bytes.
  size_t ByteSize() const {
    return AlignSize() * FreeListSpace::kAlignment;
  }
  // Updates the allocation size and whether or not it is free.
  void SetByteSize(size_t size, bool free) {
    DCHECK_EQ(size & ~kFlagsMask, 0u);
    DCHECK_ALIGNED(size, FreeListSpace::kAlignment);
    alloc_size_ = (size / FreeListSpace::kAlignment) | (free ? kFlagFree : 0u);
  }
  // Returns true if the block is free.
  bool IsFree() const {
    return (alloc_size_ & kFlagFree) != 0;
  }
  // Return true if the large object is a zygote object.
  bool IsZygoteObject() const {
    return (alloc_size_ & kFlagZygote) != 0;
  }
  // Change the object to be a zygote object.
  void SetZygoteObject() {
    alloc_size_ |= kFlagZygote;
  }
  // Finds and returns the allocation info directly after this one.
  AllocationInfo* GetNextInfo() {
    return this + AlignSize();
  }
  const AllocationInfo* GetNextInfo() const {
    return this + AlignSize();
  }
  // Returns the previous free allocation info by using the prev_free_ member to figure out
  // where it is. This is only used for coalescing so we only need to be able to do it if the
  // previous allocation info is free.
  AllocationInfo* GetPrevFreeInfo() {
    DCHECK_NE(prev_free_, 0U);
    return this - prev_free_;
  }
  // Returns the address of the object associated with this allocation info.
  mirror::Object* GetObjectAddress() {
    return reinterpret_cast<mirror::Object*>(reinterpret_cast<uintptr_t>(this) + sizeof(*this));
  }
  // Returns the size, in kAlignment units, of the free block preceding this allocation.
  size_t GetPrevFree() const {
    return prev_free_;
  }
  // Returns how many free bytes there are before this block.
  size_t GetPrevFreeBytes() const {
    return GetPrevFree() * FreeListSpace::kAlignment;
  }
  // Update the size of the free block prior to the allocation.
  void SetPrevFreeBytes(size_t bytes) {
    DCHECK_ALIGNED(bytes, FreeListSpace::kAlignment);
    prev_free_ = bytes / FreeListSpace::kAlignment;
  }

 private:
  static constexpr uint32_t kFlagFree = 0x80000000;  // If block is free.
  static constexpr uint32_t kFlagZygote = 0x40000000;  // If the large object is a zygote object.
  // Mask that clears the flag bits, leaving the size in kAlignment units.
  static constexpr uint32_t kFlagsMask = ~(kFlagFree | kFlagZygote);
  // Contains the size of the previous free block with kAlignment as the unit. If 0 then the
  // allocation before us is not free.
  // These variables are undefined in the middle of allocations / free blocks.
  uint32_t prev_free_;
  // Allocation size of this object in kAlignment as the unit.
  uint32_t alloc_size_;
};

size_t FreeListSpace::GetSlotIndexForAllocationInfo(const AllocationInfo* info) const {
  DCHECK_GE(info, allocation_info_);
  DCHECK_LT(info, reinterpret_cast<AllocationInfo*>(allocation_info_map_->End()));
  return info - allocation_info_;
}

AllocationInfo* FreeListSpace::GetAllocationInfoForAddress(uintptr_t address) {
  return &allocation_info_[GetSlotIndexForAddress(address)];
}

const AllocationInfo* FreeListSpace::GetAllocationInfoForAddress(uintptr_t address) const {
  return &allocation_info_[GetSlotIndexForAddress(address)];
}

inline bool FreeListSpace::SortByPrevFree::operator()(const AllocationInfo* a,
                                                      const AllocationInfo* b) const {
  if (a->GetPrevFree() < b->GetPrevFree()) return true;
  if (a->GetPrevFree() > b->GetPrevFree()) return false;
  if (a->AlignSize() < b->AlignSize()) return true;
  if (a->AlignSize() > b->AlignSize()) return false;
  return reinterpret_cast<uintptr_t>(a) < reinterpret_cast<uintptr_t>(b);
}

FreeListSpace* FreeListSpace::Create(const std::string& name, uint8_t* requested_begin,
                                     size_t size) {
  CHECK_EQ(size % kAlignment, 0U);
  std::string error_msg;
  MemMap* mem_map = MemMap::MapAnonymous(name.c_str(), requested_begin, size,
                                         PROT_READ | PROT_WRITE, true, false, &error_msg);
  CHECK(mem_map != nullptr) << "Failed to allocate large object space mem map: " << error_msg;
  return new FreeListSpace(name, mem_map, mem_map->Begin(), mem_map->End());
}

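// The constructor reserves a separate side table with one AllocationInfo per kAlignment-sized
// slot of the space. Allocation metadata (sizes, free flags) lives in this table rather than
// inside the space itself.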
FreeListSpace::FreeListSpace(const std::string& name, MemMap* mem_map, uint8_t* begin, uint8_t* end)
    : LargeObjectSpace(name, begin, end),
      mem_map_(mem_map),
      lock_("free list space lock", kAllocSpaceLock) {
  const size_t space_capacity = end - begin;
  free_end_ = space_capacity;
  CHECK_ALIGNED(space_capacity, kAlignment);
  const size_t alloc_info_size = sizeof(AllocationInfo) * (space_capacity / kAlignment);
  std::string error_msg;
  allocation_info_map_.reset(
      MemMap::MapAnonymous("large object free list space allocation info map",
                           nullptr, alloc_info_size, PROT_READ | PROT_WRITE,
                           false, false, &error_msg));
  CHECK(allocation_info_map_.get() != nullptr) << "Failed to allocate allocation info map: "
      << error_msg;
  allocation_info_ = reinterpret_cast<AllocationInfo*>(allocation_info_map_->Begin());
}

FreeListSpace::~FreeListSpace() {}

void FreeListSpace::Walk(DlMallocSpace::WalkCallback callback, void* arg) {
  MutexLock mu(Thread::Current(), lock_);
  const uintptr_t free_end_start = reinterpret_cast<uintptr_t>(end_) - free_end_;
  AllocationInfo* cur_info = &allocation_info_[0];
  const AllocationInfo* end_info = GetAllocationInfoForAddress(free_end_start);
  while (cur_info < end_info) {
    if (!cur_info->IsFree()) {
      size_t alloc_size = cur_info->ByteSize();
      uint8_t* byte_start = reinterpret_cast<uint8_t*>(GetAddressForAllocationInfo(cur_info));
      uint8_t* byte_end = byte_start + alloc_size;
      callback(byte_start, byte_end, alloc_size, arg);
      callback(nullptr, nullptr, 0, arg);
    }
    cur_info = cur_info->GetNextInfo();
  }
  CHECK_EQ(cur_info, end_info);
}

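// Remove the free block recorded in |info|'s prev_free_ from the free block set.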
void FreeListSpace::RemoveFreePrev(AllocationInfo* info) {
  CHECK_GT(info->GetPrevFree(), 0U);
  auto it = free_blocks_.lower_bound(info);
  CHECK(it != free_blocks_.end());
  CHECK_EQ(*it, info);
  free_blocks_.erase(it);
}

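// Freeing marks the allocation's AllocationInfo as free and coalesces it with the free blocks
// immediately before and/or after it. If the resulting free block touches the unused region at
// the end of the space, it is folded into free_end_ instead of being tracked in free_blocks_.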
size_t FreeListSpace::Free(Thread* self, mirror::Object* obj) {
  MutexLock mu(self, lock_);
  DCHECK(Contains(obj)) << reinterpret_cast<void*>(Begin()) << " " << obj << " "
                        << reinterpret_cast<void*>(End());
  DCHECK_ALIGNED(obj, kAlignment);
  AllocationInfo* info = GetAllocationInfoForAddress(reinterpret_cast<uintptr_t>(obj));
  DCHECK(!info->IsFree());
  const size_t allocation_size = info->ByteSize();
  DCHECK_GT(allocation_size, 0U);
  DCHECK_ALIGNED(allocation_size, kAlignment);
  info->SetByteSize(allocation_size, true);  // Mark as free.
  // Look at the next chunk.
  AllocationInfo* next_info = info->GetNextInfo();
  // Calculate the start of the end free block.
  uintptr_t free_end_start = reinterpret_cast<uintptr_t>(end_) - free_end_;
  size_t prev_free_bytes = info->GetPrevFreeBytes();
  size_t new_free_size = allocation_size;
  if (prev_free_bytes != 0) {
    // Coalesce with previous free chunk.
    new_free_size += prev_free_bytes;
    RemoveFreePrev(info);
    info = info->GetPrevFreeInfo();
    // The previous allocation info must not be free since we are supposed to always coalesce.
    DCHECK_EQ(info->GetPrevFreeBytes(), 0U) << "Previous allocation was free";
  }
  uintptr_t next_addr = GetAddressForAllocationInfo(next_info);
  if (next_addr >= free_end_start) {
    // Easy case, the next chunk is the end free region.
    CHECK_EQ(next_addr, free_end_start);
    free_end_ += new_free_size;
  } else {
    AllocationInfo* new_free_info;
    if (next_info->IsFree()) {
      AllocationInfo* next_next_info = next_info->GetNextInfo();
      // Next next info can't be free since we always coalesce.
      DCHECK(!next_next_info->IsFree());
      DCHECK_ALIGNED(next_next_info->ByteSize(), kAlignment);
      new_free_info = next_next_info;
      new_free_size += next_next_info->GetPrevFreeBytes();
      RemoveFreePrev(next_next_info);
    } else {
      new_free_info = next_info;
    }
    new_free_info->SetPrevFreeBytes(new_free_size);
    free_blocks_.insert(new_free_info);
    info->SetByteSize(new_free_size, true);
    DCHECK_EQ(info->GetNextInfo(), new_free_info);
  }
  --num_objects_allocated_;
  DCHECK_LE(allocation_size, num_bytes_allocated_);
  num_bytes_allocated_ -= allocation_size;
  madvise(obj, allocation_size, MADV_DONTNEED);
  if (kIsDebugBuild) {
    // Can't disallow reads since we use them to find next chunks during coalescing.
    CheckedCall(mprotect, __FUNCTION__, obj, allocation_size, PROT_READ);
  }
  return allocation_size;
}

size_t FreeListSpace::AllocationSize(mirror::Object* obj, size_t* usable_size) {
  DCHECK(Contains(obj));
  AllocationInfo* info = GetAllocationInfoForAddress(reinterpret_cast<uintptr_t>(obj));
  DCHECK(!info->IsFree());
  size_t alloc_size = info->ByteSize();
  if (usable_size != nullptr) {
    *usable_size = alloc_size;
  }
  return alloc_size;
}

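// Best-fit allocation: look up the smallest tracked free block that can hold the rounded-up
// request (free_blocks_ is ordered by prev_free_, i.e. by free-block size). If none fits, carve
// the allocation out of the untracked free region at the end of the space.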
mirror::Object* FreeListSpace::Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated,
                                     size_t* usable_size, size_t* bytes_tl_bulk_allocated) {
  MutexLock mu(self, lock_);
  const size_t allocation_size = RoundUp(num_bytes, kAlignment);
  AllocationInfo temp_info;
  temp_info.SetPrevFreeBytes(allocation_size);
  temp_info.SetByteSize(0, false);
  AllocationInfo* new_info;
  // Find the smallest chunk at least num_bytes in size.
  auto it = free_blocks_.lower_bound(&temp_info);
  if (it != free_blocks_.end()) {
    AllocationInfo* info = *it;
    free_blocks_.erase(it);
    // Fit our object in the previous allocation info free space.
    new_info = info->GetPrevFreeInfo();
    // Remove the newly allocated block from the info and update the prev_free_.
    info->SetPrevFreeBytes(info->GetPrevFreeBytes() - allocation_size);
    if (info->GetPrevFreeBytes() > 0) {
      AllocationInfo* new_free = info - info->GetPrevFree();
      new_free->SetPrevFreeBytes(0);
      new_free->SetByteSize(info->GetPrevFreeBytes(), true);
      // If there is remaining space, insert back into the free set.
      free_blocks_.insert(info);
    }
  } else {
    // Try to steal some memory from the free space at the end of the space.
    if (LIKELY(free_end_ >= allocation_size)) {
      // Fit our object at the start of the end free block.
      new_info = GetAllocationInfoForAddress(reinterpret_cast<uintptr_t>(End()) - free_end_);
      free_end_ -= allocation_size;
    } else {
      return nullptr;
    }
  }
  DCHECK(bytes_allocated != nullptr);
  *bytes_allocated = allocation_size;
  if (usable_size != nullptr) {
    *usable_size = allocation_size;
  }
  DCHECK(bytes_tl_bulk_allocated != nullptr);
  *bytes_tl_bulk_allocated = allocation_size;
  // Need to do these inside of the lock.
  ++num_objects_allocated_;
  ++total_objects_allocated_;
  num_bytes_allocated_ += allocation_size;
  total_bytes_allocated_ += allocation_size;
  mirror::Object* obj = reinterpret_cast<mirror::Object*>(GetAddressForAllocationInfo(new_info));
  // We always put our object at the start of the free block, so there cannot be another free
  // block before it.
  if (kIsDebugBuild) {
    CheckedCall(mprotect, __FUNCTION__, obj, allocation_size, PROT_READ | PROT_WRITE);
  }
  new_info->SetPrevFreeBytes(0);
  new_info->SetByteSize(allocation_size, false);
  return obj;
}

void FreeListSpace::Dump(std::ostream& os) const {
  MutexLock mu(Thread::Current(), lock_);
  os << GetName() << " -"
     << " begin: " << reinterpret_cast<void*>(Begin())
     << " end: " << reinterpret_cast<void*>(End()) << "\n";
  uintptr_t free_end_start = reinterpret_cast<uintptr_t>(end_) - free_end_;
  const AllocationInfo* cur_info =
      GetAllocationInfoForAddress(reinterpret_cast<uintptr_t>(Begin()));
  const AllocationInfo* end_info = GetAllocationInfoForAddress(free_end_start);
  while (cur_info < end_info) {
    size_t size = cur_info->ByteSize();
    uintptr_t address = GetAddressForAllocationInfo(cur_info);
    if (cur_info->IsFree()) {
      os << "Free block at address: " << reinterpret_cast<const void*>(address)
         << " of length " << size << " bytes\n";
    } else {
      os << "Large object at address: " << reinterpret_cast<const void*>(address)
         << " of length " << size << " bytes\n";
    }
    cur_info = cur_info->GetNextInfo();
  }
  if (free_end_) {
    os << "Free block at address: " << reinterpret_cast<const void*>(free_end_start)
       << " of length " << free_end_ << " bytes\n";
  }
}

bool FreeListSpace::IsZygoteLargeObject(Thread* self ATTRIBUTE_UNUSED, mirror::Object* obj) const {
  const AllocationInfo* info = GetAllocationInfoForAddress(reinterpret_cast<uintptr_t>(obj));
  DCHECK(info != nullptr);
  return info->IsZygoteObject();
}

void FreeListSpace::SetAllLargeObjectsAsZygoteObjects(Thread* self) {
  MutexLock mu(self, lock_);
  uintptr_t free_end_start = reinterpret_cast<uintptr_t>(end_) - free_end_;
  for (AllocationInfo* cur_info = GetAllocationInfoForAddress(reinterpret_cast<uintptr_t>(Begin())),
      *end_info = GetAllocationInfoForAddress(free_end_start); cur_info < end_info;
      cur_info = cur_info->GetNextInfo()) {
    if (!cur_info->IsFree()) {
      cur_info->SetZygoteObject();
    }
  }
}

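// Invoked by the bitmap sweep for each batch of unreachable large objects: clears their live
// bits when the bitmaps were not swapped, then frees them and records the freed counts.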
void LargeObjectSpace::SweepCallback(size_t num_ptrs, mirror::Object** ptrs, void* arg) {
  SweepCallbackContext* context = static_cast<SweepCallbackContext*>(arg);
  space::LargeObjectSpace* space = context->space->AsLargeObjectSpace();
  Thread* self = context->self;
  Locks::heap_bitmap_lock_->AssertExclusiveHeld(self);
  // If the bitmaps weren't swapped, we need to clear the live bits here, since as an
  // optimization the GC is not going to re-swap the bitmaps afterwards.
  if (!context->swap_bitmaps) {
    accounting::LargeObjectBitmap* bitmap = space->GetLiveBitmap();
    for (size_t i = 0; i < num_ptrs; ++i) {
      bitmap->Clear(ptrs[i]);
    }
  }
  context->freed.objects += num_ptrs;
  context->freed.bytes += space->FreeList(self, num_ptrs, ptrs);
}

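// Walk the live and mark bitmaps over the space's range and free every object that is live but
// no longer marked, returning the number of objects and bytes freed.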
collector::ObjectBytePair LargeObjectSpace::Sweep(bool swap_bitmaps) {
  if (Begin() >= End()) {
    return collector::ObjectBytePair(0, 0);
  }
  accounting::LargeObjectBitmap* live_bitmap = GetLiveBitmap();
  accounting::LargeObjectBitmap* mark_bitmap = GetMarkBitmap();
  if (swap_bitmaps) {
    std::swap(live_bitmap, mark_bitmap);
  }
  AllocSpace::SweepCallbackContext scc(swap_bitmaps, this);
  std::pair<uint8_t*, uint8_t*> range = GetBeginEndAtomic();
  accounting::LargeObjectBitmap::SweepWalk(*live_bitmap, *mark_bitmap,
                                           reinterpret_cast<uintptr_t>(range.first),
                                           reinterpret_cast<uintptr_t>(range.second),
                                           SweepCallback,
                                           &scc);
  return scc.freed;
}

void LargeObjectSpace::LogFragmentationAllocFailure(std::ostream& /*os*/,
                                                    size_t /*failed_alloc_bytes*/) {
  UNIMPLEMENTED(FATAL);
}

std::pair<uint8_t*, uint8_t*> LargeObjectMapSpace::GetBeginEndAtomic() const {
  MutexLock mu(Thread::Current(), lock_);
  return std::make_pair(Begin(), End());
}

std::pair<uint8_t*, uint8_t*> FreeListSpace::GetBeginEndAtomic() const {
  MutexLock mu(Thread::Current(), lock_);
  return std::make_pair(Begin(), End());
}

}  // namespace space
}  // namespace gc
}  // namespace art