large_object_space.h revision 38c488bcd41ba632a646d7a1d790ec71a2fcf6fa
/*
 * Copyright (C) 2012 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_GC_SPACE_LARGE_OBJECT_SPACE_H_
#define ART_RUNTIME_GC_SPACE_LARGE_OBJECT_SPACE_H_

#include "gc/accounting/gc_allocator.h"
#include "dlmalloc_space.h"
#include "safe_map.h"
#include "space.h"

#include <set>
#include <vector>

namespace art {
namespace gc {
namespace space {

// Abstraction implemented by all large object spaces.
class LargeObjectSpace : public DiscontinuousSpace, public AllocSpace {
 public:
  SpaceType GetType() const OVERRIDE {
    return kSpaceTypeLargeObjectSpace;
  }

  void SwapBitmaps();
  void CopyLiveToMarked();
  virtual void Walk(DlMallocSpace::WalkCallback, void* arg) = 0;
  virtual ~LargeObjectSpace() {}

  uint64_t GetBytesAllocated() OVERRIDE {
    return num_bytes_allocated_;
  }

  uint64_t GetObjectsAllocated() OVERRIDE {
    return num_objects_allocated_;
  }

  uint64_t GetTotalBytesAllocated() const {
    return total_bytes_allocated_;
  }

  uint64_t GetTotalObjectsAllocated() const {
    return total_objects_allocated_;
  }

  size_t FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs) OVERRIDE;

  // LargeObjectSpaces don't have thread local state.
  void RevokeThreadLocalBuffers(art::Thread*) OVERRIDE {
  }
  void RevokeAllThreadLocalBuffers() OVERRIDE {
  }

  bool IsAllocSpace() const OVERRIDE {
    return true;
  }

  AllocSpace* AsAllocSpace() OVERRIDE {
    return this;
  }

  collector::ObjectBytePair Sweep(bool swap_bitmaps);

  virtual bool CanMoveObjects() const OVERRIDE {
    return false;
  }

  // Current address at which the space begins, which may vary as the space is filled.
  byte* Begin() const {
    return begin_;
  }

  // Current address at which the space ends, which may vary as the space is filled.
  byte* End() const {
    return end_;
  }

  void LogFragmentationAllocFailure(std::ostream& os, size_t failed_alloc_bytes) OVERRIDE
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

 protected:
  explicit LargeObjectSpace(const std::string& name, byte* begin, byte* end);

  static void SweepCallback(size_t num_ptrs, mirror::Object** ptrs, void* arg);

  // Approximate number of bytes which have been allocated into the space.
  uint64_t num_bytes_allocated_;
  uint64_t num_objects_allocated_;
  uint64_t total_bytes_allocated_;
  uint64_t total_objects_allocated_;

  // Begin and end, may change as more large objects are allocated.
  byte* begin_;
  byte* end_;

  friend class Space;

 private:
  DISALLOW_COPY_AND_ASSIGN(LargeObjectSpace);
};

// A discontinuous large object space implemented by individual mmap/munmap calls.
class LargeObjectMapSpace : public LargeObjectSpace {
 public:
  // Creates a large object space. Allocations into the large object space use memory maps instead
  // of malloc.
  static LargeObjectMapSpace* Create(const std::string& name);

  // Return the storage space required by obj.
  size_t AllocationSize(mirror::Object* obj, size_t* usable_size);
  mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated,
                        size_t* usable_size);
  size_t Free(Thread* self, mirror::Object* ptr);
  void Walk(DlMallocSpace::WalkCallback, void* arg) OVERRIDE LOCKS_EXCLUDED(lock_);
  // TODO: disabling thread safety analysis as this may be called when we already hold lock_.
  bool Contains(const mirror::Object* obj) const NO_THREAD_SAFETY_ANALYSIS;

 protected:
  explicit LargeObjectMapSpace(const std::string& name);
  virtual ~LargeObjectMapSpace() {}

  // Used to ensure mutual exclusion when the allocation space's data structures are being modified.
  mutable Mutex lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
  std::vector<mirror::Object*,
      accounting::GcAllocator<mirror::Object*>> large_objects_ GUARDED_BY(lock_);
  typedef SafeMap<mirror::Object*, MemMap*, std::less<mirror::Object*>,
      accounting::GcAllocator<std::pair<mirror::Object*, MemMap*>>> MemMaps;
  MemMaps mem_maps_ GUARDED_BY(lock_);
};
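
// An illustrative usage sketch (not part of the original header), based only on the declarations
// above; the 128 KB request size and the space name are arbitrary, and the error handling done by
// the real caller (the heap) is elided.
//
//   LargeObjectMapSpace* los = LargeObjectMapSpace::Create("large object space");
//   size_t bytes_allocated = 0;
//   size_t usable_size = 0;
//   mirror::Object* obj = los->Alloc(Thread::Current(), 128 * 1024, &bytes_allocated, &usable_size);
//   if (obj != nullptr) {
//     // The object is backed by its own memory map rather than a malloc'd chunk.
//     los->Free(Thread::Current(), obj);
//   }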

// A continuous large object space with a free-list to handle holes.
class FreeListSpace FINAL : public LargeObjectSpace {
 public:
  virtual ~FreeListSpace();
  static FreeListSpace* Create(const std::string& name, byte* requested_begin, size_t capacity);

  size_t AllocationSize(mirror::Object* obj, size_t* usable_size) OVERRIDE
      EXCLUSIVE_LOCKS_REQUIRED(lock_);
  mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated,
                        size_t* usable_size) OVERRIDE;
  size_t Free(Thread* self, mirror::Object* obj) OVERRIDE;
  bool Contains(const mirror::Object* obj) const OVERRIDE;
  void Walk(DlMallocSpace::WalkCallback callback, void* arg) OVERRIDE LOCKS_EXCLUDED(lock_);

  // Address at which the space begins.
  byte* Begin() const {
    return begin_;
  }

  // Address at which the space ends, which may vary as the space is filled.
  byte* End() const {
    return end_;
  }

  // Current size of the space.
  size_t Size() const {
    return End() - Begin();
  }

  void Dump(std::ostream& os) const;

 protected:
  static const size_t kAlignment = kPageSize;
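
  // Layout note (an illustrative sketch inferred from the accessors in AllocationHeader below,
  // not normative): each allocation is preceded by an AllocationHeader and sized in multiples of
  // kAlignment, roughly
  //
  //   [prev_free_ bytes of free space][AllocationHeader + object payload == alloc_size_ bytes]
  //
  // alloc_size_ includes the header, so this + alloc_size_ is the following header, and
  // this - prev_free_ points back to the start of the free run preceding this header.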

  class AllocationHeader {
   public:
    // Returns the allocation size, which includes the header.
    size_t AllocationSize() const {
      return alloc_size_;
    }

    // Updates the allocation size in the header; the allocation size includes the header itself.
    void SetAllocationSize(size_t size) {
      DCHECK(IsAligned<kPageSize>(size));
      alloc_size_ = size;
    }

    bool IsFree() const {
      return AllocationSize() == 0;
    }

    // Returns the previous free allocation header by using the prev_free_ member to figure out
    // where it is. If prev_free_ is 0 then we just return this header.
    AllocationHeader* GetPrevFreeAllocationHeader() {
      return reinterpret_cast<AllocationHeader*>(reinterpret_cast<uintptr_t>(this) - prev_free_);
    }

    // Returns the address of the object associated with this allocation header.
    mirror::Object* GetObjectAddress() {
      return reinterpret_cast<mirror::Object*>(reinterpret_cast<uintptr_t>(this) + sizeof(*this));
    }

    // Returns the next allocation header after the object associated with this allocation header.
    AllocationHeader* GetNextAllocationHeader() {
      DCHECK_NE(alloc_size_, 0U);
      return reinterpret_cast<AllocationHeader*>(reinterpret_cast<uintptr_t>(this) + alloc_size_);
    }

    // Returns how many free bytes there are before the block.
    size_t GetPrevFree() const {
      return prev_free_;
    }

    // Update the size of the free block prior to the allocation.
    void SetPrevFree(size_t prev_free) {
      DCHECK(IsAligned<kPageSize>(prev_free));
      prev_free_ = prev_free;
    }

    // Finds and returns the next non-free allocation header after this one.
    // TODO: Optimize; currently O(n) in the number of following free pages.
    AllocationHeader* GetNextNonFree();

    // Used to implement best-fit object allocation. Each allocation has an AllocationHeader which
    // contains the size of the free block preceding it. Implemented in such a way that we can also
    // find the iterator for any allocation header pointer.
    class SortByPrevFree {
     public:
      bool operator()(const AllocationHeader* a, const AllocationHeader* b) const {
        if (a->GetPrevFree() < b->GetPrevFree()) return true;
        if (a->GetPrevFree() > b->GetPrevFree()) return false;
        if (a->AllocationSize() < b->AllocationSize()) return true;
        if (a->AllocationSize() > b->AllocationSize()) return false;
        return reinterpret_cast<uintptr_t>(a) < reinterpret_cast<uintptr_t>(b);
      }
    };
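
    // A minimal sketch (illustrative only; the real allocation path is in large_object_space.cc)
    // of the best-fit lookup this comparator enables, assuming allocation_size has already been
    // rounded up to kAlignment and free_blocks_ is the FreeBlocks set declared at the bottom of
    // FreeListSpace:
    //
    //   AllocationHeader dummy;
    //   dummy.SetPrevFree(allocation_size);
    //   dummy.SetAllocationSize(0);
    //   // First header whose preceding free run is at least allocation_size bytes.
    //   auto it = free_blocks_.lower_bound(&dummy);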

   private:
    // Contains the size of the previous free block; if 0 then the memory preceding us is an
    // allocation.
    size_t prev_free_;

    // Allocation size of this object; 0 means that the allocation header is free memory.
    size_t alloc_size_;

    friend class FreeListSpace;
  };

  FreeListSpace(const std::string& name, MemMap* mem_map, byte* begin, byte* end);

  // Removes header from the free blocks set by finding the corresponding iterator and erasing it.
  void RemoveFreePrev(AllocationHeader* header) EXCLUSIVE_LOCKS_REQUIRED(lock_);

  // Finds the allocation header corresponding to obj.
  AllocationHeader* GetAllocationHeader(const mirror::Object* obj);

  typedef std::set<AllocationHeader*, AllocationHeader::SortByPrevFree,
                   accounting::GcAllocator<AllocationHeader*>> FreeBlocks;

  // There is no footer for allocations at the end of the space, so we keep track of how much
  // free space there is at the end manually.
  std::unique_ptr<MemMap> mem_map_;
  Mutex lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
  size_t free_end_ GUARDED_BY(lock_);
  FreeBlocks free_blocks_ GUARDED_BY(lock_);
};
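
// An illustrative sketch (not from the original header) of how holes are created and reused,
// using only APIs declared above; the space name, capacity, and request sizes are placeholders.
//
//   Thread* self = Thread::Current();
//   FreeListSpace* space = FreeListSpace::Create("free list large object space",
//                                                nullptr, 128 * 1024 * 1024);
//   size_t bytes_allocated = 0;
//   size_t usable_size = 0;
//   mirror::Object* a = space->Alloc(self, 64 * 1024, &bytes_allocated, &usable_size);
//   mirror::Object* b = space->Alloc(self, 64 * 1024, &bytes_allocated, &usable_size);
//   space->Free(self, a);  // Leaves a hole, recorded as the prev_free_ of the following header.
//   // A later Alloc of a similar size may be placed in that hole rather than at the end.
//   mirror::Object* c = space->Alloc(self, 64 * 1024, &bytes_allocated, &usable_size);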

}  // namespace space
}  // namespace gc
}  // namespace art

#endif  // ART_RUNTIME_GC_SPACE_LARGE_OBJECT_SPACE_H_