space.h revision 31f441464c0c8f840aba37e236ad133f30308d70
1/*
2 * Copyright (C) 2011 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 *      http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17#ifndef ART_RUNTIME_GC_SPACE_SPACE_H_
18#define ART_RUNTIME_GC_SPACE_SPACE_H_
19
20#include <string>
21
22#include "UniquePtr.h"
23#include "base/macros.h"
24#include "base/mutex.h"
25#include "gc/accounting/space_bitmap.h"
26#include "globals.h"
27#include "image.h"
28#include "mem_map.h"
29
30namespace art {
31namespace mirror {
32  class Object;
33}  // namespace mirror
34
35namespace gc {
36
37namespace accounting {
38  class SpaceBitmap;
39}  // namespace accounting
40
41class Heap;
42
43namespace space {
44
45class AllocSpace;
46class BumpPointerSpace;
47class ContinuousMemMapAllocSpace;
48class ContinuousSpace;
49class DiscontinuousSpace;
50class MallocSpace;
51class DlMallocSpace;
52class RosAllocSpace;
53class ImageSpace;
54class LargeObjectSpace;
55class ZygoteSpace;
56
57static constexpr bool kDebugSpaces = kIsDebugBuild;
58
// See Space::GetGcRetentionPolicy.
enum GcRetentionPolicy {
  // Objects are retained forever with this policy for a space.
  kGcRetentionPolicyNeverCollect,
  // Every GC cycle will attempt to collect objects in this space.
  kGcRetentionPolicyAlwaysCollect,
  // Objects will be considered for collection only in "full" GC cycles, i.e. faster partial
  // collections won't scan these areas such as the Zygote.
  kGcRetentionPolicyFullCollect,
};
std::ostream& operator<<(std::ostream& os, const GcRetentionPolicy& policy);
70
// The concrete kind of a space, as reported by Space::GetType(). Used by the non-virtual
// Space::Is*Space() predicates below.
enum SpaceType {
  kSpaceTypeImageSpace,        // Memory-mapped boot image (see ImageSpace).
  kSpaceTypeMallocSpace,       // dlmalloc/rosalloc backed allocation space (see MallocSpace).
  kSpaceTypeZygoteSpace,       // Space inherited from the Zygote, no longer allocated into.
  kSpaceTypeBumpPointerSpace,  // Bump-pointer allocated space (see BumpPointerSpace).
  kSpaceTypeLargeObjectSpace,  // Discontinuous space for large objects.
};
std::ostream& operator<<(std::ostream& os, const SpaceType& space_type);
79
// A space contains memory allocated for managed objects.
//
// Root of the space hierarchy. Provides the name, the GC retention policy, and a set of
// type-test (Is*) / down-cast (As*) pairs. The As* casts are only valid when the matching
// Is* predicate returns true (the implementations live in space.cc).
class Space {
 public:
  // Dump space. Also key method for C++ vtables.
  virtual void Dump(std::ostream& os) const;

  // Name of the space. May vary, for example before/after the Zygote fork.
  const char* GetName() const {
    return name_.c_str();
  }

  // The policy of when objects are collected associated with this space.
  GcRetentionPolicy GetGcRetentionPolicy() const {
    return gc_retention_policy_;
  }

  // Is the given object contained within this space?
  virtual bool Contains(const mirror::Object* obj) const = 0;

  // The kind of space this is: image, malloc, zygote, bump pointer, or large object.
  virtual SpaceType GetType() const = 0;

  // Is this an image space, i.e. one backed by a memory mapped image file.
  bool IsImageSpace() const {
    return GetType() == kSpaceTypeImageSpace;
  }
  // Down-cast; only valid when IsImageSpace() is true.
  ImageSpace* AsImageSpace();

  // Is this a dlmalloc backed allocation space?
  bool IsMallocSpace() const {
    SpaceType type = GetType();
    return type == kSpaceTypeMallocSpace;
  }
  // Down-cast; only valid when IsMallocSpace() is true.
  MallocSpace* AsMallocSpace();

  // Is this a MallocSpace backed specifically by dlmalloc? Overridden by DlMallocSpace.
  virtual bool IsDlMallocSpace() const {
    return false;
  }
  virtual DlMallocSpace* AsDlMallocSpace();

  // Is this a MallocSpace backed specifically by rosalloc? Overridden by RosAllocSpace.
  virtual bool IsRosAllocSpace() const {
    return false;
  }
  virtual RosAllocSpace* AsRosAllocSpace();

  // Is this the space allocated into by the Zygote and no-longer in use for allocation?
  bool IsZygoteSpace() const {
    return GetType() == kSpaceTypeZygoteSpace;
  }
  virtual ZygoteSpace* AsZygoteSpace();

  // Is this space a bump pointer space?
  bool IsBumpPointerSpace() const {
    return GetType() == kSpaceTypeBumpPointerSpace;
  }
  virtual BumpPointerSpace* AsBumpPointerSpace();

  // Does this space hold large objects and implement the large object space abstraction?
  bool IsLargeObjectSpace() const {
    return GetType() == kSpaceTypeLargeObjectSpace;
  }
  LargeObjectSpace* AsLargeObjectSpace();

  // Does this space occupy a single contiguous address range? Overridden by ContinuousSpace.
  virtual bool IsContinuousSpace() const {
    return false;
  }
  ContinuousSpace* AsContinuousSpace();

  // Is this a non-contiguous space (e.g. a large object space)? Overridden by DiscontinuousSpace.
  virtual bool IsDiscontinuousSpace() const {
    return false;
  }
  DiscontinuousSpace* AsDiscontinuousSpace();

  // Does this space implement the AllocSpace interface? Overridden by allocating subclasses.
  virtual bool IsAllocSpace() const {
    return false;
  }
  virtual AllocSpace* AsAllocSpace();

  // Is this a ContinuousMemMapAllocSpace? Overridden by ContinuousMemMapAllocSpace below.
  virtual bool IsContinuousMemMapAllocSpace() const {
    return false;
  }
  virtual ContinuousMemMapAllocSpace* AsContinuousMemMapAllocSpace();

  // Returns true if objects in the space are movable.
  virtual bool CanMoveObjects() const = 0;

  virtual ~Space() {}

 protected:
  Space(const std::string& name, GcRetentionPolicy gc_retention_policy);

  void SetGcRetentionPolicy(GcRetentionPolicy gc_retention_policy) {
    gc_retention_policy_ = gc_retention_policy;
  }

  // Name of the space that may vary due to the Zygote fork.
  std::string name_;

 protected:
  // Context passed through bitmap sweep callbacks (see ContinuousMemMapAllocSpace::Sweep);
  // accumulates the freed object count and byte total for one sweep pass.
  struct SweepCallbackContext {
    bool swap_bitmaps;
    Heap* heap;
    space::Space* space;
    Thread* self;
    size_t freed_objects;
    size_t freed_bytes;
  };

  // When should objects within this space be reclaimed? Not constant as we vary it in the case
  // of Zygote forking.
  GcRetentionPolicy gc_retention_policy_;

 private:
  friend class art::gc::Heap;
  DISALLOW_COPY_AND_ASSIGN(Space);
};
std::ostream& operator<<(std::ostream& os, const Space& space);
197
// AllocSpace interface.
//
// Pure interface implemented by spaces that support object allocation. Mixed into concrete
// space classes via multiple inheritance (see ContinuousMemMapAllocSpace below).
class AllocSpace {
 public:
  // Number of bytes currently allocated.
  virtual uint64_t GetBytesAllocated() = 0;
  // Number of objects currently allocated.
  virtual uint64_t GetObjectsAllocated() = 0;

  // Allocate num_bytes without allowing growth. If the allocation
  // succeeds, the output parameter bytes_allocated will be set to the
  // actually allocated bytes which is >= num_bytes.
  // NOTE(review): usable_size appears to be a second out-param; whether null is permitted
  // depends on the implementation — confirm against the concrete spaces.
  virtual mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated,
                                size_t* usable_size) = 0;

  // Return the storage space required by obj.
  virtual size_t AllocationSize(mirror::Object* obj, size_t* usable_size) = 0;

  // Free a single object. Returns how many bytes were freed.
  virtual size_t Free(Thread* self, mirror::Object* ptr) = 0;

  // Free num_ptrs objects from the ptrs array. Returns how many bytes were freed.
  virtual size_t FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs) = 0;

  // Revoke any sort of thread-local buffers that are used to speed up allocations for the given
  // thread, if the alloc space implementation uses any.
  virtual void RevokeThreadLocalBuffers(Thread* thread) = 0;

  // Revoke any sort of thread-local buffers that are used to speed up allocations for all the
  // threads, if the alloc space implementation uses any.
  virtual void RevokeAllThreadLocalBuffers() = 0;

 protected:
  AllocSpace() {}
  virtual ~AllocSpace() {}

 private:
  DISALLOW_COPY_AND_ASSIGN(AllocSpace);
};
236
237// Continuous spaces have bitmaps, and an address range. Although not required, objects within
238// continuous spaces can be marked in the card table.
239class ContinuousSpace : public Space {
240 public:
241  // Address at which the space begins.
242  byte* Begin() const {
243    return begin_;
244  }
245
246  // Current address at which the space ends, which may vary as the space is filled.
247  byte* End() const {
248    return end_;
249  }
250
251  // The end of the address range covered by the space.
252  byte* Limit() const {
253    return limit_;
254  }
255
256  // Change the end of the space. Be careful with use since changing the end of a space to an
257  // invalid value may break the GC.
258  void SetEnd(byte* end) {
259    end_ = end;
260  }
261
262  void SetLimit(byte* limit) {
263    limit_ = limit;
264  }
265
266  // Current size of space
267  size_t Size() const {
268    return End() - Begin();
269  }
270
271  virtual accounting::SpaceBitmap* GetLiveBitmap() const = 0;
272  virtual accounting::SpaceBitmap* GetMarkBitmap() const = 0;
273
274  // Maximum which the mapped space can grow to.
275  virtual size_t Capacity() const {
276    return Limit() - Begin();
277  }
278
279  // Is object within this space? We check to see if the pointer is beyond the end first as
280  // continuous spaces are iterated over from low to high.
281  bool HasAddress(const mirror::Object* obj) const {
282    const byte* byte_ptr = reinterpret_cast<const byte*>(obj);
283    return byte_ptr >= Begin() && byte_ptr < Limit();
284  }
285
286  bool Contains(const mirror::Object* obj) const {
287    return HasAddress(obj);
288  }
289
290  virtual bool IsContinuousSpace() const {
291    return true;
292  }
293
294  virtual ~ContinuousSpace() {}
295
296 protected:
297  ContinuousSpace(const std::string& name, GcRetentionPolicy gc_retention_policy,
298                  byte* begin, byte* end, byte* limit) :
299      Space(name, gc_retention_policy), begin_(begin), end_(end), limit_(limit) {
300  }
301
302  // The beginning of the storage for fast access.
303  byte* begin_;
304
305  // Current end of the space.
306  byte* volatile end_;
307
308  // Limit of the space.
309  byte* limit_;
310
311 private:
312  DISALLOW_COPY_AND_ASSIGN(ContinuousSpace);
313};
314
315// A space where objects may be allocated higgledy-piggledy throughout virtual memory. Currently
316// the card table can't cover these objects and so the write barrier shouldn't be triggered. This
317// is suitable for use for large primitive arrays.
318class DiscontinuousSpace : public Space {
319 public:
320  accounting::ObjectSet* GetLiveObjects() const {
321    return live_objects_.get();
322  }
323
324  accounting::ObjectSet* GetMarkObjects() const {
325    return mark_objects_.get();
326  }
327
328  virtual bool IsDiscontinuousSpace() const {
329    return true;
330  }
331
332  virtual ~DiscontinuousSpace() {}
333
334 protected:
335  DiscontinuousSpace(const std::string& name, GcRetentionPolicy gc_retention_policy);
336
337  UniquePtr<accounting::ObjectSet> live_objects_;
338  UniquePtr<accounting::ObjectSet> mark_objects_;
339
340 private:
341  DISALLOW_COPY_AND_ASSIGN(DiscontinuousSpace);
342};
343
// A continuous space backed by, and owning, a memory mapping (MemMap).
class MemMapSpace : public ContinuousSpace {
 public:
  // Size of the space without a limit on its growth. By default this is just the Capacity, but
  // for the allocation space we support starting with a small heap and then extending it.
  virtual size_t NonGrowthLimitCapacity() const {
    return Capacity();
  }

  // The underlying mapping; ownership stays with this space.
  MemMap* GetMemMap() {
    return mem_map_.get();
  }

  const MemMap* GetMemMap() const {
    return mem_map_.get();
  }

  // Transfers ownership of the mapping to the caller; GetMemMap() returns null afterwards.
  MemMap* ReleaseMemMap() {
    return mem_map_.release();
  }

 protected:
  // Takes ownership of mem_map; begin/end/limit describe the usable address range within it.
  MemMapSpace(const std::string& name, MemMap* mem_map, byte* begin, byte* end, byte* limit,
              GcRetentionPolicy gc_retention_policy)
      : ContinuousSpace(name, gc_retention_policy, begin, end, limit),
        mem_map_(mem_map) {
  }

  // Underlying storage of the space
  UniquePtr<MemMap> mem_map_;

 private:
  DISALLOW_COPY_AND_ASSIGN(MemMapSpace);
};
377
378// Used by the heap compaction interface to enable copying from one type of alloc space to another.
379class ContinuousMemMapAllocSpace : public MemMapSpace, public AllocSpace {
380 public:
381  bool IsAllocSpace() const OVERRIDE {
382    return true;
383  }
384  AllocSpace* AsAllocSpace() OVERRIDE {
385    return this;
386  }
387
388  bool IsContinuousMemMapAllocSpace() const OVERRIDE {
389    return true;
390  }
391  ContinuousMemMapAllocSpace* AsContinuousMemMapAllocSpace() {
392    return this;
393  }
394
395  bool HasBoundBitmaps() const EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
396  void BindLiveToMarkBitmap()
397      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
398  void UnBindBitmaps() EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
399  // Swap the live and mark bitmaps of this space. This is used by the GC for concurrent sweeping.
400  void SwapBitmaps();
401
402  // Reset the space back to an empty space and release memory.
403  virtual void Clear() = 0;
404
405  accounting::SpaceBitmap* GetLiveBitmap() const {
406    return live_bitmap_.get();
407  }
408
409  accounting::SpaceBitmap* GetMarkBitmap() const {
410    return mark_bitmap_.get();
411  }
412
413  void Sweep(bool swap_bitmaps, size_t* freed_objects, size_t* freed_bytes);
414  virtual accounting::SpaceBitmap::SweepCallback* GetSweepCallback() = 0;
415
416 protected:
417  UniquePtr<accounting::SpaceBitmap> live_bitmap_;
418  UniquePtr<accounting::SpaceBitmap> mark_bitmap_;
419  UniquePtr<accounting::SpaceBitmap> temp_bitmap_;
420
421  ContinuousMemMapAllocSpace(const std::string& name, MemMap* mem_map, byte* begin,
422                             byte* end, byte* limit, GcRetentionPolicy gc_retention_policy)
423      : MemMapSpace(name, mem_map, begin, end, limit, gc_retention_policy) {
424  }
425
426 private:
427  friend class gc::Heap;
428  DISALLOW_COPY_AND_ASSIGN(ContinuousMemMapAllocSpace);
429};
430
431}  // namespace space
432}  // namespace gc
433}  // namespace art
434
435#endif  // ART_RUNTIME_GC_SPACE_SPACE_H_
436