// space.h revision b76cac637691c29daa9c44e493b5bc26346ed116
1/* 2 * Copyright (C) 2011 The Android Open Source Project 3 * 4 * Licensed under the Apache License, Version 2.0 (the "License"); 5 * you may not use this file except in compliance with the License. 6 * You may obtain a copy of the License at 7 * 8 * http://www.apache.org/licenses/LICENSE-2.0 9 * 10 * Unless required by applicable law or agreed to in writing, software 11 * distributed under the License is distributed on an "AS IS" BASIS, 12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 * See the License for the specific language governing permissions and 14 * limitations under the License. 15 */ 16 17#ifndef ART_RUNTIME_GC_SPACE_SPACE_H_ 18#define ART_RUNTIME_GC_SPACE_SPACE_H_ 19 20#include <memory> 21#include <string> 22 23#include "atomic.h" 24#include "base/macros.h" 25#include "base/mutex.h" 26#include "gc/accounting/space_bitmap.h" 27#include "gc/collector/garbage_collector.h" 28#include "globals.h" 29#include "image.h" 30#include "mem_map.h" 31 32namespace art { 33namespace mirror { 34 class Object; 35} // namespace mirror 36 37namespace gc { 38 39class Heap; 40 41namespace space { 42 43class AllocSpace; 44class BumpPointerSpace; 45class ContinuousMemMapAllocSpace; 46class ContinuousSpace; 47class DiscontinuousSpace; 48class MallocSpace; 49class DlMallocSpace; 50class RosAllocSpace; 51class ImageSpace; 52class LargeObjectSpace; 53class ZygoteSpace; 54 55static constexpr bool kDebugSpaces = kIsDebugBuild; 56 57// See Space::GetGcRetentionPolicy. 58enum GcRetentionPolicy { 59 // Objects are retained forever with this policy for a space. 60 kGcRetentionPolicyNeverCollect, 61 // Every GC cycle will attempt to collect objects in this space. 62 kGcRetentionPolicyAlwaysCollect, 63 // Objects will be considered for collection only in "full" GC cycles, ie faster partial 64 // collections won't scan these areas such as the Zygote. 
65 kGcRetentionPolicyFullCollect, 66}; 67std::ostream& operator<<(std::ostream& os, const GcRetentionPolicy& policy); 68 69enum SpaceType { 70 kSpaceTypeImageSpace, 71 kSpaceTypeMallocSpace, 72 kSpaceTypeZygoteSpace, 73 kSpaceTypeBumpPointerSpace, 74 kSpaceTypeLargeObjectSpace, 75}; 76std::ostream& operator<<(std::ostream& os, const SpaceType& space_type); 77 78// A space contains memory allocated for managed objects. 79class Space { 80 public: 81 // Dump space. Also key method for C++ vtables. 82 virtual void Dump(std::ostream& os) const; 83 84 // Name of the space. May vary, for example before/after the Zygote fork. 85 const char* GetName() const { 86 return name_.c_str(); 87 } 88 89 // The policy of when objects are collected associated with this space. 90 GcRetentionPolicy GetGcRetentionPolicy() const { 91 return gc_retention_policy_; 92 } 93 94 // Is the given object contained within this space? 95 virtual bool Contains(const mirror::Object* obj) const = 0; 96 97 // The kind of space this: image, alloc, zygote, large object. 98 virtual SpaceType GetType() const = 0; 99 100 // Is this an image space, ie one backed by a memory mapped image file. 101 bool IsImageSpace() const { 102 return GetType() == kSpaceTypeImageSpace; 103 } 104 ImageSpace* AsImageSpace(); 105 106 // Is this a dlmalloc backed allocation space? 107 bool IsMallocSpace() const { 108 SpaceType type = GetType(); 109 return type == kSpaceTypeMallocSpace; 110 } 111 MallocSpace* AsMallocSpace(); 112 113 virtual bool IsDlMallocSpace() const { 114 return false; 115 } 116 virtual DlMallocSpace* AsDlMallocSpace(); 117 118 virtual bool IsRosAllocSpace() const { 119 return false; 120 } 121 virtual RosAllocSpace* AsRosAllocSpace(); 122 123 // Is this the space allocated into by the Zygote and no-longer in use for allocation? 124 bool IsZygoteSpace() const { 125 return GetType() == kSpaceTypeZygoteSpace; 126 } 127 virtual ZygoteSpace* AsZygoteSpace(); 128 129 // Is this space a bump pointer space? 
130 bool IsBumpPointerSpace() const { 131 return GetType() == kSpaceTypeBumpPointerSpace; 132 } 133 virtual BumpPointerSpace* AsBumpPointerSpace(); 134 135 // Does this space hold large objects and implement the large object space abstraction? 136 bool IsLargeObjectSpace() const { 137 return GetType() == kSpaceTypeLargeObjectSpace; 138 } 139 LargeObjectSpace* AsLargeObjectSpace(); 140 141 virtual bool IsContinuousSpace() const { 142 return false; 143 } 144 ContinuousSpace* AsContinuousSpace(); 145 146 virtual bool IsDiscontinuousSpace() const { 147 return false; 148 } 149 DiscontinuousSpace* AsDiscontinuousSpace(); 150 151 virtual bool IsAllocSpace() const { 152 return false; 153 } 154 virtual AllocSpace* AsAllocSpace(); 155 156 virtual bool IsContinuousMemMapAllocSpace() const { 157 return false; 158 } 159 virtual ContinuousMemMapAllocSpace* AsContinuousMemMapAllocSpace(); 160 161 // Returns true if objects in the space are movable. 162 virtual bool CanMoveObjects() const = 0; 163 164 virtual ~Space() {} 165 166 protected: 167 Space(const std::string& name, GcRetentionPolicy gc_retention_policy); 168 169 void SetGcRetentionPolicy(GcRetentionPolicy gc_retention_policy) { 170 gc_retention_policy_ = gc_retention_policy; 171 } 172 173 // Name of the space that may vary due to the Zygote fork. 174 std::string name_; 175 176 protected: 177 // When should objects within this space be reclaimed? Not constant as we vary it in the case 178 // of Zygote forking. 179 GcRetentionPolicy gc_retention_policy_; 180 181 private: 182 friend class art::gc::Heap; 183 DISALLOW_COPY_AND_ASSIGN(Space); 184}; 185std::ostream& operator<<(std::ostream& os, const Space& space); 186 187// AllocSpace interface. 188class AllocSpace { 189 public: 190 // Number of bytes currently allocated. 191 virtual uint64_t GetBytesAllocated() = 0; 192 // Number of objects currently allocated. 193 virtual uint64_t GetObjectsAllocated() = 0; 194 195 // Allocate num_bytes without allowing growth. 
If the allocation 196 // succeeds, the output parameter bytes_allocated will be set to the 197 // actually allocated bytes which is >= num_bytes. 198 // Alloc can be called from multiple threads at the same time and must be thread-safe. 199 virtual mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated, 200 size_t* usable_size) = 0; 201 202 // Thread-unsafe allocation for when mutators are suspended, used by the semispace collector. 203 virtual mirror::Object* AllocThreadUnsafe(Thread* self, size_t num_bytes, size_t* bytes_allocated, 204 size_t* usable_size) 205 EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_) { 206 return Alloc(self, num_bytes, bytes_allocated, usable_size); 207 } 208 209 // Return the storage space required by obj. 210 virtual size_t AllocationSize(mirror::Object* obj, size_t* usable_size) = 0; 211 212 // Returns how many bytes were freed. 213 virtual size_t Free(Thread* self, mirror::Object* ptr) = 0; 214 215 // Returns how many bytes were freed. 216 virtual size_t FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs) = 0; 217 218 // Revoke any sort of thread-local buffers that are used to speed up allocations for the given 219 // thread, if the alloc space implementation uses any. 220 virtual void RevokeThreadLocalBuffers(Thread* thread) = 0; 221 222 // Revoke any sort of thread-local buffers that are used to speed up allocations for all the 223 // threads, if the alloc space implementation uses any. 224 virtual void RevokeAllThreadLocalBuffers() = 0; 225 226 protected: 227 struct SweepCallbackContext { 228 SweepCallbackContext(bool swap_bitmaps, space::Space* space); 229 const bool swap_bitmaps; 230 space::Space* const space; 231 Thread* const self; 232 collector::ObjectBytePair freed; 233 }; 234 235 AllocSpace() {} 236 virtual ~AllocSpace() {} 237 238 private: 239 DISALLOW_COPY_AND_ASSIGN(AllocSpace); 240}; 241 242// Continuous spaces have bitmaps, and an address range. 
Although not required, objects within 243// continuous spaces can be marked in the card table. 244class ContinuousSpace : public Space { 245 public: 246 // Address at which the space begins. 247 byte* Begin() const { 248 return begin_; 249 } 250 251 // Current address at which the space ends, which may vary as the space is filled. 252 byte* End() const { 253 return end_.LoadRelaxed(); 254 } 255 256 // The end of the address range covered by the space. 257 byte* Limit() const { 258 return limit_; 259 } 260 261 // Change the end of the space. Be careful with use since changing the end of a space to an 262 // invalid value may break the GC. 263 void SetEnd(byte* end) { 264 end_.StoreRelaxed(end); 265 } 266 267 void SetLimit(byte* limit) { 268 limit_ = limit; 269 } 270 271 // Current size of space 272 size_t Size() const { 273 return End() - Begin(); 274 } 275 276 virtual accounting::ContinuousSpaceBitmap* GetLiveBitmap() const = 0; 277 virtual accounting::ContinuousSpaceBitmap* GetMarkBitmap() const = 0; 278 279 // Maximum which the mapped space can grow to. 280 virtual size_t Capacity() const { 281 return Limit() - Begin(); 282 } 283 284 // Is object within this space? We check to see if the pointer is beyond the end first as 285 // continuous spaces are iterated over from low to high. 286 bool HasAddress(const mirror::Object* obj) const { 287 const byte* byte_ptr = reinterpret_cast<const byte*>(obj); 288 return byte_ptr >= Begin() && byte_ptr < Limit(); 289 } 290 291 bool Contains(const mirror::Object* obj) const { 292 return HasAddress(obj); 293 } 294 295 virtual bool IsContinuousSpace() const { 296 return true; 297 } 298 299 virtual ~ContinuousSpace() {} 300 301 protected: 302 ContinuousSpace(const std::string& name, GcRetentionPolicy gc_retention_policy, 303 byte* begin, byte* end, byte* limit) : 304 Space(name, gc_retention_policy), begin_(begin), end_(end), limit_(limit) { 305 } 306 307 // The beginning of the storage for fast access. 
308 byte* begin_; 309 310 // Current end of the space. 311 Atomic<byte*> end_; 312 313 // Limit of the space. 314 byte* limit_; 315 316 private: 317 DISALLOW_COPY_AND_ASSIGN(ContinuousSpace); 318}; 319 320// A space where objects may be allocated higgledy-piggledy throughout virtual memory. Currently 321// the card table can't cover these objects and so the write barrier shouldn't be triggered. This 322// is suitable for use for large primitive arrays. 323class DiscontinuousSpace : public Space { 324 public: 325 accounting::LargeObjectBitmap* GetLiveBitmap() const { 326 return live_bitmap_.get(); 327 } 328 329 accounting::LargeObjectBitmap* GetMarkBitmap() const { 330 return mark_bitmap_.get(); 331 } 332 333 virtual bool IsDiscontinuousSpace() const OVERRIDE { 334 return true; 335 } 336 337 virtual ~DiscontinuousSpace() {} 338 339 protected: 340 DiscontinuousSpace(const std::string& name, GcRetentionPolicy gc_retention_policy); 341 342 std::unique_ptr<accounting::LargeObjectBitmap> live_bitmap_; 343 std::unique_ptr<accounting::LargeObjectBitmap> mark_bitmap_; 344 345 private: 346 DISALLOW_COPY_AND_ASSIGN(DiscontinuousSpace); 347}; 348 349class MemMapSpace : public ContinuousSpace { 350 public: 351 // Size of the space without a limit on its growth. By default this is just the Capacity, but 352 // for the allocation space we support starting with a small heap and then extending it. 
353 virtual size_t NonGrowthLimitCapacity() const { 354 return Capacity(); 355 } 356 357 MemMap* GetMemMap() { 358 return mem_map_.get(); 359 } 360 361 const MemMap* GetMemMap() const { 362 return mem_map_.get(); 363 } 364 365 MemMap* ReleaseMemMap() { 366 return mem_map_.release(); 367 } 368 369 protected: 370 MemMapSpace(const std::string& name, MemMap* mem_map, byte* begin, byte* end, byte* limit, 371 GcRetentionPolicy gc_retention_policy) 372 : ContinuousSpace(name, gc_retention_policy, begin, end, limit), 373 mem_map_(mem_map) { 374 } 375 376 // Underlying storage of the space 377 std::unique_ptr<MemMap> mem_map_; 378 379 private: 380 DISALLOW_COPY_AND_ASSIGN(MemMapSpace); 381}; 382 383// Used by the heap compaction interface to enable copying from one type of alloc space to another. 384class ContinuousMemMapAllocSpace : public MemMapSpace, public AllocSpace { 385 public: 386 bool IsAllocSpace() const OVERRIDE { 387 return true; 388 } 389 AllocSpace* AsAllocSpace() OVERRIDE { 390 return this; 391 } 392 393 bool IsContinuousMemMapAllocSpace() const OVERRIDE { 394 return true; 395 } 396 ContinuousMemMapAllocSpace* AsContinuousMemMapAllocSpace() { 397 return this; 398 } 399 400 bool HasBoundBitmaps() const EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_); 401 void BindLiveToMarkBitmap() 402 EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_); 403 void UnBindBitmaps() EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_); 404 // Swap the live and mark bitmaps of this space. This is used by the GC for concurrent sweeping. 405 void SwapBitmaps(); 406 407 // Clear the space back to an empty space. 
408 virtual void Clear() = 0; 409 410 accounting::ContinuousSpaceBitmap* GetLiveBitmap() const OVERRIDE { 411 return live_bitmap_.get(); 412 } 413 414 accounting::ContinuousSpaceBitmap* GetMarkBitmap() const OVERRIDE { 415 return mark_bitmap_.get(); 416 } 417 418 collector::ObjectBytePair Sweep(bool swap_bitmaps); 419 virtual accounting::ContinuousSpaceBitmap::SweepCallback* GetSweepCallback() = 0; 420 421 protected: 422 std::unique_ptr<accounting::ContinuousSpaceBitmap> live_bitmap_; 423 std::unique_ptr<accounting::ContinuousSpaceBitmap> mark_bitmap_; 424 std::unique_ptr<accounting::ContinuousSpaceBitmap> temp_bitmap_; 425 426 ContinuousMemMapAllocSpace(const std::string& name, MemMap* mem_map, byte* begin, 427 byte* end, byte* limit, GcRetentionPolicy gc_retention_policy) 428 : MemMapSpace(name, mem_map, begin, end, limit, gc_retention_policy) { 429 } 430 431 private: 432 friend class gc::Heap; 433 DISALLOW_COPY_AND_ASSIGN(ContinuousMemMapAllocSpace); 434}; 435 436} // namespace space 437} // namespace gc 438} // namespace art 439 440#endif // ART_RUNTIME_GC_SPACE_SPACE_H_ 441