semi_space.h revision 815873ecc312b1d231acce71e1a16f42cdaf09f2
/*
 * Copyright (C) 2013 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_GC_COLLECTOR_SEMI_SPACE_H_
#define ART_RUNTIME_GC_COLLECTOR_SEMI_SPACE_H_

#include "atomic.h"
#include "barrier.h"
#include "base/macros.h"
#include "base/mutex.h"
#include "garbage_collector.h"
#include "object_callbacks.h"
#include "offsets.h"
#include "UniquePtr.h"

namespace art {

namespace mirror {
  class Class;
  class Object;
  template<class T> class ObjectArray;
}  // namespace mirror

class StackVisitor;
class Thread;

namespace gc {

namespace accounting {
  template <typename T> class AtomicStack;
  class MarkIfReachesAllocspaceVisitor;
  class ModUnionClearCardVisitor;
  class ModUnionVisitor;
  class ModUnionTableBitmap;
  class MarkStackChunk;
  typedef AtomicStack<mirror::Object*> ObjectStack;
  class SpaceBitmap;
}  // namespace accounting

namespace space {
  class BumpPointerSpace;
  class ContinuousMemMapAllocSpace;
  class ContinuousSpace;
  class MallocSpace;
}  // namespace space

class Heap;

namespace collector {

class SemiSpace : public GarbageCollector {
 public:
  explicit SemiSpace(Heap* heap, bool generational = false,
                     const std::string& name_prefix = "");

  ~SemiSpace() {}

  virtual void InitializePhase();
  virtual bool IsConcurrent() const {
    return false;
  }
  virtual void MarkingPhase() EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
  virtual void ReclaimPhase() EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
  virtual void FinishPhase() EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
  virtual void MarkReachableObjects()
      EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
  virtual GcType GetGcType() const {
    return kGcTypePartial;
  }

  // Sets which space we will be copying objects to.
  void SetToSpace(space::ContinuousMemMapAllocSpace* to_space);

  // Set the space where we copy objects from.
  void SetFromSpace(space::ContinuousMemMapAllocSpace* from_space);

  // Initializes internal structures.
  void Init();

  // Find the default mark bitmap.
  void FindDefaultMarkBitmap();

  // Returns the new address of the object.
  mirror::Object* MarkObject(mirror::Object* object)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);

  void ScanObject(mirror::Object* obj)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);

  // Marks the root set at the start of a garbage collection.
  void MarkRoots()
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
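
  // A rough usage sketch (illustrative only; heap, bump_pointer_space and target_space are
  // hypothetical names set up by the caller). The Heap normally configures the two spaces and
  // then drives the collection through the GarbageCollector entry point, which runs the
  // InitializePhase()/MarkingPhase()/ReclaimPhase()/FinishPhase() sequence:
  //
  //   SemiSpace collector(heap, /*generational=*/false);
  //   collector.SetFromSpace(bump_pointer_space);  // space being evacuated
  //   collector.SetToSpace(target_space);          // space surviving objects are copied into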

  // Make a space immune. Immune spaces have all live objects marked - that is, the mark and
  // live bitmaps are bound together.
  void ImmuneSpace(space::ContinuousSpace* space)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Bind the live bits to the mark bits of bitmaps for spaces that are never collected, i.e.
  // the image. Mark that portion of the heap as immune.
  virtual void BindBitmaps() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  void UnBindBitmaps()
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);

  void ProcessReferences(Thread* self)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Sweeps unmarked objects to complete the garbage collection.
  virtual void Sweep(bool swap_bitmaps) EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);

  // Sweeps unmarked objects in the large object space to complete the garbage collection.
  void SweepLargeObjects(bool swap_bitmaps) EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);

  // Sweep only pointers within an array. WARNING: Trashes objects.
  void SweepArray(accounting::ObjectStack* allocation_stack_, bool swap_bitmaps)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);

  // TODO: enable thread safety analysis when in use by multiple worker threads.
  template <typename MarkVisitor>
  void ScanObjectVisit(const mirror::Object* obj, const MarkVisitor& visitor)
      NO_THREAD_SAFETY_ANALYSIS;

  void SweepSystemWeaks()
      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);

  template <typename Visitor>
  static void VisitObjectReferencesAndClass(mirror::Object* obj, const Visitor& visitor)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);

  static void MarkRootCallback(mirror::Object** root, void* arg, uint32_t /*tid*/,
                               RootType /*root_type*/)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);

  static mirror::Object* MarkObjectCallback(mirror::Object* object, void* arg)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);

  static mirror::Object* RecursiveMarkObjectCallback(mirror::Object* root, void* arg)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);

  virtual mirror::Object* MarkNonForwardedObject(mirror::Object* obj)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);

 protected:
  // Returns null if the object is not marked, otherwise returns the forwarding address (same as
  // the object itself for non-movable objects).
  mirror::Object* GetMarkedForwardAddress(mirror::Object* object) const;

  static mirror::Object* MarkedForwardingAddressCallback(mirror::Object* object, void* arg)
      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);

  // Marks a large object in the large object space.
  bool MarkLargeObject(const mirror::Object* obj)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);

  // Resize the mark stack to new_size.
  void ResizeMarkStack(size_t new_size);

  // Returns true if we should sweep the space.
  virtual bool ShouldSweepSpace(space::ContinuousSpace* space) const;

  // Returns how many threads we should use for the current GC phase, based on whether we are
  // paused and therefore how much we care about pause times.
  size_t GetThreadCount(bool paused) const;
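
  // Illustrative pattern for how callers use MarkObject to fix up a reference (a sketch;
  // old_ref is a hypothetical local, and the write-back target depends on whether the
  // reference came from a root, a field or an array element):
  //
  //   mirror::Object* old_ref = ...;                              // reference read from the heap
  //   mirror::Object* new_ref = semi_space->MarkObject(old_ref);  // copies to to-space if needed
  //   if (new_ref != old_ref) {
  //     // Write new_ref back to the original location, since the object has moved.
  //   }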

  // Returns true if an object is inside of the immune region (assumed to be marked).
  bool IsImmune(const mirror::Object* obj) const ALWAYS_INLINE {
    return obj >= immune_begin_ && obj < immune_end_;
  }

  bool IsImmuneSpace(const space::ContinuousSpace* space) const;

  static void VerifyRootCallback(const mirror::Object* root, void* arg, size_t vreg,
                                 const StackVisitor* visitor);

  void VerifyRoot(const mirror::Object* root, size_t vreg, const StackVisitor* visitor)
      NO_THREAD_SAFETY_ANALYSIS;

  template <typename Visitor>
  static void VisitInstanceFieldsReferences(const mirror::Class* klass, const mirror::Object* obj,
                                            const Visitor& visitor)
      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);

  // Visit the header, static field references, and interface pointers of a class object.
  template <typename Visitor>
  static void VisitClassReferences(const mirror::Class* klass, const mirror::Object* obj,
                                   const Visitor& visitor)
      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);

  template <typename Visitor>
  static void VisitStaticFieldsReferences(const mirror::Class* klass, const Visitor& visitor)
      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);

  template <typename Visitor>
  static void VisitFieldsReferences(const mirror::Object* obj, uint32_t ref_offsets, bool is_static,
                                    const Visitor& visitor)
      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);

  // Visit all of the references in an object array.
  template <typename Visitor>
  static void VisitObjectArrayReferences(const mirror::ObjectArray<mirror::Object>* array,
                                         const Visitor& visitor)
      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);

  // Visits the header and field references of a data object.
  template <typename Visitor>
  static void VisitOtherReferences(const mirror::Class* klass, const mirror::Object* obj,
                                   const Visitor& visitor)
      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_) {
    return VisitInstanceFieldsReferences(klass, obj, visitor);
  }

  // Push an object onto the mark stack.
  inline void MarkStackPush(mirror::Object* obj);

  void UpdateAndMarkModUnion()
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Schedules an unmarked object for reference processing.
  void DelayReferenceReferent(mirror::Class* klass, mirror::Object* reference)
      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
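
  // Rough shape of a visitor accepted by the Visit*References helpers above (a sketch; the
  // exact parameter list expected by this revision may differ):
  //
  //   struct ExampleVisitor {
  //     void operator()(const mirror::Object* obj, const mirror::Object* ref,
  //                     const MemberOffset& offset, bool is_static) const {
  //       // Inspect the reference "ref" held by "obj" at "offset".
  //     }
  //   };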

  // Recursively blackens objects on the mark stack.
  void ProcessMarkStack(bool paused)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_);

  void EnqueueFinalizerReferences(mirror::Object** ref)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_);

  void PreserveSomeSoftReferences(mirror::Object** ref)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_);

  void ClearWhiteReferences(mirror::Object** list)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_);

  void ProcessReferences(mirror::Object** soft_references, bool clear_soft_references,
                         mirror::Object** weak_references,
                         mirror::Object** finalizer_references,
                         mirror::Object** phantom_references)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_);

  inline mirror::Object* GetForwardingAddressInFromSpace(mirror::Object* obj) const;

  // Stack of objects that have been marked but whose references have not yet been scanned.
  accounting::ObjectStack* mark_stack_;

  // Immune range; every object inside the immune range is assumed to be marked.
  mirror::Object* immune_begin_;
  mirror::Object* immune_end_;

  // If true, the large object space is immune.
  bool is_large_object_space_immune_;

  // Destination and source spaces (can be any type of ContinuousMemMapAllocSpace which either has
  // a live bitmap or doesn't).
  space::ContinuousMemMapAllocSpace* to_space_;
  accounting::SpaceBitmap* to_space_live_bitmap_;  // Cached live bitmap as an optimization.
  space::ContinuousMemMapAllocSpace* from_space_;

  Thread* self_;

  // When true, the generational mode (promotion and the bump pointer
  // space only collection) is enabled. TODO: move these to a new file
  // as a new garbage collector?
  bool generational_;

  // Used for the generational mode. The end/top of the bump
  // pointer space at the end of the last collection.
  byte* last_gc_to_space_end_;

  // Used for the generational mode. During a collection, keeps track
  // of how many bytes of objects have been copied so far from the
  // bump pointer space to the non-moving space.
  uint64_t bytes_promoted_;

  // Used for the generational mode. When true, collect the whole
  // heap. When false, collect only the bump pointer spaces.
  bool whole_heap_collection_;

  // Used for the generational mode. A counter used to enable
  // whole_heap_collection_ once per interval.
  int whole_heap_collection_interval_counter_;

  // How many bytes we avoided dirtying.
  size_t saved_bytes_;

  // Used for the generational mode. The default interval of the whole
  // heap collection. If N, the whole heap collection occurs once every N
  // collections.
  static constexpr int kDefaultWholeHeapCollectionInterval = 5;

 private:
  DISALLOW_COPY_AND_ASSIGN(SemiSpace);
};

}  // namespace collector
}  // namespace gc
}  // namespace art

#endif  // ART_RUNTIME_GC_COLLECTOR_SEMI_SPACE_H_