semi_space.h revision 8d562103c3a3452fb15ef4b1c64df767b70507a4
/*
 * Copyright (C) 2013 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_GC_COLLECTOR_SEMI_SPACE_H_
#define ART_RUNTIME_GC_COLLECTOR_SEMI_SPACE_H_

#include "atomic.h"
#include "base/macros.h"
#include "base/mutex.h"
#include "garbage_collector.h"
#include "immune_region.h"
#include "object_callbacks.h"
#include "offsets.h"
#include "UniquePtr.h"

namespace art {

namespace mirror {
  class Class;
  class Object;
  template<class T> class ObjectArray;
}  // namespace mirror

class StackVisitor;
class Thread;

namespace gc {

namespace accounting {
  template <typename T> class AtomicStack;
  class MarkIfReachesAllocspaceVisitor;
  class ModUnionClearCardVisitor;
  class ModUnionVisitor;
  class ModUnionTableBitmap;
  class MarkStackChunk;
  typedef AtomicStack<mirror::Object*> ObjectStack;
  class SpaceBitmap;
}  // namespace accounting

namespace space {
  class BumpPointerSpace;
  class ContinuousMemMapAllocSpace;
  class ContinuousSpace;
  class MallocSpace;
}  // namespace space

class Heap;

namespace collector {

class SemiSpace : public GarbageCollector {
 public:
  explicit SemiSpace(Heap* heap, bool generational = false,
                     const std::string& name_prefix = "");

  ~SemiSpace() {}

  virtual void InitializePhase();
  virtual bool IsConcurrent() const {
    return false;
  }
  virtual void MarkingPhase() EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
  virtual void ReclaimPhase() EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
  virtual void FinishPhase() EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
  virtual void MarkReachableObjects()
      EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
  virtual GcType GetGcType() const {
    return kGcTypePartial;
  }

  // Sets the space we copy objects to.
  void SetToSpace(space::ContinuousMemMapAllocSpace* to_space);

  // Sets the space we copy objects from.
  void SetFromSpace(space::ContinuousMemMapAllocSpace* from_space);

  // Initializes internal structures.
  void Init();

  // Finds the default mark bitmap.
  void FindDefaultMarkBitmap();

  // Marks the object and returns its new address: the forwarding address for moved objects, the
  // object itself for non-movable ones.
  mirror::Object* MarkObject(mirror::Object* object)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);

  // Scans the reference fields of an object, marking each referent.
  void ScanObject(mirror::Object* obj)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);

  // Marks the root set at the start of a garbage collection.
  void MarkRoots()
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
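  // Illustrative call sequence (editorial sketch, not part of the original header): the Heap is
  // expected to point the collector at the two spaces before running it, after which
  // GarbageCollector::Run() drives the phases declared above. The space names here are
  // placeholders:
  //
  //   collector->SetFromSpace(bump_pointer_space);  // Space being evacuated.
  //   collector->SetToSpace(temp_space);            // Space receiving the copies.
  //   collector->Run(...);  // InitializePhase -> MarkingPhase -> ReclaimPhase -> FinishPhase.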
  // Binds the live bits to the mark bits of bitmaps for spaces that are never collected, i.e.
  // the image. Marks that portion of the heap as immune.
  virtual void BindBitmaps() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Reverts the bitmap bindings established by BindBitmaps.
  void UnBindBitmaps()
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);

  // Processes soft, weak, finalizer, and phantom references after marking.
  void ProcessReferences(Thread* self)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Sweeps unmarked objects to complete the garbage collection.
  virtual void Sweep(bool swap_bitmaps) EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);

  // Sweeps unmarked objects in the large object space.
  void SweepLargeObjects(bool swap_bitmaps) EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);

  // Sweeps only pointers within an array. WARNING: Trashes objects.
  void SweepArray(accounting::ObjectStack* allocation_stack_, bool swap_bitmaps)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);

  // TODO: enable thread safety analysis when in use by multiple worker threads.
  template <typename MarkVisitor>
  void ScanObjectVisit(const mirror::Object* obj, const MarkVisitor& visitor)
      NO_THREAD_SAFETY_ANALYSIS;

  // Sweeps system weaks (weak references held by the runtime itself, e.g. interned strings),
  // clearing those whose referents are unmarked.
  void SweepSystemWeaks()
      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);

  template <typename Visitor>
  static void VisitObjectReferencesAndClass(mirror::Object* obj, const Visitor& visitor)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);

  static void MarkRootCallback(mirror::Object** root, void* arg, uint32_t /*tid*/,
                               RootType /*root_type*/)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);

  static mirror::Object* MarkObjectCallback(mirror::Object* root, void* arg)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);

  static void ProcessMarkStackCallback(void* arg)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_);

  // Marks an object that does not yet have a forwarding address, returning its new address.
  virtual mirror::Object* MarkNonForwardedObject(mirror::Object* obj)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);

 protected:
  // Returns null if the object is not marked, otherwise returns the forwarding address (same as
  // the object itself for non-movable things).
  mirror::Object* GetMarkedForwardAddress(mirror::Object* object) const;

  static mirror::Object* MarkedForwardingAddressCallback(mirror::Object* object, void* arg)
      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);

  // Marks a large object, returning true if the object was not previously marked.
  bool MarkLargeObject(const mirror::Object* obj)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);

  // Resizes the mark stack to new_size; used to grow it (typically to 2x) when it overflows.
  void ResizeMarkStack(size_t new_size);

  // Returns true if we should sweep the space.
  virtual bool ShouldSweepSpace(space::ContinuousSpace* space) const;

  static void VerifyRootCallback(const mirror::Object* root, void* arg, size_t vreg,
                                 const StackVisitor* visitor);

  void VerifyRoot(const mirror::Object* root, size_t vreg, const StackVisitor* visitor)
      NO_THREAD_SAFETY_ANALYSIS;
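  // Example visitor (editorial sketch, not part of the original header): ScanObjectVisit above
  // and the Visit*References helpers below accept any functor that is invoked once per reference
  // slot. The exact parameter list shown here is an assumption based on how similar ART visitors
  // of this era are called:
  //
  //   struct TallyReferencesVisitor {
  //     void operator()(const mirror::Object* obj, const mirror::Object* ref,
  //                     const MemberOffset& offset, bool is_static) const {
  //       // E.g. count or validate the reference |ref| held by |obj| at |offset|.
  //     }
  //   };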
  template <typename Visitor>
  static void VisitInstanceFieldsReferences(const mirror::Class* klass, const mirror::Object* obj,
                                            const Visitor& visitor)
      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);

  // Visits the header, static field references, and interface pointers of a class object.
  template <typename Visitor>
  static void VisitClassReferences(const mirror::Class* klass, const mirror::Object* obj,
                                   const Visitor& visitor)
      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);

  template <typename Visitor>
  static void VisitStaticFieldsReferences(const mirror::Class* klass, const Visitor& visitor)
      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);

  template <typename Visitor>
  static void VisitFieldsReferences(const mirror::Object* obj, uint32_t ref_offsets, bool is_static,
                                    const Visitor& visitor)
      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);

  // Visits all of the references in an object array.
  template <typename Visitor>
  static void VisitObjectArrayReferences(const mirror::ObjectArray<mirror::Object>* array,
                                         const Visitor& visitor)
      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);

  // Visits the header and field references of a data object.
  template <typename Visitor>
  static void VisitOtherReferences(const mirror::Class* klass, const mirror::Object* obj,
                                   const Visitor& visitor)
      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_) {
    return VisitInstanceFieldsReferences(klass, obj, visitor);
  }

  // Pushes an object onto the mark stack.
  inline void MarkStackPush(mirror::Object* obj);

  void UpdateAndMarkModUnion()
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Schedules an unmarked object for reference processing.
  void DelayReferenceReferent(mirror::Class* klass, mirror::Object* reference)
      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);

  // Recursively blackens objects on the mark stack.
  void ProcessMarkStack()
      EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_);

  void EnqueueFinalizerReferences(mirror::Object** ref)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_);

  void PreserveSomeSoftReferences(mirror::Object** ref)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_);

  void ClearWhiteReferences(mirror::Object** list)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_);

  void ProcessReferences(mirror::Object** soft_references, bool clear_soft_references,
                         mirror::Object** weak_references,
                         mirror::Object** finalizer_references,
                         mirror::Object** phantom_references)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_);

  // Returns the forwarding address of an object in the from-space, or null if none is installed.
  inline mirror::Object* GetForwardingAddressInFromSpace(mirror::Object* obj) const;
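  // Editorial sketch (assumed, not from this header) of how the forwarding lookup is typically
  // used while marking: an object in the from-space either already has a forwarding address
  // installed, or it still needs to be copied. Roughly:
  //
  //   if (from_space_->HasAddress(obj)) {
  //     mirror::Object* forward = GetForwardingAddressInFromSpace(obj);
  //     if (forward != nullptr) {
  //       return forward;                    // Already copied; use the new location.
  //     }
  //     return MarkNonForwardedObject(obj);  // Copy and install a forwarding address.
  //   }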
  // Stack of gray objects: objects that have been marked but whose reference fields still need
  // to be scanned.
  accounting::ObjectStack* mark_stack_;

  // Immune region; every object inside the immune region is assumed to be marked.
  ImmuneRegion immune_region_;

  // If true, the large object space is immune.
  bool is_large_object_space_immune_;

  // Destination and source spaces (any type of ContinuousMemMapAllocSpace, whether or not it has
  // a live bitmap).
  space::ContinuousMemMapAllocSpace* to_space_;
  accounting::SpaceBitmap* to_space_live_bitmap_;  // Cached live bitmap as an optimization.
  space::ContinuousMemMapAllocSpace* from_space_;

  // The thread running the collection.
  Thread* self_;

  // When true, generational mode (promotion and bump-pointer-space-only collections) is
  // enabled. TODO: move these to a new file as a new garbage collector?
  const bool generational_;

  // Used for the generational mode. The end/top of the bump pointer space at the end of the
  // last collection.
  byte* last_gc_to_space_end_;

  // Used for the generational mode. During a collection, keeps track of how many bytes of
  // objects have been copied so far from the bump pointer space to the non-moving space.
  uint64_t bytes_promoted_;

  // Used for the generational mode. When true, collect the whole heap. When false, collect
  // only the bump pointer spaces.
  bool whole_heap_collection_;

  // Used for the generational mode. A counter used to enable whole_heap_collection_ once per
  // interval.
  int whole_heap_collection_interval_counter_;

  // How many bytes we avoided dirtying.
  size_t saved_bytes_;

  // Used for the generational mode. The default interval between whole heap collections: if it
  // is N, a whole heap collection occurs every N collections.
  static constexpr int kDefaultWholeHeapCollectionInterval = 5;

 private:
  DISALLOW_COPY_AND_ASSIGN(SemiSpace);
};

}  // namespace collector
}  // namespace gc
}  // namespace art

#endif  // ART_RUNTIME_GC_COLLECTOR_SEMI_SPACE_H_
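// Editorial sketch (inferred from the field comments above, not from this header): in
// generational mode the copy destination is chosen per object, with survivors of the previous
// collection promoted out of the moving space. Roughly, when copying an unforwarded object:
//
//   if (generational_ && reinterpret_cast<byte*>(obj) < last_gc_to_space_end_) {
//     // The object survived the last collection; promote it to the non-moving
//     // space and account for the copied bytes in bytes_promoted_.
//   } else {
//     // Young object, or non-generational mode: copy it into to_space_.
//   }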