// semi_space.h revision 38e68e9978236db87c9008bbe47db80525d2fa16
/*
 * Copyright (C) 2013 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_GC_COLLECTOR_SEMI_SPACE_H_
#define ART_RUNTIME_GC_COLLECTOR_SEMI_SPACE_H_

#include "atomic.h"
#include "base/macros.h"
#include "base/mutex.h"
#include "garbage_collector.h"
#include "immune_region.h"
#include "object_callbacks.h"
#include "offsets.h"
#include "UniquePtr.h"

namespace art {

namespace mirror {
  class Class;
  class Object;
  template<class T> class ObjectArray;
}  // namespace mirror

class StackVisitor;
class Thread;

namespace gc {

namespace accounting {
  template <typename T> class AtomicStack;
  class MarkIfReachesAllocspaceVisitor;
  class ModUnionClearCardVisitor;
  class ModUnionVisitor;
  class ModUnionTableBitmap;
  class MarkStackChunk;
  typedef AtomicStack<mirror::Object*> ObjectStack;
  class SpaceBitmap;
}  // namespace accounting

namespace space {
  class BumpPointerSpace;
  class ContinuousMemMapAllocSpace;
  class ContinuousSpace;
  class MallocSpace;
}  // namespace space

class Heap;

namespace collector {

// A moving garbage collector that copies live objects from a source ("from") space to a
// destination ("to") space (see SetFromSpace/SetToSpace). Optionally runs in a generational
// mode where surviving objects are promoted out of the bump pointer space (see generational_,
// bytes_promoted_). Not concurrent: the marking/reclaim phases require exclusive mutator_lock_.
class SemiSpace : public GarbageCollector {
 public:
  // If true, use remembered sets in the generational mode.
  static constexpr bool kUseRememberedSet = true;

  // If generational is true, the collector promotes survivors and can limit collection to the
  // bump pointer space; name_prefix is prepended to the collector's name.
  explicit SemiSpace(Heap* heap, bool generational = false,
                     const std::string& name_prefix = "");

  ~SemiSpace() {}

  virtual void InitializePhase();
  // This collector is stop-the-world; it never runs concurrently with mutators.
  virtual bool IsConcurrent() const {
    return false;
  }
  virtual void MarkingPhase() EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
  virtual void ReclaimPhase() EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
  virtual void FinishPhase() EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
  virtual void MarkReachableObjects()
      EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
  virtual GcType GetGcType() const {
    return kGcTypePartial;
  }

  // Sets which space we will be copying objects to.
  void SetToSpace(space::ContinuousMemMapAllocSpace* to_space);

  // Set the space where we copy objects from.
  void SetFromSpace(space::ContinuousMemMapAllocSpace* from_space);

  // Initializes internal structures.
  void Init();

  // Find the default mark bitmap.
  void FindDefaultMarkBitmap();

  // Returns the new address of the object.
  mirror::Object* MarkObject(mirror::Object* object)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);

  // Visits the references held by obj, marking each one (cf. ScanObjectVisit).
  void ScanObject(mirror::Object* obj)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);

  // Debug check: verifies that obj holds no references into the from-space after the copy.
  void VerifyNoFromSpaceReferences(mirror::Object* obj)
      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);

  // Marks the root set at the start of a garbage collection.
  void MarkRoots()
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);

  // Bind the live bits to the mark bits of bitmaps for spaces that are never collected, i.e.
  // the image. Mark that portion of the heap as immune.
  virtual void BindBitmaps() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Undoes the bitmap binding performed by BindBitmaps.
  void UnBindBitmaps()
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);

  void ProcessReferences(Thread* self)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Sweeps unmarked objects to complete the garbage collection.
  virtual void Sweep(bool swap_bitmaps) EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);

  // Sweeps unmarked objects to complete the garbage collection.
  void SweepLargeObjects(bool swap_bitmaps) EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);

  // Sweep only pointers within an array. WARNING: Trashes objects.
  void SweepArray(accounting::ObjectStack* allocation_stack_, bool swap_bitmaps)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);

  // TODO: enable thread safety analysis when in use by multiple worker threads.
  template <typename MarkVisitor>
  void ScanObjectVisit(const mirror::Object* obj, const MarkVisitor& visitor)
      NO_THREAD_SAFETY_ANALYSIS;

  // Removes dead entries from system weak tables (e.g. interned strings); see
  // MarkedForwardingAddressCallback for how liveness is queried.
  void SweepSystemWeaks()
      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);

  template <typename Visitor>
  static void VisitObjectReferencesAndClass(mirror::Object* obj, const Visitor& visitor)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);

  // Root visiting callback: arg is the SemiSpace instance; may update *root with the object's
  // forwarding address.
  static void MarkRootCallback(mirror::Object** root, void* arg, uint32_t /*tid*/,
                               RootType /*root_type*/)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);

  // Marks root and returns its (possibly new) address; arg is the SemiSpace instance.
  static mirror::Object* MarkObjectCallback(mirror::Object* root, void* arg)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);

  // Drains the mark stack; arg is the SemiSpace instance (see ProcessMarkStack).
  static void ProcessMarkStackCallback(void* arg)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_);

  // Marks an object that has no forwarding address yet and returns its new address.
  virtual mirror::Object* MarkNonForwardedObject(mirror::Object* obj)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);

 protected:
  // Returns null if the object is not marked, otherwise returns the forwarding address (same as
  // object for non movable things).
  mirror::Object* GetMarkedForwardAddress(mirror::Object* object) const;

  // Static trampoline for GetMarkedForwardAddress; arg is the SemiSpace instance.
  static mirror::Object* MarkedForwardingAddressCallback(mirror::Object* object, void* arg)
      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);

  // Marks or unmarks a large object based on whether or not set is true. If set is true, then we
  // mark, otherwise we unmark.
  bool MarkLargeObject(const mirror::Object* obj)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);

  // Resize the mark stack to hold new_size entries.
  void ResizeMarkStack(size_t new_size);

  // Returns true if we should sweep the space.
  virtual bool ShouldSweepSpace(space::ContinuousSpace* space) const;

  static void VerifyRootCallback(const mirror::Object* root, void* arg, size_t vreg,
                                 const StackVisitor *visitor);

  void VerifyRoot(const mirror::Object* root, size_t vreg, const StackVisitor* visitor)
      NO_THREAD_SAFETY_ANALYSIS;

  // Visit the instance field references of obj as described by klass's reference layout.
  template <typename Visitor>
  static void VisitInstanceFieldsReferences(const mirror::Class* klass, const mirror::Object* obj,
                                            const Visitor& visitor)
      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);

  // Visit the header, static field references, and interface pointers of a class object.
  template <typename Visitor>
  static void VisitClassReferences(const mirror::Class* klass, const mirror::Object* obj,
                                   const Visitor& visitor)
      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);

  template <typename Visitor>
  static void VisitStaticFieldsReferences(const mirror::Class* klass, const Visitor& visitor)
      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);

  // Visit field references encoded in the ref_offsets bitmap; is_static selects between
  // instance and static field layouts.
  template <typename Visitor>
  static void VisitFieldsReferences(const mirror::Object* obj, uint32_t ref_offsets, bool is_static,
                                    const Visitor& visitor)
      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);

  // Visit all of the references in an object array.
  template <typename Visitor>
  static void VisitObjectArrayReferences(const mirror::ObjectArray<mirror::Object>* array,
                                         const Visitor& visitor)
      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);

  // Visits the header and field references of a data object.
  template <typename Visitor>
  static void VisitOtherReferences(const mirror::Class* klass, const mirror::Object* obj,
                                   const Visitor& visitor)
      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_) {
    return VisitInstanceFieldsReferences(klass, obj, visitor);
  }

  // Push an object onto the mark stack.
  inline void MarkStackPush(mirror::Object* obj);

  void UpdateAndMarkModUnion()
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Schedules an unmarked object for reference processing.
  void DelayReferenceReferent(mirror::Class* klass, mirror::Object* reference)
      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);

  // Recursively blackens objects on the mark stack.
  void ProcessMarkStack()
      EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_);

  void EnqueueFinalizerReferences(mirror::Object** ref)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_);

  void PreserveSomeSoftReferences(mirror::Object** ref)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_);

  void ClearWhiteReferences(mirror::Object** list)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_);

  void ProcessReferences(mirror::Object** soft_references, bool clear_soft_references,
                         mirror::Object** weak_references,
                         mirror::Object** finalizer_references,
                         mirror::Object** phantom_references)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_);

  inline mirror::Object* GetForwardingAddressInFromSpace(mirror::Object* obj) const;

  // Work list of objects that have been marked but whose references have not yet been scanned
  // (drained by ProcessMarkStack).
  accounting::ObjectStack* mark_stack_;

  // Immune region, every object inside the immune region is assumed to be marked.
  ImmuneRegion immune_region_;

  // If true, the large object space is immune.
  bool is_large_object_space_immune_;

  // Destination and source spaces (can be any type of ContinuousMemMapAllocSpace which either has
  // a live bitmap or doesn't).
  space::ContinuousMemMapAllocSpace* to_space_;
  accounting::SpaceBitmap* to_space_live_bitmap_;  // Cached live bitmap as an optimization.
  space::ContinuousMemMapAllocSpace* from_space_;

  // The thread running the collection.
  Thread* self_;

  // When true, the generational mode (promotion and the bump pointer
  // space only collection) is enabled. TODO: move these to a new file
  // as a new garbage collector?
  const bool generational_;

  // Used for the generational mode. The end/top of the bump
  // pointer space at the end of the last collection.
  byte* last_gc_to_space_end_;

  // Used for the generational mode. During a collection, keeps track
  // of how many bytes of objects have been copied so far from the
  // bump pointer space to the non-moving space.
  uint64_t bytes_promoted_;

  // Used for the generational mode. When true, collect the whole
  // heap. When false, collect only the bump pointer spaces.
  bool whole_heap_collection_;

  // Used for the generational mode. A counter used to enable
  // whole_heap_collection_ once per interval.
  int whole_heap_collection_interval_counter_;

  // How many bytes we avoided dirtying.
  size_t saved_bytes_;

  // Used for the generational mode. The default interval of the whole
  // heap collection. If N, the whole heap collection occurs every N
  // collections.
  static constexpr int kDefaultWholeHeapCollectionInterval = 5;

 private:
  DISALLOW_COPY_AND_ASSIGN(SemiSpace);
};

}  // namespace collector
}  // namespace gc
}  // namespace art

#endif  // ART_RUNTIME_GC_COLLECTOR_SEMI_SPACE_H_