semi_space.h revision a8e8f9c0a8e259a807d7b99a148d14104c24209d
/*
 * Copyright (C) 2013 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_GC_COLLECTOR_SEMI_SPACE_H_
#define ART_RUNTIME_GC_COLLECTOR_SEMI_SPACE_H_

#include "atomic.h"
#include "base/macros.h"
#include "base/mutex.h"
#include "garbage_collector.h"
#include "gc/accounting/space_bitmap.h"
#include "immune_region.h"
#include "object_callbacks.h"
#include "offsets.h"
#include "UniquePtr.h"

namespace art {

class Thread;

namespace mirror {
  class Class;
  class Object;
}  // namespace mirror

namespace gc {

class Heap;

namespace accounting {
  template <typename T> class AtomicStack;
  typedef AtomicStack<mirror::Object*> ObjectStack;
}  // namespace accounting

namespace space {
  class ContinuousMemMapAllocSpace;
  class ContinuousSpace;
}  // namespace space

namespace collector {

class SemiSpace : public GarbageCollector {
 public:
  // If true, use remembered sets in the generational mode.
  static constexpr bool kUseRememberedSet = true;

  explicit SemiSpace(Heap* heap, bool generational = false, const std::string& name_prefix = "");

  ~SemiSpace() {}

  virtual void InitializePhase() OVERRIDE;
  virtual void MarkingPhase() OVERRIDE EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_)
      LOCKS_EXCLUDED(Locks::heap_bitmap_lock_);
  virtual void ReclaimPhase() OVERRIDE EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_)
      LOCKS_EXCLUDED(Locks::heap_bitmap_lock_);
  virtual void FinishPhase() OVERRIDE EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
  void MarkReachableObjects()
      EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
  virtual GcType GetGcType() const OVERRIDE {
    return kGcTypePartial;
  }
  virtual CollectorType GetCollectorType() const OVERRIDE {
    return generational_ ? kCollectorTypeGSS : kCollectorTypeSS;
  }

  // Sets the space we will be copying objects to.
  void SetToSpace(space::ContinuousMemMapAllocSpace* to_space);

  // Sets the space we will be copying objects from.
  void SetFromSpace(space::ContinuousMemMapAllocSpace* from_space);
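  // A typical driving sequence, shown as an illustrative sketch only: the real
  // call site lives in Heap, the space variables are hypothetical locals, and
  // Run() is inherited from GarbageCollector. All mutator threads must already
  // be suspended, since the phase functions above require exclusive
  // mutator_lock_.
  //
  //   SemiSpace collector(heap, /*generational=*/false);
  //   collector.SetFromSpace(bump_pointer_space);  // Space being evacuated.
  //   collector.SetToSpace(temp_space);            // Destination for survivors.
  //   collector.Run(kGcCauseBackground, /*clear_soft_references=*/false);
  //
  // Run() executes InitializePhase(), MarkingPhase(), ReclaimPhase() and
  // FinishPhase() in order; afterwards every reachable object has a copy in the
  // to-space and the from-space contains only garbage.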
  // Initializes internal structures.
  void Init();

  // Find the default mark bitmap.
  void FindDefaultMarkBitmap();

  // Updates the given reference with the new address of the object, copying the object to the
  // to-space if it has not yet been moved.
  template<bool kPoisonReferences>
  void MarkObject(mirror::ObjectReference<kPoisonReferences, mirror::Object>* obj_ptr)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Scans obj, marking and updating every reference it contains.
  void ScanObject(mirror::Object* obj)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  void VerifyNoFromSpaceReferences(mirror::Object* obj)
      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);

  // Marks the root set at the start of a garbage collection.
  void MarkRoots()
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);

  // Bind the live bits to the mark bits of bitmaps for spaces that are never collected, i.e.
  // the image. Mark that portion of the heap as immune.
  virtual void BindBitmaps() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      LOCKS_EXCLUDED(Locks::heap_bitmap_lock_);

  void UnBindBitmaps()
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);

  void ProcessReferences(Thread* self)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Sweeps unmarked objects to complete the garbage collection.
  virtual void Sweep(bool swap_bitmaps) EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);

  // Sweeps unmarked objects in the large object space.
  void SweepLargeObjects(bool swap_bitmaps) EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);

  void SweepSystemWeaks()
      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);

  // Static callbacks for the runtime's root and object visitors; arg is the SemiSpace instance.
  static void MarkRootCallback(mirror::Object** root, void* arg, uint32_t /*tid*/,
                               RootType /*root_type*/)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);

  static mirror::Object* MarkObjectCallback(mirror::Object* root, void* arg)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);

  static void MarkHeapReferenceCallback(mirror::HeapReference<mirror::Object>* obj_ptr, void* arg)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);

  static void ProcessMarkStackCallback(void* arg)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_);

  // Copies a not-yet-forwarded object into the to-space (or promotes it in the generational
  // mode) and returns the new address.
  virtual mirror::Object* MarkNonForwardedObject(mirror::Object* obj)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Schedules an unmarked object for reference processing.
  void DelayReferenceReferent(mirror::Class* klass, mirror::Reference* reference)
      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);

 protected:
  // Returns null if the object is not marked, otherwise returns the forwarding address (the same
  // as the object itself for non-movable things).
  mirror::Object* GetMarkedForwardAddress(mirror::Object* object) const
      EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_)
      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);

  static mirror::Object* MarkedForwardingAddressCallback(mirror::Object* object, void* arg)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_)
      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);

  // Marks a large object, returning true if the object was not previously marked.
  bool MarkLargeObject(const mirror::Object* obj)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Resize the mark stack to new_size, preserving its contents; used when the mark stack
  // overflows.
  void ResizeMarkStack(size_t new_size);

  // Returns true if we should sweep the space.
  virtual bool ShouldSweepSpace(space::ContinuousSpace* space) const;

  // Push an object onto the mark stack, growing the stack first if it is full.
  void MarkStackPush(mirror::Object* obj);

  void UpdateAndMarkModUnion()
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Recursively blackens objects on the mark stack.
  void ProcessMarkStack()
      EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_);

  // Returns the forwarding address of an object in the from-space, or null if the object has not
  // been copied yet.
  inline mirror::Object* GetForwardingAddressInFromSpace(mirror::Object* obj) const;
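  // An illustrative sketch of the lookup above, assuming forwarding pointers are
  // stashed in the object's lock word (LockWord::kForwardingAddress); the actual
  // inline definition lives in a separate -inl.h file and may differ in detail:
  //
  //   inline mirror::Object* SemiSpace::GetForwardingAddressInFromSpace(
  //       mirror::Object* obj) const {
  //     DCHECK(from_space_->HasAddress(obj));
  //     LockWord lock_word = obj->GetLockWord();
  //     if (lock_word.GetState() != LockWord::kForwardingAddress) {
  //       return nullptr;  // The object has not been copied yet.
  //     }
  //     return reinterpret_cast<mirror::Object*>(lock_word.ForwardingAddress());
  //   }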
  // Revoke all the thread-local buffers.
  void RevokeAllThreadLocalBuffers();

  // Stack of gray objects: objects that have been marked but whose references have not yet been
  // scanned.
  accounting::ObjectStack* mark_stack_;

  // Immune region; every object inside the immune region is assumed to be marked.
  ImmuneRegion immune_region_;

  // If true, the large object space is immune.
  bool is_large_object_space_immune_;

  // Destination and source spaces (any type of ContinuousMemMapAllocSpace, with or without a
  // live bitmap).
  space::ContinuousMemMapAllocSpace* to_space_;
  // Cached live bitmap as an optimization.
  accounting::ContinuousSpaceBitmap* to_space_live_bitmap_;
  space::ContinuousMemMapAllocSpace* from_space_;

  // The thread performing the collection.
  Thread* self_;

  // When true, the generational mode (promotion and bump-pointer-space-only collections) is
  // enabled. TODO: move these to a new file as a new garbage collector?
  const bool generational_;

  // Used for the generational mode. The end/top of the bump pointer space at the end of the
  // last collection.
  byte* last_gc_to_space_end_;

  // Used for the generational mode. During a collection, keeps track of how many bytes of
  // objects have been copied so far from the bump pointer space to the non-moving space.
  uint64_t bytes_promoted_;

  // Used for the generational mode. Keeps track of how many bytes of objects have been copied
  // so far from the bump pointer space to the non-moving space, since the last whole heap
  // collection.
  uint64_t bytes_promoted_since_last_whole_heap_collection_;

  // Used for the generational mode. When true, collect the whole heap. When false, collect only
  // the bump pointer spaces.
  bool whole_heap_collection_;

  // Used for the generational mode. A counter used to enable whole_heap_collection_ once per
  // interval.
  int whole_heap_collection_interval_counter_;

  // How many bytes we avoided dirtying.
  size_t saved_bytes_;

  // The name of the collector.
  std::string collector_name_;

  // Used for the generational mode. The default interval for whole heap collections: if the
  // interval is N, a whole heap collection occurs once every N collections.
  static constexpr int kDefaultWholeHeapCollectionInterval = 5;

 private:
  DISALLOW_COPY_AND_ASSIGN(SemiSpace);
};

}  // namespace collector
}  // namespace gc
}  // namespace art

#endif  // ART_RUNTIME_GC_COLLECTOR_SEMI_SPACE_H_
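// An illustrative sketch of the overflow handling expected in MarkStackPush()
// above (the actual definition lives in the .cc file and may differ in detail):
//
//   inline void SemiSpace::MarkStackPush(mirror::Object* obj) {
//     if (UNLIKELY(mark_stack_->Size() >= mark_stack_->Capacity())) {
//       // Double the capacity so the push below cannot overflow.
//       ResizeMarkStack(mark_stack_->Capacity() * 2);
//     }
//     mark_stack_->PushBack(obj);
//   }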