incremental-marking.h revision 109988c7ccb6f3fd1a58574fa3dfb88beaef6632
// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_HEAP_INCREMENTAL_MARKING_H_
#define V8_HEAP_INCREMENTAL_MARKING_H_

#include "src/cancelable-task.h"
#include "src/execution.h"
#include "src/heap/heap.h"
#include "src/heap/incremental-marking-job.h"
#include "src/heap/spaces.h"
#include "src/objects.h"

namespace v8 {
namespace internal {

// Forward declarations.
class MarkBit;
class PagedSpace;

class IncrementalMarking {
 public:
  enum State { STOPPED, SWEEPING, MARKING, COMPLETE };

  enum CompletionAction { GC_VIA_STACK_GUARD, NO_GC_VIA_STACK_GUARD };

  enum ForceMarkingAction { FORCE_MARKING, DO_NOT_FORCE_MARKING };

  enum ForceCompletionAction { FORCE_COMPLETION, DO_NOT_FORCE_COMPLETION };

  enum GCRequestType { COMPLETE_MARKING, FINALIZATION };

  struct StepActions {
    StepActions(CompletionAction complete_action_,
                ForceMarkingAction force_marking_,
                ForceCompletionAction force_completion_)
        : completion_action(complete_action_),
          force_marking(force_marking_),
          force_completion(force_completion_) {}

    CompletionAction completion_action;
    ForceMarkingAction force_marking;
    ForceCompletionAction force_completion;
  };

  static StepActions IdleStepActions();

  explicit IncrementalMarking(Heap* heap);

  static void Initialize();

  State state() {
    DCHECK(state_ == STOPPED || FLAG_incremental_marking);
    return state_;
  }

  bool should_hurry() { return should_hurry_; }
  void set_should_hurry(bool val) { should_hurry_ = val; }

  bool finalize_marking_completed() const {
    return finalize_marking_completed_;
  }

  void SetWeakClosureWasOverApproximatedForTesting(bool val) {
    finalize_marking_completed_ = val;
  }

  inline bool IsStopped() { return state() == STOPPED; }

  INLINE(bool IsMarking()) { return state() >= MARKING; }

  inline bool IsMarkingIncomplete() { return state() == MARKING; }

  inline bool IsComplete() { return state() == COMPLETE; }

  inline bool IsReadyToOverApproximateWeakClosure() const {
    return request_type_ == FINALIZATION && !finalize_marking_completed_;
  }

  GCRequestType request_type() const { return request_type_; }

  bool CanBeActivated();

  bool ShouldActivateEvenWithoutIdleNotification();

  bool WasActivated();

  void Start(const char* reason = nullptr);

  void FinalizeIncrementally();

  void UpdateMarkingDequeAfterScavenge();

  void Hurry();

  void Finalize();

  void Stop();

  void FinalizeMarking(CompletionAction action);

  void MarkingComplete(CompletionAction action);

  void Epilogue();

  // Performs incremental marking steps of step_size_in_bytes as long as
  // deadline_in_ms is not reached. step_size_in_bytes can be 0 to compute
  // an estimated increment. Returns the remaining time that cannot be used
  // for incremental marking anymore because a single step would exceed the
  // deadline.
  double AdvanceIncrementalMarking(intptr_t step_size_in_bytes,
                                   double deadline_in_ms,
                                   StepActions step_actions);
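  // A minimal usage sketch (not part of the original header) of how a
  // deadline-driven caller such as an idle task might use
  // AdvanceIncrementalMarking. The idle_time_in_ms variable and the way the
  // deadline is computed are assumptions for illustration only:
  //
  //   double deadline_in_ms =
  //       heap->MonotonicallyIncreasingTimeInMs() + idle_time_in_ms;
  //   heap->incremental_marking()->AdvanceIncrementalMarking(
  //       0 /* let the marker estimate the step size */, deadline_in_ms,
  //       IncrementalMarking::IdleStepActions());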
  // It's hard to know how much work the incremental marker should do to make
  // progress in the face of the mutator creating new work for it. We start
  // off at a moderate rate of work and gradually increase the speed of the
  // incremental marker until it completes.
  // Do some marking every time this much memory has been allocated or that
  // many heavy (color-checking) write barriers have been invoked.
  static const intptr_t kAllocatedThreshold = 65536;
  static const intptr_t kWriteBarriersInvokedThreshold = 32768;
  // Start off by marking this many times more memory than has been allocated.
  static const intptr_t kInitialMarkingSpeed = 1;
  // But if we are promoting a lot of data we need to mark faster to keep up
  // with the data that is entering the old space through promotion.
  static const intptr_t kFastMarking = 3;
  // After this many steps we increase the marking/allocating factor.
  static const intptr_t kMarkingSpeedAccellerationInterval = 1024;
  // This is how much we increase the marking/allocating factor by.
  static const intptr_t kMarkingSpeedAccelleration = 2;
  static const intptr_t kMaxMarkingSpeed = 1000;

  // This is the upper bound for how many times we allow finalization of
  // incremental marking to be postponed.
  static const size_t kMaxIdleMarkingDelayCounter = 3;
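  // Rough arithmetic sketch of what the constants above imply (the exact
  // update rule lives in incremental-marking.cc, so treat the numbers as
  // illustrative rather than authoritative):
  //   - a step is triggered after roughly kAllocatedThreshold (64 KB) of
  //     allocation or kWriteBarriersInvokedThreshold (32768) heavy write
  //     barriers;
  //   - at kInitialMarkingSpeed each step marks about 1x the allocated
  //     bytes (~64 KB); at kFastMarking it marks about 3x (~192 KB);
  //   - roughly every kMarkingSpeedAccellerationInterval (1024) steps the
  //     marking/allocating factor is bumped by kMarkingSpeedAccelleration,
  //     capped at kMaxMarkingSpeed (1000).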
  void OldSpaceStep(intptr_t allocated);

  intptr_t Step(intptr_t allocated, CompletionAction action,
                ForceMarkingAction marking = DO_NOT_FORCE_MARKING,
                ForceCompletionAction completion = FORCE_COMPLETION);

  inline void RestartIfNotMarking() {
    if (state_ == COMPLETE) {
      state_ = MARKING;
      if (FLAG_trace_incremental_marking) {
        PrintF("[IncrementalMarking] Restarting (new grey objects)\n");
      }
    }
  }

  static void RecordWriteFromCode(HeapObject* obj, Object** slot,
                                  Isolate* isolate);

  static void RecordWriteOfCodeEntryFromCode(JSFunction* host, Object** slot,
                                             Isolate* isolate);

  // Record a slot for compaction. Returns false for objects that are
  // guaranteed to be rescanned or not guaranteed to survive.
  //
  // No slots in white objects should be recorded, as some slots are typed and
  // cannot be interpreted correctly if the underlying object does not survive
  // the incremental cycle (stays white).
  INLINE(bool BaseRecordWrite(HeapObject* obj, Object* value));
  INLINE(void RecordWrite(HeapObject* obj, Object** slot, Object* value));
  INLINE(void RecordWriteIntoCode(HeapObject* obj, RelocInfo* rinfo,
                                  Object* value));
  INLINE(void RecordWriteOfCodeEntry(JSFunction* host, Object** slot,
                                     Code* value));

  void RecordWriteSlow(HeapObject* obj, Object** slot, Object* value);
  void RecordWriteIntoCodeSlow(HeapObject* obj, RelocInfo* rinfo,
                               Object* value);
  void RecordWriteOfCodeEntrySlow(JSFunction* host, Object** slot, Code* value);
  void RecordCodeTargetPatch(Code* host, Address pc, HeapObject* value);
  void RecordCodeTargetPatch(Address pc, HeapObject* value);
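  // Illustrative sketch (an assumption, not the actual V8 bodies, which live
  // in incremental-marking-inl.h and incremental-marking.cc) of how the
  // inline fast paths and the *Slow variants are expected to relate:
  // BaseRecordWrite is expected to grey a still-white value and report
  // whether the slot itself must be remembered for compaction, e.g.:
  //
  //   void IncrementalMarking::RecordWriteSlow(HeapObject* obj, Object** slot,
  //                                            Object* value) {
  //     if (BaseRecordWrite(obj, value) && slot != NULL) {
  //       // obj will not be rescanned, so remember the slot for the
  //       // compactor in case value moves.
  //       heap_->mark_compact_collector()->RecordSlot(obj, slot, value);
  //     }
  //   }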
  void RecordWrites(HeapObject* obj);

  void BlackToGreyAndUnshift(HeapObject* obj, MarkBit mark_bit);

  void WhiteToGreyAndPush(HeapObject* obj, MarkBit mark_bit);

  inline void SetOldSpacePageFlags(MemoryChunk* chunk) {
    SetOldSpacePageFlags(chunk, IsMarking(), IsCompacting());
  }

  inline void SetNewSpacePageFlags(MemoryChunk* chunk) {
    SetNewSpacePageFlags(chunk, IsMarking());
  }

  bool IsCompacting() { return IsMarking() && is_compacting_; }

  void ActivateGeneratedStub(Code* stub);

  void NotifyOfHighPromotionRate();

  void EnterNoMarkingScope() { no_marking_scope_depth_++; }

  void LeaveNoMarkingScope() { no_marking_scope_depth_--; }

  void NotifyIncompleteScanOfObject(int unscanned_bytes) {
    unscanned_bytes_of_large_object_ = unscanned_bytes;
  }

  void ClearIdleMarkingDelayCounter();

  bool IsIdleMarkingDelayCounterLimitReached();

  INLINE(static void MarkObject(Heap* heap, HeapObject* object));

  Heap* heap() const { return heap_; }

  IncrementalMarkingJob* incremental_marking_job() {
    return &incremental_marking_job_;
  }

 private:
  class Observer : public AllocationObserver {
   public:
    Observer(IncrementalMarking& incremental_marking, intptr_t step_size)
        : AllocationObserver(step_size),
          incremental_marking_(incremental_marking) {}

    void Step(int bytes_allocated, Address, size_t) override {
      incremental_marking_.Step(bytes_allocated,
                                IncrementalMarking::GC_VIA_STACK_GUARD);
    }

   private:
    IncrementalMarking& incremental_marking_;
  };

  int64_t SpaceLeftInOldSpace();

  void SpeedUp();

  void ResetStepCounters();

  void StartMarking();

  void MarkRoots();
  void MarkObjectGroups();
  void ProcessWeakCells();
  // Retain dying maps for <FLAG_retain_maps_for_n_gc> garbage collections to
  // increase the chances of reusing the map transition tree in the future.
  void RetainMaps();

  void ActivateIncrementalWriteBarrier(PagedSpace* space);
  static void ActivateIncrementalWriteBarrier(NewSpace* space);
  void ActivateIncrementalWriteBarrier();

  static void DeactivateIncrementalWriteBarrierForSpace(PagedSpace* space);
  static void DeactivateIncrementalWriteBarrierForSpace(NewSpace* space);
  void DeactivateIncrementalWriteBarrier();

  static void SetOldSpacePageFlags(MemoryChunk* chunk, bool is_marking,
                                   bool is_compacting);

  static void SetNewSpacePageFlags(MemoryChunk* chunk, bool is_marking);

  INLINE(void ProcessMarkingDeque());

  INLINE(intptr_t ProcessMarkingDeque(intptr_t bytes_to_process));

  INLINE(void VisitObject(Map* map, HeapObject* obj, int size));

  void IncrementIdleMarkingDelayCounter();

  Heap* heap_;

  Observer observer_;

  State state_;
  bool is_compacting_;

  int steps_count_;
  int64_t old_generation_space_available_at_start_of_incremental_;
  int64_t old_generation_space_used_at_start_of_incremental_;
  int64_t bytes_rescanned_;
  bool should_hurry_;
  int marking_speed_;
  intptr_t bytes_scanned_;
  intptr_t allocated_;
  intptr_t write_barriers_invoked_since_last_step_;
  size_t idle_marking_delay_counter_;

  int no_marking_scope_depth_;

  int unscanned_bytes_of_large_object_;

  bool was_activated_;

  bool finalize_marking_completed_;

  int incremental_marking_finalization_rounds_;

  GCRequestType request_type_;

  IncrementalMarkingJob incremental_marking_job_;

  DISALLOW_IMPLICIT_CONSTRUCTORS(IncrementalMarking);
};
}  // namespace internal
}  // namespace v8

#endif  // V8_HEAP_INCREMENTAL_MARKING_H_