incremental-marking.h revision c8c1d9e03f4babd16833b0f8ccf6aab5fa6e8c7a
// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_HEAP_INCREMENTAL_MARKING_H_
#define V8_HEAP_INCREMENTAL_MARKING_H_

#include "src/cancelable-task.h"
#include "src/execution.h"
#include "src/heap/heap.h"
#include "src/heap/incremental-marking-job.h"
#include "src/heap/mark-compact.h"
#include "src/heap/spaces.h"
#include "src/objects.h"

namespace v8 {
namespace internal {

// Forward declarations.
class MarkBit;
class PagedSpace;

enum class StepOrigin { kV8, kTask };

class IncrementalMarking {
 public:
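  // The phases of an incremental marking cycle, roughly in the order they
  // occur: STOPPED (no cycle in progress), SWEEPING (waiting for sweeping of
  // the previous cycle to finish; see FinalizeSweeping), MARKING (the marking
  // deque is drained in small steps), and COMPLETE (all currently reachable
  // objects are marked), before a final atomic pause finishes the cycle.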
  enum State { STOPPED, SWEEPING, MARKING, COMPLETE };

  enum CompletionAction { GC_VIA_STACK_GUARD, NO_GC_VIA_STACK_GUARD };

  enum ForceCompletionAction { FORCE_COMPLETION, DO_NOT_FORCE_COMPLETION };

  enum GCRequestType { NONE, COMPLETE_MARKING, FINALIZATION };

  explicit IncrementalMarking(Heap* heap);

  static void Initialize();

  State state() {
    DCHECK(state_ == STOPPED || FLAG_incremental_marking);
    return state_;
  }

  bool should_hurry() { return should_hurry_; }
  void set_should_hurry(bool val) { should_hurry_ = val; }

  bool finalize_marking_completed() const {
    return finalize_marking_completed_;
  }

  void SetWeakClosureWasOverApproximatedForTesting(bool val) {
    finalize_marking_completed_ = val;
  }

  inline bool IsStopped() { return state() == STOPPED; }

  inline bool IsSweeping() { return state() == SWEEPING; }

  INLINE(bool IsMarking()) { return state() >= MARKING; }

  inline bool IsMarkingIncomplete() { return state() == MARKING; }

  inline bool IsComplete() { return state() == COMPLETE; }

  inline bool IsReadyToOverApproximateWeakClosure() const {
    return request_type_ == FINALIZATION && !finalize_marking_completed_;
  }

  inline bool NeedsFinalization() {
    return IsMarking() &&
           (request_type_ == FINALIZATION || request_type_ == COMPLETE_MARKING);
  }

  GCRequestType request_type() const { return request_type_; }

  void reset_request_type() { request_type_ = NONE; }

  bool CanBeActivated();

  bool WasActivated();

  void Start(GarbageCollectionReason gc_reason);

  void FinalizeIncrementally();

  void UpdateMarkingDequeAfterScavenge();

  void Hurry();

  void Finalize();

  void Stop();

  void FinalizeMarking(CompletionAction action);

  void MarkingComplete(CompletionAction action);

  void Epilogue();

  // Performs incremental marking steps until deadline_in_ms is reached. It
  // returns the remaining time that cannot be used for incremental marking
  // anymore because a single step would exceed the deadline.
  double AdvanceIncrementalMarking(double deadline_in_ms,
                                   CompletionAction completion_action,
                                   ForceCompletionAction force_completion,
                                   StepOrigin step_origin);
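  //
  // Illustrative usage sketch only; the 2 ms budget and the `heap`/`marking`
  // locals are assumptions, not part of this interface:
  //
  //   double deadline = heap->MonotonicallyIncreasingTimeInMs() + 2.0;
  //   double remaining_ms = marking->AdvanceIncrementalMarking(
  //       deadline, IncrementalMarking::GC_VIA_STACK_GUARD,
  //       IncrementalMarking::DO_NOT_FORCE_COMPLETION, StepOrigin::kTask);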

  // It's hard to know how much work the incremental marker should do to make
  // progress in the face of the mutator creating new work for it.  We start
  // off at a moderate rate of work and gradually increase the speed of the
  // incremental marker until it completes.
  // Do some marking every time this much memory has been allocated or that many
  // heavy (color-checking) write barriers have been invoked.
  static const size_t kAllocatedThreshold = 64 * KB;

  static const int kStepSizeInMs = 1;
  static const int kMaxStepSizeInMs = 5;

  // This is the upper bound for how many times we allow finalization of
  // incremental marking to be postponed.
  static const int kMaxIdleMarkingDelayCounter = 3;

#ifndef DEBUG
  static const intptr_t kActivationThreshold = 8 * MB;
#else
  static const intptr_t kActivationThreshold = 0;
#endif
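  // Note: the zero threshold in debug builds presumably lets incremental
  // marking activate immediately so tests exercise it, while release builds
  // wait until roughly 8 MB of old-generation growth.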

  void FinalizeSweeping();

  size_t Step(size_t bytes_to_process, CompletionAction action,
              ForceCompletionAction completion, StepOrigin step_origin);

  inline void RestartIfNotMarking();

  static void RecordWriteFromCode(HeapObject* obj, Object** slot,
                                  Isolate* isolate);

  static void RecordWriteOfCodeEntryFromCode(JSFunction* host, Object** slot,
                                             Isolate* isolate);

  // Record a slot for compaction.  Returns false for objects that are
  // guaranteed to be rescanned or not guaranteed to survive.
  //
  // No slots in white objects should be recorded, as some slots are typed and
  // cannot be interpreted correctly if the underlying object does not survive
  // the incremental cycle (stays white).
  INLINE(bool BaseRecordWrite(HeapObject* obj, Object* value));
  INLINE(void RecordWrite(HeapObject* obj, Object** slot, Object* value));
  INLINE(void RecordWriteIntoCode(Code* host, RelocInfo* rinfo, Object* value));
  INLINE(void RecordWriteOfCodeEntry(JSFunction* host, Object** slot,
                                     Code* value));
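  //
  // Illustrative barrier usage (the surrounding store and the `heap` local
  // are assumptions): the store happens first, then the barrier, so that a
  // pointer written from a black object to a white one marks the target grey
  // and, when compacting, records the slot:
  //
  //   *slot = value;  // the mutator's store
  //   heap->incremental_marking()->RecordWrite(obj, slot, value);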

  V8_EXPORT_PRIVATE void RecordWriteSlow(HeapObject* obj, Object** slot,
                                         Object* value);
  void RecordWriteIntoCodeSlow(Code* host, RelocInfo* rinfo, Object* value);
  void RecordWriteOfCodeEntrySlow(JSFunction* host, Object** slot, Code* value);
  void RecordCodeTargetPatch(Code* host, Address pc, HeapObject* value);
  void RecordCodeTargetPatch(Address pc, HeapObject* value);

  void WhiteToGreyAndPush(HeapObject* obj, MarkBit mark_bit);

  inline void SetOldSpacePageFlags(MemoryChunk* chunk) {
    SetOldSpacePageFlags(chunk, IsMarking(), IsCompacting());
  }

  inline void SetNewSpacePageFlags(Page* chunk) {
    SetNewSpacePageFlags(chunk, IsMarking());
  }

  bool IsCompacting() { return IsMarking() && is_compacting_; }

  void ActivateGeneratedStub(Code* stub);

  void NotifyIncompleteScanOfObject(int unscanned_bytes) {
    unscanned_bytes_of_large_object_ = unscanned_bytes;
  }

  void ClearIdleMarkingDelayCounter();

  bool IsIdleMarkingDelayCounterLimitReached();

  static void MarkGrey(Heap* heap, HeapObject* object);

  static void MarkBlack(HeapObject* object, int size);

  static void TransferMark(Heap* heap, Address old_start, Address new_start);

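  // In this mark-bit scheme (see Marking in mark-compact.h) an object's two
  // consecutive bits encode its color: white 00, grey 10, black 11. So in
  // TransferColor below, copying the second bit means the source was black;
  // black objects are counted in their page's live bytes, which is why that
  // case returns true.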
  // Returns true if the color transfer requires live bytes updating.
  INLINE(static bool TransferColor(HeapObject* from, HeapObject* to,
                                   int size)) {
    MarkBit from_mark_bit = ObjectMarking::MarkBitFrom(from);
    MarkBit to_mark_bit = ObjectMarking::MarkBitFrom(to);

    if (Marking::IsBlack(to_mark_bit)) {
      DCHECK(to->GetHeap()->incremental_marking()->black_allocation());
      return false;
    }

    DCHECK(Marking::IsWhite(to_mark_bit));
    if (from_mark_bit.Get()) {
      to_mark_bit.Set();
      if (from_mark_bit.Next().Get()) {
        to_mark_bit.Next().Set();
        return true;
      }
    }
    return false;
  }

  void IterateBlackObject(HeapObject* object);

  Heap* heap() const { return heap_; }

  IncrementalMarkingJob* incremental_marking_job() {
    return &incremental_marking_job_;
  }

  bool black_allocation() { return black_allocation_; }

  void StartBlackAllocationForTesting() { StartBlackAllocation(); }

  void AbortBlackAllocation();

 private:
  class Observer : public AllocationObserver {
   public:
    Observer(IncrementalMarking& incremental_marking, intptr_t step_size)
        : AllocationObserver(step_size),
          incremental_marking_(incremental_marking) {}

    void Step(int bytes_allocated, Address, size_t) override {
      incremental_marking_.AdvanceIncrementalMarkingOnAllocation();
    }

   private:
    IncrementalMarking& incremental_marking_;
  };
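  // The two Observer members below (one for the new generation, one for the
  // old generation) are expected to be registered with the corresponding
  // spaces so that every `step_size` allocated bytes triggers
  // AdvanceIncrementalMarkingOnAllocation(), letting the mutator's
  // allocation rate drive marking progress.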

  int64_t SpaceLeftInOldSpace();

  void StartMarking();

  void StartBlackAllocation();
  void FinishBlackAllocation();

  void MarkRoots();
  void MarkObjectGroups();
  void ProcessWeakCells();
  // Retain dying maps for <FLAG_retain_maps_for_n_gc> garbage collections to
  // increase the chances of reusing the map transition tree in the future.
  void RetainMaps();

  void ActivateIncrementalWriteBarrier(PagedSpace* space);
  static void ActivateIncrementalWriteBarrier(NewSpace* space);
  void ActivateIncrementalWriteBarrier();

  static void DeactivateIncrementalWriteBarrierForSpace(PagedSpace* space);
  static void DeactivateIncrementalWriteBarrierForSpace(NewSpace* space);
  void DeactivateIncrementalWriteBarrier();

  static void SetOldSpacePageFlags(MemoryChunk* chunk, bool is_marking,
                                   bool is_compacting);

  static void SetNewSpacePageFlags(MemoryChunk* chunk, bool is_marking);

  INLINE(void ProcessMarkingDeque());

  INLINE(intptr_t ProcessMarkingDeque(
      intptr_t bytes_to_process,
      ForceCompletionAction completion = DO_NOT_FORCE_COMPLETION));

  INLINE(void VisitObject(Map* map, HeapObject* obj, int size));

  void IncrementIdleMarkingDelayCounter();

  void AdvanceIncrementalMarkingOnAllocation();

  size_t StepSizeToKeepUpWithAllocations();
  size_t StepSizeToMakeProgress();
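  // The implementation is expected to combine these two components into a
  // per-step budget: one tracks the mutator's allocation speed so marking
  // keeps up, the other guarantees forward progress even if allocation
  // stalls (cf. the comment above kAllocatedThreshold).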

  Heap* heap_;

  State state_;

  double start_time_ms_;
  size_t initial_old_generation_size_;
  size_t old_generation_allocation_counter_;
  size_t bytes_allocated_;
  size_t bytes_marked_ahead_of_schedule_;
  size_t unscanned_bytes_of_large_object_;

  int idle_marking_delay_counter_;
  int incremental_marking_finalization_rounds_;

  bool is_compacting_;
  bool should_hurry_;
  bool was_activated_;
  bool black_allocation_;
  bool finalize_marking_completed_;

  GCRequestType request_type_;

  IncrementalMarkingJob incremental_marking_job_;
  Observer new_generation_observer_;
  Observer old_generation_observer_;

  DISALLOW_IMPLICIT_CONSTRUCTORS(IncrementalMarking);
};
}  // namespace internal
}  // namespace v8

#endif  // V8_HEAP_INCREMENTAL_MARKING_H_