// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/v8.h"

#include "src/heap/incremental-marking.h"

#include "src/code-stubs.h"
#include "src/compilation-cache.h"
#include "src/conversions.h"
#include "src/heap/objects-visiting.h"
#include "src/heap/objects-visiting-inl.h"

namespace v8 {
namespace internal {


IncrementalMarking::IncrementalMarking(Heap* heap)
    : heap_(heap),
      state_(STOPPED),
      marking_deque_memory_(NULL),
      marking_deque_memory_committed_(false),
      steps_count_(0),
      old_generation_space_available_at_start_of_incremental_(0),
      old_generation_space_used_at_start_of_incremental_(0),
      should_hurry_(false),
      marking_speed_(0),
      allocated_(0),
      no_marking_scope_depth_(0),
      unscanned_bytes_of_large_object_(0) {}


void IncrementalMarking::TearDown() { delete marking_deque_memory_; }


void IncrementalMarking::RecordWriteSlow(HeapObject* obj, Object** slot,
                                         Object* value) {
  if (BaseRecordWrite(obj, slot, value) && slot != NULL) {
    MarkBit obj_bit = Marking::MarkBitFrom(obj);
    if (Marking::IsBlack(obj_bit)) {
      // Object is not going to be rescanned, so we need to record the slot.
      heap_->mark_compact_collector()->RecordSlot(HeapObject::RawField(obj, 0),
                                                  slot, value);
    }
  }
}


void IncrementalMarking::RecordWriteFromCode(HeapObject* obj, Object** slot,
                                             Isolate* isolate) {
  DCHECK(obj->IsHeapObject());
  IncrementalMarking* marking = isolate->heap()->incremental_marking();

  MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
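  // The chunk's write barrier counter counts down from
  // kWriteBarrierCounterGranularity; once less than half of it remains, fold
  // the consumed portion into write_barriers_invoked_since_last_step_ (used
  // by Step() to pace incremental marking work) and reset the counter.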
  int counter = chunk->write_barrier_counter();
  if (counter < (MemoryChunk::kWriteBarrierCounterGranularity / 2)) {
    marking->write_barriers_invoked_since_last_step_ +=
        MemoryChunk::kWriteBarrierCounterGranularity -
        chunk->write_barrier_counter();
    chunk->set_write_barrier_counter(
        MemoryChunk::kWriteBarrierCounterGranularity);
  }

  marking->RecordWrite(obj, slot, *slot);
}


void IncrementalMarking::RecordCodeTargetPatch(Code* host, Address pc,
                                               HeapObject* value) {
  if (IsMarking()) {
    RelocInfo rinfo(pc, RelocInfo::CODE_TARGET, 0, host);
    RecordWriteIntoCode(host, &rinfo, value);
  }
}


void IncrementalMarking::RecordCodeTargetPatch(Address pc, HeapObject* value) {
  if (IsMarking()) {
    Code* host = heap_->isolate()
                     ->inner_pointer_to_code_cache()
                     ->GcSafeFindCodeForInnerPointer(pc);
    RelocInfo rinfo(pc, RelocInfo::CODE_TARGET, 0, host);
    RecordWriteIntoCode(host, &rinfo, value);
  }
}


void IncrementalMarking::RecordWriteOfCodeEntrySlow(JSFunction* host,
                                                    Object** slot,
                                                    Code* value) {
  if (BaseRecordWrite(host, slot, value)) {
    DCHECK(slot != NULL);
    heap_->mark_compact_collector()->RecordCodeEntrySlot(
        reinterpret_cast<Address>(slot), value);
  }
}


void IncrementalMarking::RecordWriteIntoCodeSlow(HeapObject* obj,
                                                 RelocInfo* rinfo,
                                                 Object* value) {
  MarkBit value_bit = Marking::MarkBitFrom(HeapObject::cast(value));
  if (Marking::IsWhite(value_bit)) {
    MarkBit obj_bit = Marking::MarkBitFrom(obj);
    if (Marking::IsBlack(obj_bit)) {
      BlackToGreyAndUnshift(obj, obj_bit);
      RestartIfNotMarking();
    }
    // Object is either grey or white.  It will be scanned if it survives.
    return;
  }

  if (is_compacting_) {
    MarkBit obj_bit = Marking::MarkBitFrom(obj);
    if (Marking::IsBlack(obj_bit)) {
      // Object is not going to be rescanned.  We need to record the slot.
      heap_->mark_compact_collector()->RecordRelocSlot(rinfo,
                                                       Code::cast(value));
    }
  }
}


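// Marks the object grey without pushing it on the marking deque. If the
// object was already black, its size is subtracted from the live byte count,
// since it will be counted again when the (now grey) object is blackened at
// the end of marking (see Hurry()).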
static void MarkObjectGreyDoNotEnqueue(Object* obj) {
  if (obj->IsHeapObject()) {
    HeapObject* heap_obj = HeapObject::cast(obj);
    MarkBit mark_bit = Marking::MarkBitFrom(HeapObject::cast(obj));
    if (Marking::IsBlack(mark_bit)) {
      MemoryChunk::IncrementLiveBytesFromGC(heap_obj->address(),
                                            -heap_obj->Size());
    }
    Marking::AnyToGrey(mark_bit);
  }
}


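// Marks a white object black (accounting its size as live); objects that are
// already grey or black are left unchanged.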
static inline void MarkBlackOrKeepGrey(HeapObject* heap_object,
                                       MarkBit mark_bit, int size) {
  DCHECK(!Marking::IsImpossible(mark_bit));
  if (mark_bit.Get()) return;
  mark_bit.Set();
  MemoryChunk::IncrementLiveBytesFromGC(heap_object->address(), size);
  DCHECK(Marking::IsBlack(mark_bit));
}


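// Marks a white or grey object black, accounting its size as live.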
static inline void MarkBlackOrKeepBlack(HeapObject* heap_object,
                                        MarkBit mark_bit, int size) {
  DCHECK(!Marking::IsImpossible(mark_bit));
  if (Marking::IsBlack(mark_bit)) return;
  Marking::MarkBlack(mark_bit);
  MemoryChunk::IncrementLiveBytesFromGC(heap_object->address(), size);
  DCHECK(Marking::IsBlack(mark_bit));
}


class IncrementalMarkingMarkingVisitor
    : public StaticMarkingVisitor<IncrementalMarkingMarkingVisitor> {
 public:
  static void Initialize() {
    StaticMarkingVisitor<IncrementalMarkingMarkingVisitor>::Initialize();
    table_.Register(kVisitFixedArray, &VisitFixedArrayIncremental);
    table_.Register(kVisitNativeContext, &VisitNativeContextIncremental);
    table_.Register(kVisitJSRegExp, &VisitJSRegExp);
  }

  static const int kProgressBarScanningChunk = 32 * 1024;

  static void VisitFixedArrayIncremental(Map* map, HeapObject* object) {
    MemoryChunk* chunk = MemoryChunk::FromAddress(object->address());
    // TODO(mstarzinger): Move setting of the flag to the allocation site of
    // the array. The visitor should just check the flag.
    if (FLAG_use_marking_progress_bar &&
        chunk->owner()->identity() == LO_SPACE) {
      chunk->SetFlag(MemoryChunk::HAS_PROGRESS_BAR);
    }
    if (chunk->IsFlagSet(MemoryChunk::HAS_PROGRESS_BAR)) {
      Heap* heap = map->GetHeap();
      // When using a progress bar for large fixed arrays, scan only a chunk
      // of the array and try to push it onto the marking deque again until it
      // is fully scanned. Fall back to scanning it through to the end in case
      // this fails because of a full deque.
      int object_size = FixedArray::BodyDescriptor::SizeOf(map, object);
      int start_offset =
          Max(FixedArray::BodyDescriptor::kStartOffset, chunk->progress_bar());
      int end_offset =
          Min(object_size, start_offset + kProgressBarScanningChunk);
      int already_scanned_offset = start_offset;
      bool scan_until_end = false;
      do {
        VisitPointersWithAnchor(heap, HeapObject::RawField(object, 0),
                                HeapObject::RawField(object, start_offset),
                                HeapObject::RawField(object, end_offset));
        start_offset = end_offset;
        end_offset = Min(object_size, end_offset + kProgressBarScanningChunk);
        scan_until_end = heap->incremental_marking()->marking_deque()->IsFull();
      } while (scan_until_end && start_offset < object_size);
      chunk->set_progress_bar(start_offset);
      if (start_offset < object_size) {
        heap->incremental_marking()->marking_deque()->UnshiftGrey(object);
        heap->incremental_marking()->NotifyIncompleteScanOfObject(
            object_size - (start_offset - already_scanned_offset));
      }
    } else {
      FixedArrayVisitor::Visit(map, object);
    }
  }

  static void VisitNativeContextIncremental(Map* map, HeapObject* object) {
    Context* context = Context::cast(object);

    // We will mark the cache black with a separate pass when we finish
    // marking.  Note that GC can happen when the context is not fully
    // initialized, so the cache can be undefined.
    Object* cache = context->get(Context::NORMALIZED_MAP_CACHE_INDEX);
    if (!cache->IsUndefined()) {
      MarkObjectGreyDoNotEnqueue(cache);
    }
    VisitNativeContext(map, context);
  }

  INLINE(static void VisitPointer(Heap* heap, Object** p)) {
    Object* obj = *p;
    if (obj->IsHeapObject()) {
      heap->mark_compact_collector()->RecordSlot(p, p, obj);
      MarkObject(heap, obj);
    }
  }

  INLINE(static void VisitPointers(Heap* heap, Object** start, Object** end)) {
    for (Object** p = start; p < end; p++) {
      Object* obj = *p;
      if (obj->IsHeapObject()) {
        heap->mark_compact_collector()->RecordSlot(start, p, obj);
        MarkObject(heap, obj);
      }
    }
  }

  INLINE(static void VisitPointersWithAnchor(Heap* heap, Object** anchor,
                                             Object** start, Object** end)) {
    for (Object** p = start; p < end; p++) {
      Object* obj = *p;
      if (obj->IsHeapObject()) {
        heap->mark_compact_collector()->RecordSlot(anchor, p, obj);
        MarkObject(heap, obj);
      }
    }
  }

  // Marks the object grey and pushes it on the marking stack.  Data-only
  // objects are marked black immediately and are not pushed.
  INLINE(static void MarkObject(Heap* heap, Object* obj)) {
    HeapObject* heap_object = HeapObject::cast(obj);
    MarkBit mark_bit = Marking::MarkBitFrom(heap_object);
    if (mark_bit.data_only()) {
      MarkBlackOrKeepGrey(heap_object, mark_bit, heap_object->Size());
    } else if (Marking::IsWhite(mark_bit)) {
      heap->incremental_marking()->WhiteToGreyAndPush(heap_object, mark_bit);
    }
  }

  // Marks the object black without pushing it on the marking stack.
  // Returns true if the object needed marking and false otherwise.
  INLINE(static bool MarkObjectWithoutPush(Heap* heap, Object* obj)) {
    HeapObject* heap_object = HeapObject::cast(obj);
    MarkBit mark_bit = Marking::MarkBitFrom(heap_object);
    if (Marking::IsWhite(mark_bit)) {
      mark_bit.Set();
      MemoryChunk::IncrementLiveBytesFromGC(heap_object->address(),
                                            heap_object->Size());
      return true;
    }
    return false;
  }
};


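// Visitor used to mark the strong roots at the start of incremental marking:
// heap objects are marked grey and pushed on the deque, while data-only
// objects are marked black immediately (as in
// IncrementalMarkingMarkingVisitor::MarkObject above).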
class IncrementalMarkingRootMarkingVisitor : public ObjectVisitor {
 public:
  explicit IncrementalMarkingRootMarkingVisitor(
      IncrementalMarking* incremental_marking)
      : incremental_marking_(incremental_marking) {}

  void VisitPointer(Object** p) { MarkObjectByPointer(p); }

  void VisitPointers(Object** start, Object** end) {
    for (Object** p = start; p < end; p++) MarkObjectByPointer(p);
  }

 private:
  void MarkObjectByPointer(Object** p) {
    Object* obj = *p;
    if (!obj->IsHeapObject()) return;

    HeapObject* heap_object = HeapObject::cast(obj);
    MarkBit mark_bit = Marking::MarkBitFrom(heap_object);
    if (mark_bit.data_only()) {
      MarkBlackOrKeepGrey(heap_object, mark_bit, heap_object->Size());
    } else {
      if (Marking::IsWhite(mark_bit)) {
        incremental_marking_->WhiteToGreyAndPush(heap_object, mark_bit);
      }
    }
  }

  IncrementalMarking* incremental_marking_;
};


void IncrementalMarking::Initialize() {
  IncrementalMarkingMarkingVisitor::Initialize();
}


void IncrementalMarking::SetOldSpacePageFlags(MemoryChunk* chunk,
                                              bool is_marking,
                                              bool is_compacting) {
  if (is_marking) {
    chunk->SetFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
    chunk->SetFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);

    // It's difficult to filter out slots recorded for large objects.
    if (chunk->owner()->identity() == LO_SPACE &&
        chunk->size() > static_cast<size_t>(Page::kPageSize) && is_compacting) {
      chunk->SetFlag(MemoryChunk::RESCAN_ON_EVACUATION);
    }
  } else if (chunk->owner()->identity() == CELL_SPACE ||
             chunk->owner()->identity() == PROPERTY_CELL_SPACE ||
             chunk->scan_on_scavenge()) {
    chunk->ClearFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
    chunk->ClearFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
  } else {
    chunk->ClearFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
    chunk->SetFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
  }
}


void IncrementalMarking::SetNewSpacePageFlags(NewSpacePage* chunk,
                                              bool is_marking) {
  chunk->SetFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
  if (is_marking) {
    chunk->SetFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
  } else {
    chunk->ClearFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
  }
  chunk->SetFlag(MemoryChunk::SCAN_ON_SCAVENGE);
}


void IncrementalMarking::DeactivateIncrementalWriteBarrierForSpace(
    PagedSpace* space) {
  PageIterator it(space);
  while (it.has_next()) {
    Page* p = it.next();
    SetOldSpacePageFlags(p, false, false);
  }
}


void IncrementalMarking::DeactivateIncrementalWriteBarrierForSpace(
    NewSpace* space) {
  NewSpacePageIterator it(space);
  while (it.has_next()) {
    NewSpacePage* p = it.next();
    SetNewSpacePageFlags(p, false);
  }
}


void IncrementalMarking::DeactivateIncrementalWriteBarrier() {
  DeactivateIncrementalWriteBarrierForSpace(heap_->old_pointer_space());
  DeactivateIncrementalWriteBarrierForSpace(heap_->old_data_space());
  DeactivateIncrementalWriteBarrierForSpace(heap_->cell_space());
  DeactivateIncrementalWriteBarrierForSpace(heap_->property_cell_space());
  DeactivateIncrementalWriteBarrierForSpace(heap_->map_space());
  DeactivateIncrementalWriteBarrierForSpace(heap_->code_space());
  DeactivateIncrementalWriteBarrierForSpace(heap_->new_space());

  LargePage* lop = heap_->lo_space()->first_page();
  while (lop->is_valid()) {
    SetOldSpacePageFlags(lop, false, false);
    lop = lop->next_page();
  }
}


void IncrementalMarking::ActivateIncrementalWriteBarrier(PagedSpace* space) {
  PageIterator it(space);
  while (it.has_next()) {
    Page* p = it.next();
    SetOldSpacePageFlags(p, true, is_compacting_);
  }
}


void IncrementalMarking::ActivateIncrementalWriteBarrier(NewSpace* space) {
  NewSpacePageIterator it(space->ToSpaceStart(), space->ToSpaceEnd());
  while (it.has_next()) {
    NewSpacePage* p = it.next();
    SetNewSpacePageFlags(p, true);
  }
}


void IncrementalMarking::ActivateIncrementalWriteBarrier() {
  ActivateIncrementalWriteBarrier(heap_->old_pointer_space());
  ActivateIncrementalWriteBarrier(heap_->old_data_space());
  ActivateIncrementalWriteBarrier(heap_->cell_space());
  ActivateIncrementalWriteBarrier(heap_->property_cell_space());
  ActivateIncrementalWriteBarrier(heap_->map_space());
  ActivateIncrementalWriteBarrier(heap_->code_space());
  ActivateIncrementalWriteBarrier(heap_->new_space());

  LargePage* lop = heap_->lo_space()->first_page();
  while (lop->is_valid()) {
    SetOldSpacePageFlags(lop, true, is_compacting_);
    lop = lop->next_page();
  }
}


bool IncrementalMarking::ShouldActivate() {
  return WorthActivating() && heap_->NextGCIsLikelyToBeFull();
}


bool IncrementalMarking::WorthActivating() {
#ifndef DEBUG
  static const intptr_t kActivationThreshold = 8 * MB;
#else
  // TODO(gc) consider setting this to some low level so that some
  // debug tests run with incremental marking and some without.
  static const intptr_t kActivationThreshold = 0;
#endif
  // Only start incremental marking in a safe state: 1) when incremental
  // marking is turned on, 2) when we are currently not in a GC, and
  // 3) when we are currently not serializing or deserializing the heap.
  return FLAG_incremental_marking && FLAG_incremental_marking_steps &&
         heap_->gc_state() == Heap::NOT_IN_GC &&
         !heap_->isolate()->serializer_enabled() &&
         heap_->isolate()->IsInitialized() &&
         heap_->PromotedSpaceSizeOfObjects() > kActivationThreshold;
}


void IncrementalMarking::ActivateGeneratedStub(Code* stub) {
  DCHECK(RecordWriteStub::GetMode(stub) == RecordWriteStub::STORE_BUFFER_ONLY);

  if (!IsMarking()) {
    // Initially the stub is generated in STORE_BUFFER_ONLY mode, so we don't
    // need to do anything if incremental marking is not active.
  } else if (IsCompacting()) {
    RecordWriteStub::Patch(stub, RecordWriteStub::INCREMENTAL_COMPACTION);
  } else {
    RecordWriteStub::Patch(stub, RecordWriteStub::INCREMENTAL);
  }
}


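// Walks the code-stub table and switches every RecordWrite stub to the given
// mode.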
static void PatchIncrementalMarkingRecordWriteStubs(
    Heap* heap, RecordWriteStub::Mode mode) {
  UnseededNumberDictionary* stubs = heap->code_stubs();

  int capacity = stubs->Capacity();
  for (int i = 0; i < capacity; i++) {
    Object* k = stubs->KeyAt(i);
    if (stubs->IsKey(k)) {
      uint32_t key = NumberToUint32(k);

      if (CodeStub::MajorKeyFromKey(key) == CodeStub::RecordWrite) {
        Object* e = stubs->ValueAt(i);
        if (e->IsCode()) {
          RecordWriteStub::Patch(Code::cast(e), mode);
        }
      }
    }
  }
}


void IncrementalMarking::EnsureMarkingDequeIsCommitted() {
  if (marking_deque_memory_ == NULL) {
    marking_deque_memory_ = new base::VirtualMemory(4 * MB);
  }
  if (!marking_deque_memory_committed_) {
    bool success = marking_deque_memory_->Commit(
        reinterpret_cast<Address>(marking_deque_memory_->address()),
        marking_deque_memory_->size(),
        false);  // Not executable.
    CHECK(success);
    marking_deque_memory_committed_ = true;
  }
}


void IncrementalMarking::UncommitMarkingDeque() {
  if (state_ == STOPPED && marking_deque_memory_committed_) {
    bool success = marking_deque_memory_->Uncommit(
        reinterpret_cast<Address>(marking_deque_memory_->address()),
        marking_deque_memory_->size());
    CHECK(success);
    marking_deque_memory_committed_ = false;
  }
}


void IncrementalMarking::Start(CompactionFlag flag) {
  if (FLAG_trace_incremental_marking) {
    PrintF("[IncrementalMarking] Start\n");
  }
  DCHECK(FLAG_incremental_marking);
  DCHECK(FLAG_incremental_marking_steps);
  DCHECK(state_ == STOPPED);
  DCHECK(heap_->gc_state() == Heap::NOT_IN_GC);
  DCHECK(!heap_->isolate()->serializer_enabled());
  DCHECK(heap_->isolate()->IsInitialized());

  ResetStepCounters();

  if (!heap_->mark_compact_collector()->sweeping_in_progress()) {
    StartMarking(flag);
  } else {
    if (FLAG_trace_incremental_marking) {
      PrintF("[IncrementalMarking] Start sweeping.\n");
    }
    state_ = SWEEPING;
  }

  heap_->new_space()->LowerInlineAllocationLimit(kAllocatedThreshold);
}


void IncrementalMarking::StartMarking(CompactionFlag flag) {
  if (FLAG_trace_incremental_marking) {
    PrintF("[IncrementalMarking] Start marking\n");
  }

  is_compacting_ = !FLAG_never_compact && (flag == ALLOW_COMPACTION) &&
                   heap_->mark_compact_collector()->StartCompaction(
                       MarkCompactCollector::INCREMENTAL_COMPACTION);

  state_ = MARKING;

  RecordWriteStub::Mode mode = is_compacting_
                                   ? RecordWriteStub::INCREMENTAL_COMPACTION
                                   : RecordWriteStub::INCREMENTAL;

  PatchIncrementalMarkingRecordWriteStubs(heap_, mode);

  EnsureMarkingDequeIsCommitted();

  // Initialize marking stack.
  Address addr = static_cast<Address>(marking_deque_memory_->address());
  size_t size = marking_deque_memory_->size();
  if (FLAG_force_marking_deque_overflows) size = 64 * kPointerSize;
  marking_deque_.Initialize(addr, addr + size);

  ActivateIncrementalWriteBarrier();

// Marking bits are cleared by the sweeper.
#ifdef VERIFY_HEAP
  if (FLAG_verify_heap) {
    heap_->mark_compact_collector()->VerifyMarkbitsAreClean();
  }
#endif

  heap_->CompletelyClearInstanceofCache();
  heap_->isolate()->compilation_cache()->MarkCompactPrologue();

  if (FLAG_cleanup_code_caches_at_gc) {
    // We will mark the cache black with a separate pass
    // when we finish marking.
    MarkObjectGreyDoNotEnqueue(heap_->polymorphic_code_cache());
  }

  // Mark strong roots grey.
  IncrementalMarkingRootMarkingVisitor visitor(this);
  heap_->IterateStrongRoots(&visitor, VISIT_ONLY_STRONG);

  heap_->mark_compact_collector()->MarkWeakObjectToCodeTable();

  // Ready to start incremental marking.
  if (FLAG_trace_incremental_marking) {
    PrintF("[IncrementalMarking] Running\n");
  }
}


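// Clears the mark bits of the new space's from-space pages before a scavenge.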
void IncrementalMarking::PrepareForScavenge() {
  if (!IsMarking()) return;
  NewSpacePageIterator it(heap_->new_space()->FromSpaceStart(),
                          heap_->new_space()->FromSpaceEnd());
  while (it.has_next()) {
    Bitmap::Clear(it.next());
  }
}


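// Rebuilds the marking deque after a scavenge: entries that were evacuated
// from new space are replaced by their forwarding addresses, entries that did
// not survive the scavenge are dropped, and one-word fillers are skipped.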
void IncrementalMarking::UpdateMarkingDequeAfterScavenge() {
  if (!IsMarking()) return;

  int current = marking_deque_.bottom();
  int mask = marking_deque_.mask();
  int limit = marking_deque_.top();
  HeapObject** array = marking_deque_.array();
  int new_top = current;

  Map* filler_map = heap_->one_pointer_filler_map();

  while (current != limit) {
    HeapObject* obj = array[current];
    DCHECK(obj->IsHeapObject());
    current = ((current + 1) & mask);
    if (heap_->InNewSpace(obj)) {
      MapWord map_word = obj->map_word();
      if (map_word.IsForwardingAddress()) {
        HeapObject* dest = map_word.ToForwardingAddress();
        array[new_top] = dest;
        new_top = ((new_top + 1) & mask);
        DCHECK(new_top != marking_deque_.bottom());
#ifdef DEBUG
        MarkBit mark_bit = Marking::MarkBitFrom(obj);
        DCHECK(Marking::IsGrey(mark_bit) ||
               (obj->IsFiller() && Marking::IsWhite(mark_bit)));
#endif
      }
    } else if (obj->map() != filler_map) {
      // Skip one-word filler objects that appear on the stack when we perform
      // an in-place array shift.
      array[new_top] = obj;
      new_top = ((new_top + 1) & mask);
      DCHECK(new_top != marking_deque_.bottom());
#ifdef DEBUG
      MarkBit mark_bit = Marking::MarkBitFrom(obj);
      MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
      DCHECK(Marking::IsGrey(mark_bit) ||
             (obj->IsFiller() && Marking::IsWhite(mark_bit)) ||
             (chunk->IsFlagSet(MemoryChunk::HAS_PROGRESS_BAR) &&
              Marking::IsBlack(mark_bit)));
#endif
    }
  }
  marking_deque_.set_top(new_top);
}


void IncrementalMarking::VisitObject(Map* map, HeapObject* obj, int size) {
  MarkBit map_mark_bit = Marking::MarkBitFrom(map);
  if (Marking::IsWhite(map_mark_bit)) {
    WhiteToGreyAndPush(map, map_mark_bit);
  }

  IncrementalMarkingMarkingVisitor::IterateBody(map, obj);

  MarkBit mark_bit = Marking::MarkBitFrom(obj);
#if ENABLE_SLOW_DCHECKS
  MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
  SLOW_DCHECK(Marking::IsGrey(mark_bit) ||
              (obj->IsFiller() && Marking::IsWhite(mark_bit)) ||
              (chunk->IsFlagSet(MemoryChunk::HAS_PROGRESS_BAR) &&
               Marking::IsBlack(mark_bit)));
#endif
  MarkBlackOrKeepBlack(obj, mark_bit, size);
}


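// Drains objects from the marking deque until roughly bytes_to_process bytes
// have been visited (or the deque is empty) and returns the number of bytes
// actually processed.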
intptr_t IncrementalMarking::ProcessMarkingDeque(intptr_t bytes_to_process) {
  intptr_t bytes_processed = 0;
  Map* filler_map = heap_->one_pointer_filler_map();
  while (!marking_deque_.IsEmpty() && bytes_processed < bytes_to_process) {
    HeapObject* obj = marking_deque_.Pop();

    // Explicitly skip one-word fillers. Incremental markbit patterns are
    // correct only for objects that occupy at least two words.
    Map* map = obj->map();
    if (map == filler_map) continue;

    int size = obj->SizeFromMap(map);
    unscanned_bytes_of_large_object_ = 0;
    VisitObject(map, obj, size);
    int delta = (size - unscanned_bytes_of_large_object_);
    // TODO(jochen): remove after http://crbug.com/381820 is resolved.
    CHECK_LT(0, delta);
    bytes_processed += delta;
  }
  return bytes_processed;
}


void IncrementalMarking::ProcessMarkingDeque() {
  Map* filler_map = heap_->one_pointer_filler_map();
  while (!marking_deque_.IsEmpty()) {
    HeapObject* obj = marking_deque_.Pop();

    // Explicitly skip one-word fillers. Incremental markbit patterns are
    // correct only for objects that occupy at least two words.
    Map* map = obj->map();
    if (map == filler_map) continue;

    VisitObject(map, obj, obj->SizeFromMap(map));
  }
}


void IncrementalMarking::Hurry() {
  if (state() == MARKING) {
    double start = 0.0;
    if (FLAG_trace_incremental_marking || FLAG_print_cumulative_gc_stat) {
      start = base::OS::TimeCurrentMillis();
      if (FLAG_trace_incremental_marking) {
        PrintF("[IncrementalMarking] Hurry\n");
      }
    }
    // TODO(gc) hurry can mark objects it encounters black because the mutator
    // was stopped.
    ProcessMarkingDeque();
    state_ = COMPLETE;
    if (FLAG_trace_incremental_marking || FLAG_print_cumulative_gc_stat) {
      double end = base::OS::TimeCurrentMillis();
      double delta = end - start;
      heap_->tracer()->AddMarkingTime(delta);
      if (FLAG_trace_incremental_marking) {
        PrintF("[IncrementalMarking] Complete (hurry), spent %d ms.\n",
               static_cast<int>(delta));
      }
    }
  }

  if (FLAG_cleanup_code_caches_at_gc) {
    PolymorphicCodeCache* poly_cache = heap_->polymorphic_code_cache();
    Marking::GreyToBlack(Marking::MarkBitFrom(poly_cache));
    MemoryChunk::IncrementLiveBytesFromGC(poly_cache->address(),
                                          PolymorphicCodeCache::kSize);
  }

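  // Now that marking is complete, turn the normalized map caches of all
  // native contexts black; during marking they were only marked grey without
  // being pushed on the deque (see VisitNativeContextIncremental).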
  Object* context = heap_->native_contexts_list();
  while (!context->IsUndefined()) {
    // GC can happen when the context is not fully initialized,
    // so the cache can be undefined.
    HeapObject* cache = HeapObject::cast(
        Context::cast(context)->get(Context::NORMALIZED_MAP_CACHE_INDEX));
    if (!cache->IsUndefined()) {
      MarkBit mark_bit = Marking::MarkBitFrom(cache);
      if (Marking::IsGrey(mark_bit)) {
        Marking::GreyToBlack(mark_bit);
        MemoryChunk::IncrementLiveBytesFromGC(cache->address(), cache->Size());
      }
    }
    context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK);
  }
}


void IncrementalMarking::Abort() {
  if (IsStopped()) return;
  if (FLAG_trace_incremental_marking) {
    PrintF("[IncrementalMarking] Aborting.\n");
  }
  heap_->new_space()->LowerInlineAllocationLimit(0);
  IncrementalMarking::set_should_hurry(false);
  ResetStepCounters();
  if (IsMarking()) {
    PatchIncrementalMarkingRecordWriteStubs(heap_,
                                            RecordWriteStub::STORE_BUFFER_ONLY);
    DeactivateIncrementalWriteBarrier();

    if (is_compacting_) {
      LargeObjectIterator it(heap_->lo_space());
      for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
        Page* p = Page::FromAddress(obj->address());
        if (p->IsFlagSet(Page::RESCAN_ON_EVACUATION)) {
          p->ClearFlag(Page::RESCAN_ON_EVACUATION);
        }
      }
    }
  }
  heap_->isolate()->stack_guard()->ClearGC();
  state_ = STOPPED;
  is_compacting_ = false;
}


void IncrementalMarking::Finalize() {
  Hurry();
  state_ = STOPPED;
  is_compacting_ = false;
  heap_->new_space()->LowerInlineAllocationLimit(0);
  IncrementalMarking::set_should_hurry(false);
  ResetStepCounters();
  PatchIncrementalMarkingRecordWriteStubs(heap_,
                                          RecordWriteStub::STORE_BUFFER_ONLY);
  DeactivateIncrementalWriteBarrier();
  DCHECK(marking_deque_.IsEmpty());
  heap_->isolate()->stack_guard()->ClearGC();
}


void IncrementalMarking::MarkingComplete(CompletionAction action) {
  state_ = COMPLETE;
  // We will set the stack guard to request a GC now.  This will mean the rest
  // of the GC gets performed as soon as possible (we can't do a GC here in a
  // record-write context).  If a few things get allocated between now and
  // then, that shouldn't make us do a scavenge and keep being incremental, so
  // we set the should-hurry flag to indicate that there can't be much work
  // left to do.
  set_should_hurry(true);
  if (FLAG_trace_incremental_marking) {
    PrintF("[IncrementalMarking] Complete (normal).\n");
  }
  if (action == GC_VIA_STACK_GUARD) {
    heap_->isolate()->stack_guard()->RequestGC();
  }
}


void IncrementalMarking::OldSpaceStep(intptr_t allocated) {
  if (IsStopped() && ShouldActivate()) {
    // TODO(hpayer): Let's play safe for now, but compaction should be
    // in principle possible.
    Start(PREVENT_COMPACTION);
  } else {
    Step(allocated * kFastMarking / kInitialMarkingSpeed, GC_VIA_STACK_GUARD);
  }
}


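// Heuristically bumps marking_speed_: every kMarkingSpeedAccellerationInterval
// steps, when old-space headroom is nearly exhausted, when the old generation
// has grown substantially since marking started, or when allocation is
// outpacing the marker.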
void IncrementalMarking::SpeedUp() {
  bool speed_up = false;

  if ((steps_count_ % kMarkingSpeedAccellerationInterval) == 0) {
    if (FLAG_trace_gc) {
      PrintPID("Speed up marking after %d steps\n",
               static_cast<int>(kMarkingSpeedAccellerationInterval));
    }
    speed_up = true;
  }

  bool space_left_is_very_small =
      (old_generation_space_available_at_start_of_incremental_ < 10 * MB);

  bool only_1_nth_of_space_that_was_available_still_left =
      (SpaceLeftInOldSpace() * (marking_speed_ + 1) <
       old_generation_space_available_at_start_of_incremental_);

  if (space_left_is_very_small ||
      only_1_nth_of_space_that_was_available_still_left) {
    if (FLAG_trace_gc) PrintPID("Speed up marking because of low space left\n");
    speed_up = true;
  }

  bool size_of_old_space_multiplied_by_n_during_marking =
      (heap_->PromotedTotalSize() >
       (marking_speed_ + 1) *
           old_generation_space_used_at_start_of_incremental_);
  if (size_of_old_space_multiplied_by_n_during_marking) {
    speed_up = true;
    if (FLAG_trace_gc) {
      PrintPID("Speed up marking because of heap size increase\n");
    }
  }

  int64_t promoted_during_marking =
      heap_->PromotedTotalSize() -
      old_generation_space_used_at_start_of_incremental_;
  intptr_t delay = marking_speed_ * MB;
  intptr_t scavenge_slack = heap_->MaxSemiSpaceSize();

  // We try to scan at least twice the speed that we are allocating.
  if (promoted_during_marking > bytes_scanned_ / 2 + scavenge_slack + delay) {
    if (FLAG_trace_gc) {
      PrintPID("Speed up marking because marker was not keeping up\n");
    }
    speed_up = true;
  }

  if (speed_up) {
    if (state_ != MARKING) {
      if (FLAG_trace_gc) {
        PrintPID("Postponing speeding up marking until marking starts\n");
      }
    } else {
      marking_speed_ += kMarkingSpeedAccelleration;
      marking_speed_ = static_cast<int>(
          Min(kMaxMarkingSpeed, static_cast<intptr_t>(marking_speed_ * 1.3)));
      if (FLAG_trace_gc) {
        PrintPID("Marking speed increased to %d\n", marking_speed_);
      }
    }
  }
}


void IncrementalMarking::Step(intptr_t allocated_bytes, CompletionAction action,
                              bool force_marking) {
  if (heap_->gc_state() != Heap::NOT_IN_GC || !FLAG_incremental_marking ||
      !FLAG_incremental_marking_steps ||
      (state_ != SWEEPING && state_ != MARKING)) {
    return;
  }

  allocated_ += allocated_bytes;

  if (!force_marking && allocated_ < kAllocatedThreshold &&
      write_barriers_invoked_since_last_step_ <
          kWriteBarriersInvokedThreshold) {
    return;
  }

  if (state_ == MARKING && no_marking_scope_depth_ > 0) return;

  {
    HistogramTimerScope incremental_marking_scope(
        heap_->isolate()->counters()->gc_incremental_marking());
    double start = base::OS::TimeCurrentMillis();

    // The marking speed is driven either by the allocation rate or by the
    // rate at which we are having to check the color of objects in the write
    // barrier. It is possible for a tight non-allocating loop to run a lot of
    // write barriers before we get here and check them (marking can only take
    // place on allocation), so to reduce the lumpiness we don't use the write
    // barriers invoked since the last step directly to determine the amount
    // of work to do.
    intptr_t bytes_to_process =
        marking_speed_ *
        Max(allocated_, write_barriers_invoked_since_last_step_);
    allocated_ = 0;
    write_barriers_invoked_since_last_step_ = 0;

    bytes_scanned_ += bytes_to_process;
    intptr_t bytes_processed = 0;

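    // If concurrent sweeping has finished, finalize it; marking proper
    // (without compaction) only starts once sweeping is complete.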
    if (state_ == SWEEPING) {
      if (heap_->mark_compact_collector()->sweeping_in_progress() &&
          heap_->mark_compact_collector()->IsSweepingCompleted()) {
        heap_->mark_compact_collector()->EnsureSweepingCompleted();
      }
      if (!heap_->mark_compact_collector()->sweeping_in_progress()) {
        bytes_scanned_ = 0;
        StartMarking(PREVENT_COMPACTION);
      }
    } else if (state_ == MARKING) {
      bytes_processed = ProcessMarkingDeque(bytes_to_process);
      if (marking_deque_.IsEmpty()) MarkingComplete(action);
    }

    steps_count_++;

    // Speed up marking if we are marking too slowly or if we are almost done
    // with marking.
    SpeedUp();

    double end = base::OS::TimeCurrentMillis();
    double duration = (end - start);
    // Note that we report zero bytes here when sweeping was in progress or
    // when we just started incremental marking. In these cases we did not
    // process the marking deque.
    heap_->tracer()->AddIncrementalMarkingStep(duration, bytes_processed);
  }
}


void IncrementalMarking::ResetStepCounters() {
  steps_count_ = 0;
  old_generation_space_available_at_start_of_incremental_ =
      SpaceLeftInOldSpace();
  old_generation_space_used_at_start_of_incremental_ =
      heap_->PromotedTotalSize();
  bytes_rescanned_ = 0;
  marking_speed_ = kInitialMarkingSpeed;
  bytes_scanned_ = 0;
  write_barriers_invoked_since_last_step_ = 0;
}


int64_t IncrementalMarking::SpaceLeftInOldSpace() {
  return heap_->MaxOldGenerationSize() - heap_->PromotedSpaceSizeOfObjects();
}
}
}  // namespace v8::internal