// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_HEAP_MARK_COMPACT_H_
#define V8_HEAP_MARK_COMPACT_H_

#include "src/base/bits.h"
#include "src/heap/spaces.h"

namespace v8 {
namespace internal {

// Callback function that returns whether an object is alive. The heap size
// of the object is returned in *size. It optionally updates *offset to the
// first live object on the page (only used for old and map objects).
typedef bool (*IsAliveFunction)(HeapObject* obj, int* size, int* offset);

// Forward declarations.
class CodeFlusher;
class MarkCompactCollector;
class MarkingVisitor;
class RootMarkingVisitor;


class Marking {
 public:
  explicit Marking(Heap* heap) : heap_(heap) {}

  INLINE(static MarkBit MarkBitFrom(Address addr));

  INLINE(static MarkBit MarkBitFrom(HeapObject* obj)) {
    return MarkBitFrom(reinterpret_cast<Address>(obj));
  }

  // Impossible markbits: 01
  static const char* kImpossibleBitPattern;
  INLINE(static bool IsImpossible(MarkBit mark_bit)) {
    return !mark_bit.Get() && mark_bit.Next().Get();
  }

  // Black markbits: 10 - this is required by the sweeper.
  static const char* kBlackBitPattern;
  INLINE(static bool IsBlack(MarkBit mark_bit)) {
    return mark_bit.Get() && !mark_bit.Next().Get();
  }

  // White markbits: 00 - this is required by the mark bit clearer.
  static const char* kWhiteBitPattern;
  INLINE(static bool IsWhite(MarkBit mark_bit)) { return !mark_bit.Get(); }

  // Grey markbits: 11
  static const char* kGreyBitPattern;
  INLINE(static bool IsGrey(MarkBit mark_bit)) {
    return mark_bit.Get() && mark_bit.Next().Get();
  }

  INLINE(static void MarkBlack(MarkBit mark_bit)) {
    mark_bit.Set();
    mark_bit.Next().Clear();
  }

  INLINE(static void BlackToGrey(MarkBit markbit)) { markbit.Next().Set(); }

  INLINE(static void WhiteToGrey(MarkBit markbit)) {
    markbit.Set();
    markbit.Next().Set();
  }

  INLINE(static void GreyToBlack(MarkBit markbit)) { markbit.Next().Clear(); }

  INLINE(static void BlackToGrey(HeapObject* obj)) {
    BlackToGrey(MarkBitFrom(obj));
  }

  INLINE(static void AnyToGrey(MarkBit markbit)) {
    markbit.Set();
    markbit.Next().Set();
  }
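
  // Illustrative usage sketch (not part of the V8 API; `obj` is a
  // hypothetical HeapObject*): a typical marking step inspects the current
  // color and advances it along white -> grey -> black.
  //
  //   MarkBit bit = Marking::MarkBitFrom(obj);
  //   if (Marking::IsWhite(bit)) Marking::WhiteToGrey(bit);  // 00 -> 11
  //   ...                                                    // scan object
  //   Marking::GreyToBlack(bit);                             // 11 -> 10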

  void TransferMark(Address old_start, Address new_start);

#ifdef DEBUG
  enum ObjectColor {
    BLACK_OBJECT,
    WHITE_OBJECT,
    GREY_OBJECT,
    IMPOSSIBLE_COLOR
  };

  static const char* ColorName(ObjectColor color) {
    switch (color) {
      case BLACK_OBJECT:
        return "black";
      case WHITE_OBJECT:
        return "white";
      case GREY_OBJECT:
        return "grey";
      case IMPOSSIBLE_COLOR:
        return "impossible";
    }
    return "error";
  }

  static ObjectColor Color(HeapObject* obj) {
    return Color(Marking::MarkBitFrom(obj));
  }

  static ObjectColor Color(MarkBit mark_bit) {
    if (IsBlack(mark_bit)) return BLACK_OBJECT;
    if (IsWhite(mark_bit)) return WHITE_OBJECT;
    if (IsGrey(mark_bit)) return GREY_OBJECT;
    UNREACHABLE();
    return IMPOSSIBLE_COLOR;
  }
#endif

  // Returns true if the transferred color is black.
  INLINE(static bool TransferColor(HeapObject* from, HeapObject* to)) {
    MarkBit from_mark_bit = MarkBitFrom(from);
    MarkBit to_mark_bit = MarkBitFrom(to);
    bool is_black = false;
    if (from_mark_bit.Get()) {
      to_mark_bit.Set();
      is_black = true;  // Looks black so far.
    }
    if (from_mark_bit.Next().Get()) {
      to_mark_bit.Next().Set();
      is_black = false;  // Was actually grey.
    }
    return is_black;
  }
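
  // For illustration: if `from` is black (10), only the first bit is copied
  // and TransferColor returns true; if `from` is grey (11), both bits are
  // copied and it returns false; if `from` is white (00), nothing is set and
  // it returns false.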

 private:
  Heap* heap_;
};

// ----------------------------------------------------------------------------
// Marking deque for tracing live objects.
class MarkingDeque {
 public:
  MarkingDeque()
      : array_(NULL), top_(0), bottom_(0), mask_(0), overflowed_(false) {}

  void Initialize(Address low, Address high) {
    HeapObject** obj_low = reinterpret_cast<HeapObject**>(low);
    HeapObject** obj_high = reinterpret_cast<HeapObject**>(high);
    array_ = obj_low;
    mask_ = base::bits::RoundDownToPowerOfTwo32(
                static_cast<uint32_t>(obj_high - obj_low)) -
            1;
    top_ = bottom_ = 0;
    overflowed_ = false;
  }
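
  // Worked example (illustrative numbers): if the range [low, high) has room
  // for 1000 HeapObject* entries, the usable capacity is rounded down to the
  // nearest power of two, 512, so mask_ == 511 and the index arithmetic below
  // wraps with `& mask_`.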

  inline bool IsFull() { return ((top_ + 1) & mask_) == bottom_; }

  inline bool IsEmpty() { return top_ == bottom_; }

  bool overflowed() const { return overflowed_; }

  void ClearOverflowed() { overflowed_ = false; }

  void SetOverflowed() { overflowed_ = true; }

  // Push the (marked) object on the marking stack if there is room,
  // otherwise mark the object as overflowed and wait for a rescan of the
  // heap.
  INLINE(void PushBlack(HeapObject* object)) {
    DCHECK(object->IsHeapObject());
    if (IsFull()) {
      Marking::BlackToGrey(object);
      MemoryChunk::IncrementLiveBytesFromGC(object->address(), -object->Size());
      SetOverflowed();
    } else {
      array_[top_] = object;
      top_ = ((top_ + 1) & mask_);
    }
  }

  INLINE(void PushGrey(HeapObject* object)) {
    DCHECK(object->IsHeapObject());
    if (IsFull()) {
      SetOverflowed();
    } else {
      array_[top_] = object;
      top_ = ((top_ + 1) & mask_);
    }
  }

  INLINE(HeapObject* Pop()) {
    DCHECK(!IsEmpty());
    top_ = ((top_ - 1) & mask_);
    HeapObject* object = array_[top_];
    DCHECK(object->IsHeapObject());
    return object;
  }

  INLINE(void UnshiftGrey(HeapObject* object)) {
    DCHECK(object->IsHeapObject());
    if (IsFull()) {
      SetOverflowed();
    } else {
      bottom_ = ((bottom_ - 1) & mask_);
      array_[bottom_] = object;
    }
  }
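
  // Typical draining loop (sketch only; `deque` and `VisitObject` are
  // placeholders, not names defined in this header):
  //
  //   while (!deque.IsEmpty()) {
  //     HeapObject* obj = deque.Pop();
  //     VisitObject(obj);  // may PushBlack()/PushGrey() newly found objects
  //   }
  //   if (deque.overflowed()) {
  //     // Rescan the heap for grey objects, refill, and ClearOverflowed().
  //   }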

  HeapObject** array() { return array_; }
  int bottom() { return bottom_; }
  int top() { return top_; }
  int mask() { return mask_; }
  void set_top(int top) { top_ = top; }

 private:
  HeapObject** array_;
  // array_[(top_ - 1) & mask_] is the top element in the deque.  The deque is
  // empty when top_ == bottom_.  It is full when top_ + 1 == bottom_
  // (mod mask_ + 1).
  int top_;
  int bottom_;
  int mask_;
  bool overflowed_;

  DISALLOW_COPY_AND_ASSIGN(MarkingDeque);
};


class SlotsBufferAllocator {
 public:
  SlotsBuffer* AllocateBuffer(SlotsBuffer* next_buffer);
  void DeallocateBuffer(SlotsBuffer* buffer);

  void DeallocateChain(SlotsBuffer** buffer_address);
};


// SlotsBuffer records a sequence of slots that have to be updated
// after live objects have been relocated from evacuation candidates.
// All slots are either untyped or typed:
//    - Untyped slots are expected to contain a tagged object pointer.
//      They are recorded by an address.
//    - Typed slots are expected to contain an encoded pointer to a heap
//      object where the way of encoding depends on the type of the slot.
//      They are recorded as a pair (SlotType, slot address).
// We assume that the zero page is never mapped; this allows us to distinguish
// untyped slots from typed slots during iteration by a simple comparison:
// if an element of the slots buffer is less than NUMBER_OF_SLOT_TYPES, then
// it is the first element of a typed slot's pair.
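// For example (addresses are made up for illustration), a buffer holding
//   { 0x2a3f10, CODE_TARGET_SLOT, 0x2a3f48, 0x2a3f80 }
// is decoded as an untyped slot at 0x2a3f10, a typed slot
// (CODE_TARGET_SLOT, 0x2a3f48), and another untyped slot at 0x2a3f80,
// because only the type marker compares below NUMBER_OF_SLOT_TYPES.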
class SlotsBuffer {
 public:
  typedef Object** ObjectSlot;

  explicit SlotsBuffer(SlotsBuffer* next_buffer)
      : idx_(0), chain_length_(1), next_(next_buffer) {
    if (next_ != NULL) {
      chain_length_ = next_->chain_length_ + 1;
    }
  }

  ~SlotsBuffer() {}

  void Add(ObjectSlot slot) {
    DCHECK(0 <= idx_ && idx_ < kNumberOfElements);
    slots_[idx_++] = slot;
  }

  enum SlotType {
    EMBEDDED_OBJECT_SLOT,
    RELOCATED_CODE_OBJECT,
    CODE_TARGET_SLOT,
    CODE_ENTRY_SLOT,
    DEBUG_TARGET_SLOT,
    JS_RETURN_SLOT,
    NUMBER_OF_SLOT_TYPES
  };

  static const char* SlotTypeToString(SlotType type) {
    switch (type) {
      case EMBEDDED_OBJECT_SLOT:
        return "EMBEDDED_OBJECT_SLOT";
      case RELOCATED_CODE_OBJECT:
        return "RELOCATED_CODE_OBJECT";
      case CODE_TARGET_SLOT:
        return "CODE_TARGET_SLOT";
      case CODE_ENTRY_SLOT:
        return "CODE_ENTRY_SLOT";
      case DEBUG_TARGET_SLOT:
        return "DEBUG_TARGET_SLOT";
      case JS_RETURN_SLOT:
        return "JS_RETURN_SLOT";
      case NUMBER_OF_SLOT_TYPES:
        return "NUMBER_OF_SLOT_TYPES";
    }
    return "UNKNOWN SlotType";
  }

  void UpdateSlots(Heap* heap);

  void UpdateSlotsWithFilter(Heap* heap);

  SlotsBuffer* next() { return next_; }

  static int SizeOfChain(SlotsBuffer* buffer) {
    if (buffer == NULL) return 0;
    return static_cast<int>(buffer->idx_ +
                            (buffer->chain_length_ - 1) * kNumberOfElements);
  }
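
  // Worked example: for a chain of three buffers whose head holds 100 slots,
  // SizeOfChain returns 100 + (3 - 1) * 1021 = 2142, since every buffer
  // except the head is full.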

  inline bool IsFull() { return idx_ == kNumberOfElements; }

  inline bool HasSpaceForTypedSlot() { return idx_ < kNumberOfElements - 1; }

  static void UpdateSlotsRecordedIn(Heap* heap, SlotsBuffer* buffer,
                                    bool code_slots_filtering_required) {
    while (buffer != NULL) {
      if (code_slots_filtering_required) {
        buffer->UpdateSlotsWithFilter(heap);
      } else {
        buffer->UpdateSlots(heap);
      }
      buffer = buffer->next();
    }
  }

  enum AdditionMode { FAIL_ON_OVERFLOW, IGNORE_OVERFLOW };

  static bool ChainLengthThresholdReached(SlotsBuffer* buffer) {
    return buffer != NULL && buffer->chain_length_ >= kChainLengthThreshold;
  }

  INLINE(static bool AddTo(SlotsBufferAllocator* allocator,
                           SlotsBuffer** buffer_address, ObjectSlot slot,
                           AdditionMode mode)) {
    SlotsBuffer* buffer = *buffer_address;
    if (buffer == NULL || buffer->IsFull()) {
      if (mode == FAIL_ON_OVERFLOW && ChainLengthThresholdReached(buffer)) {
        allocator->DeallocateChain(buffer_address);
        return false;
      }
      buffer = allocator->AllocateBuffer(buffer);
      *buffer_address = buffer;
    }
    buffer->Add(slot);
    return true;
  }
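
  // Typical call site (sketch; `allocator_`, `buffer_address` and `slot` are
  // illustrative names): record an untyped slot and react when the chain has
  // grown too long:
  //
  //   if (!SlotsBuffer::AddTo(&allocator_, buffer_address, slot,
  //                           SlotsBuffer::FAIL_ON_OVERFLOW)) {
  //     // The chain was deallocated; stop recording slots for this page.
  //   }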

  static bool IsTypedSlot(ObjectSlot slot);

  static bool AddTo(SlotsBufferAllocator* allocator,
                    SlotsBuffer** buffer_address, SlotType type, Address addr,
                    AdditionMode mode);

  static const int kNumberOfElements = 1021;

 private:
  static const int kChainLengthThreshold = 15;

  intptr_t idx_;
  intptr_t chain_length_;
  SlotsBuffer* next_;
  ObjectSlot slots_[kNumberOfElements];
};


// CodeFlusher collects candidates for code flushing during marking and
// processes those candidates after marking has completed in order to
// reset those functions referencing code objects that would otherwise
// be unreachable. Code objects can be referenced in three ways:
//    - SharedFunctionInfo references unoptimized code.
//    - JSFunction references either unoptimized or optimized code.
//    - OptimizedCodeMap references optimized code.
// We are not allowed to flush unoptimized code for functions that got
// optimized or inlined into optimized code, because we might bail out
// into the unoptimized code again during deoptimization.
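// Candidate lists are threaded through the candidates themselves: JSFunctions
// are linked via their next-function-link field, SharedFunctionInfos via the
// gc_metadata slot of their code, and optimized code map holders via the
// kNextMapIndex entry of the code map (see the accessors below).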
class CodeFlusher {
 public:
  explicit CodeFlusher(Isolate* isolate)
      : isolate_(isolate),
        jsfunction_candidates_head_(NULL),
        shared_function_info_candidates_head_(NULL),
        optimized_code_map_holder_head_(NULL) {}

  void AddCandidate(SharedFunctionInfo* shared_info) {
    if (GetNextCandidate(shared_info) == NULL) {
      SetNextCandidate(shared_info, shared_function_info_candidates_head_);
      shared_function_info_candidates_head_ = shared_info;
    }
  }

  void AddCandidate(JSFunction* function) {
    DCHECK(function->code() == function->shared()->code());
    if (GetNextCandidate(function)->IsUndefined()) {
      SetNextCandidate(function, jsfunction_candidates_head_);
      jsfunction_candidates_head_ = function;
    }
  }

  void AddOptimizedCodeMap(SharedFunctionInfo* code_map_holder) {
    if (GetNextCodeMap(code_map_holder)->IsUndefined()) {
      SetNextCodeMap(code_map_holder, optimized_code_map_holder_head_);
      optimized_code_map_holder_head_ = code_map_holder;
    }
  }

  void EvictOptimizedCodeMap(SharedFunctionInfo* code_map_holder);
  void EvictCandidate(SharedFunctionInfo* shared_info);
  void EvictCandidate(JSFunction* function);

  void ProcessCandidates() {
    ProcessOptimizedCodeMaps();
    ProcessSharedFunctionInfoCandidates();
    ProcessJSFunctionCandidates();
  }

  void EvictAllCandidates() {
    EvictOptimizedCodeMaps();
    EvictJSFunctionCandidates();
    EvictSharedFunctionInfoCandidates();
  }

  void IteratePointersToFromSpace(ObjectVisitor* v);

 private:
  void ProcessOptimizedCodeMaps();
  void ProcessJSFunctionCandidates();
  void ProcessSharedFunctionInfoCandidates();
  void EvictOptimizedCodeMaps();
  void EvictJSFunctionCandidates();
  void EvictSharedFunctionInfoCandidates();

  static JSFunction** GetNextCandidateSlot(JSFunction* candidate) {
    return reinterpret_cast<JSFunction**>(
        HeapObject::RawField(candidate, JSFunction::kNextFunctionLinkOffset));
  }

  static JSFunction* GetNextCandidate(JSFunction* candidate) {
    Object* next_candidate = candidate->next_function_link();
    return reinterpret_cast<JSFunction*>(next_candidate);
  }

  static void SetNextCandidate(JSFunction* candidate,
                               JSFunction* next_candidate) {
    candidate->set_next_function_link(next_candidate);
  }

  static void ClearNextCandidate(JSFunction* candidate, Object* undefined) {
    DCHECK(undefined->IsUndefined());
    candidate->set_next_function_link(undefined, SKIP_WRITE_BARRIER);
  }

  static SharedFunctionInfo* GetNextCandidate(SharedFunctionInfo* candidate) {
    Object* next_candidate = candidate->code()->gc_metadata();
    return reinterpret_cast<SharedFunctionInfo*>(next_candidate);
  }

  static void SetNextCandidate(SharedFunctionInfo* candidate,
                               SharedFunctionInfo* next_candidate) {
    candidate->code()->set_gc_metadata(next_candidate);
  }

  static void ClearNextCandidate(SharedFunctionInfo* candidate) {
    candidate->code()->set_gc_metadata(NULL, SKIP_WRITE_BARRIER);
  }

  static SharedFunctionInfo* GetNextCodeMap(SharedFunctionInfo* holder) {
    FixedArray* code_map = FixedArray::cast(holder->optimized_code_map());
    Object* next_map = code_map->get(SharedFunctionInfo::kNextMapIndex);
    return reinterpret_cast<SharedFunctionInfo*>(next_map);
  }

  static void SetNextCodeMap(SharedFunctionInfo* holder,
                             SharedFunctionInfo* next_holder) {
    FixedArray* code_map = FixedArray::cast(holder->optimized_code_map());
    code_map->set(SharedFunctionInfo::kNextMapIndex, next_holder);
  }

  static void ClearNextCodeMap(SharedFunctionInfo* holder) {
    FixedArray* code_map = FixedArray::cast(holder->optimized_code_map());
    code_map->set_undefined(SharedFunctionInfo::kNextMapIndex);
  }

  Isolate* isolate_;
  JSFunction* jsfunction_candidates_head_;
  SharedFunctionInfo* shared_function_info_candidates_head_;
  SharedFunctionInfo* optimized_code_map_holder_head_;

  DISALLOW_COPY_AND_ASSIGN(CodeFlusher);
};


// Defined in isolate.h.
class ThreadLocalTop;


// -------------------------------------------------------------------------
// Mark-Compact collector
class MarkCompactCollector {
 public:
  // Sets the global flags; must be called before Prepare() to take effect.
  inline void SetFlags(int flags);

  static void Initialize();

  void SetUp();

  void TearDown();

  void CollectEvacuationCandidates(PagedSpace* space);

  void AddEvacuationCandidate(Page* p);

  // Prepares for GC by resetting relocation info in old and map spaces and
  // choosing spaces to compact.
  void Prepare();

  // Performs a global garbage collection.
  void CollectGarbage();

  enum CompactionMode { INCREMENTAL_COMPACTION, NON_INCREMENTAL_COMPACTION };

  bool StartCompaction(CompactionMode mode);

  void AbortCompaction();

#ifdef DEBUG
  // Checks whether a mark-compact collection is in progress.
  bool in_use() { return state_ > PREPARE_GC; }
  bool are_map_pointers_encoded() { return state_ == UPDATE_POINTERS; }
#endif

  // Determine type of object and emit deletion log event.
  static void ReportDeleteIfNeeded(HeapObject* obj, Isolate* isolate);

  // Distinguishable invalid map encodings (for single word and multiple words)
  // that indicate free regions.
  static const uint32_t kSingleFreeEncoding = 0;
  static const uint32_t kMultiFreeEncoding = 1;

  static inline bool IsMarked(Object* obj);

  inline Heap* heap() const { return heap_; }
  inline Isolate* isolate() const;

  CodeFlusher* code_flusher() { return code_flusher_; }
  inline bool is_code_flushing_enabled() const { return code_flusher_ != NULL; }
  void EnableCodeFlushing(bool enable);

  enum SweeperType {
    PARALLEL_SWEEPING,
    CONCURRENT_SWEEPING,
    SEQUENTIAL_SWEEPING
  };

  enum SweepingParallelism { SWEEP_ON_MAIN_THREAD, SWEEP_IN_PARALLEL };

#ifdef VERIFY_HEAP
  void VerifyMarkbitsAreClean();
  static void VerifyMarkbitsAreClean(PagedSpace* space);
  static void VerifyMarkbitsAreClean(NewSpace* space);
  void VerifyWeakEmbeddedObjectsInCode();
  void VerifyOmittedMapChecks();
#endif

  INLINE(static bool ShouldSkipEvacuationSlotRecording(Object** anchor)) {
    return Page::FromAddress(reinterpret_cast<Address>(anchor))
        ->ShouldSkipEvacuationSlotRecording();
  }

  INLINE(static bool ShouldSkipEvacuationSlotRecording(Object* host)) {
    return Page::FromAddress(reinterpret_cast<Address>(host))
        ->ShouldSkipEvacuationSlotRecording();
  }

  INLINE(static bool IsOnEvacuationCandidate(Object* obj)) {
    return Page::FromAddress(reinterpret_cast<Address>(obj))
        ->IsEvacuationCandidate();
  }

  INLINE(void EvictEvacuationCandidate(Page* page)) {
    if (FLAG_trace_fragmentation) {
      PrintF("Page %p is too popular. Disabling evacuation.\n",
             reinterpret_cast<void*>(page));
    }

    // TODO(gc) If all evacuation candidates are too popular we
    // should stop slots recording entirely.
    page->ClearEvacuationCandidate();

    // We were not collecting slots on this page that point to other
    // evacuation candidates, so we have to rescan the page after evacuation
    // to discover and update all pointers to evacuated objects.
    if (page->owner()->identity() == OLD_DATA_SPACE) {
      evacuation_candidates_.RemoveElement(page);
    } else {
      page->SetFlag(Page::RESCAN_ON_EVACUATION);
    }
  }

  void RecordRelocSlot(RelocInfo* rinfo, Object* target);
  void RecordCodeEntrySlot(Address slot, Code* target);
  void RecordCodeTargetPatch(Address pc, Code* target);

  INLINE(void RecordSlot(
      Object** anchor_slot, Object** slot, Object* object,
      SlotsBuffer::AdditionMode mode = SlotsBuffer::FAIL_ON_OVERFLOW));

  void MigrateObject(HeapObject* dst, HeapObject* src, int size,
                     AllocationSpace to_old_space);

  bool TryPromoteObject(HeapObject* object, int object_size);

  void InvalidateCode(Code* code);

  void ClearMarkbits();

  bool abort_incremental_marking() const { return abort_incremental_marking_; }

  bool is_compacting() const { return compacting_; }

  MarkingParity marking_parity() { return marking_parity_; }

  // Concurrent and parallel sweeping support. If required_freed_bytes is
  // larger than zero, sweeping returns after a block of at least
  // required_freed_bytes has been freed. If required_freed_bytes is zero,
  // the whole given space is swept. Returns the size of the largest
  // contiguous freed memory chunk.
  int SweepInParallel(PagedSpace* space, int required_freed_bytes);
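
  // For example (illustrative values): SweepInParallel(space, 32 * 1024)
  // returns once a block of at least 32 KB has been freed, while
  // SweepInParallel(space, 0) sweeps the whole space.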

  // Sweeps a given page concurrently with the sweeper threads. Returns the
  // size of the largest contiguous freed memory chunk.
  int SweepInParallel(Page* page, PagedSpace* space);

  void EnsureSweepingCompleted();

  // Returns true if the sweeper threads are done processing the pages, or if
  // the sweeper threads are not active at all. If the latter turns out to be
  // a latency issue we should be smarter here.
  bool IsSweepingCompleted();

  void RefillFreeList(PagedSpace* space);

  bool AreSweeperThreadsActivated();

  // Checks if sweeping is in progress right now on any space.
  bool sweeping_in_progress() { return sweeping_in_progress_; }

  void set_sequential_sweeping(bool sequential_sweeping) {
    sequential_sweeping_ = sequential_sweeping;
  }

  bool sequential_sweeping() const { return sequential_sweeping_; }

  // Mark the global table which maps weak objects to dependent code without
  // marking its contents.
  void MarkWeakObjectToCodeTable();

  // Special case for processing weak references in a full collection. We need
  // to artificially keep AllocationSites alive for a time.
  void MarkAllocationSite(AllocationSite* site);

 private:
  class SweeperTask;

  explicit MarkCompactCollector(Heap* heap);
  ~MarkCompactCollector();

  bool MarkInvalidatedCode();
  bool WillBeDeoptimized(Code* code);
  void RemoveDeadInvalidatedCode();
  void ProcessInvalidatedCode(ObjectVisitor* visitor);

  void StartSweeperThreads();

#ifdef DEBUG
  enum CollectorState {
    IDLE,
    PREPARE_GC,
    MARK_LIVE_OBJECTS,
    SWEEP_SPACES,
    ENCODE_FORWARDING_ADDRESSES,
    UPDATE_POINTERS,
    RELOCATE_OBJECTS
  };

  // The current stage of the collector.
  CollectorState state_;
#endif

  bool reduce_memory_footprint_;

  bool abort_incremental_marking_;

  MarkingParity marking_parity_;

  // True if we are collecting slots to perform evacuation from evacuation
  // candidates.
  bool compacting_;

  bool was_marked_incrementally_;

  // True if concurrent or parallel sweeping is currently in progress.
  bool sweeping_in_progress_;

  base::Semaphore pending_sweeper_jobs_semaphore_;

  bool sequential_sweeping_;

  SlotsBufferAllocator slots_buffer_allocator_;

  SlotsBuffer* migration_slots_buffer_;

  // Finishes GC, performs heap verification if enabled.
  void Finish();

  // -----------------------------------------------------------------------
  // Phase 1: Marking live objects.
  //
  //  Before: The heap has been prepared for garbage collection by
  //          MarkCompactCollector::Prepare() and is otherwise in its
  //          normal state.
  //
  //   After: Live objects are marked and non-live objects are unmarked.

  friend class RootMarkingVisitor;
  friend class MarkingVisitor;
  friend class MarkCompactMarkingVisitor;
  friend class CodeMarkingVisitor;
  friend class SharedFunctionInfoMarkingVisitor;

  // Mark code objects that are active on the stack to prevent them
  // from being flushed.
  void PrepareThreadForCodeFlushing(Isolate* isolate, ThreadLocalTop* top);

  void PrepareForCodeFlushing();

  // Marking operations for objects reachable from roots.
  void MarkLiveObjects();

  void AfterMarking();

  // Marks the object black and pushes it on the marking stack.
  // This is for non-incremental marking only.
  INLINE(void MarkObject(HeapObject* obj, MarkBit mark_bit));

  // Marks the object black assuming that it is not yet marked.
  // This is for non-incremental marking only.
  INLINE(void SetMark(HeapObject* obj, MarkBit mark_bit));

  // Mark the heap roots and all objects reachable from them.
  void MarkRoots(RootMarkingVisitor* visitor);

  // Mark the string table specially.  References to internalized strings from
  // the string table are weak.
  void MarkStringTable(RootMarkingVisitor* visitor);

  // Mark objects in implicit reference groups if their parent object
  // is marked.
  void MarkImplicitRefGroups();

  // Mark objects reachable (transitively) from objects in the marking stack
  // or overflowed in the heap.
  void ProcessMarkingDeque();

  // Mark objects reachable (transitively) from objects in the marking stack
  // or overflowed in the heap.  This respects references only considered in
  // the final atomic marking pause, including the following:
  //    - Processing of objects reachable through Harmony WeakMaps.
  //    - Objects reachable due to host application logic like object groups
  //      or implicit reference groups.
  void ProcessEphemeralMarking(ObjectVisitor* visitor);

  // If the call site of the top optimized code was not prepared for
  // deoptimization, treat the maps in the code as strong pointers;
  // otherwise a map can die and deoptimize the code.
  void ProcessTopOptimizedFrame(ObjectVisitor* visitor);

  // Mark objects reachable (transitively) from objects in the marking
  // stack.  This function empties the marking stack, but may leave
  // overflowed objects in the heap, in which case the marking stack's
  // overflow flag will be set.
  void EmptyMarkingDeque();

  // Refill the marking stack with overflowed objects from the heap.  This
  // function either leaves the marking stack full or clears the overflow
  // flag on the marking stack.
  void RefillMarkingDeque();

  // After reachable maps have been marked, process per-context object
  // literal map caches, removing unmarked entries.
  void ProcessMapCaches();

  // Callback function for telling whether the object *p is an unmarked
  // heap object.
  static bool IsUnmarkedHeapObject(Object** p);
  static bool IsUnmarkedHeapObjectWithHeap(Heap* heap, Object** p);

  // Map transitions from a live map to a dead map must be killed.
  // We replace them with a null descriptor, with the same key.
  void ClearNonLiveReferences();
  void ClearNonLivePrototypeTransitions(Map* map);
  void ClearNonLiveMapTransitions(Map* map, MarkBit map_mark);
  void ClearMapTransitions(Map* map);
  bool ClearMapBackPointer(Map* map);
  void TrimDescriptorArray(Map* map, DescriptorArray* descriptors,
                           int number_of_own_descriptors);
  void TrimEnumCache(Map* map, DescriptorArray* descriptors);

  void ClearDependentCode(DependentCode* dependent_code);
  void ClearDependentICList(Object* head);
  void ClearNonLiveDependentCode(DependentCode* dependent_code);
  int ClearNonLiveDependentCodeInGroup(DependentCode* dependent_code, int group,
                                       int start, int end, int new_start);

  // Mark all values associated with reachable keys in weak collections
  // encountered so far.  This might push new objects or even new weak maps
  // onto the marking stack.
  void ProcessWeakCollections();

  // After all reachable objects have been marked, those weak map entries
  // with an unreachable key are removed from all encountered weak maps.
  // The linked list of all encountered weak maps is destroyed.
  void ClearWeakCollections();

  // We have to remove all encountered weak maps from the list of weak
  // collections when incremental marking is aborted.
  void AbortWeakCollections();

  // -----------------------------------------------------------------------
  // Phase 2: Sweeping to clear mark bits and free non-live objects for
  // a non-compacting collection.
  //
  //  Before: Live objects are marked and non-live objects are unmarked.
  //
  //   After: Live objects are unmarked and non-live regions have been added
  //          to their space's free list. The active eden semispace is
  //          compacted by evacuation.
  //

  // If we are not compacting the heap, we simply sweep the spaces except
  // for the large object space, clearing mark bits and adding unmarked
  // regions to each space's free list.
  void SweepSpaces();

  int DiscoverAndEvacuateBlackObjectsOnPage(NewSpace* new_space,
                                            NewSpacePage* p);

  void EvacuateNewSpace();

  void EvacuateLiveObjectsFromPage(Page* p);

  void EvacuatePages();

  void EvacuateNewSpaceAndCandidates();

  void ReleaseEvacuationCandidates();

  // Moves the pages of the evacuation_candidates_ list to the end of their
  // corresponding space's page list.
  void MoveEvacuationCandidatesToEndOfPagesList();

  void SweepSpace(PagedSpace* space, SweeperType sweeper);

  // Finalizes the parallel sweeping phase. Marks all the pages that were
  // swept in parallel.
  void ParallelSweepSpacesComplete();

  void ParallelSweepSpaceComplete(PagedSpace* space);

  // Updates store buffer and slot buffer for a pointer in a migrating object.
  void RecordMigratedSlot(Object* value, Address slot);

#ifdef DEBUG
  friend class MarkObjectVisitor;
  static void VisitObject(HeapObject* obj);

  friend class UnmarkObjectVisitor;
  static void UnmarkObject(HeapObject* obj);
#endif

  Heap* heap_;
  MarkingDeque marking_deque_;
  CodeFlusher* code_flusher_;
  bool have_code_to_deoptimize_;

  List<Page*> evacuation_candidates_;
  List<Code*> invalidated_code_;

  SmartPointer<FreeList> free_list_old_data_space_;
  SmartPointer<FreeList> free_list_old_pointer_space_;

  friend class Heap;
};


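// Iterates over the mark-bit cells of a memory chunk, exposing the base
// address covered by each cell.  Usage sketch (`chunk` is a hypothetical
// MemoryChunk*):
//
//   for (MarkBitCellIterator it(chunk); !it.Done(); it.Advance()) {
//     MarkBit::CellType* cell = it.CurrentCell();
//     Address base = it.CurrentCellBase();
//     // Each set bit in *cell is a mark bit for an address in the
//     // 32 * kPointerSize byte range starting at `base`.
//   }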
class MarkBitCellIterator BASE_EMBEDDED {
 public:
  explicit MarkBitCellIterator(MemoryChunk* chunk) : chunk_(chunk) {
    last_cell_index_ = Bitmap::IndexToCell(Bitmap::CellAlignIndex(
        chunk_->AddressToMarkbitIndex(chunk_->area_end())));
    cell_base_ = chunk_->area_start();
    cell_index_ = Bitmap::IndexToCell(
        Bitmap::CellAlignIndex(chunk_->AddressToMarkbitIndex(cell_base_)));
    cells_ = chunk_->markbits()->cells();
  }

  inline bool Done() { return cell_index_ == last_cell_index_; }

  inline bool HasNext() { return cell_index_ < last_cell_index_ - 1; }

  inline MarkBit::CellType* CurrentCell() {
    DCHECK(cell_index_ == Bitmap::IndexToCell(Bitmap::CellAlignIndex(
                              chunk_->AddressToMarkbitIndex(cell_base_))));
    return &cells_[cell_index_];
  }

  inline Address CurrentCellBase() {
    DCHECK(cell_index_ == Bitmap::IndexToCell(Bitmap::CellAlignIndex(
                              chunk_->AddressToMarkbitIndex(cell_base_))));
    return cell_base_;
  }

  inline void Advance() {
    cell_index_++;
    cell_base_ += 32 * kPointerSize;
  }

 private:
  MemoryChunk* chunk_;
  MarkBit::CellType* cells_;
  unsigned int last_cell_index_;
  unsigned int cell_index_;
  Address cell_base_;
};


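// RAII scope that sets the collector's sequential_sweeping flag for its
// lifetime.  Usage sketch (`collector` is a hypothetical
// MarkCompactCollector*):
//
//   {
//     SequentialSweepingScope scope(collector);
//     // ... work during which sweeping must stay sequential ...
//   }  // sequential_sweeping() is false again here.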
class SequentialSweepingScope BASE_EMBEDDED {
 public:
  explicit SequentialSweepingScope(MarkCompactCollector* collector)
      : collector_(collector) {
    collector_->set_sequential_sweeping(true);
  }

  ~SequentialSweepingScope() { collector_->set_sequential_sweeping(false); }

 private:
  MarkCompactCollector* collector_;
};


const char* AllocationSpaceName(AllocationSpace space);
}
}  // namespace v8::internal

#endif  // V8_HEAP_MARK_COMPACT_H_