// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#ifndef V8_MARK_COMPACT_H_
#define V8_MARK_COMPACT_H_

#include "compiler-intrinsics.h"
#include "spaces.h"

namespace v8 {
namespace internal {

// Callback function that returns whether an object is alive. The heap size
// of the object is returned in size. It optionally updates the offset
// to the first live object in the page (only used for old and map objects).
typedef bool (*IsAliveFunction)(HeapObject* obj, int* size, int* offset);

// Forward declarations.
class CodeFlusher;
class GCTracer;
class MarkCompactCollector;
class MarkingVisitor;
class RootMarkingVisitor;


class Marking {
 public:
  explicit Marking(Heap* heap)
      : heap_(heap) {
  }

  INLINE(static MarkBit MarkBitFrom(Address addr));

  INLINE(static MarkBit MarkBitFrom(HeapObject* obj)) {
    return MarkBitFrom(reinterpret_cast<Address>(obj));
  }

  // Impossible markbits: 01
  static const char* kImpossibleBitPattern;
  INLINE(static bool IsImpossible(MarkBit mark_bit)) {
    return !mark_bit.Get() && mark_bit.Next().Get();
  }

  // Black markbits: 10 - this is required by the sweeper.
  static const char* kBlackBitPattern;
  INLINE(static bool IsBlack(MarkBit mark_bit)) {
    return mark_bit.Get() && !mark_bit.Next().Get();
  }

  // White markbits: 00 - this is required by the mark bit clearer.
  static const char* kWhiteBitPattern;
  INLINE(static bool IsWhite(MarkBit mark_bit)) {
    return !mark_bit.Get();
  }

  // Grey markbits: 11
  static const char* kGreyBitPattern;
  INLINE(static bool IsGrey(MarkBit mark_bit)) {
    return mark_bit.Get() && mark_bit.Next().Get();
  }

  INLINE(static void MarkBlack(MarkBit mark_bit)) {
    mark_bit.Set();
    mark_bit.Next().Clear();
  }

  INLINE(static void BlackToGrey(MarkBit markbit)) {
    markbit.Next().Set();
  }

  INLINE(static void WhiteToGrey(MarkBit markbit)) {
    markbit.Set();
    markbit.Next().Set();
  }

  INLINE(static void GreyToBlack(MarkBit markbit)) {
    markbit.Next().Clear();
  }

  INLINE(static void BlackToGrey(HeapObject* obj)) {
    BlackToGrey(MarkBitFrom(obj));
  }

  INLINE(static void AnyToGrey(MarkBit markbit)) {
    markbit.Set();
    markbit.Next().Set();
  }
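
  // For illustration, these transitions are meant to compose as follows over
  // an object's lifetime in a marking cycle (a sketch, assuming the object
  // starts out white; the actual call sites are elsewhere in the GC):
  //
  //   MarkBit bit = Marking::MarkBitFrom(obj);  // 00: white, not yet visited
  //   Marking::WhiteToGrey(bit);                // 11: grey, on the marking deque
  //   Marking::GreyToBlack(bit);                // 10: black, body fully scanned
  //
  // MarkingDeque::PushBlack() below reverses the last step via BlackToGrey()
  // when the deque overflows, so the object is rediscovered during the rescan.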

  // Returns true if the object whose mark is transferred is marked black.
  bool TransferMark(Address old_start, Address new_start);

#ifdef DEBUG
  enum ObjectColor {
    BLACK_OBJECT,
    WHITE_OBJECT,
    GREY_OBJECT,
    IMPOSSIBLE_COLOR
  };

  static const char* ColorName(ObjectColor color) {
    switch (color) {
      case BLACK_OBJECT: return "black";
      case WHITE_OBJECT: return "white";
      case GREY_OBJECT: return "grey";
      case IMPOSSIBLE_COLOR: return "impossible";
    }
    return "error";
  }

  static ObjectColor Color(HeapObject* obj) {
    return Color(Marking::MarkBitFrom(obj));
  }

  static ObjectColor Color(MarkBit mark_bit) {
    if (IsBlack(mark_bit)) return BLACK_OBJECT;
    if (IsWhite(mark_bit)) return WHITE_OBJECT;
    if (IsGrey(mark_bit)) return GREY_OBJECT;
    UNREACHABLE();
    return IMPOSSIBLE_COLOR;
  }
#endif

  // Returns true if the transferred color is black.
  INLINE(static bool TransferColor(HeapObject* from,
                                   HeapObject* to)) {
    MarkBit from_mark_bit = MarkBitFrom(from);
    MarkBit to_mark_bit = MarkBitFrom(to);
    bool is_black = false;
    if (from_mark_bit.Get()) {
      to_mark_bit.Set();
      is_black = true;  // Looks black so far.
    }
    if (from_mark_bit.Next().Get()) {
      to_mark_bit.Next().Set();
      is_black = false;  // Was actually grey.
    }
    return is_black;
  }

 private:
  Heap* heap_;
};

// ----------------------------------------------------------------------------
// Marking deque for tracing live objects.
class MarkingDeque {
 public:
  MarkingDeque()
      : array_(NULL), top_(0), bottom_(0), mask_(0), overflowed_(false) { }

  void Initialize(Address low, Address high) {
    HeapObject** obj_low = reinterpret_cast<HeapObject**>(low);
    HeapObject** obj_high = reinterpret_cast<HeapObject**>(high);
    array_ = obj_low;
    mask_ = RoundDownToPowerOf2(static_cast<int>(obj_high - obj_low)) - 1;
    top_ = bottom_ = 0;
    overflowed_ = false;
  }

  inline bool IsFull() { return ((top_ + 1) & mask_) == bottom_; }

  inline bool IsEmpty() { return top_ == bottom_; }

  bool overflowed() const { return overflowed_; }

  void ClearOverflowed() { overflowed_ = false; }

  void SetOverflowed() { overflowed_ = true; }

  // Push the (marked) object on the marking stack if there is room;
  // otherwise mark the object as overflowed and wait for a rescan of the
  // heap.
  INLINE(void PushBlack(HeapObject* object)) {
    ASSERT(object->IsHeapObject());
    if (IsFull()) {
      Marking::BlackToGrey(object);
      MemoryChunk::IncrementLiveBytesFromGC(object->address(), -object->Size());
      SetOverflowed();
    } else {
      array_[top_] = object;
      top_ = ((top_ + 1) & mask_);
    }
  }

  INLINE(void PushGrey(HeapObject* object)) {
    ASSERT(object->IsHeapObject());
    if (IsFull()) {
      SetOverflowed();
    } else {
      array_[top_] = object;
      top_ = ((top_ + 1) & mask_);
    }
  }

  INLINE(HeapObject* Pop()) {
    ASSERT(!IsEmpty());
    top_ = ((top_ - 1) & mask_);
    HeapObject* object = array_[top_];
    ASSERT(object->IsHeapObject());
    return object;
  }

  INLINE(void UnshiftGrey(HeapObject* object)) {
    ASSERT(object->IsHeapObject());
    if (IsFull()) {
      SetOverflowed();
    } else {
      bottom_ = ((bottom_ - 1) & mask_);
      array_[bottom_] = object;
    }
  }

  HeapObject** array() { return array_; }
  int bottom() { return bottom_; }
  int top() { return top_; }
  int mask() { return mask_; }
  void set_top(int top) { top_ = top; }

 private:
  HeapObject** array_;
  // array_[(top_ - 1) & mask_] is the top element in the deque.  The deque is
  // empty when top_ == bottom_.  It is full when top_ + 1 == bottom_
  // (mod mask_ + 1).
  int top_;
  int bottom_;
  int mask_;
  bool overflowed_;

  DISALLOW_COPY_AND_ASSIGN(MarkingDeque);
};
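
// A sketch of the drain loop the collector runs over this deque (cf.
// EmptyMarkingDeque() and ProcessMarkingDeque() in MarkCompactCollector
// below); VisitObjectBody is a hypothetical stand-in for the real
// body-visiting logic:
//
//   while (!marking_deque_.IsEmpty()) {
//     HeapObject* object = marking_deque_.Pop();
//     VisitObjectBody(object);  // may push newly discovered objects
//   }
//   if (marking_deque_.overflowed()) {
//     // Rescan the heap for overflowed objects, refill the deque, and
//     // drain it again (cf. RefillMarkingDeque() in the collector below).
//   }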


class SlotsBufferAllocator {
 public:
  SlotsBuffer* AllocateBuffer(SlotsBuffer* next_buffer);
  void DeallocateBuffer(SlotsBuffer* buffer);

  void DeallocateChain(SlotsBuffer** buffer_address);
};


// SlotsBuffer records a sequence of slots that have to be updated
// after live objects have been relocated from evacuation candidates.
// All slots are either untyped or typed:
//    - Untyped slots are expected to contain a tagged object pointer.
//      They are recorded by an address.
//    - Typed slots are expected to contain an encoded pointer to a heap
//      object, where the way of encoding depends on the type of the slot.
//      They are recorded as a pair (SlotType, slot address).
// We assume that the zero page is never mapped; this allows us to distinguish
// untyped slots from typed slots during iteration by a simple comparison:
// if an element of the slots buffer is less than NUMBER_OF_SLOT_TYPES, then
// it is the first element of a typed slot's pair.
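//
// For illustration, the distinction the iteration relies on looks roughly
// like this (a sketch of the idea, not the exact code in UpdateSlots):
//
//   ObjectSlot slot = slots_[i];
//   if (reinterpret_cast<uintptr_t>(slot) < NUMBER_OF_SLOT_TYPES) {
//     // Typed slot: 'slot' encodes the SlotType and slots_[i + 1] holds the
//     // address to update.
//   } else {
//     // Untyped slot: 'slot' is the address of a tagged object pointer.
//   }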
class SlotsBuffer {
 public:
  typedef Object** ObjectSlot;

  explicit SlotsBuffer(SlotsBuffer* next_buffer)
      : idx_(0), chain_length_(1), next_(next_buffer) {
    if (next_ != NULL) {
      chain_length_ = next_->chain_length_ + 1;
    }
  }

  ~SlotsBuffer() {
  }

  void Add(ObjectSlot slot) {
    ASSERT(0 <= idx_ && idx_ < kNumberOfElements);
    slots_[idx_++] = slot;
  }

  enum SlotType {
    EMBEDDED_OBJECT_SLOT,
    RELOCATED_CODE_OBJECT,
    CODE_TARGET_SLOT,
    CODE_ENTRY_SLOT,
    DEBUG_TARGET_SLOT,
    JS_RETURN_SLOT,
    NUMBER_OF_SLOT_TYPES
  };

  static const char* SlotTypeToString(SlotType type) {
    switch (type) {
      case EMBEDDED_OBJECT_SLOT:
        return "EMBEDDED_OBJECT_SLOT";
      case RELOCATED_CODE_OBJECT:
        return "RELOCATED_CODE_OBJECT";
      case CODE_TARGET_SLOT:
        return "CODE_TARGET_SLOT";
      case CODE_ENTRY_SLOT:
        return "CODE_ENTRY_SLOT";
      case DEBUG_TARGET_SLOT:
        return "DEBUG_TARGET_SLOT";
      case JS_RETURN_SLOT:
        return "JS_RETURN_SLOT";
      case NUMBER_OF_SLOT_TYPES:
        return "NUMBER_OF_SLOT_TYPES";
    }
    return "UNKNOWN SlotType";
  }

  void UpdateSlots(Heap* heap);

  void UpdateSlotsWithFilter(Heap* heap);

  SlotsBuffer* next() { return next_; }

  static int SizeOfChain(SlotsBuffer* buffer) {
    if (buffer == NULL) return 0;
    return static_cast<int>(buffer->idx_ +
                            (buffer->chain_length_ - 1) * kNumberOfElements);
  }

  inline bool IsFull() {
    return idx_ == kNumberOfElements;
  }

  inline bool HasSpaceForTypedSlot() {
    return idx_ < kNumberOfElements - 1;
  }

  static void UpdateSlotsRecordedIn(Heap* heap,
                                    SlotsBuffer* buffer,
                                    bool code_slots_filtering_required) {
    while (buffer != NULL) {
      if (code_slots_filtering_required) {
        buffer->UpdateSlotsWithFilter(heap);
      } else {
        buffer->UpdateSlots(heap);
      }
      buffer = buffer->next();
    }
  }

  enum AdditionMode {
    FAIL_ON_OVERFLOW,
    IGNORE_OVERFLOW
  };

  static bool ChainLengthThresholdReached(SlotsBuffer* buffer) {
    return buffer != NULL && buffer->chain_length_ >= kChainLengthThreshold;
  }

  INLINE(static bool AddTo(SlotsBufferAllocator* allocator,
                           SlotsBuffer** buffer_address,
                           ObjectSlot slot,
                           AdditionMode mode)) {
    SlotsBuffer* buffer = *buffer_address;
    if (buffer == NULL || buffer->IsFull()) {
      if (mode == FAIL_ON_OVERFLOW && ChainLengthThresholdReached(buffer)) {
        allocator->DeallocateChain(buffer_address);
        return false;
      }
      buffer = allocator->AllocateBuffer(buffer);
      *buffer_address = buffer;
    }
    buffer->Add(slot);
    return true;
  }

  static bool IsTypedSlot(ObjectSlot slot);

  static bool AddTo(SlotsBufferAllocator* allocator,
                    SlotsBuffer** buffer_address,
                    SlotType type,
                    Address addr,
                    AdditionMode mode);

  static const int kNumberOfElements = 1021;

 private:
  static const int kChainLengthThreshold = 15;

  intptr_t idx_;
  intptr_t chain_length_;
  SlotsBuffer* next_;
  ObjectSlot slots_[kNumberOfElements];
};
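
// A sketch of recording an untyped slot through the static AddTo() above;
// 'slot' is a hypothetical stand-in for the address of a tagged pointer field
// that points into an evacuation candidate:
//
//   SlotsBufferAllocator allocator;
//   SlotsBuffer* buffer = NULL;
//   if (!SlotsBuffer::AddTo(&allocator, &buffer, slot,
//                           SlotsBuffer::FAIL_ON_OVERFLOW)) {
//     // The chain exceeded kChainLengthThreshold and was deallocated;
//     // a typical caller then gives up on recording slots for that page
//     // (cf. EvictEvacuationCandidate() in MarkCompactCollector below).
//   }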


// CodeFlusher collects candidates for code flushing during marking and
// processes those candidates after marking has completed in order to
// reset those functions referencing code objects that would otherwise
// be unreachable. Code objects can be referenced in three ways:
//    - SharedFunctionInfo references unoptimized code.
//    - JSFunction references either unoptimized or optimized code.
//    - OptimizedCodeMap references optimized code.
// We are not allowed to flush unoptimized code for functions that got
// optimized or inlined into optimized code, because we might bail out
// into the unoptimized code again during deoptimization.
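//
// Candidates are kept on singly linked lists threaded through the candidates
// themselves, so no extra memory is allocated while collecting them:
// JSFunction candidates are linked through their next_function_link field,
// SharedFunctionInfo candidates through the gc_metadata slot of their code
// object, and optimized code map holders through the kNextMapIndex entry of
// their optimized code map (see the Get/Set/ClearNext* helpers below).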
class CodeFlusher {
 public:
  explicit CodeFlusher(Isolate* isolate)
      : isolate_(isolate),
        jsfunction_candidates_head_(NULL),
        shared_function_info_candidates_head_(NULL),
        optimized_code_map_holder_head_(NULL) {}

  void AddCandidate(SharedFunctionInfo* shared_info) {
    if (GetNextCandidate(shared_info) == NULL) {
      SetNextCandidate(shared_info, shared_function_info_candidates_head_);
      shared_function_info_candidates_head_ = shared_info;
    }
  }

  void AddCandidate(JSFunction* function) {
    ASSERT(function->code() == function->shared()->code());
    if (GetNextCandidate(function)->IsUndefined()) {
      SetNextCandidate(function, jsfunction_candidates_head_);
      jsfunction_candidates_head_ = function;
    }
  }

  void AddOptimizedCodeMap(SharedFunctionInfo* code_map_holder) {
    if (GetNextCodeMap(code_map_holder)->IsUndefined()) {
      SetNextCodeMap(code_map_holder, optimized_code_map_holder_head_);
      optimized_code_map_holder_head_ = code_map_holder;
    }
  }

  void EvictOptimizedCodeMap(SharedFunctionInfo* code_map_holder);
  void EvictCandidate(SharedFunctionInfo* shared_info);
  void EvictCandidate(JSFunction* function);

  void ProcessCandidates() {
    ProcessOptimizedCodeMaps();
    ProcessSharedFunctionInfoCandidates();
    ProcessJSFunctionCandidates();
  }

  void EvictAllCandidates() {
    EvictOptimizedCodeMaps();
    EvictJSFunctionCandidates();
    EvictSharedFunctionInfoCandidates();
  }

  void IteratePointersToFromSpace(ObjectVisitor* v);

 private:
  void ProcessOptimizedCodeMaps();
  void ProcessJSFunctionCandidates();
  void ProcessSharedFunctionInfoCandidates();
  void EvictOptimizedCodeMaps();
  void EvictJSFunctionCandidates();
  void EvictSharedFunctionInfoCandidates();

  static JSFunction** GetNextCandidateSlot(JSFunction* candidate) {
    return reinterpret_cast<JSFunction**>(
        HeapObject::RawField(candidate, JSFunction::kNextFunctionLinkOffset));
  }

  static JSFunction* GetNextCandidate(JSFunction* candidate) {
    Object* next_candidate = candidate->next_function_link();
    return reinterpret_cast<JSFunction*>(next_candidate);
  }

  static void SetNextCandidate(JSFunction* candidate,
                               JSFunction* next_candidate) {
    candidate->set_next_function_link(next_candidate);
  }

  static void ClearNextCandidate(JSFunction* candidate, Object* undefined) {
    ASSERT(undefined->IsUndefined());
    candidate->set_next_function_link(undefined, SKIP_WRITE_BARRIER);
  }

  static SharedFunctionInfo* GetNextCandidate(SharedFunctionInfo* candidate) {
    Object* next_candidate = candidate->code()->gc_metadata();
    return reinterpret_cast<SharedFunctionInfo*>(next_candidate);
  }

  static void SetNextCandidate(SharedFunctionInfo* candidate,
                               SharedFunctionInfo* next_candidate) {
    candidate->code()->set_gc_metadata(next_candidate);
  }

  static void ClearNextCandidate(SharedFunctionInfo* candidate) {
    candidate->code()->set_gc_metadata(NULL, SKIP_WRITE_BARRIER);
  }

  static SharedFunctionInfo* GetNextCodeMap(SharedFunctionInfo* holder) {
    FixedArray* code_map = FixedArray::cast(holder->optimized_code_map());
    Object* next_map = code_map->get(SharedFunctionInfo::kNextMapIndex);
    return reinterpret_cast<SharedFunctionInfo*>(next_map);
  }

  static void SetNextCodeMap(SharedFunctionInfo* holder,
                             SharedFunctionInfo* next_holder) {
    FixedArray* code_map = FixedArray::cast(holder->optimized_code_map());
    code_map->set(SharedFunctionInfo::kNextMapIndex, next_holder);
  }

  static void ClearNextCodeMap(SharedFunctionInfo* holder) {
    FixedArray* code_map = FixedArray::cast(holder->optimized_code_map());
    code_map->set_undefined(SharedFunctionInfo::kNextMapIndex);
  }

  Isolate* isolate_;
  JSFunction* jsfunction_candidates_head_;
  SharedFunctionInfo* shared_function_info_candidates_head_;
  SharedFunctionInfo* optimized_code_map_holder_head_;

  DISALLOW_COPY_AND_ASSIGN(CodeFlusher);
};


// Defined in isolate.h.
class ThreadLocalTop;


// -------------------------------------------------------------------------
// Mark-Compact collector
class MarkCompactCollector {
 public:
  // Type of functions to compute forwarding addresses of objects in
  // compacted spaces.  Given an object and its size, return a (non-failure)
  // Object* that will be the object after forwarding.  There is a separate
  // allocation function for each (compactable) space based on the location
  // of the object before compaction.
  typedef MaybeObject* (*AllocationFunction)(Heap* heap,
                                             HeapObject* object,
                                             int object_size);

  // Type of functions to encode the forwarding address for an object.
  // Given the object, its size, and the new (non-failure) object it will be
  // forwarded to, encode the forwarding address.  For paged spaces, the
  // 'offset' input/output parameter contains the offset of the forwarded
  // object from the forwarding address of the previous live object in the
  // page as input, and is updated to contain the offset to be used for the
  // next live object in the same page.  For spaces using a different
  // encoding (i.e., contiguous spaces), the offset parameter is ignored.
  typedef void (*EncodingFunction)(Heap* heap,
                                   HeapObject* old_object,
                                   int object_size,
                                   Object* new_object,
                                   int* offset);

  // Type of functions to process non-live objects.
  typedef void (*ProcessNonLiveFunction)(HeapObject* object, Isolate* isolate);

  // Pointer to member function, used in IterateLiveObjects.
  typedef int (MarkCompactCollector::*LiveObjectCallback)(HeapObject* obj);

  // Sets the global flags; this must be called before Prepare to take effect.
  inline void SetFlags(int flags);

  static void Initialize();

  void TearDown();

  void CollectEvacuationCandidates(PagedSpace* space);

  void AddEvacuationCandidate(Page* p);

  // Prepares for GC by resetting relocation info in old and map spaces and
  // choosing spaces to compact.
  void Prepare(GCTracer* tracer);

  // Performs a global garbage collection.
  void CollectGarbage();

  enum CompactionMode {
    INCREMENTAL_COMPACTION,
    NON_INCREMENTAL_COMPACTION
  };

  bool StartCompaction(CompactionMode mode);

  void AbortCompaction();

  // During a full GC, there is a stack-allocated GCTracer that is used for
  // bookkeeping information.  Return a pointer to that tracer.
  GCTracer* tracer() { return tracer_; }

#ifdef DEBUG
  // Checks whether a mark-compact collection is in progress.
  bool in_use() { return state_ > PREPARE_GC; }
  bool are_map_pointers_encoded() { return state_ == UPDATE_POINTERS; }
#endif

  // Determine type of object and emit deletion log event.
  static void ReportDeleteIfNeeded(HeapObject* obj, Isolate* isolate);

  // Distinguishable invalid map encodings (for single word and multiple words)
  // that indicate free regions.
  static const uint32_t kSingleFreeEncoding = 0;
  static const uint32_t kMultiFreeEncoding = 1;

  static inline bool IsMarked(Object* obj);

  inline Heap* heap() const { return heap_; }
  inline Isolate* isolate() const;

  CodeFlusher* code_flusher() { return code_flusher_; }
  inline bool is_code_flushing_enabled() const { return code_flusher_ != NULL; }
  void EnableCodeFlushing(bool enable);

  enum SweeperType {
    CONSERVATIVE,
    LAZY_CONSERVATIVE,
    PARALLEL_CONSERVATIVE,
    CONCURRENT_CONSERVATIVE,
    PRECISE
  };

  enum SweepingParallelism {
    SWEEP_SEQUENTIALLY,
    SWEEP_IN_PARALLEL
  };

#ifdef VERIFY_HEAP
  void VerifyMarkbitsAreClean();
  static void VerifyMarkbitsAreClean(PagedSpace* space);
  static void VerifyMarkbitsAreClean(NewSpace* space);
  void VerifyWeakEmbeddedObjectsInOptimizedCode();
  void VerifyOmittedMapChecks();
#endif

  // Sweep a single page from the given space conservatively.
  // Returns the number of reclaimed bytes.
  template<SweepingParallelism type>
  static intptr_t SweepConservatively(PagedSpace* space,
                                      FreeList* free_list,
                                      Page* p);

  INLINE(static bool ShouldSkipEvacuationSlotRecording(Object** anchor)) {
    return Page::FromAddress(reinterpret_cast<Address>(anchor))->
        ShouldSkipEvacuationSlotRecording();
  }

  INLINE(static bool ShouldSkipEvacuationSlotRecording(Object* host)) {
    return Page::FromAddress(reinterpret_cast<Address>(host))->
        ShouldSkipEvacuationSlotRecording();
  }

  INLINE(static bool IsOnEvacuationCandidate(Object* obj)) {
    return Page::FromAddress(reinterpret_cast<Address>(obj))->
        IsEvacuationCandidate();
  }

  INLINE(void EvictEvacuationCandidate(Page* page)) {
    if (FLAG_trace_fragmentation) {
      PrintF("Page %p is too popular. Disabling evacuation.\n",
             reinterpret_cast<void*>(page));
    }

    // TODO(gc) If all evacuation candidates are too popular we
    // should stop slots recording entirely.
    page->ClearEvacuationCandidate();

    // We were not collecting slots on this page that point
    // to other evacuation candidates, so we have to
    // rescan the page after evacuation to discover and update all
    // pointers to evacuated objects.
    if (page->owner()->identity() == OLD_DATA_SPACE) {
      evacuation_candidates_.RemoveElement(page);
    } else {
      page->SetFlag(Page::RESCAN_ON_EVACUATION);
    }
  }

  void RecordRelocSlot(RelocInfo* rinfo, Object* target);
  void RecordCodeEntrySlot(Address slot, Code* target);
  void RecordCodeTargetPatch(Address pc, Code* target);

  INLINE(void RecordSlot(Object** anchor_slot, Object** slot, Object* object));

  void MigrateObject(Address dst,
                     Address src,
                     int size,
                     AllocationSpace to_old_space);

  bool TryPromoteObject(HeapObject* object, int object_size);

  inline Object* encountered_weak_collections() {
    return encountered_weak_collections_;
  }
  inline void set_encountered_weak_collections(Object* weak_collection) {
    encountered_weak_collections_ = weak_collection;
  }

  void InvalidateCode(Code* code);

  void ClearMarkbits();

  bool abort_incremental_marking() const { return abort_incremental_marking_; }

  bool is_compacting() const { return compacting_; }

  MarkingParity marking_parity() { return marking_parity_; }

  // Concurrent and parallel sweeping support.
  void SweepInParallel(PagedSpace* space,
                       FreeList* private_free_list,
                       FreeList* free_list);

  void WaitUntilSweepingCompleted();

  intptr_t StealMemoryFromSweeperThreads(PagedSpace* space);

  bool AreSweeperThreadsActivated();

  bool IsConcurrentSweepingInProgress();

  void set_sequential_sweeping(bool sequential_sweeping) {
    sequential_sweeping_ = sequential_sweeping;
  }

  bool sequential_sweeping() const {
    return sequential_sweeping_;
  }

  // Mark the global table which maps weak objects to dependent code without
  // marking its contents.
  void MarkWeakObjectToCodeTable();

  // Special case for processing weak references in a full collection. We need
  // to artificially keep AllocationSites alive for a time.
  void MarkAllocationSite(AllocationSite* site);

 private:
  MarkCompactCollector();
  ~MarkCompactCollector();

  bool MarkInvalidatedCode();
  bool WillBeDeoptimized(Code* code);
  void RemoveDeadInvalidatedCode();
  void ProcessInvalidatedCode(ObjectVisitor* visitor);

  void UnlinkEvacuationCandidates();
  void ReleaseEvacuationCandidates();

  void StartSweeperThreads();

#ifdef DEBUG
  enum CollectorState {
    IDLE,
    PREPARE_GC,
    MARK_LIVE_OBJECTS,
    SWEEP_SPACES,
    ENCODE_FORWARDING_ADDRESSES,
    UPDATE_POINTERS,
    RELOCATE_OBJECTS
  };

  // The current stage of the collector.
  CollectorState state_;
#endif

  // Global flag that forces sweeping to be precise, so we can traverse the
  // heap.
  bool sweep_precisely_;

  bool reduce_memory_footprint_;

  bool abort_incremental_marking_;

  MarkingParity marking_parity_;

  // True if we are collecting slots to perform evacuation from evacuation
  // candidates.
  bool compacting_;

  bool was_marked_incrementally_;

  // True if concurrent or parallel sweeping is currently in progress.
  bool sweeping_pending_;

  bool sequential_sweeping_;

  // A pointer to the current stack-allocated GC tracer object during a full
  // collection (NULL before and after).
  GCTracer* tracer_;

  SlotsBufferAllocator slots_buffer_allocator_;

  SlotsBuffer* migration_slots_buffer_;

  // Finishes GC, performs heap verification if enabled.
  void Finish();

  // -----------------------------------------------------------------------
  // Phase 1: Marking live objects.
  //
  //  Before: The heap has been prepared for garbage collection by
  //          MarkCompactCollector::Prepare() and is otherwise in its
  //          normal state.
  //
  //   After: Live objects are marked and non-live objects are unmarked.

  friend class RootMarkingVisitor;
  friend class MarkingVisitor;
  friend class MarkCompactMarkingVisitor;
  friend class CodeMarkingVisitor;
  friend class SharedFunctionInfoMarkingVisitor;

  // Mark code objects that are active on the stack to prevent them
  // from being flushed.
  void PrepareThreadForCodeFlushing(Isolate* isolate, ThreadLocalTop* top);

  void PrepareForCodeFlushing();

  // Marking operations for objects reachable from roots.
  void MarkLiveObjects();

  void AfterMarking();

  // Marks the object black and pushes it on the marking stack.
  // This is for non-incremental marking only.
  INLINE(void MarkObject(HeapObject* obj, MarkBit mark_bit));

  // Marks the object black assuming that it is not yet marked.
  // This is for non-incremental marking only.
  INLINE(void SetMark(HeapObject* obj, MarkBit mark_bit));

  // Mark the heap roots and all objects reachable from them.
  void MarkRoots(RootMarkingVisitor* visitor);

  // Mark the string table specially.  References to internalized strings from
  // the string table are weak.
  void MarkStringTable(RootMarkingVisitor* visitor);

  // Mark objects in implicit reference groups if their parent object
  // is marked.
  void MarkImplicitRefGroups();

  // Mark objects reachable (transitively) from objects in the marking stack
  // or overflowed in the heap.
  void ProcessMarkingDeque();

  // Mark objects reachable (transitively) from objects in the marking stack
  // or overflowed in the heap.  This respects references only considered in
  // the final atomic marking pause, including the following:
  //    - Processing of objects reachable through Harmony WeakMaps.
  //    - Objects reachable due to host application logic like object groups
  //      or implicit reference groups.
  void ProcessEphemeralMarking(ObjectVisitor* visitor);

  // If the call site of the top optimized code was not prepared for
  // deoptimization, then treat the maps in the code as strong pointers;
  // otherwise a map can die and deoptimize the code.
  void ProcessTopOptimizedFrame(ObjectVisitor* visitor);

  // Mark objects reachable (transitively) from objects in the marking
  // stack.  This function empties the marking stack, but may leave
  // overflowed objects in the heap, in which case the marking stack's
  // overflow flag will be set.
  void EmptyMarkingDeque();

  // Refill the marking stack with overflowed objects from the heap.  This
  // function either leaves the marking stack full or clears the overflow
  // flag on the marking stack.
  void RefillMarkingDeque();

  // After reachable maps have been marked, process per-context object
  // literal map caches, removing unmarked entries.
  void ProcessMapCaches();

  // Callback function for telling whether the object *p is an unmarked
  // heap object.
  static bool IsUnmarkedHeapObject(Object** p);
  static bool IsUnmarkedHeapObjectWithHeap(Heap* heap, Object** p);

  // Map transitions from a live map to a dead map must be killed.
  // We replace them with a null descriptor, with the same key.
  void ClearNonLiveReferences();
  void ClearNonLivePrototypeTransitions(Map* map);
  void ClearNonLiveMapTransitions(Map* map, MarkBit map_mark);

  void ClearAndDeoptimizeDependentCode(DependentCode* dependent_code);
  void ClearNonLiveDependentCode(DependentCode* dependent_code);

  // Marking detaches initial maps from SharedFunctionInfo objects
  // to make this reference weak. We need to reattach initial maps
  // after collection. This is done either during the
  // ClearNonLiveTransitions pass or by calling this function.
  void ReattachInitialMaps();

  // Mark all values associated with reachable keys in weak collections
  // encountered so far.  This might push new objects or even new weak maps
  // onto the marking stack.
  void ProcessWeakCollections();

  // After all reachable objects have been marked, those weak map entries
  // with an unreachable key are removed from all encountered weak maps.
  // The linked list of all encountered weak maps is destroyed.
  void ClearWeakCollections();

  // -----------------------------------------------------------------------
  // Phase 2: Sweeping to clear mark bits and free non-live objects for
  // a non-compacting collection.
  //
  //  Before: Live objects are marked and non-live objects are unmarked.
  //
  //   After: Live objects are unmarked, non-live regions have been added to
  //          their space's free list. Active eden semispace is compacted by
  //          evacuation.
  //

  // If we are not compacting the heap, we simply sweep the spaces except
  // for the large object space, clearing mark bits and adding unmarked
  // regions to each space's free list.
  void SweepSpaces();

  int DiscoverAndPromoteBlackObjectsOnPage(NewSpace* new_space,
                                           NewSpacePage* p);

  void EvacuateNewSpace();

  void EvacuateLiveObjectsFromPage(Page* p);

  void EvacuatePages();

  void EvacuateNewSpaceAndCandidates();

  void SweepSpace(PagedSpace* space, SweeperType sweeper);

#ifdef DEBUG
  friend class MarkObjectVisitor;
  static void VisitObject(HeapObject* obj);

  friend class UnmarkObjectVisitor;
  static void UnmarkObject(HeapObject* obj);
#endif

  Heap* heap_;
  MarkingDeque marking_deque_;
  CodeFlusher* code_flusher_;
  Object* encountered_weak_collections_;
  bool have_code_to_deoptimize_;

  List<Page*> evacuation_candidates_;
  List<Code*> invalidated_code_;

  friend class Heap;
};


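// Walks the mark-bit cells covering a chunk's usable area. Each cell holds
// 32 mark bits, one per pointer-sized word, which is why Advance() also moves
// the cell's base address forward by 32 * kPointerSize bytes.
//
// A minimal usage sketch (assuming 'chunk' is the MemoryChunk being swept):
//
//   for (MarkBitCellIterator it(chunk); !it.Done(); it.Advance()) {
//     MarkBit::CellType* cell = it.CurrentCell();
//     if (*cell == 0) continue;  // nothing marked in this cell
//     Address cell_base = it.CurrentCellBase();
//     // ... decode the set bits relative to cell_base ...
//   }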
class MarkBitCellIterator BASE_EMBEDDED {
 public:
  explicit MarkBitCellIterator(MemoryChunk* chunk)
      : chunk_(chunk) {
    last_cell_index_ = Bitmap::IndexToCell(
        Bitmap::CellAlignIndex(
            chunk_->AddressToMarkbitIndex(chunk_->area_end())));
    cell_base_ = chunk_->area_start();
    cell_index_ = Bitmap::IndexToCell(
        Bitmap::CellAlignIndex(
            chunk_->AddressToMarkbitIndex(cell_base_)));
    cells_ = chunk_->markbits()->cells();
  }

  inline bool Done() { return cell_index_ == last_cell_index_; }

  inline bool HasNext() { return cell_index_ < last_cell_index_ - 1; }

  inline MarkBit::CellType* CurrentCell() {
    ASSERT(cell_index_ == Bitmap::IndexToCell(Bitmap::CellAlignIndex(
        chunk_->AddressToMarkbitIndex(cell_base_))));
    return &cells_[cell_index_];
  }

  inline Address CurrentCellBase() {
    ASSERT(cell_index_ == Bitmap::IndexToCell(Bitmap::CellAlignIndex(
        chunk_->AddressToMarkbitIndex(cell_base_))));
    return cell_base_;
  }

  inline void Advance() {
    cell_index_++;
    cell_base_ += 32 * kPointerSize;
  }

 private:
  MemoryChunk* chunk_;
  MarkBit::CellType* cells_;
  unsigned int last_cell_index_;
  unsigned int cell_index_;
  Address cell_base_;
};


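// RAII helper that forces sequential sweeping for the dynamic extent of the
// scope via MarkCompactCollector::set_sequential_sweeping().
//
// A minimal usage sketch (assuming 'collector' is the heap's collector):
//
//   {
//     SequentialSweepingScope scope(collector);
//     // Sweeping triggered here runs sequentially.
//   }
//   // Parallel/concurrent sweeping is allowed again.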
class SequentialSweepingScope BASE_EMBEDDED {
 public:
  explicit SequentialSweepingScope(MarkCompactCollector* collector)
      : collector_(collector) {
    collector_->set_sequential_sweeping(true);
  }

  ~SequentialSweepingScope() {
    collector_->set_sequential_sweeping(false);
  }

 private:
  MarkCompactCollector* collector_;
};


const char* AllocationSpaceName(AllocationSpace space);

} }  // namespace v8::internal

#endif  // V8_MARK_COMPACT_H_