mark-compact.h revision 014dc512cdd3e367bee49a713fdc5ed92584a3e5
// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_HEAP_MARK_COMPACT_H_
#define V8_HEAP_MARK_COMPACT_H_

#include "src/base/bits.h"
#include "src/heap/spaces.h"

namespace v8 {
namespace internal {

// Callback function that returns whether an object is alive. The heap size
// of the object is returned in size. It optionally updates the offset to
// the first live object in the page (only used for old and map objects).
typedef bool (*IsAliveFunction)(HeapObject* obj, int* size, int* offset);

// Callback function to mark an object in a given heap.
typedef void (*MarkObjectFunction)(Heap* heap, HeapObject* object);
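
// Illustrative only: a hypothetical callback matching the IsAliveFunction
// signature above, assuming mark bits are consulted through the Marking
// helpers declared below. It reports liveness via the object's mark bits and
// returns the object's size through the size out-parameter; the offset
// out-parameter is left untouched in this sketch.
//
//   static bool ExampleIsAlive(HeapObject* obj, int* size, int* offset) {
//     if (!Marking::IsBlackOrGrey(Marking::MarkBitFrom(obj))) return false;
//     *size = obj->Size();
//     return true;
//   }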

// Forward declarations.
class CodeFlusher;
class MarkCompactCollector;
class MarkingVisitor;
class RootMarkingVisitor;
class SlotsBuffer;
class SlotsBufferAllocator;


class Marking : public AllStatic {
 public:
  INLINE(static MarkBit MarkBitFrom(Address addr)) {
    MemoryChunk* p = MemoryChunk::FromAddress(addr);
    return p->markbits()->MarkBitFromIndex(p->AddressToMarkbitIndex(addr));
  }

  INLINE(static MarkBit MarkBitFrom(HeapObject* obj)) {
    return MarkBitFrom(reinterpret_cast<Address>(obj));
  }

  // Impossible markbits: 01
  static const char* kImpossibleBitPattern;
  INLINE(static bool IsImpossible(MarkBit mark_bit)) {
    return !mark_bit.Get() && mark_bit.Next().Get();
  }

  // Black markbits: 11
  static const char* kBlackBitPattern;
  INLINE(static bool IsBlack(MarkBit mark_bit)) {
    return mark_bit.Get() && mark_bit.Next().Get();
  }

  // White markbits: 00 - this is required by the mark bit clearer.
  static const char* kWhiteBitPattern;
  INLINE(static bool IsWhite(MarkBit mark_bit)) {
    DCHECK(!IsImpossible(mark_bit));
    return !mark_bit.Get();
  }

  // Grey markbits: 10
  static const char* kGreyBitPattern;
  INLINE(static bool IsGrey(MarkBit mark_bit)) {
    return mark_bit.Get() && !mark_bit.Next().Get();
  }

  // IsBlackOrGrey assumes that the first bit is set for black or grey
  // objects.
  INLINE(static bool IsBlackOrGrey(MarkBit mark_bit)) { return mark_bit.Get(); }

  INLINE(static void MarkBlack(MarkBit mark_bit)) {
    mark_bit.Set();
    mark_bit.Next().Set();
  }

  INLINE(static void MarkWhite(MarkBit mark_bit)) {
    mark_bit.Clear();
    mark_bit.Next().Clear();
  }

  INLINE(static void BlackToWhite(MarkBit markbit)) {
    DCHECK(IsBlack(markbit));
    markbit.Clear();
    markbit.Next().Clear();
  }

  INLINE(static void GreyToWhite(MarkBit markbit)) {
    DCHECK(IsGrey(markbit));
    markbit.Clear();
    markbit.Next().Clear();
  }

  INLINE(static void BlackToGrey(MarkBit markbit)) {
    DCHECK(IsBlack(markbit));
    markbit.Next().Clear();
  }

  INLINE(static void WhiteToGrey(MarkBit markbit)) {
    DCHECK(IsWhite(markbit));
    markbit.Set();
  }

  INLINE(static void WhiteToBlack(MarkBit markbit)) {
    DCHECK(IsWhite(markbit));
    markbit.Set();
    markbit.Next().Set();
  }

  INLINE(static void GreyToBlack(MarkBit markbit)) {
    DCHECK(IsGrey(markbit));
    markbit.Next().Set();
  }

  INLINE(static void BlackToGrey(HeapObject* obj)) {
    BlackToGrey(MarkBitFrom(obj));
  }

  INLINE(static void AnyToGrey(MarkBit markbit)) {
    markbit.Set();
    markbit.Next().Clear();
  }

  static void TransferMark(Heap* heap, Address old_start, Address new_start);

#ifdef DEBUG
  enum ObjectColor {
    BLACK_OBJECT,
    WHITE_OBJECT,
    GREY_OBJECT,
    IMPOSSIBLE_COLOR
  };

  static const char* ColorName(ObjectColor color) {
    switch (color) {
      case BLACK_OBJECT:
        return "black";
      case WHITE_OBJECT:
        return "white";
      case GREY_OBJECT:
        return "grey";
      case IMPOSSIBLE_COLOR:
        return "impossible";
    }
    return "error";
  }

  static ObjectColor Color(HeapObject* obj) {
    return Color(Marking::MarkBitFrom(obj));
  }

  static ObjectColor Color(MarkBit mark_bit) {
    if (IsBlack(mark_bit)) return BLACK_OBJECT;
    if (IsWhite(mark_bit)) return WHITE_OBJECT;
    if (IsGrey(mark_bit)) return GREY_OBJECT;
    UNREACHABLE();
    return IMPOSSIBLE_COLOR;
  }
#endif

  // Returns true if the transferred color is black.
  INLINE(static bool TransferColor(HeapObject* from, HeapObject* to)) {
    MarkBit from_mark_bit = MarkBitFrom(from);
    MarkBit to_mark_bit = MarkBitFrom(to);
    DCHECK(Marking::IsWhite(to_mark_bit));
    if (from_mark_bit.Get()) {
      to_mark_bit.Set();
      if (from_mark_bit.Next().Get()) {
        to_mark_bit.Next().Set();
        return true;
      }
    }
    return false;
  }

 private:
  DISALLOW_IMPLICIT_CONSTRUCTORS(Marking);
};
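
// A minimal usage sketch (illustrative only, assuming a valid HeapObject*
// |obj| on a committed chunk): the tri-colour transitions a marker performs
// while tracing. White (00) objects are undiscovered, grey (10) objects are
// discovered but not yet scanned, black (11) objects are fully scanned.
//
//   MarkBit bit = Marking::MarkBitFrom(obj);
//   if (Marking::IsWhite(bit)) {
//     Marking::WhiteToGrey(bit);  // discovered; would now go on the deque
//   }
//   // ... after the object's fields have been visited:
//   Marking::GreyToBlack(bit);
//   DCHECK(Marking::IsBlack(bit));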

// ----------------------------------------------------------------------------
// Marking deque for tracing live objects.
class MarkingDeque {
 public:
  MarkingDeque()
      : array_(NULL),
        top_(0),
        bottom_(0),
        mask_(0),
        overflowed_(false),
        in_use_(false) {}

  void Initialize(Address low, Address high);
  void Uninitialize(bool aborting = false);

  inline bool IsFull() { return ((top_ + 1) & mask_) == bottom_; }

  inline bool IsEmpty() { return top_ == bottom_; }

  bool overflowed() const { return overflowed_; }

  bool in_use() const { return in_use_; }

  void ClearOverflowed() { overflowed_ = false; }

  void SetOverflowed() { overflowed_ = true; }

  // Push the object on the marking stack if there is room, otherwise mark the
  // deque as overflowed and wait for a rescan of the heap.
  INLINE(bool Push(HeapObject* object)) {
    DCHECK(object->IsHeapObject());
    if (IsFull()) {
      SetOverflowed();
      return false;
    } else {
      array_[top_] = object;
      top_ = ((top_ + 1) & mask_);
      return true;
    }
  }

  INLINE(HeapObject* Pop()) {
    DCHECK(!IsEmpty());
    top_ = ((top_ - 1) & mask_);
    HeapObject* object = array_[top_];
    DCHECK(object->IsHeapObject());
    return object;
  }

  // Unshift the object into the marking stack if there is room, otherwise mark
  // the deque as overflowed and wait for a rescan of the heap.
  INLINE(bool Unshift(HeapObject* object)) {
    DCHECK(object->IsHeapObject());
    if (IsFull()) {
      SetOverflowed();
      return false;
    } else {
      bottom_ = ((bottom_ - 1) & mask_);
      array_[bottom_] = object;
      return true;
    }
  }

  HeapObject** array() { return array_; }
  int bottom() { return bottom_; }
  int top() { return top_; }
  int mask() { return mask_; }
  void set_top(int top) { top_ = top; }

 private:
  HeapObject** array_;
  // array_[(top_ - 1) & mask_] is the top element in the deque.  The deque is
  // empty when top_ == bottom_.  It is full when top_ + 1 == bottom_
  // (mod mask_ + 1).
  int top_;
  int bottom_;
  int mask_;
  bool overflowed_;
  bool in_use_;

  DISALLOW_COPY_AND_ASSIGN(MarkingDeque);
};
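
// A minimal sketch of the intended push/drain protocol (illustrative only,
// assuming an initialized deque and black-marked objects): when Push() fails
// the deque records overflow, and the caller later rescans the heap for grey
// objects and clears the flag.
//
//   if (!deque->Push(object)) {
//     // Deque is full; it is now marked as overflowed.  The object stays
//     // grey and will be rediscovered when the heap is rescanned.
//   }
//   while (!deque->IsEmpty()) {
//     HeapObject* obj = deque->Pop();
//     // ... visit obj's fields, pushing newly greyed objects ...
//   }
//   if (deque->overflowed()) {
//     // Refill from the heap and repeat
//     // (see MarkCompactCollector::RefillMarkingDeque below).
//   }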


// CodeFlusher collects candidates for code flushing during marking and
// processes those candidates after marking has completed in order to
// reset those functions referencing code objects that would otherwise
// be unreachable. Code objects can be referenced in two ways:
//    - SharedFunctionInfo references unoptimized code.
//    - JSFunction references either unoptimized or optimized code.
// We are not allowed to flush unoptimized code for functions that got
// optimized or inlined into optimized code, because we might bail out
// into the unoptimized code again during deoptimization.
class CodeFlusher {
 public:
  explicit CodeFlusher(Isolate* isolate)
      : isolate_(isolate),
        jsfunction_candidates_head_(nullptr),
        shared_function_info_candidates_head_(nullptr) {}

  inline void AddCandidate(SharedFunctionInfo* shared_info);
  inline void AddCandidate(JSFunction* function);

  void EvictCandidate(SharedFunctionInfo* shared_info);
  void EvictCandidate(JSFunction* function);

  void ProcessCandidates() {
    ProcessSharedFunctionInfoCandidates();
    ProcessJSFunctionCandidates();
  }

  void IteratePointersToFromSpace(ObjectVisitor* v);

 private:
  void ProcessJSFunctionCandidates();
  void ProcessSharedFunctionInfoCandidates();

  static inline JSFunction** GetNextCandidateSlot(JSFunction* candidate);
  static inline JSFunction* GetNextCandidate(JSFunction* candidate);
  static inline void SetNextCandidate(JSFunction* candidate,
                                      JSFunction* next_candidate);
  static inline void ClearNextCandidate(JSFunction* candidate,
                                        Object* undefined);

  static inline SharedFunctionInfo* GetNextCandidate(
      SharedFunctionInfo* candidate);
  static inline void SetNextCandidate(SharedFunctionInfo* candidate,
                                      SharedFunctionInfo* next_candidate);
  static inline void ClearNextCandidate(SharedFunctionInfo* candidate);

  Isolate* isolate_;
  JSFunction* jsfunction_candidates_head_;
  SharedFunctionInfo* shared_function_info_candidates_head_;

  DISALLOW_COPY_AND_ASSIGN(CodeFlusher);
};
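
// A minimal sketch of how the marker drives the flusher (illustrative only,
// assuming code flushing is enabled on the collector, i.e.
// is_code_flushing_enabled() returns true): candidates are collected while
// marking and processed once marking has finished.
//
//   CodeFlusher* flusher = collector->code_flusher();
//   // During marking, for each function whose code looks flushable:
//   flusher->AddCandidate(shared_info);  // or flusher->AddCandidate(function)
//   // If the function turns out to be live in a way that forbids flushing:
//   flusher->EvictCandidate(shared_info);
//   // After marking has completed:
//   flusher->ProcessCandidates();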


// Defined in isolate.h.
class ThreadLocalTop;


// -------------------------------------------------------------------------
// Mark-Compact collector
class MarkCompactCollector {
 public:
  enum IterationMode {
    kKeepMarking,
    kClearMarkbits,
  };

  static void Initialize();

  void SetUp();

  void TearDown();

  void CollectEvacuationCandidates(PagedSpace* space);

  void AddEvacuationCandidate(Page* p);

  // Prepares for GC by resetting relocation info in old and map spaces and
  // choosing spaces to compact.
  void Prepare();

  // Performs a global garbage collection.
  void CollectGarbage();

  enum CompactionMode { INCREMENTAL_COMPACTION, NON_INCREMENTAL_COMPACTION };

  bool StartCompaction(CompactionMode mode);

  void AbortCompaction();

#ifdef DEBUG
  // Checks whether a mark-compact collection is in progress.
  bool in_use() { return state_ > PREPARE_GC; }
  bool are_map_pointers_encoded() { return state_ == UPDATE_POINTERS; }
#endif

  // Determine type of object and emit deletion log event.
  static void ReportDeleteIfNeeded(HeapObject* obj, Isolate* isolate);

  // Distinguishable invalid map encodings (for single word and multiple words)
  // that indicate free regions.
  static const uint32_t kSingleFreeEncoding = 0;
  static const uint32_t kMultiFreeEncoding = 1;

  static inline bool IsMarked(Object* obj);
  static bool IsUnmarkedHeapObjectWithHeap(Heap* heap, Object** p);

  inline Heap* heap() const { return heap_; }
  inline Isolate* isolate() const;

  CodeFlusher* code_flusher() { return code_flusher_; }
  inline bool is_code_flushing_enabled() const { return code_flusher_ != NULL; }

  enum SweepingParallelism { SWEEP_ON_MAIN_THREAD, SWEEP_IN_PARALLEL };

#ifdef VERIFY_HEAP
  void VerifyValidStoreAndSlotsBufferEntries();
  void VerifyMarkbitsAreClean();
  static void VerifyMarkbitsAreClean(PagedSpace* space);
  static void VerifyMarkbitsAreClean(NewSpace* space);
  void VerifyWeakEmbeddedObjectsInCode();
  void VerifyOmittedMapChecks();
#endif

  INLINE(static bool ShouldSkipEvacuationSlotRecording(Object* host)) {
    return Page::FromAddress(reinterpret_cast<Address>(host))
        ->ShouldSkipEvacuationSlotRecording();
  }

  INLINE(static bool IsOnEvacuationCandidate(Object* obj)) {
    return Page::FromAddress(reinterpret_cast<Address>(obj))
        ->IsEvacuationCandidate();
  }

  void RecordRelocSlot(RelocInfo* rinfo, Object* target);
  void RecordCodeEntrySlot(HeapObject* object, Address slot, Code* target);
  void RecordCodeTargetPatch(Address pc, Code* target);
  INLINE(void RecordSlot(HeapObject* object, Object** slot, Object* target));
  INLINE(void ForceRecordSlot(HeapObject* object, Object** slot,
                              Object* target));

  void UpdateSlots(SlotsBuffer* buffer);
  void UpdateSlotsRecordedIn(SlotsBuffer* buffer);

  void MigrateObject(HeapObject* dst, HeapObject* src, int size,
                     AllocationSpace to_old_space,
                     SlotsBuffer** evacuation_slots_buffer);

  void InvalidateCode(Code* code);

  void ClearMarkbits();

  bool is_compacting() const { return compacting_; }

  MarkingParity marking_parity() { return marking_parity_; }

  // Concurrent and parallel sweeping support. If required_freed_bytes is set
  // to a value larger than 0, sweeping returns after a block of at least
  // required_freed_bytes has been freed. If required_freed_bytes is set to
  // zero, the whole given space is swept. It returns the size of the maximum
  // contiguous freed memory chunk.
  int SweepInParallel(PagedSpace* space, int required_freed_bytes);

  // Sweeps a given page concurrently to the sweeper threads. It returns the
  // size of the maximum contiguous freed memory chunk.
  int SweepInParallel(Page* page, PagedSpace* space);

  // Ensures that sweeping is finished.
  //
  // Note: Can only be called safely from the main thread.
  void EnsureSweepingCompleted();

  void SweepOrWaitUntilSweepingCompleted(Page* page);

  // Help out in sweeping the corresponding space and refill memory that has
  // been regained.
  //
  // Note: Thread-safe.
  void SweepAndRefill(CompactionSpace* space);

  // Returns true if the sweeper threads are not active or if they are done
  // processing all pages. If this turns out to be a latency issue, we should
  // be smarter here.
  bool IsSweepingCompleted();

  // Checks if sweeping is in progress right now on any space.
  bool sweeping_in_progress() { return sweeping_in_progress_; }

  void set_evacuation(bool evacuation) { evacuation_ = evacuation; }

  bool evacuation() const { return evacuation_; }

  // Special case for processing weak references in a full collection. We need
  // to artificially keep AllocationSites alive for a time.
  void MarkAllocationSite(AllocationSite* site);

  // Mark objects in implicit reference groups if their parent object
  // is marked.
  void MarkImplicitRefGroups(MarkObjectFunction mark_object);

  MarkingDeque* marking_deque() { return &marking_deque_; }

  static const size_t kMaxMarkingDequeSize = 4 * MB;
  static const size_t kMinMarkingDequeSize = 256 * KB;

  void EnsureMarkingDequeIsCommittedAndInitialize(size_t max_size) {
    if (!marking_deque_.in_use()) {
      EnsureMarkingDequeIsCommitted(max_size);
      InitializeMarkingDeque();
    }
  }

  void EnsureMarkingDequeIsCommitted(size_t max_size);
  void EnsureMarkingDequeIsReserved();

  void InitializeMarkingDeque();

  // The following four methods may only be called after marking, when the
  // whole transitive closure is known. They must be called before sweeping,
  // while mark bits are still intact.
  bool IsSlotInBlackObject(Page* p, Address slot, HeapObject** out_object);
  bool IsSlotInBlackObjectSlow(Page* p, Address slot);
  bool IsSlotInLiveObject(Address slot);
  void VerifyIsSlotInLiveObject(Address slot, HeapObject* object);

  // Removes all the slots in the slot buffers that are within the given
  // address range.
  void RemoveObjectSlots(Address start_slot, Address end_slot);

  //
  // Free lists filled by sweeper and consumed by corresponding spaces
  // (including compaction spaces).
  //
  base::SmartPointer<FreeList>& free_list_old_space() {
    return free_list_old_space_;
  }
  base::SmartPointer<FreeList>& free_list_code_space() {
    return free_list_code_space_;
  }
  base::SmartPointer<FreeList>& free_list_map_space() {
    return free_list_map_space_;
  }

 private:
  class CompactionTask;
  class EvacuateNewSpaceVisitor;
  class EvacuateOldSpaceVisitor;
  class EvacuateVisitorBase;
  class HeapObjectVisitor;
  class SweeperTask;

  static const int kInitialLocalPretenuringFeedbackCapacity = 256;

  explicit MarkCompactCollector(Heap* heap);

  bool WillBeDeoptimized(Code* code);
  void EvictPopularEvacuationCandidate(Page* page);
  void ClearInvalidStoreAndSlotsBufferEntries();

  void StartSweeperThreads();

  void ComputeEvacuationHeuristics(int area_size,
                                   int* target_fragmentation_percent,
                                   int* max_evacuated_bytes);

#ifdef DEBUG
  enum CollectorState {
    IDLE,
    PREPARE_GC,
    MARK_LIVE_OBJECTS,
    SWEEP_SPACES,
    ENCODE_FORWARDING_ADDRESSES,
    UPDATE_POINTERS,
    RELOCATE_OBJECTS
  };

  // The current stage of the collector.
  CollectorState state_;
#endif

  MarkingParity marking_parity_;

  bool was_marked_incrementally_;

  bool evacuation_;

  SlotsBufferAllocator* slots_buffer_allocator_;

  SlotsBuffer* migration_slots_buffer_;

  // Finishes GC, performs heap verification if enabled.
  void Finish();

  // -----------------------------------------------------------------------
  // Phase 1: Marking live objects.
  //
  //  Before: The heap has been prepared for garbage collection by
  //          MarkCompactCollector::Prepare() and is otherwise in its
  //          normal state.
  //
  //   After: Live objects are marked and non-live objects are unmarked.

  friend class CodeMarkingVisitor;
  friend class IncrementalMarkingMarkingVisitor;
  friend class MarkCompactMarkingVisitor;
  friend class MarkingVisitor;
  friend class RecordMigratedSlotVisitor;
  friend class RootMarkingVisitor;
  friend class SharedFunctionInfoMarkingVisitor;

  // Mark code objects that are active on the stack to prevent them
  // from being flushed.
  void PrepareThreadForCodeFlushing(Isolate* isolate, ThreadLocalTop* top);

  void PrepareForCodeFlushing();

  // Marking operations for objects reachable from roots.
  void MarkLiveObjects();

  // Pushes a black object onto the marking stack and accounts for live bytes.
  // Note that this assumes live bytes have not yet been counted.
  INLINE(void PushBlack(HeapObject* obj));

  // Unshifts a black object into the marking stack and accounts for live bytes.
  // Note that this assumes live bytes have already been counted.
  INLINE(void UnshiftBlack(HeapObject* obj));

  // Marks the object black and pushes it on the marking stack.
  // This is for non-incremental marking only.
  INLINE(void MarkObject(HeapObject* obj, MarkBit mark_bit));

  // Marks the object black assuming that it is not yet marked.
  // This is for non-incremental marking only.
  INLINE(void SetMark(HeapObject* obj, MarkBit mark_bit));

  // Mark the heap roots and all objects reachable from them.
  void MarkRoots(RootMarkingVisitor* visitor);

  // Mark the string table specially.  References to internalized strings from
  // the string table are weak.
  void MarkStringTable(RootMarkingVisitor* visitor);

  // Mark objects reachable (transitively) from objects in the marking stack
  // or overflowed in the heap.
  void ProcessMarkingDeque();

  // Mark objects reachable (transitively) from objects in the marking stack
  // or overflowed in the heap.  This respects references only considered in
  // the final atomic marking pause including the following:
  //    - Processing of objects reachable through Harmony WeakMaps.
  //    - Objects reachable due to host application logic like object groups
  //      or implicit reference groups.
  void ProcessEphemeralMarking(ObjectVisitor* visitor,
                               bool only_process_harmony_weak_collections);

  // If the call site of the top optimized code was not prepared for
  // deoptimization, treat the maps in the code as strong pointers;
  // otherwise a map can die and deoptimize the code.
  void ProcessTopOptimizedFrame(ObjectVisitor* visitor);

  // Collects a list of dependent code from maps embedded in optimized code.
  DependentCode* DependentCodeListFromNonLiveMaps();

  // Mark objects reachable (transitively) from objects in the marking
  // stack.  This function empties the marking stack, but may leave
  // overflowed objects in the heap, in which case the marking stack's
  // overflow flag will be set.
  void EmptyMarkingDeque();

  // Refill the marking stack with overflowed objects from the heap.  This
  // function either leaves the marking stack full or clears the overflow
  // flag on the marking stack.
  void RefillMarkingDeque();

  // Helper methods for refilling the marking stack by discovering grey objects
  // on various pages of the heap. Used by {RefillMarkingDeque} only.
  template <class T>
  void DiscoverGreyObjectsWithIterator(T* it);
  void DiscoverGreyObjectsOnPage(MemoryChunk* p);
  void DiscoverGreyObjectsInSpace(PagedSpace* space);
  void DiscoverGreyObjectsInNewSpace();

  // Callback function for telling whether the object *p is an unmarked
  // heap object.
  static bool IsUnmarkedHeapObject(Object** p);

  // Clear non-live references in weak cells, transition and descriptor arrays,
  // and deoptimize dependent code of non-live maps.
  void ClearNonLiveReferences();
  void MarkDependentCodeForDeoptimization(DependentCode* list);
  // Find non-live targets of simple transitions in the given list. Clear
  // transitions to non-live targets and if needed trim descriptor arrays.
  void ClearSimpleMapTransitions(Object* non_live_map_list);
  void ClearSimpleMapTransition(Map* map, Map* dead_transition);
  // Compact every array in the global list of transition arrays and
  // trim the corresponding descriptor array if a transition target is non-live.
  void ClearFullMapTransitions();
  bool CompactTransitionArray(Map* map, TransitionArray* transitions,
                              DescriptorArray* descriptors);
  void TrimDescriptorArray(Map* map, DescriptorArray* descriptors);
  void TrimEnumCache(Map* map, DescriptorArray* descriptors);

  // Mark all values associated with reachable keys in weak collections
  // encountered so far.  This might push new objects or even new weak maps
  // onto the marking stack.
  void ProcessWeakCollections();

  // After all reachable objects have been marked, those weak map entries
  // with an unreachable key are removed from all encountered weak maps.
  // The linked list of all encountered weak maps is destroyed.
  void ClearWeakCollections();

  // We have to remove all encountered weak maps from the list of weak
  // collections when incremental marking is aborted.
  void AbortWeakCollections();

  void ClearWeakCells(Object** non_live_map_list,
                      DependentCode** dependent_code_list);
  void AbortWeakCells();

  void AbortTransitionArrays();

  // -----------------------------------------------------------------------
  // Phase 2: Sweeping to clear mark bits and free non-live objects for
  // a non-compacting collection.
  //
  //  Before: Live objects are marked and non-live objects are unmarked.
  //
  //   After: Live objects are unmarked, non-live regions have been added to
  //          their space's free list. Active eden semispace is compacted by
  //          evacuation.
  //

  // If we are not compacting the heap, we simply sweep the spaces except
  // for the large object space, clearing mark bits and adding unmarked
  // regions to each space's free list.
  void SweepSpaces();

  void EvacuateNewSpacePrologue();

  // Returns local pretenuring feedback.
  HashMap* EvacuateNewSpaceInParallel();

  void AddEvacuationSlotsBufferSynchronized(
      SlotsBuffer* evacuation_slots_buffer);

  void EvacuatePages(CompactionSpaceCollection* compaction_spaces,
                     SlotsBuffer** evacuation_slots_buffer);

  void EvacuatePagesInParallel();

  // The number of parallel compaction tasks, including the main thread.
  int NumberOfParallelCompactionTasks();


  void StartParallelCompaction(CompactionSpaceCollection** compaction_spaces,
                               uint32_t* task_ids, int len);
  void WaitUntilCompactionCompleted(uint32_t* task_ids, int len);

  void EvacuateNewSpaceAndCandidates();

  void UpdatePointersAfterEvacuation();

  // Iterates through all live objects on a page using marking information.
  // Returns whether all objects have successfully been visited.
  bool VisitLiveObjects(MemoryChunk* page, HeapObjectVisitor* visitor,
                        IterationMode mode);

  void VisitLiveObjectsBody(Page* page, ObjectVisitor* visitor);

  void RecomputeLiveBytes(MemoryChunk* page);

  void SweepAbortedPages();

  void ReleaseEvacuationCandidates();

  // Moves the pages of the evacuation_candidates_ list to the end of their
  // corresponding space's page list.
  void MoveEvacuationCandidatesToEndOfPagesList();

  // Starts sweeping of a space by contributing on the main thread and setting
  // up other pages for sweeping.
  void StartSweepSpace(PagedSpace* space);

  // Finalizes the parallel sweeping phase. Marks all the pages that were
  // swept in parallel.
  void ParallelSweepSpacesComplete();

  void ParallelSweepSpaceComplete(PagedSpace* space);

  // Updates store buffer and slot buffer for a pointer in a migrating object.
  void RecordMigratedSlot(Object* value, Address slot,
                          SlotsBuffer** evacuation_slots_buffer);

  // Adds the code entry slot to the slots buffer.
  void RecordMigratedCodeEntrySlot(Address code_entry, Address code_entry_slot,
                                   SlotsBuffer** evacuation_slots_buffer);

  // Adds the slot of a moved code object.
  void RecordMigratedCodeObjectSlot(Address code_object,
                                    SlotsBuffer** evacuation_slots_buffer);

#ifdef DEBUG
  friend class MarkObjectVisitor;
  static void VisitObject(HeapObject* obj);

  friend class UnmarkObjectVisitor;
  static void UnmarkObject(HeapObject* obj);
#endif

  Heap* heap_;
  base::VirtualMemory* marking_deque_memory_;
  size_t marking_deque_memory_committed_;
  MarkingDeque marking_deque_;
  CodeFlusher* code_flusher_;
  bool have_code_to_deoptimize_;

  List<Page*> evacuation_candidates_;

  List<MemoryChunk*> newspace_evacuation_candidates_;

  // The evacuation_slots_buffers_ are used by the compaction threads.
  // When a compaction task finishes, it uses
  // AddEvacuationSlotsBufferSynchronized to add its slots buffer to the
  // evacuation_slots_buffers_ list under the evacuation_slots_buffers_mutex_
  // lock.
  base::Mutex evacuation_slots_buffers_mutex_;
  List<SlotsBuffer*> evacuation_slots_buffers_;

  base::SmartPointer<FreeList> free_list_old_space_;
  base::SmartPointer<FreeList> free_list_code_space_;
  base::SmartPointer<FreeList> free_list_map_space_;

  // True if we are collecting slots to perform evacuation from evacuation
  // candidates.
  bool compacting_;

  // True if concurrent or parallel sweeping is currently in progress.
  bool sweeping_in_progress_;

  // True if parallel compaction is currently in progress.
  bool compaction_in_progress_;

  // Semaphore used to synchronize sweeper tasks.
  base::Semaphore pending_sweeper_tasks_semaphore_;

  // Semaphore used to synchronize compaction tasks.
  base::Semaphore pending_compaction_tasks_semaphore_;

  friend class Heap;
  friend class StoreBuffer;
};
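
// A minimal sketch of one full collection cycle as driven by the owning Heap
// (illustrative only; the actual sequencing lives in Heap, and the
// heap->mark_compact_collector() accessor is assumed here):
//
//   MarkCompactCollector* collector = heap->mark_compact_collector();
//   if (collector->sweeping_in_progress()) {
//     collector->EnsureSweepingCompleted();  // finish the previous cycle
//   }
//   collector->Prepare();         // reset relocation info, pick candidates
//   collector->CollectGarbage();  // mark, clear non-live refs, sweep/evacuate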


class MarkBitCellIterator BASE_EMBEDDED {
 public:
  explicit MarkBitCellIterator(MemoryChunk* chunk) : chunk_(chunk) {
    last_cell_index_ = Bitmap::IndexToCell(Bitmap::CellAlignIndex(
        chunk_->AddressToMarkbitIndex(chunk_->area_end())));
    cell_base_ = chunk_->area_start();
    cell_index_ = Bitmap::IndexToCell(
        Bitmap::CellAlignIndex(chunk_->AddressToMarkbitIndex(cell_base_)));
    cells_ = chunk_->markbits()->cells();
  }

  inline bool Done() { return cell_index_ == last_cell_index_; }

  inline bool HasNext() { return cell_index_ < last_cell_index_ - 1; }

  inline MarkBit::CellType* CurrentCell() {
    DCHECK(cell_index_ == Bitmap::IndexToCell(Bitmap::CellAlignIndex(
                              chunk_->AddressToMarkbitIndex(cell_base_))));
    return &cells_[cell_index_];
  }

  inline Address CurrentCellBase() {
    DCHECK(cell_index_ == Bitmap::IndexToCell(Bitmap::CellAlignIndex(
                              chunk_->AddressToMarkbitIndex(cell_base_))));
    return cell_base_;
  }

  inline void Advance() {
    cell_index_++;
    cell_base_ += 32 * kPointerSize;
  }

  // Returns the next mark bit cell. If there is no next cell, it returns 0.
  inline MarkBit::CellType PeekNext() {
    if (HasNext()) {
      return cells_[cell_index_ + 1];
    }
    return 0;
  }

 private:
  MemoryChunk* chunk_;
  MarkBit::CellType* cells_;
  unsigned int last_cell_index_;
  unsigned int cell_index_;
  Address cell_base_;
};
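
// A minimal scan over a chunk's mark bitmap, cell by cell (illustrative only,
// assuming a valid MemoryChunk* |chunk|): each cell covers 32 * kPointerSize
// bytes of the chunk's area starting at CurrentCellBase().
//
//   for (MarkBitCellIterator it(chunk); !it.Done(); it.Advance()) {
//     MarkBit::CellType cell = *it.CurrentCell();
//     if (cell == 0) continue;               // no mark bits set in this cell
//     Address base = it.CurrentCellBase();   // address covered by bit 0
//     // ... decode the set bits in the cell relative to |base| ...
//   }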

enum LiveObjectIterationMode { kBlackObjects, kGreyObjects, kAllLiveObjects };

template <LiveObjectIterationMode T>
class LiveObjectIterator BASE_EMBEDDED {
 public:
  explicit LiveObjectIterator(MemoryChunk* chunk)
      : chunk_(chunk),
        it_(chunk_),
        cell_base_(it_.CurrentCellBase()),
        current_cell_(*it_.CurrentCell()) {}

  HeapObject* Next();

 private:
  MemoryChunk* chunk_;
  MarkBitCellIterator it_;
  Address cell_base_;
  MarkBit::CellType current_cell_;
};
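
// A minimal sketch of iterating the live (here: black) objects on a chunk
// (illustrative only, assuming marking information is still intact):
//
//   LiveObjectIterator<kBlackObjects> it(chunk);
//   HeapObject* object = nullptr;
//   while ((object = it.Next()) != nullptr) {
//     // ... process the black object ...
//   }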


class EvacuationScope BASE_EMBEDDED {
 public:
  explicit EvacuationScope(MarkCompactCollector* collector)
      : collector_(collector) {
    collector_->set_evacuation(true);
  }

  ~EvacuationScope() { collector_->set_evacuation(false); }

 private:
  MarkCompactCollector* collector_;
};
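
// A minimal RAII usage sketch (illustrative only): the collector's evacuation
// flag is set for exactly the duration of the scope.
//
//   {
//     EvacuationScope scope(collector);
//     DCHECK(collector->evacuation());
//     // ... evacuate new space and compaction candidates ...
//   }
//   DCHECK(!collector->evacuation());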


const char* AllocationSpaceName(AllocationSpace space);
}  // namespace internal
}  // namespace v8

#endif  // V8_HEAP_MARK_COMPACT_H_
