mark-compact.cc revision 692be65d6b06edd9ff4cfc4c308555b7c99c1191
1// Copyright 2011 the V8 project authors. All rights reserved.
2// Redistribution and use in source and binary forms, with or without
3// modification, are permitted provided that the following conditions are
4// met:
5//
6//     * Redistributions of source code must retain the above copyright
7//       notice, this list of conditions and the following disclaimer.
8//     * Redistributions in binary form must reproduce the above
9//       copyright notice, this list of conditions and the following
10//       disclaimer in the documentation and/or other materials provided
11//       with the distribution.
12//     * Neither the name of Google Inc. nor the names of its
13//       contributors may be used to endorse or promote products derived
14//       from this software without specific prior written permission.
15//
16// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27
28#include "v8.h"
29
30#include "compilation-cache.h"
31#include "execution.h"
32#include "heap-profiler.h"
33#include "gdb-jit.h"
34#include "global-handles.h"
35#include "ic-inl.h"
36#include "liveobjectlist-inl.h"
37#include "mark-compact.h"
38#include "objects-visiting.h"
39#include "stub-cache.h"
40
41namespace v8 {
42namespace internal {
43
44// -------------------------------------------------------------------------
45// MarkCompactCollector
46
47MarkCompactCollector::MarkCompactCollector() :  // NOLINT
48#ifdef DEBUG
49      state_(IDLE),
50#endif
51      force_compaction_(false),
52      compacting_collection_(false),
53      compact_on_next_gc_(false),
54      previous_marked_count_(0),
55      tracer_(NULL),
56#ifdef DEBUG
57      live_young_objects_size_(0),
58      live_old_pointer_objects_size_(0),
59      live_old_data_objects_size_(0),
60      live_code_objects_size_(0),
61      live_map_objects_size_(0),
62      live_cell_objects_size_(0),
63      live_lo_objects_size_(0),
64      live_bytes_(0),
65#endif
66      heap_(NULL),
67      code_flusher_(NULL),
68      encountered_weak_maps_(NULL) { }
69
70
71void MarkCompactCollector::CollectGarbage() {
72  // Make sure that Prepare() has been called. The individual steps below will
73  // update the state as they proceed.
74  ASSERT(state_ == PREPARE_GC);
75  ASSERT(encountered_weak_maps_ == Smi::FromInt(0));
76
77  // Prepare has selected whether to compact the old generation or not.
78  // Tell the tracer.
79  if (IsCompacting()) tracer_->set_is_compacting();
80
81  MarkLiveObjects();
82
83  if (FLAG_collect_maps) ClearNonLiveTransitions();
84
85  ClearWeakMaps();
86
87  SweepLargeObjectSpace();
88
89  if (IsCompacting()) {
90    GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_COMPACT);
91    EncodeForwardingAddresses();
92
93    heap()->MarkMapPointersAsEncoded(true);
94    UpdatePointers();
95    heap()->MarkMapPointersAsEncoded(false);
96    heap()->isolate()->pc_to_code_cache()->Flush();
97
98    RelocateObjects();
99  } else {
100    SweepSpaces();
101    heap()->isolate()->pc_to_code_cache()->Flush();
102  }
103
104  Finish();
105
106  // Save the count of marked objects remaining after the collection and
107  // null out the GC tracer.
108  previous_marked_count_ = tracer_->marked_count();
109  ASSERT(previous_marked_count_ == 0);
110  tracer_ = NULL;
111}
112
113
114void MarkCompactCollector::Prepare(GCTracer* tracer) {
115  // Rather than passing the tracer around we stash it in a member
116  // variable.
117  tracer_ = tracer;
118
119#ifdef DEBUG
120  ASSERT(state_ == IDLE);
121  state_ = PREPARE_GC;
122#endif
123  ASSERT(!FLAG_always_compact || !FLAG_never_compact);
124
125  compacting_collection_ =
126      FLAG_always_compact || force_compaction_ || compact_on_next_gc_;
127  compact_on_next_gc_ = false;
128
129  if (FLAG_never_compact) compacting_collection_ = false;
130  if (!heap()->map_space()->MapPointersEncodable())
131      compacting_collection_ = false;
132  if (FLAG_collect_maps) CreateBackPointers();
133#ifdef ENABLE_GDB_JIT_INTERFACE
134  if (FLAG_gdbjit) {
135    // If the GDBJIT interface is active, disable compaction.
136    compacting_collection_ = false;
137  }
138#endif
139
140  PagedSpaces spaces;
141  for (PagedSpace* space = spaces.next();
142       space != NULL; space = spaces.next()) {
143    space->PrepareForMarkCompact(compacting_collection_);
144  }
145
146#ifdef DEBUG
147  live_bytes_ = 0;
148  live_young_objects_size_ = 0;
149  live_old_pointer_objects_size_ = 0;
150  live_old_data_objects_size_ = 0;
151  live_code_objects_size_ = 0;
152  live_map_objects_size_ = 0;
153  live_cell_objects_size_ = 0;
154  live_lo_objects_size_ = 0;
155#endif
156}
157
158
159void MarkCompactCollector::Finish() {
160#ifdef DEBUG
161  ASSERT(state_ == SWEEP_SPACES || state_ == RELOCATE_OBJECTS);
162  state_ = IDLE;
163#endif
164  // The stub cache is not traversed during GC; clear the cache to
165  // force lazy re-initialization of it. This must be done after the
166  // GC, because it relies on the new address of certain old space
167  // objects (empty string, illegal builtin).
168  heap()->isolate()->stub_cache()->Clear();
169
170  heap()->external_string_table_.CleanUp();
171
172  // If we've just compacted old space there's no reason to check the
173  // fragmentation limit. Just return.
174  if (HasCompacted()) return;
175
176  // We compact the old generation on the next GC if it has gotten too
177  // fragmented (i.e., we could recover an expected amount of space by
178  // reclaiming the waste and free list blocks).
179  static const int kFragmentationLimit = 15;        // Percent.
180  static const int kFragmentationAllowed = 1 * MB;  // Absolute.
181  intptr_t old_gen_recoverable = 0;
182  intptr_t old_gen_used = 0;
183
184  OldSpaces spaces;
185  for (OldSpace* space = spaces.next(); space != NULL; space = spaces.next()) {
186    old_gen_recoverable += space->Waste() + space->AvailableFree();
187    old_gen_used += space->Size();
188  }
189
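  // For example, recovering 1.5 MB out of 8 MB of used old-generation space
  // gives roughly 18% fragmentation, which exceeds both thresholds, so the
  // next full collection would compact.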
190  int old_gen_fragmentation =
191      static_cast<int>((old_gen_recoverable * 100.0) / old_gen_used);
192  if (old_gen_fragmentation > kFragmentationLimit &&
193      old_gen_recoverable > kFragmentationAllowed) {
194    compact_on_next_gc_ = true;
195  }
196}
197
198
199// -------------------------------------------------------------------------
200// Phase 1: tracing and marking live objects.
201//   before: all objects are in normal state.
202//   after: a live object's map pointer is marked as '00'.
203
204// Marking all live objects in the heap as part of mark-sweep or mark-compact
205// collection.  Before marking, all objects are in their normal state.  After
206// marking, live objects' map pointers are marked indicating that the object
207// has been found reachable.
208//
209// The marking algorithm is a (mostly) depth-first (because of possible stack
210// overflow) traversal of the graph of objects reachable from the roots.  It
211// uses an explicit stack of pointers rather than recursion.  The young
212// generation's inactive ('from') space is used as a marking stack.  The
213// objects in the marking stack are the ones that have been reached and marked
214// but their children have not yet been visited.
215//
216// The marking stack can overflow during traversal.  In that case, we set an
217// overflow flag.  When the overflow flag is set, we continue marking objects
218// reachable from the objects on the marking stack, but no longer push them on
219// the marking stack.  Instead, we mark them as both marked and overflowed.
220// When the stack is in the overflowed state, objects marked as overflowed
221// have been reached and marked but their children have not been visited yet.
222// After emptying the marking stack, we clear the overflow flag and traverse
223// the heap looking for objects marked as overflowed, push them on the stack,
224// and continue with marking.  This process repeats until all reachable
225// objects have been marked.
226
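// CodeFlusher collects candidates for code flushing during marking. When
// ProcessCandidates() is called, candidates whose code was not marked have
// their code replaced by the lazy-compile builtin, so the function is
// recompiled if it is ever called again. The candidate lists are intrusive:
// JSFunctions are linked through their code entry field and
// SharedFunctionInfos through the next-code-flushing-candidate slot of their
// code objects (see the Get/SetNextCandidate helpers below).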
227class CodeFlusher {
228 public:
229  explicit CodeFlusher(Isolate* isolate)
230      : isolate_(isolate),
231        jsfunction_candidates_head_(NULL),
232        shared_function_info_candidates_head_(NULL) {}
233
234  void AddCandidate(SharedFunctionInfo* shared_info) {
235    SetNextCandidate(shared_info, shared_function_info_candidates_head_);
236    shared_function_info_candidates_head_ = shared_info;
237  }
238
239  void AddCandidate(JSFunction* function) {
240    ASSERT(function->unchecked_code() ==
241           function->unchecked_shared()->unchecked_code());
242
243    SetNextCandidate(function, jsfunction_candidates_head_);
244    jsfunction_candidates_head_ = function;
245  }
246
247  void ProcessCandidates() {
248    ProcessSharedFunctionInfoCandidates();
249    ProcessJSFunctionCandidates();
250  }
251
252 private:
253  void ProcessJSFunctionCandidates() {
254    Code* lazy_compile = isolate_->builtins()->builtin(Builtins::kLazyCompile);
255
256    JSFunction* candidate = jsfunction_candidates_head_;
257    JSFunction* next_candidate;
258    while (candidate != NULL) {
259      next_candidate = GetNextCandidate(candidate);
260
261      SharedFunctionInfo* shared = candidate->unchecked_shared();
262
263      Code* code = shared->unchecked_code();
264      if (!code->IsMarked()) {
265        shared->set_code(lazy_compile);
266        candidate->set_code(lazy_compile);
267      } else {
268        candidate->set_code(shared->unchecked_code());
269      }
270
271      candidate = next_candidate;
272    }
273
274    jsfunction_candidates_head_ = NULL;
275  }
276
277
278  void ProcessSharedFunctionInfoCandidates() {
279    Code* lazy_compile = isolate_->builtins()->builtin(Builtins::kLazyCompile);
280
281    SharedFunctionInfo* candidate = shared_function_info_candidates_head_;
282    SharedFunctionInfo* next_candidate;
283    while (candidate != NULL) {
284      next_candidate = GetNextCandidate(candidate);
285      SetNextCandidate(candidate, NULL);
286
287      Code* code = candidate->unchecked_code();
288      if (!code->IsMarked()) {
289        candidate->set_code(lazy_compile);
290      }
291
292      candidate = next_candidate;
293    }
294
295    shared_function_info_candidates_head_ = NULL;
296  }
297
298  static JSFunction** GetNextCandidateField(JSFunction* candidate) {
299    return reinterpret_cast<JSFunction**>(
300        candidate->address() + JSFunction::kCodeEntryOffset);
301  }
302
303  static JSFunction* GetNextCandidate(JSFunction* candidate) {
304    return *GetNextCandidateField(candidate);
305  }
306
307  static void SetNextCandidate(JSFunction* candidate,
308                               JSFunction* next_candidate) {
309    *GetNextCandidateField(candidate) = next_candidate;
310  }
311
312  static SharedFunctionInfo** GetNextCandidateField(
313      SharedFunctionInfo* candidate) {
314    Code* code = candidate->unchecked_code();
315    return reinterpret_cast<SharedFunctionInfo**>(
316        code->address() + Code::kNextCodeFlushingCandidateOffset);
317  }
318
319  static SharedFunctionInfo* GetNextCandidate(SharedFunctionInfo* candidate) {
320    return *GetNextCandidateField(candidate);
321  }
322
323  static void SetNextCandidate(SharedFunctionInfo* candidate,
324                               SharedFunctionInfo* next_candidate) {
325    *GetNextCandidateField(candidate) = next_candidate;
326  }
327
328  Isolate* isolate_;
329  JSFunction* jsfunction_candidates_head_;
330  SharedFunctionInfo* shared_function_info_candidates_head_;
331
332  DISALLOW_COPY_AND_ASSIGN(CodeFlusher);
333};
334
335
336MarkCompactCollector::~MarkCompactCollector() {
337  if (code_flusher_ != NULL) {
338    delete code_flusher_;
339    code_flusher_ = NULL;
340  }
341}
342
343
344static inline HeapObject* ShortCircuitConsString(Object** p) {
345  // Optimization: If the heap object pointed to by p is a non-symbol
346  // cons string whose right substring is HEAP->empty_string, update
347  // it in place to its left substring.  Return the updated value.
348  //
349  // Here we assume that if we change *p, we replace it with a heap object
350  // (i.e., the left substring of a cons string is always a heap object).
351  //
352  // The check performed is:
353  //   object->IsConsString() && !object->IsSymbol() &&
354  //   (ConsString::cast(object)->second() == HEAP->empty_string())
355  // except the maps for the object and its possible substrings might be
356  // marked.
357  HeapObject* object = HeapObject::cast(*p);
358  MapWord map_word = object->map_word();
359  map_word.ClearMark();
360  InstanceType type = map_word.ToMap()->instance_type();
361  if ((type & kShortcutTypeMask) != kShortcutTypeTag) return object;
362
363  Object* second = reinterpret_cast<ConsString*>(object)->unchecked_second();
364  Heap* heap = map_word.ToMap()->heap();
365  if (second != heap->raw_unchecked_empty_string()) {
366    return object;
367  }
368
369  // Since we don't have the object's start, it is impossible to update the
370  // page dirty marks. Therefore, we only replace the string with its left
371  // substring when page dirty marks do not change.
372  Object* first = reinterpret_cast<ConsString*>(object)->unchecked_first();
373  if (!heap->InNewSpace(object) && heap->InNewSpace(first)) return object;
374
375  *p = first;
376  return HeapObject::cast(first);
377}
378
379
380class StaticMarkingVisitor : public StaticVisitorBase {
381 public:
382  static inline void IterateBody(Map* map, HeapObject* obj) {
383    table_.GetVisitor(map)(map, obj);
384  }
385
386  static void Initialize() {
387    table_.Register(kVisitShortcutCandidate,
388                    &FixedBodyVisitor<StaticMarkingVisitor,
389                                      ConsString::BodyDescriptor,
390                                      void>::Visit);
391
392    table_.Register(kVisitConsString,
393                    &FixedBodyVisitor<StaticMarkingVisitor,
394                                      ConsString::BodyDescriptor,
395                                      void>::Visit);
396
397    table_.Register(kVisitSlicedString,
398                    &FixedBodyVisitor<StaticMarkingVisitor,
399                                      SlicedString::BodyDescriptor,
400                                      void>::Visit);
401
402    table_.Register(kVisitFixedArray,
403                    &FlexibleBodyVisitor<StaticMarkingVisitor,
404                                         FixedArray::BodyDescriptor,
405                                         void>::Visit);
406
407    table_.Register(kVisitFixedDoubleArray, DataObjectVisitor::Visit);
408
409    table_.Register(kVisitGlobalContext,
410                    &FixedBodyVisitor<StaticMarkingVisitor,
411                                      Context::MarkCompactBodyDescriptor,
412                                      void>::Visit);
413
414    table_.Register(kVisitByteArray, &DataObjectVisitor::Visit);
415    table_.Register(kVisitSeqAsciiString, &DataObjectVisitor::Visit);
416    table_.Register(kVisitSeqTwoByteString, &DataObjectVisitor::Visit);
417
418    table_.Register(kVisitJSWeakMap, &VisitJSWeakMap);
419
420    table_.Register(kVisitOddball,
421                    &FixedBodyVisitor<StaticMarkingVisitor,
422                                      Oddball::BodyDescriptor,
423                                      void>::Visit);
424    table_.Register(kVisitMap,
425                    &FixedBodyVisitor<StaticMarkingVisitor,
426                                      Map::BodyDescriptor,
427                                      void>::Visit);
428
429    table_.Register(kVisitCode, &VisitCode);
430
431    table_.Register(kVisitSharedFunctionInfo,
432                    &VisitSharedFunctionInfoAndFlushCode);
433
434    table_.Register(kVisitJSFunction,
435                    &VisitJSFunctionAndFlushCode);
436
437    table_.Register(kVisitJSRegExp,
438                    &VisitRegExpAndFlushCode);
439
440    table_.Register(kVisitPropertyCell,
441                    &FixedBodyVisitor<StaticMarkingVisitor,
442                                      JSGlobalPropertyCell::BodyDescriptor,
443                                      void>::Visit);
444
445    table_.RegisterSpecializations<DataObjectVisitor,
446                                   kVisitDataObject,
447                                   kVisitDataObjectGeneric>();
448
449    table_.RegisterSpecializations<JSObjectVisitor,
450                                   kVisitJSObject,
451                                   kVisitJSObjectGeneric>();
452
453    table_.RegisterSpecializations<StructObjectVisitor,
454                                   kVisitStruct,
455                                   kVisitStructGeneric>();
456  }
457
458  INLINE(static void VisitPointer(Heap* heap, Object** p)) {
459    MarkObjectByPointer(heap, p);
460  }
461
462  INLINE(static void VisitPointers(Heap* heap, Object** start, Object** end)) {
463    // Mark all objects pointed to in [start, end).
464    const int kMinRangeForMarkingRecursion = 64;
465    if (end - start >= kMinRangeForMarkingRecursion) {
466      if (VisitUnmarkedObjects(heap, start, end)) return;
467      // We are close to a stack overflow, so just mark the objects.
468    }
469    for (Object** p = start; p < end; p++) MarkObjectByPointer(heap, p);
470  }
471
472  static inline void VisitCodeTarget(Heap* heap, RelocInfo* rinfo) {
473    ASSERT(RelocInfo::IsCodeTarget(rinfo->rmode()));
474    Code* code = Code::GetCodeFromTargetAddress(rinfo->target_address());
475    if (FLAG_cleanup_code_caches_at_gc && code->is_inline_cache_stub()) {
476      IC::Clear(rinfo->pc());
477      // Please note that targets for cleared inline caches do not have to be
478      // marked since they are contained in HEAP->non_monomorphic_cache().
479    } else {
480      heap->mark_compact_collector()->MarkObject(code);
481    }
482  }
483
484  static void VisitGlobalPropertyCell(Heap* heap, RelocInfo* rinfo) {
485    ASSERT(rinfo->rmode() == RelocInfo::GLOBAL_PROPERTY_CELL);
486    Object* cell = rinfo->target_cell();
487    Object* old_cell = cell;
488    VisitPointer(heap, &cell);
489    if (cell != old_cell) {
490      rinfo->set_target_cell(reinterpret_cast<JSGlobalPropertyCell*>(cell));
491    }
492  }
493
494  static inline void VisitDebugTarget(Heap* heap, RelocInfo* rinfo) {
495    ASSERT((RelocInfo::IsJSReturn(rinfo->rmode()) &&
496            rinfo->IsPatchedReturnSequence()) ||
497           (RelocInfo::IsDebugBreakSlot(rinfo->rmode()) &&
498            rinfo->IsPatchedDebugBreakSlotSequence()));
499    HeapObject* code = Code::GetCodeFromTargetAddress(rinfo->call_address());
500    heap->mark_compact_collector()->MarkObject(code);
501  }
502
503  // Mark object pointed to by p.
504  INLINE(static void MarkObjectByPointer(Heap* heap, Object** p)) {
505    if (!(*p)->IsHeapObject()) return;
506    HeapObject* object = ShortCircuitConsString(p);
507    if (!object->IsMarked()) {
508      heap->mark_compact_collector()->MarkUnmarkedObject(object);
509    }
510  }
511
512
513  // Visit an unmarked object.
514  INLINE(static void VisitUnmarkedObject(MarkCompactCollector* collector,
515                                         HeapObject* obj)) {
516#ifdef DEBUG
517    ASSERT(Isolate::Current()->heap()->Contains(obj));
518    ASSERT(!obj->IsMarked());
519#endif
520    Map* map = obj->map();
521    collector->SetMark(obj);
522    // Mark the map pointer and the body.
523    if (!map->IsMarked()) collector->MarkUnmarkedObject(map);
524    IterateBody(map, obj);
525  }
526
527  // Visit all unmarked objects pointed to by [start, end).
528  // Returns false if the operation fails (lack of stack space).
529  static inline bool VisitUnmarkedObjects(Heap* heap,
530                                          Object** start,
531                                          Object** end) {
532    // Return false if we are close to the stack limit.
533    StackLimitCheck check(heap->isolate());
534    if (check.HasOverflowed()) return false;
535
536    MarkCompactCollector* collector = heap->mark_compact_collector();
537    // Visit the unmarked objects.
538    for (Object** p = start; p < end; p++) {
539      if (!(*p)->IsHeapObject()) continue;
540      HeapObject* obj = HeapObject::cast(*p);
541      if (obj->IsMarked()) continue;
542      VisitUnmarkedObject(collector, obj);
543    }
544    return true;
545  }
546
547  static inline void VisitExternalReference(Address* p) { }
548  static inline void VisitRuntimeEntry(RelocInfo* rinfo) { }
549
550 private:
551  class DataObjectVisitor {
552   public:
553    template<int size>
554    static void VisitSpecialized(Map* map, HeapObject* object) {
555    }
556
557    static void Visit(Map* map, HeapObject* object) {
558    }
559  };
560
561  typedef FlexibleBodyVisitor<StaticMarkingVisitor,
562                              JSObject::BodyDescriptor,
563                              void> JSObjectVisitor;
564
565  typedef FlexibleBodyVisitor<StaticMarkingVisitor,
566                              StructBodyDescriptor,
567                              void> StructObjectVisitor;
568
569  static void VisitJSWeakMap(Map* map, HeapObject* object) {
570    MarkCompactCollector* collector = map->heap()->mark_compact_collector();
571    JSWeakMap* weak_map = reinterpret_cast<JSWeakMap*>(object);
572
573    // Enqueue weak map in linked list of encountered weak maps.
574    ASSERT(weak_map->next() == Smi::FromInt(0));
575    weak_map->set_next(collector->encountered_weak_maps());
576    collector->set_encountered_weak_maps(weak_map);
577
578    // Skip visiting the backing hash table containing the mappings.
579    int object_size = JSWeakMap::BodyDescriptor::SizeOf(map, object);
580    BodyVisitorBase<StaticMarkingVisitor>::IteratePointers(
581        map->heap(),
582        object,
583        JSWeakMap::BodyDescriptor::kStartOffset,
584        JSWeakMap::kTableOffset);
585    BodyVisitorBase<StaticMarkingVisitor>::IteratePointers(
586        map->heap(),
587        object,
588        JSWeakMap::kTableOffset + kPointerSize,
589        object_size);
590
591    // Mark the backing hash table without pushing it on the marking stack.
592    ASSERT(!weak_map->unchecked_table()->IsMarked());
593    ASSERT(weak_map->unchecked_table()->map()->IsMarked());
594    collector->SetMark(weak_map->unchecked_table());
595  }
596
597  static void VisitCode(Map* map, HeapObject* object) {
598    reinterpret_cast<Code*>(object)->CodeIterateBody<StaticMarkingVisitor>(
599        map->heap());
600  }
601
602  // Code flushing support.
603
604  // How many collections a newly compiled code object will survive before
605  // being flushed.
606  static const int kCodeAgeThreshold = 5;
607
608  static const int kRegExpCodeThreshold = 5;
609
610  inline static bool HasSourceCode(Heap* heap, SharedFunctionInfo* info) {
611    Object* undefined = heap->raw_unchecked_undefined_value();
612    return (info->script() != undefined) &&
613        (reinterpret_cast<Script*>(info->script())->source() != undefined);
614  }
615
616
617  inline static bool IsCompiled(JSFunction* function) {
618    return function->unchecked_code() !=
619        function->GetIsolate()->builtins()->builtin(Builtins::kLazyCompile);
620  }
621
622  inline static bool IsCompiled(SharedFunctionInfo* function) {
623    return function->unchecked_code() !=
624        function->GetIsolate()->builtins()->builtin(Builtins::kLazyCompile);
625  }
626
627  inline static bool IsFlushable(Heap* heap, JSFunction* function) {
628    SharedFunctionInfo* shared_info = function->unchecked_shared();
629
630    // The code is either on the stack, in the compilation cache, or
631    // referenced by an optimized version of the function.
632    if (function->unchecked_code()->IsMarked()) {
633      shared_info->set_code_age(0);
634      return false;
635    }
636
637    // We do not flush code for optimized functions.
638    if (function->code() != shared_info->unchecked_code()) {
639      return false;
640    }
641
642    return IsFlushable(heap, shared_info);
643  }
644
645  inline static bool IsFlushable(Heap* heap, SharedFunctionInfo* shared_info) {
646    // The code is either on the stack, in the compilation cache, or
647    // referenced by an optimized version of the function.
648    if (shared_info->unchecked_code()->IsMarked()) {
649      shared_info->set_code_age(0);
650      return false;
651    }
652
653    // The function must be compiled and have its source code available
654    // so that it can be recompiled in case we need the function again.
655    if (!(shared_info->is_compiled() && HasSourceCode(heap, shared_info))) {
656      return false;
657    }
658
659    // We never flush code for Api functions.
660    Object* function_data = shared_info->function_data();
661    if (function_data->IsHeapObject() &&
662        (SafeMap(function_data)->instance_type() ==
663         FUNCTION_TEMPLATE_INFO_TYPE)) {
664      return false;
665    }
666
667    // Only flush code for functions.
668    if (shared_info->code()->kind() != Code::FUNCTION) {
669      return false;
670    }
671
672    // Function must be lazy compilable.
673    if (!shared_info->allows_lazy_compilation()) {
674      return false;
675    }
676
677    // If this is a full script wrapped in a function we do not flush the code.
678    if (shared_info->is_toplevel()) {
679      return false;
680    }
681
682    // Age this shared function info.
683    if (shared_info->code_age() < kCodeAgeThreshold) {
684      shared_info->set_code_age(shared_info->code_age() + 1);
685      return false;
686    }
687
688    return true;
689  }
690
691
692  static bool FlushCodeForFunction(Heap* heap, JSFunction* function) {
693    if (!IsFlushable(heap, function)) return false;
694
695    // This function's code looks flushable. But we have to postpone the
696    // decision until we see all functions that point to the same
697    // SharedFunctionInfo because some of them might be optimized.
698    // That would make the nonoptimized version of the code nonflushable,
699    // because it is required for bailing out from optimized code.
700    heap->mark_compact_collector()->code_flusher()->AddCandidate(function);
701    return true;
702  }
703
704
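  // Retrieve an object's map even when its map word has the mark and/or
  // overflow bits set.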
705  static inline Map* SafeMap(Object* obj) {
706    MapWord map_word = HeapObject::cast(obj)->map_word();
707    map_word.ClearMark();
708    map_word.ClearOverflow();
709    return map_word.ToMap();
710  }
711
712
713  static inline bool IsJSBuiltinsObject(Object* obj) {
714    return obj->IsHeapObject() &&
715        (SafeMap(obj)->instance_type() == JS_BUILTINS_OBJECT_TYPE);
716  }
717
718
719  static inline bool IsValidNotBuiltinContext(Object* ctx) {
720    if (!ctx->IsHeapObject()) return false;
721
722    Map* map = SafeMap(ctx);
723    Heap* heap = map->heap();
724    if (!(map == heap->raw_unchecked_function_context_map() ||
725          map == heap->raw_unchecked_catch_context_map() ||
726          map == heap->raw_unchecked_with_context_map() ||
727          map == heap->raw_unchecked_global_context_map())) {
728      return false;
729    }
730
731    Context* context = reinterpret_cast<Context*>(ctx);
732
733    if (IsJSBuiltinsObject(context->global())) {
734      return false;
735    }
736
737    return true;
738  }
739
740
741  static void VisitSharedFunctionInfoGeneric(Map* map, HeapObject* object) {
742    SharedFunctionInfo* shared = reinterpret_cast<SharedFunctionInfo*>(object);
743
744    if (shared->IsInobjectSlackTrackingInProgress()) shared->DetachInitialMap();
745
746    FixedBodyVisitor<StaticMarkingVisitor,
747                     SharedFunctionInfo::BodyDescriptor,
748                     void>::Visit(map, object);
749  }
750
751
752  static void UpdateRegExpCodeAgeAndFlush(Heap* heap,
753                                          JSRegExp* re,
754                                          bool is_ascii) {
755    // Make sure that the fixed array is in fact initialized on the RegExp.
756    // We could potentially trigger a GC when initializing the RegExp.
757    if (SafeMap(re->data())->instance_type() != FIXED_ARRAY_TYPE) return;
758
759    // Make sure this is a RegExp that actually contains code.
760    if (re->TypeTagUnchecked() != JSRegExp::IRREGEXP) return;
761
762    Object* code = re->DataAtUnchecked(JSRegExp::code_index(is_ascii));
763    if (!code->IsSmi() && SafeMap(code)->instance_type() == CODE_TYPE) {
764      // Save a copy that can be reinstated if we need the code again.
765      re->SetDataAtUnchecked(JSRegExp::saved_code_index(is_ascii),
766                             code,
767                             heap);
768      // Set a number in the 0-255 range to guarantee no smi overflow.
769      re->SetDataAtUnchecked(JSRegExp::code_index(is_ascii),
770                             Smi::FromInt(heap->sweep_generation() & 0xff),
771                             heap);
772    } else if (code->IsSmi()) {
773      int value = Smi::cast(code)->value();
774      // The regexp has not been compiled yet or there was a compilation error.
775      if (value == JSRegExp::kUninitializedValue ||
776          value == JSRegExp::kCompilationErrorValue) {
777        return;
778      }
779
780      // Check if we should flush now.
781      if (value == ((heap->sweep_generation() - kRegExpCodeThreshold) & 0xff)) {
782        re->SetDataAtUnchecked(JSRegExp::code_index(is_ascii),
783                               Smi::FromInt(JSRegExp::kUninitializedValue),
784                               heap);
785        re->SetDataAtUnchecked(JSRegExp::saved_code_index(is_ascii),
786                               Smi::FromInt(JSRegExp::kUninitializedValue),
787                               heap);
788      }
789    }
790  }
791
792
793  // Works by storing the current sweep_generation (as a smi) in the code
794  // slot of the RegExp's data array and keeping a copy around that can be
795  // reinstated if we reuse the RegExp before flushing.
796  // If the code has not been used for kRegExpCodeThreshold mark-sweep GCs,
797  // we flush it.
798  static void VisitRegExpAndFlushCode(Map* map, HeapObject* object) {
799    Heap* heap = map->heap();
800    MarkCompactCollector* collector = heap->mark_compact_collector();
801    if (!collector->is_code_flushing_enabled()) {
802      VisitJSRegExpFields(map, object);
803      return;
804    }
805    JSRegExp* re = reinterpret_cast<JSRegExp*>(object);
806    // Flush code or set age on both ascii and two byte code.
807    UpdateRegExpCodeAgeAndFlush(heap, re, true);
808    UpdateRegExpCodeAgeAndFlush(heap, re, false);
809    // Visit the fields of the RegExp, including the updated FixedArray.
810    VisitJSRegExpFields(map, object);
811  }
812
813
814  static void VisitSharedFunctionInfoAndFlushCode(Map* map,
815                                                  HeapObject* object) {
816    MarkCompactCollector* collector = map->heap()->mark_compact_collector();
817    if (!collector->is_code_flushing_enabled()) {
818      VisitSharedFunctionInfoGeneric(map, object);
819      return;
820    }
821    VisitSharedFunctionInfoAndFlushCodeGeneric(map, object, false);
822  }
823
824
825  static void VisitSharedFunctionInfoAndFlushCodeGeneric(
826      Map* map, HeapObject* object, bool known_flush_code_candidate) {
827    Heap* heap = map->heap();
828    SharedFunctionInfo* shared = reinterpret_cast<SharedFunctionInfo*>(object);
829
830    if (shared->IsInobjectSlackTrackingInProgress()) shared->DetachInitialMap();
831
832    if (!known_flush_code_candidate) {
833      known_flush_code_candidate = IsFlushable(heap, shared);
834      if (known_flush_code_candidate) {
835        heap->mark_compact_collector()->code_flusher()->AddCandidate(shared);
836      }
837    }
838
839    VisitSharedFunctionInfoFields(heap, object, known_flush_code_candidate);
840  }
841
842
843  static void VisitCodeEntry(Heap* heap, Address entry_address) {
844    Object* code = Code::GetObjectFromEntryAddress(entry_address);
845    Object* old_code = code;
846    VisitPointer(heap, &code);
847    if (code != old_code) {
848      Memory::Address_at(entry_address) =
849          reinterpret_cast<Code*>(code)->entry();
850    }
851  }
852
853
854  static void VisitJSFunctionAndFlushCode(Map* map, HeapObject* object) {
855    Heap* heap = map->heap();
856    MarkCompactCollector* collector = heap->mark_compact_collector();
857    if (!collector->is_code_flushing_enabled()) {
858      VisitJSFunction(map, object);
859      return;
860    }
861
862    JSFunction* jsfunction = reinterpret_cast<JSFunction*>(object);
863    // The function must have a valid context and not be a builtin.
864    bool flush_code_candidate = false;
865    if (IsValidNotBuiltinContext(jsfunction->unchecked_context())) {
866      flush_code_candidate = FlushCodeForFunction(heap, jsfunction);
867    }
868
869    if (!flush_code_candidate) {
870      collector->MarkObject(jsfunction->unchecked_shared()->unchecked_code());
871
872      if (jsfunction->unchecked_code()->kind() == Code::OPTIMIZED_FUNCTION) {
873        collector->MarkInlinedFunctionsCode(jsfunction->unchecked_code());
874      }
875    }
876
877    VisitJSFunctionFields(map,
878                          reinterpret_cast<JSFunction*>(object),
879                          flush_code_candidate);
880  }
881
882
883  static void VisitJSFunction(Map* map, HeapObject* object) {
884    VisitJSFunctionFields(map,
885                          reinterpret_cast<JSFunction*>(object),
886                          false);
887  }
888
889
890#define SLOT_ADDR(obj, offset) \
891  reinterpret_cast<Object**>((obj)->address() + offset)
892
893
894  static inline void VisitJSFunctionFields(Map* map,
895                                           JSFunction* object,
896                                           bool flush_code_candidate) {
897    Heap* heap = map->heap();
898    MarkCompactCollector* collector = heap->mark_compact_collector();
899
900    VisitPointers(heap,
901                  SLOT_ADDR(object, JSFunction::kPropertiesOffset),
902                  SLOT_ADDR(object, JSFunction::kCodeEntryOffset));
903
904    if (!flush_code_candidate) {
905      VisitCodeEntry(heap, object->address() + JSFunction::kCodeEntryOffset);
906    } else {
907      // Don't visit the code object.
908
909      // Visit the shared function info to avoid double checking of its
910      // flushability.
911      SharedFunctionInfo* shared_info = object->unchecked_shared();
912      if (!shared_info->IsMarked()) {
913        Map* shared_info_map = shared_info->map();
914        collector->SetMark(shared_info);
915        collector->MarkObject(shared_info_map);
916        VisitSharedFunctionInfoAndFlushCodeGeneric(shared_info_map,
917                                                   shared_info,
918                                                   true);
919      }
920    }
921
922    VisitPointers(heap,
923                  SLOT_ADDR(object,
924                            JSFunction::kCodeEntryOffset + kPointerSize),
925                  SLOT_ADDR(object, JSFunction::kNonWeakFieldsEndOffset));
926
927    // Don't visit the next function list field as it is a weak reference.
928  }
929
930  static inline void VisitJSRegExpFields(Map* map,
931                                         HeapObject* object) {
932    int last_property_offset =
933        JSRegExp::kSize + kPointerSize * map->inobject_properties();
934    VisitPointers(map->heap(),
935                  SLOT_ADDR(object, JSRegExp::kPropertiesOffset),
936                  SLOT_ADDR(object, last_property_offset));
937  }
938
939
940  static void VisitSharedFunctionInfoFields(Heap* heap,
941                                            HeapObject* object,
942                                            bool flush_code_candidate) {
943    VisitPointer(heap, SLOT_ADDR(object, SharedFunctionInfo::kNameOffset));
944
945    if (!flush_code_candidate) {
946      VisitPointer(heap, SLOT_ADDR(object, SharedFunctionInfo::kCodeOffset));
947    }
948
949    VisitPointers(heap,
950                  SLOT_ADDR(object, SharedFunctionInfo::kScopeInfoOffset),
951                  SLOT_ADDR(object, SharedFunctionInfo::kSize));
952  }
953
954  #undef SLOT_ADDR
955
956  typedef void (*Callback)(Map* map, HeapObject* object);
957
958  static VisitorDispatchTable<Callback> table_;
959};
960
961
962VisitorDispatchTable<StaticMarkingVisitor::Callback>
963  StaticMarkingVisitor::table_;
964
965
966class MarkingVisitor : public ObjectVisitor {
967 public:
968  explicit MarkingVisitor(Heap* heap) : heap_(heap) { }
969
970  void VisitPointer(Object** p) {
971    StaticMarkingVisitor::VisitPointer(heap_, p);
972  }
973
974  void VisitPointers(Object** start, Object** end) {
975    StaticMarkingVisitor::VisitPointers(heap_, start, end);
976  }
977
978 private:
979  Heap* heap_;
980};
981
982
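// Marks the code objects found on the stacks of archived threads so that
// this code is not flushed.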
983class CodeMarkingVisitor : public ThreadVisitor {
984 public:
985  explicit CodeMarkingVisitor(MarkCompactCollector* collector)
986      : collector_(collector) {}
987
988  void VisitThread(Isolate* isolate, ThreadLocalTop* top) {
989    collector_->PrepareThreadForCodeFlushing(isolate, top);
990  }
991
992 private:
993  MarkCompactCollector* collector_;
994};
995
996
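// Marks SharedFunctionInfo objects and their code when they are found via
// the slots of another object (e.g. the compilation cache), so that their
// code is not flushed.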
997class SharedFunctionInfoMarkingVisitor : public ObjectVisitor {
998 public:
999  explicit SharedFunctionInfoMarkingVisitor(MarkCompactCollector* collector)
1000      : collector_(collector) {}
1001
1002  void VisitPointers(Object** start, Object** end) {
1003    for (Object** p = start; p < end; p++) VisitPointer(p);
1004  }
1005
1006  void VisitPointer(Object** slot) {
1007    Object* obj = *slot;
1008    if (obj->IsSharedFunctionInfo()) {
1009      SharedFunctionInfo* shared = reinterpret_cast<SharedFunctionInfo*>(obj);
1010      collector_->MarkObject(shared->unchecked_code());
1011      collector_->MarkObject(shared);
1012    }
1013  }
1014
1015 private:
1016  MarkCompactCollector* collector_;
1017};
1018
1019
1020void MarkCompactCollector::MarkInlinedFunctionsCode(Code* code) {
1021  // For optimized functions we should retain both the non-optimized version
1022  // of the code and the non-optimized versions of all inlined functions.
1023  // This is required to support bailing out from inlined code.
1024  DeoptimizationInputData* data =
1025      reinterpret_cast<DeoptimizationInputData*>(
1026          code->unchecked_deoptimization_data());
1027
1028  FixedArray* literals = data->UncheckedLiteralArray();
1029
1030  for (int i = 0, count = data->InlinedFunctionCount()->value();
1031       i < count;
1032       i++) {
1033    JSFunction* inlined = reinterpret_cast<JSFunction*>(literals->get(i));
1034    MarkObject(inlined->unchecked_shared()->unchecked_code());
1035  }
1036}
1037
1038
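// Marks all code objects reachable from the given thread's stack frames so
// that none of this code is flushed.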
1039void MarkCompactCollector::PrepareThreadForCodeFlushing(Isolate* isolate,
1040                                                        ThreadLocalTop* top) {
1041  for (StackFrameIterator it(isolate, top); !it.done(); it.Advance()) {
1042    // Note: for a frame that has a pending lazy deoptimization,
1043    // StackFrame::unchecked_code will return a non-optimized code object for
1044    // the outermost function, while StackFrame::LookupCode will return the
1045    // actual optimized code object.
1046    StackFrame* frame = it.frame();
1047    Code* code = frame->unchecked_code();
1048    MarkObject(code);
1049    if (frame->is_optimized()) {
1050      MarkInlinedFunctionsCode(frame->LookupCode());
1051    }
1052  }
1053}
1054
1055
1056void MarkCompactCollector::PrepareForCodeFlushing() {
1057  ASSERT(heap() == Isolate::Current()->heap());
1058
1059  if (!FLAG_flush_code) {
1060    EnableCodeFlushing(false);
1061    return;
1062  }
1063
1064#ifdef ENABLE_DEBUGGER_SUPPORT
1065  if (heap()->isolate()->debug()->IsLoaded() ||
1066      heap()->isolate()->debug()->has_break_points()) {
1067    EnableCodeFlushing(false);
1068    return;
1069  }
1070#endif
1071  EnableCodeFlushing(true);
1072
1073  // Ensure that the empty descriptor array is marked. MarkDescriptorArray
1074  // relies on it being marked before any other descriptor array.
1075  MarkObject(heap()->raw_unchecked_empty_descriptor_array());
1076
1077  // Make sure we are not referencing the code from the stack.
1078  ASSERT(this == heap()->mark_compact_collector());
1079  PrepareThreadForCodeFlushing(heap()->isolate(),
1080                               heap()->isolate()->thread_local_top());
1081
1082  // Iterate the archived stacks in all threads to check if
1083  // the code is referenced.
1084  CodeMarkingVisitor code_marking_visitor(this);
1085  heap()->isolate()->thread_manager()->IterateArchivedThreads(
1086      &code_marking_visitor);
1087
1088  SharedFunctionInfoMarkingVisitor visitor(this);
1089  heap()->isolate()->compilation_cache()->IterateFunctions(&visitor);
1090  heap()->isolate()->handle_scope_implementer()->Iterate(&visitor);
1091
1092  ProcessMarkingStack();
1093}
1094
1095
1096// Visitor class for marking heap roots.
1097class RootMarkingVisitor : public ObjectVisitor {
1098 public:
1099  explicit RootMarkingVisitor(Heap* heap)
1100    : collector_(heap->mark_compact_collector()) { }
1101
1102  void VisitPointer(Object** p) {
1103    MarkObjectByPointer(p);
1104  }
1105
1106  void VisitPointers(Object** start, Object** end) {
1107    for (Object** p = start; p < end; p++) MarkObjectByPointer(p);
1108  }
1109
1110 private:
1111  void MarkObjectByPointer(Object** p) {
1112    if (!(*p)->IsHeapObject()) return;
1113
1114    // Replace flat cons strings in place.
1115    HeapObject* object = ShortCircuitConsString(p);
1116    if (object->IsMarked()) return;
1117
1118    Map* map = object->map();
1119    // Mark the object.
1120    collector_->SetMark(object);
1121
1122    // Mark the map pointer and body, and push them on the marking stack.
1123    collector_->MarkObject(map);
1124    StaticMarkingVisitor::IterateBody(map, object);
1125
1126    // Mark all the objects reachable from the map and body.  May leave
1127    // overflowed objects in the heap.
1128    collector_->EmptyMarkingStack();
1129  }
1130
1131  MarkCompactCollector* collector_;
1132};
1133
1134
1135// Helper class for pruning the symbol table.
1136class SymbolTableCleaner : public ObjectVisitor {
1137 public:
1138  explicit SymbolTableCleaner(Heap* heap)
1139    : heap_(heap), pointers_removed_(0) { }
1140
1141  virtual void VisitPointers(Object** start, Object** end) {
1142    // Visit all HeapObject pointers in [start, end).
1143    for (Object** p = start; p < end; p++) {
1144      if ((*p)->IsHeapObject() && !HeapObject::cast(*p)->IsMarked()) {
1145        // Check if the symbol being pruned is an external symbol. We need to
1146        // delete the associated external data as this symbol is going away.
1147
1148        // Since no objects have yet been moved we can safely access the map of
1149        // the object.
1150        if ((*p)->IsExternalString()) {
1151          heap_->FinalizeExternalString(String::cast(*p));
1152        }
1153        // Set the entry to null_value (as deleted).
1154        *p = heap_->raw_unchecked_null_value();
1155        pointers_removed_++;
1156      }
1157    }
1158  }
1159
1160  int PointersRemoved() {
1161    return pointers_removed_;
1162  }
1163
1164 private:
1165  Heap* heap_;
1166  int pointers_removed_;
1167};
1168
1169
1170// Implementation of WeakObjectRetainer for mark compact GCs. All marked objects
1171// are retained.
1172class MarkCompactWeakObjectRetainer : public WeakObjectRetainer {
1173 public:
1174  virtual Object* RetainAs(Object* object) {
1175    MapWord first_word = HeapObject::cast(object)->map_word();
1176    if (first_word.IsMarked()) {
1177      return object;
1178    } else {
1179      return NULL;
1180    }
1181  }
1182};
1183
1184
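// Marks the object and either pushes it on the marking stack or, for maps
// when map collection is enabled, marks the map's contents directly so that
// its transition links remain weak.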
1185void MarkCompactCollector::MarkUnmarkedObject(HeapObject* object) {
1186  ASSERT(!object->IsMarked());
1187  ASSERT(HEAP->Contains(object));
1188  if (object->IsMap()) {
1189    Map* map = Map::cast(object);
1190    if (FLAG_cleanup_code_caches_at_gc) {
1191      map->ClearCodeCache(heap());
1192    }
1193    SetMark(map);
1194
1195    // When map collection is enabled we have to mark through the map's
1196    // transitions in a special way to make the transition links weak.
1197    // Only maps for subclasses of JSReceiver can have transitions.
1198    STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
1199    if (FLAG_collect_maps && map->instance_type() >= FIRST_JS_RECEIVER_TYPE) {
1200      MarkMapContents(map);
1201    } else {
1202      marking_stack_.Push(map);
1203    }
1204  } else {
1205    SetMark(object);
1206    marking_stack_.Push(object);
1207  }
1208}
1209
1210
1211void MarkCompactCollector::MarkMapContents(Map* map) {
1212  // Mark the prototype transitions array but don't push it onto the marking
1213  // stack. This will make references from it weak. We will clean up dead
1214  // prototype transitions in ClearNonLiveTransitions.
1215  FixedArray* prototype_transitions = map->unchecked_prototype_transitions();
1216  if (!prototype_transitions->IsMarked()) SetMark(prototype_transitions);
1217
1218  Object* raw_descriptor_array =
1219      *HeapObject::RawField(map,
1220                            Map::kInstanceDescriptorsOrBitField3Offset);
1221  if (!raw_descriptor_array->IsSmi()) {
1222    MarkDescriptorArray(
1223        reinterpret_cast<DescriptorArray*>(raw_descriptor_array));
1224  }
1225
1226  // Mark the Object* fields of the Map.
1227  // Since the descriptor array has been marked already, it is fine
1228  // that one of these fields contains a pointer to it.
1229  Object** start_slot = HeapObject::RawField(map,
1230                                             Map::kPointerFieldsBeginOffset);
1231
1232  Object** end_slot = HeapObject::RawField(map, Map::kPointerFieldsEndOffset);
1233
1234  StaticMarkingVisitor::VisitPointers(map->heap(), start_slot, end_slot);
1235}
1236
1237
1238void MarkCompactCollector::MarkDescriptorArray(
1239    DescriptorArray* descriptors) {
1240  if (descriptors->IsMarked()) return;
1241  // Empty descriptor array is marked as a root before any maps are marked.
1242  ASSERT(descriptors != HEAP->raw_unchecked_empty_descriptor_array());
1243  SetMark(descriptors);
1244
1245  FixedArray* contents = reinterpret_cast<FixedArray*>(
1246      descriptors->get(DescriptorArray::kContentArrayIndex));
1247  ASSERT(contents->IsHeapObject());
1248  ASSERT(!contents->IsMarked());
1249  ASSERT(contents->IsFixedArray());
1250  ASSERT(contents->length() >= 2);
1251  SetMark(contents);
1252  // Contents contains (value, details) pairs.  If the details say that the type
1253  // of descriptor is MAP_TRANSITION, CONSTANT_TRANSITION,
1254  // EXTERNAL_ARRAY_TRANSITION or NULL_DESCRIPTOR, we don't mark the value as
1255  // live.  Only for MAP_TRANSITION, EXTERNAL_ARRAY_TRANSITION and
1256  // CONSTANT_TRANSITION is the value an Object* (a Map*).
1257  for (int i = 0; i < contents->length(); i += 2) {
1258    // If the pair (value, details) at index i, i+1 is not
1259    // a transition or null descriptor, mark the value.
1260    PropertyDetails details(Smi::cast(contents->get(i + 1)));
1261    if (details.type() < FIRST_PHANTOM_PROPERTY_TYPE) {
1262      HeapObject* object = reinterpret_cast<HeapObject*>(contents->get(i));
1263      if (object->IsHeapObject() && !object->IsMarked()) {
1264        SetMark(object);
1265        marking_stack_.Push(object);
1266      }
1267    }
1268  }
1269  // The descriptor array itself contains a pointer to its contents array,
1270  // but the contents array is already marked.
1271  marking_stack_.Push(descriptors);
1272}
1273
1274
1275void MarkCompactCollector::CreateBackPointers() {
1276  HeapObjectIterator iterator(heap()->map_space());
1277  for (HeapObject* next_object = iterator.next();
1278       next_object != NULL; next_object = iterator.next()) {
1279    if (next_object->IsMap()) {  // Could also be ByteArray on free list.
1280      Map* map = Map::cast(next_object);
1281      STATIC_ASSERT(LAST_TYPE == LAST_CALLABLE_SPEC_OBJECT_TYPE);
1282      if (map->instance_type() >= FIRST_JS_RECEIVER_TYPE) {
1283        map->CreateBackPointers();
1284      } else {
1285        ASSERT(map->instance_descriptors() == heap()->empty_descriptor_array());
1286      }
1287    }
1288  }
1289}
1290
1291
1292static int OverflowObjectSize(HeapObject* obj) {
1293  // Recover the normal map pointer; it might be marked as live and
1294  // overflowed.
1295  MapWord map_word = obj->map_word();
1296  map_word.ClearMark();
1297  map_word.ClearOverflow();
1298  return obj->SizeFromMap(map_word.ToMap());
1299}
1300
1301
1302class OverflowedObjectsScanner : public AllStatic {
1303 public:
1304  // Fill the marking stack with overflowed objects returned by the given
1305  // iterator.  Stop when the marking stack is filled or the end of the space
1306  // is reached, whichever comes first.
1307  template<class T>
1308  static inline void ScanOverflowedObjects(MarkCompactCollector* collector,
1309                                           T* it) {
1310    // The caller should ensure that the marking stack is initially not full,
1311    // so that we don't waste effort pointlessly scanning for objects.
1312    ASSERT(!collector->marking_stack_.is_full());
1313
1314    for (HeapObject* object = it->next(); object != NULL; object = it->next()) {
1315      if (object->IsOverflowed()) {
1316        object->ClearOverflow();
1317        ASSERT(object->IsMarked());
1318        ASSERT(HEAP->Contains(object));
1319        collector->marking_stack_.Push(object);
1320        if (collector->marking_stack_.is_full()) return;
1321      }
1322    }
1323  }
1324};
1325
1326
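// Predicate used when identifying weak global handles: a handle is
// considered dead if it points to an unmarked heap object.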
1327bool MarkCompactCollector::IsUnmarkedHeapObject(Object** p) {
1328  return (*p)->IsHeapObject() && !HeapObject::cast(*p)->IsMarked();
1329}
1330
1331
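// Mark the symbol table itself and everything reachable from its prefix,
// but not the symbol entries; unmarked symbols are pruned later by
// SymbolTableCleaner.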
1332void MarkCompactCollector::MarkSymbolTable() {
1333  SymbolTable* symbol_table = heap()->raw_unchecked_symbol_table();
1334  // Mark the symbol table itself.
1335  SetMark(symbol_table);
1336  // Explicitly mark the prefix.
1337  MarkingVisitor marker(heap());
1338  symbol_table->IteratePrefix(&marker);
1339  ProcessMarkingStack();
1340}
1341
1342
1343void MarkCompactCollector::MarkRoots(RootMarkingVisitor* visitor) {
1344  // Mark the heap roots including global variables, stack variables,
1345  // etc., and all objects reachable from them.
1346  heap()->IterateStrongRoots(visitor, VISIT_ONLY_STRONG);
1347
1348  // Handle the symbol table specially.
1349  MarkSymbolTable();
1350
1351  // There may be overflowed objects in the heap.  Visit them now.
1352  while (marking_stack_.overflowed()) {
1353    RefillMarkingStack();
1354    EmptyMarkingStack();
1355  }
1356}
1357
1358
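// Mark every member of each object group that already contains at least one
// marked object; groups without a marked member are kept so they can be
// retried after more objects have been marked.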
1359void MarkCompactCollector::MarkObjectGroups() {
1360  List<ObjectGroup*>* object_groups =
1361      heap()->isolate()->global_handles()->object_groups();
1362
1363  int last = 0;
1364  for (int i = 0; i < object_groups->length(); i++) {
1365    ObjectGroup* entry = object_groups->at(i);
1366    ASSERT(entry != NULL);
1367
1368    Object*** objects = entry->objects_;
1369    bool group_marked = false;
1370    for (size_t j = 0; j < entry->length_; j++) {
1371      Object* object = *objects[j];
1372      if (object->IsHeapObject() && HeapObject::cast(object)->IsMarked()) {
1373        group_marked = true;
1374        break;
1375      }
1376    }
1377
1378    if (!group_marked) {
1379      (*object_groups)[last++] = entry;
1380      continue;
1381    }
1382
1383    // An object in the group is marked, so mark all heap objects in
1384    // the group.
1385    for (size_t j = 0; j < entry->length_; ++j) {
1386      if ((*objects[j])->IsHeapObject()) {
1387        MarkObject(HeapObject::cast(*objects[j]));
1388      }
1389    }
1390
1391    // Once the entire group has been marked, dispose it because it's
1392    // not needed anymore.
1393    entry->Dispose();
1394  }
1395  object_groups->Rewind(last);
1396}
1397
1398
1399void MarkCompactCollector::MarkImplicitRefGroups() {
1400  List<ImplicitRefGroup*>* ref_groups =
1401      heap()->isolate()->global_handles()->implicit_ref_groups();
1402
1403  int last = 0;
1404  for (int i = 0; i < ref_groups->length(); i++) {
1405    ImplicitRefGroup* entry = ref_groups->at(i);
1406    ASSERT(entry != NULL);
1407
1408    if (!(*entry->parent_)->IsMarked()) {
1409      (*ref_groups)[last++] = entry;
1410      continue;
1411    }
1412
1413    Object*** children = entry->children_;
1414    // A parent object is marked, so mark all child heap objects.
1415    for (size_t j = 0; j < entry->length_; ++j) {
1416      if ((*children[j])->IsHeapObject()) {
1417        MarkObject(HeapObject::cast(*children[j]));
1418      }
1419    }
1420
1421    // Once the entire group has been marked, dispose it because it's
1422    // not needed anymore.
1423    entry->Dispose();
1424  }
1425  ref_groups->Rewind(last);
1426}
1427
1428
1429// Mark all objects reachable from the objects on the marking stack.
1430// Before: the marking stack contains zero or more heap object pointers.
1431// After: the marking stack is empty, and all objects reachable from the
1432// marking stack have been marked, or are overflowed in the heap.
1433void MarkCompactCollector::EmptyMarkingStack() {
1434  while (!marking_stack_.is_empty()) {
1435    while (!marking_stack_.is_empty()) {
1436      HeapObject* object = marking_stack_.Pop();
1437      ASSERT(object->IsHeapObject());
1438      ASSERT(heap()->Contains(object));
1439      ASSERT(object->IsMarked());
1440      ASSERT(!object->IsOverflowed());
1441
1442      // Because the object is marked, we have to recover the original map
1443      // pointer and use it to mark the object's body.
1444      MapWord map_word = object->map_word();
1445      map_word.ClearMark();
1446      Map* map = map_word.ToMap();
1447      MarkObject(map);
1448
1449      StaticMarkingVisitor::IterateBody(map, object);
1450    }
1451
1452    // Process encountered weak maps, mark objects that are only reachable
1453    // from those weak maps, and repeat until a fixed point is reached.
1454    ProcessWeakMaps();
1455  }
1456}
1457
1458
1459// Sweep the heap for overflowed objects, clear their overflow bits, and
1460// push them on the marking stack.  Stop early if the marking stack fills
1461// before sweeping completes.  If sweeping completes, there are no remaining
1462// overflowed objects in the heap so the overflow flag on the marking stack
1463// is cleared.
1464void MarkCompactCollector::RefillMarkingStack() {
1465  ASSERT(marking_stack_.overflowed());
1466
1467  SemiSpaceIterator new_it(heap()->new_space(), &OverflowObjectSize);
1468  OverflowedObjectsScanner::ScanOverflowedObjects(this, &new_it);
1469  if (marking_stack_.is_full()) return;
1470
1471  HeapObjectIterator old_pointer_it(heap()->old_pointer_space(),
1472                                    &OverflowObjectSize);
1473  OverflowedObjectsScanner::ScanOverflowedObjects(this, &old_pointer_it);
1474  if (marking_stack_.is_full()) return;
1475
1476  HeapObjectIterator old_data_it(heap()->old_data_space(), &OverflowObjectSize);
1477  OverflowedObjectsScanner::ScanOverflowedObjects(this, &old_data_it);
1478  if (marking_stack_.is_full()) return;
1479
1480  HeapObjectIterator code_it(heap()->code_space(), &OverflowObjectSize);
1481  OverflowedObjectsScanner::ScanOverflowedObjects(this, &code_it);
1482  if (marking_stack_.is_full()) return;
1483
1484  HeapObjectIterator map_it(heap()->map_space(), &OverflowObjectSize);
1485  OverflowedObjectsScanner::ScanOverflowedObjects(this, &map_it);
1486  if (marking_stack_.is_full()) return;
1487
1488  HeapObjectIterator cell_it(heap()->cell_space(), &OverflowObjectSize);
1489  OverflowedObjectsScanner::ScanOverflowedObjects(this, &cell_it);
1490  if (marking_stack_.is_full()) return;
1491
1492  LargeObjectIterator lo_it(heap()->lo_space(), &OverflowObjectSize);
1493  OverflowedObjectsScanner::ScanOverflowedObjects(this, &lo_it);
1494  if (marking_stack_.is_full()) return;
1495
1496  marking_stack_.clear_overflowed();
1497}
1498
1499
1500// Mark all objects reachable (transitively) from objects on the marking
1501// stack.  Before: the marking stack contains zero or more heap object
1502// pointers.  After: the marking stack is empty and there are no overflowed
1503// objects in the heap.
1504void MarkCompactCollector::ProcessMarkingStack() {
1505  EmptyMarkingStack();
1506  while (marking_stack_.overflowed()) {
1507    RefillMarkingStack();
1508    EmptyMarkingStack();
1509  }
1510}
1511
1512
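// Mark objects reachable through the embedder-supplied object groups and
// implicit reference groups.  Marking the members of one group can make the
// objects of another group reachable, so both steps are repeated until the
// marking stack stays empty (a simple fixed-point iteration).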
1513void MarkCompactCollector::ProcessExternalMarking() {
1514  bool work_to_do = true;
1515  ASSERT(marking_stack_.is_empty());
1516  while (work_to_do) {
1517    MarkObjectGroups();
1518    MarkImplicitRefGroups();
1519    work_to_do = !marking_stack_.is_empty();
1520    ProcessMarkingStack();
1521  }
1522}
1523
1524
1525void MarkCompactCollector::MarkLiveObjects() {
1526  GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_MARK);
1527  // The recursive GC marker detects when it is nearing stack overflow,
1528  // and switches to a different marking system.  JS interrupts interfere
1529  // with the C stack limit check.
1530  PostponeInterruptsScope postpone(heap()->isolate());
1531
1532#ifdef DEBUG
1533  ASSERT(state_ == PREPARE_GC);
1534  state_ = MARK_LIVE_OBJECTS;
1535#endif
1536  // The to space contains live objects, the from space is used as a marking
1537  // stack.
1538  marking_stack_.Initialize(heap()->new_space()->FromSpaceLow(),
1539                            heap()->new_space()->FromSpaceHigh());
1540
1541  ASSERT(!marking_stack_.overflowed());
1542
1543  PrepareForCodeFlushing();
1544
1545  RootMarkingVisitor root_visitor(heap());
1546  MarkRoots(&root_visitor);
1547
  // The objects reachable from the roots are marked, while objects not yet
  // reached are still unmarked.  Mark objects reachable due to host
  // application-specific logic.
1551  ProcessExternalMarking();
1552
  // The objects reachable from the roots or object groups are marked, while
  // objects not yet reached are still unmarked.  Mark objects reachable
  // only from weak global handles.
1556  //
1557  // First we identify nonlive weak handles and mark them as pending
1558  // destruction.
1559  heap()->isolate()->global_handles()->IdentifyWeakHandles(
1560      &IsUnmarkedHeapObject);
1561  // Then we mark the objects and process the transitive closure.
1562  heap()->isolate()->global_handles()->IterateWeakRoots(&root_visitor);
1563  while (marking_stack_.overflowed()) {
1564    RefillMarkingStack();
1565    EmptyMarkingStack();
1566  }
1567
1568  // Repeat host application specific marking to mark unmarked objects
1569  // reachable from the weak roots.
1570  ProcessExternalMarking();
1571
  // Object literal map caches reference symbols (cache keys) and maps
  // (cache values). At this point, still-useful maps have already been
  // marked. Mark the keys for the live values before we process the
  // symbol table.
1576  ProcessMapCaches();
1577
1578  // Prune the symbol table removing all symbols only pointed to by the
1579  // symbol table.  Cannot use symbol_table() here because the symbol
1580  // table is marked.
1581  SymbolTable* symbol_table = heap()->raw_unchecked_symbol_table();
1582  SymbolTableCleaner v(heap());
1583  symbol_table->IterateElements(&v);
1584  symbol_table->ElementsRemoved(v.PointersRemoved());
1585  heap()->external_string_table_.Iterate(&v);
1586  heap()->external_string_table_.CleanUp();
1587
1588  // Process the weak references.
1589  MarkCompactWeakObjectRetainer mark_compact_object_retainer;
1590  heap()->ProcessWeakReferences(&mark_compact_object_retainer);
1591
1592  // Remove object groups after marking phase.
1593  heap()->isolate()->global_handles()->RemoveObjectGroups();
1594  heap()->isolate()->global_handles()->RemoveImplicitRefGroups();
1595
1596  // Flush code from collected candidates.
1597  if (is_code_flushing_enabled()) {
1598    code_flusher_->ProcessCandidates();
1599  }
1600
1601  // Clean up dead objects from the runtime profiler.
1602  heap()->isolate()->runtime_profiler()->RemoveDeadSamples();
1603}
1604
1605
1606void MarkCompactCollector::ProcessMapCaches() {
1607  Object* raw_context = heap()->global_contexts_list_;
1608  while (raw_context != heap()->undefined_value()) {
1609    Context* context = reinterpret_cast<Context*>(raw_context);
1610    if (context->IsMarked()) {
1611      HeapObject* raw_map_cache =
1612          HeapObject::cast(context->get(Context::MAP_CACHE_INDEX));
1613      // A map cache may be reachable from the stack. In this case
1614      // it's already transitively marked and it's too late to clean
1615      // up its parts.
1616      if (!raw_map_cache->IsMarked() &&
1617          raw_map_cache != heap()->undefined_value()) {
1618        MapCache* map_cache = reinterpret_cast<MapCache*>(raw_map_cache);
1619        int existing_elements = map_cache->NumberOfElements();
1620        int used_elements = 0;
1621        for (int i = MapCache::kElementsStartIndex;
1622             i < map_cache->length();
1623             i += MapCache::kEntrySize) {
1624          Object* raw_key = map_cache->get(i);
1625          if (raw_key == heap()->undefined_value() ||
1626              raw_key == heap()->null_value()) continue;
1627          STATIC_ASSERT(MapCache::kEntrySize == 2);
1628          Object* raw_map = map_cache->get(i + 1);
1629          if (raw_map->IsHeapObject() &&
1630              HeapObject::cast(raw_map)->IsMarked()) {
1631            ++used_elements;
1632          } else {
1633            // Delete useless entries with unmarked maps.
1634            ASSERT(raw_map->IsMap());
1635            map_cache->set_null_unchecked(heap(), i);
1636            map_cache->set_null_unchecked(heap(), i + 1);
1637          }
1638        }
1639        if (used_elements == 0) {
1640          context->set(Context::MAP_CACHE_INDEX, heap()->undefined_value());
1641        } else {
1642          // Note: we don't actually shrink the cache here to avoid
1643          // extra complexity during GC. We rely on subsequent cache
1644          // usages (EnsureCapacity) to do this.
1645          map_cache->ElementsRemoved(existing_elements - used_elements);
1646          MarkObject(map_cache);
1647        }
1648      }
1649    }
1650    // Move to next element in the list.
1651    raw_context = context->get(Context::NEXT_CONTEXT_LINK);
1652  }
1653  ProcessMarkingStack();
1654}
1655
1656
1657#ifdef DEBUG
1658void MarkCompactCollector::UpdateLiveObjectCount(HeapObject* obj) {
1659  live_bytes_ += obj->Size();
1660  if (heap()->new_space()->Contains(obj)) {
1661    live_young_objects_size_ += obj->Size();
1662  } else if (heap()->map_space()->Contains(obj)) {
1663    ASSERT(obj->IsMap());
1664    live_map_objects_size_ += obj->Size();
1665  } else if (heap()->cell_space()->Contains(obj)) {
1666    ASSERT(obj->IsJSGlobalPropertyCell());
1667    live_cell_objects_size_ += obj->Size();
1668  } else if (heap()->old_pointer_space()->Contains(obj)) {
1669    live_old_pointer_objects_size_ += obj->Size();
1670  } else if (heap()->old_data_space()->Contains(obj)) {
1671    live_old_data_objects_size_ += obj->Size();
1672  } else if (heap()->code_space()->Contains(obj)) {
1673    live_code_objects_size_ += obj->Size();
1674  } else if (heap()->lo_space()->Contains(obj)) {
1675    live_lo_objects_size_ += obj->Size();
1676  } else {
1677    UNREACHABLE();
1678  }
1679}
1680#endif  // DEBUG
1681
1682
1683void MarkCompactCollector::SweepLargeObjectSpace() {
1684#ifdef DEBUG
1685  ASSERT(state_ == MARK_LIVE_OBJECTS);
1686  state_ =
1687      compacting_collection_ ? ENCODE_FORWARDING_ADDRESSES : SWEEP_SPACES;
1688#endif
1689  // Deallocate unmarked objects and clear marked bits for marked objects.
1690  heap()->lo_space()->FreeUnmarkedObjects();
1691}
1692
1693
1694// Safe to use during marking phase only.
1695bool MarkCompactCollector::SafeIsMap(HeapObject* object) {
1696  MapWord metamap = object->map_word();
1697  metamap.ClearMark();
1698  return metamap.ToMap()->instance_type() == MAP_TYPE;
1699}
1700
1701
1702void MarkCompactCollector::ClearNonLiveTransitions() {
1703  HeapObjectIterator map_iterator(heap()->map_space(), &SizeOfMarkedObject);
1704  // Iterate over the map space, setting map transitions that go from
1705  // a marked map to an unmarked map to null transitions.  At the same time,
1706  // set all the prototype fields of maps back to their original value,
1707  // dropping the back pointers temporarily stored in the prototype field.
1708  // Setting the prototype field requires following the linked list of
1709  // back pointers, reversing them all at once.  This allows us to find
1710  // those maps with map transitions that need to be nulled, and only
1711  // scan the descriptor arrays of those maps, not all maps.
1712  // All of these actions are carried out only on maps of JSObjects
1713  // and related subtypes.
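  // For illustration (a hypothetical chain): if the back pointers stored in
  // the prototype fields form the chain
  //     mapC -> mapB -> mapA -> <real prototype P>,
  // the first walk below simply finds P, and the second walk stores P back
  // into the prototype fields of mapC, mapB and mapA; if mapC is dead and
  // mapB is the first live map on the chain, mapB's transitions to dead maps
  // are cleared at that point.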
1714  for (HeapObject* obj = map_iterator.next();
1715       obj != NULL; obj = map_iterator.next()) {
1716    Map* map = reinterpret_cast<Map*>(obj);
1717    if (!map->IsMarked() && map->IsByteArray()) continue;
1718
1719    ASSERT(SafeIsMap(map));
1720    // Only JSObject and subtypes have map transitions and back pointers.
1721    STATIC_ASSERT(LAST_TYPE == LAST_CALLABLE_SPEC_OBJECT_TYPE);
1722    if (map->instance_type() < FIRST_JS_RECEIVER_TYPE) continue;
1723
1724    if (map->IsMarked() && map->attached_to_shared_function_info()) {
1725      // This map is used for inobject slack tracking and has been detached
1726      // from SharedFunctionInfo during the mark phase.
1727      // Since it survived the GC, reattach it now.
1728      map->unchecked_constructor()->unchecked_shared()->AttachInitialMap(map);
1729    }
1730
1731    // Clear dead prototype transitions.
1732    int number_of_transitions = map->NumberOfProtoTransitions();
1733    if (number_of_transitions > 0) {
1734      FixedArray* prototype_transitions =
1735          map->unchecked_prototype_transitions();
1736      int new_number_of_transitions = 0;
1737      const int header = Map::kProtoTransitionHeaderSize;
1738      const int proto_offset =
1739          header + Map::kProtoTransitionPrototypeOffset;
1740      const int map_offset = header + Map::kProtoTransitionMapOffset;
1741      const int step = Map::kProtoTransitionElementsPerEntry;
1742      for (int i = 0; i < number_of_transitions; i++) {
1743        Object* prototype = prototype_transitions->get(proto_offset + i * step);
1744        Object* cached_map = prototype_transitions->get(map_offset + i * step);
1745        if (HeapObject::cast(prototype)->IsMarked() &&
1746            HeapObject::cast(cached_map)->IsMarked()) {
1747          if (new_number_of_transitions != i) {
1748            prototype_transitions->set_unchecked(
1749                heap_,
1750                proto_offset + new_number_of_transitions * step,
1751                prototype,
1752                UPDATE_WRITE_BARRIER);
1753            prototype_transitions->set_unchecked(
1754                heap_,
1755                map_offset + new_number_of_transitions * step,
1756                cached_map,
1757                SKIP_WRITE_BARRIER);
1758          }
1759          new_number_of_transitions++;
1760        }
1761      }
1762
1763      // Fill slots that became free with undefined value.
1764      Object* undefined = heap()->raw_unchecked_undefined_value();
1765      for (int i = new_number_of_transitions * step;
1766           i < number_of_transitions * step;
1767           i++) {
1768        prototype_transitions->set_unchecked(heap_,
1769                                             header + i,
1770                                             undefined,
1771                                             SKIP_WRITE_BARRIER);
1772      }
1773      map->SetNumberOfProtoTransitions(new_number_of_transitions);
1774    }
1775
1776    // Follow the chain of back pointers to find the prototype.
1777    Map* current = map;
1778    while (SafeIsMap(current)) {
1779      current = reinterpret_cast<Map*>(current->prototype());
1780      ASSERT(current->IsHeapObject());
1781    }
1782    Object* real_prototype = current;
1783
1784    // Follow back pointers, setting them to prototype,
1785    // clearing map transitions when necessary.
1786    current = map;
1787    bool on_dead_path = !current->IsMarked();
1788    Object* next;
1789    while (SafeIsMap(current)) {
1790      next = current->prototype();
1791      // There should never be a dead map above a live map.
1792      ASSERT(on_dead_path || current->IsMarked());
1793
1794      // A live map above a dead map indicates a dead transition.
1795      // This test will always be false on the first iteration.
1796      if (on_dead_path && current->IsMarked()) {
1797        on_dead_path = false;
1798        current->ClearNonLiveTransitions(heap(), real_prototype);
1799      }
1800      *HeapObject::RawField(current, Map::kPrototypeOffset) =
1801          real_prototype;
1802      current = reinterpret_cast<Map*>(next);
1803    }
1804  }
1805}
1806
1807
1808void MarkCompactCollector::ProcessWeakMaps() {
1809  Object* weak_map_obj = encountered_weak_maps();
1810  while (weak_map_obj != Smi::FromInt(0)) {
1811    ASSERT(HeapObject::cast(weak_map_obj)->IsMarked());
1812    JSWeakMap* weak_map = reinterpret_cast<JSWeakMap*>(weak_map_obj);
1813    ObjectHashTable* table = weak_map->unchecked_table();
1814    for (int i = 0; i < table->Capacity(); i++) {
1815      if (HeapObject::cast(table->KeyAt(i))->IsMarked()) {
1816        Object* value = table->get(table->EntryToValueIndex(i));
1817        StaticMarkingVisitor::MarkObjectByPointer(heap(), &value);
1818        table->set_unchecked(heap(),
1819                             table->EntryToValueIndex(i),
1820                             value,
1821                             UPDATE_WRITE_BARRIER);
1822      }
1823    }
1824    weak_map_obj = weak_map->next();
1825  }
1826}
1827
1828
1829void MarkCompactCollector::ClearWeakMaps() {
1830  Object* weak_map_obj = encountered_weak_maps();
1831  while (weak_map_obj != Smi::FromInt(0)) {
1832    ASSERT(HeapObject::cast(weak_map_obj)->IsMarked());
1833    JSWeakMap* weak_map = reinterpret_cast<JSWeakMap*>(weak_map_obj);
1834    ObjectHashTable* table = weak_map->unchecked_table();
1835    for (int i = 0; i < table->Capacity(); i++) {
1836      if (!HeapObject::cast(table->KeyAt(i))->IsMarked()) {
1837        table->RemoveEntry(i, heap());
1838      }
1839    }
1840    weak_map_obj = weak_map->next();
1841    weak_map->set_next(Smi::FromInt(0));
1842  }
1843  set_encountered_weak_maps(Smi::FromInt(0));
1844}
1845
1846// -------------------------------------------------------------------------
1847// Phase 2: Encode forwarding addresses.
1848// When compacting, forwarding addresses for objects in old space and map
1849// space are encoded in their map pointer word (along with an encoding of
1850// their map pointers).
1851//
// The exact encoding is described in the comments for class MapWord in
1853// objects.h.
1854//
1855// An address range [start, end) can have both live and non-live objects.
1856// Maximal non-live regions are marked so they can be skipped on subsequent
1857// sweeps of the heap.  A distinguished map-pointer encoding is used to mark
1858// free regions of one-word size (in which case the next word is the start
1859// of a live object).  A second distinguished map-pointer encoding is used
1860// to mark free regions larger than one word, and the size of the free
1861// region (including the first word) is written to the second word of the
1862// region.
1863//
1864// Any valid map page offset must lie in the object area of the page, so map
1865// page offsets less than Page::kObjectStartOffset are invalid.  We use a
1866// pair of distinguished invalid map encodings (for single word and multiple
1867// words) to indicate free regions in the page found during computation of
1868// forwarding addresses and skipped over in subsequent sweeps.
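//
// For illustration, a page fragment might look like this after the encoding
// pass (sizes are hypothetical):
//
//   [live obj][kIntSize gap][live obj][40-byte free region][live obj]
//              ^                       ^
//              kSingleFreeEncoding     kMultiFreeEncoding, size 40 stored in
//                                      the following word
//
// Both encodings use map page offsets below Page::kObjectStartOffset, so they
// cannot be mistaken for the encoded map word of a live object.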
1869
1870
1871// Encode a free region, defined by the given start address and size, in the
1872// first word or two of the region.
1873void EncodeFreeRegion(Address free_start, int free_size) {
1874  ASSERT(free_size >= kIntSize);
1875  if (free_size == kIntSize) {
1876    Memory::uint32_at(free_start) = MarkCompactCollector::kSingleFreeEncoding;
1877  } else {
1878    ASSERT(free_size >= 2 * kIntSize);
1879    Memory::uint32_at(free_start) = MarkCompactCollector::kMultiFreeEncoding;
1880    Memory::int_at(free_start + kIntSize) = free_size;
1881  }
1882
1883#ifdef DEBUG
1884  // Zap the body of the free region.
1885  if (FLAG_enable_slow_asserts) {
1886    for (int offset = 2 * kIntSize;
1887         offset < free_size;
1888         offset += kPointerSize) {
1889      Memory::Address_at(free_start + offset) = kZapValue;
1890    }
1891  }
1892#endif
1893}
1894
1895
// Try to promote all objects in new space.  Heap numbers and sequential
// strings are promoted to the old data space and all others to the old
// pointer space; objects too large for a paged space are not promoted and
// are instead relocated within new space.
1899inline MaybeObject* MCAllocateFromNewSpace(Heap* heap,
1900                                           HeapObject* object,
1901                                           int object_size) {
1902  MaybeObject* forwarded;
1903  if (object_size > heap->MaxObjectSizeInPagedSpace()) {
1904    forwarded = Failure::Exception();
1905  } else {
1906    OldSpace* target_space = heap->TargetSpace(object);
1907    ASSERT(target_space == heap->old_pointer_space() ||
1908           target_space == heap->old_data_space());
1909    forwarded = target_space->MCAllocateRaw(object_size);
1910  }
1911  Object* result;
1912  if (!forwarded->ToObject(&result)) {
1913    result = heap->new_space()->MCAllocateRaw(object_size)->ToObjectUnchecked();
1914  }
1915  return result;
1916}
1917
1918
1919// Allocation functions for the paged spaces call the space's MCAllocateRaw.
1920MUST_USE_RESULT inline MaybeObject* MCAllocateFromOldPointerSpace(
    Heap* heap,
1922    HeapObject* ignore,
1923    int object_size) {
1924  return heap->old_pointer_space()->MCAllocateRaw(object_size);
1925}
1926
1927
1928MUST_USE_RESULT inline MaybeObject* MCAllocateFromOldDataSpace(
1929    Heap* heap,
1930    HeapObject* ignore,
1931    int object_size) {
1932  return heap->old_data_space()->MCAllocateRaw(object_size);
1933}
1934
1935
1936MUST_USE_RESULT inline MaybeObject* MCAllocateFromCodeSpace(
1937    Heap* heap,
1938    HeapObject* ignore,
1939    int object_size) {
1940  return heap->code_space()->MCAllocateRaw(object_size);
1941}
1942
1943
1944MUST_USE_RESULT inline MaybeObject* MCAllocateFromMapSpace(
1945    Heap* heap,
1946    HeapObject* ignore,
1947    int object_size) {
1948  return heap->map_space()->MCAllocateRaw(object_size);
1949}
1950
1951
1952MUST_USE_RESULT inline MaybeObject* MCAllocateFromCellSpace(
1953    Heap* heap, HeapObject* ignore, int object_size) {
1954  return heap->cell_space()->MCAllocateRaw(object_size);
1955}
1956
1957
1958// The forwarding address is encoded at the same offset as the current
1959// to-space object, but in from space.
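// For example, an object that lives at offset 0x40 from the start of to space
// (0x40 is an illustrative value) has its new address written to
// FromSpaceLow() + 0x40, where the relocation phase can look it up without
// touching the object itself.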
1960inline void EncodeForwardingAddressInNewSpace(Heap* heap,
1961                                              HeapObject* old_object,
1962                                              int object_size,
1963                                              Object* new_object,
1964                                              int* ignored) {
1965  int offset =
1966      heap->new_space()->ToSpaceOffsetForAddress(old_object->address());
1967  Memory::Address_at(heap->new_space()->FromSpaceLow() + offset) =
1968      HeapObject::cast(new_object)->address();
1969}
1970
1971
1972// The forwarding address is encoded in the map pointer of the object as an
1973// offset (in terms of live bytes) from the address of the first live object
1974// in the page.
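// For example, if the live objects preceding an object on its page total 64
// bytes (an illustrative number), the object's map word is replaced by an
// encoding of its map's address together with the offset 64; combined with
// the page's mc_first_forwarded address this is enough to reconstruct the
// object's new location during relocation.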
1975inline void EncodeForwardingAddressInPagedSpace(Heap* heap,
1976                                                HeapObject* old_object,
1977                                                int object_size,
1978                                                Object* new_object,
1979                                                int* offset) {
1980  // Record the forwarding address of the first live object if necessary.
1981  if (*offset == 0) {
1982    Page::FromAddress(old_object->address())->mc_first_forwarded =
1983        HeapObject::cast(new_object)->address();
1984  }
1985
1986  MapWord encoding =
1987      MapWord::EncodeAddress(old_object->map()->address(), *offset);
1988  old_object->set_map_word(encoding);
1989  *offset += object_size;
1990  ASSERT(*offset <= Page::kObjectAreaSize);
1991}
1992
1993
1994// Most non-live objects are ignored.
1995inline void IgnoreNonLiveObject(HeapObject* object, Isolate* isolate) {}
1996
1997
// Function template that, given a range of addresses (e.g., a semispace or a
1999// paged space page), iterates through the objects in the range to clear
2000// mark bits and compute and encode forwarding addresses.  As a side effect,
2001// maximal free chunks are marked so that they can be skipped on subsequent
2002// sweeps.
2003//
2004// The template parameters are an allocation function, a forwarding address
2005// encoding function, and a function to process non-live objects.
2006template<MarkCompactCollector::AllocationFunction Alloc,
2007         MarkCompactCollector::EncodingFunction Encode,
2008         MarkCompactCollector::ProcessNonLiveFunction ProcessNonLive>
2009inline void EncodeForwardingAddressesInRange(MarkCompactCollector* collector,
2010                                             Address start,
2011                                             Address end,
2012                                             int* offset) {
2013  // The start address of the current free region while sweeping the space.
2014  // This address is set when a transition from live to non-live objects is
  // encountered.  A free-region encoding (see EncodeFreeRegion above) is
  // written to memory at this address when a transition from non-live to
  // live objects is encountered.
2018  Address free_start = NULL;
2019
2020  // A flag giving the state of the previously swept object.  Initially true
2021  // to ensure that free_start is initialized to a proper address before
2022  // trying to write to it.
2023  bool is_prev_alive = true;
2024
2025  int object_size;  // Will be set on each iteration of the loop.
2026  for (Address current = start; current < end; current += object_size) {
2027    HeapObject* object = HeapObject::FromAddress(current);
2028    if (object->IsMarked()) {
2029      object->ClearMark();
2030      collector->tracer()->decrement_marked_count();
2031      object_size = object->Size();
2032
2033      Object* forwarded =
2034          Alloc(collector->heap(), object, object_size)->ToObjectUnchecked();
2035      Encode(collector->heap(), object, object_size, forwarded, offset);
2036
2037#ifdef DEBUG
2038      if (FLAG_gc_verbose) {
2039        PrintF("forward %p -> %p.\n", object->address(),
2040               HeapObject::cast(forwarded)->address());
2041      }
2042#endif
2043      if (!is_prev_alive) {  // Transition from non-live to live.
2044        EncodeFreeRegion(free_start, static_cast<int>(current - free_start));
2045        is_prev_alive = true;
2046      }
2047    } else {  // Non-live object.
2048      object_size = object->Size();
2049      ProcessNonLive(object, collector->heap()->isolate());
2050      if (is_prev_alive) {  // Transition from live to non-live.
2051        free_start = current;
2052        is_prev_alive = false;
2053      }
2054      LiveObjectList::ProcessNonLive(object);
2055    }
2056  }
2057
2058  // If we ended on a free region, mark it.
2059  if (!is_prev_alive) {
2060    EncodeFreeRegion(free_start, static_cast<int>(end - free_start));
2061  }
2062}
2063
2064
2065// Functions to encode the forwarding pointers in each compactable space.
2066void MarkCompactCollector::EncodeForwardingAddressesInNewSpace() {
2067  int ignored;
2068  EncodeForwardingAddressesInRange<MCAllocateFromNewSpace,
2069                                   EncodeForwardingAddressInNewSpace,
2070                                   IgnoreNonLiveObject>(
2071      this,
2072      heap()->new_space()->bottom(),
2073      heap()->new_space()->top(),
2074      &ignored);
2075}
2076
2077
2078template<MarkCompactCollector::AllocationFunction Alloc,
2079         MarkCompactCollector::ProcessNonLiveFunction ProcessNonLive>
2080void MarkCompactCollector::EncodeForwardingAddressesInPagedSpace(
2081    PagedSpace* space) {
2082  PageIterator it(space, PageIterator::PAGES_IN_USE);
2083  while (it.has_next()) {
2084    Page* p = it.next();
2085
2086    // The offset of each live object in the page from the first live object
2087    // in the page.
2088    int offset = 0;
2089    EncodeForwardingAddressesInRange<Alloc,
2090                                     EncodeForwardingAddressInPagedSpace,
2091                                     ProcessNonLive>(
2092        this,
2093        p->ObjectAreaStart(),
2094        p->AllocationTop(),
2095        &offset);
2096  }
2097}
2098
2099
// We scavenge new space simultaneously with sweeping. This is done in two
// passes.
// The first pass migrates all live objects from one semispace to the other or
// promotes them to old space. The forwarding address is written directly into
// the first word of the object without any encoding. If the object is dead,
// NULL is written as its forwarding address.
// The second pass updates pointers to new space in all spaces. It is possible
// to encounter pointers to dead objects during traversal of dirty regions;
// these are cleared so that they are not visited again during the next
// dirty-region iteration.
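// In other words, after the first pass the first word of every object left in
// the inactive semispace holds either the object's new address or NULL if the
// object was dead; UpdatePointerToNewGen() below relies on exactly this
// layout.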
2110static void MigrateObject(Heap* heap,
2111                          Address dst,
2112                          Address src,
2113                          int size,
2114                          bool to_old_space) {
2115  if (to_old_space) {
2116    heap->CopyBlockToOldSpaceAndUpdateRegionMarks(dst, src, size);
2117  } else {
2118    heap->CopyBlock(dst, src, size);
2119  }
2120
2121  Memory::Address_at(src) = dst;
2122}
2123
2124
2125class StaticPointersToNewGenUpdatingVisitor : public
2126  StaticNewSpaceVisitor<StaticPointersToNewGenUpdatingVisitor> {
2127 public:
2128  static inline void VisitPointer(Heap* heap, Object** p) {
2129    if (!(*p)->IsHeapObject()) return;
2130
2131    HeapObject* obj = HeapObject::cast(*p);
2132    Address old_addr = obj->address();
2133
2134    if (heap->new_space()->Contains(obj)) {
2135      ASSERT(heap->InFromSpace(*p));
2136      *p = HeapObject::FromAddress(Memory::Address_at(old_addr));
2137    }
2138  }
2139};
2140
2141
2142// Visitor for updating pointers from live objects in old spaces to new space.
2143// It does not expect to encounter pointers to dead objects.
2144class PointersToNewGenUpdatingVisitor: public ObjectVisitor {
2145 public:
2146  explicit PointersToNewGenUpdatingVisitor(Heap* heap) : heap_(heap) { }
2147
2148  void VisitPointer(Object** p) {
2149    StaticPointersToNewGenUpdatingVisitor::VisitPointer(heap_, p);
2150  }
2151
2152  void VisitPointers(Object** start, Object** end) {
2153    for (Object** p = start; p < end; p++) {
2154      StaticPointersToNewGenUpdatingVisitor::VisitPointer(heap_, p);
2155    }
2156  }
2157
2158  void VisitCodeTarget(RelocInfo* rinfo) {
2159    ASSERT(RelocInfo::IsCodeTarget(rinfo->rmode()));
2160    Object* target = Code::GetCodeFromTargetAddress(rinfo->target_address());
2161    VisitPointer(&target);
2162    rinfo->set_target_address(Code::cast(target)->instruction_start());
2163  }
2164
2165  void VisitDebugTarget(RelocInfo* rinfo) {
2166    ASSERT((RelocInfo::IsJSReturn(rinfo->rmode()) &&
2167            rinfo->IsPatchedReturnSequence()) ||
2168           (RelocInfo::IsDebugBreakSlot(rinfo->rmode()) &&
2169            rinfo->IsPatchedDebugBreakSlotSequence()));
2170    Object* target = Code::GetCodeFromTargetAddress(rinfo->call_address());
2171    VisitPointer(&target);
2172    rinfo->set_call_address(Code::cast(target)->instruction_start());
2173  }
2174
2175 private:
2176  Heap* heap_;
2177};
2178
2179
2180// Visitor for updating pointers from live objects in old spaces to new space.
2181// It can encounter pointers to dead objects in new space when traversing map
2182// space (see comment for MigrateObject).
2183static void UpdatePointerToNewGen(HeapObject** p) {
2184  if (!(*p)->IsHeapObject()) return;
2185
2186  Address old_addr = (*p)->address();
2187  ASSERT(HEAP->InFromSpace(*p));
2188
2189  Address new_addr = Memory::Address_at(old_addr);
2190
2191  if (new_addr == NULL) {
2192    // We encountered pointer to a dead object. Clear it so we will
2193    // not visit it again during next iteration of dirty regions.
2194    *p = NULL;
2195  } else {
2196    *p = HeapObject::FromAddress(new_addr);
2197  }
2198}
2199
2200
2201static String* UpdateNewSpaceReferenceInExternalStringTableEntry(Heap* heap,
2202                                                                 Object** p) {
2203  Address old_addr = HeapObject::cast(*p)->address();
2204  Address new_addr = Memory::Address_at(old_addr);
2205  return String::cast(HeapObject::FromAddress(new_addr));
2206}
2207
2208
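// Try to promote a single new-space object while sweeping new space: objects
// too large for a paged space go to the large object space, the rest to the
// old pointer or old data space as chosen by Heap::TargetSpace().  Returns
// false if no space could be allocated, in which case the caller relocates
// the object within new space instead.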
2209static bool TryPromoteObject(Heap* heap, HeapObject* object, int object_size) {
2210  Object* result;
2211
2212  if (object_size > heap->MaxObjectSizeInPagedSpace()) {
2213    MaybeObject* maybe_result =
2214        heap->lo_space()->AllocateRawFixedArray(object_size);
2215    if (maybe_result->ToObject(&result)) {
2216      HeapObject* target = HeapObject::cast(result);
2217      MigrateObject(heap, target->address(), object->address(), object_size,
2218                    true);
2219      heap->mark_compact_collector()->tracer()->
2220          increment_promoted_objects_size(object_size);
2221      return true;
2222    }
2223  } else {
2224    OldSpace* target_space = heap->TargetSpace(object);
2225
2226    ASSERT(target_space == heap->old_pointer_space() ||
2227           target_space == heap->old_data_space());
2228    MaybeObject* maybe_result = target_space->AllocateRaw(object_size);
2229    if (maybe_result->ToObject(&result)) {
2230      HeapObject* target = HeapObject::cast(result);
2231      MigrateObject(heap,
2232                    target->address(),
2233                    object->address(),
2234                    object_size,
2235                    target_space == heap->old_pointer_space());
2236      heap->mark_compact_collector()->tracer()->
2237          increment_promoted_objects_size(object_size);
2238      return true;
2239    }
2240  }
2241
2242  return false;
2243}
2244
2245
2246static void SweepNewSpace(Heap* heap, NewSpace* space) {
2247  heap->CheckNewSpaceExpansionCriteria();
2248
2249  Address from_bottom = space->bottom();
2250  Address from_top = space->top();
2251
2252  // Flip the semispaces.  After flipping, to space is empty, from space has
2253  // live objects.
2254  space->Flip();
2255  space->ResetAllocationInfo();
2256
2257  int size = 0;
2258  int survivors_size = 0;
2259
2260  // First pass: traverse all objects in inactive semispace, remove marks,
2261  // migrate live objects and write forwarding addresses.
2262  for (Address current = from_bottom; current < from_top; current += size) {
2263    HeapObject* object = HeapObject::FromAddress(current);
2264
2265    if (object->IsMarked()) {
2266      object->ClearMark();
2267      heap->mark_compact_collector()->tracer()->decrement_marked_count();
2268
2269      size = object->Size();
2270      survivors_size += size;
2271
2272      // Aggressively promote young survivors to the old space.
2273      if (TryPromoteObject(heap, object, size)) {
2274        continue;
2275      }
2276
2277      // Promotion failed. Just migrate object to another semispace.
2278      // Allocation cannot fail at this point: semispaces are of equal size.
2279      Object* target = space->AllocateRaw(size)->ToObjectUnchecked();
2280
2281      MigrateObject(heap,
2282                    HeapObject::cast(target)->address(),
2283                    current,
2284                    size,
2285                    false);
2286    } else {
2287      // Process the dead object before we write a NULL into its header.
2288      LiveObjectList::ProcessNonLive(object);
2289
2290      size = object->Size();
2291      Memory::Address_at(current) = NULL;
2292    }
2293  }
2294
2295  // Second pass: find pointers to new space and update them.
2296  PointersToNewGenUpdatingVisitor updating_visitor(heap);
2297
2298  // Update pointers in to space.
2299  Address current = space->bottom();
2300  while (current < space->top()) {
2301    HeapObject* object = HeapObject::FromAddress(current);
2302    current +=
2303        StaticPointersToNewGenUpdatingVisitor::IterateBody(object->map(),
2304                                                           object);
2305  }
2306
2307  // Update roots.
2308  heap->IterateRoots(&updating_visitor, VISIT_ALL_IN_SWEEP_NEWSPACE);
2309  LiveObjectList::IterateElements(&updating_visitor);
2310
2311  // Update pointers in old spaces.
2312  heap->IterateDirtyRegions(heap->old_pointer_space(),
2313                            &Heap::IteratePointersInDirtyRegion,
2314                            &UpdatePointerToNewGen,
2315                            heap->WATERMARK_SHOULD_BE_VALID);
2316
2317  heap->lo_space()->IterateDirtyRegions(&UpdatePointerToNewGen);
2318
2319  // Update pointers from cells.
2320  HeapObjectIterator cell_iterator(heap->cell_space());
2321  for (HeapObject* cell = cell_iterator.next();
2322       cell != NULL;
2323       cell = cell_iterator.next()) {
2324    if (cell->IsJSGlobalPropertyCell()) {
2325      Address value_address =
2326          reinterpret_cast<Address>(cell) +
2327          (JSGlobalPropertyCell::kValueOffset - kHeapObjectTag);
2328      updating_visitor.VisitPointer(reinterpret_cast<Object**>(value_address));
2329    }
2330  }
2331
2332  // Update pointer from the global contexts list.
2333  updating_visitor.VisitPointer(heap->global_contexts_list_address());
2334
2335  // Update pointers from external string table.
2336  heap->UpdateNewSpaceReferencesInExternalStringTable(
2337      &UpdateNewSpaceReferenceInExternalStringTableEntry);
2338
2339  // All pointers were updated. Update auxiliary allocation info.
2340  heap->IncrementYoungSurvivorsCounter(survivors_size);
2341  space->set_age_mark(space->top());
2342
2343  // Update JSFunction pointers from the runtime profiler.
2344  heap->isolate()->runtime_profiler()->UpdateSamplesAfterScavenge();
2345}
2346
2347
2348static void SweepSpace(Heap* heap, PagedSpace* space) {
2349  PageIterator it(space, PageIterator::PAGES_IN_USE);
2350
  // During sweeping of a paged space we try to find the longest sequences
  // of pages without live objects and free them (instead of putting them on
  // the free list).
2354
2355  // Page preceding current.
2356  Page* prev = Page::FromAddress(NULL);
2357
2358  // First empty page in a sequence.
2359  Page* first_empty_page = Page::FromAddress(NULL);
2360
2361  // Page preceding first empty page.
2362  Page* prec_first_empty_page = Page::FromAddress(NULL);
2363
  // If the last used page of the space ends with a sequence of dead objects
  // we can adjust the allocation top instead of putting this free area into
  // the free list. Thus during sweeping we keep track of such areas
  // and defer their deallocation until the sweeping of the next page
  // is done: if one of the next pages contains live objects we have
  // to put such an area into the free list.
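  // For example (illustrative): if a page ends with dead objects, that tail
  // is only remembered at first; when a later page turns out to contain live
  // objects the remembered tail is handed to the free list, but when sweeping
  // finishes without finding more live objects the allocation top is simply
  // moved back to the start of the tail.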
2370  Address last_free_start = NULL;
2371  int last_free_size = 0;
2372
2373  while (it.has_next()) {
2374    Page* p = it.next();
2375
2376    bool is_previous_alive = true;
2377    Address free_start = NULL;
2378    HeapObject* object;
2379
2380    for (Address current = p->ObjectAreaStart();
2381         current < p->AllocationTop();
2382         current += object->Size()) {
2383      object = HeapObject::FromAddress(current);
2384      if (object->IsMarked()) {
2385        object->ClearMark();
2386        heap->mark_compact_collector()->tracer()->decrement_marked_count();
2387
2388        if (!is_previous_alive) {  // Transition from free to live.
2389          space->DeallocateBlock(free_start,
2390                                 static_cast<int>(current - free_start),
2391                                 true);
2392          is_previous_alive = true;
2393        }
2394      } else {
2395        heap->mark_compact_collector()->ReportDeleteIfNeeded(
2396            object, heap->isolate());
2397        if (is_previous_alive) {  // Transition from live to free.
2398          free_start = current;
2399          is_previous_alive = false;
2400        }
2401        LiveObjectList::ProcessNonLive(object);
2402      }
2403      // The object is now unmarked for the call to Size() at the top of the
2404      // loop.
2405    }
2406
2407    bool page_is_empty = (p->ObjectAreaStart() == p->AllocationTop())
2408        || (!is_previous_alive && free_start == p->ObjectAreaStart());
2409
2410    if (page_is_empty) {
      // This page is empty. Check whether we are in the middle of a
      // sequence of empty pages and start one if not.
2413      if (!first_empty_page->is_valid()) {
2414        first_empty_page = p;
2415        prec_first_empty_page = prev;
2416      }
2417
2418      if (!is_previous_alive) {
2419        // There are dead objects on this page. Update space accounting stats
2420        // without putting anything into free list.
2421        int size_in_bytes = static_cast<int>(p->AllocationTop() - free_start);
2422        if (size_in_bytes > 0) {
2423          space->DeallocateBlock(free_start, size_in_bytes, false);
2424        }
2425      }
2426    } else {
      // This page is not empty. The sequence of empty pages ended on the
      // previous one.
2429      if (first_empty_page->is_valid()) {
2430        space->FreePages(prec_first_empty_page, prev);
2431        prec_first_empty_page = first_empty_page = Page::FromAddress(NULL);
2432      }
2433
      // If there is a free ending area on one of the previous pages we have
      // to deallocate that area and put it on the free list.
2436      if (last_free_size > 0) {
2437        Page::FromAddress(last_free_start)->
2438            SetAllocationWatermark(last_free_start);
2439        space->DeallocateBlock(last_free_start, last_free_size, true);
2440        last_free_start = NULL;
2441        last_free_size  = 0;
2442      }
2443
2444      // If the last region of this page was not live we remember it.
2445      if (!is_previous_alive) {
2446        ASSERT(last_free_size == 0);
2447        last_free_size = static_cast<int>(p->AllocationTop() - free_start);
2448        last_free_start = free_start;
2449      }
2450    }
2451
2452    prev = p;
2453  }
2454
2455  // We reached end of space. See if we need to adjust allocation top.
2456  Address new_allocation_top = NULL;
2457
2458  if (first_empty_page->is_valid()) {
    // The last used pages in the space are empty. We can move the allocation
    // top backwards to the beginning of the first empty page.
2461    ASSERT(prev == space->AllocationTopPage());
2462
2463    new_allocation_top = first_empty_page->ObjectAreaStart();
2464  }
2465
2466  if (last_free_size > 0) {
    // There was a free ending area on the previous page.
    // Deallocate it without putting it into the free list and move the
    // allocation top to the beginning of this free area.
2470    space->DeallocateBlock(last_free_start, last_free_size, false);
2471    new_allocation_top = last_free_start;
2472  }
2473
2474  if (new_allocation_top != NULL) {
2475#ifdef DEBUG
2476    Page* new_allocation_top_page = Page::FromAllocationTop(new_allocation_top);
2477    if (!first_empty_page->is_valid()) {
2478      ASSERT(new_allocation_top_page == space->AllocationTopPage());
2479    } else if (last_free_size > 0) {
2480      ASSERT(new_allocation_top_page == prec_first_empty_page);
2481    } else {
2482      ASSERT(new_allocation_top_page == first_empty_page);
2483    }
2484#endif
2485
2486    space->SetTop(new_allocation_top);
2487  }
2488}
2489
2490
2491void MarkCompactCollector::EncodeForwardingAddresses() {
2492  ASSERT(state_ == ENCODE_FORWARDING_ADDRESSES);
2493  // Objects in the active semispace of the young generation may be
2494  // relocated to the inactive semispace (if not promoted).  Set the
2495  // relocation info to the beginning of the inactive semispace.
2496  heap()->new_space()->MCResetRelocationInfo();
2497
2498  // Compute the forwarding pointers in each space.
2499  EncodeForwardingAddressesInPagedSpace<MCAllocateFromOldPointerSpace,
2500                                        ReportDeleteIfNeeded>(
2501      heap()->old_pointer_space());
2502
2503  EncodeForwardingAddressesInPagedSpace<MCAllocateFromOldDataSpace,
2504                                        IgnoreNonLiveObject>(
2505      heap()->old_data_space());
2506
2507  EncodeForwardingAddressesInPagedSpace<MCAllocateFromCodeSpace,
2508                                        ReportDeleteIfNeeded>(
2509      heap()->code_space());
2510
2511  EncodeForwardingAddressesInPagedSpace<MCAllocateFromCellSpace,
2512                                        IgnoreNonLiveObject>(
2513      heap()->cell_space());
2514
2515
  // Compute new space next to last, after the old and code spaces have been
  // compacted.  Objects in new space can be promoted to the old spaces.
2518  EncodeForwardingAddressesInNewSpace();
2519
2520  // Compute map space last because computing forwarding addresses
2521  // overwrites non-live objects.  Objects in the other spaces rely on
2522  // non-live map pointers to get the sizes of non-live objects.
2523  EncodeForwardingAddressesInPagedSpace<MCAllocateFromMapSpace,
2524                                        IgnoreNonLiveObject>(
2525      heap()->map_space());
2526
2527  // Write relocation info to the top page, so we can use it later.  This is
2528  // done after promoting objects from the new space so we get the correct
2529  // allocation top.
2530  heap()->old_pointer_space()->MCWriteRelocationInfoToPage();
2531  heap()->old_data_space()->MCWriteRelocationInfoToPage();
2532  heap()->code_space()->MCWriteRelocationInfoToPage();
2533  heap()->map_space()->MCWriteRelocationInfoToPage();
2534  heap()->cell_space()->MCWriteRelocationInfoToPage();
2535}
2536
2537
2538class MapIterator : public HeapObjectIterator {
2539 public:
2540  explicit MapIterator(Heap* heap)
2541    : HeapObjectIterator(heap->map_space(), &SizeCallback) { }
2542
2543  MapIterator(Heap* heap, Address start)
2544      : HeapObjectIterator(heap->map_space(), start, &SizeCallback) { }
2545
2546 private:
2547  static int SizeCallback(HeapObject* unused) {
2548    USE(unused);
2549    return Map::kSize;
2550  }
2551};
2552
2553
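// Helper for compacting the map space: each live map located above the
// post-compaction top is copied into a vacant (free-list) slot below it, and
// the evacuated map's old copy is left holding a forwarding map word with the
// overflow bit set, which MapUpdatingVisitor uses to redirect pointers to the
// map's new location.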
2554class MapCompact {
2555 public:
2556  explicit MapCompact(Heap* heap, int live_maps)
2557    : heap_(heap),
2558      live_maps_(live_maps),
2559      to_evacuate_start_(heap->map_space()->TopAfterCompaction(live_maps)),
2560      vacant_map_it_(heap),
2561      map_to_evacuate_it_(heap, to_evacuate_start_),
2562      first_map_to_evacuate_(
2563          reinterpret_cast<Map*>(HeapObject::FromAddress(to_evacuate_start_))) {
2564  }
2565
2566  void CompactMaps() {
2567    // As we know the number of maps to evacuate beforehand,
    // we stop when there are no more vacant maps.
2569    for (Map* next_vacant_map = NextVacantMap();
2570         next_vacant_map;
2571         next_vacant_map = NextVacantMap()) {
2572      EvacuateMap(next_vacant_map, NextMapToEvacuate());
2573    }
2574
2575#ifdef DEBUG
2576    CheckNoMapsToEvacuate();
2577#endif
2578  }
2579
2580  void UpdateMapPointersInRoots() {
2581    MapUpdatingVisitor map_updating_visitor;
2582    heap()->IterateRoots(&map_updating_visitor, VISIT_ONLY_STRONG);
2583    heap()->isolate()->global_handles()->IterateWeakRoots(
2584        &map_updating_visitor);
2585    LiveObjectList::IterateElements(&map_updating_visitor);
2586  }
2587
2588  void UpdateMapPointersInPagedSpace(PagedSpace* space) {
2589    ASSERT(space != heap()->map_space());
2590
2591    PageIterator it(space, PageIterator::PAGES_IN_USE);
2592    while (it.has_next()) {
2593      Page* p = it.next();
2594      UpdateMapPointersInRange(heap(),
2595                               p->ObjectAreaStart(),
2596                               p->AllocationTop());
2597    }
2598  }
2599
2600  void UpdateMapPointersInNewSpace() {
2601    NewSpace* space = heap()->new_space();
2602    UpdateMapPointersInRange(heap(), space->bottom(), space->top());
2603  }
2604
2605  void UpdateMapPointersInLargeObjectSpace() {
2606    LargeObjectIterator it(heap()->lo_space());
2607    for (HeapObject* obj = it.next(); obj != NULL; obj = it.next())
2608      UpdateMapPointersInObject(heap(), obj);
2609  }
2610
2611  void Finish() {
2612    heap()->map_space()->FinishCompaction(to_evacuate_start_, live_maps_);
2613  }
2614
2615  inline Heap* heap() const { return heap_; }
2616
2617 private:
2618  Heap* heap_;
2619  int live_maps_;
2620  Address to_evacuate_start_;
2621  MapIterator vacant_map_it_;
2622  MapIterator map_to_evacuate_it_;
2623  Map* first_map_to_evacuate_;
2624
2625  // Helper class for updating map pointers in HeapObjects.
2626  class MapUpdatingVisitor: public ObjectVisitor {
2627  public:
2628    MapUpdatingVisitor() {}
2629
2630    void VisitPointer(Object** p) {
2631      UpdateMapPointer(p);
2632    }
2633
2634    void VisitPointers(Object** start, Object** end) {
2635      for (Object** p = start; p < end; p++) UpdateMapPointer(p);
2636    }
2637
2638  private:
2639    void UpdateMapPointer(Object** p) {
2640      if (!(*p)->IsHeapObject()) return;
2641      HeapObject* old_map = reinterpret_cast<HeapObject*>(*p);
2642
      // Moved maps are tagged with an overflowed map word.  They are the only
      // objects whose map word is overflowed, as marking is already complete.
2645      MapWord map_word = old_map->map_word();
2646      if (!map_word.IsOverflowed()) return;
2647
2648      *p = GetForwardedMap(map_word);
2649    }
2650  };
2651
2652  static Map* NextMap(MapIterator* it, HeapObject* last, bool live) {
2653    while (true) {
2654      HeapObject* next = it->next();
2655      ASSERT(next != NULL);
2656      if (next == last)
2657        return NULL;
2658      ASSERT(!next->IsOverflowed());
2659      ASSERT(!next->IsMarked());
2660      ASSERT(next->IsMap() || FreeListNode::IsFreeListNode(next));
2661      if (next->IsMap() == live)
2662        return reinterpret_cast<Map*>(next);
2663    }
2664  }
2665
2666  Map* NextVacantMap() {
2667    Map* map = NextMap(&vacant_map_it_, first_map_to_evacuate_, false);
2668    ASSERT(map == NULL || FreeListNode::IsFreeListNode(map));
2669    return map;
2670  }
2671
2672  Map* NextMapToEvacuate() {
2673    Map* map = NextMap(&map_to_evacuate_it_, NULL, true);
2674    ASSERT(map != NULL);
2675    ASSERT(map->IsMap());
2676    return map;
2677  }
2678
2679  static void EvacuateMap(Map* vacant_map, Map* map_to_evacuate) {
2680    ASSERT(FreeListNode::IsFreeListNode(vacant_map));
2681    ASSERT(map_to_evacuate->IsMap());
2682
2683    ASSERT(Map::kSize % 4 == 0);
2684
2685    map_to_evacuate->heap()->CopyBlockToOldSpaceAndUpdateRegionMarks(
2686        vacant_map->address(), map_to_evacuate->address(), Map::kSize);
2687
2688    ASSERT(vacant_map->IsMap());  // Due to memcpy above.
2689
2690    MapWord forwarding_map_word = MapWord::FromMap(vacant_map);
2691    forwarding_map_word.SetOverflow();
2692    map_to_evacuate->set_map_word(forwarding_map_word);
2693
2694    ASSERT(map_to_evacuate->map_word().IsOverflowed());
2695    ASSERT(GetForwardedMap(map_to_evacuate->map_word()) == vacant_map);
2696  }
2697
2698  static Map* GetForwardedMap(MapWord map_word) {
2699    ASSERT(map_word.IsOverflowed());
2700    map_word.ClearOverflow();
2701    Map* new_map = map_word.ToMap();
2702    ASSERT_MAP_ALIGNED(new_map->address());
2703    return new_map;
2704  }
2705
2706  static int UpdateMapPointersInObject(Heap* heap, HeapObject* obj) {
2707    ASSERT(!obj->IsMarked());
2708    Map* map = obj->map();
2709    ASSERT(heap->map_space()->Contains(map));
2710    MapWord map_word = map->map_word();
2711    ASSERT(!map_word.IsMarked());
2712    if (map_word.IsOverflowed()) {
2713      Map* new_map = GetForwardedMap(map_word);
2714      ASSERT(heap->map_space()->Contains(new_map));
2715      obj->set_map(new_map);
2716
2717#ifdef DEBUG
2718      if (FLAG_gc_verbose) {
2719        PrintF("update %p : %p -> %p\n",
2720               obj->address(),
2721               reinterpret_cast<void*>(map),
2722               reinterpret_cast<void*>(new_map));
2723      }
2724#endif
2725    }
2726
2727    int size = obj->SizeFromMap(map);
2728    MapUpdatingVisitor map_updating_visitor;
2729    obj->IterateBody(map->instance_type(), size, &map_updating_visitor);
2730    return size;
2731  }
2732
2733  static void UpdateMapPointersInRange(Heap* heap, Address start, Address end) {
2734    HeapObject* object;
2735    int size;
2736    for (Address current = start; current < end; current += size) {
2737      object = HeapObject::FromAddress(current);
2738      size = UpdateMapPointersInObject(heap, object);
2739      ASSERT(size > 0);
2740    }
2741  }
2742
2743#ifdef DEBUG
2744  void CheckNoMapsToEvacuate() {
2745    if (!FLAG_enable_slow_asserts)
2746      return;
2747
2748    for (HeapObject* obj = map_to_evacuate_it_.next();
2749         obj != NULL; obj = map_to_evacuate_it_.next())
2750      ASSERT(FreeListNode::IsFreeListNode(obj));
2751  }
2752#endif
2753};
2754
2755
2756void MarkCompactCollector::SweepSpaces() {
2757  GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_SWEEP);
2758
2759  ASSERT(state_ == SWEEP_SPACES);
2760  ASSERT(!IsCompacting());
2761  // Noncompacting collections simply sweep the spaces to clear the mark
2762  // bits and free the nonlive blocks (for old and map spaces).  We sweep
2763  // the map space last because freeing non-live maps overwrites them and
2764  // the other spaces rely on possibly non-live maps to get the sizes for
2765  // non-live objects.
2766  SweepSpace(heap(), heap()->old_pointer_space());
2767  SweepSpace(heap(), heap()->old_data_space());
2768  SweepSpace(heap(), heap()->code_space());
2769  SweepSpace(heap(), heap()->cell_space());
2770  { GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_SWEEP_NEWSPACE);
2771    SweepNewSpace(heap(), heap()->new_space());
2772  }
2773  SweepSpace(heap(), heap()->map_space());
2774
2775  heap()->IterateDirtyRegions(heap()->map_space(),
2776                             &heap()->IteratePointersInDirtyMapsRegion,
2777                             &UpdatePointerToNewGen,
2778                             heap()->WATERMARK_SHOULD_BE_VALID);
2779
2780  intptr_t live_maps_size = heap()->map_space()->Size();
2781  int live_maps = static_cast<int>(live_maps_size / Map::kSize);
2782  ASSERT(live_map_objects_size_ == live_maps_size);
2783
2784  if (heap()->map_space()->NeedsCompaction(live_maps)) {
2785    MapCompact map_compact(heap(), live_maps);
2786
2787    map_compact.CompactMaps();
2788    map_compact.UpdateMapPointersInRoots();
2789
2790    PagedSpaces spaces;
2791    for (PagedSpace* space = spaces.next();
2792         space != NULL; space = spaces.next()) {
2793      if (space == heap()->map_space()) continue;
2794      map_compact.UpdateMapPointersInPagedSpace(space);
2795    }
2796    map_compact.UpdateMapPointersInNewSpace();
2797    map_compact.UpdateMapPointersInLargeObjectSpace();
2798
2799    map_compact.Finish();
2800  }
2801}
2802
2803
// Iterate the live objects in a range of addresses (e.g., a page or a
// semispace) delimited by [start, end).  Free regions are skipped using the
// free-region encodings written while encoding forwarding addresses.  The
// callback function is used to get the size of each live object.
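// For example, a word equal to kSingleFreeEncoding advances the iteration by
// a single pointer-size step, a word equal to kMultiFreeEncoding skips the
// whole free region whose size is stored in the following word, and anything
// else is treated as a live object whose size comes from the callback.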
2809int MarkCompactCollector::IterateLiveObjectsInRange(
2810    Address start,
2811    Address end,
2812    LiveObjectCallback size_func) {
2813  int live_objects_size = 0;
2814  Address current = start;
2815  while (current < end) {
2816    uint32_t encoded_map = Memory::uint32_at(current);
2817    if (encoded_map == kSingleFreeEncoding) {
2818      current += kPointerSize;
2819    } else if (encoded_map == kMultiFreeEncoding) {
2820      current += Memory::int_at(current + kIntSize);
2821    } else {
2822      int size = (this->*size_func)(HeapObject::FromAddress(current));
2823      current += size;
2824      live_objects_size += size;
2825    }
2826  }
2827  return live_objects_size;
2828}
2829
2830
2831int MarkCompactCollector::IterateLiveObjects(
2832    NewSpace* space, LiveObjectCallback size_f) {
2833  ASSERT(MARK_LIVE_OBJECTS < state_ && state_ <= RELOCATE_OBJECTS);
2834  return IterateLiveObjectsInRange(space->bottom(), space->top(), size_f);
2835}
2836
2837
2838int MarkCompactCollector::IterateLiveObjects(
2839    PagedSpace* space, LiveObjectCallback size_f) {
2840  ASSERT(MARK_LIVE_OBJECTS < state_ && state_ <= RELOCATE_OBJECTS);
2841  int total = 0;
2842  PageIterator it(space, PageIterator::PAGES_IN_USE);
2843  while (it.has_next()) {
2844    Page* p = it.next();
2845    total += IterateLiveObjectsInRange(p->ObjectAreaStart(),
2846                                       p->AllocationTop(),
2847                                       size_f);
2848  }
2849  return total;
2850}
2851
2852
2853// -------------------------------------------------------------------------
2854// Phase 3: Update pointers
2855
2856// Helper class for updating pointers in HeapObjects.
2857class UpdatingVisitor: public ObjectVisitor {
2858 public:
2859  explicit UpdatingVisitor(Heap* heap) : heap_(heap) {}
2860
2861  void VisitPointer(Object** p) {
2862    UpdatePointer(p);
2863  }
2864
2865  void VisitPointers(Object** start, Object** end) {
    // Update all HeapObject pointers in [start, end)
2867    for (Object** p = start; p < end; p++) UpdatePointer(p);
2868  }
2869
2870  void VisitCodeTarget(RelocInfo* rinfo) {
2871    ASSERT(RelocInfo::IsCodeTarget(rinfo->rmode()));
2872    Object* target = Code::GetCodeFromTargetAddress(rinfo->target_address());
2873    VisitPointer(&target);
2874    rinfo->set_target_address(
2875        reinterpret_cast<Code*>(target)->instruction_start());
2876  }
2877
2878  void VisitDebugTarget(RelocInfo* rinfo) {
2879    ASSERT((RelocInfo::IsJSReturn(rinfo->rmode()) &&
2880            rinfo->IsPatchedReturnSequence()) ||
2881           (RelocInfo::IsDebugBreakSlot(rinfo->rmode()) &&
2882            rinfo->IsPatchedDebugBreakSlotSequence()));
2883    Object* target = Code::GetCodeFromTargetAddress(rinfo->call_address());
2884    VisitPointer(&target);
2885    rinfo->set_call_address(
2886        reinterpret_cast<Code*>(target)->instruction_start());
2887  }
2888
2889  inline Heap* heap() const { return heap_; }
2890
2891 private:
2892  void UpdatePointer(Object** p) {
2893    if (!(*p)->IsHeapObject()) return;
2894
2895    HeapObject* obj = HeapObject::cast(*p);
2896    Address old_addr = obj->address();
2897    Address new_addr;
2898    ASSERT(!heap()->InFromSpace(obj));
2899
2900    if (heap()->new_space()->Contains(obj)) {
2901      Address forwarding_pointer_addr =
2902          heap()->new_space()->FromSpaceLow() +
2903          heap()->new_space()->ToSpaceOffsetForAddress(old_addr);
2904      new_addr = Memory::Address_at(forwarding_pointer_addr);
2905
2906#ifdef DEBUG
2907      ASSERT(heap()->old_pointer_space()->Contains(new_addr) ||
2908             heap()->old_data_space()->Contains(new_addr) ||
2909             heap()->new_space()->FromSpaceContains(new_addr) ||
2910             heap()->lo_space()->Contains(HeapObject::FromAddress(new_addr)));
2911
2912      if (heap()->new_space()->FromSpaceContains(new_addr)) {
2913        ASSERT(heap()->new_space()->FromSpaceOffsetForAddress(new_addr) <=
2914               heap()->new_space()->ToSpaceOffsetForAddress(old_addr));
2915      }
2916#endif
2917
2918    } else if (heap()->lo_space()->Contains(obj)) {
2919      // Don't move objects in the large object space.
2920      return;
2921
2922    } else {
2923#ifdef DEBUG
2924      PagedSpaces spaces;
2925      PagedSpace* original_space = spaces.next();
2926      while (original_space != NULL) {
2927        if (original_space->Contains(obj)) break;
2928        original_space = spaces.next();
2929      }
2930      ASSERT(original_space != NULL);
2931#endif
      new_addr = MarkCompactCollector::GetForwardingAddressInOldSpace(obj);
      ASSERT(original_space->Contains(new_addr));
      ASSERT(original_space->MCSpaceOffsetForAddress(new_addr) <=
             original_space->MCSpaceOffsetForAddress(old_addr));
    }

    *p = HeapObject::FromAddress(new_addr);

#ifdef DEBUG
    if (FLAG_gc_verbose) {
      PrintF("update %p : %p -> %p\n",
             reinterpret_cast<Address>(p), old_addr, new_addr);
    }
#endif
  }

  Heap* heap_;
};


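// Update pointers in all strong roots, in the weak global handles and in the
// bodies of all live objects so that they refer to the forwarding addresses
// computed during the preceding encoding phase.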
void MarkCompactCollector::UpdatePointers() {
#ifdef DEBUG
  ASSERT(state_ == ENCODE_FORWARDING_ADDRESSES);
  state_ = UPDATE_POINTERS;
#endif
  UpdatingVisitor updating_visitor(heap());
  heap()->isolate()->runtime_profiler()->UpdateSamplesAfterCompact(
      &updating_visitor);
  heap()->IterateRoots(&updating_visitor, VISIT_ONLY_STRONG);
  heap()->isolate()->global_handles()->IterateWeakRoots(&updating_visitor);

  // Update the pointer to the head of the weak list of global contexts.
  updating_visitor.VisitPointer(&heap()->global_contexts_list_);

  LiveObjectList::IterateElements(&updating_visitor);

  int live_maps_size = IterateLiveObjects(
      heap()->map_space(), &MarkCompactCollector::UpdatePointersInOldObject);
  int live_pointer_olds_size = IterateLiveObjects(
      heap()->old_pointer_space(),
      &MarkCompactCollector::UpdatePointersInOldObject);
  int live_data_olds_size = IterateLiveObjects(
      heap()->old_data_space(),
      &MarkCompactCollector::UpdatePointersInOldObject);
  int live_codes_size = IterateLiveObjects(
      heap()->code_space(), &MarkCompactCollector::UpdatePointersInOldObject);
  int live_cells_size = IterateLiveObjects(
      heap()->cell_space(), &MarkCompactCollector::UpdatePointersInOldObject);
  int live_news_size = IterateLiveObjects(
      heap()->new_space(), &MarkCompactCollector::UpdatePointersInNewObject);

  // Large objects do not move, so the map word can be updated directly.
  LargeObjectIterator it(heap()->lo_space());
  for (HeapObject* obj = it.next(); obj != NULL; obj = it.next()) {
    UpdatePointersInNewObject(obj);
  }

  USE(live_maps_size);
  USE(live_pointer_olds_size);
  USE(live_data_olds_size);
  USE(live_codes_size);
  USE(live_cells_size);
  USE(live_news_size);
  ASSERT(live_maps_size == live_map_objects_size_);
  ASSERT(live_data_olds_size == live_old_data_objects_size_);
  ASSERT(live_pointer_olds_size == live_old_pointer_objects_size_);
  ASSERT(live_codes_size == live_code_objects_size_);
  ASSERT(live_cells_size == live_cell_objects_size_);
  ASSERT(live_news_size == live_young_objects_size_);
}


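// Update pointers in an object whose map word is not encoded, i.e. an object
// in new space or in the large object space.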
int MarkCompactCollector::UpdatePointersInNewObject(HeapObject* obj) {
  // Remember the old map pointer; it is still needed to compute the object
  // size and instance type below.
  Map* old_map = obj->map();
  ASSERT(old_map->IsHeapObject());

  Address forwarded = GetForwardingAddressInOldSpace(old_map);

  ASSERT(heap()->map_space()->Contains(old_map));
  ASSERT(heap()->map_space()->Contains(forwarded));
#ifdef DEBUG
  if (FLAG_gc_verbose) {
    PrintF("update %p : %p -> %p\n", obj->address(), old_map->address(),
           forwarded);
  }
#endif
  // Update the map pointer.
  obj->set_map(reinterpret_cast<Map*>(HeapObject::FromAddress(forwarded)));

  // We have to compute the object size relying on the old map because
  // map objects are not relocated yet.
  int obj_size = obj->SizeFromMap(old_map);

  // Update pointers in the object body.
  UpdatingVisitor updating_visitor(heap());
  obj->IterateBody(old_map->instance_type(), obj_size, &updating_visitor);
  return obj_size;
}


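// Update pointers in an object in a paged space.  Its map word still encodes
// the forwarding offset together with the address of its map.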
int MarkCompactCollector::UpdatePointersInOldObject(HeapObject* obj) {
  // Decode the map pointer.
  MapWord encoding = obj->map_word();
  Address map_addr = encoding.DecodeMapAddress(heap()->map_space());
  ASSERT(heap()->map_space()->Contains(HeapObject::FromAddress(map_addr)));

  // At this point, the first word at map_addr is also encoded, so we cannot
  // cast it to Map* using Map::cast.
  Map* map = reinterpret_cast<Map*>(HeapObject::FromAddress(map_addr));
  int obj_size = obj->SizeFromMap(map);
  InstanceType type = map->instance_type();

  // Update map pointer.
  Address new_map_addr = GetForwardingAddressInOldSpace(map);
  int offset = encoding.DecodeOffset();
  obj->set_map_word(MapWord::EncodeAddress(new_map_addr, offset));

#ifdef DEBUG
  if (FLAG_gc_verbose) {
    PrintF("update %p : %p -> %p\n", obj->address(),
           map_addr, new_map_addr);
  }
#endif

  // Update pointers in the object body.
  UpdatingVisitor updating_visitor(heap());
  obj->IterateBody(type, obj_size, &updating_visitor);
  return obj_size;
}


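// Compute the forwarding address of an object in a paged space.  The map
// word encodes an offset relative to the forwarding address of the first
// live object on the object's page (mc_first_forwarded); if that sum runs
// past the allocation watermark of the destination page, the remainder is
// carried over to the start of the next page.  Illustrative example with
// hypothetical numbers: if mc_first_forwarded sits at destination offset
// 0x100, the watermark offset is 0x200 and the encoded offset is 0x140, the
// result is next_page->OffsetToAddress(Page::kObjectStartOffset + 0x40).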
Address MarkCompactCollector::GetForwardingAddressInOldSpace(HeapObject* obj) {
  // The object should be either in old space or in map space.
  MapWord encoding = obj->map_word();

  // Offset to the first live object's forwarding address.
  int offset = encoding.DecodeOffset();
  Address obj_addr = obj->address();

  // Find the first live object's forwarding address.
  Page* p = Page::FromAddress(obj_addr);
  Address first_forwarded = p->mc_first_forwarded;

  // Page start address of forwarded address.
  Page* forwarded_page = Page::FromAddress(first_forwarded);
  int forwarded_offset = forwarded_page->Offset(first_forwarded);

  // Find end of allocation in the page of first_forwarded.
  int mc_top_offset = forwarded_page->AllocationWatermarkOffset();

  // Check whether the current object's forwarding address is in the same
  // page as the first live object's forwarding address.
  if (forwarded_offset + offset < mc_top_offset) {
    // In the same page.
    return first_forwarded + offset;
  }

  // Must be in the next page, NOTE: this may cross chunks.
  Page* next_page = forwarded_page->next_page();
  ASSERT(next_page->is_valid());

  offset -= (mc_top_offset - forwarded_offset);
  offset += Page::kObjectStartOffset;

  ASSERT_PAGE_OFFSET(offset);
  ASSERT(next_page->OffsetToAddress(offset) < next_page->AllocationTop());

  return next_page->OffsetToAddress(offset);
}


// -------------------------------------------------------------------------
// Phase 4: Relocate objects

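// Move every live object to its forwarding address and commit the relocation
// information to the spaces afterwards.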
void MarkCompactCollector::RelocateObjects() {
#ifdef DEBUG
  ASSERT(state_ == UPDATE_POINTERS);
  state_ = RELOCATE_OBJECTS;
#endif
  // Relocate objects, always relocating map objects first.  Relocating
  // objects in the other spaces relies on map objects to get object sizes.
  int live_maps_size = IterateLiveObjects(
      heap()->map_space(), &MarkCompactCollector::RelocateMapObject);
  int live_pointer_olds_size = IterateLiveObjects(
      heap()->old_pointer_space(),
      &MarkCompactCollector::RelocateOldPointerObject);
  int live_data_olds_size = IterateLiveObjects(
      heap()->old_data_space(), &MarkCompactCollector::RelocateOldDataObject);
  int live_codes_size = IterateLiveObjects(
      heap()->code_space(), &MarkCompactCollector::RelocateCodeObject);
  int live_cells_size = IterateLiveObjects(
      heap()->cell_space(), &MarkCompactCollector::RelocateCellObject);
  int live_news_size = IterateLiveObjects(
      heap()->new_space(), &MarkCompactCollector::RelocateNewObject);

  USE(live_maps_size);
  USE(live_pointer_olds_size);
  USE(live_data_olds_size);
  USE(live_codes_size);
  USE(live_cells_size);
  USE(live_news_size);
  ASSERT(live_maps_size == live_map_objects_size_);
  ASSERT(live_data_olds_size == live_old_data_objects_size_);
  ASSERT(live_pointer_olds_size == live_old_pointer_objects_size_);
  ASSERT(live_codes_size == live_code_objects_size_);
  ASSERT(live_cells_size == live_cell_objects_size_);
  ASSERT(live_news_size == live_young_objects_size_);

  // Flip the from and to spaces.
  heap()->new_space()->Flip();

  heap()->new_space()->MCCommitRelocationInfo();

  // Set the age mark to the bottom of the to space.
  Address mark = heap()->new_space()->bottom();
  heap()->new_space()->set_age_mark(mark);

  PagedSpaces spaces;
  for (PagedSpace* space = spaces.next(); space != NULL; space = spaces.next())
    space->MCCommitRelocationInfo();

  heap()->CheckNewSpaceExpansionCriteria();
  heap()->IncrementYoungSurvivorsCounter(live_news_size);
}


int MarkCompactCollector::RelocateMapObject(HeapObject* obj) {
  // Recover map pointer.
  MapWord encoding = obj->map_word();
  Address map_addr = encoding.DecodeMapAddress(heap()->map_space());
  ASSERT(heap()->map_space()->Contains(HeapObject::FromAddress(map_addr)));

  // Get forwarding address before resetting map pointer.
  Address new_addr = GetForwardingAddressInOldSpace(obj);

  // Reset map pointer.  The meta map object may not be copied yet so
  // Map::cast does not yet work.
  obj->set_map(reinterpret_cast<Map*>(HeapObject::FromAddress(map_addr)));

  Address old_addr = obj->address();

  if (new_addr != old_addr) {
    // Move contents.
    heap()->MoveBlockToOldSpaceAndUpdateRegionMarks(new_addr,
                                                    old_addr,
                                                    Map::kSize);
  }

#ifdef DEBUG
  if (FLAG_gc_verbose) {
    PrintF("relocate %p -> %p\n", old_addr, new_addr);
  }
#endif

  return Map::kSize;
}


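// Restore the real map pointer of a non-map object that is about to be
// relocated and return the object's size.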
static inline int RestoreMap(HeapObject* obj,
                             PagedSpace* space,
                             Address new_addr,
                             Address map_addr) {
  // This must be a non-map object, and the function relies on the
  // assumption that the Map space is compacted before the other paged
  // spaces (see RelocateObjects).

  // Reset map pointer.
  obj->set_map(Map::cast(HeapObject::FromAddress(map_addr)));

  int obj_size = obj->Size();
  ASSERT_OBJECT_SIZE(obj_size);

  ASSERT(space->MCSpaceOffsetForAddress(new_addr) <=
         space->MCSpaceOffsetForAddress(obj->address()));

#ifdef DEBUG
  if (FLAG_gc_verbose) {
    PrintF("relocate %p -> %p\n", obj->address(), new_addr);
  }
#endif

  return obj_size;
}


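// Relocate an object in the old pointer, old data or cell space to its
// forwarding address.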
int MarkCompactCollector::RelocateOldNonCodeObject(HeapObject* obj,
                                                   PagedSpace* space) {
  // Recover map pointer.
  MapWord encoding = obj->map_word();
  Address map_addr = encoding.DecodeMapAddress(heap()->map_space());
  ASSERT(heap()->map_space()->Contains(map_addr));

  // Get forwarding address before resetting map pointer.
  Address new_addr = GetForwardingAddressInOldSpace(obj);

  // Reset the map pointer.
  int obj_size = RestoreMap(obj, space, new_addr, map_addr);

  Address old_addr = obj->address();

  if (new_addr != old_addr) {
    // Move contents.
    if (space == heap()->old_data_space()) {
      heap()->MoveBlock(new_addr, old_addr, obj_size);
    } else {
      heap()->MoveBlockToOldSpaceAndUpdateRegionMarks(new_addr,
                                                      old_addr,
                                                      obj_size);
    }
  }

  ASSERT(!HeapObject::FromAddress(new_addr)->IsCode());

  HeapObject* copied_to = HeapObject::FromAddress(new_addr);
  if (copied_to->IsSharedFunctionInfo()) {
    PROFILE(heap()->isolate(),
            SharedFunctionInfoMoveEvent(old_addr, new_addr));
  }
  HEAP_PROFILE(heap(), ObjectMoveEvent(old_addr, new_addr));

  return obj_size;
}


int MarkCompactCollector::RelocateOldPointerObject(HeapObject* obj) {
  return RelocateOldNonCodeObject(obj, heap()->old_pointer_space());
}


int MarkCompactCollector::RelocateOldDataObject(HeapObject* obj) {
  return RelocateOldNonCodeObject(obj, heap()->old_data_space());
}


int MarkCompactCollector::RelocateCellObject(HeapObject* obj) {
  return RelocateOldNonCodeObject(obj, heap()->cell_space());
}


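// Relocate a code object; its internal references are fixed up after the
// move via Code::Relocate().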
int MarkCompactCollector::RelocateCodeObject(HeapObject* obj) {
  // Recover map pointer.
  MapWord encoding = obj->map_word();
  Address map_addr = encoding.DecodeMapAddress(heap()->map_space());
  ASSERT(heap()->map_space()->Contains(HeapObject::FromAddress(map_addr)));

  // Get forwarding address before resetting map pointer.
  Address new_addr = GetForwardingAddressInOldSpace(obj);

  // Reset the map pointer.
  int obj_size = RestoreMap(obj, heap()->code_space(), new_addr, map_addr);

  Address old_addr = obj->address();

  if (new_addr != old_addr) {
    // Move contents.
    heap()->MoveBlock(new_addr, old_addr, obj_size);
  }

  HeapObject* copied_to = HeapObject::FromAddress(new_addr);
  if (copied_to->IsCode()) {
    // May also update inline cache target.
    Code::cast(copied_to)->Relocate(new_addr - old_addr);
    // Notify the logger that compiled code has moved.
    PROFILE(heap()->isolate(), CodeMoveEvent(old_addr, new_addr));
  }
  HEAP_PROFILE(heap(), ObjectMoveEvent(old_addr, new_addr));

  return obj_size;
}


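// Relocate a surviving new-space object.  Its forwarding address, read from
// the mirrored from-space slot, points either back into new space or into an
// old space if the object is being promoted.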
int MarkCompactCollector::RelocateNewObject(HeapObject* obj) {
  int obj_size = obj->Size();

  // Get the forwarding address.
  Address old_addr = obj->address();
  int offset = heap()->new_space()->ToSpaceOffsetForAddress(old_addr);

  Address new_addr =
    Memory::Address_at(heap()->new_space()->FromSpaceLow() + offset);

#ifdef DEBUG
  if (heap()->new_space()->FromSpaceContains(new_addr)) {
    ASSERT(heap()->new_space()->FromSpaceOffsetForAddress(new_addr) <=
           heap()->new_space()->ToSpaceOffsetForAddress(old_addr));
  } else {
    ASSERT(heap()->TargetSpace(obj) == heap()->old_pointer_space() ||
           heap()->TargetSpace(obj) == heap()->old_data_space());
  }
#endif

  // New and old addresses cannot overlap.
  if (heap()->InNewSpace(HeapObject::FromAddress(new_addr))) {
    heap()->CopyBlock(new_addr, old_addr, obj_size);
  } else {
    heap()->CopyBlockToOldSpaceAndUpdateRegionMarks(new_addr,
                                                    old_addr,
                                                    obj_size);
  }

#ifdef DEBUG
  if (FLAG_gc_verbose) {
    PrintF("relocate %p -> %p\n", old_addr, new_addr);
  }
#endif

  HeapObject* copied_to = HeapObject::FromAddress(new_addr);
  if (copied_to->IsSharedFunctionInfo()) {
    PROFILE(heap()->isolate(),
            SharedFunctionInfoMoveEvent(old_addr, new_addr));
  }
  HEAP_PROFILE(heap(), ObjectMoveEvent(old_addr, new_addr));

  return obj_size;
}


void MarkCompactCollector::EnableCodeFlushing(bool enable) {
  if (enable) {
    if (code_flusher_ != NULL) return;
    code_flusher_ = new CodeFlusher(heap()->isolate());
  } else {
    if (code_flusher_ == NULL) return;
    delete code_flusher_;
    code_flusher_ = NULL;
  }
}


void MarkCompactCollector::ReportDeleteIfNeeded(HeapObject* obj,
                                                Isolate* isolate) {
#ifdef ENABLE_GDB_JIT_INTERFACE
  if (obj->IsCode()) {
    GDBJITInterface::RemoveCode(reinterpret_cast<Code*>(obj));
  }
#endif
  if (obj->IsCode()) {
    PROFILE(isolate, CodeDeleteEvent(obj->address()));
  }
}


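// Compute the size of a marked object from a copy of its map word with the
// mark bit cleared.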
int MarkCompactCollector::SizeOfMarkedObject(HeapObject* obj) {
  MapWord map_word = obj->map_word();
  map_word.ClearMark();
  return obj->SizeFromMap(map_word.ToMap());
}


void MarkCompactCollector::Initialize() {
  StaticPointersToNewGenUpdatingVisitor::Initialize();
  StaticMarkingVisitor::Initialize();
}


} }  // namespace v8::internal
