mark-compact.cc revision f87a203d89e1bbb6708282e0b64dbd13d59b723d
1// Copyright 2006-2008 the V8 project authors. All rights reserved.
2// Redistribution and use in source and binary forms, with or without
3// modification, are permitted provided that the following conditions are
4// met:
5//
6//     * Redistributions of source code must retain the above copyright
7//       notice, this list of conditions and the following disclaimer.
8//     * Redistributions in binary form must reproduce the above
9//       copyright notice, this list of conditions and the following
10//       disclaimer in the documentation and/or other materials provided
11//       with the distribution.
12//     * Neither the name of Google Inc. nor the names of its
13//       contributors may be used to endorse or promote products derived
14//       from this software without specific prior written permission.
15//
16// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27
28#include "v8.h"
29
30#include "compilation-cache.h"
31#include "execution.h"
32#include "heap-profiler.h"
33#include "global-handles.h"
34#include "ic-inl.h"
35#include "mark-compact.h"
36#include "objects-visiting.h"
37#include "stub-cache.h"
38
39namespace v8 {
40namespace internal {
41
42// -------------------------------------------------------------------------
43// MarkCompactCollector
44
45bool MarkCompactCollector::force_compaction_ = false;
46bool MarkCompactCollector::compacting_collection_ = false;
47bool MarkCompactCollector::compact_on_next_gc_ = false;
48
49int MarkCompactCollector::previous_marked_count_ = 0;
50GCTracer* MarkCompactCollector::tracer_ = NULL;
51
52
53#ifdef DEBUG
54MarkCompactCollector::CollectorState MarkCompactCollector::state_ = IDLE;
55
56// Counters used for debugging the marking phase of mark-compact or mark-sweep
57// collection.
58int MarkCompactCollector::live_bytes_ = 0;
59int MarkCompactCollector::live_young_objects_size_ = 0;
60int MarkCompactCollector::live_old_data_objects_size_ = 0;
61int MarkCompactCollector::live_old_pointer_objects_size_ = 0;
62int MarkCompactCollector::live_code_objects_size_ = 0;
63int MarkCompactCollector::live_map_objects_size_ = 0;
64int MarkCompactCollector::live_cell_objects_size_ = 0;
65int MarkCompactCollector::live_lo_objects_size_ = 0;
66#endif
67
68
69void MarkCompactCollector::CollectGarbage() {
70  // Make sure that Prepare() has been called. The individual steps below will
71  // update the state as they proceed.
72  ASSERT(state_ == PREPARE_GC);
73
74  // Prepare has selected whether to compact the old generation or not.
75  // Tell the tracer.
76  if (IsCompacting()) tracer_->set_is_compacting();
77
78  MarkLiveObjects();
79
80  if (FLAG_collect_maps) ClearNonLiveTransitions();
81
82  SweepLargeObjectSpace();
83
84  if (IsCompacting()) {
85    GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_COMPACT);
86    EncodeForwardingAddresses();
87
88    Heap::MarkMapPointersAsEncoded(true);
89    UpdatePointers();
90    Heap::MarkMapPointersAsEncoded(false);
91    PcToCodeCache::FlushPcToCodeCache();
92
93    RelocateObjects();
94  } else {
95    SweepSpaces();
96    PcToCodeCache::FlushPcToCodeCache();
97  }
98
99  Finish();
100
101  // Save the count of marked objects remaining after the collection and
102  // null out the GC tracer.
103  previous_marked_count_ = tracer_->marked_count();
104  ASSERT(previous_marked_count_ == 0);
105  tracer_ = NULL;
106}
107
108
109void MarkCompactCollector::Prepare(GCTracer* tracer) {
110  // Rather than passing the tracer around we stash it in a static member
111  // variable.
112  tracer_ = tracer;
113
114#ifdef DEBUG
115  ASSERT(state_ == IDLE);
116  state_ = PREPARE_GC;
117#endif
118  ASSERT(!FLAG_always_compact || !FLAG_never_compact);
119
120  compacting_collection_ =
121      FLAG_always_compact || force_compaction_ || compact_on_next_gc_;
122  compact_on_next_gc_ = false;
123
124  if (FLAG_never_compact) compacting_collection_ = false;
125  if (!Heap::map_space()->MapPointersEncodable())
126      compacting_collection_ = false;
127  if (FLAG_collect_maps) CreateBackPointers();
128
129  PagedSpaces spaces;
130  for (PagedSpace* space = spaces.next();
131       space != NULL; space = spaces.next()) {
132    space->PrepareForMarkCompact(compacting_collection_);
133  }
134
135#ifdef DEBUG
136  live_bytes_ = 0;
137  live_young_objects_size_ = 0;
138  live_old_pointer_objects_size_ = 0;
139  live_old_data_objects_size_ = 0;
140  live_code_objects_size_ = 0;
141  live_map_objects_size_ = 0;
142  live_cell_objects_size_ = 0;
143  live_lo_objects_size_ = 0;
144#endif
145}
146
147
148void MarkCompactCollector::Finish() {
149#ifdef DEBUG
150  ASSERT(state_ == SWEEP_SPACES || state_ == RELOCATE_OBJECTS);
151  state_ = IDLE;
152#endif
153  // The stub cache is not traversed during GC; clear the cache to
154  // force lazy re-initialization of it. This must be done after the
155  // GC, because it relies on the new address of certain old space
156  // objects (empty string, illegal builtin).
157  StubCache::Clear();
158
159  ExternalStringTable::CleanUp();
160
161  // If we've just compacted old space there's no reason to check the
162  // fragmentation limit. Just return.
163  if (HasCompacted()) return;
164
165  // We compact the old generation on the next GC if it has gotten too
166  // fragmented (i.e., we could recover an expected amount of space by
167  // reclaiming the waste and free list blocks).
168  static const int kFragmentationLimit = 15;        // Percent.
169  static const int kFragmentationAllowed = 1 * MB;  // Absolute.
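  // For example (illustrative numbers, not from the original source): with
  // 40 MB of old-generation space used and 8 MB recoverable, fragmentation is
  // 20%, which exceeds both limits, so the next full GC will compact.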
170  intptr_t old_gen_recoverable = 0;
171  intptr_t old_gen_used = 0;
172
173  OldSpaces spaces;
174  for (OldSpace* space = spaces.next(); space != NULL; space = spaces.next()) {
175    old_gen_recoverable += space->Waste() + space->AvailableFree();
176    old_gen_used += space->Size();
177  }
178
179  int old_gen_fragmentation =
180      static_cast<int>((old_gen_recoverable * 100.0) / old_gen_used);
181  if (old_gen_fragmentation > kFragmentationLimit &&
182      old_gen_recoverable > kFragmentationAllowed) {
183    compact_on_next_gc_ = true;
184  }
185}
186
187
188// -------------------------------------------------------------------------
189// Phase 1: tracing and marking live objects.
190//   before: all objects are in normal state.
191//   after: a live object's map pointer is marked as '00'.
192
193// Marking all live objects in the heap as part of mark-sweep or mark-compact
194// collection.  Before marking, all objects are in their normal state.  After
195// marking, live objects' map pointers are marked indicating that the object
196// has been found reachable.
197//
198// The marking algorithm is a (mostly) depth-first (because of possible stack
199// overflow) traversal of the graph of objects reachable from the roots.  It
200// uses an explicit stack of pointers rather than recursion.  The young
201// generation's inactive ('from') space is used as a marking stack.  The
202// objects in the marking stack are the ones that have been reached and marked
203// but their children have not yet been visited.
204//
205// The marking stack can overflow during traversal.  In that case, we set an
206// overflow flag.  When the overflow flag is set, we continue marking objects
207// reachable from the objects on the marking stack, but no longer push them on
208// the marking stack.  Instead, we mark them as both marked and overflowed.
209// When the stack is in the overflowed state, objects marked as overflowed
210// have been reached and marked but their children have not been visited yet.
211// After emptying the marking stack, we clear the overflow flag and traverse
212// the heap looking for objects marked as overflowed, push them on the stack,
213// and continue with marking.  This process repeats until all reachable
214// objects have been marked.
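
// Illustrative sketch (not part of the original file): a minimal, self-contained
// version of the overflow-tolerant marking loop described above.  The toy types
// and helpers below (ToyObject, ToyMark, etc.) are hypothetical and exist only
// to demonstrate the technique; the real implementation follows in this file.
namespace marking_sketch {

struct ToyObject {
  bool marked;
  bool overflowed;
  ToyObject* children[2];  // Outgoing edges (NULL if absent).
};

const int kToyHeapSize = 16;
const int kToyStackCapacity = 4;  // Deliberately small to force overflow.

ToyObject toy_heap[kToyHeapSize];         // The whole toy "heap".
ToyObject* toy_stack[kToyStackCapacity];  // Explicit marking stack.
int toy_stack_top = 0;
bool toy_stack_overflowed = false;

// Mark an object and push it if there is room; otherwise record the overflow.
void ToyMark(ToyObject* obj) {
  if (obj == NULL || obj->marked) return;
  obj->marked = true;
  if (toy_stack_top < kToyStackCapacity) {
    toy_stack[toy_stack_top++] = obj;
  } else {
    obj->overflowed = true;
    toy_stack_overflowed = true;
  }
}

// Pop marked objects and mark their children until the stack is empty.
void ToyEmptyStack() {
  while (toy_stack_top > 0) {
    ToyObject* obj = toy_stack[--toy_stack_top];
    ToyMark(obj->children[0]);
    ToyMark(obj->children[1]);
  }
}

// Rescan the toy heap for overflowed objects and push them again.
void ToyRefillStack() {
  toy_stack_overflowed = false;
  for (int i = 0; i < kToyHeapSize; i++) {
    ToyObject* obj = &toy_heap[i];
    if (!obj->overflowed) continue;
    if (toy_stack_top < kToyStackCapacity) {
      obj->overflowed = false;
      toy_stack[toy_stack_top++] = obj;
    } else {
      toy_stack_overflowed = true;  // More overflowed objects remain.
    }
  }
}

// Drive marking to completion by alternating draining and rescanning.
void ToyMarkReachable(ToyObject* root) {
  ToyMark(root);
  ToyEmptyStack();
  while (toy_stack_overflowed) {
    ToyRefillStack();
    ToyEmptyStack();
  }
}

}  // namespace marking_sketch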
215
216static MarkingStack marking_stack;
217
218
219static inline HeapObject* ShortCircuitConsString(Object** p) {
220  // Optimization: If the heap object pointed to by p is a non-symbol
221  // cons string whose right substring is Heap::empty_string, update
222  // it in place to its left substring.  Return the updated value.
223  //
224  // Here we assume that if we change *p, we replace it with a heap object
225  // (i.e., the left substring of a cons string is always a heap object).
226  //
227  // The check performed is:
228  //   object->IsConsString() && !object->IsSymbol() &&
229  //   (ConsString::cast(object)->second() == Heap::empty_string())
230  // except the maps for the object and its possible substrings might be
231  // marked.
232  HeapObject* object = HeapObject::cast(*p);
233  MapWord map_word = object->map_word();
234  map_word.ClearMark();
235  InstanceType type = map_word.ToMap()->instance_type();
236  if ((type & kShortcutTypeMask) != kShortcutTypeTag) return object;
237
238  Object* second = reinterpret_cast<ConsString*>(object)->unchecked_second();
239  if (second != Heap::raw_unchecked_empty_string()) {
240    return object;
241  }
242
243  // Since we don't have the object's start, it is impossible to update the
244  // page dirty marks. Therefore, we only replace the string with its left
245  // substring when page dirty marks do not change.
246  Object* first = reinterpret_cast<ConsString*>(object)->unchecked_first();
247  if (!Heap::InNewSpace(object) && Heap::InNewSpace(first)) return object;
248
249  *p = first;
250  return HeapObject::cast(first);
251}
252
253
254class StaticMarkingVisitor : public StaticVisitorBase {
255 public:
256  static inline void IterateBody(Map* map, HeapObject* obj) {
257    table_.GetVisitor(map)(map, obj);
258  }
259
260  static void EnableCodeFlushing(bool enabled) {
261    if (enabled) {
262      table_.Register(kVisitJSFunction, &VisitJSFunctionAndFlushCode);
263    } else {
264      table_.Register(kVisitJSFunction, &VisitJSFunction);
265    }
266  }
267
268  static void Initialize() {
269    table_.Register(kVisitShortcutCandidate,
270                    &FixedBodyVisitor<StaticMarkingVisitor,
271                                      ConsString::BodyDescriptor,
272                                      void>::Visit);
273
274    table_.Register(kVisitConsString,
275                    &FixedBodyVisitor<StaticMarkingVisitor,
276                                      ConsString::BodyDescriptor,
277                                      void>::Visit);
278
279
280    table_.Register(kVisitFixedArray,
281                    &FlexibleBodyVisitor<StaticMarkingVisitor,
282                                         FixedArray::BodyDescriptor,
283                                         void>::Visit);
284
285    table_.Register(kVisitGlobalContext,
286                    &FixedBodyVisitor<StaticMarkingVisitor,
287                                      Context::MarkCompactBodyDescriptor,
288                                      void>::Visit);
289
290    table_.Register(kVisitSharedFunctionInfo, &VisitSharedFunctionInfo);
291
292    table_.Register(kVisitByteArray, &DataObjectVisitor::Visit);
293    table_.Register(kVisitSeqAsciiString, &DataObjectVisitor::Visit);
294    table_.Register(kVisitSeqTwoByteString, &DataObjectVisitor::Visit);
295
296    table_.Register(kVisitOddball,
297                    &FixedBodyVisitor<StaticMarkingVisitor,
298                                      Oddball::BodyDescriptor,
299                                      void>::Visit);
300    table_.Register(kVisitMap,
301                    &FixedBodyVisitor<StaticMarkingVisitor,
302                                      Map::BodyDescriptor,
303                                      void>::Visit);
304
305    table_.Register(kVisitCode, &VisitCode);
306
307    table_.Register(kVisitJSFunction, &VisitJSFunctionAndFlushCode);
308
309    table_.Register(kVisitPropertyCell,
310                    &FixedBodyVisitor<StaticMarkingVisitor,
311                                      JSGlobalPropertyCell::BodyDescriptor,
312                                      void>::Visit);
313
314    table_.RegisterSpecializations<DataObjectVisitor,
315                                   kVisitDataObject,
316                                   kVisitDataObjectGeneric>();
317
318    table_.RegisterSpecializations<JSObjectVisitor,
319                                   kVisitJSObject,
320                                   kVisitJSObjectGeneric>();
321
322    table_.RegisterSpecializations<StructObjectVisitor,
323                                   kVisitStruct,
324                                   kVisitStructGeneric>();
325  }
326
327  INLINE(static void VisitPointer(Object** p)) {
328    MarkObjectByPointer(p);
329  }
330
331  INLINE(static void VisitPointers(Object** start, Object** end)) {
332    // Mark all objects pointed to in [start, end).
333    const int kMinRangeForMarkingRecursion = 64;
334    if (end - start >= kMinRangeForMarkingRecursion) {
335      if (VisitUnmarkedObjects(start, end)) return;
336      // We are close to a stack overflow, so just mark the objects.
337    }
338    for (Object** p = start; p < end; p++) MarkObjectByPointer(p);
339  }
340
341  static inline void VisitCodeTarget(RelocInfo* rinfo) {
342    ASSERT(RelocInfo::IsCodeTarget(rinfo->rmode()));
343    Code* code = Code::GetCodeFromTargetAddress(rinfo->target_address());
344    if (FLAG_cleanup_ics_at_gc && code->is_inline_cache_stub()) {
345      IC::Clear(rinfo->pc());
346      // Please note that targets for cleared inline caches do not have to be
347      // marked since they are contained in Heap::non_monomorphic_cache().
348    } else {
349      MarkCompactCollector::MarkObject(code);
350    }
351  }
352
353  static inline void VisitDebugTarget(RelocInfo* rinfo) {
354    ASSERT((RelocInfo::IsJSReturn(rinfo->rmode()) &&
355            rinfo->IsPatchedReturnSequence()) ||
356           (RelocInfo::IsDebugBreakSlot(rinfo->rmode()) &&
357            rinfo->IsPatchedDebugBreakSlotSequence()));
358    HeapObject* code = Code::GetCodeFromTargetAddress(rinfo->call_address());
359    MarkCompactCollector::MarkObject(code);
360  }
361
362  // Mark object pointed to by p.
363  INLINE(static void MarkObjectByPointer(Object** p)) {
364    if (!(*p)->IsHeapObject()) return;
365    HeapObject* object = ShortCircuitConsString(p);
366    MarkCompactCollector::MarkObject(object);
367  }
368
369  // Visit an unmarked object.
370  static inline void VisitUnmarkedObject(HeapObject* obj) {
371#ifdef DEBUG
372    ASSERT(Heap::Contains(obj));
373    ASSERT(!obj->IsMarked());
374#endif
375    Map* map = obj->map();
376    MarkCompactCollector::SetMark(obj);
377    // Mark the map pointer and the body.
378    MarkCompactCollector::MarkObject(map);
379    IterateBody(map, obj);
380  }
381
382  // Visit all unmarked objects pointed to by [start, end).
383  // Returns false if the operation fails (lack of stack space).
384  static inline bool VisitUnmarkedObjects(Object** start, Object** end) {
385    // Return false if we are close to the stack limit.
386    StackLimitCheck check;
387    if (check.HasOverflowed()) return false;
388
389    // Visit the unmarked objects.
390    for (Object** p = start; p < end; p++) {
391      if (!(*p)->IsHeapObject()) continue;
392      HeapObject* obj = HeapObject::cast(*p);
393      if (obj->IsMarked()) continue;
394      VisitUnmarkedObject(obj);
395    }
396    return true;
397  }
398
399  static inline void VisitExternalReference(Address* p) { }
400  static inline void VisitRuntimeEntry(RelocInfo* rinfo) { }
401
402 private:
403  class DataObjectVisitor {
404   public:
405    template<int size>
406    static void VisitSpecialized(Map* map, HeapObject* object) {
407    }
408
409    static void Visit(Map* map, HeapObject* object) {
410    }
411  };
412
413  typedef FlexibleBodyVisitor<StaticMarkingVisitor,
414                              JSObject::BodyDescriptor,
415                              void> JSObjectVisitor;
416
417  typedef FlexibleBodyVisitor<StaticMarkingVisitor,
418                              StructBodyDescriptor,
419                              void> StructObjectVisitor;
420
421  static void VisitCode(Map* map, HeapObject* object) {
422    reinterpret_cast<Code*>(object)->CodeIterateBody<StaticMarkingVisitor>();
423  }
424
425  // Code flushing support.
426
427  // How many collections a newly compiled code object will survive before
428  // being flushed.
429  static const int kCodeAgeThreshold = 5;
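  // Example (illustrative): a flushable function whose code stays unmarked is
  // aged by one on each full collection; once its code age reaches this
  // threshold, FlushCodeForFunction below resets both the function and its
  // shared function info to the LazyCompile builtin so the code can be
  // reclaimed.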
430
431  inline static bool HasSourceCode(SharedFunctionInfo* info) {
432    Object* undefined = Heap::raw_unchecked_undefined_value();
433    return (info->script() != undefined) &&
434        (reinterpret_cast<Script*>(info->script())->source() != undefined);
435  }
436
437
438  inline static bool IsCompiled(JSFunction* function) {
439    return
440        function->unchecked_code() != Builtins::builtin(Builtins::LazyCompile);
441  }
442
443
444  inline static bool IsCompiled(SharedFunctionInfo* function) {
445    return
446        function->unchecked_code() != Builtins::builtin(Builtins::LazyCompile);
447  }
448
449
450  static void FlushCodeForFunction(JSFunction* function) {
451    SharedFunctionInfo* shared_info = function->unchecked_shared();
452
453    if (shared_info->IsMarked()) return;
454
455    // Special handling if the function and shared info objects
456    // have different code objects.
457    if (function->unchecked_code() != shared_info->unchecked_code()) {
458      // If the shared function has been flushed but the function has not,
459      // we flush the function if possible.
460      if (!IsCompiled(shared_info) &&
461          IsCompiled(function) &&
462          !function->unchecked_code()->IsMarked()) {
463        function->set_code(shared_info->unchecked_code());
464      }
465      return;
466    }
467
468    // Code is either on stack or in compilation cache.
469    if (shared_info->unchecked_code()->IsMarked()) {
470      shared_info->set_code_age(0);
471      return;
472    }
473
474    // The function must be compiled and have the source code available,
475    // to be able to recompile it in case we need the function again.
476    if (!(shared_info->is_compiled() && HasSourceCode(shared_info))) return;
477
478    // We never flush code for Api functions.
479    Object* function_data = shared_info->function_data();
480    if (function_data->IsHeapObject() &&
481        (SafeMap(function_data)->instance_type() ==
482         FUNCTION_TEMPLATE_INFO_TYPE)) {
483      return;
484    }
485
486    // Only flush code for functions.
487    if (shared_info->code()->kind() != Code::FUNCTION) return;
488
489    // Function must be lazy compilable.
490    if (!shared_info->allows_lazy_compilation()) return;
491
492    // If this is a full script wrapped in a function, we do not flush the code.
493    if (shared_info->is_toplevel()) return;
494
495    // Age this shared function info.
496    if (shared_info->code_age() < kCodeAgeThreshold) {
497      shared_info->set_code_age(shared_info->code_age() + 1);
498      return;
499    }
500
501    // Compute the lazy compilable version of the code.
502    Code* code = Builtins::builtin(Builtins::LazyCompile);
503    shared_info->set_code(code);
504    function->set_code(code);
505  }
506
507
508  static inline Map* SafeMap(Object* obj) {
509    MapWord map_word = HeapObject::cast(obj)->map_word();
510    map_word.ClearMark();
511    map_word.ClearOverflow();
512    return map_word.ToMap();
513  }
514
515
516  static inline bool IsJSBuiltinsObject(Object* obj) {
517    return obj->IsHeapObject() &&
518        (SafeMap(obj)->instance_type() == JS_BUILTINS_OBJECT_TYPE);
519  }
520
521
522  static inline bool IsValidNotBuiltinContext(Object* ctx) {
523    if (!ctx->IsHeapObject()) return false;
524
525    Map* map = SafeMap(ctx);
526    if (!(map == Heap::raw_unchecked_context_map() ||
527          map == Heap::raw_unchecked_catch_context_map() ||
528          map == Heap::raw_unchecked_global_context_map())) {
529      return false;
530    }
531
532    Context* context = reinterpret_cast<Context*>(ctx);
533
534    if (IsJSBuiltinsObject(context->global())) {
535      return false;
536    }
537
538    return true;
539  }
540
541
542  static void VisitSharedFunctionInfo(Map* map, HeapObject* object) {
543    SharedFunctionInfo* shared = reinterpret_cast<SharedFunctionInfo*>(object);
544    if (shared->IsInobjectSlackTrackingInProgress()) {
545      shared->DetachInitialMap();
546    }
547    FixedBodyVisitor<StaticMarkingVisitor,
548                     SharedFunctionInfo::BodyDescriptor,
549                     void>::Visit(map, object);
550  }
551
552
553  static void VisitCodeEntry(Address entry_address) {
554    Object* code = Code::GetObjectFromEntryAddress(entry_address);
555    Object* old_code = code;
556    VisitPointer(&code);
557    if (code != old_code) {
558      Memory::Address_at(entry_address) =
559          reinterpret_cast<Code*>(code)->entry();
560    }
561  }
562
563
564  static void VisitJSFunctionAndFlushCode(Map* map, HeapObject* object) {
565    JSFunction* jsfunction = reinterpret_cast<JSFunction*>(object);
566    // The function must have a valid context and not be a builtin.
567    if (IsValidNotBuiltinContext(jsfunction->unchecked_context())) {
568      FlushCodeForFunction(jsfunction);
569    }
570    VisitJSFunction(map, object);
571  }
572
573
574  static void VisitJSFunction(Map* map, HeapObject* object) {
575#define SLOT_ADDR(obj, offset)   \
576    reinterpret_cast<Object**>((obj)->address() + offset)
577
578    VisitPointers(SLOT_ADDR(object, JSFunction::kPropertiesOffset),
579                  SLOT_ADDR(object, JSFunction::kCodeEntryOffset));
580
581    VisitCodeEntry(object->address() + JSFunction::kCodeEntryOffset);
582
583    VisitPointers(SLOT_ADDR(object,
584                            JSFunction::kCodeEntryOffset + kPointerSize),
585                  SLOT_ADDR(object, JSFunction::kSize));
586
587#undef SLOT_ADDR
588  }
589
590
591  typedef void (*Callback)(Map* map, HeapObject* object);
592
593  static VisitorDispatchTable<Callback> table_;
594};
595
596
597VisitorDispatchTable<StaticMarkingVisitor::Callback>
598  StaticMarkingVisitor::table_;
599
600
601class MarkingVisitor : public ObjectVisitor {
602 public:
603  void VisitPointer(Object** p) {
604    StaticMarkingVisitor::VisitPointer(p);
605  }
606
607  void VisitPointers(Object** start, Object** end) {
608    StaticMarkingVisitor::VisitPointers(start, end);
609  }
610
611  void VisitCodeTarget(RelocInfo* rinfo) {
612    StaticMarkingVisitor::VisitCodeTarget(rinfo);
613  }
614
615  void VisitDebugTarget(RelocInfo* rinfo) {
616    StaticMarkingVisitor::VisitDebugTarget(rinfo);
617  }
618};
619
620
621class CodeMarkingVisitor : public ThreadVisitor {
622 public:
623  void VisitThread(ThreadLocalTop* top) {
624    for (StackFrameIterator it(top); !it.done(); it.Advance()) {
625      MarkCompactCollector::MarkObject(it.frame()->unchecked_code());
626    }
627  }
628};
629
630
631class SharedFunctionInfoMarkingVisitor : public ObjectVisitor {
632 public:
633  void VisitPointers(Object** start, Object** end) {
634    for (Object** p = start; p < end; p++) VisitPointer(p);
635  }
636
637  void VisitPointer(Object** slot) {
638    Object* obj = *slot;
639    if (obj->IsHeapObject()) {
640      MarkCompactCollector::MarkObject(HeapObject::cast(obj));
641    }
642  }
643};
644
645
646void MarkCompactCollector::PrepareForCodeFlushing() {
647  if (!FLAG_flush_code) {
648    StaticMarkingVisitor::EnableCodeFlushing(false);
649    return;
650  }
651
652#ifdef ENABLE_DEBUGGER_SUPPORT
653  if (Debug::IsLoaded() || Debug::has_break_points()) {
654    StaticMarkingVisitor::EnableCodeFlushing(false);
655    return;
656  }
657#endif
658  StaticMarkingVisitor::EnableCodeFlushing(true);
659
660  // Ensure that the empty descriptor array is marked.  MarkDescriptorArray
661  // relies on it being marked before any other descriptor array.
662  MarkObject(Heap::raw_unchecked_empty_descriptor_array());
663
664  // Make sure we are not referencing the code from the stack.
665  for (StackFrameIterator it; !it.done(); it.Advance()) {
666    MarkObject(it.frame()->unchecked_code());
667  }
668
669  // Iterate the archived stacks in all threads to check if
670  // the code is referenced.
671  CodeMarkingVisitor code_marking_visitor;
672  ThreadManager::IterateArchivedThreads(&code_marking_visitor);
673
674  SharedFunctionInfoMarkingVisitor visitor;
675  CompilationCache::IterateFunctions(&visitor);
676
677  ProcessMarkingStack();
678}
679
680
681// Visitor class for marking heap roots.
682class RootMarkingVisitor : public ObjectVisitor {
683 public:
684  void VisitPointer(Object** p) {
685    MarkObjectByPointer(p);
686  }
687
688  void VisitPointers(Object** start, Object** end) {
689    for (Object** p = start; p < end; p++) MarkObjectByPointer(p);
690  }
691
692 private:
693  void MarkObjectByPointer(Object** p) {
694    if (!(*p)->IsHeapObject()) return;
695
696    // Replace flat cons strings in place.
697    HeapObject* object = ShortCircuitConsString(p);
698    if (object->IsMarked()) return;
699
700    Map* map = object->map();
701    // Mark the object.
702    MarkCompactCollector::SetMark(object);
703
704    // Mark the map pointer and body, and push them on the marking stack.
705    MarkCompactCollector::MarkObject(map);
706    StaticMarkingVisitor::IterateBody(map, object);
707
708    // Mark all the objects reachable from the map and body.  May leave
709    // overflowed objects in the heap.
710    MarkCompactCollector::EmptyMarkingStack();
711  }
712};
713
714
715// Helper class for pruning the symbol table.
716class SymbolTableCleaner : public ObjectVisitor {
717 public:
718  SymbolTableCleaner() : pointers_removed_(0) { }
719
720  virtual void VisitPointers(Object** start, Object** end) {
721    // Visit all HeapObject pointers in [start, end).
722    for (Object** p = start; p < end; p++) {
723      if ((*p)->IsHeapObject() && !HeapObject::cast(*p)->IsMarked()) {
724        // Check if the symbol being pruned is an external symbol. We need to
725        // delete the associated external data as this symbol is going away.
726
727        // Since no objects have yet been moved we can safely access the map of
728        // the object.
729        if ((*p)->IsExternalString()) {
730          Heap::FinalizeExternalString(String::cast(*p));
731        }
732        // Set the entry to null_value (as deleted).
733        *p = Heap::raw_unchecked_null_value();
734        pointers_removed_++;
735      }
736    }
737  }
738
739  int PointersRemoved() {
740    return pointers_removed_;
741  }
742 private:
743  int pointers_removed_;
744};
745
746
747// Implementation of WeakObjectRetainer for mark-compact GCs.  All marked
748// objects are retained.
749class MarkCompactWeakObjectRetainer : public WeakObjectRetainer {
750 public:
751  virtual Object* RetainAs(Object* object) {
752    MapWord first_word = HeapObject::cast(object)->map_word();
753    if (first_word.IsMarked()) {
754      return object;
755    } else {
756      return NULL;
757    }
758  }
759};
760
761
762void MarkCompactCollector::MarkUnmarkedObject(HeapObject* object) {
763  ASSERT(!object->IsMarked());
764  ASSERT(Heap::Contains(object));
765  if (object->IsMap()) {
766    Map* map = Map::cast(object);
767    if (FLAG_cleanup_caches_in_maps_at_gc) {
768      map->ClearCodeCache();
769    }
770    SetMark(map);
771    if (FLAG_collect_maps &&
772        map->instance_type() >= FIRST_JS_OBJECT_TYPE &&
773        map->instance_type() <= JS_FUNCTION_TYPE) {
774      MarkMapContents(map);
775    } else {
776      marking_stack.Push(map);
777    }
778  } else {
779    SetMark(object);
780    marking_stack.Push(object);
781  }
782}
783
784
785void MarkCompactCollector::MarkMapContents(Map* map) {
786  MarkDescriptorArray(reinterpret_cast<DescriptorArray*>(
787      *HeapObject::RawField(map, Map::kInstanceDescriptorsOffset)));
788
789  // Mark the Object* fields of the Map.
790  // Since the descriptor array has been marked already, it is fine
791  // that one of these fields contains a pointer to it.
792  Object** start_slot = HeapObject::RawField(map,
793                                             Map::kPointerFieldsBeginOffset);
794
795  Object** end_slot = HeapObject::RawField(map, Map::kPointerFieldsEndOffset);
796
797  StaticMarkingVisitor::VisitPointers(start_slot, end_slot);
798}
799
800
801void MarkCompactCollector::MarkDescriptorArray(
802    DescriptorArray* descriptors) {
803  if (descriptors->IsMarked()) return;
804  // Empty descriptor array is marked as a root before any maps are marked.
805  ASSERT(descriptors != Heap::raw_unchecked_empty_descriptor_array());
806  SetMark(descriptors);
807
808  FixedArray* contents = reinterpret_cast<FixedArray*>(
809      descriptors->get(DescriptorArray::kContentArrayIndex));
810  ASSERT(contents->IsHeapObject());
811  ASSERT(!contents->IsMarked());
812  ASSERT(contents->IsFixedArray());
813  ASSERT(contents->length() >= 2);
814  SetMark(contents);
815  // Contents contains (value, details) pairs.  If the details say that
816  // the type of descriptor is MAP_TRANSITION, CONSTANT_TRANSITION, or
817  // NULL_DESCRIPTOR, we don't mark the value as live.  Only for
818  // MAP_TRANSITION and CONSTANT_TRANSITION is the value an Object* (a
819  // Map*).
820  for (int i = 0; i < contents->length(); i += 2) {
821    // If the pair (value, details) at index i, i+1 is not
822    // a transition or null descriptor, mark the value.
823    PropertyDetails details(Smi::cast(contents->get(i + 1)));
824    if (details.type() < FIRST_PHANTOM_PROPERTY_TYPE) {
825      HeapObject* object = reinterpret_cast<HeapObject*>(contents->get(i));
826      if (object->IsHeapObject() && !object->IsMarked()) {
827        SetMark(object);
828        marking_stack.Push(object);
829      }
830    }
831  }
832  // The DescriptorArray descriptors contains a pointer to its contents array,
833  // but the contents array is already marked.
834  marking_stack.Push(descriptors);
835}
836
837
838void MarkCompactCollector::CreateBackPointers() {
839  HeapObjectIterator iterator(Heap::map_space());
840  for (HeapObject* next_object = iterator.next();
841       next_object != NULL; next_object = iterator.next()) {
842    if (next_object->IsMap()) {  // Could also be ByteArray on free list.
843      Map* map = Map::cast(next_object);
844      if (map->instance_type() >= FIRST_JS_OBJECT_TYPE &&
845          map->instance_type() <= JS_FUNCTION_TYPE) {
846        map->CreateBackPointers();
847      } else {
848        ASSERT(map->instance_descriptors() == Heap::empty_descriptor_array());
849      }
850    }
851  }
852}
853
854
855static int OverflowObjectSize(HeapObject* obj) {
856  // Recover the normal map pointer, it might be marked as live and
857  // overflowed.
858  MapWord map_word = obj->map_word();
859  map_word.ClearMark();
860  map_word.ClearOverflow();
861  return obj->SizeFromMap(map_word.ToMap());
862}
863
864
865// Fill the marking stack with overflowed objects returned by the given
866// iterator.  Stop when the marking stack is filled or the end of the space
867// is reached, whichever comes first.
868template<class T>
869static void ScanOverflowedObjects(T* it) {
870  // The caller should ensure that the marking stack is initially not full,
871  // so that we don't waste effort pointlessly scanning for objects.
872  ASSERT(!marking_stack.is_full());
873
874  for (HeapObject* object = it->next(); object != NULL; object = it->next()) {
875    if (object->IsOverflowed()) {
876      object->ClearOverflow();
877      ASSERT(object->IsMarked());
878      ASSERT(Heap::Contains(object));
879      marking_stack.Push(object);
880      if (marking_stack.is_full()) return;
881    }
882  }
883}
884
885
886bool MarkCompactCollector::IsUnmarkedHeapObject(Object** p) {
887  return (*p)->IsHeapObject() && !HeapObject::cast(*p)->IsMarked();
888}
889
890
891void MarkCompactCollector::MarkSymbolTable() {
892  SymbolTable* symbol_table = Heap::raw_unchecked_symbol_table();
893  // Mark the symbol table itself.
894  SetMark(symbol_table);
895  // Explicitly mark the prefix.
896  MarkingVisitor marker;
897  symbol_table->IteratePrefix(&marker);
898  ProcessMarkingStack();
899}
900
901
902void MarkCompactCollector::MarkRoots(RootMarkingVisitor* visitor) {
903  // Mark the heap roots including global variables, stack variables,
904  // etc., and all objects reachable from them.
905  Heap::IterateStrongRoots(visitor, VISIT_ONLY_STRONG);
906
907  // Handle the symbol table specially.
908  MarkSymbolTable();
909
910  // There may be overflowed objects in the heap.  Visit them now.
911  while (marking_stack.overflowed()) {
912    RefillMarkingStack();
913    EmptyMarkingStack();
914  }
915}
916
917
918void MarkCompactCollector::MarkObjectGroups() {
919  List<ObjectGroup*>* object_groups = GlobalHandles::ObjectGroups();
920
921  for (int i = 0; i < object_groups->length(); i++) {
922    ObjectGroup* entry = object_groups->at(i);
923    if (entry == NULL) continue;
924
925    List<Object**>& objects = entry->objects_;
926    bool group_marked = false;
927    for (int j = 0; j < objects.length(); j++) {
928      Object* object = *objects[j];
929      if (object->IsHeapObject() && HeapObject::cast(object)->IsMarked()) {
930        group_marked = true;
931        break;
932      }
933    }
934
935    if (!group_marked) continue;
936
937    // An object in the group is marked, so mark as gray all white heap
938    // objects in the group.
939    for (int j = 0; j < objects.length(); ++j) {
940      if ((*objects[j])->IsHeapObject()) {
941        MarkObject(HeapObject::cast(*objects[j]));
942      }
943    }
944    // Once the entire group has been colored gray, set the object group
945    // to NULL so it won't be processed again.
946    delete object_groups->at(i);
947    object_groups->at(i) = NULL;
948  }
949}
950
951
952// Mark all objects reachable from the objects on the marking stack.
953// Before: the marking stack contains zero or more heap object pointers.
954// After: the marking stack is empty, and all objects reachable from the
955// marking stack have been marked, or are overflowed in the heap.
956void MarkCompactCollector::EmptyMarkingStack() {
957  while (!marking_stack.is_empty()) {
958    HeapObject* object = marking_stack.Pop();
959    ASSERT(object->IsHeapObject());
960    ASSERT(Heap::Contains(object));
961    ASSERT(object->IsMarked());
962    ASSERT(!object->IsOverflowed());
963
964    // Because the object is marked, we have to recover the original map
965    // pointer and use it to mark the object's body.
966    MapWord map_word = object->map_word();
967    map_word.ClearMark();
968    Map* map = map_word.ToMap();
969    MarkObject(map);
970
971    StaticMarkingVisitor::IterateBody(map, object);
972  }
973}
974
975
976// Sweep the heap for overflowed objects, clear their overflow bits, and
977// push them on the marking stack.  Stop early if the marking stack fills
978// before sweeping completes.  If sweeping completes, there are no remaining
979// overflowed objects in the heap, so the overflow flag on the marking stack
980// is cleared.
981void MarkCompactCollector::RefillMarkingStack() {
982  ASSERT(marking_stack.overflowed());
983
984  SemiSpaceIterator new_it(Heap::new_space(), &OverflowObjectSize);
985  ScanOverflowedObjects(&new_it);
986  if (marking_stack.is_full()) return;
987
988  HeapObjectIterator old_pointer_it(Heap::old_pointer_space(),
989                                    &OverflowObjectSize);
990  ScanOverflowedObjects(&old_pointer_it);
991  if (marking_stack.is_full()) return;
992
993  HeapObjectIterator old_data_it(Heap::old_data_space(), &OverflowObjectSize);
994  ScanOverflowedObjects(&old_data_it);
995  if (marking_stack.is_full()) return;
996
997  HeapObjectIterator code_it(Heap::code_space(), &OverflowObjectSize);
998  ScanOverflowedObjects(&code_it);
999  if (marking_stack.is_full()) return;
1000
1001  HeapObjectIterator map_it(Heap::map_space(), &OverflowObjectSize);
1002  ScanOverflowedObjects(&map_it);
1003  if (marking_stack.is_full()) return;
1004
1005  HeapObjectIterator cell_it(Heap::cell_space(), &OverflowObjectSize);
1006  ScanOverflowedObjects(&cell_it);
1007  if (marking_stack.is_full()) return;
1008
1009  LargeObjectIterator lo_it(Heap::lo_space(), &OverflowObjectSize);
1010  ScanOverflowedObjects(&lo_it);
1011  if (marking_stack.is_full()) return;
1012
1013  marking_stack.clear_overflowed();
1014}
1015
1016
1017// Mark all objects reachable (transitively) from objects on the marking
1018// stack.  Before: the marking stack contains zero or more heap object
1019// pointers.  After: the marking stack is empty and there are no overflowed
1020// objects in the heap.
1021void MarkCompactCollector::ProcessMarkingStack() {
1022  EmptyMarkingStack();
1023  while (marking_stack.overflowed()) {
1024    RefillMarkingStack();
1025    EmptyMarkingStack();
1026  }
1027}
1028
1029
1030void MarkCompactCollector::ProcessObjectGroups() {
1031  bool work_to_do = true;
1032  ASSERT(marking_stack.is_empty());
1033  while (work_to_do) {
1034    MarkObjectGroups();
1035    work_to_do = !marking_stack.is_empty();
1036    ProcessMarkingStack();
1037  }
1038}
1039
1040
1041void MarkCompactCollector::MarkLiveObjects() {
1042  GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_MARK);
1043#ifdef DEBUG
1044  ASSERT(state_ == PREPARE_GC);
1045  state_ = MARK_LIVE_OBJECTS;
1046#endif
1047  // The to space contains live objects, the from space is used as a marking
1048  // stack.
1049  marking_stack.Initialize(Heap::new_space()->FromSpaceLow(),
1050                           Heap::new_space()->FromSpaceHigh());
1051
1052  ASSERT(!marking_stack.overflowed());
1053
1054  PrepareForCodeFlushing();
1055
1056  RootMarkingVisitor root_visitor;
1057  MarkRoots(&root_visitor);
1058
1059  // The objects reachable from the roots are marked, yet unreachable
1060  // objects are unmarked.  Mark objects reachable from object groups
1061  // containing at least one marked object, and continue until no new
1062  // objects are reachable from the object groups.
1063  ProcessObjectGroups();
1064
1065  // The objects reachable from the roots or object groups are marked,
1066  // yet unreachable objects are unmarked.  Mark objects reachable
1067  // only from weak global handles.
1068  //
1069  // First we identify nonlive weak handles and mark them as pending
1070  // destruction.
1071  GlobalHandles::IdentifyWeakHandles(&IsUnmarkedHeapObject);
1072  // Then we mark the objects and process the transitive closure.
1073  GlobalHandles::IterateWeakRoots(&root_visitor);
1074  while (marking_stack.overflowed()) {
1075    RefillMarkingStack();
1076    EmptyMarkingStack();
1077  }
1078
1079  // Repeat the object groups to mark unmarked groups reachable from the
1080  // weak roots.
1081  ProcessObjectGroups();
1082
1083  // Prune the symbol table removing all symbols only pointed to by the
1084  // symbol table.  Cannot use symbol_table() here because the symbol
1085  // table is marked.
1086  SymbolTable* symbol_table = Heap::raw_unchecked_symbol_table();
1087  SymbolTableCleaner v;
1088  symbol_table->IterateElements(&v);
1089  symbol_table->ElementsRemoved(v.PointersRemoved());
1090  ExternalStringTable::Iterate(&v);
1091  ExternalStringTable::CleanUp();
1092
1093  // Process the weak references.
1094  MarkCompactWeakObjectRetainer mark_compact_object_retainer;
1095  Heap::ProcessWeakReferences(&mark_compact_object_retainer);
1096
1097  // Remove object groups after marking phase.
1098  GlobalHandles::RemoveObjectGroups();
1099}
1100
1101
1102static int CountMarkedCallback(HeapObject* obj) {
1103  MapWord map_word = obj->map_word();
1104  map_word.ClearMark();
1105  return obj->SizeFromMap(map_word.ToMap());
1106}
1107
1108
1109#ifdef DEBUG
1110void MarkCompactCollector::UpdateLiveObjectCount(HeapObject* obj) {
1111  live_bytes_ += obj->Size();
1112  if (Heap::new_space()->Contains(obj)) {
1113    live_young_objects_size_ += obj->Size();
1114  } else if (Heap::map_space()->Contains(obj)) {
1115    ASSERT(obj->IsMap());
1116    live_map_objects_size_ += obj->Size();
1117  } else if (Heap::cell_space()->Contains(obj)) {
1118    ASSERT(obj->IsJSGlobalPropertyCell());
1119    live_cell_objects_size_ += obj->Size();
1120  } else if (Heap::old_pointer_space()->Contains(obj)) {
1121    live_old_pointer_objects_size_ += obj->Size();
1122  } else if (Heap::old_data_space()->Contains(obj)) {
1123    live_old_data_objects_size_ += obj->Size();
1124  } else if (Heap::code_space()->Contains(obj)) {
1125    live_code_objects_size_ += obj->Size();
1126  } else if (Heap::lo_space()->Contains(obj)) {
1127    live_lo_objects_size_ += obj->Size();
1128  } else {
1129    UNREACHABLE();
1130  }
1131}
1132#endif  // DEBUG
1133
1134
1135void MarkCompactCollector::SweepLargeObjectSpace() {
1136#ifdef DEBUG
1137  ASSERT(state_ == MARK_LIVE_OBJECTS);
1138  state_ =
1139      compacting_collection_ ? ENCODE_FORWARDING_ADDRESSES : SWEEP_SPACES;
1140#endif
1141  // Deallocate unmarked objects and clear marked bits for marked objects.
1142  Heap::lo_space()->FreeUnmarkedObjects();
1143}
1144
1145
1146// Safe to use during marking phase only.
1147bool MarkCompactCollector::SafeIsMap(HeapObject* object) {
1148  MapWord metamap = object->map_word();
1149  metamap.ClearMark();
1150  return metamap.ToMap()->instance_type() == MAP_TYPE;
1151}
1152
1153
1154void MarkCompactCollector::ClearNonLiveTransitions() {
1155  HeapObjectIterator map_iterator(Heap::map_space(), &CountMarkedCallback);
1156  // Iterate over the map space, setting map transitions that go from
1157  // a marked map to an unmarked map to null transitions.  At the same time,
1158  // set all the prototype fields of maps back to their original value,
1159  // dropping the back pointers temporarily stored in the prototype field.
1160  // Setting the prototype field requires following the linked list of
1161  // back pointers, reversing them all at once.  This allows us to find
1162  // those maps with map transitions that need to be nulled, and only
1163  // scan the descriptor arrays of those maps, not all maps.
1164  // All of these actions are carried out only on maps of JSObjects
1165  // and related subtypes.
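  // Illustration (not from the original source): if map C was created by a
  // transition from B, and B by a transition from A, the back pointers stored
  // in the prototype fields form the chain C -> B -> A -> real prototype.
  // Walking that chain from any map finds the real prototype; walking it again
  // restores each prototype field and, when a live map is reached after dead
  // ones, clears its transitions to the dead maps.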
1166  for (HeapObject* obj = map_iterator.next();
1167       obj != NULL; obj = map_iterator.next()) {
1168    Map* map = reinterpret_cast<Map*>(obj);
1169    if (!map->IsMarked() && map->IsByteArray()) continue;
1170
1171    ASSERT(SafeIsMap(map));
1172    // Only JSObject and subtypes have map transitions and back pointers.
1173    if (map->instance_type() < FIRST_JS_OBJECT_TYPE) continue;
1174    if (map->instance_type() > JS_FUNCTION_TYPE) continue;
1175
1176    if (map->IsMarked() && map->attached_to_shared_function_info()) {
1177      // This map is used for inobject slack tracking and has been detached
1178      // from SharedFunctionInfo during the mark phase.
1179      // Since it survived the GC, reattach it now.
1180      map->unchecked_constructor()->unchecked_shared()->AttachInitialMap(map);
1181    }
1182
1183    // Follow the chain of back pointers to find the prototype.
1184    Map* current = map;
1185    while (SafeIsMap(current)) {
1186      current = reinterpret_cast<Map*>(current->prototype());
1187      ASSERT(current->IsHeapObject());
1188    }
1189    Object* real_prototype = current;
1190
1191    // Follow back pointers, setting them to prototype,
1192    // clearing map transitions when necessary.
1193    current = map;
1194    bool on_dead_path = !current->IsMarked();
1195    Object* next;
1196    while (SafeIsMap(current)) {
1197      next = current->prototype();
1198      // There should never be a dead map above a live map.
1199      ASSERT(on_dead_path || current->IsMarked());
1200
1201      // A live map above a dead map indicates a dead transition.
1202      // This test will always be false on the first iteration.
1203      if (on_dead_path && current->IsMarked()) {
1204        on_dead_path = false;
1205        current->ClearNonLiveTransitions(real_prototype);
1206      }
1207      *HeapObject::RawField(current, Map::kPrototypeOffset) =
1208          real_prototype;
1209      current = reinterpret_cast<Map*>(next);
1210    }
1211  }
1212}
1213
1214// -------------------------------------------------------------------------
1215// Phase 2: Encode forwarding addresses.
1216// When compacting, forwarding addresses for objects in old space and map
1217// space are encoded in their map pointer word (along with an encoding of
1218// their map pointers).
1219//
1220// The exact encoding is described in the comments for class MapWord in
1221// objects.h.
1222//
1223// An address range [start, end) can have both live and non-live objects.
1224// Maximal non-live regions are marked so they can be skipped on subsequent
1225// sweeps of the heap.  A distinguished map-pointer encoding is used to mark
1226// free regions of one-word size (in which case the next word is the start
1227// of a live object).  A second distinguished map-pointer encoding is used
1228// to mark free regions larger than one word, and the size of the free
1229// region (including the first word) is written to the second word of the
1230// region.
1231//
1232// Any valid map page offset must lie in the object area of the page, so map
1233// page offsets less than Page::kObjectStartOffset are invalid.  We use a
1234// pair of distinguished invalid map encodings (for single word and multiple
1235// words) to indicate free regions in the page found during computation of
1236// forwarding addresses and skipped over in subsequent sweeps.
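//
// Worked example (illustrative): a free region of kIntSize bytes at address A
// is marked by writing kSingleFreeEncoding into the word at A; a 40-byte hole
// is marked by writing kMultiFreeEncoding at A and the size 40 at A + kIntSize,
// so subsequent sweeps can skip the entire region (see EncodeFreeRegion below).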
1237
1238
1239// Encode a free region, defined by the given start address and size, in the
1240// first word or two of the region.
1241void EncodeFreeRegion(Address free_start, int free_size) {
1242  ASSERT(free_size >= kIntSize);
1243  if (free_size == kIntSize) {
1244    Memory::uint32_at(free_start) = MarkCompactCollector::kSingleFreeEncoding;
1245  } else {
1246    ASSERT(free_size >= 2 * kIntSize);
1247    Memory::uint32_at(free_start) = MarkCompactCollector::kMultiFreeEncoding;
1248    Memory::int_at(free_start + kIntSize) = free_size;
1249  }
1250
1251#ifdef DEBUG
1252  // Zap the body of the free region.
1253  if (FLAG_enable_slow_asserts) {
1254    for (int offset = 2 * kIntSize;
1255         offset < free_size;
1256         offset += kPointerSize) {
1257      Memory::Address_at(free_start + offset) = kZapValue;
1258    }
1259  }
1260#endif
1261}
1262
1263
1264// Try to promote all objects in new space.  Heap numbers and sequential
1265// strings are promoted to the old data space, large objects to large object
1266// space, and all others to the old pointer space.
1267inline Object* MCAllocateFromNewSpace(HeapObject* object, int object_size) {
1268  Object* forwarded;
1269  if (object_size > Heap::MaxObjectSizeInPagedSpace()) {
1270    forwarded = Failure::Exception();
1271  } else {
1272    OldSpace* target_space = Heap::TargetSpace(object);
1273    ASSERT(target_space == Heap::old_pointer_space() ||
1274           target_space == Heap::old_data_space());
1275    forwarded = target_space->MCAllocateRaw(object_size);
1276  }
1277  if (forwarded->IsFailure()) {
1278    forwarded = Heap::new_space()->MCAllocateRaw(object_size);
1279  }
1280  return forwarded;
1281}
1282
1283
1284// Allocation functions for the paged spaces call the space's MCAllocateRaw.
1285inline Object* MCAllocateFromOldPointerSpace(HeapObject* ignore,
1286                                             int object_size) {
1287  return Heap::old_pointer_space()->MCAllocateRaw(object_size);
1288}
1289
1290
1291inline Object* MCAllocateFromOldDataSpace(HeapObject* ignore, int object_size) {
1292  return Heap::old_data_space()->MCAllocateRaw(object_size);
1293}
1294
1295
1296inline Object* MCAllocateFromCodeSpace(HeapObject* ignore, int object_size) {
1297  return Heap::code_space()->MCAllocateRaw(object_size);
1298}
1299
1300
1301inline Object* MCAllocateFromMapSpace(HeapObject* ignore, int object_size) {
1302  return Heap::map_space()->MCAllocateRaw(object_size);
1303}
1304
1305
1306inline Object* MCAllocateFromCellSpace(HeapObject* ignore, int object_size) {
1307  return Heap::cell_space()->MCAllocateRaw(object_size);
1308}
1309
1310
1311// The forwarding address is encoded at the same offset as the current
1312// to-space object, but in from space.
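// Worked example (illustrative): an object at to-space offset 0x40 has its new
// address recorded in the word at FromSpaceLow() + 0x40.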
1313inline void EncodeForwardingAddressInNewSpace(HeapObject* old_object,
1314                                              int object_size,
1315                                              Object* new_object,
1316                                              int* ignored) {
1317  int offset =
1318      Heap::new_space()->ToSpaceOffsetForAddress(old_object->address());
1319  Memory::Address_at(Heap::new_space()->FromSpaceLow() + offset) =
1320      HeapObject::cast(new_object)->address();
1321}
1322
1323
1324// The forwarding address is encoded in the map pointer of the object as an
1325// offset (in terms of live bytes) from the address of the first live object
1326// in the page.
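// Worked example (illustrative, not from the original source): if the first
// live object on a page is forwarded to mc_first_forwarded and a later object
// on the same page has 96 bytes of live data before it, its map word encodes
// offset 96, so its new location can later be recovered as roughly
// mc_first_forwarded + 96; the exact decoding lives elsewhere.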
1327inline void EncodeForwardingAddressInPagedSpace(HeapObject* old_object,
1328                                                int object_size,
1329                                                Object* new_object,
1330                                                int* offset) {
1331  // Record the forwarding address of the first live object if necessary.
1332  if (*offset == 0) {
1333    Page::FromAddress(old_object->address())->mc_first_forwarded =
1334        HeapObject::cast(new_object)->address();
1335  }
1336
1337  MapWord encoding =
1338      MapWord::EncodeAddress(old_object->map()->address(), *offset);
1339  old_object->set_map_word(encoding);
1340  *offset += object_size;
1341  ASSERT(*offset <= Page::kObjectAreaSize);
1342}
1343
1344
1345// Most non-live objects are ignored.
1346inline void IgnoreNonLiveObject(HeapObject* object) {}
1347
1348
1349// Function template that, given a range of addresses (e.g., a semispace or a
1350// paged space page), iterates through the objects in the range to clear
1351// mark bits and compute and encode forwarding addresses.  As a side effect,
1352// maximal free chunks are marked so that they can be skipped on subsequent
1353// sweeps.
1354//
1355// The template parameters are an allocation function, a forwarding address
1356// encoding function, and a function to process non-live objects.
1357template<MarkCompactCollector::AllocationFunction Alloc,
1358         MarkCompactCollector::EncodingFunction Encode,
1359         MarkCompactCollector::ProcessNonLiveFunction ProcessNonLive>
1360inline void EncodeForwardingAddressesInRange(Address start,
1361                                             Address end,
1362                                             int* offset) {
1363  // The start address of the current free region while sweeping the space.
1364  // This address is set when a transition from live to non-live objects is
1365  // encountered.  A value (an encoding of the 'next free region' pointer)
1366  // is written to memory at this address when a transition from non-live to
1367  // live objects is encountered.
1368  Address free_start = NULL;
1369
1370  // A flag giving the state of the previously swept object.  Initially true
1371  // to ensure that free_start is initialized to a proper address before
1372  // trying to write to it.
1373  bool is_prev_alive = true;
1374
1375  int object_size;  // Will be set on each iteration of the loop.
1376  for (Address current = start; current < end; current += object_size) {
1377    HeapObject* object = HeapObject::FromAddress(current);
1378    if (object->IsMarked()) {
1379      object->ClearMark();
1380      MarkCompactCollector::tracer()->decrement_marked_count();
1381      object_size = object->Size();
1382
1383      Object* forwarded = Alloc(object, object_size);
1384      // Allocation cannot fail, because we are compacting the space.
1385      ASSERT(!forwarded->IsFailure());
1386      Encode(object, object_size, forwarded, offset);
1387
1388#ifdef DEBUG
1389      if (FLAG_gc_verbose) {
1390        PrintF("forward %p -> %p.\n", object->address(),
1391               HeapObject::cast(forwarded)->address());
1392      }
1393#endif
1394      if (!is_prev_alive) {  // Transition from non-live to live.
1395        EncodeFreeRegion(free_start, static_cast<int>(current - free_start));
1396        is_prev_alive = true;
1397      }
1398    } else {  // Non-live object.
1399      object_size = object->Size();
1400      ProcessNonLive(object);
1401      if (is_prev_alive) {  // Transition from live to non-live.
1402        free_start = current;
1403        is_prev_alive = false;
1404      }
1405    }
1406  }
1407
1408  // If we ended on a free region, mark it.
1409  if (!is_prev_alive) {
1410    EncodeFreeRegion(free_start, static_cast<int>(end - free_start));
1411  }
1412}
1413
1414
1415// Functions to encode the forwarding pointers in each compactable space.
1416void MarkCompactCollector::EncodeForwardingAddressesInNewSpace() {
1417  int ignored;
1418  EncodeForwardingAddressesInRange<MCAllocateFromNewSpace,
1419                                   EncodeForwardingAddressInNewSpace,
1420                                   IgnoreNonLiveObject>(
1421      Heap::new_space()->bottom(),
1422      Heap::new_space()->top(),
1423      &ignored);
1424}
1425
1426
1427template<MarkCompactCollector::AllocationFunction Alloc,
1428         MarkCompactCollector::ProcessNonLiveFunction ProcessNonLive>
1429void MarkCompactCollector::EncodeForwardingAddressesInPagedSpace(
1430    PagedSpace* space) {
1431  PageIterator it(space, PageIterator::PAGES_IN_USE);
1432  while (it.has_next()) {
1433    Page* p = it.next();
1434
1435    // The offset of each live object in the page from the first live object
1436    // in the page.
1437    int offset = 0;
1438    EncodeForwardingAddressesInRange<Alloc,
1439                                     EncodeForwardingAddressInPagedSpace,
1440                                     ProcessNonLive>(
1441        p->ObjectAreaStart(),
1442        p->AllocationTop(),
1443        &offset);
1444  }
1445}
1446
1447
1448// We scavenge new space simultaneously with sweeping. This is done in two
1449// passes.
1450// The first pass migrates all live objects from one semispace to another or
1451// promotes them to old space. The forwarding address is written directly into
1452// the first word of the object without any encoding. If the object is dead, we
1453// write NULL as the forwarding address.
1454// The second pass updates pointers to new space in all spaces. It is possible
1455// to encounter pointers to dead objects while traversing dirty regions; we
1456// clear them to avoid encountering them again during the next dirty-regions
1457// iteration.
1458static void MigrateObject(Address dst,
1459                          Address src,
1460                          int size,
1461                          bool to_old_space) {
1462  if (to_old_space) {
1463    Heap::CopyBlockToOldSpaceAndUpdateRegionMarks(dst, src, size);
1464  } else {
1465    Heap::CopyBlock(dst, src, size);
1466  }
1467
1468  Memory::Address_at(src) = dst;
1469}
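
// Note (illustrative, not from the original source): the forwarding address
// written at src above is what UpdatePointerToNewGen and
// StaticPointersToNewGenUpdatingVisitor below read during the second pass;
// dead objects instead get NULL recorded (see the comment above), which is
// why UpdatePointerToNewGen must tolerate and clear such pointers.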
1470
1471
1472class StaticPointersToNewGenUpdatingVisitor : public
1473  StaticNewSpaceVisitor<StaticPointersToNewGenUpdatingVisitor> {
1474 public:
1475  static inline void VisitPointer(Object** p) {
1476    if (!(*p)->IsHeapObject()) return;
1477
1478    HeapObject* obj = HeapObject::cast(*p);
1479    Address old_addr = obj->address();
1480
1481    if (Heap::new_space()->Contains(obj)) {
1482      ASSERT(Heap::InFromSpace(*p));
1483      *p = HeapObject::FromAddress(Memory::Address_at(old_addr));
1484    }
1485  }
1486};
1487
1488
1489// Visitor for updating pointers from live objects in old spaces to new space.
1490// It does not expect to encounter pointers to dead objects.
1491class PointersToNewGenUpdatingVisitor: public ObjectVisitor {
1492 public:
1493  void VisitPointer(Object** p) {
1494    StaticPointersToNewGenUpdatingVisitor::VisitPointer(p);
1495  }
1496
1497  void VisitPointers(Object** start, Object** end) {
1498    for (Object** p = start; p < end; p++) {
1499      StaticPointersToNewGenUpdatingVisitor::VisitPointer(p);
1500    }
1501  }
1502
1503  void VisitCodeTarget(RelocInfo* rinfo) {
1504    ASSERT(RelocInfo::IsCodeTarget(rinfo->rmode()));
1505    Object* target = Code::GetCodeFromTargetAddress(rinfo->target_address());
1506    VisitPointer(&target);
1507    rinfo->set_target_address(Code::cast(target)->instruction_start());
1508  }
1509
1510  void VisitDebugTarget(RelocInfo* rinfo) {
1511    ASSERT((RelocInfo::IsJSReturn(rinfo->rmode()) &&
1512            rinfo->IsPatchedReturnSequence()) ||
1513           (RelocInfo::IsDebugBreakSlot(rinfo->rmode()) &&
1514            rinfo->IsPatchedDebugBreakSlotSequence()));
1515    Object* target = Code::GetCodeFromTargetAddress(rinfo->call_address());
1516    VisitPointer(&target);
1517    rinfo->set_call_address(Code::cast(target)->instruction_start());
1518  }
1519};
1520
1521
1522// Visitor for updating pointers from live objects in old spaces to new space.
1523// It can encounter pointers to dead objects in new space when traversing map
1524// space (see comment for MigrateObject).
1525static void UpdatePointerToNewGen(HeapObject** p) {
1526  if (!(*p)->IsHeapObject()) return;
1527
1528  Address old_addr = (*p)->address();
1529  ASSERT(Heap::InFromSpace(*p));
1530
1531  Address new_addr = Memory::Address_at(old_addr);
1532
1533  if (new_addr == NULL) {
1534    // We encountered pointer to a dead object. Clear it so we will
1535    // not visit it again during next iteration of dirty regions.
1536    *p = NULL;
1537  } else {
1538    *p = HeapObject::FromAddress(new_addr);
1539  }
1540}
1541
1542
static String* UpdateNewSpaceReferenceInExternalStringTableEntry(Object** p) {
1544  Address old_addr = HeapObject::cast(*p)->address();
1545  Address new_addr = Memory::Address_at(old_addr);
1546  return String::cast(HeapObject::FromAddress(new_addr));
1547}
1548
1549
1550static bool TryPromoteObject(HeapObject* object, int object_size) {
1551  Object* result;
1552
1553  if (object_size > Heap::MaxObjectSizeInPagedSpace()) {
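    // The object is too large for a regular paged-space page; promote it
    // straight into the large object space.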
1554    result = Heap::lo_space()->AllocateRawFixedArray(object_size);
1555    if (!result->IsFailure()) {
1556      HeapObject* target = HeapObject::cast(result);
1557      MigrateObject(target->address(), object->address(), object_size, true);
1558      MarkCompactCollector::tracer()->
1559          increment_promoted_objects_size(object_size);
1560      return true;
1561    }
1562  } else {
1563    OldSpace* target_space = Heap::TargetSpace(object);
1564
1565    ASSERT(target_space == Heap::old_pointer_space() ||
1566           target_space == Heap::old_data_space());
1567    result = target_space->AllocateRaw(object_size);
1568    if (!result->IsFailure()) {
1569      HeapObject* target = HeapObject::cast(result);
1570      MigrateObject(target->address(),
1571                    object->address(),
1572                    object_size,
1573                    target_space == Heap::old_pointer_space());
1574      MarkCompactCollector::tracer()->
1575          increment_promoted_objects_size(object_size);
1576      return true;
1577    }
1578  }
1579
1580  return false;
1581}
1582
1583
1584static void SweepNewSpace(NewSpace* space) {
1585  Heap::CheckNewSpaceExpansionCriteria();
1586
1587  Address from_bottom = space->bottom();
1588  Address from_top = space->top();
1589
  // Flip the semispaces.  After flipping, to space is empty and from space
  // holds the live objects.
1592  space->Flip();
1593  space->ResetAllocationInfo();
1594
1595  int size = 0;
1596  int survivors_size = 0;
1597
1598  // First pass: traverse all objects in inactive semispace, remove marks,
1599  // migrate live objects and write forwarding addresses.
1600  for (Address current = from_bottom; current < from_top; current += size) {
1601    HeapObject* object = HeapObject::FromAddress(current);
1602
1603    if (object->IsMarked()) {
1604      object->ClearMark();
1605      MarkCompactCollector::tracer()->decrement_marked_count();
1606
1607      size = object->Size();
1608      survivors_size += size;
1609
1610      // Aggressively promote young survivors to the old space.
1611      if (TryPromoteObject(object, size)) {
1612        continue;
1613      }
1614
      // Promotion failed.  Just migrate the object to the other semispace.
1616      Object* target = space->AllocateRaw(size);
1617
1618      // Allocation cannot fail at this point: semispaces are of equal size.
1619      ASSERT(!target->IsFailure());
1620
1621      MigrateObject(HeapObject::cast(target)->address(),
1622                    current,
1623                    size,
1624                    false);
1625    } else {
1626      size = object->Size();
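      // The object is dead.  Write NULL as its forwarding address so that
      // stale pointers to it can be recognized and cleared during later
      // dirty-region iterations.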
1627      Memory::Address_at(current) = NULL;
1628    }
1629  }
1630
1631  // Second pass: find pointers to new space and update them.
1632  PointersToNewGenUpdatingVisitor updating_visitor;
1633
1634  // Update pointers in to space.
1635  Address current = space->bottom();
1636  while (current < space->top()) {
1637    HeapObject* object = HeapObject::FromAddress(current);
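    // IterateBody updates the object's pointer fields and returns the object
    // size, advancing the scan to the next object.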
1638    current +=
1639        StaticPointersToNewGenUpdatingVisitor::IterateBody(object->map(),
1640                                                           object);
1641  }
1642
1643  // Update roots.
1644  Heap::IterateRoots(&updating_visitor, VISIT_ALL_IN_SCAVENGE);
1645
1646  // Update pointers in old spaces.
1647  Heap::IterateDirtyRegions(Heap::old_pointer_space(),
1648                            &Heap::IteratePointersInDirtyRegion,
1649                            &UpdatePointerToNewGen,
1650                            Heap::WATERMARK_SHOULD_BE_VALID);
1651
1652  Heap::lo_space()->IterateDirtyRegions(&UpdatePointerToNewGen);
1653
1654  // Update pointers from cells.
1655  HeapObjectIterator cell_iterator(Heap::cell_space());
1656  for (HeapObject* cell = cell_iterator.next();
1657       cell != NULL;
1658       cell = cell_iterator.next()) {
1659    if (cell->IsJSGlobalPropertyCell()) {
1660      Address value_address =
1661          reinterpret_cast<Address>(cell) +
1662          (JSGlobalPropertyCell::kValueOffset - kHeapObjectTag);
1663      updating_visitor.VisitPointer(reinterpret_cast<Object**>(value_address));
1664    }
1665  }
1666
  // Update the pointer to the head of the global contexts list.
1668  updating_visitor.VisitPointer(Heap::global_contexts_list_address());
1669
1670  // Update pointers from external string table.
1671  Heap::UpdateNewSpaceReferencesInExternalStringTable(
1672      &UpdateNewSpaceReferenceInExternalStringTableEntry);
1673
1674  // All pointers were updated. Update auxiliary allocation info.
1675  Heap::IncrementYoungSurvivorsCounter(survivors_size);
1676  space->set_age_mark(space->top());
1677}
1678
1679
1680static void SweepSpace(PagedSpace* space) {
1681  PageIterator it(space, PageIterator::PAGES_IN_USE);
1682
  // During sweeping of a paged space we try to find the longest sequences
  // of pages without live objects and free them (instead of putting them on
  // the free list).
1686
1687  // Page preceding current.
1688  Page* prev = Page::FromAddress(NULL);
1689
1690  // First empty page in a sequence.
1691  Page* first_empty_page = Page::FromAddress(NULL);
1692
1693  // Page preceding first empty page.
1694  Page* prec_first_empty_page = Page::FromAddress(NULL);
1695
  // If the last used page of the space ends with a sequence of dead objects,
  // we can adjust the allocation top instead of putting this free area into
  // the free list.  Thus during sweeping we keep track of such areas
  // and defer their deallocation until the sweeping of the next page
  // is done: if one of the next pages contains live objects, we have
  // to put such an area into the free list.
1702  Address last_free_start = NULL;
1703  int last_free_size = 0;
1704
1705  while (it.has_next()) {
1706    Page* p = it.next();
1707
1708    bool is_previous_alive = true;
1709    Address free_start = NULL;
1710    HeapObject* object;
1711
1712    for (Address current = p->ObjectAreaStart();
1713         current < p->AllocationTop();
1714         current += object->Size()) {
1715      object = HeapObject::FromAddress(current);
1716      if (object->IsMarked()) {
1717        object->ClearMark();
1718        MarkCompactCollector::tracer()->decrement_marked_count();
1719
1720        if (!is_previous_alive) {  // Transition from free to live.
1721          space->DeallocateBlock(free_start,
1722                                 static_cast<int>(current - free_start),
1723                                 true);
1724          is_previous_alive = true;
1725        }
1726      } else {
1727        MarkCompactCollector::ReportDeleteIfNeeded(object);
1728        if (is_previous_alive) {  // Transition from live to free.
1729          free_start = current;
1730          is_previous_alive = false;
1731        }
1732      }
1733      // The object is now unmarked for the call to Size() at the top of the
1734      // loop.
1735    }
1736
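    // A page is considered empty if it contains no objects at all or if its
    // whole object area forms a single free region.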
1737    bool page_is_empty = (p->ObjectAreaStart() == p->AllocationTop())
1738        || (!is_previous_alive && free_start == p->ObjectAreaStart());
1739
1740    if (page_is_empty) {
      // This page is empty.  Check whether we are in the middle of a
      // sequence of empty pages and start one if not.
1743      if (!first_empty_page->is_valid()) {
1744        first_empty_page = p;
1745        prec_first_empty_page = prev;
1746      }
1747
1748      if (!is_previous_alive) {
1749        // There are dead objects on this page. Update space accounting stats
1750        // without putting anything into free list.
1751        int size_in_bytes = static_cast<int>(p->AllocationTop() - free_start);
1752        if (size_in_bytes > 0) {
1753          space->DeallocateBlock(free_start, size_in_bytes, false);
1754        }
1755      }
1756    } else {
      // This page is not empty.  The sequence of empty pages ended on the
      // previous one.
1759      if (first_empty_page->is_valid()) {
1760        space->FreePages(prec_first_empty_page, prev);
1761        prec_first_empty_page = first_empty_page = Page::FromAddress(NULL);
1762      }
1763
      // If there is a free ending area on one of the previous pages, we have
      // to deallocate that area and put it on the free list.
1766      if (last_free_size > 0) {
1767        Page::FromAddress(last_free_start)->
1768            SetAllocationWatermark(last_free_start);
1769        space->DeallocateBlock(last_free_start, last_free_size, true);
1770        last_free_start = NULL;
1771        last_free_size  = 0;
1772      }
1773
1774      // If the last region of this page was not live we remember it.
1775      if (!is_previous_alive) {
1776        ASSERT(last_free_size == 0);
1777        last_free_size = static_cast<int>(p->AllocationTop() - free_start);
1778        last_free_start = free_start;
1779      }
1780    }
1781
1782    prev = p;
1783  }
1784
1785  // We reached end of space. See if we need to adjust allocation top.
1786  Address new_allocation_top = NULL;
1787
1788  if (first_empty_page->is_valid()) {
    // The last used pages in the space are empty.  We can move the allocation
    // top backwards to the beginning of the first empty page.
1791    ASSERT(prev == space->AllocationTopPage());
1792
1793    new_allocation_top = first_empty_page->ObjectAreaStart();
1794  }
1795
1796  if (last_free_size > 0) {
1797    // There was a free ending area on the previous page.
    // Deallocate it without putting it into the free list and move the
    // allocation top to the beginning of this free area.
1800    space->DeallocateBlock(last_free_start, last_free_size, false);
1801    new_allocation_top = last_free_start;
1802  }
1803
1804  if (new_allocation_top != NULL) {
1805#ifdef DEBUG
    Page* new_allocation_top_page =
        Page::FromAllocationTop(new_allocation_top);
1807    if (!first_empty_page->is_valid()) {
1808      ASSERT(new_allocation_top_page == space->AllocationTopPage());
1809    } else if (last_free_size > 0) {
1810      ASSERT(new_allocation_top_page == prec_first_empty_page);
1811    } else {
1812      ASSERT(new_allocation_top_page == first_empty_page);
1813    }
1814#endif
1815
1816    space->SetTop(new_allocation_top);
1817  }
1818}
1819
1820
1821void MarkCompactCollector::EncodeForwardingAddresses() {
1822  ASSERT(state_ == ENCODE_FORWARDING_ADDRESSES);
1823  // Objects in the active semispace of the young generation may be
1824  // relocated to the inactive semispace (if not promoted).  Set the
1825  // relocation info to the beginning of the inactive semispace.
1826  Heap::new_space()->MCResetRelocationInfo();
1827
1828  // Compute the forwarding pointers in each space.
1829  EncodeForwardingAddressesInPagedSpace<MCAllocateFromOldPointerSpace,
1830                                        ReportDeleteIfNeeded>(
1831      Heap::old_pointer_space());
1832
1833  EncodeForwardingAddressesInPagedSpace<MCAllocateFromOldDataSpace,
1834                                        IgnoreNonLiveObject>(
1835      Heap::old_data_space());
1836
1837  EncodeForwardingAddressesInPagedSpace<MCAllocateFromCodeSpace,
1838                                        ReportDeleteIfNeeded>(
1839      Heap::code_space());
1840
1841  EncodeForwardingAddressesInPagedSpace<MCAllocateFromCellSpace,
1842                                        IgnoreNonLiveObject>(
1843      Heap::cell_space());
1844
1845
1846  // Compute new space next to last after the old and code spaces have been
1847  // compacted.  Objects in new space can be promoted to old or code space.
1848  EncodeForwardingAddressesInNewSpace();
1849
1850  // Compute map space last because computing forwarding addresses
1851  // overwrites non-live objects.  Objects in the other spaces rely on
1852  // non-live map pointers to get the sizes of non-live objects.
1853  EncodeForwardingAddressesInPagedSpace<MCAllocateFromMapSpace,
1854                                        IgnoreNonLiveObject>(
1855      Heap::map_space());
1856
1857  // Write relocation info to the top page, so we can use it later.  This is
1858  // done after promoting objects from the new space so we get the correct
1859  // allocation top.
1860  Heap::old_pointer_space()->MCWriteRelocationInfoToPage();
1861  Heap::old_data_space()->MCWriteRelocationInfoToPage();
1862  Heap::code_space()->MCWriteRelocationInfoToPage();
1863  Heap::map_space()->MCWriteRelocationInfoToPage();
1864  Heap::cell_space()->MCWriteRelocationInfoToPage();
1865}
1866
1867
1868class MapIterator : public HeapObjectIterator {
1869 public:
1870  MapIterator() : HeapObjectIterator(Heap::map_space(), &SizeCallback) { }
1871
1872  explicit MapIterator(Address start)
1873      : HeapObjectIterator(Heap::map_space(), start, &SizeCallback) { }
1874
1875 private:
1876  static int SizeCallback(HeapObject* unused) {
1877    USE(unused);
1878    return Map::kSize;
1879  }
1880};
1881
1882
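// Compacts the map space in place: live maps above the post-compaction top
// are evacuated into vacant (free-list) slots below it, and their old
// locations are overwritten with overflow-tagged forwarding map words.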
1883class MapCompact {
1884 public:
1885  explicit MapCompact(int live_maps)
1886    : live_maps_(live_maps),
1887      to_evacuate_start_(Heap::map_space()->TopAfterCompaction(live_maps)),
1888      map_to_evacuate_it_(to_evacuate_start_),
1889      first_map_to_evacuate_(
1890          reinterpret_cast<Map*>(HeapObject::FromAddress(to_evacuate_start_))) {
1891  }
1892
1893  void CompactMaps() {
1894    // As we know the number of maps to evacuate beforehand,
    // we stop when there are no more vacant maps.
1896    for (Map* next_vacant_map = NextVacantMap();
1897         next_vacant_map;
1898         next_vacant_map = NextVacantMap()) {
1899      EvacuateMap(next_vacant_map, NextMapToEvacuate());
1900    }
1901
1902#ifdef DEBUG
1903    CheckNoMapsToEvacuate();
1904#endif
1905  }
1906
1907  void UpdateMapPointersInRoots() {
1908    Heap::IterateRoots(&map_updating_visitor_, VISIT_ONLY_STRONG);
1909    GlobalHandles::IterateWeakRoots(&map_updating_visitor_);
1910  }
1911
1912  void UpdateMapPointersInPagedSpace(PagedSpace* space) {
1913    ASSERT(space != Heap::map_space());
1914
1915    PageIterator it(space, PageIterator::PAGES_IN_USE);
1916    while (it.has_next()) {
1917      Page* p = it.next();
1918      UpdateMapPointersInRange(p->ObjectAreaStart(), p->AllocationTop());
1919    }
1920  }
1921
1922  void UpdateMapPointersInNewSpace() {
1923    NewSpace* space = Heap::new_space();
1924    UpdateMapPointersInRange(space->bottom(), space->top());
1925  }
1926
1927  void UpdateMapPointersInLargeObjectSpace() {
1928    LargeObjectIterator it(Heap::lo_space());
1929    for (HeapObject* obj = it.next(); obj != NULL; obj = it.next())
1930      UpdateMapPointersInObject(obj);
1931  }
1932
1933  void Finish() {
1934    Heap::map_space()->FinishCompaction(to_evacuate_start_, live_maps_);
1935  }
1936
1937 private:
1938  int live_maps_;
1939  Address to_evacuate_start_;
1940  MapIterator vacant_map_it_;
1941  MapIterator map_to_evacuate_it_;
1942  Map* first_map_to_evacuate_;
1943
1944  // Helper class for updating map pointers in HeapObjects.
1945  class MapUpdatingVisitor: public ObjectVisitor {
1946  public:
1947    void VisitPointer(Object** p) {
1948      UpdateMapPointer(p);
1949    }
1950
1951    void VisitPointers(Object** start, Object** end) {
1952      for (Object** p = start; p < end; p++) UpdateMapPointer(p);
1953    }
1954
1955  private:
1956    void UpdateMapPointer(Object** p) {
1957      if (!(*p)->IsHeapObject()) return;
1958      HeapObject* old_map = reinterpret_cast<HeapObject*>(*p);
1959
      // Moved maps are tagged with an overflowed map word.  They are the only
      // objects whose map word is overflowed, as marking is already complete.
1962      MapWord map_word = old_map->map_word();
1963      if (!map_word.IsOverflowed()) return;
1964
1965      *p = GetForwardedMap(map_word);
1966    }
1967  };
1968
1969  static MapUpdatingVisitor map_updating_visitor_;
1970
1971  static Map* NextMap(MapIterator* it, HeapObject* last, bool live) {
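    // Scan forward with |it| until |last| is reached, returning the next
    // live map (live == true) or the next vacant free-list slot
    // (live == false).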
1972    while (true) {
1973      HeapObject* next = it->next();
1974      ASSERT(next != NULL);
1975      if (next == last)
1976        return NULL;
1977      ASSERT(!next->IsOverflowed());
1978      ASSERT(!next->IsMarked());
1979      ASSERT(next->IsMap() || FreeListNode::IsFreeListNode(next));
1980      if (next->IsMap() == live)
1981        return reinterpret_cast<Map*>(next);
1982    }
1983  }
1984
1985  Map* NextVacantMap() {
1986    Map* map = NextMap(&vacant_map_it_, first_map_to_evacuate_, false);
1987    ASSERT(map == NULL || FreeListNode::IsFreeListNode(map));
1988    return map;
1989  }
1990
1991  Map* NextMapToEvacuate() {
1992    Map* map = NextMap(&map_to_evacuate_it_, NULL, true);
1993    ASSERT(map != NULL);
1994    ASSERT(map->IsMap());
1995    return map;
1996  }
1997
1998  static void EvacuateMap(Map* vacant_map, Map* map_to_evacuate) {
1999    ASSERT(FreeListNode::IsFreeListNode(vacant_map));
2000    ASSERT(map_to_evacuate->IsMap());
2001
2002    ASSERT(Map::kSize % 4 == 0);
2003
2004    Heap::CopyBlockToOldSpaceAndUpdateRegionMarks(vacant_map->address(),
2005                                                  map_to_evacuate->address(),
2006                                                  Map::kSize);
2007
2008    ASSERT(vacant_map->IsMap());  // Due to memcpy above.
2009
2010    MapWord forwarding_map_word = MapWord::FromMap(vacant_map);
2011    forwarding_map_word.SetOverflow();
2012    map_to_evacuate->set_map_word(forwarding_map_word);
2013
2014    ASSERT(map_to_evacuate->map_word().IsOverflowed());
2015    ASSERT(GetForwardedMap(map_to_evacuate->map_word()) == vacant_map);
2016  }
2017
2018  static Map* GetForwardedMap(MapWord map_word) {
2019    ASSERT(map_word.IsOverflowed());
2020    map_word.ClearOverflow();
2021    Map* new_map = map_word.ToMap();
2022    ASSERT_MAP_ALIGNED(new_map->address());
2023    return new_map;
2024  }
2025
2026  static int UpdateMapPointersInObject(HeapObject* obj) {
2027    ASSERT(!obj->IsMarked());
2028    Map* map = obj->map();
2029    ASSERT(Heap::map_space()->Contains(map));
2030    MapWord map_word = map->map_word();
2031    ASSERT(!map_word.IsMarked());
2032    if (map_word.IsOverflowed()) {
2033      Map* new_map = GetForwardedMap(map_word);
2034      ASSERT(Heap::map_space()->Contains(new_map));
2035      obj->set_map(new_map);
2036
2037#ifdef DEBUG
2038      if (FLAG_gc_verbose) {
2039        PrintF("update %p : %p -> %p\n",
2040               obj->address(),
2041               reinterpret_cast<void*>(map),
2042               reinterpret_cast<void*>(new_map));
2043      }
2044#endif
2045    }
2046
2047    int size = obj->SizeFromMap(map);
2048    obj->IterateBody(map->instance_type(), size, &map_updating_visitor_);
2049    return size;
2050  }
2051
2052  static void UpdateMapPointersInRange(Address start, Address end) {
2053    HeapObject* object;
2054    int size;
2055    for (Address current = start; current < end; current += size) {
2056      object = HeapObject::FromAddress(current);
2057      size = UpdateMapPointersInObject(object);
2058      ASSERT(size > 0);
2059    }
2060  }
2061
2062#ifdef DEBUG
2063  void CheckNoMapsToEvacuate() {
2064    if (!FLAG_enable_slow_asserts)
2065      return;
2066
2067    for (HeapObject* obj = map_to_evacuate_it_.next();
2068         obj != NULL; obj = map_to_evacuate_it_.next())
2069      ASSERT(FreeListNode::IsFreeListNode(obj));
2070  }
2071#endif
2072};
2073
2074MapCompact::MapUpdatingVisitor MapCompact::map_updating_visitor_;
2075
2076
2077void MarkCompactCollector::SweepSpaces() {
2078  GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_SWEEP);
2079
2080  ASSERT(state_ == SWEEP_SPACES);
2081  ASSERT(!IsCompacting());
2082  // Noncompacting collections simply sweep the spaces to clear the mark
2083  // bits and free the nonlive blocks (for old and map spaces).  We sweep
2084  // the map space last because freeing non-live maps overwrites them and
2085  // the other spaces rely on possibly non-live maps to get the sizes for
2086  // non-live objects.
2087  SweepSpace(Heap::old_pointer_space());
2088  SweepSpace(Heap::old_data_space());
2089  SweepSpace(Heap::code_space());
2090  SweepSpace(Heap::cell_space());
2091  { GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_SWEEP_NEWSPACE);
2092    SweepNewSpace(Heap::new_space());
2093  }
2094  SweepSpace(Heap::map_space());
2095
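  // Map-space dirty regions were not covered by SweepNewSpace above, so
  // update any remaining pointers from maps to new space here.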
2096  Heap::IterateDirtyRegions(Heap::map_space(),
2097                            &Heap::IteratePointersInDirtyMapsRegion,
2098                            &UpdatePointerToNewGen,
2099                            Heap::WATERMARK_SHOULD_BE_VALID);
2100
2101  intptr_t live_maps_size = Heap::map_space()->Size();
2102  int live_maps = static_cast<int>(live_maps_size / Map::kSize);
2103  ASSERT(live_map_objects_size_ == live_maps_size);
2104
2105  if (Heap::map_space()->NeedsCompaction(live_maps)) {
2106    MapCompact map_compact(live_maps);
2107
2108    map_compact.CompactMaps();
2109    map_compact.UpdateMapPointersInRoots();
2110
2111    PagedSpaces spaces;
2112    for (PagedSpace* space = spaces.next();
2113         space != NULL; space = spaces.next()) {
2114      if (space == Heap::map_space()) continue;
2115      map_compact.UpdateMapPointersInPagedSpace(space);
2116    }
2117    map_compact.UpdateMapPointersInNewSpace();
2118    map_compact.UpdateMapPointersInLargeObjectSpace();
2119
2120    map_compact.Finish();
2121  }
2122}
2123
2124
// Iterate the live objects in a range of addresses (e.g., a page or a
// semispace), skipping over the free regions encoded in place of non-live
// objects.  The callback function is used to get the size of each live
// object.
2130int MarkCompactCollector::IterateLiveObjectsInRange(
2131    Address start,
2132    Address end,
2133    HeapObjectCallback size_func) {
2134  int live_objects_size = 0;
2135  Address current = start;
2136  while (current < end) {
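    // Non-live regions were encoded in place by EncodeFreeRegion: a single
    // free word is tagged kSingleFreeEncoding, and a larger free region is
    // tagged kMultiFreeEncoding with its size stored in the following word.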
2137    uint32_t encoded_map = Memory::uint32_at(current);
2138    if (encoded_map == kSingleFreeEncoding) {
2139      current += kPointerSize;
2140    } else if (encoded_map == kMultiFreeEncoding) {
2141      current += Memory::int_at(current + kIntSize);
2142    } else {
2143      int size = size_func(HeapObject::FromAddress(current));
2144      current += size;
2145      live_objects_size += size;
2146    }
2147  }
2148  return live_objects_size;
2149}
2150
2151
2152int MarkCompactCollector::IterateLiveObjects(NewSpace* space,
2153                                             HeapObjectCallback size_f) {
2154  ASSERT(MARK_LIVE_OBJECTS < state_ && state_ <= RELOCATE_OBJECTS);
2155  return IterateLiveObjectsInRange(space->bottom(), space->top(), size_f);
2156}
2157
2158
2159int MarkCompactCollector::IterateLiveObjects(PagedSpace* space,
2160                                             HeapObjectCallback size_f) {
2161  ASSERT(MARK_LIVE_OBJECTS < state_ && state_ <= RELOCATE_OBJECTS);
2162  int total = 0;
2163  PageIterator it(space, PageIterator::PAGES_IN_USE);
2164  while (it.has_next()) {
2165    Page* p = it.next();
2166    total += IterateLiveObjectsInRange(p->ObjectAreaStart(),
2167                                       p->AllocationTop(),
2168                                       size_f);
2169  }
2170  return total;
2171}
2172
2173
2174// -------------------------------------------------------------------------
2175// Phase 3: Update pointers
2176
2177// Helper class for updating pointers in HeapObjects.
2178class UpdatingVisitor: public ObjectVisitor {
2179 public:
2180  void VisitPointer(Object** p) {
2181    UpdatePointer(p);
2182  }
2183
2184  void VisitPointers(Object** start, Object** end) {
    // Update all HeapObject pointers in [start, end).
2186    for (Object** p = start; p < end; p++) UpdatePointer(p);
2187  }
2188
2189  void VisitCodeTarget(RelocInfo* rinfo) {
2190    ASSERT(RelocInfo::IsCodeTarget(rinfo->rmode()));
2191    Object* target = Code::GetCodeFromTargetAddress(rinfo->target_address());
2192    VisitPointer(&target);
2193    rinfo->set_target_address(
2194        reinterpret_cast<Code*>(target)->instruction_start());
2195  }
2196
2197  void VisitDebugTarget(RelocInfo* rinfo) {
2198    ASSERT((RelocInfo::IsJSReturn(rinfo->rmode()) &&
2199            rinfo->IsPatchedReturnSequence()) ||
2200           (RelocInfo::IsDebugBreakSlot(rinfo->rmode()) &&
2201            rinfo->IsPatchedDebugBreakSlotSequence()));
2202    Object* target = Code::GetCodeFromTargetAddress(rinfo->call_address());
2203    VisitPointer(&target);
2204    rinfo->set_call_address(
2205        reinterpret_cast<Code*>(target)->instruction_start());
2206  }
2207
2208 private:
2209  void UpdatePointer(Object** p) {
2210    if (!(*p)->IsHeapObject()) return;
2211
2212    HeapObject* obj = HeapObject::cast(*p);
2213    Address old_addr = obj->address();
2214    Address new_addr;
2215    ASSERT(!Heap::InFromSpace(obj));
2216
2217    if (Heap::new_space()->Contains(obj)) {
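      // The forwarding address of a new-space object is kept in from space,
      // at the same offset the object has in to space.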
2218      Address forwarding_pointer_addr =
2219          Heap::new_space()->FromSpaceLow() +
2220          Heap::new_space()->ToSpaceOffsetForAddress(old_addr);
2221      new_addr = Memory::Address_at(forwarding_pointer_addr);
2222
2223#ifdef DEBUG
2224      ASSERT(Heap::old_pointer_space()->Contains(new_addr) ||
2225             Heap::old_data_space()->Contains(new_addr) ||
2226             Heap::new_space()->FromSpaceContains(new_addr) ||
2227             Heap::lo_space()->Contains(HeapObject::FromAddress(new_addr)));
2228
2229      if (Heap::new_space()->FromSpaceContains(new_addr)) {
2230        ASSERT(Heap::new_space()->FromSpaceOffsetForAddress(new_addr) <=
2231               Heap::new_space()->ToSpaceOffsetForAddress(old_addr));
2232      }
2233#endif
2234
2235    } else if (Heap::lo_space()->Contains(obj)) {
2236      // Don't move objects in the large object space.
2237      return;
2238
2239    } else {
2240#ifdef DEBUG
2241      PagedSpaces spaces;
2242      PagedSpace* original_space = spaces.next();
2243      while (original_space != NULL) {
2244        if (original_space->Contains(obj)) break;
2245        original_space = spaces.next();
2246      }
2247      ASSERT(original_space != NULL);
2248#endif
2249      new_addr = MarkCompactCollector::GetForwardingAddressInOldSpace(obj);
2250      ASSERT(original_space->Contains(new_addr));
2251      ASSERT(original_space->MCSpaceOffsetForAddress(new_addr) <=
2252             original_space->MCSpaceOffsetForAddress(old_addr));
2253    }
2254
2255    *p = HeapObject::FromAddress(new_addr);
2256
2257#ifdef DEBUG
2258    if (FLAG_gc_verbose) {
2259      PrintF("update %p : %p -> %p\n",
2260             reinterpret_cast<Address>(p), old_addr, new_addr);
2261    }
2262#endif
2263  }
2264};
2265
2266
2267void MarkCompactCollector::UpdatePointers() {
2268#ifdef DEBUG
2269  ASSERT(state_ == ENCODE_FORWARDING_ADDRESSES);
2270  state_ = UPDATE_POINTERS;
2271#endif
2272  UpdatingVisitor updating_visitor;
2273  Heap::IterateRoots(&updating_visitor, VISIT_ONLY_STRONG);
2274  GlobalHandles::IterateWeakRoots(&updating_visitor);
2275
2276  // Update the pointer to the head of the weak list of global contexts.
2277  updating_visitor.VisitPointer(&Heap::global_contexts_list_);
2278
2279  int live_maps_size = IterateLiveObjects(Heap::map_space(),
2280                                          &UpdatePointersInOldObject);
2281  int live_pointer_olds_size = IterateLiveObjects(Heap::old_pointer_space(),
2282                                                  &UpdatePointersInOldObject);
2283  int live_data_olds_size = IterateLiveObjects(Heap::old_data_space(),
2284                                               &UpdatePointersInOldObject);
2285  int live_codes_size = IterateLiveObjects(Heap::code_space(),
2286                                           &UpdatePointersInOldObject);
2287  int live_cells_size = IterateLiveObjects(Heap::cell_space(),
2288                                           &UpdatePointersInOldObject);
2289  int live_news_size = IterateLiveObjects(Heap::new_space(),
2290                                          &UpdatePointersInNewObject);
2291
  // Large objects do not move, so the map word can be updated directly.
2293  LargeObjectIterator it(Heap::lo_space());
2294  for (HeapObject* obj = it.next(); obj != NULL; obj = it.next())
2295    UpdatePointersInNewObject(obj);
2296
2297  USE(live_maps_size);
2298  USE(live_pointer_olds_size);
2299  USE(live_data_olds_size);
2300  USE(live_codes_size);
2301  USE(live_cells_size);
2302  USE(live_news_size);
2303  ASSERT(live_maps_size == live_map_objects_size_);
2304  ASSERT(live_data_olds_size == live_old_data_objects_size_);
2305  ASSERT(live_pointer_olds_size == live_old_pointer_objects_size_);
2306  ASSERT(live_codes_size == live_code_objects_size_);
2307  ASSERT(live_cells_size == live_cell_objects_size_);
2308  ASSERT(live_news_size == live_young_objects_size_);
2309}
2310
2311
2312int MarkCompactCollector::UpdatePointersInNewObject(HeapObject* obj) {
  // Fetch the old map pointer; it is still needed below to compute the
  // object size, because map objects have not been relocated yet.
2314  Map* old_map = obj->map();
2315  ASSERT(old_map->IsHeapObject());
2316
2317  Address forwarded = GetForwardingAddressInOldSpace(old_map);
2318
2319  ASSERT(Heap::map_space()->Contains(old_map));
2320  ASSERT(Heap::map_space()->Contains(forwarded));
2321#ifdef DEBUG
2322  if (FLAG_gc_verbose) {
2323    PrintF("update %p : %p -> %p\n", obj->address(), old_map->address(),
2324           forwarded);
2325  }
2326#endif
2327  // Update the map pointer.
2328  obj->set_map(reinterpret_cast<Map*>(HeapObject::FromAddress(forwarded)));
2329
2330  // We have to compute the object size relying on the old map because
2331  // map objects are not relocated yet.
2332  int obj_size = obj->SizeFromMap(old_map);
2333
2334  // Update pointers in the object body.
2335  UpdatingVisitor updating_visitor;
2336  obj->IterateBody(old_map->instance_type(), obj_size, &updating_visitor);
2337  return obj_size;
2338}
2339
2340
2341int MarkCompactCollector::UpdatePointersInOldObject(HeapObject* obj) {
2342  // Decode the map pointer.
2343  MapWord encoding = obj->map_word();
2344  Address map_addr = encoding.DecodeMapAddress(Heap::map_space());
2345  ASSERT(Heap::map_space()->Contains(HeapObject::FromAddress(map_addr)));
2346
  // At this point the first word of the map at map_addr is also encoded, so
  // we cannot cast it to Map* using Map::cast.
2349  Map* map = reinterpret_cast<Map*>(HeapObject::FromAddress(map_addr));
2350  int obj_size = obj->SizeFromMap(map);
2351  InstanceType type = map->instance_type();
2352
2353  // Update map pointer.
2354  Address new_map_addr = GetForwardingAddressInOldSpace(map);
2355  int offset = encoding.DecodeOffset();
2356  obj->set_map_word(MapWord::EncodeAddress(new_map_addr, offset));
2357
2358#ifdef DEBUG
2359  if (FLAG_gc_verbose) {
2360    PrintF("update %p : %p -> %p\n", obj->address(),
2361           map_addr, new_map_addr);
2362  }
2363#endif
2364
2365  // Update pointers in the object body.
2366  UpdatingVisitor updating_visitor;
2367  obj->IterateBody(type, obj_size, &updating_visitor);
2368  return obj_size;
2369}
2370
2371
2372Address MarkCompactCollector::GetForwardingAddressInOldSpace(HeapObject* obj) {
  // The object should be in either old space or map space.
2374  MapWord encoding = obj->map_word();
2375
2376  // Offset to the first live object's forwarding address.
2377  int offset = encoding.DecodeOffset();
2378  Address obj_addr = obj->address();
2379
2380  // Find the first live object's forwarding address.
2381  Page* p = Page::FromAddress(obj_addr);
2382  Address first_forwarded = p->mc_first_forwarded;
2383
2384  // Page start address of forwarded address.
2385  Page* forwarded_page = Page::FromAddress(first_forwarded);
2386  int forwarded_offset = forwarded_page->Offset(first_forwarded);
2387
2388  // Find end of allocation in the page of first_forwarded.
2389  int mc_top_offset = forwarded_page->AllocationWatermarkOffset();
2390
  // Check if the current object's forwarding pointer is on the same page
  // as the first live object's forwarding pointer.
2393  if (forwarded_offset + offset < mc_top_offset) {
2394    // In the same page.
2395    return first_forwarded + offset;
2396  }
2397
  // Must be on the next page.  NOTE: this may cross chunks.
2399  Page* next_page = forwarded_page->next_page();
2400  ASSERT(next_page->is_valid());
2401
2402  offset -= (mc_top_offset - forwarded_offset);
2403  offset += Page::kObjectStartOffset;
2404
2405  ASSERT_PAGE_OFFSET(offset);
2406  ASSERT(next_page->OffsetToAddress(offset) < next_page->AllocationTop());
2407
2408  return next_page->OffsetToAddress(offset);
2409}
2410
2411
2412// -------------------------------------------------------------------------
2413// Phase 4: Relocate objects
2414
2415void MarkCompactCollector::RelocateObjects() {
2416#ifdef DEBUG
2417  ASSERT(state_ == UPDATE_POINTERS);
2418  state_ = RELOCATE_OBJECTS;
2419#endif
  // Relocate objects, always relocating map objects first.  Relocating
  // objects in other spaces relies on map objects to get object sizes.
2422  int live_maps_size = IterateLiveObjects(Heap::map_space(),
2423                                          &RelocateMapObject);
2424  int live_pointer_olds_size = IterateLiveObjects(Heap::old_pointer_space(),
2425                                                  &RelocateOldPointerObject);
2426  int live_data_olds_size = IterateLiveObjects(Heap::old_data_space(),
2427                                               &RelocateOldDataObject);
2428  int live_codes_size = IterateLiveObjects(Heap::code_space(),
2429                                           &RelocateCodeObject);
2430  int live_cells_size = IterateLiveObjects(Heap::cell_space(),
2431                                           &RelocateCellObject);
2432  int live_news_size = IterateLiveObjects(Heap::new_space(),
2433                                          &RelocateNewObject);
2434
2435  USE(live_maps_size);
2436  USE(live_pointer_olds_size);
2437  USE(live_data_olds_size);
2438  USE(live_codes_size);
2439  USE(live_cells_size);
2440  USE(live_news_size);
2441  ASSERT(live_maps_size == live_map_objects_size_);
2442  ASSERT(live_data_olds_size == live_old_data_objects_size_);
2443  ASSERT(live_pointer_olds_size == live_old_pointer_objects_size_);
2444  ASSERT(live_codes_size == live_code_objects_size_);
2445  ASSERT(live_cells_size == live_cell_objects_size_);
2446  ASSERT(live_news_size == live_young_objects_size_);
2447
  // Flip the from and to spaces.
2449  Heap::new_space()->Flip();
2450
2451  Heap::new_space()->MCCommitRelocationInfo();
2452
  // Set the age mark to the bottom of to space.
2454  Address mark = Heap::new_space()->bottom();
2455  Heap::new_space()->set_age_mark(mark);
2456
2457  PagedSpaces spaces;
2458  for (PagedSpace* space = spaces.next(); space != NULL; space = spaces.next())
2459    space->MCCommitRelocationInfo();
2460
2461  Heap::CheckNewSpaceExpansionCriteria();
2462  Heap::IncrementYoungSurvivorsCounter(live_news_size);
2463}
2464
2465
2466int MarkCompactCollector::RelocateMapObject(HeapObject* obj) {
2467  // Recover map pointer.
2468  MapWord encoding = obj->map_word();
2469  Address map_addr = encoding.DecodeMapAddress(Heap::map_space());
2470  ASSERT(Heap::map_space()->Contains(HeapObject::FromAddress(map_addr)));
2471
2472  // Get forwarding address before resetting map pointer
2473  Address new_addr = GetForwardingAddressInOldSpace(obj);
2474
2475  // Reset map pointer.  The meta map object may not be copied yet so
2476  // Map::cast does not yet work.
2477  obj->set_map(reinterpret_cast<Map*>(HeapObject::FromAddress(map_addr)));
2478
2479  Address old_addr = obj->address();
2480
2481  if (new_addr != old_addr) {
2482    // Move contents.
2483    Heap::MoveBlockToOldSpaceAndUpdateRegionMarks(new_addr,
2484                                                  old_addr,
2485                                                  Map::kSize);
2486  }
2487
2488#ifdef DEBUG
2489  if (FLAG_gc_verbose) {
2490    PrintF("relocate %p -> %p\n", old_addr, new_addr);
2491  }
2492#endif
2493
2494  return Map::kSize;
2495}
2496
2497
2498static inline int RestoreMap(HeapObject* obj,
2499                             PagedSpace* space,
2500                             Address new_addr,
2501                             Address map_addr) {
2502  // This must be a non-map object, and the function relies on the
2503  // assumption that the Map space is compacted before the other paged
2504  // spaces (see RelocateObjects).
2505
2506  // Reset map pointer.
2507  obj->set_map(Map::cast(HeapObject::FromAddress(map_addr)));
2508
2509  int obj_size = obj->Size();
2510  ASSERT_OBJECT_SIZE(obj_size);
2511
2512  ASSERT(space->MCSpaceOffsetForAddress(new_addr) <=
2513         space->MCSpaceOffsetForAddress(obj->address()));
2514
2515#ifdef DEBUG
2516  if (FLAG_gc_verbose) {
2517    PrintF("relocate %p -> %p\n", obj->address(), new_addr);
2518  }
2519#endif
2520
2521  return obj_size;
2522}
2523
2524
2525int MarkCompactCollector::RelocateOldNonCodeObject(HeapObject* obj,
2526                                                   PagedSpace* space) {
2527  // Recover map pointer.
2528  MapWord encoding = obj->map_word();
2529  Address map_addr = encoding.DecodeMapAddress(Heap::map_space());
2530  ASSERT(Heap::map_space()->Contains(map_addr));
2531
2532  // Get forwarding address before resetting map pointer.
2533  Address new_addr = GetForwardingAddressInOldSpace(obj);
2534
2535  // Reset the map pointer.
2536  int obj_size = RestoreMap(obj, space, new_addr, map_addr);
2537
2538  Address old_addr = obj->address();
2539
2540  if (new_addr != old_addr) {
2541    // Move contents.
2542    if (space == Heap::old_data_space()) {
2543      Heap::MoveBlock(new_addr, old_addr, obj_size);
2544    } else {
2545      Heap::MoveBlockToOldSpaceAndUpdateRegionMarks(new_addr,
2546                                                    old_addr,
2547                                                    obj_size);
2548    }
2549  }
2550
2551  ASSERT(!HeapObject::FromAddress(new_addr)->IsCode());
2552
2553  HeapObject* copied_to = HeapObject::FromAddress(new_addr);
2554  if (copied_to->IsJSFunction()) {
2555    PROFILE(FunctionMoveEvent(old_addr, new_addr));
2556    PROFILE(FunctionCreateEventFromMove(JSFunction::cast(copied_to)));
2557  }
2558  HEAP_PROFILE(ObjectMoveEvent(old_addr, new_addr));
2559
2560  return obj_size;
2561}
2562
2563
2564int MarkCompactCollector::RelocateOldPointerObject(HeapObject* obj) {
2565  return RelocateOldNonCodeObject(obj, Heap::old_pointer_space());
2566}
2567
2568
2569int MarkCompactCollector::RelocateOldDataObject(HeapObject* obj) {
2570  return RelocateOldNonCodeObject(obj, Heap::old_data_space());
2571}
2572
2573
2574int MarkCompactCollector::RelocateCellObject(HeapObject* obj) {
2575  return RelocateOldNonCodeObject(obj, Heap::cell_space());
2576}
2577
2578
2579int MarkCompactCollector::RelocateCodeObject(HeapObject* obj) {
2580  // Recover map pointer.
2581  MapWord encoding = obj->map_word();
2582  Address map_addr = encoding.DecodeMapAddress(Heap::map_space());
2583  ASSERT(Heap::map_space()->Contains(HeapObject::FromAddress(map_addr)));
2584
2585  // Get forwarding address before resetting map pointer
2586  Address new_addr = GetForwardingAddressInOldSpace(obj);
2587
2588  // Reset the map pointer.
2589  int obj_size = RestoreMap(obj, Heap::code_space(), new_addr, map_addr);
2590
2591  Address old_addr = obj->address();
2592
2593  if (new_addr != old_addr) {
2594    // Move contents.
2595    Heap::MoveBlock(new_addr, old_addr, obj_size);
2596  }
2597
2598  HeapObject* copied_to = HeapObject::FromAddress(new_addr);
2599  if (copied_to->IsCode()) {
2600    // May also update inline cache target.
2601    Code::cast(copied_to)->Relocate(new_addr - old_addr);
2602    // Notify the logger that compiled code has moved.
2603    PROFILE(CodeMoveEvent(old_addr, new_addr));
2604  }
2605  HEAP_PROFILE(ObjectMoveEvent(old_addr, new_addr));
2606
2607  return obj_size;
2608}
2609
2610
2611int MarkCompactCollector::RelocateNewObject(HeapObject* obj) {
2612  int obj_size = obj->Size();
2613
2614  // Get forwarding address
2615  Address old_addr = obj->address();
2616  int offset = Heap::new_space()->ToSpaceOffsetForAddress(old_addr);
2617
2618  Address new_addr =
2619    Memory::Address_at(Heap::new_space()->FromSpaceLow() + offset);
2620
2621#ifdef DEBUG
2622  if (Heap::new_space()->FromSpaceContains(new_addr)) {
2623    ASSERT(Heap::new_space()->FromSpaceOffsetForAddress(new_addr) <=
2624           Heap::new_space()->ToSpaceOffsetForAddress(old_addr));
2625  } else {
2626    ASSERT(Heap::TargetSpace(obj) == Heap::old_pointer_space() ||
2627           Heap::TargetSpace(obj) == Heap::old_data_space());
2628  }
2629#endif
2630
2631  // New and old addresses cannot overlap.
2632  if (Heap::InNewSpace(HeapObject::FromAddress(new_addr))) {
2633    Heap::CopyBlock(new_addr, old_addr, obj_size);
2634  } else {
2635    Heap::CopyBlockToOldSpaceAndUpdateRegionMarks(new_addr,
2636                                                  old_addr,
2637                                                  obj_size);
2638  }
2639
2640#ifdef DEBUG
2641  if (FLAG_gc_verbose) {
2642    PrintF("relocate %p -> %p\n", old_addr, new_addr);
2643  }
2644#endif
2645
2646  HeapObject* copied_to = HeapObject::FromAddress(new_addr);
2647  if (copied_to->IsJSFunction()) {
2648    PROFILE(FunctionMoveEvent(old_addr, new_addr));
2649    PROFILE(FunctionCreateEventFromMove(JSFunction::cast(copied_to)));
2650  }
2651  HEAP_PROFILE(ObjectMoveEvent(old_addr, new_addr));
2652
2653  return obj_size;
2654}
2655
2656
2657void MarkCompactCollector::ReportDeleteIfNeeded(HeapObject* obj) {
2658#ifdef ENABLE_LOGGING_AND_PROFILING
2659  if (obj->IsCode()) {
2660    PROFILE(CodeDeleteEvent(obj->address()));
2661  } else if (obj->IsJSFunction()) {
2662    PROFILE(FunctionDeleteEvent(obj->address()));
2663  }
2664#endif
2665}
2666
2667
2668void MarkCompactCollector::Initialize() {
2669  StaticPointersToNewGenUpdatingVisitor::Initialize();
2670  StaticMarkingVisitor::Initialize();
2671}
2672
2673
2674} }  // namespace v8::internal
2675