mark-compact.cc revision 3ce2e2076e8e3e60cf1810eec160ea2d8557e9e7
// Copyright 2006-2008 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "v8.h"

#include "execution.h"
#include "global-handles.h"
#include "ic-inl.h"
#include "mark-compact.h"
#include "stub-cache.h"

namespace v8 {
namespace internal {

// -------------------------------------------------------------------------
// MarkCompactCollector

bool MarkCompactCollector::force_compaction_ = false;
bool MarkCompactCollector::compacting_collection_ = false;
bool MarkCompactCollector::compact_on_next_gc_ = false;

int MarkCompactCollector::previous_marked_count_ = 0;
GCTracer* MarkCompactCollector::tracer_ = NULL;


#ifdef DEBUG
MarkCompactCollector::CollectorState MarkCompactCollector::state_ = IDLE;

// Counters used for debugging the marking phase of mark-compact or mark-sweep
// collection.
int MarkCompactCollector::live_bytes_ = 0;
int MarkCompactCollector::live_young_objects_ = 0;
int MarkCompactCollector::live_old_data_objects_ = 0;
int MarkCompactCollector::live_old_pointer_objects_ = 0;
int MarkCompactCollector::live_code_objects_ = 0;
int MarkCompactCollector::live_map_objects_ = 0;
int MarkCompactCollector::live_cell_objects_ = 0;
int MarkCompactCollector::live_lo_objects_ = 0;
#endif

void MarkCompactCollector::CollectGarbage() {
  // Make sure that Prepare() has been called. The individual steps below will
  // update the state as they proceed.
  ASSERT(state_ == PREPARE_GC);

  // Prepare has selected whether to compact the old generation or not.
  // Tell the tracer.
  if (IsCompacting()) tracer_->set_is_compacting();

  MarkLiveObjects();

  if (FLAG_collect_maps) ClearNonLiveTransitions();

  SweepLargeObjectSpace();

  if (IsCompacting()) {
    EncodeForwardingAddresses();

    UpdatePointers();

    RelocateObjects();

    RebuildRSets();

  } else {
    SweepSpaces();
  }

  Finish();

  // Save the count of marked objects remaining after the collection and
  // null out the GC tracer.
  previous_marked_count_ = tracer_->marked_count();
  ASSERT(previous_marked_count_ == 0);
  tracer_ = NULL;
}


void MarkCompactCollector::Prepare(GCTracer* tracer) {
  // Rather than passing the tracer around we stash it in a static member
  // variable.
  tracer_ = tracer;

#ifdef DEBUG
  ASSERT(state_ == IDLE);
  state_ = PREPARE_GC;
#endif
  ASSERT(!FLAG_always_compact || !FLAG_never_compact);

  compacting_collection_ =
      FLAG_always_compact || force_compaction_ || compact_on_next_gc_;
  compact_on_next_gc_ = false;

  if (FLAG_never_compact) compacting_collection_ = false;
  if (FLAG_collect_maps) CreateBackPointers();

#ifdef DEBUG
  if (compacting_collection_) {
    // We will write bookkeeping information to the remembered set area
    // starting now.
    Page::set_rset_state(Page::NOT_IN_USE);
  }
#endif

  PagedSpaces spaces;
  while (PagedSpace* space = spaces.next()) {
    space->PrepareForMarkCompact(compacting_collection_);
  }

#ifdef DEBUG
  live_bytes_ = 0;
  live_young_objects_ = 0;
  live_old_pointer_objects_ = 0;
  live_old_data_objects_ = 0;
  live_code_objects_ = 0;
  live_map_objects_ = 0;
  live_cell_objects_ = 0;
  live_lo_objects_ = 0;
#endif
}


void MarkCompactCollector::Finish() {
#ifdef DEBUG
  ASSERT(state_ == SWEEP_SPACES || state_ == REBUILD_RSETS);
  state_ = IDLE;
#endif
  // The stub cache is not traversed during GC; clear the cache to
  // force lazy re-initialization of it. This must be done after the
  // GC, because it relies on the new address of certain old space
  // objects (empty string, illegal builtin).
  StubCache::Clear();

  // If we've just compacted old space there's no reason to check the
  // fragmentation limit. Just return.
  if (HasCompacted()) return;

  // We compact the old generation on the next GC if it has gotten too
  // fragmented (ie, we could recover an expected amount of space by
  // reclaiming the waste and free list blocks).
  static const int kFragmentationLimit = 15;        // Percent.
  static const int kFragmentationAllowed = 1 * MB;  // Absolute.
  int old_gen_recoverable = 0;
  int old_gen_used = 0;

  OldSpaces spaces;
  while (OldSpace* space = spaces.next()) {
    old_gen_recoverable += space->Waste() + space->AvailableFree();
    old_gen_used += space->Size();
  }

  int old_gen_fragmentation =
      static_cast<int>((old_gen_recoverable * 100.0) / old_gen_used);
  if (old_gen_fragmentation > kFragmentationLimit &&
      old_gen_recoverable > kFragmentationAllowed) {
    compact_on_next_gc_ = true;
  }
}
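
// Worked example for the heuristic above: with old_gen_recoverable = 2 MB
// and old_gen_used = 10 MB, old_gen_fragmentation is 20, which exceeds
// kFragmentationLimit (15) while 2 MB exceeds kFragmentationAllowed (1 MB),
// so the next full GC will be a compacting one.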


// -------------------------------------------------------------------------
// Phase 1: tracing and marking live objects.
//   before: all objects are in normal state.
//   after: a live object's map pointer is marked as '00'.

// Marking all live objects in the heap as part of mark-sweep or mark-compact
// collection.  Before marking, all objects are in their normal state.  After
// marking, live objects' map pointers are marked indicating that the object
// has been found reachable.
//
// The marking algorithm is a (mostly) depth-first (because of possible stack
// overflow) traversal of the graph of objects reachable from the roots.  It
// uses an explicit stack of pointers rather than recursion.  The young
// generation's inactive ('from') space is used as a marking stack.  The
// objects in the marking stack are the ones that have been reached and marked
// but their children have not yet been visited.
//
// The marking stack can overflow during traversal.  In that case, we set an
// overflow flag.  When the overflow flag is set, we continue marking objects
// reachable from the objects on the marking stack, but no longer push them on
// the marking stack.  Instead, we mark them as both marked and overflowed.
// When the stack is in the overflowed state, objects marked as overflowed
// have been reached and marked but their children have not been visited yet.
// After emptying the marking stack, we clear the overflow flag and traverse
// the heap looking for objects marked as overflowed, push them on the stack,
// and continue with marking.  This process repeats until all reachable
// objects have been marked.
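//
// In pseudocode, the overflow-tolerant traversal is roughly (an
// illustrative sketch of the functions defined below, not their exact
// code):
//
//   MarkRoots();                   // may set the overflow flag
//   while (marking_stack.overflowed()) {
//     RefillMarkingStack();        // rescan the heap for overflowed objects
//     EmptyMarkingStack(visitor);  // may overflow the stack again
//   }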

static MarkingStack marking_stack;


static inline HeapObject* ShortCircuitConsString(Object** p) {
  // Optimization: If the heap object pointed to by p is a non-symbol
  // cons string whose right substring is Heap::empty_string, update
  // it in place to its left substring.  Return the updated value.
  //
  // Here we assume that if we change *p, we replace it with a heap object
  // (ie, the left substring of a cons string is always a heap object).
  //
  // The check performed is:
  //   object->IsConsString() && !object->IsSymbol() &&
  //   (ConsString::cast(object)->second() == Heap::empty_string())
  // except the maps for the object and its possible substrings might be
  // marked.
  HeapObject* object = HeapObject::cast(*p);
  MapWord map_word = object->map_word();
  map_word.ClearMark();
  InstanceType type = map_word.ToMap()->instance_type();
  if ((type & kShortcutTypeMask) != kShortcutTypeTag) return object;

  Object* second = reinterpret_cast<ConsString*>(object)->unchecked_second();
  if (second != Heap::raw_unchecked_empty_string()) {
    return object;
  }

  // Since we don't have the object's start, it is impossible to update the
  // remembered set.  Therefore, we only replace the string with its left
  // substring when the remembered set does not change.
  Object* first = reinterpret_cast<ConsString*>(object)->unchecked_first();
  if (!Heap::InNewSpace(object) && Heap::InNewSpace(first)) return object;

  *p = first;
  return HeapObject::cast(first);
}
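
// For illustration: a cons string whose second() is the empty string, say
// one whose first() is "abc", is overwritten in place with "abc" by the
// function above, so the marker visits the left substring directly and the
// cons wrapper itself can be collected.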


// Helper class for marking pointers in HeapObjects.
class MarkingVisitor : public ObjectVisitor {
 public:
  void VisitPointer(Object** p) {
    MarkObjectByPointer(p);
  }

  void VisitPointers(Object** start, Object** end) {
    // Mark all objects pointed to in [start, end).
    const int kMinRangeForMarkingRecursion = 64;
    if (end - start >= kMinRangeForMarkingRecursion) {
      if (VisitUnmarkedObjects(start, end)) return;
      // We are close to a stack overflow, so just mark the objects.
    }
    for (Object** p = start; p < end; p++) MarkObjectByPointer(p);
  }

  void VisitCodeTarget(RelocInfo* rinfo) {
    ASSERT(RelocInfo::IsCodeTarget(rinfo->rmode()));
    Code* code = Code::GetCodeFromTargetAddress(rinfo->target_address());
    if (FLAG_cleanup_ics_at_gc && code->is_inline_cache_stub()) {
      IC::Clear(rinfo->pc());
      // Please note targets for cleared inline caches do not have to be
      // marked since they are contained in Heap::non_monomorphic_cache().
    } else {
      MarkCompactCollector::MarkObject(code);
    }
  }

  void VisitDebugTarget(RelocInfo* rinfo) {
    ASSERT(RelocInfo::IsJSReturn(rinfo->rmode()) &&
           rinfo->IsPatchedReturnSequence());
    HeapObject* code = Code::GetCodeFromTargetAddress(rinfo->call_address());
    MarkCompactCollector::MarkObject(code);
  }

 private:
  // Mark object pointed to by p.
  void MarkObjectByPointer(Object** p) {
    if (!(*p)->IsHeapObject()) return;
    HeapObject* object = ShortCircuitConsString(p);
    MarkCompactCollector::MarkObject(object);
  }

  // Tells whether the mark sweep collection will perform compaction.
  bool IsCompacting() { return MarkCompactCollector::IsCompacting(); }

  // Visit an unmarked object.
  void VisitUnmarkedObject(HeapObject* obj) {
#ifdef DEBUG
    ASSERT(Heap::Contains(obj));
    ASSERT(!obj->IsMarked());
#endif
    Map* map = obj->map();
    MarkCompactCollector::SetMark(obj);
    // Mark the map pointer and the body.
    MarkCompactCollector::MarkObject(map);
    obj->IterateBody(map->instance_type(), obj->SizeFromMap(map), this);
  }

  // Visit all unmarked objects pointed to by [start, end).
  // Returns false if the operation fails (lack of stack space).
  inline bool VisitUnmarkedObjects(Object** start, Object** end) {
    // Return false if we are close to the stack limit.
    StackLimitCheck check;
    if (check.HasOverflowed()) return false;

    // Visit the unmarked objects.
    for (Object** p = start; p < end; p++) {
      if (!(*p)->IsHeapObject()) continue;
      HeapObject* obj = HeapObject::cast(*p);
      if (obj->IsMarked()) continue;
      VisitUnmarkedObject(obj);
    }
    return true;
  }
};


// Visitor class for marking heap roots.
class RootMarkingVisitor : public ObjectVisitor {
 public:
  void VisitPointer(Object** p) {
    MarkObjectByPointer(p);
  }

  void VisitPointers(Object** start, Object** end) {
    for (Object** p = start; p < end; p++) MarkObjectByPointer(p);
  }

  MarkingVisitor* stack_visitor() { return &stack_visitor_; }

 private:
  MarkingVisitor stack_visitor_;

  void MarkObjectByPointer(Object** p) {
    if (!(*p)->IsHeapObject()) return;

    // Replace flat cons strings in place.
    HeapObject* object = ShortCircuitConsString(p);
    if (object->IsMarked()) return;

    Map* map = object->map();
    // Mark the object.
    MarkCompactCollector::SetMark(object);
    // Mark the map pointer and body, and push them on the marking stack.
    MarkCompactCollector::MarkObject(map);
    object->IterateBody(map->instance_type(), object->SizeFromMap(map),
                        &stack_visitor_);

    // Mark all the objects reachable from the map and body.  May leave
    // overflowed objects in the heap.
    MarkCompactCollector::EmptyMarkingStack(&stack_visitor_);
  }
};


// Helper class for pruning the symbol table.
class SymbolTableCleaner : public ObjectVisitor {
 public:
  SymbolTableCleaner() : pointers_removed_(0) { }
  void VisitPointers(Object** start, Object** end) {
    // Visit all HeapObject pointers in [start, end).
    for (Object** p = start; p < end; p++) {
      if ((*p)->IsHeapObject() && !HeapObject::cast(*p)->IsMarked()) {
        // Check if the symbol being pruned is an external symbol. We need to
        // delete the associated external data as this symbol is going away.

        // Since the object is not marked we can access its map word safely
        // without having to worry about marking bits in the object header.
        Map* map = HeapObject::cast(*p)->map();
        // Since no objects have yet been moved we can safely access the map of
        // the object.
        uint32_t type = map->instance_type();
        bool is_external = (type & kStringRepresentationMask) ==
                           kExternalStringTag;
        if (is_external) {
          bool is_two_byte = (type & kStringEncodingMask) == kTwoByteStringTag;
          byte* resource_addr = reinterpret_cast<byte*>(*p) +
                                ExternalString::kResourceOffset -
                                kHeapObjectTag;
          if (is_two_byte) {
            v8::String::ExternalStringResource** resource =
                reinterpret_cast<v8::String::ExternalStringResource**>
                (resource_addr);
            delete *resource;
            // Clear the resource pointer in the symbol.
            *resource = NULL;
          } else {
            v8::String::ExternalAsciiStringResource** resource =
                reinterpret_cast<v8::String::ExternalAsciiStringResource**>
                (resource_addr);
            delete *resource;
            // Clear the resource pointer in the symbol.
            *resource = NULL;
          }
        }
        // Set the entry to null_value (as deleted).
        *p = Heap::raw_unchecked_null_value();
        pointers_removed_++;
      }
    }
  }

  int PointersRemoved() {
    return pointers_removed_;
  }
 private:
  int pointers_removed_;
};


void MarkCompactCollector::MarkUnmarkedObject(HeapObject* object) {
  ASSERT(!object->IsMarked());
  ASSERT(Heap::Contains(object));
  if (object->IsMap()) {
    Map* map = Map::cast(object);
    if (FLAG_cleanup_caches_in_maps_at_gc) {
      map->ClearCodeCache();
    }
    SetMark(map);
    if (FLAG_collect_maps &&
        map->instance_type() >= FIRST_JS_OBJECT_TYPE &&
        map->instance_type() <= JS_FUNCTION_TYPE) {
      MarkMapContents(map);
    } else {
      marking_stack.Push(map);
    }
  } else {
    SetMark(object);
    marking_stack.Push(object);
  }
}


void MarkCompactCollector::MarkMapContents(Map* map) {
  MarkDescriptorArray(reinterpret_cast<DescriptorArray*>(
      *HeapObject::RawField(map, Map::kInstanceDescriptorsOffset)));

  // Mark the Object* fields of the Map.
  // Since the descriptor array has been marked already, it is fine
  // that one of these fields contains a pointer to it.
  MarkingVisitor visitor;  // Has no state or contents.
  visitor.VisitPointers(HeapObject::RawField(map, Map::kPrototypeOffset),
                        HeapObject::RawField(map, Map::kSize));
}


void MarkCompactCollector::MarkDescriptorArray(
    DescriptorArray* descriptors) {
  if (descriptors->IsMarked()) return;
  // Empty descriptor array is marked as a root before any maps are marked.
  ASSERT(descriptors != Heap::raw_unchecked_empty_descriptor_array());
  SetMark(descriptors);

  FixedArray* contents = reinterpret_cast<FixedArray*>(
      descriptors->get(DescriptorArray::kContentArrayIndex));
  ASSERT(contents->IsHeapObject());
  ASSERT(!contents->IsMarked());
  ASSERT(contents->IsFixedArray());
  ASSERT(contents->length() >= 2);
  SetMark(contents);
  // Contents contains (value, details) pairs.  If the details say
  // that the type of descriptor is MAP_TRANSITION, CONSTANT_TRANSITION,
  // or NULL_DESCRIPTOR, we don't mark the value as live.  Only for
  // type MAP_TRANSITION is the value an Object* (a Map*).
  for (int i = 0; i < contents->length(); i += 2) {
    // If the pair (value, details) at index i, i+1 is not
    // a transition or null descriptor, mark the value.
    PropertyDetails details(Smi::cast(contents->get(i + 1)));
    if (details.type() < FIRST_PHANTOM_PROPERTY_TYPE) {
      HeapObject* object = reinterpret_cast<HeapObject*>(contents->get(i));
      if (object->IsHeapObject() && !object->IsMarked()) {
        SetMark(object);
        marking_stack.Push(object);
      }
    }
  }
  // The DescriptorArray descriptors contains a pointer to its contents array,
  // but the contents array is already marked.
  marking_stack.Push(descriptors);
}


void MarkCompactCollector::CreateBackPointers() {
  HeapObjectIterator iterator(Heap::map_space());
  while (iterator.has_next()) {
    Object* next_object = iterator.next();
    if (next_object->IsMap()) {  // Could also be ByteArray on free list.
      Map* map = Map::cast(next_object);
      if (map->instance_type() >= FIRST_JS_OBJECT_TYPE &&
          map->instance_type() <= JS_FUNCTION_TYPE) {
        map->CreateBackPointers();
      } else {
        ASSERT(map->instance_descriptors() == Heap::empty_descriptor_array());
      }
    }
  }
}


static int OverflowObjectSize(HeapObject* obj) {
  // Recover the normal map pointer, it might be marked as live and
  // overflowed.
  MapWord map_word = obj->map_word();
  map_word.ClearMark();
  map_word.ClearOverflow();
  return obj->SizeFromMap(map_word.ToMap());
}


// Fill the marking stack with overflowed objects returned by the given
// iterator.  Stop when the marking stack is filled or the end of the space
// is reached, whichever comes first.
template<class T>
static void ScanOverflowedObjects(T* it) {
  // The caller should ensure that the marking stack is initially not full,
  // so that we don't waste effort pointlessly scanning for objects.
  ASSERT(!marking_stack.is_full());

  while (it->has_next()) {
    HeapObject* object = it->next();
    if (object->IsOverflowed()) {
      object->ClearOverflow();
      ASSERT(object->IsMarked());
      ASSERT(Heap::Contains(object));
      marking_stack.Push(object);
      if (marking_stack.is_full()) return;
    }
  }
}


bool MarkCompactCollector::IsUnmarkedHeapObject(Object** p) {
  return (*p)->IsHeapObject() && !HeapObject::cast(*p)->IsMarked();
}


class SymbolMarkingVisitor : public ObjectVisitor {
 public:
  void VisitPointers(Object** start, Object** end) {
    MarkingVisitor marker;
    for (Object** p = start; p < end; p++) {
      if (!(*p)->IsHeapObject()) continue;

      HeapObject* object = HeapObject::cast(*p);
      // If the object is marked, we have already marked, or are in the
      // process of marking, its subparts.
      if (object->IsMarked()) continue;

      // The object is unmarked, so we do not need to unmark it to use
      // its map.
      Map* map = object->map();
      object->IterateBody(map->instance_type(),
                          object->SizeFromMap(map),
                          &marker);
    }
  }
};


void MarkCompactCollector::MarkSymbolTable() {
  // Objects reachable from symbols are marked as live so as to ensure
  // that if the symbol itself remains alive after GC for any reason,
  // and if it is a sliced string or a cons string backed by an
  // external string (even indirectly), then the external string does
  // not receive a weak reference callback.
  SymbolTable* symbol_table = Heap::raw_unchecked_symbol_table();
  // Mark the symbol table itself.
  SetMark(symbol_table);
  // Explicitly mark the prefix.
  MarkingVisitor marker;
  symbol_table->IteratePrefix(&marker);
  ProcessMarkingStack(&marker);
  // Mark subparts of the symbols but not the symbols themselves
  // (unless reachable from another symbol).
  SymbolMarkingVisitor symbol_marker;
  symbol_table->IterateElements(&symbol_marker);
  ProcessMarkingStack(&marker);
}


void MarkCompactCollector::MarkRoots(RootMarkingVisitor* visitor) {
  // Mark the heap roots including global variables, stack variables,
  // etc., and all objects reachable from them.
  Heap::IterateStrongRoots(visitor);

  // Handle the symbol table specially.
  MarkSymbolTable();

  // There may be overflowed objects in the heap.  Visit them now.
  while (marking_stack.overflowed()) {
    RefillMarkingStack();
    EmptyMarkingStack(visitor->stack_visitor());
  }
}


void MarkCompactCollector::MarkObjectGroups() {
  List<ObjectGroup*>* object_groups = GlobalHandles::ObjectGroups();

  for (int i = 0; i < object_groups->length(); i++) {
    ObjectGroup* entry = object_groups->at(i);
    if (entry == NULL) continue;

    List<Object**>& objects = entry->objects_;
    bool group_marked = false;
    for (int j = 0; j < objects.length(); j++) {
      Object* object = *objects[j];
      if (object->IsHeapObject() && HeapObject::cast(object)->IsMarked()) {
        group_marked = true;
        break;
      }
    }

    if (!group_marked) continue;

    // An object in the group is marked, so mark as gray all white heap
    // objects in the group.
    for (int j = 0; j < objects.length(); ++j) {
      if ((*objects[j])->IsHeapObject()) {
        MarkObject(HeapObject::cast(*objects[j]));
      }
    }
    // Once the entire group has been colored gray, set the object group
    // to NULL so it won't be processed again.
    delete object_groups->at(i);
    object_groups->at(i) = NULL;
  }
}


// Mark all objects reachable from the objects on the marking stack.
// Before: the marking stack contains zero or more heap object pointers.
// After: the marking stack is empty, and all objects reachable from the
// marking stack have been marked, or are overflowed in the heap.
void MarkCompactCollector::EmptyMarkingStack(MarkingVisitor* visitor) {
  while (!marking_stack.is_empty()) {
    HeapObject* object = marking_stack.Pop();
    ASSERT(object->IsHeapObject());
    ASSERT(Heap::Contains(object));
    ASSERT(object->IsMarked());
    ASSERT(!object->IsOverflowed());

    // Because the object is marked, we have to recover the original map
    // pointer and use it to mark the object's body.
    MapWord map_word = object->map_word();
    map_word.ClearMark();
    Map* map = map_word.ToMap();
    MarkObject(map);
    object->IterateBody(map->instance_type(), object->SizeFromMap(map),
                        visitor);
  }
}


// Sweep the heap for overflowed objects, clear their overflow bits, and
// push them on the marking stack.  Stop early if the marking stack fills
// before sweeping completes.  If sweeping completes, there are no remaining
// overflowed objects in the heap so the overflow flag on the marking stack
// is cleared.
void MarkCompactCollector::RefillMarkingStack() {
  ASSERT(marking_stack.overflowed());

  SemiSpaceIterator new_it(Heap::new_space(), &OverflowObjectSize);
  ScanOverflowedObjects(&new_it);
  if (marking_stack.is_full()) return;

  HeapObjectIterator old_pointer_it(Heap::old_pointer_space(),
                                    &OverflowObjectSize);
  ScanOverflowedObjects(&old_pointer_it);
  if (marking_stack.is_full()) return;

  HeapObjectIterator old_data_it(Heap::old_data_space(), &OverflowObjectSize);
  ScanOverflowedObjects(&old_data_it);
  if (marking_stack.is_full()) return;

  HeapObjectIterator code_it(Heap::code_space(), &OverflowObjectSize);
  ScanOverflowedObjects(&code_it);
  if (marking_stack.is_full()) return;

  HeapObjectIterator map_it(Heap::map_space(), &OverflowObjectSize);
  ScanOverflowedObjects(&map_it);
  if (marking_stack.is_full()) return;

  HeapObjectIterator cell_it(Heap::cell_space(), &OverflowObjectSize);
  ScanOverflowedObjects(&cell_it);
  if (marking_stack.is_full()) return;

  LargeObjectIterator lo_it(Heap::lo_space(), &OverflowObjectSize);
  ScanOverflowedObjects(&lo_it);
  if (marking_stack.is_full()) return;

  marking_stack.clear_overflowed();
}


// Mark all objects reachable (transitively) from objects on the marking
// stack.  Before: the marking stack contains zero or more heap object
// pointers.  After: the marking stack is empty and there are no overflowed
// objects in the heap.
void MarkCompactCollector::ProcessMarkingStack(MarkingVisitor* visitor) {
  EmptyMarkingStack(visitor);
  while (marking_stack.overflowed()) {
    RefillMarkingStack();
    EmptyMarkingStack(visitor);
  }
}


void MarkCompactCollector::ProcessObjectGroups(MarkingVisitor* visitor) {
  bool work_to_do = true;
  ASSERT(marking_stack.is_empty());
  while (work_to_do) {
    MarkObjectGroups();
    work_to_do = !marking_stack.is_empty();
    ProcessMarkingStack(visitor);
  }
}


void MarkCompactCollector::MarkLiveObjects() {
#ifdef DEBUG
  ASSERT(state_ == PREPARE_GC);
  state_ = MARK_LIVE_OBJECTS;
#endif
  // The to space contains live objects, the from space is used as a marking
  // stack.
  marking_stack.Initialize(Heap::new_space()->FromSpaceLow(),
                           Heap::new_space()->FromSpaceHigh());

  ASSERT(!marking_stack.overflowed());

  RootMarkingVisitor root_visitor;
  MarkRoots(&root_visitor);

  // The objects reachable from the roots are marked, yet unreachable
  // objects are unmarked.  Mark objects reachable from object groups
  // containing at least one marked object, and continue until no new
  // objects are reachable from the object groups.
  ProcessObjectGroups(root_visitor.stack_visitor());

  // The objects reachable from the roots or object groups are marked,
  // yet unreachable objects are unmarked.  Mark objects reachable
  // only from weak global handles.
  //
  // First we identify nonlive weak handles and mark them as pending
  // destruction.
  GlobalHandles::IdentifyWeakHandles(&IsUnmarkedHeapObject);
  // Then we mark the objects and process the transitive closure.
  GlobalHandles::IterateWeakRoots(&root_visitor);
  while (marking_stack.overflowed()) {
    RefillMarkingStack();
    EmptyMarkingStack(root_visitor.stack_visitor());
  }

  // Repeat the object groups to mark unmarked groups reachable from the
  // weak roots.
  ProcessObjectGroups(root_visitor.stack_visitor());

  // Prune the symbol table removing all symbols only pointed to by the
  // symbol table.  Cannot use symbol_table() here because the symbol
  // table is marked.
  SymbolTable* symbol_table = Heap::raw_unchecked_symbol_table();
  SymbolTableCleaner v;
  symbol_table->IterateElements(&v);
  symbol_table->ElementsRemoved(v.PointersRemoved());

  // Remove object groups after marking phase.
  GlobalHandles::RemoveObjectGroups();
}


static int CountMarkedCallback(HeapObject* obj) {
  MapWord map_word = obj->map_word();
  map_word.ClearMark();
  return obj->SizeFromMap(map_word.ToMap());
}


#ifdef DEBUG
void MarkCompactCollector::UpdateLiveObjectCount(HeapObject* obj) {
  live_bytes_ += obj->Size();
  if (Heap::new_space()->Contains(obj)) {
    live_young_objects_++;
  } else if (Heap::map_space()->Contains(obj)) {
    ASSERT(obj->IsMap());
    live_map_objects_++;
  } else if (Heap::cell_space()->Contains(obj)) {
    ASSERT(obj->IsJSGlobalPropertyCell());
    live_cell_objects_++;
  } else if (Heap::old_pointer_space()->Contains(obj)) {
    live_old_pointer_objects_++;
  } else if (Heap::old_data_space()->Contains(obj)) {
    live_old_data_objects_++;
  } else if (Heap::code_space()->Contains(obj)) {
    live_code_objects_++;
  } else if (Heap::lo_space()->Contains(obj)) {
    live_lo_objects_++;
  } else {
    UNREACHABLE();
  }
}
#endif  // DEBUG


void MarkCompactCollector::SweepLargeObjectSpace() {
#ifdef DEBUG
  ASSERT(state_ == MARK_LIVE_OBJECTS);
  state_ =
      compacting_collection_ ? ENCODE_FORWARDING_ADDRESSES : SWEEP_SPACES;
#endif
  // Deallocate unmarked objects and clear marked bits for marked objects.
  Heap::lo_space()->FreeUnmarkedObjects();
}


// Safe to use during marking phase only.
bool MarkCompactCollector::SafeIsMap(HeapObject* object) {
  MapWord metamap = object->map_word();
  metamap.ClearMark();
  return metamap.ToMap()->instance_type() == MAP_TYPE;
}


void MarkCompactCollector::ClearNonLiveTransitions() {
  HeapObjectIterator map_iterator(Heap::map_space(), &CountMarkedCallback);
  // Iterate over the map space, setting map transitions that go from
  // a marked map to an unmarked map to null transitions.  At the same time,
  // set all the prototype fields of maps back to their original value,
  // dropping the back pointers temporarily stored in the prototype field.
  // Setting the prototype field requires following the linked list of
  // back pointers, reversing them all at once.  This allows us to find
  // those maps with map transitions that need to be nulled, and only
  // scan the descriptor arrays of those maps, not all maps.
  // All of these actions are carried out only on maps of JSObjects
  // and related subtypes.
  while (map_iterator.has_next()) {
    Map* map = reinterpret_cast<Map*>(map_iterator.next());
    if (!map->IsMarked() && map->IsByteArray()) continue;

    ASSERT(SafeIsMap(map));
    // Only JSObject and subtypes have map transitions and back pointers.
    if (map->instance_type() < FIRST_JS_OBJECT_TYPE) continue;
    if (map->instance_type() > JS_FUNCTION_TYPE) continue;
    // Follow the chain of back pointers to find the prototype.
    Map* current = map;
    while (SafeIsMap(current)) {
      current = reinterpret_cast<Map*>(current->prototype());
      ASSERT(current->IsHeapObject());
    }
    Object* real_prototype = current;

    // Follow back pointers, setting them to prototype,
    // clearing map transitions when necessary.
    current = map;
    bool on_dead_path = !current->IsMarked();
    Object* next;
    while (SafeIsMap(current)) {
      next = current->prototype();
      // There should never be a dead map above a live map.
      ASSERT(on_dead_path || current->IsMarked());

      // A live map above a dead map indicates a dead transition.
      // This test will always be false on the first iteration.
      if (on_dead_path && current->IsMarked()) {
        on_dead_path = false;
        current->ClearNonLiveTransitions(real_prototype);
      }
      *HeapObject::RawField(current, Map::kPrototypeOffset) =
          real_prototype;
      current = reinterpret_cast<Map*>(next);
    }
  }
}


// -------------------------------------------------------------------------
// Phase 2: Encode forwarding addresses.
// When compacting, forwarding addresses for objects in old space and map
// space are encoded in their map pointer word (along with an encoding of
// their map pointers).
//
//  31             21 20              10 9               0
// +-----------------+------------------+-----------------+
// |forwarding offset|page offset of map|page index of map|
// +-----------------+------------------+-----------------+
//  11 bits           11 bits            10 bits
//
// An address range [start, end) can have both live and non-live objects.
// Maximal non-live regions are marked so they can be skipped on subsequent
// sweeps of the heap.  A distinguished map-pointer encoding is used to mark
// free regions of one-word size (in which case the next word is the start
// of a live object).  A second distinguished map-pointer encoding is used
// to mark free regions larger than one word, and the size of the free
// region (including the first word) is written to the second word of the
// region.
//
// Any valid map page offset must lie in the object area of the page, so map
// page offsets less than Page::kObjectStartOffset are invalid.  We use a
// pair of distinguished invalid map encodings (for single word and multiple
// words) to indicate free regions in the page found during computation of
// forwarding addresses and skipped over in subsequent sweeps.
static const uint32_t kSingleFreeEncoding = 0;
static const uint32_t kMultiFreeEncoding = 1;


// Encode a free region, defined by the given start address and size, in the
// first word or two of the region.
void EncodeFreeRegion(Address free_start, int free_size) {
  ASSERT(free_size >= kIntSize);
  if (free_size == kIntSize) {
    Memory::uint32_at(free_start) = kSingleFreeEncoding;
  } else {
    ASSERT(free_size >= 2 * kIntSize);
    Memory::uint32_at(free_start) = kMultiFreeEncoding;
    Memory::int_at(free_start + kIntSize) = free_size;
  }

#ifdef DEBUG
  // Zap the body of the free region.
  if (FLAG_enable_slow_asserts) {
    for (int offset = 2 * kIntSize;
         offset < free_size;
         offset += kPointerSize) {
      Memory::Address_at(free_start + offset) = kZapValue;
    }
  }
#endif
}
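
// Illustration of the free-region encoding (a sketch): after this phase a
// swept page might look like
//   | live | kSingleFreeEncoding | live | kMultiFreeEncoding | 24 | ... | live |
// where a one-word hole holds kSingleFreeEncoding and a larger hole holds
// kMultiFreeEncoding followed by its total size in bytes (24 here), so
// later passes can skip holes without consulting a map.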


// Try to promote all objects in new space.  Heap numbers and sequential
// strings are promoted to the old data space and all others to the old
// pointer space; objects too large for a paged space are kept in new
// space (the allocation below falls back to new space on failure).
inline Object* MCAllocateFromNewSpace(HeapObject* object, int object_size) {
  Object* forwarded;
  if (object_size > Heap::MaxObjectSizeInPagedSpace()) {
    forwarded = Failure::Exception();
  } else {
    OldSpace* target_space = Heap::TargetSpace(object);
    ASSERT(target_space == Heap::old_pointer_space() ||
           target_space == Heap::old_data_space());
    forwarded = target_space->MCAllocateRaw(object_size);
  }
  if (forwarded->IsFailure()) {
    forwarded = Heap::new_space()->MCAllocateRaw(object_size);
  }
  return forwarded;
}


// Allocation functions for the paged spaces call the space's MCAllocateRaw.
inline Object* MCAllocateFromOldPointerSpace(HeapObject* ignore,
                                             int object_size) {
  return Heap::old_pointer_space()->MCAllocateRaw(object_size);
}


inline Object* MCAllocateFromOldDataSpace(HeapObject* ignore, int object_size) {
  return Heap::old_data_space()->MCAllocateRaw(object_size);
}


inline Object* MCAllocateFromCodeSpace(HeapObject* ignore, int object_size) {
  return Heap::code_space()->MCAllocateRaw(object_size);
}


inline Object* MCAllocateFromMapSpace(HeapObject* ignore, int object_size) {
  return Heap::map_space()->MCAllocateRaw(object_size);
}


inline Object* MCAllocateFromCellSpace(HeapObject* ignore, int object_size) {
  return Heap::cell_space()->MCAllocateRaw(object_size);
}


// The forwarding address is encoded at the same offset as the current
// to-space object, but in from space.
inline void EncodeForwardingAddressInNewSpace(HeapObject* old_object,
                                              int object_size,
                                              Object* new_object,
                                              int* ignored) {
  int offset =
      Heap::new_space()->ToSpaceOffsetForAddress(old_object->address());
  Memory::Address_at(Heap::new_space()->FromSpaceLow() + offset) =
      HeapObject::cast(new_object)->address();
}
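
// For example: an object at to-space offset 0x40 gets its new address
// written to FromSpaceLow() + 0x40, so the pointer-update phase can recover
// the forwarding address with a single load from the mirrored from-space
// slot (see UpdatingVisitor::UpdatePointer below).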


// The forwarding address is encoded in the map pointer of the object as an
// offset (in terms of live bytes) from the address of the first live object
// in the page.
inline void EncodeForwardingAddressInPagedSpace(HeapObject* old_object,
                                                int object_size,
                                                Object* new_object,
                                                int* offset) {
  // Record the forwarding address of the first live object if necessary.
  if (*offset == 0) {
    Page::FromAddress(old_object->address())->mc_first_forwarded =
        HeapObject::cast(new_object)->address();
  }

  MapWord encoding =
      MapWord::EncodeAddress(old_object->map()->address(), *offset);
  old_object->set_map_word(encoding);
  *offset += object_size;
  ASSERT(*offset <= Page::kObjectAreaSize);
}
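
// A sketch of the scheme above: if a page's live objects are A (16 bytes)
// followed by B, A's map word encodes live-byte offset 0 and B's encodes
// 16; a relocated address is later recovered from the page's
// mc_first_forwarded plus the decoded offset (ignoring page boundaries in
// this illustration).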


// Most non-live objects are ignored.
inline void IgnoreNonLiveObject(HeapObject* object) {}


// A code deletion event is logged for non-live code objects.
inline void LogNonLiveCodeObject(HeapObject* object) {
  if (object->IsCode()) LOG(CodeDeleteEvent(object->address()));
}


// Function template that, given a range of addresses (eg, a semispace or a
// paged space page), iterates through the objects in the range to clear
// mark bits and compute and encode forwarding addresses.  As a side effect,
// maximal free chunks are marked so that they can be skipped on subsequent
// sweeps.
//
// The template parameters are an allocation function, a forwarding address
// encoding function, and a function to process non-live objects.
template<MarkCompactCollector::AllocationFunction Alloc,
         MarkCompactCollector::EncodingFunction Encode,
         MarkCompactCollector::ProcessNonLiveFunction ProcessNonLive>
inline void EncodeForwardingAddressesInRange(Address start,
                                             Address end,
                                             int* offset) {
  // The start address of the current free region while sweeping the space.
  // This address is set when a transition from live to non-live objects is
  // encountered.  A value (an encoding of the 'next free region' pointer)
  // is written to memory at this address when a transition from non-live to
  // live objects is encountered.
  Address free_start = NULL;

  // A flag giving the state of the previously swept object.  Initially true
  // to ensure that free_start is initialized to a proper address before
  // trying to write to it.
  bool is_prev_alive = true;

  int object_size;  // Will be set on each iteration of the loop.
  for (Address current = start; current < end; current += object_size) {
    HeapObject* object = HeapObject::FromAddress(current);
    if (object->IsMarked()) {
      object->ClearMark();
      MarkCompactCollector::tracer()->decrement_marked_count();
      object_size = object->Size();

      Object* forwarded = Alloc(object, object_size);
      // Allocation cannot fail, because we are compacting the space.
      ASSERT(!forwarded->IsFailure());
      Encode(object, object_size, forwarded, offset);

#ifdef DEBUG
      if (FLAG_gc_verbose) {
        PrintF("forward %p -> %p.\n", object->address(),
               HeapObject::cast(forwarded)->address());
      }
#endif
      if (!is_prev_alive) {  // Transition from non-live to live.
        EncodeFreeRegion(free_start, current - free_start);
        is_prev_alive = true;
      }
    } else {  // Non-live object.
      object_size = object->Size();
      ProcessNonLive(object);
      if (is_prev_alive) {  // Transition from live to non-live.
        free_start = current;
        is_prev_alive = false;
      }
    }
  }

  // If we ended on a free region, mark it.
  if (!is_prev_alive) EncodeFreeRegion(free_start, end - free_start);
}


// Functions to encode the forwarding pointers in each compactable space.
void MarkCompactCollector::EncodeForwardingAddressesInNewSpace() {
  int ignored;
  EncodeForwardingAddressesInRange<MCAllocateFromNewSpace,
                                   EncodeForwardingAddressInNewSpace,
                                   IgnoreNonLiveObject>(
      Heap::new_space()->bottom(),
      Heap::new_space()->top(),
      &ignored);
}


template<MarkCompactCollector::AllocationFunction Alloc,
         MarkCompactCollector::ProcessNonLiveFunction ProcessNonLive>
void MarkCompactCollector::EncodeForwardingAddressesInPagedSpace(
    PagedSpace* space) {
  PageIterator it(space, PageIterator::PAGES_IN_USE);
  while (it.has_next()) {
    Page* p = it.next();
    // The offset of each live object in the page from the first live object
    // in the page.
    int offset = 0;
    EncodeForwardingAddressesInRange<Alloc,
                                     EncodeForwardingAddressInPagedSpace,
                                     ProcessNonLive>(
        p->ObjectAreaStart(),
        p->AllocationTop(),
        &offset);
  }
}


static void SweepSpace(NewSpace* space) {
  HeapObject* object;
  for (Address current = space->bottom();
       current < space->top();
       current += object->Size()) {
    object = HeapObject::FromAddress(current);
    if (object->IsMarked()) {
      object->ClearMark();
      MarkCompactCollector::tracer()->decrement_marked_count();
    } else {
      // We give non-live objects a map that will correctly give their size,
      // since their existing map might not be live after the collection.
      int size = object->Size();
      if (size >= ByteArray::kHeaderSize) {
        object->set_map(Heap::raw_unchecked_byte_array_map());
        ByteArray::cast(object)->set_length(ByteArray::LengthFor(size));
      } else {
        ASSERT(size == kPointerSize);
        object->set_map(Heap::raw_unchecked_one_pointer_filler_map());
      }
      ASSERT(object->Size() == size);
    }
    // The object is now unmarked for the call to Size() at the top of the
    // loop.
  }
}
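
// Note on the fillers above: each dead new-space object is overwritten with
// a byte array map (or a one-pointer filler for a single-word hole), so the
// linear walk can keep calling Size() even though the object's original map
// may itself be dead.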


static void SweepSpace(PagedSpace* space, DeallocateFunction dealloc) {
  PageIterator it(space, PageIterator::PAGES_IN_USE);
  while (it.has_next()) {
    Page* p = it.next();

    bool is_previous_alive = true;
    Address free_start = NULL;
    HeapObject* object;

    for (Address current = p->ObjectAreaStart();
         current < p->AllocationTop();
         current += object->Size()) {
      object = HeapObject::FromAddress(current);
      if (object->IsMarked()) {
        object->ClearMark();
        MarkCompactCollector::tracer()->decrement_marked_count();
        if (!is_previous_alive) {  // Transition from free to live.
          dealloc(free_start, current - free_start);
          is_previous_alive = true;
        }
      } else {
        if (object->IsCode()) {
          // Notify the logger that compiled code has been collected.
          LOG(CodeDeleteEvent(Code::cast(object)->address()));
        }
        if (is_previous_alive) {  // Transition from live to free.
          free_start = current;
          is_previous_alive = false;
        }
      }
      // The object is now unmarked for the call to Size() at the top of the
      // loop.
    }

    // If the last region was not live we need to deallocate from
    // free_start to the allocation top in the page.
    if (!is_previous_alive) {
      int free_size = p->AllocationTop() - free_start;
      if (free_size > 0) {
        dealloc(free_start, free_size);
      }
    }
  }
}


void MarkCompactCollector::DeallocateOldPointerBlock(Address start,
                                                     int size_in_bytes) {
  Heap::ClearRSetRange(start, size_in_bytes);
  Heap::old_pointer_space()->Free(start, size_in_bytes);
}


void MarkCompactCollector::DeallocateOldDataBlock(Address start,
                                                  int size_in_bytes) {
  Heap::old_data_space()->Free(start, size_in_bytes);
}


void MarkCompactCollector::DeallocateCodeBlock(Address start,
                                               int size_in_bytes) {
  Heap::code_space()->Free(start, size_in_bytes);
}


void MarkCompactCollector::DeallocateMapBlock(Address start,
                                              int size_in_bytes) {
  // Objects in map space are frequently assumed to have size Map::kSize and a
  // valid map in their first word.  Thus, we break the free block up into
  // chunks and free them separately.
  ASSERT(size_in_bytes % Map::kSize == 0);
  Heap::ClearRSetRange(start, size_in_bytes);
  Address end = start + size_in_bytes;
  for (Address a = start; a < end; a += Map::kSize) {
    Heap::map_space()->Free(a);
  }
}


void MarkCompactCollector::DeallocateCellBlock(Address start,
                                               int size_in_bytes) {
  // Free-list elements in cell space are assumed to have a fixed size.
  // We break the free block into chunks and add them to the free list
  // individually.
  int size = Heap::cell_space()->object_size_in_bytes();
  ASSERT(size_in_bytes % size == 0);
  Heap::ClearRSetRange(start, size_in_bytes);
  Address end = start + size_in_bytes;
  for (Address a = start; a < end; a += size) {
    Heap::cell_space()->Free(a);
  }
}


void MarkCompactCollector::EncodeForwardingAddresses() {
  ASSERT(state_ == ENCODE_FORWARDING_ADDRESSES);
  // Objects in the active semispace of the young generation may be
  // relocated to the inactive semispace (if not promoted).  Set the
  // relocation info to the beginning of the inactive semispace.
  Heap::new_space()->MCResetRelocationInfo();

  // Compute the forwarding pointers in each space.
  EncodeForwardingAddressesInPagedSpace<MCAllocateFromOldPointerSpace,
                                        IgnoreNonLiveObject>(
      Heap::old_pointer_space());

  EncodeForwardingAddressesInPagedSpace<MCAllocateFromOldDataSpace,
                                        IgnoreNonLiveObject>(
      Heap::old_data_space());

  EncodeForwardingAddressesInPagedSpace<MCAllocateFromCodeSpace,
                                        LogNonLiveCodeObject>(
      Heap::code_space());

  EncodeForwardingAddressesInPagedSpace<MCAllocateFromCellSpace,
                                        IgnoreNonLiveObject>(
      Heap::cell_space());

  // Compute new space next to last after the old and code spaces have been
  // compacted.  Objects in new space can be promoted to the old spaces.
  EncodeForwardingAddressesInNewSpace();

  // Compute map space last because computing forwarding addresses
  // overwrites non-live objects.  Objects in the other spaces rely on
  // non-live map pointers to get the sizes of non-live objects.
  EncodeForwardingAddressesInPagedSpace<MCAllocateFromMapSpace,
                                        IgnoreNonLiveObject>(
      Heap::map_space());

  // Write relocation info to the top page, so we can use it later.  This is
  // done after promoting objects from the new space so we get the correct
  // allocation top.
  Heap::old_pointer_space()->MCWriteRelocationInfoToPage();
  Heap::old_data_space()->MCWriteRelocationInfoToPage();
  Heap::code_space()->MCWriteRelocationInfoToPage();
  Heap::map_space()->MCWriteRelocationInfoToPage();
  Heap::cell_space()->MCWriteRelocationInfoToPage();
}


void MarkCompactCollector::SweepSpaces() {
  ASSERT(state_ == SWEEP_SPACES);
  ASSERT(!IsCompacting());
  // Noncompacting collections simply sweep the spaces to clear the mark
  // bits and free the nonlive blocks (for old and map spaces).  We sweep
  // the map space last because freeing non-live maps overwrites them and
  // the other spaces rely on possibly non-live maps to get the sizes for
  // non-live objects.
  SweepSpace(Heap::old_pointer_space(), &DeallocateOldPointerBlock);
  SweepSpace(Heap::old_data_space(), &DeallocateOldDataBlock);
  SweepSpace(Heap::code_space(), &DeallocateCodeBlock);
  SweepSpace(Heap::cell_space(), &DeallocateCellBlock);
  SweepSpace(Heap::new_space());
  SweepSpace(Heap::map_space(), &DeallocateMapBlock);
}


// Iterate the live objects in a range of addresses (eg, a page or a
// semispace).  Free regions in the range are identified by the distinguished
// encodings (kSingleFreeEncoding and kMultiFreeEncoding) written during the
// forwarding-address phase and are skipped.  The callback function is used
// to get the size of each live object.
int MarkCompactCollector::IterateLiveObjectsInRange(
    Address start,
    Address end,
    HeapObjectCallback size_func) {
  int live_objects = 0;
  Address current = start;
  while (current < end) {
    uint32_t encoded_map = Memory::uint32_at(current);
    if (encoded_map == kSingleFreeEncoding) {
      current += kPointerSize;
    } else if (encoded_map == kMultiFreeEncoding) {
      current += Memory::int_at(current + kIntSize);
    } else {
      live_objects++;
      current += size_func(HeapObject::FromAddress(current));
    }
  }
  return live_objects;
}


int MarkCompactCollector::IterateLiveObjects(NewSpace* space,
                                             HeapObjectCallback size_f) {
  ASSERT(MARK_LIVE_OBJECTS < state_ && state_ <= RELOCATE_OBJECTS);
  return IterateLiveObjectsInRange(space->bottom(), space->top(), size_f);
}


int MarkCompactCollector::IterateLiveObjects(PagedSpace* space,
                                             HeapObjectCallback size_f) {
  ASSERT(MARK_LIVE_OBJECTS < state_ && state_ <= RELOCATE_OBJECTS);
  int total = 0;
  PageIterator it(space, PageIterator::PAGES_IN_USE);
  while (it.has_next()) {
    Page* p = it.next();
    total += IterateLiveObjectsInRange(p->ObjectAreaStart(),
                                       p->AllocationTop(),
                                       size_f);
  }
  return total;
}


// -------------------------------------------------------------------------
// Phase 3: Update pointers

// Helper class for updating pointers in HeapObjects.
class UpdatingVisitor: public ObjectVisitor {
 public:
  void VisitPointer(Object** p) {
    UpdatePointer(p);
  }

  void VisitPointers(Object** start, Object** end) {
    // Update all HeapObject pointers in [start, end).
1373    for (Object** p = start; p < end; p++) UpdatePointer(p);
1374  }
1375
1376  void VisitCodeTarget(RelocInfo* rinfo) {
1377    ASSERT(RelocInfo::IsCodeTarget(rinfo->rmode()));
1378    Object* target = Code::GetCodeFromTargetAddress(rinfo->target_address());
1379    VisitPointer(&target);
1380    rinfo->set_target_address(
1381        reinterpret_cast<Code*>(target)->instruction_start());
1382  }
1383
1384  void VisitDebugTarget(RelocInfo* rinfo) {
1385    ASSERT(RelocInfo::IsJSReturn(rinfo->rmode()) &&
1386           rinfo->IsPatchedReturnSequence());
1387    Object* target = Code::GetCodeFromTargetAddress(rinfo->call_address());
1388    VisitPointer(&target);
1389    rinfo->set_call_address(
1390        reinterpret_cast<Code*>(target)->instruction_start());
1391  }
1392
1393 private:
1394  void UpdatePointer(Object** p) {
1395    if (!(*p)->IsHeapObject()) return;
1396
1397    HeapObject* obj = HeapObject::cast(*p);
1398    Address old_addr = obj->address();
1399    Address new_addr;
1400    ASSERT(!Heap::InFromSpace(obj));
1401
1402    if (Heap::new_space()->Contains(obj)) {
1403      Address forwarding_pointer_addr =
1404          Heap::new_space()->FromSpaceLow() +
1405          Heap::new_space()->ToSpaceOffsetForAddress(old_addr);
1406      new_addr = Memory::Address_at(forwarding_pointer_addr);
1407
1408#ifdef DEBUG
1409      ASSERT(Heap::old_pointer_space()->Contains(new_addr) ||
1410             Heap::old_data_space()->Contains(new_addr) ||
1411             Heap::new_space()->FromSpaceContains(new_addr) ||
1412             Heap::lo_space()->Contains(HeapObject::FromAddress(new_addr)));
1413
1414      if (Heap::new_space()->FromSpaceContains(new_addr)) {
1415        ASSERT(Heap::new_space()->FromSpaceOffsetForAddress(new_addr) <=
1416               Heap::new_space()->ToSpaceOffsetForAddress(old_addr));
1417      }
1418#endif
1419
1420    } else if (Heap::lo_space()->Contains(obj)) {
1421      // Don't move objects in the large object space.
1422      return;
1423
1424    } else {
1425#ifdef DEBUG
1426      PagedSpaces spaces;
1427      PagedSpace* original_space = spaces.next();
1428      while (original_space != NULL) {
1429        if (original_space->Contains(obj)) break;
1430        original_space = spaces.next();
1431      }
1432      ASSERT(original_space != NULL);
1433#endif
1434      new_addr = MarkCompactCollector::GetForwardingAddressInOldSpace(obj);
1435      ASSERT(original_space->Contains(new_addr));
1436      ASSERT(original_space->MCSpaceOffsetForAddress(new_addr) <=
1437             original_space->MCSpaceOffsetForAddress(old_addr));
1438    }
1439
1440    *p = HeapObject::FromAddress(new_addr);
1441
1442#ifdef DEBUG
1443    if (FLAG_gc_verbose) {
1444      PrintF("update %p : %p -> %p\n",
1445             reinterpret_cast<Address>(p), old_addr, new_addr);
1446    }
1447#endif
1448  }
1449};
1450
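// Worked example of the new space case in UpdatePointer above (addresses are
// made up): the encoding phase records the forwarding address of every live
// to-space object in from space, at the same offset the object has in to
// space.  If to space starts at 0x10000 and from space at 0x90000, an object
// at old_addr 0x10230 has to-space offset 0x230, so its new address is read
// from 0x90000 + 0x230:
//
//   int offset = Heap::new_space()->ToSpaceOffsetForAddress(old_addr);
//   Address slot = Heap::new_space()->FromSpaceLow() + offset;   // 0x90230
//   Address new_addr = Memory::Address_at(slot);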
1451
1452void MarkCompactCollector::UpdatePointers() {
1453#ifdef DEBUG
1454  ASSERT(state_ == ENCODE_FORWARDING_ADDRESSES);
1455  state_ = UPDATE_POINTERS;
1456#endif
1457  UpdatingVisitor updating_visitor;
1458  Heap::IterateRoots(&updating_visitor);
1459  GlobalHandles::IterateWeakRoots(&updating_visitor);
1460
1461  int live_maps = IterateLiveObjects(Heap::map_space(),
1462                                     &UpdatePointersInOldObject);
1463  int live_pointer_olds = IterateLiveObjects(Heap::old_pointer_space(),
1464                                             &UpdatePointersInOldObject);
1465  int live_data_olds = IterateLiveObjects(Heap::old_data_space(),
1466                                          &UpdatePointersInOldObject);
1467  int live_codes = IterateLiveObjects(Heap::code_space(),
1468                                      &UpdatePointersInOldObject);
1469  int live_cells = IterateLiveObjects(Heap::cell_space(),
1470                                      &UpdatePointersInOldObject);
1471  int live_news = IterateLiveObjects(Heap::new_space(),
1472                                     &UpdatePointersInNewObject);
1473
1474  // Large objects do not move, so the map word can be updated directly.
1475  LargeObjectIterator it(Heap::lo_space());
1476  while (it.has_next()) UpdatePointersInNewObject(it.next());
1477
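  // USE() keeps the counters referenced in release builds, where the
  // ASSERTs below compile away and would otherwise leave them unused.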
1478  USE(live_maps);
1479  USE(live_pointer_olds);
1480  USE(live_data_olds);
1481  USE(live_codes);
1482  USE(live_cells);
1483  USE(live_news);
1484  ASSERT(live_maps == live_map_objects_);
1485  ASSERT(live_data_olds == live_old_data_objects_);
1486  ASSERT(live_pointer_olds == live_old_pointer_objects_);
1487  ASSERT(live_codes == live_code_objects_);
1488  ASSERT(live_cells == live_cell_objects_);
1489  ASSERT(live_news == live_young_objects_);
1490}
1491
1492
1493int MarkCompactCollector::UpdatePointersInNewObject(HeapObject* obj) {
1494  // Keep the old map pointer (not encoded for new and large objects).
1495  Map* old_map = obj->map();
1496  ASSERT(old_map->IsHeapObject());
1497
1498  Address forwarded = GetForwardingAddressInOldSpace(old_map);
1499
1500  ASSERT(Heap::map_space()->Contains(old_map));
1501  ASSERT(Heap::map_space()->Contains(forwarded));
1502#ifdef DEBUG
1503  if (FLAG_gc_verbose) {
1504    PrintF("update %p : %p -> %p\n", obj->address(), old_map->address(),
1505           forwarded);
1506  }
1507#endif
1508  // Update the map pointer.
1509  obj->set_map(reinterpret_cast<Map*>(HeapObject::FromAddress(forwarded)));
1510
1511  // We have to compute the object size relying on the old map because
1512  // map objects are not relocated yet.
1513  int obj_size = obj->SizeFromMap(old_map);
1514
1515  // Update pointers in the object body.
1516  UpdatingVisitor updating_visitor;
1517  obj->IterateBody(old_map->instance_type(), obj_size, &updating_visitor);
1518  return obj_size;
1519}
1520
1521
1522int MarkCompactCollector::UpdatePointersInOldObject(HeapObject* obj) {
1523  // Decode the map pointer.
1524  MapWord encoding = obj->map_word();
1525  Address map_addr = encoding.DecodeMapAddress(Heap::map_space());
1526  ASSERT(Heap::map_space()->Contains(HeapObject::FromAddress(map_addr)));
1527
1528  // At this point the first word at map_addr is itself an encoded map
1529  // word, so we cannot cast it to Map* using Map::cast.
1530  Map* map = reinterpret_cast<Map*>(HeapObject::FromAddress(map_addr));
1531  int obj_size = obj->SizeFromMap(map);
1532  InstanceType type = map->instance_type();
1533
1534  // Update map pointer.
1535  Address new_map_addr = GetForwardingAddressInOldSpace(map);
1536  int offset = encoding.DecodeOffset();
1537  obj->set_map_word(MapWord::EncodeAddress(new_map_addr, offset));
1538
1539#ifdef DEBUG
1540  if (FLAG_gc_verbose) {
1541    PrintF("update %p : %p -> %p\n", obj->address(),
1542           map_addr, new_map_addr);
1543  }
1544#endif
1545
1546  // Update pointers in the object body.
1547  UpdatingVisitor updating_visitor;
1548  obj->IterateBody(type, obj_size, &updating_visitor);
1549  return obj_size;
1550}
1551
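// Sketch of the encoded map word used above.  The exact bit layout belongs
// to MapWord; the field width here is an assumption.  During compaction the
// map word of a live paged-space object packs the map's location in map
// space together with the offset of the object's forwarding address from
// the page's first forwarded address, roughly:
//
//   static const int kOffsetBits = 12;  // assumed width for a page offset
//   uintptr_t Encode(uintptr_t map_index, uintptr_t offset) {
//     return (map_index << kOffsetBits) | offset;
//   }
//   uintptr_t DecodeOffset(uintptr_t word) {
//     return word & ((static_cast<uintptr_t>(1) << kOffsetBits) - 1);
//   }
//   uintptr_t DecodeMapIndex(uintptr_t word) {
//     return word >> kOffsetBits;
//   }
//
// DecodeMapAddress turns the stored map location back into an address in
// map space, and DecodeOffset feeds GetForwardingAddressInOldSpace below.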
1552
1553Address MarkCompactCollector::GetForwardingAddressInOldSpace(HeapObject* obj) {
1554  // The object should be in an old space or the map space.
1555  MapWord encoding = obj->map_word();
1556
1557  // Offset from the first live object's forwarding address.
1558  int offset = encoding.DecodeOffset();
1559  Address obj_addr = obj->address();
1560
1561  // Find the first live object's forwarding address.
1562  Page* p = Page::FromAddress(obj_addr);
1563  Address first_forwarded = p->mc_first_forwarded;
1564
1565  // The page of the first forwarded address, and the offset within it.
1566  Page* forwarded_page = Page::FromAddress(first_forwarded);
1567  int forwarded_offset = forwarded_page->Offset(first_forwarded);
1568
1569  // Find the end of allocation in the page of first_forwarded.
1570  Address mc_top = forwarded_page->mc_relocation_top;
1571  int mc_top_offset = forwarded_page->Offset(mc_top);
1572
1573  // Check if the current object's forwarding address is on the same page
1574  // as the first live object's forwarding address.
1575  if (forwarded_offset + offset < mc_top_offset) {
1576    // In the same page.
1577    return first_forwarded + offset;
1578  }
1579
1580  // Must be on the next page.  NOTE: this may cross chunks.
1581  Page* next_page = forwarded_page->next_page();
1582  ASSERT(next_page->is_valid());
1583
1584  offset -= (mc_top_offset - forwarded_offset);
1585  offset += Page::kObjectStartOffset;
1586
1587  ASSERT_PAGE_OFFSET(offset);
1588  ASSERT(next_page->OffsetToAddress(offset) < next_page->mc_relocation_top);
1589
1590  return next_page->OffsetToAddress(offset);
1591}
1592
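// Worked example of the page-spill case above (numbers are made up):
// suppose the first forwarded address sits at offset 0x800 in its page, the
// page's mc_relocation_top is at offset 0xA00, and the object's encoded
// offset is 0x300.  Since 0x800 + 0x300 >= 0xA00, the forwarding address
// does not fit on that page, so:
//
//   offset -= (0xA00 - 0x800);           // 0x100 beyond the relocation top
//   offset += Page::kObjectStartOffset;  // skip the next page's header
//   return next_page->OffsetToAddress(offset);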
1593
1594// -------------------------------------------------------------------------
1595// Phase 4: Relocate objects
1596
1597void MarkCompactCollector::RelocateObjects() {
1598#ifdef DEBUG
1599  ASSERT(state_ == UPDATE_POINTERS);
1600  state_ = RELOCATE_OBJECTS;
1601#endif
1602  // Relocate objects, relocating map objects first: relocating objects in
1603  // the other spaces relies on the maps to compute object sizes.
1604  int live_maps = IterateLiveObjects(Heap::map_space(), &RelocateMapObject);
1605  int live_pointer_olds = IterateLiveObjects(Heap::old_pointer_space(),
1606                                             &RelocateOldPointerObject);
1607  int live_data_olds = IterateLiveObjects(Heap::old_data_space(),
1608                                          &RelocateOldDataObject);
1609  int live_codes = IterateLiveObjects(Heap::code_space(), &RelocateCodeObject);
1610  int live_cells = IterateLiveObjects(Heap::cell_space(), &RelocateCellObject);
1611  int live_news = IterateLiveObjects(Heap::new_space(), &RelocateNewObject);
1612
1613  USE(live_maps);
1614  USE(live_data_olds);
1615  USE(live_pointer_olds);
1616  USE(live_codes);
1617  USE(live_cells);
1618  USE(live_news);
1619  ASSERT(live_maps == live_map_objects_);
1620  ASSERT(live_data_olds == live_old_data_objects_);
1621  ASSERT(live_pointer_olds == live_old_pointer_objects_);
1622  ASSERT(live_codes == live_code_objects_);
1623  ASSERT(live_cells == live_cell_objects_);
1624  ASSERT(live_news == live_young_objects_);
1625
1626  // Flip from and to spaces.
1627  Heap::new_space()->Flip();
1628
1629  // Set the age mark to the bottom of to space.
1630  Address mark = Heap::new_space()->bottom();
1631  Heap::new_space()->set_age_mark(mark);
1632
1633  Heap::new_space()->MCCommitRelocationInfo();
1634#ifdef DEBUG
1635  // It is safe to write to the remembered sets again: they are rebuilt
1636  // page by page after the m-c forwarding pointers have been committed.
1637  Page::set_rset_state(Page::IN_USE);
1638#endif
1639  PagedSpaces spaces;
1640  while (PagedSpace* space = spaces.next()) space->MCCommitRelocationInfo();
1641}
1642
1643
1644int MarkCompactCollector::RelocateMapObject(HeapObject* obj) {
1645  // Recover map pointer.
1646  MapWord encoding = obj->map_word();
1647  Address map_addr = encoding.DecodeMapAddress(Heap::map_space());
1648  ASSERT(Heap::map_space()->Contains(HeapObject::FromAddress(map_addr)));
1649
1650  // Get forwarding address before resetting map pointer.
1651  Address new_addr = GetForwardingAddressInOldSpace(obj);
1652
1653  // Reset map pointer.  The meta map object may not be copied yet, so
1654  // Map::cast does not yet work.
1655  obj->set_map(reinterpret_cast<Map*>(HeapObject::FromAddress(map_addr)));
1656
1657  Address old_addr = obj->address();
1658
1659  if (new_addr != old_addr) {
1660    memmove(new_addr, old_addr, Map::kSize);  // Copy contents.
1661  }
1662
1663#ifdef DEBUG
1664  if (FLAG_gc_verbose) {
1665    PrintF("relocate %p -> %p\n", old_addr, new_addr);
1666  }
1667#endif
1668
1669  return Map::kSize;
1670}
1671
1672
1673static inline int RestoreMap(HeapObject* obj,
1674                             PagedSpace* space,
1675                             Address new_addr,
1676                             Address map_addr) {
1677  // This must be a non-map object, and the function relies on the
1678  // assumption that the Map space is compacted before the other paged
1679  // spaces (see RelocateObjects).
1680
1681  // Reset map pointer.
1682  obj->set_map(Map::cast(HeapObject::FromAddress(map_addr)));
1683
1684  int obj_size = obj->Size();
1685  ASSERT_OBJECT_SIZE(obj_size);
1686
1687  ASSERT(space->MCSpaceOffsetForAddress(new_addr) <=
1688         space->MCSpaceOffsetForAddress(obj->address()));
1689
1690#ifdef DEBUG
1691  if (FLAG_gc_verbose) {
1692    PrintF("relocate %p -> %p\n", obj->address(), new_addr);
1693  }
1694#endif
1695
1696  return obj_size;
1697}
1698
1699
1700int MarkCompactCollector::RelocateOldNonCodeObject(HeapObject* obj,
1701                                                   PagedSpace* space) {
1702  // Recover map pointer.
1703  MapWord encoding = obj->map_word();
1704  Address map_addr = encoding.DecodeMapAddress(Heap::map_space());
1705  ASSERT(Heap::map_space()->Contains(map_addr));
1706
1707  // Get forwarding address before resetting map pointer.
1708  Address new_addr = GetForwardingAddressInOldSpace(obj);
1709
1710  // Reset the map pointer.
1711  int obj_size = RestoreMap(obj, space, new_addr, map_addr);
1712
1713  Address old_addr = obj->address();
1714
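  // The destination range can overlap the source within the same page,
  // because objects slide toward the start of the space, so memmove rather
  // than memcpy is required here (contrast RelocateNewObject below).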
1715  if (new_addr != old_addr) {
1716    memmove(new_addr, old_addr, obj_size);  // Copy contents
1717  }
1718
1719  ASSERT(!HeapObject::FromAddress(new_addr)->IsCode());
1720
1721  return obj_size;
1722}
1723
1724
1725int MarkCompactCollector::RelocateOldPointerObject(HeapObject* obj) {
1726  return RelocateOldNonCodeObject(obj, Heap::old_pointer_space());
1727}
1728
1729
1730int MarkCompactCollector::RelocateOldDataObject(HeapObject* obj) {
1731  return RelocateOldNonCodeObject(obj, Heap::old_data_space());
1732}
1733
1734
1735int MarkCompactCollector::RelocateCellObject(HeapObject* obj) {
1736  return RelocateOldNonCodeObject(obj, Heap::cell_space());
1737}
1738
1739
1740int MarkCompactCollector::RelocateCodeObject(HeapObject* obj) {
1741  // Recover map pointer.
1742  MapWord encoding = obj->map_word();
1743  Address map_addr = encoding.DecodeMapAddress(Heap::map_space());
1744  ASSERT(Heap::map_space()->Contains(HeapObject::FromAddress(map_addr)));
1745
1746  // Get forwarding address before resetting map pointer.
1747  Address new_addr = GetForwardingAddressInOldSpace(obj);
1748
1749  // Reset the map pointer.
1750  int obj_size = RestoreMap(obj, Heap::code_space(), new_addr, map_addr);
1751
1752  Address old_addr = obj->address();
1753
1754  if (new_addr != old_addr) {
1755    memmove(new_addr, old_addr, obj_size);  // Copy contents.
1756  }
1757
1758  HeapObject* copied_to = HeapObject::FromAddress(new_addr);
1759  if (copied_to->IsCode()) {
1760    // May also update inline cache target.
1761    Code::cast(copied_to)->Relocate(new_addr - old_addr);
1762    // Notify the logger that compiled code has moved.
1763    LOG(CodeMoveEvent(old_addr, new_addr));
1764  }
1765
1766  return obj_size;
1767}
1768
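// Note on Code::Relocate above: a code object that moves by a delta of
// (new_addr - old_addr) is expected to adjust the absolute addresses
// recorded in its relocation information by that same delta; pc-relative
// references inside the object need no fixup.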
1769
1770int MarkCompactCollector::RelocateNewObject(HeapObject* obj) {
1771  int obj_size = obj->Size();
1772
1773  // Get the forwarding address.
1774  Address old_addr = obj->address();
1775  int offset = Heap::new_space()->ToSpaceOffsetForAddress(old_addr);
1776
1777  Address new_addr =
1778    Memory::Address_at(Heap::new_space()->FromSpaceLow() + offset);
1779
1780#ifdef DEBUG
1781  if (Heap::new_space()->FromSpaceContains(new_addr)) {
1782    ASSERT(Heap::new_space()->FromSpaceOffsetForAddress(new_addr) <=
1783           Heap::new_space()->ToSpaceOffsetForAddress(old_addr));
1784  } else {
1785    ASSERT(Heap::TargetSpace(obj) == Heap::old_pointer_space() ||
1786           Heap::TargetSpace(obj) == Heap::old_data_space());
1787  }
1788#endif
1789
1790  // New and old addresses cannot overlap, so memcpy (not memmove) is safe.
1791  memcpy(reinterpret_cast<void*>(new_addr),
1792         reinterpret_cast<void*>(old_addr),
1793         obj_size);
1794
1795#ifdef DEBUG
1796  if (FLAG_gc_verbose) {
1797    PrintF("relocate %p -> %p\n", old_addr, new_addr);
1798  }
1799#endif
1800
1801  return obj_size;
1802}
1803
1804
1805// -------------------------------------------------------------------------
1806// Phase 5: rebuild remembered sets
1807
1808void MarkCompactCollector::RebuildRSets() {
1809#ifdef DEBUG
1810  ASSERT(state_ == RELOCATE_OBJECTS);
1811  state_ = REBUILD_RSETS;
1812#endif
1813  Heap::RebuildRSets();
1814}
1815
1816} }  // namespace v8::internal
1817