1// Copyright 2012 the V8 project authors. All rights reserved.
2// Redistribution and use in source and binary forms, with or without
3// modification, are permitted provided that the following conditions are
4// met:
5//
6//     * Redistributions of source code must retain the above copyright
7//       notice, this list of conditions and the following disclaimer.
8//     * Redistributions in binary form must reproduce the above
9//       copyright notice, this list of conditions and the following
10//       disclaimer in the documentation and/or other materials provided
11//       with the distribution.
12//     * Neither the name of Google Inc. nor the names of its
13//       contributors may be used to endorse or promote products derived
14//       from this software without specific prior written permission.
15//
16// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27
28#include "v8.h"
29
30#include "code-stubs.h"
31#include "compilation-cache.h"
32#include "cpu-profiler.h"
33#include "deoptimizer.h"
34#include "execution.h"
35#include "gdb-jit.h"
36#include "global-handles.h"
37#include "heap-profiler.h"
38#include "ic-inl.h"
39#include "incremental-marking.h"
40#include "mark-compact.h"
41#include "objects-visiting.h"
42#include "objects-visiting-inl.h"
43#include "stub-cache.h"
44#include "sweeper-thread.h"
45
46namespace v8 {
47namespace internal {
48
49
50const char* Marking::kWhiteBitPattern = "00";
51const char* Marking::kBlackBitPattern = "10";
52const char* Marking::kGreyBitPattern = "11";
53const char* Marking::kImpossibleBitPattern = "01";
54
55
56// -------------------------------------------------------------------------
57// MarkCompactCollector
58
59MarkCompactCollector::MarkCompactCollector() :  // NOLINT
60#ifdef DEBUG
61      state_(IDLE),
62#endif
63      sweep_precisely_(false),
64      reduce_memory_footprint_(false),
65      abort_incremental_marking_(false),
66      marking_parity_(ODD_MARKING_PARITY),
67      compacting_(false),
68      was_marked_incrementally_(false),
69      sweeping_pending_(false),
70      sequential_sweeping_(false),
71      tracer_(NULL),
72      migration_slots_buffer_(NULL),
73      heap_(NULL),
74      code_flusher_(NULL),
75      encountered_weak_collections_(NULL),
76      have_code_to_deoptimize_(false) { }
77
78#ifdef VERIFY_HEAP
79class VerifyMarkingVisitor: public ObjectVisitor {
80 public:
81  explicit VerifyMarkingVisitor(Heap* heap) : heap_(heap) {}
82
83  void VisitPointers(Object** start, Object** end) {
84    for (Object** current = start; current < end; current++) {
85      if ((*current)->IsHeapObject()) {
86        HeapObject* object = HeapObject::cast(*current);
87        CHECK(heap_->mark_compact_collector()->IsMarked(object));
88      }
89    }
90  }
91
92  void VisitEmbeddedPointer(RelocInfo* rinfo) {
93    ASSERT(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT);
94    if (!Code::IsWeakEmbeddedObject(rinfo->host()->kind(),
95                                    rinfo->target_object())) {
96      Object* p = rinfo->target_object();
97      VisitPointer(&p);
98    }
99  }
100
101 private:
102  Heap* heap_;
103};
104
105
106static void VerifyMarking(Heap* heap, Address bottom, Address top) {
107  VerifyMarkingVisitor visitor(heap);
108  HeapObject* object;
109  Address next_object_must_be_here_or_later = bottom;
110
111  for (Address current = bottom;
112       current < top;
113       current += kPointerSize) {
114    object = HeapObject::FromAddress(current);
115    if (MarkCompactCollector::IsMarked(object)) {
116      CHECK(current >= next_object_must_be_here_or_later);
117      object->Iterate(&visitor);
118      next_object_must_be_here_or_later = current + object->Size();
119    }
120  }
121}
122
123
124static void VerifyMarking(NewSpace* space) {
125  Address end = space->top();
126  NewSpacePageIterator it(space->bottom(), end);
127  // The bottom position is at the start of its page. Allows us to use
128  // page->area_start() as start of range on all pages.
129  CHECK_EQ(space->bottom(),
130            NewSpacePage::FromAddress(space->bottom())->area_start());
131  while (it.has_next()) {
132    NewSpacePage* page = it.next();
133    Address limit = it.has_next() ? page->area_end() : end;
134    CHECK(limit == end || !page->Contains(end));
135    VerifyMarking(space->heap(), page->area_start(), limit);
136  }
137}
138
139
140static void VerifyMarking(PagedSpace* space) {
141  PageIterator it(space);
142
143  while (it.has_next()) {
144    Page* p = it.next();
145    VerifyMarking(space->heap(), p->area_start(), p->area_end());
146  }
147}
148
149
150static void VerifyMarking(Heap* heap) {
151  VerifyMarking(heap->old_pointer_space());
152  VerifyMarking(heap->old_data_space());
153  VerifyMarking(heap->code_space());
154  VerifyMarking(heap->cell_space());
155  VerifyMarking(heap->property_cell_space());
156  VerifyMarking(heap->map_space());
157  VerifyMarking(heap->new_space());
158
159  VerifyMarkingVisitor visitor(heap);
160
161  LargeObjectIterator it(heap->lo_space());
162  for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
163    if (MarkCompactCollector::IsMarked(obj)) {
164      obj->Iterate(&visitor);
165    }
166  }
167
168  heap->IterateStrongRoots(&visitor, VISIT_ONLY_STRONG);
169}
170
171
172class VerifyEvacuationVisitor: public ObjectVisitor {
173 public:
174  void VisitPointers(Object** start, Object** end) {
175    for (Object** current = start; current < end; current++) {
176      if ((*current)->IsHeapObject()) {
177        HeapObject* object = HeapObject::cast(*current);
178        CHECK(!MarkCompactCollector::IsOnEvacuationCandidate(object));
179      }
180    }
181  }
182};
183
184
185static void VerifyEvacuation(Address bottom, Address top) {
186  VerifyEvacuationVisitor visitor;
187  HeapObject* object;
188  Address next_object_must_be_here_or_later = bottom;
189
190  for (Address current = bottom;
191       current < top;
192       current += kPointerSize) {
193    object = HeapObject::FromAddress(current);
194    if (MarkCompactCollector::IsMarked(object)) {
195      CHECK(current >= next_object_must_be_here_or_later);
196      object->Iterate(&visitor);
197      next_object_must_be_here_or_later = current + object->Size();
198    }
199  }
200}
201
202
203static void VerifyEvacuation(NewSpace* space) {
204  NewSpacePageIterator it(space->bottom(), space->top());
205  VerifyEvacuationVisitor visitor;
206
207  while (it.has_next()) {
208    NewSpacePage* page = it.next();
209    Address current = page->area_start();
210    Address limit = it.has_next() ? page->area_end() : space->top();
211    CHECK(limit == space->top() || !page->Contains(space->top()));
212    while (current < limit) {
213      HeapObject* object = HeapObject::FromAddress(current);
214      object->Iterate(&visitor);
215      current += object->Size();
216    }
217  }
218}
219
220
221static void VerifyEvacuation(PagedSpace* space) {
222  PageIterator it(space);
223
224  while (it.has_next()) {
225    Page* p = it.next();
226    if (p->IsEvacuationCandidate()) continue;
227    VerifyEvacuation(p->area_start(), p->area_end());
228  }
229}
230
231
232static void VerifyEvacuation(Heap* heap) {
233  VerifyEvacuation(heap->old_pointer_space());
234  VerifyEvacuation(heap->old_data_space());
235  VerifyEvacuation(heap->code_space());
236  VerifyEvacuation(heap->cell_space());
237  VerifyEvacuation(heap->property_cell_space());
238  VerifyEvacuation(heap->map_space());
239  VerifyEvacuation(heap->new_space());
240
241  VerifyEvacuationVisitor visitor;
242  heap->IterateStrongRoots(&visitor, VISIT_ALL);
243}
244#endif  // VERIFY_HEAP
245
246
247#ifdef DEBUG
248class VerifyNativeContextSeparationVisitor: public ObjectVisitor {
249 public:
250  VerifyNativeContextSeparationVisitor() : current_native_context_(NULL) {}
251
252  void VisitPointers(Object** start, Object** end) {
253    for (Object** current = start; current < end; current++) {
254      if ((*current)->IsHeapObject()) {
255        HeapObject* object = HeapObject::cast(*current);
256        if (object->IsString()) continue;
257        switch (object->map()->instance_type()) {
258          case JS_FUNCTION_TYPE:
259            CheckContext(JSFunction::cast(object)->context());
260            break;
261          case JS_GLOBAL_PROXY_TYPE:
262            CheckContext(JSGlobalProxy::cast(object)->native_context());
263            break;
264          case JS_GLOBAL_OBJECT_TYPE:
265          case JS_BUILTINS_OBJECT_TYPE:
266            CheckContext(GlobalObject::cast(object)->native_context());
267            break;
268          case JS_ARRAY_TYPE:
269          case JS_DATE_TYPE:
270          case JS_OBJECT_TYPE:
271          case JS_REGEXP_TYPE:
272            VisitPointer(HeapObject::RawField(object, JSObject::kMapOffset));
273            break;
274          case MAP_TYPE:
275            VisitPointer(HeapObject::RawField(object, Map::kPrototypeOffset));
276            VisitPointer(HeapObject::RawField(object, Map::kConstructorOffset));
277            break;
278          case FIXED_ARRAY_TYPE:
279            if (object->IsContext()) {
280              CheckContext(object);
281            } else {
282              FixedArray* array = FixedArray::cast(object);
283              int length = array->length();
284              // Set array length to zero to prevent cycles while iterating
285              // over array bodies, this is easier than intrusive marking.
286              array->set_length(0);
287              array->IterateBody(
288                  FIXED_ARRAY_TYPE, FixedArray::SizeFor(length), this);
289              array->set_length(length);
290            }
291            break;
292          case CELL_TYPE:
293          case JS_PROXY_TYPE:
294          case JS_VALUE_TYPE:
295          case TYPE_FEEDBACK_INFO_TYPE:
296            object->Iterate(this);
297            break;
298          case DECLARED_ACCESSOR_INFO_TYPE:
299          case EXECUTABLE_ACCESSOR_INFO_TYPE:
300          case BYTE_ARRAY_TYPE:
301          case CALL_HANDLER_INFO_TYPE:
302          case CODE_TYPE:
303          case FIXED_DOUBLE_ARRAY_TYPE:
304          case HEAP_NUMBER_TYPE:
305          case INTERCEPTOR_INFO_TYPE:
306          case ODDBALL_TYPE:
307          case SCRIPT_TYPE:
308          case SHARED_FUNCTION_INFO_TYPE:
309            break;
310          default:
311            UNREACHABLE();
312        }
313      }
314    }
315  }
316
317 private:
318  void CheckContext(Object* context) {
319    if (!context->IsContext()) return;
320    Context* native_context = Context::cast(context)->native_context();
321    if (current_native_context_ == NULL) {
322      current_native_context_ = native_context;
323    } else {
324      CHECK_EQ(current_native_context_, native_context);
325    }
326  }
327
328  Context* current_native_context_;
329};
330
331
332static void VerifyNativeContextSeparation(Heap* heap) {
333  HeapObjectIterator it(heap->code_space());
334
335  for (Object* object = it.Next(); object != NULL; object = it.Next()) {
336    VerifyNativeContextSeparationVisitor visitor;
337    Code::cast(object)->CodeIterateBody(&visitor);
338  }
339}
340#endif
341
342
343void MarkCompactCollector::TearDown() {
344  AbortCompaction();
345}
346
347
348void MarkCompactCollector::AddEvacuationCandidate(Page* p) {
349  p->MarkEvacuationCandidate();
350  evacuation_candidates_.Add(p);
351}
352
353
354static void TraceFragmentation(PagedSpace* space) {
355  int number_of_pages = space->CountTotalPages();
356  intptr_t reserved = (number_of_pages * space->AreaSize());
357  intptr_t free = reserved - space->SizeOfObjects();
358  PrintF("[%s]: %d pages, %d (%.1f%%) free\n",
359         AllocationSpaceName(space->identity()),
360         number_of_pages,
361         static_cast<int>(free),
362         static_cast<double>(free) * 100 / reserved);
363}
364
365
366bool MarkCompactCollector::StartCompaction(CompactionMode mode) {
367  if (!compacting_) {
368    ASSERT(evacuation_candidates_.length() == 0);
369
370#ifdef ENABLE_GDB_JIT_INTERFACE
371    // If GDBJIT interface is active disable compaction.
372    if (FLAG_gdbjit) return false;
373#endif
374
375    CollectEvacuationCandidates(heap()->old_pointer_space());
376    CollectEvacuationCandidates(heap()->old_data_space());
377
378    if (FLAG_compact_code_space &&
379        (mode == NON_INCREMENTAL_COMPACTION ||
380         FLAG_incremental_code_compaction)) {
381      CollectEvacuationCandidates(heap()->code_space());
382    } else if (FLAG_trace_fragmentation) {
383      TraceFragmentation(heap()->code_space());
384    }
385
386    if (FLAG_trace_fragmentation) {
387      TraceFragmentation(heap()->map_space());
388      TraceFragmentation(heap()->cell_space());
389      TraceFragmentation(heap()->property_cell_space());
390    }
391
392    heap()->old_pointer_space()->EvictEvacuationCandidatesFromFreeLists();
393    heap()->old_data_space()->EvictEvacuationCandidatesFromFreeLists();
394    heap()->code_space()->EvictEvacuationCandidatesFromFreeLists();
395
396    compacting_ = evacuation_candidates_.length() > 0;
397  }
398
399  return compacting_;
400}
401
402
403void MarkCompactCollector::CollectGarbage() {
404  // Make sure that Prepare() has been called. The individual steps below will
405  // update the state as they proceed.
406  ASSERT(state_ == PREPARE_GC);
407  ASSERT(encountered_weak_collections_ == Smi::FromInt(0));
408
409  MarkLiveObjects();
410  ASSERT(heap_->incremental_marking()->IsStopped());
411
412  if (FLAG_collect_maps) ClearNonLiveReferences();
413
414  ClearWeakCollections();
415
416#ifdef VERIFY_HEAP
417  if (FLAG_verify_heap) {
418    VerifyMarking(heap_);
419  }
420#endif
421
422  SweepSpaces();
423
424  if (!FLAG_collect_maps) ReattachInitialMaps();
425
426#ifdef DEBUG
427  if (FLAG_verify_native_context_separation) {
428    VerifyNativeContextSeparation(heap_);
429  }
430#endif
431
432#ifdef VERIFY_HEAP
433  if (heap()->weak_embedded_objects_verification_enabled()) {
434    VerifyWeakEmbeddedObjectsInOptimizedCode();
435  }
436  if (FLAG_collect_maps && FLAG_omit_map_checks_for_leaf_maps) {
437    VerifyOmittedMapChecks();
438  }
439#endif
440
441  Finish();
442
443  if (marking_parity_ == EVEN_MARKING_PARITY) {
444    marking_parity_ = ODD_MARKING_PARITY;
445  } else {
446    ASSERT(marking_parity_ == ODD_MARKING_PARITY);
447    marking_parity_ = EVEN_MARKING_PARITY;
448  }
449
450  tracer_ = NULL;
451}
452
453
454#ifdef VERIFY_HEAP
455void MarkCompactCollector::VerifyMarkbitsAreClean(PagedSpace* space) {
456  PageIterator it(space);
457
458  while (it.has_next()) {
459    Page* p = it.next();
460    CHECK(p->markbits()->IsClean());
461    CHECK_EQ(0, p->LiveBytes());
462  }
463}
464
465
466void MarkCompactCollector::VerifyMarkbitsAreClean(NewSpace* space) {
467  NewSpacePageIterator it(space->bottom(), space->top());
468
469  while (it.has_next()) {
470    NewSpacePage* p = it.next();
471    CHECK(p->markbits()->IsClean());
472    CHECK_EQ(0, p->LiveBytes());
473  }
474}
475
476
477void MarkCompactCollector::VerifyMarkbitsAreClean() {
478  VerifyMarkbitsAreClean(heap_->old_pointer_space());
479  VerifyMarkbitsAreClean(heap_->old_data_space());
480  VerifyMarkbitsAreClean(heap_->code_space());
481  VerifyMarkbitsAreClean(heap_->cell_space());
482  VerifyMarkbitsAreClean(heap_->property_cell_space());
483  VerifyMarkbitsAreClean(heap_->map_space());
484  VerifyMarkbitsAreClean(heap_->new_space());
485
486  LargeObjectIterator it(heap_->lo_space());
487  for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
488    MarkBit mark_bit = Marking::MarkBitFrom(obj);
489    CHECK(Marking::IsWhite(mark_bit));
490    CHECK_EQ(0, Page::FromAddress(obj->address())->LiveBytes());
491  }
492}
493
494
495void MarkCompactCollector::VerifyWeakEmbeddedObjectsInOptimizedCode() {
496  HeapObjectIterator code_iterator(heap()->code_space());
497  for (HeapObject* obj = code_iterator.Next();
498       obj != NULL;
499       obj = code_iterator.Next()) {
500    Code* code = Code::cast(obj);
501    if (code->kind() != Code::OPTIMIZED_FUNCTION) continue;
502    if (WillBeDeoptimized(code)) continue;
503    code->VerifyEmbeddedObjectsDependency();
504  }
505}
506
507
508void MarkCompactCollector::VerifyOmittedMapChecks() {
509  HeapObjectIterator iterator(heap()->map_space());
510  for (HeapObject* obj = iterator.Next();
511       obj != NULL;
512       obj = iterator.Next()) {
513    Map* map = Map::cast(obj);
514    map->VerifyOmittedMapChecks();
515  }
516}
517#endif  // VERIFY_HEAP
518
519
520static void ClearMarkbitsInPagedSpace(PagedSpace* space) {
521  PageIterator it(space);
522
523  while (it.has_next()) {
524    Bitmap::Clear(it.next());
525  }
526}
527
528
529static void ClearMarkbitsInNewSpace(NewSpace* space) {
530  NewSpacePageIterator it(space->ToSpaceStart(), space->ToSpaceEnd());
531
532  while (it.has_next()) {
533    Bitmap::Clear(it.next());
534  }
535}
536
537
538void MarkCompactCollector::ClearMarkbits() {
539  ClearMarkbitsInPagedSpace(heap_->code_space());
540  ClearMarkbitsInPagedSpace(heap_->map_space());
541  ClearMarkbitsInPagedSpace(heap_->old_pointer_space());
542  ClearMarkbitsInPagedSpace(heap_->old_data_space());
543  ClearMarkbitsInPagedSpace(heap_->cell_space());
544  ClearMarkbitsInPagedSpace(heap_->property_cell_space());
545  ClearMarkbitsInNewSpace(heap_->new_space());
546
547  LargeObjectIterator it(heap_->lo_space());
548  for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
549    MarkBit mark_bit = Marking::MarkBitFrom(obj);
550    mark_bit.Clear();
551    mark_bit.Next().Clear();
552    Page::FromAddress(obj->address())->ResetProgressBar();
553    Page::FromAddress(obj->address())->ResetLiveBytes();
554  }
555}
556
557
558void MarkCompactCollector::StartSweeperThreads() {
559  sweeping_pending_ = true;
560  for (int i = 0; i < isolate()->num_sweeper_threads(); i++) {
561    isolate()->sweeper_threads()[i]->StartSweeping();
562  }
563}
564
565
566void MarkCompactCollector::WaitUntilSweepingCompleted() {
567  ASSERT(sweeping_pending_ == true);
568  for (int i = 0; i < isolate()->num_sweeper_threads(); i++) {
569    isolate()->sweeper_threads()[i]->WaitForSweeperThread();
570  }
571  sweeping_pending_ = false;
572  StealMemoryFromSweeperThreads(heap()->paged_space(OLD_DATA_SPACE));
573  StealMemoryFromSweeperThreads(heap()->paged_space(OLD_POINTER_SPACE));
574  heap()->paged_space(OLD_DATA_SPACE)->ResetUnsweptFreeBytes();
575  heap()->paged_space(OLD_POINTER_SPACE)->ResetUnsweptFreeBytes();
576}
577
578
579intptr_t MarkCompactCollector::
580             StealMemoryFromSweeperThreads(PagedSpace* space) {
581  intptr_t freed_bytes = 0;
582  for (int i = 0; i < isolate()->num_sweeper_threads(); i++) {
583    freed_bytes += isolate()->sweeper_threads()[i]->StealMemory(space);
584  }
585  space->AddToAccountingStats(freed_bytes);
586  space->DecrementUnsweptFreeBytes(freed_bytes);
587  return freed_bytes;
588}
589
590
591bool MarkCompactCollector::AreSweeperThreadsActivated() {
592  return isolate()->sweeper_threads() != NULL;
593}
594
595
596bool MarkCompactCollector::IsConcurrentSweepingInProgress() {
597  return sweeping_pending_;
598}
599
600
601bool Marking::TransferMark(Address old_start, Address new_start) {
602  // This is only used when resizing an object.
603  ASSERT(MemoryChunk::FromAddress(old_start) ==
604         MemoryChunk::FromAddress(new_start));
605
606  // If the mark doesn't move, we don't check the color of the object.
607  // It doesn't matter whether the object is black, since it hasn't changed
608  // size, so the adjustment to the live data count will be zero anyway.
609  if (old_start == new_start) return false;
610
611  MarkBit new_mark_bit = MarkBitFrom(new_start);
612  MarkBit old_mark_bit = MarkBitFrom(old_start);
613
614#ifdef DEBUG
615  ObjectColor old_color = Color(old_mark_bit);
616#endif
617
618  if (Marking::IsBlack(old_mark_bit)) {
619    old_mark_bit.Clear();
620    ASSERT(IsWhite(old_mark_bit));
621    Marking::MarkBlack(new_mark_bit);
622    return true;
623  } else if (Marking::IsGrey(old_mark_bit)) {
624    ASSERT(heap_->incremental_marking()->IsMarking());
625    old_mark_bit.Clear();
626    old_mark_bit.Next().Clear();
627    ASSERT(IsWhite(old_mark_bit));
628    heap_->incremental_marking()->WhiteToGreyAndPush(
629        HeapObject::FromAddress(new_start), new_mark_bit);
630    heap_->incremental_marking()->RestartIfNotMarking();
631  }
632
633#ifdef DEBUG
634  ObjectColor new_color = Color(new_mark_bit);
635  ASSERT(new_color == old_color);
636#endif
637
638  return false;
639}
640
641
642const char* AllocationSpaceName(AllocationSpace space) {
643  switch (space) {
644    case NEW_SPACE: return "NEW_SPACE";
645    case OLD_POINTER_SPACE: return "OLD_POINTER_SPACE";
646    case OLD_DATA_SPACE: return "OLD_DATA_SPACE";
647    case CODE_SPACE: return "CODE_SPACE";
648    case MAP_SPACE: return "MAP_SPACE";
649    case CELL_SPACE: return "CELL_SPACE";
650    case PROPERTY_CELL_SPACE:
651      return "PROPERTY_CELL_SPACE";
652    case LO_SPACE: return "LO_SPACE";
653    default:
654      UNREACHABLE();
655  }
656
657  return NULL;
658}
659
660
661// Returns zero for pages that have so little fragmentation that it is not
662// worth defragmenting them.  Otherwise a positive integer that gives an
663// estimate of fragmentation on an arbitrary scale.
664static int FreeListFragmentation(PagedSpace* space, Page* p) {
665  // If page was not swept then there are no free list items on it.
666  if (!p->WasSwept()) {
667    if (FLAG_trace_fragmentation) {
668      PrintF("%p [%s]: %d bytes live (unswept)\n",
669             reinterpret_cast<void*>(p),
670             AllocationSpaceName(space->identity()),
671             p->LiveBytes());
672    }
673    return 0;
674  }
675
676  PagedSpace::SizeStats sizes;
677  space->ObtainFreeListStatistics(p, &sizes);
678
679  intptr_t ratio;
680  intptr_t ratio_threshold;
681  intptr_t area_size = space->AreaSize();
682  if (space->identity() == CODE_SPACE) {
683    ratio = (sizes.medium_size_ * 10 + sizes.large_size_ * 2) * 100 /
684        area_size;
685    ratio_threshold = 10;
686  } else {
687    ratio = (sizes.small_size_ * 5 + sizes.medium_size_) * 100 /
688        area_size;
689    ratio_threshold = 15;
690  }
691
692  if (FLAG_trace_fragmentation) {
693    PrintF("%p [%s]: %d (%.2f%%) %d (%.2f%%) %d (%.2f%%) %d (%.2f%%) %s\n",
694           reinterpret_cast<void*>(p),
695           AllocationSpaceName(space->identity()),
696           static_cast<int>(sizes.small_size_),
697           static_cast<double>(sizes.small_size_ * 100) /
698           area_size,
699           static_cast<int>(sizes.medium_size_),
700           static_cast<double>(sizes.medium_size_ * 100) /
701           area_size,
702           static_cast<int>(sizes.large_size_),
703           static_cast<double>(sizes.large_size_ * 100) /
704           area_size,
705           static_cast<int>(sizes.huge_size_),
706           static_cast<double>(sizes.huge_size_ * 100) /
707           area_size,
708           (ratio > ratio_threshold) ? "[fragmented]" : "");
709  }
710
711  if (FLAG_always_compact && sizes.Total() != area_size) {
712    return 1;
713  }
714
715  if (ratio <= ratio_threshold) return 0;  // Not fragmented.
716
717  return static_cast<int>(ratio - ratio_threshold);
718}
719
720
721void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) {
722  ASSERT(space->identity() == OLD_POINTER_SPACE ||
723         space->identity() == OLD_DATA_SPACE ||
724         space->identity() == CODE_SPACE);
725
726  static const int kMaxMaxEvacuationCandidates = 1000;
727  int number_of_pages = space->CountTotalPages();
728  int max_evacuation_candidates =
729      static_cast<int>(sqrt(number_of_pages / 2.0) + 1);
730
731  if (FLAG_stress_compaction || FLAG_always_compact) {
732    max_evacuation_candidates = kMaxMaxEvacuationCandidates;
733  }
734
735  class Candidate {
736   public:
737    Candidate() : fragmentation_(0), page_(NULL) { }
738    Candidate(int f, Page* p) : fragmentation_(f), page_(p) { }
739
740    int fragmentation() { return fragmentation_; }
741    Page* page() { return page_; }
742
743   private:
744    int fragmentation_;
745    Page* page_;
746  };
747
748  enum CompactionMode {
749    COMPACT_FREE_LISTS,
750    REDUCE_MEMORY_FOOTPRINT
751  };
752
753  CompactionMode mode = COMPACT_FREE_LISTS;
754
755  intptr_t reserved = number_of_pages * space->AreaSize();
756  intptr_t over_reserved = reserved - space->SizeOfObjects();
757  static const intptr_t kFreenessThreshold = 50;
758
759  if (reduce_memory_footprint_ && over_reserved >= space->AreaSize()) {
760    // If reduction of memory footprint was requested, we are aggressive
761    // about choosing pages to free.  We expect that half-empty pages
762    // are easier to compact so slightly bump the limit.
763    mode = REDUCE_MEMORY_FOOTPRINT;
764    max_evacuation_candidates += 2;
765  }
766
767
768  if (over_reserved > reserved / 3 && over_reserved >= 2 * space->AreaSize()) {
769    // If over-usage is very high (more than a third of the space), we
770    // try to free all mostly empty pages.  We expect that almost empty
771    // pages are even easier to compact so bump the limit even more.
772    mode = REDUCE_MEMORY_FOOTPRINT;
773    max_evacuation_candidates *= 2;
774  }
775
776  if (FLAG_trace_fragmentation && mode == REDUCE_MEMORY_FOOTPRINT) {
777    PrintF("Estimated over reserved memory: %.1f / %.1f MB (threshold %d), "
778           "evacuation candidate limit: %d\n",
779           static_cast<double>(over_reserved) / MB,
780           static_cast<double>(reserved) / MB,
781           static_cast<int>(kFreenessThreshold),
782           max_evacuation_candidates);
783  }
784
785  intptr_t estimated_release = 0;
786
787  Candidate candidates[kMaxMaxEvacuationCandidates];
788
789  max_evacuation_candidates =
790      Min(kMaxMaxEvacuationCandidates, max_evacuation_candidates);
791
792  int count = 0;
793  int fragmentation = 0;
794  Candidate* least = NULL;
795
796  PageIterator it(space);
797  if (it.has_next()) it.next();  // Never compact the first page.
798
799  while (it.has_next()) {
800    Page* p = it.next();
801    p->ClearEvacuationCandidate();
802
803    if (FLAG_stress_compaction) {
804      unsigned int counter = space->heap()->ms_count();
805      uintptr_t page_number = reinterpret_cast<uintptr_t>(p) >> kPageSizeBits;
806      if ((counter & 1) == (page_number & 1)) fragmentation = 1;
807    } else if (mode == REDUCE_MEMORY_FOOTPRINT) {
808      // Don't try to release too many pages.
809      if (estimated_release >= over_reserved) {
810        continue;
811      }
812
813      intptr_t free_bytes = 0;
814
815      if (!p->WasSwept()) {
816        free_bytes = (p->area_size() - p->LiveBytes());
817      } else {
818        PagedSpace::SizeStats sizes;
819        space->ObtainFreeListStatistics(p, &sizes);
820        free_bytes = sizes.Total();
821      }
822
823      int free_pct = static_cast<int>(free_bytes * 100) / p->area_size();
824
825      if (free_pct >= kFreenessThreshold) {
826        estimated_release += free_bytes;
827        fragmentation = free_pct;
828      } else {
829        fragmentation = 0;
830      }
831
832      if (FLAG_trace_fragmentation) {
833        PrintF("%p [%s]: %d (%.2f%%) free %s\n",
834               reinterpret_cast<void*>(p),
835               AllocationSpaceName(space->identity()),
836               static_cast<int>(free_bytes),
837               static_cast<double>(free_bytes * 100) / p->area_size(),
838               (fragmentation > 0) ? "[fragmented]" : "");
839      }
840    } else {
841      fragmentation = FreeListFragmentation(space, p);
842    }
843
844    if (fragmentation != 0) {
845      if (count < max_evacuation_candidates) {
846        candidates[count++] = Candidate(fragmentation, p);
847      } else {
848        if (least == NULL) {
849          for (int i = 0; i < max_evacuation_candidates; i++) {
850            if (least == NULL ||
851                candidates[i].fragmentation() < least->fragmentation()) {
852              least = candidates + i;
853            }
854          }
855        }
856        if (least->fragmentation() < fragmentation) {
857          *least = Candidate(fragmentation, p);
858          least = NULL;
859        }
860      }
861    }
862  }
863
864  for (int i = 0; i < count; i++) {
865    AddEvacuationCandidate(candidates[i].page());
866  }
867
868  if (count > 0 && FLAG_trace_fragmentation) {
869    PrintF("Collected %d evacuation candidates for space %s\n",
870           count,
871           AllocationSpaceName(space->identity()));
872  }
873}
874
875
876void MarkCompactCollector::AbortCompaction() {
877  if (compacting_) {
878    int npages = evacuation_candidates_.length();
879    for (int i = 0; i < npages; i++) {
880      Page* p = evacuation_candidates_[i];
881      slots_buffer_allocator_.DeallocateChain(p->slots_buffer_address());
882      p->ClearEvacuationCandidate();
883      p->ClearFlag(MemoryChunk::RESCAN_ON_EVACUATION);
884    }
885    compacting_ = false;
886    evacuation_candidates_.Rewind(0);
887    invalidated_code_.Rewind(0);
888  }
889  ASSERT_EQ(0, evacuation_candidates_.length());
890}
891
892
893void MarkCompactCollector::Prepare(GCTracer* tracer) {
894  was_marked_incrementally_ = heap()->incremental_marking()->IsMarking();
895
896  // Rather than passing the tracer around we stash it in a static member
897  // variable.
898  tracer_ = tracer;
899
900#ifdef DEBUG
901  ASSERT(state_ == IDLE);
902  state_ = PREPARE_GC;
903#endif
904
905  ASSERT(!FLAG_never_compact || !FLAG_always_compact);
906
907  if (IsConcurrentSweepingInProgress()) {
908    // Instead of waiting we could also abort the sweeper threads here.
909    WaitUntilSweepingCompleted();
910  }
911
912  // Clear marking bits if incremental marking is aborted.
913  if (was_marked_incrementally_ && abort_incremental_marking_) {
914    heap()->incremental_marking()->Abort();
915    ClearMarkbits();
916    AbortCompaction();
917    was_marked_incrementally_ = false;
918  }
919
920  // Don't start compaction if we are in the middle of incremental
921  // marking cycle. We did not collect any slots.
922  if (!FLAG_never_compact && !was_marked_incrementally_) {
923    StartCompaction(NON_INCREMENTAL_COMPACTION);
924  }
925
926  PagedSpaces spaces(heap());
927  for (PagedSpace* space = spaces.next();
928       space != NULL;
929       space = spaces.next()) {
930    space->PrepareForMarkCompact();
931  }
932
933#ifdef VERIFY_HEAP
934  if (!was_marked_incrementally_ && FLAG_verify_heap) {
935    VerifyMarkbitsAreClean();
936  }
937#endif
938}
939
940
941void MarkCompactCollector::Finish() {
942#ifdef DEBUG
943  ASSERT(state_ == SWEEP_SPACES || state_ == RELOCATE_OBJECTS);
944  state_ = IDLE;
945#endif
946  // The stub cache is not traversed during GC; clear the cache to
947  // force lazy re-initialization of it. This must be done after the
948  // GC, because it relies on the new address of certain old space
949  // objects (empty string, illegal builtin).
950  isolate()->stub_cache()->Clear();
951
952  if (have_code_to_deoptimize_) {
953    // Some code objects were marked for deoptimization during the GC.
954    Deoptimizer::DeoptimizeMarkedCode(isolate());
955    have_code_to_deoptimize_ = false;
956  }
957}
958
959
960// -------------------------------------------------------------------------
961// Phase 1: tracing and marking live objects.
962//   before: all objects are in normal state.
963//   after: a live object's map pointer is marked as '00'.
964
965// Marking all live objects in the heap as part of mark-sweep or mark-compact
966// collection.  Before marking, all objects are in their normal state.  After
967// marking, live objects' map pointers are marked indicating that the object
968// has been found reachable.
969//
970// The marking algorithm is a (mostly) depth-first (because of possible stack
971// overflow) traversal of the graph of objects reachable from the roots.  It
972// uses an explicit stack of pointers rather than recursion.  The young
973// generation's inactive ('from') space is used as a marking stack.  The
974// objects in the marking stack are the ones that have been reached and marked
975// but their children have not yet been visited.
976//
977// The marking stack can overflow during traversal.  In that case, we set an
978// overflow flag.  When the overflow flag is set, we continue marking objects
979// reachable from the objects on the marking stack, but no longer push them on
980// the marking stack.  Instead, we mark them as both marked and overflowed.
981// When the stack is in the overflowed state, objects marked as overflowed
982// have been reached and marked but their children have not been visited yet.
983// After emptying the marking stack, we clear the overflow flag and traverse
984// the heap looking for objects marked as overflowed, push them on the stack,
985// and continue with marking.  This process repeats until all reachable
986// objects have been marked.
987
988void CodeFlusher::ProcessJSFunctionCandidates() {
989  Code* lazy_compile = isolate_->builtins()->builtin(Builtins::kLazyCompile);
990  Object* undefined = isolate_->heap()->undefined_value();
991
992  JSFunction* candidate = jsfunction_candidates_head_;
993  JSFunction* next_candidate;
994  while (candidate != NULL) {
995    next_candidate = GetNextCandidate(candidate);
996    ClearNextCandidate(candidate, undefined);
997
998    SharedFunctionInfo* shared = candidate->shared();
999
1000    Code* code = shared->code();
1001    MarkBit code_mark = Marking::MarkBitFrom(code);
1002    if (!code_mark.Get()) {
1003      if (FLAG_trace_code_flushing && shared->is_compiled()) {
1004        PrintF("[code-flushing clears: ");
1005        shared->ShortPrint();
1006        PrintF(" - age: %d]\n", code->GetAge());
1007      }
1008      shared->set_code(lazy_compile);
1009      candidate->set_code(lazy_compile);
1010    } else {
1011      candidate->set_code(code);
1012    }
1013
1014    // We are in the middle of a GC cycle so the write barrier in the code
1015    // setter did not record the slot update and we have to do that manually.
1016    Address slot = candidate->address() + JSFunction::kCodeEntryOffset;
1017    Code* target = Code::cast(Code::GetObjectFromEntryAddress(slot));
1018    isolate_->heap()->mark_compact_collector()->
1019        RecordCodeEntrySlot(slot, target);
1020
1021    Object** shared_code_slot =
1022        HeapObject::RawField(shared, SharedFunctionInfo::kCodeOffset);
1023    isolate_->heap()->mark_compact_collector()->
1024        RecordSlot(shared_code_slot, shared_code_slot, *shared_code_slot);
1025
1026    candidate = next_candidate;
1027  }
1028
1029  jsfunction_candidates_head_ = NULL;
1030}
1031
1032
1033void CodeFlusher::ProcessSharedFunctionInfoCandidates() {
1034  Code* lazy_compile = isolate_->builtins()->builtin(Builtins::kLazyCompile);
1035
1036  SharedFunctionInfo* candidate = shared_function_info_candidates_head_;
1037  SharedFunctionInfo* next_candidate;
1038  while (candidate != NULL) {
1039    next_candidate = GetNextCandidate(candidate);
1040    ClearNextCandidate(candidate);
1041
1042    Code* code = candidate->code();
1043    MarkBit code_mark = Marking::MarkBitFrom(code);
1044    if (!code_mark.Get()) {
1045      if (FLAG_trace_code_flushing && candidate->is_compiled()) {
1046        PrintF("[code-flushing clears: ");
1047        candidate->ShortPrint();
1048        PrintF(" - age: %d]\n", code->GetAge());
1049      }
1050      candidate->set_code(lazy_compile);
1051    }
1052
1053    Object** code_slot =
1054        HeapObject::RawField(candidate, SharedFunctionInfo::kCodeOffset);
1055    isolate_->heap()->mark_compact_collector()->
1056        RecordSlot(code_slot, code_slot, *code_slot);
1057
1058    candidate = next_candidate;
1059  }
1060
1061  shared_function_info_candidates_head_ = NULL;
1062}
1063
1064
1065void CodeFlusher::ProcessOptimizedCodeMaps() {
1066  static const int kEntriesStart = SharedFunctionInfo::kEntriesStart;
1067  static const int kEntryLength = SharedFunctionInfo::kEntryLength;
1068  static const int kContextOffset = 0;
1069  static const int kCodeOffset = 1;
1070  static const int kLiteralsOffset = 2;
1071  STATIC_ASSERT(kEntryLength == 3);
1072
1073  SharedFunctionInfo* holder = optimized_code_map_holder_head_;
1074  SharedFunctionInfo* next_holder;
1075  while (holder != NULL) {
1076    next_holder = GetNextCodeMap(holder);
1077    ClearNextCodeMap(holder);
1078
1079    FixedArray* code_map = FixedArray::cast(holder->optimized_code_map());
1080    int new_length = kEntriesStart;
1081    int old_length = code_map->length();
1082    for (int i = kEntriesStart; i < old_length; i += kEntryLength) {
1083      Code* code = Code::cast(code_map->get(i + kCodeOffset));
1084      MarkBit code_mark = Marking::MarkBitFrom(code);
1085      if (!code_mark.Get()) {
1086        continue;
1087      }
1088
1089      // Update and record the context slot in the optimized code map.
1090      Object** context_slot = HeapObject::RawField(code_map,
1091          FixedArray::OffsetOfElementAt(new_length));
1092      code_map->set(new_length++, code_map->get(i + kContextOffset));
1093      ASSERT(Marking::IsBlack(
1094          Marking::MarkBitFrom(HeapObject::cast(*context_slot))));
1095      isolate_->heap()->mark_compact_collector()->
1096          RecordSlot(context_slot, context_slot, *context_slot);
1097
1098      // Update and record the code slot in the optimized code map.
1099      Object** code_slot = HeapObject::RawField(code_map,
1100          FixedArray::OffsetOfElementAt(new_length));
1101      code_map->set(new_length++, code_map->get(i + kCodeOffset));
1102      ASSERT(Marking::IsBlack(
1103          Marking::MarkBitFrom(HeapObject::cast(*code_slot))));
1104      isolate_->heap()->mark_compact_collector()->
1105          RecordSlot(code_slot, code_slot, *code_slot);
1106
1107      // Update and record the literals slot in the optimized code map.
1108      Object** literals_slot = HeapObject::RawField(code_map,
1109          FixedArray::OffsetOfElementAt(new_length));
1110      code_map->set(new_length++, code_map->get(i + kLiteralsOffset));
1111      ASSERT(Marking::IsBlack(
1112          Marking::MarkBitFrom(HeapObject::cast(*literals_slot))));
1113      isolate_->heap()->mark_compact_collector()->
1114          RecordSlot(literals_slot, literals_slot, *literals_slot);
1115    }
1116
1117    // Trim the optimized code map if entries have been removed.
1118    if (new_length < old_length) {
1119      holder->TrimOptimizedCodeMap(old_length - new_length);
1120    }
1121
1122    holder = next_holder;
1123  }
1124
1125  optimized_code_map_holder_head_ = NULL;
1126}
1127
1128
1129void CodeFlusher::EvictCandidate(SharedFunctionInfo* shared_info) {
1130  // Make sure previous flushing decisions are revisited.
1131  isolate_->heap()->incremental_marking()->RecordWrites(shared_info);
1132
1133  if (FLAG_trace_code_flushing) {
1134    PrintF("[code-flushing abandons function-info: ");
1135    shared_info->ShortPrint();
1136    PrintF("]\n");
1137  }
1138
1139  SharedFunctionInfo* candidate = shared_function_info_candidates_head_;
1140  SharedFunctionInfo* next_candidate;
1141  if (candidate == shared_info) {
1142    next_candidate = GetNextCandidate(shared_info);
1143    shared_function_info_candidates_head_ = next_candidate;
1144    ClearNextCandidate(shared_info);
1145  } else {
1146    while (candidate != NULL) {
1147      next_candidate = GetNextCandidate(candidate);
1148
1149      if (next_candidate == shared_info) {
1150        next_candidate = GetNextCandidate(shared_info);
1151        SetNextCandidate(candidate, next_candidate);
1152        ClearNextCandidate(shared_info);
1153        break;
1154      }
1155
1156      candidate = next_candidate;
1157    }
1158  }
1159}
1160
1161
1162void CodeFlusher::EvictCandidate(JSFunction* function) {
1163  ASSERT(!function->next_function_link()->IsUndefined());
1164  Object* undefined = isolate_->heap()->undefined_value();
1165
1166  // Make sure previous flushing decisions are revisited.
1167  isolate_->heap()->incremental_marking()->RecordWrites(function);
1168  isolate_->heap()->incremental_marking()->RecordWrites(function->shared());
1169
1170  if (FLAG_trace_code_flushing) {
1171    PrintF("[code-flushing abandons closure: ");
1172    function->shared()->ShortPrint();
1173    PrintF("]\n");
1174  }
1175
1176  JSFunction* candidate = jsfunction_candidates_head_;
1177  JSFunction* next_candidate;
1178  if (candidate == function) {
1179    next_candidate = GetNextCandidate(function);
1180    jsfunction_candidates_head_ = next_candidate;
1181    ClearNextCandidate(function, undefined);
1182  } else {
1183    while (candidate != NULL) {
1184      next_candidate = GetNextCandidate(candidate);
1185
1186      if (next_candidate == function) {
1187        next_candidate = GetNextCandidate(function);
1188        SetNextCandidate(candidate, next_candidate);
1189        ClearNextCandidate(function, undefined);
1190        break;
1191      }
1192
1193      candidate = next_candidate;
1194    }
1195  }
1196}
1197
1198
1199void CodeFlusher::EvictOptimizedCodeMap(SharedFunctionInfo* code_map_holder) {
1200  ASSERT(!FixedArray::cast(code_map_holder->optimized_code_map())->
1201         get(SharedFunctionInfo::kNextMapIndex)->IsUndefined());
1202
1203  // Make sure previous flushing decisions are revisited.
1204  isolate_->heap()->incremental_marking()->RecordWrites(code_map_holder);
1205
1206  if (FLAG_trace_code_flushing) {
1207    PrintF("[code-flushing abandons code-map: ");
1208    code_map_holder->ShortPrint();
1209    PrintF("]\n");
1210  }
1211
1212  SharedFunctionInfo* holder = optimized_code_map_holder_head_;
1213  SharedFunctionInfo* next_holder;
1214  if (holder == code_map_holder) {
1215    next_holder = GetNextCodeMap(code_map_holder);
1216    optimized_code_map_holder_head_ = next_holder;
1217    ClearNextCodeMap(code_map_holder);
1218  } else {
1219    while (holder != NULL) {
1220      next_holder = GetNextCodeMap(holder);
1221
1222      if (next_holder == code_map_holder) {
1223        next_holder = GetNextCodeMap(code_map_holder);
1224        SetNextCodeMap(holder, next_holder);
1225        ClearNextCodeMap(code_map_holder);
1226        break;
1227      }
1228
1229      holder = next_holder;
1230    }
1231  }
1232}
1233
1234
1235void CodeFlusher::EvictJSFunctionCandidates() {
1236  JSFunction* candidate = jsfunction_candidates_head_;
1237  JSFunction* next_candidate;
1238  while (candidate != NULL) {
1239    next_candidate = GetNextCandidate(candidate);
1240    EvictCandidate(candidate);
1241    candidate = next_candidate;
1242  }
1243  ASSERT(jsfunction_candidates_head_ == NULL);
1244}
1245
1246
1247void CodeFlusher::EvictSharedFunctionInfoCandidates() {
1248  SharedFunctionInfo* candidate = shared_function_info_candidates_head_;
1249  SharedFunctionInfo* next_candidate;
1250  while (candidate != NULL) {
1251    next_candidate = GetNextCandidate(candidate);
1252    EvictCandidate(candidate);
1253    candidate = next_candidate;
1254  }
1255  ASSERT(shared_function_info_candidates_head_ == NULL);
1256}
1257
1258
1259void CodeFlusher::EvictOptimizedCodeMaps() {
1260  SharedFunctionInfo* holder = optimized_code_map_holder_head_;
1261  SharedFunctionInfo* next_holder;
1262  while (holder != NULL) {
1263    next_holder = GetNextCodeMap(holder);
1264    EvictOptimizedCodeMap(holder);
1265    holder = next_holder;
1266  }
1267  ASSERT(optimized_code_map_holder_head_ == NULL);
1268}
1269
1270
1271void CodeFlusher::IteratePointersToFromSpace(ObjectVisitor* v) {
1272  Heap* heap = isolate_->heap();
1273
1274  JSFunction** slot = &jsfunction_candidates_head_;
1275  JSFunction* candidate = jsfunction_candidates_head_;
1276  while (candidate != NULL) {
1277    if (heap->InFromSpace(candidate)) {
1278      v->VisitPointer(reinterpret_cast<Object**>(slot));
1279    }
1280    candidate = GetNextCandidate(*slot);
1281    slot = GetNextCandidateSlot(*slot);
1282  }
1283}
1284
1285
1286MarkCompactCollector::~MarkCompactCollector() {
1287  if (code_flusher_ != NULL) {
1288    delete code_flusher_;
1289    code_flusher_ = NULL;
1290  }
1291}
1292
1293
1294static inline HeapObject* ShortCircuitConsString(Object** p) {
1295  // Optimization: If the heap object pointed to by p is a non-internalized
1296  // cons string whose right substring is HEAP->empty_string, update
1297  // it in place to its left substring.  Return the updated value.
1298  //
1299  // Here we assume that if we change *p, we replace it with a heap object
1300  // (i.e., the left substring of a cons string is always a heap object).
1301  //
1302  // The check performed is:
1303  //   object->IsConsString() && !object->IsInternalizedString() &&
1304  //   (ConsString::cast(object)->second() == HEAP->empty_string())
1305  // except the maps for the object and its possible substrings might be
1306  // marked.
1307  HeapObject* object = HeapObject::cast(*p);
1308  if (!FLAG_clever_optimizations) return object;
1309  Map* map = object->map();
1310  InstanceType type = map->instance_type();
1311  if ((type & kShortcutTypeMask) != kShortcutTypeTag) return object;
1312
1313  Object* second = reinterpret_cast<ConsString*>(object)->second();
1314  Heap* heap = map->GetHeap();
1315  if (second != heap->empty_string()) {
1316    return object;
1317  }
1318
1319  // Since we don't have the object's start, it is impossible to update the
1320  // page dirty marks. Therefore, we only replace the string with its left
1321  // substring when page dirty marks do not change.
1322  Object* first = reinterpret_cast<ConsString*>(object)->first();
1323  if (!heap->InNewSpace(object) && heap->InNewSpace(first)) return object;
1324
1325  *p = first;
1326  return HeapObject::cast(first);
1327}
1328
1329
1330class MarkCompactMarkingVisitor
1331    : public StaticMarkingVisitor<MarkCompactMarkingVisitor> {
1332 public:
1333  static void ObjectStatsVisitBase(StaticVisitorBase::VisitorId id,
1334                                   Map* map, HeapObject* obj);
1335
1336  static void ObjectStatsCountFixedArray(
1337      FixedArrayBase* fixed_array,
1338      FixedArraySubInstanceType fast_type,
1339      FixedArraySubInstanceType dictionary_type);
1340
1341  template<MarkCompactMarkingVisitor::VisitorId id>
1342  class ObjectStatsTracker {
1343   public:
1344    static inline void Visit(Map* map, HeapObject* obj);
1345  };
1346
1347  static void Initialize();
1348
1349  INLINE(static void VisitPointer(Heap* heap, Object** p)) {
1350    MarkObjectByPointer(heap->mark_compact_collector(), p, p);
1351  }
1352
1353  INLINE(static void VisitPointers(Heap* heap, Object** start, Object** end)) {
1354    // Mark all objects pointed to in [start, end).
1355    const int kMinRangeForMarkingRecursion = 64;
1356    if (end - start >= kMinRangeForMarkingRecursion) {
1357      if (VisitUnmarkedObjects(heap, start, end)) return;
1358      // We are close to a stack overflow, so just mark the objects.
1359    }
1360    MarkCompactCollector* collector = heap->mark_compact_collector();
1361    for (Object** p = start; p < end; p++) {
1362      MarkObjectByPointer(collector, start, p);
1363    }
1364  }
1365
1366  // Marks the object black and pushes it on the marking stack.
1367  INLINE(static void MarkObject(Heap* heap, HeapObject* object)) {
1368    MarkBit mark = Marking::MarkBitFrom(object);
1369    heap->mark_compact_collector()->MarkObject(object, mark);
1370  }
1371
1372  // Marks the object black without pushing it on the marking stack.
1373  // Returns true if object needed marking and false otherwise.
1374  INLINE(static bool MarkObjectWithoutPush(Heap* heap, HeapObject* object)) {
1375    MarkBit mark_bit = Marking::MarkBitFrom(object);
1376    if (!mark_bit.Get()) {
1377      heap->mark_compact_collector()->SetMark(object, mark_bit);
1378      return true;
1379    }
1380    return false;
1381  }
1382
1383  // Mark object pointed to by p.
1384  INLINE(static void MarkObjectByPointer(MarkCompactCollector* collector,
1385                                         Object** anchor_slot,
1386                                         Object** p)) {
1387    if (!(*p)->IsHeapObject()) return;
1388    HeapObject* object = ShortCircuitConsString(p);
1389    collector->RecordSlot(anchor_slot, p, object);
1390    MarkBit mark = Marking::MarkBitFrom(object);
1391    collector->MarkObject(object, mark);
1392  }
1393
1394
1395  // Visit an unmarked object.
1396  INLINE(static void VisitUnmarkedObject(MarkCompactCollector* collector,
1397                                         HeapObject* obj)) {
1398#ifdef DEBUG
1399    ASSERT(collector->heap()->Contains(obj));
1400    ASSERT(!collector->heap()->mark_compact_collector()->IsMarked(obj));
1401#endif
1402    Map* map = obj->map();
1403    Heap* heap = obj->GetHeap();
1404    MarkBit mark = Marking::MarkBitFrom(obj);
1405    heap->mark_compact_collector()->SetMark(obj, mark);
1406    // Mark the map pointer and the body.
1407    MarkBit map_mark = Marking::MarkBitFrom(map);
1408    heap->mark_compact_collector()->MarkObject(map, map_mark);
1409    IterateBody(map, obj);
1410  }
1411
1412  // Visit all unmarked objects pointed to by [start, end).
1413  // Returns false if the operation fails (lack of stack space).
1414  INLINE(static bool VisitUnmarkedObjects(Heap* heap,
1415                                          Object** start,
1416                                          Object** end)) {
1417    // Return false is we are close to the stack limit.
1418    StackLimitCheck check(heap->isolate());
1419    if (check.HasOverflowed()) return false;
1420
1421    MarkCompactCollector* collector = heap->mark_compact_collector();
1422    // Visit the unmarked objects.
1423    for (Object** p = start; p < end; p++) {
1424      Object* o = *p;
1425      if (!o->IsHeapObject()) continue;
1426      collector->RecordSlot(start, p, o);
1427      HeapObject* obj = HeapObject::cast(o);
1428      MarkBit mark = Marking::MarkBitFrom(obj);
1429      if (mark.Get()) continue;
1430      VisitUnmarkedObject(collector, obj);
1431    }
1432    return true;
1433  }
1434
1435  INLINE(static void BeforeVisitingSharedFunctionInfo(HeapObject* object)) {
1436    SharedFunctionInfo* shared = SharedFunctionInfo::cast(object);
1437    shared->BeforeVisitingPointers();
1438  }
1439
1440  static void VisitWeakCollection(Map* map, HeapObject* object) {
1441    MarkCompactCollector* collector = map->GetHeap()->mark_compact_collector();
1442    JSWeakCollection* weak_collection =
1443        reinterpret_cast<JSWeakCollection*>(object);
1444
1445    // Enqueue weak map in linked list of encountered weak maps.
1446    if (weak_collection->next() == Smi::FromInt(0)) {
1447      weak_collection->set_next(collector->encountered_weak_collections());
1448      collector->set_encountered_weak_collections(weak_collection);
1449    }
1450
1451    // Skip visiting the backing hash table containing the mappings.
1452    int object_size = JSWeakCollection::BodyDescriptor::SizeOf(map, object);
1453    BodyVisitorBase<MarkCompactMarkingVisitor>::IteratePointers(
1454        map->GetHeap(),
1455        object,
1456        JSWeakCollection::BodyDescriptor::kStartOffset,
1457        JSWeakCollection::kTableOffset);
1458    BodyVisitorBase<MarkCompactMarkingVisitor>::IteratePointers(
1459        map->GetHeap(),
1460        object,
1461        JSWeakCollection::kTableOffset + kPointerSize,
1462        object_size);
1463
1464    // Mark the backing hash table without pushing it on the marking stack.
1465    Object* table_object = weak_collection->table();
1466    if (!table_object->IsHashTable()) return;
1467    WeakHashTable* table = WeakHashTable::cast(table_object);
1468    Object** table_slot =
1469        HeapObject::RawField(weak_collection, JSWeakCollection::kTableOffset);
1470    MarkBit table_mark = Marking::MarkBitFrom(table);
1471    collector->RecordSlot(table_slot, table_slot, table);
1472    if (!table_mark.Get()) collector->SetMark(table, table_mark);
1473    // Recording the map slot can be skipped, because maps are not compacted.
1474    collector->MarkObject(table->map(), Marking::MarkBitFrom(table->map()));
1475    ASSERT(MarkCompactCollector::IsMarked(table->map()));
1476  }
1477
1478 private:
1479  template<int id>
1480  static inline void TrackObjectStatsAndVisit(Map* map, HeapObject* obj);
1481
1482  // Code flushing support.
1483
1484  static const int kRegExpCodeThreshold = 5;
1485
1486  static void UpdateRegExpCodeAgeAndFlush(Heap* heap,
1487                                          JSRegExp* re,
1488                                          bool is_ascii) {
1489    // Make sure that the fixed array is in fact initialized on the RegExp.
1490    // We could potentially trigger a GC when initializing the RegExp.
1491    if (HeapObject::cast(re->data())->map()->instance_type() !=
1492            FIXED_ARRAY_TYPE) return;
1493
1494    // Make sure this is a RegExp that actually contains code.
1495    if (re->TypeTag() != JSRegExp::IRREGEXP) return;
1496
1497    Object* code = re->DataAt(JSRegExp::code_index(is_ascii));
1498    if (!code->IsSmi() &&
1499        HeapObject::cast(code)->map()->instance_type() == CODE_TYPE) {
1500      // Save a copy that can be reinstated if we need the code again.
1501      re->SetDataAt(JSRegExp::saved_code_index(is_ascii), code);
1502
1503      // Saving a copy might create a pointer into compaction candidate
1504      // that was not observed by marker.  This might happen if JSRegExp data
1505      // was marked through the compilation cache before marker reached JSRegExp
1506      // object.
1507      FixedArray* data = FixedArray::cast(re->data());
1508      Object** slot = data->data_start() + JSRegExp::saved_code_index(is_ascii);
1509      heap->mark_compact_collector()->
1510          RecordSlot(slot, slot, code);
1511
1512      // Set a number in the 0-255 range to guarantee no smi overflow.
1513      re->SetDataAt(JSRegExp::code_index(is_ascii),
1514                    Smi::FromInt(heap->sweep_generation() & 0xff));
1515    } else if (code->IsSmi()) {
1516      int value = Smi::cast(code)->value();
1517      // The regexp has not been compiled yet or there was a compilation error.
1518      if (value == JSRegExp::kUninitializedValue ||
1519          value == JSRegExp::kCompilationErrorValue) {
1520        return;
1521      }
1522
1523      // Check if we should flush now.
1524      if (value == ((heap->sweep_generation() - kRegExpCodeThreshold) & 0xff)) {
1525        re->SetDataAt(JSRegExp::code_index(is_ascii),
1526                      Smi::FromInt(JSRegExp::kUninitializedValue));
1527        re->SetDataAt(JSRegExp::saved_code_index(is_ascii),
1528                      Smi::FromInt(JSRegExp::kUninitializedValue));
1529      }
1530    }
1531  }
1532
1533
1534  // Works by setting the current sweep_generation (as a smi) in the
1535  // code object place in the data array of the RegExp and keeps a copy
1536  // around that can be reinstated if we reuse the RegExp before flushing.
1537  // If we did not use the code for kRegExpCodeThreshold mark sweep GCs
1538  // we flush the code.
1539  static void VisitRegExpAndFlushCode(Map* map, HeapObject* object) {
1540    Heap* heap = map->GetHeap();
1541    MarkCompactCollector* collector = heap->mark_compact_collector();
1542    if (!collector->is_code_flushing_enabled()) {
1543      VisitJSRegExp(map, object);
1544      return;
1545    }
1546    JSRegExp* re = reinterpret_cast<JSRegExp*>(object);
1547    // Flush code or set age on both ASCII and two byte code.
1548    UpdateRegExpCodeAgeAndFlush(heap, re, true);
1549    UpdateRegExpCodeAgeAndFlush(heap, re, false);
1550    // Visit the fields of the RegExp, including the updated FixedArray.
1551    VisitJSRegExp(map, object);
1552  }
1553
1554  static VisitorDispatchTable<Callback> non_count_table_;
1555};
1556
1557
1558void MarkCompactMarkingVisitor::ObjectStatsCountFixedArray(
1559    FixedArrayBase* fixed_array,
1560    FixedArraySubInstanceType fast_type,
1561    FixedArraySubInstanceType dictionary_type) {
1562  Heap* heap = fixed_array->map()->GetHeap();
1563  if (fixed_array->map() != heap->fixed_cow_array_map() &&
1564      fixed_array->map() != heap->fixed_double_array_map() &&
1565      fixed_array != heap->empty_fixed_array()) {
1566    if (fixed_array->IsDictionary()) {
1567      heap->RecordFixedArraySubTypeStats(dictionary_type,
1568                                         fixed_array->Size());
1569    } else {
1570      heap->RecordFixedArraySubTypeStats(fast_type,
1571                                         fixed_array->Size());
1572    }
1573  }
1574}
1575
1576
1577void MarkCompactMarkingVisitor::ObjectStatsVisitBase(
1578    MarkCompactMarkingVisitor::VisitorId id, Map* map, HeapObject* obj) {
1579  Heap* heap = map->GetHeap();
1580  int object_size = obj->Size();
1581  heap->RecordObjectStats(map->instance_type(), object_size);
1582  non_count_table_.GetVisitorById(id)(map, obj);
1583  if (obj->IsJSObject()) {
1584    JSObject* object = JSObject::cast(obj);
1585    ObjectStatsCountFixedArray(object->elements(),
1586                               DICTIONARY_ELEMENTS_SUB_TYPE,
1587                               FAST_ELEMENTS_SUB_TYPE);
1588    ObjectStatsCountFixedArray(object->properties(),
1589                               DICTIONARY_PROPERTIES_SUB_TYPE,
1590                               FAST_PROPERTIES_SUB_TYPE);
1591  }
1592}
1593
1594
1595template<MarkCompactMarkingVisitor::VisitorId id>
1596void MarkCompactMarkingVisitor::ObjectStatsTracker<id>::Visit(
1597    Map* map, HeapObject* obj) {
1598  ObjectStatsVisitBase(id, map, obj);
1599}
1600
1601
1602template<>
1603class MarkCompactMarkingVisitor::ObjectStatsTracker<
1604    MarkCompactMarkingVisitor::kVisitMap> {
1605 public:
1606  static inline void Visit(Map* map, HeapObject* obj) {
1607    Heap* heap = map->GetHeap();
1608    Map* map_obj = Map::cast(obj);
1609    ASSERT(map->instance_type() == MAP_TYPE);
1610    DescriptorArray* array = map_obj->instance_descriptors();
1611    if (map_obj->owns_descriptors() &&
1612        array != heap->empty_descriptor_array()) {
1613      int fixed_array_size = array->Size();
1614      heap->RecordFixedArraySubTypeStats(DESCRIPTOR_ARRAY_SUB_TYPE,
1615                                         fixed_array_size);
1616    }
1617    if (map_obj->HasTransitionArray()) {
1618      int fixed_array_size = map_obj->transitions()->Size();
1619      heap->RecordFixedArraySubTypeStats(TRANSITION_ARRAY_SUB_TYPE,
1620                                         fixed_array_size);
1621    }
1622    if (map_obj->has_code_cache()) {
1623      CodeCache* cache = CodeCache::cast(map_obj->code_cache());
1624      heap->RecordFixedArraySubTypeStats(MAP_CODE_CACHE_SUB_TYPE,
1625                                         cache->default_cache()->Size());
1626      if (!cache->normal_type_cache()->IsUndefined()) {
1627        heap->RecordFixedArraySubTypeStats(
1628            MAP_CODE_CACHE_SUB_TYPE,
1629            FixedArray::cast(cache->normal_type_cache())->Size());
1630      }
1631    }
1632    ObjectStatsVisitBase(kVisitMap, map, obj);
1633  }
1634};
1635
1636
1637template<>
1638class MarkCompactMarkingVisitor::ObjectStatsTracker<
1639    MarkCompactMarkingVisitor::kVisitCode> {
1640 public:
1641  static inline void Visit(Map* map, HeapObject* obj) {
1642    Heap* heap = map->GetHeap();
1643    int object_size = obj->Size();
1644    ASSERT(map->instance_type() == CODE_TYPE);
1645    Code* code_obj = Code::cast(obj);
1646    heap->RecordCodeSubTypeStats(code_obj->kind(), code_obj->GetRawAge(),
1647                                 object_size);
1648    ObjectStatsVisitBase(kVisitCode, map, obj);
1649  }
1650};
1651
1652
1653template<>
1654class MarkCompactMarkingVisitor::ObjectStatsTracker<
1655    MarkCompactMarkingVisitor::kVisitSharedFunctionInfo> {
1656 public:
1657  static inline void Visit(Map* map, HeapObject* obj) {
1658    Heap* heap = map->GetHeap();
1659    SharedFunctionInfo* sfi = SharedFunctionInfo::cast(obj);
1660    if (sfi->scope_info() != heap->empty_fixed_array()) {
1661      heap->RecordFixedArraySubTypeStats(
1662          SCOPE_INFO_SUB_TYPE,
1663          FixedArray::cast(sfi->scope_info())->Size());
1664    }
1665    ObjectStatsVisitBase(kVisitSharedFunctionInfo, map, obj);
1666  }
1667};
1668
1669
1670template<>
1671class MarkCompactMarkingVisitor::ObjectStatsTracker<
1672    MarkCompactMarkingVisitor::kVisitFixedArray> {
1673 public:
1674  static inline void Visit(Map* map, HeapObject* obj) {
1675    Heap* heap = map->GetHeap();
1676    FixedArray* fixed_array = FixedArray::cast(obj);
1677    if (fixed_array == heap->string_table()) {
1678      heap->RecordFixedArraySubTypeStats(
1679          STRING_TABLE_SUB_TYPE,
1680          fixed_array->Size());
1681    }
1682    ObjectStatsVisitBase(kVisitFixedArray, map, obj);
1683  }
1684};
1685
1686
1687void MarkCompactMarkingVisitor::Initialize() {
1688  StaticMarkingVisitor<MarkCompactMarkingVisitor>::Initialize();
1689
1690  table_.Register(kVisitJSRegExp,
1691                  &VisitRegExpAndFlushCode);
1692
1693  if (FLAG_track_gc_object_stats) {
1694    // Copy the visitor table to make call-through possible.
1695    non_count_table_.CopyFrom(&table_);
1696#define VISITOR_ID_COUNT_FUNCTION(id)                                   \
1697    table_.Register(kVisit##id, ObjectStatsTracker<kVisit##id>::Visit);
1698    VISITOR_ID_LIST(VISITOR_ID_COUNT_FUNCTION)
1699#undef VISITOR_ID_COUNT_FUNCTION
1700  }
1701}
1702
1703
1704VisitorDispatchTable<MarkCompactMarkingVisitor::Callback>
1705    MarkCompactMarkingVisitor::non_count_table_;
1706
1707
1708class CodeMarkingVisitor : public ThreadVisitor {
1709 public:
1710  explicit CodeMarkingVisitor(MarkCompactCollector* collector)
1711      : collector_(collector) {}
1712
1713  void VisitThread(Isolate* isolate, ThreadLocalTop* top) {
1714    collector_->PrepareThreadForCodeFlushing(isolate, top);
1715  }
1716
1717 private:
1718  MarkCompactCollector* collector_;
1719};
1720
1721
1722class SharedFunctionInfoMarkingVisitor : public ObjectVisitor {
1723 public:
1724  explicit SharedFunctionInfoMarkingVisitor(MarkCompactCollector* collector)
1725      : collector_(collector) {}
1726
1727  void VisitPointers(Object** start, Object** end) {
1728    for (Object** p = start; p < end; p++) VisitPointer(p);
1729  }
1730
1731  void VisitPointer(Object** slot) {
1732    Object* obj = *slot;
1733    if (obj->IsSharedFunctionInfo()) {
1734      SharedFunctionInfo* shared = reinterpret_cast<SharedFunctionInfo*>(obj);
1735      MarkBit shared_mark = Marking::MarkBitFrom(shared);
1736      MarkBit code_mark = Marking::MarkBitFrom(shared->code());
1737      collector_->MarkObject(shared->code(), code_mark);
1738      collector_->MarkObject(shared, shared_mark);
1739    }
1740  }
1741
1742 private:
1743  MarkCompactCollector* collector_;
1744};
1745
1746
1747void MarkCompactCollector::PrepareThreadForCodeFlushing(Isolate* isolate,
1748                                                        ThreadLocalTop* top) {
1749  for (StackFrameIterator it(isolate, top); !it.done(); it.Advance()) {
1750    // Note: for the frame that has a pending lazy deoptimization
1751    // StackFrame::unchecked_code will return a non-optimized code object for
1752    // the outermost function and StackFrame::LookupCode will return
1753    // actual optimized code object.
1754    StackFrame* frame = it.frame();
1755    Code* code = frame->unchecked_code();
1756    MarkBit code_mark = Marking::MarkBitFrom(code);
1757    MarkObject(code, code_mark);
1758    if (frame->is_optimized()) {
1759      MarkCompactMarkingVisitor::MarkInlinedFunctionsCode(heap(),
1760                                                          frame->LookupCode());
1761    }
1762  }
1763}
1764
1765
1766void MarkCompactCollector::PrepareForCodeFlushing() {
1767  // Enable code flushing for non-incremental cycles.
1768  if (FLAG_flush_code && !FLAG_flush_code_incrementally) {
1769    EnableCodeFlushing(!was_marked_incrementally_);
1770  }
1771
1772  // If code flushing is disabled, there is no need to prepare for it.
1773  if (!is_code_flushing_enabled()) return;
1774
1775  // Ensure that empty descriptor array is marked. Method MarkDescriptorArray
1776  // relies on it being marked before any other descriptor array.
1777  HeapObject* descriptor_array = heap()->empty_descriptor_array();
1778  MarkBit descriptor_array_mark = Marking::MarkBitFrom(descriptor_array);
1779  MarkObject(descriptor_array, descriptor_array_mark);
1780
1781  // Make sure we are not referencing the code from the stack.
1782  ASSERT(this == heap()->mark_compact_collector());
1783  PrepareThreadForCodeFlushing(heap()->isolate(),
1784                               heap()->isolate()->thread_local_top());
1785
1786  // Iterate the archived stacks in all threads to check if
1787  // the code is referenced.
1788  CodeMarkingVisitor code_marking_visitor(this);
1789  heap()->isolate()->thread_manager()->IterateArchivedThreads(
1790      &code_marking_visitor);
1791
1792  SharedFunctionInfoMarkingVisitor visitor(this);
1793  heap()->isolate()->compilation_cache()->IterateFunctions(&visitor);
1794  heap()->isolate()->handle_scope_implementer()->Iterate(&visitor);
1795
1796  ProcessMarkingDeque();
1797}
1798
1799
1800// Visitor class for marking heap roots.
1801class RootMarkingVisitor : public ObjectVisitor {
1802 public:
1803  explicit RootMarkingVisitor(Heap* heap)
1804    : collector_(heap->mark_compact_collector()) { }
1805
1806  void VisitPointer(Object** p) {
1807    MarkObjectByPointer(p);
1808  }
1809
1810  void VisitPointers(Object** start, Object** end) {
1811    for (Object** p = start; p < end; p++) MarkObjectByPointer(p);
1812  }
1813
1814 private:
1815  void MarkObjectByPointer(Object** p) {
1816    if (!(*p)->IsHeapObject()) return;
1817
1818    // Replace flat cons strings in place.
1819    HeapObject* object = ShortCircuitConsString(p);
1820    MarkBit mark_bit = Marking::MarkBitFrom(object);
1821    if (mark_bit.Get()) return;
1822
1823    Map* map = object->map();
1824    // Mark the object.
1825    collector_->SetMark(object, mark_bit);
1826
1827    // Mark the map pointer and body, and push them on the marking stack.
1828    MarkBit map_mark = Marking::MarkBitFrom(map);
1829    collector_->MarkObject(map, map_mark);
1830    MarkCompactMarkingVisitor::IterateBody(map, object);
1831
1832    // Mark all the objects reachable from the map and body.  May leave
1833    // overflowed objects in the heap.
1834    collector_->EmptyMarkingDeque();
1835  }
1836
1837  MarkCompactCollector* collector_;
1838};
1839
1840
1841// Helper class for pruning the string table.
1842class StringTableCleaner : public ObjectVisitor {
1843 public:
1844  explicit StringTableCleaner(Heap* heap)
1845    : heap_(heap), pointers_removed_(0) { }
1846
1847  virtual void VisitPointers(Object** start, Object** end) {
1848    // Visit all HeapObject pointers in [start, end).
1849    for (Object** p = start; p < end; p++) {
1850      Object* o = *p;
1851      if (o->IsHeapObject() &&
1852          !Marking::MarkBitFrom(HeapObject::cast(o)).Get()) {
1853        // Check if the internalized string being pruned is external. We need to
1854        // delete the associated external data as this string is going away.
1855
1856        // Since no objects have yet been moved we can safely access the map of
1857        // the object.
1858        if (o->IsExternalString()) {
1859          heap_->FinalizeExternalString(String::cast(*p));
1860        }
1861        // Set the entry to the_hole_value (as deleted).
1862        *p = heap_->the_hole_value();
1863        pointers_removed_++;
1864      }
1865    }
1866  }
1867
1868  int PointersRemoved() {
1869    return pointers_removed_;
1870  }
1871
1872 private:
1873  Heap* heap_;
1874  int pointers_removed_;
1875};
1876
1877
1878// Implementation of WeakObjectRetainer for mark compact GCs. All marked objects
1879// are retained.
1880class MarkCompactWeakObjectRetainer : public WeakObjectRetainer {
1881 public:
1882  virtual Object* RetainAs(Object* object) {
1883    if (Marking::MarkBitFrom(HeapObject::cast(object)).Get()) {
1884      return object;
1885    } else if (object->IsAllocationSite() &&
1886               !(AllocationSite::cast(object)->IsZombie())) {
1887      // "dead" AllocationSites need to live long enough for a traversal of new
1888      // space. These sites get a one-time reprieve.
1889      AllocationSite* site = AllocationSite::cast(object);
1890      site->MarkZombie();
1891      site->GetHeap()->mark_compact_collector()->MarkAllocationSite(site);
1892      return object;
1893    } else {
1894      return NULL;
1895    }
1896  }
1897};
1898
1899
1900// Fill the marking stack with overflowed objects returned by the given
1901// iterator.  Stop when the marking stack is filled or the end of the space
1902// is reached, whichever comes first.
1903template<class T>
1904static void DiscoverGreyObjectsWithIterator(Heap* heap,
1905                                            MarkingDeque* marking_deque,
1906                                            T* it) {
1907  // The caller should ensure that the marking stack is initially not full,
1908  // so that we don't waste effort pointlessly scanning for objects.
1909  ASSERT(!marking_deque->IsFull());
1910
1911  Map* filler_map = heap->one_pointer_filler_map();
1912  for (HeapObject* object = it->Next();
1913       object != NULL;
1914       object = it->Next()) {
1915    MarkBit markbit = Marking::MarkBitFrom(object);
1916    if ((object->map() != filler_map) && Marking::IsGrey(markbit)) {
1917      Marking::GreyToBlack(markbit);
1918      MemoryChunk::IncrementLiveBytesFromGC(object->address(), object->Size());
1919      marking_deque->PushBlack(object);
1920      if (marking_deque->IsFull()) return;
1921    }
1922  }
1923}
1924
1925
1926static inline int MarkWordToObjectStarts(uint32_t mark_bits, int* starts);
1927
1928
1929static void DiscoverGreyObjectsOnPage(MarkingDeque* marking_deque,
1930                                      MemoryChunk* p) {
1931  ASSERT(!marking_deque->IsFull());
1932  ASSERT(strcmp(Marking::kWhiteBitPattern, "00") == 0);
1933  ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
1934  ASSERT(strcmp(Marking::kGreyBitPattern, "11") == 0);
1935  ASSERT(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
1936
1937  for (MarkBitCellIterator it(p); !it.Done(); it.Advance()) {
1938    Address cell_base = it.CurrentCellBase();
1939    MarkBit::CellType* cell = it.CurrentCell();
1940
1941    const MarkBit::CellType current_cell = *cell;
1942    if (current_cell == 0) continue;
1943
1944    MarkBit::CellType grey_objects;
1945    if (it.HasNext()) {
1946      const MarkBit::CellType next_cell = *(cell+1);
1947      grey_objects = current_cell &
1948          ((current_cell >> 1) | (next_cell << (Bitmap::kBitsPerCell - 1)));
1949    } else {
1950      grey_objects = current_cell & (current_cell >> 1);
1951    }
1952
1953    int offset = 0;
1954    while (grey_objects != 0) {
1955      int trailing_zeros = CompilerIntrinsics::CountTrailingZeros(grey_objects);
1956      grey_objects >>= trailing_zeros;
1957      offset += trailing_zeros;
1958      MarkBit markbit(cell, 1 << offset, false);
1959      ASSERT(Marking::IsGrey(markbit));
1960      Marking::GreyToBlack(markbit);
1961      Address addr = cell_base + offset * kPointerSize;
1962      HeapObject* object = HeapObject::FromAddress(addr);
1963      MemoryChunk::IncrementLiveBytesFromGC(object->address(), object->Size());
1964      marking_deque->PushBlack(object);
1965      if (marking_deque->IsFull()) return;
1966      offset += 2;
1967      grey_objects >>= 2;
1968    }
1969
1970    grey_objects >>= (Bitmap::kBitsPerCell - 1);
1971  }
1972}
1973
1974
1975int MarkCompactCollector::DiscoverAndPromoteBlackObjectsOnPage(
1976    NewSpace* new_space,
1977    NewSpacePage* p) {
1978  ASSERT(strcmp(Marking::kWhiteBitPattern, "00") == 0);
1979  ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
1980  ASSERT(strcmp(Marking::kGreyBitPattern, "11") == 0);
1981  ASSERT(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
1982
1983  MarkBit::CellType* cells = p->markbits()->cells();
1984  int survivors_size = 0;
1985
1986  for (MarkBitCellIterator it(p); !it.Done(); it.Advance()) {
1987    Address cell_base = it.CurrentCellBase();
1988    MarkBit::CellType* cell = it.CurrentCell();
1989
1990    MarkBit::CellType current_cell = *cell;
1991    if (current_cell == 0) continue;
1992
1993    int offset = 0;
1994    while (current_cell != 0) {
1995      int trailing_zeros = CompilerIntrinsics::CountTrailingZeros(current_cell);
1996      current_cell >>= trailing_zeros;
1997      offset += trailing_zeros;
1998      Address address = cell_base + offset * kPointerSize;
1999      HeapObject* object = HeapObject::FromAddress(address);
2000
2001      int size = object->Size();
2002      survivors_size += size;
2003
2004      Heap::UpdateAllocationSiteFeedback(object);
2005
2006      offset++;
2007      current_cell >>= 1;
2008      // Aggressively promote young survivors to the old space.
2009      if (TryPromoteObject(object, size)) {
2010        continue;
2011      }
2012
2013      // Promotion failed. Just migrate object to another semispace.
2014      MaybeObject* allocation = new_space->AllocateRaw(size);
2015      if (allocation->IsFailure()) {
2016        if (!new_space->AddFreshPage()) {
2017          // Shouldn't happen. We are sweeping linearly, and to-space
2018          // has the same number of pages as from-space, so there is
2019          // always room.
2020          UNREACHABLE();
2021        }
2022        allocation = new_space->AllocateRaw(size);
2023        ASSERT(!allocation->IsFailure());
2024      }
2025      Object* target = allocation->ToObjectUnchecked();
2026
2027      MigrateObject(HeapObject::cast(target)->address(),
2028                    object->address(),
2029                    size,
2030                    NEW_SPACE);
2031    }
2032    *cells = 0;
2033  }
2034  return survivors_size;
2035}
2036
2037
2038static void DiscoverGreyObjectsInSpace(Heap* heap,
2039                                       MarkingDeque* marking_deque,
2040                                       PagedSpace* space) {
2041  if (!space->was_swept_conservatively()) {
2042    HeapObjectIterator it(space);
2043    DiscoverGreyObjectsWithIterator(heap, marking_deque, &it);
2044  } else {
2045    PageIterator it(space);
2046    while (it.has_next()) {
2047      Page* p = it.next();
2048      DiscoverGreyObjectsOnPage(marking_deque, p);
2049      if (marking_deque->IsFull()) return;
2050    }
2051  }
2052}
2053
2054
2055static void DiscoverGreyObjectsInNewSpace(Heap* heap,
2056                                          MarkingDeque* marking_deque) {
2057  NewSpace* space = heap->new_space();
2058  NewSpacePageIterator it(space->bottom(), space->top());
2059  while (it.has_next()) {
2060    NewSpacePage* page = it.next();
2061    DiscoverGreyObjectsOnPage(marking_deque, page);
2062    if (marking_deque->IsFull()) return;
2063  }
2064}
2065
2066
2067bool MarkCompactCollector::IsUnmarkedHeapObject(Object** p) {
2068  Object* o = *p;
2069  if (!o->IsHeapObject()) return false;
2070  HeapObject* heap_object = HeapObject::cast(o);
2071  MarkBit mark = Marking::MarkBitFrom(heap_object);
2072  return !mark.Get();
2073}
2074
2075
2076bool MarkCompactCollector::IsUnmarkedHeapObjectWithHeap(Heap* heap,
2077                                                        Object** p) {
2078  Object* o = *p;
2079  ASSERT(o->IsHeapObject());
2080  HeapObject* heap_object = HeapObject::cast(o);
2081  MarkBit mark = Marking::MarkBitFrom(heap_object);
2082  return !mark.Get();
2083}
2084
2085
2086void MarkCompactCollector::MarkStringTable(RootMarkingVisitor* visitor) {
2087  StringTable* string_table = heap()->string_table();
2088  // Mark the string table itself.
2089  MarkBit string_table_mark = Marking::MarkBitFrom(string_table);
2090  SetMark(string_table, string_table_mark);
2091  // Explicitly mark the prefix.
2092  string_table->IteratePrefix(visitor);
2093  ProcessMarkingDeque();
2094}
2095
2096
2097void MarkCompactCollector::MarkAllocationSite(AllocationSite* site) {
2098  MarkBit mark_bit = Marking::MarkBitFrom(site);
2099  SetMark(site, mark_bit);
2100}
2101
2102
2103void MarkCompactCollector::MarkRoots(RootMarkingVisitor* visitor) {
2104  // Mark the heap roots including global variables, stack variables,
2105  // etc., and all objects reachable from them.
2106  heap()->IterateStrongRoots(visitor, VISIT_ONLY_STRONG);
2107
2108  // Handle the string table specially.
2109  MarkStringTable(visitor);
2110
2111  MarkWeakObjectToCodeTable();
2112
2113  // There may be overflowed objects in the heap.  Visit them now.
2114  while (marking_deque_.overflowed()) {
2115    RefillMarkingDeque();
2116    EmptyMarkingDeque();
2117  }
2118}
2119
2120
2121void MarkCompactCollector::MarkImplicitRefGroups() {
2122  List<ImplicitRefGroup*>* ref_groups =
2123      isolate()->global_handles()->implicit_ref_groups();
2124
2125  int last = 0;
2126  for (int i = 0; i < ref_groups->length(); i++) {
2127    ImplicitRefGroup* entry = ref_groups->at(i);
2128    ASSERT(entry != NULL);
2129
2130    if (!IsMarked(*entry->parent)) {
2131      (*ref_groups)[last++] = entry;
2132      continue;
2133    }
2134
2135    Object*** children = entry->children;
2136    // A parent object is marked, so mark all child heap objects.
2137    for (size_t j = 0; j < entry->length; ++j) {
2138      if ((*children[j])->IsHeapObject()) {
2139        HeapObject* child = HeapObject::cast(*children[j]);
2140        MarkBit mark = Marking::MarkBitFrom(child);
2141        MarkObject(child, mark);
2142      }
2143    }
2144
2145    // Once the entire group has been marked, dispose it because it's
2146    // not needed anymore.
2147    delete entry;
2148  }
2149  ref_groups->Rewind(last);
2150}
2151
2152
2153void MarkCompactCollector::MarkWeakObjectToCodeTable() {
2154  HeapObject* weak_object_to_code_table =
2155      HeapObject::cast(heap()->weak_object_to_code_table());
2156  if (!IsMarked(weak_object_to_code_table)) {
2157    MarkBit mark = Marking::MarkBitFrom(weak_object_to_code_table);
2158    SetMark(weak_object_to_code_table, mark);
2159  }
2160}
2161
2162
2163// Mark all objects reachable from the objects on the marking stack.
2164// Before: the marking stack contains zero or more heap object pointers.
2165// After: the marking stack is empty, and all objects reachable from the
2166// marking stack have been marked, or are overflowed in the heap.
2167void MarkCompactCollector::EmptyMarkingDeque() {
2168  while (!marking_deque_.IsEmpty()) {
2169    HeapObject* object = marking_deque_.Pop();
2170    ASSERT(object->IsHeapObject());
2171    ASSERT(heap()->Contains(object));
2172    ASSERT(Marking::IsBlack(Marking::MarkBitFrom(object)));
2173
2174    Map* map = object->map();
2175    MarkBit map_mark = Marking::MarkBitFrom(map);
2176    MarkObject(map, map_mark);
2177
2178    MarkCompactMarkingVisitor::IterateBody(map, object);
2179  }
2180}
2181
2182
2183// Sweep the heap for overflowed objects, clear their overflow bits, and
2184// push them on the marking stack.  Stop early if the marking stack fills
2185// before sweeping completes.  If sweeping completes, there are no remaining
2186// overflowed objects in the heap so the overflow flag on the markings stack
2187// is cleared.
2188void MarkCompactCollector::RefillMarkingDeque() {
2189  ASSERT(marking_deque_.overflowed());
2190
2191  DiscoverGreyObjectsInNewSpace(heap(), &marking_deque_);
2192  if (marking_deque_.IsFull()) return;
2193
2194  DiscoverGreyObjectsInSpace(heap(),
2195                             &marking_deque_,
2196                             heap()->old_pointer_space());
2197  if (marking_deque_.IsFull()) return;
2198
2199  DiscoverGreyObjectsInSpace(heap(),
2200                             &marking_deque_,
2201                             heap()->old_data_space());
2202  if (marking_deque_.IsFull()) return;
2203
2204  DiscoverGreyObjectsInSpace(heap(),
2205                             &marking_deque_,
2206                             heap()->code_space());
2207  if (marking_deque_.IsFull()) return;
2208
2209  DiscoverGreyObjectsInSpace(heap(),
2210                             &marking_deque_,
2211                             heap()->map_space());
2212  if (marking_deque_.IsFull()) return;
2213
2214  DiscoverGreyObjectsInSpace(heap(),
2215                             &marking_deque_,
2216                             heap()->cell_space());
2217  if (marking_deque_.IsFull()) return;
2218
2219  DiscoverGreyObjectsInSpace(heap(),
2220                             &marking_deque_,
2221                             heap()->property_cell_space());
2222  if (marking_deque_.IsFull()) return;
2223
2224  LargeObjectIterator lo_it(heap()->lo_space());
2225  DiscoverGreyObjectsWithIterator(heap(),
2226                                  &marking_deque_,
2227                                  &lo_it);
2228  if (marking_deque_.IsFull()) return;
2229
2230  marking_deque_.ClearOverflowed();
2231}
2232
2233
2234// Mark all objects reachable (transitively) from objects on the marking
2235// stack.  Before: the marking stack contains zero or more heap object
2236// pointers.  After: the marking stack is empty and there are no overflowed
2237// objects in the heap.
2238void MarkCompactCollector::ProcessMarkingDeque() {
2239  EmptyMarkingDeque();
2240  while (marking_deque_.overflowed()) {
2241    RefillMarkingDeque();
2242    EmptyMarkingDeque();
2243  }
2244}
2245
2246
2247// Mark all objects reachable (transitively) from objects on the marking
2248// stack including references only considered in the atomic marking pause.
2249void MarkCompactCollector::ProcessEphemeralMarking(ObjectVisitor* visitor) {
2250  bool work_to_do = true;
2251  ASSERT(marking_deque_.IsEmpty());
2252  while (work_to_do) {
2253    isolate()->global_handles()->IterateObjectGroups(
2254        visitor, &IsUnmarkedHeapObjectWithHeap);
2255    MarkImplicitRefGroups();
2256    ProcessWeakCollections();
2257    work_to_do = !marking_deque_.IsEmpty();
2258    ProcessMarkingDeque();
2259  }
2260}
2261
2262
2263void MarkCompactCollector::ProcessTopOptimizedFrame(ObjectVisitor* visitor) {
2264  for (StackFrameIterator it(isolate(), isolate()->thread_local_top());
2265       !it.done(); it.Advance()) {
2266    if (it.frame()->type() == StackFrame::JAVA_SCRIPT) {
2267      return;
2268    }
2269    if (it.frame()->type() == StackFrame::OPTIMIZED) {
2270      Code* code = it.frame()->LookupCode();
2271      if (!code->CanDeoptAt(it.frame()->pc())) {
2272        code->CodeIterateBody(visitor);
2273      }
2274      ProcessMarkingDeque();
2275      return;
2276    }
2277  }
2278}
2279
2280
2281void MarkCompactCollector::MarkLiveObjects() {
2282  GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_MARK);
2283  // The recursive GC marker detects when it is nearing stack overflow,
2284  // and switches to a different marking system.  JS interrupts interfere
2285  // with the C stack limit check.
2286  PostponeInterruptsScope postpone(isolate());
2287
2288  bool incremental_marking_overflowed = false;
2289  IncrementalMarking* incremental_marking = heap_->incremental_marking();
2290  if (was_marked_incrementally_) {
2291    // Finalize the incremental marking and check whether we had an overflow.
2292    // Both markers use grey color to mark overflowed objects so
2293    // non-incremental marker can deal with them as if overflow
2294    // occured during normal marking.
2295    // But incremental marker uses a separate marking deque
2296    // so we have to explicitly copy its overflow state.
2297    incremental_marking->Finalize();
2298    incremental_marking_overflowed =
2299        incremental_marking->marking_deque()->overflowed();
2300    incremental_marking->marking_deque()->ClearOverflowed();
2301  } else {
2302    // Abort any pending incremental activities e.g. incremental sweeping.
2303    incremental_marking->Abort();
2304  }
2305
2306#ifdef DEBUG
2307  ASSERT(state_ == PREPARE_GC);
2308  state_ = MARK_LIVE_OBJECTS;
2309#endif
2310  // The to space contains live objects, a page in from space is used as a
2311  // marking stack.
2312  Address marking_deque_start = heap()->new_space()->FromSpacePageLow();
2313  Address marking_deque_end = heap()->new_space()->FromSpacePageHigh();
2314  if (FLAG_force_marking_deque_overflows) {
2315    marking_deque_end = marking_deque_start + 64 * kPointerSize;
2316  }
2317  marking_deque_.Initialize(marking_deque_start,
2318                            marking_deque_end);
2319  ASSERT(!marking_deque_.overflowed());
2320
2321  if (incremental_marking_overflowed) {
2322    // There are overflowed objects left in the heap after incremental marking.
2323    marking_deque_.SetOverflowed();
2324  }
2325
2326  PrepareForCodeFlushing();
2327
2328  if (was_marked_incrementally_) {
2329    // There is no write barrier on cells so we have to scan them now at the end
2330    // of the incremental marking.
2331    {
2332      HeapObjectIterator cell_iterator(heap()->cell_space());
2333      HeapObject* cell;
2334      while ((cell = cell_iterator.Next()) != NULL) {
2335        ASSERT(cell->IsCell());
2336        if (IsMarked(cell)) {
2337          int offset = Cell::kValueOffset;
2338          MarkCompactMarkingVisitor::VisitPointer(
2339              heap(),
2340              reinterpret_cast<Object**>(cell->address() + offset));
2341        }
2342      }
2343    }
2344    {
2345      HeapObjectIterator js_global_property_cell_iterator(
2346          heap()->property_cell_space());
2347      HeapObject* cell;
2348      while ((cell = js_global_property_cell_iterator.Next()) != NULL) {
2349        ASSERT(cell->IsPropertyCell());
2350        if (IsMarked(cell)) {
2351          MarkCompactMarkingVisitor::VisitPropertyCell(cell->map(), cell);
2352        }
2353      }
2354    }
2355  }
2356
2357  RootMarkingVisitor root_visitor(heap());
2358  MarkRoots(&root_visitor);
2359
2360  ProcessTopOptimizedFrame(&root_visitor);
2361
2362  // The objects reachable from the roots are marked, yet unreachable
2363  // objects are unmarked.  Mark objects reachable due to host
2364  // application specific logic or through Harmony weak maps.
2365  ProcessEphemeralMarking(&root_visitor);
2366
2367  // The objects reachable from the roots, weak maps or object groups
2368  // are marked, yet unreachable objects are unmarked.  Mark objects
2369  // reachable only from weak global handles.
2370  //
2371  // First we identify nonlive weak handles and mark them as pending
2372  // destruction.
2373  heap()->isolate()->global_handles()->IdentifyWeakHandles(
2374      &IsUnmarkedHeapObject);
2375  // Then we mark the objects and process the transitive closure.
2376  heap()->isolate()->global_handles()->IterateWeakRoots(&root_visitor);
2377  while (marking_deque_.overflowed()) {
2378    RefillMarkingDeque();
2379    EmptyMarkingDeque();
2380  }
2381
2382  // Repeat host application specific and Harmony weak maps marking to
2383  // mark unmarked objects reachable from the weak roots.
2384  ProcessEphemeralMarking(&root_visitor);
2385
2386  AfterMarking();
2387}
2388
2389
2390void MarkCompactCollector::AfterMarking() {
2391  // Object literal map caches reference strings (cache keys) and maps
2392  // (cache values). At this point still useful maps have already been
2393  // marked. Mark the keys for the alive values before we process the
2394  // string table.
2395  ProcessMapCaches();
2396
2397  // Prune the string table removing all strings only pointed to by the
2398  // string table.  Cannot use string_table() here because the string
2399  // table is marked.
2400  StringTable* string_table = heap()->string_table();
2401  StringTableCleaner v(heap());
2402  string_table->IterateElements(&v);
2403  string_table->ElementsRemoved(v.PointersRemoved());
2404  heap()->external_string_table_.Iterate(&v);
2405  heap()->external_string_table_.CleanUp();
2406
2407  // Process the weak references.
2408  MarkCompactWeakObjectRetainer mark_compact_object_retainer;
2409  heap()->ProcessWeakReferences(&mark_compact_object_retainer);
2410
2411  // Remove object groups after marking phase.
2412  heap()->isolate()->global_handles()->RemoveObjectGroups();
2413  heap()->isolate()->global_handles()->RemoveImplicitRefGroups();
2414
2415  // Flush code from collected candidates.
2416  if (is_code_flushing_enabled()) {
2417    code_flusher_->ProcessCandidates();
2418    // If incremental marker does not support code flushing, we need to
2419    // disable it before incremental marking steps for next cycle.
2420    if (FLAG_flush_code && !FLAG_flush_code_incrementally) {
2421      EnableCodeFlushing(false);
2422    }
2423  }
2424
2425  if (!FLAG_watch_ic_patching) {
2426    // Clean up dead objects from the runtime profiler.
2427    heap()->isolate()->runtime_profiler()->RemoveDeadSamples();
2428  }
2429
2430  if (FLAG_track_gc_object_stats) {
2431    heap()->CheckpointObjectStats();
2432  }
2433}
2434
2435
2436void MarkCompactCollector::ProcessMapCaches() {
2437  Object* raw_context = heap()->native_contexts_list_;
2438  while (raw_context != heap()->undefined_value()) {
2439    Context* context = reinterpret_cast<Context*>(raw_context);
2440    if (IsMarked(context)) {
2441      HeapObject* raw_map_cache =
2442          HeapObject::cast(context->get(Context::MAP_CACHE_INDEX));
2443      // A map cache may be reachable from the stack. In this case
2444      // it's already transitively marked and it's too late to clean
2445      // up its parts.
2446      if (!IsMarked(raw_map_cache) &&
2447          raw_map_cache != heap()->undefined_value()) {
2448        MapCache* map_cache = reinterpret_cast<MapCache*>(raw_map_cache);
2449        int existing_elements = map_cache->NumberOfElements();
2450        int used_elements = 0;
2451        for (int i = MapCache::kElementsStartIndex;
2452             i < map_cache->length();
2453             i += MapCache::kEntrySize) {
2454          Object* raw_key = map_cache->get(i);
2455          if (raw_key == heap()->undefined_value() ||
2456              raw_key == heap()->the_hole_value()) continue;
2457          STATIC_ASSERT(MapCache::kEntrySize == 2);
2458          Object* raw_map = map_cache->get(i + 1);
2459          if (raw_map->IsHeapObject() && IsMarked(raw_map)) {
2460            ++used_elements;
2461          } else {
2462            // Delete useless entries with unmarked maps.
2463            ASSERT(raw_map->IsMap());
2464            map_cache->set_the_hole(i);
2465            map_cache->set_the_hole(i + 1);
2466          }
2467        }
2468        if (used_elements == 0) {
2469          context->set(Context::MAP_CACHE_INDEX, heap()->undefined_value());
2470        } else {
2471          // Note: we don't actually shrink the cache here to avoid
2472          // extra complexity during GC. We rely on subsequent cache
2473          // usages (EnsureCapacity) to do this.
2474          map_cache->ElementsRemoved(existing_elements - used_elements);
2475          MarkBit map_cache_markbit = Marking::MarkBitFrom(map_cache);
2476          MarkObject(map_cache, map_cache_markbit);
2477        }
2478      }
2479    }
2480    // Move to next element in the list.
2481    raw_context = context->get(Context::NEXT_CONTEXT_LINK);
2482  }
2483  ProcessMarkingDeque();
2484}
2485
2486
2487void MarkCompactCollector::ReattachInitialMaps() {
2488  HeapObjectIterator map_iterator(heap()->map_space());
2489  for (HeapObject* obj = map_iterator.Next();
2490       obj != NULL;
2491       obj = map_iterator.Next()) {
2492    Map* map = Map::cast(obj);
2493
2494    STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
2495    if (map->instance_type() < FIRST_JS_RECEIVER_TYPE) continue;
2496
2497    if (map->attached_to_shared_function_info()) {
2498      JSFunction::cast(map->constructor())->shared()->AttachInitialMap(map);
2499    }
2500  }
2501}
2502
2503
2504void MarkCompactCollector::ClearNonLiveReferences() {
2505  // Iterate over the map space, setting map transitions that go from
2506  // a marked map to an unmarked map to null transitions.  This action
2507  // is carried out only on maps of JSObjects and related subtypes.
2508  HeapObjectIterator map_iterator(heap()->map_space());
2509  for (HeapObject* obj = map_iterator.Next();
2510       obj != NULL;
2511       obj = map_iterator.Next()) {
2512    Map* map = Map::cast(obj);
2513
2514    if (!map->CanTransition()) continue;
2515
2516    MarkBit map_mark = Marking::MarkBitFrom(map);
2517    if (map_mark.Get() && map->attached_to_shared_function_info()) {
2518      // This map is used for inobject slack tracking and has been detached
2519      // from SharedFunctionInfo during the mark phase.
2520      // Since it survived the GC, reattach it now.
2521      JSFunction::cast(map->constructor())->shared()->AttachInitialMap(map);
2522    }
2523
2524    ClearNonLivePrototypeTransitions(map);
2525    ClearNonLiveMapTransitions(map, map_mark);
2526
2527    if (map_mark.Get()) {
2528      ClearNonLiveDependentCode(map->dependent_code());
2529    } else {
2530      ClearAndDeoptimizeDependentCode(map->dependent_code());
2531      map->set_dependent_code(DependentCode::cast(heap()->empty_fixed_array()));
2532    }
2533  }
2534
2535  // Iterate over property cell space, removing dependent code that is not
2536  // otherwise kept alive by strong references.
2537  HeapObjectIterator cell_iterator(heap_->property_cell_space());
2538  for (HeapObject* cell = cell_iterator.Next();
2539       cell != NULL;
2540       cell = cell_iterator.Next()) {
2541    if (IsMarked(cell)) {
2542      ClearNonLiveDependentCode(PropertyCell::cast(cell)->dependent_code());
2543    }
2544  }
2545
2546  // Iterate over allocation sites, removing dependent code that is not
2547  // otherwise kept alive by strong references.
2548  Object* undefined = heap()->undefined_value();
2549  for (Object* site = heap()->allocation_sites_list();
2550       site != undefined;
2551       site = AllocationSite::cast(site)->weak_next()) {
2552    if (IsMarked(site)) {
2553      ClearNonLiveDependentCode(AllocationSite::cast(site)->dependent_code());
2554    }
2555  }
2556
2557  if (heap_->weak_object_to_code_table()->IsHashTable()) {
2558    WeakHashTable* table =
2559        WeakHashTable::cast(heap_->weak_object_to_code_table());
2560    uint32_t capacity = table->Capacity();
2561    for (uint32_t i = 0; i < capacity; i++) {
2562      uint32_t key_index = table->EntryToIndex(i);
2563      Object* key = table->get(key_index);
2564      if (!table->IsKey(key)) continue;
2565      uint32_t value_index = table->EntryToValueIndex(i);
2566      Object* value = table->get(value_index);
2567      if (IsMarked(key)) {
2568        if (!IsMarked(value)) {
2569          HeapObject* obj = HeapObject::cast(value);
2570          MarkBit mark = Marking::MarkBitFrom(obj);
2571          SetMark(obj, mark);
2572        }
2573        ClearNonLiveDependentCode(DependentCode::cast(value));
2574      } else {
2575        ClearAndDeoptimizeDependentCode(DependentCode::cast(value));
2576        table->set(key_index, heap_->the_hole_value());
2577        table->set(value_index, heap_->the_hole_value());
2578      }
2579    }
2580  }
2581}
2582
2583
2584void MarkCompactCollector::ClearNonLivePrototypeTransitions(Map* map) {
2585  int number_of_transitions = map->NumberOfProtoTransitions();
2586  FixedArray* prototype_transitions = map->GetPrototypeTransitions();
2587
2588  int new_number_of_transitions = 0;
2589  const int header = Map::kProtoTransitionHeaderSize;
2590  const int proto_offset = header + Map::kProtoTransitionPrototypeOffset;
2591  const int map_offset = header + Map::kProtoTransitionMapOffset;
2592  const int step = Map::kProtoTransitionElementsPerEntry;
2593  for (int i = 0; i < number_of_transitions; i++) {
2594    Object* prototype = prototype_transitions->get(proto_offset + i * step);
2595    Object* cached_map = prototype_transitions->get(map_offset + i * step);
2596    if (IsMarked(prototype) && IsMarked(cached_map)) {
2597      ASSERT(!prototype->IsUndefined());
2598      int proto_index = proto_offset + new_number_of_transitions * step;
2599      int map_index = map_offset + new_number_of_transitions * step;
2600      if (new_number_of_transitions != i) {
2601        prototype_transitions->set(
2602            proto_index,
2603            prototype,
2604            UPDATE_WRITE_BARRIER);
2605        prototype_transitions->set(
2606            map_index,
2607            cached_map,
2608            SKIP_WRITE_BARRIER);
2609      }
2610      Object** slot =
2611          HeapObject::RawField(prototype_transitions,
2612                               FixedArray::OffsetOfElementAt(proto_index));
2613      RecordSlot(slot, slot, prototype);
2614      new_number_of_transitions++;
2615    }
2616  }
2617
2618  if (new_number_of_transitions != number_of_transitions) {
2619    map->SetNumberOfProtoTransitions(new_number_of_transitions);
2620  }
2621
2622  // Fill slots that became free with undefined value.
2623  for (int i = new_number_of_transitions * step;
2624       i < number_of_transitions * step;
2625       i++) {
2626    prototype_transitions->set_undefined(header + i);
2627  }
2628}
2629
2630
2631void MarkCompactCollector::ClearNonLiveMapTransitions(Map* map,
2632                                                      MarkBit map_mark) {
2633  Object* potential_parent = map->GetBackPointer();
2634  if (!potential_parent->IsMap()) return;
2635  Map* parent = Map::cast(potential_parent);
2636
2637  // Follow back pointer, check whether we are dealing with a map transition
2638  // from a live map to a dead path and in case clear transitions of parent.
2639  bool current_is_alive = map_mark.Get();
2640  bool parent_is_alive = Marking::MarkBitFrom(parent).Get();
2641  if (!current_is_alive && parent_is_alive) {
2642    parent->ClearNonLiveTransitions(heap());
2643  }
2644}
2645
2646
2647void MarkCompactCollector::ClearAndDeoptimizeDependentCode(
2648    DependentCode* entries) {
2649  DisallowHeapAllocation no_allocation;
2650  DependentCode::GroupStartIndexes starts(entries);
2651  int number_of_entries = starts.number_of_entries();
2652  if (number_of_entries == 0) return;
2653  for (int i = 0; i < number_of_entries; i++) {
2654    // If the entry is compilation info then the map must be alive,
2655    // and ClearAndDeoptimizeDependentCode shouldn't be called.
2656    ASSERT(entries->is_code_at(i));
2657    Code* code = entries->code_at(i);
2658
2659    if (IsMarked(code) && !code->marked_for_deoptimization()) {
2660      code->set_marked_for_deoptimization(true);
2661      code->InvalidateEmbeddedObjects();
2662      have_code_to_deoptimize_ = true;
2663    }
2664    entries->clear_at(i);
2665  }
2666}
2667
2668
2669void MarkCompactCollector::ClearNonLiveDependentCode(DependentCode* entries) {
2670  DisallowHeapAllocation no_allocation;
2671  DependentCode::GroupStartIndexes starts(entries);
2672  int number_of_entries = starts.number_of_entries();
2673  if (number_of_entries == 0) return;
2674  int new_number_of_entries = 0;
2675  // Go through all groups, remove dead codes and compact.
2676  for (int g = 0; g < DependentCode::kGroupCount; g++) {
2677    int group_number_of_entries = 0;
2678    for (int i = starts.at(g); i < starts.at(g + 1); i++) {
2679      Object* obj = entries->object_at(i);
2680      ASSERT(obj->IsCode() || IsMarked(obj));
2681      if (IsMarked(obj) &&
2682          (!obj->IsCode() || !WillBeDeoptimized(Code::cast(obj)))) {
2683        if (new_number_of_entries + group_number_of_entries != i) {
2684          entries->set_object_at(
2685              new_number_of_entries + group_number_of_entries, obj);
2686        }
2687        Object** slot = entries->slot_at(new_number_of_entries +
2688                                         group_number_of_entries);
2689        RecordSlot(slot, slot, obj);
2690        group_number_of_entries++;
2691      }
2692    }
2693    entries->set_number_of_entries(
2694        static_cast<DependentCode::DependencyGroup>(g),
2695        group_number_of_entries);
2696    new_number_of_entries += group_number_of_entries;
2697  }
2698  for (int i = new_number_of_entries; i < number_of_entries; i++) {
2699    entries->clear_at(i);
2700  }
2701}
2702
2703
2704void MarkCompactCollector::ProcessWeakCollections() {
2705  GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_WEAKCOLLECTION_PROCESS);
2706  Object* weak_collection_obj = encountered_weak_collections();
2707  while (weak_collection_obj != Smi::FromInt(0)) {
2708    ASSERT(MarkCompactCollector::IsMarked(
2709        HeapObject::cast(weak_collection_obj)));
2710    JSWeakCollection* weak_collection =
2711        reinterpret_cast<JSWeakCollection*>(weak_collection_obj);
2712    ObjectHashTable* table = ObjectHashTable::cast(weak_collection->table());
2713    Object** anchor = reinterpret_cast<Object**>(table->address());
2714    for (int i = 0; i < table->Capacity(); i++) {
2715      if (MarkCompactCollector::IsMarked(HeapObject::cast(table->KeyAt(i)))) {
2716        Object** key_slot =
2717            HeapObject::RawField(table, FixedArray::OffsetOfElementAt(
2718                ObjectHashTable::EntryToIndex(i)));
2719        RecordSlot(anchor, key_slot, *key_slot);
2720        Object** value_slot =
2721            HeapObject::RawField(table, FixedArray::OffsetOfElementAt(
2722                ObjectHashTable::EntryToValueIndex(i)));
2723        MarkCompactMarkingVisitor::MarkObjectByPointer(
2724            this, anchor, value_slot);
2725      }
2726    }
2727    weak_collection_obj = weak_collection->next();
2728  }
2729}
2730
2731
2732void MarkCompactCollector::ClearWeakCollections() {
2733  GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_WEAKCOLLECTION_CLEAR);
2734  Object* weak_collection_obj = encountered_weak_collections();
2735  while (weak_collection_obj != Smi::FromInt(0)) {
2736    ASSERT(MarkCompactCollector::IsMarked(
2737        HeapObject::cast(weak_collection_obj)));
2738    JSWeakCollection* weak_collection =
2739        reinterpret_cast<JSWeakCollection*>(weak_collection_obj);
2740    ObjectHashTable* table = ObjectHashTable::cast(weak_collection->table());
2741    for (int i = 0; i < table->Capacity(); i++) {
2742      if (!MarkCompactCollector::IsMarked(HeapObject::cast(table->KeyAt(i)))) {
2743        table->RemoveEntry(i);
2744      }
2745    }
2746    weak_collection_obj = weak_collection->next();
2747    weak_collection->set_next(Smi::FromInt(0));
2748  }
2749  set_encountered_weak_collections(Smi::FromInt(0));
2750}
2751
2752
2753// We scavange new space simultaneously with sweeping. This is done in two
2754// passes.
2755//
2756// The first pass migrates all alive objects from one semispace to another or
2757// promotes them to old space.  Forwarding address is written directly into
2758// first word of object without any encoding.  If object is dead we write
2759// NULL as a forwarding address.
2760//
2761// The second pass updates pointers to new space in all spaces.  It is possible
2762// to encounter pointers to dead new space objects during traversal of pointers
2763// to new space.  We should clear them to avoid encountering them during next
2764// pointer iteration.  This is an issue if the store buffer overflows and we
2765// have to scan the entire old space, including dead objects, looking for
2766// pointers to new space.
2767void MarkCompactCollector::MigrateObject(Address dst,
2768                                         Address src,
2769                                         int size,
2770                                         AllocationSpace dest) {
2771  HeapProfiler* heap_profiler = heap()->isolate()->heap_profiler();
2772  if (heap_profiler->is_tracking_object_moves()) {
2773    heap_profiler->ObjectMoveEvent(src, dst, size);
2774  }
2775  ASSERT(heap()->AllowedToBeMigrated(HeapObject::FromAddress(src), dest));
2776  ASSERT(dest != LO_SPACE && size <= Page::kMaxNonCodeHeapObjectSize);
2777  if (dest == OLD_POINTER_SPACE) {
2778    Address src_slot = src;
2779    Address dst_slot = dst;
2780    ASSERT(IsAligned(size, kPointerSize));
2781
2782    for (int remaining = size / kPointerSize; remaining > 0; remaining--) {
2783      Object* value = Memory::Object_at(src_slot);
2784
2785      Memory::Object_at(dst_slot) = value;
2786
2787      if (heap_->InNewSpace(value)) {
2788        heap_->store_buffer()->Mark(dst_slot);
2789      } else if (value->IsHeapObject() && IsOnEvacuationCandidate(value)) {
2790        SlotsBuffer::AddTo(&slots_buffer_allocator_,
2791                           &migration_slots_buffer_,
2792                           reinterpret_cast<Object**>(dst_slot),
2793                           SlotsBuffer::IGNORE_OVERFLOW);
2794      }
2795
2796      src_slot += kPointerSize;
2797      dst_slot += kPointerSize;
2798    }
2799
2800    if (compacting_ && HeapObject::FromAddress(dst)->IsJSFunction()) {
2801      Address code_entry_slot = dst + JSFunction::kCodeEntryOffset;
2802      Address code_entry = Memory::Address_at(code_entry_slot);
2803
2804      if (Page::FromAddress(code_entry)->IsEvacuationCandidate()) {
2805        SlotsBuffer::AddTo(&slots_buffer_allocator_,
2806                           &migration_slots_buffer_,
2807                           SlotsBuffer::CODE_ENTRY_SLOT,
2808                           code_entry_slot,
2809                           SlotsBuffer::IGNORE_OVERFLOW);
2810      }
2811    }
2812  } else if (dest == CODE_SPACE) {
2813    PROFILE(isolate(), CodeMoveEvent(src, dst));
2814    heap()->MoveBlock(dst, src, size);
2815    SlotsBuffer::AddTo(&slots_buffer_allocator_,
2816                       &migration_slots_buffer_,
2817                       SlotsBuffer::RELOCATED_CODE_OBJECT,
2818                       dst,
2819                       SlotsBuffer::IGNORE_OVERFLOW);
2820    Code::cast(HeapObject::FromAddress(dst))->Relocate(dst - src);
2821  } else {
2822    ASSERT(dest == OLD_DATA_SPACE || dest == NEW_SPACE);
2823    heap()->MoveBlock(dst, src, size);
2824  }
2825  Memory::Address_at(src) = dst;
2826}
2827
2828
2829// Visitor for updating pointers from live objects in old spaces to new space.
2830// It does not expect to encounter pointers to dead objects.
2831class PointersUpdatingVisitor: public ObjectVisitor {
2832 public:
2833  explicit PointersUpdatingVisitor(Heap* heap) : heap_(heap) { }
2834
2835  void VisitPointer(Object** p) {
2836    UpdatePointer(p);
2837  }
2838
2839  void VisitPointers(Object** start, Object** end) {
2840    for (Object** p = start; p < end; p++) UpdatePointer(p);
2841  }
2842
2843  void VisitEmbeddedPointer(RelocInfo* rinfo) {
2844    ASSERT(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT);
2845    Object* target = rinfo->target_object();
2846    Object* old_target = target;
2847    VisitPointer(&target);
2848    // Avoid unnecessary changes that might unnecessary flush the instruction
2849    // cache.
2850    if (target != old_target) {
2851      rinfo->set_target_object(target);
2852    }
2853  }
2854
2855  void VisitCodeTarget(RelocInfo* rinfo) {
2856    ASSERT(RelocInfo::IsCodeTarget(rinfo->rmode()));
2857    Object* target = Code::GetCodeFromTargetAddress(rinfo->target_address());
2858    Object* old_target = target;
2859    VisitPointer(&target);
2860    if (target != old_target) {
2861      rinfo->set_target_address(Code::cast(target)->instruction_start());
2862    }
2863  }
2864
2865  void VisitCodeAgeSequence(RelocInfo* rinfo) {
2866    ASSERT(RelocInfo::IsCodeAgeSequence(rinfo->rmode()));
2867    Object* stub = rinfo->code_age_stub();
2868    ASSERT(stub != NULL);
2869    VisitPointer(&stub);
2870    if (stub != rinfo->code_age_stub()) {
2871      rinfo->set_code_age_stub(Code::cast(stub));
2872    }
2873  }
2874
2875  void VisitDebugTarget(RelocInfo* rinfo) {
2876    ASSERT((RelocInfo::IsJSReturn(rinfo->rmode()) &&
2877            rinfo->IsPatchedReturnSequence()) ||
2878           (RelocInfo::IsDebugBreakSlot(rinfo->rmode()) &&
2879            rinfo->IsPatchedDebugBreakSlotSequence()));
2880    Object* target = Code::GetCodeFromTargetAddress(rinfo->call_address());
2881    VisitPointer(&target);
2882    rinfo->set_call_address(Code::cast(target)->instruction_start());
2883  }
2884
2885  static inline void UpdateSlot(Heap* heap, Object** slot) {
2886    Object* obj = *slot;
2887
2888    if (!obj->IsHeapObject()) return;
2889
2890    HeapObject* heap_obj = HeapObject::cast(obj);
2891
2892    MapWord map_word = heap_obj->map_word();
2893    if (map_word.IsForwardingAddress()) {
2894      ASSERT(heap->InFromSpace(heap_obj) ||
2895             MarkCompactCollector::IsOnEvacuationCandidate(heap_obj));
2896      HeapObject* target = map_word.ToForwardingAddress();
2897      *slot = target;
2898      ASSERT(!heap->InFromSpace(target) &&
2899             !MarkCompactCollector::IsOnEvacuationCandidate(target));
2900    }
2901  }
2902
2903 private:
2904  inline void UpdatePointer(Object** p) {
2905    UpdateSlot(heap_, p);
2906  }
2907
2908  Heap* heap_;
2909};
2910
2911
2912static void UpdatePointer(HeapObject** p, HeapObject* object) {
2913  ASSERT(*p == object);
2914
2915  Address old_addr = object->address();
2916
2917  Address new_addr = Memory::Address_at(old_addr);
2918
2919  // The new space sweep will overwrite the map word of dead objects
2920  // with NULL. In this case we do not need to transfer this entry to
2921  // the store buffer which we are rebuilding.
2922  if (new_addr != NULL) {
2923    *p = HeapObject::FromAddress(new_addr);
2924  } else {
2925    // We have to zap this pointer, because the store buffer may overflow later,
2926    // and then we have to scan the entire heap and we don't want to find
2927    // spurious newspace pointers in the old space.
2928    // TODO(mstarzinger): This was changed to a sentinel value to track down
2929    // rare crashes, change it back to Smi::FromInt(0) later.
2930    *p = reinterpret_cast<HeapObject*>(Smi::FromInt(0x0f100d00 >> 1));  // flood
2931  }
2932}
2933
2934
2935static String* UpdateReferenceInExternalStringTableEntry(Heap* heap,
2936                                                         Object** p) {
2937  MapWord map_word = HeapObject::cast(*p)->map_word();
2938
2939  if (map_word.IsForwardingAddress()) {
2940    return String::cast(map_word.ToForwardingAddress());
2941  }
2942
2943  return String::cast(*p);
2944}
2945
2946
2947bool MarkCompactCollector::TryPromoteObject(HeapObject* object,
2948                                            int object_size) {
2949  // TODO(hpayer): Replace that check with an assert.
2950  CHECK(object_size <= Page::kMaxNonCodeHeapObjectSize);
2951
2952  OldSpace* target_space = heap()->TargetSpace(object);
2953
2954  ASSERT(target_space == heap()->old_pointer_space() ||
2955         target_space == heap()->old_data_space());
2956  Object* result;
2957  MaybeObject* maybe_result = target_space->AllocateRaw(object_size);
2958  if (maybe_result->ToObject(&result)) {
2959    HeapObject* target = HeapObject::cast(result);
2960    MigrateObject(target->address(),
2961                  object->address(),
2962                  object_size,
2963                  target_space->identity());
2964    heap()->mark_compact_collector()->tracer()->
2965        increment_promoted_objects_size(object_size);
2966    return true;
2967  }
2968
2969  return false;
2970}
2971
2972
2973void MarkCompactCollector::EvacuateNewSpace() {
2974  // There are soft limits in the allocation code, designed trigger a mark
2975  // sweep collection by failing allocations.  But since we are already in
2976  // a mark-sweep allocation, there is no sense in trying to trigger one.
2977  AlwaysAllocateScope scope;
2978  heap()->CheckNewSpaceExpansionCriteria();
2979
2980  NewSpace* new_space = heap()->new_space();
2981
2982  // Store allocation range before flipping semispaces.
2983  Address from_bottom = new_space->bottom();
2984  Address from_top = new_space->top();
2985
2986  // Flip the semispaces.  After flipping, to space is empty, from space has
2987  // live objects.
2988  new_space->Flip();
2989  new_space->ResetAllocationInfo();
2990
2991  int survivors_size = 0;
2992
2993  // First pass: traverse all objects in inactive semispace, remove marks,
2994  // migrate live objects and write forwarding addresses.  This stage puts
2995  // new entries in the store buffer and may cause some pages to be marked
2996  // scan-on-scavenge.
2997  NewSpacePageIterator it(from_bottom, from_top);
2998  while (it.has_next()) {
2999    NewSpacePage* p = it.next();
3000    survivors_size += DiscoverAndPromoteBlackObjectsOnPage(new_space, p);
3001  }
3002
3003  heap_->IncrementYoungSurvivorsCounter(survivors_size);
3004  new_space->set_age_mark(new_space->top());
3005}
3006
3007
3008void MarkCompactCollector::EvacuateLiveObjectsFromPage(Page* p) {
3009  AlwaysAllocateScope always_allocate;
3010  PagedSpace* space = static_cast<PagedSpace*>(p->owner());
3011  ASSERT(p->IsEvacuationCandidate() && !p->WasSwept());
3012  p->MarkSweptPrecisely();
3013
3014  int offsets[16];
3015
3016  for (MarkBitCellIterator it(p); !it.Done(); it.Advance()) {
3017    Address cell_base = it.CurrentCellBase();
3018    MarkBit::CellType* cell = it.CurrentCell();
3019
3020    if (*cell == 0) continue;
3021
3022    int live_objects = MarkWordToObjectStarts(*cell, offsets);
3023    for (int i = 0; i < live_objects; i++) {
3024      Address object_addr = cell_base + offsets[i] * kPointerSize;
3025      HeapObject* object = HeapObject::FromAddress(object_addr);
3026      ASSERT(Marking::IsBlack(Marking::MarkBitFrom(object)));
3027
3028      int size = object->Size();
3029
3030      MaybeObject* target = space->AllocateRaw(size);
3031      if (target->IsFailure()) {
3032        // OS refused to give us memory.
3033        V8::FatalProcessOutOfMemory("Evacuation");
3034        return;
3035      }
3036
3037      Object* target_object = target->ToObjectUnchecked();
3038
3039      MigrateObject(HeapObject::cast(target_object)->address(),
3040                    object_addr,
3041                    size,
3042                    space->identity());
3043      ASSERT(object->map_word().IsForwardingAddress());
3044    }
3045
3046    // Clear marking bits for current cell.
3047    *cell = 0;
3048  }
3049  p->ResetLiveBytes();
3050}
3051
3052
3053void MarkCompactCollector::EvacuatePages() {
3054  int npages = evacuation_candidates_.length();
3055  for (int i = 0; i < npages; i++) {
3056    Page* p = evacuation_candidates_[i];
3057    ASSERT(p->IsEvacuationCandidate() ||
3058           p->IsFlagSet(Page::RESCAN_ON_EVACUATION));
3059    if (p->IsEvacuationCandidate()) {
3060      // During compaction we might have to request a new page.
3061      // Check that space still have room for that.
3062      if (static_cast<PagedSpace*>(p->owner())->CanExpand()) {
3063        EvacuateLiveObjectsFromPage(p);
3064      } else {
3065        // Without room for expansion evacuation is not guaranteed to succeed.
3066        // Pessimistically abandon unevacuated pages.
3067        for (int j = i; j < npages; j++) {
3068          Page* page = evacuation_candidates_[j];
3069          slots_buffer_allocator_.DeallocateChain(page->slots_buffer_address());
3070          page->ClearEvacuationCandidate();
3071          page->SetFlag(Page::RESCAN_ON_EVACUATION);
3072          page->InsertAfter(static_cast<PagedSpace*>(page->owner())->anchor());
3073        }
3074        return;
3075      }
3076    }
3077  }
3078}
3079
3080
3081class EvacuationWeakObjectRetainer : public WeakObjectRetainer {
3082 public:
3083  virtual Object* RetainAs(Object* object) {
3084    if (object->IsHeapObject()) {
3085      HeapObject* heap_object = HeapObject::cast(object);
3086      MapWord map_word = heap_object->map_word();
3087      if (map_word.IsForwardingAddress()) {
3088        return map_word.ToForwardingAddress();
3089      }
3090    }
3091    return object;
3092  }
3093};
3094
3095
3096static inline void UpdateSlot(Isolate* isolate,
3097                              ObjectVisitor* v,
3098                              SlotsBuffer::SlotType slot_type,
3099                              Address addr) {
3100  switch (slot_type) {
3101    case SlotsBuffer::CODE_TARGET_SLOT: {
3102      RelocInfo rinfo(addr, RelocInfo::CODE_TARGET, 0, NULL);
3103      rinfo.Visit(isolate, v);
3104      break;
3105    }
3106    case SlotsBuffer::CODE_ENTRY_SLOT: {
3107      v->VisitCodeEntry(addr);
3108      break;
3109    }
3110    case SlotsBuffer::RELOCATED_CODE_OBJECT: {
3111      HeapObject* obj = HeapObject::FromAddress(addr);
3112      Code::cast(obj)->CodeIterateBody(v);
3113      break;
3114    }
3115    case SlotsBuffer::DEBUG_TARGET_SLOT: {
3116      RelocInfo rinfo(addr, RelocInfo::DEBUG_BREAK_SLOT, 0, NULL);
3117      if (rinfo.IsPatchedDebugBreakSlotSequence()) rinfo.Visit(isolate, v);
3118      break;
3119    }
3120    case SlotsBuffer::JS_RETURN_SLOT: {
3121      RelocInfo rinfo(addr, RelocInfo::JS_RETURN, 0, NULL);
3122      if (rinfo.IsPatchedReturnSequence()) rinfo.Visit(isolate, v);
3123      break;
3124    }
3125    case SlotsBuffer::EMBEDDED_OBJECT_SLOT: {
3126      RelocInfo rinfo(addr, RelocInfo::EMBEDDED_OBJECT, 0, NULL);
3127      rinfo.Visit(isolate, v);
3128      break;
3129    }
3130    default:
3131      UNREACHABLE();
3132      break;
3133  }
3134}
3135
3136
3137enum SweepingMode {
3138  SWEEP_ONLY,
3139  SWEEP_AND_VISIT_LIVE_OBJECTS
3140};
3141
3142
3143enum SkipListRebuildingMode {
3144  REBUILD_SKIP_LIST,
3145  IGNORE_SKIP_LIST
3146};
3147
3148
3149// Sweep a space precisely.  After this has been done the space can
3150// be iterated precisely, hitting only the live objects.  Code space
3151// is always swept precisely because we want to be able to iterate
3152// over it.  Map space is swept precisely, because it is not compacted.
3153// Slots in live objects pointing into evacuation candidates are updated
3154// if requested.
3155template<SweepingMode sweeping_mode, SkipListRebuildingMode skip_list_mode>
3156static void SweepPrecisely(PagedSpace* space,
3157                           Page* p,
3158                           ObjectVisitor* v) {
3159  ASSERT(!p->IsEvacuationCandidate() && !p->WasSwept());
3160  ASSERT_EQ(skip_list_mode == REBUILD_SKIP_LIST,
3161            space->identity() == CODE_SPACE);
3162  ASSERT((p->skip_list() == NULL) || (skip_list_mode == REBUILD_SKIP_LIST));
3163
3164  double start_time = 0.0;
3165  if (FLAG_print_cumulative_gc_stat) {
3166    start_time = OS::TimeCurrentMillis();
3167  }
3168
3169  p->MarkSweptPrecisely();
3170
3171  Address free_start = p->area_start();
3172  ASSERT(reinterpret_cast<intptr_t>(free_start) % (32 * kPointerSize) == 0);
3173  int offsets[16];
3174
3175  SkipList* skip_list = p->skip_list();
3176  int curr_region = -1;
3177  if ((skip_list_mode == REBUILD_SKIP_LIST) && skip_list) {
3178    skip_list->Clear();
3179  }
3180
3181  for (MarkBitCellIterator it(p); !it.Done(); it.Advance()) {
3182    Address cell_base = it.CurrentCellBase();
3183    MarkBit::CellType* cell = it.CurrentCell();
3184    int live_objects = MarkWordToObjectStarts(*cell, offsets);
3185    int live_index = 0;
3186    for ( ; live_objects != 0; live_objects--) {
3187      Address free_end = cell_base + offsets[live_index++] * kPointerSize;
3188      if (free_end != free_start) {
3189        space->Free(free_start, static_cast<int>(free_end - free_start));
3190#ifdef ENABLE_GDB_JIT_INTERFACE
3191        if (FLAG_gdbjit && space->identity() == CODE_SPACE) {
3192          GDBJITInterface::RemoveCodeRange(free_start, free_end);
3193        }
3194#endif
3195      }
3196      HeapObject* live_object = HeapObject::FromAddress(free_end);
3197      ASSERT(Marking::IsBlack(Marking::MarkBitFrom(live_object)));
3198      Map* map = live_object->map();
3199      int size = live_object->SizeFromMap(map);
3200      if (sweeping_mode == SWEEP_AND_VISIT_LIVE_OBJECTS) {
3201        live_object->IterateBody(map->instance_type(), size, v);
3202      }
3203      if ((skip_list_mode == REBUILD_SKIP_LIST) && skip_list != NULL) {
3204        int new_region_start =
3205            SkipList::RegionNumber(free_end);
3206        int new_region_end =
3207            SkipList::RegionNumber(free_end + size - kPointerSize);
3208        if (new_region_start != curr_region ||
3209            new_region_end != curr_region) {
3210          skip_list->AddObject(free_end, size);
3211          curr_region = new_region_end;
3212        }
3213      }
3214      free_start = free_end + size;
3215    }
3216    // Clear marking bits for current cell.
3217    *cell = 0;
3218  }
3219  if (free_start != p->area_end()) {
3220    space->Free(free_start, static_cast<int>(p->area_end() - free_start));
3221#ifdef ENABLE_GDB_JIT_INTERFACE
3222    if (FLAG_gdbjit && space->identity() == CODE_SPACE) {
3223      GDBJITInterface::RemoveCodeRange(free_start, p->area_end());
3224    }
3225#endif
3226  }
3227  p->ResetLiveBytes();
3228  if (FLAG_print_cumulative_gc_stat) {
3229    space->heap()->AddSweepingTime(OS::TimeCurrentMillis() - start_time);
3230  }
3231}
3232
3233
3234static bool SetMarkBitsUnderInvalidatedCode(Code* code, bool value) {
3235  Page* p = Page::FromAddress(code->address());
3236
3237  if (p->IsEvacuationCandidate() ||
3238      p->IsFlagSet(Page::RESCAN_ON_EVACUATION)) {
3239    return false;
3240  }
3241
3242  Address code_start = code->address();
3243  Address code_end = code_start + code->Size();
3244
3245  uint32_t start_index = MemoryChunk::FastAddressToMarkbitIndex(code_start);
3246  uint32_t end_index =
3247      MemoryChunk::FastAddressToMarkbitIndex(code_end - kPointerSize);
3248
3249  Bitmap* b = p->markbits();
3250
3251  MarkBit start_mark_bit = b->MarkBitFromIndex(start_index);
3252  MarkBit end_mark_bit = b->MarkBitFromIndex(end_index);
3253
3254  MarkBit::CellType* start_cell = start_mark_bit.cell();
3255  MarkBit::CellType* end_cell = end_mark_bit.cell();
3256
3257  if (value) {
3258    MarkBit::CellType start_mask = ~(start_mark_bit.mask() - 1);
3259    MarkBit::CellType end_mask = (end_mark_bit.mask() << 1) - 1;
3260
3261    if (start_cell == end_cell) {
3262      *start_cell |= start_mask & end_mask;
3263    } else {
3264      *start_cell |= start_mask;
3265      for (MarkBit::CellType* cell = start_cell + 1; cell < end_cell; cell++) {
3266        *cell = ~0;
3267      }
3268      *end_cell |= end_mask;
3269    }
3270  } else {
3271    for (MarkBit::CellType* cell = start_cell ; cell <= end_cell; cell++) {
3272      *cell = 0;
3273    }
3274  }
3275
3276  return true;
3277}
3278
3279
3280static bool IsOnInvalidatedCodeObject(Address addr) {
3281  // We did not record any slots in large objects thus
3282  // we can safely go to the page from the slot address.
3283  Page* p = Page::FromAddress(addr);
3284
3285  // First check owner's identity because old pointer and old data spaces
3286  // are swept lazily and might still have non-zero mark-bits on some
3287  // pages.
3288  if (p->owner()->identity() != CODE_SPACE) return false;
3289
3290  // In code space only bits on evacuation candidates (but we don't record
3291  // any slots on them) and under invalidated code objects are non-zero.
3292  MarkBit mark_bit =
3293      p->markbits()->MarkBitFromIndex(Page::FastAddressToMarkbitIndex(addr));
3294
3295  return mark_bit.Get();
3296}
3297
3298
3299void MarkCompactCollector::InvalidateCode(Code* code) {
3300  if (heap_->incremental_marking()->IsCompacting() &&
3301      !ShouldSkipEvacuationSlotRecording(code)) {
3302    ASSERT(compacting_);
3303
3304    // If the object is white than no slots were recorded on it yet.
3305    MarkBit mark_bit = Marking::MarkBitFrom(code);
3306    if (Marking::IsWhite(mark_bit)) return;
3307
3308    invalidated_code_.Add(code);
3309  }
3310}
3311
3312
3313// Return true if the given code is deoptimized or will be deoptimized.
3314bool MarkCompactCollector::WillBeDeoptimized(Code* code) {
3315  return code->marked_for_deoptimization();
3316}
3317
3318
3319bool MarkCompactCollector::MarkInvalidatedCode() {
3320  bool code_marked = false;
3321
3322  int length = invalidated_code_.length();
3323  for (int i = 0; i < length; i++) {
3324    Code* code = invalidated_code_[i];
3325
3326    if (SetMarkBitsUnderInvalidatedCode(code, true)) {
3327      code_marked = true;
3328    }
3329  }
3330
3331  return code_marked;
3332}
3333
3334
3335void MarkCompactCollector::RemoveDeadInvalidatedCode() {
3336  int length = invalidated_code_.length();
3337  for (int i = 0; i < length; i++) {
3338    if (!IsMarked(invalidated_code_[i])) invalidated_code_[i] = NULL;
3339  }
3340}
3341
3342
3343void MarkCompactCollector::ProcessInvalidatedCode(ObjectVisitor* visitor) {
3344  int length = invalidated_code_.length();
3345  for (int i = 0; i < length; i++) {
3346    Code* code = invalidated_code_[i];
3347    if (code != NULL) {
3348      code->Iterate(visitor);
3349      SetMarkBitsUnderInvalidatedCode(code, false);
3350    }
3351  }
3352  invalidated_code_.Rewind(0);
3353}
3354
3355
3356void MarkCompactCollector::EvacuateNewSpaceAndCandidates() {
3357  Heap::RelocationLock relocation_lock(heap());
3358
3359  bool code_slots_filtering_required;
3360  { GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_SWEEP_NEWSPACE);
3361    code_slots_filtering_required = MarkInvalidatedCode();
3362    EvacuateNewSpace();
3363  }
3364
3365  { GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_EVACUATE_PAGES);
3366    EvacuatePages();
3367  }
3368
3369  // Second pass: find pointers to new space and update them.
3370  PointersUpdatingVisitor updating_visitor(heap());
3371
3372  { GCTracer::Scope gc_scope(tracer_,
3373                             GCTracer::Scope::MC_UPDATE_NEW_TO_NEW_POINTERS);
3374    // Update pointers in to space.
3375    SemiSpaceIterator to_it(heap()->new_space()->bottom(),
3376                            heap()->new_space()->top());
3377    for (HeapObject* object = to_it.Next();
3378         object != NULL;
3379         object = to_it.Next()) {
3380      Map* map = object->map();
3381      object->IterateBody(map->instance_type(),
3382                          object->SizeFromMap(map),
3383                          &updating_visitor);
3384    }
3385  }
3386
3387  { GCTracer::Scope gc_scope(tracer_,
3388                             GCTracer::Scope::MC_UPDATE_ROOT_TO_NEW_POINTERS);
3389    // Update roots.
3390    heap_->IterateRoots(&updating_visitor, VISIT_ALL_IN_SWEEP_NEWSPACE);
3391  }
3392
3393  { GCTracer::Scope gc_scope(tracer_,
3394                             GCTracer::Scope::MC_UPDATE_OLD_TO_NEW_POINTERS);
3395    StoreBufferRebuildScope scope(heap_,
3396                                  heap_->store_buffer(),
3397                                  &Heap::ScavengeStoreBufferCallback);
3398    heap_->store_buffer()->IteratePointersToNewSpaceAndClearMaps(
3399        &UpdatePointer);
3400  }
3401
3402  { GCTracer::Scope gc_scope(tracer_,
3403                             GCTracer::Scope::MC_UPDATE_POINTERS_TO_EVACUATED);
3404    SlotsBuffer::UpdateSlotsRecordedIn(heap_,
3405                                       migration_slots_buffer_,
3406                                       code_slots_filtering_required);
3407    if (FLAG_trace_fragmentation) {
3408      PrintF("  migration slots buffer: %d\n",
3409             SlotsBuffer::SizeOfChain(migration_slots_buffer_));
3410    }
3411
3412    if (compacting_ && was_marked_incrementally_) {
3413      // It's difficult to filter out slots recorded for large objects.
3414      LargeObjectIterator it(heap_->lo_space());
3415      for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
3416        // LargeObjectSpace is not swept yet thus we have to skip
3417        // dead objects explicitly.
3418        if (!IsMarked(obj)) continue;
3419
3420        Page* p = Page::FromAddress(obj->address());
3421        if (p->IsFlagSet(Page::RESCAN_ON_EVACUATION)) {
3422          obj->Iterate(&updating_visitor);
3423          p->ClearFlag(Page::RESCAN_ON_EVACUATION);
3424        }
3425      }
3426    }
3427  }
3428
3429  int npages = evacuation_candidates_.length();
3430  { GCTracer::Scope gc_scope(
3431      tracer_, GCTracer::Scope::MC_UPDATE_POINTERS_BETWEEN_EVACUATED);
3432    for (int i = 0; i < npages; i++) {
3433      Page* p = evacuation_candidates_[i];
3434      ASSERT(p->IsEvacuationCandidate() ||
3435             p->IsFlagSet(Page::RESCAN_ON_EVACUATION));
3436
3437      if (p->IsEvacuationCandidate()) {
3438        SlotsBuffer::UpdateSlotsRecordedIn(heap_,
3439                                           p->slots_buffer(),
3440                                           code_slots_filtering_required);
3441        if (FLAG_trace_fragmentation) {
3442          PrintF("  page %p slots buffer: %d\n",
3443                 reinterpret_cast<void*>(p),
3444                 SlotsBuffer::SizeOfChain(p->slots_buffer()));
3445        }
3446
3447        // Important: skip list should be cleared only after roots were updated
3448        // because root iteration traverses the stack and might have to find
3449        // code objects from non-updated pc pointing into evacuation candidate.
3450        SkipList* list = p->skip_list();
3451        if (list != NULL) list->Clear();
3452      } else {
3453        if (FLAG_gc_verbose) {
3454          PrintF("Sweeping 0x%" V8PRIxPTR " during evacuation.\n",
3455                 reinterpret_cast<intptr_t>(p));
3456        }
3457        PagedSpace* space = static_cast<PagedSpace*>(p->owner());
3458        p->ClearFlag(MemoryChunk::RESCAN_ON_EVACUATION);
3459
3460        switch (space->identity()) {
3461          case OLD_DATA_SPACE:
3462            SweepConservatively<SWEEP_SEQUENTIALLY>(space, NULL, p);
3463            break;
3464          case OLD_POINTER_SPACE:
3465            SweepPrecisely<SWEEP_AND_VISIT_LIVE_OBJECTS, IGNORE_SKIP_LIST>(
3466                space, p, &updating_visitor);
3467            break;
3468          case CODE_SPACE:
3469            SweepPrecisely<SWEEP_AND_VISIT_LIVE_OBJECTS, REBUILD_SKIP_LIST>(
3470                space, p, &updating_visitor);
3471            break;
3472          default:
3473            UNREACHABLE();
3474            break;
3475        }
3476      }
3477    }
3478  }
3479
3480  GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_UPDATE_MISC_POINTERS);
3481
3482  // Update pointers from cells.
3483  HeapObjectIterator cell_iterator(heap_->cell_space());
3484  for (HeapObject* cell = cell_iterator.Next();
3485       cell != NULL;
3486       cell = cell_iterator.Next()) {
3487    if (cell->IsCell()) {
3488      Cell::BodyDescriptor::IterateBody(cell, &updating_visitor);
3489    }
3490  }
3491
3492  HeapObjectIterator js_global_property_cell_iterator(
3493      heap_->property_cell_space());
3494  for (HeapObject* cell = js_global_property_cell_iterator.Next();
3495       cell != NULL;
3496       cell = js_global_property_cell_iterator.Next()) {
3497    if (cell->IsPropertyCell()) {
3498      PropertyCell::BodyDescriptor::IterateBody(cell, &updating_visitor);
3499    }
3500  }
3501
3502  // Update the head of the native contexts list in the heap.
3503  updating_visitor.VisitPointer(heap_->native_contexts_list_address());
3504
3505  heap_->string_table()->Iterate(&updating_visitor);
3506  updating_visitor.VisitPointer(heap_->weak_object_to_code_table_address());
3507  if (heap_->weak_object_to_code_table()->IsHashTable()) {
3508    WeakHashTable* table =
3509        WeakHashTable::cast(heap_->weak_object_to_code_table());
3510    table->Iterate(&updating_visitor);
3511    table->Rehash(heap_->undefined_value());
3512  }
3513
3514  // Update pointers from external string table.
3515  heap_->UpdateReferencesInExternalStringTable(
3516      &UpdateReferenceInExternalStringTableEntry);
3517
3518  if (!FLAG_watch_ic_patching) {
3519    // Update JSFunction pointers from the runtime profiler.
3520    heap()->isolate()->runtime_profiler()->UpdateSamplesAfterCompact(
3521        &updating_visitor);
3522  }
3523
3524  EvacuationWeakObjectRetainer evacuation_object_retainer;
3525  heap()->ProcessWeakReferences(&evacuation_object_retainer);
3526
3527  // Visit invalidated code (we ignored all slots on it) and clear mark-bits
3528  // under it.
3529  ProcessInvalidatedCode(&updating_visitor);
3530
3531  heap_->isolate()->inner_pointer_to_code_cache()->Flush();
3532
3533#ifdef VERIFY_HEAP
3534  if (FLAG_verify_heap) {
3535    VerifyEvacuation(heap_);
3536  }
3537#endif
3538
3539  slots_buffer_allocator_.DeallocateChain(&migration_slots_buffer_);
3540  ASSERT(migration_slots_buffer_ == NULL);
3541}
3542
3543
3544void MarkCompactCollector::UnlinkEvacuationCandidates() {
3545  int npages = evacuation_candidates_.length();
3546  for (int i = 0; i < npages; i++) {
3547    Page* p = evacuation_candidates_[i];
3548    if (!p->IsEvacuationCandidate()) continue;
3549    p->Unlink();
3550    p->ClearSweptPrecisely();
3551    p->ClearSweptConservatively();
3552  }
3553}
3554
3555
3556void MarkCompactCollector::ReleaseEvacuationCandidates() {
3557  int npages = evacuation_candidates_.length();
3558  for (int i = 0; i < npages; i++) {
3559    Page* p = evacuation_candidates_[i];
3560    if (!p->IsEvacuationCandidate()) continue;
3561    PagedSpace* space = static_cast<PagedSpace*>(p->owner());
3562    space->Free(p->area_start(), p->area_size());
3563    p->set_scan_on_scavenge(false);
3564    slots_buffer_allocator_.DeallocateChain(p->slots_buffer_address());
3565    p->ResetLiveBytes();
3566    space->ReleasePage(p, false);
3567  }
3568  evacuation_candidates_.Rewind(0);
3569  compacting_ = false;
3570  heap()->FreeQueuedChunks();
3571}
3572
3573
3574static const int kStartTableEntriesPerLine = 5;
3575static const int kStartTableLines = 171;
3576static const int kStartTableInvalidLine = 127;
3577static const int kStartTableUnusedEntry = 126;
3578
3579#define _ kStartTableUnusedEntry
3580#define X kStartTableInvalidLine
3581// Mark-bit to object start offset table.
3582//
3583// The line is indexed by the mark bits in a byte.  The first number on
3584// the line describes the number of live object starts for the line and the
3585// other numbers on the line describe the offsets (in words) of the object
3586// starts.
3587//
3588// Since objects are at least 2 words large we don't have entries for two
3589// consecutive 1 bits.  All entries after 170 have at least 2 consecutive bits.
3590char kStartTable[kStartTableLines * kStartTableEntriesPerLine] = {
3591  0, _, _, _, _,  // 0
3592  1, 0, _, _, _,  // 1
3593  1, 1, _, _, _,  // 2
3594  X, _, _, _, _,  // 3
3595  1, 2, _, _, _,  // 4
3596  2, 0, 2, _, _,  // 5
3597  X, _, _, _, _,  // 6
3598  X, _, _, _, _,  // 7
3599  1, 3, _, _, _,  // 8
3600  2, 0, 3, _, _,  // 9
3601  2, 1, 3, _, _,  // 10
3602  X, _, _, _, _,  // 11
3603  X, _, _, _, _,  // 12
3604  X, _, _, _, _,  // 13
3605  X, _, _, _, _,  // 14
3606  X, _, _, _, _,  // 15
3607  1, 4, _, _, _,  // 16
3608  2, 0, 4, _, _,  // 17
3609  2, 1, 4, _, _,  // 18
3610  X, _, _, _, _,  // 19
3611  2, 2, 4, _, _,  // 20
3612  3, 0, 2, 4, _,  // 21
3613  X, _, _, _, _,  // 22
3614  X, _, _, _, _,  // 23
3615  X, _, _, _, _,  // 24
3616  X, _, _, _, _,  // 25
3617  X, _, _, _, _,  // 26
3618  X, _, _, _, _,  // 27
3619  X, _, _, _, _,  // 28
3620  X, _, _, _, _,  // 29
3621  X, _, _, _, _,  // 30
3622  X, _, _, _, _,  // 31
3623  1, 5, _, _, _,  // 32
3624  2, 0, 5, _, _,  // 33
3625  2, 1, 5, _, _,  // 34
3626  X, _, _, _, _,  // 35
3627  2, 2, 5, _, _,  // 36
3628  3, 0, 2, 5, _,  // 37
3629  X, _, _, _, _,  // 38
3630  X, _, _, _, _,  // 39
3631  2, 3, 5, _, _,  // 40
3632  3, 0, 3, 5, _,  // 41
3633  3, 1, 3, 5, _,  // 42
3634  X, _, _, _, _,  // 43
3635  X, _, _, _, _,  // 44
3636  X, _, _, _, _,  // 45
3637  X, _, _, _, _,  // 46
3638  X, _, _, _, _,  // 47
3639  X, _, _, _, _,  // 48
3640  X, _, _, _, _,  // 49
3641  X, _, _, _, _,  // 50
3642  X, _, _, _, _,  // 51
3643  X, _, _, _, _,  // 52
3644  X, _, _, _, _,  // 53
3645  X, _, _, _, _,  // 54
3646  X, _, _, _, _,  // 55
3647  X, _, _, _, _,  // 56
3648  X, _, _, _, _,  // 57
3649  X, _, _, _, _,  // 58
3650  X, _, _, _, _,  // 59
3651  X, _, _, _, _,  // 60
3652  X, _, _, _, _,  // 61
3653  X, _, _, _, _,  // 62
3654  X, _, _, _, _,  // 63
3655  1, 6, _, _, _,  // 64
3656  2, 0, 6, _, _,  // 65
3657  2, 1, 6, _, _,  // 66
3658  X, _, _, _, _,  // 67
3659  2, 2, 6, _, _,  // 68
3660  3, 0, 2, 6, _,  // 69
3661  X, _, _, _, _,  // 70
3662  X, _, _, _, _,  // 71
3663  2, 3, 6, _, _,  // 72
3664  3, 0, 3, 6, _,  // 73
3665  3, 1, 3, 6, _,  // 74
3666  X, _, _, _, _,  // 75
3667  X, _, _, _, _,  // 76
3668  X, _, _, _, _,  // 77
3669  X, _, _, _, _,  // 78
3670  X, _, _, _, _,  // 79
3671  2, 4, 6, _, _,  // 80
3672  3, 0, 4, 6, _,  // 81
3673  3, 1, 4, 6, _,  // 82
3674  X, _, _, _, _,  // 83
3675  3, 2, 4, 6, _,  // 84
3676  4, 0, 2, 4, 6,  // 85
3677  X, _, _, _, _,  // 86
3678  X, _, _, _, _,  // 87
3679  X, _, _, _, _,  // 88
3680  X, _, _, _, _,  // 89
3681  X, _, _, _, _,  // 90
3682  X, _, _, _, _,  // 91
3683  X, _, _, _, _,  // 92
3684  X, _, _, _, _,  // 93
3685  X, _, _, _, _,  // 94
3686  X, _, _, _, _,  // 95
3687  X, _, _, _, _,  // 96
3688  X, _, _, _, _,  // 97
3689  X, _, _, _, _,  // 98
3690  X, _, _, _, _,  // 99
3691  X, _, _, _, _,  // 100
3692  X, _, _, _, _,  // 101
3693  X, _, _, _, _,  // 102
3694  X, _, _, _, _,  // 103
3695  X, _, _, _, _,  // 104
3696  X, _, _, _, _,  // 105
3697  X, _, _, _, _,  // 106
3698  X, _, _, _, _,  // 107
3699  X, _, _, _, _,  // 108
3700  X, _, _, _, _,  // 109
3701  X, _, _, _, _,  // 110
3702  X, _, _, _, _,  // 111
3703  X, _, _, _, _,  // 112
3704  X, _, _, _, _,  // 113
3705  X, _, _, _, _,  // 114
3706  X, _, _, _, _,  // 115
3707  X, _, _, _, _,  // 116
3708  X, _, _, _, _,  // 117
3709  X, _, _, _, _,  // 118
3710  X, _, _, _, _,  // 119
3711  X, _, _, _, _,  // 120
3712  X, _, _, _, _,  // 121
3713  X, _, _, _, _,  // 122
3714  X, _, _, _, _,  // 123
3715  X, _, _, _, _,  // 124
3716  X, _, _, _, _,  // 125
3717  X, _, _, _, _,  // 126
3718  X, _, _, _, _,  // 127
3719  1, 7, _, _, _,  // 128
3720  2, 0, 7, _, _,  // 129
3721  2, 1, 7, _, _,  // 130
3722  X, _, _, _, _,  // 131
3723  2, 2, 7, _, _,  // 132
3724  3, 0, 2, 7, _,  // 133
3725  X, _, _, _, _,  // 134
3726  X, _, _, _, _,  // 135
3727  2, 3, 7, _, _,  // 136
3728  3, 0, 3, 7, _,  // 137
3729  3, 1, 3, 7, _,  // 138
3730  X, _, _, _, _,  // 139
3731  X, _, _, _, _,  // 140
3732  X, _, _, _, _,  // 141
3733  X, _, _, _, _,  // 142
3734  X, _, _, _, _,  // 143
3735  2, 4, 7, _, _,  // 144
3736  3, 0, 4, 7, _,  // 145
3737  3, 1, 4, 7, _,  // 146
3738  X, _, _, _, _,  // 147
3739  3, 2, 4, 7, _,  // 148
3740  4, 0, 2, 4, 7,  // 149
3741  X, _, _, _, _,  // 150
3742  X, _, _, _, _,  // 151
3743  X, _, _, _, _,  // 152
3744  X, _, _, _, _,  // 153
3745  X, _, _, _, _,  // 154
3746  X, _, _, _, _,  // 155
3747  X, _, _, _, _,  // 156
3748  X, _, _, _, _,  // 157
3749  X, _, _, _, _,  // 158
3750  X, _, _, _, _,  // 159
3751  2, 5, 7, _, _,  // 160
3752  3, 0, 5, 7, _,  // 161
3753  3, 1, 5, 7, _,  // 162
3754  X, _, _, _, _,  // 163
3755  3, 2, 5, 7, _,  // 164
3756  4, 0, 2, 5, 7,  // 165
3757  X, _, _, _, _,  // 166
3758  X, _, _, _, _,  // 167
3759  3, 3, 5, 7, _,  // 168
3760  4, 0, 3, 5, 7,  // 169
3761  4, 1, 3, 5, 7   // 170
3762};
3763#undef _
3764#undef X
3765
3766
3767// Takes a word of mark bits.  Returns the number of objects that start in the
3768// range.  Puts the offsets of the words in the supplied array.
3769static inline int MarkWordToObjectStarts(uint32_t mark_bits, int* starts) {
3770  int objects = 0;
3771  int offset = 0;
3772
3773  // No consecutive 1 bits.
3774  ASSERT((mark_bits & 0x180) != 0x180);
3775  ASSERT((mark_bits & 0x18000) != 0x18000);
3776  ASSERT((mark_bits & 0x1800000) != 0x1800000);
3777
3778  while (mark_bits != 0) {
3779    int byte = (mark_bits & 0xff);
3780    mark_bits >>= 8;
3781    if (byte != 0) {
3782      ASSERT(byte < kStartTableLines);  // No consecutive 1 bits.
3783      char* table = kStartTable + byte * kStartTableEntriesPerLine;
3784      int objects_in_these_8_words = table[0];
3785      ASSERT(objects_in_these_8_words != kStartTableInvalidLine);
3786      ASSERT(objects_in_these_8_words < kStartTableEntriesPerLine);
3787      for (int i = 0; i < objects_in_these_8_words; i++) {
3788        starts[objects++] = offset + table[1 + i];
3789      }
3790    }
3791    offset += 8;
3792  }
3793  return objects;
3794}
3795
3796
3797static inline Address DigestFreeStart(Address approximate_free_start,
3798                                      uint32_t free_start_cell) {
3799  ASSERT(free_start_cell != 0);
3800
3801  // No consecutive 1 bits.
3802  ASSERT((free_start_cell & (free_start_cell << 1)) == 0);
3803
3804  int offsets[16];
3805  uint32_t cell = free_start_cell;
3806  int offset_of_last_live;
3807  if ((cell & 0x80000000u) != 0) {
3808    // This case would overflow below.
3809    offset_of_last_live = 31;
3810  } else {
3811    // Remove all but one bit, the most significant.  This is an optimization
3812    // that may or may not be worthwhile.
3813    cell |= cell >> 16;
3814    cell |= cell >> 8;
3815    cell |= cell >> 4;
3816    cell |= cell >> 2;
3817    cell |= cell >> 1;
3818    cell = (cell + 1) >> 1;
3819    int live_objects = MarkWordToObjectStarts(cell, offsets);
3820    ASSERT(live_objects == 1);
3821    offset_of_last_live = offsets[live_objects - 1];
3822  }
3823  Address last_live_start =
3824      approximate_free_start + offset_of_last_live * kPointerSize;
3825  HeapObject* last_live = HeapObject::FromAddress(last_live_start);
3826  Address free_start = last_live_start + last_live->Size();
3827  return free_start;
3828}
3829
3830
3831static inline Address StartOfLiveObject(Address block_address, uint32_t cell) {
3832  ASSERT(cell != 0);
3833
3834  // No consecutive 1 bits.
3835  ASSERT((cell & (cell << 1)) == 0);
3836
3837  int offsets[16];
3838  if (cell == 0x80000000u) {  // Avoid overflow below.
3839    return block_address + 31 * kPointerSize;
3840  }
3841  uint32_t first_set_bit = ((cell ^ (cell - 1)) + 1) >> 1;
3842  ASSERT((first_set_bit & cell) == first_set_bit);
3843  int live_objects = MarkWordToObjectStarts(first_set_bit, offsets);
3844  ASSERT(live_objects == 1);
3845  USE(live_objects);
3846  return block_address + offsets[0] * kPointerSize;
3847}
3848
3849
3850template<MarkCompactCollector::SweepingParallelism mode>
3851static intptr_t Free(PagedSpace* space,
3852                     FreeList* free_list,
3853                     Address start,
3854                     int size) {
3855  if (mode == MarkCompactCollector::SWEEP_SEQUENTIALLY) {
3856    return space->Free(start, size);
3857  } else {
3858    return size - free_list->Free(start, size);
3859  }
3860}
3861
3862
3863// Force instantiation of templatized SweepConservatively method for
3864// SWEEP_SEQUENTIALLY mode.
3865template intptr_t MarkCompactCollector::
3866    SweepConservatively<MarkCompactCollector::SWEEP_SEQUENTIALLY>(
3867        PagedSpace*, FreeList*, Page*);
3868
3869
3870// Force instantiation of templatized SweepConservatively method for
3871// SWEEP_IN_PARALLEL mode.
3872template intptr_t MarkCompactCollector::
3873    SweepConservatively<MarkCompactCollector::SWEEP_IN_PARALLEL>(
3874        PagedSpace*, FreeList*, Page*);
3875
3876
3877// Sweeps a space conservatively.  After this has been done the larger free
3878// spaces have been put on the free list and the smaller ones have been
3879// ignored and left untouched.  A free space is always either ignored or put
3880// on the free list, never split up into two parts.  This is important
3881// because it means that any FreeSpace maps left actually describe a region of
3882// memory that can be ignored when scanning.  Dead objects other than free
3883// spaces will not contain the free space map.
3884template<MarkCompactCollector::SweepingParallelism mode>
3885intptr_t MarkCompactCollector::SweepConservatively(PagedSpace* space,
3886                                                   FreeList* free_list,
3887                                                   Page* p) {
3888  ASSERT(!p->IsEvacuationCandidate() && !p->WasSwept());
3889  ASSERT((mode == MarkCompactCollector::SWEEP_IN_PARALLEL &&
3890         free_list != NULL) ||
3891         (mode == MarkCompactCollector::SWEEP_SEQUENTIALLY &&
3892         free_list == NULL));
3893
3894  p->MarkSweptConservatively();
3895
3896  intptr_t freed_bytes = 0;
3897  size_t size = 0;
3898
3899  // Skip over all the dead objects at the start of the page and mark them free.
3900  Address cell_base = 0;
3901  MarkBit::CellType* cell = NULL;
3902  MarkBitCellIterator it(p);
3903  for (; !it.Done(); it.Advance()) {
3904    cell_base = it.CurrentCellBase();
3905    cell = it.CurrentCell();
3906    if (*cell != 0) break;
3907  }
3908
3909  if (it.Done()) {
3910    size = p->area_end() - p->area_start();
3911    freed_bytes += Free<mode>(space, free_list, p->area_start(),
3912                              static_cast<int>(size));
3913    ASSERT_EQ(0, p->LiveBytes());
3914    return freed_bytes;
3915  }
3916
3917  // Grow the size of the start-of-page free space a little to get up to the
3918  // first live object.
3919  Address free_end = StartOfLiveObject(cell_base, *cell);
3920  // Free the first free space.
3921  size = free_end - p->area_start();
3922  freed_bytes += Free<mode>(space, free_list, p->area_start(),
3923                            static_cast<int>(size));
3924
3925  // The start of the current free area is represented in undigested form by
3926  // the address of the last 32-word section that contained a live object and
3927  // the marking bitmap for that cell, which describes where the live object
3928  // started.  Unless we find a large free space in the bitmap we will not
3929  // digest this pair into a real address.  We start the iteration here at the
3930  // first word in the marking bit map that indicates a live object.
3931  Address free_start = cell_base;
3932  MarkBit::CellType free_start_cell = *cell;
3933
3934  for (; !it.Done(); it.Advance()) {
3935    cell_base = it.CurrentCellBase();
3936    cell = it.CurrentCell();
3937    if (*cell != 0) {
3938      // We have a live object.  Check approximately whether it is more than 32
3939      // words since the last live object.
3940      if (cell_base - free_start > 32 * kPointerSize) {
3941        free_start = DigestFreeStart(free_start, free_start_cell);
3942        if (cell_base - free_start > 32 * kPointerSize) {
3943          // Now that we know the exact start of the free space it still looks
3944          // like we have a large enough free space to be worth bothering with.
3945          // so now we need to find the start of the first live object at the
3946          // end of the free space.
3947          free_end = StartOfLiveObject(cell_base, *cell);
3948          freed_bytes += Free<mode>(space, free_list, free_start,
3949                                    static_cast<int>(free_end - free_start));
3950        }
3951      }
3952      // Update our undigested record of where the current free area started.
3953      free_start = cell_base;
3954      free_start_cell = *cell;
3955      // Clear marking bits for current cell.
3956      *cell = 0;
3957    }
3958  }
3959
3960  // Handle the free space at the end of the page.
3961  if (cell_base - free_start > 32 * kPointerSize) {
3962    free_start = DigestFreeStart(free_start, free_start_cell);
3963    freed_bytes += Free<mode>(space, free_list, free_start,
3964                              static_cast<int>(p->area_end() - free_start));
3965  }
3966
3967  p->ResetLiveBytes();
3968  return freed_bytes;
3969}
3970
3971
3972void MarkCompactCollector::SweepInParallel(PagedSpace* space,
3973                                           FreeList* private_free_list,
3974                                           FreeList* free_list) {
3975  PageIterator it(space);
3976  while (it.has_next()) {
3977    Page* p = it.next();
3978
3979    if (p->TryParallelSweeping()) {
3980      SweepConservatively<SWEEP_IN_PARALLEL>(space, private_free_list, p);
3981      free_list->Concatenate(private_free_list);
3982    }
3983  }
3984}
3985
3986
3987void MarkCompactCollector::SweepSpace(PagedSpace* space, SweeperType sweeper) {
3988  space->set_was_swept_conservatively(sweeper == CONSERVATIVE ||
3989                                      sweeper == LAZY_CONSERVATIVE ||
3990                                      sweeper == PARALLEL_CONSERVATIVE ||
3991                                      sweeper == CONCURRENT_CONSERVATIVE);
3992  space->ClearStats();
3993
3994  PageIterator it(space);
3995
3996  int pages_swept = 0;
3997  bool lazy_sweeping_active = false;
3998  bool unused_page_present = false;
3999  bool parallel_sweeping_active = false;
4000
4001  while (it.has_next()) {
4002    Page* p = it.next();
4003
4004    ASSERT(p->parallel_sweeping() == 0);
4005    ASSERT(!p->IsEvacuationCandidate());
4006
4007    // Clear sweeping flags indicating that marking bits are still intact.
4008    p->ClearSweptPrecisely();
4009    p->ClearSweptConservatively();
4010
4011    if (p->IsFlagSet(Page::RESCAN_ON_EVACUATION)) {
4012      // Will be processed in EvacuateNewSpaceAndCandidates.
4013      ASSERT(evacuation_candidates_.length() > 0);
4014      continue;
4015    }
4016
4017    // One unused page is kept, all further are released before sweeping them.
4018    if (p->LiveBytes() == 0) {
4019      if (unused_page_present) {
4020        if (FLAG_gc_verbose) {
4021          PrintF("Sweeping 0x%" V8PRIxPTR " released page.\n",
4022                 reinterpret_cast<intptr_t>(p));
4023        }
4024        // Adjust unswept free bytes because releasing a page expects said
4025        // counter to be accurate for unswept pages.
4026        space->IncreaseUnsweptFreeBytes(p);
4027        space->ReleasePage(p, true);
4028        continue;
4029      }
4030      unused_page_present = true;
4031    }
4032
4033    switch (sweeper) {
4034      case CONSERVATIVE: {
4035        if (FLAG_gc_verbose) {
4036          PrintF("Sweeping 0x%" V8PRIxPTR " conservatively.\n",
4037                 reinterpret_cast<intptr_t>(p));
4038        }
4039        SweepConservatively<SWEEP_SEQUENTIALLY>(space, NULL, p);
4040        pages_swept++;
4041        break;
4042      }
4043      case LAZY_CONSERVATIVE: {
4044        if (lazy_sweeping_active) {
4045          if (FLAG_gc_verbose) {
4046            PrintF("Sweeping 0x%" V8PRIxPTR " lazily postponed.\n",
4047                   reinterpret_cast<intptr_t>(p));
4048          }
4049          space->IncreaseUnsweptFreeBytes(p);
4050        } else {
4051          if (FLAG_gc_verbose) {
4052            PrintF("Sweeping 0x%" V8PRIxPTR " conservatively.\n",
4053                   reinterpret_cast<intptr_t>(p));
4054          }
4055          SweepConservatively<SWEEP_SEQUENTIALLY>(space, NULL, p);
4056          pages_swept++;
4057          space->SetPagesToSweep(p->next_page());
4058          lazy_sweeping_active = true;
4059        }
4060        break;
4061      }
4062      case CONCURRENT_CONSERVATIVE:
4063      case PARALLEL_CONSERVATIVE: {
4064        if (!parallel_sweeping_active) {
4065          if (FLAG_gc_verbose) {
4066            PrintF("Sweeping 0x%" V8PRIxPTR " conservatively.\n",
4067                   reinterpret_cast<intptr_t>(p));
4068          }
4069          SweepConservatively<SWEEP_SEQUENTIALLY>(space, NULL, p);
4070          pages_swept++;
4071          parallel_sweeping_active = true;
4072        } else {
4073          if (FLAG_gc_verbose) {
4074            PrintF("Sweeping 0x%" V8PRIxPTR " conservatively in parallel.\n",
4075                   reinterpret_cast<intptr_t>(p));
4076          }
4077          p->set_parallel_sweeping(1);
4078          space->IncreaseUnsweptFreeBytes(p);
4079        }
4080        break;
4081      }
4082      case PRECISE: {
4083        if (FLAG_gc_verbose) {
4084          PrintF("Sweeping 0x%" V8PRIxPTR " precisely.\n",
4085                 reinterpret_cast<intptr_t>(p));
4086        }
4087        if (space->identity() == CODE_SPACE) {
4088          SweepPrecisely<SWEEP_ONLY, REBUILD_SKIP_LIST>(space, p, NULL);
4089        } else {
4090          SweepPrecisely<SWEEP_ONLY, IGNORE_SKIP_LIST>(space, p, NULL);
4091        }
4092        pages_swept++;
4093        break;
4094      }
4095      default: {
4096        UNREACHABLE();
4097      }
4098    }
4099  }
4100
4101  if (FLAG_gc_verbose) {
4102    PrintF("SweepSpace: %s (%d pages swept)\n",
4103           AllocationSpaceName(space->identity()),
4104           pages_swept);
4105  }
4106
4107  // Give pages that are queued to be freed back to the OS.
4108  heap()->FreeQueuedChunks();
4109}
4110
4111
4112void MarkCompactCollector::SweepSpaces() {
4113  GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_SWEEP);
4114#ifdef DEBUG
4115  state_ = SWEEP_SPACES;
4116#endif
4117  SweeperType how_to_sweep =
4118      FLAG_lazy_sweeping ? LAZY_CONSERVATIVE : CONSERVATIVE;
4119  if (isolate()->num_sweeper_threads() > 0) {
4120    if (FLAG_parallel_sweeping) how_to_sweep = PARALLEL_CONSERVATIVE;
4121    if (FLAG_concurrent_sweeping) how_to_sweep = CONCURRENT_CONSERVATIVE;
4122  }
4123  if (FLAG_expose_gc) how_to_sweep = CONSERVATIVE;
4124  if (sweep_precisely_) how_to_sweep = PRECISE;
4125
4126  // Unlink evacuation candidates before sweeper threads access the list of
4127  // pages to avoid race condition.
4128  UnlinkEvacuationCandidates();
4129
4130  // Noncompacting collections simply sweep the spaces to clear the mark
4131  // bits and free the nonlive blocks (for old and map spaces).  We sweep
4132  // the map space last because freeing non-live maps overwrites them and
4133  // the other spaces rely on possibly non-live maps to get the sizes for
4134  // non-live objects.
4135  SequentialSweepingScope scope(this);
4136  SweepSpace(heap()->old_pointer_space(), how_to_sweep);
4137  SweepSpace(heap()->old_data_space(), how_to_sweep);
4138
4139  if (how_to_sweep == PARALLEL_CONSERVATIVE ||
4140      how_to_sweep == CONCURRENT_CONSERVATIVE) {
4141    // TODO(hpayer): fix race with concurrent sweeper
4142    StartSweeperThreads();
4143  }
4144
4145  if (how_to_sweep == PARALLEL_CONSERVATIVE) {
4146    WaitUntilSweepingCompleted();
4147  }
4148
4149  RemoveDeadInvalidatedCode();
4150  SweepSpace(heap()->code_space(), PRECISE);
4151
4152  SweepSpace(heap()->cell_space(), PRECISE);
4153  SweepSpace(heap()->property_cell_space(), PRECISE);
4154
4155  EvacuateNewSpaceAndCandidates();
4156
4157  // ClearNonLiveTransitions depends on precise sweeping of map space to
4158  // detect whether unmarked map became dead in this collection or in one
4159  // of the previous ones.
4160  SweepSpace(heap()->map_space(), PRECISE);
4161
4162  // Deallocate unmarked objects and clear marked bits for marked objects.
4163  heap_->lo_space()->FreeUnmarkedObjects();
4164
4165  // Deallocate evacuated candidate pages.
4166  ReleaseEvacuationCandidates();
4167}
4168
4169
4170void MarkCompactCollector::EnableCodeFlushing(bool enable) {
4171#ifdef ENABLE_DEBUGGER_SUPPORT
4172  if (isolate()->debug()->IsLoaded() ||
4173      isolate()->debug()->has_break_points()) {
4174    enable = false;
4175  }
4176#endif
4177
4178  if (enable) {
4179    if (code_flusher_ != NULL) return;
4180    code_flusher_ = new CodeFlusher(isolate());
4181  } else {
4182    if (code_flusher_ == NULL) return;
4183    code_flusher_->EvictAllCandidates();
4184    delete code_flusher_;
4185    code_flusher_ = NULL;
4186  }
4187
4188  if (FLAG_trace_code_flushing) {
4189    PrintF("[code-flushing is now %s]\n", enable ? "on" : "off");
4190  }
4191}
4192
4193
4194// TODO(1466) ReportDeleteIfNeeded is not called currently.
4195// Our profiling tools do not expect intersections between
4196// code objects. We should either reenable it or change our tools.
4197void MarkCompactCollector::ReportDeleteIfNeeded(HeapObject* obj,
4198                                                Isolate* isolate) {
4199#ifdef ENABLE_GDB_JIT_INTERFACE
4200  if (obj->IsCode()) {
4201    GDBJITInterface::RemoveCode(reinterpret_cast<Code*>(obj));
4202  }
4203#endif
4204  if (obj->IsCode()) {
4205    PROFILE(isolate, CodeDeleteEvent(obj->address()));
4206  }
4207}
4208
4209
4210Isolate* MarkCompactCollector::isolate() const {
4211  return heap_->isolate();
4212}
4213
4214
4215void MarkCompactCollector::Initialize() {
4216  MarkCompactMarkingVisitor::Initialize();
4217  IncrementalMarking::Initialize();
4218}
4219
4220
4221bool SlotsBuffer::IsTypedSlot(ObjectSlot slot) {
4222  return reinterpret_cast<uintptr_t>(slot) < NUMBER_OF_SLOT_TYPES;
4223}
4224
4225
4226bool SlotsBuffer::AddTo(SlotsBufferAllocator* allocator,
4227                        SlotsBuffer** buffer_address,
4228                        SlotType type,
4229                        Address addr,
4230                        AdditionMode mode) {
4231  SlotsBuffer* buffer = *buffer_address;
4232  if (buffer == NULL || !buffer->HasSpaceForTypedSlot()) {
4233    if (mode == FAIL_ON_OVERFLOW && ChainLengthThresholdReached(buffer)) {
4234      allocator->DeallocateChain(buffer_address);
4235      return false;
4236    }
4237    buffer = allocator->AllocateBuffer(buffer);
4238    *buffer_address = buffer;
4239  }
4240  ASSERT(buffer->HasSpaceForTypedSlot());
4241  buffer->Add(reinterpret_cast<ObjectSlot>(type));
4242  buffer->Add(reinterpret_cast<ObjectSlot>(addr));
4243  return true;
4244}
4245
4246
4247static inline SlotsBuffer::SlotType SlotTypeForRMode(RelocInfo::Mode rmode) {
4248  if (RelocInfo::IsCodeTarget(rmode)) {
4249    return SlotsBuffer::CODE_TARGET_SLOT;
4250  } else if (RelocInfo::IsEmbeddedObject(rmode)) {
4251    return SlotsBuffer::EMBEDDED_OBJECT_SLOT;
4252  } else if (RelocInfo::IsDebugBreakSlot(rmode)) {
4253    return SlotsBuffer::DEBUG_TARGET_SLOT;
4254  } else if (RelocInfo::IsJSReturn(rmode)) {
4255    return SlotsBuffer::JS_RETURN_SLOT;
4256  }
4257  UNREACHABLE();
4258  return SlotsBuffer::NUMBER_OF_SLOT_TYPES;
4259}
4260
4261
4262void MarkCompactCollector::RecordRelocSlot(RelocInfo* rinfo, Object* target) {
4263  Page* target_page = Page::FromAddress(reinterpret_cast<Address>(target));
4264  if (target_page->IsEvacuationCandidate() &&
4265      (rinfo->host() == NULL ||
4266       !ShouldSkipEvacuationSlotRecording(rinfo->host()))) {
4267    if (!SlotsBuffer::AddTo(&slots_buffer_allocator_,
4268                            target_page->slots_buffer_address(),
4269                            SlotTypeForRMode(rinfo->rmode()),
4270                            rinfo->pc(),
4271                            SlotsBuffer::FAIL_ON_OVERFLOW)) {
4272      EvictEvacuationCandidate(target_page);
4273    }
4274  }
4275}
4276
4277
4278void MarkCompactCollector::RecordCodeEntrySlot(Address slot, Code* target) {
4279  Page* target_page = Page::FromAddress(reinterpret_cast<Address>(target));
4280  if (target_page->IsEvacuationCandidate() &&
4281      !ShouldSkipEvacuationSlotRecording(reinterpret_cast<Object**>(slot))) {
4282    if (!SlotsBuffer::AddTo(&slots_buffer_allocator_,
4283                            target_page->slots_buffer_address(),
4284                            SlotsBuffer::CODE_ENTRY_SLOT,
4285                            slot,
4286                            SlotsBuffer::FAIL_ON_OVERFLOW)) {
4287      EvictEvacuationCandidate(target_page);
4288    }
4289  }
4290}
4291
4292
4293void MarkCompactCollector::RecordCodeTargetPatch(Address pc, Code* target) {
4294  ASSERT(heap()->gc_state() == Heap::MARK_COMPACT);
4295  if (is_compacting()) {
4296    Code* host = isolate()->inner_pointer_to_code_cache()->
4297        GcSafeFindCodeForInnerPointer(pc);
4298    MarkBit mark_bit = Marking::MarkBitFrom(host);
4299    if (Marking::IsBlack(mark_bit)) {
4300      RelocInfo rinfo(pc, RelocInfo::CODE_TARGET, 0, host);
4301      RecordRelocSlot(&rinfo, target);
4302    }
4303  }
4304}
4305
4306
4307static inline SlotsBuffer::SlotType DecodeSlotType(
4308    SlotsBuffer::ObjectSlot slot) {
4309  return static_cast<SlotsBuffer::SlotType>(reinterpret_cast<intptr_t>(slot));
4310}
4311
4312
4313void SlotsBuffer::UpdateSlots(Heap* heap) {
4314  PointersUpdatingVisitor v(heap);
4315
4316  for (int slot_idx = 0; slot_idx < idx_; ++slot_idx) {
4317    ObjectSlot slot = slots_[slot_idx];
4318    if (!IsTypedSlot(slot)) {
4319      PointersUpdatingVisitor::UpdateSlot(heap, slot);
4320    } else {
4321      ++slot_idx;
4322      ASSERT(slot_idx < idx_);
4323      UpdateSlot(heap->isolate(),
4324                 &v,
4325                 DecodeSlotType(slot),
4326                 reinterpret_cast<Address>(slots_[slot_idx]));
4327    }
4328  }
4329}
4330
4331
4332void SlotsBuffer::UpdateSlotsWithFilter(Heap* heap) {
4333  PointersUpdatingVisitor v(heap);
4334
4335  for (int slot_idx = 0; slot_idx < idx_; ++slot_idx) {
4336    ObjectSlot slot = slots_[slot_idx];
4337    if (!IsTypedSlot(slot)) {
4338      if (!IsOnInvalidatedCodeObject(reinterpret_cast<Address>(slot))) {
4339        PointersUpdatingVisitor::UpdateSlot(heap, slot);
4340      }
4341    } else {
4342      ++slot_idx;
4343      ASSERT(slot_idx < idx_);
4344      Address pc = reinterpret_cast<Address>(slots_[slot_idx]);
4345      if (!IsOnInvalidatedCodeObject(pc)) {
4346        UpdateSlot(heap->isolate(),
4347                   &v,
4348                   DecodeSlotType(slot),
4349                   reinterpret_cast<Address>(slots_[slot_idx]));
4350      }
4351    }
4352  }
4353}
4354
4355
4356SlotsBuffer* SlotsBufferAllocator::AllocateBuffer(SlotsBuffer* next_buffer) {
4357  return new SlotsBuffer(next_buffer);
4358}
4359
4360
4361void SlotsBufferAllocator::DeallocateBuffer(SlotsBuffer* buffer) {
4362  delete buffer;
4363}
4364
4365
4366void SlotsBufferAllocator::DeallocateChain(SlotsBuffer** buffer_address) {
4367  SlotsBuffer* buffer = *buffer_address;
4368  while (buffer != NULL) {
4369    SlotsBuffer* next_buffer = buffer->next();
4370    DeallocateBuffer(buffer);
4371    buffer = next_buffer;
4372  }
4373  *buffer_address = NULL;
4374}
4375
4376
4377} }  // namespace v8::internal
4378