1// Copyright 2012 the V8 project authors. All rights reserved.
2// Redistribution and use in source and binary forms, with or without
3// modification, are permitted provided that the following conditions are
4// met:
5//
6//     * Redistributions of source code must retain the above copyright
7//       notice, this list of conditions and the following disclaimer.
8//     * Redistributions in binary form must reproduce the above
9//       copyright notice, this list of conditions and the following
10//       disclaimer in the documentation and/or other materials provided
11//       with the distribution.
12//     * Neither the name of Google Inc. nor the names of its
13//       contributors may be used to endorse or promote products derived
14//       from this software without specific prior written permission.
15//
16// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27
28#include "v8.h"
29
30#include "code-stubs.h"
31#include "compilation-cache.h"
32#include "cpu-profiler.h"
33#include "deoptimizer.h"
34#include "execution.h"
35#include "gdb-jit.h"
36#include "global-handles.h"
37#include "heap-profiler.h"
38#include "ic-inl.h"
39#include "incremental-marking.h"
40#include "mark-compact.h"
41#include "marking-thread.h"
42#include "objects-visiting.h"
43#include "objects-visiting-inl.h"
44#include "stub-cache.h"
45#include "sweeper-thread.h"
46
47namespace v8 {
48namespace internal {
49
50
51const char* Marking::kWhiteBitPattern = "00";
52const char* Marking::kBlackBitPattern = "10";
53const char* Marking::kGreyBitPattern = "11";
54const char* Marking::kImpossibleBitPattern = "01";
55
56
57// -------------------------------------------------------------------------
58// MarkCompactCollector
59
60MarkCompactCollector::MarkCompactCollector() :  // NOLINT
61#ifdef DEBUG
62      state_(IDLE),
63#endif
64      sweep_precisely_(false),
65      reduce_memory_footprint_(false),
66      abort_incremental_marking_(false),
67      marking_parity_(ODD_MARKING_PARITY),
68      compacting_(false),
69      was_marked_incrementally_(false),
70      sweeping_pending_(false),
71      sequential_sweeping_(false),
72      tracer_(NULL),
73      migration_slots_buffer_(NULL),
74      heap_(NULL),
75      code_flusher_(NULL),
76      encountered_weak_collections_(NULL),
77      code_to_deoptimize_(NULL) { }
78
79#ifdef VERIFY_HEAP
80class VerifyMarkingVisitor: public ObjectVisitor {
81 public:
82  void VisitPointers(Object** start, Object** end) {
83    for (Object** current = start; current < end; current++) {
84      if ((*current)->IsHeapObject()) {
85        HeapObject* object = HeapObject::cast(*current);
86        CHECK(HEAP->mark_compact_collector()->IsMarked(object));
87      }
88    }
89  }
90
91  void VisitEmbeddedPointer(RelocInfo* rinfo) {
92    ASSERT(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT);
93    if (!FLAG_weak_embedded_maps_in_optimized_code || !FLAG_collect_maps ||
94        rinfo->host()->kind() != Code::OPTIMIZED_FUNCTION ||
95        !rinfo->target_object()->IsMap() ||
96        !Map::cast(rinfo->target_object())->CanTransition()) {
97      VisitPointer(rinfo->target_object_address());
98    }
99  }
100};
101
102
103static void VerifyMarking(Address bottom, Address top) {
104  VerifyMarkingVisitor visitor;
105  HeapObject* object;
106  Address next_object_must_be_here_or_later = bottom;
107
108  for (Address current = bottom;
109       current < top;
110       current += kPointerSize) {
111    object = HeapObject::FromAddress(current);
112    if (MarkCompactCollector::IsMarked(object)) {
113      CHECK(current >= next_object_must_be_here_or_later);
114      object->Iterate(&visitor);
115      next_object_must_be_here_or_later = current + object->Size();
116    }
117  }
118}
119
120
121static void VerifyMarking(NewSpace* space) {
122  Address end = space->top();
123  NewSpacePageIterator it(space->bottom(), end);
124  // The bottom position is at the start of its page. Allows us to use
125  // page->area_start() as start of range on all pages.
126  CHECK_EQ(space->bottom(),
127            NewSpacePage::FromAddress(space->bottom())->area_start());
128  while (it.has_next()) {
129    NewSpacePage* page = it.next();
130    Address limit = it.has_next() ? page->area_end() : end;
131    CHECK(limit == end || !page->Contains(end));
132    VerifyMarking(page->area_start(), limit);
133  }
134}
135
136
137static void VerifyMarking(PagedSpace* space) {
138  PageIterator it(space);
139
140  while (it.has_next()) {
141    Page* p = it.next();
142    VerifyMarking(p->area_start(), p->area_end());
143  }
144}
145
146
147static void VerifyMarking(Heap* heap) {
148  VerifyMarking(heap->old_pointer_space());
149  VerifyMarking(heap->old_data_space());
150  VerifyMarking(heap->code_space());
151  VerifyMarking(heap->cell_space());
152  VerifyMarking(heap->property_cell_space());
153  VerifyMarking(heap->map_space());
154  VerifyMarking(heap->new_space());
155
156  VerifyMarkingVisitor visitor;
157
158  LargeObjectIterator it(heap->lo_space());
159  for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
160    if (MarkCompactCollector::IsMarked(obj)) {
161      obj->Iterate(&visitor);
162    }
163  }
164
165  heap->IterateStrongRoots(&visitor, VISIT_ONLY_STRONG);
166}
167
168
169class VerifyEvacuationVisitor: public ObjectVisitor {
170 public:
171  void VisitPointers(Object** start, Object** end) {
172    for (Object** current = start; current < end; current++) {
173      if ((*current)->IsHeapObject()) {
174        HeapObject* object = HeapObject::cast(*current);
175        CHECK(!MarkCompactCollector::IsOnEvacuationCandidate(object));
176      }
177    }
178  }
179};
180
181
182static void VerifyEvacuation(Address bottom, Address top) {
183  VerifyEvacuationVisitor visitor;
184  HeapObject* object;
185  Address next_object_must_be_here_or_later = bottom;
186
187  for (Address current = bottom;
188       current < top;
189       current += kPointerSize) {
190    object = HeapObject::FromAddress(current);
191    if (MarkCompactCollector::IsMarked(object)) {
192      CHECK(current >= next_object_must_be_here_or_later);
193      object->Iterate(&visitor);
194      next_object_must_be_here_or_later = current + object->Size();
195    }
196  }
197}
198
199
200static void VerifyEvacuation(NewSpace* space) {
201  NewSpacePageIterator it(space->bottom(), space->top());
202  VerifyEvacuationVisitor visitor;
203
204  while (it.has_next()) {
205    NewSpacePage* page = it.next();
206    Address current = page->area_start();
207    Address limit = it.has_next() ? page->area_end() : space->top();
208    CHECK(limit == space->top() || !page->Contains(space->top()));
209    while (current < limit) {
210      HeapObject* object = HeapObject::FromAddress(current);
211      object->Iterate(&visitor);
212      current += object->Size();
213    }
214  }
215}
216
217
218static void VerifyEvacuation(PagedSpace* space) {
219  PageIterator it(space);
220
221  while (it.has_next()) {
222    Page* p = it.next();
223    if (p->IsEvacuationCandidate()) continue;
224    VerifyEvacuation(p->area_start(), p->area_end());
225  }
226}
227
228
229static void VerifyEvacuation(Heap* heap) {
230  VerifyEvacuation(heap->old_pointer_space());
231  VerifyEvacuation(heap->old_data_space());
232  VerifyEvacuation(heap->code_space());
233  VerifyEvacuation(heap->cell_space());
234  VerifyEvacuation(heap->property_cell_space());
235  VerifyEvacuation(heap->map_space());
236  VerifyEvacuation(heap->new_space());
237
238  VerifyEvacuationVisitor visitor;
239  heap->IterateStrongRoots(&visitor, VISIT_ALL);
240}
241#endif  // VERIFY_HEAP
242
243
244#ifdef DEBUG
245class VerifyNativeContextSeparationVisitor: public ObjectVisitor {
246 public:
247  VerifyNativeContextSeparationVisitor() : current_native_context_(NULL) {}
248
249  void VisitPointers(Object** start, Object** end) {
250    for (Object** current = start; current < end; current++) {
251      if ((*current)->IsHeapObject()) {
252        HeapObject* object = HeapObject::cast(*current);
253        if (object->IsString()) continue;
254        switch (object->map()->instance_type()) {
255          case JS_FUNCTION_TYPE:
256            CheckContext(JSFunction::cast(object)->context());
257            break;
258          case JS_GLOBAL_PROXY_TYPE:
259            CheckContext(JSGlobalProxy::cast(object)->native_context());
260            break;
261          case JS_GLOBAL_OBJECT_TYPE:
262          case JS_BUILTINS_OBJECT_TYPE:
263            CheckContext(GlobalObject::cast(object)->native_context());
264            break;
265          case JS_ARRAY_TYPE:
266          case JS_DATE_TYPE:
267          case JS_OBJECT_TYPE:
268          case JS_REGEXP_TYPE:
269            VisitPointer(HeapObject::RawField(object, JSObject::kMapOffset));
270            break;
271          case MAP_TYPE:
272            VisitPointer(HeapObject::RawField(object, Map::kPrototypeOffset));
273            VisitPointer(HeapObject::RawField(object, Map::kConstructorOffset));
274            break;
275          case FIXED_ARRAY_TYPE:
276            if (object->IsContext()) {
277              CheckContext(object);
278            } else {
279              FixedArray* array = FixedArray::cast(object);
280              int length = array->length();
281              // Set array length to zero to prevent cycles while iterating
282              // over array bodies, this is easier than intrusive marking.
283              array->set_length(0);
284              array->IterateBody(
285                  FIXED_ARRAY_TYPE, FixedArray::SizeFor(length), this);
286              array->set_length(length);
287            }
288            break;
289          case CELL_TYPE:
290          case JS_PROXY_TYPE:
291          case JS_VALUE_TYPE:
292          case TYPE_FEEDBACK_INFO_TYPE:
293            object->Iterate(this);
294            break;
295          case DECLARED_ACCESSOR_INFO_TYPE:
296          case EXECUTABLE_ACCESSOR_INFO_TYPE:
297          case BYTE_ARRAY_TYPE:
298          case CALL_HANDLER_INFO_TYPE:
299          case CODE_TYPE:
300          case FIXED_DOUBLE_ARRAY_TYPE:
301          case HEAP_NUMBER_TYPE:
302          case INTERCEPTOR_INFO_TYPE:
303          case ODDBALL_TYPE:
304          case SCRIPT_TYPE:
305          case SHARED_FUNCTION_INFO_TYPE:
306            break;
307          default:
308            UNREACHABLE();
309        }
310      }
311    }
312  }
313
314 private:
315  void CheckContext(Object* context) {
316    if (!context->IsContext()) return;
317    Context* native_context = Context::cast(context)->native_context();
318    if (current_native_context_ == NULL) {
319      current_native_context_ = native_context;
320    } else {
321      CHECK_EQ(current_native_context_, native_context);
322    }
323  }
324
325  Context* current_native_context_;
326};
327
328
329static void VerifyNativeContextSeparation(Heap* heap) {
330  HeapObjectIterator it(heap->code_space());
331
332  for (Object* object = it.Next(); object != NULL; object = it.Next()) {
333    VerifyNativeContextSeparationVisitor visitor;
334    Code::cast(object)->CodeIterateBody(&visitor);
335  }
336}
337#endif
338
339
340void MarkCompactCollector::TearDown() {
341  AbortCompaction();
342}
343
344
345void MarkCompactCollector::AddEvacuationCandidate(Page* p) {
346  p->MarkEvacuationCandidate();
347  evacuation_candidates_.Add(p);
348}
349
350
351static void TraceFragmentation(PagedSpace* space) {
352  int number_of_pages = space->CountTotalPages();
353  intptr_t reserved = (number_of_pages * space->AreaSize());
354  intptr_t free = reserved - space->SizeOfObjects();
355  PrintF("[%s]: %d pages, %d (%.1f%%) free\n",
356         AllocationSpaceName(space->identity()),
357         number_of_pages,
358         static_cast<int>(free),
359         static_cast<double>(free) * 100 / reserved);
360}
361
362
363bool MarkCompactCollector::StartCompaction(CompactionMode mode) {
364  if (!compacting_) {
365    ASSERT(evacuation_candidates_.length() == 0);
366
367#ifdef ENABLE_GDB_JIT_INTERFACE
368    // If GDBJIT interface is active disable compaction.
369    if (FLAG_gdbjit) return false;
370#endif
371
372    CollectEvacuationCandidates(heap()->old_pointer_space());
373    CollectEvacuationCandidates(heap()->old_data_space());
374
375    if (FLAG_compact_code_space &&
376        (mode == NON_INCREMENTAL_COMPACTION ||
377         FLAG_incremental_code_compaction)) {
378      CollectEvacuationCandidates(heap()->code_space());
379    } else if (FLAG_trace_fragmentation) {
380      TraceFragmentation(heap()->code_space());
381    }
382
383    if (FLAG_trace_fragmentation) {
384      TraceFragmentation(heap()->map_space());
385      TraceFragmentation(heap()->cell_space());
386      TraceFragmentation(heap()->property_cell_space());
387    }
388
389    heap()->old_pointer_space()->EvictEvacuationCandidatesFromFreeLists();
390    heap()->old_data_space()->EvictEvacuationCandidatesFromFreeLists();
391    heap()->code_space()->EvictEvacuationCandidatesFromFreeLists();
392
393    compacting_ = evacuation_candidates_.length() > 0;
394  }
395
396  return compacting_;
397}
398
399
400void MarkCompactCollector::CollectGarbage() {
401  // Make sure that Prepare() has been called. The individual steps below will
402  // update the state as they proceed.
403  ASSERT(state_ == PREPARE_GC);
404  ASSERT(encountered_weak_collections_ == Smi::FromInt(0));
405
406  MarkLiveObjects();
407  ASSERT(heap_->incremental_marking()->IsStopped());
408
409  if (FLAG_collect_maps) ClearNonLiveReferences();
410
411  ClearWeakCollections();
412
413#ifdef VERIFY_HEAP
414  if (FLAG_verify_heap) {
415    VerifyMarking(heap_);
416  }
417#endif
418
419  SweepSpaces();
420
421  if (!FLAG_collect_maps) ReattachInitialMaps();
422
423#ifdef DEBUG
424  if (FLAG_verify_native_context_separation) {
425    VerifyNativeContextSeparation(heap_);
426  }
427#endif
428
429#ifdef VERIFY_HEAP
430  if (FLAG_collect_maps && FLAG_weak_embedded_maps_in_optimized_code &&
431      heap()->weak_embedded_maps_verification_enabled()) {
432    VerifyWeakEmbeddedMapsInOptimizedCode();
433  }
434  if (FLAG_collect_maps && FLAG_omit_map_checks_for_leaf_maps) {
435    VerifyOmittedMapChecks();
436  }
437#endif
438
439  Finish();
440
441  if (marking_parity_ == EVEN_MARKING_PARITY) {
442    marking_parity_ = ODD_MARKING_PARITY;
443  } else {
444    ASSERT(marking_parity_ == ODD_MARKING_PARITY);
445    marking_parity_ = EVEN_MARKING_PARITY;
446  }
447
448  tracer_ = NULL;
449}
450
451
452#ifdef VERIFY_HEAP
453void MarkCompactCollector::VerifyMarkbitsAreClean(PagedSpace* space) {
454  PageIterator it(space);
455
456  while (it.has_next()) {
457    Page* p = it.next();
458    CHECK(p->markbits()->IsClean());
459    CHECK_EQ(0, p->LiveBytes());
460  }
461}
462
463
464void MarkCompactCollector::VerifyMarkbitsAreClean(NewSpace* space) {
465  NewSpacePageIterator it(space->bottom(), space->top());
466
467  while (it.has_next()) {
468    NewSpacePage* p = it.next();
469    CHECK(p->markbits()->IsClean());
470    CHECK_EQ(0, p->LiveBytes());
471  }
472}
473
474
475void MarkCompactCollector::VerifyMarkbitsAreClean() {
476  VerifyMarkbitsAreClean(heap_->old_pointer_space());
477  VerifyMarkbitsAreClean(heap_->old_data_space());
478  VerifyMarkbitsAreClean(heap_->code_space());
479  VerifyMarkbitsAreClean(heap_->cell_space());
480  VerifyMarkbitsAreClean(heap_->property_cell_space());
481  VerifyMarkbitsAreClean(heap_->map_space());
482  VerifyMarkbitsAreClean(heap_->new_space());
483
484  LargeObjectIterator it(heap_->lo_space());
485  for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
486    MarkBit mark_bit = Marking::MarkBitFrom(obj);
487    CHECK(Marking::IsWhite(mark_bit));
488    CHECK_EQ(0, Page::FromAddress(obj->address())->LiveBytes());
489  }
490}
491
492
493void MarkCompactCollector::VerifyWeakEmbeddedMapsInOptimizedCode() {
494  HeapObjectIterator code_iterator(heap()->code_space());
495  for (HeapObject* obj = code_iterator.Next();
496       obj != NULL;
497       obj = code_iterator.Next()) {
498    Code* code = Code::cast(obj);
499    if (code->kind() != Code::OPTIMIZED_FUNCTION) continue;
500    if (WillBeDeoptimized(code)) continue;
501    code->VerifyEmbeddedMapsDependency();
502  }
503}
504
505
506void MarkCompactCollector::VerifyOmittedMapChecks() {
507  HeapObjectIterator iterator(heap()->map_space());
508  for (HeapObject* obj = iterator.Next();
509       obj != NULL;
510       obj = iterator.Next()) {
511    Map* map = Map::cast(obj);
512    map->VerifyOmittedMapChecks();
513  }
514}
515#endif  // VERIFY_HEAP
516
517
518static void ClearMarkbitsInPagedSpace(PagedSpace* space) {
519  PageIterator it(space);
520
521  while (it.has_next()) {
522    Bitmap::Clear(it.next());
523  }
524}
525
526
527static void ClearMarkbitsInNewSpace(NewSpace* space) {
528  NewSpacePageIterator it(space->ToSpaceStart(), space->ToSpaceEnd());
529
530  while (it.has_next()) {
531    Bitmap::Clear(it.next());
532  }
533}
534
535
536void MarkCompactCollector::ClearMarkbits() {
537  ClearMarkbitsInPagedSpace(heap_->code_space());
538  ClearMarkbitsInPagedSpace(heap_->map_space());
539  ClearMarkbitsInPagedSpace(heap_->old_pointer_space());
540  ClearMarkbitsInPagedSpace(heap_->old_data_space());
541  ClearMarkbitsInPagedSpace(heap_->cell_space());
542  ClearMarkbitsInPagedSpace(heap_->property_cell_space());
543  ClearMarkbitsInNewSpace(heap_->new_space());
544
545  LargeObjectIterator it(heap_->lo_space());
546  for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
547    MarkBit mark_bit = Marking::MarkBitFrom(obj);
548    mark_bit.Clear();
549    mark_bit.Next().Clear();
550    Page::FromAddress(obj->address())->ResetProgressBar();
551    Page::FromAddress(obj->address())->ResetLiveBytes();
552  }
553}
554
555
556void MarkCompactCollector::StartSweeperThreads() {
557  sweeping_pending_ = true;
558  for (int i = 0; i < FLAG_sweeper_threads; i++) {
559    isolate()->sweeper_threads()[i]->StartSweeping();
560  }
561}
562
563
564void MarkCompactCollector::WaitUntilSweepingCompleted() {
565  ASSERT(sweeping_pending_ == true);
566  for (int i = 0; i < FLAG_sweeper_threads; i++) {
567    isolate()->sweeper_threads()[i]->WaitForSweeperThread();
568  }
569  sweeping_pending_ = false;
570  StealMemoryFromSweeperThreads(heap()->paged_space(OLD_DATA_SPACE));
571  StealMemoryFromSweeperThreads(heap()->paged_space(OLD_POINTER_SPACE));
572  heap()->paged_space(OLD_DATA_SPACE)->ResetUnsweptFreeBytes();
573  heap()->paged_space(OLD_POINTER_SPACE)->ResetUnsweptFreeBytes();
574}
575
576
577intptr_t MarkCompactCollector::
578             StealMemoryFromSweeperThreads(PagedSpace* space) {
579  intptr_t freed_bytes = 0;
580  for (int i = 0; i < FLAG_sweeper_threads; i++) {
581    freed_bytes += isolate()->sweeper_threads()[i]->StealMemory(space);
582  }
583  space->AddToAccountingStats(freed_bytes);
584  space->DecrementUnsweptFreeBytes(freed_bytes);
585  return freed_bytes;
586}
587
588
589bool MarkCompactCollector::AreSweeperThreadsActivated() {
590  return isolate()->sweeper_threads() != NULL;
591}
592
593
594bool MarkCompactCollector::IsConcurrentSweepingInProgress() {
595  return sweeping_pending_;
596}
597
598
599void MarkCompactCollector::MarkInParallel() {
600  for (int i = 0; i < FLAG_marking_threads; i++) {
601    isolate()->marking_threads()[i]->StartMarking();
602  }
603}
604
605
606void MarkCompactCollector::WaitUntilMarkingCompleted() {
607  for (int i = 0; i < FLAG_marking_threads; i++) {
608    isolate()->marking_threads()[i]->WaitForMarkingThread();
609  }
610}
611
612
613bool Marking::TransferMark(Address old_start, Address new_start) {
614  // This is only used when resizing an object.
615  ASSERT(MemoryChunk::FromAddress(old_start) ==
616         MemoryChunk::FromAddress(new_start));
617
618  // If the mark doesn't move, we don't check the color of the object.
619  // It doesn't matter whether the object is black, since it hasn't changed
620  // size, so the adjustment to the live data count will be zero anyway.
621  if (old_start == new_start) return false;
622
623  MarkBit new_mark_bit = MarkBitFrom(new_start);
624  MarkBit old_mark_bit = MarkBitFrom(old_start);
625
626#ifdef DEBUG
627  ObjectColor old_color = Color(old_mark_bit);
628#endif
629
630  if (Marking::IsBlack(old_mark_bit)) {
631    old_mark_bit.Clear();
632    ASSERT(IsWhite(old_mark_bit));
633    Marking::MarkBlack(new_mark_bit);
634    return true;
635  } else if (Marking::IsGrey(old_mark_bit)) {
636    ASSERT(heap_->incremental_marking()->IsMarking());
637    old_mark_bit.Clear();
638    old_mark_bit.Next().Clear();
639    ASSERT(IsWhite(old_mark_bit));
640    heap_->incremental_marking()->WhiteToGreyAndPush(
641        HeapObject::FromAddress(new_start), new_mark_bit);
642    heap_->incremental_marking()->RestartIfNotMarking();
643  }
644
645#ifdef DEBUG
646  ObjectColor new_color = Color(new_mark_bit);
647  ASSERT(new_color == old_color);
648#endif
649
650  return false;
651}
652
653
654const char* AllocationSpaceName(AllocationSpace space) {
655  switch (space) {
656    case NEW_SPACE: return "NEW_SPACE";
657    case OLD_POINTER_SPACE: return "OLD_POINTER_SPACE";
658    case OLD_DATA_SPACE: return "OLD_DATA_SPACE";
659    case CODE_SPACE: return "CODE_SPACE";
660    case MAP_SPACE: return "MAP_SPACE";
661    case CELL_SPACE: return "CELL_SPACE";
662    case PROPERTY_CELL_SPACE:
663      return "PROPERTY_CELL_SPACE";
664    case LO_SPACE: return "LO_SPACE";
665    default:
666      UNREACHABLE();
667  }
668
669  return NULL;
670}
671
672
673// Returns zero for pages that have so little fragmentation that it is not
674// worth defragmenting them.  Otherwise a positive integer that gives an
675// estimate of fragmentation on an arbitrary scale.
676static int FreeListFragmentation(PagedSpace* space, Page* p) {
677  // If page was not swept then there are no free list items on it.
678  if (!p->WasSwept()) {
679    if (FLAG_trace_fragmentation) {
680      PrintF("%p [%s]: %d bytes live (unswept)\n",
681             reinterpret_cast<void*>(p),
682             AllocationSpaceName(space->identity()),
683             p->LiveBytes());
684    }
685    return 0;
686  }
687
688  PagedSpace::SizeStats sizes;
689  space->ObtainFreeListStatistics(p, &sizes);
690
691  intptr_t ratio;
692  intptr_t ratio_threshold;
693  intptr_t area_size = space->AreaSize();
694  if (space->identity() == CODE_SPACE) {
695    ratio = (sizes.medium_size_ * 10 + sizes.large_size_ * 2) * 100 /
696        area_size;
697    ratio_threshold = 10;
698  } else {
699    ratio = (sizes.small_size_ * 5 + sizes.medium_size_) * 100 /
700        area_size;
701    ratio_threshold = 15;
702  }
703
704  if (FLAG_trace_fragmentation) {
705    PrintF("%p [%s]: %d (%.2f%%) %d (%.2f%%) %d (%.2f%%) %d (%.2f%%) %s\n",
706           reinterpret_cast<void*>(p),
707           AllocationSpaceName(space->identity()),
708           static_cast<int>(sizes.small_size_),
709           static_cast<double>(sizes.small_size_ * 100) /
710           area_size,
711           static_cast<int>(sizes.medium_size_),
712           static_cast<double>(sizes.medium_size_ * 100) /
713           area_size,
714           static_cast<int>(sizes.large_size_),
715           static_cast<double>(sizes.large_size_ * 100) /
716           area_size,
717           static_cast<int>(sizes.huge_size_),
718           static_cast<double>(sizes.huge_size_ * 100) /
719           area_size,
720           (ratio > ratio_threshold) ? "[fragmented]" : "");
721  }
722
723  if (FLAG_always_compact && sizes.Total() != area_size) {
724    return 1;
725  }
726
727  if (ratio <= ratio_threshold) return 0;  // Not fragmented.
728
729  return static_cast<int>(ratio - ratio_threshold);
730}
731
732
733void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) {
734  ASSERT(space->identity() == OLD_POINTER_SPACE ||
735         space->identity() == OLD_DATA_SPACE ||
736         space->identity() == CODE_SPACE);
737
738  static const int kMaxMaxEvacuationCandidates = 1000;
739  int number_of_pages = space->CountTotalPages();
740  int max_evacuation_candidates =
741      static_cast<int>(sqrt(number_of_pages / 2.0) + 1);
742
743  if (FLAG_stress_compaction || FLAG_always_compact) {
744    max_evacuation_candidates = kMaxMaxEvacuationCandidates;
745  }
746
747  class Candidate {
748   public:
749    Candidate() : fragmentation_(0), page_(NULL) { }
750    Candidate(int f, Page* p) : fragmentation_(f), page_(p) { }
751
752    int fragmentation() { return fragmentation_; }
753    Page* page() { return page_; }
754
755   private:
756    int fragmentation_;
757    Page* page_;
758  };
759
760  enum CompactionMode {
761    COMPACT_FREE_LISTS,
762    REDUCE_MEMORY_FOOTPRINT
763  };
764
765  CompactionMode mode = COMPACT_FREE_LISTS;
766
767  intptr_t reserved = number_of_pages * space->AreaSize();
768  intptr_t over_reserved = reserved - space->SizeOfObjects();
769  static const intptr_t kFreenessThreshold = 50;
770
771  if (reduce_memory_footprint_ && over_reserved >= space->AreaSize()) {
772    // If reduction of memory footprint was requested, we are aggressive
773    // about choosing pages to free.  We expect that half-empty pages
774    // are easier to compact so slightly bump the limit.
775    mode = REDUCE_MEMORY_FOOTPRINT;
776    max_evacuation_candidates += 2;
777  }
778
779
780  if (over_reserved > reserved / 3 && over_reserved >= 2 * space->AreaSize()) {
781    // If over-usage is very high (more than a third of the space), we
782    // try to free all mostly empty pages.  We expect that almost empty
783    // pages are even easier to compact so bump the limit even more.
784    mode = REDUCE_MEMORY_FOOTPRINT;
785    max_evacuation_candidates *= 2;
786  }
787
788  if (FLAG_trace_fragmentation && mode == REDUCE_MEMORY_FOOTPRINT) {
789    PrintF("Estimated over reserved memory: %.1f / %.1f MB (threshold %d), "
790           "evacuation candidate limit: %d\n",
791           static_cast<double>(over_reserved) / MB,
792           static_cast<double>(reserved) / MB,
793           static_cast<int>(kFreenessThreshold),
794           max_evacuation_candidates);
795  }
796
797  intptr_t estimated_release = 0;
798
799  Candidate candidates[kMaxMaxEvacuationCandidates];
800
801  max_evacuation_candidates =
802      Min(kMaxMaxEvacuationCandidates, max_evacuation_candidates);
803
804  int count = 0;
805  int fragmentation = 0;
806  Candidate* least = NULL;
807
808  PageIterator it(space);
809  if (it.has_next()) it.next();  // Never compact the first page.
810
811  while (it.has_next()) {
812    Page* p = it.next();
813    p->ClearEvacuationCandidate();
814
815    if (FLAG_stress_compaction) {
816      unsigned int counter = space->heap()->ms_count();
817      uintptr_t page_number = reinterpret_cast<uintptr_t>(p) >> kPageSizeBits;
818      if ((counter & 1) == (page_number & 1)) fragmentation = 1;
819    } else if (mode == REDUCE_MEMORY_FOOTPRINT) {
820      // Don't try to release too many pages.
821      if (estimated_release >= over_reserved) {
822        continue;
823      }
824
825      intptr_t free_bytes = 0;
826
827      if (!p->WasSwept()) {
828        free_bytes = (p->area_size() - p->LiveBytes());
829      } else {
830        PagedSpace::SizeStats sizes;
831        space->ObtainFreeListStatistics(p, &sizes);
832        free_bytes = sizes.Total();
833      }
834
835      int free_pct = static_cast<int>(free_bytes * 100) / p->area_size();
836
837      if (free_pct >= kFreenessThreshold) {
838        estimated_release += free_bytes;
839        fragmentation = free_pct;
840      } else {
841        fragmentation = 0;
842      }
843
844      if (FLAG_trace_fragmentation) {
845        PrintF("%p [%s]: %d (%.2f%%) free %s\n",
846               reinterpret_cast<void*>(p),
847               AllocationSpaceName(space->identity()),
848               static_cast<int>(free_bytes),
849               static_cast<double>(free_bytes * 100) / p->area_size(),
850               (fragmentation > 0) ? "[fragmented]" : "");
851      }
852    } else {
853      fragmentation = FreeListFragmentation(space, p);
854    }
855
856    if (fragmentation != 0) {
857      if (count < max_evacuation_candidates) {
858        candidates[count++] = Candidate(fragmentation, p);
859      } else {
860        if (least == NULL) {
861          for (int i = 0; i < max_evacuation_candidates; i++) {
862            if (least == NULL ||
863                candidates[i].fragmentation() < least->fragmentation()) {
864              least = candidates + i;
865            }
866          }
867        }
868        if (least->fragmentation() < fragmentation) {
869          *least = Candidate(fragmentation, p);
870          least = NULL;
871        }
872      }
873    }
874  }
875
876  for (int i = 0; i < count; i++) {
877    AddEvacuationCandidate(candidates[i].page());
878  }
879
880  if (count > 0 && FLAG_trace_fragmentation) {
881    PrintF("Collected %d evacuation candidates for space %s\n",
882           count,
883           AllocationSpaceName(space->identity()));
884  }
885}
886
887
888void MarkCompactCollector::AbortCompaction() {
889  if (compacting_) {
890    int npages = evacuation_candidates_.length();
891    for (int i = 0; i < npages; i++) {
892      Page* p = evacuation_candidates_[i];
893      slots_buffer_allocator_.DeallocateChain(p->slots_buffer_address());
894      p->ClearEvacuationCandidate();
895      p->ClearFlag(MemoryChunk::RESCAN_ON_EVACUATION);
896    }
897    compacting_ = false;
898    evacuation_candidates_.Rewind(0);
899    invalidated_code_.Rewind(0);
900  }
901  ASSERT_EQ(0, evacuation_candidates_.length());
902}
903
904
905void MarkCompactCollector::Prepare(GCTracer* tracer) {
906  was_marked_incrementally_ = heap()->incremental_marking()->IsMarking();
907
908  // Rather than passing the tracer around we stash it in a static member
909  // variable.
910  tracer_ = tracer;
911
912#ifdef DEBUG
913  ASSERT(state_ == IDLE);
914  state_ = PREPARE_GC;
915#endif
916
917  ASSERT(!FLAG_never_compact || !FLAG_always_compact);
918
919  if (IsConcurrentSweepingInProgress()) {
920    // Instead of waiting we could also abort the sweeper threads here.
921    WaitUntilSweepingCompleted();
922  }
923
924  // Clear marking bits if incremental marking is aborted.
925  if (was_marked_incrementally_ && abort_incremental_marking_) {
926    heap()->incremental_marking()->Abort();
927    ClearMarkbits();
928    AbortCompaction();
929    was_marked_incrementally_ = false;
930  }
931
932  // Don't start compaction if we are in the middle of incremental
933  // marking cycle. We did not collect any slots.
934  if (!FLAG_never_compact && !was_marked_incrementally_) {
935    StartCompaction(NON_INCREMENTAL_COMPACTION);
936  }
937
938  PagedSpaces spaces(heap());
939  for (PagedSpace* space = spaces.next();
940       space != NULL;
941       space = spaces.next()) {
942    space->PrepareForMarkCompact();
943  }
944
945#ifdef VERIFY_HEAP
946  if (!was_marked_incrementally_ && FLAG_verify_heap) {
947    VerifyMarkbitsAreClean();
948  }
949#endif
950}
951
952
953void MarkCompactCollector::Finish() {
954#ifdef DEBUG
955  ASSERT(state_ == SWEEP_SPACES || state_ == RELOCATE_OBJECTS);
956  state_ = IDLE;
957#endif
958  // The stub cache is not traversed during GC; clear the cache to
959  // force lazy re-initialization of it. This must be done after the
960  // GC, because it relies on the new address of certain old space
961  // objects (empty string, illegal builtin).
962  isolate()->stub_cache()->Clear();
963
964  if (code_to_deoptimize_ != Smi::FromInt(0)) {
965    // Convert the linked list of Code objects into a ZoneList.
966    Zone zone(isolate());
967    ZoneList<Code*> codes(4, &zone);
968
969    Object *list = code_to_deoptimize_;
970    while (list->IsCode()) {
971      Code *code = Code::cast(list);
972      list = code->code_to_deoptimize_link();
973      codes.Add(code, &zone);
974      // Destroy the link and don't ever try to deoptimize this code again.
975      code->set_code_to_deoptimize_link(Smi::FromInt(0));
976    }
977    code_to_deoptimize_ = Smi::FromInt(0);
978
979    Deoptimizer::DeoptimizeCodeList(isolate(), &codes);
980  }
981}
982
983
984// -------------------------------------------------------------------------
985// Phase 1: tracing and marking live objects.
986//   before: all objects are in normal state.
987//   after: a live object's map pointer is marked as '00'.
988
989// Marking all live objects in the heap as part of mark-sweep or mark-compact
990// collection.  Before marking, all objects are in their normal state.  After
991// marking, live objects' map pointers are marked indicating that the object
992// has been found reachable.
993//
994// The marking algorithm is a (mostly) depth-first (because of possible stack
995// overflow) traversal of the graph of objects reachable from the roots.  It
996// uses an explicit stack of pointers rather than recursion.  The young
997// generation's inactive ('from') space is used as a marking stack.  The
998// objects in the marking stack are the ones that have been reached and marked
999// but their children have not yet been visited.
1000//
1001// The marking stack can overflow during traversal.  In that case, we set an
1002// overflow flag.  When the overflow flag is set, we continue marking objects
1003// reachable from the objects on the marking stack, but no longer push them on
1004// the marking stack.  Instead, we mark them as both marked and overflowed.
1005// When the stack is in the overflowed state, objects marked as overflowed
1006// have been reached and marked but their children have not been visited yet.
1007// After emptying the marking stack, we clear the overflow flag and traverse
1008// the heap looking for objects marked as overflowed, push them on the stack,
1009// and continue with marking.  This process repeats until all reachable
1010// objects have been marked.
1011
1012void CodeFlusher::ProcessJSFunctionCandidates() {
1013  Code* lazy_compile = isolate_->builtins()->builtin(Builtins::kLazyCompile);
1014  Object* undefined = isolate_->heap()->undefined_value();
1015
1016  JSFunction* candidate = jsfunction_candidates_head_;
1017  JSFunction* next_candidate;
1018  while (candidate != NULL) {
1019    next_candidate = GetNextCandidate(candidate);
1020    ClearNextCandidate(candidate, undefined);
1021
1022    SharedFunctionInfo* shared = candidate->shared();
1023
1024    Code* code = shared->code();
1025    MarkBit code_mark = Marking::MarkBitFrom(code);
1026    if (!code_mark.Get()) {
1027      if (FLAG_trace_code_flushing && shared->is_compiled()) {
1028        PrintF("[code-flushing clears: ");
1029        shared->ShortPrint();
1030        PrintF(" - age: %d]\n", code->GetAge());
1031      }
1032      shared->set_code(lazy_compile);
1033      candidate->set_code(lazy_compile);
1034    } else {
1035      candidate->set_code(code);
1036    }
1037
1038    // We are in the middle of a GC cycle so the write barrier in the code
1039    // setter did not record the slot update and we have to do that manually.
1040    Address slot = candidate->address() + JSFunction::kCodeEntryOffset;
1041    Code* target = Code::cast(Code::GetObjectFromEntryAddress(slot));
1042    isolate_->heap()->mark_compact_collector()->
1043        RecordCodeEntrySlot(slot, target);
1044
1045    Object** shared_code_slot =
1046        HeapObject::RawField(shared, SharedFunctionInfo::kCodeOffset);
1047    isolate_->heap()->mark_compact_collector()->
1048        RecordSlot(shared_code_slot, shared_code_slot, *shared_code_slot);
1049
1050    candidate = next_candidate;
1051  }
1052
1053  jsfunction_candidates_head_ = NULL;
1054}
1055
1056
1057void CodeFlusher::ProcessSharedFunctionInfoCandidates() {
1058  Code* lazy_compile = isolate_->builtins()->builtin(Builtins::kLazyCompile);
1059
1060  SharedFunctionInfo* candidate = shared_function_info_candidates_head_;
1061  SharedFunctionInfo* next_candidate;
1062  while (candidate != NULL) {
1063    next_candidate = GetNextCandidate(candidate);
1064    ClearNextCandidate(candidate);
1065
1066    Code* code = candidate->code();
1067    MarkBit code_mark = Marking::MarkBitFrom(code);
1068    if (!code_mark.Get()) {
1069      if (FLAG_trace_code_flushing && candidate->is_compiled()) {
1070        PrintF("[code-flushing clears: ");
1071        candidate->ShortPrint();
1072        PrintF(" - age: %d]\n", code->GetAge());
1073      }
1074      candidate->set_code(lazy_compile);
1075    }
1076
1077    Object** code_slot =
1078        HeapObject::RawField(candidate, SharedFunctionInfo::kCodeOffset);
1079    isolate_->heap()->mark_compact_collector()->
1080        RecordSlot(code_slot, code_slot, *code_slot);
1081
1082    candidate = next_candidate;
1083  }
1084
1085  shared_function_info_candidates_head_ = NULL;
1086}
1087
1088
1089void CodeFlusher::ProcessOptimizedCodeMaps() {
1090  static const int kEntriesStart = SharedFunctionInfo::kEntriesStart;
1091  static const int kEntryLength = SharedFunctionInfo::kEntryLength;
1092  static const int kContextOffset = 0;
1093  static const int kCodeOffset = 1;
1094  static const int kLiteralsOffset = 2;
1095  STATIC_ASSERT(kEntryLength == 3);
1096
1097  SharedFunctionInfo* holder = optimized_code_map_holder_head_;
1098  SharedFunctionInfo* next_holder;
1099  while (holder != NULL) {
1100    next_holder = GetNextCodeMap(holder);
1101    ClearNextCodeMap(holder);
1102
1103    FixedArray* code_map = FixedArray::cast(holder->optimized_code_map());
1104    int new_length = kEntriesStart;
1105    int old_length = code_map->length();
1106    for (int i = kEntriesStart; i < old_length; i += kEntryLength) {
1107      Code* code = Code::cast(code_map->get(i + kCodeOffset));
1108      MarkBit code_mark = Marking::MarkBitFrom(code);
1109      if (!code_mark.Get()) {
1110        continue;
1111      }
1112
1113      // Update and record the context slot in the optimized code map.
1114      Object** context_slot = HeapObject::RawField(code_map,
1115          FixedArray::OffsetOfElementAt(new_length));
1116      code_map->set(new_length++, code_map->get(i + kContextOffset));
1117      ASSERT(Marking::IsBlack(
1118          Marking::MarkBitFrom(HeapObject::cast(*context_slot))));
1119      isolate_->heap()->mark_compact_collector()->
1120          RecordSlot(context_slot, context_slot, *context_slot);
1121
1122      // Update and record the code slot in the optimized code map.
1123      Object** code_slot = HeapObject::RawField(code_map,
1124          FixedArray::OffsetOfElementAt(new_length));
1125      code_map->set(new_length++, code_map->get(i + kCodeOffset));
1126      ASSERT(Marking::IsBlack(
1127          Marking::MarkBitFrom(HeapObject::cast(*code_slot))));
1128      isolate_->heap()->mark_compact_collector()->
1129          RecordSlot(code_slot, code_slot, *code_slot);
1130
1131      // Update and record the literals slot in the optimized code map.
1132      Object** literals_slot = HeapObject::RawField(code_map,
1133          FixedArray::OffsetOfElementAt(new_length));
1134      code_map->set(new_length++, code_map->get(i + kLiteralsOffset));
1135      ASSERT(Marking::IsBlack(
1136          Marking::MarkBitFrom(HeapObject::cast(*literals_slot))));
1137      isolate_->heap()->mark_compact_collector()->
1138          RecordSlot(literals_slot, literals_slot, *literals_slot);
1139    }
1140
1141    // Trim the optimized code map if entries have been removed.
1142    if (new_length < old_length) {
1143      holder->TrimOptimizedCodeMap(old_length - new_length);
1144    }
1145
1146    holder = next_holder;
1147  }
1148
1149  optimized_code_map_holder_head_ = NULL;
1150}
1151
1152
1153void CodeFlusher::EvictCandidate(SharedFunctionInfo* shared_info) {
1154  // Make sure previous flushing decisions are revisited.
1155  isolate_->heap()->incremental_marking()->RecordWrites(shared_info);
1156
1157  if (FLAG_trace_code_flushing) {
1158    PrintF("[code-flushing abandons function-info: ");
1159    shared_info->ShortPrint();
1160    PrintF("]\n");
1161  }
1162
1163  SharedFunctionInfo* candidate = shared_function_info_candidates_head_;
1164  SharedFunctionInfo* next_candidate;
1165  if (candidate == shared_info) {
1166    next_candidate = GetNextCandidate(shared_info);
1167    shared_function_info_candidates_head_ = next_candidate;
1168    ClearNextCandidate(shared_info);
1169  } else {
1170    while (candidate != NULL) {
1171      next_candidate = GetNextCandidate(candidate);
1172
1173      if (next_candidate == shared_info) {
1174        next_candidate = GetNextCandidate(shared_info);
1175        SetNextCandidate(candidate, next_candidate);
1176        ClearNextCandidate(shared_info);
1177        break;
1178      }
1179
1180      candidate = next_candidate;
1181    }
1182  }
1183}
1184
1185
1186void CodeFlusher::EvictCandidate(JSFunction* function) {
1187  ASSERT(!function->next_function_link()->IsUndefined());
1188  Object* undefined = isolate_->heap()->undefined_value();
1189
1190  // Make sure previous flushing decisions are revisited.
1191  isolate_->heap()->incremental_marking()->RecordWrites(function);
1192  isolate_->heap()->incremental_marking()->RecordWrites(function->shared());
1193
1194  if (FLAG_trace_code_flushing) {
1195    PrintF("[code-flushing abandons closure: ");
1196    function->shared()->ShortPrint();
1197    PrintF("]\n");
1198  }
1199
1200  JSFunction* candidate = jsfunction_candidates_head_;
1201  JSFunction* next_candidate;
1202  if (candidate == function) {
1203    next_candidate = GetNextCandidate(function);
1204    jsfunction_candidates_head_ = next_candidate;
1205    ClearNextCandidate(function, undefined);
1206  } else {
1207    while (candidate != NULL) {
1208      next_candidate = GetNextCandidate(candidate);
1209
1210      if (next_candidate == function) {
1211        next_candidate = GetNextCandidate(function);
1212        SetNextCandidate(candidate, next_candidate);
1213        ClearNextCandidate(function, undefined);
1214        break;
1215      }
1216
1217      candidate = next_candidate;
1218    }
1219  }
1220}
1221
1222
1223void CodeFlusher::EvictOptimizedCodeMap(SharedFunctionInfo* code_map_holder) {
1224  ASSERT(!FixedArray::cast(code_map_holder->optimized_code_map())->
1225         get(SharedFunctionInfo::kNextMapIndex)->IsUndefined());
1226
1227  // Make sure previous flushing decisions are revisited.
1228  isolate_->heap()->incremental_marking()->RecordWrites(code_map_holder);
1229
1230  if (FLAG_trace_code_flushing) {
1231    PrintF("[code-flushing abandons code-map: ");
1232    code_map_holder->ShortPrint();
1233    PrintF("]\n");
1234  }
1235
1236  SharedFunctionInfo* holder = optimized_code_map_holder_head_;
1237  SharedFunctionInfo* next_holder;
1238  if (holder == code_map_holder) {
1239    next_holder = GetNextCodeMap(code_map_holder);
1240    optimized_code_map_holder_head_ = next_holder;
1241    ClearNextCodeMap(code_map_holder);
1242  } else {
1243    while (holder != NULL) {
1244      next_holder = GetNextCodeMap(holder);
1245
1246      if (next_holder == code_map_holder) {
1247        next_holder = GetNextCodeMap(code_map_holder);
1248        SetNextCodeMap(holder, next_holder);
1249        ClearNextCodeMap(code_map_holder);
1250        break;
1251      }
1252
1253      holder = next_holder;
1254    }
1255  }
1256}
1257
1258
1259void CodeFlusher::EvictJSFunctionCandidates() {
1260  JSFunction* candidate = jsfunction_candidates_head_;
1261  JSFunction* next_candidate;
1262  while (candidate != NULL) {
1263    next_candidate = GetNextCandidate(candidate);
1264    EvictCandidate(candidate);
1265    candidate = next_candidate;
1266  }
1267  ASSERT(jsfunction_candidates_head_ == NULL);
1268}
1269
1270
1271void CodeFlusher::EvictSharedFunctionInfoCandidates() {
1272  SharedFunctionInfo* candidate = shared_function_info_candidates_head_;
1273  SharedFunctionInfo* next_candidate;
1274  while (candidate != NULL) {
1275    next_candidate = GetNextCandidate(candidate);
1276    EvictCandidate(candidate);
1277    candidate = next_candidate;
1278  }
1279  ASSERT(shared_function_info_candidates_head_ == NULL);
1280}
1281
1282
1283void CodeFlusher::EvictOptimizedCodeMaps() {
1284  SharedFunctionInfo* holder = optimized_code_map_holder_head_;
1285  SharedFunctionInfo* next_holder;
1286  while (holder != NULL) {
1287    next_holder = GetNextCodeMap(holder);
1288    EvictOptimizedCodeMap(holder);
1289    holder = next_holder;
1290  }
1291  ASSERT(optimized_code_map_holder_head_ == NULL);
1292}
1293
1294
1295void CodeFlusher::IteratePointersToFromSpace(ObjectVisitor* v) {
1296  Heap* heap = isolate_->heap();
1297
1298  JSFunction** slot = &jsfunction_candidates_head_;
1299  JSFunction* candidate = jsfunction_candidates_head_;
1300  while (candidate != NULL) {
1301    if (heap->InFromSpace(candidate)) {
1302      v->VisitPointer(reinterpret_cast<Object**>(slot));
1303    }
1304    candidate = GetNextCandidate(*slot);
1305    slot = GetNextCandidateSlot(*slot);
1306  }
1307}
1308
1309
1310MarkCompactCollector::~MarkCompactCollector() {
1311  if (code_flusher_ != NULL) {
1312    delete code_flusher_;
1313    code_flusher_ = NULL;
1314  }
1315}
1316
1317
1318static inline HeapObject* ShortCircuitConsString(Object** p) {
1319  // Optimization: If the heap object pointed to by p is a non-internalized
1320  // cons string whose right substring is HEAP->empty_string, update
1321  // it in place to its left substring.  Return the updated value.
1322  //
1323  // Here we assume that if we change *p, we replace it with a heap object
1324  // (i.e., the left substring of a cons string is always a heap object).
1325  //
1326  // The check performed is:
1327  //   object->IsConsString() && !object->IsInternalizedString() &&
1328  //   (ConsString::cast(object)->second() == HEAP->empty_string())
1329  // except the maps for the object and its possible substrings might be
1330  // marked.
1331  HeapObject* object = HeapObject::cast(*p);
1332  if (!FLAG_clever_optimizations) return object;
1333  Map* map = object->map();
1334  InstanceType type = map->instance_type();
1335  if ((type & kShortcutTypeMask) != kShortcutTypeTag) return object;
1336
1337  Object* second = reinterpret_cast<ConsString*>(object)->second();
1338  Heap* heap = map->GetHeap();
1339  if (second != heap->empty_string()) {
1340    return object;
1341  }
1342
1343  // Since we don't have the object's start, it is impossible to update the
1344  // page dirty marks. Therefore, we only replace the string with its left
1345  // substring when page dirty marks do not change.
1346  Object* first = reinterpret_cast<ConsString*>(object)->first();
1347  if (!heap->InNewSpace(object) && heap->InNewSpace(first)) return object;
1348
1349  *p = first;
1350  return HeapObject::cast(first);
1351}
1352
1353
1354class MarkCompactMarkingVisitor
1355    : public StaticMarkingVisitor<MarkCompactMarkingVisitor> {
1356 public:
1357  static void ObjectStatsVisitBase(StaticVisitorBase::VisitorId id,
1358                                   Map* map, HeapObject* obj);
1359
1360  static void ObjectStatsCountFixedArray(
1361      FixedArrayBase* fixed_array,
1362      FixedArraySubInstanceType fast_type,
1363      FixedArraySubInstanceType dictionary_type);
1364
1365  template<MarkCompactMarkingVisitor::VisitorId id>
1366  class ObjectStatsTracker {
1367   public:
1368    static inline void Visit(Map* map, HeapObject* obj);
1369  };
1370
1371  static void Initialize();
1372
1373  INLINE(static void VisitPointer(Heap* heap, Object** p)) {
1374    MarkObjectByPointer(heap->mark_compact_collector(), p, p);
1375  }
1376
1377  INLINE(static void VisitPointers(Heap* heap, Object** start, Object** end)) {
1378    // Mark all objects pointed to in [start, end).
1379    const int kMinRangeForMarkingRecursion = 64;
1380    if (end - start >= kMinRangeForMarkingRecursion) {
1381      if (VisitUnmarkedObjects(heap, start, end)) return;
1382      // We are close to a stack overflow, so just mark the objects.
1383    }
1384    MarkCompactCollector* collector = heap->mark_compact_collector();
1385    for (Object** p = start; p < end; p++) {
1386      MarkObjectByPointer(collector, start, p);
1387    }
1388  }
1389
1390  // Marks the object black and pushes it on the marking stack.
1391  INLINE(static void MarkObject(Heap* heap, HeapObject* object)) {
1392    MarkBit mark = Marking::MarkBitFrom(object);
1393    heap->mark_compact_collector()->MarkObject(object, mark);
1394  }
1395
1396  // Marks the object black without pushing it on the marking stack.
1397  // Returns true if object needed marking and false otherwise.
1398  INLINE(static bool MarkObjectWithoutPush(Heap* heap, HeapObject* object)) {
1399    MarkBit mark_bit = Marking::MarkBitFrom(object);
1400    if (!mark_bit.Get()) {
1401      heap->mark_compact_collector()->SetMark(object, mark_bit);
1402      return true;
1403    }
1404    return false;
1405  }
1406
1407  // Mark object pointed to by p.
1408  INLINE(static void MarkObjectByPointer(MarkCompactCollector* collector,
1409                                         Object** anchor_slot,
1410                                         Object** p)) {
1411    if (!(*p)->IsHeapObject()) return;
1412    HeapObject* object = ShortCircuitConsString(p);
1413    collector->RecordSlot(anchor_slot, p, object);
1414    MarkBit mark = Marking::MarkBitFrom(object);
1415    collector->MarkObject(object, mark);
1416  }
1417
1418
1419  // Visit an unmarked object.
1420  INLINE(static void VisitUnmarkedObject(MarkCompactCollector* collector,
1421                                         HeapObject* obj)) {
1422#ifdef DEBUG
1423    ASSERT(Isolate::Current()->heap()->Contains(obj));
1424    ASSERT(!HEAP->mark_compact_collector()->IsMarked(obj));
1425#endif
1426    Map* map = obj->map();
1427    Heap* heap = obj->GetHeap();
1428    MarkBit mark = Marking::MarkBitFrom(obj);
1429    heap->mark_compact_collector()->SetMark(obj, mark);
1430    // Mark the map pointer and the body.
1431    MarkBit map_mark = Marking::MarkBitFrom(map);
1432    heap->mark_compact_collector()->MarkObject(map, map_mark);
1433    IterateBody(map, obj);
1434  }
1435
1436  // Visit all unmarked objects pointed to by [start, end).
1437  // Returns false if the operation fails (lack of stack space).
1438  INLINE(static bool VisitUnmarkedObjects(Heap* heap,
1439                                          Object** start,
1440                                          Object** end)) {
1441    // Return false is we are close to the stack limit.
1442    StackLimitCheck check(heap->isolate());
1443    if (check.HasOverflowed()) return false;
1444
1445    MarkCompactCollector* collector = heap->mark_compact_collector();
1446    // Visit the unmarked objects.
1447    for (Object** p = start; p < end; p++) {
1448      Object* o = *p;
1449      if (!o->IsHeapObject()) continue;
1450      collector->RecordSlot(start, p, o);
1451      HeapObject* obj = HeapObject::cast(o);
1452      MarkBit mark = Marking::MarkBitFrom(obj);
1453      if (mark.Get()) continue;
1454      VisitUnmarkedObject(collector, obj);
1455    }
1456    return true;
1457  }
1458
1459  INLINE(static void BeforeVisitingSharedFunctionInfo(HeapObject* object)) {
1460    SharedFunctionInfo* shared = SharedFunctionInfo::cast(object);
1461    shared->BeforeVisitingPointers();
1462  }
1463
1464  static void VisitWeakCollection(Map* map, HeapObject* object) {
1465    MarkCompactCollector* collector = map->GetHeap()->mark_compact_collector();
1466    JSWeakCollection* weak_collection =
1467        reinterpret_cast<JSWeakCollection*>(object);
1468
1469    // Enqueue weak map in linked list of encountered weak maps.
1470    if (weak_collection->next() == Smi::FromInt(0)) {
1471      weak_collection->set_next(collector->encountered_weak_collections());
1472      collector->set_encountered_weak_collections(weak_collection);
1473    }
1474
1475    // Skip visiting the backing hash table containing the mappings.
1476    int object_size = JSWeakCollection::BodyDescriptor::SizeOf(map, object);
1477    BodyVisitorBase<MarkCompactMarkingVisitor>::IteratePointers(
1478        map->GetHeap(),
1479        object,
1480        JSWeakCollection::BodyDescriptor::kStartOffset,
1481        JSWeakCollection::kTableOffset);
1482    BodyVisitorBase<MarkCompactMarkingVisitor>::IteratePointers(
1483        map->GetHeap(),
1484        object,
1485        JSWeakCollection::kTableOffset + kPointerSize,
1486        object_size);
1487
1488    // Mark the backing hash table without pushing it on the marking stack.
1489    Object* table_object = weak_collection->table();
1490    if (!table_object->IsHashTable()) return;
1491    ObjectHashTable* table = ObjectHashTable::cast(table_object);
1492    Object** table_slot =
1493        HeapObject::RawField(weak_collection, JSWeakCollection::kTableOffset);
1494    MarkBit table_mark = Marking::MarkBitFrom(table);
1495    collector->RecordSlot(table_slot, table_slot, table);
1496    if (!table_mark.Get()) collector->SetMark(table, table_mark);
1497    // Recording the map slot can be skipped, because maps are not compacted.
1498    collector->MarkObject(table->map(), Marking::MarkBitFrom(table->map()));
1499    ASSERT(MarkCompactCollector::IsMarked(table->map()));
1500  }
1501
1502 private:
1503  template<int id>
1504  static inline void TrackObjectStatsAndVisit(Map* map, HeapObject* obj);
1505
1506  // Code flushing support.
1507
1508  static const int kRegExpCodeThreshold = 5;
1509
1510  static void UpdateRegExpCodeAgeAndFlush(Heap* heap,
1511                                          JSRegExp* re,
1512                                          bool is_ascii) {
1513    // Make sure that the fixed array is in fact initialized on the RegExp.
1514    // We could potentially trigger a GC when initializing the RegExp.
1515    if (HeapObject::cast(re->data())->map()->instance_type() !=
1516            FIXED_ARRAY_TYPE) return;
1517
1518    // Make sure this is a RegExp that actually contains code.
1519    if (re->TypeTag() != JSRegExp::IRREGEXP) return;
1520
1521    Object* code = re->DataAt(JSRegExp::code_index(is_ascii));
1522    if (!code->IsSmi() &&
1523        HeapObject::cast(code)->map()->instance_type() == CODE_TYPE) {
1524      // Save a copy that can be reinstated if we need the code again.
1525      re->SetDataAt(JSRegExp::saved_code_index(is_ascii), code);
1526
1527      // Saving a copy might create a pointer into compaction candidate
1528      // that was not observed by marker.  This might happen if JSRegExp data
1529      // was marked through the compilation cache before marker reached JSRegExp
1530      // object.
1531      FixedArray* data = FixedArray::cast(re->data());
1532      Object** slot = data->data_start() + JSRegExp::saved_code_index(is_ascii);
1533      heap->mark_compact_collector()->
1534          RecordSlot(slot, slot, code);
1535
1536      // Set a number in the 0-255 range to guarantee no smi overflow.
1537      re->SetDataAt(JSRegExp::code_index(is_ascii),
1538                    Smi::FromInt(heap->sweep_generation() & 0xff));
1539    } else if (code->IsSmi()) {
1540      int value = Smi::cast(code)->value();
1541      // The regexp has not been compiled yet or there was a compilation error.
1542      if (value == JSRegExp::kUninitializedValue ||
1543          value == JSRegExp::kCompilationErrorValue) {
1544        return;
1545      }
1546
1547      // Check if we should flush now.
1548      if (value == ((heap->sweep_generation() - kRegExpCodeThreshold) & 0xff)) {
1549        re->SetDataAt(JSRegExp::code_index(is_ascii),
1550                      Smi::FromInt(JSRegExp::kUninitializedValue));
1551        re->SetDataAt(JSRegExp::saved_code_index(is_ascii),
1552                      Smi::FromInt(JSRegExp::kUninitializedValue));
1553      }
1554    }
1555  }
1556
1557
1558  // Works by setting the current sweep_generation (as a smi) in the
1559  // code object place in the data array of the RegExp and keeps a copy
1560  // around that can be reinstated if we reuse the RegExp before flushing.
1561  // If we did not use the code for kRegExpCodeThreshold mark sweep GCs
1562  // we flush the code.
1563  static void VisitRegExpAndFlushCode(Map* map, HeapObject* object) {
1564    Heap* heap = map->GetHeap();
1565    MarkCompactCollector* collector = heap->mark_compact_collector();
1566    if (!collector->is_code_flushing_enabled()) {
1567      VisitJSRegExp(map, object);
1568      return;
1569    }
1570    JSRegExp* re = reinterpret_cast<JSRegExp*>(object);
1571    // Flush code or set age on both ASCII and two byte code.
1572    UpdateRegExpCodeAgeAndFlush(heap, re, true);
1573    UpdateRegExpCodeAgeAndFlush(heap, re, false);
1574    // Visit the fields of the RegExp, including the updated FixedArray.
1575    VisitJSRegExp(map, object);
1576  }
1577
1578  static VisitorDispatchTable<Callback> non_count_table_;
1579};
1580
1581
1582void MarkCompactMarkingVisitor::ObjectStatsCountFixedArray(
1583    FixedArrayBase* fixed_array,
1584    FixedArraySubInstanceType fast_type,
1585    FixedArraySubInstanceType dictionary_type) {
1586  Heap* heap = fixed_array->map()->GetHeap();
1587  if (fixed_array->map() != heap->fixed_cow_array_map() &&
1588      fixed_array->map() != heap->fixed_double_array_map() &&
1589      fixed_array != heap->empty_fixed_array()) {
1590    if (fixed_array->IsDictionary()) {
1591      heap->RecordObjectStats(FIXED_ARRAY_TYPE,
1592                              dictionary_type,
1593                              fixed_array->Size());
1594    } else {
1595      heap->RecordObjectStats(FIXED_ARRAY_TYPE,
1596                              fast_type,
1597                              fixed_array->Size());
1598    }
1599  }
1600}
1601
1602
1603void MarkCompactMarkingVisitor::ObjectStatsVisitBase(
1604    MarkCompactMarkingVisitor::VisitorId id, Map* map, HeapObject* obj) {
1605  Heap* heap = map->GetHeap();
1606  int object_size = obj->Size();
1607  heap->RecordObjectStats(map->instance_type(), -1, object_size);
1608  non_count_table_.GetVisitorById(id)(map, obj);
1609  if (obj->IsJSObject()) {
1610    JSObject* object = JSObject::cast(obj);
1611    ObjectStatsCountFixedArray(object->elements(),
1612                               DICTIONARY_ELEMENTS_SUB_TYPE,
1613                               FAST_ELEMENTS_SUB_TYPE);
1614    ObjectStatsCountFixedArray(object->properties(),
1615                               DICTIONARY_PROPERTIES_SUB_TYPE,
1616                               FAST_PROPERTIES_SUB_TYPE);
1617  }
1618}
1619
1620
1621template<MarkCompactMarkingVisitor::VisitorId id>
1622void MarkCompactMarkingVisitor::ObjectStatsTracker<id>::Visit(
1623    Map* map, HeapObject* obj) {
1624  ObjectStatsVisitBase(id, map, obj);
1625}
1626
1627
1628template<>
1629class MarkCompactMarkingVisitor::ObjectStatsTracker<
1630    MarkCompactMarkingVisitor::kVisitMap> {
1631 public:
1632  static inline void Visit(Map* map, HeapObject* obj) {
1633    Heap* heap = map->GetHeap();
1634    Map* map_obj = Map::cast(obj);
1635    ASSERT(map->instance_type() == MAP_TYPE);
1636    DescriptorArray* array = map_obj->instance_descriptors();
1637    if (map_obj->owns_descriptors() &&
1638        array != heap->empty_descriptor_array()) {
1639      int fixed_array_size = array->Size();
1640      heap->RecordObjectStats(FIXED_ARRAY_TYPE,
1641                              DESCRIPTOR_ARRAY_SUB_TYPE,
1642                              fixed_array_size);
1643    }
1644    if (map_obj->HasTransitionArray()) {
1645      int fixed_array_size = map_obj->transitions()->Size();
1646      heap->RecordObjectStats(FIXED_ARRAY_TYPE,
1647                              TRANSITION_ARRAY_SUB_TYPE,
1648                              fixed_array_size);
1649    }
1650    if (map_obj->has_code_cache()) {
1651      CodeCache* cache = CodeCache::cast(map_obj->code_cache());
1652      heap->RecordObjectStats(
1653          FIXED_ARRAY_TYPE,
1654          MAP_CODE_CACHE_SUB_TYPE,
1655          cache->default_cache()->Size());
1656      if (!cache->normal_type_cache()->IsUndefined()) {
1657        heap->RecordObjectStats(
1658            FIXED_ARRAY_TYPE,
1659            MAP_CODE_CACHE_SUB_TYPE,
1660            FixedArray::cast(cache->normal_type_cache())->Size());
1661      }
1662    }
1663    ObjectStatsVisitBase(kVisitMap, map, obj);
1664  }
1665};
1666
1667
1668template<>
1669class MarkCompactMarkingVisitor::ObjectStatsTracker<
1670    MarkCompactMarkingVisitor::kVisitCode> {
1671 public:
1672  static inline void Visit(Map* map, HeapObject* obj) {
1673    Heap* heap = map->GetHeap();
1674    int object_size = obj->Size();
1675    ASSERT(map->instance_type() == CODE_TYPE);
1676    heap->RecordObjectStats(CODE_TYPE, Code::cast(obj)->kind(), object_size);
1677    ObjectStatsVisitBase(kVisitCode, map, obj);
1678  }
1679};
1680
1681
1682template<>
1683class MarkCompactMarkingVisitor::ObjectStatsTracker<
1684    MarkCompactMarkingVisitor::kVisitSharedFunctionInfo> {
1685 public:
1686  static inline void Visit(Map* map, HeapObject* obj) {
1687    Heap* heap = map->GetHeap();
1688    SharedFunctionInfo* sfi = SharedFunctionInfo::cast(obj);
1689    if (sfi->scope_info() != heap->empty_fixed_array()) {
1690      heap->RecordObjectStats(
1691          FIXED_ARRAY_TYPE,
1692          SCOPE_INFO_SUB_TYPE,
1693          FixedArray::cast(sfi->scope_info())->Size());
1694    }
1695    ObjectStatsVisitBase(kVisitSharedFunctionInfo, map, obj);
1696  }
1697};
1698
1699
1700template<>
1701class MarkCompactMarkingVisitor::ObjectStatsTracker<
1702    MarkCompactMarkingVisitor::kVisitFixedArray> {
1703 public:
1704  static inline void Visit(Map* map, HeapObject* obj) {
1705    Heap* heap = map->GetHeap();
1706    FixedArray* fixed_array = FixedArray::cast(obj);
1707    if (fixed_array == heap->string_table()) {
1708      heap->RecordObjectStats(
1709          FIXED_ARRAY_TYPE,
1710          STRING_TABLE_SUB_TYPE,
1711          fixed_array->Size());
1712    }
1713    ObjectStatsVisitBase(kVisitFixedArray, map, obj);
1714  }
1715};
1716
1717
1718void MarkCompactMarkingVisitor::Initialize() {
1719  StaticMarkingVisitor<MarkCompactMarkingVisitor>::Initialize();
1720
1721  table_.Register(kVisitJSRegExp,
1722                  &VisitRegExpAndFlushCode);
1723
1724  if (FLAG_track_gc_object_stats) {
1725    // Copy the visitor table to make call-through possible.
1726    non_count_table_.CopyFrom(&table_);
1727#define VISITOR_ID_COUNT_FUNCTION(id)                                   \
1728    table_.Register(kVisit##id, ObjectStatsTracker<kVisit##id>::Visit);
1729    VISITOR_ID_LIST(VISITOR_ID_COUNT_FUNCTION)
1730#undef VISITOR_ID_COUNT_FUNCTION
1731  }
1732}
1733
1734
1735VisitorDispatchTable<MarkCompactMarkingVisitor::Callback>
1736    MarkCompactMarkingVisitor::non_count_table_;
1737
1738
1739class CodeMarkingVisitor : public ThreadVisitor {
1740 public:
1741  explicit CodeMarkingVisitor(MarkCompactCollector* collector)
1742      : collector_(collector) {}
1743
1744  void VisitThread(Isolate* isolate, ThreadLocalTop* top) {
1745    collector_->PrepareThreadForCodeFlushing(isolate, top);
1746  }
1747
1748 private:
1749  MarkCompactCollector* collector_;
1750};
1751
1752
1753class SharedFunctionInfoMarkingVisitor : public ObjectVisitor {
1754 public:
1755  explicit SharedFunctionInfoMarkingVisitor(MarkCompactCollector* collector)
1756      : collector_(collector) {}
1757
1758  void VisitPointers(Object** start, Object** end) {
1759    for (Object** p = start; p < end; p++) VisitPointer(p);
1760  }
1761
1762  void VisitPointer(Object** slot) {
1763    Object* obj = *slot;
1764    if (obj->IsSharedFunctionInfo()) {
1765      SharedFunctionInfo* shared = reinterpret_cast<SharedFunctionInfo*>(obj);
1766      MarkBit shared_mark = Marking::MarkBitFrom(shared);
1767      MarkBit code_mark = Marking::MarkBitFrom(shared->code());
1768      collector_->MarkObject(shared->code(), code_mark);
1769      collector_->MarkObject(shared, shared_mark);
1770    }
1771  }
1772
1773 private:
1774  MarkCompactCollector* collector_;
1775};
1776
1777
1778void MarkCompactCollector::PrepareThreadForCodeFlushing(Isolate* isolate,
1779                                                        ThreadLocalTop* top) {
1780  for (StackFrameIterator it(isolate, top); !it.done(); it.Advance()) {
1781    // Note: for the frame that has a pending lazy deoptimization
1782    // StackFrame::unchecked_code will return a non-optimized code object for
1783    // the outermost function and StackFrame::LookupCode will return
1784    // actual optimized code object.
1785    StackFrame* frame = it.frame();
1786    Code* code = frame->unchecked_code();
1787    MarkBit code_mark = Marking::MarkBitFrom(code);
1788    MarkObject(code, code_mark);
1789    if (frame->is_optimized()) {
1790      MarkCompactMarkingVisitor::MarkInlinedFunctionsCode(heap(),
1791                                                          frame->LookupCode());
1792    }
1793  }
1794}
1795
1796
1797void MarkCompactCollector::PrepareForCodeFlushing() {
1798  ASSERT(heap() == Isolate::Current()->heap());
1799
1800  // Enable code flushing for non-incremental cycles.
1801  if (FLAG_flush_code && !FLAG_flush_code_incrementally) {
1802    EnableCodeFlushing(!was_marked_incrementally_);
1803  }
1804
1805  // If code flushing is disabled, there is no need to prepare for it.
1806  if (!is_code_flushing_enabled()) return;
1807
1808  // Ensure that empty descriptor array is marked. Method MarkDescriptorArray
1809  // relies on it being marked before any other descriptor array.
1810  HeapObject* descriptor_array = heap()->empty_descriptor_array();
1811  MarkBit descriptor_array_mark = Marking::MarkBitFrom(descriptor_array);
1812  MarkObject(descriptor_array, descriptor_array_mark);
1813
1814  // Make sure we are not referencing the code from the stack.
1815  ASSERT(this == heap()->mark_compact_collector());
1816  PrepareThreadForCodeFlushing(heap()->isolate(),
1817                               heap()->isolate()->thread_local_top());
1818
1819  // Iterate the archived stacks in all threads to check if
1820  // the code is referenced.
1821  CodeMarkingVisitor code_marking_visitor(this);
1822  heap()->isolate()->thread_manager()->IterateArchivedThreads(
1823      &code_marking_visitor);
1824
1825  SharedFunctionInfoMarkingVisitor visitor(this);
1826  heap()->isolate()->compilation_cache()->IterateFunctions(&visitor);
1827  heap()->isolate()->handle_scope_implementer()->Iterate(&visitor);
1828
1829  ProcessMarkingDeque();
1830}
1831
1832
1833// Visitor class for marking heap roots.
1834class RootMarkingVisitor : public ObjectVisitor {
1835 public:
1836  explicit RootMarkingVisitor(Heap* heap)
1837    : collector_(heap->mark_compact_collector()) { }
1838
1839  void VisitPointer(Object** p) {
1840    MarkObjectByPointer(p);
1841  }
1842
1843  void VisitPointers(Object** start, Object** end) {
1844    for (Object** p = start; p < end; p++) MarkObjectByPointer(p);
1845  }
1846
1847 private:
1848  void MarkObjectByPointer(Object** p) {
1849    if (!(*p)->IsHeapObject()) return;
1850
1851    // Replace flat cons strings in place.
1852    HeapObject* object = ShortCircuitConsString(p);
1853    MarkBit mark_bit = Marking::MarkBitFrom(object);
1854    if (mark_bit.Get()) return;
1855
1856    Map* map = object->map();
1857    // Mark the object.
1858    collector_->SetMark(object, mark_bit);
1859
1860    // Mark the map pointer and body, and push them on the marking stack.
1861    MarkBit map_mark = Marking::MarkBitFrom(map);
1862    collector_->MarkObject(map, map_mark);
1863    MarkCompactMarkingVisitor::IterateBody(map, object);
1864
1865    // Mark all the objects reachable from the map and body.  May leave
1866    // overflowed objects in the heap.
1867    collector_->EmptyMarkingDeque();
1868  }
1869
1870  MarkCompactCollector* collector_;
1871};
1872
1873
1874// Helper class for pruning the string table.
1875class StringTableCleaner : public ObjectVisitor {
1876 public:
1877  explicit StringTableCleaner(Heap* heap)
1878    : heap_(heap), pointers_removed_(0) { }
1879
1880  virtual void VisitPointers(Object** start, Object** end) {
1881    // Visit all HeapObject pointers in [start, end).
1882    for (Object** p = start; p < end; p++) {
1883      Object* o = *p;
1884      if (o->IsHeapObject() &&
1885          !Marking::MarkBitFrom(HeapObject::cast(o)).Get()) {
1886        // Check if the internalized string being pruned is external. We need to
1887        // delete the associated external data as this string is going away.
1888
1889        // Since no objects have yet been moved we can safely access the map of
1890        // the object.
1891        if (o->IsExternalString()) {
1892          heap_->FinalizeExternalString(String::cast(*p));
1893        }
1894        // Set the entry to the_hole_value (as deleted).
1895        *p = heap_->the_hole_value();
1896        pointers_removed_++;
1897      }
1898    }
1899  }
1900
1901  int PointersRemoved() {
1902    return pointers_removed_;
1903  }
1904
1905 private:
1906  Heap* heap_;
1907  int pointers_removed_;
1908};
1909
1910
1911// Implementation of WeakObjectRetainer for mark compact GCs. All marked objects
1912// are retained.
1913class MarkCompactWeakObjectRetainer : public WeakObjectRetainer {
1914 public:
1915  virtual Object* RetainAs(Object* object) {
1916    if (Marking::MarkBitFrom(HeapObject::cast(object)).Get()) {
1917      return object;
1918    } else {
1919      return NULL;
1920    }
1921  }
1922};
1923
1924
1925// Fill the marking stack with overflowed objects returned by the given
1926// iterator.  Stop when the marking stack is filled or the end of the space
1927// is reached, whichever comes first.
1928template<class T>
1929static void DiscoverGreyObjectsWithIterator(Heap* heap,
1930                                            MarkingDeque* marking_deque,
1931                                            T* it) {
1932  // The caller should ensure that the marking stack is initially not full,
1933  // so that we don't waste effort pointlessly scanning for objects.
1934  ASSERT(!marking_deque->IsFull());
1935
1936  Map* filler_map = heap->one_pointer_filler_map();
1937  for (HeapObject* object = it->Next();
1938       object != NULL;
1939       object = it->Next()) {
1940    MarkBit markbit = Marking::MarkBitFrom(object);
1941    if ((object->map() != filler_map) && Marking::IsGrey(markbit)) {
1942      Marking::GreyToBlack(markbit);
1943      MemoryChunk::IncrementLiveBytesFromGC(object->address(), object->Size());
1944      marking_deque->PushBlack(object);
1945      if (marking_deque->IsFull()) return;
1946    }
1947  }
1948}
1949
1950
1951static inline int MarkWordToObjectStarts(uint32_t mark_bits, int* starts);
1952
1953
1954static void DiscoverGreyObjectsOnPage(MarkingDeque* marking_deque,
1955                                      MemoryChunk* p) {
1956  ASSERT(!marking_deque->IsFull());
1957  ASSERT(strcmp(Marking::kWhiteBitPattern, "00") == 0);
1958  ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
1959  ASSERT(strcmp(Marking::kGreyBitPattern, "11") == 0);
1960  ASSERT(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
1961
1962  for (MarkBitCellIterator it(p); !it.Done(); it.Advance()) {
1963    Address cell_base = it.CurrentCellBase();
1964    MarkBit::CellType* cell = it.CurrentCell();
1965
1966    const MarkBit::CellType current_cell = *cell;
1967    if (current_cell == 0) continue;
1968
1969    MarkBit::CellType grey_objects;
1970    if (it.HasNext()) {
1971      const MarkBit::CellType next_cell = *(cell+1);
1972      grey_objects = current_cell &
1973          ((current_cell >> 1) | (next_cell << (Bitmap::kBitsPerCell - 1)));
1974    } else {
1975      grey_objects = current_cell & (current_cell >> 1);
1976    }
1977
1978    int offset = 0;
1979    while (grey_objects != 0) {
1980      int trailing_zeros = CompilerIntrinsics::CountTrailingZeros(grey_objects);
1981      grey_objects >>= trailing_zeros;
1982      offset += trailing_zeros;
1983      MarkBit markbit(cell, 1 << offset, false);
1984      ASSERT(Marking::IsGrey(markbit));
1985      Marking::GreyToBlack(markbit);
1986      Address addr = cell_base + offset * kPointerSize;
1987      HeapObject* object = HeapObject::FromAddress(addr);
1988      MemoryChunk::IncrementLiveBytesFromGC(object->address(), object->Size());
1989      marking_deque->PushBlack(object);
1990      if (marking_deque->IsFull()) return;
1991      offset += 2;
1992      grey_objects >>= 2;
1993    }
1994
1995    grey_objects >>= (Bitmap::kBitsPerCell - 1);
1996  }
1997}
1998
1999
2000int MarkCompactCollector::DiscoverAndPromoteBlackObjectsOnPage(
2001    NewSpace* new_space,
2002    NewSpacePage* p) {
2003  ASSERT(strcmp(Marking::kWhiteBitPattern, "00") == 0);
2004  ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
2005  ASSERT(strcmp(Marking::kGreyBitPattern, "11") == 0);
2006  ASSERT(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
2007
2008  MarkBit::CellType* cells = p->markbits()->cells();
2009  int survivors_size = 0;
2010
2011  for (MarkBitCellIterator it(p); !it.Done(); it.Advance()) {
2012    Address cell_base = it.CurrentCellBase();
2013    MarkBit::CellType* cell = it.CurrentCell();
2014
2015    MarkBit::CellType current_cell = *cell;
2016    if (current_cell == 0) continue;
2017
2018    int offset = 0;
2019    while (current_cell != 0) {
2020      int trailing_zeros = CompilerIntrinsics::CountTrailingZeros(current_cell);
2021      current_cell >>= trailing_zeros;
2022      offset += trailing_zeros;
2023      Address address = cell_base + offset * kPointerSize;
2024      HeapObject* object = HeapObject::FromAddress(address);
2025
2026      int size = object->Size();
2027      survivors_size += size;
2028
2029      offset++;
2030      current_cell >>= 1;
2031      // Aggressively promote young survivors to the old space.
2032      if (TryPromoteObject(object, size)) {
2033        continue;
2034      }
2035
2036      // Promotion failed. Just migrate object to another semispace.
2037      MaybeObject* allocation = new_space->AllocateRaw(size);
2038      if (allocation->IsFailure()) {
2039        if (!new_space->AddFreshPage()) {
2040          // Shouldn't happen. We are sweeping linearly, and to-space
2041          // has the same number of pages as from-space, so there is
2042          // always room.
2043          UNREACHABLE();
2044        }
2045        allocation = new_space->AllocateRaw(size);
2046        ASSERT(!allocation->IsFailure());
2047      }
2048      Object* target = allocation->ToObjectUnchecked();
2049
2050      MigrateObject(HeapObject::cast(target)->address(),
2051                    object->address(),
2052                    size,
2053                    NEW_SPACE);
2054    }
2055    *cells = 0;
2056  }
2057  return survivors_size;
2058}
2059
2060
2061static void DiscoverGreyObjectsInSpace(Heap* heap,
2062                                       MarkingDeque* marking_deque,
2063                                       PagedSpace* space) {
2064  if (!space->was_swept_conservatively()) {
2065    HeapObjectIterator it(space);
2066    DiscoverGreyObjectsWithIterator(heap, marking_deque, &it);
2067  } else {
2068    PageIterator it(space);
2069    while (it.has_next()) {
2070      Page* p = it.next();
2071      DiscoverGreyObjectsOnPage(marking_deque, p);
2072      if (marking_deque->IsFull()) return;
2073    }
2074  }
2075}
2076
2077
2078static void DiscoverGreyObjectsInNewSpace(Heap* heap,
2079                                          MarkingDeque* marking_deque) {
2080  NewSpace* space = heap->new_space();
2081  NewSpacePageIterator it(space->bottom(), space->top());
2082  while (it.has_next()) {
2083    NewSpacePage* page = it.next();
2084    DiscoverGreyObjectsOnPage(marking_deque, page);
2085    if (marking_deque->IsFull()) return;
2086  }
2087}
2088
2089
2090bool MarkCompactCollector::IsUnmarkedHeapObject(Object** p) {
2091  Object* o = *p;
2092  if (!o->IsHeapObject()) return false;
2093  HeapObject* heap_object = HeapObject::cast(o);
2094  MarkBit mark = Marking::MarkBitFrom(heap_object);
2095  return !mark.Get();
2096}
2097
2098
2099bool MarkCompactCollector::IsUnmarkedHeapObjectWithHeap(Heap* heap,
2100                                                        Object** p) {
2101  Object* o = *p;
2102  ASSERT(o->IsHeapObject());
2103  HeapObject* heap_object = HeapObject::cast(o);
2104  MarkBit mark = Marking::MarkBitFrom(heap_object);
2105  return !mark.Get();
2106}
2107
2108
2109void MarkCompactCollector::MarkStringTable(RootMarkingVisitor* visitor) {
2110  StringTable* string_table = heap()->string_table();
2111  // Mark the string table itself.
2112  MarkBit string_table_mark = Marking::MarkBitFrom(string_table);
2113  SetMark(string_table, string_table_mark);
2114  // Explicitly mark the prefix.
2115  string_table->IteratePrefix(visitor);
2116  ProcessMarkingDeque();
2117}
2118
2119
2120void MarkCompactCollector::MarkRoots(RootMarkingVisitor* visitor) {
2121  // Mark the heap roots including global variables, stack variables,
2122  // etc., and all objects reachable from them.
2123  heap()->IterateStrongRoots(visitor, VISIT_ONLY_STRONG);
2124
2125  // Handle the string table specially.
2126  MarkStringTable(visitor);
2127
2128  // There may be overflowed objects in the heap.  Visit them now.
2129  while (marking_deque_.overflowed()) {
2130    RefillMarkingDeque();
2131    EmptyMarkingDeque();
2132  }
2133}
2134
2135
2136void MarkCompactCollector::MarkImplicitRefGroups() {
2137  List<ImplicitRefGroup*>* ref_groups =
2138      isolate()->global_handles()->implicit_ref_groups();
2139
2140  int last = 0;
2141  for (int i = 0; i < ref_groups->length(); i++) {
2142    ImplicitRefGroup* entry = ref_groups->at(i);
2143    ASSERT(entry != NULL);
2144
2145    if (!IsMarked(*entry->parent)) {
2146      (*ref_groups)[last++] = entry;
2147      continue;
2148    }
2149
2150    Object*** children = entry->children;
2151    // A parent object is marked, so mark all child heap objects.
2152    for (size_t j = 0; j < entry->length; ++j) {
2153      if ((*children[j])->IsHeapObject()) {
2154        HeapObject* child = HeapObject::cast(*children[j]);
2155        MarkBit mark = Marking::MarkBitFrom(child);
2156        MarkObject(child, mark);
2157      }
2158    }
2159
2160    // Once the entire group has been marked, dispose it because it's
2161    // not needed anymore.
2162    delete entry;
2163  }
2164  ref_groups->Rewind(last);
2165}
2166
2167
2168// Mark all objects reachable from the objects on the marking stack.
2169// Before: the marking stack contains zero or more heap object pointers.
2170// After: the marking stack is empty, and all objects reachable from the
2171// marking stack have been marked, or are overflowed in the heap.
2172void MarkCompactCollector::EmptyMarkingDeque() {
2173  while (!marking_deque_.IsEmpty()) {
2174    HeapObject* object = marking_deque_.Pop();
2175    ASSERT(object->IsHeapObject());
2176    ASSERT(heap()->Contains(object));
2177    ASSERT(Marking::IsBlack(Marking::MarkBitFrom(object)));
2178
2179    Map* map = object->map();
2180    MarkBit map_mark = Marking::MarkBitFrom(map);
2181    MarkObject(map, map_mark);
2182
2183    MarkCompactMarkingVisitor::IterateBody(map, object);
2184  }
2185}
2186
2187
2188// Sweep the heap for overflowed objects, clear their overflow bits, and
2189// push them on the marking stack.  Stop early if the marking stack fills
2190// before sweeping completes.  If sweeping completes, there are no remaining
2191// overflowed objects in the heap so the overflow flag on the markings stack
2192// is cleared.
2193void MarkCompactCollector::RefillMarkingDeque() {
2194  ASSERT(marking_deque_.overflowed());
2195
2196  DiscoverGreyObjectsInNewSpace(heap(), &marking_deque_);
2197  if (marking_deque_.IsFull()) return;
2198
2199  DiscoverGreyObjectsInSpace(heap(),
2200                             &marking_deque_,
2201                             heap()->old_pointer_space());
2202  if (marking_deque_.IsFull()) return;
2203
2204  DiscoverGreyObjectsInSpace(heap(),
2205                             &marking_deque_,
2206                             heap()->old_data_space());
2207  if (marking_deque_.IsFull()) return;
2208
2209  DiscoverGreyObjectsInSpace(heap(),
2210                             &marking_deque_,
2211                             heap()->code_space());
2212  if (marking_deque_.IsFull()) return;
2213
2214  DiscoverGreyObjectsInSpace(heap(),
2215                             &marking_deque_,
2216                             heap()->map_space());
2217  if (marking_deque_.IsFull()) return;
2218
2219  DiscoverGreyObjectsInSpace(heap(),
2220                             &marking_deque_,
2221                             heap()->cell_space());
2222  if (marking_deque_.IsFull()) return;
2223
2224  DiscoverGreyObjectsInSpace(heap(),
2225                             &marking_deque_,
2226                             heap()->property_cell_space());
2227  if (marking_deque_.IsFull()) return;
2228
2229  LargeObjectIterator lo_it(heap()->lo_space());
2230  DiscoverGreyObjectsWithIterator(heap(),
2231                                  &marking_deque_,
2232                                  &lo_it);
2233  if (marking_deque_.IsFull()) return;
2234
2235  marking_deque_.ClearOverflowed();
2236}
2237
2238
2239// Mark all objects reachable (transitively) from objects on the marking
2240// stack.  Before: the marking stack contains zero or more heap object
2241// pointers.  After: the marking stack is empty and there are no overflowed
2242// objects in the heap.
2243void MarkCompactCollector::ProcessMarkingDeque() {
2244  EmptyMarkingDeque();
2245  while (marking_deque_.overflowed()) {
2246    RefillMarkingDeque();
2247    EmptyMarkingDeque();
2248  }
2249}
2250
2251
2252// Mark all objects reachable (transitively) from objects on the marking
2253// stack including references only considered in the atomic marking pause.
2254void MarkCompactCollector::ProcessEphemeralMarking(ObjectVisitor* visitor) {
2255  bool work_to_do = true;
2256  ASSERT(marking_deque_.IsEmpty());
2257  while (work_to_do) {
2258    isolate()->global_handles()->IterateObjectGroups(
2259        visitor, &IsUnmarkedHeapObjectWithHeap);
2260    MarkImplicitRefGroups();
2261    ProcessWeakCollections();
2262    work_to_do = !marking_deque_.IsEmpty();
2263    ProcessMarkingDeque();
2264  }
2265}
2266
2267
2268void MarkCompactCollector::ProcessTopOptimizedFrame(ObjectVisitor* visitor) {
2269  for (StackFrameIterator it(isolate(), isolate()->thread_local_top());
2270       !it.done(); it.Advance()) {
2271    if (it.frame()->type() == StackFrame::JAVA_SCRIPT) {
2272      return;
2273    }
2274    if (it.frame()->type() == StackFrame::OPTIMIZED) {
2275      Code* code = it.frame()->LookupCode();
2276      if (!code->CanDeoptAt(it.frame()->pc())) {
2277        code->CodeIterateBody(visitor);
2278      }
2279      ProcessMarkingDeque();
2280      return;
2281    }
2282  }
2283}
2284
2285
2286void MarkCompactCollector::MarkLiveObjects() {
2287  GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_MARK);
2288  // The recursive GC marker detects when it is nearing stack overflow,
2289  // and switches to a different marking system.  JS interrupts interfere
2290  // with the C stack limit check.
2291  PostponeInterruptsScope postpone(isolate());
2292
2293  bool incremental_marking_overflowed = false;
2294  IncrementalMarking* incremental_marking = heap_->incremental_marking();
2295  if (was_marked_incrementally_) {
2296    // Finalize the incremental marking and check whether we had an overflow.
2297    // Both markers use grey color to mark overflowed objects so
2298    // non-incremental marker can deal with them as if overflow
2299    // occured during normal marking.
2300    // But incremental marker uses a separate marking deque
2301    // so we have to explicitly copy its overflow state.
2302    incremental_marking->Finalize();
2303    incremental_marking_overflowed =
2304        incremental_marking->marking_deque()->overflowed();
2305    incremental_marking->marking_deque()->ClearOverflowed();
2306  } else {
2307    // Abort any pending incremental activities e.g. incremental sweeping.
2308    incremental_marking->Abort();
2309  }
2310
2311#ifdef DEBUG
2312  ASSERT(state_ == PREPARE_GC);
2313  state_ = MARK_LIVE_OBJECTS;
2314#endif
2315  // The to space contains live objects, a page in from space is used as a
2316  // marking stack.
2317  Address marking_deque_start = heap()->new_space()->FromSpacePageLow();
2318  Address marking_deque_end = heap()->new_space()->FromSpacePageHigh();
2319  if (FLAG_force_marking_deque_overflows) {
2320    marking_deque_end = marking_deque_start + 64 * kPointerSize;
2321  }
2322  marking_deque_.Initialize(marking_deque_start,
2323                            marking_deque_end);
2324  ASSERT(!marking_deque_.overflowed());
2325
2326  if (incremental_marking_overflowed) {
2327    // There are overflowed objects left in the heap after incremental marking.
2328    marking_deque_.SetOverflowed();
2329  }
2330
2331  PrepareForCodeFlushing();
2332
2333  if (was_marked_incrementally_) {
2334    // There is no write barrier on cells so we have to scan them now at the end
2335    // of the incremental marking.
2336    {
2337      HeapObjectIterator cell_iterator(heap()->cell_space());
2338      HeapObject* cell;
2339      while ((cell = cell_iterator.Next()) != NULL) {
2340        ASSERT(cell->IsCell());
2341        if (IsMarked(cell)) {
2342          int offset = Cell::kValueOffset;
2343          MarkCompactMarkingVisitor::VisitPointer(
2344              heap(),
2345              reinterpret_cast<Object**>(cell->address() + offset));
2346        }
2347      }
2348    }
2349    {
2350      HeapObjectIterator js_global_property_cell_iterator(
2351          heap()->property_cell_space());
2352      HeapObject* cell;
2353      while ((cell = js_global_property_cell_iterator.Next()) != NULL) {
2354        ASSERT(cell->IsPropertyCell());
2355        if (IsMarked(cell)) {
2356          MarkCompactMarkingVisitor::VisitPropertyCell(cell->map(), cell);
2357        }
2358      }
2359    }
2360  }
2361
2362  RootMarkingVisitor root_visitor(heap());
2363  MarkRoots(&root_visitor);
2364
2365  ProcessTopOptimizedFrame(&root_visitor);
2366
2367  // The objects reachable from the roots are marked, yet unreachable
2368  // objects are unmarked.  Mark objects reachable due to host
2369  // application specific logic or through Harmony weak maps.
2370  ProcessEphemeralMarking(&root_visitor);
2371
2372  // The objects reachable from the roots, weak maps or object groups
2373  // are marked, yet unreachable objects are unmarked.  Mark objects
2374  // reachable only from weak global handles.
2375  //
2376  // First we identify nonlive weak handles and mark them as pending
2377  // destruction.
2378  heap()->isolate()->global_handles()->IdentifyWeakHandles(
2379      &IsUnmarkedHeapObject);
2380  // Then we mark the objects and process the transitive closure.
2381  heap()->isolate()->global_handles()->IterateWeakRoots(&root_visitor);
2382  while (marking_deque_.overflowed()) {
2383    RefillMarkingDeque();
2384    EmptyMarkingDeque();
2385  }
2386
2387  // Repeat host application specific and Harmony weak maps marking to
2388  // mark unmarked objects reachable from the weak roots.
2389  ProcessEphemeralMarking(&root_visitor);
2390
2391  AfterMarking();
2392}
2393
2394
2395void MarkCompactCollector::AfterMarking() {
2396  // Object literal map caches reference strings (cache keys) and maps
2397  // (cache values). At this point still useful maps have already been
2398  // marked. Mark the keys for the alive values before we process the
2399  // string table.
2400  ProcessMapCaches();
2401
2402  // Prune the string table removing all strings only pointed to by the
2403  // string table.  Cannot use string_table() here because the string
2404  // table is marked.
2405  StringTable* string_table = heap()->string_table();
2406  StringTableCleaner v(heap());
2407  string_table->IterateElements(&v);
2408  string_table->ElementsRemoved(v.PointersRemoved());
2409  heap()->external_string_table_.Iterate(&v);
2410  heap()->external_string_table_.CleanUp();
2411
2412  // Process the weak references.
2413  MarkCompactWeakObjectRetainer mark_compact_object_retainer;
2414  heap()->ProcessWeakReferences(&mark_compact_object_retainer);
2415
2416  // Remove object groups after marking phase.
2417  heap()->isolate()->global_handles()->RemoveObjectGroups();
2418  heap()->isolate()->global_handles()->RemoveImplicitRefGroups();
2419
2420  // Flush code from collected candidates.
2421  if (is_code_flushing_enabled()) {
2422    code_flusher_->ProcessCandidates();
2423    // If incremental marker does not support code flushing, we need to
2424    // disable it before incremental marking steps for next cycle.
2425    if (FLAG_flush_code && !FLAG_flush_code_incrementally) {
2426      EnableCodeFlushing(false);
2427    }
2428  }
2429
2430  if (!FLAG_watch_ic_patching) {
2431    // Clean up dead objects from the runtime profiler.
2432    heap()->isolate()->runtime_profiler()->RemoveDeadSamples();
2433  }
2434
2435  if (FLAG_track_gc_object_stats) {
2436    heap()->CheckpointObjectStats();
2437  }
2438}
2439
2440
2441void MarkCompactCollector::ProcessMapCaches() {
2442  Object* raw_context = heap()->native_contexts_list_;
2443  while (raw_context != heap()->undefined_value()) {
2444    Context* context = reinterpret_cast<Context*>(raw_context);
2445    if (IsMarked(context)) {
2446      HeapObject* raw_map_cache =
2447          HeapObject::cast(context->get(Context::MAP_CACHE_INDEX));
2448      // A map cache may be reachable from the stack. In this case
2449      // it's already transitively marked and it's too late to clean
2450      // up its parts.
2451      if (!IsMarked(raw_map_cache) &&
2452          raw_map_cache != heap()->undefined_value()) {
2453        MapCache* map_cache = reinterpret_cast<MapCache*>(raw_map_cache);
2454        int existing_elements = map_cache->NumberOfElements();
2455        int used_elements = 0;
2456        for (int i = MapCache::kElementsStartIndex;
2457             i < map_cache->length();
2458             i += MapCache::kEntrySize) {
2459          Object* raw_key = map_cache->get(i);
2460          if (raw_key == heap()->undefined_value() ||
2461              raw_key == heap()->the_hole_value()) continue;
2462          STATIC_ASSERT(MapCache::kEntrySize == 2);
2463          Object* raw_map = map_cache->get(i + 1);
2464          if (raw_map->IsHeapObject() && IsMarked(raw_map)) {
2465            ++used_elements;
2466          } else {
2467            // Delete useless entries with unmarked maps.
2468            ASSERT(raw_map->IsMap());
2469            map_cache->set_the_hole(i);
2470            map_cache->set_the_hole(i + 1);
2471          }
2472        }
2473        if (used_elements == 0) {
2474          context->set(Context::MAP_CACHE_INDEX, heap()->undefined_value());
2475        } else {
2476          // Note: we don't actually shrink the cache here to avoid
2477          // extra complexity during GC. We rely on subsequent cache
2478          // usages (EnsureCapacity) to do this.
2479          map_cache->ElementsRemoved(existing_elements - used_elements);
2480          MarkBit map_cache_markbit = Marking::MarkBitFrom(map_cache);
2481          MarkObject(map_cache, map_cache_markbit);
2482        }
2483      }
2484    }
2485    // Move to next element in the list.
2486    raw_context = context->get(Context::NEXT_CONTEXT_LINK);
2487  }
2488  ProcessMarkingDeque();
2489}
2490
2491
2492void MarkCompactCollector::ReattachInitialMaps() {
2493  HeapObjectIterator map_iterator(heap()->map_space());
2494  for (HeapObject* obj = map_iterator.Next();
2495       obj != NULL;
2496       obj = map_iterator.Next()) {
2497    Map* map = Map::cast(obj);
2498
2499    STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
2500    if (map->instance_type() < FIRST_JS_RECEIVER_TYPE) continue;
2501
2502    if (map->attached_to_shared_function_info()) {
2503      JSFunction::cast(map->constructor())->shared()->AttachInitialMap(map);
2504    }
2505  }
2506}
2507
2508
2509void MarkCompactCollector::ClearNonLiveReferences() {
2510  // Iterate over the map space, setting map transitions that go from
2511  // a marked map to an unmarked map to null transitions.  This action
2512  // is carried out only on maps of JSObjects and related subtypes.
2513  HeapObjectIterator map_iterator(heap()->map_space());
2514  for (HeapObject* obj = map_iterator.Next();
2515       obj != NULL;
2516       obj = map_iterator.Next()) {
2517    Map* map = Map::cast(obj);
2518
2519    if (!map->CanTransition()) continue;
2520
2521    MarkBit map_mark = Marking::MarkBitFrom(map);
2522    if (map_mark.Get() && map->attached_to_shared_function_info()) {
2523      // This map is used for inobject slack tracking and has been detached
2524      // from SharedFunctionInfo during the mark phase.
2525      // Since it survived the GC, reattach it now.
2526      JSFunction::cast(map->constructor())->shared()->AttachInitialMap(map);
2527    }
2528
2529    ClearNonLivePrototypeTransitions(map);
2530    ClearNonLiveMapTransitions(map, map_mark);
2531
2532    if (map_mark.Get()) {
2533      ClearNonLiveDependentCode(map->dependent_code());
2534    } else {
2535      ClearAndDeoptimizeDependentCode(map);
2536    }
2537  }
2538
2539  // Iterate over property cell space, removing dependent code that is not
2540  // otherwise kept alive by strong references.
2541  HeapObjectIterator cell_iterator(heap_->property_cell_space());
2542  for (HeapObject* cell = cell_iterator.Next();
2543       cell != NULL;
2544       cell = cell_iterator.Next()) {
2545    if (IsMarked(cell)) {
2546      ClearNonLiveDependentCode(PropertyCell::cast(cell)->dependent_code());
2547    }
2548  }
2549}
2550
2551
2552void MarkCompactCollector::ClearNonLivePrototypeTransitions(Map* map) {
2553  int number_of_transitions = map->NumberOfProtoTransitions();
2554  FixedArray* prototype_transitions = map->GetPrototypeTransitions();
2555
2556  int new_number_of_transitions = 0;
2557  const int header = Map::kProtoTransitionHeaderSize;
2558  const int proto_offset = header + Map::kProtoTransitionPrototypeOffset;
2559  const int map_offset = header + Map::kProtoTransitionMapOffset;
2560  const int step = Map::kProtoTransitionElementsPerEntry;
2561  for (int i = 0; i < number_of_transitions; i++) {
2562    Object* prototype = prototype_transitions->get(proto_offset + i * step);
2563    Object* cached_map = prototype_transitions->get(map_offset + i * step);
2564    if (IsMarked(prototype) && IsMarked(cached_map)) {
2565      int proto_index = proto_offset + new_number_of_transitions * step;
2566      int map_index = map_offset + new_number_of_transitions * step;
2567      if (new_number_of_transitions != i) {
2568        prototype_transitions->set(
2569            proto_index,
2570            prototype,
2571            UPDATE_WRITE_BARRIER);
2572        prototype_transitions->set(
2573            map_index,
2574            cached_map,
2575            SKIP_WRITE_BARRIER);
2576      }
2577      Object** slot =
2578          HeapObject::RawField(prototype_transitions,
2579                               FixedArray::OffsetOfElementAt(proto_index));
2580      RecordSlot(slot, slot, prototype);
2581      new_number_of_transitions++;
2582    }
2583  }
2584
2585  if (new_number_of_transitions != number_of_transitions) {
2586    map->SetNumberOfProtoTransitions(new_number_of_transitions);
2587  }
2588
2589  // Fill slots that became free with undefined value.
2590  for (int i = new_number_of_transitions * step;
2591       i < number_of_transitions * step;
2592       i++) {
2593    prototype_transitions->set_undefined(heap_, header + i);
2594  }
2595}
2596
2597
2598void MarkCompactCollector::ClearNonLiveMapTransitions(Map* map,
2599                                                      MarkBit map_mark) {
2600  Object* potential_parent = map->GetBackPointer();
2601  if (!potential_parent->IsMap()) return;
2602  Map* parent = Map::cast(potential_parent);
2603
2604  // Follow back pointer, check whether we are dealing with a map transition
2605  // from a live map to a dead path and in case clear transitions of parent.
2606  bool current_is_alive = map_mark.Get();
2607  bool parent_is_alive = Marking::MarkBitFrom(parent).Get();
2608  if (!current_is_alive && parent_is_alive) {
2609    parent->ClearNonLiveTransitions(heap());
2610  }
2611}
2612
2613
2614void MarkCompactCollector::ClearAndDeoptimizeDependentCode(Map* map) {
2615  DisallowHeapAllocation no_allocation;
2616  DependentCode* entries = map->dependent_code();
2617  DependentCode::GroupStartIndexes starts(entries);
2618  int number_of_entries = starts.number_of_entries();
2619  if (number_of_entries == 0) return;
2620  for (int i = 0; i < number_of_entries; i++) {
2621    // If the entry is compilation info then the map must be alive,
2622    // and ClearAndDeoptimizeDependentCode shouldn't be called.
2623    ASSERT(entries->is_code_at(i));
2624    Code* code = entries->code_at(i);
2625
2626    if (IsMarked(code) && !WillBeDeoptimized(code)) {
2627      // Insert the code into the code_to_deoptimize linked list.
2628      Object* next = code_to_deoptimize_;
2629      if (next != Smi::FromInt(0)) {
2630        // Record the slot so that it is updated.
2631        Object** slot = code->code_to_deoptimize_link_slot();
2632        RecordSlot(slot, slot, next);
2633      }
2634      code->set_code_to_deoptimize_link(next);
2635      code_to_deoptimize_ = code;
2636    }
2637    entries->clear_at(i);
2638  }
2639  map->set_dependent_code(DependentCode::cast(heap()->empty_fixed_array()));
2640}
2641
2642
2643void MarkCompactCollector::ClearNonLiveDependentCode(DependentCode* entries) {
2644  DisallowHeapAllocation no_allocation;
2645  DependentCode::GroupStartIndexes starts(entries);
2646  int number_of_entries = starts.number_of_entries();
2647  if (number_of_entries == 0) return;
2648  int new_number_of_entries = 0;
2649  // Go through all groups, remove dead codes and compact.
2650  for (int g = 0; g < DependentCode::kGroupCount; g++) {
2651    int group_number_of_entries = 0;
2652    for (int i = starts.at(g); i < starts.at(g + 1); i++) {
2653      Object* obj = entries->object_at(i);
2654      ASSERT(obj->IsCode() || IsMarked(obj));
2655      if (IsMarked(obj) &&
2656          (!obj->IsCode() || !WillBeDeoptimized(Code::cast(obj)))) {
2657        if (new_number_of_entries + group_number_of_entries != i) {
2658          entries->set_object_at(
2659              new_number_of_entries + group_number_of_entries, obj);
2660        }
2661        Object** slot = entries->slot_at(new_number_of_entries +
2662                                         group_number_of_entries);
2663        RecordSlot(slot, slot, obj);
2664        group_number_of_entries++;
2665      }
2666    }
2667    entries->set_number_of_entries(
2668        static_cast<DependentCode::DependencyGroup>(g),
2669        group_number_of_entries);
2670    new_number_of_entries += group_number_of_entries;
2671  }
2672  for (int i = new_number_of_entries; i < number_of_entries; i++) {
2673    entries->clear_at(i);
2674  }
2675}
2676
2677
2678void MarkCompactCollector::ProcessWeakCollections() {
2679  GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_WEAKCOLLECTION_PROCESS);
2680  Object* weak_collection_obj = encountered_weak_collections();
2681  while (weak_collection_obj != Smi::FromInt(0)) {
2682    ASSERT(MarkCompactCollector::IsMarked(
2683        HeapObject::cast(weak_collection_obj)));
2684    JSWeakCollection* weak_collection =
2685        reinterpret_cast<JSWeakCollection*>(weak_collection_obj);
2686    ObjectHashTable* table = ObjectHashTable::cast(weak_collection->table());
2687    Object** anchor = reinterpret_cast<Object**>(table->address());
2688    for (int i = 0; i < table->Capacity(); i++) {
2689      if (MarkCompactCollector::IsMarked(HeapObject::cast(table->KeyAt(i)))) {
2690        Object** key_slot =
2691            HeapObject::RawField(table, FixedArray::OffsetOfElementAt(
2692                ObjectHashTable::EntryToIndex(i)));
2693        RecordSlot(anchor, key_slot, *key_slot);
2694        Object** value_slot =
2695            HeapObject::RawField(table, FixedArray::OffsetOfElementAt(
2696                ObjectHashTable::EntryToValueIndex(i)));
2697        MarkCompactMarkingVisitor::MarkObjectByPointer(
2698            this, anchor, value_slot);
2699      }
2700    }
2701    weak_collection_obj = weak_collection->next();
2702  }
2703}
2704
2705
2706void MarkCompactCollector::ClearWeakCollections() {
2707  GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_WEAKCOLLECTION_CLEAR);
2708  Object* weak_collection_obj = encountered_weak_collections();
2709  while (weak_collection_obj != Smi::FromInt(0)) {
2710    ASSERT(MarkCompactCollector::IsMarked(
2711        HeapObject::cast(weak_collection_obj)));
2712    JSWeakCollection* weak_collection =
2713        reinterpret_cast<JSWeakCollection*>(weak_collection_obj);
2714    ObjectHashTable* table = ObjectHashTable::cast(weak_collection->table());
2715    for (int i = 0; i < table->Capacity(); i++) {
2716      if (!MarkCompactCollector::IsMarked(HeapObject::cast(table->KeyAt(i)))) {
2717        table->RemoveEntry(i);
2718      }
2719    }
2720    weak_collection_obj = weak_collection->next();
2721    weak_collection->set_next(Smi::FromInt(0));
2722  }
2723  set_encountered_weak_collections(Smi::FromInt(0));
2724}
2725
2726
2727// We scavange new space simultaneously with sweeping. This is done in two
2728// passes.
2729//
2730// The first pass migrates all alive objects from one semispace to another or
2731// promotes them to old space.  Forwarding address is written directly into
2732// first word of object without any encoding.  If object is dead we write
2733// NULL as a forwarding address.
2734//
2735// The second pass updates pointers to new space in all spaces.  It is possible
2736// to encounter pointers to dead new space objects during traversal of pointers
2737// to new space.  We should clear them to avoid encountering them during next
2738// pointer iteration.  This is an issue if the store buffer overflows and we
2739// have to scan the entire old space, including dead objects, looking for
2740// pointers to new space.
2741void MarkCompactCollector::MigrateObject(Address dst,
2742                                         Address src,
2743                                         int size,
2744                                         AllocationSpace dest) {
2745  HEAP_PROFILE(heap(), ObjectMoveEvent(src, dst));
2746  // TODO(hpayer): Replace these checks with asserts.
2747  CHECK(heap()->AllowedToBeMigrated(HeapObject::FromAddress(src), dest));
2748  CHECK(dest != LO_SPACE && size <= Page::kMaxNonCodeHeapObjectSize);
2749  if (dest == OLD_POINTER_SPACE) {
2750    Address src_slot = src;
2751    Address dst_slot = dst;
2752    ASSERT(IsAligned(size, kPointerSize));
2753
2754    for (int remaining = size / kPointerSize; remaining > 0; remaining--) {
2755      Object* value = Memory::Object_at(src_slot);
2756
2757      Memory::Object_at(dst_slot) = value;
2758
2759      if (heap_->InNewSpace(value)) {
2760        heap_->store_buffer()->Mark(dst_slot);
2761      } else if (value->IsHeapObject() && IsOnEvacuationCandidate(value)) {
2762        SlotsBuffer::AddTo(&slots_buffer_allocator_,
2763                           &migration_slots_buffer_,
2764                           reinterpret_cast<Object**>(dst_slot),
2765                           SlotsBuffer::IGNORE_OVERFLOW);
2766      }
2767
2768      src_slot += kPointerSize;
2769      dst_slot += kPointerSize;
2770    }
2771
2772    if (compacting_ && HeapObject::FromAddress(dst)->IsJSFunction()) {
2773      Address code_entry_slot = dst + JSFunction::kCodeEntryOffset;
2774      Address code_entry = Memory::Address_at(code_entry_slot);
2775
2776      if (Page::FromAddress(code_entry)->IsEvacuationCandidate()) {
2777        SlotsBuffer::AddTo(&slots_buffer_allocator_,
2778                           &migration_slots_buffer_,
2779                           SlotsBuffer::CODE_ENTRY_SLOT,
2780                           code_entry_slot,
2781                           SlotsBuffer::IGNORE_OVERFLOW);
2782      }
2783    }
2784  } else if (dest == CODE_SPACE) {
2785    PROFILE(isolate(), CodeMoveEvent(src, dst));
2786    heap()->MoveBlock(dst, src, size);
2787    SlotsBuffer::AddTo(&slots_buffer_allocator_,
2788                       &migration_slots_buffer_,
2789                       SlotsBuffer::RELOCATED_CODE_OBJECT,
2790                       dst,
2791                       SlotsBuffer::IGNORE_OVERFLOW);
2792    Code::cast(HeapObject::FromAddress(dst))->Relocate(dst - src);
2793  } else {
2794    ASSERT(dest == OLD_DATA_SPACE || dest == NEW_SPACE);
2795    heap()->MoveBlock(dst, src, size);
2796  }
2797  Memory::Address_at(src) = dst;
2798}
2799
2800
2801// Visitor for updating pointers from live objects in old spaces to new space.
2802// It does not expect to encounter pointers to dead objects.
2803class PointersUpdatingVisitor: public ObjectVisitor {
2804 public:
2805  explicit PointersUpdatingVisitor(Heap* heap) : heap_(heap) { }
2806
2807  void VisitPointer(Object** p) {
2808    UpdatePointer(p);
2809  }
2810
2811  void VisitPointers(Object** start, Object** end) {
2812    for (Object** p = start; p < end; p++) UpdatePointer(p);
2813  }
2814
2815  void VisitEmbeddedPointer(RelocInfo* rinfo) {
2816    ASSERT(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT);
2817    Object* target = rinfo->target_object();
2818    Object* old_target = target;
2819    VisitPointer(&target);
2820    // Avoid unnecessary changes that might unnecessary flush the instruction
2821    // cache.
2822    if (target != old_target) {
2823      rinfo->set_target_object(target);
2824    }
2825  }
2826
2827  void VisitCodeTarget(RelocInfo* rinfo) {
2828    ASSERT(RelocInfo::IsCodeTarget(rinfo->rmode()));
2829    Object* target = Code::GetCodeFromTargetAddress(rinfo->target_address());
2830    Object* old_target = target;
2831    VisitPointer(&target);
2832    if (target != old_target) {
2833      rinfo->set_target_address(Code::cast(target)->instruction_start());
2834    }
2835  }
2836
2837  void VisitCodeAgeSequence(RelocInfo* rinfo) {
2838    ASSERT(RelocInfo::IsCodeAgeSequence(rinfo->rmode()));
2839    Object* stub = rinfo->code_age_stub();
2840    ASSERT(stub != NULL);
2841    VisitPointer(&stub);
2842    if (stub != rinfo->code_age_stub()) {
2843      rinfo->set_code_age_stub(Code::cast(stub));
2844    }
2845  }
2846
2847  void VisitDebugTarget(RelocInfo* rinfo) {
2848    ASSERT((RelocInfo::IsJSReturn(rinfo->rmode()) &&
2849            rinfo->IsPatchedReturnSequence()) ||
2850           (RelocInfo::IsDebugBreakSlot(rinfo->rmode()) &&
2851            rinfo->IsPatchedDebugBreakSlotSequence()));
2852    Object* target = Code::GetCodeFromTargetAddress(rinfo->call_address());
2853    VisitPointer(&target);
2854    rinfo->set_call_address(Code::cast(target)->instruction_start());
2855  }
2856
2857  static inline void UpdateSlot(Heap* heap, Object** slot) {
2858    Object* obj = *slot;
2859
2860    if (!obj->IsHeapObject()) return;
2861
2862    HeapObject* heap_obj = HeapObject::cast(obj);
2863
2864    MapWord map_word = heap_obj->map_word();
2865    if (map_word.IsForwardingAddress()) {
2866      ASSERT(heap->InFromSpace(heap_obj) ||
2867             MarkCompactCollector::IsOnEvacuationCandidate(heap_obj));
2868      HeapObject* target = map_word.ToForwardingAddress();
2869      *slot = target;
2870      ASSERT(!heap->InFromSpace(target) &&
2871             !MarkCompactCollector::IsOnEvacuationCandidate(target));
2872    }
2873  }
2874
2875 private:
2876  inline void UpdatePointer(Object** p) {
2877    UpdateSlot(heap_, p);
2878  }
2879
2880  Heap* heap_;
2881};
2882
2883
2884static void UpdatePointer(HeapObject** p, HeapObject* object) {
2885  ASSERT(*p == object);
2886
2887  Address old_addr = object->address();
2888
2889  Address new_addr = Memory::Address_at(old_addr);
2890
2891  // The new space sweep will overwrite the map word of dead objects
2892  // with NULL. In this case we do not need to transfer this entry to
2893  // the store buffer which we are rebuilding.
2894  if (new_addr != NULL) {
2895    *p = HeapObject::FromAddress(new_addr);
2896  } else {
2897    // We have to zap this pointer, because the store buffer may overflow later,
2898    // and then we have to scan the entire heap and we don't want to find
2899    // spurious newspace pointers in the old space.
2900    // TODO(mstarzinger): This was changed to a sentinel value to track down
2901    // rare crashes, change it back to Smi::FromInt(0) later.
2902    *p = reinterpret_cast<HeapObject*>(Smi::FromInt(0x0f100d00 >> 1));  // flood
2903  }
2904}
2905
2906
2907static String* UpdateReferenceInExternalStringTableEntry(Heap* heap,
2908                                                         Object** p) {
2909  MapWord map_word = HeapObject::cast(*p)->map_word();
2910
2911  if (map_word.IsForwardingAddress()) {
2912    return String::cast(map_word.ToForwardingAddress());
2913  }
2914
2915  return String::cast(*p);
2916}
2917
2918
2919bool MarkCompactCollector::TryPromoteObject(HeapObject* object,
2920                                            int object_size) {
2921  // TODO(hpayer): Replace that check with an assert.
2922  CHECK(object_size <= Page::kMaxNonCodeHeapObjectSize);
2923
2924  OldSpace* target_space = heap()->TargetSpace(object);
2925
2926  ASSERT(target_space == heap()->old_pointer_space() ||
2927         target_space == heap()->old_data_space());
2928  Object* result;
2929  MaybeObject* maybe_result = target_space->AllocateRaw(object_size);
2930  if (maybe_result->ToObject(&result)) {
2931    HeapObject* target = HeapObject::cast(result);
2932    MigrateObject(target->address(),
2933                  object->address(),
2934                  object_size,
2935                  target_space->identity());
2936    heap()->mark_compact_collector()->tracer()->
2937        increment_promoted_objects_size(object_size);
2938    return true;
2939  }
2940
2941  return false;
2942}
2943
2944
2945void MarkCompactCollector::EvacuateNewSpace() {
2946  // There are soft limits in the allocation code, designed trigger a mark
2947  // sweep collection by failing allocations.  But since we are already in
2948  // a mark-sweep allocation, there is no sense in trying to trigger one.
2949  AlwaysAllocateScope scope;
2950  heap()->CheckNewSpaceExpansionCriteria();
2951
2952  NewSpace* new_space = heap()->new_space();
2953
2954  // Store allocation range before flipping semispaces.
2955  Address from_bottom = new_space->bottom();
2956  Address from_top = new_space->top();
2957
2958  // Flip the semispaces.  After flipping, to space is empty, from space has
2959  // live objects.
2960  new_space->Flip();
2961  new_space->ResetAllocationInfo();
2962
2963  int survivors_size = 0;
2964
2965  // First pass: traverse all objects in inactive semispace, remove marks,
2966  // migrate live objects and write forwarding addresses.  This stage puts
2967  // new entries in the store buffer and may cause some pages to be marked
2968  // scan-on-scavenge.
2969  NewSpacePageIterator it(from_bottom, from_top);
2970  while (it.has_next()) {
2971    NewSpacePage* p = it.next();
2972    survivors_size += DiscoverAndPromoteBlackObjectsOnPage(new_space, p);
2973  }
2974
2975  heap_->IncrementYoungSurvivorsCounter(survivors_size);
2976  new_space->set_age_mark(new_space->top());
2977}
2978
2979
2980void MarkCompactCollector::EvacuateLiveObjectsFromPage(Page* p) {
2981  AlwaysAllocateScope always_allocate;
2982  PagedSpace* space = static_cast<PagedSpace*>(p->owner());
2983  ASSERT(p->IsEvacuationCandidate() && !p->WasSwept());
2984  p->MarkSweptPrecisely();
2985
2986  int offsets[16];
2987
2988  for (MarkBitCellIterator it(p); !it.Done(); it.Advance()) {
2989    Address cell_base = it.CurrentCellBase();
2990    MarkBit::CellType* cell = it.CurrentCell();
2991
2992    if (*cell == 0) continue;
2993
2994    int live_objects = MarkWordToObjectStarts(*cell, offsets);
2995    for (int i = 0; i < live_objects; i++) {
2996      Address object_addr = cell_base + offsets[i] * kPointerSize;
2997      HeapObject* object = HeapObject::FromAddress(object_addr);
2998      ASSERT(Marking::IsBlack(Marking::MarkBitFrom(object)));
2999
3000      int size = object->Size();
3001
3002      MaybeObject* target = space->AllocateRaw(size);
3003      if (target->IsFailure()) {
3004        // OS refused to give us memory.
3005        V8::FatalProcessOutOfMemory("Evacuation");
3006        return;
3007      }
3008
3009      Object* target_object = target->ToObjectUnchecked();
3010
3011      MigrateObject(HeapObject::cast(target_object)->address(),
3012                    object_addr,
3013                    size,
3014                    space->identity());
3015      ASSERT(object->map_word().IsForwardingAddress());
3016    }
3017
3018    // Clear marking bits for current cell.
3019    *cell = 0;
3020  }
3021  p->ResetLiveBytes();
3022}
3023
3024
3025void MarkCompactCollector::EvacuatePages() {
3026  int npages = evacuation_candidates_.length();
3027  for (int i = 0; i < npages; i++) {
3028    Page* p = evacuation_candidates_[i];
3029    ASSERT(p->IsEvacuationCandidate() ||
3030           p->IsFlagSet(Page::RESCAN_ON_EVACUATION));
3031    if (p->IsEvacuationCandidate()) {
3032      // During compaction we might have to request a new page.
3033      // Check that space still have room for that.
3034      if (static_cast<PagedSpace*>(p->owner())->CanExpand()) {
3035        EvacuateLiveObjectsFromPage(p);
3036      } else {
3037        // Without room for expansion evacuation is not guaranteed to succeed.
3038        // Pessimistically abandon unevacuated pages.
3039        for (int j = i; j < npages; j++) {
3040          Page* page = evacuation_candidates_[j];
3041          slots_buffer_allocator_.DeallocateChain(page->slots_buffer_address());
3042          page->ClearEvacuationCandidate();
3043          page->SetFlag(Page::RESCAN_ON_EVACUATION);
3044          page->InsertAfter(static_cast<PagedSpace*>(page->owner())->anchor());
3045        }
3046        return;
3047      }
3048    }
3049  }
3050}
3051
3052
3053class EvacuationWeakObjectRetainer : public WeakObjectRetainer {
3054 public:
3055  virtual Object* RetainAs(Object* object) {
3056    if (object->IsHeapObject()) {
3057      HeapObject* heap_object = HeapObject::cast(object);
3058      MapWord map_word = heap_object->map_word();
3059      if (map_word.IsForwardingAddress()) {
3060        return map_word.ToForwardingAddress();
3061      }
3062    }
3063    return object;
3064  }
3065};
3066
3067
3068static inline void UpdateSlot(ObjectVisitor* v,
3069                              SlotsBuffer::SlotType slot_type,
3070                              Address addr) {
3071  switch (slot_type) {
3072    case SlotsBuffer::CODE_TARGET_SLOT: {
3073      RelocInfo rinfo(addr, RelocInfo::CODE_TARGET, 0, NULL);
3074      rinfo.Visit(v);
3075      break;
3076    }
3077    case SlotsBuffer::CODE_ENTRY_SLOT: {
3078      v->VisitCodeEntry(addr);
3079      break;
3080    }
3081    case SlotsBuffer::RELOCATED_CODE_OBJECT: {
3082      HeapObject* obj = HeapObject::FromAddress(addr);
3083      Code::cast(obj)->CodeIterateBody(v);
3084      break;
3085    }
3086    case SlotsBuffer::DEBUG_TARGET_SLOT: {
3087      RelocInfo rinfo(addr, RelocInfo::DEBUG_BREAK_SLOT, 0, NULL);
3088      if (rinfo.IsPatchedDebugBreakSlotSequence()) rinfo.Visit(v);
3089      break;
3090    }
3091    case SlotsBuffer::JS_RETURN_SLOT: {
3092      RelocInfo rinfo(addr, RelocInfo::JS_RETURN, 0, NULL);
3093      if (rinfo.IsPatchedReturnSequence()) rinfo.Visit(v);
3094      break;
3095    }
3096    case SlotsBuffer::EMBEDDED_OBJECT_SLOT: {
3097      RelocInfo rinfo(addr, RelocInfo::EMBEDDED_OBJECT, 0, NULL);
3098      rinfo.Visit(v);
3099      break;
3100    }
3101    default:
3102      UNREACHABLE();
3103      break;
3104  }
3105}
3106
3107
3108enum SweepingMode {
3109  SWEEP_ONLY,
3110  SWEEP_AND_VISIT_LIVE_OBJECTS
3111};
3112
3113
3114enum SkipListRebuildingMode {
3115  REBUILD_SKIP_LIST,
3116  IGNORE_SKIP_LIST
3117};
3118
3119
3120// Sweep a space precisely.  After this has been done the space can
3121// be iterated precisely, hitting only the live objects.  Code space
3122// is always swept precisely because we want to be able to iterate
3123// over it.  Map space is swept precisely, because it is not compacted.
3124// Slots in live objects pointing into evacuation candidates are updated
3125// if requested.
3126template<SweepingMode sweeping_mode, SkipListRebuildingMode skip_list_mode>
3127static void SweepPrecisely(PagedSpace* space,
3128                           Page* p,
3129                           ObjectVisitor* v) {
3130  ASSERT(!p->IsEvacuationCandidate() && !p->WasSwept());
3131  ASSERT_EQ(skip_list_mode == REBUILD_SKIP_LIST,
3132            space->identity() == CODE_SPACE);
3133  ASSERT((p->skip_list() == NULL) || (skip_list_mode == REBUILD_SKIP_LIST));
3134
3135  double start_time = 0.0;
3136  if (FLAG_print_cumulative_gc_stat) {
3137    start_time = OS::TimeCurrentMillis();
3138  }
3139
3140  p->MarkSweptPrecisely();
3141
3142  Address free_start = p->area_start();
3143  ASSERT(reinterpret_cast<intptr_t>(free_start) % (32 * kPointerSize) == 0);
3144  int offsets[16];
3145
3146  SkipList* skip_list = p->skip_list();
3147  int curr_region = -1;
3148  if ((skip_list_mode == REBUILD_SKIP_LIST) && skip_list) {
3149    skip_list->Clear();
3150  }
3151
3152  for (MarkBitCellIterator it(p); !it.Done(); it.Advance()) {
3153    Address cell_base = it.CurrentCellBase();
3154    MarkBit::CellType* cell = it.CurrentCell();
3155    int live_objects = MarkWordToObjectStarts(*cell, offsets);
3156    int live_index = 0;
3157    for ( ; live_objects != 0; live_objects--) {
3158      Address free_end = cell_base + offsets[live_index++] * kPointerSize;
3159      if (free_end != free_start) {
3160        space->Free(free_start, static_cast<int>(free_end - free_start));
3161#ifdef ENABLE_GDB_JIT_INTERFACE
3162        if (FLAG_gdbjit && space->identity() == CODE_SPACE) {
3163          GDBJITInterface::RemoveCodeRange(free_start, free_end);
3164        }
3165#endif
3166      }
3167      HeapObject* live_object = HeapObject::FromAddress(free_end);
3168      ASSERT(Marking::IsBlack(Marking::MarkBitFrom(live_object)));
3169      Map* map = live_object->map();
3170      int size = live_object->SizeFromMap(map);
3171      if (sweeping_mode == SWEEP_AND_VISIT_LIVE_OBJECTS) {
3172        live_object->IterateBody(map->instance_type(), size, v);
3173      }
3174      if ((skip_list_mode == REBUILD_SKIP_LIST) && skip_list != NULL) {
3175        int new_region_start =
3176            SkipList::RegionNumber(free_end);
3177        int new_region_end =
3178            SkipList::RegionNumber(free_end + size - kPointerSize);
3179        if (new_region_start != curr_region ||
3180            new_region_end != curr_region) {
3181          skip_list->AddObject(free_end, size);
3182          curr_region = new_region_end;
3183        }
3184      }
3185      free_start = free_end + size;
3186    }
3187    // Clear marking bits for current cell.
3188    *cell = 0;
3189  }
3190  if (free_start != p->area_end()) {
3191    space->Free(free_start, static_cast<int>(p->area_end() - free_start));
3192#ifdef ENABLE_GDB_JIT_INTERFACE
3193    if (FLAG_gdbjit && space->identity() == CODE_SPACE) {
3194      GDBJITInterface::RemoveCodeRange(free_start, p->area_end());
3195    }
3196#endif
3197  }
3198  p->ResetLiveBytes();
3199  if (FLAG_print_cumulative_gc_stat) {
3200    space->heap()->AddSweepingTime(OS::TimeCurrentMillis() - start_time);
3201  }
3202}
3203
3204
3205static bool SetMarkBitsUnderInvalidatedCode(Code* code, bool value) {
3206  Page* p = Page::FromAddress(code->address());
3207
3208  if (p->IsEvacuationCandidate() ||
3209      p->IsFlagSet(Page::RESCAN_ON_EVACUATION)) {
3210    return false;
3211  }
3212
3213  Address code_start = code->address();
3214  Address code_end = code_start + code->Size();
3215
3216  uint32_t start_index = MemoryChunk::FastAddressToMarkbitIndex(code_start);
3217  uint32_t end_index =
3218      MemoryChunk::FastAddressToMarkbitIndex(code_end - kPointerSize);
3219
3220  Bitmap* b = p->markbits();
3221
3222  MarkBit start_mark_bit = b->MarkBitFromIndex(start_index);
3223  MarkBit end_mark_bit = b->MarkBitFromIndex(end_index);
3224
3225  MarkBit::CellType* start_cell = start_mark_bit.cell();
3226  MarkBit::CellType* end_cell = end_mark_bit.cell();
3227
3228  if (value) {
3229    MarkBit::CellType start_mask = ~(start_mark_bit.mask() - 1);
3230    MarkBit::CellType end_mask = (end_mark_bit.mask() << 1) - 1;
3231
3232    if (start_cell == end_cell) {
3233      *start_cell |= start_mask & end_mask;
3234    } else {
3235      *start_cell |= start_mask;
3236      for (MarkBit::CellType* cell = start_cell + 1; cell < end_cell; cell++) {
3237        *cell = ~0;
3238      }
3239      *end_cell |= end_mask;
3240    }
3241  } else {
3242    for (MarkBit::CellType* cell = start_cell ; cell <= end_cell; cell++) {
3243      *cell = 0;
3244    }
3245  }
3246
3247  return true;
3248}
3249
3250
3251static bool IsOnInvalidatedCodeObject(Address addr) {
3252  // We did not record any slots in large objects thus
3253  // we can safely go to the page from the slot address.
3254  Page* p = Page::FromAddress(addr);
3255
3256  // First check owner's identity because old pointer and old data spaces
3257  // are swept lazily and might still have non-zero mark-bits on some
3258  // pages.
3259  if (p->owner()->identity() != CODE_SPACE) return false;
3260
3261  // In code space only bits on evacuation candidates (but we don't record
3262  // any slots on them) and under invalidated code objects are non-zero.
3263  MarkBit mark_bit =
3264      p->markbits()->MarkBitFromIndex(Page::FastAddressToMarkbitIndex(addr));
3265
3266  return mark_bit.Get();
3267}
3268
3269
3270void MarkCompactCollector::InvalidateCode(Code* code) {
3271  if (heap_->incremental_marking()->IsCompacting() &&
3272      !ShouldSkipEvacuationSlotRecording(code)) {
3273    ASSERT(compacting_);
3274
3275    // If the object is white than no slots were recorded on it yet.
3276    MarkBit mark_bit = Marking::MarkBitFrom(code);
3277    if (Marking::IsWhite(mark_bit)) return;
3278
3279    invalidated_code_.Add(code);
3280  }
3281}
3282
3283
3284// Return true if the given code is deoptimized or will be deoptimized.
3285bool MarkCompactCollector::WillBeDeoptimized(Code* code) {
3286  // We assume the code_to_deoptimize_link is initialized to undefined.
3287  // If it is 0, or refers to another Code object, then this code
3288  // is already linked, or was already linked into the list.
3289  return code->code_to_deoptimize_link() != heap()->undefined_value()
3290      || code->marked_for_deoptimization();
3291}
3292
3293
3294bool MarkCompactCollector::MarkInvalidatedCode() {
3295  bool code_marked = false;
3296
3297  int length = invalidated_code_.length();
3298  for (int i = 0; i < length; i++) {
3299    Code* code = invalidated_code_[i];
3300
3301    if (SetMarkBitsUnderInvalidatedCode(code, true)) {
3302      code_marked = true;
3303    }
3304  }
3305
3306  return code_marked;
3307}
3308
3309
3310void MarkCompactCollector::RemoveDeadInvalidatedCode() {
3311  int length = invalidated_code_.length();
3312  for (int i = 0; i < length; i++) {
3313    if (!IsMarked(invalidated_code_[i])) invalidated_code_[i] = NULL;
3314  }
3315}
3316
3317
3318void MarkCompactCollector::ProcessInvalidatedCode(ObjectVisitor* visitor) {
3319  int length = invalidated_code_.length();
3320  for (int i = 0; i < length; i++) {
3321    Code* code = invalidated_code_[i];
3322    if (code != NULL) {
3323      code->Iterate(visitor);
3324      SetMarkBitsUnderInvalidatedCode(code, false);
3325    }
3326  }
3327  invalidated_code_.Rewind(0);
3328}
3329
3330
3331void MarkCompactCollector::EvacuateNewSpaceAndCandidates() {
3332  Heap::RelocationLock relocation_lock(heap());
3333
3334  bool code_slots_filtering_required;
3335  { GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_SWEEP_NEWSPACE);
3336    code_slots_filtering_required = MarkInvalidatedCode();
3337    EvacuateNewSpace();
3338  }
3339
3340  { GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_EVACUATE_PAGES);
3341    EvacuatePages();
3342  }
3343
3344  // Second pass: find pointers to new space and update them.
3345  PointersUpdatingVisitor updating_visitor(heap());
3346
3347  { GCTracer::Scope gc_scope(tracer_,
3348                             GCTracer::Scope::MC_UPDATE_NEW_TO_NEW_POINTERS);
3349    // Update pointers in to space.
3350    SemiSpaceIterator to_it(heap()->new_space()->bottom(),
3351                            heap()->new_space()->top());
3352    for (HeapObject* object = to_it.Next();
3353         object != NULL;
3354         object = to_it.Next()) {
3355      Map* map = object->map();
3356      object->IterateBody(map->instance_type(),
3357                          object->SizeFromMap(map),
3358                          &updating_visitor);
3359    }
3360  }
3361
3362  { GCTracer::Scope gc_scope(tracer_,
3363                             GCTracer::Scope::MC_UPDATE_ROOT_TO_NEW_POINTERS);
3364    // Update roots.
3365    heap_->IterateRoots(&updating_visitor, VISIT_ALL_IN_SWEEP_NEWSPACE);
3366  }
3367
3368  { GCTracer::Scope gc_scope(tracer_,
3369                             GCTracer::Scope::MC_UPDATE_OLD_TO_NEW_POINTERS);
3370    StoreBufferRebuildScope scope(heap_,
3371                                  heap_->store_buffer(),
3372                                  &Heap::ScavengeStoreBufferCallback);
3373    heap_->store_buffer()->IteratePointersToNewSpaceAndClearMaps(
3374        &UpdatePointer);
3375  }
3376
3377  { GCTracer::Scope gc_scope(tracer_,
3378                             GCTracer::Scope::MC_UPDATE_POINTERS_TO_EVACUATED);
3379    SlotsBuffer::UpdateSlotsRecordedIn(heap_,
3380                                       migration_slots_buffer_,
3381                                       code_slots_filtering_required);
3382    if (FLAG_trace_fragmentation) {
3383      PrintF("  migration slots buffer: %d\n",
3384             SlotsBuffer::SizeOfChain(migration_slots_buffer_));
3385    }
3386
3387    if (compacting_ && was_marked_incrementally_) {
3388      // It's difficult to filter out slots recorded for large objects.
3389      LargeObjectIterator it(heap_->lo_space());
3390      for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
3391        // LargeObjectSpace is not swept yet thus we have to skip
3392        // dead objects explicitly.
3393        if (!IsMarked(obj)) continue;
3394
3395        Page* p = Page::FromAddress(obj->address());
3396        if (p->IsFlagSet(Page::RESCAN_ON_EVACUATION)) {
3397          obj->Iterate(&updating_visitor);
3398          p->ClearFlag(Page::RESCAN_ON_EVACUATION);
3399        }
3400      }
3401    }
3402  }
3403
3404  int npages = evacuation_candidates_.length();
3405  { GCTracer::Scope gc_scope(
3406      tracer_, GCTracer::Scope::MC_UPDATE_POINTERS_BETWEEN_EVACUATED);
3407    for (int i = 0; i < npages; i++) {
3408      Page* p = evacuation_candidates_[i];
3409      ASSERT(p->IsEvacuationCandidate() ||
3410             p->IsFlagSet(Page::RESCAN_ON_EVACUATION));
3411
3412      if (p->IsEvacuationCandidate()) {
3413        SlotsBuffer::UpdateSlotsRecordedIn(heap_,
3414                                           p->slots_buffer(),
3415                                           code_slots_filtering_required);
3416        if (FLAG_trace_fragmentation) {
3417          PrintF("  page %p slots buffer: %d\n",
3418                 reinterpret_cast<void*>(p),
3419                 SlotsBuffer::SizeOfChain(p->slots_buffer()));
3420        }
3421
3422        // Important: skip list should be cleared only after roots were updated
3423        // because root iteration traverses the stack and might have to find
3424        // code objects from non-updated pc pointing into evacuation candidate.
3425        SkipList* list = p->skip_list();
3426        if (list != NULL) list->Clear();
3427      } else {
3428        if (FLAG_gc_verbose) {
3429          PrintF("Sweeping 0x%" V8PRIxPTR " during evacuation.\n",
3430                 reinterpret_cast<intptr_t>(p));
3431        }
3432        PagedSpace* space = static_cast<PagedSpace*>(p->owner());
3433        p->ClearFlag(MemoryChunk::RESCAN_ON_EVACUATION);
3434
3435        switch (space->identity()) {
3436          case OLD_DATA_SPACE:
3437            SweepConservatively<SWEEP_SEQUENTIALLY>(space, NULL, p);
3438            break;
3439          case OLD_POINTER_SPACE:
3440            SweepPrecisely<SWEEP_AND_VISIT_LIVE_OBJECTS, IGNORE_SKIP_LIST>(
3441                space, p, &updating_visitor);
3442            break;
3443          case CODE_SPACE:
3444            SweepPrecisely<SWEEP_AND_VISIT_LIVE_OBJECTS, REBUILD_SKIP_LIST>(
3445                space, p, &updating_visitor);
3446            break;
3447          default:
3448            UNREACHABLE();
3449            break;
3450        }
3451      }
3452    }
3453  }
3454
3455  GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_UPDATE_MISC_POINTERS);
3456
3457  // Update pointers from cells.
3458  HeapObjectIterator cell_iterator(heap_->cell_space());
3459  for (HeapObject* cell = cell_iterator.Next();
3460       cell != NULL;
3461       cell = cell_iterator.Next()) {
3462    if (cell->IsCell()) {
3463      Cell::BodyDescriptor::IterateBody(cell, &updating_visitor);
3464    }
3465  }
3466
3467  HeapObjectIterator js_global_property_cell_iterator(
3468      heap_->property_cell_space());
3469  for (HeapObject* cell = js_global_property_cell_iterator.Next();
3470       cell != NULL;
3471       cell = js_global_property_cell_iterator.Next()) {
3472    if (cell->IsPropertyCell()) {
3473      PropertyCell::BodyDescriptor::IterateBody(cell, &updating_visitor);
3474    }
3475  }
3476
3477  // Update the heads of the native contexts list the code to deoptimize list.
3478  updating_visitor.VisitPointer(heap_->native_contexts_list_address());
3479  updating_visitor.VisitPointer(&code_to_deoptimize_);
3480
3481  heap_->string_table()->Iterate(&updating_visitor);
3482
3483  // Update pointers from external string table.
3484  heap_->UpdateReferencesInExternalStringTable(
3485      &UpdateReferenceInExternalStringTableEntry);
3486
3487  if (!FLAG_watch_ic_patching) {
3488    // Update JSFunction pointers from the runtime profiler.
3489    heap()->isolate()->runtime_profiler()->UpdateSamplesAfterCompact(
3490        &updating_visitor);
3491  }
3492
3493  EvacuationWeakObjectRetainer evacuation_object_retainer;
3494  heap()->ProcessWeakReferences(&evacuation_object_retainer);
3495
3496  // Visit invalidated code (we ignored all slots on it) and clear mark-bits
3497  // under it.
3498  ProcessInvalidatedCode(&updating_visitor);
3499
3500  heap_->isolate()->inner_pointer_to_code_cache()->Flush();
3501
3502#ifdef VERIFY_HEAP
3503  if (FLAG_verify_heap) {
3504    VerifyEvacuation(heap_);
3505  }
3506#endif
3507
3508  slots_buffer_allocator_.DeallocateChain(&migration_slots_buffer_);
3509  ASSERT(migration_slots_buffer_ == NULL);
3510}
3511
3512
3513void MarkCompactCollector::UnlinkEvacuationCandidates() {
3514  int npages = evacuation_candidates_.length();
3515  for (int i = 0; i < npages; i++) {
3516    Page* p = evacuation_candidates_[i];
3517    if (!p->IsEvacuationCandidate()) continue;
3518    p->Unlink();
3519    p->ClearSweptPrecisely();
3520    p->ClearSweptConservatively();
3521  }
3522}
3523
3524
3525void MarkCompactCollector::ReleaseEvacuationCandidates() {
3526  int npages = evacuation_candidates_.length();
3527  for (int i = 0; i < npages; i++) {
3528    Page* p = evacuation_candidates_[i];
3529    if (!p->IsEvacuationCandidate()) continue;
3530    PagedSpace* space = static_cast<PagedSpace*>(p->owner());
3531    space->Free(p->area_start(), p->area_size());
3532    p->set_scan_on_scavenge(false);
3533    slots_buffer_allocator_.DeallocateChain(p->slots_buffer_address());
3534    p->ResetLiveBytes();
3535    space->ReleasePage(p, false);
3536  }
3537  evacuation_candidates_.Rewind(0);
3538  compacting_ = false;
3539  heap()->FreeQueuedChunks();
3540}
3541
3542
3543static const int kStartTableEntriesPerLine = 5;
3544static const int kStartTableLines = 171;
3545static const int kStartTableInvalidLine = 127;
3546static const int kStartTableUnusedEntry = 126;
3547
3548#define _ kStartTableUnusedEntry
3549#define X kStartTableInvalidLine
3550// Mark-bit to object start offset table.
3551//
3552// The line is indexed by the mark bits in a byte.  The first number on
3553// the line describes the number of live object starts for the line and the
3554// other numbers on the line describe the offsets (in words) of the object
3555// starts.
3556//
3557// Since objects are at least 2 words large we don't have entries for two
3558// consecutive 1 bits.  All entries after 170 have at least 2 consecutive bits.
3559char kStartTable[kStartTableLines * kStartTableEntriesPerLine] = {
3560  0, _, _, _, _,  // 0
3561  1, 0, _, _, _,  // 1
3562  1, 1, _, _, _,  // 2
3563  X, _, _, _, _,  // 3
3564  1, 2, _, _, _,  // 4
3565  2, 0, 2, _, _,  // 5
3566  X, _, _, _, _,  // 6
3567  X, _, _, _, _,  // 7
3568  1, 3, _, _, _,  // 8
3569  2, 0, 3, _, _,  // 9
3570  2, 1, 3, _, _,  // 10
3571  X, _, _, _, _,  // 11
3572  X, _, _, _, _,  // 12
3573  X, _, _, _, _,  // 13
3574  X, _, _, _, _,  // 14
3575  X, _, _, _, _,  // 15
3576  1, 4, _, _, _,  // 16
3577  2, 0, 4, _, _,  // 17
3578  2, 1, 4, _, _,  // 18
3579  X, _, _, _, _,  // 19
3580  2, 2, 4, _, _,  // 20
3581  3, 0, 2, 4, _,  // 21
3582  X, _, _, _, _,  // 22
3583  X, _, _, _, _,  // 23
3584  X, _, _, _, _,  // 24
3585  X, _, _, _, _,  // 25
3586  X, _, _, _, _,  // 26
3587  X, _, _, _, _,  // 27
3588  X, _, _, _, _,  // 28
3589  X, _, _, _, _,  // 29
3590  X, _, _, _, _,  // 30
3591  X, _, _, _, _,  // 31
3592  1, 5, _, _, _,  // 32
3593  2, 0, 5, _, _,  // 33
3594  2, 1, 5, _, _,  // 34
3595  X, _, _, _, _,  // 35
3596  2, 2, 5, _, _,  // 36
3597  3, 0, 2, 5, _,  // 37
3598  X, _, _, _, _,  // 38
3599  X, _, _, _, _,  // 39
3600  2, 3, 5, _, _,  // 40
3601  3, 0, 3, 5, _,  // 41
3602  3, 1, 3, 5, _,  // 42
3603  X, _, _, _, _,  // 43
3604  X, _, _, _, _,  // 44
3605  X, _, _, _, _,  // 45
3606  X, _, _, _, _,  // 46
3607  X, _, _, _, _,  // 47
3608  X, _, _, _, _,  // 48
3609  X, _, _, _, _,  // 49
3610  X, _, _, _, _,  // 50
3611  X, _, _, _, _,  // 51
3612  X, _, _, _, _,  // 52
3613  X, _, _, _, _,  // 53
3614  X, _, _, _, _,  // 54
3615  X, _, _, _, _,  // 55
3616  X, _, _, _, _,  // 56
3617  X, _, _, _, _,  // 57
3618  X, _, _, _, _,  // 58
3619  X, _, _, _, _,  // 59
3620  X, _, _, _, _,  // 60
3621  X, _, _, _, _,  // 61
3622  X, _, _, _, _,  // 62
3623  X, _, _, _, _,  // 63
3624  1, 6, _, _, _,  // 64
3625  2, 0, 6, _, _,  // 65
3626  2, 1, 6, _, _,  // 66
3627  X, _, _, _, _,  // 67
3628  2, 2, 6, _, _,  // 68
3629  3, 0, 2, 6, _,  // 69
3630  X, _, _, _, _,  // 70
3631  X, _, _, _, _,  // 71
3632  2, 3, 6, _, _,  // 72
3633  3, 0, 3, 6, _,  // 73
3634  3, 1, 3, 6, _,  // 74
3635  X, _, _, _, _,  // 75
3636  X, _, _, _, _,  // 76
3637  X, _, _, _, _,  // 77
3638  X, _, _, _, _,  // 78
3639  X, _, _, _, _,  // 79
3640  2, 4, 6, _, _,  // 80
3641  3, 0, 4, 6, _,  // 81
3642  3, 1, 4, 6, _,  // 82
3643  X, _, _, _, _,  // 83
3644  3, 2, 4, 6, _,  // 84
3645  4, 0, 2, 4, 6,  // 85
3646  X, _, _, _, _,  // 86
3647  X, _, _, _, _,  // 87
3648  X, _, _, _, _,  // 88
3649  X, _, _, _, _,  // 89
3650  X, _, _, _, _,  // 90
3651  X, _, _, _, _,  // 91
3652  X, _, _, _, _,  // 92
3653  X, _, _, _, _,  // 93
3654  X, _, _, _, _,  // 94
3655  X, _, _, _, _,  // 95
3656  X, _, _, _, _,  // 96
3657  X, _, _, _, _,  // 97
3658  X, _, _, _, _,  // 98
3659  X, _, _, _, _,  // 99
3660  X, _, _, _, _,  // 100
3661  X, _, _, _, _,  // 101
3662  X, _, _, _, _,  // 102
3663  X, _, _, _, _,  // 103
3664  X, _, _, _, _,  // 104
3665  X, _, _, _, _,  // 105
3666  X, _, _, _, _,  // 106
3667  X, _, _, _, _,  // 107
3668  X, _, _, _, _,  // 108
3669  X, _, _, _, _,  // 109
3670  X, _, _, _, _,  // 110
3671  X, _, _, _, _,  // 111
3672  X, _, _, _, _,  // 112
3673  X, _, _, _, _,  // 113
3674  X, _, _, _, _,  // 114
3675  X, _, _, _, _,  // 115
3676  X, _, _, _, _,  // 116
3677  X, _, _, _, _,  // 117
3678  X, _, _, _, _,  // 118
3679  X, _, _, _, _,  // 119
3680  X, _, _, _, _,  // 120
3681  X, _, _, _, _,  // 121
3682  X, _, _, _, _,  // 122
3683  X, _, _, _, _,  // 123
3684  X, _, _, _, _,  // 124
3685  X, _, _, _, _,  // 125
3686  X, _, _, _, _,  // 126
3687  X, _, _, _, _,  // 127
3688  1, 7, _, _, _,  // 128
3689  2, 0, 7, _, _,  // 129
3690  2, 1, 7, _, _,  // 130
3691  X, _, _, _, _,  // 131
3692  2, 2, 7, _, _,  // 132
3693  3, 0, 2, 7, _,  // 133
3694  X, _, _, _, _,  // 134
3695  X, _, _, _, _,  // 135
3696  2, 3, 7, _, _,  // 136
3697  3, 0, 3, 7, _,  // 137
3698  3, 1, 3, 7, _,  // 138
3699  X, _, _, _, _,  // 139
3700  X, _, _, _, _,  // 140
3701  X, _, _, _, _,  // 141
3702  X, _, _, _, _,  // 142
3703  X, _, _, _, _,  // 143
3704  2, 4, 7, _, _,  // 144
3705  3, 0, 4, 7, _,  // 145
3706  3, 1, 4, 7, _,  // 146
3707  X, _, _, _, _,  // 147
3708  3, 2, 4, 7, _,  // 148
3709  4, 0, 2, 4, 7,  // 149
3710  X, _, _, _, _,  // 150
3711  X, _, _, _, _,  // 151
3712  X, _, _, _, _,  // 152
3713  X, _, _, _, _,  // 153
3714  X, _, _, _, _,  // 154
3715  X, _, _, _, _,  // 155
3716  X, _, _, _, _,  // 156
3717  X, _, _, _, _,  // 157
3718  X, _, _, _, _,  // 158
3719  X, _, _, _, _,  // 159
3720  2, 5, 7, _, _,  // 160
3721  3, 0, 5, 7, _,  // 161
3722  3, 1, 5, 7, _,  // 162
3723  X, _, _, _, _,  // 163
3724  3, 2, 5, 7, _,  // 164
3725  4, 0, 2, 5, 7,  // 165
3726  X, _, _, _, _,  // 166
3727  X, _, _, _, _,  // 167
3728  3, 3, 5, 7, _,  // 168
3729  4, 0, 3, 5, 7,  // 169
3730  4, 1, 3, 5, 7   // 170
3731};
3732#undef _
3733#undef X
3734
3735
3736// Takes a word of mark bits.  Returns the number of objects that start in the
3737// range.  Puts the offsets of the words in the supplied array.
3738static inline int MarkWordToObjectStarts(uint32_t mark_bits, int* starts) {
3739  int objects = 0;
3740  int offset = 0;
3741
3742  // No consecutive 1 bits.
3743  ASSERT((mark_bits & 0x180) != 0x180);
3744  ASSERT((mark_bits & 0x18000) != 0x18000);
3745  ASSERT((mark_bits & 0x1800000) != 0x1800000);
3746
3747  while (mark_bits != 0) {
3748    int byte = (mark_bits & 0xff);
3749    mark_bits >>= 8;
3750    if (byte != 0) {
3751      ASSERT(byte < kStartTableLines);  // No consecutive 1 bits.
3752      char* table = kStartTable + byte * kStartTableEntriesPerLine;
3753      int objects_in_these_8_words = table[0];
3754      ASSERT(objects_in_these_8_words != kStartTableInvalidLine);
3755      ASSERT(objects_in_these_8_words < kStartTableEntriesPerLine);
3756      for (int i = 0; i < objects_in_these_8_words; i++) {
3757        starts[objects++] = offset + table[1 + i];
3758      }
3759    }
3760    offset += 8;
3761  }
3762  return objects;
3763}
3764
3765
3766static inline Address DigestFreeStart(Address approximate_free_start,
3767                                      uint32_t free_start_cell) {
3768  ASSERT(free_start_cell != 0);
3769
3770  // No consecutive 1 bits.
3771  ASSERT((free_start_cell & (free_start_cell << 1)) == 0);
3772
3773  int offsets[16];
3774  uint32_t cell = free_start_cell;
3775  int offset_of_last_live;
3776  if ((cell & 0x80000000u) != 0) {
3777    // This case would overflow below.
3778    offset_of_last_live = 31;
3779  } else {
3780    // Remove all but one bit, the most significant.  This is an optimization
3781    // that may or may not be worthwhile.
3782    cell |= cell >> 16;
3783    cell |= cell >> 8;
3784    cell |= cell >> 4;
3785    cell |= cell >> 2;
3786    cell |= cell >> 1;
3787    cell = (cell + 1) >> 1;
3788    int live_objects = MarkWordToObjectStarts(cell, offsets);
3789    ASSERT(live_objects == 1);
3790    offset_of_last_live = offsets[live_objects - 1];
3791  }
3792  Address last_live_start =
3793      approximate_free_start + offset_of_last_live * kPointerSize;
3794  HeapObject* last_live = HeapObject::FromAddress(last_live_start);
3795  Address free_start = last_live_start + last_live->Size();
3796  return free_start;
3797}
3798
3799
3800static inline Address StartOfLiveObject(Address block_address, uint32_t cell) {
3801  ASSERT(cell != 0);
3802
3803  // No consecutive 1 bits.
3804  ASSERT((cell & (cell << 1)) == 0);
3805
3806  int offsets[16];
3807  if (cell == 0x80000000u) {  // Avoid overflow below.
3808    return block_address + 31 * kPointerSize;
3809  }
3810  uint32_t first_set_bit = ((cell ^ (cell - 1)) + 1) >> 1;
3811  ASSERT((first_set_bit & cell) == first_set_bit);
3812  int live_objects = MarkWordToObjectStarts(first_set_bit, offsets);
3813  ASSERT(live_objects == 1);
3814  USE(live_objects);
3815  return block_address + offsets[0] * kPointerSize;
3816}
3817
3818
3819template<MarkCompactCollector::SweepingParallelism mode>
3820static intptr_t Free(PagedSpace* space,
3821                     FreeList* free_list,
3822                     Address start,
3823                     int size) {
3824  if (mode == MarkCompactCollector::SWEEP_SEQUENTIALLY) {
3825    return space->Free(start, size);
3826  } else {
3827    return size - free_list->Free(start, size);
3828  }
3829}
3830
3831
3832// Force instantiation of templatized SweepConservatively method for
3833// SWEEP_SEQUENTIALLY mode.
3834template intptr_t MarkCompactCollector::
3835    SweepConservatively<MarkCompactCollector::SWEEP_SEQUENTIALLY>(
3836        PagedSpace*, FreeList*, Page*);
3837
3838
3839// Force instantiation of templatized SweepConservatively method for
3840// SWEEP_IN_PARALLEL mode.
3841template intptr_t MarkCompactCollector::
3842    SweepConservatively<MarkCompactCollector::SWEEP_IN_PARALLEL>(
3843        PagedSpace*, FreeList*, Page*);
3844
3845
3846// Sweeps a space conservatively.  After this has been done the larger free
3847// spaces have been put on the free list and the smaller ones have been
3848// ignored and left untouched.  A free space is always either ignored or put
3849// on the free list, never split up into two parts.  This is important
3850// because it means that any FreeSpace maps left actually describe a region of
3851// memory that can be ignored when scanning.  Dead objects other than free
3852// spaces will not contain the free space map.
3853template<MarkCompactCollector::SweepingParallelism mode>
3854intptr_t MarkCompactCollector::SweepConservatively(PagedSpace* space,
3855                                                   FreeList* free_list,
3856                                                   Page* p) {
3857  ASSERT(!p->IsEvacuationCandidate() && !p->WasSwept());
3858  ASSERT((mode == MarkCompactCollector::SWEEP_IN_PARALLEL &&
3859         free_list != NULL) ||
3860         (mode == MarkCompactCollector::SWEEP_SEQUENTIALLY &&
3861         free_list == NULL));
3862
3863  p->MarkSweptConservatively();
3864
3865  intptr_t freed_bytes = 0;
3866  size_t size = 0;
3867
3868  // Skip over all the dead objects at the start of the page and mark them free.
3869  Address cell_base = 0;
3870  MarkBit::CellType* cell = NULL;
3871  MarkBitCellIterator it(p);
3872  for (; !it.Done(); it.Advance()) {
3873    cell_base = it.CurrentCellBase();
3874    cell = it.CurrentCell();
3875    if (*cell != 0) break;
3876  }
3877
3878  if (it.Done()) {
3879    size = p->area_end() - p->area_start();
3880    freed_bytes += Free<mode>(space, free_list, p->area_start(),
3881                              static_cast<int>(size));
3882    ASSERT_EQ(0, p->LiveBytes());
3883    return freed_bytes;
3884  }
3885
3886  // Grow the size of the start-of-page free space a little to get up to the
3887  // first live object.
3888  Address free_end = StartOfLiveObject(cell_base, *cell);
3889  // Free the first free space.
3890  size = free_end - p->area_start();
3891  freed_bytes += Free<mode>(space, free_list, p->area_start(),
3892                            static_cast<int>(size));
3893
3894  // The start of the current free area is represented in undigested form by
3895  // the address of the last 32-word section that contained a live object and
3896  // the marking bitmap for that cell, which describes where the live object
3897  // started.  Unless we find a large free space in the bitmap we will not
3898  // digest this pair into a real address.  We start the iteration here at the
3899  // first word in the marking bit map that indicates a live object.
3900  Address free_start = cell_base;
3901  MarkBit::CellType free_start_cell = *cell;
3902
3903  for (; !it.Done(); it.Advance()) {
3904    cell_base = it.CurrentCellBase();
3905    cell = it.CurrentCell();
3906    if (*cell != 0) {
3907      // We have a live object.  Check approximately whether it is more than 32
3908      // words since the last live object.
3909      if (cell_base - free_start > 32 * kPointerSize) {
3910        free_start = DigestFreeStart(free_start, free_start_cell);
3911        if (cell_base - free_start > 32 * kPointerSize) {
3912          // Now that we know the exact start of the free space it still looks
3913          // like we have a large enough free space to be worth bothering with.
3914          // so now we need to find the start of the first live object at the
3915          // end of the free space.
3916          free_end = StartOfLiveObject(cell_base, *cell);
3917          freed_bytes += Free<mode>(space, free_list, free_start,
3918                                    static_cast<int>(free_end - free_start));
3919        }
3920      }
3921      // Update our undigested record of where the current free area started.
3922      free_start = cell_base;
3923      free_start_cell = *cell;
3924      // Clear marking bits for current cell.
3925      *cell = 0;
3926    }
3927  }
3928
3929  // Handle the free space at the end of the page.
3930  if (cell_base - free_start > 32 * kPointerSize) {
3931    free_start = DigestFreeStart(free_start, free_start_cell);
3932    freed_bytes += Free<mode>(space, free_list, free_start,
3933                              static_cast<int>(p->area_end() - free_start));
3934  }
3935
3936  p->ResetLiveBytes();
3937  return freed_bytes;
3938}
3939
3940
3941void MarkCompactCollector::SweepInParallel(PagedSpace* space,
3942                                           FreeList* private_free_list,
3943                                           FreeList* free_list) {
3944  PageIterator it(space);
3945  while (it.has_next()) {
3946    Page* p = it.next();
3947
3948    if (p->TryParallelSweeping()) {
3949      SweepConservatively<SWEEP_IN_PARALLEL>(space, private_free_list, p);
3950      free_list->Concatenate(private_free_list);
3951    }
3952  }
3953}
3954
3955
3956void MarkCompactCollector::SweepSpace(PagedSpace* space, SweeperType sweeper) {
3957  space->set_was_swept_conservatively(sweeper == CONSERVATIVE ||
3958                                      sweeper == LAZY_CONSERVATIVE ||
3959                                      sweeper == PARALLEL_CONSERVATIVE ||
3960                                      sweeper == CONCURRENT_CONSERVATIVE);
3961  space->ClearStats();
3962
3963  PageIterator it(space);
3964
3965  int pages_swept = 0;
3966  bool lazy_sweeping_active = false;
3967  bool unused_page_present = false;
3968  bool parallel_sweeping_active = false;
3969
3970  while (it.has_next()) {
3971    Page* p = it.next();
3972
3973    ASSERT(p->parallel_sweeping() == 0);
3974    ASSERT(!p->IsEvacuationCandidate());
3975
3976    // Clear sweeping flags indicating that marking bits are still intact.
3977    p->ClearSweptPrecisely();
3978    p->ClearSweptConservatively();
3979
3980    if (p->IsFlagSet(Page::RESCAN_ON_EVACUATION)) {
3981      // Will be processed in EvacuateNewSpaceAndCandidates.
3982      ASSERT(evacuation_candidates_.length() > 0);
3983      continue;
3984    }
3985
3986    // One unused page is kept, all further are released before sweeping them.
3987    if (p->LiveBytes() == 0) {
3988      if (unused_page_present) {
3989        if (FLAG_gc_verbose) {
3990          PrintF("Sweeping 0x%" V8PRIxPTR " released page.\n",
3991                 reinterpret_cast<intptr_t>(p));
3992        }
3993        // Adjust unswept free bytes because releasing a page expects said
3994        // counter to be accurate for unswept pages.
3995        space->IncreaseUnsweptFreeBytes(p);
3996        space->ReleasePage(p, true);
3997        continue;
3998      }
3999      unused_page_present = true;
4000    }
4001
4002    switch (sweeper) {
4003      case CONSERVATIVE: {
4004        if (FLAG_gc_verbose) {
4005          PrintF("Sweeping 0x%" V8PRIxPTR " conservatively.\n",
4006                 reinterpret_cast<intptr_t>(p));
4007        }
4008        SweepConservatively<SWEEP_SEQUENTIALLY>(space, NULL, p);
4009        pages_swept++;
4010        break;
4011      }
4012      case LAZY_CONSERVATIVE: {
4013        if (lazy_sweeping_active) {
4014          if (FLAG_gc_verbose) {
4015            PrintF("Sweeping 0x%" V8PRIxPTR " lazily postponed.\n",
4016                   reinterpret_cast<intptr_t>(p));
4017          }
4018          space->IncreaseUnsweptFreeBytes(p);
4019        } else {
4020          if (FLAG_gc_verbose) {
4021            PrintF("Sweeping 0x%" V8PRIxPTR " conservatively.\n",
4022                   reinterpret_cast<intptr_t>(p));
4023          }
4024          SweepConservatively<SWEEP_SEQUENTIALLY>(space, NULL, p);
4025          pages_swept++;
4026          space->SetPagesToSweep(p->next_page());
4027          lazy_sweeping_active = true;
4028        }
4029        break;
4030      }
4031      case CONCURRENT_CONSERVATIVE:
4032      case PARALLEL_CONSERVATIVE: {
4033        if (!parallel_sweeping_active) {
4034          if (FLAG_gc_verbose) {
4035            PrintF("Sweeping 0x%" V8PRIxPTR " conservatively.\n",
4036                   reinterpret_cast<intptr_t>(p));
4037          }
4038          SweepConservatively<SWEEP_SEQUENTIALLY>(space, NULL, p);
4039          pages_swept++;
4040          parallel_sweeping_active = true;
4041        } else {
4042          if (FLAG_gc_verbose) {
4043            PrintF("Sweeping 0x%" V8PRIxPTR " conservatively in parallel.\n",
4044                   reinterpret_cast<intptr_t>(p));
4045          }
4046          p->set_parallel_sweeping(1);
4047          space->IncreaseUnsweptFreeBytes(p);
4048        }
4049        break;
4050      }
4051      case PRECISE: {
4052        if (FLAG_gc_verbose) {
4053          PrintF("Sweeping 0x%" V8PRIxPTR " precisely.\n",
4054                 reinterpret_cast<intptr_t>(p));
4055        }
4056        if (space->identity() == CODE_SPACE) {
4057          SweepPrecisely<SWEEP_ONLY, REBUILD_SKIP_LIST>(space, p, NULL);
4058        } else {
4059          SweepPrecisely<SWEEP_ONLY, IGNORE_SKIP_LIST>(space, p, NULL);
4060        }
4061        pages_swept++;
4062        break;
4063      }
4064      default: {
4065        UNREACHABLE();
4066      }
4067    }
4068  }
4069
4070  if (FLAG_gc_verbose) {
4071    PrintF("SweepSpace: %s (%d pages swept)\n",
4072           AllocationSpaceName(space->identity()),
4073           pages_swept);
4074  }
4075
4076  // Give pages that are queued to be freed back to the OS.
4077  heap()->FreeQueuedChunks();
4078}
4079
4080
4081void MarkCompactCollector::SweepSpaces() {
4082  GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_SWEEP);
4083#ifdef DEBUG
4084  state_ = SWEEP_SPACES;
4085#endif
4086  SweeperType how_to_sweep =
4087      FLAG_lazy_sweeping ? LAZY_CONSERVATIVE : CONSERVATIVE;
4088  if (FLAG_parallel_sweeping) how_to_sweep = PARALLEL_CONSERVATIVE;
4089  if (FLAG_concurrent_sweeping) how_to_sweep = CONCURRENT_CONSERVATIVE;
4090  if (FLAG_expose_gc) how_to_sweep = CONSERVATIVE;
4091  if (sweep_precisely_) how_to_sweep = PRECISE;
4092
4093  // Unlink evacuation candidates before sweeper threads access the list of
4094  // pages to avoid race condition.
4095  UnlinkEvacuationCandidates();
4096
4097  // Noncompacting collections simply sweep the spaces to clear the mark
4098  // bits and free the nonlive blocks (for old and map spaces).  We sweep
4099  // the map space last because freeing non-live maps overwrites them and
4100  // the other spaces rely on possibly non-live maps to get the sizes for
4101  // non-live objects.
4102  SequentialSweepingScope scope(this);
4103  SweepSpace(heap()->old_pointer_space(), how_to_sweep);
4104  SweepSpace(heap()->old_data_space(), how_to_sweep);
4105
4106  if (how_to_sweep == PARALLEL_CONSERVATIVE ||
4107      how_to_sweep == CONCURRENT_CONSERVATIVE) {
4108    // TODO(hpayer): fix race with concurrent sweeper
4109    StartSweeperThreads();
4110  }
4111
4112  if (how_to_sweep == PARALLEL_CONSERVATIVE) {
4113    WaitUntilSweepingCompleted();
4114  }
4115
4116  RemoveDeadInvalidatedCode();
4117  SweepSpace(heap()->code_space(), PRECISE);
4118
4119  SweepSpace(heap()->cell_space(), PRECISE);
4120  SweepSpace(heap()->property_cell_space(), PRECISE);
4121
4122  EvacuateNewSpaceAndCandidates();
4123
4124  // ClearNonLiveTransitions depends on precise sweeping of map space to
4125  // detect whether unmarked map became dead in this collection or in one
4126  // of the previous ones.
4127  SweepSpace(heap()->map_space(), PRECISE);
4128
4129  // Deallocate unmarked objects and clear marked bits for marked objects.
4130  heap_->lo_space()->FreeUnmarkedObjects();
4131
4132  // Deallocate evacuated candidate pages.
4133  ReleaseEvacuationCandidates();
4134}
4135
4136
4137void MarkCompactCollector::EnableCodeFlushing(bool enable) {
4138#ifdef ENABLE_DEBUGGER_SUPPORT
4139  if (isolate()->debug()->IsLoaded() ||
4140      isolate()->debug()->has_break_points()) {
4141    enable = false;
4142  }
4143#endif
4144
4145  if (enable) {
4146    if (code_flusher_ != NULL) return;
4147    code_flusher_ = new CodeFlusher(isolate());
4148  } else {
4149    if (code_flusher_ == NULL) return;
4150    code_flusher_->EvictAllCandidates();
4151    delete code_flusher_;
4152    code_flusher_ = NULL;
4153  }
4154
4155  if (FLAG_trace_code_flushing) {
4156    PrintF("[code-flushing is now %s]\n", enable ? "on" : "off");
4157  }
4158}
4159
4160
4161// TODO(1466) ReportDeleteIfNeeded is not called currently.
4162// Our profiling tools do not expect intersections between
4163// code objects. We should either reenable it or change our tools.
4164void MarkCompactCollector::ReportDeleteIfNeeded(HeapObject* obj,
4165                                                Isolate* isolate) {
4166#ifdef ENABLE_GDB_JIT_INTERFACE
4167  if (obj->IsCode()) {
4168    GDBJITInterface::RemoveCode(reinterpret_cast<Code*>(obj));
4169  }
4170#endif
4171  if (obj->IsCode()) {
4172    PROFILE(isolate, CodeDeleteEvent(obj->address()));
4173  }
4174}
4175
4176
4177Isolate* MarkCompactCollector::isolate() const {
4178  return heap_->isolate();
4179}
4180
4181
4182void MarkCompactCollector::Initialize() {
4183  MarkCompactMarkingVisitor::Initialize();
4184  IncrementalMarking::Initialize();
4185}
4186
4187
4188bool SlotsBuffer::IsTypedSlot(ObjectSlot slot) {
4189  return reinterpret_cast<uintptr_t>(slot) < NUMBER_OF_SLOT_TYPES;
4190}
4191
4192
4193bool SlotsBuffer::AddTo(SlotsBufferAllocator* allocator,
4194                        SlotsBuffer** buffer_address,
4195                        SlotType type,
4196                        Address addr,
4197                        AdditionMode mode) {
4198  SlotsBuffer* buffer = *buffer_address;
4199  if (buffer == NULL || !buffer->HasSpaceForTypedSlot()) {
4200    if (mode == FAIL_ON_OVERFLOW && ChainLengthThresholdReached(buffer)) {
4201      allocator->DeallocateChain(buffer_address);
4202      return false;
4203    }
4204    buffer = allocator->AllocateBuffer(buffer);
4205    *buffer_address = buffer;
4206  }
4207  ASSERT(buffer->HasSpaceForTypedSlot());
4208  buffer->Add(reinterpret_cast<ObjectSlot>(type));
4209  buffer->Add(reinterpret_cast<ObjectSlot>(addr));
4210  return true;
4211}
4212
4213
4214static inline SlotsBuffer::SlotType SlotTypeForRMode(RelocInfo::Mode rmode) {
4215  if (RelocInfo::IsCodeTarget(rmode)) {
4216    return SlotsBuffer::CODE_TARGET_SLOT;
4217  } else if (RelocInfo::IsEmbeddedObject(rmode)) {
4218    return SlotsBuffer::EMBEDDED_OBJECT_SLOT;
4219  } else if (RelocInfo::IsDebugBreakSlot(rmode)) {
4220    return SlotsBuffer::DEBUG_TARGET_SLOT;
4221  } else if (RelocInfo::IsJSReturn(rmode)) {
4222    return SlotsBuffer::JS_RETURN_SLOT;
4223  }
4224  UNREACHABLE();
4225  return SlotsBuffer::NUMBER_OF_SLOT_TYPES;
4226}
4227
4228
4229void MarkCompactCollector::RecordRelocSlot(RelocInfo* rinfo, Object* target) {
4230  Page* target_page = Page::FromAddress(reinterpret_cast<Address>(target));
4231  if (target_page->IsEvacuationCandidate() &&
4232      (rinfo->host() == NULL ||
4233       !ShouldSkipEvacuationSlotRecording(rinfo->host()))) {
4234    if (!SlotsBuffer::AddTo(&slots_buffer_allocator_,
4235                            target_page->slots_buffer_address(),
4236                            SlotTypeForRMode(rinfo->rmode()),
4237                            rinfo->pc(),
4238                            SlotsBuffer::FAIL_ON_OVERFLOW)) {
4239      EvictEvacuationCandidate(target_page);
4240    }
4241  }
4242}
4243
4244
4245void MarkCompactCollector::RecordCodeEntrySlot(Address slot, Code* target) {
4246  Page* target_page = Page::FromAddress(reinterpret_cast<Address>(target));
4247  if (target_page->IsEvacuationCandidate() &&
4248      !ShouldSkipEvacuationSlotRecording(reinterpret_cast<Object**>(slot))) {
4249    if (!SlotsBuffer::AddTo(&slots_buffer_allocator_,
4250                            target_page->slots_buffer_address(),
4251                            SlotsBuffer::CODE_ENTRY_SLOT,
4252                            slot,
4253                            SlotsBuffer::FAIL_ON_OVERFLOW)) {
4254      EvictEvacuationCandidate(target_page);
4255    }
4256  }
4257}
4258
4259
4260void MarkCompactCollector::RecordCodeTargetPatch(Address pc, Code* target) {
4261  ASSERT(heap()->gc_state() == Heap::MARK_COMPACT);
4262  if (is_compacting()) {
4263    Code* host = isolate()->inner_pointer_to_code_cache()->
4264        GcSafeFindCodeForInnerPointer(pc);
4265    MarkBit mark_bit = Marking::MarkBitFrom(host);
4266    if (Marking::IsBlack(mark_bit)) {
4267      RelocInfo rinfo(pc, RelocInfo::CODE_TARGET, 0, host);
4268      RecordRelocSlot(&rinfo, target);
4269    }
4270  }
4271}
4272
4273
4274static inline SlotsBuffer::SlotType DecodeSlotType(
4275    SlotsBuffer::ObjectSlot slot) {
4276  return static_cast<SlotsBuffer::SlotType>(reinterpret_cast<intptr_t>(slot));
4277}
4278
4279
4280void SlotsBuffer::UpdateSlots(Heap* heap) {
4281  PointersUpdatingVisitor v(heap);
4282
4283  for (int slot_idx = 0; slot_idx < idx_; ++slot_idx) {
4284    ObjectSlot slot = slots_[slot_idx];
4285    if (!IsTypedSlot(slot)) {
4286      PointersUpdatingVisitor::UpdateSlot(heap, slot);
4287    } else {
4288      ++slot_idx;
4289      ASSERT(slot_idx < idx_);
4290      UpdateSlot(&v,
4291                 DecodeSlotType(slot),
4292                 reinterpret_cast<Address>(slots_[slot_idx]));
4293    }
4294  }
4295}
4296
4297
4298void SlotsBuffer::UpdateSlotsWithFilter(Heap* heap) {
4299  PointersUpdatingVisitor v(heap);
4300
4301  for (int slot_idx = 0; slot_idx < idx_; ++slot_idx) {
4302    ObjectSlot slot = slots_[slot_idx];
4303    if (!IsTypedSlot(slot)) {
4304      if (!IsOnInvalidatedCodeObject(reinterpret_cast<Address>(slot))) {
4305        PointersUpdatingVisitor::UpdateSlot(heap, slot);
4306      }
4307    } else {
4308      ++slot_idx;
4309      ASSERT(slot_idx < idx_);
4310      Address pc = reinterpret_cast<Address>(slots_[slot_idx]);
4311      if (!IsOnInvalidatedCodeObject(pc)) {
4312        UpdateSlot(&v,
4313                   DecodeSlotType(slot),
4314                   reinterpret_cast<Address>(slots_[slot_idx]));
4315      }
4316    }
4317  }
4318}
4319
4320
4321SlotsBuffer* SlotsBufferAllocator::AllocateBuffer(SlotsBuffer* next_buffer) {
4322  return new SlotsBuffer(next_buffer);
4323}
4324
4325
4326void SlotsBufferAllocator::DeallocateBuffer(SlotsBuffer* buffer) {
4327  delete buffer;
4328}
4329
4330
4331void SlotsBufferAllocator::DeallocateChain(SlotsBuffer** buffer_address) {
4332  SlotsBuffer* buffer = *buffer_address;
4333  while (buffer != NULL) {
4334    SlotsBuffer* next_buffer = buffer->next();
4335    DeallocateBuffer(buffer);
4336    buffer = next_buffer;
4337  }
4338  *buffer_address = NULL;
4339}
4340
4341
4342} }  // namespace v8::internal
4343