1// Copyright 2012 the V8 project authors. All rights reserved.
2// Redistribution and use in source and binary forms, with or without
3// modification, are permitted provided that the following conditions are
4// met:
5//
6//     * Redistributions of source code must retain the above copyright
7//       notice, this list of conditions and the following disclaimer.
8//     * Redistributions in binary form must reproduce the above
9//       copyright notice, this list of conditions and the following
10//       disclaimer in the documentation and/or other materials provided
11//       with the distribution.
12//     * Neither the name of Google Inc. nor the names of its
13//       contributors may be used to endorse or promote products derived
14//       from this software without specific prior written permission.
15//
16// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27
28#include "v8.h"
29
30#include "accessors.h"
31#include "api.h"
32#include "bootstrapper.h"
33#include "codegen.h"
34#include "compilation-cache.h"
35#include "cpu-profiler.h"
36#include "debug.h"
37#include "deoptimizer.h"
38#include "global-handles.h"
39#include "heap-profiler.h"
40#include "incremental-marking.h"
41#include "mark-compact.h"
42#include "natives.h"
43#include "objects-visiting.h"
44#include "objects-visiting-inl.h"
45#include "once.h"
46#include "runtime-profiler.h"
47#include "scopeinfo.h"
48#include "snapshot.h"
49#include "store-buffer.h"
50#include "v8threads.h"
51#include "v8utils.h"
52#include "vm-state-inl.h"
53#if V8_TARGET_ARCH_ARM && !V8_INTERPRETED_REGEXP
54#include "regexp-macro-assembler.h"
55#include "arm/regexp-macro-assembler-arm.h"
56#endif
57#if V8_TARGET_ARCH_MIPS && !V8_INTERPRETED_REGEXP
58#include "regexp-macro-assembler.h"
59#include "mips/regexp-macro-assembler-mips.h"
60#endif
61
62namespace v8 {
63namespace internal {
64
65
66Heap::Heap()
67    : isolate_(NULL),
68// semispace_size_ should be a power of 2 and old_generation_size_ should be
69// a multiple of Page::kPageSize.
70#if V8_TARGET_ARCH_X64
71#define LUMP_OF_MEMORY (2 * MB)
72      code_range_size_(512*MB),
73#else
74#define LUMP_OF_MEMORY MB
75      code_range_size_(0),
76#endif
77#if defined(ANDROID) || V8_TARGET_ARCH_MIPS
78      reserved_semispace_size_(4 * Max(LUMP_OF_MEMORY, Page::kPageSize)),
79      max_semispace_size_(4 * Max(LUMP_OF_MEMORY, Page::kPageSize)),
80      initial_semispace_size_(Page::kPageSize),
81      max_old_generation_size_(192*MB),
82      max_executable_size_(max_old_generation_size_),
83#else
84      reserved_semispace_size_(8 * Max(LUMP_OF_MEMORY, Page::kPageSize)),
85      max_semispace_size_(8 * Max(LUMP_OF_MEMORY, Page::kPageSize)),
86      initial_semispace_size_(Page::kPageSize),
87      max_old_generation_size_(700ul * LUMP_OF_MEMORY),
88      max_executable_size_(256l * LUMP_OF_MEMORY),
89#endif
90
91// Variables set based on semispace_size_ and old_generation_size_ in
92// ConfigureHeap (survived_since_last_expansion_, external_allocation_limit_)
93// Will be 4 * reserved_semispace_size_ to ensure that young
94// generation can be aligned to its size.
95      survived_since_last_expansion_(0),
96      sweep_generation_(0),
97      always_allocate_scope_depth_(0),
98      linear_allocation_scope_depth_(0),
99      contexts_disposed_(0),
100      global_ic_age_(0),
101      flush_monomorphic_ics_(false),
102      scan_on_scavenge_pages_(0),
103      new_space_(this),
104      old_pointer_space_(NULL),
105      old_data_space_(NULL),
106      code_space_(NULL),
107      map_space_(NULL),
108      cell_space_(NULL),
109      property_cell_space_(NULL),
110      lo_space_(NULL),
111      gc_state_(NOT_IN_GC),
112      gc_post_processing_depth_(0),
113      ms_count_(0),
114      gc_count_(0),
115      remembered_unmapped_pages_index_(0),
116      unflattened_strings_length_(0),
117#ifdef DEBUG
118      allocation_timeout_(0),
119      disallow_allocation_failure_(false),
120#endif  // DEBUG
121      new_space_high_promotion_mode_active_(false),
122      old_generation_allocation_limit_(kMinimumOldGenerationAllocationLimit),
123      size_of_old_gen_at_last_old_space_gc_(0),
124      external_allocation_limit_(0),
125      amount_of_external_allocated_memory_(0),
126      amount_of_external_allocated_memory_at_last_global_gc_(0),
127      old_gen_exhausted_(false),
128      store_buffer_rebuilder_(store_buffer()),
129      hidden_string_(NULL),
130      global_gc_prologue_callback_(NULL),
131      global_gc_epilogue_callback_(NULL),
132      gc_safe_size_of_old_object_(NULL),
133      total_regexp_code_generated_(0),
134      tracer_(NULL),
135      young_survivors_after_last_gc_(0),
136      high_survival_rate_period_length_(0),
137      low_survival_rate_period_length_(0),
138      survival_rate_(0),
139      previous_survival_rate_trend_(Heap::STABLE),
140      survival_rate_trend_(Heap::STABLE),
141      max_gc_pause_(0.0),
142      total_gc_time_ms_(0.0),
143      max_alive_after_gc_(0),
144      min_in_mutator_(kMaxInt),
145      alive_after_last_gc_(0),
146      last_gc_end_timestamp_(0.0),
147      marking_time_(0.0),
148      sweeping_time_(0.0),
149      store_buffer_(this),
150      marking_(this),
151      incremental_marking_(this),
152      number_idle_notifications_(0),
153      last_idle_notification_gc_count_(0),
154      last_idle_notification_gc_count_init_(false),
155      mark_sweeps_since_idle_round_started_(0),
156      gc_count_at_last_idle_gc_(0),
157      scavenges_since_last_idle_round_(kIdleScavengeThreshold),
158      gcs_since_last_deopt_(0),
159#ifdef VERIFY_HEAP
160      no_weak_embedded_maps_verification_scope_depth_(0),
161#endif
162      promotion_queue_(this),
163      configured_(false),
164      chunks_queued_for_free_(NULL),
165      relocation_mutex_(NULL) {
166  // Allow build-time customization of the max semispace size. Building
167  // V8 with snapshots and a non-default max semispace size is much
168  // easier if you can define it as part of the build environment.
169#if defined(V8_MAX_SEMISPACE_SIZE)
170  max_semispace_size_ = reserved_semispace_size_ = V8_MAX_SEMISPACE_SIZE;
171#endif
172
173  intptr_t max_virtual = OS::MaxVirtualMemory();
174
175  if (max_virtual > 0) {
176    if (code_range_size_ > 0) {
177      // Reserve no more than 1/8 of the memory for the code range.
178      code_range_size_ = Min(code_range_size_, max_virtual >> 3);
179    }
180  }
181
182  memset(roots_, 0, sizeof(roots_[0]) * kRootListLength);
183  native_contexts_list_ = NULL;
184  array_buffers_list_ = Smi::FromInt(0);
185  allocation_sites_list_ = Smi::FromInt(0);
186  mark_compact_collector_.heap_ = this;
187  external_string_table_.heap_ = this;
188  // Put a dummy entry in the remembered pages so we can find the list the
189  // minidump even if there are no real unmapped pages.
190  RememberUnmappedPage(NULL, false);
191
192  ClearObjectStats(true);
193}
194
195
196intptr_t Heap::Capacity() {
197  if (!HasBeenSetUp()) return 0;
198
199  return new_space_.Capacity() +
200      old_pointer_space_->Capacity() +
201      old_data_space_->Capacity() +
202      code_space_->Capacity() +
203      map_space_->Capacity() +
204      cell_space_->Capacity() +
205      property_cell_space_->Capacity();
206}
207
208
209intptr_t Heap::CommittedMemory() {
210  if (!HasBeenSetUp()) return 0;
211
212  return new_space_.CommittedMemory() +
213      old_pointer_space_->CommittedMemory() +
214      old_data_space_->CommittedMemory() +
215      code_space_->CommittedMemory() +
216      map_space_->CommittedMemory() +
217      cell_space_->CommittedMemory() +
218      property_cell_space_->CommittedMemory() +
219      lo_space_->Size();
220}
221
222
223size_t Heap::CommittedPhysicalMemory() {
224  if (!HasBeenSetUp()) return 0;
225
226  return new_space_.CommittedPhysicalMemory() +
227      old_pointer_space_->CommittedPhysicalMemory() +
228      old_data_space_->CommittedPhysicalMemory() +
229      code_space_->CommittedPhysicalMemory() +
230      map_space_->CommittedPhysicalMemory() +
231      cell_space_->CommittedPhysicalMemory() +
232      property_cell_space_->CommittedPhysicalMemory() +
233      lo_space_->CommittedPhysicalMemory();
234}
235
236
237intptr_t Heap::CommittedMemoryExecutable() {
238  if (!HasBeenSetUp()) return 0;
239
240  return isolate()->memory_allocator()->SizeExecutable();
241}
242
243
244intptr_t Heap::Available() {
245  if (!HasBeenSetUp()) return 0;
246
247  return new_space_.Available() +
248      old_pointer_space_->Available() +
249      old_data_space_->Available() +
250      code_space_->Available() +
251      map_space_->Available() +
252      cell_space_->Available() +
253      property_cell_space_->Available();
254}
255
256
257bool Heap::HasBeenSetUp() {
258  return old_pointer_space_ != NULL &&
259         old_data_space_ != NULL &&
260         code_space_ != NULL &&
261         map_space_ != NULL &&
262         cell_space_ != NULL &&
263         property_cell_space_ != NULL &&
264         lo_space_ != NULL;
265}
266
267
268int Heap::GcSafeSizeOfOldObject(HeapObject* object) {
269  if (IntrusiveMarking::IsMarked(object)) {
270    return IntrusiveMarking::SizeOfMarkedObject(object);
271  }
272  return object->SizeFromMap(object->map());
273}
274
275
276GarbageCollector Heap::SelectGarbageCollector(AllocationSpace space,
277                                              const char** reason) {
278  // Is global GC requested?
279  if (space != NEW_SPACE) {
280    isolate_->counters()->gc_compactor_caused_by_request()->Increment();
281    *reason = "GC in old space requested";
282    return MARK_COMPACTOR;
283  }
284
285  if (FLAG_gc_global || (FLAG_stress_compaction && (gc_count_ & 1) != 0)) {
286    *reason = "GC in old space forced by flags";
287    return MARK_COMPACTOR;
288  }
289
290  // Is enough data promoted to justify a global GC?
291  if (OldGenerationAllocationLimitReached()) {
292    isolate_->counters()->gc_compactor_caused_by_promoted_data()->Increment();
293    *reason = "promotion limit reached";
294    return MARK_COMPACTOR;
295  }
296
297  // Have allocation in OLD and LO failed?
298  if (old_gen_exhausted_) {
299    isolate_->counters()->
300        gc_compactor_caused_by_oldspace_exhaustion()->Increment();
301    *reason = "old generations exhausted";
302    return MARK_COMPACTOR;
303  }
304
305  // Is there enough space left in OLD to guarantee that a scavenge can
306  // succeed?
307  //
308  // Note that MemoryAllocator->MaxAvailable() undercounts the memory available
309  // for object promotion. It counts only the bytes that the memory
310  // allocator has not yet allocated from the OS and assigned to any space,
311  // and does not count available bytes already in the old space or code
312  // space.  Undercounting is safe---we may get an unrequested full GC when
313  // a scavenge would have succeeded.
314  if (isolate_->memory_allocator()->MaxAvailable() <= new_space_.Size()) {
315    isolate_->counters()->
316        gc_compactor_caused_by_oldspace_exhaustion()->Increment();
317    *reason = "scavenge might not succeed";
318    return MARK_COMPACTOR;
319  }
320
321  // Default
322  *reason = NULL;
323  return SCAVENGER;
324}
325
326
327// TODO(1238405): Combine the infrastructure for --heap-stats and
328// --log-gc to avoid the complicated preprocessor and flag testing.
329void Heap::ReportStatisticsBeforeGC() {
330  // Heap::ReportHeapStatistics will also log NewSpace statistics when
331  // compiled --log-gc is set.  The following logic is used to avoid
332  // double logging.
333#ifdef DEBUG
334  if (FLAG_heap_stats || FLAG_log_gc) new_space_.CollectStatistics();
335  if (FLAG_heap_stats) {
336    ReportHeapStatistics("Before GC");
337  } else if (FLAG_log_gc) {
338    new_space_.ReportStatistics();
339  }
340  if (FLAG_heap_stats || FLAG_log_gc) new_space_.ClearHistograms();
341#else
342  if (FLAG_log_gc) {
343    new_space_.CollectStatistics();
344    new_space_.ReportStatistics();
345    new_space_.ClearHistograms();
346  }
347#endif  // DEBUG
348}
349
350
351void Heap::PrintShortHeapStatistics() {
352  if (!FLAG_trace_gc_verbose) return;
353  PrintPID("Memory allocator,   used: %6" V8_PTR_PREFIX "d KB"
354               ", available: %6" V8_PTR_PREFIX "d KB\n",
355           isolate_->memory_allocator()->Size() / KB,
356           isolate_->memory_allocator()->Available() / KB);
357  PrintPID("New space,          used: %6" V8_PTR_PREFIX "d KB"
358               ", available: %6" V8_PTR_PREFIX "d KB"
359               ", committed: %6" V8_PTR_PREFIX "d KB\n",
360           new_space_.Size() / KB,
361           new_space_.Available() / KB,
362           new_space_.CommittedMemory() / KB);
363  PrintPID("Old pointers,       used: %6" V8_PTR_PREFIX "d KB"
364               ", available: %6" V8_PTR_PREFIX "d KB"
365               ", committed: %6" V8_PTR_PREFIX "d KB\n",
366           old_pointer_space_->SizeOfObjects() / KB,
367           old_pointer_space_->Available() / KB,
368           old_pointer_space_->CommittedMemory() / KB);
369  PrintPID("Old data space,     used: %6" V8_PTR_PREFIX "d KB"
370               ", available: %6" V8_PTR_PREFIX "d KB"
371               ", committed: %6" V8_PTR_PREFIX "d KB\n",
372           old_data_space_->SizeOfObjects() / KB,
373           old_data_space_->Available() / KB,
374           old_data_space_->CommittedMemory() / KB);
375  PrintPID("Code space,         used: %6" V8_PTR_PREFIX "d KB"
376               ", available: %6" V8_PTR_PREFIX "d KB"
377               ", committed: %6" V8_PTR_PREFIX "d KB\n",
378           code_space_->SizeOfObjects() / KB,
379           code_space_->Available() / KB,
380           code_space_->CommittedMemory() / KB);
381  PrintPID("Map space,          used: %6" V8_PTR_PREFIX "d KB"
382               ", available: %6" V8_PTR_PREFIX "d KB"
383               ", committed: %6" V8_PTR_PREFIX "d KB\n",
384           map_space_->SizeOfObjects() / KB,
385           map_space_->Available() / KB,
386           map_space_->CommittedMemory() / KB);
387  PrintPID("Cell space,         used: %6" V8_PTR_PREFIX "d KB"
388               ", available: %6" V8_PTR_PREFIX "d KB"
389               ", committed: %6" V8_PTR_PREFIX "d KB\n",
390           cell_space_->SizeOfObjects() / KB,
391           cell_space_->Available() / KB,
392           cell_space_->CommittedMemory() / KB);
393  PrintPID("PropertyCell space, used: %6" V8_PTR_PREFIX "d KB"
394               ", available: %6" V8_PTR_PREFIX "d KB"
395               ", committed: %6" V8_PTR_PREFIX "d KB\n",
396           property_cell_space_->SizeOfObjects() / KB,
397           property_cell_space_->Available() / KB,
398           property_cell_space_->CommittedMemory() / KB);
399  PrintPID("Large object space, used: %6" V8_PTR_PREFIX "d KB"
400               ", available: %6" V8_PTR_PREFIX "d KB"
401               ", committed: %6" V8_PTR_PREFIX "d KB\n",
402           lo_space_->SizeOfObjects() / KB,
403           lo_space_->Available() / KB,
404           lo_space_->CommittedMemory() / KB);
405  PrintPID("All spaces,         used: %6" V8_PTR_PREFIX "d KB"
406               ", available: %6" V8_PTR_PREFIX "d KB"
407               ", committed: %6" V8_PTR_PREFIX "d KB\n",
408           this->SizeOfObjects() / KB,
409           this->Available() / KB,
410           this->CommittedMemory() / KB);
411  PrintPID("External memory reported: %6" V8_PTR_PREFIX "d KB\n",
412           amount_of_external_allocated_memory_ / KB);
413  PrintPID("Total time spent in GC  : %.1f ms\n", total_gc_time_ms_);
414}
415
416
417// TODO(1238405): Combine the infrastructure for --heap-stats and
418// --log-gc to avoid the complicated preprocessor and flag testing.
419void Heap::ReportStatisticsAfterGC() {
420  // Similar to the before GC, we use some complicated logic to ensure that
421  // NewSpace statistics are logged exactly once when --log-gc is turned on.
422#if defined(DEBUG)
423  if (FLAG_heap_stats) {
424    new_space_.CollectStatistics();
425    ReportHeapStatistics("After GC");
426  } else if (FLAG_log_gc) {
427    new_space_.ReportStatistics();
428  }
429#else
430  if (FLAG_log_gc) new_space_.ReportStatistics();
431#endif  // DEBUG
432}
433
434
435void Heap::GarbageCollectionPrologue() {
436  {  AllowHeapAllocation for_the_first_part_of_prologue;
437    isolate_->transcendental_cache()->Clear();
438    ClearJSFunctionResultCaches();
439    gc_count_++;
440    unflattened_strings_length_ = 0;
441
442    if (FLAG_flush_code && FLAG_flush_code_incrementally) {
443      mark_compact_collector()->EnableCodeFlushing(true);
444    }
445
446#ifdef VERIFY_HEAP
447    if (FLAG_verify_heap) {
448      Verify();
449    }
450#endif
451  }
452
453#ifdef DEBUG
454  ASSERT(!AllowHeapAllocation::IsAllowed() && gc_state_ == NOT_IN_GC);
455
456  if (FLAG_gc_verbose) Print();
457
458  ReportStatisticsBeforeGC();
459#endif  // DEBUG
460
461  store_buffer()->GCPrologue();
462}
463
464
465intptr_t Heap::SizeOfObjects() {
466  intptr_t total = 0;
467  AllSpaces spaces(this);
468  for (Space* space = spaces.next(); space != NULL; space = spaces.next()) {
469    total += space->SizeOfObjects();
470  }
471  return total;
472}
473
474
475void Heap::RepairFreeListsAfterBoot() {
476  PagedSpaces spaces(this);
477  for (PagedSpace* space = spaces.next();
478       space != NULL;
479       space = spaces.next()) {
480    space->RepairFreeListsAfterBoot();
481  }
482}
483
484
485void Heap::GarbageCollectionEpilogue() {
486  store_buffer()->GCEpilogue();
487
488  // In release mode, we only zap the from space under heap verification.
489  if (Heap::ShouldZapGarbage()) {
490    ZapFromSpace();
491  }
492
493#ifdef VERIFY_HEAP
494  if (FLAG_verify_heap) {
495    Verify();
496  }
497#endif
498
499  AllowHeapAllocation for_the_rest_of_the_epilogue;
500
501#ifdef DEBUG
502  if (FLAG_print_global_handles) isolate_->global_handles()->Print();
503  if (FLAG_print_handles) PrintHandles();
504  if (FLAG_gc_verbose) Print();
505  if (FLAG_code_stats) ReportCodeStatistics("After GC");
506#endif
507  if (FLAG_deopt_every_n_garbage_collections > 0) {
508    if (++gcs_since_last_deopt_ == FLAG_deopt_every_n_garbage_collections) {
509      Deoptimizer::DeoptimizeAll(isolate());
510      gcs_since_last_deopt_ = 0;
511    }
512  }
513
514  isolate_->counters()->alive_after_last_gc()->Set(
515      static_cast<int>(SizeOfObjects()));
516
517  isolate_->counters()->string_table_capacity()->Set(
518      string_table()->Capacity());
519  isolate_->counters()->number_of_symbols()->Set(
520      string_table()->NumberOfElements());
521
522  if (CommittedMemory() > 0) {
523    isolate_->counters()->external_fragmentation_total()->AddSample(
524        static_cast<int>(100 - (SizeOfObjects() * 100.0) / CommittedMemory()));
525
526    isolate_->counters()->heap_fraction_map_space()->AddSample(
527        static_cast<int>(
528            (map_space()->CommittedMemory() * 100.0) / CommittedMemory()));
529    isolate_->counters()->heap_fraction_cell_space()->AddSample(
530        static_cast<int>(
531            (cell_space()->CommittedMemory() * 100.0) / CommittedMemory()));
532    isolate_->counters()->heap_fraction_property_cell_space()->
533        AddSample(static_cast<int>(
534            (property_cell_space()->CommittedMemory() * 100.0) /
535            CommittedMemory()));
536
537    isolate_->counters()->heap_sample_total_committed()->AddSample(
538        static_cast<int>(CommittedMemory() / KB));
539    isolate_->counters()->heap_sample_total_used()->AddSample(
540        static_cast<int>(SizeOfObjects() / KB));
541    isolate_->counters()->heap_sample_map_space_committed()->AddSample(
542        static_cast<int>(map_space()->CommittedMemory() / KB));
543    isolate_->counters()->heap_sample_cell_space_committed()->AddSample(
544        static_cast<int>(cell_space()->CommittedMemory() / KB));
545    isolate_->counters()->
546        heap_sample_property_cell_space_committed()->
547            AddSample(static_cast<int>(
548                property_cell_space()->CommittedMemory() / KB));
549  }
550
551#define UPDATE_COUNTERS_FOR_SPACE(space)                                       \
552  isolate_->counters()->space##_bytes_available()->Set(                        \
553      static_cast<int>(space()->Available()));                                 \
554  isolate_->counters()->space##_bytes_committed()->Set(                        \
555      static_cast<int>(space()->CommittedMemory()));                           \
556  isolate_->counters()->space##_bytes_used()->Set(                             \
557      static_cast<int>(space()->SizeOfObjects()));
558#define UPDATE_FRAGMENTATION_FOR_SPACE(space)                                  \
559  if (space()->CommittedMemory() > 0) {                                        \
560    isolate_->counters()->external_fragmentation_##space()->AddSample(         \
561        static_cast<int>(100 -                                                 \
562            (space()->SizeOfObjects() * 100.0) / space()->CommittedMemory())); \
563  }
564#define UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(space)                     \
565  UPDATE_COUNTERS_FOR_SPACE(space)                                             \
566  UPDATE_FRAGMENTATION_FOR_SPACE(space)
567
568  UPDATE_COUNTERS_FOR_SPACE(new_space)
569  UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(old_pointer_space)
570  UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(old_data_space)
571  UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(code_space)
572  UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(map_space)
573  UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(cell_space)
574  UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(property_cell_space)
575  UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(lo_space)
576#undef UPDATE_COUNTERS_FOR_SPACE
577#undef UPDATE_FRAGMENTATION_FOR_SPACE
578#undef UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE
579
580#if defined(DEBUG)
581  ReportStatisticsAfterGC();
582#endif  // DEBUG
583#ifdef ENABLE_DEBUGGER_SUPPORT
584  isolate_->debug()->AfterGarbageCollection();
585#endif  // ENABLE_DEBUGGER_SUPPORT
586}
587
588
589void Heap::CollectAllGarbage(int flags, const char* gc_reason) {
590  // Since we are ignoring the return value, the exact choice of space does
591  // not matter, so long as we do not specify NEW_SPACE, which would not
592  // cause a full GC.
593  mark_compact_collector_.SetFlags(flags);
594  CollectGarbage(OLD_POINTER_SPACE, gc_reason);
595  mark_compact_collector_.SetFlags(kNoGCFlags);
596}
597
598
599void Heap::CollectAllAvailableGarbage(const char* gc_reason) {
600  // Since we are ignoring the return value, the exact choice of space does
601  // not matter, so long as we do not specify NEW_SPACE, which would not
602  // cause a full GC.
603  // Major GC would invoke weak handle callbacks on weakly reachable
604  // handles, but won't collect weakly reachable objects until next
605  // major GC.  Therefore if we collect aggressively and weak handle callback
606  // has been invoked, we rerun major GC to release objects which become
607  // garbage.
608  // Note: as weak callbacks can execute arbitrary code, we cannot
609  // hope that eventually there will be no weak callbacks invocations.
610  // Therefore stop recollecting after several attempts.
611  mark_compact_collector()->SetFlags(kMakeHeapIterableMask |
612                                     kReduceMemoryFootprintMask);
613  isolate_->compilation_cache()->Clear();
614  const int kMaxNumberOfAttempts = 7;
615  const int kMinNumberOfAttempts = 2;
616  for (int attempt = 0; attempt < kMaxNumberOfAttempts; attempt++) {
617    if (!CollectGarbage(OLD_POINTER_SPACE, MARK_COMPACTOR, gc_reason, NULL) &&
618        attempt + 1 >= kMinNumberOfAttempts) {
619      break;
620    }
621  }
622  mark_compact_collector()->SetFlags(kNoGCFlags);
623  new_space_.Shrink();
624  UncommitFromSpace();
625  incremental_marking()->UncommitMarkingDeque();
626}
627
628
629bool Heap::CollectGarbage(AllocationSpace space,
630                          GarbageCollector collector,
631                          const char* gc_reason,
632                          const char* collector_reason) {
633  // The VM is in the GC state until exiting this function.
634  VMState<GC> state(isolate_);
635
636#ifdef DEBUG
637  // Reset the allocation timeout to the GC interval, but make sure to
638  // allow at least a few allocations after a collection. The reason
639  // for this is that we have a lot of allocation sequences and we
640  // assume that a garbage collection will allow the subsequent
641  // allocation attempts to go through.
642  allocation_timeout_ = Max(6, FLAG_gc_interval);
643#endif
644
645  if (collector == SCAVENGER && !incremental_marking()->IsStopped()) {
646    if (FLAG_trace_incremental_marking) {
647      PrintF("[IncrementalMarking] Scavenge during marking.\n");
648    }
649  }
650
651  if (collector == MARK_COMPACTOR &&
652      !mark_compact_collector()->abort_incremental_marking() &&
653      !incremental_marking()->IsStopped() &&
654      !incremental_marking()->should_hurry() &&
655      FLAG_incremental_marking_steps) {
656    // Make progress in incremental marking.
657    const intptr_t kStepSizeWhenDelayedByScavenge = 1 * MB;
658    incremental_marking()->Step(kStepSizeWhenDelayedByScavenge,
659                                IncrementalMarking::NO_GC_VIA_STACK_GUARD);
660    if (!incremental_marking()->IsComplete()) {
661      if (FLAG_trace_incremental_marking) {
662        PrintF("[IncrementalMarking] Delaying MarkSweep.\n");
663      }
664      collector = SCAVENGER;
665      collector_reason = "incremental marking delaying mark-sweep";
666    }
667  }
668
669  bool next_gc_likely_to_collect_more = false;
670
671  { GCTracer tracer(this, gc_reason, collector_reason);
672    ASSERT(AllowHeapAllocation::IsAllowed());
673    DisallowHeapAllocation no_allocation_during_gc;
674    GarbageCollectionPrologue();
675    // The GC count was incremented in the prologue.  Tell the tracer about
676    // it.
677    tracer.set_gc_count(gc_count_);
678
679    // Tell the tracer which collector we've selected.
680    tracer.set_collector(collector);
681
682    {
683      HistogramTimerScope histogram_timer_scope(
684          (collector == SCAVENGER) ? isolate_->counters()->gc_scavenger()
685                                   : isolate_->counters()->gc_compactor());
686      next_gc_likely_to_collect_more =
687          PerformGarbageCollection(collector, &tracer);
688    }
689
690    GarbageCollectionEpilogue();
691  }
692
693  // Start incremental marking for the next cycle. The heap snapshot
694  // generator needs incremental marking to stay off after it aborted.
695  if (!mark_compact_collector()->abort_incremental_marking() &&
696      incremental_marking()->IsStopped() &&
697      incremental_marking()->WorthActivating() &&
698      NextGCIsLikelyToBeFull()) {
699    incremental_marking()->Start();
700  }
701
702  return next_gc_likely_to_collect_more;
703}
704
705
706int Heap::NotifyContextDisposed() {
707  if (FLAG_parallel_recompilation) {
708    // Flush the queued recompilation tasks.
709    isolate()->optimizing_compiler_thread()->Flush();
710  }
711  flush_monomorphic_ics_ = true;
712  return ++contexts_disposed_;
713}
714
715
716void Heap::PerformScavenge() {
717  GCTracer tracer(this, NULL, NULL);
718  if (incremental_marking()->IsStopped()) {
719    PerformGarbageCollection(SCAVENGER, &tracer);
720  } else {
721    PerformGarbageCollection(MARK_COMPACTOR, &tracer);
722  }
723}
724
725
726void Heap::MoveElements(FixedArray* array,
727                        int dst_index,
728                        int src_index,
729                        int len) {
730  if (len == 0) return;
731
732  ASSERT(array->map() != HEAP->fixed_cow_array_map());
733  Object** dst_objects = array->data_start() + dst_index;
734  OS::MemMove(dst_objects,
735              array->data_start() + src_index,
736              len * kPointerSize);
737  if (!InNewSpace(array)) {
738    for (int i = 0; i < len; i++) {
739      // TODO(hpayer): check store buffer for entries
740      if (InNewSpace(dst_objects[i])) {
741        RecordWrite(array->address(), array->OffsetOfElementAt(dst_index + i));
742      }
743    }
744  }
745  incremental_marking()->RecordWrites(array);
746}
747
748
749#ifdef VERIFY_HEAP
750// Helper class for verifying the string table.
751class StringTableVerifier : public ObjectVisitor {
752 public:
753  void VisitPointers(Object** start, Object** end) {
754    // Visit all HeapObject pointers in [start, end).
755    for (Object** p = start; p < end; p++) {
756      if ((*p)->IsHeapObject()) {
757        // Check that the string is actually internalized.
758        CHECK((*p)->IsTheHole() || (*p)->IsUndefined() ||
759              (*p)->IsInternalizedString());
760      }
761    }
762  }
763};
764
765
766static void VerifyStringTable() {
767  StringTableVerifier verifier;
768  HEAP->string_table()->IterateElements(&verifier);
769}
770#endif  // VERIFY_HEAP
771
772
773static bool AbortIncrementalMarkingAndCollectGarbage(
774    Heap* heap,
775    AllocationSpace space,
776    const char* gc_reason = NULL) {
777  heap->mark_compact_collector()->SetFlags(Heap::kAbortIncrementalMarkingMask);
778  bool result = heap->CollectGarbage(space, gc_reason);
779  heap->mark_compact_collector()->SetFlags(Heap::kNoGCFlags);
780  return result;
781}
782
783
784void Heap::ReserveSpace(
785    int *sizes,
786    Address *locations_out) {
787  bool gc_performed = true;
788  int counter = 0;
789  static const int kThreshold = 20;
790  while (gc_performed && counter++ < kThreshold) {
791    gc_performed = false;
792    ASSERT(NEW_SPACE == FIRST_PAGED_SPACE - 1);
793    for (int space = NEW_SPACE; space <= LAST_PAGED_SPACE; space++) {
794      if (sizes[space] != 0) {
795        MaybeObject* allocation;
796        if (space == NEW_SPACE) {
797          allocation = new_space()->AllocateRaw(sizes[space]);
798        } else {
799          allocation = paged_space(space)->AllocateRaw(sizes[space]);
800        }
801        FreeListNode* node;
802        if (!allocation->To<FreeListNode>(&node)) {
803          if (space == NEW_SPACE) {
804            Heap::CollectGarbage(NEW_SPACE,
805                                 "failed to reserve space in the new space");
806          } else {
807            AbortIncrementalMarkingAndCollectGarbage(
808                this,
809                static_cast<AllocationSpace>(space),
810                "failed to reserve space in paged space");
811          }
812          gc_performed = true;
813          break;
814        } else {
815          // Mark with a free list node, in case we have a GC before
816          // deserializing.
817          node->set_size(this, sizes[space]);
818          locations_out[space] = node->address();
819        }
820      }
821    }
822  }
823
824  if (gc_performed) {
825    // Failed to reserve the space after several attempts.
826    V8::FatalProcessOutOfMemory("Heap::ReserveSpace");
827  }
828}
829
830
831void Heap::EnsureFromSpaceIsCommitted() {
832  if (new_space_.CommitFromSpaceIfNeeded()) return;
833
834  // Committing memory to from space failed.
835  // Memory is exhausted and we will die.
836  V8::FatalProcessOutOfMemory("Committing semi space failed.");
837}
838
839
840void Heap::ClearJSFunctionResultCaches() {
841  if (isolate_->bootstrapper()->IsActive()) return;
842
843  Object* context = native_contexts_list_;
844  while (!context->IsUndefined()) {
845    // Get the caches for this context. GC can happen when the context
846    // is not fully initialized, so the caches can be undefined.
847    Object* caches_or_undefined =
848        Context::cast(context)->get(Context::JSFUNCTION_RESULT_CACHES_INDEX);
849    if (!caches_or_undefined->IsUndefined()) {
850      FixedArray* caches = FixedArray::cast(caches_or_undefined);
851      // Clear the caches:
852      int length = caches->length();
853      for (int i = 0; i < length; i++) {
854        JSFunctionResultCache::cast(caches->get(i))->Clear();
855      }
856    }
857    // Get the next context:
858    context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK);
859  }
860}
861
862
863void Heap::ClearNormalizedMapCaches() {
864  if (isolate_->bootstrapper()->IsActive() &&
865      !incremental_marking()->IsMarking()) {
866    return;
867  }
868
869  Object* context = native_contexts_list_;
870  while (!context->IsUndefined()) {
871    // GC can happen when the context is not fully initialized,
872    // so the cache can be undefined.
873    Object* cache =
874        Context::cast(context)->get(Context::NORMALIZED_MAP_CACHE_INDEX);
875    if (!cache->IsUndefined()) {
876      NormalizedMapCache::cast(cache)->Clear();
877    }
878    context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK);
879  }
880}
881
882
883void Heap::UpdateSurvivalRateTrend(int start_new_space_size) {
884  double survival_rate =
885      (static_cast<double>(young_survivors_after_last_gc_) * 100) /
886      start_new_space_size;
887
888  if (survival_rate > kYoungSurvivalRateHighThreshold) {
889    high_survival_rate_period_length_++;
890  } else {
891    high_survival_rate_period_length_ = 0;
892  }
893
894  if (survival_rate < kYoungSurvivalRateLowThreshold) {
895    low_survival_rate_period_length_++;
896  } else {
897    low_survival_rate_period_length_ = 0;
898  }
899
900  double survival_rate_diff = survival_rate_ - survival_rate;
901
902  if (survival_rate_diff > kYoungSurvivalRateAllowedDeviation) {
903    set_survival_rate_trend(DECREASING);
904  } else if (survival_rate_diff < -kYoungSurvivalRateAllowedDeviation) {
905    set_survival_rate_trend(INCREASING);
906  } else {
907    set_survival_rate_trend(STABLE);
908  }
909
910  survival_rate_ = survival_rate;
911}
912
913bool Heap::PerformGarbageCollection(GarbageCollector collector,
914                                    GCTracer* tracer) {
915  bool next_gc_likely_to_collect_more = false;
916
917  if (collector != SCAVENGER) {
918    PROFILE(isolate_, CodeMovingGCEvent());
919  }
920
921#ifdef VERIFY_HEAP
922  if (FLAG_verify_heap) {
923    VerifyStringTable();
924  }
925#endif
926
927  GCType gc_type =
928      collector == MARK_COMPACTOR ? kGCTypeMarkSweepCompact : kGCTypeScavenge;
929
930  {
931    GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
932    VMState<EXTERNAL> state(isolate_);
933    HandleScope handle_scope(isolate_);
934    CallGCPrologueCallbacks(gc_type, kNoGCCallbackFlags);
935  }
936
937  EnsureFromSpaceIsCommitted();
938
939  int start_new_space_size = Heap::new_space()->SizeAsInt();
940
941  if (IsHighSurvivalRate()) {
942    // We speed up the incremental marker if it is running so that it
943    // does not fall behind the rate of promotion, which would cause a
944    // constantly growing old space.
945    incremental_marking()->NotifyOfHighPromotionRate();
946  }
947
948  if (collector == MARK_COMPACTOR) {
949    // Perform mark-sweep with optional compaction.
950    MarkCompact(tracer);
951    sweep_generation_++;
952
953    UpdateSurvivalRateTrend(start_new_space_size);
954
955    size_of_old_gen_at_last_old_space_gc_ = PromotedSpaceSizeOfObjects();
956
957    old_generation_allocation_limit_ =
958        OldGenerationAllocationLimit(size_of_old_gen_at_last_old_space_gc_);
959
960    old_gen_exhausted_ = false;
961  } else {
962    tracer_ = tracer;
963    Scavenge();
964    tracer_ = NULL;
965
966    UpdateSurvivalRateTrend(start_new_space_size);
967  }
968
969  if (!new_space_high_promotion_mode_active_ &&
970      new_space_.Capacity() == new_space_.MaximumCapacity() &&
971      IsStableOrIncreasingSurvivalTrend() &&
972      IsHighSurvivalRate()) {
973    // Stable high survival rates even though young generation is at
974    // maximum capacity indicates that most objects will be promoted.
975    // To decrease scavenger pauses and final mark-sweep pauses, we
976    // have to limit maximal capacity of the young generation.
977    SetNewSpaceHighPromotionModeActive(true);
978    if (FLAG_trace_gc) {
979      PrintPID("Limited new space size due to high promotion rate: %d MB\n",
980               new_space_.InitialCapacity() / MB);
981    }
982    // Support for global pre-tenuring uses the high promotion mode as a
983    // heuristic indicator of whether to pretenure or not, we trigger
984    // deoptimization here to take advantage of pre-tenuring as soon as
985    // possible.
986    if (FLAG_pretenuring) {
987      isolate_->stack_guard()->FullDeopt();
988    }
989  } else if (new_space_high_promotion_mode_active_ &&
990      IsStableOrDecreasingSurvivalTrend() &&
991      IsLowSurvivalRate()) {
992    // Decreasing low survival rates might indicate that the above high
993    // promotion mode is over and we should allow the young generation
994    // to grow again.
995    SetNewSpaceHighPromotionModeActive(false);
996    if (FLAG_trace_gc) {
997      PrintPID("Unlimited new space size due to low promotion rate: %d MB\n",
998               new_space_.MaximumCapacity() / MB);
999    }
1000    // Trigger deoptimization here to turn off pre-tenuring as soon as
1001    // possible.
1002    if (FLAG_pretenuring) {
1003      isolate_->stack_guard()->FullDeopt();
1004    }
1005  }
1006
1007  if (new_space_high_promotion_mode_active_ &&
1008      new_space_.Capacity() > new_space_.InitialCapacity()) {
1009    new_space_.Shrink();
1010  }
1011
1012  isolate_->counters()->objs_since_last_young()->Set(0);
1013
1014  // Callbacks that fire after this point might trigger nested GCs and
1015  // restart incremental marking, the assertion can't be moved down.
1016  ASSERT(collector == SCAVENGER || incremental_marking()->IsStopped());
1017
1018  gc_post_processing_depth_++;
1019  { AllowHeapAllocation allow_allocation;
1020    GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
1021    next_gc_likely_to_collect_more =
1022        isolate_->global_handles()->PostGarbageCollectionProcessing(
1023            collector, tracer);
1024  }
1025  gc_post_processing_depth_--;
1026
1027  isolate_->eternal_handles()->PostGarbageCollectionProcessing(this);
1028
1029  // Update relocatables.
1030  Relocatable::PostGarbageCollectionProcessing();
1031
1032  if (collector == MARK_COMPACTOR) {
1033    // Register the amount of external allocated memory.
1034    amount_of_external_allocated_memory_at_last_global_gc_ =
1035        amount_of_external_allocated_memory_;
1036  }
1037
1038  {
1039    GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
1040    VMState<EXTERNAL> state(isolate_);
1041    HandleScope handle_scope(isolate_);
1042    CallGCEpilogueCallbacks(gc_type);
1043  }
1044
1045#ifdef VERIFY_HEAP
1046  if (FLAG_verify_heap) {
1047    VerifyStringTable();
1048  }
1049#endif
1050
1051  return next_gc_likely_to_collect_more;
1052}
1053
1054
1055void Heap::CallGCPrologueCallbacks(GCType gc_type, GCCallbackFlags flags) {
1056  if (gc_type == kGCTypeMarkSweepCompact && global_gc_prologue_callback_) {
1057    global_gc_prologue_callback_();
1058  }
1059  for (int i = 0; i < gc_prologue_callbacks_.length(); ++i) {
1060    if (gc_type & gc_prologue_callbacks_[i].gc_type) {
1061      gc_prologue_callbacks_[i].callback(gc_type, flags);
1062    }
1063  }
1064}
1065
1066
1067void Heap::CallGCEpilogueCallbacks(GCType gc_type) {
1068  for (int i = 0; i < gc_epilogue_callbacks_.length(); ++i) {
1069    if (gc_type & gc_epilogue_callbacks_[i].gc_type) {
1070      gc_epilogue_callbacks_[i].callback(gc_type, kNoGCCallbackFlags);
1071    }
1072  }
1073  if (gc_type == kGCTypeMarkSweepCompact && global_gc_epilogue_callback_) {
1074    global_gc_epilogue_callback_();
1075  }
1076}
1077
1078
1079void Heap::MarkCompact(GCTracer* tracer) {
1080  gc_state_ = MARK_COMPACT;
1081  LOG(isolate_, ResourceEvent("markcompact", "begin"));
1082
1083  mark_compact_collector_.Prepare(tracer);
1084
1085  ms_count_++;
1086  tracer->set_full_gc_count(ms_count_);
1087
1088  MarkCompactPrologue();
1089
1090  mark_compact_collector_.CollectGarbage();
1091
1092  LOG(isolate_, ResourceEvent("markcompact", "end"));
1093
1094  gc_state_ = NOT_IN_GC;
1095
1096  isolate_->counters()->objs_since_last_full()->Set(0);
1097
1098  contexts_disposed_ = 0;
1099
1100  flush_monomorphic_ics_ = false;
1101}
1102
1103
1104void Heap::MarkCompactPrologue() {
1105  // At any old GC clear the keyed lookup cache to enable collection of unused
1106  // maps.
1107  isolate_->keyed_lookup_cache()->Clear();
1108  isolate_->context_slot_cache()->Clear();
1109  isolate_->descriptor_lookup_cache()->Clear();
1110  RegExpResultsCache::Clear(string_split_cache());
1111  RegExpResultsCache::Clear(regexp_multiple_cache());
1112
1113  isolate_->compilation_cache()->MarkCompactPrologue();
1114
1115  CompletelyClearInstanceofCache();
1116
1117  FlushNumberStringCache();
1118  if (FLAG_cleanup_code_caches_at_gc) {
1119    polymorphic_code_cache()->set_cache(undefined_value());
1120  }
1121
1122  ClearNormalizedMapCaches();
1123}
1124
1125
1126// Helper class for copying HeapObjects
1127class ScavengeVisitor: public ObjectVisitor {
1128 public:
1129  explicit ScavengeVisitor(Heap* heap) : heap_(heap) {}
1130
1131  void VisitPointer(Object** p) { ScavengePointer(p); }
1132
1133  void VisitPointers(Object** start, Object** end) {
1134    // Copy all HeapObject pointers in [start, end)
1135    for (Object** p = start; p < end; p++) ScavengePointer(p);
1136  }
1137
1138 private:
1139  void ScavengePointer(Object** p) {
1140    Object* object = *p;
1141    if (!heap_->InNewSpace(object)) return;
1142    Heap::ScavengeObject(reinterpret_cast<HeapObject**>(p),
1143                         reinterpret_cast<HeapObject*>(object));
1144  }
1145
1146  Heap* heap_;
1147};
1148
1149
1150#ifdef VERIFY_HEAP
1151// Visitor class to verify pointers in code or data space do not point into
1152// new space.
1153class VerifyNonPointerSpacePointersVisitor: public ObjectVisitor {
1154 public:
1155  void VisitPointers(Object** start, Object**end) {
1156    for (Object** current = start; current < end; current++) {
1157      if ((*current)->IsHeapObject()) {
1158        CHECK(!HEAP->InNewSpace(HeapObject::cast(*current)));
1159      }
1160    }
1161  }
1162};
1163
1164
1165static void VerifyNonPointerSpacePointers() {
1166  // Verify that there are no pointers to new space in spaces where we
1167  // do not expect them.
1168  VerifyNonPointerSpacePointersVisitor v;
1169  HeapObjectIterator code_it(HEAP->code_space());
1170  for (HeapObject* object = code_it.Next();
1171       object != NULL; object = code_it.Next())
1172    object->Iterate(&v);
1173
1174  // The old data space was normally swept conservatively so that the iterator
1175  // doesn't work, so we normally skip the next bit.
1176  if (!HEAP->old_data_space()->was_swept_conservatively()) {
1177    HeapObjectIterator data_it(HEAP->old_data_space());
1178    for (HeapObject* object = data_it.Next();
1179         object != NULL; object = data_it.Next())
1180      object->Iterate(&v);
1181  }
1182}
1183#endif  // VERIFY_HEAP
1184
1185
1186void Heap::CheckNewSpaceExpansionCriteria() {
1187  if (new_space_.Capacity() < new_space_.MaximumCapacity() &&
1188      survived_since_last_expansion_ > new_space_.Capacity() &&
1189      !new_space_high_promotion_mode_active_) {
1190    // Grow the size of new space if there is room to grow, enough data
1191    // has survived scavenge since the last expansion and we are not in
1192    // high promotion mode.
1193    new_space_.Grow();
1194    survived_since_last_expansion_ = 0;
1195  }
1196}
1197
1198
1199static bool IsUnscavengedHeapObject(Heap* heap, Object** p) {
1200  return heap->InNewSpace(*p) &&
1201      !HeapObject::cast(*p)->map_word().IsForwardingAddress();
1202}
1203
1204
1205void Heap::ScavengeStoreBufferCallback(
1206    Heap* heap,
1207    MemoryChunk* page,
1208    StoreBufferEvent event) {
1209  heap->store_buffer_rebuilder_.Callback(page, event);
1210}
1211
1212
1213void StoreBufferRebuilder::Callback(MemoryChunk* page, StoreBufferEvent event) {
1214  if (event == kStoreBufferStartScanningPagesEvent) {
1215    start_of_current_page_ = NULL;
1216    current_page_ = NULL;
1217  } else if (event == kStoreBufferScanningPageEvent) {
1218    if (current_page_ != NULL) {
1219      // If this page already overflowed the store buffer during this iteration.
1220      if (current_page_->scan_on_scavenge()) {
1221        // Then we should wipe out the entries that have been added for it.
1222        store_buffer_->SetTop(start_of_current_page_);
1223      } else if (store_buffer_->Top() - start_of_current_page_ >=
1224                 (store_buffer_->Limit() - store_buffer_->Top()) >> 2) {
1225        // Did we find too many pointers in the previous page?  The heuristic is
1226        // that no page can take more then 1/5 the remaining slots in the store
1227        // buffer.
1228        current_page_->set_scan_on_scavenge(true);
1229        store_buffer_->SetTop(start_of_current_page_);
1230      } else {
1231        // In this case the page we scanned took a reasonable number of slots in
1232        // the store buffer.  It has now been rehabilitated and is no longer
1233        // marked scan_on_scavenge.
1234        ASSERT(!current_page_->scan_on_scavenge());
1235      }
1236    }
1237    start_of_current_page_ = store_buffer_->Top();
1238    current_page_ = page;
1239  } else if (event == kStoreBufferFullEvent) {
1240    // The current page overflowed the store buffer again.  Wipe out its entries
1241    // in the store buffer and mark it scan-on-scavenge again.  This may happen
1242    // several times while scanning.
1243    if (current_page_ == NULL) {
1244      // Store Buffer overflowed while scanning promoted objects.  These are not
1245      // in any particular page, though they are likely to be clustered by the
1246      // allocation routines.
1247      store_buffer_->EnsureSpace(StoreBuffer::kStoreBufferSize / 2);
1248    } else {
1249      // Store Buffer overflowed while scanning a particular old space page for
1250      // pointers to new space.
1251      ASSERT(current_page_ == page);
1252      ASSERT(page != NULL);
1253      current_page_->set_scan_on_scavenge(true);
1254      ASSERT(start_of_current_page_ != store_buffer_->Top());
1255      store_buffer_->SetTop(start_of_current_page_);
1256    }
1257  } else {
1258    UNREACHABLE();
1259  }
1260}
1261
1262
1263void PromotionQueue::Initialize() {
1264  // Assumes that a NewSpacePage exactly fits a number of promotion queue
1265  // entries (where each is a pair of intptr_t). This allows us to simplify
1266  // the test fpr when to switch pages.
1267  ASSERT((Page::kPageSize - MemoryChunk::kBodyOffset) % (2 * kPointerSize)
1268         == 0);
1269  limit_ = reinterpret_cast<intptr_t*>(heap_->new_space()->ToSpaceStart());
1270  front_ = rear_ =
1271      reinterpret_cast<intptr_t*>(heap_->new_space()->ToSpaceEnd());
1272  emergency_stack_ = NULL;
1273  guard_ = false;
1274}
1275
1276
1277void PromotionQueue::RelocateQueueHead() {
1278  ASSERT(emergency_stack_ == NULL);
1279
1280  Page* p = Page::FromAllocationTop(reinterpret_cast<Address>(rear_));
1281  intptr_t* head_start = rear_;
1282  intptr_t* head_end =
1283      Min(front_, reinterpret_cast<intptr_t*>(p->area_end()));
1284
1285  int entries_count =
1286      static_cast<int>(head_end - head_start) / kEntrySizeInWords;
1287
1288  emergency_stack_ = new List<Entry>(2 * entries_count);
1289
1290  while (head_start != head_end) {
1291    int size = static_cast<int>(*(head_start++));
1292    HeapObject* obj = reinterpret_cast<HeapObject*>(*(head_start++));
1293    emergency_stack_->Add(Entry(obj, size));
1294  }
1295  rear_ = head_end;
1296}
1297
1298
1299class ScavengeWeakObjectRetainer : public WeakObjectRetainer {
1300 public:
1301  explicit ScavengeWeakObjectRetainer(Heap* heap) : heap_(heap) { }
1302
1303  virtual Object* RetainAs(Object* object) {
1304    if (!heap_->InFromSpace(object)) {
1305      return object;
1306    }
1307
1308    MapWord map_word = HeapObject::cast(object)->map_word();
1309    if (map_word.IsForwardingAddress()) {
1310      return map_word.ToForwardingAddress();
1311    }
1312    return NULL;
1313  }
1314
1315 private:
1316  Heap* heap_;
1317};
1318
1319
1320void Heap::Scavenge() {
1321  RelocationLock relocation_lock(this);
1322
1323#ifdef VERIFY_HEAP
1324  if (FLAG_verify_heap) VerifyNonPointerSpacePointers();
1325#endif
1326
1327  gc_state_ = SCAVENGE;
1328
1329  // Implements Cheney's copying algorithm
1330  LOG(isolate_, ResourceEvent("scavenge", "begin"));
1331
1332  // Clear descriptor cache.
1333  isolate_->descriptor_lookup_cache()->Clear();
1334
1335  // Used for updating survived_since_last_expansion_ at function end.
1336  intptr_t survived_watermark = PromotedSpaceSizeOfObjects();
1337
1338  CheckNewSpaceExpansionCriteria();
1339
1340  SelectScavengingVisitorsTable();
1341
1342  incremental_marking()->PrepareForScavenge();
1343
1344  paged_space(OLD_DATA_SPACE)->EnsureSweeperProgress(new_space_.Size());
1345  paged_space(OLD_POINTER_SPACE)->EnsureSweeperProgress(new_space_.Size());
1346
1347  // Flip the semispaces.  After flipping, to space is empty, from space has
1348  // live objects.
1349  new_space_.Flip();
1350  new_space_.ResetAllocationInfo();
1351
1352  // We need to sweep newly copied objects which can be either in the
1353  // to space or promoted to the old generation.  For to-space
1354  // objects, we treat the bottom of the to space as a queue.  Newly
1355  // copied and unswept objects lie between a 'front' mark and the
1356  // allocation pointer.
1357  //
1358  // Promoted objects can go into various old-generation spaces, and
1359  // can be allocated internally in the spaces (from the free list).
1360  // We treat the top of the to space as a queue of addresses of
1361  // promoted objects.  The addresses of newly promoted and unswept
1362  // objects lie between a 'front' mark and a 'rear' mark that is
1363  // updated as a side effect of promoting an object.
1364  //
1365  // There is guaranteed to be enough room at the top of the to space
1366  // for the addresses of promoted objects: every object promoted
1367  // frees up its size in bytes from the top of the new space, and
1368  // objects are at least one pointer in size.
1369  Address new_space_front = new_space_.ToSpaceStart();
1370  promotion_queue_.Initialize();
1371
1372#ifdef DEBUG
1373  store_buffer()->Clean();
1374#endif
1375
1376  ScavengeVisitor scavenge_visitor(this);
1377  // Copy roots.
1378  IterateRoots(&scavenge_visitor, VISIT_ALL_IN_SCAVENGE);
1379
1380  // Copy objects reachable from the old generation.
1381  {
1382    StoreBufferRebuildScope scope(this,
1383                                  store_buffer(),
1384                                  &ScavengeStoreBufferCallback);
1385    store_buffer()->IteratePointersToNewSpace(&ScavengeObject);
1386  }
1387
1388  // Copy objects reachable from simple cells by scavenging cell values
1389  // directly.
1390  HeapObjectIterator cell_iterator(cell_space_);
1391  for (HeapObject* heap_object = cell_iterator.Next();
1392       heap_object != NULL;
1393       heap_object = cell_iterator.Next()) {
1394    if (heap_object->IsCell()) {
1395      Cell* cell = Cell::cast(heap_object);
1396      Address value_address = cell->ValueAddress();
1397      scavenge_visitor.VisitPointer(reinterpret_cast<Object**>(value_address));
1398    }
1399  }
1400
1401  // Copy objects reachable from global property cells by scavenging global
1402  // property cell values directly.
1403  HeapObjectIterator js_global_property_cell_iterator(property_cell_space_);
1404  for (HeapObject* heap_object = js_global_property_cell_iterator.Next();
1405       heap_object != NULL;
1406       heap_object = js_global_property_cell_iterator.Next()) {
1407    if (heap_object->IsPropertyCell()) {
1408      PropertyCell* cell = PropertyCell::cast(heap_object);
1409      Address value_address = cell->ValueAddress();
1410      scavenge_visitor.VisitPointer(reinterpret_cast<Object**>(value_address));
1411      Address type_address = cell->TypeAddress();
1412      scavenge_visitor.VisitPointer(reinterpret_cast<Object**>(type_address));
1413    }
1414  }
1415
1416  // Copy objects reachable from the code flushing candidates list.
1417  MarkCompactCollector* collector = mark_compact_collector();
1418  if (collector->is_code_flushing_enabled()) {
1419    collector->code_flusher()->IteratePointersToFromSpace(&scavenge_visitor);
1420  }
1421
1422  // Scavenge object reachable from the native contexts list directly.
1423  scavenge_visitor.VisitPointer(BitCast<Object**>(&native_contexts_list_));
1424
1425  new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
1426
1427  while (isolate()->global_handles()->IterateObjectGroups(
1428      &scavenge_visitor, &IsUnscavengedHeapObject)) {
1429    new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
1430  }
1431  isolate()->global_handles()->RemoveObjectGroups();
1432  isolate()->global_handles()->RemoveImplicitRefGroups();
1433
1434  isolate_->global_handles()->IdentifyNewSpaceWeakIndependentHandles(
1435      &IsUnscavengedHeapObject);
1436  isolate_->global_handles()->IterateNewSpaceWeakIndependentRoots(
1437      &scavenge_visitor);
1438  new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
1439
1440  UpdateNewSpaceReferencesInExternalStringTable(
1441      &UpdateNewSpaceReferenceInExternalStringTableEntry);
1442
1443  promotion_queue_.Destroy();
1444
1445  if (!FLAG_watch_ic_patching) {
1446    isolate()->runtime_profiler()->UpdateSamplesAfterScavenge();
1447  }
1448  incremental_marking()->UpdateMarkingDequeAfterScavenge();
1449
1450  ScavengeWeakObjectRetainer weak_object_retainer(this);
1451  ProcessWeakReferences(&weak_object_retainer);
1452
1453  ASSERT(new_space_front == new_space_.top());
1454
1455  // Set age mark.
1456  new_space_.set_age_mark(new_space_.top());
1457
1458  new_space_.LowerInlineAllocationLimit(
1459      new_space_.inline_allocation_limit_step());
1460
1461  // Update how much has survived scavenge.
1462  IncrementYoungSurvivorsCounter(static_cast<int>(
1463      (PromotedSpaceSizeOfObjects() - survived_watermark) + new_space_.Size()));
1464
1465  LOG(isolate_, ResourceEvent("scavenge", "end"));
1466
1467  gc_state_ = NOT_IN_GC;
1468
1469  scavenges_since_last_idle_round_++;
1470}
1471
1472
1473String* Heap::UpdateNewSpaceReferenceInExternalStringTableEntry(Heap* heap,
1474                                                                Object** p) {
1475  MapWord first_word = HeapObject::cast(*p)->map_word();
1476
1477  if (!first_word.IsForwardingAddress()) {
1478    // Unreachable external string can be finalized.
1479    heap->FinalizeExternalString(String::cast(*p));
1480    return NULL;
1481  }
1482
1483  // String is still reachable.
1484  return String::cast(first_word.ToForwardingAddress());
1485}
1486
1487
1488void Heap::UpdateNewSpaceReferencesInExternalStringTable(
1489    ExternalStringTableUpdaterCallback updater_func) {
1490#ifdef VERIFY_HEAP
1491  if (FLAG_verify_heap) {
1492    external_string_table_.Verify();
1493  }
1494#endif
1495
1496  if (external_string_table_.new_space_strings_.is_empty()) return;
1497
1498  Object** start = &external_string_table_.new_space_strings_[0];
1499  Object** end = start + external_string_table_.new_space_strings_.length();
1500  Object** last = start;
1501
1502  for (Object** p = start; p < end; ++p) {
1503    ASSERT(InFromSpace(*p));
1504    String* target = updater_func(this, p);
1505
1506    if (target == NULL) continue;
1507
1508    ASSERT(target->IsExternalString());
1509
1510    if (InNewSpace(target)) {
1511      // String is still in new space.  Update the table entry.
1512      *last = target;
1513      ++last;
1514    } else {
1515      // String got promoted.  Move it to the old string list.
1516      external_string_table_.AddOldString(target);
1517    }
1518  }
1519
1520  ASSERT(last <= end);
1521  external_string_table_.ShrinkNewStrings(static_cast<int>(last - start));
1522}
1523
1524
1525void Heap::UpdateReferencesInExternalStringTable(
1526    ExternalStringTableUpdaterCallback updater_func) {
1527
1528  // Update old space string references.
1529  if (external_string_table_.old_space_strings_.length() > 0) {
1530    Object** start = &external_string_table_.old_space_strings_[0];
1531    Object** end = start + external_string_table_.old_space_strings_.length();
1532    for (Object** p = start; p < end; ++p) *p = updater_func(this, p);
1533  }
1534
1535  UpdateNewSpaceReferencesInExternalStringTable(updater_func);
1536}
1537
1538
1539template <class T>
1540struct WeakListVisitor;
1541
1542
1543template <class T>
1544static Object* VisitWeakList(Heap* heap,
1545                             Object* list,
1546                             WeakObjectRetainer* retainer,
1547                             bool record_slots) {
1548  Object* undefined = heap->undefined_value();
1549  Object* head = undefined;
1550  T* tail = NULL;
1551  MarkCompactCollector* collector = heap->mark_compact_collector();
1552  while (list != undefined) {
1553    // Check whether to keep the candidate in the list.
1554    T* candidate = reinterpret_cast<T*>(list);
1555    Object* retained = retainer->RetainAs(list);
1556    if (retained != NULL) {
1557      if (head == undefined) {
1558        // First element in the list.
1559        head = retained;
1560      } else {
1561        // Subsequent elements in the list.
1562        ASSERT(tail != NULL);
1563        WeakListVisitor<T>::SetWeakNext(tail, retained);
1564        if (record_slots) {
1565          Object** next_slot =
1566            HeapObject::RawField(tail, WeakListVisitor<T>::WeakNextOffset());
1567          collector->RecordSlot(next_slot, next_slot, retained);
1568        }
1569      }
1570      // Retained object is new tail.
1571      ASSERT(!retained->IsUndefined());
1572      candidate = reinterpret_cast<T*>(retained);
1573      tail = candidate;
1574
1575
1576      // tail is a live object, visit it.
1577      WeakListVisitor<T>::VisitLiveObject(
1578          heap, tail, retainer, record_slots);
1579    } else {
1580      WeakListVisitor<T>::VisitPhantomObject(heap, candidate);
1581    }
1582
1583    // Move to next element in the list.
1584    list = WeakListVisitor<T>::WeakNext(candidate);
1585  }
1586
1587  // Terminate the list if there is one or more elements.
1588  if (tail != NULL) {
1589    WeakListVisitor<T>::SetWeakNext(tail, undefined);
1590  }
1591  return head;
1592}
1593
1594
1595template<>
1596struct WeakListVisitor<JSFunction> {
1597  static void SetWeakNext(JSFunction* function, Object* next) {
1598    function->set_next_function_link(next);
1599  }
1600
1601  static Object* WeakNext(JSFunction* function) {
1602    return function->next_function_link();
1603  }
1604
1605  static int WeakNextOffset() {
1606    return JSFunction::kNextFunctionLinkOffset;
1607  }
1608
1609  static void VisitLiveObject(Heap*, JSFunction*,
1610                              WeakObjectRetainer*, bool) {
1611  }
1612
1613  static void VisitPhantomObject(Heap*, JSFunction*) {
1614  }
1615};
1616
1617
1618template<>
1619struct WeakListVisitor<Context> {
1620  static void SetWeakNext(Context* context, Object* next) {
1621    context->set(Context::NEXT_CONTEXT_LINK,
1622                 next,
1623                 UPDATE_WRITE_BARRIER);
1624  }
1625
1626  static Object* WeakNext(Context* context) {
1627    return context->get(Context::NEXT_CONTEXT_LINK);
1628  }
1629
1630  static void VisitLiveObject(Heap* heap,
1631                              Context* context,
1632                              WeakObjectRetainer* retainer,
1633                              bool record_slots) {
1634    // Process the weak list of optimized functions for the context.
1635    Object* function_list_head =
1636        VisitWeakList<JSFunction>(
1637            heap,
1638            context->get(Context::OPTIMIZED_FUNCTIONS_LIST),
1639            retainer,
1640            record_slots);
1641    context->set(Context::OPTIMIZED_FUNCTIONS_LIST,
1642                 function_list_head,
1643                 UPDATE_WRITE_BARRIER);
1644    if (record_slots) {
1645      Object** optimized_functions =
1646          HeapObject::RawField(
1647              context, FixedArray::SizeFor(Context::OPTIMIZED_FUNCTIONS_LIST));
1648      heap->mark_compact_collector()->RecordSlot(
1649          optimized_functions, optimized_functions, function_list_head);
1650    }
1651  }
1652
1653  static void VisitPhantomObject(Heap*, Context*) {
1654  }
1655
1656  static int WeakNextOffset() {
1657    return FixedArray::SizeFor(Context::NEXT_CONTEXT_LINK);
1658  }
1659};
1660
1661
1662void Heap::ProcessWeakReferences(WeakObjectRetainer* retainer) {
1663  // We don't record weak slots during marking or scavenges.
1664  // Instead we do it once when we complete mark-compact cycle.
1665  // Note that write barrier has no effect if we are already in the middle of
1666  // compacting mark-sweep cycle and we have to record slots manually.
1667  bool record_slots =
1668      gc_state() == MARK_COMPACT &&
1669      mark_compact_collector()->is_compacting();
1670  ProcessArrayBuffers(retainer, record_slots);
1671  ProcessNativeContexts(retainer, record_slots);
1672  ProcessAllocationSites(retainer, record_slots);
1673}
1674
1675void Heap::ProcessNativeContexts(WeakObjectRetainer* retainer,
1676                                 bool record_slots) {
1677  Object* head =
1678      VisitWeakList<Context>(
1679          this, native_contexts_list(), retainer, record_slots);
1680  // Update the head of the list of contexts.
1681  native_contexts_list_ = head;
1682}
1683
1684
1685template<>
1686struct WeakListVisitor<JSArrayBufferView> {
1687  static void SetWeakNext(JSArrayBufferView* obj, Object* next) {
1688    obj->set_weak_next(next);
1689  }
1690
1691  static Object* WeakNext(JSArrayBufferView* obj) {
1692    return obj->weak_next();
1693  }
1694
1695  static void VisitLiveObject(Heap*,
1696                              JSArrayBufferView* obj,
1697                              WeakObjectRetainer* retainer,
1698                              bool record_slots) {}
1699
1700  static void VisitPhantomObject(Heap*, JSArrayBufferView*) {}
1701
1702  static int WeakNextOffset() {
1703    return JSArrayBufferView::kWeakNextOffset;
1704  }
1705};
1706
1707
1708template<>
1709struct WeakListVisitor<JSArrayBuffer> {
1710  static void SetWeakNext(JSArrayBuffer* obj, Object* next) {
1711    obj->set_weak_next(next);
1712  }
1713
1714  static Object* WeakNext(JSArrayBuffer* obj) {
1715    return obj->weak_next();
1716  }
1717
1718  static void VisitLiveObject(Heap* heap,
1719                              JSArrayBuffer* array_buffer,
1720                              WeakObjectRetainer* retainer,
1721                              bool record_slots) {
1722    Object* typed_array_obj =
1723        VisitWeakList<JSArrayBufferView>(
1724            heap,
1725            array_buffer->weak_first_view(),
1726            retainer, record_slots);
1727    array_buffer->set_weak_first_view(typed_array_obj);
1728    if (typed_array_obj != heap->undefined_value() && record_slots) {
1729      Object** slot = HeapObject::RawField(
1730          array_buffer, JSArrayBuffer::kWeakFirstViewOffset);
1731      heap->mark_compact_collector()->RecordSlot(slot, slot, typed_array_obj);
1732    }
1733  }
1734
1735  static void VisitPhantomObject(Heap* heap, JSArrayBuffer* phantom) {
1736    Runtime::FreeArrayBuffer(heap->isolate(), phantom);
1737  }
1738
1739  static int WeakNextOffset() {
1740    return JSArrayBuffer::kWeakNextOffset;
1741  }
1742};
1743
1744
1745void Heap::ProcessArrayBuffers(WeakObjectRetainer* retainer,
1746                               bool record_slots) {
1747  Object* array_buffer_obj =
1748      VisitWeakList<JSArrayBuffer>(this,
1749                                   array_buffers_list(),
1750                                   retainer, record_slots);
1751  set_array_buffers_list(array_buffer_obj);
1752}
1753
1754
1755void Heap::TearDownArrayBuffers() {
1756  Object* undefined = undefined_value();
1757  for (Object* o = array_buffers_list(); o != undefined;) {
1758    JSArrayBuffer* buffer = JSArrayBuffer::cast(o);
1759    Runtime::FreeArrayBuffer(isolate(), buffer);
1760    o = buffer->weak_next();
1761  }
1762  array_buffers_list_ = undefined;
1763}
1764
1765
1766template<>
1767struct WeakListVisitor<AllocationSite> {
1768  static void SetWeakNext(AllocationSite* obj, Object* next) {
1769    obj->set_weak_next(next);
1770  }
1771
1772  static Object* WeakNext(AllocationSite* obj) {
1773    return obj->weak_next();
1774  }
1775
1776  static void VisitLiveObject(Heap* heap,
1777                              AllocationSite* array_buffer,
1778                              WeakObjectRetainer* retainer,
1779                              bool record_slots) {}
1780
1781  static void VisitPhantomObject(Heap* heap, AllocationSite* phantom) {}
1782
1783  static int WeakNextOffset() {
1784    return AllocationSite::kWeakNextOffset;
1785  }
1786};
1787
1788
1789void Heap::ProcessAllocationSites(WeakObjectRetainer* retainer,
1790                                  bool record_slots) {
1791  Object* allocation_site_obj =
1792      VisitWeakList<AllocationSite>(this,
1793                                    allocation_sites_list(),
1794                                    retainer, record_slots);
1795  set_allocation_sites_list(allocation_site_obj);
1796}
1797
1798
1799void Heap::VisitExternalResources(v8::ExternalResourceVisitor* visitor) {
1800  DisallowHeapAllocation no_allocation;
1801
1802  // Both the external string table and the string table may contain
1803  // external strings, but neither lists them exhaustively, nor is the
1804  // intersection set empty.  Therefore we iterate over the external string
1805  // table first, ignoring internalized strings, and then over the
1806  // internalized string table.
1807
1808  class ExternalStringTableVisitorAdapter : public ObjectVisitor {
1809   public:
1810    explicit ExternalStringTableVisitorAdapter(
1811        v8::ExternalResourceVisitor* visitor) : visitor_(visitor) {}
1812    virtual void VisitPointers(Object** start, Object** end) {
1813      for (Object** p = start; p < end; p++) {
1814        // Visit non-internalized external strings,
1815        // since internalized strings are listed in the string table.
1816        if (!(*p)->IsInternalizedString()) {
1817          ASSERT((*p)->IsExternalString());
1818          visitor_->VisitExternalString(Utils::ToLocal(
1819              Handle<String>(String::cast(*p))));
1820        }
1821      }
1822    }
1823   private:
1824    v8::ExternalResourceVisitor* visitor_;
1825  } external_string_table_visitor(visitor);
1826
1827  external_string_table_.Iterate(&external_string_table_visitor);
1828
1829  class StringTableVisitorAdapter : public ObjectVisitor {
1830   public:
1831    explicit StringTableVisitorAdapter(
1832        v8::ExternalResourceVisitor* visitor) : visitor_(visitor) {}
1833    virtual void VisitPointers(Object** start, Object** end) {
1834      for (Object** p = start; p < end; p++) {
1835        if ((*p)->IsExternalString()) {
1836          ASSERT((*p)->IsInternalizedString());
1837          visitor_->VisitExternalString(Utils::ToLocal(
1838              Handle<String>(String::cast(*p))));
1839        }
1840      }
1841    }
1842   private:
1843    v8::ExternalResourceVisitor* visitor_;
1844  } string_table_visitor(visitor);
1845
1846  string_table()->IterateElements(&string_table_visitor);
1847}
1848
1849
1850class NewSpaceScavenger : public StaticNewSpaceVisitor<NewSpaceScavenger> {
1851 public:
1852  static inline void VisitPointer(Heap* heap, Object** p) {
1853    Object* object = *p;
1854    if (!heap->InNewSpace(object)) return;
1855    Heap::ScavengeObject(reinterpret_cast<HeapObject**>(p),
1856                         reinterpret_cast<HeapObject*>(object));
1857  }
1858};
1859
1860
1861Address Heap::DoScavenge(ObjectVisitor* scavenge_visitor,
1862                         Address new_space_front) {
1863  do {
1864    SemiSpace::AssertValidRange(new_space_front, new_space_.top());
1865    // The addresses new_space_front and new_space_.top() define a
1866    // queue of unprocessed copied objects.  Process them until the
1867    // queue is empty.
1868    while (new_space_front != new_space_.top()) {
1869      if (!NewSpacePage::IsAtEnd(new_space_front)) {
1870        HeapObject* object = HeapObject::FromAddress(new_space_front);
1871        new_space_front +=
1872          NewSpaceScavenger::IterateBody(object->map(), object);
1873      } else {
1874        new_space_front =
1875            NewSpacePage::FromLimit(new_space_front)->next_page()->area_start();
1876      }
1877    }
1878
1879    // Promote and process all the to-be-promoted objects.
1880    {
1881      StoreBufferRebuildScope scope(this,
1882                                    store_buffer(),
1883                                    &ScavengeStoreBufferCallback);
1884      while (!promotion_queue()->is_empty()) {
1885        HeapObject* target;
1886        int size;
1887        promotion_queue()->remove(&target, &size);
1888
1889        // Promoted object might be already partially visited
1890        // during old space pointer iteration. Thus we search specificly
1891        // for pointers to from semispace instead of looking for pointers
1892        // to new space.
1893        ASSERT(!target->IsMap());
1894        IterateAndMarkPointersToFromSpace(target->address(),
1895                                          target->address() + size,
1896                                          &ScavengeObject);
1897      }
1898    }
1899
1900    // Take another spin if there are now unswept objects in new space
1901    // (there are currently no more unswept promoted objects).
1902  } while (new_space_front != new_space_.top());
1903
1904  return new_space_front;
1905}
1906
1907
1908STATIC_ASSERT((FixedDoubleArray::kHeaderSize & kDoubleAlignmentMask) == 0);
1909
1910
1911INLINE(static HeapObject* EnsureDoubleAligned(Heap* heap,
1912                                              HeapObject* object,
1913                                              int size));
1914
1915static HeapObject* EnsureDoubleAligned(Heap* heap,
1916                                       HeapObject* object,
1917                                       int size) {
1918  if ((OffsetFrom(object->address()) & kDoubleAlignmentMask) != 0) {
1919    heap->CreateFillerObjectAt(object->address(), kPointerSize);
1920    return HeapObject::FromAddress(object->address() + kPointerSize);
1921  } else {
1922    heap->CreateFillerObjectAt(object->address() + size - kPointerSize,
1923                               kPointerSize);
1924    return object;
1925  }
1926}
1927
1928
1929enum LoggingAndProfiling {
1930  LOGGING_AND_PROFILING_ENABLED,
1931  LOGGING_AND_PROFILING_DISABLED
1932};
1933
1934
1935enum MarksHandling { TRANSFER_MARKS, IGNORE_MARKS };
1936
1937
1938template<MarksHandling marks_handling,
1939         LoggingAndProfiling logging_and_profiling_mode>
1940class ScavengingVisitor : public StaticVisitorBase {
1941 public:
1942  static void Initialize() {
1943    table_.Register(kVisitSeqOneByteString, &EvacuateSeqOneByteString);
1944    table_.Register(kVisitSeqTwoByteString, &EvacuateSeqTwoByteString);
1945    table_.Register(kVisitShortcutCandidate, &EvacuateShortcutCandidate);
1946    table_.Register(kVisitByteArray, &EvacuateByteArray);
1947    table_.Register(kVisitFixedArray, &EvacuateFixedArray);
1948    table_.Register(kVisitFixedDoubleArray, &EvacuateFixedDoubleArray);
1949
1950    table_.Register(kVisitNativeContext,
1951                    &ObjectEvacuationStrategy<POINTER_OBJECT>::
1952                        template VisitSpecialized<Context::kSize>);
1953
1954    table_.Register(kVisitConsString,
1955                    &ObjectEvacuationStrategy<POINTER_OBJECT>::
1956                        template VisitSpecialized<ConsString::kSize>);
1957
1958    table_.Register(kVisitSlicedString,
1959                    &ObjectEvacuationStrategy<POINTER_OBJECT>::
1960                        template VisitSpecialized<SlicedString::kSize>);
1961
1962    table_.Register(kVisitSymbol,
1963                    &ObjectEvacuationStrategy<POINTER_OBJECT>::
1964                        template VisitSpecialized<Symbol::kSize>);
1965
1966    table_.Register(kVisitSharedFunctionInfo,
1967                    &ObjectEvacuationStrategy<POINTER_OBJECT>::
1968                        template VisitSpecialized<SharedFunctionInfo::kSize>);
1969
1970    table_.Register(kVisitJSWeakMap,
1971                    &ObjectEvacuationStrategy<POINTER_OBJECT>::
1972                    Visit);
1973
1974    table_.Register(kVisitJSWeakSet,
1975                    &ObjectEvacuationStrategy<POINTER_OBJECT>::
1976                    Visit);
1977
1978    table_.Register(kVisitJSArrayBuffer,
1979                    &ObjectEvacuationStrategy<POINTER_OBJECT>::
1980                    Visit);
1981
1982    table_.Register(kVisitJSTypedArray,
1983                    &ObjectEvacuationStrategy<POINTER_OBJECT>::
1984                    Visit);
1985
1986    table_.Register(kVisitJSDataView,
1987                    &ObjectEvacuationStrategy<POINTER_OBJECT>::
1988                    Visit);
1989
1990    table_.Register(kVisitJSRegExp,
1991                    &ObjectEvacuationStrategy<POINTER_OBJECT>::
1992                    Visit);
1993
1994    if (marks_handling == IGNORE_MARKS) {
1995      table_.Register(kVisitJSFunction,
1996                      &ObjectEvacuationStrategy<POINTER_OBJECT>::
1997                          template VisitSpecialized<JSFunction::kSize>);
1998    } else {
1999      table_.Register(kVisitJSFunction, &EvacuateJSFunction);
2000    }
2001
2002    table_.RegisterSpecializations<ObjectEvacuationStrategy<DATA_OBJECT>,
2003                                   kVisitDataObject,
2004                                   kVisitDataObjectGeneric>();
2005
2006    table_.RegisterSpecializations<ObjectEvacuationStrategy<POINTER_OBJECT>,
2007                                   kVisitJSObject,
2008                                   kVisitJSObjectGeneric>();
2009
2010    table_.RegisterSpecializations<ObjectEvacuationStrategy<POINTER_OBJECT>,
2011                                   kVisitStruct,
2012                                   kVisitStructGeneric>();
2013  }
2014
2015  static VisitorDispatchTable<ScavengingCallback>* GetTable() {
2016    return &table_;
2017  }
2018
2019 private:
2020  enum ObjectContents  { DATA_OBJECT, POINTER_OBJECT };
2021
2022  static void RecordCopiedObject(Heap* heap, HeapObject* obj) {
2023    bool should_record = false;
2024#ifdef DEBUG
2025    should_record = FLAG_heap_stats;
2026#endif
2027    should_record = should_record || FLAG_log_gc;
2028    if (should_record) {
2029      if (heap->new_space()->Contains(obj)) {
2030        heap->new_space()->RecordAllocation(obj);
2031      } else {
2032        heap->new_space()->RecordPromotion(obj);
2033      }
2034    }
2035  }
2036
2037  // Helper function used by CopyObject to copy a source object to an
2038  // allocated target object and update the forwarding pointer in the source
2039  // object.  Returns the target object.
2040  INLINE(static void MigrateObject(Heap* heap,
2041                                   HeapObject* source,
2042                                   HeapObject* target,
2043                                   int size)) {
2044    // Copy the content of source to target.
2045    heap->CopyBlock(target->address(), source->address(), size);
2046
2047    // Set the forwarding address.
2048    source->set_map_word(MapWord::FromForwardingAddress(target));
2049
2050    if (logging_and_profiling_mode == LOGGING_AND_PROFILING_ENABLED) {
2051      // Update NewSpace stats if necessary.
2052      RecordCopiedObject(heap, target);
2053      HEAP_PROFILE(heap, ObjectMoveEvent(source->address(), target->address()));
2054      Isolate* isolate = heap->isolate();
2055      if (isolate->logger()->is_logging_code_events() ||
2056          isolate->cpu_profiler()->is_profiling()) {
2057        if (target->IsSharedFunctionInfo()) {
2058          PROFILE(isolate, SharedFunctionInfoMoveEvent(
2059              source->address(), target->address()));
2060        }
2061      }
2062    }
2063
2064    if (marks_handling == TRANSFER_MARKS) {
2065      if (Marking::TransferColor(source, target)) {
2066        MemoryChunk::IncrementLiveBytesFromGC(target->address(), size);
2067      }
2068    }
2069  }
2070
2071
2072  template<ObjectContents object_contents, int alignment>
2073  static inline void EvacuateObject(Map* map,
2074                                    HeapObject** slot,
2075                                    HeapObject* object,
2076                                    int object_size) {
2077    SLOW_ASSERT(object_size <= Page::kMaxNonCodeHeapObjectSize);
2078    SLOW_ASSERT(object->Size() == object_size);
2079
2080    int allocation_size = object_size;
2081    if (alignment != kObjectAlignment) {
2082      ASSERT(alignment == kDoubleAlignment);
2083      allocation_size += kPointerSize;
2084    }
2085
2086    Heap* heap = map->GetHeap();
2087    if (heap->ShouldBePromoted(object->address(), object_size)) {
2088      MaybeObject* maybe_result;
2089
2090      if (object_contents == DATA_OBJECT) {
2091        // TODO(mstarzinger): Turn this check into a regular assert soon!
2092        CHECK(heap->AllowedToBeMigrated(object, OLD_DATA_SPACE));
2093        maybe_result = heap->old_data_space()->AllocateRaw(allocation_size);
2094      } else {
2095        // TODO(mstarzinger): Turn this check into a regular assert soon!
2096        CHECK(heap->AllowedToBeMigrated(object, OLD_POINTER_SPACE));
2097        maybe_result = heap->old_pointer_space()->AllocateRaw(allocation_size);
2098      }
2099
2100      Object* result = NULL;  // Initialization to please compiler.
2101      if (maybe_result->ToObject(&result)) {
2102        HeapObject* target = HeapObject::cast(result);
2103
2104        if (alignment != kObjectAlignment) {
2105          target = EnsureDoubleAligned(heap, target, allocation_size);
2106        }
2107
2108        // Order is important: slot might be inside of the target if target
2109        // was allocated over a dead object and slot comes from the store
2110        // buffer.
2111        *slot = target;
2112        MigrateObject(heap, object, target, object_size);
2113
2114        if (object_contents == POINTER_OBJECT) {
2115          if (map->instance_type() == JS_FUNCTION_TYPE) {
2116            heap->promotion_queue()->insert(
2117                target, JSFunction::kNonWeakFieldsEndOffset);
2118          } else {
2119            heap->promotion_queue()->insert(target, object_size);
2120          }
2121        }
2122
2123        heap->tracer()->increment_promoted_objects_size(object_size);
2124        return;
2125      }
2126    }
2127    // TODO(mstarzinger): Turn this check into a regular assert soon!
2128    CHECK(heap->AllowedToBeMigrated(object, NEW_SPACE));
2129    MaybeObject* allocation = heap->new_space()->AllocateRaw(allocation_size);
2130    heap->promotion_queue()->SetNewLimit(heap->new_space()->top());
2131    Object* result = allocation->ToObjectUnchecked();
2132    HeapObject* target = HeapObject::cast(result);
2133
2134    if (alignment != kObjectAlignment) {
2135      target = EnsureDoubleAligned(heap, target, allocation_size);
2136    }
2137
2138    // Order is important: slot might be inside of the target if target
2139    // was allocated over a dead object and slot comes from the store
2140    // buffer.
2141    *slot = target;
2142    MigrateObject(heap, object, target, object_size);
2143    return;
2144  }
2145
2146
2147  static inline void EvacuateJSFunction(Map* map,
2148                                        HeapObject** slot,
2149                                        HeapObject* object) {
2150    ObjectEvacuationStrategy<POINTER_OBJECT>::
2151        template VisitSpecialized<JSFunction::kSize>(map, slot, object);
2152
2153    HeapObject* target = *slot;
2154    MarkBit mark_bit = Marking::MarkBitFrom(target);
2155    if (Marking::IsBlack(mark_bit)) {
2156      // This object is black and it might not be rescanned by marker.
2157      // We should explicitly record code entry slot for compaction because
2158      // promotion queue processing (IterateAndMarkPointersToFromSpace) will
2159      // miss it as it is not HeapObject-tagged.
2160      Address code_entry_slot =
2161          target->address() + JSFunction::kCodeEntryOffset;
2162      Code* code = Code::cast(Code::GetObjectFromEntryAddress(code_entry_slot));
2163      map->GetHeap()->mark_compact_collector()->
2164          RecordCodeEntrySlot(code_entry_slot, code);
2165    }
2166  }
2167
2168
2169  static inline void EvacuateFixedArray(Map* map,
2170                                        HeapObject** slot,
2171                                        HeapObject* object) {
2172    int object_size = FixedArray::BodyDescriptor::SizeOf(map, object);
2173    EvacuateObject<POINTER_OBJECT, kObjectAlignment>(
2174        map, slot, object, object_size);
2175  }
2176
2177
2178  static inline void EvacuateFixedDoubleArray(Map* map,
2179                                              HeapObject** slot,
2180                                              HeapObject* object) {
2181    int length = reinterpret_cast<FixedDoubleArray*>(object)->length();
2182    int object_size = FixedDoubleArray::SizeFor(length);
2183    EvacuateObject<DATA_OBJECT, kDoubleAlignment>(
2184        map, slot, object, object_size);
2185  }
2186
2187
2188  static inline void EvacuateByteArray(Map* map,
2189                                       HeapObject** slot,
2190                                       HeapObject* object) {
2191    int object_size = reinterpret_cast<ByteArray*>(object)->ByteArraySize();
2192    EvacuateObject<DATA_OBJECT, kObjectAlignment>(
2193        map, slot, object, object_size);
2194  }
2195
2196
2197  static inline void EvacuateSeqOneByteString(Map* map,
2198                                            HeapObject** slot,
2199                                            HeapObject* object) {
2200    int object_size = SeqOneByteString::cast(object)->
2201        SeqOneByteStringSize(map->instance_type());
2202    EvacuateObject<DATA_OBJECT, kObjectAlignment>(
2203        map, slot, object, object_size);
2204  }
2205
2206
2207  static inline void EvacuateSeqTwoByteString(Map* map,
2208                                              HeapObject** slot,
2209                                              HeapObject* object) {
2210    int object_size = SeqTwoByteString::cast(object)->
2211        SeqTwoByteStringSize(map->instance_type());
2212    EvacuateObject<DATA_OBJECT, kObjectAlignment>(
2213        map, slot, object, object_size);
2214  }
2215
2216
2217  static inline bool IsShortcutCandidate(int type) {
2218    return ((type & kShortcutTypeMask) == kShortcutTypeTag);
2219  }
2220
2221  static inline void EvacuateShortcutCandidate(Map* map,
2222                                               HeapObject** slot,
2223                                               HeapObject* object) {
2224    ASSERT(IsShortcutCandidate(map->instance_type()));
2225
2226    Heap* heap = map->GetHeap();
2227
2228    if (marks_handling == IGNORE_MARKS &&
2229        ConsString::cast(object)->unchecked_second() ==
2230        heap->empty_string()) {
2231      HeapObject* first =
2232          HeapObject::cast(ConsString::cast(object)->unchecked_first());
2233
2234      *slot = first;
2235
2236      if (!heap->InNewSpace(first)) {
2237        object->set_map_word(MapWord::FromForwardingAddress(first));
2238        return;
2239      }
2240
2241      MapWord first_word = first->map_word();
2242      if (first_word.IsForwardingAddress()) {
2243        HeapObject* target = first_word.ToForwardingAddress();
2244
2245        *slot = target;
2246        object->set_map_word(MapWord::FromForwardingAddress(target));
2247        return;
2248      }
2249
2250      heap->DoScavengeObject(first->map(), slot, first);
2251      object->set_map_word(MapWord::FromForwardingAddress(*slot));
2252      return;
2253    }
2254
2255    int object_size = ConsString::kSize;
2256    EvacuateObject<POINTER_OBJECT, kObjectAlignment>(
2257        map, slot, object, object_size);
2258  }
2259
2260  template<ObjectContents object_contents>
2261  class ObjectEvacuationStrategy {
2262   public:
2263    template<int object_size>
2264    static inline void VisitSpecialized(Map* map,
2265                                        HeapObject** slot,
2266                                        HeapObject* object) {
2267      EvacuateObject<object_contents, kObjectAlignment>(
2268          map, slot, object, object_size);
2269    }
2270
2271    static inline void Visit(Map* map,
2272                             HeapObject** slot,
2273                             HeapObject* object) {
2274      int object_size = map->instance_size();
2275      EvacuateObject<object_contents, kObjectAlignment>(
2276          map, slot, object, object_size);
2277    }
2278  };
2279
2280  static VisitorDispatchTable<ScavengingCallback> table_;
2281};
2282
2283
2284template<MarksHandling marks_handling,
2285         LoggingAndProfiling logging_and_profiling_mode>
2286VisitorDispatchTable<ScavengingCallback>
2287    ScavengingVisitor<marks_handling, logging_and_profiling_mode>::table_;
2288
2289
2290static void InitializeScavengingVisitorsTables() {
2291  ScavengingVisitor<TRANSFER_MARKS,
2292                    LOGGING_AND_PROFILING_DISABLED>::Initialize();
2293  ScavengingVisitor<IGNORE_MARKS, LOGGING_AND_PROFILING_DISABLED>::Initialize();
2294  ScavengingVisitor<TRANSFER_MARKS,
2295                    LOGGING_AND_PROFILING_ENABLED>::Initialize();
2296  ScavengingVisitor<IGNORE_MARKS, LOGGING_AND_PROFILING_ENABLED>::Initialize();
2297}
2298
2299
2300void Heap::SelectScavengingVisitorsTable() {
2301  bool logging_and_profiling =
2302      isolate()->logger()->is_logging() ||
2303      isolate()->cpu_profiler()->is_profiling() ||
2304      (isolate()->heap_profiler() != NULL &&
2305       isolate()->heap_profiler()->is_profiling());
2306
2307  if (!incremental_marking()->IsMarking()) {
2308    if (!logging_and_profiling) {
2309      scavenging_visitors_table_.CopyFrom(
2310          ScavengingVisitor<IGNORE_MARKS,
2311                            LOGGING_AND_PROFILING_DISABLED>::GetTable());
2312    } else {
2313      scavenging_visitors_table_.CopyFrom(
2314          ScavengingVisitor<IGNORE_MARKS,
2315                            LOGGING_AND_PROFILING_ENABLED>::GetTable());
2316    }
2317  } else {
2318    if (!logging_and_profiling) {
2319      scavenging_visitors_table_.CopyFrom(
2320          ScavengingVisitor<TRANSFER_MARKS,
2321                            LOGGING_AND_PROFILING_DISABLED>::GetTable());
2322    } else {
2323      scavenging_visitors_table_.CopyFrom(
2324          ScavengingVisitor<TRANSFER_MARKS,
2325                            LOGGING_AND_PROFILING_ENABLED>::GetTable());
2326    }
2327
2328    if (incremental_marking()->IsCompacting()) {
2329      // When compacting forbid short-circuiting of cons-strings.
2330      // Scavenging code relies on the fact that new space object
2331      // can't be evacuated into evacuation candidate but
2332      // short-circuiting violates this assumption.
2333      scavenging_visitors_table_.Register(
2334          StaticVisitorBase::kVisitShortcutCandidate,
2335          scavenging_visitors_table_.GetVisitorById(
2336              StaticVisitorBase::kVisitConsString));
2337    }
2338  }
2339}
2340
2341
2342void Heap::ScavengeObjectSlow(HeapObject** p, HeapObject* object) {
2343  SLOW_ASSERT(HEAP->InFromSpace(object));
2344  MapWord first_word = object->map_word();
2345  SLOW_ASSERT(!first_word.IsForwardingAddress());
2346  Map* map = first_word.ToMap();
2347  map->GetHeap()->DoScavengeObject(map, p, object);
2348}
2349
2350
2351MaybeObject* Heap::AllocatePartialMap(InstanceType instance_type,
2352                                      int instance_size) {
2353  Object* result;
2354  MaybeObject* maybe_result = AllocateRawMap();
2355  if (!maybe_result->ToObject(&result)) return maybe_result;
2356
2357  // Map::cast cannot be used due to uninitialized map field.
2358  reinterpret_cast<Map*>(result)->set_map(raw_unchecked_meta_map());
2359  reinterpret_cast<Map*>(result)->set_instance_type(instance_type);
2360  reinterpret_cast<Map*>(result)->set_instance_size(instance_size);
2361  reinterpret_cast<Map*>(result)->set_visitor_id(
2362        StaticVisitorBase::GetVisitorId(instance_type, instance_size));
2363  reinterpret_cast<Map*>(result)->set_inobject_properties(0);
2364  reinterpret_cast<Map*>(result)->set_pre_allocated_property_fields(0);
2365  reinterpret_cast<Map*>(result)->set_unused_property_fields(0);
2366  reinterpret_cast<Map*>(result)->set_bit_field(0);
2367  reinterpret_cast<Map*>(result)->set_bit_field2(0);
2368  int bit_field3 = Map::EnumLengthBits::encode(Map::kInvalidEnumCache) |
2369                   Map::OwnsDescriptors::encode(true);
2370  reinterpret_cast<Map*>(result)->set_bit_field3(bit_field3);
2371  return result;
2372}
2373
2374
2375MaybeObject* Heap::AllocateMap(InstanceType instance_type,
2376                               int instance_size,
2377                               ElementsKind elements_kind) {
2378  Object* result;
2379  MaybeObject* maybe_result = AllocateRawMap();
2380  if (!maybe_result->To(&result)) return maybe_result;
2381
2382  Map* map = reinterpret_cast<Map*>(result);
2383  map->set_map_no_write_barrier(meta_map());
2384  map->set_instance_type(instance_type);
2385  map->set_visitor_id(
2386      StaticVisitorBase::GetVisitorId(instance_type, instance_size));
2387  map->set_prototype(null_value(), SKIP_WRITE_BARRIER);
2388  map->set_constructor(null_value(), SKIP_WRITE_BARRIER);
2389  map->set_instance_size(instance_size);
2390  map->set_inobject_properties(0);
2391  map->set_pre_allocated_property_fields(0);
2392  map->set_code_cache(empty_fixed_array(), SKIP_WRITE_BARRIER);
2393  map->set_dependent_code(DependentCode::cast(empty_fixed_array()),
2394                          SKIP_WRITE_BARRIER);
2395  map->init_back_pointer(undefined_value());
2396  map->set_unused_property_fields(0);
2397  map->set_instance_descriptors(empty_descriptor_array());
2398  map->set_bit_field(0);
2399  map->set_bit_field2(1 << Map::kIsExtensible);
2400  int bit_field3 = Map::EnumLengthBits::encode(Map::kInvalidEnumCache) |
2401                   Map::OwnsDescriptors::encode(true);
2402  map->set_bit_field3(bit_field3);
2403  map->set_elements_kind(elements_kind);
2404
2405  return map;
2406}
2407
2408
2409MaybeObject* Heap::AllocateCodeCache() {
2410  CodeCache* code_cache;
2411  { MaybeObject* maybe_code_cache = AllocateStruct(CODE_CACHE_TYPE);
2412    if (!maybe_code_cache->To(&code_cache)) return maybe_code_cache;
2413  }
2414  code_cache->set_default_cache(empty_fixed_array(), SKIP_WRITE_BARRIER);
2415  code_cache->set_normal_type_cache(undefined_value(), SKIP_WRITE_BARRIER);
2416  return code_cache;
2417}
2418
2419
2420MaybeObject* Heap::AllocatePolymorphicCodeCache() {
2421  return AllocateStruct(POLYMORPHIC_CODE_CACHE_TYPE);
2422}
2423
2424
2425MaybeObject* Heap::AllocateAccessorPair() {
2426  AccessorPair* accessors;
2427  { MaybeObject* maybe_accessors = AllocateStruct(ACCESSOR_PAIR_TYPE);
2428    if (!maybe_accessors->To(&accessors)) return maybe_accessors;
2429  }
2430  accessors->set_getter(the_hole_value(), SKIP_WRITE_BARRIER);
2431  accessors->set_setter(the_hole_value(), SKIP_WRITE_BARRIER);
2432  return accessors;
2433}
2434
2435
2436MaybeObject* Heap::AllocateTypeFeedbackInfo() {
2437  TypeFeedbackInfo* info;
2438  { MaybeObject* maybe_info = AllocateStruct(TYPE_FEEDBACK_INFO_TYPE);
2439    if (!maybe_info->To(&info)) return maybe_info;
2440  }
2441  info->initialize_storage();
2442  info->set_type_feedback_cells(TypeFeedbackCells::cast(empty_fixed_array()),
2443                                SKIP_WRITE_BARRIER);
2444  return info;
2445}
2446
2447
2448MaybeObject* Heap::AllocateAliasedArgumentsEntry(int aliased_context_slot) {
2449  AliasedArgumentsEntry* entry;
2450  { MaybeObject* maybe_entry = AllocateStruct(ALIASED_ARGUMENTS_ENTRY_TYPE);
2451    if (!maybe_entry->To(&entry)) return maybe_entry;
2452  }
2453  entry->set_aliased_context_slot(aliased_context_slot);
2454  return entry;
2455}
2456
2457
2458const Heap::StringTypeTable Heap::string_type_table[] = {
2459#define STRING_TYPE_ELEMENT(type, size, name, camel_name)                      \
2460  {type, size, k##camel_name##MapRootIndex},
2461  STRING_TYPE_LIST(STRING_TYPE_ELEMENT)
2462#undef STRING_TYPE_ELEMENT
2463};
2464
2465
2466const Heap::ConstantStringTable Heap::constant_string_table[] = {
2467#define CONSTANT_STRING_ELEMENT(name, contents)                                \
2468  {contents, k##name##RootIndex},
2469  INTERNALIZED_STRING_LIST(CONSTANT_STRING_ELEMENT)
2470#undef CONSTANT_STRING_ELEMENT
2471};
2472
2473
2474const Heap::StructTable Heap::struct_table[] = {
2475#define STRUCT_TABLE_ELEMENT(NAME, Name, name)                                 \
2476  { NAME##_TYPE, Name::kSize, k##Name##MapRootIndex },
2477  STRUCT_LIST(STRUCT_TABLE_ELEMENT)
2478#undef STRUCT_TABLE_ELEMENT
2479};
2480
2481
2482bool Heap::CreateInitialMaps() {
2483  Object* obj;
2484  { MaybeObject* maybe_obj = AllocatePartialMap(MAP_TYPE, Map::kSize);
2485    if (!maybe_obj->ToObject(&obj)) return false;
2486  }
2487  // Map::cast cannot be used due to uninitialized map field.
2488  Map* new_meta_map = reinterpret_cast<Map*>(obj);
2489  set_meta_map(new_meta_map);
2490  new_meta_map->set_map(new_meta_map);
2491
2492  { MaybeObject* maybe_obj =
2493        AllocatePartialMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2494    if (!maybe_obj->ToObject(&obj)) return false;
2495  }
2496  set_fixed_array_map(Map::cast(obj));
2497
2498  { MaybeObject* maybe_obj = AllocatePartialMap(ODDBALL_TYPE, Oddball::kSize);
2499    if (!maybe_obj->ToObject(&obj)) return false;
2500  }
2501  set_oddball_map(Map::cast(obj));
2502
2503  // Allocate the empty array.
2504  { MaybeObject* maybe_obj = AllocateEmptyFixedArray();
2505    if (!maybe_obj->ToObject(&obj)) return false;
2506  }
2507  set_empty_fixed_array(FixedArray::cast(obj));
2508
2509  { MaybeObject* maybe_obj = Allocate(oddball_map(), OLD_POINTER_SPACE);
2510    if (!maybe_obj->ToObject(&obj)) return false;
2511  }
2512  set_null_value(Oddball::cast(obj));
2513  Oddball::cast(obj)->set_kind(Oddball::kNull);
2514
2515  { MaybeObject* maybe_obj = Allocate(oddball_map(), OLD_POINTER_SPACE);
2516    if (!maybe_obj->ToObject(&obj)) return false;
2517  }
2518  set_undefined_value(Oddball::cast(obj));
2519  Oddball::cast(obj)->set_kind(Oddball::kUndefined);
2520  ASSERT(!InNewSpace(undefined_value()));
2521
2522  // Allocate the empty descriptor array.
2523  { MaybeObject* maybe_obj = AllocateEmptyFixedArray();
2524    if (!maybe_obj->ToObject(&obj)) return false;
2525  }
2526  set_empty_descriptor_array(DescriptorArray::cast(obj));
2527
2528  // Fix the instance_descriptors for the existing maps.
2529  meta_map()->set_code_cache(empty_fixed_array());
2530  meta_map()->set_dependent_code(DependentCode::cast(empty_fixed_array()));
2531  meta_map()->init_back_pointer(undefined_value());
2532  meta_map()->set_instance_descriptors(empty_descriptor_array());
2533
2534  fixed_array_map()->set_code_cache(empty_fixed_array());
2535  fixed_array_map()->set_dependent_code(
2536      DependentCode::cast(empty_fixed_array()));
2537  fixed_array_map()->init_back_pointer(undefined_value());
2538  fixed_array_map()->set_instance_descriptors(empty_descriptor_array());
2539
2540  oddball_map()->set_code_cache(empty_fixed_array());
2541  oddball_map()->set_dependent_code(DependentCode::cast(empty_fixed_array()));
2542  oddball_map()->init_back_pointer(undefined_value());
2543  oddball_map()->set_instance_descriptors(empty_descriptor_array());
2544
2545  // Fix prototype object for existing maps.
2546  meta_map()->set_prototype(null_value());
2547  meta_map()->set_constructor(null_value());
2548
2549  fixed_array_map()->set_prototype(null_value());
2550  fixed_array_map()->set_constructor(null_value());
2551
2552  oddball_map()->set_prototype(null_value());
2553  oddball_map()->set_constructor(null_value());
2554
2555  { MaybeObject* maybe_obj =
2556        AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2557    if (!maybe_obj->ToObject(&obj)) return false;
2558  }
2559  set_fixed_cow_array_map(Map::cast(obj));
2560  ASSERT(fixed_array_map() != fixed_cow_array_map());
2561
2562  { MaybeObject* maybe_obj =
2563        AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2564    if (!maybe_obj->ToObject(&obj)) return false;
2565  }
2566  set_scope_info_map(Map::cast(obj));
2567
2568  { MaybeObject* maybe_obj = AllocateMap(HEAP_NUMBER_TYPE, HeapNumber::kSize);
2569    if (!maybe_obj->ToObject(&obj)) return false;
2570  }
2571  set_heap_number_map(Map::cast(obj));
2572
2573  { MaybeObject* maybe_obj = AllocateMap(SYMBOL_TYPE, Symbol::kSize);
2574    if (!maybe_obj->ToObject(&obj)) return false;
2575  }
2576  set_symbol_map(Map::cast(obj));
2577
2578  { MaybeObject* maybe_obj = AllocateMap(FOREIGN_TYPE, Foreign::kSize);
2579    if (!maybe_obj->ToObject(&obj)) return false;
2580  }
2581  set_foreign_map(Map::cast(obj));
2582
2583  for (unsigned i = 0; i < ARRAY_SIZE(string_type_table); i++) {
2584    const StringTypeTable& entry = string_type_table[i];
2585    { MaybeObject* maybe_obj = AllocateMap(entry.type, entry.size);
2586      if (!maybe_obj->ToObject(&obj)) return false;
2587    }
2588    roots_[entry.index] = Map::cast(obj);
2589  }
2590
2591  { MaybeObject* maybe_obj = AllocateMap(STRING_TYPE, kVariableSizeSentinel);
2592    if (!maybe_obj->ToObject(&obj)) return false;
2593  }
2594  set_undetectable_string_map(Map::cast(obj));
2595  Map::cast(obj)->set_is_undetectable();
2596
2597  { MaybeObject* maybe_obj =
2598        AllocateMap(ASCII_STRING_TYPE, kVariableSizeSentinel);
2599    if (!maybe_obj->ToObject(&obj)) return false;
2600  }
2601  set_undetectable_ascii_string_map(Map::cast(obj));
2602  Map::cast(obj)->set_is_undetectable();
2603
2604  { MaybeObject* maybe_obj =
2605        AllocateMap(FIXED_DOUBLE_ARRAY_TYPE, kVariableSizeSentinel);
2606    if (!maybe_obj->ToObject(&obj)) return false;
2607  }
2608  set_fixed_double_array_map(Map::cast(obj));
2609
2610  { MaybeObject* maybe_obj =
2611        AllocateMap(BYTE_ARRAY_TYPE, kVariableSizeSentinel);
2612    if (!maybe_obj->ToObject(&obj)) return false;
2613  }
2614  set_byte_array_map(Map::cast(obj));
2615
2616  { MaybeObject* maybe_obj =
2617        AllocateMap(FREE_SPACE_TYPE, kVariableSizeSentinel);
2618    if (!maybe_obj->ToObject(&obj)) return false;
2619  }
2620  set_free_space_map(Map::cast(obj));
2621
2622  { MaybeObject* maybe_obj = AllocateByteArray(0, TENURED);
2623    if (!maybe_obj->ToObject(&obj)) return false;
2624  }
2625  set_empty_byte_array(ByteArray::cast(obj));
2626
2627  { MaybeObject* maybe_obj =
2628        AllocateMap(EXTERNAL_PIXEL_ARRAY_TYPE, ExternalArray::kAlignedSize);
2629    if (!maybe_obj->ToObject(&obj)) return false;
2630  }
2631  set_external_pixel_array_map(Map::cast(obj));
2632
2633  { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_BYTE_ARRAY_TYPE,
2634                                         ExternalArray::kAlignedSize);
2635    if (!maybe_obj->ToObject(&obj)) return false;
2636  }
2637  set_external_byte_array_map(Map::cast(obj));
2638
2639  { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_UNSIGNED_BYTE_ARRAY_TYPE,
2640                                         ExternalArray::kAlignedSize);
2641    if (!maybe_obj->ToObject(&obj)) return false;
2642  }
2643  set_external_unsigned_byte_array_map(Map::cast(obj));
2644
2645  { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_SHORT_ARRAY_TYPE,
2646                                         ExternalArray::kAlignedSize);
2647    if (!maybe_obj->ToObject(&obj)) return false;
2648  }
2649  set_external_short_array_map(Map::cast(obj));
2650
2651  { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_UNSIGNED_SHORT_ARRAY_TYPE,
2652                                         ExternalArray::kAlignedSize);
2653    if (!maybe_obj->ToObject(&obj)) return false;
2654  }
2655  set_external_unsigned_short_array_map(Map::cast(obj));
2656
2657  { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_INT_ARRAY_TYPE,
2658                                         ExternalArray::kAlignedSize);
2659    if (!maybe_obj->ToObject(&obj)) return false;
2660  }
2661  set_external_int_array_map(Map::cast(obj));
2662
2663  { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_UNSIGNED_INT_ARRAY_TYPE,
2664                                         ExternalArray::kAlignedSize);
2665    if (!maybe_obj->ToObject(&obj)) return false;
2666  }
2667  set_external_unsigned_int_array_map(Map::cast(obj));
2668
2669  { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_FLOAT_ARRAY_TYPE,
2670                                         ExternalArray::kAlignedSize);
2671    if (!maybe_obj->ToObject(&obj)) return false;
2672  }
2673  set_external_float_array_map(Map::cast(obj));
2674
2675  { MaybeObject* maybe_obj =
2676        AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2677    if (!maybe_obj->ToObject(&obj)) return false;
2678  }
2679  set_non_strict_arguments_elements_map(Map::cast(obj));
2680
2681  { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_DOUBLE_ARRAY_TYPE,
2682                                         ExternalArray::kAlignedSize);
2683    if (!maybe_obj->ToObject(&obj)) return false;
2684  }
2685  set_external_double_array_map(Map::cast(obj));
2686
2687  { MaybeObject* maybe_obj = AllocateEmptyExternalArray(kExternalByteArray);
2688    if (!maybe_obj->ToObject(&obj)) return false;
2689  }
2690  set_empty_external_byte_array(ExternalArray::cast(obj));
2691
2692  { MaybeObject* maybe_obj =
2693        AllocateEmptyExternalArray(kExternalUnsignedByteArray);
2694    if (!maybe_obj->ToObject(&obj)) return false;
2695  }
2696  set_empty_external_unsigned_byte_array(ExternalArray::cast(obj));
2697
2698  { MaybeObject* maybe_obj = AllocateEmptyExternalArray(kExternalShortArray);
2699    if (!maybe_obj->ToObject(&obj)) return false;
2700  }
2701  set_empty_external_short_array(ExternalArray::cast(obj));
2702
2703  { MaybeObject* maybe_obj = AllocateEmptyExternalArray(
2704      kExternalUnsignedShortArray);
2705    if (!maybe_obj->ToObject(&obj)) return false;
2706  }
2707  set_empty_external_unsigned_short_array(ExternalArray::cast(obj));
2708
2709  { MaybeObject* maybe_obj = AllocateEmptyExternalArray(kExternalIntArray);
2710    if (!maybe_obj->ToObject(&obj)) return false;
2711  }
2712  set_empty_external_int_array(ExternalArray::cast(obj));
2713
2714  { MaybeObject* maybe_obj =
2715        AllocateEmptyExternalArray(kExternalUnsignedIntArray);
2716    if (!maybe_obj->ToObject(&obj)) return false;
2717  }
2718  set_empty_external_unsigned_int_array(ExternalArray::cast(obj));
2719
2720  { MaybeObject* maybe_obj = AllocateEmptyExternalArray(kExternalFloatArray);
2721    if (!maybe_obj->ToObject(&obj)) return false;
2722  }
2723  set_empty_external_float_array(ExternalArray::cast(obj));
2724
2725  { MaybeObject* maybe_obj = AllocateEmptyExternalArray(kExternalDoubleArray);
2726    if (!maybe_obj->ToObject(&obj)) return false;
2727  }
2728  set_empty_external_double_array(ExternalArray::cast(obj));
2729
2730  { MaybeObject* maybe_obj = AllocateEmptyExternalArray(kExternalPixelArray);
2731    if (!maybe_obj->ToObject(&obj)) return false;
2732  }
2733  set_empty_external_pixel_array(ExternalArray::cast(obj));
2734
2735  { MaybeObject* maybe_obj = AllocateMap(CODE_TYPE, kVariableSizeSentinel);
2736    if (!maybe_obj->ToObject(&obj)) return false;
2737  }
2738  set_code_map(Map::cast(obj));
2739
2740  { MaybeObject* maybe_obj = AllocateMap(CELL_TYPE, Cell::kSize);
2741    if (!maybe_obj->ToObject(&obj)) return false;
2742  }
2743  set_cell_map(Map::cast(obj));
2744
2745  { MaybeObject* maybe_obj = AllocateMap(PROPERTY_CELL_TYPE,
2746                                         PropertyCell::kSize);
2747    if (!maybe_obj->ToObject(&obj)) return false;
2748  }
2749  set_global_property_cell_map(Map::cast(obj));
2750
2751  { MaybeObject* maybe_obj = AllocateMap(FILLER_TYPE, kPointerSize);
2752    if (!maybe_obj->ToObject(&obj)) return false;
2753  }
2754  set_one_pointer_filler_map(Map::cast(obj));
2755
2756  { MaybeObject* maybe_obj = AllocateMap(FILLER_TYPE, 2 * kPointerSize);
2757    if (!maybe_obj->ToObject(&obj)) return false;
2758  }
2759  set_two_pointer_filler_map(Map::cast(obj));
2760
2761  for (unsigned i = 0; i < ARRAY_SIZE(struct_table); i++) {
2762    const StructTable& entry = struct_table[i];
2763    { MaybeObject* maybe_obj = AllocateMap(entry.type, entry.size);
2764      if (!maybe_obj->ToObject(&obj)) return false;
2765    }
2766    roots_[entry.index] = Map::cast(obj);
2767  }
2768
2769  { MaybeObject* maybe_obj =
2770        AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2771    if (!maybe_obj->ToObject(&obj)) return false;
2772  }
2773  set_hash_table_map(Map::cast(obj));
2774
2775  { MaybeObject* maybe_obj =
2776        AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2777    if (!maybe_obj->ToObject(&obj)) return false;
2778  }
2779  set_function_context_map(Map::cast(obj));
2780
2781  { MaybeObject* maybe_obj =
2782        AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2783    if (!maybe_obj->ToObject(&obj)) return false;
2784  }
2785  set_catch_context_map(Map::cast(obj));
2786
2787  { MaybeObject* maybe_obj =
2788        AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2789    if (!maybe_obj->ToObject(&obj)) return false;
2790  }
2791  set_with_context_map(Map::cast(obj));
2792
2793  { MaybeObject* maybe_obj =
2794        AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2795    if (!maybe_obj->ToObject(&obj)) return false;
2796  }
2797  set_block_context_map(Map::cast(obj));
2798
2799  { MaybeObject* maybe_obj =
2800        AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2801    if (!maybe_obj->ToObject(&obj)) return false;
2802  }
2803  set_module_context_map(Map::cast(obj));
2804
2805  { MaybeObject* maybe_obj =
2806        AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2807    if (!maybe_obj->ToObject(&obj)) return false;
2808  }
2809  set_global_context_map(Map::cast(obj));
2810
2811  { MaybeObject* maybe_obj =
2812        AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2813    if (!maybe_obj->ToObject(&obj)) return false;
2814  }
2815  Map* native_context_map = Map::cast(obj);
2816  native_context_map->set_dictionary_map(true);
2817  native_context_map->set_visitor_id(StaticVisitorBase::kVisitNativeContext);
2818  set_native_context_map(native_context_map);
2819
2820  { MaybeObject* maybe_obj = AllocateMap(SHARED_FUNCTION_INFO_TYPE,
2821                                         SharedFunctionInfo::kAlignedSize);
2822    if (!maybe_obj->ToObject(&obj)) return false;
2823  }
2824  set_shared_function_info_map(Map::cast(obj));
2825
2826  { MaybeObject* maybe_obj = AllocateMap(JS_MESSAGE_OBJECT_TYPE,
2827                                         JSMessageObject::kSize);
2828    if (!maybe_obj->ToObject(&obj)) return false;
2829  }
2830  set_message_object_map(Map::cast(obj));
2831
2832  Map* external_map;
2833  { MaybeObject* maybe_obj =
2834        AllocateMap(JS_OBJECT_TYPE, JSObject::kHeaderSize + kPointerSize);
2835    if (!maybe_obj->To(&external_map)) return false;
2836  }
2837  external_map->set_is_extensible(false);
2838  set_external_map(external_map);
2839
2840  ASSERT(!InNewSpace(empty_fixed_array()));
2841  return true;
2842}
2843
2844
2845MaybeObject* Heap::AllocateHeapNumber(double value, PretenureFlag pretenure) {
2846  // Statically ensure that it is safe to allocate heap numbers in paged
2847  // spaces.
2848  STATIC_ASSERT(HeapNumber::kSize <= Page::kNonCodeObjectAreaSize);
2849  AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
2850
2851  Object* result;
2852  { MaybeObject* maybe_result =
2853        AllocateRaw(HeapNumber::kSize, space, OLD_DATA_SPACE);
2854    if (!maybe_result->ToObject(&result)) return maybe_result;
2855  }
2856
2857  HeapObject::cast(result)->set_map_no_write_barrier(heap_number_map());
2858  HeapNumber::cast(result)->set_value(value);
2859  return result;
2860}
2861
2862
2863MaybeObject* Heap::AllocateHeapNumber(double value) {
2864  // Use general version, if we're forced to always allocate.
2865  if (always_allocate()) return AllocateHeapNumber(value, TENURED);
2866
2867  // This version of AllocateHeapNumber is optimized for
2868  // allocation in new space.
2869  STATIC_ASSERT(HeapNumber::kSize <= Page::kMaxNonCodeHeapObjectSize);
2870  Object* result;
2871  { MaybeObject* maybe_result = new_space_.AllocateRaw(HeapNumber::kSize);
2872    if (!maybe_result->ToObject(&result)) return maybe_result;
2873  }
2874  HeapObject::cast(result)->set_map_no_write_barrier(heap_number_map());
2875  HeapNumber::cast(result)->set_value(value);
2876  return result;
2877}
2878
2879
2880MaybeObject* Heap::AllocateCell(Object* value) {
2881  Object* result;
2882  { MaybeObject* maybe_result = AllocateRawCell();
2883    if (!maybe_result->ToObject(&result)) return maybe_result;
2884  }
2885  HeapObject::cast(result)->set_map_no_write_barrier(cell_map());
2886  Cell::cast(result)->set_value(value);
2887  return result;
2888}
2889
2890
2891MaybeObject* Heap::AllocatePropertyCell(Object* value) {
2892  Object* result;
2893  MaybeObject* maybe_result = AllocateRawPropertyCell();
2894  if (!maybe_result->ToObject(&result)) return maybe_result;
2895
2896  HeapObject::cast(result)->set_map_no_write_barrier(
2897      global_property_cell_map());
2898  PropertyCell* cell = PropertyCell::cast(result);
2899  cell->set_dependent_code(DependentCode::cast(empty_fixed_array()),
2900                           SKIP_WRITE_BARRIER);
2901  cell->set_value(value);
2902  cell->set_type(Type::None());
2903  maybe_result = cell->SetValueInferType(value);
2904  if (maybe_result->IsFailure()) return maybe_result;
2905  return result;
2906}
2907
2908
2909MaybeObject* Heap::AllocateBox(Object* value, PretenureFlag pretenure) {
2910  Box* result;
2911  MaybeObject* maybe_result = AllocateStruct(BOX_TYPE);
2912  if (!maybe_result->To(&result)) return maybe_result;
2913  result->set_value(value);
2914  return result;
2915}
2916
2917
2918MaybeObject* Heap::AllocateAllocationSite() {
2919  Object* result;
2920  MaybeObject* maybe_result = Allocate(allocation_site_map(),
2921                                       OLD_POINTER_SPACE);
2922  if (!maybe_result->ToObject(&result)) return maybe_result;
2923  AllocationSite* site = AllocationSite::cast(result);
2924  site->Initialize();
2925
2926  // Link the site
2927  site->set_weak_next(allocation_sites_list());
2928  set_allocation_sites_list(site);
2929  return result;
2930}
2931
2932
2933MaybeObject* Heap::CreateOddball(const char* to_string,
2934                                 Object* to_number,
2935                                 byte kind) {
2936  Object* result;
2937  { MaybeObject* maybe_result = Allocate(oddball_map(), OLD_POINTER_SPACE);
2938    if (!maybe_result->ToObject(&result)) return maybe_result;
2939  }
2940  return Oddball::cast(result)->Initialize(to_string, to_number, kind);
2941}
2942
2943
2944bool Heap::CreateApiObjects() {
2945  Object* obj;
2946
2947  { MaybeObject* maybe_obj = AllocateMap(JS_OBJECT_TYPE, JSObject::kHeaderSize);
2948    if (!maybe_obj->ToObject(&obj)) return false;
2949  }
2950  // Don't use Smi-only elements optimizations for objects with the neander
2951  // map. There are too many cases where element values are set directly with a
2952  // bottleneck to trap the Smi-only -> fast elements transition, and there
2953  // appears to be no benefit for optimize this case.
2954  Map* new_neander_map = Map::cast(obj);
2955  new_neander_map->set_elements_kind(TERMINAL_FAST_ELEMENTS_KIND);
2956  set_neander_map(new_neander_map);
2957
2958  { MaybeObject* maybe_obj = AllocateJSObjectFromMap(neander_map());
2959    if (!maybe_obj->ToObject(&obj)) return false;
2960  }
2961  Object* elements;
2962  { MaybeObject* maybe_elements = AllocateFixedArray(2);
2963    if (!maybe_elements->ToObject(&elements)) return false;
2964  }
2965  FixedArray::cast(elements)->set(0, Smi::FromInt(0));
2966  JSObject::cast(obj)->set_elements(FixedArray::cast(elements));
2967  set_message_listeners(JSObject::cast(obj));
2968
2969  return true;
2970}
2971
2972
2973void Heap::CreateJSEntryStub() {
2974  JSEntryStub stub;
2975  set_js_entry_code(*stub.GetCode(isolate()));
2976}
2977
2978
2979void Heap::CreateJSConstructEntryStub() {
2980  JSConstructEntryStub stub;
2981  set_js_construct_entry_code(*stub.GetCode(isolate()));
2982}
2983
2984
2985void Heap::CreateFixedStubs() {
2986  // Here we create roots for fixed stubs. They are needed at GC
2987  // for cooking and uncooking (check out frames.cc).
2988  // The eliminates the need for doing dictionary lookup in the
2989  // stub cache for these stubs.
2990  HandleScope scope(isolate());
2991  // gcc-4.4 has problem generating correct code of following snippet:
2992  // {  JSEntryStub stub;
2993  //    js_entry_code_ = *stub.GetCode();
2994  // }
2995  // {  JSConstructEntryStub stub;
2996  //    js_construct_entry_code_ = *stub.GetCode();
2997  // }
2998  // To workaround the problem, make separate functions without inlining.
2999  Heap::CreateJSEntryStub();
3000  Heap::CreateJSConstructEntryStub();
3001
3002  // Create stubs that should be there, so we don't unexpectedly have to
3003  // create them if we need them during the creation of another stub.
3004  // Stub creation mixes raw pointers and handles in an unsafe manner so
3005  // we cannot create stubs while we are creating stubs.
3006  CodeStub::GenerateStubsAheadOfTime(isolate());
3007}
3008
3009
3010bool Heap::CreateInitialObjects() {
3011  Object* obj;
3012
3013  // The -0 value must be set before NumberFromDouble works.
3014  { MaybeObject* maybe_obj = AllocateHeapNumber(-0.0, TENURED);
3015    if (!maybe_obj->ToObject(&obj)) return false;
3016  }
3017  set_minus_zero_value(HeapNumber::cast(obj));
3018  ASSERT(std::signbit(minus_zero_value()->Number()) != 0);
3019
3020  { MaybeObject* maybe_obj = AllocateHeapNumber(OS::nan_value(), TENURED);
3021    if (!maybe_obj->ToObject(&obj)) return false;
3022  }
3023  set_nan_value(HeapNumber::cast(obj));
3024
3025  { MaybeObject* maybe_obj = AllocateHeapNumber(V8_INFINITY, TENURED);
3026    if (!maybe_obj->ToObject(&obj)) return false;
3027  }
3028  set_infinity_value(HeapNumber::cast(obj));
3029
3030  // The hole has not been created yet, but we want to put something
3031  // predictable in the gaps in the string table, so lets make that Smi zero.
3032  set_the_hole_value(reinterpret_cast<Oddball*>(Smi::FromInt(0)));
3033
3034  // Allocate initial string table.
3035  { MaybeObject* maybe_obj =
3036        StringTable::Allocate(this, kInitialStringTableSize);
3037    if (!maybe_obj->ToObject(&obj)) return false;
3038  }
3039  // Don't use set_string_table() due to asserts.
3040  roots_[kStringTableRootIndex] = obj;
3041
3042  // Finish initializing oddballs after creating the string table.
3043  { MaybeObject* maybe_obj =
3044        undefined_value()->Initialize("undefined",
3045                                      nan_value(),
3046                                      Oddball::kUndefined);
3047    if (!maybe_obj->ToObject(&obj)) return false;
3048  }
3049
3050  // Initialize the null_value.
3051  { MaybeObject* maybe_obj =
3052        null_value()->Initialize("null", Smi::FromInt(0), Oddball::kNull);
3053    if (!maybe_obj->ToObject(&obj)) return false;
3054  }
3055
3056  { MaybeObject* maybe_obj = CreateOddball("true",
3057                                           Smi::FromInt(1),
3058                                           Oddball::kTrue);
3059    if (!maybe_obj->ToObject(&obj)) return false;
3060  }
3061  set_true_value(Oddball::cast(obj));
3062
3063  { MaybeObject* maybe_obj = CreateOddball("false",
3064                                           Smi::FromInt(0),
3065                                           Oddball::kFalse);
3066    if (!maybe_obj->ToObject(&obj)) return false;
3067  }
3068  set_false_value(Oddball::cast(obj));
3069
3070  { MaybeObject* maybe_obj = CreateOddball("hole",
3071                                           Smi::FromInt(-1),
3072                                           Oddball::kTheHole);
3073    if (!maybe_obj->ToObject(&obj)) return false;
3074  }
3075  set_the_hole_value(Oddball::cast(obj));
3076
3077  { MaybeObject* maybe_obj = CreateOddball("uninitialized",
3078                                           Smi::FromInt(-1),
3079                                           Oddball::kUninitialized);
3080    if (!maybe_obj->ToObject(&obj)) return false;
3081  }
3082  set_uninitialized_value(Oddball::cast(obj));
3083
3084  { MaybeObject* maybe_obj = CreateOddball("arguments_marker",
3085                                           Smi::FromInt(-4),
3086                                           Oddball::kArgumentMarker);
3087    if (!maybe_obj->ToObject(&obj)) return false;
3088  }
3089  set_arguments_marker(Oddball::cast(obj));
3090
3091  { MaybeObject* maybe_obj = CreateOddball("no_interceptor_result_sentinel",
3092                                           Smi::FromInt(-2),
3093                                           Oddball::kOther);
3094    if (!maybe_obj->ToObject(&obj)) return false;
3095  }
3096  set_no_interceptor_result_sentinel(obj);
3097
3098  { MaybeObject* maybe_obj = CreateOddball("termination_exception",
3099                                           Smi::FromInt(-3),
3100                                           Oddball::kOther);
3101    if (!maybe_obj->ToObject(&obj)) return false;
3102  }
3103  set_termination_exception(obj);
3104
3105  for (unsigned i = 0; i < ARRAY_SIZE(constant_string_table); i++) {
3106    { MaybeObject* maybe_obj =
3107          InternalizeUtf8String(constant_string_table[i].contents);
3108      if (!maybe_obj->ToObject(&obj)) return false;
3109    }
3110    roots_[constant_string_table[i].index] = String::cast(obj);
3111  }
3112
3113  // Allocate the hidden string which is used to identify the hidden properties
3114  // in JSObjects. The hash code has a special value so that it will not match
3115  // the empty string when searching for the property. It cannot be part of the
3116  // loop above because it needs to be allocated manually with the special
3117  // hash code in place. The hash code for the hidden_string is zero to ensure
3118  // that it will always be at the first entry in property descriptors.
3119  { MaybeObject* maybe_obj = AllocateOneByteInternalizedString(
3120      OneByteVector("", 0), String::kEmptyStringHash);
3121    if (!maybe_obj->ToObject(&obj)) return false;
3122  }
3123  hidden_string_ = String::cast(obj);
3124
3125  // Allocate the code_stubs dictionary. The initial size is set to avoid
3126  // expanding the dictionary during bootstrapping.
3127  { MaybeObject* maybe_obj = UnseededNumberDictionary::Allocate(this, 128);
3128    if (!maybe_obj->ToObject(&obj)) return false;
3129  }
3130  set_code_stubs(UnseededNumberDictionary::cast(obj));
3131
3132
3133  // Allocate the non_monomorphic_cache used in stub-cache.cc. The initial size
3134  // is set to avoid expanding the dictionary during bootstrapping.
3135  { MaybeObject* maybe_obj = UnseededNumberDictionary::Allocate(this, 64);
3136    if (!maybe_obj->ToObject(&obj)) return false;
3137  }
3138  set_non_monomorphic_cache(UnseededNumberDictionary::cast(obj));
3139
3140  { MaybeObject* maybe_obj = AllocatePolymorphicCodeCache();
3141    if (!maybe_obj->ToObject(&obj)) return false;
3142  }
3143  set_polymorphic_code_cache(PolymorphicCodeCache::cast(obj));
3144
3145  set_instanceof_cache_function(Smi::FromInt(0));
3146  set_instanceof_cache_map(Smi::FromInt(0));
3147  set_instanceof_cache_answer(Smi::FromInt(0));
3148
3149  CreateFixedStubs();
3150
3151  // Allocate the dictionary of intrinsic function names.
3152  { MaybeObject* maybe_obj =
3153        NameDictionary::Allocate(this, Runtime::kNumFunctions);
3154    if (!maybe_obj->ToObject(&obj)) return false;
3155  }
3156  { MaybeObject* maybe_obj = Runtime::InitializeIntrinsicFunctionNames(this,
3157                                                                       obj);
3158    if (!maybe_obj->ToObject(&obj)) return false;
3159  }
3160  set_intrinsic_function_names(NameDictionary::cast(obj));
3161
3162  { MaybeObject* maybe_obj = AllocateInitialNumberStringCache();
3163    if (!maybe_obj->ToObject(&obj)) return false;
3164  }
3165  set_number_string_cache(FixedArray::cast(obj));
3166
3167  // Allocate cache for single character one byte strings.
3168  { MaybeObject* maybe_obj =
3169        AllocateFixedArray(String::kMaxOneByteCharCode + 1, TENURED);
3170    if (!maybe_obj->ToObject(&obj)) return false;
3171  }
3172  set_single_character_string_cache(FixedArray::cast(obj));
3173
3174  // Allocate cache for string split.
3175  { MaybeObject* maybe_obj = AllocateFixedArray(
3176      RegExpResultsCache::kRegExpResultsCacheSize, TENURED);
3177    if (!maybe_obj->ToObject(&obj)) return false;
3178  }
3179  set_string_split_cache(FixedArray::cast(obj));
3180
3181  { MaybeObject* maybe_obj = AllocateFixedArray(
3182      RegExpResultsCache::kRegExpResultsCacheSize, TENURED);
3183    if (!maybe_obj->ToObject(&obj)) return false;
3184  }
3185  set_regexp_multiple_cache(FixedArray::cast(obj));
3186
3187  // Allocate cache for external strings pointing to native source code.
3188  { MaybeObject* maybe_obj = AllocateFixedArray(Natives::GetBuiltinsCount());
3189    if (!maybe_obj->ToObject(&obj)) return false;
3190  }
3191  set_natives_source_cache(FixedArray::cast(obj));
3192
3193  // Allocate object to hold object observation state.
3194  { MaybeObject* maybe_obj = AllocateMap(JS_OBJECT_TYPE, JSObject::kHeaderSize);
3195    if (!maybe_obj->ToObject(&obj)) return false;
3196  }
3197  { MaybeObject* maybe_obj = AllocateJSObjectFromMap(Map::cast(obj));
3198    if (!maybe_obj->ToObject(&obj)) return false;
3199  }
3200  set_observation_state(JSObject::cast(obj));
3201
3202  { MaybeObject* maybe_obj = AllocateSymbol();
3203    if (!maybe_obj->ToObject(&obj)) return false;
3204  }
3205  set_frozen_symbol(Symbol::cast(obj));
3206
3207  { MaybeObject* maybe_obj = AllocateSymbol();
3208    if (!maybe_obj->ToObject(&obj)) return false;
3209  }
3210  set_elements_transition_symbol(Symbol::cast(obj));
3211
3212  { MaybeObject* maybe_obj = SeededNumberDictionary::Allocate(this, 0, TENURED);
3213    if (!maybe_obj->ToObject(&obj)) return false;
3214  }
3215  SeededNumberDictionary::cast(obj)->set_requires_slow_elements();
3216  set_empty_slow_element_dictionary(SeededNumberDictionary::cast(obj));
3217
3218  { MaybeObject* maybe_obj = AllocateSymbol();
3219    if (!maybe_obj->ToObject(&obj)) return false;
3220  }
3221  set_observed_symbol(Symbol::cast(obj));
3222
3223  // Handling of script id generation is in Factory::NewScript.
3224  set_last_script_id(Smi::FromInt(v8::Script::kNoScriptId));
3225
3226  // Initialize keyed lookup cache.
3227  isolate_->keyed_lookup_cache()->Clear();
3228
3229  // Initialize context slot cache.
3230  isolate_->context_slot_cache()->Clear();
3231
3232  // Initialize descriptor cache.
3233  isolate_->descriptor_lookup_cache()->Clear();
3234
3235  // Initialize compilation cache.
3236  isolate_->compilation_cache()->Clear();
3237
3238  return true;
3239}
3240
3241
3242bool Heap::RootCanBeWrittenAfterInitialization(Heap::RootListIndex root_index) {
3243  RootListIndex writable_roots[] = {
3244    kStoreBufferTopRootIndex,
3245    kStackLimitRootIndex,
3246    kNumberStringCacheRootIndex,
3247    kInstanceofCacheFunctionRootIndex,
3248    kInstanceofCacheMapRootIndex,
3249    kInstanceofCacheAnswerRootIndex,
3250    kCodeStubsRootIndex,
3251    kNonMonomorphicCacheRootIndex,
3252    kPolymorphicCodeCacheRootIndex,
3253    kLastScriptIdRootIndex,
3254    kEmptyScriptRootIndex,
3255    kRealStackLimitRootIndex,
3256    kArgumentsAdaptorDeoptPCOffsetRootIndex,
3257    kConstructStubDeoptPCOffsetRootIndex,
3258    kGetterStubDeoptPCOffsetRootIndex,
3259    kSetterStubDeoptPCOffsetRootIndex,
3260    kStringTableRootIndex,
3261  };
3262
3263  for (unsigned int i = 0; i < ARRAY_SIZE(writable_roots); i++) {
3264    if (root_index == writable_roots[i])
3265      return true;
3266  }
3267  return false;
3268}
3269
3270
3271bool Heap::RootCanBeTreatedAsConstant(RootListIndex root_index) {
3272  return !RootCanBeWrittenAfterInitialization(root_index) &&
3273      !InNewSpace(roots_array_start()[root_index]);
3274}
3275
3276
3277Object* RegExpResultsCache::Lookup(Heap* heap,
3278                                   String* key_string,
3279                                   Object* key_pattern,
3280                                   ResultsCacheType type) {
3281  FixedArray* cache;
3282  if (!key_string->IsInternalizedString()) return Smi::FromInt(0);
3283  if (type == STRING_SPLIT_SUBSTRINGS) {
3284    ASSERT(key_pattern->IsString());
3285    if (!key_pattern->IsInternalizedString()) return Smi::FromInt(0);
3286    cache = heap->string_split_cache();
3287  } else {
3288    ASSERT(type == REGEXP_MULTIPLE_INDICES);
3289    ASSERT(key_pattern->IsFixedArray());
3290    cache = heap->regexp_multiple_cache();
3291  }
3292
3293  uint32_t hash = key_string->Hash();
3294  uint32_t index = ((hash & (kRegExpResultsCacheSize - 1)) &
3295      ~(kArrayEntriesPerCacheEntry - 1));
3296  if (cache->get(index + kStringOffset) == key_string &&
3297      cache->get(index + kPatternOffset) == key_pattern) {
3298    return cache->get(index + kArrayOffset);
3299  }
3300  index =
3301      ((index + kArrayEntriesPerCacheEntry) & (kRegExpResultsCacheSize - 1));
3302  if (cache->get(index + kStringOffset) == key_string &&
3303      cache->get(index + kPatternOffset) == key_pattern) {
3304    return cache->get(index + kArrayOffset);
3305  }
3306  return Smi::FromInt(0);
3307}
3308
3309
3310void RegExpResultsCache::Enter(Heap* heap,
3311                               String* key_string,
3312                               Object* key_pattern,
3313                               FixedArray* value_array,
3314                               ResultsCacheType type) {
3315  FixedArray* cache;
3316  if (!key_string->IsInternalizedString()) return;
3317  if (type == STRING_SPLIT_SUBSTRINGS) {
3318    ASSERT(key_pattern->IsString());
3319    if (!key_pattern->IsInternalizedString()) return;
3320    cache = heap->string_split_cache();
3321  } else {
3322    ASSERT(type == REGEXP_MULTIPLE_INDICES);
3323    ASSERT(key_pattern->IsFixedArray());
3324    cache = heap->regexp_multiple_cache();
3325  }
3326
3327  uint32_t hash = key_string->Hash();
3328  uint32_t index = ((hash & (kRegExpResultsCacheSize - 1)) &
3329      ~(kArrayEntriesPerCacheEntry - 1));
3330  if (cache->get(index + kStringOffset) == Smi::FromInt(0)) {
3331    cache->set(index + kStringOffset, key_string);
3332    cache->set(index + kPatternOffset, key_pattern);
3333    cache->set(index + kArrayOffset, value_array);
3334  } else {
3335    uint32_t index2 =
3336        ((index + kArrayEntriesPerCacheEntry) & (kRegExpResultsCacheSize - 1));
3337    if (cache->get(index2 + kStringOffset) == Smi::FromInt(0)) {
3338      cache->set(index2 + kStringOffset, key_string);
3339      cache->set(index2 + kPatternOffset, key_pattern);
3340      cache->set(index2 + kArrayOffset, value_array);
3341    } else {
3342      cache->set(index2 + kStringOffset, Smi::FromInt(0));
3343      cache->set(index2 + kPatternOffset, Smi::FromInt(0));
3344      cache->set(index2 + kArrayOffset, Smi::FromInt(0));
3345      cache->set(index + kStringOffset, key_string);
3346      cache->set(index + kPatternOffset, key_pattern);
3347      cache->set(index + kArrayOffset, value_array);
3348    }
3349  }
3350  // If the array is a reasonably short list of substrings, convert it into a
3351  // list of internalized strings.
3352  if (type == STRING_SPLIT_SUBSTRINGS && value_array->length() < 100) {
3353    for (int i = 0; i < value_array->length(); i++) {
3354      String* str = String::cast(value_array->get(i));
3355      Object* internalized_str;
3356      MaybeObject* maybe_string = heap->InternalizeString(str);
3357      if (maybe_string->ToObject(&internalized_str)) {
3358        value_array->set(i, internalized_str);
3359      }
3360    }
3361  }
3362  // Convert backing store to a copy-on-write array.
3363  value_array->set_map_no_write_barrier(heap->fixed_cow_array_map());
3364}
3365
3366
3367void RegExpResultsCache::Clear(FixedArray* cache) {
3368  for (int i = 0; i < kRegExpResultsCacheSize; i++) {
3369    cache->set(i, Smi::FromInt(0));
3370  }
3371}
3372
3373
3374MaybeObject* Heap::AllocateInitialNumberStringCache() {
3375  MaybeObject* maybe_obj =
3376      AllocateFixedArray(kInitialNumberStringCacheSize * 2, TENURED);
3377  return maybe_obj;
3378}
3379
3380
3381int Heap::FullSizeNumberStringCacheLength() {
3382  // Compute the size of the number string cache based on the max newspace size.
3383  // The number string cache has a minimum size based on twice the initial cache
3384  // size to ensure that it is bigger after being made 'full size'.
3385  int number_string_cache_size = max_semispace_size_ / 512;
3386  number_string_cache_size = Max(kInitialNumberStringCacheSize * 2,
3387                                 Min(0x4000, number_string_cache_size));
3388  // There is a string and a number per entry so the length is twice the number
3389  // of entries.
3390  return number_string_cache_size * 2;
3391}
3392
3393
3394void Heap::AllocateFullSizeNumberStringCache() {
3395  // The idea is to have a small number string cache in the snapshot to keep
3396  // boot-time memory usage down.  If we expand the number string cache already
3397  // while creating the snapshot then that didn't work out.
3398  ASSERT(!Serializer::enabled() || FLAG_extra_code != NULL);
3399  MaybeObject* maybe_obj =
3400      AllocateFixedArray(FullSizeNumberStringCacheLength(), TENURED);
3401  Object* new_cache;
3402  if (maybe_obj->ToObject(&new_cache)) {
3403    // We don't bother to repopulate the cache with entries from the old cache.
3404    // It will be repopulated soon enough with new strings.
3405    set_number_string_cache(FixedArray::cast(new_cache));
3406  }
3407  // If allocation fails then we just return without doing anything.  It is only
3408  // a cache, so best effort is OK here.
3409}
3410
3411
3412void Heap::FlushNumberStringCache() {
3413  // Flush the number to string cache.
3414  int len = number_string_cache()->length();
3415  for (int i = 0; i < len; i++) {
3416    number_string_cache()->set_undefined(this, i);
3417  }
3418}
3419
3420
3421static inline int double_get_hash(double d) {
3422  DoubleRepresentation rep(d);
3423  return static_cast<int>(rep.bits) ^ static_cast<int>(rep.bits >> 32);
3424}
3425
3426
3427static inline int smi_get_hash(Smi* smi) {
3428  return smi->value();
3429}
3430
3431
3432Object* Heap::GetNumberStringCache(Object* number) {
3433  int hash;
3434  int mask = (number_string_cache()->length() >> 1) - 1;
3435  if (number->IsSmi()) {
3436    hash = smi_get_hash(Smi::cast(number)) & mask;
3437  } else {
3438    hash = double_get_hash(number->Number()) & mask;
3439  }
3440  Object* key = number_string_cache()->get(hash * 2);
3441  if (key == number) {
3442    return String::cast(number_string_cache()->get(hash * 2 + 1));
3443  } else if (key->IsHeapNumber() &&
3444             number->IsHeapNumber() &&
3445             key->Number() == number->Number()) {
3446    return String::cast(number_string_cache()->get(hash * 2 + 1));
3447  }
3448  return undefined_value();
3449}
3450
3451
3452void Heap::SetNumberStringCache(Object* number, String* string) {
3453  int hash;
3454  int mask = (number_string_cache()->length() >> 1) - 1;
3455  if (number->IsSmi()) {
3456    hash = smi_get_hash(Smi::cast(number)) & mask;
3457  } else {
3458    hash = double_get_hash(number->Number()) & mask;
3459  }
3460  if (number_string_cache()->get(hash * 2) != undefined_value() &&
3461      number_string_cache()->length() != FullSizeNumberStringCacheLength()) {
3462    // The first time we have a hash collision, we move to the full sized
3463    // number string cache.
3464    AllocateFullSizeNumberStringCache();
3465    return;
3466  }
3467  number_string_cache()->set(hash * 2, number);
3468  number_string_cache()->set(hash * 2 + 1, string);
3469}
3470
3471
3472MaybeObject* Heap::NumberToString(Object* number,
3473                                  bool check_number_string_cache,
3474                                  PretenureFlag pretenure) {
3475  isolate_->counters()->number_to_string_runtime()->Increment();
3476  if (check_number_string_cache) {
3477    Object* cached = GetNumberStringCache(number);
3478    if (cached != undefined_value()) {
3479      return cached;
3480    }
3481  }
3482
3483  char arr[100];
3484  Vector<char> buffer(arr, ARRAY_SIZE(arr));
3485  const char* str;
3486  if (number->IsSmi()) {
3487    int num = Smi::cast(number)->value();
3488    str = IntToCString(num, buffer);
3489  } else {
3490    double num = HeapNumber::cast(number)->value();
3491    str = DoubleToCString(num, buffer);
3492  }
3493
3494  Object* js_string;
3495  MaybeObject* maybe_js_string =
3496      AllocateStringFromOneByte(CStrVector(str), pretenure);
3497  if (maybe_js_string->ToObject(&js_string)) {
3498    SetNumberStringCache(number, String::cast(js_string));
3499  }
3500  return maybe_js_string;
3501}
3502
3503
3504MaybeObject* Heap::Uint32ToString(uint32_t value,
3505                                  bool check_number_string_cache) {
3506  Object* number;
3507  MaybeObject* maybe = NumberFromUint32(value);
3508  if (!maybe->To<Object>(&number)) return maybe;
3509  return NumberToString(number, check_number_string_cache);
3510}
3511
3512
3513Map* Heap::MapForExternalArrayType(ExternalArrayType array_type) {
3514  return Map::cast(roots_[RootIndexForExternalArrayType(array_type)]);
3515}
3516
3517
3518Heap::RootListIndex Heap::RootIndexForExternalArrayType(
3519    ExternalArrayType array_type) {
3520  switch (array_type) {
3521    case kExternalByteArray:
3522      return kExternalByteArrayMapRootIndex;
3523    case kExternalUnsignedByteArray:
3524      return kExternalUnsignedByteArrayMapRootIndex;
3525    case kExternalShortArray:
3526      return kExternalShortArrayMapRootIndex;
3527    case kExternalUnsignedShortArray:
3528      return kExternalUnsignedShortArrayMapRootIndex;
3529    case kExternalIntArray:
3530      return kExternalIntArrayMapRootIndex;
3531    case kExternalUnsignedIntArray:
3532      return kExternalUnsignedIntArrayMapRootIndex;
3533    case kExternalFloatArray:
3534      return kExternalFloatArrayMapRootIndex;
3535    case kExternalDoubleArray:
3536      return kExternalDoubleArrayMapRootIndex;
3537    case kExternalPixelArray:
3538      return kExternalPixelArrayMapRootIndex;
3539    default:
3540      UNREACHABLE();
3541      return kUndefinedValueRootIndex;
3542  }
3543}
3544
3545Heap::RootListIndex Heap::RootIndexForEmptyExternalArray(
3546    ElementsKind elementsKind) {
3547  switch (elementsKind) {
3548    case EXTERNAL_BYTE_ELEMENTS:
3549      return kEmptyExternalByteArrayRootIndex;
3550    case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
3551      return kEmptyExternalUnsignedByteArrayRootIndex;
3552    case EXTERNAL_SHORT_ELEMENTS:
3553      return kEmptyExternalShortArrayRootIndex;
3554    case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
3555      return kEmptyExternalUnsignedShortArrayRootIndex;
3556    case EXTERNAL_INT_ELEMENTS:
3557      return kEmptyExternalIntArrayRootIndex;
3558    case EXTERNAL_UNSIGNED_INT_ELEMENTS:
3559      return kEmptyExternalUnsignedIntArrayRootIndex;
3560    case EXTERNAL_FLOAT_ELEMENTS:
3561      return kEmptyExternalFloatArrayRootIndex;
3562    case EXTERNAL_DOUBLE_ELEMENTS:
3563      return kEmptyExternalDoubleArrayRootIndex;
3564    case EXTERNAL_PIXEL_ELEMENTS:
3565      return kEmptyExternalPixelArrayRootIndex;
3566    default:
3567      UNREACHABLE();
3568      return kUndefinedValueRootIndex;
3569  }
3570}
3571
3572
3573ExternalArray* Heap::EmptyExternalArrayForMap(Map* map) {
3574  return ExternalArray::cast(
3575      roots_[RootIndexForEmptyExternalArray(map->elements_kind())]);
3576}
3577
3578
3579
3580
3581MaybeObject* Heap::NumberFromDouble(double value, PretenureFlag pretenure) {
3582  // We need to distinguish the minus zero value and this cannot be
3583  // done after conversion to int. Doing this by comparing bit
3584  // patterns is faster than using fpclassify() et al.
3585  static const DoubleRepresentation minus_zero(-0.0);
3586
3587  DoubleRepresentation rep(value);
3588  if (rep.bits == minus_zero.bits) {
3589    return AllocateHeapNumber(-0.0, pretenure);
3590  }
3591
3592  int int_value = FastD2I(value);
3593  if (value == int_value && Smi::IsValid(int_value)) {
3594    return Smi::FromInt(int_value);
3595  }
3596
3597  // Materialize the value in the heap.
3598  return AllocateHeapNumber(value, pretenure);
3599}
3600
3601
3602MaybeObject* Heap::AllocateForeign(Address address, PretenureFlag pretenure) {
3603  // Statically ensure that it is safe to allocate foreigns in paged spaces.
3604  STATIC_ASSERT(Foreign::kSize <= Page::kMaxNonCodeHeapObjectSize);
3605  AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
3606  Foreign* result;
3607  MaybeObject* maybe_result = Allocate(foreign_map(), space);
3608  if (!maybe_result->To(&result)) return maybe_result;
3609  result->set_foreign_address(address);
3610  return result;
3611}
3612
3613
3614MaybeObject* Heap::AllocateSharedFunctionInfo(Object* name) {
3615  SharedFunctionInfo* share;
3616  MaybeObject* maybe = Allocate(shared_function_info_map(), OLD_POINTER_SPACE);
3617  if (!maybe->To<SharedFunctionInfo>(&share)) return maybe;
3618
3619  // Set pointer fields.
3620  share->set_name(name);
3621  Code* illegal = isolate_->builtins()->builtin(Builtins::kIllegal);
3622  share->set_code(illegal);
3623  share->set_optimized_code_map(Smi::FromInt(0));
3624  share->set_scope_info(ScopeInfo::Empty(isolate_));
3625  Code* construct_stub =
3626      isolate_->builtins()->builtin(Builtins::kJSConstructStubGeneric);
3627  share->set_construct_stub(construct_stub);
3628  share->set_instance_class_name(Object_string());
3629  share->set_function_data(undefined_value(), SKIP_WRITE_BARRIER);
3630  share->set_script(undefined_value(), SKIP_WRITE_BARRIER);
3631  share->set_debug_info(undefined_value(), SKIP_WRITE_BARRIER);
3632  share->set_inferred_name(empty_string(), SKIP_WRITE_BARRIER);
3633  share->set_initial_map(undefined_value(), SKIP_WRITE_BARRIER);
3634  share->set_ast_node_count(0);
3635  share->set_counters(0);
3636
3637  // Set integer fields (smi or int, depending on the architecture).
3638  share->set_length(0);
3639  share->set_formal_parameter_count(0);
3640  share->set_expected_nof_properties(0);
3641  share->set_num_literals(0);
3642  share->set_start_position_and_type(0);
3643  share->set_end_position(0);
3644  share->set_function_token_position(0);
3645  // All compiler hints default to false or 0.
3646  share->set_compiler_hints(0);
3647  share->set_opt_count(0);
3648
3649  return share;
3650}
3651
3652
3653MaybeObject* Heap::AllocateJSMessageObject(String* type,
3654                                           JSArray* arguments,
3655                                           int start_position,
3656                                           int end_position,
3657                                           Object* script,
3658                                           Object* stack_trace,
3659                                           Object* stack_frames) {
3660  Object* result;
3661  { MaybeObject* maybe_result = Allocate(message_object_map(), NEW_SPACE);
3662    if (!maybe_result->ToObject(&result)) return maybe_result;
3663  }
3664  JSMessageObject* message = JSMessageObject::cast(result);
3665  message->set_properties(Heap::empty_fixed_array(), SKIP_WRITE_BARRIER);
3666  message->initialize_elements();
3667  message->set_elements(Heap::empty_fixed_array(), SKIP_WRITE_BARRIER);
3668  message->set_type(type);
3669  message->set_arguments(arguments);
3670  message->set_start_position(start_position);
3671  message->set_end_position(end_position);
3672  message->set_script(script);
3673  message->set_stack_trace(stack_trace);
3674  message->set_stack_frames(stack_frames);
3675  return result;
3676}
3677
3678
3679
3680// Returns true for a character in a range.  Both limits are inclusive.
3681static inline bool Between(uint32_t character, uint32_t from, uint32_t to) {
3682  // This makes uses of the the unsigned wraparound.
3683  return character - from <= to - from;
3684}
3685
3686
3687MUST_USE_RESULT static inline MaybeObject* MakeOrFindTwoCharacterString(
3688    Heap* heap,
3689    uint16_t c1,
3690    uint16_t c2) {
3691  String* result;
3692  // Numeric strings have a different hash algorithm not known by
3693  // LookupTwoCharsStringIfExists, so we skip this step for such strings.
3694  if ((!Between(c1, '0', '9') || !Between(c2, '0', '9')) &&
3695      heap->string_table()->LookupTwoCharsStringIfExists(c1, c2, &result)) {
3696    return result;
3697  // Now we know the length is 2, we might as well make use of that fact
3698  // when building the new string.
3699  } else if (static_cast<unsigned>(c1 | c2) <= String::kMaxOneByteCharCodeU) {
3700    // We can do this.
3701    ASSERT(IsPowerOf2(String::kMaxOneByteCharCodeU + 1));  // because of this.
3702    Object* result;
3703    { MaybeObject* maybe_result = heap->AllocateRawOneByteString(2);
3704      if (!maybe_result->ToObject(&result)) return maybe_result;
3705    }
3706    uint8_t* dest = SeqOneByteString::cast(result)->GetChars();
3707    dest[0] = static_cast<uint8_t>(c1);
3708    dest[1] = static_cast<uint8_t>(c2);
3709    return result;
3710  } else {
3711    Object* result;
3712    { MaybeObject* maybe_result = heap->AllocateRawTwoByteString(2);
3713      if (!maybe_result->ToObject(&result)) return maybe_result;
3714    }
3715    uc16* dest = SeqTwoByteString::cast(result)->GetChars();
3716    dest[0] = c1;
3717    dest[1] = c2;
3718    return result;
3719  }
3720}
3721
3722
3723MaybeObject* Heap::AllocateConsString(String* first, String* second) {
3724  int first_length = first->length();
3725  if (first_length == 0) {
3726    return second;
3727  }
3728
3729  int second_length = second->length();
3730  if (second_length == 0) {
3731    return first;
3732  }
3733
3734  int length = first_length + second_length;
3735
3736  // Optimization for 2-byte strings often used as keys in a decompression
3737  // dictionary.  Check whether we already have the string in the string
3738  // table to prevent creation of many unneccesary strings.
3739  if (length == 2) {
3740    uint16_t c1 = first->Get(0);
3741    uint16_t c2 = second->Get(0);
3742    return MakeOrFindTwoCharacterString(this, c1, c2);
3743  }
3744
3745  bool first_is_one_byte = first->IsOneByteRepresentation();
3746  bool second_is_one_byte = second->IsOneByteRepresentation();
3747  bool is_one_byte = first_is_one_byte && second_is_one_byte;
3748  // Make sure that an out of memory exception is thrown if the length
3749  // of the new cons string is too large.
3750  if (length > String::kMaxLength || length < 0) {
3751    isolate()->context()->mark_out_of_memory();
3752    return Failure::OutOfMemoryException(0x4);
3753  }
3754
3755  bool is_one_byte_data_in_two_byte_string = false;
3756  if (!is_one_byte) {
3757    // At least one of the strings uses two-byte representation so we
3758    // can't use the fast case code for short ASCII strings below, but
3759    // we can try to save memory if all chars actually fit in ASCII.
3760    is_one_byte_data_in_two_byte_string =
3761        first->HasOnlyOneByteChars() && second->HasOnlyOneByteChars();
3762    if (is_one_byte_data_in_two_byte_string) {
3763      isolate_->counters()->string_add_runtime_ext_to_ascii()->Increment();
3764    }
3765  }
3766
3767  // If the resulting string is small make a flat string.
3768  if (length < ConsString::kMinLength) {
3769    // Note that neither of the two inputs can be a slice because:
3770    STATIC_ASSERT(ConsString::kMinLength <= SlicedString::kMinLength);
3771    ASSERT(first->IsFlat());
3772    ASSERT(second->IsFlat());
3773    if (is_one_byte) {
3774      Object* result;
3775      { MaybeObject* maybe_result = AllocateRawOneByteString(length);
3776        if (!maybe_result->ToObject(&result)) return maybe_result;
3777      }
3778      // Copy the characters into the new object.
3779      uint8_t* dest = SeqOneByteString::cast(result)->GetChars();
3780      // Copy first part.
3781      const uint8_t* src;
3782      if (first->IsExternalString()) {
3783        src = ExternalAsciiString::cast(first)->GetChars();
3784      } else {
3785        src = SeqOneByteString::cast(first)->GetChars();
3786      }
3787      for (int i = 0; i < first_length; i++) *dest++ = src[i];
3788      // Copy second part.
3789      if (second->IsExternalString()) {
3790        src = ExternalAsciiString::cast(second)->GetChars();
3791      } else {
3792        src = SeqOneByteString::cast(second)->GetChars();
3793      }
3794      for (int i = 0; i < second_length; i++) *dest++ = src[i];
3795      return result;
3796    } else {
3797      if (is_one_byte_data_in_two_byte_string) {
3798        Object* result;
3799        { MaybeObject* maybe_result = AllocateRawOneByteString(length);
3800          if (!maybe_result->ToObject(&result)) return maybe_result;
3801        }
3802        // Copy the characters into the new object.
3803        uint8_t* dest = SeqOneByteString::cast(result)->GetChars();
3804        String::WriteToFlat(first, dest, 0, first_length);
3805        String::WriteToFlat(second, dest + first_length, 0, second_length);
3806        isolate_->counters()->string_add_runtime_ext_to_ascii()->Increment();
3807        return result;
3808      }
3809
3810      Object* result;
3811      { MaybeObject* maybe_result = AllocateRawTwoByteString(length);
3812        if (!maybe_result->ToObject(&result)) return maybe_result;
3813      }
3814      // Copy the characters into the new object.
3815      uc16* dest = SeqTwoByteString::cast(result)->GetChars();
3816      String::WriteToFlat(first, dest, 0, first_length);
3817      String::WriteToFlat(second, dest + first_length, 0, second_length);
3818      return result;
3819    }
3820  }
3821
3822  Map* map = (is_one_byte || is_one_byte_data_in_two_byte_string) ?
3823      cons_ascii_string_map() : cons_string_map();
3824
3825  Object* result;
3826  { MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
3827    if (!maybe_result->ToObject(&result)) return maybe_result;
3828  }
3829
3830  DisallowHeapAllocation no_gc;
3831  ConsString* cons_string = ConsString::cast(result);
3832  WriteBarrierMode mode = cons_string->GetWriteBarrierMode(no_gc);
3833  cons_string->set_length(length);
3834  cons_string->set_hash_field(String::kEmptyHashField);
3835  cons_string->set_first(first, mode);
3836  cons_string->set_second(second, mode);
3837  return result;
3838}
3839
3840
3841MaybeObject* Heap::AllocateSubString(String* buffer,
3842                                     int start,
3843                                     int end,
3844                                     PretenureFlag pretenure) {
3845  int length = end - start;
3846  if (length <= 0) {
3847    return empty_string();
3848  } else if (length == 1) {
3849    return LookupSingleCharacterStringFromCode(buffer->Get(start));
3850  } else if (length == 2) {
3851    // Optimization for 2-byte strings often used as keys in a decompression
3852    // dictionary.  Check whether we already have the string in the string
3853    // table to prevent creation of many unnecessary strings.
3854    uint16_t c1 = buffer->Get(start);
3855    uint16_t c2 = buffer->Get(start + 1);
3856    return MakeOrFindTwoCharacterString(this, c1, c2);
3857  }
3858
3859  // Make an attempt to flatten the buffer to reduce access time.
3860  buffer = buffer->TryFlattenGetString();
3861
3862  if (!FLAG_string_slices ||
3863      !buffer->IsFlat() ||
3864      length < SlicedString::kMinLength ||
3865      pretenure == TENURED) {
3866    Object* result;
3867    // WriteToFlat takes care of the case when an indirect string has a
3868    // different encoding from its underlying string.  These encodings may
3869    // differ because of externalization.
3870    bool is_one_byte = buffer->IsOneByteRepresentation();
3871    { MaybeObject* maybe_result = is_one_byte
3872                                  ? AllocateRawOneByteString(length, pretenure)
3873                                  : AllocateRawTwoByteString(length, pretenure);
3874      if (!maybe_result->ToObject(&result)) return maybe_result;
3875    }
3876    String* string_result = String::cast(result);
3877    // Copy the characters into the new object.
3878    if (is_one_byte) {
3879      ASSERT(string_result->IsOneByteRepresentation());
3880      uint8_t* dest = SeqOneByteString::cast(string_result)->GetChars();
3881      String::WriteToFlat(buffer, dest, start, end);
3882    } else {
3883      ASSERT(string_result->IsTwoByteRepresentation());
3884      uc16* dest = SeqTwoByteString::cast(string_result)->GetChars();
3885      String::WriteToFlat(buffer, dest, start, end);
3886    }
3887    return result;
3888  }
3889
3890  ASSERT(buffer->IsFlat());
3891#if VERIFY_HEAP
3892  if (FLAG_verify_heap) {
3893    buffer->StringVerify();
3894  }
3895#endif
3896
3897  Object* result;
3898  // When slicing an indirect string we use its encoding for a newly created
3899  // slice and don't check the encoding of the underlying string.  This is safe
3900  // even if the encodings are different because of externalization.  If an
3901  // indirect ASCII string is pointing to a two-byte string, the two-byte char
3902  // codes of the underlying string must still fit into ASCII (because
3903  // externalization must not change char codes).
3904  { Map* map = buffer->IsOneByteRepresentation()
3905                 ? sliced_ascii_string_map()
3906                 : sliced_string_map();
3907    MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
3908    if (!maybe_result->ToObject(&result)) return maybe_result;
3909  }
3910
3911  DisallowHeapAllocation no_gc;
3912  SlicedString* sliced_string = SlicedString::cast(result);
3913  sliced_string->set_length(length);
3914  sliced_string->set_hash_field(String::kEmptyHashField);
3915  if (buffer->IsConsString()) {
3916    ConsString* cons = ConsString::cast(buffer);
3917    ASSERT(cons->second()->length() == 0);
3918    sliced_string->set_parent(cons->first());
3919    sliced_string->set_offset(start);
3920  } else if (buffer->IsSlicedString()) {
3921    // Prevent nesting sliced strings.
3922    SlicedString* parent_slice = SlicedString::cast(buffer);
3923    sliced_string->set_parent(parent_slice->parent());
3924    sliced_string->set_offset(start + parent_slice->offset());
3925  } else {
3926    sliced_string->set_parent(buffer);
3927    sliced_string->set_offset(start);
3928  }
3929  ASSERT(sliced_string->parent()->IsSeqString() ||
3930         sliced_string->parent()->IsExternalString());
3931  return result;
3932}
3933
3934
3935MaybeObject* Heap::AllocateExternalStringFromAscii(
3936    const ExternalAsciiString::Resource* resource) {
3937  size_t length = resource->length();
3938  if (length > static_cast<size_t>(String::kMaxLength)) {
3939    isolate()->context()->mark_out_of_memory();
3940    return Failure::OutOfMemoryException(0x5);
3941  }
3942
3943  Map* map = external_ascii_string_map();
3944  Object* result;
3945  { MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
3946    if (!maybe_result->ToObject(&result)) return maybe_result;
3947  }
3948
3949  ExternalAsciiString* external_string = ExternalAsciiString::cast(result);
3950  external_string->set_length(static_cast<int>(length));
3951  external_string->set_hash_field(String::kEmptyHashField);
3952  external_string->set_resource(resource);
3953
3954  return result;
3955}
3956
3957
3958MaybeObject* Heap::AllocateExternalStringFromTwoByte(
3959    const ExternalTwoByteString::Resource* resource) {
3960  size_t length = resource->length();
3961  if (length > static_cast<size_t>(String::kMaxLength)) {
3962    isolate()->context()->mark_out_of_memory();
3963    return Failure::OutOfMemoryException(0x6);
3964  }
3965
3966  // For small strings we check whether the resource contains only
3967  // one byte characters.  If yes, we use a different string map.
3968  static const size_t kOneByteCheckLengthLimit = 32;
3969  bool is_one_byte = length <= kOneByteCheckLengthLimit &&
3970      String::IsOneByte(resource->data(), static_cast<int>(length));
3971  Map* map = is_one_byte ?
3972      external_string_with_one_byte_data_map() : external_string_map();
3973  Object* result;
3974  { MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
3975    if (!maybe_result->ToObject(&result)) return maybe_result;
3976  }
3977
3978  ExternalTwoByteString* external_string = ExternalTwoByteString::cast(result);
3979  external_string->set_length(static_cast<int>(length));
3980  external_string->set_hash_field(String::kEmptyHashField);
3981  external_string->set_resource(resource);
3982
3983  return result;
3984}
3985
3986
3987MaybeObject* Heap::LookupSingleCharacterStringFromCode(uint16_t code) {
3988  if (code <= String::kMaxOneByteCharCode) {
3989    Object* value = single_character_string_cache()->get(code);
3990    if (value != undefined_value()) return value;
3991
3992    uint8_t buffer[1];
3993    buffer[0] = static_cast<uint8_t>(code);
3994    Object* result;
3995    MaybeObject* maybe_result =
3996        InternalizeOneByteString(Vector<const uint8_t>(buffer, 1));
3997
3998    if (!maybe_result->ToObject(&result)) return maybe_result;
3999    single_character_string_cache()->set(code, result);
4000    return result;
4001  }
4002
4003  Object* result;
4004  { MaybeObject* maybe_result = AllocateRawTwoByteString(1);
4005    if (!maybe_result->ToObject(&result)) return maybe_result;
4006  }
4007  String* answer = String::cast(result);
4008  answer->Set(0, code);
4009  return answer;
4010}
4011
4012
4013MaybeObject* Heap::AllocateByteArray(int length, PretenureFlag pretenure) {
4014  if (length < 0 || length > ByteArray::kMaxLength) {
4015    return Failure::OutOfMemoryException(0x7);
4016  }
4017  if (pretenure == NOT_TENURED) {
4018    return AllocateByteArray(length);
4019  }
4020  int size = ByteArray::SizeFor(length);
4021  AllocationSpace space =
4022      (size > Page::kMaxNonCodeHeapObjectSize) ? LO_SPACE : OLD_DATA_SPACE;
4023  Object* result;
4024  { MaybeObject* maybe_result = AllocateRaw(size, space, space);
4025    if (!maybe_result->ToObject(&result)) return maybe_result;
4026  }
4027
4028  reinterpret_cast<ByteArray*>(result)->set_map_no_write_barrier(
4029      byte_array_map());
4030  reinterpret_cast<ByteArray*>(result)->set_length(length);
4031  return result;
4032}
4033
4034
4035MaybeObject* Heap::AllocateByteArray(int length) {
4036  if (length < 0 || length > ByteArray::kMaxLength) {
4037    return Failure::OutOfMemoryException(0x8);
4038  }
4039  int size = ByteArray::SizeFor(length);
4040  AllocationSpace space =
4041      (size > Page::kMaxNonCodeHeapObjectSize) ? LO_SPACE : NEW_SPACE;
4042  Object* result;
4043  { MaybeObject* maybe_result = AllocateRaw(size, space, OLD_DATA_SPACE);
4044    if (!maybe_result->ToObject(&result)) return maybe_result;
4045  }
4046
4047  reinterpret_cast<ByteArray*>(result)->set_map_no_write_barrier(
4048      byte_array_map());
4049  reinterpret_cast<ByteArray*>(result)->set_length(length);
4050  return result;
4051}
4052
4053
4054void Heap::CreateFillerObjectAt(Address addr, int size) {
4055  if (size == 0) return;
4056  HeapObject* filler = HeapObject::FromAddress(addr);
4057  if (size == kPointerSize) {
4058    filler->set_map_no_write_barrier(one_pointer_filler_map());
4059  } else if (size == 2 * kPointerSize) {
4060    filler->set_map_no_write_barrier(two_pointer_filler_map());
4061  } else {
4062    filler->set_map_no_write_barrier(free_space_map());
4063    FreeSpace::cast(filler)->set_size(size);
4064  }
4065}
4066
4067
4068MaybeObject* Heap::AllocateExternalArray(int length,
4069                                         ExternalArrayType array_type,
4070                                         void* external_pointer,
4071                                         PretenureFlag pretenure) {
4072  AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
4073  Object* result;
4074  { MaybeObject* maybe_result = AllocateRaw(ExternalArray::kAlignedSize,
4075                                            space,
4076                                            OLD_DATA_SPACE);
4077    if (!maybe_result->ToObject(&result)) return maybe_result;
4078  }
4079
4080  reinterpret_cast<ExternalArray*>(result)->set_map_no_write_barrier(
4081      MapForExternalArrayType(array_type));
4082  reinterpret_cast<ExternalArray*>(result)->set_length(length);
4083  reinterpret_cast<ExternalArray*>(result)->set_external_pointer(
4084      external_pointer);
4085
4086  return result;
4087}
4088
4089
4090MaybeObject* Heap::CreateCode(const CodeDesc& desc,
4091                              Code::Flags flags,
4092                              Handle<Object> self_reference,
4093                              bool immovable,
4094                              bool crankshafted) {
4095  // Allocate ByteArray before the Code object, so that we do not risk
4096  // leaving uninitialized Code object (and breaking the heap).
4097  ByteArray* reloc_info;
4098  MaybeObject* maybe_reloc_info = AllocateByteArray(desc.reloc_size, TENURED);
4099  if (!maybe_reloc_info->To(&reloc_info)) return maybe_reloc_info;
4100
4101  // Compute size.
4102  int body_size = RoundUp(desc.instr_size, kObjectAlignment);
4103  int obj_size = Code::SizeFor(body_size);
4104  ASSERT(IsAligned(static_cast<intptr_t>(obj_size), kCodeAlignment));
4105  MaybeObject* maybe_result;
4106  // Large code objects and code objects which should stay at a fixed address
4107  // are allocated in large object space.
4108  HeapObject* result;
4109  bool force_lo_space = obj_size > code_space()->AreaSize();
4110  if (force_lo_space) {
4111    maybe_result = lo_space_->AllocateRaw(obj_size, EXECUTABLE);
4112  } else {
4113    maybe_result = code_space_->AllocateRaw(obj_size);
4114  }
4115  if (!maybe_result->To<HeapObject>(&result)) return maybe_result;
4116
4117  if (immovable && !force_lo_space &&
4118      // Objects on the first page of each space are never moved.
4119      !code_space_->FirstPage()->Contains(result->address())) {
4120    // Discard the first code allocation, which was on a page where it could be
4121    // moved.
4122    CreateFillerObjectAt(result->address(), obj_size);
4123    maybe_result = lo_space_->AllocateRaw(obj_size, EXECUTABLE);
4124    if (!maybe_result->To<HeapObject>(&result)) return maybe_result;
4125  }
4126
4127  // Initialize the object
4128  result->set_map_no_write_barrier(code_map());
4129  Code* code = Code::cast(result);
4130  ASSERT(!isolate_->code_range()->exists() ||
4131      isolate_->code_range()->contains(code->address()));
4132  code->set_instruction_size(desc.instr_size);
4133  code->set_relocation_info(reloc_info);
4134  code->set_flags(flags);
4135  if (code->is_call_stub() || code->is_keyed_call_stub()) {
4136    code->set_check_type(RECEIVER_MAP_CHECK);
4137  }
4138  code->set_is_crankshafted(crankshafted);
4139  code->set_deoptimization_data(empty_fixed_array(), SKIP_WRITE_BARRIER);
4140  code->InitializeTypeFeedbackInfoNoWriteBarrier(undefined_value());
4141  code->set_handler_table(empty_fixed_array(), SKIP_WRITE_BARRIER);
4142  code->set_gc_metadata(Smi::FromInt(0));
4143  code->set_ic_age(global_ic_age_);
4144  code->set_prologue_offset(kPrologueOffsetNotSet);
4145  if (code->kind() == Code::OPTIMIZED_FUNCTION) {
4146    code->set_marked_for_deoptimization(false);
4147  }
4148  // Allow self references to created code object by patching the handle to
4149  // point to the newly allocated Code object.
4150  if (!self_reference.is_null()) {
4151    *(self_reference.location()) = code;
4152  }
4153  // Migrate generated code.
4154  // The generated code can contain Object** values (typically from handles)
4155  // that are dereferenced during the copy to point directly to the actual heap
4156  // objects. These pointers can include references to the code object itself,
4157  // through the self_reference parameter.
4158  code->CopyFrom(desc);
4159
4160#ifdef VERIFY_HEAP
4161  if (FLAG_verify_heap) {
4162    code->Verify();
4163  }
4164#endif
4165  return code;
4166}
4167
4168
4169MaybeObject* Heap::CopyCode(Code* code) {
4170  // Allocate an object the same size as the code object.
4171  int obj_size = code->Size();
4172  MaybeObject* maybe_result;
4173  if (obj_size > code_space()->AreaSize()) {
4174    maybe_result = lo_space_->AllocateRaw(obj_size, EXECUTABLE);
4175  } else {
4176    maybe_result = code_space_->AllocateRaw(obj_size);
4177  }
4178
4179  Object* result;
4180  if (!maybe_result->ToObject(&result)) return maybe_result;
4181
4182  // Copy code object.
4183  Address old_addr = code->address();
4184  Address new_addr = reinterpret_cast<HeapObject*>(result)->address();
4185  CopyBlock(new_addr, old_addr, obj_size);
4186  // Relocate the copy.
4187  Code* new_code = Code::cast(result);
4188  ASSERT(!isolate_->code_range()->exists() ||
4189      isolate_->code_range()->contains(code->address()));
4190  new_code->Relocate(new_addr - old_addr);
4191  return new_code;
4192}
4193
4194
4195MaybeObject* Heap::CopyCode(Code* code, Vector<byte> reloc_info) {
4196  // Allocate ByteArray before the Code object, so that we do not risk
4197  // leaving uninitialized Code object (and breaking the heap).
4198  Object* reloc_info_array;
4199  { MaybeObject* maybe_reloc_info_array =
4200        AllocateByteArray(reloc_info.length(), TENURED);
4201    if (!maybe_reloc_info_array->ToObject(&reloc_info_array)) {
4202      return maybe_reloc_info_array;
4203    }
4204  }
4205
4206  int new_body_size = RoundUp(code->instruction_size(), kObjectAlignment);
4207
4208  int new_obj_size = Code::SizeFor(new_body_size);
4209
4210  Address old_addr = code->address();
4211
4212  size_t relocation_offset =
4213      static_cast<size_t>(code->instruction_end() - old_addr);
4214
4215  MaybeObject* maybe_result;
4216  if (new_obj_size > code_space()->AreaSize()) {
4217    maybe_result = lo_space_->AllocateRaw(new_obj_size, EXECUTABLE);
4218  } else {
4219    maybe_result = code_space_->AllocateRaw(new_obj_size);
4220  }
4221
4222  Object* result;
4223  if (!maybe_result->ToObject(&result)) return maybe_result;
4224
4225  // Copy code object.
4226  Address new_addr = reinterpret_cast<HeapObject*>(result)->address();
4227
4228  // Copy header and instructions.
4229  CopyBytes(new_addr, old_addr, relocation_offset);
4230
4231  Code* new_code = Code::cast(result);
4232  new_code->set_relocation_info(ByteArray::cast(reloc_info_array));
4233
4234  // Copy patched rinfo.
4235  CopyBytes(new_code->relocation_start(),
4236            reloc_info.start(),
4237            static_cast<size_t>(reloc_info.length()));
4238
4239  // Relocate the copy.
4240  ASSERT(!isolate_->code_range()->exists() ||
4241      isolate_->code_range()->contains(code->address()));
4242  new_code->Relocate(new_addr - old_addr);
4243
4244#ifdef VERIFY_HEAP
4245  if (FLAG_verify_heap) {
4246    code->Verify();
4247  }
4248#endif
4249  return new_code;
4250}
4251
4252
4253MaybeObject* Heap::AllocateWithAllocationSite(Map* map, AllocationSpace space,
4254    Handle<AllocationSite> allocation_site) {
4255  ASSERT(gc_state_ == NOT_IN_GC);
4256  ASSERT(map->instance_type() != MAP_TYPE);
4257  // If allocation failures are disallowed, we may allocate in a different
4258  // space when new space is full and the object is not a large object.
4259  AllocationSpace retry_space =
4260      (space != NEW_SPACE) ? space : TargetSpaceId(map->instance_type());
4261  int size = map->instance_size() + AllocationMemento::kSize;
4262  Object* result;
4263  MaybeObject* maybe_result = AllocateRaw(size, space, retry_space);
4264  if (!maybe_result->ToObject(&result)) return maybe_result;
4265  // No need for write barrier since object is white and map is in old space.
4266  HeapObject::cast(result)->set_map_no_write_barrier(map);
4267  AllocationMemento* alloc_memento = reinterpret_cast<AllocationMemento*>(
4268      reinterpret_cast<Address>(result) + map->instance_size());
4269  alloc_memento->set_map_no_write_barrier(allocation_memento_map());
4270  alloc_memento->set_allocation_site(*allocation_site, SKIP_WRITE_BARRIER);
4271  return result;
4272}
4273
4274
4275MaybeObject* Heap::Allocate(Map* map, AllocationSpace space) {
4276  ASSERT(gc_state_ == NOT_IN_GC);
4277  ASSERT(map->instance_type() != MAP_TYPE);
4278  // If allocation failures are disallowed, we may allocate in a different
4279  // space when new space is full and the object is not a large object.
4280  AllocationSpace retry_space =
4281      (space != NEW_SPACE) ? space : TargetSpaceId(map->instance_type());
4282  int size = map->instance_size();
4283  Object* result;
4284  MaybeObject* maybe_result = AllocateRaw(size, space, retry_space);
4285  if (!maybe_result->ToObject(&result)) return maybe_result;
4286  // No need for write barrier since object is white and map is in old space.
4287  HeapObject::cast(result)->set_map_no_write_barrier(map);
4288  return result;
4289}
4290
4291
4292void Heap::InitializeFunction(JSFunction* function,
4293                              SharedFunctionInfo* shared,
4294                              Object* prototype) {
4295  ASSERT(!prototype->IsMap());
4296  function->initialize_properties();
4297  function->initialize_elements();
4298  function->set_shared(shared);
4299  function->set_code(shared->code());
4300  function->set_prototype_or_initial_map(prototype);
4301  function->set_context(undefined_value());
4302  function->set_literals_or_bindings(empty_fixed_array());
4303  function->set_next_function_link(undefined_value());
4304}
4305
4306
4307MaybeObject* Heap::AllocateFunctionPrototype(JSFunction* function) {
4308  // Make sure to use globals from the function's context, since the function
4309  // can be from a different context.
4310  Context* native_context = function->context()->native_context();
4311  Map* new_map;
4312  if (function->shared()->is_generator()) {
4313    // Generator prototypes can share maps since they don't have "constructor"
4314    // properties.
4315    new_map = native_context->generator_object_prototype_map();
4316  } else {
4317    // Each function prototype gets a fresh map to avoid unwanted sharing of
4318    // maps between prototypes of different constructors.
4319    JSFunction* object_function = native_context->object_function();
4320    ASSERT(object_function->has_initial_map());
4321    MaybeObject* maybe_map = object_function->initial_map()->Copy();
4322    if (!maybe_map->To(&new_map)) return maybe_map;
4323  }
4324
4325  Object* prototype;
4326  MaybeObject* maybe_prototype = AllocateJSObjectFromMap(new_map);
4327  if (!maybe_prototype->ToObject(&prototype)) return maybe_prototype;
4328
4329  if (!function->shared()->is_generator()) {
4330    MaybeObject* maybe_failure =
4331        JSObject::cast(prototype)->SetLocalPropertyIgnoreAttributes(
4332            constructor_string(), function, DONT_ENUM);
4333    if (maybe_failure->IsFailure()) return maybe_failure;
4334  }
4335
4336  return prototype;
4337}
4338
4339
4340MaybeObject* Heap::AllocateFunction(Map* function_map,
4341                                    SharedFunctionInfo* shared,
4342                                    Object* prototype,
4343                                    PretenureFlag pretenure) {
4344  AllocationSpace space =
4345      (pretenure == TENURED) ? OLD_POINTER_SPACE : NEW_SPACE;
4346  Object* result;
4347  { MaybeObject* maybe_result = Allocate(function_map, space);
4348    if (!maybe_result->ToObject(&result)) return maybe_result;
4349  }
4350  InitializeFunction(JSFunction::cast(result), shared, prototype);
4351  return result;
4352}
4353
4354
4355MaybeObject* Heap::AllocateArgumentsObject(Object* callee, int length) {
4356  // To get fast allocation and map sharing for arguments objects we
4357  // allocate them based on an arguments boilerplate.
4358
4359  JSObject* boilerplate;
4360  int arguments_object_size;
4361  bool strict_mode_callee = callee->IsJSFunction() &&
4362      !JSFunction::cast(callee)->shared()->is_classic_mode();
4363  if (strict_mode_callee) {
4364    boilerplate =
4365        isolate()->context()->native_context()->
4366            strict_mode_arguments_boilerplate();
4367    arguments_object_size = kArgumentsObjectSizeStrict;
4368  } else {
4369    boilerplate =
4370        isolate()->context()->native_context()->arguments_boilerplate();
4371    arguments_object_size = kArgumentsObjectSize;
4372  }
4373
4374  // This calls Copy directly rather than using Heap::AllocateRaw so we
4375  // duplicate the check here.
4376  ASSERT(AllowHeapAllocation::IsAllowed() && gc_state_ == NOT_IN_GC);
4377
4378  // Check that the size of the boilerplate matches our
4379  // expectations. The ArgumentsAccessStub::GenerateNewObject relies
4380  // on the size being a known constant.
4381  ASSERT(arguments_object_size == boilerplate->map()->instance_size());
4382
4383  // Do the allocation.
4384  Object* result;
4385  { MaybeObject* maybe_result =
4386        AllocateRaw(arguments_object_size, NEW_SPACE, OLD_POINTER_SPACE);
4387    if (!maybe_result->ToObject(&result)) return maybe_result;
4388  }
4389
4390  // Copy the content. The arguments boilerplate doesn't have any
4391  // fields that point to new space so it's safe to skip the write
4392  // barrier here.
4393  CopyBlock(HeapObject::cast(result)->address(),
4394            boilerplate->address(),
4395            JSObject::kHeaderSize);
4396
4397  // Set the length property.
4398  JSObject::cast(result)->InObjectPropertyAtPut(kArgumentsLengthIndex,
4399                                                Smi::FromInt(length),
4400                                                SKIP_WRITE_BARRIER);
4401  // Set the callee property for non-strict mode arguments object only.
4402  if (!strict_mode_callee) {
4403    JSObject::cast(result)->InObjectPropertyAtPut(kArgumentsCalleeIndex,
4404                                                  callee);
4405  }
4406
4407  // Check the state of the object
4408  ASSERT(JSObject::cast(result)->HasFastProperties());
4409  ASSERT(JSObject::cast(result)->HasFastObjectElements());
4410
4411  return result;
4412}
4413
4414
4415MaybeObject* Heap::AllocateInitialMap(JSFunction* fun) {
4416  ASSERT(!fun->has_initial_map());
4417
4418  // First create a new map with the size and number of in-object properties
4419  // suggested by the function.
4420  InstanceType instance_type;
4421  int instance_size;
4422  int in_object_properties;
4423  if (fun->shared()->is_generator()) {
4424    instance_type = JS_GENERATOR_OBJECT_TYPE;
4425    instance_size = JSGeneratorObject::kSize;
4426    in_object_properties = 0;
4427  } else {
4428    instance_type = JS_OBJECT_TYPE;
4429    instance_size = fun->shared()->CalculateInstanceSize();
4430    in_object_properties = fun->shared()->CalculateInObjectProperties();
4431  }
4432  Map* map;
4433  MaybeObject* maybe_map = AllocateMap(instance_type, instance_size);
4434  if (!maybe_map->To(&map)) return maybe_map;
4435
4436  // Fetch or allocate prototype.
4437  Object* prototype;
4438  if (fun->has_instance_prototype()) {
4439    prototype = fun->instance_prototype();
4440  } else {
4441    MaybeObject* maybe_prototype = AllocateFunctionPrototype(fun);
4442    if (!maybe_prototype->To(&prototype)) return maybe_prototype;
4443  }
4444  map->set_inobject_properties(in_object_properties);
4445  map->set_unused_property_fields(in_object_properties);
4446  map->set_prototype(prototype);
4447  ASSERT(map->has_fast_object_elements());
4448
4449  if (!fun->shared()->is_generator()) {
4450    fun->shared()->StartInobjectSlackTracking(map);
4451  }
4452
4453  return map;
4454}
4455
4456
4457void Heap::InitializeJSObjectFromMap(JSObject* obj,
4458                                     FixedArray* properties,
4459                                     Map* map) {
4460  obj->set_properties(properties);
4461  obj->initialize_elements();
4462  // TODO(1240798): Initialize the object's body using valid initial values
4463  // according to the object's initial map.  For example, if the map's
4464  // instance type is JS_ARRAY_TYPE, the length field should be initialized
4465  // to a number (e.g. Smi::FromInt(0)) and the elements initialized to a
4466  // fixed array (e.g. Heap::empty_fixed_array()).  Currently, the object
4467  // verification code has to cope with (temporarily) invalid objects.  See
4468  // for example, JSArray::JSArrayVerify).
4469  Object* filler;
4470  // We cannot always fill with one_pointer_filler_map because objects
4471  // created from API functions expect their internal fields to be initialized
4472  // with undefined_value.
4473  // Pre-allocated fields need to be initialized with undefined_value as well
4474  // so that object accesses before the constructor completes (e.g. in the
4475  // debugger) will not cause a crash.
4476  if (map->constructor()->IsJSFunction() &&
4477      JSFunction::cast(map->constructor())->shared()->
4478          IsInobjectSlackTrackingInProgress()) {
4479    // We might want to shrink the object later.
4480    ASSERT(obj->GetInternalFieldCount() == 0);
4481    filler = Heap::one_pointer_filler_map();
4482  } else {
4483    filler = Heap::undefined_value();
4484  }
4485  obj->InitializeBody(map, Heap::undefined_value(), filler);
4486}
4487
4488
4489MaybeObject* Heap::AllocateJSObjectFromMap(
4490    Map* map, PretenureFlag pretenure, bool allocate_properties) {
4491  // JSFunctions should be allocated using AllocateFunction to be
4492  // properly initialized.
4493  ASSERT(map->instance_type() != JS_FUNCTION_TYPE);
4494
4495  // Both types of global objects should be allocated using
4496  // AllocateGlobalObject to be properly initialized.
4497  ASSERT(map->instance_type() != JS_GLOBAL_OBJECT_TYPE);
4498  ASSERT(map->instance_type() != JS_BUILTINS_OBJECT_TYPE);
4499
4500  // Allocate the backing storage for the properties.
4501  FixedArray* properties;
4502  if (allocate_properties) {
4503    int prop_size = map->InitialPropertiesLength();
4504    ASSERT(prop_size >= 0);
4505    { MaybeObject* maybe_properties = AllocateFixedArray(prop_size, pretenure);
4506      if (!maybe_properties->To(&properties)) return maybe_properties;
4507    }
4508  } else {
4509    properties = empty_fixed_array();
4510  }
4511
4512  // Allocate the JSObject.
4513  AllocationSpace space =
4514      (pretenure == TENURED) ? OLD_POINTER_SPACE : NEW_SPACE;
4515  if (map->instance_size() > Page::kMaxNonCodeHeapObjectSize) space = LO_SPACE;
4516  Object* obj;
4517  MaybeObject* maybe_obj = Allocate(map, space);
4518  if (!maybe_obj->To(&obj)) return maybe_obj;
4519
4520  // Initialize the JSObject.
4521  InitializeJSObjectFromMap(JSObject::cast(obj), properties, map);
4522  ASSERT(JSObject::cast(obj)->HasFastElements() ||
4523         JSObject::cast(obj)->HasExternalArrayElements());
4524  return obj;
4525}
4526
4527
4528MaybeObject* Heap::AllocateJSObjectFromMapWithAllocationSite(
4529    Map* map, Handle<AllocationSite> allocation_site) {
4530  // JSFunctions should be allocated using AllocateFunction to be
4531  // properly initialized.
4532  ASSERT(map->instance_type() != JS_FUNCTION_TYPE);
4533
4534  // Both types of global objects should be allocated using
4535  // AllocateGlobalObject to be properly initialized.
4536  ASSERT(map->instance_type() != JS_GLOBAL_OBJECT_TYPE);
4537  ASSERT(map->instance_type() != JS_BUILTINS_OBJECT_TYPE);
4538
4539  // Allocate the backing storage for the properties.
4540  int prop_size = map->InitialPropertiesLength();
4541  ASSERT(prop_size >= 0);
4542  FixedArray* properties;
4543  { MaybeObject* maybe_properties = AllocateFixedArray(prop_size);
4544    if (!maybe_properties->To(&properties)) return maybe_properties;
4545  }
4546
4547  // Allocate the JSObject.
4548  AllocationSpace space = NEW_SPACE;
4549  if (map->instance_size() > Page::kMaxNonCodeHeapObjectSize) space = LO_SPACE;
4550  Object* obj;
4551  MaybeObject* maybe_obj =
4552      AllocateWithAllocationSite(map, space, allocation_site);
4553  if (!maybe_obj->To(&obj)) return maybe_obj;
4554
4555  // Initialize the JSObject.
4556  InitializeJSObjectFromMap(JSObject::cast(obj), properties, map);
4557  ASSERT(JSObject::cast(obj)->HasFastElements());
4558  return obj;
4559}
4560
4561
4562MaybeObject* Heap::AllocateJSObject(JSFunction* constructor,
4563                                    PretenureFlag pretenure) {
4564  // Allocate the initial map if absent.
4565  if (!constructor->has_initial_map()) {
4566    Object* initial_map;
4567    { MaybeObject* maybe_initial_map = AllocateInitialMap(constructor);
4568      if (!maybe_initial_map->ToObject(&initial_map)) return maybe_initial_map;
4569    }
4570    constructor->set_initial_map(Map::cast(initial_map));
4571    Map::cast(initial_map)->set_constructor(constructor);
4572  }
4573  // Allocate the object based on the constructors initial map.
4574  MaybeObject* result = AllocateJSObjectFromMap(
4575      constructor->initial_map(), pretenure);
4576#ifdef DEBUG
4577  // Make sure result is NOT a global object if valid.
4578  Object* non_failure;
4579  ASSERT(!result->ToObject(&non_failure) || !non_failure->IsGlobalObject());
4580#endif
4581  return result;
4582}
4583
4584
4585MaybeObject* Heap::AllocateJSObjectWithAllocationSite(JSFunction* constructor,
4586    Handle<AllocationSite> allocation_site) {
4587  // Allocate the initial map if absent.
4588  if (!constructor->has_initial_map()) {
4589    Object* initial_map;
4590    { MaybeObject* maybe_initial_map = AllocateInitialMap(constructor);
4591      if (!maybe_initial_map->ToObject(&initial_map)) return maybe_initial_map;
4592    }
4593    constructor->set_initial_map(Map::cast(initial_map));
4594    Map::cast(initial_map)->set_constructor(constructor);
4595  }
4596  // Allocate the object based on the constructors initial map, or the payload
4597  // advice
4598  Map* initial_map = constructor->initial_map();
4599
4600  Smi* smi = Smi::cast(allocation_site->transition_info());
4601  ElementsKind to_kind = static_cast<ElementsKind>(smi->value());
4602  AllocationSiteMode mode = TRACK_ALLOCATION_SITE;
4603  if (to_kind != initial_map->elements_kind()) {
4604    MaybeObject* maybe_new_map = initial_map->AsElementsKind(to_kind);
4605    if (!maybe_new_map->To(&initial_map)) return maybe_new_map;
4606    // Possibly alter the mode, since we found an updated elements kind
4607    // in the type info cell.
4608    mode = AllocationSite::GetMode(to_kind);
4609  }
4610
4611  MaybeObject* result;
4612  if (mode == TRACK_ALLOCATION_SITE) {
4613    result = AllocateJSObjectFromMapWithAllocationSite(initial_map,
4614        allocation_site);
4615  } else {
4616    result = AllocateJSObjectFromMap(initial_map, NOT_TENURED);
4617  }
4618#ifdef DEBUG
4619  // Make sure result is NOT a global object if valid.
4620  Object* non_failure;
4621  ASSERT(!result->ToObject(&non_failure) || !non_failure->IsGlobalObject());
4622#endif
4623  return result;
4624}
4625
4626
4627MaybeObject* Heap::AllocateJSGeneratorObject(JSFunction *function) {
4628  ASSERT(function->shared()->is_generator());
4629  Map *map;
4630  if (function->has_initial_map()) {
4631    map = function->initial_map();
4632  } else {
4633    // Allocate the initial map if absent.
4634    MaybeObject* maybe_map = AllocateInitialMap(function);
4635    if (!maybe_map->To(&map)) return maybe_map;
4636    function->set_initial_map(map);
4637    map->set_constructor(function);
4638  }
4639  ASSERT(map->instance_type() == JS_GENERATOR_OBJECT_TYPE);
4640  return AllocateJSObjectFromMap(map);
4641}
4642
4643
4644MaybeObject* Heap::AllocateJSModule(Context* context, ScopeInfo* scope_info) {
4645  // Allocate a fresh map. Modules do not have a prototype.
4646  Map* map;
4647  MaybeObject* maybe_map = AllocateMap(JS_MODULE_TYPE, JSModule::kSize);
4648  if (!maybe_map->To(&map)) return maybe_map;
4649  // Allocate the object based on the map.
4650  JSModule* module;
4651  MaybeObject* maybe_module = AllocateJSObjectFromMap(map, TENURED);
4652  if (!maybe_module->To(&module)) return maybe_module;
4653  module->set_context(context);
4654  module->set_scope_info(scope_info);
4655  return module;
4656}
4657
4658
4659MaybeObject* Heap::AllocateJSArrayAndStorage(
4660    ElementsKind elements_kind,
4661    int length,
4662    int capacity,
4663    ArrayStorageAllocationMode mode,
4664    PretenureFlag pretenure) {
4665  MaybeObject* maybe_array = AllocateJSArray(elements_kind, pretenure);
4666  JSArray* array;
4667  if (!maybe_array->To(&array)) return maybe_array;
4668
4669  // TODO(mvstanton): this body of code is duplicate with AllocateJSArrayStorage
4670  // for performance reasons.
4671  ASSERT(capacity >= length);
4672
4673  if (capacity == 0) {
4674    array->set_length(Smi::FromInt(0));
4675    array->set_elements(empty_fixed_array());
4676    return array;
4677  }
4678
4679  FixedArrayBase* elms;
4680  MaybeObject* maybe_elms = NULL;
4681  if (IsFastDoubleElementsKind(elements_kind)) {
4682    if (mode == DONT_INITIALIZE_ARRAY_ELEMENTS) {
4683      maybe_elms = AllocateUninitializedFixedDoubleArray(capacity);
4684    } else {
4685      ASSERT(mode == INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE);
4686      maybe_elms = AllocateFixedDoubleArrayWithHoles(capacity);
4687    }
4688  } else {
4689    ASSERT(IsFastSmiOrObjectElementsKind(elements_kind));
4690    if (mode == DONT_INITIALIZE_ARRAY_ELEMENTS) {
4691      maybe_elms = AllocateUninitializedFixedArray(capacity);
4692    } else {
4693      ASSERT(mode == INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE);
4694      maybe_elms = AllocateFixedArrayWithHoles(capacity);
4695    }
4696  }
4697  if (!maybe_elms->To(&elms)) return maybe_elms;
4698
4699  array->set_elements(elms);
4700  array->set_length(Smi::FromInt(length));
4701  return array;
4702}
4703
4704
4705MaybeObject* Heap::AllocateJSArrayAndStorageWithAllocationSite(
4706    ElementsKind elements_kind,
4707    int length,
4708    int capacity,
4709    Handle<AllocationSite> allocation_site,
4710    ArrayStorageAllocationMode mode) {
4711  MaybeObject* maybe_array = AllocateJSArrayWithAllocationSite(elements_kind,
4712      allocation_site);
4713  JSArray* array;
4714  if (!maybe_array->To(&array)) return maybe_array;
4715  return AllocateJSArrayStorage(array, length, capacity, mode);
4716}
4717
4718
4719MaybeObject* Heap::AllocateJSArrayStorage(
4720    JSArray* array,
4721    int length,
4722    int capacity,
4723    ArrayStorageAllocationMode mode) {
4724  ASSERT(capacity >= length);
4725
4726  if (capacity == 0) {
4727    array->set_length(Smi::FromInt(0));
4728    array->set_elements(empty_fixed_array());
4729    return array;
4730  }
4731
4732  FixedArrayBase* elms;
4733  MaybeObject* maybe_elms = NULL;
4734  ElementsKind elements_kind = array->GetElementsKind();
4735  if (IsFastDoubleElementsKind(elements_kind)) {
4736    if (mode == DONT_INITIALIZE_ARRAY_ELEMENTS) {
4737      maybe_elms = AllocateUninitializedFixedDoubleArray(capacity);
4738    } else {
4739      ASSERT(mode == INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE);
4740      maybe_elms = AllocateFixedDoubleArrayWithHoles(capacity);
4741    }
4742  } else {
4743    ASSERT(IsFastSmiOrObjectElementsKind(elements_kind));
4744    if (mode == DONT_INITIALIZE_ARRAY_ELEMENTS) {
4745      maybe_elms = AllocateUninitializedFixedArray(capacity);
4746    } else {
4747      ASSERT(mode == INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE);
4748      maybe_elms = AllocateFixedArrayWithHoles(capacity);
4749    }
4750  }
4751  if (!maybe_elms->To(&elms)) return maybe_elms;
4752
4753  array->set_elements(elms);
4754  array->set_length(Smi::FromInt(length));
4755  return array;
4756}
4757
4758
4759MaybeObject* Heap::AllocateJSArrayWithElements(
4760    FixedArrayBase* elements,
4761    ElementsKind elements_kind,
4762    int length,
4763    PretenureFlag pretenure) {
4764  MaybeObject* maybe_array = AllocateJSArray(elements_kind, pretenure);
4765  JSArray* array;
4766  if (!maybe_array->To(&array)) return maybe_array;
4767
4768  array->set_elements(elements);
4769  array->set_length(Smi::FromInt(length));
4770  array->ValidateElements();
4771  return array;
4772}
4773
4774
4775MaybeObject* Heap::AllocateJSProxy(Object* handler, Object* prototype) {
4776  // Allocate map.
4777  // TODO(rossberg): Once we optimize proxies, think about a scheme to share
4778  // maps. Will probably depend on the identity of the handler object, too.
4779  Map* map;
4780  MaybeObject* maybe_map_obj = AllocateMap(JS_PROXY_TYPE, JSProxy::kSize);
4781  if (!maybe_map_obj->To<Map>(&map)) return maybe_map_obj;
4782  map->set_prototype(prototype);
4783
4784  // Allocate the proxy object.
4785  JSProxy* result;
4786  MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
4787  if (!maybe_result->To<JSProxy>(&result)) return maybe_result;
4788  result->InitializeBody(map->instance_size(), Smi::FromInt(0));
4789  result->set_handler(handler);
4790  result->set_hash(undefined_value(), SKIP_WRITE_BARRIER);
4791  return result;
4792}
4793
4794
4795MaybeObject* Heap::AllocateJSFunctionProxy(Object* handler,
4796                                           Object* call_trap,
4797                                           Object* construct_trap,
4798                                           Object* prototype) {
4799  // Allocate map.
4800  // TODO(rossberg): Once we optimize proxies, think about a scheme to share
4801  // maps. Will probably depend on the identity of the handler object, too.
4802  Map* map;
4803  MaybeObject* maybe_map_obj =
4804      AllocateMap(JS_FUNCTION_PROXY_TYPE, JSFunctionProxy::kSize);
4805  if (!maybe_map_obj->To<Map>(&map)) return maybe_map_obj;
4806  map->set_prototype(prototype);
4807
4808  // Allocate the proxy object.
4809  JSFunctionProxy* result;
4810  MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
4811  if (!maybe_result->To<JSFunctionProxy>(&result)) return maybe_result;
4812  result->InitializeBody(map->instance_size(), Smi::FromInt(0));
4813  result->set_handler(handler);
4814  result->set_hash(undefined_value(), SKIP_WRITE_BARRIER);
4815  result->set_call_trap(call_trap);
4816  result->set_construct_trap(construct_trap);
4817  return result;
4818}
4819
4820
4821MaybeObject* Heap::AllocateGlobalObject(JSFunction* constructor) {
4822  ASSERT(constructor->has_initial_map());
4823  Map* map = constructor->initial_map();
4824  ASSERT(map->is_dictionary_map());
4825
4826  // Make sure no field properties are described in the initial map.
4827  // This guarantees us that normalizing the properties does not
4828  // require us to change property values to PropertyCells.
4829  ASSERT(map->NextFreePropertyIndex() == 0);
4830
4831  // Make sure we don't have a ton of pre-allocated slots in the
4832  // global objects. They will be unused once we normalize the object.
4833  ASSERT(map->unused_property_fields() == 0);
4834  ASSERT(map->inobject_properties() == 0);
4835
4836  // Initial size of the backing store to avoid resize of the storage during
4837  // bootstrapping. The size differs between the JS global object ad the
4838  // builtins object.
4839  int initial_size = map->instance_type() == JS_GLOBAL_OBJECT_TYPE ? 64 : 512;
4840
4841  // Allocate a dictionary object for backing storage.
4842  NameDictionary* dictionary;
4843  MaybeObject* maybe_dictionary =
4844      NameDictionary::Allocate(
4845          this,
4846          map->NumberOfOwnDescriptors() * 2 + initial_size);
4847  if (!maybe_dictionary->To(&dictionary)) return maybe_dictionary;
4848
4849  // The global object might be created from an object template with accessors.
4850  // Fill these accessors into the dictionary.
4851  DescriptorArray* descs = map->instance_descriptors();
4852  for (int i = 0; i < map->NumberOfOwnDescriptors(); i++) {
4853    PropertyDetails details = descs->GetDetails(i);
4854    ASSERT(details.type() == CALLBACKS);  // Only accessors are expected.
4855    PropertyDetails d = PropertyDetails(details.attributes(), CALLBACKS, i + 1);
4856    Object* value = descs->GetCallbacksObject(i);
4857    MaybeObject* maybe_value = AllocatePropertyCell(value);
4858    if (!maybe_value->ToObject(&value)) return maybe_value;
4859
4860    MaybeObject* maybe_added = dictionary->Add(descs->GetKey(i), value, d);
4861    if (!maybe_added->To(&dictionary)) return maybe_added;
4862  }
4863
4864  // Allocate the global object and initialize it with the backing store.
4865  JSObject* global;
4866  MaybeObject* maybe_global = Allocate(map, OLD_POINTER_SPACE);
4867  if (!maybe_global->To(&global)) return maybe_global;
4868
4869  InitializeJSObjectFromMap(global, dictionary, map);
4870
4871  // Create a new map for the global object.
4872  Map* new_map;
4873  MaybeObject* maybe_map = map->CopyDropDescriptors();
4874  if (!maybe_map->To(&new_map)) return maybe_map;
4875  new_map->set_dictionary_map(true);
4876
4877  // Set up the global object as a normalized object.
4878  global->set_map(new_map);
4879  global->set_properties(dictionary);
4880
4881  // Make sure result is a global object with properties in dictionary.
4882  ASSERT(global->IsGlobalObject());
4883  ASSERT(!global->HasFastProperties());
4884  return global;
4885}
4886
4887
4888MaybeObject* Heap::CopyJSObject(JSObject* source) {
4889  // Never used to copy functions.  If functions need to be copied we
4890  // have to be careful to clear the literals array.
4891  SLOW_ASSERT(!source->IsJSFunction());
4892
4893  // Make the clone.
4894  Map* map = source->map();
4895  int object_size = map->instance_size();
4896  Object* clone;
4897
4898  WriteBarrierMode wb_mode = UPDATE_WRITE_BARRIER;
4899
4900  // If we're forced to always allocate, we use the general allocation
4901  // functions which may leave us with an object in old space.
4902  if (always_allocate()) {
4903    { MaybeObject* maybe_clone =
4904          AllocateRaw(object_size, NEW_SPACE, OLD_POINTER_SPACE);
4905      if (!maybe_clone->ToObject(&clone)) return maybe_clone;
4906    }
4907    Address clone_address = HeapObject::cast(clone)->address();
4908    CopyBlock(clone_address,
4909              source->address(),
4910              object_size);
4911    // Update write barrier for all fields that lie beyond the header.
4912    RecordWrites(clone_address,
4913                 JSObject::kHeaderSize,
4914                 (object_size - JSObject::kHeaderSize) / kPointerSize);
4915  } else {
4916    wb_mode = SKIP_WRITE_BARRIER;
4917
4918    { MaybeObject* maybe_clone = new_space_.AllocateRaw(object_size);
4919      if (!maybe_clone->ToObject(&clone)) return maybe_clone;
4920    }
4921    SLOW_ASSERT(InNewSpace(clone));
4922    // Since we know the clone is allocated in new space, we can copy
4923    // the contents without worrying about updating the write barrier.
4924    CopyBlock(HeapObject::cast(clone)->address(),
4925              source->address(),
4926              object_size);
4927  }
4928
4929  SLOW_ASSERT(
4930      JSObject::cast(clone)->GetElementsKind() == source->GetElementsKind());
4931  FixedArrayBase* elements = FixedArrayBase::cast(source->elements());
4932  FixedArray* properties = FixedArray::cast(source->properties());
4933  // Update elements if necessary.
4934  if (elements->length() > 0) {
4935    Object* elem;
4936    { MaybeObject* maybe_elem;
4937      if (elements->map() == fixed_cow_array_map()) {
4938        maybe_elem = FixedArray::cast(elements);
4939      } else if (source->HasFastDoubleElements()) {
4940        maybe_elem = CopyFixedDoubleArray(FixedDoubleArray::cast(elements));
4941      } else {
4942        maybe_elem = CopyFixedArray(FixedArray::cast(elements));
4943      }
4944      if (!maybe_elem->ToObject(&elem)) return maybe_elem;
4945    }
4946    JSObject::cast(clone)->set_elements(FixedArrayBase::cast(elem), wb_mode);
4947  }
4948  // Update properties if necessary.
4949  if (properties->length() > 0) {
4950    Object* prop;
4951    { MaybeObject* maybe_prop = CopyFixedArray(properties);
4952      if (!maybe_prop->ToObject(&prop)) return maybe_prop;
4953    }
4954    JSObject::cast(clone)->set_properties(FixedArray::cast(prop), wb_mode);
4955  }
4956  // Return the new clone.
4957  return clone;
4958}
4959
4960
4961MaybeObject* Heap::CopyJSObjectWithAllocationSite(
4962    JSObject* source,
4963    AllocationSite* site) {
4964  // Never used to copy functions.  If functions need to be copied we
4965  // have to be careful to clear the literals array.
4966  SLOW_ASSERT(!source->IsJSFunction());
4967
4968  // Make the clone.
4969  Map* map = source->map();
4970  int object_size = map->instance_size();
4971  Object* clone;
4972
4973  ASSERT(AllocationSite::CanTrack(map->instance_type()));
4974  ASSERT(map->instance_type() == JS_ARRAY_TYPE);
4975  WriteBarrierMode wb_mode = UPDATE_WRITE_BARRIER;
4976
4977  // If we're forced to always allocate, we use the general allocation
4978  // functions which may leave us with an object in old space.
4979  int adjusted_object_size = object_size;
4980  if (always_allocate()) {
4981    // We'll only track origin if we are certain to allocate in new space
4982    const int kMinFreeNewSpaceAfterGC = InitialSemiSpaceSize() * 3/4;
4983    if ((object_size + AllocationMemento::kSize) < kMinFreeNewSpaceAfterGC) {
4984      adjusted_object_size += AllocationMemento::kSize;
4985    }
4986
4987    { MaybeObject* maybe_clone =
4988          AllocateRaw(adjusted_object_size, NEW_SPACE, OLD_POINTER_SPACE);
4989      if (!maybe_clone->ToObject(&clone)) return maybe_clone;
4990    }
4991    Address clone_address = HeapObject::cast(clone)->address();
4992    CopyBlock(clone_address,
4993              source->address(),
4994              object_size);
4995    // Update write barrier for all fields that lie beyond the header.
4996    int write_barrier_offset = adjusted_object_size > object_size
4997        ? JSArray::kSize + AllocationMemento::kSize
4998        : JSObject::kHeaderSize;
4999    if (((object_size - write_barrier_offset) / kPointerSize) > 0) {
5000      RecordWrites(clone_address,
5001                   write_barrier_offset,
5002                   (object_size - write_barrier_offset) / kPointerSize);
5003    }
5004
5005    // Track allocation site information, if we failed to allocate it inline.
5006    if (InNewSpace(clone) &&
5007        adjusted_object_size == object_size) {
5008      MaybeObject* maybe_alloc_memento =
5009          AllocateStruct(ALLOCATION_MEMENTO_TYPE);
5010      AllocationMemento* alloc_memento;
5011      if (maybe_alloc_memento->To(&alloc_memento)) {
5012        alloc_memento->set_map_no_write_barrier(allocation_memento_map());
5013        alloc_memento->set_allocation_site(site, SKIP_WRITE_BARRIER);
5014      }
5015    }
5016  } else {
5017    wb_mode = SKIP_WRITE_BARRIER;
5018    adjusted_object_size += AllocationMemento::kSize;
5019
5020    { MaybeObject* maybe_clone = new_space_.AllocateRaw(adjusted_object_size);
5021      if (!maybe_clone->ToObject(&clone)) return maybe_clone;
5022    }
5023    SLOW_ASSERT(InNewSpace(clone));
5024    // Since we know the clone is allocated in new space, we can copy
5025    // the contents without worrying about updating the write barrier.
5026    CopyBlock(HeapObject::cast(clone)->address(),
5027              source->address(),
5028              object_size);
5029  }
5030
5031  if (adjusted_object_size > object_size) {
5032    AllocationMemento* alloc_memento = reinterpret_cast<AllocationMemento*>(
5033        reinterpret_cast<Address>(clone) + object_size);
5034    alloc_memento->set_map_no_write_barrier(allocation_memento_map());
5035    alloc_memento->set_allocation_site(site, SKIP_WRITE_BARRIER);
5036  }
5037
5038  SLOW_ASSERT(
5039      JSObject::cast(clone)->GetElementsKind() == source->GetElementsKind());
5040  FixedArrayBase* elements = FixedArrayBase::cast(source->elements());
5041  FixedArray* properties = FixedArray::cast(source->properties());
5042  // Update elements if necessary.
5043  if (elements->length() > 0) {
5044    Object* elem;
5045    { MaybeObject* maybe_elem;
5046      if (elements->map() == fixed_cow_array_map()) {
5047        maybe_elem = FixedArray::cast(elements);
5048      } else if (source->HasFastDoubleElements()) {
5049        maybe_elem = CopyFixedDoubleArray(FixedDoubleArray::cast(elements));
5050      } else {
5051        maybe_elem = CopyFixedArray(FixedArray::cast(elements));
5052      }
5053      if (!maybe_elem->ToObject(&elem)) return maybe_elem;
5054    }
5055    JSObject::cast(clone)->set_elements(FixedArrayBase::cast(elem), wb_mode);
5056  }
5057  // Update properties if necessary.
5058  if (properties->length() > 0) {
5059    Object* prop;
5060    { MaybeObject* maybe_prop = CopyFixedArray(properties);
5061      if (!maybe_prop->ToObject(&prop)) return maybe_prop;
5062    }
5063    JSObject::cast(clone)->set_properties(FixedArray::cast(prop), wb_mode);
5064  }
5065  // Return the new clone.
5066  return clone;
5067}
5068
5069
5070MaybeObject* Heap::ReinitializeJSReceiver(
5071    JSReceiver* object, InstanceType type, int size) {
5072  ASSERT(type >= FIRST_JS_OBJECT_TYPE);
5073
5074  // Allocate fresh map.
5075  // TODO(rossberg): Once we optimize proxies, cache these maps.
5076  Map* map;
5077  MaybeObject* maybe = AllocateMap(type, size);
5078  if (!maybe->To<Map>(&map)) return maybe;
5079
5080  // Check that the receiver has at least the size of the fresh object.
5081  int size_difference = object->map()->instance_size() - map->instance_size();
5082  ASSERT(size_difference >= 0);
5083
5084  map->set_prototype(object->map()->prototype());
5085
5086  // Allocate the backing storage for the properties.
5087  int prop_size = map->unused_property_fields() - map->inobject_properties();
5088  Object* properties;
5089  maybe = AllocateFixedArray(prop_size, TENURED);
5090  if (!maybe->ToObject(&properties)) return maybe;
5091
5092  // Functions require some allocation, which might fail here.
5093  SharedFunctionInfo* shared = NULL;
5094  if (type == JS_FUNCTION_TYPE) {
5095    String* name;
5096    maybe =
5097        InternalizeOneByteString(STATIC_ASCII_VECTOR("<freezing call trap>"));
5098    if (!maybe->To<String>(&name)) return maybe;
5099    maybe = AllocateSharedFunctionInfo(name);
5100    if (!maybe->To<SharedFunctionInfo>(&shared)) return maybe;
5101  }
5102
5103  // Because of possible retries of this function after failure,
5104  // we must NOT fail after this point, where we have changed the type!
5105
5106  // Reset the map for the object.
5107  object->set_map(map);
5108  JSObject* jsobj = JSObject::cast(object);
5109
5110  // Reinitialize the object from the constructor map.
5111  InitializeJSObjectFromMap(jsobj, FixedArray::cast(properties), map);
5112
5113  // Functions require some minimal initialization.
5114  if (type == JS_FUNCTION_TYPE) {
5115    map->set_function_with_prototype(true);
5116    InitializeFunction(JSFunction::cast(object), shared, the_hole_value());
5117    JSFunction::cast(object)->set_context(
5118        isolate()->context()->native_context());
5119  }
5120
5121  // Put in filler if the new object is smaller than the old.
5122  if (size_difference > 0) {
5123    CreateFillerObjectAt(
5124        object->address() + map->instance_size(), size_difference);
5125  }
5126
5127  return object;
5128}
5129
5130
5131MaybeObject* Heap::ReinitializeJSGlobalProxy(JSFunction* constructor,
5132                                             JSGlobalProxy* object) {
5133  ASSERT(constructor->has_initial_map());
5134  Map* map = constructor->initial_map();
5135
5136  // Check that the already allocated object has the same size and type as
5137  // objects allocated using the constructor.
5138  ASSERT(map->instance_size() == object->map()->instance_size());
5139  ASSERT(map->instance_type() == object->map()->instance_type());
5140
5141  // Allocate the backing storage for the properties.
5142  int prop_size = map->unused_property_fields() - map->inobject_properties();
5143  Object* properties;
5144  { MaybeObject* maybe_properties = AllocateFixedArray(prop_size, TENURED);
5145    if (!maybe_properties->ToObject(&properties)) return maybe_properties;
5146  }
5147
5148  // Reset the map for the object.
5149  object->set_map(constructor->initial_map());
5150
5151  // Reinitialize the object from the constructor map.
5152  InitializeJSObjectFromMap(object, FixedArray::cast(properties), map);
5153  return object;
5154}
5155
5156
5157MaybeObject* Heap::AllocateStringFromOneByte(Vector<const uint8_t> string,
5158                                           PretenureFlag pretenure) {
5159  int length = string.length();
5160  if (length == 1) {
5161    return Heap::LookupSingleCharacterStringFromCode(string[0]);
5162  }
5163  Object* result;
5164  { MaybeObject* maybe_result =
5165        AllocateRawOneByteString(string.length(), pretenure);
5166    if (!maybe_result->ToObject(&result)) return maybe_result;
5167  }
5168
5169  // Copy the characters into the new object.
5170  CopyChars(SeqOneByteString::cast(result)->GetChars(),
5171            string.start(),
5172            length);
5173  return result;
5174}
5175
5176
5177MaybeObject* Heap::AllocateStringFromUtf8Slow(Vector<const char> string,
5178                                              int non_ascii_start,
5179                                              PretenureFlag pretenure) {
5180  // Continue counting the number of characters in the UTF-8 string, starting
5181  // from the first non-ascii character or word.
5182  Access<UnicodeCache::Utf8Decoder>
5183      decoder(isolate_->unicode_cache()->utf8_decoder());
5184  decoder->Reset(string.start() + non_ascii_start,
5185                 string.length() - non_ascii_start);
5186  int utf16_length = decoder->Utf16Length();
5187  ASSERT(utf16_length > 0);
5188  // Allocate string.
5189  Object* result;
5190  {
5191    int chars = non_ascii_start + utf16_length;
5192    MaybeObject* maybe_result = AllocateRawTwoByteString(chars, pretenure);
5193    if (!maybe_result->ToObject(&result)) return maybe_result;
5194  }
5195  // Convert and copy the characters into the new object.
5196  SeqTwoByteString* twobyte = SeqTwoByteString::cast(result);
5197  // Copy ascii portion.
5198  uint16_t* data = twobyte->GetChars();
5199  if (non_ascii_start != 0) {
5200    const char* ascii_data = string.start();
5201    for (int i = 0; i < non_ascii_start; i++) {
5202      *data++ = *ascii_data++;
5203    }
5204  }
5205  // Now write the remainder.
5206  decoder->WriteUtf16(data, utf16_length);
5207  return result;
5208}
5209
5210
5211MaybeObject* Heap::AllocateStringFromTwoByte(Vector<const uc16> string,
5212                                             PretenureFlag pretenure) {
5213  // Check if the string is an ASCII string.
5214  Object* result;
5215  int length = string.length();
5216  const uc16* start = string.start();
5217
5218  if (String::IsOneByte(start, length)) {
5219    MaybeObject* maybe_result = AllocateRawOneByteString(length, pretenure);
5220    if (!maybe_result->ToObject(&result)) return maybe_result;
5221    CopyChars(SeqOneByteString::cast(result)->GetChars(), start, length);
5222  } else {  // It's not a one byte string.
5223    MaybeObject* maybe_result = AllocateRawTwoByteString(length, pretenure);
5224    if (!maybe_result->ToObject(&result)) return maybe_result;
5225    CopyChars(SeqTwoByteString::cast(result)->GetChars(), start, length);
5226  }
5227  return result;
5228}
5229
5230
5231Map* Heap::InternalizedStringMapForString(String* string) {
5232  // If the string is in new space it cannot be used as internalized.
5233  if (InNewSpace(string)) return NULL;
5234
5235  // Find the corresponding internalized string map for strings.
5236  switch (string->map()->instance_type()) {
5237    case STRING_TYPE: return internalized_string_map();
5238    case ASCII_STRING_TYPE: return ascii_internalized_string_map();
5239    case CONS_STRING_TYPE: return cons_internalized_string_map();
5240    case CONS_ASCII_STRING_TYPE: return cons_ascii_internalized_string_map();
5241    case EXTERNAL_STRING_TYPE: return external_internalized_string_map();
5242    case EXTERNAL_ASCII_STRING_TYPE:
5243      return external_ascii_internalized_string_map();
5244    case EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE:
5245      return external_internalized_string_with_one_byte_data_map();
5246    case SHORT_EXTERNAL_STRING_TYPE:
5247      return short_external_internalized_string_map();
5248    case SHORT_EXTERNAL_ASCII_STRING_TYPE:
5249      return short_external_ascii_internalized_string_map();
5250    case SHORT_EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE:
5251      return short_external_internalized_string_with_one_byte_data_map();
5252    default: return NULL;  // No match found.
5253  }
5254}
5255
5256
5257static inline void WriteOneByteData(Vector<const char> vector,
5258                                    uint8_t* chars,
5259                                    int len) {
5260  // Only works for ascii.
5261  ASSERT(vector.length() == len);
5262  OS::MemCopy(chars, vector.start(), len);
5263}
5264
5265static inline void WriteTwoByteData(Vector<const char> vector,
5266                                    uint16_t* chars,
5267                                    int len) {
5268  const uint8_t* stream = reinterpret_cast<const uint8_t*>(vector.start());
5269  unsigned stream_length = vector.length();
5270  while (stream_length != 0) {
5271    unsigned consumed = 0;
5272    uint32_t c = unibrow::Utf8::ValueOf(stream, stream_length, &consumed);
5273    ASSERT(c != unibrow::Utf8::kBadChar);
5274    ASSERT(consumed <= stream_length);
5275    stream_length -= consumed;
5276    stream += consumed;
5277    if (c > unibrow::Utf16::kMaxNonSurrogateCharCode) {
5278      len -= 2;
5279      if (len < 0) break;
5280      *chars++ = unibrow::Utf16::LeadSurrogate(c);
5281      *chars++ = unibrow::Utf16::TrailSurrogate(c);
5282    } else {
5283      len -= 1;
5284      if (len < 0) break;
5285      *chars++ = c;
5286    }
5287  }
5288  ASSERT(stream_length == 0);
5289  ASSERT(len == 0);
5290}
5291
5292
5293static inline void WriteOneByteData(String* s, uint8_t* chars, int len) {
5294  ASSERT(s->length() == len);
5295  String::WriteToFlat(s, chars, 0, len);
5296}
5297
5298
5299static inline void WriteTwoByteData(String* s, uint16_t* chars, int len) {
5300  ASSERT(s->length() == len);
5301  String::WriteToFlat(s, chars, 0, len);
5302}
5303
5304
5305template<bool is_one_byte, typename T>
5306MaybeObject* Heap::AllocateInternalizedStringImpl(
5307    T t, int chars, uint32_t hash_field) {
5308  ASSERT(chars >= 0);
5309  // Compute map and object size.
5310  int size;
5311  Map* map;
5312
5313  if (is_one_byte) {
5314    if (chars > SeqOneByteString::kMaxLength) {
5315      return Failure::OutOfMemoryException(0x9);
5316    }
5317    map = ascii_internalized_string_map();
5318    size = SeqOneByteString::SizeFor(chars);
5319  } else {
5320    if (chars > SeqTwoByteString::kMaxLength) {
5321      return Failure::OutOfMemoryException(0xa);
5322    }
5323    map = internalized_string_map();
5324    size = SeqTwoByteString::SizeFor(chars);
5325  }
5326
5327  // Allocate string.
5328  Object* result;
5329  { MaybeObject* maybe_result = (size > Page::kMaxNonCodeHeapObjectSize)
5330                   ? lo_space_->AllocateRaw(size, NOT_EXECUTABLE)
5331                   : old_data_space_->AllocateRaw(size);
5332    if (!maybe_result->ToObject(&result)) return maybe_result;
5333  }
5334
5335  reinterpret_cast<HeapObject*>(result)->set_map_no_write_barrier(map);
5336  // Set length and hash fields of the allocated string.
5337  String* answer = String::cast(result);
5338  answer->set_length(chars);
5339  answer->set_hash_field(hash_field);
5340
5341  ASSERT_EQ(size, answer->Size());
5342
5343  if (is_one_byte) {
5344    WriteOneByteData(t, SeqOneByteString::cast(answer)->GetChars(), chars);
5345  } else {
5346    WriteTwoByteData(t, SeqTwoByteString::cast(answer)->GetChars(), chars);
5347  }
5348  return answer;
5349}
5350
5351
5352// Need explicit instantiations.
5353template
5354MaybeObject* Heap::AllocateInternalizedStringImpl<true>(String*, int, uint32_t);
5355template
5356MaybeObject* Heap::AllocateInternalizedStringImpl<false>(
5357    String*, int, uint32_t);
5358template
5359MaybeObject* Heap::AllocateInternalizedStringImpl<false>(
5360    Vector<const char>, int, uint32_t);
5361
5362
5363MaybeObject* Heap::AllocateRawOneByteString(int length,
5364                                            PretenureFlag pretenure) {
5365  if (length < 0 || length > SeqOneByteString::kMaxLength) {
5366    return Failure::OutOfMemoryException(0xb);
5367  }
5368  int size = SeqOneByteString::SizeFor(length);
5369  ASSERT(size <= SeqOneByteString::kMaxSize);
5370  AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
5371  AllocationSpace retry_space = OLD_DATA_SPACE;
5372
5373  if (size > Page::kMaxNonCodeHeapObjectSize) {
5374    // Allocate in large object space, retry space will be ignored.
5375    space = LO_SPACE;
5376  }
5377
5378  Object* result;
5379  { MaybeObject* maybe_result = AllocateRaw(size, space, retry_space);
5380    if (!maybe_result->ToObject(&result)) return maybe_result;
5381  }
5382
5383  // Partially initialize the object.
5384  HeapObject::cast(result)->set_map_no_write_barrier(ascii_string_map());
5385  String::cast(result)->set_length(length);
5386  String::cast(result)->set_hash_field(String::kEmptyHashField);
5387  ASSERT_EQ(size, HeapObject::cast(result)->Size());
5388
5389  return result;
5390}
5391
5392
5393MaybeObject* Heap::AllocateRawTwoByteString(int length,
5394                                            PretenureFlag pretenure) {
5395  if (length < 0 || length > SeqTwoByteString::kMaxLength) {
5396    return Failure::OutOfMemoryException(0xc);
5397  }
5398  int size = SeqTwoByteString::SizeFor(length);
5399  ASSERT(size <= SeqTwoByteString::kMaxSize);
5400  AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
5401  AllocationSpace retry_space = OLD_DATA_SPACE;
5402
5403  if (size > Page::kMaxNonCodeHeapObjectSize) {
5404    // Allocate in large object space, retry space will be ignored.
5405    space = LO_SPACE;
5406  }
5407
5408  Object* result;
5409  { MaybeObject* maybe_result = AllocateRaw(size, space, retry_space);
5410    if (!maybe_result->ToObject(&result)) return maybe_result;
5411  }
5412
5413  // Partially initialize the object.
5414  HeapObject::cast(result)->set_map_no_write_barrier(string_map());
5415  String::cast(result)->set_length(length);
5416  String::cast(result)->set_hash_field(String::kEmptyHashField);
5417  ASSERT_EQ(size, HeapObject::cast(result)->Size());
5418  return result;
5419}
5420
5421
5422MaybeObject* Heap::AllocateJSArray(
5423    ElementsKind elements_kind,
5424    PretenureFlag pretenure) {
5425  Context* native_context = isolate()->context()->native_context();
5426  JSFunction* array_function = native_context->array_function();
5427  Map* map = array_function->initial_map();
5428  Map* transition_map = isolate()->get_initial_js_array_map(elements_kind);
5429  if (transition_map != NULL) map = transition_map;
5430  return AllocateJSObjectFromMap(map, pretenure);
5431}
5432
5433
5434MaybeObject* Heap::AllocateJSArrayWithAllocationSite(
5435    ElementsKind elements_kind,
5436    Handle<AllocationSite> allocation_site) {
5437  Context* native_context = isolate()->context()->native_context();
5438  JSFunction* array_function = native_context->array_function();
5439  Map* map = array_function->initial_map();
5440  Object* maybe_map_array = native_context->js_array_maps();
5441  if (!maybe_map_array->IsUndefined()) {
5442    Object* maybe_transitioned_map =
5443        FixedArray::cast(maybe_map_array)->get(elements_kind);
5444    if (!maybe_transitioned_map->IsUndefined()) {
5445      map = Map::cast(maybe_transitioned_map);
5446    }
5447  }
5448  return AllocateJSObjectFromMapWithAllocationSite(map, allocation_site);
5449}
5450
5451
5452MaybeObject* Heap::AllocateEmptyFixedArray() {
5453  int size = FixedArray::SizeFor(0);
5454  Object* result;
5455  { MaybeObject* maybe_result =
5456        AllocateRaw(size, OLD_DATA_SPACE, OLD_DATA_SPACE);
5457    if (!maybe_result->ToObject(&result)) return maybe_result;
5458  }
5459  // Initialize the object.
5460  reinterpret_cast<FixedArray*>(result)->set_map_no_write_barrier(
5461      fixed_array_map());
5462  reinterpret_cast<FixedArray*>(result)->set_length(0);
5463  return result;
5464}
5465
5466
5467MaybeObject* Heap::AllocateEmptyExternalArray(ExternalArrayType array_type) {
5468  return AllocateExternalArray(0, array_type, NULL, TENURED);
5469}
5470
5471
5472MaybeObject* Heap::AllocateRawFixedArray(int length) {
5473  if (length < 0 || length > FixedArray::kMaxLength) {
5474    return Failure::OutOfMemoryException(0xd);
5475  }
5476  ASSERT(length > 0);
5477  // Use the general function if we're forced to always allocate.
5478  if (always_allocate()) return AllocateFixedArray(length, TENURED);
5479  // Allocate the raw data for a fixed array.
5480  int size = FixedArray::SizeFor(length);
5481  return size <= Page::kMaxNonCodeHeapObjectSize
5482      ? new_space_.AllocateRaw(size)
5483      : lo_space_->AllocateRaw(size, NOT_EXECUTABLE);
5484}
5485
5486
5487MaybeObject* Heap::CopyFixedArrayWithMap(FixedArray* src, Map* map) {
5488  int len = src->length();
5489  Object* obj;
5490  { MaybeObject* maybe_obj = AllocateRawFixedArray(len);
5491    if (!maybe_obj->ToObject(&obj)) return maybe_obj;
5492  }
5493  if (InNewSpace(obj)) {
5494    HeapObject* dst = HeapObject::cast(obj);
5495    dst->set_map_no_write_barrier(map);
5496    CopyBlock(dst->address() + kPointerSize,
5497              src->address() + kPointerSize,
5498              FixedArray::SizeFor(len) - kPointerSize);
5499    return obj;
5500  }
5501  HeapObject::cast(obj)->set_map_no_write_barrier(map);
5502  FixedArray* result = FixedArray::cast(obj);
5503  result->set_length(len);
5504
5505  // Copy the content
5506  DisallowHeapAllocation no_gc;
5507  WriteBarrierMode mode = result->GetWriteBarrierMode(no_gc);
5508  for (int i = 0; i < len; i++) result->set(i, src->get(i), mode);
5509  return result;
5510}
5511
5512
5513MaybeObject* Heap::CopyFixedDoubleArrayWithMap(FixedDoubleArray* src,
5514                                               Map* map) {
5515  int len = src->length();
5516  Object* obj;
5517  { MaybeObject* maybe_obj = AllocateRawFixedDoubleArray(len, NOT_TENURED);
5518    if (!maybe_obj->ToObject(&obj)) return maybe_obj;
5519  }
5520  HeapObject* dst = HeapObject::cast(obj);
5521  dst->set_map_no_write_barrier(map);
5522  CopyBlock(
5523      dst->address() + FixedDoubleArray::kLengthOffset,
5524      src->address() + FixedDoubleArray::kLengthOffset,
5525      FixedDoubleArray::SizeFor(len) - FixedDoubleArray::kLengthOffset);
5526  return obj;
5527}
5528
5529
5530MaybeObject* Heap::AllocateFixedArray(int length) {
5531  ASSERT(length >= 0);
5532  if (length == 0) return empty_fixed_array();
5533  Object* result;
5534  { MaybeObject* maybe_result = AllocateRawFixedArray(length);
5535    if (!maybe_result->ToObject(&result)) return maybe_result;
5536  }
5537  // Initialize header.
5538  FixedArray* array = reinterpret_cast<FixedArray*>(result);
5539  array->set_map_no_write_barrier(fixed_array_map());
5540  array->set_length(length);
5541  // Initialize body.
5542  ASSERT(!InNewSpace(undefined_value()));
5543  MemsetPointer(array->data_start(), undefined_value(), length);
5544  return result;
5545}
5546
5547
5548MaybeObject* Heap::AllocateRawFixedArray(int length, PretenureFlag pretenure) {
5549  if (length < 0 || length > FixedArray::kMaxLength) {
5550    return Failure::OutOfMemoryException(0xe);
5551  }
5552  int size = FixedArray::SizeFor(length);
5553  AllocationSpace space =
5554      (pretenure == TENURED) ? OLD_POINTER_SPACE : NEW_SPACE;
5555  AllocationSpace retry_space = OLD_POINTER_SPACE;
5556
5557  if (size > Page::kMaxNonCodeHeapObjectSize) {
5558    // Allocate in large object space, retry space will be ignored.
5559    space = LO_SPACE;
5560  }
5561
5562  return AllocateRaw(size, space, retry_space);
5563}
5564
5565
5566MUST_USE_RESULT static MaybeObject* AllocateFixedArrayWithFiller(
5567    Heap* heap,
5568    int length,
5569    PretenureFlag pretenure,
5570    Object* filler) {
5571  ASSERT(length >= 0);
5572  ASSERT(heap->empty_fixed_array()->IsFixedArray());
5573  if (length == 0) return heap->empty_fixed_array();
5574
5575  ASSERT(!heap->InNewSpace(filler));
5576  Object* result;
5577  { MaybeObject* maybe_result = heap->AllocateRawFixedArray(length, pretenure);
5578    if (!maybe_result->ToObject(&result)) return maybe_result;
5579  }
5580
5581  HeapObject::cast(result)->set_map_no_write_barrier(heap->fixed_array_map());
5582  FixedArray* array = FixedArray::cast(result);
5583  array->set_length(length);
5584  MemsetPointer(array->data_start(), filler, length);
5585  return array;
5586}
5587
5588
5589MaybeObject* Heap::AllocateFixedArray(int length, PretenureFlag pretenure) {
5590  return AllocateFixedArrayWithFiller(this,
5591                                      length,
5592                                      pretenure,
5593                                      undefined_value());
5594}
5595
5596
5597MaybeObject* Heap::AllocateFixedArrayWithHoles(int length,
5598                                               PretenureFlag pretenure) {
5599  return AllocateFixedArrayWithFiller(this,
5600                                      length,
5601                                      pretenure,
5602                                      the_hole_value());
5603}
5604
5605
5606MaybeObject* Heap::AllocateUninitializedFixedArray(int length) {
5607  if (length == 0) return empty_fixed_array();
5608
5609  Object* obj;
5610  { MaybeObject* maybe_obj = AllocateRawFixedArray(length);
5611    if (!maybe_obj->ToObject(&obj)) return maybe_obj;
5612  }
5613
5614  reinterpret_cast<FixedArray*>(obj)->set_map_no_write_barrier(
5615      fixed_array_map());
5616  FixedArray::cast(obj)->set_length(length);
5617  return obj;
5618}
5619
5620
5621MaybeObject* Heap::AllocateEmptyFixedDoubleArray() {
5622  int size = FixedDoubleArray::SizeFor(0);
5623  Object* result;
5624  { MaybeObject* maybe_result =
5625        AllocateRaw(size, OLD_DATA_SPACE, OLD_DATA_SPACE);
5626    if (!maybe_result->ToObject(&result)) return maybe_result;
5627  }
5628  // Initialize the object.
5629  reinterpret_cast<FixedDoubleArray*>(result)->set_map_no_write_barrier(
5630      fixed_double_array_map());
5631  reinterpret_cast<FixedDoubleArray*>(result)->set_length(0);
5632  return result;
5633}
5634
5635
5636MaybeObject* Heap::AllocateUninitializedFixedDoubleArray(
5637    int length,
5638    PretenureFlag pretenure) {
5639  if (length == 0) return empty_fixed_array();
5640
5641  Object* elements_object;
5642  MaybeObject* maybe_obj = AllocateRawFixedDoubleArray(length, pretenure);
5643  if (!maybe_obj->ToObject(&elements_object)) return maybe_obj;
5644  FixedDoubleArray* elements =
5645      reinterpret_cast<FixedDoubleArray*>(elements_object);
5646
5647  elements->set_map_no_write_barrier(fixed_double_array_map());
5648  elements->set_length(length);
5649  return elements;
5650}
5651
5652
5653MaybeObject* Heap::AllocateFixedDoubleArrayWithHoles(
5654    int length,
5655    PretenureFlag pretenure) {
5656  if (length == 0) return empty_fixed_array();
5657
5658  Object* elements_object;
5659  MaybeObject* maybe_obj = AllocateRawFixedDoubleArray(length, pretenure);
5660  if (!maybe_obj->ToObject(&elements_object)) return maybe_obj;
5661  FixedDoubleArray* elements =
5662      reinterpret_cast<FixedDoubleArray*>(elements_object);
5663
5664  for (int i = 0; i < length; ++i) {
5665    elements->set_the_hole(i);
5666  }
5667
5668  elements->set_map_no_write_barrier(fixed_double_array_map());
5669  elements->set_length(length);
5670  return elements;
5671}
5672
5673
5674MaybeObject* Heap::AllocateRawFixedDoubleArray(int length,
5675                                               PretenureFlag pretenure) {
5676  if (length < 0 || length > FixedDoubleArray::kMaxLength) {
5677    return Failure::OutOfMemoryException(0xf);
5678  }
5679  int size = FixedDoubleArray::SizeFor(length);
5680  AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
5681  AllocationSpace retry_space = OLD_DATA_SPACE;
5682
5683#ifndef V8_HOST_ARCH_64_BIT
5684  size += kPointerSize;
5685#endif
5686
5687  if (size > Page::kMaxNonCodeHeapObjectSize) {
5688    // Allocate in large object space, retry space will be ignored.
5689    space = LO_SPACE;
5690  }
5691
5692  HeapObject* object;
5693  { MaybeObject* maybe_object = AllocateRaw(size, space, retry_space);
5694    if (!maybe_object->To<HeapObject>(&object)) return maybe_object;
5695  }
5696
5697  return EnsureDoubleAligned(this, object, size);
5698}
5699
5700
5701MaybeObject* Heap::AllocateHashTable(int length, PretenureFlag pretenure) {
5702  Object* result;
5703  { MaybeObject* maybe_result = AllocateFixedArray(length, pretenure);
5704    if (!maybe_result->ToObject(&result)) return maybe_result;
5705  }
5706  reinterpret_cast<HeapObject*>(result)->set_map_no_write_barrier(
5707      hash_table_map());
5708  ASSERT(result->IsHashTable());
5709  return result;
5710}
5711
5712
5713MaybeObject* Heap::AllocateSymbol() {
5714  // Statically ensure that it is safe to allocate symbols in paged spaces.
5715  STATIC_ASSERT(Symbol::kSize <= Page::kNonCodeObjectAreaSize);
5716
5717  Object* result;
5718  MaybeObject* maybe =
5719      AllocateRaw(Symbol::kSize, OLD_POINTER_SPACE, OLD_POINTER_SPACE);
5720  if (!maybe->ToObject(&result)) return maybe;
5721
5722  HeapObject::cast(result)->set_map_no_write_barrier(symbol_map());
5723
5724  // Generate a random hash value.
5725  int hash;
5726  int attempts = 0;
5727  do {
5728    hash = V8::RandomPrivate(isolate()) & Name::kHashBitMask;
5729    attempts++;
5730  } while (hash == 0 && attempts < 30);
5731  if (hash == 0) hash = 1;  // never return 0
5732
5733  Symbol::cast(result)->set_hash_field(
5734      Name::kIsNotArrayIndexMask | (hash << Name::kHashShift));
5735  Symbol::cast(result)->set_name(undefined_value());
5736
5737  ASSERT(result->IsSymbol());
5738  return result;
5739}
5740
5741
5742MaybeObject* Heap::AllocateNativeContext() {
5743  Object* result;
5744  { MaybeObject* maybe_result =
5745        AllocateFixedArray(Context::NATIVE_CONTEXT_SLOTS);
5746    if (!maybe_result->ToObject(&result)) return maybe_result;
5747  }
5748  Context* context = reinterpret_cast<Context*>(result);
5749  context->set_map_no_write_barrier(native_context_map());
5750  context->set_js_array_maps(undefined_value());
5751  ASSERT(context->IsNativeContext());
5752  ASSERT(result->IsContext());
5753  return result;
5754}
5755
5756
5757MaybeObject* Heap::AllocateGlobalContext(JSFunction* function,
5758                                         ScopeInfo* scope_info) {
5759  Object* result;
5760  { MaybeObject* maybe_result =
5761        AllocateFixedArray(scope_info->ContextLength(), TENURED);
5762    if (!maybe_result->ToObject(&result)) return maybe_result;
5763  }
5764  Context* context = reinterpret_cast<Context*>(result);
5765  context->set_map_no_write_barrier(global_context_map());
5766  context->set_closure(function);
5767  context->set_previous(function->context());
5768  context->set_extension(scope_info);
5769  context->set_global_object(function->context()->global_object());
5770  ASSERT(context->IsGlobalContext());
5771  ASSERT(result->IsContext());
5772  return context;
5773}
5774
5775
5776MaybeObject* Heap::AllocateModuleContext(ScopeInfo* scope_info) {
5777  Object* result;
5778  { MaybeObject* maybe_result =
5779        AllocateFixedArray(scope_info->ContextLength(), TENURED);
5780    if (!maybe_result->ToObject(&result)) return maybe_result;
5781  }
5782  Context* context = reinterpret_cast<Context*>(result);
5783  context->set_map_no_write_barrier(module_context_map());
5784  // Instance link will be set later.
5785  context->set_extension(Smi::FromInt(0));
5786  return context;
5787}
5788
5789
5790MaybeObject* Heap::AllocateFunctionContext(int length, JSFunction* function) {
5791  ASSERT(length >= Context::MIN_CONTEXT_SLOTS);
5792  Object* result;
5793  { MaybeObject* maybe_result = AllocateFixedArray(length);
5794    if (!maybe_result->ToObject(&result)) return maybe_result;
5795  }
5796  Context* context = reinterpret_cast<Context*>(result);
5797  context->set_map_no_write_barrier(function_context_map());
5798  context->set_closure(function);
5799  context->set_previous(function->context());
5800  context->set_extension(Smi::FromInt(0));
5801  context->set_global_object(function->context()->global_object());
5802  return context;
5803}
5804
5805
5806MaybeObject* Heap::AllocateCatchContext(JSFunction* function,
5807                                        Context* previous,
5808                                        String* name,
5809                                        Object* thrown_object) {
5810  STATIC_ASSERT(Context::MIN_CONTEXT_SLOTS == Context::THROWN_OBJECT_INDEX);
5811  Object* result;
5812  { MaybeObject* maybe_result =
5813        AllocateFixedArray(Context::MIN_CONTEXT_SLOTS + 1);
5814    if (!maybe_result->ToObject(&result)) return maybe_result;
5815  }
5816  Context* context = reinterpret_cast<Context*>(result);
5817  context->set_map_no_write_barrier(catch_context_map());
5818  context->set_closure(function);
5819  context->set_previous(previous);
5820  context->set_extension(name);
5821  context->set_global_object(previous->global_object());
5822  context->set(Context::THROWN_OBJECT_INDEX, thrown_object);
5823  return context;
5824}
5825
5826
5827MaybeObject* Heap::AllocateWithContext(JSFunction* function,
5828                                       Context* previous,
5829                                       JSReceiver* extension) {
5830  Object* result;
5831  { MaybeObject* maybe_result = AllocateFixedArray(Context::MIN_CONTEXT_SLOTS);
5832    if (!maybe_result->ToObject(&result)) return maybe_result;
5833  }
5834  Context* context = reinterpret_cast<Context*>(result);
5835  context->set_map_no_write_barrier(with_context_map());
5836  context->set_closure(function);
5837  context->set_previous(previous);
5838  context->set_extension(extension);
5839  context->set_global_object(previous->global_object());
5840  return context;
5841}
5842
5843
5844MaybeObject* Heap::AllocateBlockContext(JSFunction* function,
5845                                        Context* previous,
5846                                        ScopeInfo* scope_info) {
5847  Object* result;
5848  { MaybeObject* maybe_result =
5849        AllocateFixedArrayWithHoles(scope_info->ContextLength());
5850    if (!maybe_result->ToObject(&result)) return maybe_result;
5851  }
5852  Context* context = reinterpret_cast<Context*>(result);
5853  context->set_map_no_write_barrier(block_context_map());
5854  context->set_closure(function);
5855  context->set_previous(previous);
5856  context->set_extension(scope_info);
5857  context->set_global_object(previous->global_object());
5858  return context;
5859}
5860
5861
5862MaybeObject* Heap::AllocateScopeInfo(int length) {
5863  FixedArray* scope_info;
5864  MaybeObject* maybe_scope_info = AllocateFixedArray(length, TENURED);
5865  if (!maybe_scope_info->To(&scope_info)) return maybe_scope_info;
5866  scope_info->set_map_no_write_barrier(scope_info_map());
5867  return scope_info;
5868}
5869
5870
5871MaybeObject* Heap::AllocateExternal(void* value) {
5872  Foreign* foreign;
5873  { MaybeObject* maybe_result = AllocateForeign(static_cast<Address>(value));
5874    if (!maybe_result->To(&foreign)) return maybe_result;
5875  }
5876  JSObject* external;
5877  { MaybeObject* maybe_result = AllocateJSObjectFromMap(external_map());
5878    if (!maybe_result->To(&external)) return maybe_result;
5879  }
5880  external->SetInternalField(0, foreign);
5881  return external;
5882}
5883
5884
5885MaybeObject* Heap::AllocateStruct(InstanceType type) {
5886  Map* map;
5887  switch (type) {
5888#define MAKE_CASE(NAME, Name, name) \
5889    case NAME##_TYPE: map = name##_map(); break;
5890STRUCT_LIST(MAKE_CASE)
5891#undef MAKE_CASE
5892    default:
5893      UNREACHABLE();
5894      return Failure::InternalError();
5895  }
5896  int size = map->instance_size();
5897  AllocationSpace space =
5898      (size > Page::kMaxNonCodeHeapObjectSize) ? LO_SPACE : OLD_POINTER_SPACE;
5899  Object* result;
5900  { MaybeObject* maybe_result = Allocate(map, space);
5901    if (!maybe_result->ToObject(&result)) return maybe_result;
5902  }
5903  Struct::cast(result)->InitializeBody(size);
5904  return result;
5905}
5906
5907
5908bool Heap::IsHeapIterable() {
5909  return (!old_pointer_space()->was_swept_conservatively() &&
5910          !old_data_space()->was_swept_conservatively());
5911}
5912
5913
5914void Heap::EnsureHeapIsIterable() {
5915  ASSERT(AllowHeapAllocation::IsAllowed());
5916  if (!IsHeapIterable()) {
5917    CollectAllGarbage(kMakeHeapIterableMask, "Heap::EnsureHeapIsIterable");
5918  }
5919  ASSERT(IsHeapIterable());
5920}
5921
5922
5923void Heap::AdvanceIdleIncrementalMarking(intptr_t step_size) {
5924  incremental_marking()->Step(step_size,
5925                              IncrementalMarking::NO_GC_VIA_STACK_GUARD);
5926
5927  if (incremental_marking()->IsComplete()) {
5928    bool uncommit = false;
5929    if (gc_count_at_last_idle_gc_ == gc_count_) {
5930      // No GC since the last full GC, the mutator is probably not active.
5931      isolate_->compilation_cache()->Clear();
5932      uncommit = true;
5933    }
5934    CollectAllGarbage(kNoGCFlags, "idle notification: finalize incremental");
5935    mark_sweeps_since_idle_round_started_++;
5936    gc_count_at_last_idle_gc_ = gc_count_;
5937    if (uncommit) {
5938      new_space_.Shrink();
5939      UncommitFromSpace();
5940    }
5941  }
5942}
5943
5944
5945bool Heap::IdleNotification(int hint) {
5946  // Hints greater than this value indicate that
5947  // the embedder is requesting a lot of GC work.
5948  const int kMaxHint = 1000;
5949  const int kMinHintForIncrementalMarking = 10;
5950  // Minimal hint that allows to do full GC.
5951  const int kMinHintForFullGC = 100;
5952  intptr_t size_factor = Min(Max(hint, 20), kMaxHint) / 4;
5953  // The size factor is in range [5..250]. The numbers here are chosen from
5954  // experiments. If you changes them, make sure to test with
5955  // chrome/performance_ui_tests --gtest_filter="GeneralMixMemoryTest.*
5956  intptr_t step_size =
5957      size_factor * IncrementalMarking::kAllocatedThreshold;
5958
5959  if (contexts_disposed_ > 0) {
5960    if (hint >= kMaxHint) {
5961      // The embedder is requesting a lot of GC work after context disposal,
5962      // we age inline caches so that they don't keep objects from
5963      // the old context alive.
5964      AgeInlineCaches();
5965    }
5966    int mark_sweep_time = Min(TimeMarkSweepWouldTakeInMs(), 1000);
5967    if (hint >= mark_sweep_time && !FLAG_expose_gc &&
5968        incremental_marking()->IsStopped()) {
5969      HistogramTimerScope scope(isolate_->counters()->gc_context());
5970      CollectAllGarbage(kReduceMemoryFootprintMask,
5971                        "idle notification: contexts disposed");
5972    } else {
5973      AdvanceIdleIncrementalMarking(step_size);
5974      contexts_disposed_ = 0;
5975    }
5976    // After context disposal there is likely a lot of garbage remaining, reset
5977    // the idle notification counters in order to trigger more incremental GCs
5978    // on subsequent idle notifications.
5979    StartIdleRound();
5980    return false;
5981  }
5982
5983  if (!FLAG_incremental_marking || FLAG_expose_gc || Serializer::enabled()) {
5984    return IdleGlobalGC();
5985  }
5986
5987  // By doing small chunks of GC work in each IdleNotification,
5988  // perform a round of incremental GCs and after that wait until
5989  // the mutator creates enough garbage to justify a new round.
5990  // An incremental GC progresses as follows:
5991  // 1. many incremental marking steps,
5992  // 2. one old space mark-sweep-compact,
5993  // 3. many lazy sweep steps.
5994  // Use mark-sweep-compact events to count incremental GCs in a round.
5995
5996  if (incremental_marking()->IsStopped()) {
5997    if (!mark_compact_collector()->AreSweeperThreadsActivated() &&
5998        !IsSweepingComplete() &&
5999        !AdvanceSweepers(static_cast<int>(step_size))) {
6000      return false;
6001    }
6002  }
6003
6004  if (mark_sweeps_since_idle_round_started_ >= kMaxMarkSweepsInIdleRound) {
6005    if (EnoughGarbageSinceLastIdleRound()) {
6006      StartIdleRound();
6007    } else {
6008      return true;
6009    }
6010  }
6011
6012  int remaining_mark_sweeps = kMaxMarkSweepsInIdleRound -
6013                              mark_sweeps_since_idle_round_started_;
6014
6015  if (incremental_marking()->IsStopped()) {
6016    // If there are no more than two GCs left in this idle round and we are
6017    // allowed to do a full GC, then make those GCs full in order to compact
6018    // the code space.
6019    // TODO(ulan): Once we enable code compaction for incremental marking,
6020    // we can get rid of this special case and always start incremental marking.
6021    if (remaining_mark_sweeps <= 2 && hint >= kMinHintForFullGC) {
6022      CollectAllGarbage(kReduceMemoryFootprintMask,
6023                        "idle notification: finalize idle round");
6024      mark_sweeps_since_idle_round_started_++;
6025    } else if (hint > kMinHintForIncrementalMarking) {
6026      incremental_marking()->Start();
6027    }
6028  }
6029  if (!incremental_marking()->IsStopped() &&
6030      hint > kMinHintForIncrementalMarking) {
6031    AdvanceIdleIncrementalMarking(step_size);
6032  }
6033
6034  if (mark_sweeps_since_idle_round_started_ >= kMaxMarkSweepsInIdleRound) {
6035    FinishIdleRound();
6036    return true;
6037  }
6038
6039  return false;
6040}
6041
6042
6043bool Heap::IdleGlobalGC() {
6044  static const int kIdlesBeforeScavenge = 4;
6045  static const int kIdlesBeforeMarkSweep = 7;
6046  static const int kIdlesBeforeMarkCompact = 8;
6047  static const int kMaxIdleCount = kIdlesBeforeMarkCompact + 1;
6048  static const unsigned int kGCsBetweenCleanup = 4;
6049
6050  if (!last_idle_notification_gc_count_init_) {
6051    last_idle_notification_gc_count_ = gc_count_;
6052    last_idle_notification_gc_count_init_ = true;
6053  }
6054
6055  bool uncommit = true;
6056  bool finished = false;
6057
6058  // Reset the number of idle notifications received when a number of
6059  // GCs have taken place. This allows another round of cleanup based
6060  // on idle notifications if enough work has been carried out to
6061  // provoke a number of garbage collections.
6062  if (gc_count_ - last_idle_notification_gc_count_ < kGCsBetweenCleanup) {
6063    number_idle_notifications_ =
6064        Min(number_idle_notifications_ + 1, kMaxIdleCount);
6065  } else {
6066    number_idle_notifications_ = 0;
6067    last_idle_notification_gc_count_ = gc_count_;
6068  }
6069
6070  if (number_idle_notifications_ == kIdlesBeforeScavenge) {
6071    CollectGarbage(NEW_SPACE, "idle notification");
6072    new_space_.Shrink();
6073    last_idle_notification_gc_count_ = gc_count_;
6074  } else if (number_idle_notifications_ == kIdlesBeforeMarkSweep) {
6075    // Before doing the mark-sweep collections we clear the
6076    // compilation cache to avoid hanging on to source code and
6077    // generated code for cached functions.
6078    isolate_->compilation_cache()->Clear();
6079
6080    CollectAllGarbage(kReduceMemoryFootprintMask, "idle notification");
6081    new_space_.Shrink();
6082    last_idle_notification_gc_count_ = gc_count_;
6083
6084  } else if (number_idle_notifications_ == kIdlesBeforeMarkCompact) {
6085    CollectAllGarbage(kReduceMemoryFootprintMask, "idle notification");
6086    new_space_.Shrink();
6087    last_idle_notification_gc_count_ = gc_count_;
6088    number_idle_notifications_ = 0;
6089    finished = true;
6090  } else if (number_idle_notifications_ > kIdlesBeforeMarkCompact) {
6091    // If we have received more than kIdlesBeforeMarkCompact idle
6092    // notifications we do not perform any cleanup because we don't
6093    // expect to gain much by doing so.
6094    finished = true;
6095  }
6096
6097  if (uncommit) UncommitFromSpace();
6098
6099  return finished;
6100}
6101
6102
6103#ifdef DEBUG
6104
6105void Heap::Print() {
6106  if (!HasBeenSetUp()) return;
6107  isolate()->PrintStack(stdout);
6108  AllSpaces spaces(this);
6109  for (Space* space = spaces.next(); space != NULL; space = spaces.next()) {
6110    space->Print();
6111  }
6112}
6113
6114
6115void Heap::ReportCodeStatistics(const char* title) {
6116  PrintF(">>>>>> Code Stats (%s) >>>>>>\n", title);
6117  PagedSpace::ResetCodeStatistics();
6118  // We do not look for code in new space, map space, or old space.  If code
6119  // somehow ends up in those spaces, we would miss it here.
6120  code_space_->CollectCodeStatistics();
6121  lo_space_->CollectCodeStatistics();
6122  PagedSpace::ReportCodeStatistics();
6123}
6124
6125
6126// This function expects that NewSpace's allocated objects histogram is
6127// populated (via a call to CollectStatistics or else as a side effect of a
6128// just-completed scavenge collection).
6129void Heap::ReportHeapStatistics(const char* title) {
6130  USE(title);
6131  PrintF(">>>>>> =============== %s (%d) =============== >>>>>>\n",
6132         title, gc_count_);
6133  PrintF("old_generation_allocation_limit_ %" V8_PTR_PREFIX "d\n",
6134         old_generation_allocation_limit_);
6135
6136  PrintF("\n");
6137  PrintF("Number of handles : %d\n", HandleScope::NumberOfHandles(isolate_));
6138  isolate_->global_handles()->PrintStats();
6139  PrintF("\n");
6140
6141  PrintF("Heap statistics : ");
6142  isolate_->memory_allocator()->ReportStatistics();
6143  PrintF("To space : ");
6144  new_space_.ReportStatistics();
6145  PrintF("Old pointer space : ");
6146  old_pointer_space_->ReportStatistics();
6147  PrintF("Old data space : ");
6148  old_data_space_->ReportStatistics();
6149  PrintF("Code space : ");
6150  code_space_->ReportStatistics();
6151  PrintF("Map space : ");
6152  map_space_->ReportStatistics();
6153  PrintF("Cell space : ");
6154  cell_space_->ReportStatistics();
6155  PrintF("PropertyCell space : ");
6156  property_cell_space_->ReportStatistics();
6157  PrintF("Large object space : ");
6158  lo_space_->ReportStatistics();
6159  PrintF(">>>>>> ========================================= >>>>>>\n");
6160}
6161
6162#endif  // DEBUG
6163
6164bool Heap::Contains(HeapObject* value) {
6165  return Contains(value->address());
6166}
6167
6168
6169bool Heap::Contains(Address addr) {
6170  if (OS::IsOutsideAllocatedSpace(addr)) return false;
6171  return HasBeenSetUp() &&
6172    (new_space_.ToSpaceContains(addr) ||
6173     old_pointer_space_->Contains(addr) ||
6174     old_data_space_->Contains(addr) ||
6175     code_space_->Contains(addr) ||
6176     map_space_->Contains(addr) ||
6177     cell_space_->Contains(addr) ||
6178     property_cell_space_->Contains(addr) ||
6179     lo_space_->SlowContains(addr));
6180}
6181
6182
6183bool Heap::InSpace(HeapObject* value, AllocationSpace space) {
6184  return InSpace(value->address(), space);
6185}
6186
6187
6188bool Heap::InSpace(Address addr, AllocationSpace space) {
6189  if (OS::IsOutsideAllocatedSpace(addr)) return false;
6190  if (!HasBeenSetUp()) return false;
6191
6192  switch (space) {
6193    case NEW_SPACE:
6194      return new_space_.ToSpaceContains(addr);
6195    case OLD_POINTER_SPACE:
6196      return old_pointer_space_->Contains(addr);
6197    case OLD_DATA_SPACE:
6198      return old_data_space_->Contains(addr);
6199    case CODE_SPACE:
6200      return code_space_->Contains(addr);
6201    case MAP_SPACE:
6202      return map_space_->Contains(addr);
6203    case CELL_SPACE:
6204      return cell_space_->Contains(addr);
6205    case PROPERTY_CELL_SPACE:
6206      return property_cell_space_->Contains(addr);
6207    case LO_SPACE:
6208      return lo_space_->SlowContains(addr);
6209  }
6210
6211  return false;
6212}
6213
6214
6215#ifdef VERIFY_HEAP
6216void Heap::Verify() {
6217  CHECK(HasBeenSetUp());
6218
6219  store_buffer()->Verify();
6220
6221  VerifyPointersVisitor visitor;
6222  IterateRoots(&visitor, VISIT_ONLY_STRONG);
6223
6224  new_space_.Verify();
6225
6226  old_pointer_space_->Verify(&visitor);
6227  map_space_->Verify(&visitor);
6228
6229  VerifyPointersVisitor no_dirty_regions_visitor;
6230  old_data_space_->Verify(&no_dirty_regions_visitor);
6231  code_space_->Verify(&no_dirty_regions_visitor);
6232  cell_space_->Verify(&no_dirty_regions_visitor);
6233  property_cell_space_->Verify(&no_dirty_regions_visitor);
6234
6235  lo_space_->Verify();
6236}
6237#endif
6238
6239
6240MaybeObject* Heap::InternalizeUtf8String(Vector<const char> string) {
6241  Object* result = NULL;
6242  Object* new_table;
6243  { MaybeObject* maybe_new_table =
6244        string_table()->LookupUtf8String(string, &result);
6245    if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table;
6246  }
6247  // Can't use set_string_table because StringTable::cast knows that
6248  // StringTable is a singleton and checks for identity.
6249  roots_[kStringTableRootIndex] = new_table;
6250  ASSERT(result != NULL);
6251  return result;
6252}
6253
6254
6255MaybeObject* Heap::InternalizeOneByteString(Vector<const uint8_t> string) {
6256  Object* result = NULL;
6257  Object* new_table;
6258  { MaybeObject* maybe_new_table =
6259        string_table()->LookupOneByteString(string, &result);
6260    if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table;
6261  }
6262  // Can't use set_string_table because StringTable::cast knows that
6263  // StringTable is a singleton and checks for identity.
6264  roots_[kStringTableRootIndex] = new_table;
6265  ASSERT(result != NULL);
6266  return result;
6267}
6268
6269
6270MaybeObject* Heap::InternalizeOneByteString(Handle<SeqOneByteString> string,
6271                                     int from,
6272                                     int length) {
6273  Object* result = NULL;
6274  Object* new_table;
6275  { MaybeObject* maybe_new_table =
6276        string_table()->LookupSubStringOneByteString(string,
6277                                                   from,
6278                                                   length,
6279                                                   &result);
6280    if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table;
6281  }
6282  // Can't use set_string_table because StringTable::cast knows that
6283  // StringTable is a singleton and checks for identity.
6284  roots_[kStringTableRootIndex] = new_table;
6285  ASSERT(result != NULL);
6286  return result;
6287}
6288
6289
6290MaybeObject* Heap::InternalizeTwoByteString(Vector<const uc16> string) {
6291  Object* result = NULL;
6292  Object* new_table;
6293  { MaybeObject* maybe_new_table =
6294        string_table()->LookupTwoByteString(string, &result);
6295    if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table;
6296  }
6297  // Can't use set_string_table because StringTable::cast knows that
6298  // StringTable is a singleton and checks for identity.
6299  roots_[kStringTableRootIndex] = new_table;
6300  ASSERT(result != NULL);
6301  return result;
6302}
6303
6304
6305MaybeObject* Heap::InternalizeString(String* string) {
6306  if (string->IsInternalizedString()) return string;
6307  Object* result = NULL;
6308  Object* new_table;
6309  { MaybeObject* maybe_new_table =
6310        string_table()->LookupString(string, &result);
6311    if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table;
6312  }
6313  // Can't use set_string_table because StringTable::cast knows that
6314  // StringTable is a singleton and checks for identity.
6315  roots_[kStringTableRootIndex] = new_table;
6316  ASSERT(result != NULL);
6317  return result;
6318}
6319
6320
6321bool Heap::InternalizeStringIfExists(String* string, String** result) {
6322  if (string->IsInternalizedString()) {
6323    *result = string;
6324    return true;
6325  }
6326  return string_table()->LookupStringIfExists(string, result);
6327}
6328
6329
6330void Heap::ZapFromSpace() {
6331  NewSpacePageIterator it(new_space_.FromSpaceStart(),
6332                          new_space_.FromSpaceEnd());
6333  while (it.has_next()) {
6334    NewSpacePage* page = it.next();
6335    for (Address cursor = page->area_start(), limit = page->area_end();
6336         cursor < limit;
6337         cursor += kPointerSize) {
6338      Memory::Address_at(cursor) = kFromSpaceZapValue;
6339    }
6340  }
6341}
6342
6343
6344void Heap::IterateAndMarkPointersToFromSpace(Address start,
6345                                             Address end,
6346                                             ObjectSlotCallback callback) {
6347  Address slot_address = start;
6348
6349  // We are not collecting slots on new space objects during mutation
6350  // thus we have to scan for pointers to evacuation candidates when we
6351  // promote objects. But we should not record any slots in non-black
6352  // objects. Grey object's slots would be rescanned.
6353  // White object might not survive until the end of collection
6354  // it would be a violation of the invariant to record it's slots.
6355  bool record_slots = false;
6356  if (incremental_marking()->IsCompacting()) {
6357    MarkBit mark_bit = Marking::MarkBitFrom(HeapObject::FromAddress(start));
6358    record_slots = Marking::IsBlack(mark_bit);
6359  }
6360
6361  while (slot_address < end) {
6362    Object** slot = reinterpret_cast<Object**>(slot_address);
6363    Object* object = *slot;
6364    // If the store buffer becomes overfull we mark pages as being exempt from
6365    // the store buffer.  These pages are scanned to find pointers that point
6366    // to the new space.  In that case we may hit newly promoted objects and
6367    // fix the pointers before the promotion queue gets to them.  Thus the 'if'.
6368    if (object->IsHeapObject()) {
6369      if (Heap::InFromSpace(object)) {
6370        callback(reinterpret_cast<HeapObject**>(slot),
6371                 HeapObject::cast(object));
6372        Object* new_object = *slot;
6373        if (InNewSpace(new_object)) {
6374          SLOW_ASSERT(Heap::InToSpace(new_object));
6375          SLOW_ASSERT(new_object->IsHeapObject());
6376          store_buffer_.EnterDirectlyIntoStoreBuffer(
6377              reinterpret_cast<Address>(slot));
6378        }
6379        SLOW_ASSERT(!MarkCompactCollector::IsOnEvacuationCandidate(new_object));
6380      } else if (record_slots &&
6381                 MarkCompactCollector::IsOnEvacuationCandidate(object)) {
6382        mark_compact_collector()->RecordSlot(slot, slot, object);
6383      }
6384    }
6385    slot_address += kPointerSize;
6386  }
6387}
6388
6389
6390#ifdef DEBUG
6391typedef bool (*CheckStoreBufferFilter)(Object** addr);
6392
6393
6394bool IsAMapPointerAddress(Object** addr) {
6395  uintptr_t a = reinterpret_cast<uintptr_t>(addr);
6396  int mod = a % Map::kSize;
6397  return mod >= Map::kPointerFieldsBeginOffset &&
6398         mod < Map::kPointerFieldsEndOffset;
6399}
6400
6401
6402bool EverythingsAPointer(Object** addr) {
6403  return true;
6404}
6405
6406
6407static void CheckStoreBuffer(Heap* heap,
6408                             Object** current,
6409                             Object** limit,
6410                             Object**** store_buffer_position,
6411                             Object*** store_buffer_top,
6412                             CheckStoreBufferFilter filter,
6413                             Address special_garbage_start,
6414                             Address special_garbage_end) {
6415  Map* free_space_map = heap->free_space_map();
6416  for ( ; current < limit; current++) {
6417    Object* o = *current;
6418    Address current_address = reinterpret_cast<Address>(current);
6419    // Skip free space.
6420    if (o == free_space_map) {
6421      Address current_address = reinterpret_cast<Address>(current);
6422      FreeSpace* free_space =
6423          FreeSpace::cast(HeapObject::FromAddress(current_address));
6424      int skip = free_space->Size();
6425      ASSERT(current_address + skip <= reinterpret_cast<Address>(limit));
6426      ASSERT(skip > 0);
6427      current_address += skip - kPointerSize;
6428      current = reinterpret_cast<Object**>(current_address);
6429      continue;
6430    }
6431    // Skip the current linear allocation space between top and limit which is
6432    // unmarked with the free space map, but can contain junk.
6433    if (current_address == special_garbage_start &&
6434        special_garbage_end != special_garbage_start) {
6435      current_address = special_garbage_end - kPointerSize;
6436      current = reinterpret_cast<Object**>(current_address);
6437      continue;
6438    }
6439    if (!(*filter)(current)) continue;
6440    ASSERT(current_address < special_garbage_start ||
6441           current_address >= special_garbage_end);
6442    ASSERT(reinterpret_cast<uintptr_t>(o) != kFreeListZapValue);
6443    // We have to check that the pointer does not point into new space
6444    // without trying to cast it to a heap object since the hash field of
6445    // a string can contain values like 1 and 3 which are tagged null
6446    // pointers.
6447    if (!heap->InNewSpace(o)) continue;
6448    while (**store_buffer_position < current &&
6449           *store_buffer_position < store_buffer_top) {
6450      (*store_buffer_position)++;
6451    }
6452    if (**store_buffer_position != current ||
6453        *store_buffer_position == store_buffer_top) {
6454      Object** obj_start = current;
6455      while (!(*obj_start)->IsMap()) obj_start--;
6456      UNREACHABLE();
6457    }
6458  }
6459}
6460
6461
6462// Check that the store buffer contains all intergenerational pointers by
6463// scanning a page and ensuring that all pointers to young space are in the
6464// store buffer.
6465void Heap::OldPointerSpaceCheckStoreBuffer() {
6466  OldSpace* space = old_pointer_space();
6467  PageIterator pages(space);
6468
6469  store_buffer()->SortUniq();
6470
6471  while (pages.has_next()) {
6472    Page* page = pages.next();
6473    Object** current = reinterpret_cast<Object**>(page->area_start());
6474
6475    Address end = page->area_end();
6476
6477    Object*** store_buffer_position = store_buffer()->Start();
6478    Object*** store_buffer_top = store_buffer()->Top();
6479
6480    Object** limit = reinterpret_cast<Object**>(end);
6481    CheckStoreBuffer(this,
6482                     current,
6483                     limit,
6484                     &store_buffer_position,
6485                     store_buffer_top,
6486                     &EverythingsAPointer,
6487                     space->top(),
6488                     space->limit());
6489  }
6490}
6491
6492
6493void Heap::MapSpaceCheckStoreBuffer() {
6494  MapSpace* space = map_space();
6495  PageIterator pages(space);
6496
6497  store_buffer()->SortUniq();
6498
6499  while (pages.has_next()) {
6500    Page* page = pages.next();
6501    Object** current = reinterpret_cast<Object**>(page->area_start());
6502
6503    Address end = page->area_end();
6504
6505    Object*** store_buffer_position = store_buffer()->Start();
6506    Object*** store_buffer_top = store_buffer()->Top();
6507
6508    Object** limit = reinterpret_cast<Object**>(end);
6509    CheckStoreBuffer(this,
6510                     current,
6511                     limit,
6512                     &store_buffer_position,
6513                     store_buffer_top,
6514                     &IsAMapPointerAddress,
6515                     space->top(),
6516                     space->limit());
6517  }
6518}
6519
6520
6521void Heap::LargeObjectSpaceCheckStoreBuffer() {
6522  LargeObjectIterator it(lo_space());
6523  for (HeapObject* object = it.Next(); object != NULL; object = it.Next()) {
6524    // We only have code, sequential strings, or fixed arrays in large
6525    // object space, and only fixed arrays can possibly contain pointers to
6526    // the young generation.
6527    if (object->IsFixedArray()) {
6528      Object*** store_buffer_position = store_buffer()->Start();
6529      Object*** store_buffer_top = store_buffer()->Top();
6530      Object** current = reinterpret_cast<Object**>(object->address());
6531      Object** limit =
6532          reinterpret_cast<Object**>(object->address() + object->Size());
6533      CheckStoreBuffer(this,
6534                       current,
6535                       limit,
6536                       &store_buffer_position,
6537                       store_buffer_top,
6538                       &EverythingsAPointer,
6539                       NULL,
6540                       NULL);
6541    }
6542  }
6543}
6544#endif
6545
6546
6547void Heap::IterateRoots(ObjectVisitor* v, VisitMode mode) {
6548  IterateStrongRoots(v, mode);
6549  IterateWeakRoots(v, mode);
6550}
6551
6552
6553void Heap::IterateWeakRoots(ObjectVisitor* v, VisitMode mode) {
6554  v->VisitPointer(reinterpret_cast<Object**>(&roots_[kStringTableRootIndex]));
6555  v->Synchronize(VisitorSynchronization::kStringTable);
6556  if (mode != VISIT_ALL_IN_SCAVENGE &&
6557      mode != VISIT_ALL_IN_SWEEP_NEWSPACE) {
6558    // Scavenge collections have special processing for this.
6559    external_string_table_.Iterate(v);
6560  }
6561  v->Synchronize(VisitorSynchronization::kExternalStringsTable);
6562}
6563
6564
6565void Heap::IterateStrongRoots(ObjectVisitor* v, VisitMode mode) {
6566  v->VisitPointers(&roots_[0], &roots_[kStrongRootListLength]);
6567  v->Synchronize(VisitorSynchronization::kStrongRootList);
6568
6569  v->VisitPointer(BitCast<Object**>(&hidden_string_));
6570  v->Synchronize(VisitorSynchronization::kInternalizedString);
6571
6572  isolate_->bootstrapper()->Iterate(v);
6573  v->Synchronize(VisitorSynchronization::kBootstrapper);
6574  isolate_->Iterate(v);
6575  v->Synchronize(VisitorSynchronization::kTop);
6576  Relocatable::Iterate(v);
6577  v->Synchronize(VisitorSynchronization::kRelocatable);
6578
6579#ifdef ENABLE_DEBUGGER_SUPPORT
6580  isolate_->debug()->Iterate(v);
6581  if (isolate_->deoptimizer_data() != NULL) {
6582    isolate_->deoptimizer_data()->Iterate(v);
6583  }
6584#endif
6585  v->Synchronize(VisitorSynchronization::kDebug);
6586  isolate_->compilation_cache()->Iterate(v);
6587  v->Synchronize(VisitorSynchronization::kCompilationCache);
6588
6589  // Iterate over local handles in handle scopes.
6590  isolate_->handle_scope_implementer()->Iterate(v);
6591  isolate_->IterateDeferredHandles(v);
6592  v->Synchronize(VisitorSynchronization::kHandleScope);
6593
6594  // Iterate over the builtin code objects and code stubs in the
6595  // heap. Note that it is not necessary to iterate over code objects
6596  // on scavenge collections.
6597  if (mode != VISIT_ALL_IN_SCAVENGE) {
6598    isolate_->builtins()->IterateBuiltins(v);
6599  }
6600  v->Synchronize(VisitorSynchronization::kBuiltins);
6601
6602  // Iterate over global handles.
6603  switch (mode) {
6604    case VISIT_ONLY_STRONG:
6605      isolate_->global_handles()->IterateStrongRoots(v);
6606      break;
6607    case VISIT_ALL_IN_SCAVENGE:
6608      isolate_->global_handles()->IterateNewSpaceStrongAndDependentRoots(v);
6609      break;
6610    case VISIT_ALL_IN_SWEEP_NEWSPACE:
6611    case VISIT_ALL:
6612      isolate_->global_handles()->IterateAllRoots(v);
6613      break;
6614  }
6615  v->Synchronize(VisitorSynchronization::kGlobalHandles);
6616
6617  // Iterate over eternal handles.
6618  if (mode == VISIT_ALL_IN_SCAVENGE) {
6619    isolate_->eternal_handles()->IterateNewSpaceRoots(v);
6620  } else {
6621    isolate_->eternal_handles()->IterateAllRoots(v);
6622  }
6623  v->Synchronize(VisitorSynchronization::kEternalHandles);
6624
6625  // Iterate over pointers being held by inactive threads.
6626  isolate_->thread_manager()->Iterate(v);
6627  v->Synchronize(VisitorSynchronization::kThreadManager);
6628
6629  // Iterate over the pointers the Serialization/Deserialization code is
6630  // holding.
6631  // During garbage collection this keeps the partial snapshot cache alive.
6632  // During deserialization of the startup snapshot this creates the partial
6633  // snapshot cache and deserializes the objects it refers to.  During
6634  // serialization this does nothing, since the partial snapshot cache is
6635  // empty.  However the next thing we do is create the partial snapshot,
6636  // filling up the partial snapshot cache with objects it needs as we go.
6637  SerializerDeserializer::Iterate(v);
6638  // We don't do a v->Synchronize call here, because in debug mode that will
6639  // output a flag to the snapshot.  However at this point the serializer and
6640  // deserializer are deliberately a little unsynchronized (see above) so the
6641  // checking of the sync flag in the snapshot would fail.
6642}
6643
6644
6645// TODO(1236194): Since the heap size is configurable on the command line
6646// and through the API, we should gracefully handle the case that the heap
6647// size is not big enough to fit all the initial objects.
6648bool Heap::ConfigureHeap(int max_semispace_size,
6649                         intptr_t max_old_gen_size,
6650                         intptr_t max_executable_size) {
6651  if (HasBeenSetUp()) return false;
6652
6653  if (FLAG_stress_compaction) {
6654    // This will cause more frequent GCs when stressing.
6655    max_semispace_size_ = Page::kPageSize;
6656  }
6657
6658  if (max_semispace_size > 0) {
6659    if (max_semispace_size < Page::kPageSize) {
6660      max_semispace_size = Page::kPageSize;
6661      if (FLAG_trace_gc) {
6662        PrintPID("Max semispace size cannot be less than %dkbytes\n",
6663                 Page::kPageSize >> 10);
6664      }
6665    }
6666    max_semispace_size_ = max_semispace_size;
6667  }
6668
6669  if (Snapshot::IsEnabled()) {
6670    // If we are using a snapshot we always reserve the default amount
6671    // of memory for each semispace because code in the snapshot has
6672    // write-barrier code that relies on the size and alignment of new
6673    // space.  We therefore cannot use a larger max semispace size
6674    // than the default reserved semispace size.
6675    if (max_semispace_size_ > reserved_semispace_size_) {
6676      max_semispace_size_ = reserved_semispace_size_;
6677      if (FLAG_trace_gc) {
6678        PrintPID("Max semispace size cannot be more than %dkbytes\n",
6679                 reserved_semispace_size_ >> 10);
6680      }
6681    }
6682  } else {
6683    // If we are not using snapshots we reserve space for the actual
6684    // max semispace size.
6685    reserved_semispace_size_ = max_semispace_size_;
6686  }
6687
6688  if (max_old_gen_size > 0) max_old_generation_size_ = max_old_gen_size;
6689  if (max_executable_size > 0) {
6690    max_executable_size_ = RoundUp(max_executable_size, Page::kPageSize);
6691  }
6692
6693  // The max executable size must be less than or equal to the max old
6694  // generation size.
6695  if (max_executable_size_ > max_old_generation_size_) {
6696    max_executable_size_ = max_old_generation_size_;
6697  }
6698
6699  // The new space size must be a power of two to support single-bit testing
6700  // for containment.
6701  max_semispace_size_ = RoundUpToPowerOf2(max_semispace_size_);
6702  reserved_semispace_size_ = RoundUpToPowerOf2(reserved_semispace_size_);
6703  initial_semispace_size_ = Min(initial_semispace_size_, max_semispace_size_);
6704
6705  // The external allocation limit should be below 256 MB on all architectures
6706  // to avoid unnecessary low memory notifications, as that is the threshold
6707  // for some embedders.
6708  external_allocation_limit_ = 12 * max_semispace_size_;
6709  ASSERT(external_allocation_limit_ <= 256 * MB);
6710
6711  // The old generation is paged and needs at least one page for each space.
6712  int paged_space_count = LAST_PAGED_SPACE - FIRST_PAGED_SPACE + 1;
6713  max_old_generation_size_ = Max(static_cast<intptr_t>(paged_space_count *
6714                                                       Page::kPageSize),
6715                                 RoundUp(max_old_generation_size_,
6716                                         Page::kPageSize));
6717
6718  configured_ = true;
6719  return true;
6720}
6721
6722
6723bool Heap::ConfigureHeapDefault() {
6724  return ConfigureHeap(static_cast<intptr_t>(FLAG_max_new_space_size / 2) * KB,
6725                       static_cast<intptr_t>(FLAG_max_old_space_size) * MB,
6726                       static_cast<intptr_t>(FLAG_max_executable_size) * MB);
6727}
6728
6729
6730void Heap::RecordStats(HeapStats* stats, bool take_snapshot) {
6731  *stats->start_marker = HeapStats::kStartMarker;
6732  *stats->end_marker = HeapStats::kEndMarker;
6733  *stats->new_space_size = new_space_.SizeAsInt();
6734  *stats->new_space_capacity = static_cast<int>(new_space_.Capacity());
6735  *stats->old_pointer_space_size = old_pointer_space_->SizeOfObjects();
6736  *stats->old_pointer_space_capacity = old_pointer_space_->Capacity();
6737  *stats->old_data_space_size = old_data_space_->SizeOfObjects();
6738  *stats->old_data_space_capacity = old_data_space_->Capacity();
6739  *stats->code_space_size = code_space_->SizeOfObjects();
6740  *stats->code_space_capacity = code_space_->Capacity();
6741  *stats->map_space_size = map_space_->SizeOfObjects();
6742  *stats->map_space_capacity = map_space_->Capacity();
6743  *stats->cell_space_size = cell_space_->SizeOfObjects();
6744  *stats->cell_space_capacity = cell_space_->Capacity();
6745  *stats->property_cell_space_size = property_cell_space_->SizeOfObjects();
6746  *stats->property_cell_space_capacity = property_cell_space_->Capacity();
6747  *stats->lo_space_size = lo_space_->Size();
6748  isolate_->global_handles()->RecordStats(stats);
6749  *stats->memory_allocator_size = isolate()->memory_allocator()->Size();
6750  *stats->memory_allocator_capacity =
6751      isolate()->memory_allocator()->Size() +
6752      isolate()->memory_allocator()->Available();
6753  *stats->os_error = OS::GetLastError();
6754      isolate()->memory_allocator()->Available();
6755  if (take_snapshot) {
6756    HeapIterator iterator(this);
6757    for (HeapObject* obj = iterator.next();
6758         obj != NULL;
6759         obj = iterator.next()) {
6760      InstanceType type = obj->map()->instance_type();
6761      ASSERT(0 <= type && type <= LAST_TYPE);
6762      stats->objects_per_type[type]++;
6763      stats->size_per_type[type] += obj->Size();
6764    }
6765  }
6766}
6767
6768
6769intptr_t Heap::PromotedSpaceSizeOfObjects() {
6770  return old_pointer_space_->SizeOfObjects()
6771      + old_data_space_->SizeOfObjects()
6772      + code_space_->SizeOfObjects()
6773      + map_space_->SizeOfObjects()
6774      + cell_space_->SizeOfObjects()
6775      + property_cell_space_->SizeOfObjects()
6776      + lo_space_->SizeOfObjects();
6777}
6778
6779
6780intptr_t Heap::PromotedExternalMemorySize() {
6781  if (amount_of_external_allocated_memory_
6782      <= amount_of_external_allocated_memory_at_last_global_gc_) return 0;
6783  return amount_of_external_allocated_memory_
6784      - amount_of_external_allocated_memory_at_last_global_gc_;
6785}
6786
6787
6788V8_DECLARE_ONCE(initialize_gc_once);
6789
6790static void InitializeGCOnce() {
6791  InitializeScavengingVisitorsTables();
6792  NewSpaceScavenger::Initialize();
6793  MarkCompactCollector::Initialize();
6794}
6795
6796
6797bool Heap::SetUp() {
6798#ifdef DEBUG
6799  allocation_timeout_ = FLAG_gc_interval;
6800#endif
6801
6802  // Initialize heap spaces and initial maps and objects. Whenever something
6803  // goes wrong, just return false. The caller should check the results and
6804  // call Heap::TearDown() to release allocated memory.
6805  //
6806  // If the heap is not yet configured (e.g. through the API), configure it.
6807  // Configuration is based on the flags new-space-size (really the semispace
6808  // size) and old-space-size if set or the initial values of semispace_size_
6809  // and old_generation_size_ otherwise.
6810  if (!configured_) {
6811    if (!ConfigureHeapDefault()) return false;
6812  }
6813
6814  CallOnce(&initialize_gc_once, &InitializeGCOnce);
6815
6816  MarkMapPointersAsEncoded(false);
6817
6818  // Set up memory allocator.
6819  if (!isolate_->memory_allocator()->SetUp(MaxReserved(), MaxExecutableSize()))
6820      return false;
6821
6822  // Set up new space.
6823  if (!new_space_.SetUp(reserved_semispace_size_, max_semispace_size_)) {
6824    return false;
6825  }
6826
6827  // Initialize old pointer space.
6828  old_pointer_space_ =
6829      new OldSpace(this,
6830                   max_old_generation_size_,
6831                   OLD_POINTER_SPACE,
6832                   NOT_EXECUTABLE);
6833  if (old_pointer_space_ == NULL) return false;
6834  if (!old_pointer_space_->SetUp()) return false;
6835
6836  // Initialize old data space.
6837  old_data_space_ =
6838      new OldSpace(this,
6839                   max_old_generation_size_,
6840                   OLD_DATA_SPACE,
6841                   NOT_EXECUTABLE);
6842  if (old_data_space_ == NULL) return false;
6843  if (!old_data_space_->SetUp()) return false;
6844
6845  // Initialize the code space, set its maximum capacity to the old
6846  // generation size. It needs executable memory.
6847  // On 64-bit platform(s), we put all code objects in a 2 GB range of
6848  // virtual address space, so that they can call each other with near calls.
6849  if (code_range_size_ > 0) {
6850    if (!isolate_->code_range()->SetUp(code_range_size_)) {
6851      return false;
6852    }
6853  }
6854
6855  code_space_ =
6856      new OldSpace(this, max_old_generation_size_, CODE_SPACE, EXECUTABLE);
6857  if (code_space_ == NULL) return false;
6858  if (!code_space_->SetUp()) return false;
6859
6860  // Initialize map space.
6861  map_space_ = new MapSpace(this, max_old_generation_size_, MAP_SPACE);
6862  if (map_space_ == NULL) return false;
6863  if (!map_space_->SetUp()) return false;
6864
6865  // Initialize simple cell space.
6866  cell_space_ = new CellSpace(this, max_old_generation_size_, CELL_SPACE);
6867  if (cell_space_ == NULL) return false;
6868  if (!cell_space_->SetUp()) return false;
6869
6870  // Initialize global property cell space.
6871  property_cell_space_ = new PropertyCellSpace(this, max_old_generation_size_,
6872                                               PROPERTY_CELL_SPACE);
6873  if (property_cell_space_ == NULL) return false;
6874  if (!property_cell_space_->SetUp()) return false;
6875
6876  // The large object code space may contain code or data.  We set the memory
6877  // to be non-executable here for safety, but this means we need to enable it
6878  // explicitly when allocating large code objects.
6879  lo_space_ = new LargeObjectSpace(this, max_old_generation_size_, LO_SPACE);
6880  if (lo_space_ == NULL) return false;
6881  if (!lo_space_->SetUp()) return false;
6882
6883  // Set up the seed that is used to randomize the string hash function.
6884  ASSERT(hash_seed() == 0);
6885  if (FLAG_randomize_hashes) {
6886    if (FLAG_hash_seed == 0) {
6887      set_hash_seed(
6888          Smi::FromInt(V8::RandomPrivate(isolate()) & 0x3fffffff));
6889    } else {
6890      set_hash_seed(Smi::FromInt(FLAG_hash_seed));
6891    }
6892  }
6893
6894  LOG(isolate_, IntPtrTEvent("heap-capacity", Capacity()));
6895  LOG(isolate_, IntPtrTEvent("heap-available", Available()));
6896
6897  store_buffer()->SetUp();
6898
6899  if (FLAG_parallel_recompilation) relocation_mutex_ = OS::CreateMutex();
6900#ifdef DEBUG
6901  relocation_mutex_locked_by_optimizer_thread_ = false;
6902#endif  // DEBUG
6903
6904  return true;
6905}
6906
6907
6908bool Heap::CreateHeapObjects() {
6909  // Create initial maps.
6910  if (!CreateInitialMaps()) return false;
6911  if (!CreateApiObjects()) return false;
6912
6913  // Create initial objects
6914  if (!CreateInitialObjects()) return false;
6915
6916  native_contexts_list_ = undefined_value();
6917  array_buffers_list_ = undefined_value();
6918  allocation_sites_list_ = undefined_value();
6919  return true;
6920}
6921
6922
6923void Heap::SetStackLimits() {
6924  ASSERT(isolate_ != NULL);
6925  ASSERT(isolate_ == isolate());
6926  // On 64 bit machines, pointers are generally out of range of Smis.  We write
6927  // something that looks like an out of range Smi to the GC.
6928
6929  // Set up the special root array entries containing the stack limits.
6930  // These are actually addresses, but the tag makes the GC ignore it.
6931  roots_[kStackLimitRootIndex] =
6932      reinterpret_cast<Object*>(
6933          (isolate_->stack_guard()->jslimit() & ~kSmiTagMask) | kSmiTag);
6934  roots_[kRealStackLimitRootIndex] =
6935      reinterpret_cast<Object*>(
6936          (isolate_->stack_guard()->real_jslimit() & ~kSmiTagMask) | kSmiTag);
6937}
6938
6939
6940void Heap::TearDown() {
6941#ifdef VERIFY_HEAP
6942  if (FLAG_verify_heap) {
6943    Verify();
6944  }
6945#endif
6946
6947  if (FLAG_print_cumulative_gc_stat) {
6948    PrintF("\n");
6949    PrintF("gc_count=%d ", gc_count_);
6950    PrintF("mark_sweep_count=%d ", ms_count_);
6951    PrintF("max_gc_pause=%.1f ", get_max_gc_pause());
6952    PrintF("total_gc_time=%.1f ", total_gc_time_ms_);
6953    PrintF("min_in_mutator=%.1f ", get_min_in_mutator());
6954    PrintF("max_alive_after_gc=%" V8_PTR_PREFIX "d ",
6955           get_max_alive_after_gc());
6956    PrintF("total_marking_time=%.1f ", marking_time());
6957    PrintF("total_sweeping_time=%.1f ", sweeping_time());
6958    PrintF("\n\n");
6959  }
6960
6961  TearDownArrayBuffers();
6962
6963  isolate_->global_handles()->TearDown();
6964
6965  external_string_table_.TearDown();
6966
6967  mark_compact_collector()->TearDown();
6968
6969  new_space_.TearDown();
6970
6971  if (old_pointer_space_ != NULL) {
6972    old_pointer_space_->TearDown();
6973    delete old_pointer_space_;
6974    old_pointer_space_ = NULL;
6975  }
6976
6977  if (old_data_space_ != NULL) {
6978    old_data_space_->TearDown();
6979    delete old_data_space_;
6980    old_data_space_ = NULL;
6981  }
6982
6983  if (code_space_ != NULL) {
6984    code_space_->TearDown();
6985    delete code_space_;
6986    code_space_ = NULL;
6987  }
6988
6989  if (map_space_ != NULL) {
6990    map_space_->TearDown();
6991    delete map_space_;
6992    map_space_ = NULL;
6993  }
6994
6995  if (cell_space_ != NULL) {
6996    cell_space_->TearDown();
6997    delete cell_space_;
6998    cell_space_ = NULL;
6999  }
7000
7001  if (property_cell_space_ != NULL) {
7002    property_cell_space_->TearDown();
7003    delete property_cell_space_;
7004    property_cell_space_ = NULL;
7005  }
7006
7007  if (lo_space_ != NULL) {
7008    lo_space_->TearDown();
7009    delete lo_space_;
7010    lo_space_ = NULL;
7011  }
7012
7013  store_buffer()->TearDown();
7014  incremental_marking()->TearDown();
7015
7016  isolate_->memory_allocator()->TearDown();
7017
7018  delete relocation_mutex_;
7019}
7020
7021
7022void Heap::AddGCPrologueCallback(GCPrologueCallback callback, GCType gc_type) {
7023  ASSERT(callback != NULL);
7024  GCPrologueCallbackPair pair(callback, gc_type);
7025  ASSERT(!gc_prologue_callbacks_.Contains(pair));
7026  return gc_prologue_callbacks_.Add(pair);
7027}
7028
7029
7030void Heap::RemoveGCPrologueCallback(GCPrologueCallback callback) {
7031  ASSERT(callback != NULL);
7032  for (int i = 0; i < gc_prologue_callbacks_.length(); ++i) {
7033    if (gc_prologue_callbacks_[i].callback == callback) {
7034      gc_prologue_callbacks_.Remove(i);
7035      return;
7036    }
7037  }
7038  UNREACHABLE();
7039}
7040
7041
7042void Heap::AddGCEpilogueCallback(GCEpilogueCallback callback, GCType gc_type) {
7043  ASSERT(callback != NULL);
7044  GCEpilogueCallbackPair pair(callback, gc_type);
7045  ASSERT(!gc_epilogue_callbacks_.Contains(pair));
7046  return gc_epilogue_callbacks_.Add(pair);
7047}
7048
7049
7050void Heap::RemoveGCEpilogueCallback(GCEpilogueCallback callback) {
7051  ASSERT(callback != NULL);
7052  for (int i = 0; i < gc_epilogue_callbacks_.length(); ++i) {
7053    if (gc_epilogue_callbacks_[i].callback == callback) {
7054      gc_epilogue_callbacks_.Remove(i);
7055      return;
7056    }
7057  }
7058  UNREACHABLE();
7059}
7060
7061
7062#ifdef DEBUG
7063
7064class PrintHandleVisitor: public ObjectVisitor {
7065 public:
7066  void VisitPointers(Object** start, Object** end) {
7067    for (Object** p = start; p < end; p++)
7068      PrintF("  handle %p to %p\n",
7069             reinterpret_cast<void*>(p),
7070             reinterpret_cast<void*>(*p));
7071  }
7072};
7073
7074
7075void Heap::PrintHandles() {
7076  PrintF("Handles:\n");
7077  PrintHandleVisitor v;
7078  isolate_->handle_scope_implementer()->Iterate(&v);
7079}
7080
7081#endif
7082
7083
7084Space* AllSpaces::next() {
7085  switch (counter_++) {
7086    case NEW_SPACE:
7087      return heap_->new_space();
7088    case OLD_POINTER_SPACE:
7089      return heap_->old_pointer_space();
7090    case OLD_DATA_SPACE:
7091      return heap_->old_data_space();
7092    case CODE_SPACE:
7093      return heap_->code_space();
7094    case MAP_SPACE:
7095      return heap_->map_space();
7096    case CELL_SPACE:
7097      return heap_->cell_space();
7098    case PROPERTY_CELL_SPACE:
7099      return heap_->property_cell_space();
7100    case LO_SPACE:
7101      return heap_->lo_space();
7102    default:
7103      return NULL;
7104  }
7105}
7106
7107
7108PagedSpace* PagedSpaces::next() {
7109  switch (counter_++) {
7110    case OLD_POINTER_SPACE:
7111      return heap_->old_pointer_space();
7112    case OLD_DATA_SPACE:
7113      return heap_->old_data_space();
7114    case CODE_SPACE:
7115      return heap_->code_space();
7116    case MAP_SPACE:
7117      return heap_->map_space();
7118    case CELL_SPACE:
7119      return heap_->cell_space();
7120    case PROPERTY_CELL_SPACE:
7121      return heap_->property_cell_space();
7122    default:
7123      return NULL;
7124  }
7125}
7126
7127
7128
7129OldSpace* OldSpaces::next() {
7130  switch (counter_++) {
7131    case OLD_POINTER_SPACE:
7132      return heap_->old_pointer_space();
7133    case OLD_DATA_SPACE:
7134      return heap_->old_data_space();
7135    case CODE_SPACE:
7136      return heap_->code_space();
7137    default:
7138      return NULL;
7139  }
7140}
7141
7142
7143SpaceIterator::SpaceIterator(Heap* heap)
7144    : heap_(heap),
7145      current_space_(FIRST_SPACE),
7146      iterator_(NULL),
7147      size_func_(NULL) {
7148}
7149
7150
7151SpaceIterator::SpaceIterator(Heap* heap, HeapObjectCallback size_func)
7152    : heap_(heap),
7153      current_space_(FIRST_SPACE),
7154      iterator_(NULL),
7155      size_func_(size_func) {
7156}
7157
7158
7159SpaceIterator::~SpaceIterator() {
7160  // Delete active iterator if any.
7161  delete iterator_;
7162}
7163
7164
7165bool SpaceIterator::has_next() {
7166  // Iterate until no more spaces.
7167  return current_space_ != LAST_SPACE;
7168}
7169
7170
7171ObjectIterator* SpaceIterator::next() {
7172  if (iterator_ != NULL) {
7173    delete iterator_;
7174    iterator_ = NULL;
7175    // Move to the next space
7176    current_space_++;
7177    if (current_space_ > LAST_SPACE) {
7178      return NULL;
7179    }
7180  }
7181
7182  // Return iterator for the new current space.
7183  return CreateIterator();
7184}
7185
7186
7187// Create an iterator for the space to iterate.
7188ObjectIterator* SpaceIterator::CreateIterator() {
7189  ASSERT(iterator_ == NULL);
7190
7191  switch (current_space_) {
7192    case NEW_SPACE:
7193      iterator_ = new SemiSpaceIterator(heap_->new_space(), size_func_);
7194      break;
7195    case OLD_POINTER_SPACE:
7196      iterator_ =
7197          new HeapObjectIterator(heap_->old_pointer_space(), size_func_);
7198      break;
7199    case OLD_DATA_SPACE:
7200      iterator_ = new HeapObjectIterator(heap_->old_data_space(), size_func_);
7201      break;
7202    case CODE_SPACE:
7203      iterator_ = new HeapObjectIterator(heap_->code_space(), size_func_);
7204      break;
7205    case MAP_SPACE:
7206      iterator_ = new HeapObjectIterator(heap_->map_space(), size_func_);
7207      break;
7208    case CELL_SPACE:
7209      iterator_ = new HeapObjectIterator(heap_->cell_space(), size_func_);
7210      break;
7211    case PROPERTY_CELL_SPACE:
7212      iterator_ = new HeapObjectIterator(heap_->property_cell_space(),
7213                                         size_func_);
7214      break;
7215    case LO_SPACE:
7216      iterator_ = new LargeObjectIterator(heap_->lo_space(), size_func_);
7217      break;
7218  }
7219
7220  // Return the newly allocated iterator;
7221  ASSERT(iterator_ != NULL);
7222  return iterator_;
7223}
7224
7225
7226class HeapObjectsFilter {
7227 public:
7228  virtual ~HeapObjectsFilter() {}
7229  virtual bool SkipObject(HeapObject* object) = 0;
7230};
7231
7232
7233class UnreachableObjectsFilter : public HeapObjectsFilter {
7234 public:
7235  UnreachableObjectsFilter() {
7236    MarkReachableObjects();
7237  }
7238
7239  ~UnreachableObjectsFilter() {
7240    Isolate::Current()->heap()->mark_compact_collector()->ClearMarkbits();
7241  }
7242
7243  bool SkipObject(HeapObject* object) {
7244    MarkBit mark_bit = Marking::MarkBitFrom(object);
7245    return !mark_bit.Get();
7246  }
7247
7248 private:
7249  class MarkingVisitor : public ObjectVisitor {
7250   public:
7251    MarkingVisitor() : marking_stack_(10) {}
7252
7253    void VisitPointers(Object** start, Object** end) {
7254      for (Object** p = start; p < end; p++) {
7255        if (!(*p)->IsHeapObject()) continue;
7256        HeapObject* obj = HeapObject::cast(*p);
7257        MarkBit mark_bit = Marking::MarkBitFrom(obj);
7258        if (!mark_bit.Get()) {
7259          mark_bit.Set();
7260          marking_stack_.Add(obj);
7261        }
7262      }
7263    }
7264
7265    void TransitiveClosure() {
7266      while (!marking_stack_.is_empty()) {
7267        HeapObject* obj = marking_stack_.RemoveLast();
7268        obj->Iterate(this);
7269      }
7270    }
7271
7272   private:
7273    List<HeapObject*> marking_stack_;
7274  };
7275
7276  void MarkReachableObjects() {
7277    Heap* heap = Isolate::Current()->heap();
7278    MarkingVisitor visitor;
7279    heap->IterateRoots(&visitor, VISIT_ALL);
7280    visitor.TransitiveClosure();
7281  }
7282
7283  DisallowHeapAllocation no_allocation_;
7284};
7285
7286
7287HeapIterator::HeapIterator(Heap* heap)
7288    : heap_(heap),
7289      filtering_(HeapIterator::kNoFiltering),
7290      filter_(NULL) {
7291  Init();
7292}
7293
7294
7295HeapIterator::HeapIterator(Heap* heap,
7296                           HeapIterator::HeapObjectsFiltering filtering)
7297    : heap_(heap),
7298      filtering_(filtering),
7299      filter_(NULL) {
7300  Init();
7301}
7302
7303
7304HeapIterator::~HeapIterator() {
7305  Shutdown();
7306}
7307
7308
7309void HeapIterator::Init() {
7310  // Start the iteration.
7311  space_iterator_ = new SpaceIterator(heap_);
7312  switch (filtering_) {
7313    case kFilterUnreachable:
7314      filter_ = new UnreachableObjectsFilter;
7315      break;
7316    default:
7317      break;
7318  }
7319  object_iterator_ = space_iterator_->next();
7320}
7321
7322
7323void HeapIterator::Shutdown() {
7324#ifdef DEBUG
7325  // Assert that in filtering mode we have iterated through all
7326  // objects. Otherwise, heap will be left in an inconsistent state.
7327  if (filtering_ != kNoFiltering) {
7328    ASSERT(object_iterator_ == NULL);
7329  }
7330#endif
7331  // Make sure the last iterator is deallocated.
7332  delete space_iterator_;
7333  space_iterator_ = NULL;
7334  object_iterator_ = NULL;
7335  delete filter_;
7336  filter_ = NULL;
7337}
7338
7339
7340HeapObject* HeapIterator::next() {
7341  if (filter_ == NULL) return NextObject();
7342
7343  HeapObject* obj = NextObject();
7344  while (obj != NULL && filter_->SkipObject(obj)) obj = NextObject();
7345  return obj;
7346}
7347
7348
7349HeapObject* HeapIterator::NextObject() {
7350  // No iterator means we are done.
7351  if (object_iterator_ == NULL) return NULL;
7352
7353  if (HeapObject* obj = object_iterator_->next_object()) {
7354    // If the current iterator has more objects we are fine.
7355    return obj;
7356  } else {
7357    // Go though the spaces looking for one that has objects.
7358    while (space_iterator_->has_next()) {
7359      object_iterator_ = space_iterator_->next();
7360      if (HeapObject* obj = object_iterator_->next_object()) {
7361        return obj;
7362      }
7363    }
7364  }
7365  // Done with the last space.
7366  object_iterator_ = NULL;
7367  return NULL;
7368}
7369
7370
7371void HeapIterator::reset() {
7372  // Restart the iterator.
7373  Shutdown();
7374  Init();
7375}
7376
7377
7378#ifdef DEBUG
7379
7380Object* const PathTracer::kAnyGlobalObject = NULL;
7381
7382class PathTracer::MarkVisitor: public ObjectVisitor {
7383 public:
7384  explicit MarkVisitor(PathTracer* tracer) : tracer_(tracer) {}
7385  void VisitPointers(Object** start, Object** end) {
7386    // Scan all HeapObject pointers in [start, end)
7387    for (Object** p = start; !tracer_->found() && (p < end); p++) {
7388      if ((*p)->IsHeapObject())
7389        tracer_->MarkRecursively(p, this);
7390    }
7391  }
7392
7393 private:
7394  PathTracer* tracer_;
7395};
7396
7397
7398class PathTracer::UnmarkVisitor: public ObjectVisitor {
7399 public:
7400  explicit UnmarkVisitor(PathTracer* tracer) : tracer_(tracer) {}
7401  void VisitPointers(Object** start, Object** end) {
7402    // Scan all HeapObject pointers in [start, end)
7403    for (Object** p = start; p < end; p++) {
7404      if ((*p)->IsHeapObject())
7405        tracer_->UnmarkRecursively(p, this);
7406    }
7407  }
7408
7409 private:
7410  PathTracer* tracer_;
7411};
7412
7413
7414void PathTracer::VisitPointers(Object** start, Object** end) {
7415  bool done = ((what_to_find_ == FIND_FIRST) && found_target_);
7416  // Visit all HeapObject pointers in [start, end)
7417  for (Object** p = start; !done && (p < end); p++) {
7418    if ((*p)->IsHeapObject()) {
7419      TracePathFrom(p);
7420      done = ((what_to_find_ == FIND_FIRST) && found_target_);
7421    }
7422  }
7423}
7424
7425
7426void PathTracer::Reset() {
7427  found_target_ = false;
7428  object_stack_.Clear();
7429}
7430
7431
7432void PathTracer::TracePathFrom(Object** root) {
7433  ASSERT((search_target_ == kAnyGlobalObject) ||
7434         search_target_->IsHeapObject());
7435  found_target_in_trace_ = false;
7436  Reset();
7437
7438  MarkVisitor mark_visitor(this);
7439  MarkRecursively(root, &mark_visitor);
7440
7441  UnmarkVisitor unmark_visitor(this);
7442  UnmarkRecursively(root, &unmark_visitor);
7443
7444  ProcessResults();
7445}
7446
7447
7448static bool SafeIsNativeContext(HeapObject* obj) {
7449  return obj->map() == obj->GetHeap()->raw_unchecked_native_context_map();
7450}
7451
7452
7453void PathTracer::MarkRecursively(Object** p, MarkVisitor* mark_visitor) {
7454  if (!(*p)->IsHeapObject()) return;
7455
7456  HeapObject* obj = HeapObject::cast(*p);
7457
7458  Object* map = obj->map();
7459
7460  if (!map->IsHeapObject()) return;  // visited before
7461
7462  if (found_target_in_trace_) return;  // stop if target found
7463  object_stack_.Add(obj);
7464  if (((search_target_ == kAnyGlobalObject) && obj->IsJSGlobalObject()) ||
7465      (obj == search_target_)) {
7466    found_target_in_trace_ = true;
7467    found_target_ = true;
7468    return;
7469  }
7470
7471  bool is_native_context = SafeIsNativeContext(obj);
7472
7473  // not visited yet
7474  Map* map_p = reinterpret_cast<Map*>(HeapObject::cast(map));
7475
7476  Address map_addr = map_p->address();
7477
7478  obj->set_map_no_write_barrier(reinterpret_cast<Map*>(map_addr + kMarkTag));
7479
7480  // Scan the object body.
7481  if (is_native_context && (visit_mode_ == VISIT_ONLY_STRONG)) {
7482    // This is specialized to scan Context's properly.
7483    Object** start = reinterpret_cast<Object**>(obj->address() +
7484                                                Context::kHeaderSize);
7485    Object** end = reinterpret_cast<Object**>(obj->address() +
7486        Context::kHeaderSize + Context::FIRST_WEAK_SLOT * kPointerSize);
7487    mark_visitor->VisitPointers(start, end);
7488  } else {
7489    obj->IterateBody(map_p->instance_type(),
7490                     obj->SizeFromMap(map_p),
7491                     mark_visitor);
7492  }
7493
7494  // Scan the map after the body because the body is a lot more interesting
7495  // when doing leak detection.
7496  MarkRecursively(&map, mark_visitor);
7497
7498  if (!found_target_in_trace_)  // don't pop if found the target
7499    object_stack_.RemoveLast();
7500}
7501
7502
7503void PathTracer::UnmarkRecursively(Object** p, UnmarkVisitor* unmark_visitor) {
7504  if (!(*p)->IsHeapObject()) return;
7505
7506  HeapObject* obj = HeapObject::cast(*p);
7507
7508  Object* map = obj->map();
7509
7510  if (map->IsHeapObject()) return;  // unmarked already
7511
7512  Address map_addr = reinterpret_cast<Address>(map);
7513
7514  map_addr -= kMarkTag;
7515
7516  ASSERT_TAG_ALIGNED(map_addr);
7517
7518  HeapObject* map_p = HeapObject::FromAddress(map_addr);
7519
7520  obj->set_map_no_write_barrier(reinterpret_cast<Map*>(map_p));
7521
7522  UnmarkRecursively(reinterpret_cast<Object**>(&map_p), unmark_visitor);
7523
7524  obj->IterateBody(Map::cast(map_p)->instance_type(),
7525                   obj->SizeFromMap(Map::cast(map_p)),
7526                   unmark_visitor);
7527}
7528
7529
7530void PathTracer::ProcessResults() {
7531  if (found_target_) {
7532    PrintF("=====================================\n");
7533    PrintF("====        Path to object       ====\n");
7534    PrintF("=====================================\n\n");
7535
7536    ASSERT(!object_stack_.is_empty());
7537    for (int i = 0; i < object_stack_.length(); i++) {
7538      if (i > 0) PrintF("\n     |\n     |\n     V\n\n");
7539      Object* obj = object_stack_[i];
7540      obj->Print();
7541    }
7542    PrintF("=====================================\n");
7543  }
7544}
7545
7546
7547// Triggers a depth-first traversal of reachable objects from one
7548// given root object and finds a path to a specific heap object and
7549// prints it.
7550void Heap::TracePathToObjectFrom(Object* target, Object* root) {
7551  PathTracer tracer(target, PathTracer::FIND_ALL, VISIT_ALL);
7552  tracer.VisitPointer(&root);
7553}
7554
7555
7556// Triggers a depth-first traversal of reachable objects from roots
7557// and finds a path to a specific heap object and prints it.
7558void Heap::TracePathToObject(Object* target) {
7559  PathTracer tracer(target, PathTracer::FIND_ALL, VISIT_ALL);
7560  IterateRoots(&tracer, VISIT_ONLY_STRONG);
7561}
7562
7563
7564// Triggers a depth-first traversal of reachable objects from roots
7565// and finds a path to any global object and prints it. Useful for
7566// determining the source for leaks of global objects.
7567void Heap::TracePathToGlobal() {
7568  PathTracer tracer(PathTracer::kAnyGlobalObject,
7569                    PathTracer::FIND_ALL,
7570                    VISIT_ALL);
7571  IterateRoots(&tracer, VISIT_ONLY_STRONG);
7572}
7573#endif
7574
7575
7576static intptr_t CountTotalHolesSize(Heap* heap) {
7577  intptr_t holes_size = 0;
7578  OldSpaces spaces(heap);
7579  for (OldSpace* space = spaces.next();
7580       space != NULL;
7581       space = spaces.next()) {
7582    holes_size += space->Waste() + space->Available();
7583  }
7584  return holes_size;
7585}
7586
7587
7588GCTracer::GCTracer(Heap* heap,
7589                   const char* gc_reason,
7590                   const char* collector_reason)
7591    : start_time_(0.0),
7592      start_object_size_(0),
7593      start_memory_size_(0),
7594      gc_count_(0),
7595      full_gc_count_(0),
7596      allocated_since_last_gc_(0),
7597      spent_in_mutator_(0),
7598      promoted_objects_size_(0),
7599      nodes_died_in_new_space_(0),
7600      nodes_copied_in_new_space_(0),
7601      nodes_promoted_(0),
7602      heap_(heap),
7603      gc_reason_(gc_reason),
7604      collector_reason_(collector_reason) {
7605  if (!FLAG_trace_gc && !FLAG_print_cumulative_gc_stat) return;
7606  start_time_ = OS::TimeCurrentMillis();
7607  start_object_size_ = heap_->SizeOfObjects();
7608  start_memory_size_ = heap_->isolate()->memory_allocator()->Size();
7609
7610  for (int i = 0; i < Scope::kNumberOfScopes; i++) {
7611    scopes_[i] = 0;
7612  }
7613
7614  in_free_list_or_wasted_before_gc_ = CountTotalHolesSize(heap);
7615
7616  allocated_since_last_gc_ =
7617      heap_->SizeOfObjects() - heap_->alive_after_last_gc_;
7618
7619  if (heap_->last_gc_end_timestamp_ > 0) {
7620    spent_in_mutator_ = Max(start_time_ - heap_->last_gc_end_timestamp_, 0.0);
7621  }
7622
7623  steps_count_ = heap_->incremental_marking()->steps_count();
7624  steps_took_ = heap_->incremental_marking()->steps_took();
7625  longest_step_ = heap_->incremental_marking()->longest_step();
7626  steps_count_since_last_gc_ =
7627      heap_->incremental_marking()->steps_count_since_last_gc();
7628  steps_took_since_last_gc_ =
7629      heap_->incremental_marking()->steps_took_since_last_gc();
7630}
7631
7632
7633GCTracer::~GCTracer() {
7634  // Printf ONE line iff flag is set.
7635  if (!FLAG_trace_gc && !FLAG_print_cumulative_gc_stat) return;
7636
7637  bool first_gc = (heap_->last_gc_end_timestamp_ == 0);
7638
7639  heap_->alive_after_last_gc_ = heap_->SizeOfObjects();
7640  heap_->last_gc_end_timestamp_ = OS::TimeCurrentMillis();
7641
7642  double time = heap_->last_gc_end_timestamp_ - start_time_;
7643
7644  // Update cumulative GC statistics if required.
7645  if (FLAG_print_cumulative_gc_stat) {
7646    heap_->total_gc_time_ms_ += time;
7647    heap_->max_gc_pause_ = Max(heap_->max_gc_pause_, time);
7648    heap_->max_alive_after_gc_ = Max(heap_->max_alive_after_gc_,
7649                                     heap_->alive_after_last_gc_);
7650    if (!first_gc) {
7651      heap_->min_in_mutator_ = Min(heap_->min_in_mutator_,
7652                                   spent_in_mutator_);
7653    }
7654  } else if (FLAG_trace_gc_verbose) {
7655    heap_->total_gc_time_ms_ += time;
7656  }
7657
7658  if (collector_ == SCAVENGER && FLAG_trace_gc_ignore_scavenger) return;
7659
7660  heap_->AddMarkingTime(scopes_[Scope::MC_MARK]);
7661
7662  if (FLAG_print_cumulative_gc_stat && !FLAG_trace_gc) return;
7663  PrintPID("%8.0f ms: ", heap_->isolate()->time_millis_since_init());
7664
7665  if (!FLAG_trace_gc_nvp) {
7666    int external_time = static_cast<int>(scopes_[Scope::EXTERNAL]);
7667
7668    double end_memory_size_mb =
7669        static_cast<double>(heap_->isolate()->memory_allocator()->Size()) / MB;
7670
7671    PrintF("%s %.1f (%.1f) -> %.1f (%.1f) MB, ",
7672           CollectorString(),
7673           static_cast<double>(start_object_size_) / MB,
7674           static_cast<double>(start_memory_size_) / MB,
7675           SizeOfHeapObjects(),
7676           end_memory_size_mb);
7677
7678    if (external_time > 0) PrintF("%d / ", external_time);
7679    PrintF("%.1f ms", time);
7680    if (steps_count_ > 0) {
7681      if (collector_ == SCAVENGER) {
7682        PrintF(" (+ %.1f ms in %d steps since last GC)",
7683               steps_took_since_last_gc_,
7684               steps_count_since_last_gc_);
7685      } else {
7686        PrintF(" (+ %.1f ms in %d steps since start of marking, "
7687                   "biggest step %.1f ms)",
7688               steps_took_,
7689               steps_count_,
7690               longest_step_);
7691      }
7692    }
7693
7694    if (gc_reason_ != NULL) {
7695      PrintF(" [%s]", gc_reason_);
7696    }
7697
7698    if (collector_reason_ != NULL) {
7699      PrintF(" [%s]", collector_reason_);
7700    }
7701
7702    PrintF(".\n");
7703  } else {
7704    PrintF("pause=%.1f ", time);
7705    PrintF("mutator=%.1f ", spent_in_mutator_);
7706    PrintF("gc=");
7707    switch (collector_) {
7708      case SCAVENGER:
7709        PrintF("s");
7710        break;
7711      case MARK_COMPACTOR:
7712        PrintF("ms");
7713        break;
7714      default:
7715        UNREACHABLE();
7716    }
7717    PrintF(" ");
7718
7719    PrintF("external=%.1f ", scopes_[Scope::EXTERNAL]);
7720    PrintF("mark=%.1f ", scopes_[Scope::MC_MARK]);
7721    PrintF("sweep=%.1f ", scopes_[Scope::MC_SWEEP]);
7722    PrintF("sweepns=%.1f ", scopes_[Scope::MC_SWEEP_NEWSPACE]);
7723    PrintF("evacuate=%.1f ", scopes_[Scope::MC_EVACUATE_PAGES]);
7724    PrintF("new_new=%.1f ", scopes_[Scope::MC_UPDATE_NEW_TO_NEW_POINTERS]);
7725    PrintF("root_new=%.1f ", scopes_[Scope::MC_UPDATE_ROOT_TO_NEW_POINTERS]);
7726    PrintF("old_new=%.1f ", scopes_[Scope::MC_UPDATE_OLD_TO_NEW_POINTERS]);
7727    PrintF("compaction_ptrs=%.1f ",
7728        scopes_[Scope::MC_UPDATE_POINTERS_TO_EVACUATED]);
7729    PrintF("intracompaction_ptrs=%.1f ",
7730        scopes_[Scope::MC_UPDATE_POINTERS_BETWEEN_EVACUATED]);
7731    PrintF("misc_compaction=%.1f ", scopes_[Scope::MC_UPDATE_MISC_POINTERS]);
7732    PrintF("weakcollection_process=%.1f ",
7733        scopes_[Scope::MC_WEAKCOLLECTION_PROCESS]);
7734    PrintF("weakcollection_clear=%.1f ",
7735        scopes_[Scope::MC_WEAKCOLLECTION_CLEAR]);
7736
7737    PrintF("total_size_before=%" V8_PTR_PREFIX "d ", start_object_size_);
7738    PrintF("total_size_after=%" V8_PTR_PREFIX "d ", heap_->SizeOfObjects());
7739    PrintF("holes_size_before=%" V8_PTR_PREFIX "d ",
7740           in_free_list_or_wasted_before_gc_);
7741    PrintF("holes_size_after=%" V8_PTR_PREFIX "d ", CountTotalHolesSize(heap_));
7742
7743    PrintF("allocated=%" V8_PTR_PREFIX "d ", allocated_since_last_gc_);
7744    PrintF("promoted=%" V8_PTR_PREFIX "d ", promoted_objects_size_);
7745    PrintF("nodes_died_in_new=%d ", nodes_died_in_new_space_);
7746    PrintF("nodes_copied_in_new=%d ", nodes_copied_in_new_space_);
7747    PrintF("nodes_promoted=%d ", nodes_promoted_);
7748
7749    if (collector_ == SCAVENGER) {
7750      PrintF("stepscount=%d ", steps_count_since_last_gc_);
7751      PrintF("stepstook=%.1f ", steps_took_since_last_gc_);
7752    } else {
7753      PrintF("stepscount=%d ", steps_count_);
7754      PrintF("stepstook=%.1f ", steps_took_);
7755      PrintF("longeststep=%.1f ", longest_step_);
7756    }
7757
7758    PrintF("\n");
7759  }
7760
7761  heap_->PrintShortHeapStatistics();
7762}
7763
7764
7765const char* GCTracer::CollectorString() {
7766  switch (collector_) {
7767    case SCAVENGER:
7768      return "Scavenge";
7769    case MARK_COMPACTOR:
7770      return "Mark-sweep";
7771  }
7772  return "Unknown GC";
7773}
7774
7775
7776int KeyedLookupCache::Hash(Map* map, Name* name) {
7777  // Uses only lower 32 bits if pointers are larger.
7778  uintptr_t addr_hash =
7779      static_cast<uint32_t>(reinterpret_cast<uintptr_t>(map)) >> kMapHashShift;
7780  return static_cast<uint32_t>((addr_hash ^ name->Hash()) & kCapacityMask);
7781}
7782
7783
7784int KeyedLookupCache::Lookup(Map* map, Name* name) {
7785  int index = (Hash(map, name) & kHashMask);
7786  for (int i = 0; i < kEntriesPerBucket; i++) {
7787    Key& key = keys_[index + i];
7788    if ((key.map == map) && key.name->Equals(name)) {
7789      return field_offsets_[index + i];
7790    }
7791  }
7792  return kNotFound;
7793}
7794
7795
7796void KeyedLookupCache::Update(Map* map, Name* name, int field_offset) {
7797  if (!name->IsUniqueName()) {
7798    String* internalized_string;
7799    if (!HEAP->InternalizeStringIfExists(
7800            String::cast(name), &internalized_string)) {
7801      return;
7802    }
7803    name = internalized_string;
7804  }
7805  // This cache is cleared only between mark compact passes, so we expect the
7806  // cache to only contain old space names.
7807  ASSERT(!HEAP->InNewSpace(name));
7808
7809  int index = (Hash(map, name) & kHashMask);
7810  // After a GC there will be free slots, so we use them in order (this may
7811  // help to get the most frequently used one in position 0).
7812  for (int i = 0; i< kEntriesPerBucket; i++) {
7813    Key& key = keys_[index];
7814    Object* free_entry_indicator = NULL;
7815    if (key.map == free_entry_indicator) {
7816      key.map = map;
7817      key.name = name;
7818      field_offsets_[index + i] = field_offset;
7819      return;
7820    }
7821  }
7822  // No free entry found in this bucket, so we move them all down one and
7823  // put the new entry at position zero.
7824  for (int i = kEntriesPerBucket - 1; i > 0; i--) {
7825    Key& key = keys_[index + i];
7826    Key& key2 = keys_[index + i - 1];
7827    key = key2;
7828    field_offsets_[index + i] = field_offsets_[index + i - 1];
7829  }
7830
7831  // Write the new first entry.
7832  Key& key = keys_[index];
7833  key.map = map;
7834  key.name = name;
7835  field_offsets_[index] = field_offset;
7836}
7837
7838
7839void KeyedLookupCache::Clear() {
7840  for (int index = 0; index < kLength; index++) keys_[index].map = NULL;
7841}
7842
7843
7844void DescriptorLookupCache::Clear() {
7845  for (int index = 0; index < kLength; index++) keys_[index].source = NULL;
7846}
7847
7848
7849#ifdef DEBUG
7850void Heap::GarbageCollectionGreedyCheck() {
7851  ASSERT(FLAG_gc_greedy);
7852  if (isolate_->bootstrapper()->IsActive()) return;
7853  if (disallow_allocation_failure()) return;
7854  CollectGarbage(NEW_SPACE);
7855}
7856#endif
7857
7858
7859TranscendentalCache::SubCache::SubCache(Type t)
7860  : type_(t),
7861    isolate_(Isolate::Current()) {
7862  uint32_t in0 = 0xffffffffu;  // Bit-pattern for a NaN that isn't
7863  uint32_t in1 = 0xffffffffu;  // generated by the FPU.
7864  for (int i = 0; i < kCacheSize; i++) {
7865    elements_[i].in[0] = in0;
7866    elements_[i].in[1] = in1;
7867    elements_[i].output = NULL;
7868  }
7869}
7870
7871
7872void TranscendentalCache::Clear() {
7873  for (int i = 0; i < kNumberOfCaches; i++) {
7874    if (caches_[i] != NULL) {
7875      delete caches_[i];
7876      caches_[i] = NULL;
7877    }
7878  }
7879}
7880
7881
7882void ExternalStringTable::CleanUp() {
7883  int last = 0;
7884  for (int i = 0; i < new_space_strings_.length(); ++i) {
7885    if (new_space_strings_[i] == heap_->the_hole_value()) {
7886      continue;
7887    }
7888    if (heap_->InNewSpace(new_space_strings_[i])) {
7889      new_space_strings_[last++] = new_space_strings_[i];
7890    } else {
7891      old_space_strings_.Add(new_space_strings_[i]);
7892    }
7893  }
7894  new_space_strings_.Rewind(last);
7895  new_space_strings_.Trim();
7896
7897  last = 0;
7898  for (int i = 0; i < old_space_strings_.length(); ++i) {
7899    if (old_space_strings_[i] == heap_->the_hole_value()) {
7900      continue;
7901    }
7902    ASSERT(!heap_->InNewSpace(old_space_strings_[i]));
7903    old_space_strings_[last++] = old_space_strings_[i];
7904  }
7905  old_space_strings_.Rewind(last);
7906  old_space_strings_.Trim();
7907#ifdef VERIFY_HEAP
7908  if (FLAG_verify_heap) {
7909    Verify();
7910  }
7911#endif
7912}
7913
7914
7915void ExternalStringTable::TearDown() {
7916  new_space_strings_.Free();
7917  old_space_strings_.Free();
7918}
7919
7920
7921void Heap::QueueMemoryChunkForFree(MemoryChunk* chunk) {
7922  chunk->set_next_chunk(chunks_queued_for_free_);
7923  chunks_queued_for_free_ = chunk;
7924}
7925
7926
7927void Heap::FreeQueuedChunks() {
7928  if (chunks_queued_for_free_ == NULL) return;
7929  MemoryChunk* next;
7930  MemoryChunk* chunk;
7931  for (chunk = chunks_queued_for_free_; chunk != NULL; chunk = next) {
7932    next = chunk->next_chunk();
7933    chunk->SetFlag(MemoryChunk::ABOUT_TO_BE_FREED);
7934
7935    if (chunk->owner()->identity() == LO_SPACE) {
7936      // StoreBuffer::Filter relies on MemoryChunk::FromAnyPointerAddress.
7937      // If FromAnyPointerAddress encounters a slot that belongs to a large
7938      // chunk queued for deletion it will fail to find the chunk because
7939      // it try to perform a search in the list of pages owned by of the large
7940      // object space and queued chunks were detached from that list.
7941      // To work around this we split large chunk into normal kPageSize aligned
7942      // pieces and initialize size, owner and flags field of every piece.
7943      // If FromAnyPointerAddress encounters a slot that belongs to one of
7944      // these smaller pieces it will treat it as a slot on a normal Page.
7945      Address chunk_end = chunk->address() + chunk->size();
7946      MemoryChunk* inner = MemoryChunk::FromAddress(
7947          chunk->address() + Page::kPageSize);
7948      MemoryChunk* inner_last = MemoryChunk::FromAddress(chunk_end - 1);
7949      while (inner <= inner_last) {
7950        // Size of a large chunk is always a multiple of
7951        // OS::AllocateAlignment() so there is always
7952        // enough space for a fake MemoryChunk header.
7953        Address area_end = Min(inner->address() + Page::kPageSize, chunk_end);
7954        // Guard against overflow.
7955        if (area_end < inner->address()) area_end = chunk_end;
7956        inner->SetArea(inner->address(), area_end);
7957        inner->set_size(Page::kPageSize);
7958        inner->set_owner(lo_space());
7959        inner->SetFlag(MemoryChunk::ABOUT_TO_BE_FREED);
7960        inner = MemoryChunk::FromAddress(
7961            inner->address() + Page::kPageSize);
7962      }
7963    }
7964  }
7965  isolate_->heap()->store_buffer()->Compact();
7966  isolate_->heap()->store_buffer()->Filter(MemoryChunk::ABOUT_TO_BE_FREED);
7967  for (chunk = chunks_queued_for_free_; chunk != NULL; chunk = next) {
7968    next = chunk->next_chunk();
7969    isolate_->memory_allocator()->Free(chunk);
7970  }
7971  chunks_queued_for_free_ = NULL;
7972}
7973
7974
7975void Heap::RememberUnmappedPage(Address page, bool compacted) {
7976  uintptr_t p = reinterpret_cast<uintptr_t>(page);
7977  // Tag the page pointer to make it findable in the dump file.
7978  if (compacted) {
7979    p ^= 0xc1ead & (Page::kPageSize - 1);  // Cleared.
7980  } else {
7981    p ^= 0x1d1ed & (Page::kPageSize - 1);  // I died.
7982  }
7983  remembered_unmapped_pages_[remembered_unmapped_pages_index_] =
7984      reinterpret_cast<Address>(p);
7985  remembered_unmapped_pages_index_++;
7986  remembered_unmapped_pages_index_ %= kRememberedUnmappedPages;
7987}
7988
7989
7990void Heap::ClearObjectStats(bool clear_last_time_stats) {
7991  memset(object_counts_, 0, sizeof(object_counts_));
7992  memset(object_sizes_, 0, sizeof(object_sizes_));
7993  if (clear_last_time_stats) {
7994    memset(object_counts_last_time_, 0, sizeof(object_counts_last_time_));
7995    memset(object_sizes_last_time_, 0, sizeof(object_sizes_last_time_));
7996  }
7997}
7998
7999
8000static LazyMutex checkpoint_object_stats_mutex = LAZY_MUTEX_INITIALIZER;
8001
8002
8003void Heap::CheckpointObjectStats() {
8004  ScopedLock lock(checkpoint_object_stats_mutex.Pointer());
8005  Counters* counters = isolate()->counters();
8006#define ADJUST_LAST_TIME_OBJECT_COUNT(name)                                    \
8007  counters->count_of_##name()->Increment(                                      \
8008      static_cast<int>(object_counts_[name]));                                 \
8009  counters->count_of_##name()->Decrement(                                      \
8010      static_cast<int>(object_counts_last_time_[name]));                       \
8011  counters->size_of_##name()->Increment(                                       \
8012      static_cast<int>(object_sizes_[name]));                                  \
8013  counters->size_of_##name()->Decrement(                                       \
8014      static_cast<int>(object_sizes_last_time_[name]));
8015  INSTANCE_TYPE_LIST(ADJUST_LAST_TIME_OBJECT_COUNT)
8016#undef ADJUST_LAST_TIME_OBJECT_COUNT
8017  int index;
8018#define ADJUST_LAST_TIME_OBJECT_COUNT(name)               \
8019  index = FIRST_CODE_KIND_SUB_TYPE + Code::name;          \
8020  counters->count_of_CODE_TYPE_##name()->Increment(       \
8021      static_cast<int>(object_counts_[index]));           \
8022  counters->count_of_CODE_TYPE_##name()->Decrement(       \
8023      static_cast<int>(object_counts_last_time_[index])); \
8024  counters->size_of_CODE_TYPE_##name()->Increment(        \
8025      static_cast<int>(object_sizes_[index]));            \
8026  counters->size_of_CODE_TYPE_##name()->Decrement(        \
8027      static_cast<int>(object_sizes_last_time_[index]));
8028  CODE_KIND_LIST(ADJUST_LAST_TIME_OBJECT_COUNT)
8029#undef ADJUST_LAST_TIME_OBJECT_COUNT
8030#define ADJUST_LAST_TIME_OBJECT_COUNT(name)               \
8031  index = FIRST_FIXED_ARRAY_SUB_TYPE + name;              \
8032  counters->count_of_FIXED_ARRAY_##name()->Increment(     \
8033      static_cast<int>(object_counts_[index]));           \
8034  counters->count_of_FIXED_ARRAY_##name()->Decrement(     \
8035      static_cast<int>(object_counts_last_time_[index])); \
8036  counters->size_of_FIXED_ARRAY_##name()->Increment(      \
8037      static_cast<int>(object_sizes_[index]));            \
8038  counters->size_of_FIXED_ARRAY_##name()->Decrement(      \
8039      static_cast<int>(object_sizes_last_time_[index]));
8040  FIXED_ARRAY_SUB_INSTANCE_TYPE_LIST(ADJUST_LAST_TIME_OBJECT_COUNT)
8041#undef ADJUST_LAST_TIME_OBJECT_COUNT
8042
8043  OS::MemCopy(object_counts_last_time_, object_counts_, sizeof(object_counts_));
8044  OS::MemCopy(object_sizes_last_time_, object_sizes_, sizeof(object_sizes_));
8045  ClearObjectStats();
8046}
8047
8048
8049Heap::RelocationLock::RelocationLock(Heap* heap) : heap_(heap) {
8050  if (FLAG_parallel_recompilation) {
8051    heap_->relocation_mutex_->Lock();
8052#ifdef DEBUG
8053    heap_->relocation_mutex_locked_by_optimizer_thread_ =
8054        heap_->isolate()->optimizing_compiler_thread()->IsOptimizerThread();
8055#endif  // DEBUG
8056  }
8057}
8058
8059} }  // namespace v8::internal
8060