codegen-arm.cc revision b8e0da25ee8efac3bb05cd6b2730aafbd96119f4
1// Copyright 2010 the V8 project authors. All rights reserved.
2// Redistribution and use in source and binary forms, with or without
3// modification, are permitted provided that the following conditions are
4// met:
5//
6//     * Redistributions of source code must retain the above copyright
7//       notice, this list of conditions and the following disclaimer.
8//     * Redistributions in binary form must reproduce the above
9//       copyright notice, this list of conditions and the following
10//       disclaimer in the documentation and/or other materials provided
11//       with the distribution.
12//     * Neither the name of Google Inc. nor the names of its
13//       contributors may be used to endorse or promote products derived
14//       from this software without specific prior written permission.
15//
16// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27
28#include "v8.h"
29
30#if defined(V8_TARGET_ARCH_ARM)
31
32#include "bootstrapper.h"
33#include "code-stubs.h"
34#include "codegen-inl.h"
35#include "compiler.h"
36#include "debug.h"
37#include "ic-inl.h"
38#include "jsregexp.h"
39#include "jump-target-inl.h"
40#include "parser.h"
41#include "regexp-macro-assembler.h"
42#include "regexp-stack.h"
43#include "register-allocator-inl.h"
44#include "runtime.h"
45#include "scopes.h"
46#include "stub-cache.h"
47#include "virtual-frame-inl.h"
48#include "virtual-frame-arm-inl.h"
49
50namespace v8 {
51namespace internal {
52
53
54#define __ ACCESS_MASM(masm_)
55
56// -------------------------------------------------------------------------
57// Platform-specific DeferredCode functions.
58
59void DeferredCode::SaveRegisters() {
60  // On ARM you either have a completely spilled frame or you
61  // handle it yourself, but at the moment there's no automation
62  // of registers and deferred code.
63}
64
65
66void DeferredCode::RestoreRegisters() {
67}
68
69
70// -------------------------------------------------------------------------
71// Platform-specific RuntimeCallHelper functions.
72
73void VirtualFrameRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const {
74  frame_state_->frame()->AssertIsSpilled();
75}
76
77
78void VirtualFrameRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
79}
80
81
82void StubRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const {
83  masm->EnterInternalFrame();
84}
85
86
87void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
88  masm->LeaveInternalFrame();
89}
90
91
92// -------------------------------------------------------------------------
93// CodeGenState implementation.
94
95CodeGenState::CodeGenState(CodeGenerator* owner)
96    : owner_(owner),
97      previous_(owner->state()) {
98  owner->set_state(this);
99}
100
101
102ConditionCodeGenState::ConditionCodeGenState(CodeGenerator* owner,
103                                             JumpTarget* true_target,
104                                             JumpTarget* false_target)
105    : CodeGenState(owner),
106      true_target_(true_target),
107      false_target_(false_target) {
108  owner->set_state(this);
109}
110
111
112TypeInfoCodeGenState::TypeInfoCodeGenState(CodeGenerator* owner,
113                                           Slot* slot,
114                                           TypeInfo type_info)
115    : CodeGenState(owner),
116      slot_(slot) {
117  owner->set_state(this);
118  old_type_info_ = owner->set_type_info(slot, type_info);
119}
120
121
122CodeGenState::~CodeGenState() {
123  ASSERT(owner_->state() == this);
124  owner_->set_state(previous_);
125}
126
127
128TypeInfoCodeGenState::~TypeInfoCodeGenState() {
129  owner()->set_type_info(slot_, old_type_info_);
130}
131
132// -------------------------------------------------------------------------
133// CodeGenerator implementation
134
135int CodeGenerator::inlined_write_barrier_size_ = -1;
136
137CodeGenerator::CodeGenerator(MacroAssembler* masm)
138    : deferred_(8),
139      masm_(masm),
140      info_(NULL),
141      frame_(NULL),
142      allocator_(NULL),
143      cc_reg_(al),
144      state_(NULL),
145      loop_nesting_(0),
146      type_info_(NULL),
147      function_return_(JumpTarget::BIDIRECTIONAL),
148      function_return_is_shadowed_(false) {
149}
150
151
152// Calling conventions:
153// fp: caller's frame pointer
154// sp: stack pointer
155// r1: called JS function
156// cp: callee's context
157
158void CodeGenerator::Generate(CompilationInfo* info) {
159  // Record the position for debugging purposes.
160  CodeForFunctionPosition(info->function());
161  Comment cmnt(masm_, "[ function compiled by virtual frame code generator");
162
163  // Initialize state.
164  info_ = info;
165
166  int slots = scope()->num_parameters() + scope()->num_stack_slots();
167  ScopedVector<TypeInfo> type_info_array(slots);
168  for (int i = 0; i < slots; i++) {
169    type_info_array[i] = TypeInfo::Unknown();
170  }
171  type_info_ = &type_info_array;
172
173  ASSERT(allocator_ == NULL);
174  RegisterAllocator register_allocator(this);
175  allocator_ = &register_allocator;
176  ASSERT(frame_ == NULL);
177  frame_ = new VirtualFrame();
178  cc_reg_ = al;
179
180  // Adjust for function-level loop nesting.
181  ASSERT_EQ(0, loop_nesting_);
182  loop_nesting_ = info->is_in_loop() ? 1 : 0;
183
184  {
185    CodeGenState state(this);
186
187    // Entry:
188    // Stack: receiver, arguments
189    // lr: return address
190    // fp: caller's frame pointer
191    // sp: stack pointer
192    // r1: called JS function
193    // cp: callee's context
194    allocator_->Initialize();
195
196#ifdef DEBUG
197    if (strlen(FLAG_stop_at) > 0 &&
198        info->function()->name()->IsEqualTo(CStrVector(FLAG_stop_at))) {
199      frame_->SpillAll();
200      __ stop("stop-at");
201    }
202#endif
203
204    frame_->Enter();
205    // tos: code slot
206
207    // Allocate space for locals and initialize them.  This also checks
208    // for stack overflow.
209    frame_->AllocateStackSlots();
210
211    frame_->AssertIsSpilled();
212    int heap_slots = scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
213    if (heap_slots > 0) {
214      // Allocate local context.
215      // Get outer context and create a new context based on it.
216      __ ldr(r0, frame_->Function());
217      frame_->EmitPush(r0);
218      if (heap_slots <= FastNewContextStub::kMaximumSlots) {
219        FastNewContextStub stub(heap_slots);
220        frame_->CallStub(&stub, 1);
221      } else {
222        frame_->CallRuntime(Runtime::kNewContext, 1);
223      }
224
225#ifdef DEBUG
226      JumpTarget verified_true;
227      __ cmp(r0, cp);
228      verified_true.Branch(eq);
229      __ stop("NewContext: r0 is expected to be the same as cp");
230      verified_true.Bind();
231#endif
232      // Update context local.
233      __ str(cp, frame_->Context());
234    }
235
236    // TODO(1241774): Improve this code:
237    // 1) only needed if we have a context
238    // 2) no need to recompute context ptr every single time
239    // 3) don't copy parameter operand code from SlotOperand!
240    {
241      Comment cmnt2(masm_, "[ copy context parameters into .context");
242      // Note that iteration order is relevant here! If we have the same
243      // parameter twice (e.g., function (x, y, x)), and that parameter
244      // needs to be copied into the context, the value that ends up in
245      // the context must be that of the last (rightmost) occurrence. This
246      // is a rare case, so we don't check for it; instead we rely on the
247      // copying order: such a parameter is copied repeatedly into the same
248      // context location and thus the last value is what is seen inside
249      // the function.
250      frame_->AssertIsSpilled();
251      for (int i = 0; i < scope()->num_parameters(); i++) {
252        Variable* par = scope()->parameter(i);
253        Slot* slot = par->AsSlot();
254        if (slot != NULL && slot->type() == Slot::CONTEXT) {
255          ASSERT(!scope()->is_global_scope());  // No params in global scope.
256          __ ldr(r1, frame_->ParameterAt(i));
257          // Loads r2 with context; used below in RecordWrite.
258          __ str(r1, SlotOperand(slot, r2));
259          // Load the offset into r3.
260          int slot_offset =
261              FixedArray::kHeaderSize + slot->index() * kPointerSize;
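          // Emit the write barrier so the GC is told about the pointer
          // that was just stored into the context object.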
262          __ RecordWrite(r2, Operand(slot_offset), r3, r1);
263        }
264      }
265    }
266
267    // Store the arguments object.  This must happen after context
268    // initialization because the arguments object may be stored in
269    // the context.
270    if (ArgumentsMode() != NO_ARGUMENTS_ALLOCATION) {
271      StoreArgumentsObject(true);
272    }
273
274    // Initialize ThisFunction reference if present.
275    if (scope()->is_function_scope() && scope()->function() != NULL) {
276      frame_->EmitPushRoot(Heap::kTheHoleValueRootIndex);
277      StoreToSlot(scope()->function()->AsSlot(), NOT_CONST_INIT);
278    }
279
280    // Initialize the function return target after the locals are set
281    // up, because it needs the expected frame height from the frame.
282    function_return_.SetExpectedHeight();
283    function_return_is_shadowed_ = false;
284
285    // Generate code to 'execute' declarations and initialize functions
286    // (source elements). In case of an illegal redeclaration we need to
287    // handle that instead of processing the declarations.
288    if (scope()->HasIllegalRedeclaration()) {
289      Comment cmnt(masm_, "[ illegal redeclarations");
290      scope()->VisitIllegalRedeclaration(this);
291    } else {
292      Comment cmnt(masm_, "[ declarations");
293      ProcessDeclarations(scope()->declarations());
294      // Bail out if a stack-overflow exception occurred when processing
295      // declarations.
296      if (HasStackOverflow()) return;
297    }
298
299    if (FLAG_trace) {
300      frame_->CallRuntime(Runtime::kTraceEnter, 0);
301      // Ignore the return value.
302    }
303
304    // Compile the body of the function in a vanilla state. Don't
305    // bother compiling all the code if the scope has an illegal
306    // redeclaration.
307    if (!scope()->HasIllegalRedeclaration()) {
308      Comment cmnt(masm_, "[ function body");
309#ifdef DEBUG
310      bool is_builtin = Bootstrapper::IsActive();
311      bool should_trace =
312          is_builtin ? FLAG_trace_builtin_calls : FLAG_trace_calls;
313      if (should_trace) {
314        frame_->CallRuntime(Runtime::kDebugTrace, 0);
315        // Ignore the return value.
316      }
317#endif
318      VisitStatements(info->function()->body());
319    }
320  }
321
322  // Handle the return from the function.
323  if (has_valid_frame()) {
324    // If there is a valid frame, control flow can fall off the end of
325    // the body.  In that case there is an implicit return statement.
326    ASSERT(!function_return_is_shadowed_);
327    frame_->PrepareForReturn();
328    __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
329    if (function_return_.is_bound()) {
330      function_return_.Jump();
331    } else {
332      function_return_.Bind();
333      GenerateReturnSequence();
334    }
335  } else if (function_return_.is_linked()) {
336    // If the return target has dangling jumps to it, then we have not
337    // yet generated the return sequence.  This can happen when (a)
338    // control does not flow off the end of the body so we did not
339    // compile an artificial return statement just above, and (b) there
340    // are return statements in the body but (c) they are all shadowed.
341    function_return_.Bind();
342    GenerateReturnSequence();
343  }
344
345  // Adjust for function-level loop nesting.
346  ASSERT(loop_nesting_ == (info->is_in_loop() ? 1 : 0));
347  loop_nesting_ = 0;
348
349  // Code generation state must be reset.
350  ASSERT(!has_cc());
351  ASSERT(state_ == NULL);
352  ASSERT(loop_nesting() == 0);
353  ASSERT(!function_return_is_shadowed_);
354  function_return_.Unuse();
355  DeleteFrame();
356
357  // Process any deferred code using the register allocator.
358  if (!HasStackOverflow()) {
359    ProcessDeferred();
360  }
361
362  allocator_ = NULL;
363  type_info_ = NULL;
364}
365
366
367int CodeGenerator::NumberOfSlot(Slot* slot) {
368  if (slot == NULL) return kInvalidSlotNumber;
369  switch (slot->type()) {
370    case Slot::PARAMETER:
371      return slot->index();
372    case Slot::LOCAL:
373      return slot->index() + scope()->num_parameters();
374    default:
375      break;
376  }
377  return kInvalidSlotNumber;
378}
379
380
381MemOperand CodeGenerator::SlotOperand(Slot* slot, Register tmp) {
382  // Currently, this assertion will fail if we try to assign to
383  // a constant variable that is constant because it is read-only
384  // (such as the variable referring to a named function expression).
385  // We need to implement assignments to read-only variables.
386  // Ideally, we should do this during AST generation (by converting
387  // such assignments into expression statements); however, in general
388  // we may not be able to make the decision until past AST generation,
389  // that is when the entire program is known.
390  ASSERT(slot != NULL);
391  int index = slot->index();
392  switch (slot->type()) {
393    case Slot::PARAMETER:
394      return frame_->ParameterAt(index);
395
396    case Slot::LOCAL:
397      return frame_->LocalAt(index);
398
399    case Slot::CONTEXT: {
400      // Follow the context chain if necessary.
401      ASSERT(!tmp.is(cp));  // do not overwrite context register
402      Register context = cp;
403      int chain_length = scope()->ContextChainLength(slot->var()->scope());
404      for (int i = 0; i < chain_length; i++) {
405        // Load the closure.
406        // (All contexts, even 'with' contexts, have a closure,
407        // and it is the same for all contexts inside a function.
408        // There is no need to go to the function context first.)
409        __ ldr(tmp, ContextOperand(context, Context::CLOSURE_INDEX));
410        // Load the function context (which is the incoming, outer context).
411        __ ldr(tmp, FieldMemOperand(tmp, JSFunction::kContextOffset));
412        context = tmp;
413      }
414      // We may have a 'with' context now. Get the function context.
415      // (In fact this mov may never be needed, since the scope analysis
416      // may not permit a direct context access in this case and thus we are
417      // always at a function context. However it is safe to dereference be-
418      // cause the function context of a function context is itself. Before
419      // deleting this mov we should try to create a counter-example first,
420      // though...)
421      __ ldr(tmp, ContextOperand(context, Context::FCONTEXT_INDEX));
422      return ContextOperand(tmp, index);
423    }
424
425    default:
426      UNREACHABLE();
427      return MemOperand(r0, 0);
428  }
429}
430
431
432MemOperand CodeGenerator::ContextSlotOperandCheckExtensions(
433    Slot* slot,
434    Register tmp,
435    Register tmp2,
436    JumpTarget* slow) {
437  ASSERT(slot->type() == Slot::CONTEXT);
438  Register context = cp;
439
440  for (Scope* s = scope(); s != slot->var()->scope(); s = s->outer_scope()) {
441    if (s->num_heap_slots() > 0) {
442      if (s->calls_eval()) {
443        // Check that extension is NULL.
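        // A non-NULL extension object may hold bindings introduced by
        // eval, in which case we have to take the slow path.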
444        __ ldr(tmp2, ContextOperand(context, Context::EXTENSION_INDEX));
445        __ tst(tmp2, tmp2);
446        slow->Branch(ne);
447      }
448      __ ldr(tmp, ContextOperand(context, Context::CLOSURE_INDEX));
449      __ ldr(tmp, FieldMemOperand(tmp, JSFunction::kContextOffset));
450      context = tmp;
451    }
452  }
453  // Check that last extension is NULL.
454  __ ldr(tmp2, ContextOperand(context, Context::EXTENSION_INDEX));
455  __ tst(tmp2, tmp2);
456  slow->Branch(ne);
457  __ ldr(tmp, ContextOperand(context, Context::FCONTEXT_INDEX));
458  return ContextOperand(tmp, slot->index());
459}
460
461
462// Loads a value on TOS. If it is a boolean value, the result may have been
463// (partially) translated into branches, or it may have set the condition
464// code register. If force_cc is set, the value is forced to set the
465// condition code register and no value is pushed. If the condition code
466// register was set, has_cc() is true and cc_reg_ contains the condition to
467// test for 'true'.
468void CodeGenerator::LoadCondition(Expression* x,
469                                  JumpTarget* true_target,
470                                  JumpTarget* false_target,
471                                  bool force_cc) {
472  ASSERT(!has_cc());
473  int original_height = frame_->height();
474
475  { ConditionCodeGenState new_state(this, true_target, false_target);
476    Visit(x);
477
478    // If we hit a stack overflow, we may not have actually visited
479    // the expression.  In that case, we ensure that we have a
480    // valid-looking frame state because we will continue to generate
481    // code as we unwind the C++ stack.
482    //
483    // It's possible to have both a stack overflow and a valid frame
484    // state (eg, a subexpression overflowed, visiting it returned
485    // with a dummied frame state, and visiting this expression
486    // returned with a normal-looking state).
487    if (HasStackOverflow() &&
488        has_valid_frame() &&
489        !has_cc() &&
490        frame_->height() == original_height) {
491      true_target->Jump();
492    }
493  }
494  if (force_cc && frame_ != NULL && !has_cc()) {
495    // Convert the TOS value to a boolean in the condition code register.
496    ToBoolean(true_target, false_target);
497  }
498  ASSERT(!force_cc || !has_valid_frame() || has_cc());
499  ASSERT(!has_valid_frame() ||
500         (has_cc() && frame_->height() == original_height) ||
501         (!has_cc() && frame_->height() == original_height + 1));
502}
503
504
505void CodeGenerator::Load(Expression* expr) {
506  // We generally assume that we are not in a spilled scope for most
507  // of the code generator.  A failure to ensure this caused issue 815
508  // and this assert is designed to catch similar issues.
509  frame_->AssertIsNotSpilled();
510#ifdef DEBUG
511  int original_height = frame_->height();
512#endif
513  JumpTarget true_target;
514  JumpTarget false_target;
515  LoadCondition(expr, &true_target, &false_target, false);
516
517  if (has_cc()) {
518    // Convert cc_reg_ into a boolean value.
519    JumpTarget loaded;
520    JumpTarget materialize_true;
521    materialize_true.Branch(cc_reg_);
522    frame_->EmitPushRoot(Heap::kFalseValueRootIndex);
523    loaded.Jump();
524    materialize_true.Bind();
525    frame_->EmitPushRoot(Heap::kTrueValueRootIndex);
526    loaded.Bind();
527    cc_reg_ = al;
528  }
529
530  if (true_target.is_linked() || false_target.is_linked()) {
531    // We have at least one condition value that has been "translated"
532    // into a branch, thus it needs to be loaded explicitly.
533    JumpTarget loaded;
534    if (frame_ != NULL) {
535      loaded.Jump();  // Don't lose the current TOS.
536    }
537    bool both = true_target.is_linked() && false_target.is_linked();
538    // Load "true" if necessary.
539    if (true_target.is_linked()) {
540      true_target.Bind();
541      frame_->EmitPushRoot(Heap::kTrueValueRootIndex);
542    }
543    // If both "true" and "false" need to be loaded, jump across the code for
544    // "false".
545    if (both) {
546      loaded.Jump();
547    }
548    // Load "false" if necessary.
549    if (false_target.is_linked()) {
550      false_target.Bind();
551      frame_->EmitPushRoot(Heap::kFalseValueRootIndex);
552    }
553    // A value is loaded on all paths reaching this point.
554    loaded.Bind();
555  }
556  ASSERT(has_valid_frame());
557  ASSERT(!has_cc());
558  ASSERT_EQ(original_height + 1, frame_->height());
559}
560
561
562void CodeGenerator::LoadGlobal() {
563  Register reg = frame_->GetTOSRegister();
564  __ ldr(reg, GlobalObjectOperand());
565  frame_->EmitPush(reg);
566}
567
568
569void CodeGenerator::LoadGlobalReceiver(Register scratch) {
570  Register reg = frame_->GetTOSRegister();
571  __ ldr(reg, ContextOperand(cp, Context::GLOBAL_INDEX));
572  __ ldr(reg,
573         FieldMemOperand(reg, GlobalObject::kGlobalReceiverOffset));
574  frame_->EmitPush(reg);
575}
576
577
578ArgumentsAllocationMode CodeGenerator::ArgumentsMode() {
579  if (scope()->arguments() == NULL) return NO_ARGUMENTS_ALLOCATION;
580  ASSERT(scope()->arguments_shadow() != NULL);
581  // We don't want to do lazy arguments allocation for functions that
582  // have heap-allocated contexts, because it interferes with the
583  // uninitialized const tracking in the context objects.
584  return (scope()->num_heap_slots() > 0)
585      ? EAGER_ARGUMENTS_ALLOCATION
586      : LAZY_ARGUMENTS_ALLOCATION;
587}
588
589
590void CodeGenerator::StoreArgumentsObject(bool initial) {
591  ArgumentsAllocationMode mode = ArgumentsMode();
592  ASSERT(mode != NO_ARGUMENTS_ALLOCATION);
593
594  Comment cmnt(masm_, "[ store arguments object");
595  if (mode == LAZY_ARGUMENTS_ALLOCATION && initial) {
596    // When using lazy arguments allocation, we store the hole value
597    // as a sentinel indicating that the arguments object hasn't been
598    // allocated yet.
599    frame_->EmitPushRoot(Heap::kArgumentsMarkerRootIndex);
600  } else {
601    frame_->SpillAll();
602    ArgumentsAccessStub stub(ArgumentsAccessStub::NEW_OBJECT);
603    __ ldr(r2, frame_->Function());
604    // The receiver is below the arguments, the return address, and the
605    // frame pointer on the stack.
606    const int kReceiverDisplacement = 2 + scope()->num_parameters();
607    __ add(r1, fp, Operand(kReceiverDisplacement * kPointerSize));
608    __ mov(r0, Operand(Smi::FromInt(scope()->num_parameters())));
609    frame_->Adjust(3);
610    __ Push(r2, r1, r0);
611    frame_->CallStub(&stub, 3);
612    frame_->EmitPush(r0);
613  }
614
615  Variable* arguments = scope()->arguments();
616  Variable* shadow = scope()->arguments_shadow();
617  ASSERT(arguments != NULL && arguments->AsSlot() != NULL);
618  ASSERT(shadow != NULL && shadow->AsSlot() != NULL);
619  JumpTarget done;
620  if (mode == LAZY_ARGUMENTS_ALLOCATION && !initial) {
621    // We have to skip storing into the arguments slot if it has
622    // already been written to. This can happen if a function
623    // has a local variable named 'arguments'.
624    LoadFromSlot(scope()->arguments()->AsSlot(), NOT_INSIDE_TYPEOF);
625    Register arguments = frame_->PopToRegister();
626    __ LoadRoot(ip, Heap::kArgumentsMarkerRootIndex);
627    __ cmp(arguments, ip);
628    done.Branch(ne);
629  }
630  StoreToSlot(arguments->AsSlot(), NOT_CONST_INIT);
631  if (mode == LAZY_ARGUMENTS_ALLOCATION) done.Bind();
632  StoreToSlot(shadow->AsSlot(), NOT_CONST_INIT);
633}
634
635
636void CodeGenerator::LoadTypeofExpression(Expression* expr) {
637  // Special handling of identifiers as subexpressions of typeof.
638  Variable* variable = expr->AsVariableProxy()->AsVariable();
639  if (variable != NULL && !variable->is_this() && variable->is_global()) {
640    // For a global variable we build the property reference
641    // <global>.<variable> and perform a (regular non-contextual) property
642    // load to make sure we do not get reference errors.
643    Slot global(variable, Slot::CONTEXT, Context::GLOBAL_INDEX);
644    Literal key(variable->name());
645    Property property(&global, &key, RelocInfo::kNoPosition);
646    Reference ref(this, &property);
647    ref.GetValue();
648  } else if (variable != NULL && variable->AsSlot() != NULL) {
649    // For a variable that rewrites to a slot, we signal it is the immediate
650    // subexpression of a typeof.
651    LoadFromSlotCheckForArguments(variable->AsSlot(), INSIDE_TYPEOF);
652  } else {
653    // Anything else can be handled normally.
654    Load(expr);
655  }
656}
657
658
659Reference::Reference(CodeGenerator* cgen,
660                     Expression* expression,
661                     bool persist_after_get)
662    : cgen_(cgen),
663      expression_(expression),
664      type_(ILLEGAL),
665      persist_after_get_(persist_after_get) {
666  // We generally assume that we are not in a spilled scope for most
667  // of the code generator.  A failure to ensure this caused issue 815
668  // and this assert is designed to catch similar issues.
669  cgen->frame()->AssertIsNotSpilled();
670  cgen->LoadReference(this);
671}
672
673
674Reference::~Reference() {
675  ASSERT(is_unloaded() || is_illegal());
676}
677
678
679void CodeGenerator::LoadReference(Reference* ref) {
680  Comment cmnt(masm_, "[ LoadReference");
681  Expression* e = ref->expression();
682  Property* property = e->AsProperty();
683  Variable* var = e->AsVariableProxy()->AsVariable();
684
685  if (property != NULL) {
686    // The expression is either a property or a variable proxy that rewrites
687    // to a property.
688    Load(property->obj());
689    if (property->key()->IsPropertyName()) {
690      ref->set_type(Reference::NAMED);
691    } else {
692      Load(property->key());
693      ref->set_type(Reference::KEYED);
694    }
695  } else if (var != NULL) {
696    // The expression is a variable proxy that does not rewrite to a
697    // property.  Global variables are treated as named property references.
698    if (var->is_global()) {
699      LoadGlobal();
700      ref->set_type(Reference::NAMED);
701    } else {
702      ASSERT(var->AsSlot() != NULL);
703      ref->set_type(Reference::SLOT);
704    }
705  } else {
706    // Anything else is a runtime error.
707    Load(e);
708    frame_->CallRuntime(Runtime::kThrowReferenceError, 1);
709  }
710}
711
712
713void CodeGenerator::UnloadReference(Reference* ref) {
714  int size = ref->size();
715  ref->set_unloaded();
716  if (size == 0) return;
717
718  // Pop a reference from the stack while preserving TOS.
719  VirtualFrame::RegisterAllocationScope scope(this);
720  Comment cmnt(masm_, "[ UnloadReference");
721  if (size > 0) {
722    Register tos = frame_->PopToRegister();
723    frame_->Drop(size);
724    frame_->EmitPush(tos);
725  }
726}
727
728
729// ECMA-262, section 9.2, page 30: ToBoolean(). Convert the given
730// register to a boolean in the condition code register. The code
731// may jump to 'false_target' in case the register converts to 'false'.
732void CodeGenerator::ToBoolean(JumpTarget* true_target,
733                              JumpTarget* false_target) {
734  // Note: The generated code snippet does not change stack variables.
735  //       Only the condition code should be set.
736  bool known_smi = frame_->KnownSmiAt(0);
737  Register tos = frame_->PopToRegister();
738
739  // Fast case checks
740
741  // Check if the value is 'false'.
742  if (!known_smi) {
743    __ LoadRoot(ip, Heap::kFalseValueRootIndex);
744    __ cmp(tos, ip);
745    false_target->Branch(eq);
746
747    // Check if the value is 'true'.
748    __ LoadRoot(ip, Heap::kTrueValueRootIndex);
749    __ cmp(tos, ip);
750    true_target->Branch(eq);
751
752    // Check if the value is 'undefined'.
753    __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
754    __ cmp(tos, ip);
755    false_target->Branch(eq);
756  }
757
758  // Check if the value is a smi.
759  __ cmp(tos, Operand(Smi::FromInt(0)));
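  // For a known smi this comparison alone decides the result: cc_reg_ is
  // set to ne below, so any non-zero smi counts as true.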
760
761  if (!known_smi) {
762    false_target->Branch(eq);
763    __ tst(tos, Operand(kSmiTagMask));
764    true_target->Branch(eq);
765
766    // Slow case.
767    if (CpuFeatures::IsSupported(VFP3)) {
768      CpuFeatures::Scope scope(VFP3);
769      // Implements the slow case by using ToBooleanStub.
770      // The ToBooleanStub takes a single argument, and
771      // returns a non-zero value for true, or zero for false.
772      // Both the argument value and the return value use the
773      // register assigned to tos_.
774      ToBooleanStub stub(tos);
775      frame_->CallStub(&stub, 0);
776      // Convert the result in "tos" to a condition code.
777      __ cmp(tos, Operand(0, RelocInfo::NONE));
778    } else {
779      // Implements slow case by calling the runtime.
780      frame_->EmitPush(tos);
781      frame_->CallRuntime(Runtime::kToBool, 1);
782      // Convert the result (r0) to a condition code.
783      __ LoadRoot(ip, Heap::kFalseValueRootIndex);
784      __ cmp(r0, ip);
785    }
786  }
787
788  cc_reg_ = ne;
789}
790
791
792void CodeGenerator::GenericBinaryOperation(Token::Value op,
793                                           OverwriteMode overwrite_mode,
794                                           GenerateInlineSmi inline_smi,
795                                           int constant_rhs) {
796  // top of virtual frame: y
797  // 2nd elt. on virtual frame : x
798  // result : top of virtual frame
799
800  // Stub is entered with a call: 'return address' is in lr.
801  switch (op) {
802    case Token::ADD:
803    case Token::SUB:
804      if (inline_smi) {
805        JumpTarget done;
806        Register rhs = frame_->PopToRegister();
807        Register lhs = frame_->PopToRegister(rhs);
808        Register scratch = VirtualFrame::scratch0();
809        __ orr(scratch, rhs, Operand(lhs));
810        // Check they are both small and positive.
811        __ tst(scratch, Operand(kSmiTagMask | 0xc0000000));
812        ASSERT(rhs.is(r0) || lhs.is(r0));  // r0 is free now.
813        STATIC_ASSERT(kSmiTag == 0);
814        if (op == Token::ADD) {
815          __ add(r0, lhs, Operand(rhs), LeaveCC, eq);
816        } else {
817          __ sub(r0, lhs, Operand(rhs), LeaveCC, eq);
818        }
819        done.Branch(eq);
820        GenericBinaryOpStub stub(op, overwrite_mode, lhs, rhs, constant_rhs);
821        frame_->SpillAll();
822        frame_->CallStub(&stub, 0);
823        done.Bind();
824        frame_->EmitPush(r0);
825        break;
826      } else {
827        // Fall through!
828      }
829    case Token::BIT_OR:
830    case Token::BIT_AND:
831    case Token::BIT_XOR:
832      if (inline_smi) {
833        bool rhs_is_smi = frame_->KnownSmiAt(0);
834        bool lhs_is_smi = frame_->KnownSmiAt(1);
835        Register rhs = frame_->PopToRegister();
836        Register lhs = frame_->PopToRegister(rhs);
837        Register smi_test_reg;
838        Condition cond;
839        if (!rhs_is_smi || !lhs_is_smi) {
840          if (rhs_is_smi) {
841            smi_test_reg = lhs;
842          } else if (lhs_is_smi) {
843            smi_test_reg = rhs;
844          } else {
845            smi_test_reg = VirtualFrame::scratch0();
846            __ orr(smi_test_reg, rhs, Operand(lhs));
847          }
848          // Check they are both Smis.
849          __ tst(smi_test_reg, Operand(kSmiTagMask));
850          cond = eq;
851        } else {
852          cond = al;
853        }
854        ASSERT(rhs.is(r0) || lhs.is(r0));  // r0 is free now.
855        if (op == Token::BIT_OR) {
856          __ orr(r0, lhs, Operand(rhs), LeaveCC, cond);
857        } else if (op == Token::BIT_AND) {
858          __ and_(r0, lhs, Operand(rhs), LeaveCC, cond);
859        } else {
860          ASSERT(op == Token::BIT_XOR);
861          STATIC_ASSERT(kSmiTag == 0);
862          __ eor(r0, lhs, Operand(rhs), LeaveCC, cond);
863        }
864        if (cond != al) {
865          JumpTarget done;
866          done.Branch(cond);
867          GenericBinaryOpStub stub(op, overwrite_mode, lhs, rhs, constant_rhs);
868          frame_->SpillAll();
869          frame_->CallStub(&stub, 0);
870          done.Bind();
871        }
872        frame_->EmitPush(r0);
873        break;
874      } else {
875        // Fall through!
876      }
877    case Token::MUL:
878    case Token::DIV:
879    case Token::MOD:
880    case Token::SHL:
881    case Token::SHR:
882    case Token::SAR: {
883      Register rhs = frame_->PopToRegister();
884      Register lhs = frame_->PopToRegister(rhs);  // Don't pop to rhs register.
885      GenericBinaryOpStub stub(op, overwrite_mode, lhs, rhs, constant_rhs);
886      frame_->SpillAll();
887      frame_->CallStub(&stub, 0);
888      frame_->EmitPush(r0);
889      break;
890    }
891
892    case Token::COMMA: {
893      Register scratch = frame_->PopToRegister();
894      // Simply discard left value.
895      frame_->Drop();
896      frame_->EmitPush(scratch);
897      break;
898    }
899
900    default:
901      // Other cases should have been handled before this point.
902      UNREACHABLE();
903      break;
904  }
905}
906
907
908class DeferredInlineSmiOperation: public DeferredCode {
909 public:
910  DeferredInlineSmiOperation(Token::Value op,
911                             int value,
912                             bool reversed,
913                             OverwriteMode overwrite_mode,
914                             Register tos)
915      : op_(op),
916        value_(value),
917        reversed_(reversed),
918        overwrite_mode_(overwrite_mode),
919        tos_register_(tos) {
920    set_comment("[ DeferredInlinedSmiOperation");
921  }
922
923  virtual void Generate();
924  // This stub makes explicit calls to SaveRegisters(), RestoreRegisters() and
925  // Exit(). Currently on ARM SaveRegisters() and RestoreRegisters() are empty
926  // methods, it is the responsibility of the deferred code to save and restore
927  // registers.
928  virtual bool AutoSaveAndRestore() { return false; }
929
930  void JumpToNonSmiInput(Condition cond);
931  void JumpToAnswerOutOfRange(Condition cond);
932
933 private:
934  void GenerateNonSmiInput();
935  void GenerateAnswerOutOfRange();
936  void WriteNonSmiAnswer(Register answer,
937                         Register heap_number,
938                         Register scratch);
939
940  Token::Value op_;
941  int value_;
942  bool reversed_;
943  OverwriteMode overwrite_mode_;
944  Register tos_register_;
945  Label non_smi_input_;
946  Label answer_out_of_range_;
947};
948
949
950// For bit operations we try harder and handle the case where the input is not
951// a Smi but a 32-bit integer without calling the generic stub.
952void DeferredInlineSmiOperation::JumpToNonSmiInput(Condition cond) {
953  ASSERT(Token::IsBitOp(op_));
954
955  __ b(cond, &non_smi_input_);
956}
957
958
959// For bit operations the result is always 32 bits so we handle the case where
960// the result does not fit in a Smi without calling the generic stub.
961void DeferredInlineSmiOperation::JumpToAnswerOutOfRange(Condition cond) {
962  ASSERT(Token::IsBitOp(op_));
963
964  if ((op_ == Token::SHR) && !CpuFeatures::IsSupported(VFP3)) {
965    // >>> requires an unsigned to double conversion and the non VFP code
966    // does not support this conversion.
967    __ b(cond, entry_label());
968  } else {
969    __ b(cond, &answer_out_of_range_);
970  }
971}
972
973
974// On entry the non-constant side of the binary operation is in tos_register_
975// and the constant smi side is nowhere.  The tos_register_ is not used by the
976// virtual frame.  On exit the answer is in the tos_register_ and the virtual
977// frame is unchanged.
978void DeferredInlineSmiOperation::Generate() {
979  VirtualFrame copied_frame(*frame_state()->frame());
980  copied_frame.SpillAll();
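  // Spill a copy of the frame so that any register state is saved to
  // memory before the stub call below; the copy is merged back to the
  // original frame at the end, restoring the layout the inlined code
  // expects.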
981
982  Register lhs = r1;
983  Register rhs = r0;
984  switch (op_) {
985    case Token::ADD: {
986      // Revert optimistic add.
987      if (reversed_) {
988        __ sub(r0, tos_register_, Operand(Smi::FromInt(value_)));
989        __ mov(r1, Operand(Smi::FromInt(value_)));
990      } else {
991        __ sub(r1, tos_register_, Operand(Smi::FromInt(value_)));
992        __ mov(r0, Operand(Smi::FromInt(value_)));
993      }
994      break;
995    }
996
997    case Token::SUB: {
998      // Revert optimistic sub.
999      if (reversed_) {
1000        __ rsb(r0, tos_register_, Operand(Smi::FromInt(value_)));
1001        __ mov(r1, Operand(Smi::FromInt(value_)));
1002      } else {
1003        __ add(r1, tos_register_, Operand(Smi::FromInt(value_)));
1004        __ mov(r0, Operand(Smi::FromInt(value_)));
1005      }
1006      break;
1007    }
1008
1009    // For these operations there is no optimistic operation that needs to be
1010    // reverted.
1011    case Token::MUL:
1012    case Token::MOD:
1013    case Token::BIT_OR:
1014    case Token::BIT_XOR:
1015    case Token::BIT_AND:
1016    case Token::SHL:
1017    case Token::SHR:
1018    case Token::SAR: {
1019      if (tos_register_.is(r1)) {
1020        __ mov(r0, Operand(Smi::FromInt(value_)));
1021      } else {
1022        ASSERT(tos_register_.is(r0));
1023        __ mov(r1, Operand(Smi::FromInt(value_)));
1024      }
1025      if (reversed_ == tos_register_.is(r1)) {
1026          lhs = r0;
1027          rhs = r1;
1028      }
1029      break;
1030    }
1031
1032    default:
1033      // Other cases should have been handled before this point.
1034      UNREACHABLE();
1035      break;
1036  }
1037
1038  GenericBinaryOpStub stub(op_, overwrite_mode_, lhs, rhs, value_);
1039  __ CallStub(&stub);
1040
1041  // The generic stub returns its value in r0, but that's not
1042  // necessarily what we want.  We want whatever the inlined code
1043  // expected, which is that the answer is in the same register as
1044  // the operand was.
1045  __ Move(tos_register_, r0);
1046
1047  // The tos register was not in use for the virtual frame that we
1048  // came into this function with, so we can merge back to that frame
1049  // without trashing it.
1050  copied_frame.MergeTo(frame_state()->frame());
1051
1052  Exit();
1053
1054  if (non_smi_input_.is_linked()) {
1055    GenerateNonSmiInput();
1056  }
1057
1058  if (answer_out_of_range_.is_linked()) {
1059    GenerateAnswerOutOfRange();
1060  }
1061}
1062
1063
1064// Convert and write the integer answer into heap_number.
1065void DeferredInlineSmiOperation::WriteNonSmiAnswer(Register answer,
1066                                                   Register heap_number,
1067                                                   Register scratch) {
1068  if (CpuFeatures::IsSupported(VFP3)) {
1069    CpuFeatures::Scope scope(VFP3);
1070    __ vmov(s0, answer);
1071    if (op_ == Token::SHR) {
1072      __ vcvt_f64_u32(d0, s0);
1073    } else {
1074      __ vcvt_f64_s32(d0, s0);
1075    }
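    // The VFP store offset must be word aligned, so strip the heap object
    // tag from the base address instead of folding it into the offset.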
1076    __ sub(scratch, heap_number, Operand(kHeapObjectTag));
1077    __ vstr(d0, scratch, HeapNumber::kValueOffset);
1078  } else {
1079    WriteInt32ToHeapNumberStub stub(answer, heap_number, scratch);
1080    __ CallStub(&stub);
1081  }
1082}
1083
1084
1085void DeferredInlineSmiOperation::GenerateNonSmiInput() {
1086  // We know the left hand side is not a Smi and the right hand side is an
1087  // immediate value (value_) which can be represented as a Smi. We only
1088  // handle bit operations.
1089  ASSERT(Token::IsBitOp(op_));
1090
1091  if (FLAG_debug_code) {
1092    __ Abort("Should not fall through!");
1093  }
1094
1095  __ bind(&non_smi_input_);
1096  if (FLAG_debug_code) {
1097    __ AbortIfSmi(tos_register_);
1098  }
1099
1100  // This routine uses the registers from r2 to r6.  At the moment they are
1101  // not used by the register allocator, but when they are, it should use
1102  // SpillAll and MergeTo like DeferredInlineSmiOperation::Generate() above.
1103
1104  Register heap_number_map = r7;
1105  __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
1106  __ ldr(r3, FieldMemOperand(tos_register_, HeapNumber::kMapOffset));
1107  __ cmp(r3, heap_number_map);
1108  // Not a number, fall back to the GenericBinaryOpStub.
1109  __ b(ne, entry_label());
1110
1111  Register int32 = r2;
1112  // Not a 32-bit signed int, fall back to the GenericBinaryOpStub.
1113  __ ConvertToInt32(tos_register_, int32, r4, r5, entry_label());
1114
1115  // tos_register_ (r0 or r1): Original heap number.
1116  // int32: signed 32-bit int.
1117
1118  Label result_not_a_smi;
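  // JS shift operators use only the low five bits of the shift count,
  // hence the & 0x1f below.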
1119  int shift_value = value_ & 0x1f;
1120  switch (op_) {
1121    case Token::BIT_OR:  __ orr(int32, int32, Operand(value_)); break;
1122    case Token::BIT_XOR: __ eor(int32, int32, Operand(value_)); break;
1123    case Token::BIT_AND: __ and_(int32, int32, Operand(value_)); break;
1124    case Token::SAR:
1125      ASSERT(!reversed_);
1126      if (shift_value != 0) {
1127         __ mov(int32, Operand(int32, ASR, shift_value));
1128      }
1129      break;
1130    case Token::SHR:
1131      ASSERT(!reversed_);
1132      if (shift_value != 0) {
1133        __ mov(int32, Operand(int32, LSR, shift_value), SetCC);
1134      } else {
1135        // SHR is special because it is required to produce a positive answer.
1136        __ cmp(int32, Operand(0, RelocInfo::NONE));
1137      }
1138      if (CpuFeatures::IsSupported(VFP3)) {
1139        __ b(mi, &result_not_a_smi);
1140      } else {
1141        // Non VFP code cannot convert from unsigned to double, so fall back
1142        // to GenericBinaryOpStub.
1143        __ b(mi, entry_label());
1144      }
1145      break;
1146    case Token::SHL:
1147      ASSERT(!reversed_);
1148      if (shift_value != 0) {
1149        __ mov(int32, Operand(int32, LSL, shift_value));
1150      }
1151      break;
1152    default: UNREACHABLE();
1153  }
1154  // Check that the *signed* result fits in a smi. Not necessary for AND, SAR
1155  // if the shift is more than 0 or SHR if the shift is more than 1.
1156  if (!( (op_ == Token::AND) ||
1157        ((op_ == Token::SAR) && (shift_value > 0)) ||
1158        ((op_ == Token::SHR) && (shift_value > 1)))) {
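    // Adding 0x40000000 sets the N flag exactly when int32 lies outside
    // the smi range [-2^30, 2^30 - 1].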
1159    __ add(r3, int32, Operand(0x40000000), SetCC);
1160    __ b(mi, &result_not_a_smi);
1161  }
1162  __ mov(tos_register_, Operand(int32, LSL, kSmiTagSize));
1163  Exit();
1164
1165  if (result_not_a_smi.is_linked()) {
1166    __ bind(&result_not_a_smi);
1167    if (overwrite_mode_ != OVERWRITE_LEFT) {
1168      ASSERT((overwrite_mode_ == NO_OVERWRITE) ||
1169             (overwrite_mode_ == OVERWRITE_RIGHT));
1170      // If the allocation fails, fall back to the GenericBinaryOpStub.
1171      __ AllocateHeapNumber(r4, r5, r6, heap_number_map, entry_label());
1172      // Nothing can go wrong now, so overwrite tos.
1173      __ mov(tos_register_, Operand(r4));
1174    }
1175
1176    // int32: answer as a signed 32-bit integer.
1177    // tos_register_: Heap number to write the answer into.
1178    WriteNonSmiAnswer(int32, tos_register_, r3);
1179
1180    Exit();
1181  }
1182}
1183
1184
1185void DeferredInlineSmiOperation::GenerateAnswerOutOfRange() {
1186  // The inputs to the bitwise operation were Smis but the result cannot fit
1187  // into a Smi, so we store it into a heap number. VirtualFrame::scratch0()
1188  // holds the untagged result to be converted.  tos_register_ contains the
1189  // input.  See the calls to JumpToAnswerOutOfRange to see how we got here.
1190  ASSERT(Token::IsBitOp(op_));
1191  ASSERT(!reversed_);
1192
1193  Register untagged_result = VirtualFrame::scratch0();
1194
1195  if (FLAG_debug_code) {
1196    __ Abort("Should not fall through!");
1197  }
1198
1199  __ bind(&answer_out_of_range_);
1200  if (((value_ & 0x1f) == 0) && (op_ == Token::SHR)) {
1201    // >>> 0 is a special case where the untagged_result register is not set up
1202    // yet.  We untag the input to get it.
1203    __ mov(untagged_result, Operand(tos_register_, ASR, kSmiTagSize));
1204  }
1205
1206  // This routine uses the registers from r2 to r6.  At the moment they are
1207  // not used by the register allocator, but when they are, it should use
1208  // SpillAll and MergeTo like DeferredInlineSmiOperation::Generate() above.
1209
1210  // Allocate the result heap number.
1211  Register heap_number_map = VirtualFrame::scratch1();
1212  Register heap_number = r4;
1213  __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
1214  // If the allocation fails, fall back to the GenericBinaryOpStub.
1215  __ AllocateHeapNumber(heap_number, r5, r6, heap_number_map, entry_label());
1216  WriteNonSmiAnswer(untagged_result, heap_number, r3);
1217  __ mov(tos_register_, Operand(heap_number));
1218
1219  Exit();
1220}
1221
1222
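// Returns true if x has at most two bits set.  Clearing the lowest set bit
// with x &= x - 1 and then testing x & (x - 1) answers this without a loop.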
1223static bool PopCountLessThanEqual2(unsigned int x) {
1224  x &= x - 1;
1225  return (x & (x - 1)) == 0;
1226}
1227
1228
1229// Returns the index of the lowest bit set.
1230static int BitPosition(unsigned x) {
1231  int bit_posn = 0;
1232  while ((x & 0xf) == 0) {
1233    bit_posn += 4;
1234    x >>= 4;
1235  }
1236  while ((x & 1) == 0) {
1237    bit_posn++;
1238    x >>= 1;
1239  }
1240  return bit_posn;
1241}
1242
1243
1244// Can we multiply by x with at most two shifts and an add?
1245// This answers yes to all integers from 2 to 10.
1246static bool IsEasyToMultiplyBy(int x) {
1247  if (x < 2) return false;                          // Avoid special cases.
1248  if (x > (Smi::kMaxValue + 1) >> 2) return false;  // Almost always overflows.
1249  if (IsPowerOf2(x)) return true;                   // Simple shift.
1250  if (PopCountLessThanEqual2(x)) return true;       // Shift and add and shift.
1251  if (IsPowerOf2(x + 1)) return true;               // Patterns like 11111.
1252  return false;
1253}
1254
1255
1256// Can multiply by anything that IsEasyToMultiplyBy returns true for.
1257// Source and destination may be the same register.  This routine does
1258// not set carry and overflow the way a mul instruction would.
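// For example, multiplying by 10 (binary 1010) takes the shift-add-shift
// path:
//   add(dst, src, Operand(src, LSL, 2));  // dst = 5 * src
//   mov(dst, Operand(dst, LSL, 1));       // dst = 10 * src
// while multiplying by 7 (binary 111) takes the rsb path:
//   rsb(dst, src, Operand(src, LSL, 3));  // dst = 8 * src - src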
1259static void InlineMultiplyByKnownInt(MacroAssembler* masm,
1260                                     Register source,
1261                                     Register destination,
1262                                     int known_int) {
1263  if (IsPowerOf2(known_int)) {
1264    masm->mov(destination, Operand(source, LSL, BitPosition(known_int)));
1265  } else if (PopCountLessThanEqual2(known_int)) {
1266    int first_bit = BitPosition(known_int);
1267    int second_bit = BitPosition(known_int ^ (1 << first_bit));
1268    masm->add(destination, source,
1269              Operand(source, LSL, second_bit - first_bit));
1270    if (first_bit != 0) {
1271      masm->mov(destination, Operand(destination, LSL, first_bit));
1272    }
1273  } else {
1274    ASSERT(IsPowerOf2(known_int + 1));  // Patterns like 1111.
1275    int the_bit = BitPosition(known_int + 1);
1276    masm->rsb(destination, source, Operand(source, LSL, the_bit));
1277  }
1278}
1279
1280
1281void CodeGenerator::SmiOperation(Token::Value op,
1282                                 Handle<Object> value,
1283                                 bool reversed,
1284                                 OverwriteMode mode) {
1285  int int_value = Smi::cast(*value)->value();
1286
1287  bool both_sides_are_smi = frame_->KnownSmiAt(0);
1288
1289  bool something_to_inline;
1290  switch (op) {
1291    case Token::ADD:
1292    case Token::SUB:
1293    case Token::BIT_AND:
1294    case Token::BIT_OR:
1295    case Token::BIT_XOR: {
1296      something_to_inline = true;
1297      break;
1298    }
1299    case Token::SHL: {
1300      something_to_inline = (both_sides_are_smi || !reversed);
1301      break;
1302    }
1303    case Token::SHR:
1304    case Token::SAR: {
1305      if (reversed) {
1306        something_to_inline = false;
1307      } else {
1308        something_to_inline = true;
1309      }
1310      break;
1311    }
1312    case Token::MOD: {
1313      if (reversed || int_value < 2 || !IsPowerOf2(int_value)) {
1314        something_to_inline = false;
1315      } else {
1316        something_to_inline = true;
1317      }
1318      break;
1319    }
1320    case Token::MUL: {
1321      if (!IsEasyToMultiplyBy(int_value)) {
1322        something_to_inline = false;
1323      } else {
1324        something_to_inline = true;
1325      }
1326      break;
1327    }
1328    default: {
1329      something_to_inline = false;
1330      break;
1331    }
1332  }
1333
1334  if (!something_to_inline) {
1335    if (!reversed) {
1336      // Push the rhs onto the virtual frame by putting it in a TOS register.
1337      Register rhs = frame_->GetTOSRegister();
1338      __ mov(rhs, Operand(value));
1339      frame_->EmitPush(rhs, TypeInfo::Smi());
1340      GenericBinaryOperation(op, mode, GENERATE_INLINE_SMI, int_value);
1341    } else {
1342      // Pop the rhs, then push lhs and rhs in the right order.  Only performs
1343      // at most one pop, the rest takes place in TOS registers.
1344      Register lhs = frame_->GetTOSRegister();    // Get reg for pushing.
1345      Register rhs = frame_->PopToRegister(lhs);  // Don't use lhs for this.
1346      __ mov(lhs, Operand(value));
1347      frame_->EmitPush(lhs, TypeInfo::Smi());
1348      TypeInfo t = both_sides_are_smi ? TypeInfo::Smi() : TypeInfo::Unknown();
1349      frame_->EmitPush(rhs, t);
1350      GenericBinaryOperation(op, mode, GENERATE_INLINE_SMI,
1351                             GenericBinaryOpStub::kUnknownIntValue);
1352    }
1353    return;
1354  }
1355
1356  // We move the top of stack to a register (normally no move is involved).
1357  Register tos = frame_->PopToRegister();
1358  switch (op) {
1359    case Token::ADD: {
1360      DeferredCode* deferred =
1361          new DeferredInlineSmiOperation(op, int_value, reversed, mode, tos);
1362
1363      __ add(tos, tos, Operand(value), SetCC);
1364      deferred->Branch(vs);
1365      if (!both_sides_are_smi) {
1366        __ tst(tos, Operand(kSmiTagMask));
1367        deferred->Branch(ne);
1368      }
1369      deferred->BindExit();
1370      frame_->EmitPush(tos);
1371      break;
1372    }
1373
1374    case Token::SUB: {
1375      DeferredCode* deferred =
1376          new DeferredInlineSmiOperation(op, int_value, reversed, mode, tos);
1377
1378      if (reversed) {
1379        __ rsb(tos, tos, Operand(value), SetCC);
1380      } else {
1381        __ sub(tos, tos, Operand(value), SetCC);
1382      }
1383      deferred->Branch(vs);
1384      if (!both_sides_are_smi) {
1385        __ tst(tos, Operand(kSmiTagMask));
1386        deferred->Branch(ne);
1387      }
1388      deferred->BindExit();
1389      frame_->EmitPush(tos);
1390      break;
1391    }
1392
1393
1394    case Token::BIT_OR:
1395    case Token::BIT_XOR:
1396    case Token::BIT_AND: {
1397      if (both_sides_are_smi) {
1398        switch (op) {
1399          case Token::BIT_OR:  __ orr(tos, tos, Operand(value)); break;
1400          case Token::BIT_XOR: __ eor(tos, tos, Operand(value)); break;
1401          case Token::BIT_AND: __ And(tos, tos, Operand(value)); break;
1402          default: UNREACHABLE();
1403        }
1404        frame_->EmitPush(tos, TypeInfo::Smi());
1405      } else {
1406        DeferredInlineSmiOperation* deferred =
1407          new DeferredInlineSmiOperation(op, int_value, reversed, mode, tos);
1408        __ tst(tos, Operand(kSmiTagMask));
1409        deferred->JumpToNonSmiInput(ne);
1410        switch (op) {
1411          case Token::BIT_OR:  __ orr(tos, tos, Operand(value)); break;
1412          case Token::BIT_XOR: __ eor(tos, tos, Operand(value)); break;
1413          case Token::BIT_AND: __ And(tos, tos, Operand(value)); break;
1414          default: UNREACHABLE();
1415        }
1416        deferred->BindExit();
1417        TypeInfo result_type =
1418            (op == Token::BIT_AND) ? TypeInfo::Smi() : TypeInfo::Integer32();
1419        frame_->EmitPush(tos, result_type);
1420      }
1421      break;
1422    }
1423
1424    case Token::SHL:
1425      if (reversed) {
1426        ASSERT(both_sides_are_smi);
1427        int max_shift = 0;
1428        int max_result = int_value == 0 ? 1 : int_value;
1429        while (Smi::IsValid(max_result << 1)) {
1430          max_shift++;
1431          max_result <<= 1;
1432        }
1433        DeferredCode* deferred =
1434          new DeferredInlineSmiOperation(op, int_value, true, mode, tos);
1435        // Mask off the last 5 bits of the shift operand (rhs).  This is part
1436        // of the definition of shift in JS and we know we have a Smi so we
1437        // can safely do this.  The masked version gets passed to the
1438        // deferred code, but that makes no difference.
1439        __ and_(tos, tos, Operand(Smi::FromInt(0x1f)));
1440        __ cmp(tos, Operand(Smi::FromInt(max_shift)));
1441        deferred->Branch(ge);
1442        Register scratch = VirtualFrame::scratch0();
1443        __ mov(scratch, Operand(tos, ASR, kSmiTagSize));  // Untag.
1444        __ mov(tos, Operand(Smi::FromInt(int_value)));    // Load constant.
1445        __ mov(tos, Operand(tos, LSL, scratch));          // Shift constant.
1446        deferred->BindExit();
1447        TypeInfo result = TypeInfo::Integer32();
1448        frame_->EmitPush(tos, result);
1449        break;
1450      }
1451      // Fall through!
1452    case Token::SHR:
1453    case Token::SAR: {
1454      ASSERT(!reversed);
1455      int shift_value = int_value & 0x1f;
1456      TypeInfo result = TypeInfo::Number();
1457
1458      if (op == Token::SHR) {
1459        if (shift_value > 1) {
1460          result = TypeInfo::Smi();
1461        } else if (shift_value > 0) {
1462          result = TypeInfo::Integer32();
1463        }
1464      } else if (op == Token::SAR) {
1465        if (shift_value > 0) {
1466          result = TypeInfo::Smi();
1467        } else {
1468          result = TypeInfo::Integer32();
1469        }
1470      } else {
1471        ASSERT(op == Token::SHL);
1472        result = TypeInfo::Integer32();
1473      }
1474
1475      DeferredInlineSmiOperation* deferred =
1476        new DeferredInlineSmiOperation(op, shift_value, false, mode, tos);
1477      if (!both_sides_are_smi) {
1478        __ tst(tos, Operand(kSmiTagMask));
1479        deferred->JumpToNonSmiInput(ne);
1480      }
1481      switch (op) {
1482        case Token::SHL: {
1483          if (shift_value != 0) {
1484            Register untagged_result = VirtualFrame::scratch0();
1485            Register scratch = VirtualFrame::scratch1();
1486            int adjusted_shift = shift_value - kSmiTagSize;
1487            ASSERT(adjusted_shift >= 0);
1488
1489            if (adjusted_shift != 0) {
1490              __ mov(untagged_result, Operand(tos, LSL, adjusted_shift));
1491            } else {
1492              __ mov(untagged_result, Operand(tos));
1493            }
1494            // Check that the *signed* result fits in a smi.
1495            __ add(scratch, untagged_result, Operand(0x40000000), SetCC);
1496            deferred->JumpToAnswerOutOfRange(mi);
1497            __ mov(tos, Operand(untagged_result, LSL, kSmiTagSize));
1498          }
1499          break;
1500        }
1501        case Token::SHR: {
1502          if (shift_value != 0) {
1503            Register untagged_result = VirtualFrame::scratch0();
1504            // Remove tag.
1505            __ mov(untagged_result, Operand(tos, ASR, kSmiTagSize));
1506            __ mov(untagged_result, Operand(untagged_result, LSR, shift_value));
1507            if (shift_value == 1) {
1508              // Check that the *unsigned* result fits in a smi.
1509              // Neither of the two high-order bits can be set:
1510              // - 0x80000000: high bit would be lost when smi tagging
1511              // - 0x40000000: this number would convert to negative when Smi
1512              //   tagging.
1513              // These two cases can only happen with shifts by 0 or 1 when
1514              // handed a valid smi.
1515              __ tst(untagged_result, Operand(0xc0000000));
1516              deferred->JumpToAnswerOutOfRange(ne);
1517            }
1518            __ mov(tos, Operand(untagged_result, LSL, kSmiTagSize));
1519          } else {
1520            __ cmp(tos, Operand(0, RelocInfo::NONE));
1521            deferred->JumpToAnswerOutOfRange(mi);
1522          }
1523          break;
1524        }
1525        case Token::SAR: {
1526          if (shift_value != 0) {
1527            // Do the shift and the tag removal in one operation. If the shift
1528            // is 31 bits (the highest possible value) then we emit the
1529            // instruction as a shift by 0 which in the ARM ISA means shift
1530            // arithmetically by 32.
1531            __ mov(tos, Operand(tos, ASR, (kSmiTagSize + shift_value) & 0x1f));
1532            __ mov(tos, Operand(tos, LSL, kSmiTagSize));
1533          }
1534          break;
1535        }
1536        default: UNREACHABLE();
1537      }
1538      deferred->BindExit();
1539      frame_->EmitPush(tos, result);
1540      break;
1541    }
1542
1543    case Token::MOD: {
1544      ASSERT(!reversed);
1545      ASSERT(int_value >= 2);
1546      ASSERT(IsPowerOf2(int_value));
1547      DeferredCode* deferred =
1548          new DeferredInlineSmiOperation(op, int_value, reversed, mode, tos);
1549      unsigned mask = (0x80000000u | kSmiTagMask);
1550      __ tst(tos, Operand(mask));
1551      deferred->Branch(ne);  // Go to deferred code on non-Smis and negative.
1552      mask = (int_value << kSmiTagSize) - 1;
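      // For example, with int_value == 8 and kSmiTagSize == 1 the mask is
      // 0xf, so the and below keeps the low three payload bits plus the
      // (zero) tag bit, which is the tagged remainder modulo 8.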
1553      __ and_(tos, tos, Operand(mask));
1554      deferred->BindExit();
1555      // Mod of positive power of 2 Smi gives a Smi if the lhs is an integer.
1556      frame_->EmitPush(
1557          tos,
1558          both_sides_are_smi ? TypeInfo::Smi() : TypeInfo::Number());
1559      break;
1560    }
1561
1562    case Token::MUL: {
1563      ASSERT(IsEasyToMultiplyBy(int_value));
1564      DeferredCode* deferred =
1565          new DeferredInlineSmiOperation(op, int_value, reversed, mode, tos);
1566      unsigned max_smi_that_wont_overflow = Smi::kMaxValue / int_value;
1567      max_smi_that_wont_overflow <<= kSmiTagSize;
1568      unsigned mask = 0x80000000u;
1569      while ((mask & max_smi_that_wont_overflow) == 0) {
1570        mask |= mask >> 1;
1571      }
1572      mask |= kSmiTagMask;
1573      // This does a single mask that checks for a too high value in a
1574      // conservative way and for a non-Smi.  It also filters out negative
1575      // numbers, unfortunately, but since this code is inline we prefer
1576      // brevity to comprehensiveness.
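      // For example, with int_value == 4 and 31-bit smis the tagged limit is
      // 0x1ffffffe, the loop grows the mask to 0xf0000000, and or'ing in the
      // tag bit gives 0xf0000001.  The tst then rejects non-smis, negative
      // values and any operand of about 2^27 or more, which is conservative
      // but safe.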
1577      __ tst(tos, Operand(mask));
1578      deferred->Branch(ne);
1579      InlineMultiplyByKnownInt(masm_, tos, tos, int_value);
1580      deferred->BindExit();
1581      frame_->EmitPush(tos);
1582      break;
1583    }
1584
1585    default:
1586      UNREACHABLE();
1587      break;
1588  }
1589}
1590
1591
1592void CodeGenerator::Comparison(Condition cc,
1593                               Expression* left,
1594                               Expression* right,
1595                               bool strict) {
1596  VirtualFrame::RegisterAllocationScope scope(this);
1597
1598  if (left != NULL) Load(left);
1599  if (right != NULL) Load(right);
1600
1601  // sp[0] : y
1602  // sp[1] : x
1603  // result : cc register
1604
1605  // Strict only makes sense for equality comparisons.
1606  ASSERT(!strict || cc == eq);
1607
1608  Register lhs;
1609  Register rhs;
1610
1611  bool lhs_is_smi;
1612  bool rhs_is_smi;
1613
1614  // We load the top two stack positions into registers chosen by the virtual
1615  // frame.  This should keep the register shuffling to a minimum.
1616  // Implement '>' and '<=' by reversal to obtain ECMA-262 conversion order.
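  // For example, 'x > y' is evaluated as 'y < x'; the operands are simply
  // popped in the opposite order below.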
1617  if (cc == gt || cc == le) {
1618    cc = ReverseCondition(cc);
1619    lhs_is_smi = frame_->KnownSmiAt(0);
1620    rhs_is_smi = frame_->KnownSmiAt(1);
1621    lhs = frame_->PopToRegister();
1622    rhs = frame_->PopToRegister(lhs);  // Don't pop to the same register again!
1623  } else {
1624    rhs_is_smi = frame_->KnownSmiAt(0);
1625    lhs_is_smi = frame_->KnownSmiAt(1);
1626    rhs = frame_->PopToRegister();
1627    lhs = frame_->PopToRegister(rhs);  // Don't pop to the same register again!
1628  }
1629
1630  bool both_sides_are_smi = (lhs_is_smi && rhs_is_smi);
1631
1632  ASSERT(rhs.is(r0) || rhs.is(r1));
1633  ASSERT(lhs.is(r0) || lhs.is(r1));
1634
1635  JumpTarget exit;
1636
1637  if (!both_sides_are_smi) {
1638    // Now we have the two sides in r0 and r1.  We flush any other registers
1639    // because the stub doesn't know about register allocation.
1640    frame_->SpillAll();
1641    Register scratch = VirtualFrame::scratch0();
1642    Register smi_test_reg;
1643    if (lhs_is_smi) {
1644      smi_test_reg = rhs;
1645    } else if (rhs_is_smi) {
1646      smi_test_reg = lhs;
1647    } else {
1648      __ orr(scratch, lhs, Operand(rhs));
1649      smi_test_reg = scratch;
1650    }
1651    __ tst(smi_test_reg, Operand(kSmiTagMask));
1652    JumpTarget smi;
1653    smi.Branch(eq);
1654
1655    // Perform non-smi comparison by stub.
1656    // CompareStub takes arguments in r0 and r1, returns <0, >0 or 0 in r0.
1657    // We call with 0 args because there are 0 on the stack.
1658    CompareStub stub(cc, strict, NO_SMI_COMPARE_IN_STUB, lhs, rhs);
1659    frame_->CallStub(&stub, 0);
1660    __ cmp(r0, Operand(0, RelocInfo::NONE));
1661    exit.Jump();
1662
1663    smi.Bind();
1664  }
1665
1666  // Do smi comparisons by pointer comparison.
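  // This works because a smi is its value shifted left by the tag size with
  // a zero tag bit, so comparing the tagged words orders them exactly like
  // comparing the untagged values.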
1667  __ cmp(lhs, Operand(rhs));
1668
1669  exit.Bind();
1670  cc_reg_ = cc;
1671}
1672
1673
1674// Call the function on the stack with the given arguments.
1675void CodeGenerator::CallWithArguments(ZoneList<Expression*>* args,
1676                                      CallFunctionFlags flags,
1677                                      int position) {
1678  // Push the arguments ("left-to-right") on the stack.
1679  int arg_count = args->length();
1680  for (int i = 0; i < arg_count; i++) {
1681    Load(args->at(i));
1682  }
1683
1684  // Record the position for debugging purposes.
1685  CodeForSourcePosition(position);
1686
1687  // Use the shared code stub to call the function.
1688  InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
1689  CallFunctionStub call_function(arg_count, in_loop, flags);
1690  frame_->CallStub(&call_function, arg_count + 1);
1691
1692  // Restore context and pop function from the stack.
1693  __ ldr(cp, frame_->Context());
1694  frame_->Drop();  // discard the TOS
1695}
1696
1697
1698void CodeGenerator::CallApplyLazy(Expression* applicand,
1699                                  Expression* receiver,
1700                                  VariableProxy* arguments,
1701                                  int position) {
1702  // An optimized implementation of expressions of the form
1703  // x.apply(y, arguments).
1704  // If the arguments object of the scope has not been allocated,
1705  // and x.apply is Function.prototype.apply, this optimization
1706  // just copies y and the arguments of the current function on the
1707  // stack, as receiver and arguments, and calls x.
1708  // In the implementation comments, we call x the applicand
1709  // and y the receiver.
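  // For example, in a forwarding function such as
  //   function f() { return g.apply(this, arguments); }
  // g is the applicand and 'this' is the receiver.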
1710
1711  ASSERT(ArgumentsMode() == LAZY_ARGUMENTS_ALLOCATION);
1712  ASSERT(arguments->IsArguments());
1713
1714  // Load applicand.apply onto the stack. This will usually
1715  // give us a megamorphic load site. Not super, but it works.
1716  Load(applicand);
1717  Handle<String> name = Factory::LookupAsciiSymbol("apply");
1718  frame_->Dup();
1719  frame_->CallLoadIC(name, RelocInfo::CODE_TARGET);
1720  frame_->EmitPush(r0);
1721
1722  // Load the receiver and the existing arguments object onto the
1723  // expression stack. Avoid allocating the arguments object here.
1724  Load(receiver);
1725  LoadFromSlot(scope()->arguments()->AsSlot(), NOT_INSIDE_TYPEOF);
1726
1727  // At this point the top two stack elements are probably in registers
1728  // since they were just loaded.  Ensure they are in regs and get the
1729  // regs.
1730  Register receiver_reg = frame_->Peek2();
1731  Register arguments_reg = frame_->Peek();
1732
1733  // From now on the frame is spilled.
1734  frame_->SpillAll();
1735
1736  // Emit the source position information after having loaded the
1737  // receiver and the arguments.
1738  CodeForSourcePosition(position);
1739  // Contents of the stack at this point:
1740  //   sp[0]: arguments object of the current function or the hole.
1741  //   sp[1]: receiver
1742  //   sp[2]: applicand.apply
1743  //   sp[3]: applicand.
1744
1745  // Check if the arguments object has been lazily allocated
1746  // already. If so, just use that instead of copying the arguments
1747  // from the stack. This also deals with cases where a local variable
1748  // named 'arguments' has been introduced.
1749  JumpTarget slow;
1750  Label done;
1751  __ LoadRoot(ip, Heap::kArgumentsMarkerRootIndex);
1752  __ cmp(ip, arguments_reg);
1753  slow.Branch(ne);
1754
1755  Label build_args;
1756  // Get rid of the arguments object probe.
1757  frame_->Drop();
1758  // Stack now has 3 elements on it.
1759  // Contents of stack at this point:
1760  //   sp[0]: receiver - in the receiver_reg register.
1761  //   sp[1]: applicand.apply
1762  //   sp[2]: applicand.
1763
1764  // Check that the receiver really is a JavaScript object.
1765  __ BranchOnSmi(receiver_reg, &build_args);
1766  // We allow all JSObjects including JSFunctions.  As long as
1767  // JS_FUNCTION_TYPE is the last instance type and it is right
1768  // after LAST_JS_OBJECT_TYPE, we do not have to check the upper
1769  // bound.
1770  STATIC_ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
1771  STATIC_ASSERT(JS_FUNCTION_TYPE == LAST_JS_OBJECT_TYPE + 1);
1772  __ CompareObjectType(receiver_reg, r2, r3, FIRST_JS_OBJECT_TYPE);
1773  __ b(lt, &build_args);
1774
1775  // Check that applicand.apply is Function.prototype.apply.
1776  __ ldr(r0, MemOperand(sp, kPointerSize));
1777  __ BranchOnSmi(r0, &build_args);
1778  __ CompareObjectType(r0, r1, r2, JS_FUNCTION_TYPE);
1779  __ b(ne, &build_args);
1780  Handle<Code> apply_code(Builtins::builtin(Builtins::FunctionApply));
1781  __ ldr(r1, FieldMemOperand(r0, JSFunction::kCodeEntryOffset));
1782  __ sub(r1, r1, Operand(Code::kHeaderSize - kHeapObjectTag));
1783  __ cmp(r1, Operand(apply_code));
1784  __ b(ne, &build_args);
1785
1786  // Check that applicand is a function.
1787  __ ldr(r1, MemOperand(sp, 2 * kPointerSize));
1788  __ BranchOnSmi(r1, &build_args);
1789  __ CompareObjectType(r1, r2, r3, JS_FUNCTION_TYPE);
1790  __ b(ne, &build_args);
1791
1792  // Copy the arguments to this function possibly from the
1793  // adaptor frame below it.
1794  Label invoke, adapted;
1795  __ ldr(r2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
1796  __ ldr(r3, MemOperand(r2, StandardFrameConstants::kContextOffset));
1797  __ cmp(r3, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
1798  __ b(eq, &adapted);
1799
1800  // No arguments adaptor frame. Copy fixed number of arguments.
1801  __ mov(r0, Operand(scope()->num_parameters()));
1802  for (int i = 0; i < scope()->num_parameters(); i++) {
1803    __ ldr(r2, frame_->ParameterAt(i));
1804    __ push(r2);
1805  }
1806  __ jmp(&invoke);
1807
1808  // Arguments adaptor frame present. Copy arguments from there, but
1809  // avoid copying too many arguments to avoid stack overflows.
1810  __ bind(&adapted);
1811  static const uint32_t kArgumentsLimit = 1 * KB;
1812  __ ldr(r0, MemOperand(r2, ArgumentsAdaptorFrameConstants::kLengthOffset));
1813  __ mov(r0, Operand(r0, LSR, kSmiTagSize));
1814  __ mov(r3, r0);
1815  __ cmp(r0, Operand(kArgumentsLimit));
1816  __ b(gt, &build_args);
1817
1818  // Loop through the arguments pushing them onto the execution
1819  // stack. We don't inform the virtual frame of the push, so we don't
1820  // have to worry about getting rid of the elements from the virtual
1821  // frame.
1822  Label loop;
1823  // r3 is a small non-negative integer, due to the test above.
1824  __ cmp(r3, Operand(0, RelocInfo::NONE));
1825  __ b(eq, &invoke);
1826  // Compute the address of the first argument.
1827  __ add(r2, r2, Operand(r3, LSL, kPointerSizeLog2));
1828  __ add(r2, r2, Operand(kPointerSize));
1829  __ bind(&loop);
1830  // Post-decrement argument address by kPointerSize on each iteration.
1831  __ ldr(r4, MemOperand(r2, kPointerSize, NegPostIndex));
1832  __ push(r4);
1833  __ sub(r3, r3, Operand(1), SetCC);
1834  __ b(gt, &loop);
1835
1836  // Invoke the function.
1837  __ bind(&invoke);
1838  ParameterCount actual(r0);
1839  __ InvokeFunction(r1, actual, CALL_FUNCTION);
1840  // Drop applicand.apply and applicand from the stack, and push
1841  // the result of the function call, but leave the spilled frame
1842  // unchanged, with 3 elements, so it is correct when we compile the
1843  // slow-case code.
1844  __ add(sp, sp, Operand(2 * kPointerSize));
1845  __ push(r0);
1846  // Stack now has 1 element:
1847  //   sp[0]: result
1848  __ jmp(&done);
1849
1850  // Slow-case: Allocate the arguments object since we know it isn't
1851  // there, and fall-through to the slow-case where we call
1852  // applicand.apply.
1853  __ bind(&build_args);
1854  // Stack now has 3 elements, because we jumped here from a point where:
1855  //   sp[0]: receiver
1856  //   sp[1]: applicand.apply
1857  //   sp[2]: applicand.
1858  StoreArgumentsObject(false);
1859
1860  // Stack and frame now have 4 elements.
1861  slow.Bind();
1862
1863  // Generic computation of x.apply(y, args) with no special optimization.
1864  // Flip applicand.apply and applicand on the stack, so
1865  // applicand looks like the receiver of the applicand.apply call.
1866  // Then process it as a normal function call.
1867  __ ldr(r0, MemOperand(sp, 3 * kPointerSize));
1868  __ ldr(r1, MemOperand(sp, 2 * kPointerSize));
1869  __ Strd(r0, r1, MemOperand(sp, 2 * kPointerSize));
1870
1871  CallFunctionStub call_function(2, NOT_IN_LOOP, NO_CALL_FUNCTION_FLAGS);
1872  frame_->CallStub(&call_function, 3);
1873  // The function and its two arguments have been dropped.
1874  frame_->Drop();  // Drop the receiver as well.
1875  frame_->EmitPush(r0);
1876  frame_->SpillAll();  // A spilled frame is also jumping to label done.
1877  // Stack now has 1 element:
1878  //   sp[0]: result
1879  __ bind(&done);
1880
1881  // Restore the context register after a call.
1882  __ ldr(cp, frame_->Context());
1883}
1884
1885
1886void CodeGenerator::Branch(bool if_true, JumpTarget* target) {
1887  ASSERT(has_cc());
1888  Condition cc = if_true ? cc_reg_ : NegateCondition(cc_reg_);
1889  target->Branch(cc);
1890  cc_reg_ = al;
1891}
1892
1893
1894void CodeGenerator::CheckStack() {
1895  frame_->SpillAll();
1896  Comment cmnt(masm_, "[ check stack");
1897  __ LoadRoot(ip, Heap::kStackLimitRootIndex);
1898  masm_->cmp(sp, Operand(ip));
1899  StackCheckStub stub;
1900  // Call the stub if lower.
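  // Both the mov and the call below are conditional on 'lo', so they are
  // skipped when sp is at or above the stack limit.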
1901  masm_->mov(ip,
1902             Operand(reinterpret_cast<intptr_t>(stub.GetCode().location()),
1903                     RelocInfo::CODE_TARGET),
1904             LeaveCC,
1905             lo);
1906  masm_->Call(ip, lo);
1907}
1908
1909
1910void CodeGenerator::VisitStatements(ZoneList<Statement*>* statements) {
1911#ifdef DEBUG
1912  int original_height = frame_->height();
1913#endif
1914  for (int i = 0; frame_ != NULL && i < statements->length(); i++) {
1915    Visit(statements->at(i));
1916  }
1917  ASSERT(!has_valid_frame() || frame_->height() == original_height);
1918}
1919
1920
1921void CodeGenerator::VisitBlock(Block* node) {
1922#ifdef DEBUG
1923  int original_height = frame_->height();
1924#endif
1925  Comment cmnt(masm_, "[ Block");
1926  CodeForStatementPosition(node);
1927  node->break_target()->SetExpectedHeight();
1928  VisitStatements(node->statements());
1929  if (node->break_target()->is_linked()) {
1930    node->break_target()->Bind();
1931  }
1932  node->break_target()->Unuse();
1933  ASSERT(!has_valid_frame() || frame_->height() == original_height);
1934}
1935
1936
1937void CodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
1938  frame_->EmitPush(cp);
1939  frame_->EmitPush(Operand(pairs));
1940  frame_->EmitPush(Operand(Smi::FromInt(is_eval() ? 1 : 0)));
1941
1942  frame_->CallRuntime(Runtime::kDeclareGlobals, 3);
1943  // The result is discarded.
1944}
1945
1946
1947void CodeGenerator::VisitDeclaration(Declaration* node) {
1948#ifdef DEBUG
1949  int original_height = frame_->height();
1950#endif
1951  Comment cmnt(masm_, "[ Declaration");
1952  Variable* var = node->proxy()->var();
1953  ASSERT(var != NULL);  // must have been resolved
1954  Slot* slot = var->AsSlot();
1955
1956  // If it was not possible to allocate the variable at compile time,
1957  // we need to "declare" it at runtime to make sure it actually
1958  // exists in the local context.
1959  if (slot != NULL && slot->type() == Slot::LOOKUP) {
1960    // Variables with a "LOOKUP" slot were introduced as non-locals
1961    // during variable resolution and must have mode DYNAMIC.
1962    ASSERT(var->is_dynamic());
1963    // For now, just do a runtime call.
1964    frame_->EmitPush(cp);
1965    frame_->EmitPush(Operand(var->name()));
1966    // Declaration nodes are always declared in only two modes.
1967    ASSERT(node->mode() == Variable::VAR || node->mode() == Variable::CONST);
1968    PropertyAttributes attr = node->mode() == Variable::VAR ? NONE : READ_ONLY;
1969    frame_->EmitPush(Operand(Smi::FromInt(attr)));
1970    // Push initial value, if any.
1971    // Note: For variables we must not push an initial value (such as
1972    // 'undefined') because we may have a (legal) redeclaration and we
1973    // must not destroy the current value.
1974    if (node->mode() == Variable::CONST) {
1975      frame_->EmitPushRoot(Heap::kTheHoleValueRootIndex);
1976    } else if (node->fun() != NULL) {
1977      Load(node->fun());
1978    } else {
1979      frame_->EmitPush(Operand(0, RelocInfo::NONE));
1980    }
1981
1982    frame_->CallRuntime(Runtime::kDeclareContextSlot, 4);
1983    // Ignore the return value (declarations are statements).
1984
1985    ASSERT(frame_->height() == original_height);
1986    return;
1987  }
1988
1989  ASSERT(!var->is_global());
1990
1991  // If we have a function or a constant, we need to initialize the variable.
1992  Expression* val = NULL;
1993  if (node->mode() == Variable::CONST) {
1994    val = new Literal(Factory::the_hole_value());
1995  } else {
1996    val = node->fun();  // NULL if we don't have a function
1997  }
1998
1999
2000  if (val != NULL) {
2001    WriteBarrierCharacter wb_info =
2002        val->type()->IsLikelySmi() ? LIKELY_SMI : UNLIKELY_SMI;
2003    if (val->AsLiteral() != NULL) wb_info = NEVER_NEWSPACE;
2004    // Set initial value.
2005    Reference target(this, node->proxy());
2006    Load(val);
2007    target.SetValue(NOT_CONST_INIT, wb_info);
2008
2009    // Get rid of the assigned value (declarations are statements).
2010    frame_->Drop();
2011  }
2012  ASSERT(frame_->height() == original_height);
2013}
2014
2015
2016void CodeGenerator::VisitExpressionStatement(ExpressionStatement* node) {
2017#ifdef DEBUG
2018  int original_height = frame_->height();
2019#endif
2020  Comment cmnt(masm_, "[ ExpressionStatement");
2021  CodeForStatementPosition(node);
2022  Expression* expression = node->expression();
2023  expression->MarkAsStatement();
2024  Load(expression);
2025  frame_->Drop();
2026  ASSERT(frame_->height() == original_height);
2027}
2028
2029
2030void CodeGenerator::VisitEmptyStatement(EmptyStatement* node) {
2031#ifdef DEBUG
2032  int original_height = frame_->height();
2033#endif
2034  Comment cmnt(masm_, "// EmptyStatement");
2035  CodeForStatementPosition(node);
2036  // nothing to do
2037  ASSERT(frame_->height() == original_height);
2038}
2039
2040
2041void CodeGenerator::VisitIfStatement(IfStatement* node) {
2042#ifdef DEBUG
2043  int original_height = frame_->height();
2044#endif
2045  Comment cmnt(masm_, "[ IfStatement");
2046  // Generate different code depending on which parts of the if statement
2047  // are present or not.
2048  bool has_then_stm = node->HasThenStatement();
2049  bool has_else_stm = node->HasElseStatement();
2050
2051  CodeForStatementPosition(node);
2052
2053  JumpTarget exit;
2054  if (has_then_stm && has_else_stm) {
2055    Comment cmnt(masm_, "[ IfThenElse");
2056    JumpTarget then;
2057    JumpTarget else_;
2058    // if (cond)
2059    LoadCondition(node->condition(), &then, &else_, true);
2060    if (frame_ != NULL) {
2061      Branch(false, &else_);
2062    }
2063    // then
2064    if (frame_ != NULL || then.is_linked()) {
2065      then.Bind();
2066      Visit(node->then_statement());
2067    }
2068    if (frame_ != NULL) {
2069      exit.Jump();
2070    }
2071    // else
2072    if (else_.is_linked()) {
2073      else_.Bind();
2074      Visit(node->else_statement());
2075    }
2076
2077  } else if (has_then_stm) {
2078    Comment cmnt(masm_, "[ IfThen");
2079    ASSERT(!has_else_stm);
2080    JumpTarget then;
2081    // if (cond)
2082    LoadCondition(node->condition(), &then, &exit, true);
2083    if (frame_ != NULL) {
2084      Branch(false, &exit);
2085    }
2086    // then
2087    if (frame_ != NULL || then.is_linked()) {
2088      then.Bind();
2089      Visit(node->then_statement());
2090    }
2091
2092  } else if (has_else_stm) {
2093    Comment cmnt(masm_, "[ IfElse");
2094    ASSERT(!has_then_stm);
2095    JumpTarget else_;
2096    // if (!cond)
2097    LoadCondition(node->condition(), &exit, &else_, true);
2098    if (frame_ != NULL) {
2099      Branch(true, &exit);
2100    }
2101    // else
2102    if (frame_ != NULL || else_.is_linked()) {
2103      else_.Bind();
2104      Visit(node->else_statement());
2105    }
2106
2107  } else {
2108    Comment cmnt(masm_, "[ If");
2109    ASSERT(!has_then_stm && !has_else_stm);
2110    // if (cond)
2111    LoadCondition(node->condition(), &exit, &exit, false);
2112    if (frame_ != NULL) {
2113      if (has_cc()) {
2114        cc_reg_ = al;
2115      } else {
2116        frame_->Drop();
2117      }
2118    }
2119  }
2120
2121  // end
2122  if (exit.is_linked()) {
2123    exit.Bind();
2124  }
2125  ASSERT(!has_valid_frame() || frame_->height() == original_height);
2126}
2127
2128
2129void CodeGenerator::VisitContinueStatement(ContinueStatement* node) {
2130  Comment cmnt(masm_, "[ ContinueStatement");
2131  CodeForStatementPosition(node);
2132  node->target()->continue_target()->Jump();
2133}
2134
2135
2136void CodeGenerator::VisitBreakStatement(BreakStatement* node) {
2137  Comment cmnt(masm_, "[ BreakStatement");
2138  CodeForStatementPosition(node);
2139  node->target()->break_target()->Jump();
2140}
2141
2142
2143void CodeGenerator::VisitReturnStatement(ReturnStatement* node) {
2144  Comment cmnt(masm_, "[ ReturnStatement");
2145
2146  CodeForStatementPosition(node);
2147  Load(node->expression());
2148  frame_->PopToR0();
2149  frame_->PrepareForReturn();
2150  if (function_return_is_shadowed_) {
2151    function_return_.Jump();
2152  } else {
2153    // Pop the result from the frame and prepare the frame for
2154    // returning, thus making it easier to merge.
2155    if (function_return_.is_bound()) {
2156      // If the function return label is already bound we reuse the
2157      // code by jumping to the return site.
2158      function_return_.Jump();
2159    } else {
2160      function_return_.Bind();
2161      GenerateReturnSequence();
2162    }
2163  }
2164}
2165
2166
2167void CodeGenerator::GenerateReturnSequence() {
2168  if (FLAG_trace) {
2169    // Push the return value on the stack as the parameter.
2170    // Runtime::TraceExit returns the parameter as it is.
2171    frame_->EmitPush(r0);
2172    frame_->CallRuntime(Runtime::kTraceExit, 1);
2173  }
2174
2175#ifdef DEBUG
2176  // Add a label for checking the size of the code used for returning.
2177  Label check_exit_codesize;
2178  masm_->bind(&check_exit_codesize);
2179#endif
2180  // Make sure that the constant pool is not emitted inside of the return
2181  // sequence.
2182  { Assembler::BlockConstPoolScope block_const_pool(masm_);
2183    // Tear down the frame which will restore the caller's frame pointer and
2184    // the link register.
2185    frame_->Exit();
2186
2187    // Here we use masm_-> instead of the __ macro to keep the code coverage
2188    // tool from instrumenting this code, as we rely on the code size here.
2189    int32_t sp_delta = (scope()->num_parameters() + 1) * kPointerSize;
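    // The '+ 1' accounts for the receiver, which is removed from the stack
    // together with the parameters.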
2190    masm_->add(sp, sp, Operand(sp_delta));
2191    masm_->Jump(lr);
2192    DeleteFrame();
2193
2194#ifdef DEBUG
2195    // Check that the size of the code used for returning matches what is
2196    // expected by the debugger. If the sp_delta above cannot be encoded in
2197    // the add instruction, the add will generate two instructions.
2198    int return_sequence_length =
2199        masm_->InstructionsGeneratedSince(&check_exit_codesize);
2200    CHECK(return_sequence_length ==
2201          Assembler::kJSReturnSequenceInstructions ||
2202          return_sequence_length ==
2203          Assembler::kJSReturnSequenceInstructions + 1);
2204#endif
2205  }
2206}
2207
2208
2209void CodeGenerator::VisitWithEnterStatement(WithEnterStatement* node) {
2210#ifdef DEBUG
2211  int original_height = frame_->height();
2212#endif
2213  Comment cmnt(masm_, "[ WithEnterStatement");
2214  CodeForStatementPosition(node);
2215  Load(node->expression());
2216  if (node->is_catch_block()) {
2217    frame_->CallRuntime(Runtime::kPushCatchContext, 1);
2218  } else {
2219    frame_->CallRuntime(Runtime::kPushContext, 1);
2220  }
2221#ifdef DEBUG
2222  JumpTarget verified_true;
2223  __ cmp(r0, cp);
2224  verified_true.Branch(eq);
2225  __ stop("PushContext: r0 is expected to be the same as cp");
2226  verified_true.Bind();
2227#endif
2228  // Update context local.
2229  __ str(cp, frame_->Context());
2230  ASSERT(frame_->height() == original_height);
2231}
2232
2233
2234void CodeGenerator::VisitWithExitStatement(WithExitStatement* node) {
2235#ifdef DEBUG
2236  int original_height = frame_->height();
2237#endif
2238  Comment cmnt(masm_, "[ WithExitStatement");
2239  CodeForStatementPosition(node);
2240  // Pop context.
2241  __ ldr(cp, ContextOperand(cp, Context::PREVIOUS_INDEX));
2242  // Update context local.
2243  __ str(cp, frame_->Context());
2244  ASSERT(frame_->height() == original_height);
2245}
2246
2247
2248void CodeGenerator::VisitSwitchStatement(SwitchStatement* node) {
2249#ifdef DEBUG
2250  int original_height = frame_->height();
2251#endif
2252  Comment cmnt(masm_, "[ SwitchStatement");
2253  CodeForStatementPosition(node);
2254  node->break_target()->SetExpectedHeight();
2255
2256  Load(node->tag());
2257
2258  JumpTarget next_test;
2259  JumpTarget fall_through;
2260  JumpTarget default_entry;
2261  JumpTarget default_exit(JumpTarget::BIDIRECTIONAL);
2262  ZoneList<CaseClause*>* cases = node->cases();
2263  int length = cases->length();
2264  CaseClause* default_clause = NULL;
2265
2266  for (int i = 0; i < length; i++) {
2267    CaseClause* clause = cases->at(i);
2268    if (clause->is_default()) {
2269      // Remember the default clause and compile it at the end.
2270      default_clause = clause;
2271      continue;
2272    }
2273
2274    Comment cmnt(masm_, "[ Case clause");
2275    // Compile the test.
2276    next_test.Bind();
2277    next_test.Unuse();
2278    // Duplicate TOS.
2279    frame_->Dup();
2280    Comparison(eq, NULL, clause->label(), true);
2281    Branch(false, &next_test);
2282
2283    // Before entering the body from the test, remove the switch value from
2284    // the stack.
2285    frame_->Drop();
2286
2287    // Label the body so that fall through is enabled.
2288    if (i > 0 && cases->at(i - 1)->is_default()) {
2289      default_exit.Bind();
2290    } else {
2291      fall_through.Bind();
2292      fall_through.Unuse();
2293    }
2294    VisitStatements(clause->statements());
2295
2296    // If control flow can fall through from the body, jump to the next body
2297    // or the end of the statement.
2298    if (frame_ != NULL) {
2299      if (i < length - 1 && cases->at(i + 1)->is_default()) {
2300        default_entry.Jump();
2301      } else {
2302        fall_through.Jump();
2303      }
2304    }
2305  }
2306
2307  // The final "test" removes the switch value.
2308  next_test.Bind();
2309  frame_->Drop();
2310
2311  // If there is a default clause, compile it.
2312  if (default_clause != NULL) {
2313    Comment cmnt(masm_, "[ Default clause");
2314    default_entry.Bind();
2315    VisitStatements(default_clause->statements());
2316    // If control flow can fall out of the default and there is a case after
2317    // it, jump to that case's body.
2318    if (frame_ != NULL && default_exit.is_bound()) {
2319      default_exit.Jump();
2320    }
2321  }
2322
2323  if (fall_through.is_linked()) {
2324    fall_through.Bind();
2325  }
2326
2327  if (node->break_target()->is_linked()) {
2328    node->break_target()->Bind();
2329  }
2330  node->break_target()->Unuse();
2331  ASSERT(!has_valid_frame() || frame_->height() == original_height);
2332}
2333
2334
2335void CodeGenerator::VisitDoWhileStatement(DoWhileStatement* node) {
2336#ifdef DEBUG
2337  int original_height = frame_->height();
2338#endif
2339  Comment cmnt(masm_, "[ DoWhileStatement");
2340  CodeForStatementPosition(node);
2341  node->break_target()->SetExpectedHeight();
2342  JumpTarget body(JumpTarget::BIDIRECTIONAL);
2343  IncrementLoopNesting();
2344
2345  // Label the top of the loop for the backward CFG edge.  If the test
2346  // is always true we can use the continue target, and if the test is
2347  // always false there is no need.
2348  ConditionAnalysis info = AnalyzeCondition(node->cond());
2349  switch (info) {
2350    case ALWAYS_TRUE:
2351      node->continue_target()->SetExpectedHeight();
2352      node->continue_target()->Bind();
2353      break;
2354    case ALWAYS_FALSE:
2355      node->continue_target()->SetExpectedHeight();
2356      break;
2357    case DONT_KNOW:
2358      node->continue_target()->SetExpectedHeight();
2359      body.Bind();
2360      break;
2361  }
2362
2363  CheckStack();  // TODO(1222600): ignore if body contains calls.
2364  Visit(node->body());
2365
2366  // Compile the test.
2367  switch (info) {
2368    case ALWAYS_TRUE:
2369      // If control can fall off the end of the body, jump back to the
2370      // top.
2371      if (has_valid_frame()) {
2372        node->continue_target()->Jump();
2373      }
2374      break;
2375    case ALWAYS_FALSE:
2376      // If we have a continue in the body, we only have to bind its
2377      // jump target.
2378      if (node->continue_target()->is_linked()) {
2379        node->continue_target()->Bind();
2380      }
2381      break;
2382    case DONT_KNOW:
2383      // We have to compile the test expression if it can be reached by
2384      // control flow falling out of the body or via continue.
2385      if (node->continue_target()->is_linked()) {
2386        node->continue_target()->Bind();
2387      }
2388      if (has_valid_frame()) {
2389        Comment cmnt(masm_, "[ DoWhileCondition");
2390        CodeForDoWhileConditionPosition(node);
2391        LoadCondition(node->cond(), &body, node->break_target(), true);
2392        if (has_valid_frame()) {
2393          // An invalid frame here indicates that control did not
2394          // fall out of the test expression.
2395          Branch(true, &body);
2396        }
2397      }
2398      break;
2399  }
2400
2401  if (node->break_target()->is_linked()) {
2402    node->break_target()->Bind();
2403  }
2404  DecrementLoopNesting();
2405  ASSERT(!has_valid_frame() || frame_->height() == original_height);
2406}
2407
2408
2409void CodeGenerator::VisitWhileStatement(WhileStatement* node) {
2410#ifdef DEBUG
2411  int original_height = frame_->height();
2412#endif
2413  Comment cmnt(masm_, "[ WhileStatement");
2414  CodeForStatementPosition(node);
2415
2416  // If the test is never true and has no side effects there is no need
2417  // to compile the test or body.
2418  ConditionAnalysis info = AnalyzeCondition(node->cond());
2419  if (info == ALWAYS_FALSE) return;
2420
2421  node->break_target()->SetExpectedHeight();
2422  IncrementLoopNesting();
2423
2424  // Label the top of the loop with the continue target for the backward
2425  // CFG edge.
2426  node->continue_target()->SetExpectedHeight();
2427  node->continue_target()->Bind();
2428
2429  if (info == DONT_KNOW) {
2430    JumpTarget body(JumpTarget::BIDIRECTIONAL);
2431    LoadCondition(node->cond(), &body, node->break_target(), true);
2432    if (has_valid_frame()) {
2433      // A NULL frame indicates that control did not fall out of the
2434      // test expression.
2435      Branch(false, node->break_target());
2436    }
2437    if (has_valid_frame() || body.is_linked()) {
2438      body.Bind();
2439    }
2440  }
2441
2442  if (has_valid_frame()) {
2443    CheckStack();  // TODO(1222600): ignore if body contains calls.
2444    Visit(node->body());
2445
2446    // If control flow can fall out of the body, jump back to the top.
2447    if (has_valid_frame()) {
2448      node->continue_target()->Jump();
2449    }
2450  }
2451  if (node->break_target()->is_linked()) {
2452    node->break_target()->Bind();
2453  }
2454  DecrementLoopNesting();
2455  ASSERT(!has_valid_frame() || frame_->height() == original_height);
2456}
2457
2458
2459void CodeGenerator::VisitForStatement(ForStatement* node) {
2460#ifdef DEBUG
2461  int original_height = frame_->height();
2462#endif
2463  Comment cmnt(masm_, "[ ForStatement");
2464  CodeForStatementPosition(node);
2465  if (node->init() != NULL) {
2466    Visit(node->init());
2467  }
2468
2469  // If the test is never true there is no need to compile the test or
2470  // body.
2471  ConditionAnalysis info = AnalyzeCondition(node->cond());
2472  if (info == ALWAYS_FALSE) return;
2473
2474  node->break_target()->SetExpectedHeight();
2475  IncrementLoopNesting();
2476
2477  // We know that the loop index is a smi if it is not modified in the
2478  // loop body and it is checked against a constant limit in the loop
2479  // condition.  In this case, we reset the static type information of the
2480  // loop index to smi before compiling the body, the update expression, and
2481  // the bottom check of the loop condition.
2482  TypeInfoCodeGenState type_info_scope(this,
2483                                       node->is_fast_smi_loop() ?
2484                                       node->loop_variable()->AsSlot() :
2485                                       NULL,
2486                                       TypeInfo::Smi());
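  // For example, in 'for (var i = 0; i < 100; i++) { ... }' where the body
  // does not assign to i, the loop variable can be treated as a smi while
  // compiling the body, the update expression and the bottom check.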
2487
2488  // If there is no update statement, label the top of the loop with the
2489  // continue target, otherwise with the loop target.
2490  JumpTarget loop(JumpTarget::BIDIRECTIONAL);
2491  if (node->next() == NULL) {
2492    node->continue_target()->SetExpectedHeight();
2493    node->continue_target()->Bind();
2494  } else {
2495    node->continue_target()->SetExpectedHeight();
2496    loop.Bind();
2497  }
2498
2499  // If the test is always true, there is no need to compile it.
2500  if (info == DONT_KNOW) {
2501    JumpTarget body;
2502    LoadCondition(node->cond(), &body, node->break_target(), true);
2503    if (has_valid_frame()) {
2504      Branch(false, node->break_target());
2505    }
2506    if (has_valid_frame() || body.is_linked()) {
2507      body.Bind();
2508    }
2509  }
2510
2511  if (has_valid_frame()) {
2512    CheckStack();  // TODO(1222600): ignore if body contains calls.
2513    Visit(node->body());
2514
2515    if (node->next() == NULL) {
2516      // If there is no update statement and control flow can fall out
2517      // of the loop, jump directly to the continue label.
2518      if (has_valid_frame()) {
2519        node->continue_target()->Jump();
2520      }
2521    } else {
2522      // If there is an update statement and control flow can reach it
2523      // via falling out of the body of the loop or continuing, we
2524      // compile the update statement.
2525      if (node->continue_target()->is_linked()) {
2526        node->continue_target()->Bind();
2527      }
2528      if (has_valid_frame()) {
2529        // Record the source position of the statement, since this code,
2530        // which comes after the code for the body, actually belongs to the
2531        // loop statement and not to the body.
2532        CodeForStatementPosition(node);
2533        Visit(node->next());
2534        loop.Jump();
2535      }
2536    }
2537  }
2538  if (node->break_target()->is_linked()) {
2539    node->break_target()->Bind();
2540  }
2541  DecrementLoopNesting();
2542  ASSERT(!has_valid_frame() || frame_->height() == original_height);
2543}
2544
2545
2546void CodeGenerator::VisitForInStatement(ForInStatement* node) {
2547#ifdef DEBUG
2548  int original_height = frame_->height();
2549#endif
2550  Comment cmnt(masm_, "[ ForInStatement");
2551  CodeForStatementPosition(node);
2552
2553  JumpTarget primitive;
2554  JumpTarget jsobject;
2555  JumpTarget fixed_array;
2556  JumpTarget entry(JumpTarget::BIDIRECTIONAL);
2557  JumpTarget end_del_check;
2558  JumpTarget exit;
2559
2560  // Get the object to enumerate over (converted to JSObject).
2561  Load(node->enumerable());
2562
2563  VirtualFrame::SpilledScope spilled_scope(frame_);
2564  // Both SpiderMonkey and kjs ignore null and undefined in contrast
2565  // to the specification.  12.6.4 mandates a call to ToObject.
2566  frame_->EmitPop(r0);
2567  __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
2568  __ cmp(r0, ip);
2569  exit.Branch(eq);
2570  __ LoadRoot(ip, Heap::kNullValueRootIndex);
2571  __ cmp(r0, ip);
2572  exit.Branch(eq);
2573
2574  // Stack layout in body:
2575  // [iteration counter (Smi)]
2576  // [length of array]
2577  // [FixedArray]
2578  // [Map or 0]
2579  // [Object]
2580
2581  // Check if enumerable is already a JSObject
2582  __ tst(r0, Operand(kSmiTagMask));
2583  primitive.Branch(eq);
2584  __ CompareObjectType(r0, r1, r1, FIRST_JS_OBJECT_TYPE);
2585  jsobject.Branch(hs);
2586
2587  primitive.Bind();
2588  frame_->EmitPush(r0);
2589  frame_->InvokeBuiltin(Builtins::TO_OBJECT, CALL_JS, 1);
2590
2591  jsobject.Bind();
2592  // Get the set of properties (as a FixedArray or Map).
2593  // r0: value to be iterated over
2594  frame_->EmitPush(r0);  // Push the object being iterated over.
2595
2596  // Check cache validity in generated code. This is a fast case for
2597  // the JSObject::IsSimpleEnum cache validity checks. If we cannot
2598  // guarantee cache validity, call the runtime system to check cache
2599  // validity or get the property names in a fixed array.
2600  JumpTarget call_runtime;
2601  JumpTarget loop(JumpTarget::BIDIRECTIONAL);
2602  JumpTarget check_prototype;
2603  JumpTarget use_cache;
2604  __ mov(r1, Operand(r0));
2605  loop.Bind();
2606  // Check that there are no elements.
2607  __ ldr(r2, FieldMemOperand(r1, JSObject::kElementsOffset));
2608  __ LoadRoot(r4, Heap::kEmptyFixedArrayRootIndex);
2609  __ cmp(r2, r4);
2610  call_runtime.Branch(ne);
2611  // Check that instance descriptors are not empty so that we can
2612  // check for an enum cache.  Leave the map in r3 for the subsequent
2613  // prototype load.
2614  __ ldr(r3, FieldMemOperand(r1, HeapObject::kMapOffset));
2615  __ ldr(r2, FieldMemOperand(r3, Map::kInstanceDescriptorsOffset));
2616  __ LoadRoot(ip, Heap::kEmptyDescriptorArrayRootIndex);
2617  __ cmp(r2, ip);
2618  call_runtime.Branch(eq);
2619  // Check that there is an enum cache in the non-empty instance
2620  // descriptors.  This is the case if the next enumeration index
2621  // field does not contain a smi.
2622  __ ldr(r2, FieldMemOperand(r2, DescriptorArray::kEnumerationIndexOffset));
2623  __ tst(r2, Operand(kSmiTagMask));
2624  call_runtime.Branch(eq);
2625  // For all objects but the receiver, check that the cache is empty.
2626  // r4: empty fixed array root.
2627  __ cmp(r1, r0);
2628  check_prototype.Branch(eq);
2629  __ ldr(r2, FieldMemOperand(r2, DescriptorArray::kEnumCacheBridgeCacheOffset));
2630  __ cmp(r2, r4);
2631  call_runtime.Branch(ne);
2632  check_prototype.Bind();
2633  // Load the prototype from the map and loop if non-null.
2634  __ ldr(r1, FieldMemOperand(r3, Map::kPrototypeOffset));
2635  __ LoadRoot(ip, Heap::kNullValueRootIndex);
2636  __ cmp(r1, ip);
2637  loop.Branch(ne);
2638  // The enum cache is valid.  Load the map of the object being
2639  // iterated over and use the cache for the iteration.
2640  __ ldr(r0, FieldMemOperand(r0, HeapObject::kMapOffset));
2641  use_cache.Jump();
2642
2643  call_runtime.Bind();
2644  // Call the runtime to get the property names for the object.
2645  frame_->EmitPush(r0);  // push the object (slot 4) for the runtime call
2646  frame_->CallRuntime(Runtime::kGetPropertyNamesFast, 1);
2647
2648  // If we got a map from the runtime call, we can do a fast
2649  // modification check. Otherwise, we got a fixed array, and we have
2650  // to do a slow check.
2651  // r0: map or fixed array (result from call to
2652  // Runtime::kGetPropertyNamesFast)
2653  __ mov(r2, Operand(r0));
2654  __ ldr(r1, FieldMemOperand(r2, HeapObject::kMapOffset));
2655  __ LoadRoot(ip, Heap::kMetaMapRootIndex);
2656  __ cmp(r1, ip);
2657  fixed_array.Branch(ne);
2658
2659  use_cache.Bind();
2660  // Get enum cache
2661  // r0: map (either the result from a call to
2662  // Runtime::kGetPropertyNamesFast or has been fetched directly from
2663  // the object)
2664  __ mov(r1, Operand(r0));
2665  __ ldr(r1, FieldMemOperand(r1, Map::kInstanceDescriptorsOffset));
2666  __ ldr(r1, FieldMemOperand(r1, DescriptorArray::kEnumerationIndexOffset));
2667  __ ldr(r2,
2668         FieldMemOperand(r1, DescriptorArray::kEnumCacheBridgeCacheOffset));
2669
2670  frame_->EmitPush(r0);  // map
2671  frame_->EmitPush(r2);  // enum cache bridge cache
2672  __ ldr(r0, FieldMemOperand(r2, FixedArray::kLengthOffset));
2673  frame_->EmitPush(r0);
2674  __ mov(r0, Operand(Smi::FromInt(0)));
2675  frame_->EmitPush(r0);
2676  entry.Jump();
2677
2678  fixed_array.Bind();
2679  __ mov(r1, Operand(Smi::FromInt(0)));
2680  frame_->EmitPush(r1);  // insert 0 in place of Map
2681  frame_->EmitPush(r0);
2682
2683  // Push the length of the array and the initial index onto the stack.
2684  __ ldr(r0, FieldMemOperand(r0, FixedArray::kLengthOffset));
2685  frame_->EmitPush(r0);
2686  __ mov(r0, Operand(Smi::FromInt(0)));  // init index
2687  frame_->EmitPush(r0);
2688
2689  // Condition.
2690  entry.Bind();
2691  // sp[0] : index
2692  // sp[1] : array/enum cache length
2693  // sp[2] : array or enum cache
2694  // sp[3] : 0 or map
2695  // sp[4] : enumerable
2696  // Grab the current frame's height for the break and continue
2697  // targets only after all the state is pushed on the frame.
2698  node->break_target()->SetExpectedHeight();
2699  node->continue_target()->SetExpectedHeight();
2700
2701  // Load the current count to r0, load the length to r1.
2702  __ Ldrd(r0, r1, frame_->ElementAt(0));
2703  __ cmp(r0, r1);  // compare to the array length
2704  node->break_target()->Branch(hs);
2705
2706  // Get the i'th entry of the array.
2707  __ ldr(r2, frame_->ElementAt(2));
2708  __ add(r2, r2, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
2709  __ ldr(r3, MemOperand(r2, r0, LSL, kPointerSizeLog2 - kSmiTagSize));
2710
2711  // Get Map or 0.
2712  __ ldr(r2, frame_->ElementAt(3));
2713  // Check if this (still) matches the map of the enumerable.
2714  // If not, we have to filter the key.
2715  __ ldr(r1, frame_->ElementAt(4));
2716  __ ldr(r1, FieldMemOperand(r1, HeapObject::kMapOffset));
2717  __ cmp(r1, Operand(r2));
2718  end_del_check.Branch(eq);
2719
2720  // Convert the entry to a string (or null if it isn't a property anymore).
2721  __ ldr(r0, frame_->ElementAt(4));  // push enumerable
2722  frame_->EmitPush(r0);
2723  frame_->EmitPush(r3);  // push entry
2724  frame_->InvokeBuiltin(Builtins::FILTER_KEY, CALL_JS, 2);
2725  __ mov(r3, Operand(r0), SetCC);
2726  // If the property has been removed while iterating, we just skip it.
2727  node->continue_target()->Branch(eq);
2728
2729  end_del_check.Bind();
2730  // Store the entry in the 'each' expression and take another spin in the
2731  // loop.  r3: i'th entry of the enum cache (or string thereof).
2732  frame_->EmitPush(r3);  // push entry
2733  { VirtualFrame::RegisterAllocationScope scope(this);
2734    Reference each(this, node->each());
2735    if (!each.is_illegal()) {
2736      if (each.size() > 0) {
2737        // Loading a reference may leave the frame in an unspilled state.
2738        frame_->SpillAll();  // Sync stack to memory.
2739        // Get the value (under the reference on the stack) from memory.
2740        __ ldr(r0, frame_->ElementAt(each.size()));
2741        frame_->EmitPush(r0);
2742        each.SetValue(NOT_CONST_INIT, UNLIKELY_SMI);
2743        frame_->Drop(2);  // The result of the set and the extra pushed value.
2744      } else {
2745        // If the reference was to a slot we rely on the convenient property
2746        // that it doesn't matter whether a value (eg, r3 pushed above) is
2747        // right on top of or right underneath a zero-sized reference.
2748        each.SetValue(NOT_CONST_INIT, UNLIKELY_SMI);
2749        frame_->Drop(1);  // Drop the result of the set operation.
2750      }
2751    }
2752  }
2753  // Body.
2754  CheckStack();  // TODO(1222600): ignore if body contains calls.
2755  { VirtualFrame::RegisterAllocationScope scope(this);
2756    Visit(node->body());
2757  }
2758
2759  // Next.  Reestablish a spilled frame in case we are coming here via
2760  // a continue in the body.
2761  node->continue_target()->Bind();
2762  frame_->SpillAll();
2763  frame_->EmitPop(r0);
2764  __ add(r0, r0, Operand(Smi::FromInt(1)));
2765  frame_->EmitPush(r0);
2766  entry.Jump();
2767
2768  // Cleanup.  No need to spill because VirtualFrame::Drop is safe for
2769  // any frame.
2770  node->break_target()->Bind();
2771  frame_->Drop(5);
2772
2773  // Exit.
2774  exit.Bind();
2775  node->continue_target()->Unuse();
2776  node->break_target()->Unuse();
2777  ASSERT(frame_->height() == original_height);
2778}
2779
2780
2781void CodeGenerator::VisitTryCatchStatement(TryCatchStatement* node) {
2782#ifdef DEBUG
2783  int original_height = frame_->height();
2784#endif
2785  VirtualFrame::SpilledScope spilled_scope(frame_);
2786  Comment cmnt(masm_, "[ TryCatchStatement");
2787  CodeForStatementPosition(node);
2788
2789  JumpTarget try_block;
2790  JumpTarget exit;
2791
2792  try_block.Call();
2793  // --- Catch block ---
2794  frame_->EmitPush(r0);
2795
2796  // Store the caught exception in the catch variable.
2797  Variable* catch_var = node->catch_var()->var();
2798  ASSERT(catch_var != NULL && catch_var->AsSlot() != NULL);
2799  StoreToSlot(catch_var->AsSlot(), NOT_CONST_INIT);
2800
2801  // Remove the exception from the stack.
2802  frame_->Drop();
2803
2804  { VirtualFrame::RegisterAllocationScope scope(this);
2805    VisitStatements(node->catch_block()->statements());
2806  }
2807  if (frame_ != NULL) {
2808    exit.Jump();
2809  }
2810
2811
2812  // --- Try block ---
2813  try_block.Bind();
2814
2815  frame_->PushTryHandler(TRY_CATCH_HANDLER);
2816  int handler_height = frame_->height();
2817
2818  // Shadow the labels for all escapes from the try block, including
2819  // returns. During shadowing, the original label is hidden as the
2820  // LabelShadow and operations on the original actually affect the
2821  // shadowing label.
2822  //
2823  // We should probably try to unify the escaping labels and the return
2824  // label.
2825  int nof_escapes = node->escaping_targets()->length();
2826  List<ShadowTarget*> shadows(1 + nof_escapes);
2827
2828  // Add the shadow target for the function return.
2829  static const int kReturnShadowIndex = 0;
2830  shadows.Add(new ShadowTarget(&function_return_));
2831  bool function_return_was_shadowed = function_return_is_shadowed_;
2832  function_return_is_shadowed_ = true;
2833  ASSERT(shadows[kReturnShadowIndex]->other_target() == &function_return_);
2834
2835  // Add the remaining shadow targets.
2836  for (int i = 0; i < nof_escapes; i++) {
2837    shadows.Add(new ShadowTarget(node->escaping_targets()->at(i)));
2838  }
2839
2840  // Generate code for the statements in the try block.
2841  { VirtualFrame::RegisterAllocationScope scope(this);
2842    VisitStatements(node->try_block()->statements());
2843  }
2844
2845  // Stop the introduced shadowing and count the number of required unlinks.
2846  // After shadowing stops, the original labels are unshadowed and the
2847  // LabelShadows represent the formerly shadowing labels.
2848  bool has_unlinks = false;
2849  for (int i = 0; i < shadows.length(); i++) {
2850    shadows[i]->StopShadowing();
2851    has_unlinks = has_unlinks || shadows[i]->is_linked();
2852  }
2853  function_return_is_shadowed_ = function_return_was_shadowed;
2854
2855  // Get an external reference to the handler address.
2856  ExternalReference handler_address(Top::k_handler_address);
2857
2858  // If we can fall off the end of the try block, unlink from try chain.
2859  if (has_valid_frame()) {
2860    // The next handler address is on top of the frame.  Unlink from
2861    // the handler list and drop the rest of this handler from the
2862    // frame.
2863    STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
2864    frame_->EmitPop(r1);  // r0 can contain the return value.
2865    __ mov(r3, Operand(handler_address));
2866    __ str(r1, MemOperand(r3));
2867    frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
2868    if (has_unlinks) {
2869      exit.Jump();
2870    }
2871  }
2872
2873  // Generate unlink code for the (formerly) shadowing labels that have been
2874  // jumped to.  Deallocate each shadow target.
2875  for (int i = 0; i < shadows.length(); i++) {
2876    if (shadows[i]->is_linked()) {
2877      // Unlink from the try chain.
2878      shadows[i]->Bind();
2879      // Because we can be jumping here (to spilled code) from unspilled
2880      // code, we need to reestablish a spilled frame at this block.
2881      frame_->SpillAll();
2882
2883      // Reload sp from the top handler, because some statements that we
2884      // break from (eg, for...in) may have left stuff on the stack.
2885      __ mov(r3, Operand(handler_address));
2886      __ ldr(sp, MemOperand(r3));
2887      frame_->Forget(frame_->height() - handler_height);
2888
2889      STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
2890      frame_->EmitPop(r1);  // r0 can contain the return value.
2891      __ str(r1, MemOperand(r3));
2892      frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
2893
2894      if (!function_return_is_shadowed_ && i == kReturnShadowIndex) {
2895        frame_->PrepareForReturn();
2896      }
2897      shadows[i]->other_target()->Jump();
2898    }
2899  }
2900
2901  exit.Bind();
2902  ASSERT(!has_valid_frame() || frame_->height() == original_height);
2903}
2904
2905
2906void CodeGenerator::VisitTryFinallyStatement(TryFinallyStatement* node) {
2907#ifdef DEBUG
2908  int original_height = frame_->height();
2909#endif
2910  VirtualFrame::SpilledScope spilled_scope(frame_);
2911  Comment cmnt(masm_, "[ TryFinallyStatement");
2912  CodeForStatementPosition(node);
2913
2914  // State: Used to keep track of reason for entering the finally
2915  // block. Should probably be extended to hold information for
2916  // break/continue from within the try block.
2917  enum { FALLING, THROWING, JUMPING };
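  // The state pushed for the i'th shadowing target is JUMPING + i, so the
  // finally block can dispatch back to the right original target by
  // comparing the state against Smi::FromInt(JUMPING + i).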
2918
2919  JumpTarget try_block;
2920  JumpTarget finally_block;
2921
2922  try_block.Call();
2923
2924  frame_->EmitPush(r0);  // save exception object on the stack
2925  // In case of thrown exceptions, this is where we continue.
2926  __ mov(r2, Operand(Smi::FromInt(THROWING)));
2927  finally_block.Jump();
2928
2929  // --- Try block ---
2930  try_block.Bind();
2931
2932  frame_->PushTryHandler(TRY_FINALLY_HANDLER);
2933  int handler_height = frame_->height();
2934
2935  // Shadow the labels for all escapes from the try block, including
2936  // returns.  Shadowing hides the original label as the LabelShadow and
2937  // operations on the original actually affect the shadowing label.
2938  //
2939  // We should probably try to unify the escaping labels and the return
2940  // label.
2941  int nof_escapes = node->escaping_targets()->length();
2942  List<ShadowTarget*> shadows(1 + nof_escapes);
2943
2944  // Add the shadow target for the function return.
2945  static const int kReturnShadowIndex = 0;
2946  shadows.Add(new ShadowTarget(&function_return_));
2947  bool function_return_was_shadowed = function_return_is_shadowed_;
2948  function_return_is_shadowed_ = true;
2949  ASSERT(shadows[kReturnShadowIndex]->other_target() == &function_return_);
2950
2951  // Add the remaining shadow targets.
2952  for (int i = 0; i < nof_escapes; i++) {
2953    shadows.Add(new ShadowTarget(node->escaping_targets()->at(i)));
2954  }
2955
2956  // Generate code for the statements in the try block.
2957  { VirtualFrame::RegisterAllocationScope scope(this);
2958    VisitStatements(node->try_block()->statements());
2959  }
2960
2961  // Stop the introduced shadowing and count the number of required unlinks.
2962  // After shadowing stops, the original labels are unshadowed and the
2963  // LabelShadows represent the formerly shadowing labels.
2964  int nof_unlinks = 0;
2965  for (int i = 0; i < shadows.length(); i++) {
2966    shadows[i]->StopShadowing();
2967    if (shadows[i]->is_linked()) nof_unlinks++;
2968  }
2969  function_return_is_shadowed_ = function_return_was_shadowed;
2970
2971  // Get an external reference to the handler address.
2972  ExternalReference handler_address(Top::k_handler_address);
2973
2974  // If we can fall off the end of the try block, unlink from the try
2975  // chain and set the state on the frame to FALLING.
2976  if (has_valid_frame()) {
2977    // The next handler address is on top of the frame.
2978    STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
2979    frame_->EmitPop(r1);
2980    __ mov(r3, Operand(handler_address));
2981    __ str(r1, MemOperand(r3));
2982    frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
2983
2984    // Fake a top of stack value (unneeded when FALLING) and set the
2985    // state in r2, then jump around the unlink blocks if any.
2986    __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
2987    frame_->EmitPush(r0);
2988    __ mov(r2, Operand(Smi::FromInt(FALLING)));
2989    if (nof_unlinks > 0) {
2990      finally_block.Jump();
2991    }
2992  }
2993
2994  // Generate code to unlink and set the state for the (formerly)
2995  // shadowing targets that have been jumped to.
2996  for (int i = 0; i < shadows.length(); i++) {
2997    if (shadows[i]->is_linked()) {
2998      // If we have come from the shadowed return, the return value is
2999      // in (a non-refcounted reference to) r0.  We must preserve it
3000      // until it is pushed.
3001      //
3002      // Because we can be jumping here (to spilled code) from
3003      // unspilled code, we need to reestablish a spilled frame at
3004      // this block.
3005      shadows[i]->Bind();
3006      frame_->SpillAll();
3007
3008      // Reload sp from the top handler, because some statements that
3009      // we break from (eg, for...in) may have left stuff on the
3010      // stack.
3011      __ mov(r3, Operand(handler_address));
3012      __ ldr(sp, MemOperand(r3));
3013      frame_->Forget(frame_->height() - handler_height);
3014
3015      // Unlink this handler and drop it from the frame.  The next
3016      // handler address is currently on top of the frame.
3017      STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
3018      frame_->EmitPop(r1);
3019      __ str(r1, MemOperand(r3));
3020      frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
3021
3022      if (i == kReturnShadowIndex) {
3023        // If this label shadowed the function return, materialize the
3024        // return value on the stack.
3025        frame_->EmitPush(r0);
3026      } else {
3027        // Fake TOS for targets that shadowed breaks and continues.
3028        __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
3029        frame_->EmitPush(r0);
3030      }
3031      __ mov(r2, Operand(Smi::FromInt(JUMPING + i)));
3032      if (--nof_unlinks > 0) {
3033        // If this is not the last unlink block, jump around the next.
3034        finally_block.Jump();
3035      }
3036    }
3037  }
3038
3039  // --- Finally block ---
3040  finally_block.Bind();
3041
3042  // Push the state on the stack.
3043  frame_->EmitPush(r2);
3044
3045  // We keep two elements on the stack - the (possibly faked) result
3046  // and the state - while evaluating the finally block.
3047  //
3048  // Generate code for the statements in the finally block.
3049  { VirtualFrame::RegisterAllocationScope scope(this);
3050    VisitStatements(node->finally_block()->statements());
3051  }
3052
3053  if (has_valid_frame()) {
3054    // Restore state and return value or faked TOS.
3055    frame_->EmitPop(r2);
3056    frame_->EmitPop(r0);
3057  }
3058
3059  // Generate code to jump to the right destination for all used
3060  // formerly shadowing targets.  Deallocate each shadow target.
3061  for (int i = 0; i < shadows.length(); i++) {
3062    if (has_valid_frame() && shadows[i]->is_bound()) {
3063      JumpTarget* original = shadows[i]->other_target();
3064      __ cmp(r2, Operand(Smi::FromInt(JUMPING + i)));
3065      if (!function_return_is_shadowed_ && i == kReturnShadowIndex) {
3066        JumpTarget skip;
3067        skip.Branch(ne);
3068        frame_->PrepareForReturn();
3069        original->Jump();
3070        skip.Bind();
3071      } else {
3072        original->Branch(eq);
3073      }
3074    }
3075  }
3076
3077  if (has_valid_frame()) {
3078    // Check if we need to rethrow the exception.
3079    JumpTarget exit;
3080    __ cmp(r2, Operand(Smi::FromInt(THROWING)));
3081    exit.Branch(ne);
3082
3083    // Rethrow exception.
3084    frame_->EmitPush(r0);
3085    frame_->CallRuntime(Runtime::kReThrow, 1);
3086
3087    // Done.
3088    exit.Bind();
3089  }
3090  ASSERT(!has_valid_frame() || frame_->height() == original_height);
3091}
3092
3093
3094void CodeGenerator::VisitDebuggerStatement(DebuggerStatement* node) {
3095#ifdef DEBUG
3096  int original_height = frame_->height();
3097#endif
3098  Comment cmnt(masm_, "[ DebuggerStatament");
3099  CodeForStatementPosition(node);
3100#ifdef ENABLE_DEBUGGER_SUPPORT
3101  frame_->DebugBreak();
3102#endif
3103  // Ignore the return value.
3104  ASSERT(frame_->height() == original_height);
3105}
3106
3107
3108void CodeGenerator::InstantiateFunction(
3109    Handle<SharedFunctionInfo> function_info,
3110    bool pretenure) {
3111  // Use the fast case closure allocation code that allocates in new
3112  // space for nested functions that don't need literal cloning.
3113  if (scope()->is_function_scope() &&
3114      function_info->num_literals() == 0 &&
3115      !pretenure) {
3116    FastNewClosureStub stub;
3117    frame_->EmitPush(Operand(function_info));
3118    frame_->SpillAll();
3119    frame_->CallStub(&stub, 1);
3120    frame_->EmitPush(r0);
3121  } else {
3122    // Create a new closure.
3123    frame_->EmitPush(cp);
3124    frame_->EmitPush(Operand(function_info));
3125    frame_->EmitPush(Operand(pretenure
3126                             ? Factory::true_value()
3127                             : Factory::false_value()));
3128    frame_->CallRuntime(Runtime::kNewClosure, 3);
3129    frame_->EmitPush(r0);
3130  }
3131}
3132
3133
3134void CodeGenerator::VisitFunctionLiteral(FunctionLiteral* node) {
3135#ifdef DEBUG
3136  int original_height = frame_->height();
3137#endif
3138  Comment cmnt(masm_, "[ FunctionLiteral");
3139
3140  // Build the function info and instantiate it.
3141  Handle<SharedFunctionInfo> function_info =
3142      Compiler::BuildFunctionInfo(node, script());
3143  if (function_info.is_null()) {
3144    SetStackOverflow();
3145    ASSERT(frame_->height() == original_height);
3146    return;
3147  }
3148  InstantiateFunction(function_info, node->pretenure());
3149  ASSERT_EQ(original_height + 1, frame_->height());
3150}
3151
3152
3153void CodeGenerator::VisitSharedFunctionInfoLiteral(
3154    SharedFunctionInfoLiteral* node) {
3155#ifdef DEBUG
3156  int original_height = frame_->height();
3157#endif
3158  Comment cmnt(masm_, "[ SharedFunctionInfoLiteral");
3159  InstantiateFunction(node->shared_function_info(), false);
3160  ASSERT_EQ(original_height + 1, frame_->height());
3161}
3162
3163
3164void CodeGenerator::VisitConditional(Conditional* node) {
3165#ifdef DEBUG
3166  int original_height = frame_->height();
3167#endif
3168  Comment cmnt(masm_, "[ Conditional");
3169  JumpTarget then;
3170  JumpTarget else_;
3171  LoadCondition(node->condition(), &then, &else_, true);
3172  if (has_valid_frame()) {
3173    Branch(false, &else_);
3174  }
3175  if (has_valid_frame() || then.is_linked()) {
3176    then.Bind();
3177    Load(node->then_expression());
3178  }
3179  if (else_.is_linked()) {
3180    JumpTarget exit;
3181    if (has_valid_frame()) exit.Jump();
3182    else_.Bind();
3183    Load(node->else_expression());
3184    if (exit.is_linked()) exit.Bind();
3185  }
3186  ASSERT_EQ(original_height + 1, frame_->height());
3187}
3188
3189
3190void CodeGenerator::LoadFromSlot(Slot* slot, TypeofState typeof_state) {
3191  if (slot->type() == Slot::LOOKUP) {
3192    ASSERT(slot->var()->is_dynamic());
3193
3194    // JumpTargets do not yet support merging frames so the frame must be
3195    // spilled when jumping to these targets.
3196    JumpTarget slow;
3197    JumpTarget done;
3198
3199    // Generate fast case for loading from slots that correspond to
3200    // local/global variables or arguments unless they are shadowed by
3201    // eval-introduced bindings.
3202    EmitDynamicLoadFromSlotFastCase(slot,
3203                                    typeof_state,
3204                                    &slow,
3205                                    &done);
3206
3207    slow.Bind();
3208    frame_->EmitPush(cp);
3209    frame_->EmitPush(Operand(slot->var()->name()));
3210
3211    if (typeof_state == INSIDE_TYPEOF) {
3212      frame_->CallRuntime(Runtime::kLoadContextSlotNoReferenceError, 2);
3213    } else {
3214      frame_->CallRuntime(Runtime::kLoadContextSlot, 2);
3215    }
3216
3217    done.Bind();
3218    frame_->EmitPush(r0);
3219
3220  } else {
3221    Register scratch = VirtualFrame::scratch0();
3222    TypeInfo info = type_info(slot);
3223    frame_->EmitPush(SlotOperand(slot, scratch), info);
3224
3225    if (slot->var()->mode() == Variable::CONST) {
3226      // Const slots may contain 'the hole' value (the constant hasn't been
3227      // initialized yet) which needs to be converted into the 'undefined'
3228      // value.
3229      Comment cmnt(masm_, "[ Unhole const");
3230      Register tos = frame_->PopToRegister();
3231      __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
3232      __ cmp(tos, ip);
3233      __ LoadRoot(tos, Heap::kUndefinedValueRootIndex, eq);
3234      frame_->EmitPush(tos);
3235    }
3236  }
3237}
3238
3239
3240void CodeGenerator::LoadFromSlotCheckForArguments(Slot* slot,
3241                                                  TypeofState state) {
3242  VirtualFrame::RegisterAllocationScope scope(this);
3243  LoadFromSlot(slot, state);
3244
3245  // Bail out quickly if we're not using lazy arguments allocation.
3246  if (ArgumentsMode() != LAZY_ARGUMENTS_ALLOCATION) return;
3247
3248  // ... or if the slot isn't a non-parameter arguments slot.
3249  if (slot->type() == Slot::PARAMETER || !slot->is_arguments()) return;
3250
3251  // Load the loaded value from the stack into a register but leave it on the
3252  // stack.
3253  Register tos = frame_->Peek();
3254
3255  // If the loaded value is the sentinel that indicates that we
3256  // haven't loaded the arguments object yet, we need to do it now.
3257  JumpTarget exit;
3258  __ LoadRoot(ip, Heap::kArgumentsMarkerRootIndex);
3259  __ cmp(tos, ip);
3260  exit.Branch(ne);
3261  frame_->Drop();
3262  StoreArgumentsObject(false);
3263  exit.Bind();
3264}
3265
3266
3267void CodeGenerator::StoreToSlot(Slot* slot, InitState init_state) {
3268  ASSERT(slot != NULL);
3269  VirtualFrame::RegisterAllocationScope scope(this);
3270  if (slot->type() == Slot::LOOKUP) {
3271    ASSERT(slot->var()->is_dynamic());
3272
3273    // For now, just do a runtime call.
3274    frame_->EmitPush(cp);
3275    frame_->EmitPush(Operand(slot->var()->name()));
3276
3277    if (init_state == CONST_INIT) {
3278      // Same as the case for a normal store, but ignores attribute
3279      // (e.g. READ_ONLY) of context slot so that we can initialize
3280      // const properties (introduced via eval("const foo = (some
3281      // expr);")). Also, uses the current function context instead of
3282      // the top context.
3283      //
3284      // Note that we must declare the foo upon entry of eval(), via a
3285      // context slot declaration, but we cannot initialize it at the
3286      // same time, because the const declaration may be at the end of
3287      // the eval code (sigh...) and the const variable may have been
3288      // used before (where its value is 'undefined'). Thus, we can only
3289      // do the initialization when we actually encounter the expression
3290      // and when the expression operands are defined and valid, and
3291      // thus we need the split into 2 operations: declaration of the
3292      // context slot followed by initialization.
3293      frame_->CallRuntime(Runtime::kInitializeConstContextSlot, 3);
3294    } else {
3295      frame_->CallRuntime(Runtime::kStoreContextSlot, 3);
3296    }
3297    // Storing a variable must keep the (new) value on the expression
3298    // stack. This is necessary for compiling assignment expressions.
3299    frame_->EmitPush(r0);
3300
3301  } else {
3302    ASSERT(!slot->var()->is_dynamic());
3303    Register scratch = VirtualFrame::scratch0();
3304    Register scratch2 = VirtualFrame::scratch1();
3305
3306    // The frame must be spilled when branching to this target.
3307    JumpTarget exit;
3308
3309    if (init_state == CONST_INIT) {
3310      ASSERT(slot->var()->mode() == Variable::CONST);
3311      // Only the first const initialization must be executed (the slot
3312      // still contains 'the hole' value). When the assignment is
3313      // executed, the code is identical to a normal store (see below).
3314      Comment cmnt(masm_, "[ Init const");
3315      __ ldr(scratch, SlotOperand(slot, scratch));
3316      __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
3317      __ cmp(scratch, ip);
3318      exit.Branch(ne);
3319    }
3320
3321    // We must execute the store.  Storing a variable must keep the
3322    // (new) value on the stack. This is necessary for compiling
3323    // assignment expressions.
3324    //
3325    // Note: We will reach here even with slot->var()->mode() ==
3326    // Variable::CONST because of const declarations which will
3327    // initialize consts to 'the hole' value and by doing so, end up
3328    // calling this code.  r2 may be loaded with context; used below in
3329    // RecordWrite.
3330    Register tos = frame_->Peek();
3331    __ str(tos, SlotOperand(slot, scratch));
3332    if (slot->type() == Slot::CONTEXT) {
3333      // Skip write barrier if the written value is a smi.
3334      __ tst(tos, Operand(kSmiTagMask));
3335      // We don't use tos any more after here.
3336      exit.Branch(eq);
3337      // scratch is loaded with context when calling SlotOperand above.
3338      int offset = FixedArray::kHeaderSize + slot->index() * kPointerSize;
3339      // We need an extra register.  Until we have a way to do that in the
3340      // virtual frame we will cheat and ask for a free TOS register.
3341      Register scratch3 = frame_->GetTOSRegister();
3342      __ RecordWrite(scratch, Operand(offset), scratch2, scratch3);
3343    }
3344    // If we definitely did not jump over the assignment, we do not need
3345    // to bind the exit label.  Doing so can defeat peephole
3346    // optimization.
3347    if (init_state == CONST_INIT || slot->type() == Slot::CONTEXT) {
3348      exit.Bind();
3349    }
3350  }
3351}
3352
3353
3354void CodeGenerator::LoadFromGlobalSlotCheckExtensions(Slot* slot,
3355                                                      TypeofState typeof_state,
3356                                                      JumpTarget* slow) {
3357  // Check that no extension objects have been created by calls to
3358  // eval from the current scope to the global scope.
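  // Any such extension object could introduce a binding that shadows the
  // global variable, in which case we must take the slow (runtime) path.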
3359  Register tmp = frame_->scratch0();
3360  Register tmp2 = frame_->scratch1();
3361  Register context = cp;
3362  Scope* s = scope();
3363  while (s != NULL) {
3364    if (s->num_heap_slots() > 0) {
3365      if (s->calls_eval()) {
3366        frame_->SpillAll();
3367        // Check that extension is NULL.
3368        __ ldr(tmp2, ContextOperand(context, Context::EXTENSION_INDEX));
3369        __ tst(tmp2, tmp2);
3370        slow->Branch(ne);
3371      }
3372      // Load next context in chain.
3373      __ ldr(tmp, ContextOperand(context, Context::CLOSURE_INDEX));
3374      __ ldr(tmp, FieldMemOperand(tmp, JSFunction::kContextOffset));
3375      context = tmp;
3376    }
3377    // If no outer scope calls eval, we do not need to check more
3378    // context extensions.
3379    if (!s->outer_scope_calls_eval() || s->is_eval_scope()) break;
3380    s = s->outer_scope();
3381  }
3382
3383  if (s->is_eval_scope()) {
3384    frame_->SpillAll();
3385    Label next, fast;
3386    __ Move(tmp, context);
3387    __ bind(&next);
3388    // Terminate at global context.
3389    __ ldr(tmp2, FieldMemOperand(tmp, HeapObject::kMapOffset));
3390    __ LoadRoot(ip, Heap::kGlobalContextMapRootIndex);
3391    __ cmp(tmp2, ip);
3392    __ b(eq, &fast);
3393    // Check that extension is NULL.
3394    __ ldr(tmp2, ContextOperand(tmp, Context::EXTENSION_INDEX));
3395    __ tst(tmp2, tmp2);
3396    slow->Branch(ne);
3397    // Load next context in chain.
3398    __ ldr(tmp, ContextOperand(tmp, Context::CLOSURE_INDEX));
3399    __ ldr(tmp, FieldMemOperand(tmp, JSFunction::kContextOffset));
3400    __ b(&next);
3401    __ bind(&fast);
3402  }
3403
3404  // Load the global object.
3405  LoadGlobal();
3406  // Set up the name register and call the load IC.
3407  frame_->CallLoadIC(slot->var()->name(),
3408                     typeof_state == INSIDE_TYPEOF
3409                         ? RelocInfo::CODE_TARGET
3410                         : RelocInfo::CODE_TARGET_CONTEXT);
3411}
3412
3413
3414void CodeGenerator::EmitDynamicLoadFromSlotFastCase(Slot* slot,
3415                                                    TypeofState typeof_state,
3416                                                    JumpTarget* slow,
3417                                                    JumpTarget* done) {
3418  // Generate fast-case code for variables that might be shadowed by
3419  // eval-introduced variables.  Eval is used a lot without
3420  // introducing variables.  In those cases, we do not want to
3421  // perform a runtime call for all variables in the scope
3422  // containing the eval.
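  //
  // JavaScript example: in 'function f() { eval(s); return x; }' the
  // variable 'x' is dynamic because of the eval, but unless the eval
  // actually introduces an 'x' the fast case below loads it directly.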
3423  if (slot->var()->mode() == Variable::DYNAMIC_GLOBAL) {
3424    LoadFromGlobalSlotCheckExtensions(slot, typeof_state, slow);
3425    frame_->SpillAll();
3426    done->Jump();
3427
3428  } else if (slot->var()->mode() == Variable::DYNAMIC_LOCAL) {
3429    frame_->SpillAll();
3430    Slot* potential_slot = slot->var()->local_if_not_shadowed()->AsSlot();
3431    Expression* rewrite = slot->var()->local_if_not_shadowed()->rewrite();
3432    if (potential_slot != NULL) {
3433      // Generate fast case for locals that rewrite to slots.
3434      __ ldr(r0,
3435             ContextSlotOperandCheckExtensions(potential_slot,
3436                                               r1,
3437                                               r2,
3438                                               slow));
3439      if (potential_slot->var()->mode() == Variable::CONST) {
3440        __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
3441        __ cmp(r0, ip);
3442        __ LoadRoot(r0, Heap::kUndefinedValueRootIndex, eq);
3443      }
3444      done->Jump();
3445    } else if (rewrite != NULL) {
3446      // Generate fast case for argument loads.
3447      Property* property = rewrite->AsProperty();
3448      if (property != NULL) {
3449        VariableProxy* obj_proxy = property->obj()->AsVariableProxy();
3450        Literal* key_literal = property->key()->AsLiteral();
3451        if (obj_proxy != NULL &&
3452            key_literal != NULL &&
3453            obj_proxy->IsArguments() &&
3454            key_literal->handle()->IsSmi()) {
3455          // Load arguments object if there are no eval-introduced
3456          // variables. Then load the argument from the arguments
3457          // object using keyed load.
3458          __ ldr(r0,
3459                 ContextSlotOperandCheckExtensions(obj_proxy->var()->AsSlot(),
3460                                                   r1,
3461                                                   r2,
3462                                                   slow));
3463          frame_->EmitPush(r0);
3464          __ mov(r1, Operand(key_literal->handle()));
3465          frame_->EmitPush(r1);
3466          EmitKeyedLoad();
3467          done->Jump();
3468        }
3469      }
3470    }
3471  }
3472}
3473
3474
3475void CodeGenerator::VisitSlot(Slot* node) {
3476#ifdef DEBUG
3477  int original_height = frame_->height();
3478#endif
3479  Comment cmnt(masm_, "[ Slot");
3480  LoadFromSlotCheckForArguments(node, NOT_INSIDE_TYPEOF);
3481  ASSERT_EQ(original_height + 1, frame_->height());
3482}
3483
3484
3485void CodeGenerator::VisitVariableProxy(VariableProxy* node) {
3486#ifdef DEBUG
3487  int original_height = frame_->height();
3488#endif
3489  Comment cmnt(masm_, "[ VariableProxy");
3490
3491  Variable* var = node->var();
3492  Expression* expr = var->rewrite();
3493  if (expr != NULL) {
3494    Visit(expr);
3495  } else {
3496    ASSERT(var->is_global());
3497    Reference ref(this, node);
3498    ref.GetValue();
3499  }
3500  ASSERT_EQ(original_height + 1, frame_->height());
3501}
3502
3503
3504void CodeGenerator::VisitLiteral(Literal* node) {
3505#ifdef DEBUG
3506  int original_height = frame_->height();
3507#endif
3508  Comment cmnt(masm_, "[ Literal");
3509  Register reg = frame_->GetTOSRegister();
3510  bool is_smi = node->handle()->IsSmi();
3511  __ mov(reg, Operand(node->handle()));
3512  frame_->EmitPush(reg, is_smi ? TypeInfo::Smi() : TypeInfo::Unknown());
3513  ASSERT_EQ(original_height + 1, frame_->height());
3514}
3515
3516
3517void CodeGenerator::VisitRegExpLiteral(RegExpLiteral* node) {
3518#ifdef DEBUG
3519  int original_height = frame_->height();
3520#endif
3521  Comment cmnt(masm_, "[ RegExp Literal");
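  // JavaScript example: '/ab+c/gi'.
  //
  // The regexp boilerplate is materialized lazily (via the runtime on first
  // use) and cached in the function's literals array; each evaluation then
  // allocates a fresh JSRegExp and copies the boilerplate's fields into it.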
3522
3523  Register tmp = VirtualFrame::scratch0();
3524  // Free up a TOS register that can be used to push the literal.
3525  Register literal = frame_->GetTOSRegister();
3526
3527  // Retrieve the literal array and check the allocated entry.
3528
3529  // Load the function of this activation.
3530  __ ldr(tmp, frame_->Function());
3531
3532  // Load the literals array of the function.
3533  __ ldr(tmp, FieldMemOperand(tmp, JSFunction::kLiteralsOffset));
3534
3535  // Load the literal at the AST-saved index.
3536  int literal_offset =
3537      FixedArray::kHeaderSize + node->literal_index() * kPointerSize;
3538  __ ldr(literal, FieldMemOperand(tmp, literal_offset));
3539
3540  JumpTarget materialized;
3541  __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
3542  __ cmp(literal, ip);
3543  // This branch locks the virtual frame at the materialized label to
3544  // match the one we have here, where the literal register is not on the
3545  // stack and nothing is spilled.
3546  materialized.Branch(ne);
3547
3548  // If the entry is undefined we call the runtime system to compute
3549  // the literal.
3550  // literal array  (0)
3551  frame_->EmitPush(tmp);
3552  // literal index  (1)
3553  frame_->EmitPush(Operand(Smi::FromInt(node->literal_index())));
3554  // RegExp pattern (2)
3555  frame_->EmitPush(Operand(node->pattern()));
3556  // RegExp flags   (3)
3557  frame_->EmitPush(Operand(node->flags()));
3558  frame_->CallRuntime(Runtime::kMaterializeRegExpLiteral, 4);
3559  __ Move(literal, r0);
3560
3561  materialized.Bind();
3562
3563  frame_->EmitPush(literal);
3564  int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
3565  frame_->EmitPush(Operand(Smi::FromInt(size)));
3566  frame_->CallRuntime(Runtime::kAllocateInNewSpace, 1);
3567  // TODO(lrn): Use AllocateInNewSpace macro with fallback to runtime.
3568  // r0 is newly allocated space.
3569
3570  // Reuse literal variable with (possibly) a new register, still holding
3571  // the materialized boilerplate.
3572  literal = frame_->PopToRegister(r0);
3573
3574  __ CopyFields(r0, literal, tmp.bit(), size / kPointerSize);
3575
3576  // Push the clone.
3577  frame_->EmitPush(r0);
3578  ASSERT_EQ(original_height + 1, frame_->height());
3579}
3580
3581
3582void CodeGenerator::VisitObjectLiteral(ObjectLiteral* node) {
3583#ifdef DEBUG
3584  int original_height = frame_->height();
3585#endif
3586  Comment cmnt(masm_, "[ ObjectLiteral");
3587
3588  Register literal = frame_->GetTOSRegister();
3589  // Load the function of this activation.
3590  __ ldr(literal, frame_->Function());
3591  // Literal array.
3592  __ ldr(literal, FieldMemOperand(literal, JSFunction::kLiteralsOffset));
3593  frame_->EmitPush(literal);
3594  // Literal index.
3595  frame_->EmitPush(Operand(Smi::FromInt(node->literal_index())));
3596  // Constant properties.
3597  frame_->EmitPush(Operand(node->constant_properties()));
3598  // Should the object literal have fast elements?
3599  frame_->EmitPush(Operand(Smi::FromInt(node->fast_elements() ? 1 : 0)));
3600  if (node->depth() > 1) {
3601    frame_->CallRuntime(Runtime::kCreateObjectLiteral, 4);
3602  } else {
3603    frame_->CallRuntime(Runtime::kCreateObjectLiteralShallow, 4);
3604  }
3605  frame_->EmitPush(r0);  // save the result
3606
3607  // Mark all computed expressions that are bound to a key that
3608  // is shadowed by a later occurrence of the same key. For the
3609  // marked expressions, no store code is emitted.
3610  node->CalculateEmitStore();
3611
3612  for (int i = 0; i < node->properties()->length(); i++) {
3613    // At the start of each iteration, the top of stack contains
3614    // the newly created object literal.
3615    ObjectLiteral::Property* property = node->properties()->at(i);
3616    Literal* key = property->key();
3617    Expression* value = property->value();
3618    switch (property->kind()) {
3619      case ObjectLiteral::Property::CONSTANT:
3620        break;
3621      case ObjectLiteral::Property::MATERIALIZED_LITERAL:
3622        if (CompileTimeValue::IsCompileTimeValue(property->value())) break;
3623        // else fall through
3624      case ObjectLiteral::Property::COMPUTED:
3625        if (key->handle()->IsSymbol()) {
3626          Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
3627          Load(value);
3628          if (property->emit_store()) {
3629            frame_->PopToR0();
3630            // Fetch the object literal.
3631            frame_->SpillAllButCopyTOSToR1();
3632            __ mov(r2, Operand(key->handle()));
3633            frame_->CallCodeObject(ic, RelocInfo::CODE_TARGET, 0);
3634          } else {
3635            frame_->Drop();
3636          }
3637          break;
3638        }
3639        // else fall through
3640      case ObjectLiteral::Property::PROTOTYPE: {
3641        frame_->Dup();
3642        Load(key);
3643        Load(value);
3644        if (property->emit_store()) {
3645          frame_->CallRuntime(Runtime::kSetProperty, 3);
3646        } else {
3647          frame_->Drop(3);
3648        }
3649        break;
3650      }
3651      case ObjectLiteral::Property::SETTER: {
3652        frame_->Dup();
3653        Load(key);
3654        frame_->EmitPush(Operand(Smi::FromInt(1)));
3655        Load(value);
3656        frame_->CallRuntime(Runtime::kDefineAccessor, 4);
3657        break;
3658      }
3659      case ObjectLiteral::Property::GETTER: {
3660        frame_->Dup();
3661        Load(key);
3662        frame_->EmitPush(Operand(Smi::FromInt(0)));
3663        Load(value);
3664        frame_->CallRuntime(Runtime::kDefineAccessor, 4);
3665        break;
3666      }
3667    }
3668  }
3669  ASSERT_EQ(original_height + 1, frame_->height());
3670}
3671
3672
3673void CodeGenerator::VisitArrayLiteral(ArrayLiteral* node) {
3674#ifdef DEBUG
3675  int original_height = frame_->height();
3676#endif
3677  Comment cmnt(masm_, "[ ArrayLiteral");
3678
3679  Register tos = frame_->GetTOSRegister();
3680  // Load the function of this activation.
3681  __ ldr(tos, frame_->Function());
3682  // Load the literals array of the function.
3683  __ ldr(tos, FieldMemOperand(tos, JSFunction::kLiteralsOffset));
3684  frame_->EmitPush(tos);
3685  frame_->EmitPush(Operand(Smi::FromInt(node->literal_index())));
3686  frame_->EmitPush(Operand(node->constant_elements()));
3687  int length = node->values()->length();
3688  if (node->constant_elements()->map() == Heap::fixed_cow_array_map()) {
3689    FastCloneShallowArrayStub stub(
3690        FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS, length);
3691    frame_->CallStub(&stub, 3);
3692    __ IncrementCounter(&Counters::cow_arrays_created_stub, 1, r1, r2);
3693  } else if (node->depth() > 1) {
3694    frame_->CallRuntime(Runtime::kCreateArrayLiteral, 3);
3695  } else if (length > FastCloneShallowArrayStub::kMaximumClonedLength) {
3696    frame_->CallRuntime(Runtime::kCreateArrayLiteralShallow, 3);
3697  } else {
3698    FastCloneShallowArrayStub stub(
3699        FastCloneShallowArrayStub::CLONE_ELEMENTS, length);
3700    frame_->CallStub(&stub, 3);
3701  }
3702  frame_->EmitPush(r0);  // save the result
3703  // r0: created object literal
3704
3705  // Generate code to set the elements in the array that are not
3706  // literals.
3707  for (int i = 0; i < node->values()->length(); i++) {
3708    Expression* value = node->values()->at(i);
3709
3710    // If value is a literal the property value is already set in the
3711    // boilerplate object.
3712    if (value->AsLiteral() != NULL) continue;
3713    // If value is a materialized literal the property value is already set
3714    // in the boilerplate object if it is simple.
3715    if (CompileTimeValue::IsCompileTimeValue(value)) continue;
3716
3717    // The property must be set by generated code.
3718    Load(value);
3719    frame_->PopToR0();
3720    // Fetch the object literal.
3721    frame_->SpillAllButCopyTOSToR1();
3722
3723    // Get the elements array.
3724    __ ldr(r1, FieldMemOperand(r1, JSObject::kElementsOffset));
3725
3726    // Write to the indexed properties array.
3727    int offset = i * kPointerSize + FixedArray::kHeaderSize;
3728    __ str(r0, FieldMemOperand(r1, offset));
3729
3730    // Update the write barrier for the array address.
3731    __ RecordWrite(r1, Operand(offset), r3, r2);
3732  }
3733  ASSERT_EQ(original_height + 1, frame_->height());
3734}
3735
3736
3737void CodeGenerator::VisitCatchExtensionObject(CatchExtensionObject* node) {
3738#ifdef DEBUG
3739  int original_height = frame_->height();
3740#endif
3741  // Call runtime routine to allocate the catch extension object and
3742  // assign the exception value to the catch variable.
3743  Comment cmnt(masm_, "[ CatchExtensionObject");
3744  Load(node->key());
3745  Load(node->value());
3746  frame_->CallRuntime(Runtime::kCreateCatchExtensionObject, 2);
3747  frame_->EmitPush(r0);
3748  ASSERT_EQ(original_height + 1, frame_->height());
3749}
3750
3751
3752void CodeGenerator::EmitSlotAssignment(Assignment* node) {
3753#ifdef DEBUG
3754  int original_height = frame_->height();
3755#endif
3756  Comment cmnt(masm(), "[ Variable Assignment");
3757  Variable* var = node->target()->AsVariableProxy()->AsVariable();
3758  ASSERT(var != NULL);
3759  Slot* slot = var->AsSlot();
3760  ASSERT(slot != NULL);
3761
3762  // Evaluate the right-hand side.
3763  if (node->is_compound()) {
3764    // For a compound assignment the right-hand side is a binary operation
3765    // between the current property value and the actual right-hand side.
3766    LoadFromSlotCheckForArguments(slot, NOT_INSIDE_TYPEOF);
3767
3768    // Perform the binary operation.
3769    Literal* literal = node->value()->AsLiteral();
3770    bool overwrite_value = node->value()->ResultOverwriteAllowed();
3771    if (literal != NULL && literal->handle()->IsSmi()) {
3772      SmiOperation(node->binary_op(),
3773                   literal->handle(),
3774                   false,
3775                   overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE);
3776    } else {
3777      GenerateInlineSmi inline_smi =
3778          loop_nesting() > 0 ? GENERATE_INLINE_SMI : DONT_GENERATE_INLINE_SMI;
3779      if (literal != NULL) {
3780        ASSERT(!literal->handle()->IsSmi());
3781        inline_smi = DONT_GENERATE_INLINE_SMI;
3782      }
3783      Load(node->value());
3784      GenericBinaryOperation(node->binary_op(),
3785                             overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE,
3786                             inline_smi);
3787    }
3788  } else {
3789    Load(node->value());
3790  }
3791
3792  // Perform the assignment.
3793  if (var->mode() != Variable::CONST || node->op() == Token::INIT_CONST) {
3794    CodeForSourcePosition(node->position());
3795    StoreToSlot(slot,
3796                node->op() == Token::INIT_CONST ? CONST_INIT : NOT_CONST_INIT);
3797  }
3798  ASSERT_EQ(original_height + 1, frame_->height());
3799}
3800
3801
3802void CodeGenerator::EmitNamedPropertyAssignment(Assignment* node) {
3803#ifdef DEBUG
3804  int original_height = frame_->height();
3805#endif
3806  Comment cmnt(masm(), "[ Named Property Assignment");
3807  Variable* var = node->target()->AsVariableProxy()->AsVariable();
3808  Property* prop = node->target()->AsProperty();
3809  ASSERT(var == NULL || (prop == NULL && var->is_global()));
3810
3811  // Initialize name and evaluate the receiver sub-expression if necessary. If
3812  // the receiver is trivial it is not placed on the stack at this point, but
3813  // loaded whenever actually needed.
3814  Handle<String> name;
3815  bool is_trivial_receiver = false;
3816  if (var != NULL) {
3817    name = var->name();
3818  } else {
3819    Literal* lit = prop->key()->AsLiteral();
3820    ASSERT_NOT_NULL(lit);
3821    name = Handle<String>::cast(lit->handle());
3822    // Do not materialize the receiver on the frame if it is trivial.
3823    is_trivial_receiver = prop->obj()->IsTrivial();
3824    if (!is_trivial_receiver) Load(prop->obj());
3825  }
3826
3827  // Change to slow case in the beginning of an initialization block to
3828  // avoid the quadratic behavior of repeatedly adding fast properties.
3829  if (node->starts_initialization_block()) {
3830    // An initialization block consists of assignments of the form
3831    // expr.x = ..., so this will never be an assignment to a variable and
3832    // there must be a receiver object.
3833    ASSERT_EQ(NULL, var);
3834    if (is_trivial_receiver) {
3835      Load(prop->obj());
3836    } else {
3837      frame_->Dup();
3838    }
3839    frame_->CallRuntime(Runtime::kToSlowProperties, 1);
3840  }
3841
3842  // Change to fast case at the end of an initialization block. To prepare for
3843  // that add an extra copy of the receiver to the frame, so that it can be
3844  // converted back to fast case after the assignment.
3845  if (node->ends_initialization_block() && !is_trivial_receiver) {
3846    frame_->Dup();
3847  }
3848
3849  // Stack layout:
3850  // [tos]   : receiver (only materialized if non-trivial)
3851  // [tos+1] : receiver if at the end of an initialization block
3852
3853  // Evaluate the right-hand side.
3854  if (node->is_compound()) {
3855    // For a compound assignment the right-hand side is a binary operation
3856    // between the current property value and the actual right-hand side.
3857    if (is_trivial_receiver) {
3858      Load(prop->obj());
3859    } else if (var != NULL) {
3860      LoadGlobal();
3861    } else {
3862      frame_->Dup();
3863    }
3864    EmitNamedLoad(name, var != NULL);
3865
3866    // Perform the binary operation.
3867    Literal* literal = node->value()->AsLiteral();
3868    bool overwrite_value = node->value()->ResultOverwriteAllowed();
3869    if (literal != NULL && literal->handle()->IsSmi()) {
3870      SmiOperation(node->binary_op(),
3871                   literal->handle(),
3872                   false,
3873                   overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE);
3874    } else {
3875      GenerateInlineSmi inline_smi =
3876          loop_nesting() > 0 ? GENERATE_INLINE_SMI : DONT_GENERATE_INLINE_SMI;
3877      if (literal != NULL) {
3878        ASSERT(!literal->handle()->IsSmi());
3879        inline_smi = DONT_GENERATE_INLINE_SMI;
3880      }
3881      Load(node->value());
3882      GenericBinaryOperation(node->binary_op(),
3883                             overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE,
3884                             inline_smi);
3885    }
3886  } else {
3887    // For non-compound assignment just load the right-hand side.
3888    Load(node->value());
3889  }
3890
3891  // Stack layout:
3892  // [tos]   : value
3893  // [tos+1] : receiver (only materialized if non-trivial)
3894  // [tos+2] : receiver if at the end of an initialization block
3895
3896  // Perform the assignment.  It is safe to ignore constants here.
3897  ASSERT(var == NULL || var->mode() != Variable::CONST);
3898  ASSERT_NE(Token::INIT_CONST, node->op());
3899  if (is_trivial_receiver) {
3900    // Load the receiver and swap with the value.
3901    Load(prop->obj());
3902    Register t0 = frame_->PopToRegister();
3903    Register t1 = frame_->PopToRegister(t0);
3904    frame_->EmitPush(t0);
3905    frame_->EmitPush(t1);
3906  }
3907  CodeForSourcePosition(node->position());
3908  bool is_contextual = (var != NULL);
3909  EmitNamedStore(name, is_contextual);
3910  frame_->EmitPush(r0);
3911
3912  // Change to fast case at the end of an initialization block.
3913  if (node->ends_initialization_block()) {
3914    ASSERT_EQ(NULL, var);
3915    // The argument to the runtime call is the receiver.
3916    if (is_trivial_receiver) {
3917      Load(prop->obj());
3918    } else {
3919      // A copy of the receiver is below the value of the assignment. Swap
3920      // the receiver and the value of the assignment expression.
3921      Register t0 = frame_->PopToRegister();
3922      Register t1 = frame_->PopToRegister(t0);
3923      frame_->EmitPush(t0);
3924      frame_->EmitPush(t1);
3925    }
3926    frame_->CallRuntime(Runtime::kToFastProperties, 1);
3927  }
3928
3929  // Stack layout:
3930  // [tos]   : result
3931
3932  ASSERT_EQ(original_height + 1, frame_->height());
3933}
3934
3935
3936void CodeGenerator::EmitKeyedPropertyAssignment(Assignment* node) {
3937#ifdef DEBUG
3938  int original_height = frame_->height();
3939#endif
3940  Comment cmnt(masm_, "[ Keyed Property Assignment");
3941  Property* prop = node->target()->AsProperty();
3942  ASSERT_NOT_NULL(prop);
3943
3944  // Evaluate the receiver subexpression.
3945  Load(prop->obj());
3946
3947  WriteBarrierCharacter wb_info;
3948
3949  // Change to slow case in the beginning of an initialization block to
3950  // avoid the quadratic behavior of repeatedly adding fast properties.
3951  if (node->starts_initialization_block()) {
3952    frame_->Dup();
3953    frame_->CallRuntime(Runtime::kToSlowProperties, 1);
3954  }
3955
3956  // Change to fast case at the end of an initialization block. To prepare for
3957  // that add an extra copy of the receiver to the frame, so that it can be
3958  // converted back to fast case after the assignment.
3959  if (node->ends_initialization_block()) {
3960    frame_->Dup();
3961  }
3962
3963  // Evaluate the key subexpression.
3964  Load(prop->key());
3965
3966  // Stack layout:
3967  // [tos]   : key
3968  // [tos+1] : receiver
3969  // [tos+2] : receiver if at the end of an initialization block
3970  //
3971  // Evaluate the right-hand side.
3972  if (node->is_compound()) {
3973    // For a compound assignment the right-hand side is a binary operation
3974    // between the current property value and the actual right-hand side.
3975    // Duplicate receiver and key for loading the current property value.
3976    frame_->Dup2();
3977    EmitKeyedLoad();
3978    frame_->EmitPush(r0);
3979
3980    // Perform the binary operation.
3981    Literal* literal = node->value()->AsLiteral();
3982    bool overwrite_value = node->value()->ResultOverwriteAllowed();
3983    if (literal != NULL && literal->handle()->IsSmi()) {
3984      SmiOperation(node->binary_op(),
3985                   literal->handle(),
3986                   false,
3987                   overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE);
3988    } else {
3989      GenerateInlineSmi inline_smi =
3990          loop_nesting() > 0 ? GENERATE_INLINE_SMI : DONT_GENERATE_INLINE_SMI;
3991      if (literal != NULL) {
3992        ASSERT(!literal->handle()->IsSmi());
3993        inline_smi = DONT_GENERATE_INLINE_SMI;
3994      }
3995      Load(node->value());
3996      GenericBinaryOperation(node->binary_op(),
3997                             overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE,
3998                             inline_smi);
3999    }
4000    wb_info = node->type()->IsLikelySmi() ? LIKELY_SMI : UNLIKELY_SMI;
4001  } else {
4002    // For non-compound assignment just load the right-hand side.
4003    Load(node->value());
4004    wb_info = node->value()->AsLiteral() != NULL ?
4005        NEVER_NEWSPACE :
4006        (node->value()->type()->IsLikelySmi() ? LIKELY_SMI : UNLIKELY_SMI);
4007  }
4008
4009  // Stack layout:
4010  // [tos]   : value
4011  // [tos+1] : key
4012  // [tos+2] : receiver
4013  // [tos+3] : receiver if at the end of an initialization block
4014
4015  // Perform the assignment.  It is safe to ignore constants here.
4016  ASSERT(node->op() != Token::INIT_CONST);
4017  CodeForSourcePosition(node->position());
4018  EmitKeyedStore(prop->key()->type(), wb_info);
4019  frame_->EmitPush(r0);
4020
4021  // Stack layout:
4022  // [tos]   : result
4023  // [tos+1] : receiver if at the end of an initialization block
4024
4025  // Change to fast case at the end of an initialization block.
4026  if (node->ends_initialization_block()) {
4027    // The argument to the runtime call is the extra copy of the receiver,
4028    // which is below the value of the assignment.  Swap the receiver and
4029    // the value of the assignment expression.
4030    Register t0 = frame_->PopToRegister();
4031    Register t1 = frame_->PopToRegister(t0);
4032    frame_->EmitPush(t1);
4033    frame_->EmitPush(t0);
4034    frame_->CallRuntime(Runtime::kToFastProperties, 1);
4035  }
4036
4037  // Stack layout:
4038  // [tos]   : result
4039
4040  ASSERT_EQ(original_height + 1, frame_->height());
4041}
4042
4043
4044void CodeGenerator::VisitAssignment(Assignment* node) {
4045  VirtualFrame::RegisterAllocationScope scope(this);
4046#ifdef DEBUG
4047  int original_height = frame_->height();
4048#endif
4049  Comment cmnt(masm_, "[ Assignment");
4050
4051  Variable* var = node->target()->AsVariableProxy()->AsVariable();
4052  Property* prop = node->target()->AsProperty();
4053
4054  if (var != NULL && !var->is_global()) {
4055    EmitSlotAssignment(node);
4056
4057  } else if ((prop != NULL && prop->key()->IsPropertyName()) ||
4058             (var != NULL && var->is_global())) {
4059    // Properties whose keys are property names and global variables are
4060    // treated as named property references.  We do not need to consider
4061    // global 'this' because it is not a valid left-hand side.
4062    EmitNamedPropertyAssignment(node);
4063
4064  } else if (prop != NULL) {
4065    // Other properties (including rewritten parameters for a function that
4066    // uses arguments) are keyed property assignments.
4067    EmitKeyedPropertyAssignment(node);
4068
4069  } else {
4070    // Invalid left-hand side.
4071    Load(node->target());
4072    frame_->CallRuntime(Runtime::kThrowReferenceError, 1);
4073    // The runtime call doesn't actually return but the code generator will
4074    // still generate code and expects a certain frame height.
4075    frame_->EmitPush(r0);
4076  }
4077  ASSERT_EQ(original_height + 1, frame_->height());
4078}
4079
4080
4081void CodeGenerator::VisitThrow(Throw* node) {
4082#ifdef DEBUG
4083  int original_height = frame_->height();
4084#endif
4085  Comment cmnt(masm_, "[ Throw");
4086
4087  Load(node->exception());
4088  CodeForSourcePosition(node->position());
4089  frame_->CallRuntime(Runtime::kThrow, 1);
4090  frame_->EmitPush(r0);
4091  ASSERT_EQ(original_height + 1, frame_->height());
4092}
4093
4094
4095void CodeGenerator::VisitProperty(Property* node) {
4096#ifdef DEBUG
4097  int original_height = frame_->height();
4098#endif
4099  Comment cmnt(masm_, "[ Property");
4100
4101  { Reference property(this, node);
4102    property.GetValue();
4103  }
4104  ASSERT_EQ(original_height + 1, frame_->height());
4105}
4106
4107
4108void CodeGenerator::VisitCall(Call* node) {
4109#ifdef DEBUG
4110  int original_height = frame_->height();
4111#endif
4112  Comment cmnt(masm_, "[ Call");
4113
4114  Expression* function = node->expression();
4115  ZoneList<Expression*>* args = node->arguments();
4116
4117  // Standard function call.
4118  // Check if the function is a variable or a property.
4119  Variable* var = function->AsVariableProxy()->AsVariable();
4120  Property* property = function->AsProperty();
4121
4122  // ------------------------------------------------------------------------
4123  // Fast-case: Use inline caching.
4124  // ---
4125  // According to ECMA-262, section 11.2.3, page 44, the function to call
4126  // must be resolved after the arguments have been evaluated. The IC code
4127  // automatically handles this by loading the arguments before the function
4128  // is resolved in cache misses (this also holds for megamorphic calls).
4129  // ------------------------------------------------------------------------
4130
4131  if (var != NULL && var->is_possibly_eval()) {
4132    // ----------------------------------
4133    // JavaScript example: 'eval(arg)'  // eval is not known to be shadowed
4134    // ----------------------------------
4135
4136    // In a call to eval, we first call %ResolvePossiblyDirectEval to
4137    // resolve the function we need to call and the receiver of the
4138    // call.  Then we call the resolved function using the given
4139    // arguments.
4140
4141    // Prepare stack for call to resolved function.
4142    Load(function);
4143
4144    // Allocate a frame slot for the receiver.
4145    frame_->EmitPushRoot(Heap::kUndefinedValueRootIndex);
4146
4147    // Load the arguments.
4148    int arg_count = args->length();
4149    for (int i = 0; i < arg_count; i++) {
4150      Load(args->at(i));
4151    }
4152
4153    VirtualFrame::SpilledScope spilled_scope(frame_);
4154
4155    // If we know that eval can only be shadowed by eval-introduced
4156    // variables we attempt to load the global eval function directly
4157    // in generated code. If we succeed, there is no need to perform a
4158    // context lookup in the runtime system.
4159    JumpTarget done;
4160    if (var->AsSlot() != NULL && var->mode() == Variable::DYNAMIC_GLOBAL) {
4161      ASSERT(var->AsSlot()->type() == Slot::LOOKUP);
4162      JumpTarget slow;
4163      // Prepare the stack for the call to
4164      // ResolvePossiblyDirectEvalNoLookup by pushing the loaded
4165      // function, the first argument to the eval call and the
4166      // receiver.
4167      LoadFromGlobalSlotCheckExtensions(var->AsSlot(),
4168                                        NOT_INSIDE_TYPEOF,
4169                                        &slow);
4170      frame_->EmitPush(r0);
4171      if (arg_count > 0) {
4172        __ ldr(r1, MemOperand(sp, arg_count * kPointerSize));
4173        frame_->EmitPush(r1);
4174      } else {
4175        frame_->EmitPush(r2);
4176      }
4177      __ ldr(r1, frame_->Receiver());
4178      frame_->EmitPush(r1);
4179
4180      frame_->CallRuntime(Runtime::kResolvePossiblyDirectEvalNoLookup, 3);
4181
4182      done.Jump();
4183      slow.Bind();
4184    }
4185
4186    // Prepare the stack for the call to ResolvePossiblyDirectEval by
4187    // pushing the loaded function, the first argument to the eval
4188    // call and the receiver.
4189    __ ldr(r1, MemOperand(sp, arg_count * kPointerSize + kPointerSize));
4190    frame_->EmitPush(r1);
4191    if (arg_count > 0) {
4192      __ ldr(r1, MemOperand(sp, arg_count * kPointerSize));
4193      frame_->EmitPush(r1);
4194    } else {
4195      frame_->EmitPush(r2);
4196    }
4197    __ ldr(r1, frame_->Receiver());
4198    frame_->EmitPush(r1);
4199
4200    // Resolve the call.
4201    frame_->CallRuntime(Runtime::kResolvePossiblyDirectEval, 3);
4202
4203    // If we generated fast-case code bind the jump-target where fast
4204    // and slow case merge.
4205    if (done.is_linked()) done.Bind();
4206
4207    // Touch up stack with the right values for the function and the receiver.
4208    __ str(r0, MemOperand(sp, (arg_count + 1) * kPointerSize));
4209    __ str(r1, MemOperand(sp, arg_count * kPointerSize));
4210
4211    // Call the function.
4212    CodeForSourcePosition(node->position());
4213
4214    InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
4215    CallFunctionStub call_function(arg_count, in_loop, RECEIVER_MIGHT_BE_VALUE);
4216    frame_->CallStub(&call_function, arg_count + 1);
4217
4218    __ ldr(cp, frame_->Context());
4219    // Remove the function from the stack.
4220    frame_->Drop();
4221    frame_->EmitPush(r0);
4222
4223  } else if (var != NULL && !var->is_this() && var->is_global()) {
4224    // ----------------------------------
4225    // JavaScript example: 'foo(1, 2, 3)'  // foo is global
4226    // ----------------------------------
4227    // Pass the global object as the receiver and let the IC stub
4228    // patch the stack to use the global proxy as 'this' in the
4229    // invoked function.
4230    LoadGlobal();
4231
4232    // Load the arguments.
4233    int arg_count = args->length();
4234    for (int i = 0; i < arg_count; i++) {
4235      Load(args->at(i));
4236    }
4237
4238    VirtualFrame::SpilledScope spilled_scope(frame_);
4239    // Set up the name register and call the IC initialization code.
4240    __ mov(r2, Operand(var->name()));
4241    InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
4242    Handle<Code> stub = StubCache::ComputeCallInitialize(arg_count, in_loop);
4243    CodeForSourcePosition(node->position());
4244    frame_->CallCodeObject(stub, RelocInfo::CODE_TARGET_CONTEXT,
4245                           arg_count + 1);
4246    __ ldr(cp, frame_->Context());
4247    frame_->EmitPush(r0);
4248
4249  } else if (var != NULL && var->AsSlot() != NULL &&
4250             var->AsSlot()->type() == Slot::LOOKUP) {
4251    // ----------------------------------
4252    // JavaScript examples:
4253    //
4254    //  with (obj) foo(1, 2, 3)  // foo may be in obj.
4255    //
4256    //  function f() {};
4257    //  function g() {
4258    //    eval(...);
4259    //    f();  // f could be in extension object.
4260    //  }
4261    // ----------------------------------
4262
4263    JumpTarget slow, done;
4264
4265    // Generate fast case for loading functions from slots that
4266    // correspond to local/global variables or arguments unless they
4267    // are shadowed by eval-introduced bindings.
4268    EmitDynamicLoadFromSlotFastCase(var->AsSlot(),
4269                                    NOT_INSIDE_TYPEOF,
4270                                    &slow,
4271                                    &done);
4272
4273    slow.Bind();
4274    // Load the function
4275    frame_->EmitPush(cp);
4276    frame_->EmitPush(Operand(var->name()));
4277    frame_->CallRuntime(Runtime::kLoadContextSlot, 2);
4278    // r0: slot value; r1: receiver
4279
4280    // Load the receiver.
4281    frame_->EmitPush(r0);  // function
4282    frame_->EmitPush(r1);  // receiver
4283
4284    // If fast case code has been generated, emit code to push the
4285    // function and receiver and have the slow path jump around this
4286    // code.
4287    if (done.is_linked()) {
4288      JumpTarget call;
4289      call.Jump();
4290      done.Bind();
4291      frame_->EmitPush(r0);  // function
4292      LoadGlobalReceiver(VirtualFrame::scratch0());  // receiver
4293      call.Bind();
4294    }
4295
4296    // Call the function. At this point, everything is spilled but the
4297    // function and receiver are in r0 and r1.
4298    CallWithArguments(args, NO_CALL_FUNCTION_FLAGS, node->position());
4299    frame_->EmitPush(r0);
4300
4301  } else if (property != NULL) {
4302    // Check if the key is a literal string.
4303    Literal* literal = property->key()->AsLiteral();
4304
4305    if (literal != NULL && literal->handle()->IsSymbol()) {
4306      // ------------------------------------------------------------------
4307      // JavaScript example: 'object.foo(1, 2, 3)' or 'map["key"](1, 2, 3)'
4308      // ------------------------------------------------------------------
4309
4310      Handle<String> name = Handle<String>::cast(literal->handle());
4311
4312      if (ArgumentsMode() == LAZY_ARGUMENTS_ALLOCATION &&
4313          name->IsEqualTo(CStrVector("apply")) &&
4314          args->length() == 2 &&
4315          args->at(1)->AsVariableProxy() != NULL &&
4316          args->at(1)->AsVariableProxy()->IsArguments()) {
4317        // Use the optimized Function.prototype.apply that avoids
4318        // allocating lazily allocated arguments objects.
4319        CallApplyLazy(property->obj(),
4320                      args->at(0),
4321                      args->at(1)->AsVariableProxy(),
4322                      node->position());
4323
4324      } else {
4325        Load(property->obj());  // Receiver.
4326        // Load the arguments.
4327        int arg_count = args->length();
4328        for (int i = 0; i < arg_count; i++) {
4329          Load(args->at(i));
4330        }
4331
4332        VirtualFrame::SpilledScope spilled_scope(frame_);
4333        // Set the name register and call the IC initialization code.
4334        __ mov(r2, Operand(name));
4335        InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
4336        Handle<Code> stub =
4337            StubCache::ComputeCallInitialize(arg_count, in_loop);
4338        CodeForSourcePosition(node->position());
4339        frame_->CallCodeObject(stub, RelocInfo::CODE_TARGET, arg_count + 1);
4340        __ ldr(cp, frame_->Context());
4341        frame_->EmitPush(r0);
4342      }
4343
4344    } else {
4345      // -------------------------------------------
4346      // JavaScript example: 'array[index](1, 2, 3)'
4347      // -------------------------------------------
4348
4349      // Load the receiver and name of the function.
4350      Load(property->obj());
4351      Load(property->key());
4352
4353      if (property->is_synthetic()) {
4354        EmitKeyedLoad();
4355        // Put the function below the receiver.
4356        // Use the global receiver.
4357        frame_->EmitPush(r0);  // Function.
4358        LoadGlobalReceiver(VirtualFrame::scratch0());
4359        // Call the function.
4360        CallWithArguments(args, RECEIVER_MIGHT_BE_VALUE, node->position());
4361        frame_->EmitPush(r0);
4362      } else {
4363        // Swap the name of the function and the receiver on the stack
4364        // to follow the calling convention for call ICs.
4365        Register key = frame_->PopToRegister();
4366        Register receiver = frame_->PopToRegister(key);
4367        frame_->EmitPush(key);
4368        frame_->EmitPush(receiver);
4369
4370        // Load the arguments.
4371        int arg_count = args->length();
4372        for (int i = 0; i < arg_count; i++) {
4373          Load(args->at(i));
4374        }
4375
4376        // Load the key into r2 and call the IC initialization code.
4377        InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
4378        Handle<Code> stub =
4379            StubCache::ComputeKeyedCallInitialize(arg_count, in_loop);
4380        CodeForSourcePosition(node->position());
4381        frame_->SpillAll();
4382        __ ldr(r2, frame_->ElementAt(arg_count + 1));
4383        frame_->CallCodeObject(stub, RelocInfo::CODE_TARGET, arg_count + 1);
4384        frame_->Drop();  // Drop the key still on the stack.
4385        __ ldr(cp, frame_->Context());
4386        frame_->EmitPush(r0);
4387      }
4388    }
4389
4390  } else {
4391    // ----------------------------------
4392    // JavaScript example: 'foo(1, 2, 3)'  // foo is not global
4393    // ----------------------------------
4394
4395    // Load the function.
4396    Load(function);
4397
4398    // Pass the global proxy as the receiver.
4399    LoadGlobalReceiver(VirtualFrame::scratch0());
4400
4401    // Call the function.
4402    CallWithArguments(args, NO_CALL_FUNCTION_FLAGS, node->position());
4403    frame_->EmitPush(r0);
4404  }
4405  ASSERT_EQ(original_height + 1, frame_->height());
4406}
4407
4408
4409void CodeGenerator::VisitCallNew(CallNew* node) {
4410#ifdef DEBUG
4411  int original_height = frame_->height();
4412#endif
4413  Comment cmnt(masm_, "[ CallNew");
4414
4415  // According to ECMA-262, section 11.2.2, page 44, the function
4416  // expression in new calls must be evaluated before the
4417  // arguments. This is different from ordinary calls, where the
4418  // actual function to call is resolved after the arguments have been
4419  // evaluated.
4420
4421  // Push constructor on the stack.  If it's not a function it's used as
4422  // receiver for CALL_NON_FUNCTION, otherwise the value on the stack is
4423  // ignored.
4424  Load(node->expression());
4425
4426  // Push the arguments ("left-to-right") on the stack.
4427  ZoneList<Expression*>* args = node->arguments();
4428  int arg_count = args->length();
4429  for (int i = 0; i < arg_count; i++) {
4430    Load(args->at(i));
4431  }
4432
4433  // Spill everything from here to simplify the implementation.
4434  VirtualFrame::SpilledScope spilled_scope(frame_);
4435
4436  // Load the argument count into r0 and the function into r1 as per
4437  // calling convention.
4438  __ mov(r0, Operand(arg_count));
4439  __ ldr(r1, frame_->ElementAt(arg_count));
4440
4441  // Call the construct call builtin that handles allocation and
4442  // constructor invocation.
4443  CodeForSourcePosition(node->position());
4444  Handle<Code> ic(Builtins::builtin(Builtins::JSConstructCall));
4445  frame_->CallCodeObject(ic, RelocInfo::CONSTRUCT_CALL, arg_count + 1);
4446  frame_->EmitPush(r0);
4447
4448  ASSERT_EQ(original_height + 1, frame_->height());
4449}
4450
4451
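// Inline runtime call that pushes the class name of its single argument:
// smis and other non-JS objects yield null, JS functions yield "Function",
// objects whose map's constructor is not a function yield "Object", and all
// other objects yield their constructor's instance class name.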
4452void CodeGenerator::GenerateClassOf(ZoneList<Expression*>* args) {
4453  Register scratch = VirtualFrame::scratch0();
4454  JumpTarget null, function, leave, non_function_constructor;
4455
4456  // Load the object into register.
4457  ASSERT(args->length() == 1);
4458  Load(args->at(0));
4459  Register tos = frame_->PopToRegister();
4460
4461  // If the object is a smi, we return null.
4462  __ tst(tos, Operand(kSmiTagMask));
4463  null.Branch(eq);
4464
4465  // Check that the object is a JS object but take special care of JS
4466  // functions to make sure they have 'Function' as their class.
4467  __ CompareObjectType(tos, tos, scratch, FIRST_JS_OBJECT_TYPE);
4468  null.Branch(lt);
4469
4470  // As long as JS_FUNCTION_TYPE is the last instance type and it is
4471  // right after LAST_JS_OBJECT_TYPE, we can avoid checking for
4472  // LAST_JS_OBJECT_TYPE.
4473  STATIC_ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
4474  STATIC_ASSERT(JS_FUNCTION_TYPE == LAST_JS_OBJECT_TYPE + 1);
4475  __ cmp(scratch, Operand(JS_FUNCTION_TYPE));
4476  function.Branch(eq);
4477
4478  // Check if the constructor in the map is a function.
4479  __ ldr(tos, FieldMemOperand(tos, Map::kConstructorOffset));
4480  __ CompareObjectType(tos, scratch, scratch, JS_FUNCTION_TYPE);
4481  non_function_constructor.Branch(ne);
4482
4483  // The tos register now contains the constructor function. Grab the
4484  // instance class name from there.
4485  __ ldr(tos, FieldMemOperand(tos, JSFunction::kSharedFunctionInfoOffset));
4486  __ ldr(tos,
4487         FieldMemOperand(tos, SharedFunctionInfo::kInstanceClassNameOffset));
4488  frame_->EmitPush(tos);
4489  leave.Jump();
4490
4491  // Functions have class 'Function'.
4492  function.Bind();
4493  __ mov(tos, Operand(Factory::function_class_symbol()));
4494  frame_->EmitPush(tos);
4495  leave.Jump();
4496
4497  // Objects with a non-function constructor have class 'Object'.
4498  non_function_constructor.Bind();
4499  __ mov(tos, Operand(Factory::Object_symbol()));
4500  frame_->EmitPush(tos);
4501  leave.Jump();
4502
4503  // Non-JS objects have class null.
4504  null.Bind();
4505  __ LoadRoot(tos, Heap::kNullValueRootIndex);
4506  frame_->EmitPush(tos);
4507
4508  // All done.
4509  leave.Bind();
4510}
4511
4512
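// Inline runtime call: if the argument is a JSValue wrapper, push the
// wrapped primitive value; otherwise (including smis) push the argument
// unchanged.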
4513void CodeGenerator::GenerateValueOf(ZoneList<Expression*>* args) {
4514  Register scratch = VirtualFrame::scratch0();
4515  JumpTarget leave;
4516
4517  ASSERT(args->length() == 1);
4518  Load(args->at(0));
4519  Register tos = frame_->PopToRegister();  // tos contains object.
4520  // if (object->IsSmi()) return the object.
4521  __ tst(tos, Operand(kSmiTagMask));
4522  leave.Branch(eq);
4523  // It is a heap object - get map. If (!object->IsJSValue()) return the object.
4524  __ CompareObjectType(tos, scratch, scratch, JS_VALUE_TYPE);
4525  leave.Branch(ne);
4526  // Load the value.
4527  __ ldr(tos, FieldMemOperand(tos, JSValue::kValueOffset));
4528  leave.Bind();
4529  frame_->EmitPush(tos);
4530}
4531
4532
4533void CodeGenerator::GenerateSetValueOf(ZoneList<Expression*>* args) {
4534  Register scratch1 = VirtualFrame::scratch0();
4535  Register scratch2 = VirtualFrame::scratch1();
4536  JumpTarget leave;
4537
4538  ASSERT(args->length() == 2);
4539  Load(args->at(0));    // Load the object.
4540  Load(args->at(1));    // Load the value.
4541  Register value = frame_->PopToRegister();
4542  Register object = frame_->PopToRegister(value);
4543  // if (object->IsSmi()) return object.
4544  __ tst(object, Operand(kSmiTagMask));
4545  leave.Branch(eq);
4546  // It is a heap object - get map. If (!object->IsJSValue()) return the object.
4547  __ CompareObjectType(object, scratch1, scratch1, JS_VALUE_TYPE);
4548  leave.Branch(ne);
4549  // Store the value.
4550  __ str(value, FieldMemOperand(object, JSValue::kValueOffset));
4551  // Update the write barrier.
4552  __ RecordWrite(object,
4553                 Operand(JSValue::kValueOffset - kHeapObjectTag),
4554                 scratch1,
4555                 scratch2);
4556  // Leave.
4557  leave.Bind();
4558  frame_->EmitPush(value);
4559}
4560
4561
4562void CodeGenerator::GenerateIsSmi(ZoneList<Expression*>* args) {
4563  ASSERT(args->length() == 1);
4564  Load(args->at(0));
4565  Register reg = frame_->PopToRegister();
4566  __ tst(reg, Operand(kSmiTagMask));
4567  cc_reg_ = eq;
4568}
4569
4570
4571void CodeGenerator::GenerateLog(ZoneList<Expression*>* args) {
4572  // See comment in CodeGenerator::GenerateLog in codegen-ia32.cc.
4573  ASSERT_EQ(args->length(), 3);
4574#ifdef ENABLE_LOGGING_AND_PROFILING
4575  if (ShouldGenerateLog(args->at(0))) {
4576    Load(args->at(1));
4577    Load(args->at(2));
4578    frame_->CallRuntime(Runtime::kLog, 2);
4579  }
4580#endif
4581  frame_->EmitPushRoot(Heap::kUndefinedValueRootIndex);
4582}
4583
4584
4585void CodeGenerator::GenerateIsNonNegativeSmi(ZoneList<Expression*>* args) {
4586  ASSERT(args->length() == 1);
4587  Load(args->at(0));
4588  Register reg = frame_->PopToRegister();
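  // Note: a non-negative smi has both the smi tag bit and the sign bit clear,
  // so a single tst against (kSmiTagMask | 0x80000000) yields eq exactly in
  // that case.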
4589  __ tst(reg, Operand(kSmiTagMask | 0x80000000u));
4590  cc_reg_ = eq;
4591}
4592
4593
4594// Generates the Math.pow method.
4595void CodeGenerator::GenerateMathPow(ZoneList<Expression*>* args) {
4596  ASSERT(args->length() == 2);
4597  Load(args->at(0));
4598  Load(args->at(1));
4599
4600  if (!CpuFeatures::IsSupported(VFP3)) {
4601    frame_->CallRuntime(Runtime::kMath_pow, 2);
4602    frame_->EmitPush(r0);
4603  } else {
4604    CpuFeatures::Scope scope(VFP3);
4605    JumpTarget runtime, done;
4606    Label exponent_nonsmi, base_nonsmi, powi, not_minus_half, allocate_return;
4607
4608    Register scratch1 = VirtualFrame::scratch0();
4609    Register scratch2 = VirtualFrame::scratch1();
4610
4611    // Get base and exponent to registers.
4612    Register exponent = frame_->PopToRegister();
4613    Register base = frame_->PopToRegister(exponent);
4614    Register heap_number_map = no_reg;
4615
4616    // Set the frame for the runtime jump target. The code below jumps to the
4617    // jump target label so the frame needs to be established before that.
4618    ASSERT(runtime.entry_frame() == NULL);
4619    runtime.set_entry_frame(frame_);
4620
4621    __ BranchOnNotSmi(exponent, &exponent_nonsmi);
4622    __ BranchOnNotSmi(base, &base_nonsmi);
4623
4624    heap_number_map = r6;
4625    __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
4626
4627    // Exponent is a smi and base is a smi. Get the smi value into vfp register
4628    // d1.
4629    __ SmiToDoubleVFPRegister(base, d1, scratch1, s0);
4630    __ b(&powi);
4631
4632    __ bind(&base_nonsmi);
4633    // Exponent is a smi and base is a non-smi. Get the double value from the
4634    // base into vfp register d1.
4635    __ ObjectToDoubleVFPRegister(base, d1,
4636                                 scratch1, scratch2, heap_number_map, s0,
4637                                 runtime.entry_label());
4638
4639    __ bind(&powi);
4640
4641    // Load 1.0 into d0.
4642    __ vmov(d0, 1.0);
4643
4644    // Get the absolute untagged value of the exponent and use that for the
4645    // calculation.
4646    __ mov(scratch1, Operand(exponent, ASR, kSmiTagSize), SetCC);
4647    // Negate if negative.
4648    __ rsb(scratch1, scratch1, Operand(0, RelocInfo::NONE), LeaveCC, mi);
4649    __ vmov(d2, d0, mi);  // 1.0 needed in d2 later if exponent is negative.
4650
4651    // Run through all the bits in the exponent. The result is calculated in d0
4652    // and d1 holds base^(2^i), where i is the index of the current bit.
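    // For example, with exponent 5 (binary 101) the iterations compute
    //   d0 = base,          d1 = base^2
    //   d0 unchanged,       d1 = base^4
    //   d0 = base * base^4 = base^5, and the loop exits.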
4653    Label more_bits;
4654    __ bind(&more_bits);
4655    __ mov(scratch1, Operand(scratch1, LSR, 1), SetCC);
4656    __ vmul(d0, d0, d1, cs);  // Multiply with base^(bit^2) if bit is set.
4657    __ vmul(d1, d1, d1, ne);  // Don't bother calculating next d1 if done.
4658    __ b(ne, &more_bits);
4659
4660    // If exponent is positive we are done.
4661    __ cmp(exponent, Operand(0, RelocInfo::NONE));
4662    __ b(ge, &allocate_return);
4663
4664    // If the exponent is negative the result is 1/result (d2 already holds
4665    // 1.0 in that case). However, if d0 has reached infinity this will not
4666    // provide the correct result, so call the runtime if that is the case.
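    // A double with high word 0x7FF00000 and a zero low word is +Infinity in
    // IEEE-754, so comparing d0 against it detects the overflow.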
4667    __ mov(scratch2, Operand(0x7FF00000));
4668    __ mov(scratch1, Operand(0, RelocInfo::NONE));
4669    __ vmov(d1, scratch1, scratch2);  // Load infinity into d1.
4670    __ VFPCompareAndSetFlags(d0, d1);
4671    runtime.Branch(eq);  // d0 reached infinity.
4672    __ vdiv(d0, d2, d0);
4673    __ b(&allocate_return);
4674
4675    __ bind(&exponent_nonsmi);
4676    // Special handling of raising to the power of -0.5 and 0.5. First check
4677    // that the exponent is a heap number and that its lower mantissa bits
4678    // are zero (as they are for both -0.5 and 0.5).
4679    heap_number_map = r6;
4680    __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
4681    __ ldr(scratch1, FieldMemOperand(exponent, HeapObject::kMapOffset));
4682    __ ldr(scratch2, FieldMemOperand(exponent, HeapNumber::kMantissaOffset));
4683    __ cmp(scratch1, heap_number_map);
4684    runtime.Branch(ne);
4685    __ tst(scratch2, scratch2);
4686    runtime.Branch(ne);
4687
4688    // Load the higher bits (which contains the floating point exponent).
4689    __ ldr(scratch1, FieldMemOperand(exponent, HeapNumber::kExponentOffset));
4690
4691    // Compare exponent with -0.5.
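    // 0xbfe00000 is the upper word of the IEEE-754 double -0.5 (sign bit set,
    // biased exponent 0x3fe, zero mantissa); the lower word was checked above.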
4692    __ cmp(scratch1, Operand(0xbfe00000));
4693    __ b(ne, &not_minus_half);
4694
4695    // Get the double value from the base into vfp register d0.
4696    __ ObjectToDoubleVFPRegister(base, d0,
4697                                 scratch1, scratch2, heap_number_map, s0,
4698                                 runtime.entry_label(),
4699                                 AVOID_NANS_AND_INFINITIES);
4700
4701    // Load 1.0 into d2.
4702    __ vmov(d2, 1.0);
4703
4704    // Calculate the reciprocal of the square root. 1/sqrt(x) = sqrt(1/x).
4705    __ vdiv(d0, d2, d0);
4706    __ vsqrt(d0, d0);
4707
4708    __ b(&allocate_return);
4709
4710    __ bind(&not_minus_half);
4711    // Compare exponent with 0.5.
4712    __ cmp(scratch1, Operand(0x3fe00000));
4713    runtime.Branch(ne);
4714
4715    // Get the double value from the base into vfp register d0.
4716    __ ObjectToDoubleVFPRegister(base, d0,
4717                                 scratch1, scratch2, heap_number_map, s0,
4718                                 runtime.entry_label(),
4719                                 AVOID_NANS_AND_INFINITIES);
4720    __ vsqrt(d0, d0);
4721
4722    __ bind(&allocate_return);
4723    Register scratch3 = r5;
4724    __ AllocateHeapNumberWithValue(scratch3, d0, scratch1, scratch2,
4725                                   heap_number_map, runtime.entry_label());
4726    __ mov(base, scratch3);
4727    done.Jump();
4728
4729    runtime.Bind();
4730
4731    // Push back the arguments again for the runtime call.
4732    frame_->EmitPush(base);
4733    frame_->EmitPush(exponent);
4734    frame_->CallRuntime(Runtime::kMath_pow, 2);
4735    __ Move(base, r0);
4736
4737    done.Bind();
4738    frame_->EmitPush(base);
4739  }
4740}
4741
4742
4743// Generates the Math.sqrt method.
4744void CodeGenerator::GenerateMathSqrt(ZoneList<Expression*>* args) {
4745  ASSERT(args->length() == 1);
4746  Load(args->at(0));
4747
4748  if (!CpuFeatures::IsSupported(VFP3)) {
4749    frame_->CallRuntime(Runtime::kMath_sqrt, 1);
4750    frame_->EmitPush(r0);
4751  } else {
4752    CpuFeatures::Scope scope(VFP3);
4753    JumpTarget runtime, done;
4754
4755    Register scratch1 = VirtualFrame::scratch0();
4756    Register scratch2 = VirtualFrame::scratch1();
4757
4758    // Get the value from the frame.
4759    Register tos = frame_->PopToRegister();
4760
4761    // Set the frame for the runtime jump target. The code below jumps to the
4762    // jump target label so the frame needs to be established before that.
4763    ASSERT(runtime.entry_frame() == NULL);
4764    runtime.set_entry_frame(frame_);
4765
4766    Register heap_number_map = r6;
4767    Register new_heap_number = r5;
4768    __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
4769
4770    // Get the double value from the heap number into vfp register d0.
4771    __ ObjectToDoubleVFPRegister(tos, d0,
4772                                 scratch1, scratch2, heap_number_map, s0,
4773                                 runtime.entry_label());
4774
4775    // Calculate the square root of d0 and place result in a heap number object.
4776    __ vsqrt(d0, d0);
4777    __ AllocateHeapNumberWithValue(new_heap_number,
4778                                   d0,
4779                                   scratch1, scratch2,
4780                                   heap_number_map,
4781                                   runtime.entry_label());
4782    __ mov(tos, Operand(new_heap_number));
4783    done.Jump();
4784
4785    runtime.Bind();
4786    // Push back the argument again for the runtime call.
4787    frame_->EmitPush(tos);
4788    frame_->CallRuntime(Runtime::kMath_sqrt, 1);
4789    __ Move(tos, r0);
4790
4791    done.Bind();
4792    frame_->EmitPush(tos);
4793  }
4794}
4795
4796
4797class DeferredStringCharCodeAt : public DeferredCode {
4798 public:
4799  DeferredStringCharCodeAt(Register object,
4800                           Register index,
4801                           Register scratch,
4802                           Register result)
4803      : result_(result),
4804        char_code_at_generator_(object,
4805                                index,
4806                                scratch,
4807                                result,
4808                                &need_conversion_,
4809                                &need_conversion_,
4810                                &index_out_of_range_,
4811                                STRING_INDEX_IS_NUMBER) {}
4812
4813  StringCharCodeAtGenerator* fast_case_generator() {
4814    return &char_code_at_generator_;
4815  }
4816
4817  virtual void Generate() {
4818    VirtualFrameRuntimeCallHelper call_helper(frame_state());
4819    char_code_at_generator_.GenerateSlow(masm(), call_helper);
4820
4821    __ bind(&need_conversion_);
4822    // Move the undefined value into the result register, which will
4823    // trigger conversion.
4824    __ LoadRoot(result_, Heap::kUndefinedValueRootIndex);
4825    __ jmp(exit_label());
4826
4827    __ bind(&index_out_of_range_);
4828    // When the index is out of range, the spec requires us to return
4829    // NaN.
4830    __ LoadRoot(result_, Heap::kNanValueRootIndex);
4831    __ jmp(exit_label());
4832  }
4833
4834 private:
4835  Register result_;
4836
4837  Label need_conversion_;
4838  Label index_out_of_range_;
4839
4840  StringCharCodeAtGenerator char_code_at_generator_;
4841};
4842
4843
4844// This generates code that performs a String.prototype.charCodeAt() call
4845// or returns a smi in order to trigger conversion.
4846void CodeGenerator::GenerateStringCharCodeAt(ZoneList<Expression*>* args) {
4847  Comment(masm_, "[ GenerateStringCharCodeAt");
4848  ASSERT(args->length() == 2);
4849
4850  Load(args->at(0));
4851  Load(args->at(1));
4852
4853  Register index = frame_->PopToRegister();
4854  Register object = frame_->PopToRegister(index);
4855
4856  // We need two extra registers.
4857  Register scratch = VirtualFrame::scratch0();
4858  Register result = VirtualFrame::scratch1();
4859
4860  DeferredStringCharCodeAt* deferred =
4861      new DeferredStringCharCodeAt(object,
4862                                   index,
4863                                   scratch,
4864                                   result);
4865  deferred->fast_case_generator()->GenerateFast(masm_);
4866  deferred->BindExit();
4867  frame_->EmitPush(result);
4868}
4869
4870
4871class DeferredStringCharFromCode : public DeferredCode {
4872 public:
4873  DeferredStringCharFromCode(Register code,
4874                             Register result)
4875      : char_from_code_generator_(code, result) {}
4876
4877  StringCharFromCodeGenerator* fast_case_generator() {
4878    return &char_from_code_generator_;
4879  }
4880
4881  virtual void Generate() {
4882    VirtualFrameRuntimeCallHelper call_helper(frame_state());
4883    char_from_code_generator_.GenerateSlow(masm(), call_helper);
4884  }
4885
4886 private:
4887  StringCharFromCodeGenerator char_from_code_generator_;
4888};
4889
4890
4891// Generates code for creating a one-char string from a char code.
4892void CodeGenerator::GenerateStringCharFromCode(ZoneList<Expression*>* args) {
4893  Comment(masm_, "[ GenerateStringCharFromCode");
4894  ASSERT(args->length() == 1);
4895
4896  Load(args->at(0));
4897
4898  Register result = frame_->GetTOSRegister();
4899  Register code = frame_->PopToRegister(result);
4900
4901  DeferredStringCharFromCode* deferred = new DeferredStringCharFromCode(
4902      code, result);
4903  deferred->fast_case_generator()->GenerateFast(masm_);
4904  deferred->BindExit();
4905  frame_->EmitPush(result);
4906}
4907
4908
4909class DeferredStringCharAt : public DeferredCode {
4910 public:
4911  DeferredStringCharAt(Register object,
4912                       Register index,
4913                       Register scratch1,
4914                       Register scratch2,
4915                       Register result)
4916      : result_(result),
4917        char_at_generator_(object,
4918                           index,
4919                           scratch1,
4920                           scratch2,
4921                           result,
4922                           &need_conversion_,
4923                           &need_conversion_,
4924                           &index_out_of_range_,
4925                           STRING_INDEX_IS_NUMBER) {}
4926
4927  StringCharAtGenerator* fast_case_generator() {
4928    return &char_at_generator_;
4929  }
4930
4931  virtual void Generate() {
4932    VirtualFrameRuntimeCallHelper call_helper(frame_state());
4933    char_at_generator_.GenerateSlow(masm(), call_helper);
4934
4935    __ bind(&need_conversion_);
4936    // Move smi zero into the result register, which will trigger
4937    // conversion.
4938    __ mov(result_, Operand(Smi::FromInt(0)));
4939    __ jmp(exit_label());
4940
4941    __ bind(&index_out_of_range_);
4942    // When the index is out of range, the spec requires us to return
4943    // the empty string.
4944    __ LoadRoot(result_, Heap::kEmptyStringRootIndex);
4945    __ jmp(exit_label());
4946  }
4947
4948 private:
4949  Register result_;
4950
4951  Label need_conversion_;
4952  Label index_out_of_range_;
4953
4954  StringCharAtGenerator char_at_generator_;
4955};
4956
4957
4958// This generates code that performs a String.prototype.charAt() call
4959// or returns a smi in order to trigger conversion.
4960void CodeGenerator::GenerateStringCharAt(ZoneList<Expression*>* args) {
4961  Comment(masm_, "[ GenerateStringCharAt");
4962  ASSERT(args->length() == 2);
4963
4964  Load(args->at(0));
4965  Load(args->at(1));
4966
4967  Register index = frame_->PopToRegister();
4968  Register object = frame_->PopToRegister(index);
4969
4970  // We need three extra registers.
4971  Register scratch1 = VirtualFrame::scratch0();
4972  Register scratch2 = VirtualFrame::scratch1();
4973  // Use r6 without notifying the virtual frame.
4974  Register result = r6;
4975
4976  DeferredStringCharAt* deferred =
4977      new DeferredStringCharAt(object,
4978                               index,
4979                               scratch1,
4980                               scratch2,
4981                               result);
4982  deferred->fast_case_generator()->GenerateFast(masm_);
4983  deferred->BindExit();
4984  frame_->EmitPush(result);
4985}
4986
4987
4988void CodeGenerator::GenerateIsArray(ZoneList<Expression*>* args) {
4989  ASSERT(args->length() == 1);
4990  Load(args->at(0));
4991  JumpTarget answer;
4992  // We need the CC bits to come out as not_equal in the case where the
4993  // object is a smi.  This can't be done with the usual test opcode so
4994  // we use XOR to get the right CC bits.
4995  Register possible_array = frame_->PopToRegister();
4996  Register scratch = VirtualFrame::scratch0();
4997  __ and_(scratch, possible_array, Operand(kSmiTagMask));
4998  __ eor(scratch, scratch, Operand(kSmiTagMask), SetCC);
4999  answer.Branch(ne);
5000  // It is a heap object - get the map. Check if the object is a JS array.
5001  __ CompareObjectType(possible_array, scratch, scratch, JS_ARRAY_TYPE);
5002  answer.Bind();
5003  cc_reg_ = eq;
5004}
5005
5006
5007void CodeGenerator::GenerateIsRegExp(ZoneList<Expression*>* args) {
5008  ASSERT(args->length() == 1);
5009  Load(args->at(0));
5010  JumpTarget answer;
5011  // We need the CC bits to come out as not_equal in the case where the
5012  // object is a smi.  This can't be done with the usual test opcode so
5013  // we use XOR to get the right CC bits.
5014  Register possible_regexp = frame_->PopToRegister();
5015  Register scratch = VirtualFrame::scratch0();
5016  __ and_(scratch, possible_regexp, Operand(kSmiTagMask));
5017  __ eor(scratch, scratch, Operand(kSmiTagMask), SetCC);
5018  answer.Branch(ne);
5019  // It is a heap object - get the map. Check if the object is a regexp.
5020  __ CompareObjectType(possible_regexp, scratch, scratch, JS_REGEXP_TYPE);
5021  answer.Bind();
5022  cc_reg_ = eq;
5023}
5024
5025
5026void CodeGenerator::GenerateIsObject(ZoneList<Expression*>* args) {
5027  // This generates a fast version of:
5028  // (typeof(arg) === 'object' || %_ClassOf(arg) == 'RegExp')
5029  ASSERT(args->length() == 1);
5030  Load(args->at(0));
5031  Register possible_object = frame_->PopToRegister();
5032  __ tst(possible_object, Operand(kSmiTagMask));
5033  false_target()->Branch(eq);
5034
5035  __ LoadRoot(ip, Heap::kNullValueRootIndex);
5036  __ cmp(possible_object, ip);
5037  true_target()->Branch(eq);
5038
5039  Register map_reg = VirtualFrame::scratch0();
5040  __ ldr(map_reg, FieldMemOperand(possible_object, HeapObject::kMapOffset));
5041  // Undetectable objects behave like undefined when tested with typeof.
5042  __ ldrb(possible_object, FieldMemOperand(map_reg, Map::kBitFieldOffset));
5043  __ tst(possible_object, Operand(1 << Map::kIsUndetectable));
5044  false_target()->Branch(ne);
5045
5046  __ ldrb(possible_object, FieldMemOperand(map_reg, Map::kInstanceTypeOffset));
5047  __ cmp(possible_object, Operand(FIRST_JS_OBJECT_TYPE));
5048  false_target()->Branch(lt);
5049  __ cmp(possible_object, Operand(LAST_JS_OBJECT_TYPE));
5050  cc_reg_ = le;
5051}
5052
5053
5054void CodeGenerator::GenerateIsSpecObject(ZoneList<Expression*>* args) {
5055  // This generates a fast version of:
5056  // (typeof(arg) === 'object' || %_ClassOf(arg) == 'RegExp' ||
5057  // typeof(arg) == 'function').
5058  // It includes undetectable objects (as opposed to IsObject).
5059  ASSERT(args->length() == 1);
5060  Load(args->at(0));
5061  Register value = frame_->PopToRegister();
5062  __ tst(value, Operand(kSmiTagMask));
5063  false_target()->Branch(eq);
5064  // Check that this is an object.
5065  __ ldr(value, FieldMemOperand(value, HeapObject::kMapOffset));
5066  __ ldrb(value, FieldMemOperand(value, Map::kInstanceTypeOffset));
5067  __ cmp(value, Operand(FIRST_JS_OBJECT_TYPE));
5068  cc_reg_ = ge;
5069}
5070
5071
5072// Deferred code to check whether a String wrapper object is safe to use with
5073// the default valueOf behavior. This code is called after the bit caching
5074// this information in the map has been checked, with the object's map held
5075// in the map_result_ register. On return the register map_result_ contains 1
5076// for true and 0 for false.
5077class DeferredIsStringWrapperSafeForDefaultValueOf : public DeferredCode {
5078 public:
5079  DeferredIsStringWrapperSafeForDefaultValueOf(Register object,
5080                                               Register map_result,
5081                                               Register scratch1,
5082                                               Register scratch2)
5083      : object_(object),
5084        map_result_(map_result),
5085        scratch1_(scratch1),
5086        scratch2_(scratch2) { }
5087
5088  virtual void Generate() {
5089    Label false_result;
5090
5091    // Check that map is loaded as expected.
5092    if (FLAG_debug_code) {
5093      __ ldr(ip, FieldMemOperand(object_, HeapObject::kMapOffset));
5094      __ cmp(map_result_, ip);
5095      __ Assert(eq, "Map not in expected register");
5096    }
5097
5098    // Check for fast case object. Generate false result for slow case object.
5099    __ ldr(scratch1_, FieldMemOperand(object_, JSObject::kPropertiesOffset));
5100    __ ldr(scratch1_, FieldMemOperand(scratch1_, HeapObject::kMapOffset));
5101    __ LoadRoot(ip, Heap::kHashTableMapRootIndex);
5102    __ cmp(scratch1_, ip);
5103    __ b(eq, &false_result);
5104
5105    // Look for valueOf symbol in the descriptor array, and indicate false if
5106    // found. The type is not checked, so if it is a transition it is a false
5107    // negative.
5108    __ ldr(map_result_,
5109           FieldMemOperand(map_result_, Map::kInstanceDescriptorsOffset));
5110    __ ldr(scratch2_, FieldMemOperand(map_result_, FixedArray::kLengthOffset));
5111    // map_result_: descriptor array
5112    // scratch2_: length of descriptor array
5113    // Calculate the end of the descriptor array.
5114    STATIC_ASSERT(kSmiTag == 0);
5115    STATIC_ASSERT(kSmiTagSize == 1);
5116    STATIC_ASSERT(kPointerSize == 4);
5117    __ add(scratch1_,
5118           map_result_,
5119           Operand(FixedArray::kHeaderSize - kHeapObjectTag));
5120    __ add(scratch1_,
5121           scratch1_,
5122           Operand(scratch2_, LSL, kPointerSizeLog2 - kSmiTagSize));
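    // The shift by (kPointerSizeLog2 - kSmiTagSize) above converts the
    // smi-tagged length directly into a byte offset: a smi is the untagged
    // value shifted left by kSmiTagSize.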
5123
5124    // Calculate location of the first key name.
5125    __ add(map_result_,
5126           map_result_,
5127           Operand(FixedArray::kHeaderSize - kHeapObjectTag +
5128                   DescriptorArray::kFirstIndex * kPointerSize));
5129    // Loop through all the keys in the descriptor array. If one of these is
5130    // the symbol valueOf, the result is false.
5131    Label entry, loop;
5132    // The use of ip to store the valueOf symbol assumes that it is not
5133    // otherwise used in the loop below.
5134    __ mov(ip, Operand(Factory::value_of_symbol()));
5135    __ jmp(&entry);
5136    __ bind(&loop);
5137    __ ldr(scratch2_, MemOperand(map_result_, 0));
5138    __ cmp(scratch2_, ip);
5139    __ b(eq, &false_result);
5140    __ add(map_result_, map_result_, Operand(kPointerSize));
5141    __ bind(&entry);
5142    __ cmp(map_result_, Operand(scratch1_));
5143    __ b(ne, &loop);
5144
5145    // Reload map as register map_result_ was used as temporary above.
5146    __ ldr(map_result_, FieldMemOperand(object_, HeapObject::kMapOffset));
5147
5148    // If a valueOf property is not found on the object, check that its
5149    // prototype is the unmodified String prototype. If not, result is false.
5150    __ ldr(scratch1_, FieldMemOperand(map_result_, Map::kPrototypeOffset));
5151    __ tst(scratch1_, Operand(kSmiTagMask));
5152    __ b(eq, &false_result);
5153    __ ldr(scratch1_, FieldMemOperand(scratch1_, HeapObject::kMapOffset));
5154    __ ldr(scratch2_,
5155           ContextOperand(cp, Context::GLOBAL_INDEX));
5156    __ ldr(scratch2_,
5157           FieldMemOperand(scratch2_, GlobalObject::kGlobalContextOffset));
5158    __ ldr(scratch2_,
5159           ContextOperand(
5160               scratch2_, Context::STRING_FUNCTION_PROTOTYPE_MAP_INDEX));
5161    __ cmp(scratch1_, scratch2_);
5162    __ b(ne, &false_result);
5163
5164    // Set the bit in the map to indicate that it has been checked safe for
5165    // default valueOf and set true result.
5166    __ ldr(scratch1_, FieldMemOperand(map_result_, Map::kBitField2Offset));
5167    __ orr(scratch1_,
5168           scratch1_,
5169           Operand(1 << Map::kStringWrapperSafeForDefaultValueOf));
5170    __ str(scratch1_, FieldMemOperand(map_result_, Map::kBitField2Offset));
5171    __ mov(map_result_, Operand(1));
5172    __ jmp(exit_label());
5173    __ bind(&false_result);
5174    // Set false result.
5175    __ mov(map_result_, Operand(0, RelocInfo::NONE));
5176  }
5177
5178 private:
5179  Register object_;
5180  Register map_result_;
5181  Register scratch1_;
5182  Register scratch2_;
5183};
5184
5185
5186void CodeGenerator::GenerateIsStringWrapperSafeForDefaultValueOf(
5187    ZoneList<Expression*>* args) {
5188  ASSERT(args->length() == 1);
5189  Load(args->at(0));
5190  Register obj = frame_->PopToRegister();  // Pop the string wrapper.
5191  if (FLAG_debug_code) {
5192    __ AbortIfSmi(obj);
5193  }
5194
5195  // Check whether this map has already been checked to be safe for default
5196  // valueOf.
5197  Register map_result = VirtualFrame::scratch0();
5198  __ ldr(map_result, FieldMemOperand(obj, HeapObject::kMapOffset));
5199  __ ldrb(ip, FieldMemOperand(map_result, Map::kBitField2Offset));
5200  __ tst(ip, Operand(1 << Map::kStringWrapperSafeForDefaultValueOf));
5201  true_target()->Branch(ne);
5202
5203  // We need an additional two scratch registers for the deferred code.
5204  Register scratch1 = VirtualFrame::scratch1();
5205  // Use r6 without notifying the virtual frame.
5206  Register scratch2 = r6;
5207
5208  DeferredIsStringWrapperSafeForDefaultValueOf* deferred =
5209      new DeferredIsStringWrapperSafeForDefaultValueOf(
5210          obj, map_result, scratch1, scratch2);
5211  deferred->Branch(eq);
5212  deferred->BindExit();
5213  __ tst(map_result, Operand(map_result));
5214  cc_reg_ = ne;
5215}
5216
5217
5218void CodeGenerator::GenerateIsFunction(ZoneList<Expression*>* args) {
5219  // This generates a fast version of:
5220  // (%_ClassOf(arg) === 'Function')
5221  ASSERT(args->length() == 1);
5222  Load(args->at(0));
5223  Register possible_function = frame_->PopToRegister();
5224  __ tst(possible_function, Operand(kSmiTagMask));
5225  false_target()->Branch(eq);
5226  Register map_reg = VirtualFrame::scratch0();
5227  Register scratch = VirtualFrame::scratch1();
5228  __ CompareObjectType(possible_function, map_reg, scratch, JS_FUNCTION_TYPE);
5229  cc_reg_ = eq;
5230}
5231
5232
5233void CodeGenerator::GenerateIsUndetectableObject(ZoneList<Expression*>* args) {
5234  ASSERT(args->length() == 1);
5235  Load(args->at(0));
5236  Register possible_undetectable = frame_->PopToRegister();
5237  __ tst(possible_undetectable, Operand(kSmiTagMask));
5238  false_target()->Branch(eq);
5239  Register scratch = VirtualFrame::scratch0();
5240  __ ldr(scratch,
5241         FieldMemOperand(possible_undetectable, HeapObject::kMapOffset));
5242  __ ldrb(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
5243  __ tst(scratch, Operand(1 << Map::kIsUndetectable));
5244  cc_reg_ = ne;
5245}
5246
5247
5248void CodeGenerator::GenerateIsConstructCall(ZoneList<Expression*>* args) {
5249  ASSERT(args->length() == 0);
5250
5251  Register scratch0 = VirtualFrame::scratch0();
5252  Register scratch1 = VirtualFrame::scratch1();
5253  // Get the frame pointer for the calling frame.
5254  __ ldr(scratch0, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
5255
5256  // Skip the arguments adaptor frame if it exists.
5257  __ ldr(scratch1,
5258         MemOperand(scratch0, StandardFrameConstants::kContextOffset));
5259  __ cmp(scratch1, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
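  // The following load is conditional on eq: scratch0 is replaced with the
  // adaptor frame's caller FP only when the context slot holds the
  // arguments adaptor sentinel.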
5260  __ ldr(scratch0,
5261         MemOperand(scratch0, StandardFrameConstants::kCallerFPOffset), eq);
5262
5263  // Check the marker in the calling frame.
5264  __ ldr(scratch1,
5265         MemOperand(scratch0, StandardFrameConstants::kMarkerOffset));
5266  __ cmp(scratch1, Operand(Smi::FromInt(StackFrame::CONSTRUCT)));
5267  cc_reg_ = eq;
5268}
5269
5270
5271void CodeGenerator::GenerateArgumentsLength(ZoneList<Expression*>* args) {
5272  ASSERT(args->length() == 0);
5273
5274  Register tos = frame_->GetTOSRegister();
5275  Register scratch0 = VirtualFrame::scratch0();
5276  Register scratch1 = VirtualFrame::scratch1();
5277
5278  // Check if the calling frame is an arguments adaptor frame.
5279  __ ldr(scratch0,
5280         MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
5281  __ ldr(scratch1,
5282         MemOperand(scratch0, StandardFrameConstants::kContextOffset));
5283  __ cmp(scratch1, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
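  // Exactly one of the two instructions below executes: the mov (ne) when
  // the caller is a normal frame, the ldr (eq) when it is an arguments
  // adaptor frame.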
5284
5285  // Get the number of formal parameters.
5286  __ mov(tos, Operand(Smi::FromInt(scope()->num_parameters())), LeaveCC, ne);
5287
5288  // Arguments adaptor case: Read the arguments length from the
5289  // adaptor frame.
5290  __ ldr(tos,
5291         MemOperand(scratch0, ArgumentsAdaptorFrameConstants::kLengthOffset),
5292         eq);
5293
5294  frame_->EmitPush(tos);
5295}
5296
5297
5298void CodeGenerator::GenerateArguments(ZoneList<Expression*>* args) {
5299  ASSERT(args->length() == 1);
5300
5301  // Satisfy contract with ArgumentsAccessStub:
5302  // Load the key into r1 and the formal parameters count into r0.
5303  Load(args->at(0));
5304  frame_->PopToR1();
5305  frame_->SpillAll();
5306  __ mov(r0, Operand(Smi::FromInt(scope()->num_parameters())));
5307
5308  // Call the shared stub to get to arguments[key].
5309  ArgumentsAccessStub stub(ArgumentsAccessStub::READ_ELEMENT);
5310  frame_->CallStub(&stub, 0);
5311  frame_->EmitPush(r0);
5312}
5313
5314
5315void CodeGenerator::GenerateRandomHeapNumber(
5316    ZoneList<Expression*>* args) {
5317  VirtualFrame::SpilledScope spilled_scope(frame_);
5318  ASSERT(args->length() == 0);
5319
5320  Label slow_allocate_heapnumber;
5321  Label heapnumber_allocated;
5322
5323  __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
5324  __ AllocateHeapNumber(r4, r1, r2, r6, &slow_allocate_heapnumber);
5325  __ jmp(&heapnumber_allocated);
5326
5327  __ bind(&slow_allocate_heapnumber);
5328  // Allocate a heap number.
5329  __ CallRuntime(Runtime::kNumberAlloc, 0);
5330  __ mov(r4, Operand(r0));
5331
5332  __ bind(&heapnumber_allocated);
5333
5334  // Convert 32 random bits in r0 to 0.(32 random bits) in a double
5335  // by computing:
5336  // (1.(20 0s)(32 random bits) x 2^20) - (1.0 x 2^20).
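  // The subtraction leaves random_bits * 2^-32, a value in the range [0, 1).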
5337  if (CpuFeatures::IsSupported(VFP3)) {
5338    __ PrepareCallCFunction(0, r1);
5339    __ CallCFunction(ExternalReference::random_uint32_function(), 0);
5340
5341    CpuFeatures::Scope scope(VFP3);
5342    // 0x41300000 is the top half of 1.0 x 2^20 as a double.
5343    // Create this constant using mov/orr to avoid PC relative load.
5344    __ mov(r1, Operand(0x41000000));
5345    __ orr(r1, r1, Operand(0x300000));
5346    // Move 0x41300000xxxxxxxx (x = random bits) to VFP.
5347    __ vmov(d7, r0, r1);
5348    // Move 0x4130000000000000 to VFP.
5349    __ mov(r0, Operand(0, RelocInfo::NONE));
5350    __ vmov(d8, r0, r1);
5351    // Subtract and store the result in the heap number.
5352    __ vsub(d7, d7, d8);
5353    __ sub(r0, r4, Operand(kHeapObjectTag));
5354    __ vstr(d7, r0, HeapNumber::kValueOffset);
5355    frame_->EmitPush(r4);
5356  } else {
5357    __ mov(r0, Operand(r4));
5358    __ PrepareCallCFunction(1, r1);
5359    __ CallCFunction(
5360        ExternalReference::fill_heap_number_with_random_function(), 1);
5361    frame_->EmitPush(r0);
5362  }
5363}
5364
5365
5366void CodeGenerator::GenerateStringAdd(ZoneList<Expression*>* args) {
5367  ASSERT_EQ(2, args->length());
5368
5369  Load(args->at(0));
5370  Load(args->at(1));
5371
5372  StringAddStub stub(NO_STRING_ADD_FLAGS);
5373  frame_->SpillAll();
5374  frame_->CallStub(&stub, 2);
5375  frame_->EmitPush(r0);
5376}
5377
5378
5379void CodeGenerator::GenerateSubString(ZoneList<Expression*>* args) {
5380  ASSERT_EQ(3, args->length());
5381
5382  Load(args->at(0));
5383  Load(args->at(1));
5384  Load(args->at(2));
5385
5386  SubStringStub stub;
5387  frame_->SpillAll();
5388  frame_->CallStub(&stub, 3);
5389  frame_->EmitPush(r0);
5390}
5391
5392
5393void CodeGenerator::GenerateStringCompare(ZoneList<Expression*>* args) {
5394  ASSERT_EQ(2, args->length());
5395
5396  Load(args->at(0));
5397  Load(args->at(1));
5398
5399  StringCompareStub stub;
5400  frame_->SpillAll();
5401  frame_->CallStub(&stub, 2);
5402  frame_->EmitPush(r0);
5403}
5404
5405
5406void CodeGenerator::GenerateRegExpExec(ZoneList<Expression*>* args) {
5407  ASSERT_EQ(4, args->length());
5408
5409  Load(args->at(0));
5410  Load(args->at(1));
5411  Load(args->at(2));
5412  Load(args->at(3));
5413  RegExpExecStub stub;
5414  frame_->SpillAll();
5415  frame_->CallStub(&stub, 4);
5416  frame_->EmitPush(r0);
5417}
5418
5419
5420void CodeGenerator::GenerateRegExpConstructResult(ZoneList<Expression*>* args) {
5421  ASSERT_EQ(3, args->length());
5422
5423  Load(args->at(0));  // Size of array, smi.
5424  Load(args->at(1));  // "index" property value.
5425  Load(args->at(2));  // "input" property value.
5426  RegExpConstructResultStub stub;
5427  frame_->SpillAll();
5428  frame_->CallStub(&stub, 3);
5429  frame_->EmitPush(r0);
5430}
5431
5432
5433class DeferredSearchCache: public DeferredCode {
5434 public:
5435  DeferredSearchCache(Register dst, Register cache, Register key)
5436      : dst_(dst), cache_(cache), key_(key) {
5437    set_comment("[ DeferredSearchCache");
5438  }
5439
5440  virtual void Generate();
5441
5442 private:
5443  Register dst_, cache_, key_;
5444};
5445
5446
5447void DeferredSearchCache::Generate() {
5448  __ Push(cache_, key_);
5449  __ CallRuntime(Runtime::kGetFromCache, 2);
5450  __ Move(dst_, r0);
5451}
5452
5453
5454void CodeGenerator::GenerateGetFromCache(ZoneList<Expression*>* args) {
5455  ASSERT_EQ(2, args->length());
5456
5457  ASSERT_NE(NULL, args->at(0)->AsLiteral());
5458  int cache_id = Smi::cast(*(args->at(0)->AsLiteral()->handle()))->value();
5459
5460  Handle<FixedArray> jsfunction_result_caches(
5461      Top::global_context()->jsfunction_result_caches());
5462  if (jsfunction_result_caches->length() <= cache_id) {
5463    __ Abort("Attempt to use undefined cache.");
5464    frame_->EmitPushRoot(Heap::kUndefinedValueRootIndex);
5465    return;
5466  }
5467
5468  Load(args->at(1));
5469
5470  frame_->PopToR1();
5471  frame_->SpillAll();
5472  Register key = r1;  // Just popped to r1.
5473  Register result = r0;  // Free, as frame has just been spilled.
5474  Register scratch1 = VirtualFrame::scratch0();
5475  Register scratch2 = VirtualFrame::scratch1();
5476
5477  __ ldr(scratch1, ContextOperand(cp, Context::GLOBAL_INDEX));
5478  __ ldr(scratch1,
5479         FieldMemOperand(scratch1, GlobalObject::kGlobalContextOffset));
5480  __ ldr(scratch1,
5481         ContextOperand(scratch1, Context::JSFUNCTION_RESULT_CACHES_INDEX));
5482  __ ldr(scratch1,
5483         FieldMemOperand(scratch1, FixedArray::OffsetOfElementAt(cache_id)));
5484
5485  DeferredSearchCache* deferred =
5486      new DeferredSearchCache(result, scratch1, key);
5487
5488  const int kFingerOffset =
5489      FixedArray::OffsetOfElementAt(JSFunctionResultCache::kFingerIndex);
5490  STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
5491  __ ldr(result, FieldMemOperand(scratch1, kFingerOffset));
5492  // result now holds finger offset as a smi.
5493  __ add(scratch2, scratch1, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
5494  // scratch2 now points to the start of fixed array elements.
5495  __ ldr(result,
5496         MemOperand(
5497             scratch2, result, LSL, kPointerSizeLog2 - kSmiTagSize, PreIndex));
5498  // Note side effect of PreIndex: scratch2 now points to the key of the pair.
5499  __ cmp(key, result);
5500  deferred->Branch(ne);
5501
5502  __ ldr(result, MemOperand(scratch2, kPointerSize));
5503
5504  deferred->BindExit();
5505  frame_->EmitPush(result);
5506}
5507
5508
5509void CodeGenerator::GenerateNumberToString(ZoneList<Expression*>* args) {
5510  ASSERT_EQ(args->length(), 1);
5511
5512  // Load the argument on the stack and jump to the runtime.
5513  Load(args->at(0));
5514
5515  NumberToStringStub stub;
5516  frame_->SpillAll();
5517  frame_->CallStub(&stub, 1);
5518  frame_->EmitPush(r0);
5519}
5520
5521
5522class DeferredSwapElements: public DeferredCode {
5523 public:
5524  DeferredSwapElements(Register object, Register index1, Register index2)
5525      : object_(object), index1_(index1), index2_(index2) {
5526    set_comment("[ DeferredSwapElements");
5527  }
5528
5529  virtual void Generate();
5530
5531 private:
5532  Register object_, index1_, index2_;
5533};
5534
5535
5536void DeferredSwapElements::Generate() {
5537  __ push(object_);
5538  __ push(index1_);
5539  __ push(index2_);
5540  __ CallRuntime(Runtime::kSwapElements, 3);
5541}
5542
5543
5544void CodeGenerator::GenerateSwapElements(ZoneList<Expression*>* args) {
5545  Comment cmnt(masm_, "[ GenerateSwapElements");
5546
5547  ASSERT_EQ(3, args->length());
5548
5549  Load(args->at(0));
5550  Load(args->at(1));
5551  Load(args->at(2));
5552
5553  VirtualFrame::SpilledScope spilled_scope(frame_);
5554
5555  Register index2 = r2;
5556  Register index1 = r1;
5557  Register object = r0;
5558  Register tmp1 = r3;
5559  Register tmp2 = r4;
5560
5561  frame_->EmitPop(index2);
5562  frame_->EmitPop(index1);
5563  frame_->EmitPop(object);
5564
5565  DeferredSwapElements* deferred =
5566      new DeferredSwapElements(object, index1, index2);
5567
5568  // Fetch the map and check if array is in fast case.
5569  // Check that object doesn't require security checks and
5570  // has no indexed interceptor.
5571  __ CompareObjectType(object, tmp1, tmp2, FIRST_JS_OBJECT_TYPE);
5572  deferred->Branch(lt);
5573  __ ldrb(tmp2, FieldMemOperand(tmp1, Map::kBitFieldOffset));
5574  __ tst(tmp2, Operand(KeyedLoadIC::kSlowCaseBitFieldMask));
5575  deferred->Branch(nz);
5576
5577  // Check the object's elements are in fast case and writable.
5578  __ ldr(tmp1, FieldMemOperand(object, JSObject::kElementsOffset));
5579  __ ldr(tmp2, FieldMemOperand(tmp1, HeapObject::kMapOffset));
5580  __ LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
5581  __ cmp(tmp2, ip);
5582  deferred->Branch(ne);
5583
5584  // Smi-tagging is equivalent to multiplying by 2.
5585  STATIC_ASSERT(kSmiTag == 0);
5586  STATIC_ASSERT(kSmiTagSize == 1);
5587
5588  // Check that both indices are smis.
5589  __ mov(tmp2, index1);
5590  __ orr(tmp2, tmp2, index2);
5591  __ tst(tmp2, Operand(kSmiTagMask));
5592  deferred->Branch(nz);
5593
5594  // Check that both indices are valid.
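  // The second cmp below executes only when the first sets hi (length above
  // index1, unsigned), so the ls branch is taken if either index is out of
  // range.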
5595  __ ldr(tmp2, FieldMemOperand(object, JSArray::kLengthOffset));
5596  __ cmp(tmp2, index1);
5597  __ cmp(tmp2, index2, hi);
5598  deferred->Branch(ls);
5599
5600  // Bring the offsets into the fixed array in tmp1 into index1 and
5601  // index2.
5602  __ mov(tmp2, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
5603  __ add(index1, tmp2, Operand(index1, LSL, kPointerSizeLog2 - kSmiTagSize));
5604  __ add(index2, tmp2, Operand(index2, LSL, kPointerSizeLog2 - kSmiTagSize));
5605
5606  // Swap elements.
5607  Register tmp3 = object;
5608  object = no_reg;
5609  __ ldr(tmp3, MemOperand(tmp1, index1));
5610  __ ldr(tmp2, MemOperand(tmp1, index2));
5611  __ str(tmp3, MemOperand(tmp1, index2));
5612  __ str(tmp2, MemOperand(tmp1, index1));
5613
5614  Label done;
5615  __ InNewSpace(tmp1, tmp2, eq, &done);
5616  // Possible optimization: do a check that both values are Smis
5617  // (OR them together and test against the Smi mask).
5618
5619  __ mov(tmp2, tmp1);
5620  __ add(index1, index1, tmp1);
5621  __ add(index2, index2, tmp1);
5622  __ RecordWriteHelper(tmp1, index1, tmp3);
5623  __ RecordWriteHelper(tmp2, index2, tmp3);
5624  __ bind(&done);
5625
5626  deferred->BindExit();
5627  __ LoadRoot(tmp1, Heap::kUndefinedValueRootIndex);
5628  frame_->EmitPush(tmp1);
5629}
5630
5631
5632void CodeGenerator::GenerateCallFunction(ZoneList<Expression*>* args) {
5633  Comment cmnt(masm_, "[ GenerateCallFunction");
5634
5635  ASSERT(args->length() >= 2);
5636
5637  int n_args = args->length() - 2;  // for receiver and function.
5638  Load(args->at(0));  // receiver
5639  for (int i = 0; i < n_args; i++) {
5640    Load(args->at(i + 1));
5641  }
5642  Load(args->at(n_args + 1));  // function
5643  frame_->CallJSFunction(n_args);
5644  frame_->EmitPush(r0);
5645}
5646
5647
5648void CodeGenerator::GenerateMathSin(ZoneList<Expression*>* args) {
5649  ASSERT_EQ(args->length(), 1);
5650  Load(args->at(0));
5651  if (CpuFeatures::IsSupported(VFP3)) {
5652    TranscendentalCacheStub stub(TranscendentalCache::SIN);
5653    frame_->SpillAllButCopyTOSToR0();
5654    frame_->CallStub(&stub, 1);
5655  } else {
5656    frame_->CallRuntime(Runtime::kMath_sin, 1);
5657  }
5658  frame_->EmitPush(r0);
5659}
5660
5661
5662void CodeGenerator::GenerateMathCos(ZoneList<Expression*>* args) {
5663  ASSERT_EQ(args->length(), 1);
5664  Load(args->at(0));
5665  if (CpuFeatures::IsSupported(VFP3)) {
5666    TranscendentalCacheStub stub(TranscendentalCache::COS);
5667    frame_->SpillAllButCopyTOSToR0();
5668    frame_->CallStub(&stub, 1);
5669  } else {
5670    frame_->CallRuntime(Runtime::kMath_cos, 1);
5671  }
5672  frame_->EmitPush(r0);
5673}
5674
5675
5676void CodeGenerator::GenerateMathLog(ZoneList<Expression*>* args) {
5677  ASSERT_EQ(args->length(), 1);
5678  Load(args->at(0));
5679  if (CpuFeatures::IsSupported(VFP3)) {
5680    TranscendentalCacheStub stub(TranscendentalCache::LOG);
5681    frame_->SpillAllButCopyTOSToR0();
5682    frame_->CallStub(&stub, 1);
5683  } else {
5684    frame_->CallRuntime(Runtime::kMath_log, 1);
5685  }
5686  frame_->EmitPush(r0);
5687}
5688
5689
5690void CodeGenerator::GenerateObjectEquals(ZoneList<Expression*>* args) {
5691  ASSERT(args->length() == 2);
5692
5693  // Load the two objects into registers and perform the comparison.
5694  Load(args->at(0));
5695  Load(args->at(1));
5696  Register lhs = frame_->PopToRegister();
5697  Register rhs = frame_->PopToRegister(lhs);
5698  __ cmp(lhs, rhs);
5699  cc_reg_ = eq;
5700}
5701
5702
5703void CodeGenerator::GenerateIsRegExpEquivalent(ZoneList<Expression*>* args) {
5704  ASSERT(args->length() == 2);
5705
5706  // Load the two objects into registers and perform the comparison.
5707  Load(args->at(0));
5708  Load(args->at(1));
5709  Register right = frame_->PopToRegister();
5710  Register left = frame_->PopToRegister(right);
5711  Register tmp = frame_->scratch0();
5712  Register tmp2 = frame_->scratch1();
5713
5714  // Jumps to done must have the eq flag set if the test is successful
5715  // and clear if the test has failed.
5716  Label done;
5717
5718  // Fail if either is a non-HeapObject.
5719  __ cmp(left, Operand(right));
5720  __ b(eq, &done);
5721  __ and_(tmp, left, Operand(right));
5722  __ eor(tmp, tmp, Operand(kSmiTagMask));
5723  __ tst(tmp, Operand(kSmiTagMask));
5724  __ b(ne, &done);
5725  __ ldr(tmp, FieldMemOperand(left, HeapObject::kMapOffset));
5726  __ ldrb(tmp2, FieldMemOperand(tmp, Map::kInstanceTypeOffset));
5727  __ cmp(tmp2, Operand(JS_REGEXP_TYPE));
5728  __ b(ne, &done);
5729  __ ldr(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
5730  __ cmp(tmp, Operand(tmp2));
5731  __ b(ne, &done);
5732  __ ldr(tmp, FieldMemOperand(left, JSRegExp::kDataOffset));
5733  __ ldr(tmp2, FieldMemOperand(right, JSRegExp::kDataOffset));
5734  __ cmp(tmp, tmp2);
5735  __ bind(&done);
5736  cc_reg_ = eq;
5737}
5738
5739
5740void CodeGenerator::GenerateHasCachedArrayIndex(ZoneList<Expression*>* args) {
5741  ASSERT(args->length() == 1);
5742  Load(args->at(0));
5743  Register value = frame_->PopToRegister();
5744  Register tmp = frame_->scratch0();
5745  __ ldr(tmp, FieldMemOperand(value, String::kHashFieldOffset));
5746  __ tst(tmp, Operand(String::kContainsCachedArrayIndexMask));
5747  cc_reg_ = eq;
5748}
5749
5750
5751void CodeGenerator::GenerateGetCachedArrayIndex(ZoneList<Expression*>* args) {
5752  ASSERT(args->length() == 1);
5753  Load(args->at(0));
5754  Register value = frame_->PopToRegister();
5755
5756  __ ldr(value, FieldMemOperand(value, String::kHashFieldOffset));
5757  __ IndexFromHash(value, value);
5758  frame_->EmitPush(value);
5759}
5760
5761
5762void CodeGenerator::GenerateFastAsciiArrayJoin(ZoneList<Expression*>* args) {
5763  ASSERT(args->length() == 2);
5764  Load(args->at(0));
5765  Register value = frame_->PopToRegister();
5766  __ LoadRoot(value, Heap::kUndefinedValueRootIndex);
5767  frame_->EmitPush(value);
5768}
5769
5770
5771void CodeGenerator::VisitCallRuntime(CallRuntime* node) {
5772#ifdef DEBUG
5773  int original_height = frame_->height();
5774#endif
5775  if (CheckForInlineRuntimeCall(node)) {
5776    ASSERT((has_cc() && frame_->height() == original_height) ||
5777           (!has_cc() && frame_->height() == original_height + 1));
5778    return;
5779  }
5780
5781  ZoneList<Expression*>* args = node->arguments();
5782  Comment cmnt(masm_, "[ CallRuntime");
5783  Runtime::Function* function = node->function();
5784
5785  if (function == NULL) {
5786    // Prepare stack for calling JS runtime function.
5787    // Push the builtins object found in the current global object.
5788    Register scratch = VirtualFrame::scratch0();
5789    __ ldr(scratch, GlobalObjectOperand());
5790    Register builtins = frame_->GetTOSRegister();
5791    __ ldr(builtins, FieldMemOperand(scratch, GlobalObject::kBuiltinsOffset));
5792    frame_->EmitPush(builtins);
5793  }
5794
5795  // Push the arguments ("left-to-right").
5796  int arg_count = args->length();
5797  for (int i = 0; i < arg_count; i++) {
5798    Load(args->at(i));
5799  }
5800
5801  VirtualFrame::SpilledScope spilled_scope(frame_);
5802
5803  if (function == NULL) {
5804    // Call the JS runtime function.
5805    __ mov(r2, Operand(node->name()));
5806    InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
5807    Handle<Code> stub = StubCache::ComputeCallInitialize(arg_count, in_loop);
5808    frame_->CallCodeObject(stub, RelocInfo::CODE_TARGET, arg_count + 1);
5809    __ ldr(cp, frame_->Context());
5810    frame_->EmitPush(r0);
5811  } else {
5812    // Call the C runtime function.
5813    frame_->CallRuntime(function, arg_count);
5814    frame_->EmitPush(r0);
5815  }
5816  ASSERT_EQ(original_height + 1, frame_->height());
5817}
5818
5819
5820void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
5821#ifdef DEBUG
5822  int original_height = frame_->height();
5823#endif
5824  Comment cmnt(masm_, "[ UnaryOperation");
5825
5826  Token::Value op = node->op();
5827
5828  if (op == Token::NOT) {
5829    LoadCondition(node->expression(), false_target(), true_target(), true);
5830    // LoadCondition may (and usually does) leave a test and branch to
5831    // be emitted by the caller.  In that case, negate the condition.
5832    if (has_cc()) cc_reg_ = NegateCondition(cc_reg_);
5833
5834  } else if (op == Token::DELETE) {
5835    Property* property = node->expression()->AsProperty();
5836    Variable* variable = node->expression()->AsVariableProxy()->AsVariable();
5837    if (property != NULL) {
5838      Load(property->obj());
5839      Load(property->key());
5840      frame_->InvokeBuiltin(Builtins::DELETE, CALL_JS, 2);
5841      frame_->EmitPush(r0);
5842
5843    } else if (variable != NULL) {
5844      Slot* slot = variable->AsSlot();
5845      if (variable->is_global()) {
5846        LoadGlobal();
5847        frame_->EmitPush(Operand(variable->name()));
5848        frame_->InvokeBuiltin(Builtins::DELETE, CALL_JS, 2);
5849        frame_->EmitPush(r0);
5850
5851      } else if (slot != NULL && slot->type() == Slot::LOOKUP) {
5852        // Look up the context holding the named variable.
5853        frame_->EmitPush(cp);
5854        frame_->EmitPush(Operand(variable->name()));
5855        frame_->CallRuntime(Runtime::kLookupContext, 2);
5856        // r0: context
5857        frame_->EmitPush(r0);
5858        frame_->EmitPush(Operand(variable->name()));
5859        frame_->InvokeBuiltin(Builtins::DELETE, CALL_JS, 2);
5860        frame_->EmitPush(r0);
5861
5862      } else {
5863        // Default: Result of deleting non-global, not dynamically
5864        // introduced variables is false.
5865        frame_->EmitPushRoot(Heap::kFalseValueRootIndex);
5866      }
5867
5868    } else {
5869      // Default: Result of deleting expressions is true.
5870      Load(node->expression());  // may have side-effects
5871      frame_->Drop();
5872      frame_->EmitPushRoot(Heap::kTrueValueRootIndex);
5873    }
5874
5875  } else if (op == Token::TYPEOF) {
5876    // Special case for loading the typeof expression; see comment on
5877    // LoadTypeofExpression().
5878    LoadTypeofExpression(node->expression());
5879    frame_->CallRuntime(Runtime::kTypeof, 1);
5880    frame_->EmitPush(r0);  // r0 has result
5881
5882  } else {
5883    bool can_overwrite = node->expression()->ResultOverwriteAllowed();
5884    UnaryOverwriteMode overwrite =
5885        can_overwrite ? UNARY_OVERWRITE : UNARY_NO_OVERWRITE;
5886
5887    bool no_negative_zero = node->expression()->no_negative_zero();
5888    Load(node->expression());
5889    switch (op) {
5890      case Token::NOT:
5891      case Token::DELETE:
5892      case Token::TYPEOF:
5893        UNREACHABLE();  // handled above
5894        break;
5895
5896      case Token::SUB: {
5897        frame_->PopToR0();
5898        GenericUnaryOpStub stub(
5899            Token::SUB,
5900            overwrite,
5901            NO_UNARY_FLAGS,
5902            no_negative_zero ? kIgnoreNegativeZero : kStrictNegativeZero);
5903        frame_->CallStub(&stub, 0);
5904        frame_->EmitPush(r0);  // r0 has result
5905        break;
5906      }
5907
5908      case Token::BIT_NOT: {
5909        Register tos = frame_->PopToRegister();
5910        JumpTarget not_smi_label;
5911        JumpTarget continue_label;
5912        // Smi check.
5913        __ tst(tos, Operand(kSmiTagMask));
5914        not_smi_label.Branch(ne);
5915
5916        __ mvn(tos, Operand(tos));
5917        __ bic(tos, tos, Operand(kSmiTagMask));  // Bit-clear inverted smi-tag.
5918        frame_->EmitPush(tos);
5919        // The fast case is the first to jump to the continue label, so it gets
5920        // to decide the virtual frame layout.
5921        continue_label.Jump();
5922
5923        not_smi_label.Bind();
5924        frame_->SpillAll();
5925        __ Move(r0, tos);
5926        GenericUnaryOpStub stub(Token::BIT_NOT,
5927                                overwrite,
5928                                NO_UNARY_SMI_CODE_IN_STUB);
5929        frame_->CallStub(&stub, 0);
5930        frame_->EmitPush(r0);
5931
5932        continue_label.Bind();
5933        break;
5934      }
5935
5936      case Token::VOID:
5937        frame_->Drop();
5938        frame_->EmitPushRoot(Heap::kUndefinedValueRootIndex);
5939        break;
5940
5941      case Token::ADD: {
5942        Register tos = frame_->Peek();
5943        // Smi check.
5944        JumpTarget continue_label;
5945        __ tst(tos, Operand(kSmiTagMask));
5946        continue_label.Branch(eq);
5947
5948        frame_->InvokeBuiltin(Builtins::TO_NUMBER, CALL_JS, 1);
5949        frame_->EmitPush(r0);
5950
5951        continue_label.Bind();
5952        break;
5953      }
5954      default:
5955        UNREACHABLE();
5956    }
5957  }
5958  ASSERT(!has_valid_frame() ||
5959         (has_cc() && frame_->height() == original_height) ||
5960         (!has_cc() && frame_->height() == original_height + 1));
5961}
5962
5963
5964class DeferredCountOperation: public DeferredCode {
5965 public:
5966  DeferredCountOperation(Register value,
5967                         bool is_increment,
5968                         bool is_postfix,
5969                         int target_size)
5970      : value_(value),
5971        is_increment_(is_increment),
5972        is_postfix_(is_postfix),
5973        target_size_(target_size) {}
5974
5975  virtual void Generate() {
5976    VirtualFrame copied_frame(*frame_state()->frame());
5977
5978    Label slow;
5979    // Check for smi operand.
5980    __ tst(value_, Operand(kSmiTagMask));
5981    __ b(ne, &slow);
5982
5983    // Revert optimistic increment/decrement.
5984    if (is_increment_) {
5985      __ sub(value_, value_, Operand(Smi::FromInt(1)));
5986    } else {
5987      __ add(value_, value_, Operand(Smi::FromInt(1)));
5988    }
5989
5990    // Slow case: Convert to number.  At this point the
5991    // value to be incremented is in the value register.
5992    __ bind(&slow);
5993
5994    // Convert the operand to a number.
5995    copied_frame.EmitPush(value_);
5996
5997    copied_frame.InvokeBuiltin(Builtins::TO_NUMBER, CALL_JS, 1);
5998
5999    if (is_postfix_) {
6000      // Postfix: store to result (on the stack).
6001      __ str(r0,  MemOperand(sp, target_size_ * kPointerSize));
6002    }
6003
6004    copied_frame.EmitPush(r0);
6005    copied_frame.EmitPush(Operand(Smi::FromInt(1)));
6006
6007    if (is_increment_) {
6008      copied_frame.CallRuntime(Runtime::kNumberAdd, 2);
6009    } else {
6010      copied_frame.CallRuntime(Runtime::kNumberSub, 2);
6011    }
6012
6013    __ Move(value_, r0);
6014
6015    copied_frame.MergeTo(frame_state()->frame());
6016  }
6017
6018 private:
6019  Register value_;
6020  bool is_increment_;
6021  bool is_postfix_;
6022  int target_size_;
6023};
6024
6025
6026void CodeGenerator::VisitCountOperation(CountOperation* node) {
6027#ifdef DEBUG
6028  int original_height = frame_->height();
6029#endif
6030  Comment cmnt(masm_, "[ CountOperation");
6031  VirtualFrame::RegisterAllocationScope scope(this);
6032
6033  bool is_postfix = node->is_postfix();
6034  bool is_increment = node->op() == Token::INC;
6035
6036  Variable* var = node->expression()->AsVariableProxy()->AsVariable();
6037  bool is_const = (var != NULL && var->mode() == Variable::CONST);
6038  bool is_slot = (var != NULL && var->mode() == Variable::VAR);
6039
6040  if (!is_const && is_slot && type_info(var->AsSlot()).IsSmi()) {
6041    // The type info declares that this variable is always a Smi.  That
6042    // means it is a Smi both before and after the increment/decrement.
6043    // Let's make use of that to generate a minimal count operation.
6044    Reference target(this, node->expression(), !is_const);
6045    ASSERT(!target.is_illegal());
6046    target.GetValue();  // Pushes the value.
6047    Register value = frame_->PopToRegister();
6048    if (is_postfix) frame_->EmitPush(value);
6049    if (is_increment) {
6050      __ add(value, value, Operand(Smi::FromInt(1)));
6051    } else {
6052      __ sub(value, value, Operand(Smi::FromInt(1)));
6053    }
6054    frame_->EmitPush(value);
6055    target.SetValue(NOT_CONST_INIT, LIKELY_SMI);
6056    if (is_postfix) frame_->Pop();
6057    ASSERT_EQ(original_height + 1, frame_->height());
6058    return;
6059  }
6060
6061  // If it's a postfix expression and its result is not ignored and the
6062  // reference is non-trivial, then push a placeholder on the stack now
6063  // to hold the result of the expression.
6064  bool placeholder_pushed = false;
6065  if (!is_slot && is_postfix) {
6066    frame_->EmitPush(Operand(Smi::FromInt(0)));
6067    placeholder_pushed = true;
6068  }
6069
6070  // A constant reference is not saved to, so a constant reference is not a
6071  // compound assignment reference.
6072  { Reference target(this, node->expression(), !is_const);
6073    if (target.is_illegal()) {
6074      // Spoof the virtual frame to have the expected height (one higher
6075      // than on entry).
6076      if (!placeholder_pushed) frame_->EmitPush(Operand(Smi::FromInt(0)));
6077      ASSERT_EQ(original_height + 1, frame_->height());
6078      return;
6079    }
6080
6081    // This pushes 0, 1 or 2 words onto the stack that are used later when
6082    // updating the target.  It also pushes the current value of the target.
6083    target.GetValue();
6084
6085    bool value_is_known_smi = frame_->KnownSmiAt(0);
6086    Register value = frame_->PopToRegister();
6087
6088    // Postfix: Store the old value as the result.
6089    if (placeholder_pushed) {
6090      frame_->SetElementAt(value, target.size());
6091    } else if (is_postfix) {
6092      frame_->EmitPush(value);
6093      __ mov(VirtualFrame::scratch0(), value);
6094      value = VirtualFrame::scratch0();
6095    }
6096
6097    // We can't use any type information here since the virtual frame from the
6098    // deferred code may have lost information and we can't merge a virtual
6099    // frame with less specific type knowledge to a virtual frame with more
6100    // specific knowledge that has already used that specific knowledge to
6101    // generate code.
6102    frame_->ForgetTypeInfo();
6103
6104    // The constructor here will capture the current virtual frame and use it as
6105    // the frame to merge back to after the deferred code has run.  No virtual
6106    // frame changes are allowed from here until the 'BindExit' below.
6107    DeferredCode* deferred =
6108        new DeferredCountOperation(value,
6109                                   is_increment,
6110                                   is_postfix,
6111                                   target.size());
6112    if (!value_is_known_smi) {
6113      // Check for smi operand.
6114      __ tst(value, Operand(kSmiTagMask));
6115
6116      deferred->Branch(ne);
6117    }
6118
6119    // Perform optimistic increment/decrement.
6120    if (is_increment) {
6121      __ add(value, value, Operand(Smi::FromInt(1)), SetCC);
6122    } else {
6123      __ sub(value, value, Operand(Smi::FromInt(1)), SetCC);
6124    }
6125
6126    // If increment/decrement overflows, go to deferred code.
6127    deferred->Branch(vs);
6128
6129    deferred->BindExit();
6130
6131    // Store the new value in the target if not const.
6132    // At this point the answer is in the value register.
6133    frame_->EmitPush(value);
6134    // Set the target with the result, leaving the result on
6135    // top of the stack.  Removes the target from the stack if
6136    // it has a non-zero size.
6137    if (!is_const) target.SetValue(NOT_CONST_INIT, LIKELY_SMI);
6138  }
6139
6140  // Postfix: Discard the new value and use the old.
6141  if (is_postfix) frame_->Pop();
6142  ASSERT_EQ(original_height + 1, frame_->height());
6143}
6144
6145
6146void CodeGenerator::GenerateLogicalBooleanOperation(BinaryOperation* node) {
6147  // According to ECMA-262 section 11.11, page 58, the binary logical
6148  // operators must yield the result of one of the two expressions
6149  // before any ToBoolean() conversions. This means that the value
6150  // produced by a && or || operator is not necessarily a boolean.
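  // For example (illustrative):
  //   ""  || "default"   evaluates to "default"
  //   0   && compute()   evaluates to 0 without ever calling compute()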
6151
6152  // NOTE: If the left hand side produces a materialized value (not in
6153  // the CC register), we force the right hand side to do the
6154  // same. This is necessary because we may have to branch to the exit
6155  // after evaluating the left hand side (due to the shortcut
6156  // semantics), but the compiler must (statically) know if the result
6157  // of compiling the binary operation is materialized or not.
6158  if (node->op() == Token::AND) {
6159    JumpTarget is_true;
6160    LoadCondition(node->left(), &is_true, false_target(), false);
6161    if (has_valid_frame() && !has_cc()) {
6162      // The left-hand side result is on top of the virtual frame.
6163      JumpTarget pop_and_continue;
6164      JumpTarget exit;
6165
6166      frame_->Dup();
6167      // Avoid popping the result if it converts to 'false' using the
6168      // standard ToBoolean() conversion as described in ECMA-262,
6169      // section 9.2, page 30.
6170      ToBoolean(&pop_and_continue, &exit);
6171      Branch(false, &exit);
6172
6173      // Pop the result of evaluating the first part.
6174      pop_and_continue.Bind();
6175      frame_->Pop();
6176
6177      // Evaluate right side expression.
6178      is_true.Bind();
6179      Load(node->right());
6180
6181      // Exit (always with a materialized value).
6182      exit.Bind();
6183    } else if (has_cc() || is_true.is_linked()) {
6184      // The left-hand side is either (a) partially compiled to
6185      // control flow with a final branch left to emit or (b) fully
6186      // compiled to control flow and possibly true.
6187      if (has_cc()) {
6188        Branch(false, false_target());
6189      }
6190      is_true.Bind();
6191      LoadCondition(node->right(), true_target(), false_target(), false);
6192    } else {
6193      // Nothing to do.
6194      ASSERT(!has_valid_frame() && !has_cc() && !is_true.is_linked());
6195    }
6196
6197  } else {
6198    ASSERT(node->op() == Token::OR);
6199    JumpTarget is_false;
6200    LoadCondition(node->left(), true_target(), &is_false, false);
6201    if (has_valid_frame() && !has_cc()) {
6202      // The left-hand side result is on top of the virtual frame.
6203      JumpTarget pop_and_continue;
6204      JumpTarget exit;
6205
6206      frame_->Dup();
6207      // Avoid popping the result if it converts to 'true' using the
6208      // standard ToBoolean() conversion as described in ECMA-262,
6209      // section 9.2, page 30.
6210      ToBoolean(&exit, &pop_and_continue);
6211      Branch(true, &exit);
6212
6213      // Pop the result of evaluating the first part.
6214      pop_and_continue.Bind();
6215      frame_->Pop();
6216
6217      // Evaluate right side expression.
6218      is_false.Bind();
6219      Load(node->right());
6220
6221      // Exit (always with a materialized value).
6222      exit.Bind();
6223    } else if (has_cc() || is_false.is_linked()) {
6224      // The left-hand side is either (a) partially compiled to
6225      // control flow with a final branch left to emit or (b) fully
6226      // compiled to control flow and possibly false.
6227      if (has_cc()) {
6228        Branch(true, true_target());
6229      }
6230      is_false.Bind();
6231      LoadCondition(node->right(), true_target(), false_target(), false);
6232    } else {
6233      // Nothing to do.
6234      ASSERT(!has_valid_frame() && !has_cc() && !is_false.is_linked());
6235    }
6236  }
6237}
6238
6239
6240void CodeGenerator::VisitBinaryOperation(BinaryOperation* node) {
6241#ifdef DEBUG
6242  int original_height = frame_->height();
6243#endif
6244  Comment cmnt(masm_, "[ BinaryOperation");
6245
6246  if (node->op() == Token::AND || node->op() == Token::OR) {
6247    GenerateLogicalBooleanOperation(node);
6248  } else {
6249    // Optimize for the case where (at least) one of the expressions
6250    // is a literal small integer.
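    // For example (illustrative): in 'x + 1' the right operand is a smi
    // literal, so the SmiOperation() path below emits specialized code for
    // adding the constant instead of the generic binary operation code.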
6251    Literal* lliteral = node->left()->AsLiteral();
6252    Literal* rliteral = node->right()->AsLiteral();
6253    // NOTE: The code below assumes that the slow cases (calls to runtime)
6254    // never return a constant/immutable object.
6255    bool overwrite_left = node->left()->ResultOverwriteAllowed();
6256    bool overwrite_right = node->right()->ResultOverwriteAllowed();
6257
6258    if (rliteral != NULL && rliteral->handle()->IsSmi()) {
6259      VirtualFrame::RegisterAllocationScope scope(this);
6260      Load(node->left());
6261      if (frame_->KnownSmiAt(0)) overwrite_left = false;
6262      SmiOperation(node->op(),
6263                   rliteral->handle(),
6264                   false,
6265                   overwrite_left ? OVERWRITE_LEFT : NO_OVERWRITE);
6266    } else if (lliteral != NULL && lliteral->handle()->IsSmi()) {
6267      VirtualFrame::RegisterAllocationScope scope(this);
6268      Load(node->right());
6269      if (frame_->KnownSmiAt(0)) overwrite_right = false;
6270      SmiOperation(node->op(),
6271                   lliteral->handle(),
6272                   true,
6273                   overwrite_right ? OVERWRITE_RIGHT : NO_OVERWRITE);
6274    } else {
6275      GenerateInlineSmi inline_smi =
6276          loop_nesting() > 0 ? GENERATE_INLINE_SMI : DONT_GENERATE_INLINE_SMI;
6277      if (lliteral != NULL) {
6278        ASSERT(!lliteral->handle()->IsSmi());
6279        inline_smi = DONT_GENERATE_INLINE_SMI;
6280      }
6281      if (rliteral != NULL) {
6282        ASSERT(!rliteral->handle()->IsSmi());
6283        inline_smi = DONT_GENERATE_INLINE_SMI;
6284      }
6285      VirtualFrame::RegisterAllocationScope scope(this);
6286      OverwriteMode overwrite_mode = NO_OVERWRITE;
6287      if (overwrite_left) {
6288        overwrite_mode = OVERWRITE_LEFT;
6289      } else if (overwrite_right) {
6290        overwrite_mode = OVERWRITE_RIGHT;
6291      }
6292      Load(node->left());
6293      Load(node->right());
6294      GenericBinaryOperation(node->op(), overwrite_mode, inline_smi);
6295    }
6296  }
6297  ASSERT(!has_valid_frame() ||
6298         (has_cc() && frame_->height() == original_height) ||
6299         (!has_cc() && frame_->height() == original_height + 1));
6300}
6301
6302
6303void CodeGenerator::VisitThisFunction(ThisFunction* node) {
6304#ifdef DEBUG
6305  int original_height = frame_->height();
6306#endif
6307  frame_->EmitPush(MemOperand(frame_->Function()));
6308  ASSERT_EQ(original_height + 1, frame_->height());
6309}
6310
6311
6312void CodeGenerator::VisitCompareOperation(CompareOperation* node) {
6313#ifdef DEBUG
6314  int original_height = frame_->height();
6315#endif
6316  Comment cmnt(masm_, "[ CompareOperation");
6317
6318  VirtualFrame::RegisterAllocationScope nonspilled_scope(this);
6319
6320  // Get the expressions from the node.
6321  Expression* left = node->left();
6322  Expression* right = node->right();
6323  Token::Value op = node->op();
6324
6325  // To make typeof testing for natives implemented in JavaScript really
6326  // efficient, we generate special code for expressions of the form:
6327  // 'typeof <expression> == <string>'.
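  // For example (illustrative): for "typeof x == 'number'" the code below
  // tests the smi tag bit of x and then compares its map against the heap
  // number map, without ever materializing the typeof result string.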
6328  UnaryOperation* operation = left->AsUnaryOperation();
6329  if ((op == Token::EQ || op == Token::EQ_STRICT) &&
6330      (operation != NULL && operation->op() == Token::TYPEOF) &&
6331      (right->AsLiteral() != NULL &&
6332       right->AsLiteral()->handle()->IsString())) {
6333    Handle<String> check(String::cast(*right->AsLiteral()->handle()));
6334
6335    // Load the operand, move it to a register.
6336    LoadTypeofExpression(operation->expression());
6337    Register tos = frame_->PopToRegister();
6338
6339    Register scratch = VirtualFrame::scratch0();
6340
6341    if (check->Equals(Heap::number_symbol())) {
6342      __ tst(tos, Operand(kSmiTagMask));
6343      true_target()->Branch(eq);
6344      __ ldr(tos, FieldMemOperand(tos, HeapObject::kMapOffset));
6345      __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
6346      __ cmp(tos, ip);
6347      cc_reg_ = eq;
6348
6349    } else if (check->Equals(Heap::string_symbol())) {
6350      __ tst(tos, Operand(kSmiTagMask));
6351      false_target()->Branch(eq);
6352
6353      __ ldr(tos, FieldMemOperand(tos, HeapObject::kMapOffset));
6354
6355      // It can be an undetectable string object.
6356      __ ldrb(scratch, FieldMemOperand(tos, Map::kBitFieldOffset));
6357      __ and_(scratch, scratch, Operand(1 << Map::kIsUndetectable));
6358      __ cmp(scratch, Operand(1 << Map::kIsUndetectable));
6359      false_target()->Branch(eq);
6360
6361      __ ldrb(scratch, FieldMemOperand(tos, Map::kInstanceTypeOffset));
6362      __ cmp(scratch, Operand(FIRST_NONSTRING_TYPE));
6363      cc_reg_ = lt;
6364
6365    } else if (check->Equals(Heap::boolean_symbol())) {
6366      __ LoadRoot(ip, Heap::kTrueValueRootIndex);
6367      __ cmp(tos, ip);
6368      true_target()->Branch(eq);
6369      __ LoadRoot(ip, Heap::kFalseValueRootIndex);
6370      __ cmp(tos, ip);
6371      cc_reg_ = eq;
6372
6373    } else if (check->Equals(Heap::undefined_symbol())) {
6374      __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
6375      __ cmp(tos, ip);
6376      true_target()->Branch(eq);
6377
6378      __ tst(tos, Operand(kSmiTagMask));
6379      false_target()->Branch(eq);
6380
6381      // It can be an undetectable object.
6382      __ ldr(tos, FieldMemOperand(tos, HeapObject::kMapOffset));
6383      __ ldrb(scratch, FieldMemOperand(tos, Map::kBitFieldOffset));
6384      __ and_(scratch, scratch, Operand(1 << Map::kIsUndetectable));
6385      __ cmp(scratch, Operand(1 << Map::kIsUndetectable));
6386
6387      cc_reg_ = eq;
6388
6389    } else if (check->Equals(Heap::function_symbol())) {
6390      __ tst(tos, Operand(kSmiTagMask));
6391      false_target()->Branch(eq);
6392      Register map_reg = scratch;
6393      __ CompareObjectType(tos, map_reg, tos, JS_FUNCTION_TYPE);
6394      true_target()->Branch(eq);
6395      // Regular expressions are callable so typeof == 'function'.
6396      __ CompareInstanceType(map_reg, tos, JS_REGEXP_TYPE);
6397      cc_reg_ = eq;
6398
6399    } else if (check->Equals(Heap::object_symbol())) {
6400      __ tst(tos, Operand(kSmiTagMask));
6401      false_target()->Branch(eq);
6402
6403      __ LoadRoot(ip, Heap::kNullValueRootIndex);
6404      __ cmp(tos, ip);
6405      true_target()->Branch(eq);
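      // Illustrative note: 'typeof null' is "object", which is why null was
      // accepted explicitly above before the JS object type range check.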
6406
6407      Register map_reg = scratch;
6408      __ CompareObjectType(tos, map_reg, tos, JS_REGEXP_TYPE);
6409      false_target()->Branch(eq);
6410
6411      // It can be an undetectable object.
6412      __ ldrb(tos, FieldMemOperand(map_reg, Map::kBitFieldOffset));
6413      __ and_(tos, tos, Operand(1 << Map::kIsUndetectable));
6414      __ cmp(tos, Operand(1 << Map::kIsUndetectable));
6415      false_target()->Branch(eq);
6416
6417      __ ldrb(tos, FieldMemOperand(map_reg, Map::kInstanceTypeOffset));
6418      __ cmp(tos, Operand(FIRST_JS_OBJECT_TYPE));
6419      false_target()->Branch(lt);
6420      __ cmp(tos, Operand(LAST_JS_OBJECT_TYPE));
6421      cc_reg_ = le;
6422
6423    } else {
6424      // Uncommon case: typeof testing against a string literal that is
6425      // never returned from the typeof operator.
6426      false_target()->Jump();
6427    }
6428    ASSERT(!has_valid_frame() ||
6429           (has_cc() && frame_->height() == original_height));
6430    return;
6431  }
6432
6433  switch (op) {
6434    case Token::EQ:
6435      Comparison(eq, left, right, false);
6436      break;
6437
6438    case Token::LT:
6439      Comparison(lt, left, right);
6440      break;
6441
6442    case Token::GT:
6443      Comparison(gt, left, right);
6444      break;
6445
6446    case Token::LTE:
6447      Comparison(le, left, right);
6448      break;
6449
6450    case Token::GTE:
6451      Comparison(ge, left, right);
6452      break;
6453
6454    case Token::EQ_STRICT:
6455      Comparison(eq, left, right, true);
6456      break;
6457
6458    case Token::IN: {
6459      Load(left);
6460      Load(right);
6461      frame_->InvokeBuiltin(Builtins::IN, CALL_JS, 2);
6462      frame_->EmitPush(r0);
6463      break;
6464    }
6465
6466    case Token::INSTANCEOF: {
6467      Load(left);
6468      Load(right);
6469      InstanceofStub stub(InstanceofStub::kNoFlags);
6470      frame_->CallStub(&stub, 2);
6471      // At this point if instanceof succeeded then r0 == 0.
6472      __ tst(r0, Operand(r0));
6473      cc_reg_ = eq;
6474      break;
6475    }
6476
6477    default:
6478      UNREACHABLE();
6479  }
6480  ASSERT((has_cc() && frame_->height() == original_height) ||
6481         (!has_cc() && frame_->height() == original_height + 1));
6482}
6483
6484
6485void CodeGenerator::VisitCompareToNull(CompareToNull* node) {
6486#ifdef DEBUG
6487  int original_height = frame_->height();
6488#endif
6489  Comment cmnt(masm_, "[ CompareToNull");
6490
6491  Load(node->expression());
6492  Register tos = frame_->PopToRegister();
6493  __ LoadRoot(ip, Heap::kNullValueRootIndex);
6494  __ cmp(tos, ip);
6495
6496  // The 'null' value is only equal to 'undefined' if using non-strict
6497  // comparisons.
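  // For example (illustrative): 'x == null' is true when x is null,
  // undefined or an undetectable object, while 'x === null' only matches
  // null itself.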
6498  if (!node->is_strict()) {
6499    true_target()->Branch(eq);
6500    __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
6501    __ cmp(tos, Operand(ip));
6502    true_target()->Branch(eq);
6503
6504    __ tst(tos, Operand(kSmiTagMask));
6505    false_target()->Branch(eq);
6506
6507    // It can be an undetectable object.
6508    __ ldr(tos, FieldMemOperand(tos, HeapObject::kMapOffset));
6509    __ ldrb(tos, FieldMemOperand(tos, Map::kBitFieldOffset));
6510    __ and_(tos, tos, Operand(1 << Map::kIsUndetectable));
6511    __ cmp(tos, Operand(1 << Map::kIsUndetectable));
6512  }
6513
6514  cc_reg_ = eq;
6515  ASSERT(has_cc() && frame_->height() == original_height);
6516}
6517
6518
6519class DeferredReferenceGetNamedValue: public DeferredCode {
6520 public:
6521  explicit DeferredReferenceGetNamedValue(Register receiver,
6522                                          Handle<String> name,
6523                                          bool is_contextual)
6524      : receiver_(receiver),
6525        name_(name),
6526        is_contextual_(is_contextual),
6527        is_dont_delete_(false) {
6528    set_comment(is_contextual
6529                ? "[ DeferredReferenceGetNamedValue (contextual)"
6530                : "[ DeferredReferenceGetNamedValue");
6531  }
6532
6533  virtual void Generate();
6534
6535  void set_is_dont_delete(bool value) {
6536    ASSERT(is_contextual_);
6537    is_dont_delete_ = value;
6538  }
6539
6540 private:
6541  Register receiver_;
6542  Handle<String> name_;
6543  bool is_contextual_;
6544  bool is_dont_delete_;
6545};
6546
6547
6548// Convention for this is that on entry the receiver is in a register that
6549// is not used by the stack.  On exit the answer is found in that same
6550// register and the stack has the same height.
6551void DeferredReferenceGetNamedValue::Generate() {
6552#ifdef DEBUG
6553  int expected_height = frame_state()->frame()->height();
6554#endif
6555  VirtualFrame copied_frame(*frame_state()->frame());
6556  copied_frame.SpillAll();
6557
6558  Register scratch1 = VirtualFrame::scratch0();
6559  Register scratch2 = VirtualFrame::scratch1();
6560  ASSERT(!receiver_.is(scratch1) && !receiver_.is(scratch2));
6561  __ DecrementCounter(&Counters::named_load_inline, 1, scratch1, scratch2);
6562  __ IncrementCounter(&Counters::named_load_inline_miss, 1, scratch1, scratch2);
6563
6564  // Ensure receiver in r0 and name in r2 to match load ic calling convention.
6565  __ Move(r0, receiver_);
6566  __ mov(r2, Operand(name_));
6567
6568  // The rest of the instructions in the deferred code must be together.
6569  { Assembler::BlockConstPoolScope block_const_pool(masm_);
6570    Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
6571    RelocInfo::Mode mode = is_contextual_
6572        ? RelocInfo::CODE_TARGET_CONTEXT
6573        : RelocInfo::CODE_TARGET;
6574    __ Call(ic,  mode);
6575    // We must mark the code just after the call with the correct marker.
6576    MacroAssembler::NopMarkerTypes code_marker;
6577    if (is_contextual_) {
6578      code_marker = is_dont_delete_
6579                   ? MacroAssembler::PROPERTY_ACCESS_INLINED_CONTEXT_DONT_DELETE
6580                   : MacroAssembler::PROPERTY_ACCESS_INLINED_CONTEXT;
6581    } else {
6582      code_marker = MacroAssembler::PROPERTY_ACCESS_INLINED;
6583    }
6584    __ MarkCode(code_marker);
6585
6586    // At this point the answer is in r0.  We move it to the expected register
6587    // if necessary.
6588    __ Move(receiver_, r0);
6589
6590    // Now go back to the frame that we entered with.  This will not overwrite
6591    // the receiver register since that register was not in use when we came
6592    // in.  The instructions emitted by this merge are skipped over by the
6593    // inline load patching mechanism when looking for the branch instruction
6594    // that tells it where the code to patch is.
6595    copied_frame.MergeTo(frame_state()->frame());
6596
6597    // Block the constant pool for one more instruction after leaving this
6598    // constant pool block scope to include the branch instruction ending the
6599    // deferred code.
6600    __ BlockConstPoolFor(1);
6601  }
6602  ASSERT_EQ(expected_height, frame_state()->frame()->height());
6603}
6604
6605
6606class DeferredReferenceGetKeyedValue: public DeferredCode {
6607 public:
6608  DeferredReferenceGetKeyedValue(Register key, Register receiver)
6609      : key_(key), receiver_(receiver) {
6610    set_comment("[ DeferredReferenceGetKeyedValue");
6611  }
6612
6613  virtual void Generate();
6614
6615 private:
6616  Register key_;
6617  Register receiver_;
6618};
6619
6620
6621// Takes the key and receiver in r0 and r1 or vice versa.  Returns the result
6622// in r0.
6623void DeferredReferenceGetKeyedValue::Generate() {
6624  ASSERT((key_.is(r0) && receiver_.is(r1)) ||
6625         (key_.is(r1) && receiver_.is(r0)));
6626
6627  VirtualFrame copied_frame(*frame_state()->frame());
6628  copied_frame.SpillAll();
6629
6630  Register scratch1 = VirtualFrame::scratch0();
6631  Register scratch2 = VirtualFrame::scratch1();
6632  __ DecrementCounter(&Counters::keyed_load_inline, 1, scratch1, scratch2);
6633  __ IncrementCounter(&Counters::keyed_load_inline_miss, 1, scratch1, scratch2);
6634
6635  // Ensure key in r0 and receiver in r1 to match keyed load ic calling
6636  // convention.
6637  if (key_.is(r1)) {
6638    __ Swap(r0, r1, ip);
6639  }
6640
6641  // The rest of the instructions in the deferred code must be together.
6642  { Assembler::BlockConstPoolScope block_const_pool(masm_);
6643    // Call keyed load IC. It has the arguments key and receiver in r0 and r1.
6644    Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
6645    __ Call(ic, RelocInfo::CODE_TARGET);
6646    // The call must be followed by a nop instruction to indicate that the
6647    // keyed load has been inlined.
6648    __ MarkCode(MacroAssembler::PROPERTY_ACCESS_INLINED);
6649
6650    // Now go back to the frame that we entered with.  This will not overwrite
6651    // the receiver or key registers since they were not in use when we came
6652    // in.  The instructions emitted by this merge are skipped over by the
6653    // inline load patching mechanism when looking for the branch instruction
6654    // that tells it where the code to patch is.
6655    copied_frame.MergeTo(frame_state()->frame());
6656
6657    // Block the constant pool for one more instruction after leaving this
6658    // constant pool block scope to include the branch instruction ending the
6659    // deferred code.
6660    __ BlockConstPoolFor(1);
6661  }
6662}
6663
6664
6665class DeferredReferenceSetKeyedValue: public DeferredCode {
6666 public:
6667  DeferredReferenceSetKeyedValue(Register value,
6668                                 Register key,
6669                                 Register receiver)
6670      : value_(value), key_(key), receiver_(receiver) {
6671    set_comment("[ DeferredReferenceSetKeyedValue");
6672  }
6673
6674  virtual void Generate();
6675
6676 private:
6677  Register value_;
6678  Register key_;
6679  Register receiver_;
6680};
6681
6682
6683void DeferredReferenceSetKeyedValue::Generate() {
6684  Register scratch1 = VirtualFrame::scratch0();
6685  Register scratch2 = VirtualFrame::scratch1();
6686  __ DecrementCounter(&Counters::keyed_store_inline, 1, scratch1, scratch2);
6687  __ IncrementCounter(
6688      &Counters::keyed_store_inline_miss, 1, scratch1, scratch2);
6689
6690  // Ensure value in r0, key in r1 and receiver in r2 to match keyed store ic
6691  // calling convention.
6692  if (value_.is(r1)) {
6693    __ Swap(r0, r1, ip);
6694  }
6695  ASSERT(receiver_.is(r2));
6696
6697  // The rest of the instructions in the deferred code must be together.
6698  { Assembler::BlockConstPoolScope block_const_pool(masm_);
6699    // Call keyed store IC. It has the arguments value, key and receiver in r0,
6700    // r1 and r2.
6701    Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
6702    __ Call(ic, RelocInfo::CODE_TARGET);
6703    // The call must be followed by a nop instruction to indicate that the
6704    // keyed store has been inlined.
6705    __ MarkCode(MacroAssembler::PROPERTY_ACCESS_INLINED);
6706
6707    // Block the constant pool for one more instruction after leaving this
6708    // constant pool block scope to include the branch instruction ending the
6709    // deferred code.
6710    __ BlockConstPoolFor(1);
6711  }
6712}
6713
6714
6715class DeferredReferenceSetNamedValue: public DeferredCode {
6716 public:
6717  DeferredReferenceSetNamedValue(Register value,
6718                                 Register receiver,
6719                                 Handle<String> name)
6720      : value_(value), receiver_(receiver), name_(name) {
6721    set_comment("[ DeferredReferenceSetNamedValue");
6722  }
6723
6724  virtual void Generate();
6725
6726 private:
6727  Register value_;
6728  Register receiver_;
6729  Handle<String> name_;
6730};
6731
6732
6733// Takes value in r0, receiver in r1 and returns the result (the
6734// value) in r0.
6735void DeferredReferenceSetNamedValue::Generate() {
6736  // Record the entry frame and spill.
6737  VirtualFrame copied_frame(*frame_state()->frame());
6738  copied_frame.SpillAll();
6739
6740  // Ensure value in r0, receiver in r1 to match store ic calling
6741  // convention.
6742  ASSERT(value_.is(r0) && receiver_.is(r1));
6743  __ mov(r2, Operand(name_));
6744
6745  // The rest of the instructions in the deferred code must be together.
6746  { Assembler::BlockConstPoolScope block_const_pool(masm_);
6747    // Call the named store IC.  It takes the value, receiver and name in r0,
6748    // r1 and r2.
6749    Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
6750    __ Call(ic, RelocInfo::CODE_TARGET);
6751    // The call must be followed by a nop instruction to indicate that the
6752    // named store has been inlined.
6753    __ MarkCode(MacroAssembler::PROPERTY_ACCESS_INLINED);
6754
6755    // Go back to the frame we entered with. The instructions
6756    // generated by this merge are skipped over by the inline store
6757    // patching mechanism when looking for the branch instruction that
6758    // tells it where the code to patch is.
6759    copied_frame.MergeTo(frame_state()->frame());
6760
6761    // Block the constant pool for one more instruction after leaving this
6762    // constant pool block scope to include the branch instruction ending the
6763    // deferred code.
6764    __ BlockConstPoolFor(1);
6765  }
6766}
6767
6768
6769// Consumes the top of stack (the receiver) and pushes the result instead.
6770void CodeGenerator::EmitNamedLoad(Handle<String> name, bool is_contextual) {
6771  bool contextual_load_in_builtin =
6772      is_contextual &&
6773      (Bootstrapper::IsActive() ||
6774      (!info_->closure().is_null() && info_->closure()->IsBuiltin()));
6775
6776  if (scope()->is_global_scope() ||
6777      loop_nesting() == 0 ||
6778      contextual_load_in_builtin) {
6779    Comment cmnt(masm(), "[ Load from named Property");
6780    // Setup the name register and call load IC.
6781    // Set up the name register and call the load IC.
6782                       is_contextual
6783                           ? RelocInfo::CODE_TARGET_CONTEXT
6784                           : RelocInfo::CODE_TARGET);
6785    frame_->EmitPush(r0);  // Push answer.
6786  } else {
6787    // Inline the in-object property case.
6788    Comment cmnt(masm(), is_contextual
6789                             ? "[ Inlined contextual property load"
6790                             : "[ Inlined named property load");
6791
6792    // Counter will be decremented in the deferred code. Placed here to avoid
6793    // having it in the instruction stream below where patching will occur.
6794    if (is_contextual) {
6795      __ IncrementCounter(&Counters::named_load_global_inline, 1,
6796                          frame_->scratch0(), frame_->scratch1());
6797    } else {
6798      __ IncrementCounter(&Counters::named_load_inline, 1,
6799                          frame_->scratch0(), frame_->scratch1());
6800    }
6801
6802    // The following instructions are the inlined load of an in-object property.
6803    // Parts of this code are patched, so the exact instructions generated need
6804    // to be fixed.  Therefore the constant pool is blocked while generating
6805    // this code.
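    // Illustrative note (not part of the original source): the null map and
    // the zero offset emitted below are placeholders; the inline cache code
    // (LoadIC::PatchInlinedLoad / PatchInlinedContextualLoad in ic-arm.cc)
    // later patches in the receiver's actual map and the real property
    // offset or cell.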
6806
6807    // Load the receiver from the stack.
6808    Register receiver = frame_->PopToRegister();
6809
6810    DeferredReferenceGetNamedValue* deferred =
6811        new DeferredReferenceGetNamedValue(receiver, name, is_contextual);
6812
6813    bool is_dont_delete = false;
6814    if (is_contextual) {
6815      if (!info_->closure().is_null()) {
6816        // When doing lazy compilation we can check if the global cell
6817        // already exists and use its "don't delete" status as a hint.
6818        AssertNoAllocation no_gc;
6819        v8::internal::GlobalObject* global_object =
6820            info_->closure()->context()->global();
6821        LookupResult lookup;
6822        global_object->LocalLookupRealNamedProperty(*name, &lookup);
6823        if (lookup.IsProperty() && lookup.type() == NORMAL) {
6824          ASSERT(lookup.holder() == global_object);
6825          ASSERT(global_object->property_dictionary()->ValueAt(
6826              lookup.GetDictionaryEntry())->IsJSGlobalPropertyCell());
6827          is_dont_delete = lookup.IsDontDelete();
6828        }
6829      }
6830      if (is_dont_delete) {
6831        __ IncrementCounter(&Counters::dont_delete_hint_hit, 1,
6832                            frame_->scratch0(), frame_->scratch1());
6833      }
6834    }
6835
6836    { Assembler::BlockConstPoolScope block_const_pool(masm_);
6837      if (!is_contextual) {
6838        // Check that the receiver is a heap object.
6839        __ tst(receiver, Operand(kSmiTagMask));
6840        deferred->Branch(eq);
6841      }
6842
6843      // Check for the_hole_value if necessary.
6844      // Below we rely on the number of instructions generated, and we can't
6845      // cope with the Check macro which does not generate a fixed number of
6846      // instructions.
6847      Label skip, check_the_hole, cont;
6848      if (FLAG_debug_code && is_contextual && is_dont_delete) {
6849        __ b(&skip);
6850        __ bind(&check_the_hole);
6851        __ Check(ne, "DontDelete cells can't contain the hole");
6852        __ b(&cont);
6853        __ bind(&skip);
6854      }
6855
6856#ifdef DEBUG
6857      int InlinedNamedLoadInstructions = 5;
6858      Label check_inlined_codesize;
6859      masm_->bind(&check_inlined_codesize);
6860#endif
6861
6862      Register scratch = VirtualFrame::scratch0();
6863      Register scratch2 = VirtualFrame::scratch1();
6864
6865      // Check the map. The null map used below is patched by the inline cache
6866      // code.  Therefore we can't use a LoadRoot call.
6867      __ ldr(scratch, FieldMemOperand(receiver, HeapObject::kMapOffset));
6868      __ mov(scratch2, Operand(Factory::null_value()));
6869      __ cmp(scratch, scratch2);
6870      deferred->Branch(ne);
6871
6872      if (is_contextual) {
6873#ifdef DEBUG
6874        InlinedNamedLoadInstructions += 1;
6875#endif
6876        // Load the (initially invalid) cell and get its value.
6877        masm()->mov(receiver, Operand(Factory::null_value()));
6878        __ ldr(receiver,
6879               FieldMemOperand(receiver, JSGlobalPropertyCell::kValueOffset));
6880
6881        deferred->set_is_dont_delete(is_dont_delete);
6882
6883        if (!is_dont_delete) {
6884#ifdef DEBUG
6885          InlinedNamedLoadInstructions += 3;
6886#endif
6887          __ cmp(receiver, Operand(Factory::the_hole_value()));
6888          deferred->Branch(eq);
6889        } else if (FLAG_debug_code) {
6890#ifdef DEBUG
6891          InlinedNamedLoadInstructions += 3;
6892#endif
6893          __ cmp(receiver, Operand(Factory::the_hole_value()));
6894          __ b(&check_the_hole, eq);
6895          __ bind(&cont);
6896        }
6897      } else {
6898        // Initially use an invalid index. The index will be patched by the
6899        // inline cache code.
6900        __ ldr(receiver, MemOperand(receiver, 0));
6901      }
6902
6903      // Make sure that the expected number of instructions are generated.
6904      // If the code before is updated, the offsets in ic-arm.cc
6905      // LoadIC::PatchInlinedContextualLoad and PatchInlinedLoad need
6906      // to be updated.
6907      ASSERT_EQ(InlinedNamedLoadInstructions,
6908                masm_->InstructionsGeneratedSince(&check_inlined_codesize));
6909    }
6910
6911    deferred->BindExit();
6912    // At this point the receiver register has the result, either from the
6913    // deferred code or from the inlined code.
6914    frame_->EmitPush(receiver);
6915  }
6916}
6917
6918
6919void CodeGenerator::EmitNamedStore(Handle<String> name, bool is_contextual) {
6920#ifdef DEBUG
6921  int expected_height = frame()->height() - (is_contextual ? 1 : 2);
6922#endif
6923
6924  Result result;
6925  if (is_contextual || scope()->is_global_scope() || loop_nesting() == 0) {
6926    frame()->CallStoreIC(name, is_contextual);
6927  } else {
6928    // Inline the in-object property case.
6929    JumpTarget slow, done;
6930
6931    // Get the value and receiver from the stack.
6932    frame()->PopToR0();
6933    Register value = r0;
6934    frame()->PopToR1();
6935    Register receiver = r1;
6936
6937    DeferredReferenceSetNamedValue* deferred =
6938        new DeferredReferenceSetNamedValue(value, receiver, name);
6939
6940    // Check that the receiver is a heap object.
6941    __ tst(receiver, Operand(kSmiTagMask));
6942    deferred->Branch(eq);
6943
6944    // The following instructions are part of the inlined in-object
6945    // property store code which can be patched.  Therefore the exact
6946    // number of instructions generated must be fixed, so the constant
6947    // pool is blocked while generating this code.
6948    { Assembler::BlockConstPoolScope block_const_pool(masm_);
6949      Register scratch0 = VirtualFrame::scratch0();
6950      Register scratch1 = VirtualFrame::scratch1();
6951
6952      // Check the map. Initially use an invalid map to force a
6953      // failure. The map check will be patched in the runtime system.
6954      __ ldr(scratch1, FieldMemOperand(receiver, HeapObject::kMapOffset));
6955
6956#ifdef DEBUG
6957      Label check_inlined_codesize;
6958      masm_->bind(&check_inlined_codesize);
6959#endif
6960      __ mov(scratch0, Operand(Factory::null_value()));
6961      __ cmp(scratch0, scratch1);
6962      deferred->Branch(ne);
6963
6964      int offset = 0;
6965      __ str(value, MemOperand(receiver, offset));
6966
6967      // Update the write barrier and record its size. We do not use
6968      // the RecordWrite macro here because we want the offset
6969      // addition instruction first to make it easy to patch.
6970      Label record_write_start, record_write_done;
6971      __ bind(&record_write_start);
6972      // Add offset into the object.
6973      __ add(scratch0, receiver, Operand(offset));
6974      // Test that the object is not in the new space.  We cannot set
6975      // region marks for new space pages.
6976      __ InNewSpace(receiver, scratch1, eq, &record_write_done);
6977      // Record the actual write.
6978      __ RecordWriteHelper(receiver, scratch0, scratch1);
6979      __ bind(&record_write_done);
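      // Illustrative note (not part of the original source): the barrier
      // above marks the page region containing the updated slot so the GC
      // will rescan it for pointers into new space; new-space receivers are
      // skipped because their pages carry no region marks.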
6980      // Clobber all input registers when running with the debug-code flag
6981      // turned on to provoke errors.
6982      if (FLAG_debug_code) {
6983        __ mov(receiver, Operand(BitCast<int32_t>(kZapValue)));
6984        __ mov(scratch0, Operand(BitCast<int32_t>(kZapValue)));
6985        __ mov(scratch1, Operand(BitCast<int32_t>(kZapValue)));
6986      }
6987      // Check that this is the first inlined write barrier or that
6988      // this inlined write barrier has the same size as all the other
6989      // inlined write barriers.
6990      ASSERT((inlined_write_barrier_size_ == -1) ||
6991             (inlined_write_barrier_size_ ==
6992              masm()->InstructionsGeneratedSince(&record_write_start)));
6993      inlined_write_barrier_size_ =
6994          masm()->InstructionsGeneratedSince(&record_write_start);
6995
6996      // Make sure that the expected number of instructions are generated.
6997      ASSERT_EQ(GetInlinedNamedStoreInstructionsAfterPatch(),
6998                masm()->InstructionsGeneratedSince(&check_inlined_codesize));
6999    }
7000    deferred->BindExit();
7001  }
7002  ASSERT_EQ(expected_height, frame()->height());
7003}
7004
7005
7006void CodeGenerator::EmitKeyedLoad() {
7007  if (loop_nesting() == 0) {
7008    Comment cmnt(masm_, "[ Load from keyed property");
7009    frame_->CallKeyedLoadIC();
7010  } else {
7011    // Inline the keyed load.
7012    Comment cmnt(masm_, "[ Inlined load from keyed property");
7013
7014    // Counter will be decremented in the deferred code. Placed here to avoid
7015    // having it in the instruction stream below where patching will occur.
7016    __ IncrementCounter(&Counters::keyed_load_inline, 1,
7017                        frame_->scratch0(), frame_->scratch1());
7018
7019    // Load the key and receiver from the stack.
7020    bool key_is_known_smi = frame_->KnownSmiAt(0);
7021    Register key = frame_->PopToRegister();
7022    Register receiver = frame_->PopToRegister(key);
7023
7024    // The deferred code expects key and receiver in registers.
7025    DeferredReferenceGetKeyedValue* deferred =
7026        new DeferredReferenceGetKeyedValue(key, receiver);
7027
7028    // Check that the receiver is a heap object.
7029    __ tst(receiver, Operand(kSmiTagMask));
7030    deferred->Branch(eq);
7031
7032    // The following instructions are part of the inlined keyed property load
7033    // code which can be patched.  Therefore the exact number of instructions
7034    // generated needs to be fixed, so the constant pool is blocked while
7035    // generating this code.
7036    { Assembler::BlockConstPoolScope block_const_pool(masm_);
7037      Register scratch1 = VirtualFrame::scratch0();
7038      Register scratch2 = VirtualFrame::scratch1();
7039      // Check the map. The null map used below is patched by the inline cache
7040      // code.
7041      __ ldr(scratch1, FieldMemOperand(receiver, HeapObject::kMapOffset));
7042
7043      // Check that the key is a smi.
7044      if (!key_is_known_smi) {
7045        __ tst(key, Operand(kSmiTagMask));
7046        deferred->Branch(ne);
7047      }
7048
7049#ifdef DEBUG
7050      Label check_inlined_codesize;
7051      masm_->bind(&check_inlined_codesize);
7052#endif
7053      __ mov(scratch2, Operand(Factory::null_value()));
7054      __ cmp(scratch1, scratch2);
7055      deferred->Branch(ne);
7056
7057      // Get the elements array from the receiver.
7058      __ ldr(scratch1, FieldMemOperand(receiver, JSObject::kElementsOffset));
7059      __ AssertFastElements(scratch1);
7060
7061      // Check that key is within bounds. Use unsigned comparison to handle
7062      // negative keys.
7063      __ ldr(scratch2, FieldMemOperand(scratch1, FixedArray::kLengthOffset));
7064      __ cmp(scratch2, key);
7065      deferred->Branch(ls);  // Unsigned less equal.
7066
7067      // Load and check that the result is not the hole (key is a smi).
7068      __ LoadRoot(scratch2, Heap::kTheHoleValueRootIndex);
7069      __ add(scratch1,
7070             scratch1,
7071             Operand(FixedArray::kHeaderSize - kHeapObjectTag));
7072      __ ldr(scratch1,
7073             MemOperand(scratch1, key, LSL,
7074                        kPointerSizeLog2 - (kSmiTagSize + kSmiShiftSize)));
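      // Illustrative note (assuming 32-bit smis): the key n is tagged as
      // n << 1 and kPointerSizeLog2 - (kSmiTagSize + kSmiShiftSize) == 1,
      // so the LSL above turns the tagged key directly into the byte offset
      // 4 * n of element n.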
7075      __ cmp(scratch1, scratch2);
7076      deferred->Branch(eq);
7077
7078      __ mov(r0, scratch1);
7079      // Make sure that the expected number of instructions are generated.
7080      ASSERT_EQ(GetInlinedKeyedLoadInstructionsAfterPatch(),
7081                masm_->InstructionsGeneratedSince(&check_inlined_codesize));
7082    }
7083
7084    deferred->BindExit();
7085  }
7086}
7087
7088
7089void CodeGenerator::EmitKeyedStore(StaticType* key_type,
7090                                   WriteBarrierCharacter wb_info) {
7091  // Generate inlined version of the keyed store if the code is in a loop
7092  // and the key is likely to be a smi.
7093  if (loop_nesting() > 0 && key_type->IsLikelySmi()) {
7094    // Inline the keyed store.
7095    Comment cmnt(masm_, "[ Inlined store to keyed property");
7096
7097    Register scratch1 = VirtualFrame::scratch0();
7098    Register scratch2 = VirtualFrame::scratch1();
7099    Register scratch3 = r3;
7100
7101    // Counter will be decremented in the deferred code. Placed here to avoid
7102    // having it in the instruction stream below where patching will occur.
7103    __ IncrementCounter(&Counters::keyed_store_inline, 1,
7104                        scratch1, scratch2);
7105
7106
7107
7108    // Load the value, key and receiver from the stack.
7109    bool value_is_harmless = frame_->KnownSmiAt(0);
7110    if (wb_info == NEVER_NEWSPACE) value_is_harmless = true;
7111    bool key_is_smi = frame_->KnownSmiAt(1);
7112    Register value = frame_->PopToRegister();
7113    Register key = frame_->PopToRegister(value);
7114    VirtualFrame::SpilledScope spilled(frame_);
7115    Register receiver = r2;
7116    frame_->EmitPop(receiver);
7117
7118#ifdef DEBUG
7119    bool we_remembered_the_write_barrier = value_is_harmless;
7120#endif
7121
7122    // The deferred code expects value, key and receiver in registers.
7123    DeferredReferenceSetKeyedValue* deferred =
7124        new DeferredReferenceSetKeyedValue(value, key, receiver);
7125
7126    // Check that the value is a smi. As this inlined code does not set the
7127    // write barrier it is only possible to store smi values.
7128    if (!value_is_harmless) {
7129      // If the value is not likely to be a Smi then let's test the fixed array
7130      // for new space instead.  See below.
7131      if (wb_info == LIKELY_SMI) {
7132        __ tst(value, Operand(kSmiTagMask));
7133        deferred->Branch(ne);
7134#ifdef DEBUG
7135        we_remembered_the_write_barrier = true;
7136#endif
7137      }
7138    }
7139
7140    if (!key_is_smi) {
7141      // Check that the key is a smi.
7142      __ tst(key, Operand(kSmiTagMask));
7143      deferred->Branch(ne);
7144    }
7145
7146    // Check that the receiver is a heap object.
7147    __ tst(receiver, Operand(kSmiTagMask));
7148    deferred->Branch(eq);
7149
7150    // Check that the receiver is a JSArray.
7151    __ CompareObjectType(receiver, scratch1, scratch1, JS_ARRAY_TYPE);
7152    deferred->Branch(ne);
7153
7154    // Check that the key is within bounds. Both the key and the length of
7155    // the JSArray are smis. Use unsigned comparison to handle negative keys.
7156    __ ldr(scratch1, FieldMemOperand(receiver, JSArray::kLengthOffset));
7157    __ cmp(scratch1, key);
7158    deferred->Branch(ls);  // Unsigned less equal.
7159
7160    // Get the elements array from the receiver.
7161    __ ldr(scratch1, FieldMemOperand(receiver, JSObject::kElementsOffset));
7162    if (!value_is_harmless && wb_info != LIKELY_SMI) {
7163      Label ok;
7164      __ and_(scratch2, scratch1, Operand(ExternalReference::new_space_mask()));
7165      __ cmp(scratch2, Operand(ExternalReference::new_space_start()));
7166      __ tst(value, Operand(kSmiTagMask), ne);
7167      deferred->Branch(ne);
7168#ifdef DEBUG
7169      we_remembered_the_write_barrier = true;
7170#endif
7171    }
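    // Illustrative note (not part of the original source): the inlined store
    // below skips the write barrier, which is only safe if the value is a
    // smi or the elements array itself lives in new space; the check above
    // branches to the deferred IC call when neither holds.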
7172    // Check that the elements array is not a dictionary.
7173    __ ldr(scratch2, FieldMemOperand(scratch1, JSObject::kMapOffset));
7174    // The following instructions are part of the inlined keyed property store
7175    // code which can be patched.  Therefore the exact number of instructions
7176    // generated needs to be fixed, so the constant pool is blocked while
7177    // generating this code.
7178    { Assembler::BlockConstPoolScope block_const_pool(masm_);
7179#ifdef DEBUG
7180      Label check_inlined_codesize;
7181      masm_->bind(&check_inlined_codesize);
7182#endif
7183
7184      // Read the fixed array map from the constant pool (not from the root
7185      // array) so that the value can be patched.  When debugging, we patch this
7186      // comparison to always fail so that we will hit the IC call in the
7187      // deferred code which will allow the debugger to break for fast case
7188      // stores.
7189      __ mov(scratch3, Operand(Factory::fixed_array_map()));
7190      __ cmp(scratch2, scratch3);
7191      deferred->Branch(ne);
7192
7193      // Store the value.
7194      __ add(scratch1, scratch1,
7195             Operand(FixedArray::kHeaderSize - kHeapObjectTag));
7196      __ str(value,
7197             MemOperand(scratch1, key, LSL,
7198                        kPointerSizeLog2 - (kSmiTagSize + kSmiShiftSize)));
7199
7200      // Make sure that the expected number of instructions are generated.
7201      ASSERT_EQ(kInlinedKeyedStoreInstructionsAfterPatch,
7202                masm_->InstructionsGeneratedSince(&check_inlined_codesize));
7203    }
7204
7205    ASSERT(we_remembered_the_write_barrier);
7206
7207    deferred->BindExit();
7208  } else {
7209    frame()->CallKeyedStoreIC();
7210  }
7211}
7212
7213
7214#ifdef DEBUG
7215bool CodeGenerator::HasValidEntryRegisters() { return true; }
7216#endif
7217
7218
7219#undef __
7220#define __ ACCESS_MASM(masm)
7221
7222Handle<String> Reference::GetName() {
7223  ASSERT(type_ == NAMED);
7224  Property* property = expression_->AsProperty();
7225  if (property == NULL) {
7226    // Global variable reference treated as a named property reference.
7227    VariableProxy* proxy = expression_->AsVariableProxy();
7228    ASSERT(proxy->AsVariable() != NULL);
7229    ASSERT(proxy->AsVariable()->is_global());
7230    return proxy->name();
7231  } else {
7232    Literal* raw_name = property->key()->AsLiteral();
7233    ASSERT(raw_name != NULL);
7234    return Handle<String>(String::cast(*raw_name->handle()));
7235  }
7236}
7237
7238
7239void Reference::DupIfPersist() {
7240  if (persist_after_get_) {
7241    switch (type_) {
7242      case KEYED:
7243        cgen_->frame()->Dup2();
7244        break;
7245      case NAMED:
7246        cgen_->frame()->Dup();
7247        // Fall through.
7248      case UNLOADED:
7249      case ILLEGAL:
7250      case SLOT:
7251        // Do nothing.
7252        ;
7253    }
7254  } else {
7255    set_unloaded();
7256  }
7257}
7258
7259
7260void Reference::GetValue() {
7261  ASSERT(cgen_->HasValidEntryRegisters());
7262  ASSERT(!is_illegal());
7263  ASSERT(!cgen_->has_cc());
7264  MacroAssembler* masm = cgen_->masm();
7265  Property* property = expression_->AsProperty();
7266  if (property != NULL) {
7267    cgen_->CodeForSourcePosition(property->position());
7268  }
7269
7270  switch (type_) {
7271    case SLOT: {
7272      Comment cmnt(masm, "[ Load from Slot");
7273      Slot* slot = expression_->AsVariableProxy()->AsVariable()->AsSlot();
7274      ASSERT(slot != NULL);
7275      DupIfPersist();
7276      cgen_->LoadFromSlotCheckForArguments(slot, NOT_INSIDE_TYPEOF);
7277      break;
7278    }
7279
7280    case NAMED: {
7281      Variable* var = expression_->AsVariableProxy()->AsVariable();
7282      bool is_global = var != NULL;
7283      ASSERT(!is_global || var->is_global());
7284      Handle<String> name = GetName();
7285      DupIfPersist();
7286      cgen_->EmitNamedLoad(name, is_global);
7287      break;
7288    }
7289
7290    case KEYED: {
7291      ASSERT(property != NULL);
7292      DupIfPersist();
7293      cgen_->EmitKeyedLoad();
7294      cgen_->frame()->EmitPush(r0);
7295      break;
7296    }
7297
7298    default:
7299      UNREACHABLE();
7300  }
7301}
7302
7303
7304void Reference::SetValue(InitState init_state, WriteBarrierCharacter wb_info) {
7305  ASSERT(!is_illegal());
7306  ASSERT(!cgen_->has_cc());
7307  MacroAssembler* masm = cgen_->masm();
7308  VirtualFrame* frame = cgen_->frame();
7309  Property* property = expression_->AsProperty();
7310  if (property != NULL) {
7311    cgen_->CodeForSourcePosition(property->position());
7312  }
7313
7314  switch (type_) {
7315    case SLOT: {
7316      Comment cmnt(masm, "[ Store to Slot");
7317      Slot* slot = expression_->AsVariableProxy()->AsVariable()->AsSlot();
7318      cgen_->StoreToSlot(slot, init_state);
7319      set_unloaded();
7320      break;
7321    }
7322
7323    case NAMED: {
7324      Comment cmnt(masm, "[ Store to named Property");
7325      cgen_->EmitNamedStore(GetName(), false);
7326      frame->EmitPush(r0);
7327      set_unloaded();
7328      break;
7329    }
7330
7331    case KEYED: {
7332      Comment cmnt(masm, "[ Store to keyed Property");
7333      Property* property = expression_->AsProperty();
7334      ASSERT(property != NULL);
7335      cgen_->CodeForSourcePosition(property->position());
7336      cgen_->EmitKeyedStore(property->key()->type(), wb_info);
7337      frame->EmitPush(r0);
7338      set_unloaded();
7339      break;
7340    }
7341
7342    default:
7343      UNREACHABLE();
7344  }
7345}
7346
7347
7348const char* GenericBinaryOpStub::GetName() {
7349  if (name_ != NULL) return name_;
7350  const int len = 100;
7351  name_ = Bootstrapper::AllocateAutoDeletedArray(len);
7352  if (name_ == NULL) return "OOM";
7353  const char* op_name = Token::Name(op_);
7354  const char* overwrite_name;
7355  switch (mode_) {
7356    case NO_OVERWRITE: overwrite_name = "Alloc"; break;
7357    case OVERWRITE_RIGHT: overwrite_name = "OverwriteRight"; break;
7358    case OVERWRITE_LEFT: overwrite_name = "OverwriteLeft"; break;
7359    default: overwrite_name = "UnknownOverwrite"; break;
7360  }
7361
7362  OS::SNPrintF(Vector<char>(name_, len),
7363               "GenericBinaryOpStub_%s_%s%s_%s",
7364               op_name,
7365               overwrite_name,
7366               specialized_on_rhs_ ? "_ConstantRhs" : "",
7367               BinaryOpIC::GetName(runtime_operands_type_));
7368  return name_;
7369}
7370
7371
7372#undef __
7373
7374} }  // namespace v8::internal
7375
7376#endif  // V8_TARGET_ARCH_ARM
7377